xref: /illumos-gate/usr/src/boot/efi/loader/copy.c (revision f334afcf)
/*
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Benno Rice under sponsorship from
 * the FreeBSD Foundation.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/multiboot2.h>

#include <stand.h>
#include <bootstrap.h>

#include <efi.h>
#include <efilib.h>

#include "loader_efi.h"

/*
 * Verify the address is not in use by existing modules.
 * Returns addr when the proposed range is free, or 0 when it overlaps a
 * module that already has an address assigned.
 */
static vm_offset_t
addr_verify(multiboot_tag_module_t *module, vm_offset_t addr, size_t size)
{
        vm_offset_t start, end;

        for (; module->mb_type == MULTIBOOT_TAG_TYPE_MODULE;
            module = (multiboot_tag_module_t *)
            roundup((uintptr_t)module + module->mb_size, MULTIBOOT_TAG_ALIGN)) {

                start = module->mb_mod_start;
                end = module->mb_mod_end;

                /* Does this module have an address assigned? */
                if (start == 0)
                        continue;

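                /*
                 * Conflict if addr falls inside this module, or if this
                 * module starts inside the proposed [addr, addr + size]
                 * range.
                 */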
                if ((start <= addr) && (end >= addr)) {
                        return (0);
                }
                if ((start >= addr) && (start <= addr + size)) {
                        return (0);
                }
        }
        return (addr);
}

/*
 * Find a memory map entry above 1MB that can hold size bytes starting at addr.
 */
static vm_offset_t
memmap_find(EFI_MEMORY_DESCRIPTOR *map, size_t count, UINTN dsize,
    vm_offset_t addr, size_t size)
{
        int i;

        for (i = 0; i < count; i++, map = NextMemoryDescriptor(map, dsize)) {

                if (map->Type != EfiConventionalMemory)
                        continue;

                /* We do not want an address below 1MB. */
                if (map->PhysicalStart < 0x100000)
                        continue;

                /* Do we fit into the current entry? */
                if ((map->PhysicalStart <= addr) &&
                    (map->PhysicalStart +
                    (map->NumberOfPages << EFI_PAGE_SHIFT) >= addr + size)) {
                        return (addr);
                }

                /* Do we fit into a new entry? */
                if ((map->PhysicalStart > addr) &&
                    (map->NumberOfPages >= EFI_SIZE_TO_PAGES(size))) {
                        return (map->PhysicalStart);
                }
        }
        return (0);
}

/*
 * Find a usable address for loading. The address for the kernel is fixed,
 * as it is determined by the kernel linker map (dboot PT_LOAD address).
 * For modules we need to consult the memory map: the module address has to
 * be aligned to a page boundary and the range has to fit into a map entry.
 */
vm_offset_t
efi_physaddr(multiboot_tag_module_t *module, vm_offset_t addr,
    EFI_MEMORY_DESCRIPTOR *map, size_t count, UINTN dsize, vm_offset_t laddr,
    size_t size)
{
        multiboot_tag_module_t *mp;
        vm_offset_t off;

        if (addr == 0)
                return (addr);

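        /*
         * Walk the candidate addresses: test the proposal against the
         * memory map and the already placed modules; when it does not fit,
         * advance past the next placed module and try again.
         */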
        mp = module;
        do {
                off = addr;
                /* Test proposed address */
                off = memmap_find(map, count, dsize, off, size);
                if (off != 0)
                        off = addr_verify(module, off, size);
                if (off != 0)
                        break;

                /* The module list is exhausted */
                if (mp->mb_type != MULTIBOOT_TAG_TYPE_MODULE)
                        break;

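                /*
                 * If this module is already placed, propose the first
                 * properly aligned address past its end before moving on
                 * to the next module.
                 */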
                if (mp->mb_mod_start != 0) {
                        addr = roundup2(mp->mb_mod_end + 1,
                            MULTIBOOT_MOD_ALIGN);
                }
                mp = (multiboot_tag_module_t *)
                    roundup((uintptr_t)mp + mp->mb_size, MULTIBOOT_TAG_ALIGN);
        } while (off == 0);

        /*
         * We failed to find a suitable address below 4GB; try to use the
         * load address instead.
         */
        if (off == 0 || off >= UINT32_MAX)
                off = addr_verify(module, laddr, size);

        return (off);
}

/*
 * Allocate pages for data to be loaded. As we can not expect AllocateAddress
 * to succeed, we allocate using AllocateMaxAddress from the 4GB limit.
 * The 4GB limit is used because some 64-bit systems are reported to have
 * issues with memory above 4GB. It should be quite enough anyhow.
 * Note: AllocateMaxAddress will only make sure we are below the specified
 * address; we can not make any assumptions about the actual location or
 * about the order of the allocated blocks.
 */
vm_offset_t
efi_loadaddr(uint_t type, void *data, vm_offset_t addr)
{
        EFI_PHYSICAL_ADDRESS paddr;
        struct stat st;
        size_t size;
        uint64_t pages;
        EFI_STATUS status;

        if (addr == 0)
                return (addr);  /* nothing to do */

        if (type == LOAD_ELF)
                return (0);     /* not supported */

        if (type == LOAD_MEM)
                size = *(size_t *)data;
        else {
                stat(data, &st);
                size = st.st_size;
        }

        /* AllocatePages can not allocate 0 pages. */
        if (size == 0)
                return (addr);

        pages = EFI_SIZE_TO_PAGES(size);
        /* 4GB upper limit */
        paddr = UINT32_MAX;

        status = BS->AllocatePages(AllocateMaxAddress, EfiLoaderData,
            pages, &paddr);

        if (EFI_ERROR(status)) {
                printf("failed to allocate %zu bytes for staging area: %lu\n",
                    size, DECODE_ERROR(status));
                return (0);
        }

        return (paddr);
}

void
efi_free_loadaddr(vm_offset_t addr, size_t pages)
{
        (void) BS->FreePages(addr, pages);
}

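/*
 * The loader addresses memory 1:1 while boot services are active, so
 * translating an address for dereferencing is a direct cast.
 */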
void *
efi_translate(vm_offset_t ptr)
{
        return ((void *)ptr);
}

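/*
 * efi_copyin(), efi_copyout() and efi_readin() move data between the loader
 * and the staging area. The staging area is allocated below 4GB (see
 * efi_loadaddr() above), so ranges that wrap around or extend past
 * UINT32_MAX are rejected with EFBIG.
 */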
ssize_t
efi_copyin(const void *src, vm_offset_t dest, const size_t len)
{
        if (dest + len >= dest && (uint64_t)dest + len <= UINT32_MAX) {
                bcopy(src, (void *)(uintptr_t)dest, len);
                return (len);
        } else {
                errno = EFBIG;
                return (-1);
        }
}

ssize_t
efi_copyout(const vm_offset_t src, void *dest, const size_t len)
{
        if (src + len >= src && (uint64_t)src + len <= UINT32_MAX) {
                bcopy((void *)(uintptr_t)src, dest, len);
                return (len);
        } else {
                errno = EFBIG;
                return (-1);
        }
}

ssize_t
efi_readin(const int fd, vm_offset_t dest, const size_t len)
{
        if (dest + len >= dest && (uint64_t)dest + len <= UINT32_MAX) {
                return (read(fd, (void *)dest, len));
        } else {
                errno = EFBIG;
                return (-1);
        }
}

/*
 * Relocate chunks and return a pointer to the MBI.
 * This function is relocated before being called and we only have
 * memmove() available, as most likely moving chunks into the final
 * destination will destroy the rest of the loader code.
 *
 * In the safe area we have the relocator data, multiboot_tramp,
 * efi_copy_finish, memmove and the stack.
 */
multiboot2_info_header_t *
efi_copy_finish(struct relocator *relocator)
{
        multiboot2_info_header_t *mbi;
        struct chunk *chunk, *c;
        struct chunk_head *head;
        bool done = false;
        void (*move)(void *s1, const void *s2, size_t n);

        move = (void *)relocator->rel_memmove;

        /* The MBI is the last chunk in the list. */
        head = &relocator->rel_chunk_head;
        chunk = STAILQ_LAST(head, chunk, chunk_next);
        mbi = (multiboot2_info_header_t *)(uintptr_t)chunk->chunk_paddr;

        /*
         * If chunk paddr == vaddr, the chunk is in place.
         * If all chunks are in place, we are done.
         */
        chunk = NULL;
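        /*
         * A NULL chunk makes STAILQ_FOREACH_FROM() below start from the
         * head of the list, i.e. it requests a full rescan.
         */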
        while (!done) {
                /* Advance to the next item in the list. */
                if (chunk != NULL)
                        chunk = STAILQ_NEXT(chunk, chunk_next);

                /*
                 * First check if we have anything to do.
                 * We set chunk to NULL every time we move the data.
                 */
                done = true;
                STAILQ_FOREACH_FROM(chunk, head, chunk_next) {
                        if (chunk->chunk_paddr != chunk->chunk_vaddr) {
                                done = false;
                                break;
                        }
                }
                if (done)
                        break;

                /*
                 * Make sure the destination is not conflicting
                 * with the rest of the chunks.
                 */
                STAILQ_FOREACH(c, head, chunk_next) {
                        /* Moved already? */
                        if (c->chunk_vaddr == c->chunk_paddr)
                                continue;

                        /* Is it the chunk itself? */
                        if (c->chunk_vaddr == chunk->chunk_vaddr &&
                            c->chunk_size == chunk->chunk_size)
                                continue;

                        /*
                         * Check for overlaps: does either end of c's source
                         * range fall inside chunk's destination range?
                         */
                        if ((c->chunk_vaddr >= chunk->chunk_paddr &&
                            c->chunk_vaddr <=
                            chunk->chunk_paddr + chunk->chunk_size) ||
                            (c->chunk_vaddr + c->chunk_size >=
                            chunk->chunk_paddr &&
                            c->chunk_vaddr + c->chunk_size <=
                            chunk->chunk_paddr + chunk->chunk_size)) {
                                break;
                        }
                }
                /*
                 * If there are no conflicts, move the chunk into place and
                 * restart the scan.
                 */
                if (c == NULL) {
                        move((void *)(uintptr_t)chunk->chunk_paddr,
                            (void *)(uintptr_t)chunk->chunk_vaddr,
                            chunk->chunk_size);
                        chunk->chunk_vaddr = chunk->chunk_paddr;
                        chunk = NULL;
                        continue;
                }
        }

        return (mbi);
}