2 * This file is part of the coreboot project.
4 * Copyright (C) 2003 Eric W. Biederman <ebiederm@xmission.com>
5 * Copyright (C) 2009 Ron Minnich <rminnich@gmail.com>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA, 02110-1301 USA
21 #include <arch/byteorder.h>
22 #include <console/console.h>
25 #include <boot/elf_boot.h>
26 #include <boot/coreboot_tables.h>
27 #include <ip_checksum.h>
/* Maximum physical address we can use for the coreboot bounce buffer. */
/* from coreboot_ram.ld: */
/* Linker-provided symbols marking the RAM copy of coreboot itself. */
extern unsigned char _ram_seg;
extern unsigned char _eram_seg;

/* Physical address range [lb_start, lb_end) currently occupied by the
 * coreboot image; segments loading into this window must be bounced. */
static const unsigned long lb_start = (unsigned long)&_ram_seg;
static const unsigned long lb_end = (unsigned long)&_eram_seg;
	struct segment *phdr_next;	/* next segment in original program-header order */
	struct segment *phdr_prev;	/* previous segment in program-header order */
	unsigned long s_dstaddr;	/* destination (load) physical address */
	unsigned long s_srcaddr;	/* source address of the data inside the CBFS image */
	unsigned long s_memsz;		/* size the segment occupies in memory */
	unsigned long s_filesz;		/* size of the (possibly compressed) file data */
/* The bounce-buffer strategy:
 * Static executables all want to share the same addresses
 * in memory because only a few addresses are reliably present on
 * a machine, and implementing general relocation is hard.
 *
 * - Allocate a buffer the size of the coreboot image plus additional
 *   space for anything the payload would overwrite.
 * - Anything that would overwrite coreboot copy into the lower part of
 *   the buffer.
 * - After loading an ELF image copy coreboot to the top of the buffer.
 * - Then jump to the loaded image.
 *
 * Benefits:
 * - Nearly arbitrary standalone executables can be loaded.
 * - Coreboot is preserved, so it can be returned to.
 * - The implementation is still relatively simple,
 *   and much simpler than the general case implemented in kexec.
 */

/* Location and size of the chosen bounce buffer; filled in by
 * get_bounce_buffer() and consumed by load_self_segments()/selfboot(). */
static unsigned long bounce_size, bounce_buffer;
/* Search the coreboot memory table for the highest region of plain RAM
 * below MAX_ADDR that can hold both the payload's bounce data (req_size)
 * and a full copy of coreboot, and record the result in the
 * bounce_buffer / bounce_size globals.
 *
 * @param mem      coreboot memory map table to scan
 * @param req_size bytes needed for bounced payload data, on top of the
 *                 coreboot image size itself
 */
static void get_bounce_buffer(struct lb_memory *mem, unsigned long req_size)
	unsigned long lb_size;
	unsigned long mem_entries;
	lb_size = lb_end - lb_start;
	/* Plus coreboot size so I have somewhere
	 * to place a copy to return to.
	 */
	lb_size = req_size + lb_size;
	mem_entries = (mem->size - sizeof(*mem)) / sizeof(mem->map[0]);
	/* Scan every table entry; keep the highest suitable candidate. */
	for(i = 0; i < mem_entries; i++) {
		unsigned long mstart, mend;
		unsigned long tbuffer;
		/* Skip entries that are not RAM, start above MAX_ADDR,
		 * or are too small to hold lb_size bytes. */
		if (mem->map[i].type != LB_MEM_RAM)
		if (unpack_lb64(mem->map[i].start) > MAX_ADDR)
		if (unpack_lb64(mem->map[i].size) < lb_size)
		mstart = unpack_lb64(mem->map[i].start);
		/* Clamp the usable size so the buffer never reaches past
		 * MAX_ADDR. */
		msize = MAX_ADDR - mstart +1;
		if (msize > unpack_lb64(mem->map[i].size))
			msize = unpack_lb64(mem->map[i].size);
		mend = mstart + msize;
		/* Candidate sits at the very top of the usable window. */
		tbuffer = mend - lb_size;
		if (tbuffer < buffer)
	bounce_buffer = buffer;
	bounce_size = req_size;
/* Check that the destination range [start, start + len) lies entirely
 * inside a single usable RAM entry of the memory table and does not
 * collide with the coreboot tables or the bounce buffer at 'buffer'.
 * On failure the requested range and the whole memory map are logged.
 *
 * @return non-zero when the area is safe to load into, 0 otherwise
 */
static int valid_area(struct lb_memory *mem, unsigned long buffer,
	unsigned long start, unsigned long len)
	/* Check through all of the memory segments and ensure
	 * the segment that was passed in is completely contained
	 * in one of them.
	 */
	unsigned long end = start + len;
	unsigned long mem_entries = (mem->size - sizeof(*mem)) /
	/* See if I conflict with the bounce buffer */
	/* Walk through the table of valid memory ranges and see if I
	 * have a match.
	 */
	for(i = 0; i < mem_entries; i++) {
		uint64_t mstart, mend;
		mtype = mem->map[i].type;
		mstart = unpack_lb64(mem->map[i].start);
		mend = mstart + unpack_lb64(mem->map[i].size);
		/* Fully contained in a RAM entry: the area is usable. */
		if ((mtype == LB_MEM_RAM) && (start >= mstart) && (end < mend)) {
		/* Overlapping the coreboot tables would corrupt the data
		 * the payload itself needs — refuse. */
		if ((mtype == LB_MEM_TABLE) && (start >= mstart) && (end < mend)) {
			printk(BIOS_ERR, "Payload is overwriting coreboot tables.\n");
	/* Nothing matched: report the failing range and dump the map so
	 * the user can see why the payload does not fit. */
	if (i == mem_entries) {
		printk(BIOS_ERR, "No matching ram area found for range:\n");
		printk(BIOS_ERR, " [0x%016lx, 0x%016lx)\n", start, end);
		printk(BIOS_ERR, "Ram areas\n");
		for(i = 0; i < mem_entries; i++) {
			uint64_t mstart, mend;
			mtype = mem->map[i].type;
			mstart = unpack_lb64(mem->map[i].start);
			mend = mstart + unpack_lb64(mem->map[i].size);
			printk(BIOS_ERR, " [0x%016lx, 0x%016lx) %s\n",
				(unsigned long)mstart,
				(mtype == LB_MEM_RAM)?"RAM":"Reserved");
173 static int overlaps_coreboot(struct segment *seg)
175 unsigned long start, end;
176 start = seg->s_dstaddr;
177 end = start + seg->s_memsz;
178 return !((end <= lb_start) || (start >= lb_end));
/* Redirect a segment that overlaps coreboot onto the bounce buffer.
 * Uncompressed segments are first split so that the parts that do NOT
 * overlap coreboot keep loading directly to their real destination;
 * only the overlapping middle is retargeted into 'buffer'.
 *
 * @param buffer base address of the bounce buffer (1:1 image of
 *               [lb_start, lb_end))
 * @param seg    segment to examine and possibly split/retarget
 */
static int relocate_segment(unsigned long buffer, struct segment *seg)
	/* Modify all segments that want to load onto coreboot
	 * to load onto the bounce buffer instead.
	 */
	/* ret: 1 : A new segment is inserted before the seg.
	 *      0 : A new segment is inserted after the seg, or no new one.
	 */
	unsigned long start, middle, end, ret = 0;
	printk(BIOS_SPEW, "lb: [0x%016lx, 0x%016lx)\n",
	/* I don't conflict with coreboot so get out of here */
	if (!overlaps_coreboot(seg))
	start = seg->s_dstaddr;
	middle = start + seg->s_filesz;
	end = start + seg->s_memsz;
	printk(BIOS_SPEW, "segment: [0x%016lx, 0x%016lx, 0x%016lx)\n",
	/* Splitting is only possible when the data is uncompressed: a
	 * compressed stream cannot be cut at an address boundary. */
	if (seg->compression == CBFS_COMPRESS_NONE) {
		/* Slice off a piece at the beginning
		 * that doesn't conflict with coreboot.
		 */
		if (start < lb_start) {
			unsigned long len = lb_start - start;
			new = malloc(sizeof(*new));
			/* The remaining (overlapping) part moves forward by
			 * 'len' in both destination and source. */
			seg->s_dstaddr += len;
			seg->s_srcaddr += len;
			if (seg->s_filesz > len) {
				seg->s_filesz -= len;
			/* Order by stream offset */
			new->prev = seg->prev;
			seg->prev->next = new;
			/* Order by original program header order */
			new->phdr_next = seg;
			new->phdr_prev = seg->phdr_prev;
			seg->phdr_prev->phdr_next = new;
			seg->phdr_prev = new;
			/* compute the new value of start */
			start = seg->s_dstaddr;
			printk(BIOS_SPEW, " early: [0x%016lx, 0x%016lx, 0x%016lx)\n",
				new->s_dstaddr + new->s_filesz,
				new->s_dstaddr + new->s_memsz);
		/* Slice off a piece at the end
		 * that doesn't conflict with coreboot
		 */
			unsigned long len = lb_end - start;
			new = malloc(sizeof(*new));
			/* The tail segment starts where coreboot ends. */
			new->s_dstaddr += len;
			new->s_srcaddr += len;
			if (seg->s_filesz > len) {
				new->s_filesz -= len;
			/* Order by stream offset */
			new->next = seg->next;
			seg->next->prev = new;
			/* Order by original program header order */
			new->phdr_next = seg->phdr_next;
			new->phdr_prev = seg;
			seg->phdr_next->phdr_prev = new;
			seg->phdr_next = new;
			printk(BIOS_SPEW, " late: [0x%016lx, 0x%016lx, 0x%016lx)\n",
				new->s_dstaddr + new->s_filesz,
				new->s_dstaddr + new->s_memsz);
	/* Now retarget this segment onto the bounce buffer */
	/* sort of explanation: the buffer is a 1:1 mapping to coreboot.
	 * so you will make the dstaddr be this buffer, and it will get copied
	 * later to where coreboot lives.
	 */
	seg->s_dstaddr = buffer + (seg->s_dstaddr - lb_start);
	printk(BIOS_SPEW, " bounce: [0x%016lx, 0x%016lx, 0x%016lx)\n",
		seg->s_dstaddr + seg->s_filesz,
		seg->s_dstaddr + seg->s_memsz);
/* Walk the CBFS payload's segment table and build a circular doubly
 * linked list of struct segment, ordered by address (list head 'head'),
 * while extracting the entry point.
 *
 * @param head    list head; initialized here and filled in
 * @param mem     coreboot memory table (passed through for the caller)
 * @param payload CBFS payload whose segment records are parsed
 * @param entry   out-parameter: payload entry point address
 */
static int build_self_segment_list(
	struct segment *head,
	struct lb_memory *mem,
	struct cbfs_payload *payload, u32 *entry)
	struct cbfs_payload_segment *segment, *first_segment;
	/* Start with an empty circular list: every link points at head. */
	memset(head, 0, sizeof(*head));
	head->phdr_next = head->phdr_prev = head;
	head->next = head->prev = head;
	first_segment = segment = &payload->segments;
	printk(BIOS_DEBUG, "Loading segment from rom address 0x%p\n", segment);
	switch(segment->type) {
	case PAYLOAD_SEGMENT_PARAMS:
		printk(BIOS_DEBUG, " parameter section (skipped)\n");
	case PAYLOAD_SEGMENT_CODE:
	case PAYLOAD_SEGMENT_DATA:
		printk(BIOS_DEBUG, " %s (compression=%x)\n",
			segment->type == PAYLOAD_SEGMENT_CODE ? "code" : "data",
			ntohl(segment->compression));
		new = malloc(sizeof(*new));
		/* CBFS stores fields big-endian; convert on read. */
		new->s_dstaddr = ntohll(segment->load_addr);
		new->s_memsz = ntohl(segment->mem_len);
		new->compression = ntohl(segment->compression);
		/* Source data lives at 'offset' bytes from the start of the
		 * segment table inside the ROM image. */
		new->s_srcaddr = (u32) ((unsigned char *)first_segment)
			+ ntohl(segment->offset);
		new->s_filesz = ntohl(segment->len);
		printk(BIOS_DEBUG, " New segment dstaddr 0x%lx memsize 0x%lx srcaddr 0x%lx filesize 0x%lx\n",
			new->s_dstaddr, new->s_memsz, new->s_srcaddr, new->s_filesz);
		/* Clean up the values */
		/* File data must never exceed the in-memory size. */
		if (new->s_filesz > new->s_memsz) {
			new->s_filesz = new->s_memsz;
		printk(BIOS_DEBUG, " (cleaned up) New segment addr 0x%lx size 0x%lx offset 0x%lx filesize 0x%lx\n",
			new->s_dstaddr, new->s_memsz, new->s_srcaddr, new->s_filesz);
	case PAYLOAD_SEGMENT_BSS:
		/* BSS: memory to reserve and zero, no file data. */
		printk(BIOS_DEBUG, " BSS 0x%p (%d byte)\n", (void *)
			(intptr_t)ntohll(segment->load_addr),
			ntohl(segment->mem_len));
		new = malloc(sizeof(*new));
		new->s_dstaddr = ntohll(segment->load_addr);
		new->s_memsz = ntohl(segment->mem_len);
	case PAYLOAD_SEGMENT_ENTRY:
		printk(BIOS_DEBUG, " Entry Point 0x%p\n", (void *) ntohl((u32) segment->load_addr));
		*entry = ntohll(segment->load_addr);
		/* Per definition, a payload always has the entry point
		 * as last segment. Thus, we use the occurrence of the
		 * entry point as break condition for the loop.
		 * Can we actually just look at the number of section?
		 */
		/* We found something that we don't know about. Throw
		 * hands into the sky and run away!
		 */
		printk(BIOS_EMERG, "Bad segment type %x\n", segment->type);
	/* Insert the new segment into the address-ordered list: find the
	 * first element it should precede. */
	// FIXME: Explain what this is
	for(ptr = head->next; ptr != head; ptr = ptr->next) {
		if (new->s_srcaddr < ntohll(segment->load_addr))
	/* Order by stream offset */
	new->prev = ptr->prev;
	ptr->prev->next = new;
	/* Order by original program header order */
	new->phdr_next = head;
	new->phdr_prev = head->phdr_prev;
	head->phdr_prev->phdr_next = new;
	head->phdr_prev = new;
/* Load every segment on the list into memory: find a bounce buffer,
 * validate all destination ranges, then copy/decompress each segment,
 * redirecting anything that would overwrite coreboot into the bounce
 * buffer and moving non-overlapping pieces back to their true homes.
 *
 * @return 0 on failure (no bounce buffer, invalid area, or bad data)
 */
static int load_self_segments(
	struct segment *head,
	struct lb_memory *mem,
	struct cbfs_payload *payload)
	unsigned long bounce_high = lb_end;
	/* Pass 1: find how far above lb_start any coreboot-overlapping
	 * segment reaches, so the bounce buffer covers the whole span. */
	for(ptr = head->next; ptr != head; ptr = ptr->next) {
		if (!overlaps_coreboot(ptr)) continue;
		if (ptr->s_dstaddr + ptr->s_memsz > bounce_high)
			bounce_high = ptr->s_dstaddr + ptr->s_memsz;
	get_bounce_buffer(mem, bounce_high - lb_start);
	if (!bounce_buffer) {
		printk(BIOS_ERR, "Could not find a bounce buffer...\n");
	/* Pass 2: validate every destination range before touching RAM. */
	for(ptr = head->next; ptr != head; ptr = ptr->next) {
		/* Verify the memory addresses in the segment are valid */
		if (!valid_area(mem, bounce_buffer, ptr->s_dstaddr, ptr->s_memsz))
	/* Pass 3: actually place the data. */
	for(ptr = head->next; ptr != head; ptr = ptr->next) {
		unsigned char *dest, *src;
		printk(BIOS_DEBUG, "Loading Segment: addr: 0x%016lx memsz: 0x%016lx filesz: 0x%016lx\n",
			ptr->s_dstaddr, ptr->s_memsz, ptr->s_filesz);
		/* Modify the segment to load onto the bounce_buffer if necessary.
		 */
		if (relocate_segment(bounce_buffer, ptr)) {
			/* A segment was inserted before ptr; step back so
			 * the loop's ptr->next revisits it. */
			ptr = (ptr->prev)->prev;
		printk(BIOS_DEBUG, "Post relocation: addr: 0x%016lx memsz: 0x%016lx filesz: 0x%016lx\n",
			ptr->s_dstaddr, ptr->s_memsz, ptr->s_filesz);
		/* Compute the boundaries of the segment */
		dest = (unsigned char *)(ptr->s_dstaddr);
		src = (unsigned char *)(ptr->s_srcaddr);
		/* Copy data from the initial buffer */
		unsigned char *middle, *end;
		switch(ptr->compression) {
		case CBFS_COMPRESS_LZMA: {
			printk(BIOS_DEBUG, "using LZMA\n");
			/* ulzma returns the decompressed length, 0 on error. */
			len = ulzma(src, dest);
			if (!len) /* Decompression Error. */
#if CONFIG_COMPRESSED_PAYLOAD_NRV2B
		case CBFS_COMPRESS_NRV2B: {
			printk(BIOS_DEBUG, "using NRV2B\n");
			/* Local prototype; decompressor lives elsewhere. */
			unsigned long unrv2b(u8 *src, u8 *dst, unsigned long *ilen_p);
			len = unrv2b(src, dest, &tmp);
		case CBFS_COMPRESS_NONE: {
			printk(BIOS_DEBUG, "it's not compressed!\n");
			memcpy(dest, src, len);
		/* Unknown compression type: refuse to load the payload. */
			printk(BIOS_INFO, "CBFS: Unknown compression type %d\n", ptr->compression);
		end = dest + ptr->s_memsz;
		printk(BIOS_SPEW, "[ 0x%08lx, %08lx, 0x%08lx) <- %08lx\n",
			(unsigned long)middle,
		/* Zero the extra bytes between middle & end */
		printk(BIOS_DEBUG, "Clearing Segment: addr: 0x%016lx memsz: 0x%016lx\n",
			(unsigned long)middle, (unsigned long)(end - middle));
		/* Zero the extra bytes */
		memset(middle, 0, end - middle);
		/* Copy the data that's outside the area that shadows coreboot_ram */
		printk(BIOS_DEBUG, "dest %p, end %p, bouncebuffer %lx\n", dest, end, bounce_buffer);
		if ((unsigned long)end > bounce_buffer) {
			/* Part of this segment landed in the bounce buffer;
			 * move the pieces that do NOT shadow coreboot back
			 * to their real destinations. */
			if ((unsigned long)dest < bounce_buffer) {
				unsigned char *from = dest;
				unsigned char *to = (unsigned char*)(lb_start-(bounce_buffer-(unsigned long)dest));
				unsigned long amount = bounce_buffer-(unsigned long)dest;
				printk(BIOS_DEBUG, "move prefix around: from %p, to %p, amount: %lx\n", from, to, amount);
				memcpy(to, from, amount);
			if ((unsigned long)end > bounce_buffer + (lb_end - lb_start)) {
				unsigned long from = bounce_buffer + (lb_end - lb_start);
				unsigned long to = lb_end;
				unsigned long amount = (unsigned long)end - from;
				printk(BIOS_DEBUG, "move suffix around: from %lx, to %lx, amount: %lx\n", from, to, amount);
				memcpy((char*)to, (char*)from, amount);
/* Parse, load, and start a CBFS payload, keeping coreboot recoverable
 * through the bounce buffer.
 *
 * @return 0 only on failure; on success control transfers to the payload
 */
static int selfboot(struct lb_memory *mem, struct cbfs_payload *payload)
	/* Preprocess the self segments */
	if (!build_self_segment_list(&head, mem, payload, &entry))
	/* Load the segments */
	if (!load_self_segments(&head, mem, payload))
	printk(BIOS_SPEW, "Loaded segments\n");
	/* Reset to booting from this image as late as possible */
	printk(BIOS_DEBUG, "Jumping to boot code at %x\n", entry);
	post_code(POST_ENTER_ELF_BOOT);
	/* Hand control to the payload; the bounce buffer lets coreboot be
	 * restored if the payload ever returns. */
	jmp_to_elf_entry((void*)entry, bounce_buffer, bounce_size);
/* Look up the named payload in CBFS and boot it via selfboot().
 * Reaching the final printk means the payload failed to start.
 *
 * @param lb_mem coreboot memory table handed to the loader
 * @param name   CBFS file name of the payload to boot
 */
void *cbfs_load_payload(struct lb_memory *lb_mem, const char *name)
	struct cbfs_payload *payload;
	payload = (struct cbfs_payload *)cbfs_find_file(name, CBFS_TYPE_PAYLOAD);
	printk(BIOS_DEBUG, "Got a payload\n");
	selfboot(lb_mem, payload);
	/* selfboot() only comes back on error. */
	printk(BIOS_EMERG, "SELFBOOT RETURNED!\n");