1 #include <console/console.h>
2 #include <part/fallback_boot.h>
4 #include <boot/elf_boot.h>
5 #include <boot/coreboot_tables.h>
6 #include <ip_checksum.h>
7 #include <stream/read_bytes.h>
/* Maximum physical address we can use for the coreboot bounce buffer. */
18 extern unsigned char _ram_seg;
19 extern unsigned char _eram_seg;
24 struct segment *phdr_next;
25 struct segment *phdr_prev;
27 unsigned long s_memsz;
28 unsigned long s_offset;
29 unsigned long s_filesz;
32 struct verify_callback {
33 struct verify_callback *next;
34 int (*callback)(struct verify_callback *vcb,
35 Elf_ehdr *ehdr, Elf_phdr *phdr, struct segment *head);
36 unsigned long desc_offset;
37 unsigned long desc_addr;
40 struct ip_checksum_vcb {
41 struct verify_callback data;
42 unsigned short ip_checksum;
45 int verify_ip_checksum(
46 struct verify_callback *vcb,
47 Elf_ehdr *ehdr, Elf_phdr *phdr, struct segment *head)
49 struct ip_checksum_vcb *cb;
52 unsigned long checksum;
53 unsigned char buff[2], *n_desc;
54 cb = (struct ip_checksum_vcb *)vcb;
55 /* zero the checksum so it's value won't
56 * get in the way of verifying the checksum.
60 n_desc = (unsigned char *)(vcb->desc_addr);
61 memcpy(buff, n_desc, 2);
65 checksum = compute_ip_checksum(ehdr, sizeof(*ehdr));
66 bytes += sizeof(*ehdr);
67 checksum = add_ip_checksums(bytes, checksum,
68 compute_ip_checksum(phdr, ehdr->e_phnum*sizeof(*phdr)));
69 bytes += ehdr->e_phnum*sizeof(*phdr);
70 for(ptr = head->phdr_next; ptr != head; ptr = ptr->phdr_next) {
71 checksum = add_ip_checksums(bytes, checksum,
72 compute_ip_checksum((void *)ptr->s_addr, ptr->s_memsz));
73 bytes += ptr->s_memsz;
76 memcpy(n_desc, buff, 2);
78 if (checksum != cb->ip_checksum) {
79 printk_err("Image checksum: %04x != computed checksum: %04x\n",
80 cb->ip_checksum, checksum);
82 return checksum == cb->ip_checksum;
86 * Static executables all want to share the same addresses
87 * in memory because only a few addresses are reliably present on
88 * a machine, and implementing general relocation is hard.
91 * - Allocate a buffer twice the size of the coreboot image.
92 * - Anything that would overwrite coreboot copy into the lower half of
94 * - After loading an ELF image copy coreboot to the upper half of the
96 * - Then jump to the loaded image.
99 * - Nearly arbitrary standalone executables can be loaded.
100 * - Coreboot is preserved, so it can be returned to.
101 * - The implementation is still relatively simple,
 * and much simpler than the general case implemented in kexec.
106 static unsigned long get_bounce_buffer(struct lb_memory *mem)
108 unsigned long lb_size;
109 unsigned long mem_entries;
110 unsigned long buffer;
112 lb_size = (unsigned long)(&_eram_seg - &_ram_seg);
113 /* Double coreboot size so I have somewhere to place a copy to return to */
114 lb_size = lb_size + lb_size;
115 mem_entries = (mem->size - sizeof(*mem))/sizeof(mem->map[0]);
117 for(i = 0; i < mem_entries; i++) {
118 unsigned long mstart, mend;
120 unsigned long tbuffer;
121 if (mem->map[i].type != LB_MEM_RAM)
123 if (unpack_lb64(mem->map[i].start) > MAX_ADDR)
125 if (unpack_lb64(mem->map[i].size) < lb_size)
127 mstart = unpack_lb64(mem->map[i].start);
128 msize = MAX_ADDR - mstart +1;
129 if (msize > unpack_lb64(mem->map[i].size))
130 msize = unpack_lb64(mem->map[i].size);
131 mend = mstart + msize;
132 tbuffer = mend - lb_size;
133 if (tbuffer < buffer)
141 static struct verify_callback *process_elf_notes(
142 unsigned char *header,
143 unsigned long offset, unsigned long length)
145 struct verify_callback *cb_chain;
146 unsigned char *note, *end;
147 unsigned char *program, *version;
150 note = header + offset;
152 program = version = 0;
155 unsigned char *n_name, *n_desc, *next;
156 hdr = (Elf_Nhdr *)note;
157 n_name = note + sizeof(*hdr);
158 n_desc = n_name + ((hdr->n_namesz + 3) & ~3);
159 next = n_desc + ((hdr->n_descsz + 3) & ~3);
163 if ((hdr->n_namesz == sizeof(ELF_NOTE_BOOT)) &&
164 (memcmp(n_name, ELF_NOTE_BOOT, sizeof(ELF_NOTE_BOOT)) == 0)) {
165 switch(hdr->n_type) {
166 case EIN_PROGRAM_NAME:
167 if (n_desc[hdr->n_descsz -1] == 0) {
171 case EIN_PROGRAM_VERSION:
172 if (n_desc[hdr->n_descsz -1] == 0) {
176 case EIN_PROGRAM_CHECKSUM:
178 struct ip_checksum_vcb *cb;
179 cb = malloc(sizeof(*cb));
180 cb->ip_checksum = *((uint16_t *)n_desc);
181 cb->data.callback = verify_ip_checksum;
182 cb->data.next = cb_chain;
183 cb->data.desc_offset = n_desc - header;
184 cb_chain = &cb->data;
189 printk_spew("n_type: %08x n_name(%d): %-*.*s n_desc(%d): %-*.*s\n",
191 hdr->n_namesz, hdr->n_namesz, hdr->n_namesz, n_name,
192 hdr->n_descsz,hdr->n_descsz, hdr->n_descsz, n_desc);
195 if (program && version) {
196 printk_info("Loading %s version: %s\n",
202 static int valid_area(struct lb_memory *mem, unsigned long buffer,
203 unsigned long start, unsigned long len)
205 /* Check through all of the memory segments and ensure
206 * the segment that was passed in is completely contained
210 unsigned long end = start + len;
211 unsigned long mem_entries = (mem->size - sizeof(*mem))/sizeof(mem->map[0]);
213 /* See if I conflict with the bounce buffer */
218 /* Walk through the table of valid memory ranges and see if I
221 for(i = 0; i < mem_entries; i++) {
222 uint64_t mstart, mend;
224 mtype = mem->map[i].type;
225 mstart = unpack_lb64(mem->map[i].start);
226 mend = mstart + unpack_lb64(mem->map[i].size);
227 if ((mtype == LB_MEM_RAM) && (start < mend) && (end > mstart)) {
230 if ((mtype == LB_MEM_TABLE) && (start < mend) && (end > mstart)) {
231 printk_err("Payload is overwriting Coreboot tables.\n");
235 if (i == mem_entries) {
236 printk_err("No matching ram area found for range:\n");
237 printk_err(" [0x%016lx, 0x%016lx)\n", start, end);
238 printk_err("Ram areas\n");
239 for(i = 0; i < mem_entries; i++) {
240 uint64_t mstart, mend;
242 mtype = mem->map[i].type;
243 mstart = unpack_lb64(mem->map[i].start);
244 mend = mstart + unpack_lb64(mem->map[i].size);
245 printk_err(" [0x%016lx, 0x%016lx) %s\n",
246 (unsigned long)mstart,
248 (mtype == LB_MEM_RAM)?"RAM":"Reserved");
256 static void relocate_segment(unsigned long buffer, struct segment *seg)
258 /* Modify all segments that want to load onto coreboot
259 * to load onto the bounce buffer instead.
261 unsigned long lb_start = (unsigned long)&_ram_seg;
262 unsigned long lb_end = (unsigned long)&_eram_seg;
263 unsigned long start, middle, end;
265 printk_spew("lb: [0x%016lx, 0x%016lx)\n",
269 middle = start + seg->s_filesz;
270 end = start + seg->s_memsz;
271 /* I don't conflict with coreboot so get out of here */
272 if ((end <= lb_start) || (start >= lb_end))
275 printk_spew("segment: [0x%016lx, 0x%016lx, 0x%016lx)\n",
278 /* Slice off a piece at the beginning
279 * that doesn't conflict with coreboot.
281 if (start < lb_start) {
283 unsigned long len = lb_start - start;
284 new = malloc(sizeof(*new));
289 seg->s_offset += len;
290 if (seg->s_filesz > len) {
292 seg->s_filesz -= len;
297 /* Order by stream offset */
299 new->prev = seg->prev;
300 seg->prev->next = new;
302 /* Order by original program header order */
303 new->phdr_next = seg;
304 new->phdr_prev = seg->phdr_prev;
305 seg->phdr_prev->phdr_next = new;
306 seg->phdr_prev = new;
308 /* compute the new value of start */
311 printk_spew(" early: [0x%016lx, 0x%016lx, 0x%016lx)\n",
313 new->s_addr + new->s_filesz,
314 new->s_addr + new->s_memsz);
317 /* Slice off a piece at the end
318 * that doesn't conflict with coreboot
321 unsigned long len = lb_end - start;
323 new = malloc(sizeof(*new));
328 new->s_offset += len;
329 if (seg->s_filesz > len) {
331 new->s_filesz -= len;
335 /* Order by stream offset */
336 new->next = seg->next;
338 seg->next->prev = new;
340 /* Order by original program header order */
341 new->phdr_next = seg->phdr_next;
342 new->phdr_prev = seg;
343 seg->phdr_next->phdr_prev = new;
344 seg->phdr_next = new;
346 /* compute the new value of end */
349 printk_spew(" late: [0x%016lx, 0x%016lx, 0x%016lx)\n",
351 new->s_addr + new->s_filesz,
352 new->s_addr + new->s_memsz);
355 /* Now retarget this segment onto the bounce buffer */
356 seg->s_addr = buffer + (seg->s_addr - lb_start);
358 printk_spew(" bounce: [0x%016lx, 0x%016lx, 0x%016lx)\n",
360 seg->s_addr + seg->s_filesz,
361 seg->s_addr + seg->s_memsz);
365 static int build_elf_segment_list(
366 struct segment *head,
367 unsigned long bounce_buffer, struct lb_memory *mem,
368 Elf_phdr *phdr, int headers)
372 memset(head, 0, sizeof(*head));
373 head->phdr_next = head->phdr_prev = head;
374 head->next = head->prev = head;
375 for(i = 0; i < headers; i++) {
377 /* Ignore data that I don't need to handle */
378 if (phdr[i].p_type != PT_LOAD) {
379 printk_debug("Dropping non PT_LOAD segment\n");
382 if (phdr[i].p_memsz == 0) {
383 printk_debug("Dropping empty segment\n");
386 new = malloc(sizeof(*new));
387 new->s_addr = phdr[i].p_paddr;
388 new->s_memsz = phdr[i].p_memsz;
389 new->s_offset = phdr[i].p_offset;
390 new->s_filesz = phdr[i].p_filesz;
391 printk_debug("New segment addr 0x%lx size 0x%lx offset 0x%lx filesize 0x%lx\n",
392 new->s_addr, new->s_memsz, new->s_offset, new->s_filesz);
393 /* Clean up the values */
394 if (new->s_filesz > new->s_memsz) {
395 new->s_filesz = new->s_memsz;
397 printk_debug("(cleaned up) New segment addr 0x%lx size 0x%lx offset 0x%lx filesize 0x%lx\n",
398 new->s_addr, new->s_memsz, new->s_offset, new->s_filesz);
399 for(ptr = head->next; ptr != head; ptr = ptr->next) {
400 if (new->s_offset < ptr->s_offset)
403 /* Order by stream offset */
405 new->prev = ptr->prev;
406 ptr->prev->next = new;
408 /* Order by original program header order */
409 new->phdr_next = head;
410 new->phdr_prev = head->phdr_prev;
411 head->phdr_prev->phdr_next = new;
412 head->phdr_prev = new;
414 /* Verify the memory addresses in the segment are valid */
415 if (!valid_area(mem, bounce_buffer, new->s_addr, new->s_memsz))
418 /* Modify the segment to load onto the bounce_buffer if necessary.
420 relocate_segment(bounce_buffer, new);
427 static int load_elf_segments(
428 struct segment *head, unsigned char *header, unsigned long header_size)
430 unsigned long offset;
434 for(ptr = head->next; ptr != head; ptr = ptr->next) {
435 unsigned long start_offset;
436 unsigned long skip_bytes, read_bytes;
437 unsigned char *dest, *middle, *end;
438 byte_offset_t result;
439 printk_debug("Loading Segment: addr: 0x%016lx memsz: 0x%016lx filesz: 0x%016lx\n",
440 ptr->s_addr, ptr->s_memsz, ptr->s_filesz);
442 /* Compute the boundaries of the segment */
443 dest = (unsigned char *)(ptr->s_addr);
444 end = dest + ptr->s_memsz;
445 middle = dest + ptr->s_filesz;
446 start_offset = ptr->s_offset;
447 /* Ignore s_offset if I have a pure bss segment */
448 if (ptr->s_filesz == 0) {
449 start_offset = offset;
452 printk_spew("[ 0x%016lx, %016lx, 0x%016lx) <- %016lx\n",
454 (unsigned long)middle,
456 (unsigned long)start_offset);
458 /* Skip intial buffer unused bytes */
459 if (offset < header_size) {
460 if (start_offset < header_size) {
461 offset = start_offset;
463 offset = header_size;
467 /* Skip the unused bytes */
468 skip_bytes = start_offset - offset;
470 ((result = stream_skip(skip_bytes)) != skip_bytes)) {
471 printk_err("ERROR: Skip of %ld bytes skipped %ld bytes\n",
475 offset = start_offset;
477 /* Copy data from the initial buffer */
478 if (offset < header_size) {
480 if ((ptr->s_filesz + start_offset) > header_size) {
481 len = header_size - start_offset;
486 memcpy(dest, &header[start_offset], len);
490 /* Read the segment into memory */
491 read_bytes = middle - dest;
493 ((result = stream_read(dest, read_bytes)) != read_bytes)) {
494 printk_err("ERROR: Read of %ld bytes read %ld bytes...\n",
498 offset += ptr->s_filesz;
500 /* Zero the extra bytes between middle & end */
502 printk_debug("Clearing Segment: addr: 0x%016lx memsz: 0x%016lx\n",
503 (unsigned long)middle, end - middle);
505 /* Zero the extra bytes */
506 memset(middle, 0, end - middle);
514 static int verify_loaded_image(
515 struct verify_callback *vcb,
516 Elf_ehdr *ehdr, Elf_phdr *phdr,
523 for(; ok && vcb ; vcb = vcb->next) {
524 /* Find where the note is loaded */
525 /* The whole note must be loaded intact
526 * so an address of 0 for the descriptor is impossible
529 for(ptr = head->next; ptr != head; ptr = ptr->next) {
530 unsigned long desc_addr;
531 desc_addr = ptr->s_addr + vcb->desc_offset - ptr->s_offset;
532 if ((desc_addr >= ptr->s_addr) &&
533 (desc_addr < (ptr->s_addr + ptr->s_filesz))) {
534 vcb->desc_addr = desc_addr;
537 ok = vcb->callback(vcb, ehdr, phdr, head);
542 int elfload(struct lb_memory *mem,
543 unsigned char *header, unsigned long header_size)
549 struct verify_callback *cb_chain;
550 unsigned long bounce_buffer;
552 /* Find a bounce buffer so I can load to coreboot's current location */
553 bounce_buffer = get_bounce_buffer(mem);
554 if (!bounce_buffer) {
555 printk_err("Could not find a bounce buffer...\n");
559 ehdr = (Elf_ehdr *)header;
560 entry = (void *)(ehdr->e_entry);
561 phdr = (Elf_phdr *)(&header[ehdr->e_phoff]);
563 /* Digest elf note information... */
565 if ((phdr[0].p_type == PT_NOTE) &&
566 ((phdr[0].p_offset + phdr[0].p_filesz) < header_size)) {
567 cb_chain = process_elf_notes(header,
568 phdr[0].p_offset, phdr[0].p_filesz);
571 /* Preprocess the elf segments */
572 if (!build_elf_segment_list(&head,
573 bounce_buffer, mem, phdr, ehdr->e_phnum))
576 /* Load the segments */
577 if (!load_elf_segments(&head, header, header_size))
580 printk_spew("Loaded segments\n");
581 /* Verify the loaded image */
582 if (!verify_loaded_image(cb_chain, ehdr, phdr, &head))
585 printk_spew("verified segments\n");
586 /* Shutdown the stream device */
589 printk_spew("closed down stream\n");
590 /* Reset to booting from this image as late as possible */
593 printk_debug("Jumping to boot code at 0x%x\n", entry);
597 jmp_to_elf_entry(entry, bounce_buffer);
604 int elfboot(struct lb_memory *mem)
607 static unsigned char header[ELF_HEAD_SIZE];
613 printk_info("Welcome to %s, the open sourced starter.\n", BOOTLOADER);
614 printk_info("January 2002, Eric Biederman.\n");
615 printk_info("Version %s\n", BOOTLOADER_VERSION);
619 if (stream_init() < 0) {
620 printk_err("Could not initialize driver...\n");
624 /* Read in the initial ELF_HEAD_SIZE bytes */
625 if (stream_read(header, ELF_HEAD_SIZE) != ELF_HEAD_SIZE) {
626 printk_err("Read failed...\n");
629 /* Scan for an elf header */
631 for(i = 0; i < ELF_HEAD_SIZE - (sizeof(Elf_ehdr) + sizeof(Elf_phdr)); i+=16) {
632 ehdr = (Elf_ehdr *)(&header[i]);
633 if (memcmp(ehdr->e_ident, ELFMAG, 4) != 0) {
634 printk_debug("No header at %d\n", i);
637 printk_debug("Found ELF candidate at offset %d\n", i);
638 /* Sanity check the elf header */
639 if ((ehdr->e_type == ET_EXEC) &&
640 elf_check_arch(ehdr) &&
641 (ehdr->e_ident[EI_VERSION] == EV_CURRENT) &&
642 (ehdr->e_version == EV_CURRENT) &&
643 (ehdr->e_ehsize == sizeof(Elf_ehdr)) &&
644 (ehdr->e_phentsize = sizeof(Elf_phdr)) &&
645 (ehdr->e_phoff < (ELF_HEAD_SIZE - i)) &&
646 ((ehdr->e_phoff + (ehdr->e_phentsize * ehdr->e_phnum)) <=
647 (ELF_HEAD_SIZE - i))) {
653 printk_debug("header_offset is %d\n", header_offset);
654 if (header_offset == -1) {
658 printk_debug("Try to load at offset 0x%x\n", header_offset);
659 result = elfload(mem,
660 header + header_offset , ELF_HEAD_SIZE - header_offset);
663 /* Shutdown the stream device */
666 printk_err("Can not load ELF Image.\n");