1 #include <console/console.h>
2 #include <part/fallback_boot.h>
4 #include <boot/elf_boot.h>
5 #include <boot/linuxbios_tables.h>
6 #include <ip_checksum.h>
7 #include <stream/read_bytes.h>
12 /* Maximum physical address we can use for the linuxBIOS bounce buffer. */
/* Linker-provided symbols bounding the running linuxBIOS image in RAM;
 * their difference is the image size used to size the bounce buffer below. */
18 extern unsigned char _ram_seg;
19 extern unsigned char _eram_seg;
/* NOTE(review): interior of `struct segment` — the struct header, the
 * s_addr field, and the stream-order next/prev links referenced elsewhere
 * in this file are outside the visible chunk. */
24 struct segment *phdr_next; /* links in original program-header order */
25 struct segment *phdr_prev;
27 unsigned long s_memsz; /* bytes the segment occupies in memory */
28 unsigned long s_offset; /* byte offset of the segment data in the input stream */
29 unsigned long s_filesz; /* bytes of file data backing the segment (rest is bss) */
/* One node in a chain of post-load verification callbacks.  Nodes are
 * created while parsing ELF boot notes and invoked after all segments
 * have been loaded (see verify_loaded_image). */
32 struct verify_callback {
33 struct verify_callback *next;
34 int (*callback)(struct verify_callback *vcb, /* returns nonzero on success */
35 Elf_ehdr *ehdr, Elf_phdr *phdr, struct segment *head);
36 unsigned long desc_offset; /* offset of the note descriptor in the input stream */
37 unsigned long desc_addr; /* address the descriptor was loaded to (filled in later) */
/* Callback state for EIN_PROGRAM_CHECKSUM notes: embeds the generic
 * verify_callback plus the expected image checksum from the note. */
40 struct ip_checksum_vcb {
41 struct verify_callback data; /* must be first: code downcasts from &data */
42 unsigned short ip_checksum; /* expected checksum recorded in the ELF note */
/* verify_ip_checksum - verification callback for EIN_PROGRAM_CHECKSUM notes.
 * Recomputes the IP-style checksum over the ELF header, the program header
 * table, and every loaded segment, then compares it with the checksum
 * stored in the image's boot note.  Returns nonzero when they match.
 * NOTE(review): several lines (declarations of `bytes`/`ptr`, the zeroing
 * of the in-image checksum bytes, and closing braces) are not visible in
 * this chunk. */
45 int verify_ip_checksum(
46 struct verify_callback *vcb,
47 Elf_ehdr *ehdr, Elf_phdr *phdr, struct segment *head)
49 struct ip_checksum_vcb *cb;
52 unsigned long checksum;
53 unsigned char buff[2], *n_desc;
54 cb = (struct ip_checksum_vcb *)vcb;
55 /* zero the checksum so its value won't
56 * get in the way of verifying the checksum. */
/* Save the 2 in-image checksum bytes so they can be restored afterwards. */
60 n_desc = (unsigned char *)(vcb->desc_addr);
61 memcpy(buff, n_desc, 2);
/* Checksum the ELF header, then the program headers, then each segment,
 * folding the partial sums together with add_ip_checksums(). */
65 checksum = compute_ip_checksum(ehdr, sizeof(*ehdr));
66 bytes += sizeof(*ehdr);
67 checksum = add_ip_checksums(bytes, checksum,
68 compute_ip_checksum(phdr, ehdr->e_phnum*sizeof(*phdr)));
69 bytes += ehdr->e_phnum*sizeof(*phdr);
70 for(ptr = head->phdr_next; ptr != head; ptr = ptr->phdr_next) {
71 checksum = add_ip_checksums(bytes, checksum,
72 compute_ip_checksum((void *)ptr->s_addr, ptr->s_memsz));
73 bytes += ptr->s_memsz;
/* Restore the saved checksum bytes. */
76 memcpy(n_desc, buff, 2);
78 if (checksum != cb->ip_checksum) {
/* NOTE(review): %04x expects unsigned int but `checksum` is unsigned long —
 * harmless on ILP32 targets, worth confirming elsewhere. */
79 printk_err("Image checksum: %04x != computed checksum: %04x\n",
80 cb->ip_checksum, checksum);
82 return checksum == cb->ip_checksum;
/*
86 * Static executables all want to share the same addresses
87 * in memory because only a few addresses are reliably present on
88 * a machine, and implementing general relocation is hard.
91 * - Allocate a buffer twice the size of the linuxBIOS image.
92 * - Anything that would overwrite linuxBIOS copy into the lower half of
94 * - After loading an ELF image copy linuxBIOS to the upper half of the
96 * - Then jump to the loaded image.
99 * - Nearly arbitrary standalone executables can be loaded.
100 * - LinuxBIOS is preserved, so it can be returned to.
101 * - The implementation is still relatively simple,
102 * and much simpler than the general case implemented in kexec.
 */
/* get_bounce_buffer - find a RAM region below MAX_ADDR large enough to hold
 * two copies of the linuxBIOS image (the loaded data plus a copy of
 * linuxBIOS itself to return to).  Scans the linuxBIOS memory table and
 * picks the highest qualifying address.
 * NOTE(review): the declarations of `i`/`msize`, the `continue` statements
 * after each filter, the `buffer` update, and the final return are not
 * visible in this chunk; on failure the caller treats 0 as "not found". */
106 static unsigned long get_bounce_buffer(struct lb_memory *mem)
108 unsigned long lb_size;
109 unsigned long mem_entries;
110 unsigned long buffer;
112 lb_size = (unsigned long)(&_eram_seg - &_ram_seg);
113 /* Double linuxBIOS size so I have somewhere to place a copy to return to */
114 lb_size = lb_size + lb_size;
115 mem_entries = (mem->size - sizeof(*mem))/sizeof(mem->map[0]);
117 for(i = 0; i < mem_entries; i++) {
118 unsigned long mstart, mend;
120 unsigned long tbuffer;
/* Skip entries that are not RAM, start above MAX_ADDR, or are too small. */
121 if (mem->map[i].type != LB_MEM_RAM)
123 if (unpack_lb64(mem->map[i].start) > MAX_ADDR)
125 if (unpack_lb64(mem->map[i].size) < lb_size)
127 mstart = unpack_lb64(mem->map[i].start);
/* Clamp the usable size so the buffer stays at or below MAX_ADDR. */
128 msize = MAX_ADDR - mstart +1;
129 if (msize > unpack_lb64(mem->map[i].size))
130 msize = unpack_lb64(mem->map[i].size);
131 mend = mstart + msize;
/* Candidate: place the buffer at the top of this region. */
132 tbuffer = mend - lb_size;
/* Keep the highest candidate seen so far. */
133 if (tbuffer < buffer)
/* process_elf_notes - walk the PT_NOTE area of the image header buffer,
 * record the program name/version strings, and build a chain of
 * verify_callback nodes (one per EIN_PROGRAM_CHECKSUM note).  Returns the
 * head of the callback chain (0 if no checksum notes were found).
 * NOTE(review): the loop construct over [note, end), the `hdr` declaration,
 * name/version assignments, break conditions, and closing braces are not
 * visible in this chunk. */
141 static struct verify_callback *process_elf_notes(
142 unsigned char *header,
143 unsigned long offset, unsigned long length)
145 struct verify_callback *cb_chain;
146 unsigned char *note, *end;
147 char *program, *version;
150 note = header + offset;
152 program = version = 0;
155 unsigned char *n_name, *n_desc, *next;
156 hdr = (Elf_Nhdr *)note;
/* Name and descriptor are 4-byte aligned per the ELF note format. */
157 n_name = note + sizeof(*hdr);
158 n_desc = n_name + ((hdr->n_namesz + 3) & ~3);
159 next = n_desc + ((hdr->n_descsz + 3) & ~3);
/* Only process notes whose name is exactly ELF_NOTE_BOOT. */
163 if ((hdr->n_namesz == sizeof(ELF_NOTE_BOOT)) &&
164 (memcmp(n_name, ELF_NOTE_BOOT, sizeof(ELF_NOTE_BOOT)) == 0)) {
165 switch(hdr->n_type) {
166 case EIN_PROGRAM_NAME:
/* Accept the string only if it is NUL-terminated. */
167 if (n_desc[hdr->n_descsz -1] == 0) {
171 case EIN_PROGRAM_VERSION:
172 if (n_desc[hdr->n_descsz -1] == 0) {
176 case EIN_PROGRAM_CHECKSUM:
178 struct ip_checksum_vcb *cb;
/* NOTE(review): malloc result is used unchecked — confirm the
 * environment's malloc cannot fail or aborts on OOM. */
179 cb = malloc(sizeof(*cb));
180 cb->ip_checksum = *((uint16_t *)n_desc);
181 cb->data.callback = verify_ip_checksum;
/* Push onto the front of the callback chain. */
182 cb->data.next = cb_chain;
183 cb->data.desc_offset = n_desc - header;
184 cb_chain = &cb->data;
189 printk_spew("n_type: %08x n_name(%d): %-*.*s n_desc(%d): %-*.*s\n",
191 hdr->n_namesz, hdr->n_namesz, hdr->n_namesz, n_name,
192 hdr->n_descsz,hdr->n_descsz, hdr->n_descsz, n_desc);
195 if (program && version) {
196 printk_info("Loading %s version: %s\n",
/* valid_area - check that [start, start+len) lies inside a usable RAM range
 * from the linuxBIOS memory table (and, per the comment below, does not
 * conflict with the bounce buffer).  Returns nonzero when the range is
 * acceptable.
 * NOTE(review): the bounce-buffer conflict test, the `i`/`mtype`
 * declarations, the loop-exit `break`, and the final returns are not
 * visible in this chunk. */
202 static int valid_area(struct lb_memory *mem, unsigned long buffer,
203 unsigned long start, unsigned long len)
205 /* Check through all of the memory segments and ensure
206 * the segment that was passed in is completely contained. */
210 unsigned long end = start + len;
211 unsigned long mem_entries = (mem->size - sizeof(*mem))/sizeof(mem->map[0]);
213 /* See if I conflict with the bounce buffer */
218 /* Walk through the table of valid memory ranges and see if I fit. */
221 for(i = 0; i < mem_entries; i++) {
222 uint64_t mstart, mend;
224 mtype = mem->map[i].type;
225 mstart = unpack_lb64(mem->map[i].start);
226 mend = mstart + unpack_lb64(mem->map[i].size);
/* Overlap test against a RAM entry. */
227 if ((mtype == LB_MEM_RAM) && (start < mend) && (end > mstart)) {
/* Loop ran to completion: no RAM entry matched — report and fail. */
231 if (i == mem_entries) {
232 printk_err("No matching ram area found for range:\n");
233 printk_err(" [0x%016lx, 0x%016lx)\n", start, end);
234 printk_err("Ram areas\n");
235 for(i = 0; i < mem_entries; i++) {
236 uint64_t mstart, mend;
238 mtype = mem->map[i].type;
239 mstart = unpack_lb64(mem->map[i].start);
240 mend = mstart + unpack_lb64(mem->map[i].size);
241 printk_err(" [0x%016lx, 0x%016lx) %s\n",
242 (unsigned long)mstart,
244 (mtype == LB_MEM_RAM)?"RAM":"Reserved");
/* relocate_segment - if a segment overlaps the running linuxBIOS image,
 * split off the non-overlapping head and/or tail into new list nodes and
 * retarget the overlapping middle onto the bounce buffer at the same
 * relative offset, so linuxBIOS is not overwritten during the load.
 * NOTE(review): several lines (the early non-overlap `return`, new-node
 * field initialization, `new->next = seg;` style links, the recomputed
 * start/end values, and closing braces) are not visible in this chunk. */
252 static void relocate_segment(unsigned long buffer, struct segment *seg)
254 /* Modify all segments that want to load onto linuxBIOS
255 * to load onto the bounce buffer instead. */
257 unsigned long lb_start = (unsigned long)&_ram_seg;
258 unsigned long lb_end = (unsigned long)&_eram_seg;
259 unsigned long start, middle, end;
261 printk_spew("lb: [0x%016lx, 0x%016lx)\n",
265 middle = start + seg->s_filesz;
266 end = start + seg->s_memsz;
267 /* I don't conflict with linuxBIOS so get out of here */
268 if ((end <= lb_start) || (start >= lb_end))
271 printk_spew("segment: [0x%016lx, 0x%016lx, 0x%016lx)\n",
274 /* Slice off a piece at the beginning
275 * that doesn't conflict with linuxBIOS. */
277 if (start < lb_start) {
279 unsigned long len = lb_start - start;
280 new = malloc(sizeof(*new));
/* The remaining (conflicting) part starts `len` bytes further in. */
285 seg->s_offset += len;
286 if (seg->s_filesz > len) {
288 seg->s_filesz -= len;
293 /* Order by stream offset */
295 new->prev = seg->prev;
296 seg->prev->next = new;
298 /* Order by original program header order */
299 new->phdr_next = seg;
300 new->phdr_prev = seg->phdr_prev;
301 seg->phdr_prev->phdr_next = new;
302 seg->phdr_prev = new;
304 /* compute the new value of start */
307 printk_spew(" early: [0x%016lx, 0x%016lx, 0x%016lx)\n",
309 new->s_addr + new->s_filesz,
310 new->s_addr + new->s_memsz);
313 /* Slice off a piece at the end
314 * that doesn't conflict with linuxBIOS */
317 unsigned long len = lb_end - start;
319 new = malloc(sizeof(*new));
/* The tail node's data begins `len` bytes into this segment's data. */
324 new->s_offset += len;
325 if (seg->s_filesz > len) {
327 new->s_filesz -= len;
331 /* Order by stream offset */
332 new->next = seg->next;
334 seg->next->prev = new;
336 /* Order by original program header order */
337 new->phdr_next = seg->phdr_next;
338 new->phdr_prev = seg;
339 seg->phdr_next->phdr_prev = new;
340 seg->phdr_next = new;
342 /* compute the new value of end */
345 printk_spew(" late: [0x%016lx, 0x%016lx, 0x%016lx)\n",
347 new->s_addr + new->s_filesz,
348 new->s_addr + new->s_memsz);
351 /* Now retarget this segment onto the bounce buffer */
352 seg->s_addr = buffer + (seg->s_addr - lb_start);
354 printk_spew(" bounce: [0x%016lx, 0x%016lx, 0x%016lx)\n",
356 seg->s_addr + seg->s_filesz,
357 seg->s_addr + seg->s_memsz);
/* build_elf_segment_list - convert the PT_LOAD program headers into a
 * doubly-linked list of struct segment, kept in two orders at once:
 * next/prev sorted by stream offset (so segments can be loaded from a
 * sequential stream) and phdr_next/phdr_prev in original header order.
 * Each segment is validated against the memory map and relocated away
 * from linuxBIOS if needed.  Returns nonzero on success.
 * NOTE(review): declarations of `i`/`new`/`ptr`, the `continue`
 * statements, a `break` in the insertion scan, `new->next = ptr;`,
 * the failure return, and closing braces are not visible in this chunk. */
361 static int build_elf_segment_list(
362 struct segment *head,
363 unsigned long bounce_buffer, struct lb_memory *mem,
364 Elf_phdr *phdr, int headers)
/* Initialize both circular lists to empty (head points at itself). */
368 memset(head, 0, sizeof(*head));
369 head->phdr_next = head->phdr_prev = head;
370 head->next = head->prev = head;
371 for(i = 0; i < headers; i++) {
373 /* Ignore data that I don't need to handle */
374 if (phdr[i].p_type != PT_LOAD) {
375 printk_debug("Dropping non PT_LOAD segment\n");
378 if (phdr[i].p_memsz == 0) {
379 printk_debug("Dropping empty segment\n");
382 new = malloc(sizeof(*new));
383 new->s_addr = phdr[i].p_paddr;
384 new->s_memsz = phdr[i].p_memsz;
385 new->s_offset = phdr[i].p_offset;
386 new->s_filesz = phdr[i].p_filesz;
387 printk_debug("New segment addr 0x%lx size 0x%lx offset 0x%lx filesize 0x%lx\n",
388 new->s_addr, new->s_memsz, new->s_offset, new->s_filesz);
389 /* Clean up the values */
/* A segment can never have more file data than memory size. */
390 if (new->s_filesz > new->s_memsz) {
391 new->s_filesz = new->s_memsz;
393 printk_debug("(cleaned up) New segment addr 0x%lx size 0x%lx offset 0x%lx filesize 0x%lx\n",
394 new->s_addr, new->s_memsz, new->s_offset, new->s_filesz);
/* Find the insertion point that keeps the list sorted by stream offset. */
395 for(ptr = head->next; ptr != head; ptr = ptr->next) {
396 if (new->s_offset < ptr->s_offset)
399 /* Order by stream offset */
401 new->prev = ptr->prev;
402 ptr->prev->next = new;
404 /* Order by original program header order */
405 new->phdr_next = head;
406 new->phdr_prev = head->phdr_prev;
407 head->phdr_prev->phdr_next = new;
408 head->phdr_prev = new;
410 /* Verify the memory addresses in the segment are valid */
411 if (!valid_area(mem, bounce_buffer, new->s_addr, new->s_memsz))
414 /* Modify the segment to load onto the bounce_buffer if necessary. */
416 relocate_segment(bounce_buffer, new);
/* load_elf_segments - copy each segment (in stream-offset order) into
 * memory, taking data first from the already-read `header` buffer and
 * then from the sequential stream device, and zero-fill the bss tail
 * between filesz and memsz.  Returns nonzero on success.
 * NOTE(review): the declaration of `ptr`, the initial `offset = 0;`,
 * stream-skip/read guard conditions, failure returns, updates to `dest`
 * after the header memcpy, and closing braces are not visible in this
 * chunk. */
423 static int load_elf_segments(
424 struct segment *head, unsigned char *header, unsigned long header_size)
426 unsigned long offset;
430 for(ptr = head->next; ptr != head; ptr = ptr->next) {
431 unsigned long start_offset;
432 unsigned long skip_bytes, read_bytes;
433 unsigned char *dest, *middle, *end;
434 byte_offset_t result;
435 printk_debug("Loading Segment: addr: 0x%016lx memsz: 0x%016lx filesz: 0x%016lx\n",
436 ptr->s_addr, ptr->s_memsz, ptr->s_filesz);
438 /* Compute the boundaries of the segment */
439 dest = (unsigned char *)(ptr->s_addr);
440 end = dest + ptr->s_memsz;
441 middle = dest + ptr->s_filesz;
442 start_offset = ptr->s_offset;
443 /* Ignore s_offset if I have a pure bss segment */
444 if (ptr->s_filesz == 0) {
445 start_offset = offset;
448 printk_spew("[ 0x%016lx, %016lx, 0x%016lx) <- %016lx\n",
450 (unsigned long)middle,
452 (unsigned long)start_offset);
454 /* Skip initial buffer unused bytes */
/* While still inside the prefetched header buffer, advancing `offset`
 * is just bookkeeping — no stream skip is needed yet. */
455 if (offset < header_size) {
456 if (start_offset < header_size) {
457 offset = start_offset;
459 offset = header_size;
463 /* Skip the unused bytes */
464 skip_bytes = start_offset - offset;
466 ((result = stream_skip(skip_bytes)) != skip_bytes)) {
467 printk_err("ERROR: Skip of %ld bytes skipped %ld bytes\n",
471 offset = start_offset;
473 /* Copy data from the initial buffer */
474 if (offset < header_size) {
/* Clamp the copy so it never reads past the header buffer. */
476 if ((ptr->s_filesz + start_offset) > header_size) {
477 len = header_size - start_offset;
482 memcpy(dest, &header[start_offset], len);
486 /* Read the segment into memory */
487 read_bytes = middle - dest;
489 ((result = stream_read(dest, read_bytes)) != read_bytes)) {
490 printk_err("ERROR: Read of %ld bytes read %ld bytes...\n",
494 offset += ptr->s_filesz;
496 /* Zero the extra bytes between middle & end */
498 printk_debug("Clearing Segment: addr: 0x%016lx memsz: 0x%016lx\n",
499 (unsigned long)middle, end - middle);
501 /* Zero the extra bytes */
502 memset(middle, 0, end - middle);
/* verify_loaded_image - run every queued verification callback against the
 * loaded image.  For each callback, first translate its note-descriptor
 * stream offset into the loaded memory address by finding the segment that
 * contains it, then invoke the callback.  Returns the last callback result
 * (nonzero means verified; the loop stops on the first failure).
 * NOTE(review): the declarations of `ok`/`ptr`, the `head` parameter line,
 * a `break` after the segment match, and closing braces are not visible in
 * this chunk. */
510 static int verify_loaded_image(
511 struct verify_callback *vcb,
512 Elf_ehdr *ehdr, Elf_phdr *phdr,
519 for(; ok && vcb ; vcb = vcb->next) {
520 /* Find where the note is loaded */
521 /* The whole note must be loaded intact
522 * so an address of 0 for the descriptor is impossible */
525 for(ptr = head->next; ptr != head; ptr = ptr->next) {
526 unsigned long desc_addr;
/* Map stream offset -> loaded address within this segment's file data. */
527 desc_addr = ptr->s_addr + vcb->desc_offset - ptr->s_offset;
528 if ((desc_addr >= ptr->s_addr) &&
529 (desc_addr < (ptr->s_addr + ptr->s_filesz))) {
530 vcb->desc_addr = desc_addr;
533 ok = vcb->callback(vcb, ehdr, phdr, head);
/* elfload - load and start an ELF executable whose first ELF_HEAD_SIZE
 * bytes (starting at the ELF header) are already in `header`.  Finds a
 * bounce buffer, parses boot notes, builds/loads/verifies the segment
 * list, then jumps to the entry point — so on success this call does not
 * return.  Returns 0/failure paths only.
 * NOTE(review): declarations of `ehdr`/`phdr`/`entry`/`head`, the
 * `cb_chain = 0` default, failure `goto`/`return` targets, the
 * stream_fini() call, and closing braces are not visible in this chunk. */
538 int elfload(struct lb_memory *mem,
539 unsigned char *header, unsigned long header_size)
545 struct verify_callback *cb_chain;
546 unsigned long bounce_buffer;
548 /* Find a bounce buffer so I can load to linuxBIOS's current location */
549 bounce_buffer = get_bounce_buffer(mem);
550 if (!bounce_buffer) {
551 printk_err("Could not find a bounce buffer...\n");
555 ehdr = (Elf_ehdr *)header;
556 entry = (void *)(ehdr->e_entry);
557 phdr = (Elf_phdr *)(&header[ehdr->e_phoff]);
559 /* Digest elf note information... */
/* Notes are only processed when the PT_NOTE area is fully inside the
 * prefetched header buffer. */
561 if ((phdr[0].p_type == PT_NOTE) &&
562 ((phdr[0].p_offset + phdr[0].p_filesz) < header_size)) {
563 cb_chain = process_elf_notes(header,
564 phdr[0].p_offset, phdr[0].p_filesz);
567 /* Preprocess the elf segments */
568 if (!build_elf_segment_list(&head,
569 bounce_buffer, mem, phdr, ehdr->e_phnum))
572 /* Load the segments */
573 if (!load_elf_segments(&head, header, header_size))
576 printk_spew("Loaded segments\n");
577 /* Verify the loaded image */
578 if (!verify_loaded_image(cb_chain, ehdr, phdr, &head))
581 printk_spew("verified segments\n");
582 /* Shutdown the stream device */
585 printk_spew("closed down stream\n");
586 /* Reset to booting from this image as late as possible */
589 printk_debug("Jumping to boot code at 0x%x\n", entry);
/* Does not return on success: control transfers to the loaded image. */
593 jmp_to_elf_entry(entry, bounce_buffer);
/* elfboot - top-level entry: initialize the stream device, read the first
 * ELF_HEAD_SIZE bytes, scan them (at 16-byte granularity) for a valid ELF
 * executable header, and hand the image to elfload().  In practice it only
 * returns on failure, since a successful elfload() jumps to the image.
 * NOTE(review): declarations of `i`/`ehdr`/`result`/`header_offset`, the
 * stream_fini() calls, `continue`/`break` statements, and closing braces
 * are not visible in this chunk. */
600 int elfboot(struct lb_memory *mem)
603 static unsigned char header[ELF_HEAD_SIZE];
609 printk_info("Welcome to %s, the open sourced starter.\n", BOOTLOADER);
610 printk_info("January 2002, Eric Biederman.\n");
611 printk_info("Version %s\n", BOOTLOADER_VERSION);
615 if (stream_init() < 0) {
616 printk_err("Could not initialize driver...\n");
620 /* Read in the initial ELF_HEAD_SIZE bytes */
621 if (stream_read(header, ELF_HEAD_SIZE) != ELF_HEAD_SIZE) {
622 printk_err("Read failed...\n");
625 /* Scan for an elf header */
627 for(i = 0; i < ELF_HEAD_SIZE - (sizeof(Elf_ehdr) + sizeof(Elf_phdr)); i+=16) {
628 ehdr = (Elf_ehdr *)(&header[i]);
629 if (memcmp(ehdr->e_ident, ELFMAG, 4) != 0) {
630 printk_debug("No header at %d\n", i);
633 printk_debug("Found ELF candidate at offset %d\n", i);
634 /* Sanity check the elf header */
635 if ((ehdr->e_type == ET_EXEC) &&
636 elf_check_arch(ehdr) &&
637 (ehdr->e_ident[EI_VERSION] == EV_CURRENT) &&
638 (ehdr->e_version == EV_CURRENT) &&
639 (ehdr->e_ehsize == sizeof(Elf_ehdr)) &&
/* BUGFIX: was `=` (assignment), which silently overwrote a bad
 * e_phentsize with the expected value instead of rejecting the
 * candidate; `==` makes this a real validation. */
640 (ehdr->e_phentsize == sizeof(Elf_phdr)) &&
641 (ehdr->e_phoff < (ELF_HEAD_SIZE - i)) &&
642 ((ehdr->e_phoff + (ehdr->e_phentsize * ehdr->e_phnum)) <=
643 (ELF_HEAD_SIZE - i))) {
649 printk_debug("header_offset is %d\n", header_offset);
650 if (header_offset == -1) {
654 printk_debug("Try to load at offset 0x%x\n", header_offset);
655 result = elfload(mem,
656 header + header_offset , ELF_HEAD_SIZE - header_offset);
659 /* Shutdown the stream device */
662 printk_err("Can not load ELF Image.\n");