2 * This file is part of the coreboot project.
4 * Copyright (C) 2003 Eric W. Biederman <ebiederm@xmission.com>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA, 02110-1301 USA
20 #include <console/console.h>
21 #include <part/fallback_boot.h>
23 #include <boot/elf_boot.h>
24 #include <boot/coreboot_tables.h>
25 #include <ip_checksum.h>
26 #include <stream/read_bytes.h>
31 /* Maximum physical address we can use for the coreboot bounce buffer.
/* Linker-script symbols bracketing the running coreboot image in RAM;
 * &_eram_seg - &_ram_seg gives the image size (see get_bounce_buffer()). */
37 extern unsigned char _ram_seg;
38 extern unsigned char _eram_seg;
/* Fields of struct segment: one loadable chunk of the payload, kept on two
 * circular doubly-linked lists — one ordered by stream offset (next/prev,
 * declared on lines not shown) and one preserving original program-header
 * order (phdr_next/phdr_prev).  The s_addr field is also declared on an
 * elided line.  NOTE(review): struct opener not visible in this chunk. */
43 struct segment *phdr_next;
44 struct segment *phdr_prev;
46 unsigned long s_memsz;	/* bytes the segment occupies in memory */
47 unsigned long s_offset;	/* byte offset of the segment in the input stream */
48 unsigned long s_filesz;	/* bytes to read from the stream; the rest is zeroed */
/* A post-load verification step, collected while parsing ELF notes and run
 * by verify_loaded_image() after all segments are in memory. */
51 struct verify_callback {
52 	struct verify_callback *next;	/* singly-linked chain of callbacks */
53 	int (*callback)(struct verify_callback *vcb,
54 		Elf_ehdr *ehdr, Elf_phdr *phdr, struct segment *head);
55 	unsigned long desc_offset;	/* offset of the note descriptor in the stream */
56 	unsigned long desc_addr;	/* resolved load address of that descriptor */
/* verify_callback specialization carrying the expected image checksum taken
 * from an EIN_PROGRAM_CHECKSUM ELF note; `data` must stay the first member
 * so the base pointer can be cast back (see verify_ip_checksum()). */
59 struct ip_checksum_vcb {
60 	struct verify_callback data;
61 	unsigned short ip_checksum;	/* expected 16-bit IP-style checksum */
/* Verify the loaded image against the checksum stored in its ELF note.
 * Computes an RFC 1071 style checksum over the ELF header, the program
 * headers, and every loaded segment (in original phdr order), then compares
 * it with the value recorded in the note.  The in-image copy of the checksum
 * is temporarily saved in buff[] (and zeroed on lines elided from this
 * chunk) so it does not perturb the computation, and restored afterwards.
 * Returns nonzero when the checksums match. */
64 int verify_ip_checksum(
65 	struct verify_callback *vcb,
66 	Elf_ehdr *ehdr, Elf_phdr *phdr, struct segment *head)
68 	struct ip_checksum_vcb *cb;
71 	unsigned long checksum;
72 	unsigned char buff[2], *n_desc;
73 	cb = (struct ip_checksum_vcb *)vcb;
74 	/* zero the checksum so its value won't
75 	 * get in the way of verifying the checksum.
/* Save the two checksum bytes that live inside the loaded image. */
79 	n_desc = (unsigned char *)(vcb->desc_addr);
80 	memcpy(buff, n_desc, 2);
/* Checksum: ELF header, then program headers, then each segment, folding
 * the running byte count into add_ip_checksums() for correct odd-offset
 * handling. */
84 	checksum = compute_ip_checksum(ehdr, sizeof(*ehdr));
85 	bytes += sizeof(*ehdr);
86 	checksum = add_ip_checksums(bytes, checksum,
87 		compute_ip_checksum(phdr, ehdr->e_phnum*sizeof(*phdr)));
88 	bytes += ehdr->e_phnum*sizeof(*phdr);
89 	for(ptr = head->phdr_next; ptr != head; ptr = ptr->phdr_next) {
90 		checksum = add_ip_checksums(bytes, checksum,
91 			compute_ip_checksum((void *)ptr->s_addr, ptr->s_memsz));
92 		bytes += ptr->s_memsz;
/* Put the image's stored checksum bytes back. */
95 	memcpy(n_desc, buff, 2);
97 	if (checksum != cb->ip_checksum) {
98 		printk_err("Image checksum: %04x != computed checksum: %04lx\n",
99 			cb->ip_checksum, checksum);
101 	return checksum == cb->ip_checksum;
105 * Static executables all want to share the same addresses
106 * in memory because only a few addresses are reliably present on
107 * a machine, and implementing general relocation is hard.
110 * - Allocate a buffer twice the size of the coreboot image.
111 * - Anything that would overwrite coreboot copy into the lower half of
113 * - After loading an ELF image copy coreboot to the upper half of the
115 * - Then jump to the loaded image.
118 * - Nearly arbitrary standalone executables can be loaded.
119 * - Coreboot is preserved, so it can be returned to.
120 * - The implementation is still relatively simple,
121 * and much simpler than the general case implemented in kexec.
125 static unsigned long bounce_size;
/* Find a bounce buffer for preserving coreboot while a payload loads.
 * Scans the coreboot memory table for LB_MEM_RAM entries below MAX_ADDR
 * large enough to hold two coreboot images (the bounce copy plus the return
 * copy), preferring the highest usable address.  Also records the single
 * image size in the file-scope bounce_size.  Returns the chosen buffer
 * address (return path is on lines elided from this chunk; presumably 0
 * when nothing fits — see the !bounce_buffer check in elfload()). */
127 static unsigned long get_bounce_buffer(struct lb_memory *mem)
129 	unsigned long lb_size;
130 	unsigned long mem_entries;
131 	unsigned long buffer;
133 	lb_size = (unsigned long)(&_eram_seg - &_ram_seg);
134 	/* Double coreboot size so I have somewhere to place a copy to return to */
135 	bounce_size = lb_size;
136 	lb_size = bounce_size + lb_size;
137 	mem_entries = (mem->size - sizeof(*mem))/sizeof(mem->map[0]);
139 	for(i = 0; i < mem_entries; i++) {
140 		unsigned long mstart, mend;
142 		unsigned long tbuffer;
/* Skip entries that are not RAM, start above MAX_ADDR, or are too small. */
143 		if (mem->map[i].type != LB_MEM_RAM)
145 		if (unpack_lb64(mem->map[i].start) > MAX_ADDR)
147 		if (unpack_lb64(mem->map[i].size) < lb_size)
/* Clamp the usable size of this entry to the MAX_ADDR ceiling. */
149 		mstart = unpack_lb64(mem->map[i].start);
150 		msize = MAX_ADDR - mstart +1;
151 		if (msize > unpack_lb64(mem->map[i].size))
152 			msize = unpack_lb64(mem->map[i].size);
153 		mend = mstart + msize;
/* Place the candidate at the top of the entry; keep the highest one. */
154 		tbuffer = mend - lb_size;
155 		if (tbuffer < buffer)
/* Walk a PT_NOTE segment already present in the header buffer and digest
 * coreboot boot notes (ELF_NOTE_BOOT): the program name, its version, and
 * an expected image checksum.  For each checksum note a verification
 * callback (struct ip_checksum_vcb) is allocated and pushed onto a chain
 * which is returned to the caller (return statement is on a line elided
 * from this chunk).  Note entries are 4-byte aligned per the ELF spec. */
163 static struct verify_callback *process_elf_notes(
164 	unsigned char *header,
165 	unsigned long offset, unsigned long length)
167 	struct verify_callback *cb_chain;
168 	unsigned char *note, *end;
169 	unsigned char *program, *version;
172 	note = header + offset;
174 	program = version = 0;
177 		unsigned char *n_name, *n_desc, *next;
178 		hdr = (Elf_Nhdr *)note;
/* Name and descriptor follow the note header, each padded to 4 bytes. */
179 		n_name = note + sizeof(*hdr);
180 		n_desc = n_name + ((hdr->n_namesz + 3) & ~3);
181 		next = n_desc + ((hdr->n_descsz + 3) & ~3);
/* Only notes whose owner is exactly ELF_NOTE_BOOT are interpreted. */
185 		if ((hdr->n_namesz == sizeof(ELF_NOTE_BOOT)) &&
186 			(memcmp(n_name, ELF_NOTE_BOOT, sizeof(ELF_NOTE_BOOT)) == 0)) {
187 			switch(hdr->n_type) {
188 			case EIN_PROGRAM_NAME:
/* Accept the string only if it is NUL-terminated. */
189 				if (n_desc[hdr->n_descsz -1] == 0) {
193 			case EIN_PROGRAM_VERSION:
194 				if (n_desc[hdr->n_descsz -1] == 0) {
198 			case EIN_PROGRAM_CHECKSUM:
200 				struct ip_checksum_vcb *cb;
/* NOTE(review): malloc result is used unchecked; acceptable only if the
 * firmware malloc cannot fail — confirm against the allocator used here. */
201 				cb = malloc(sizeof(*cb));
202 				cb->ip_checksum = *((uint16_t *)n_desc);
203 				cb->data.callback = verify_ip_checksum;
204 				cb->data.next = cb_chain;
205 				cb->data.desc_offset = n_desc - header;
206 				cb_chain = &cb->data;
211 		printk_spew("n_type: %08x n_name(%d): %-*.*s n_desc(%d): %-*.*s\n",
213 			hdr->n_namesz, hdr->n_namesz, hdr->n_namesz, n_name,
214 			hdr->n_descsz,hdr->n_descsz, hdr->n_descsz, n_desc);
217 	if (program && version) {
218 		printk_info("Loading %s version: %s\n",
/* Check that [start, start+len) is fully contained in an LB_MEM_RAM entry
 * of the coreboot memory table and does not overwrite the coreboot tables
 * themselves.  On failure the full memory map is dumped for diagnosis.
 * Returns nonzero when the range is acceptable (success/failure return
 * statements are on lines elided from this chunk). */
224 static int valid_area(struct lb_memory *mem, unsigned long buffer,
225 	unsigned long start, unsigned long len)
227 	/* Check through all of the memory segments and ensure
228 	 * the segment that was passed in is completely contained
232 	unsigned long end = start + len;
233 	unsigned long mem_entries = (mem->size - sizeof(*mem))/sizeof(mem->map[0]);
235 	/* See if I conflict with the bounce buffer */
240 	/* Walk through the table of valid memory ranges and see if I
243 	for(i = 0; i < mem_entries; i++) {
244 		uint64_t mstart, mend;
246 		mtype = mem->map[i].type;
247 		mstart = unpack_lb64(mem->map[i].start);
248 		mend = mstart + unpack_lb64(mem->map[i].size);
/* Overlap with RAM is what we want; overlap with the tables is fatal. */
249 		if ((mtype == LB_MEM_RAM) && (start < mend) && (end > mstart)) {
252 		if ((mtype == LB_MEM_TABLE) && (start < mend) && (end > mstart)) {
253 			printk_err("Payload is overwriting Coreboot tables.\n");
/* Loop ran to completion: no RAM entry covered the range — dump the map. */
257 	if (i == mem_entries) {
258 		printk_err("No matching ram area found for range:\n");
259 		printk_err("  [0x%016lx, 0x%016lx)\n", start, end);
260 		printk_err("Ram areas\n");
261 		for(i = 0; i < mem_entries; i++) {
262 			uint64_t mstart, mend;
264 			mtype = mem->map[i].type;
265 			mstart = unpack_lb64(mem->map[i].start);
266 			mend = mstart + unpack_lb64(mem->map[i].size);
267 			printk_err("  [0x%016lx, 0x%016lx) %s\n",
268 				(unsigned long)mstart,
270 				(mtype == LB_MEM_RAM)?"RAM":"Reserved");
/* Retarget any part of a segment that would overwrite the running coreboot
 * image [_ram_seg, _eram_seg) onto the bounce buffer instead.  Up to two
 * new segment nodes are split off — one for the non-conflicting prefix and
 * one for the non-conflicting suffix — and spliced into both the stream-
 * order and phdr-order lists; the remaining (conflicting) middle piece is
 * then shifted into the bounce buffer at the same relative offset.
 * Several splice statements sit on lines elided from this chunk. */
278 static void relocate_segment(unsigned long buffer, struct segment *seg)
280 	/* Modify all segments that want to load onto coreboot
281 	 * to load onto the bounce buffer instead.
283 	unsigned long lb_start = (unsigned long)&_ram_seg;
284 	unsigned long lb_end = (unsigned long)&_eram_seg;
285 	unsigned long start, middle, end;
287 	printk_spew("lb: [0x%016lx, 0x%016lx)\n",
291 	middle = start + seg->s_filesz;
292 	end = start + seg->s_memsz;
293 	/* I don't conflict with coreboot so get out of here */
294 	if ((end <= lb_start) || (start >= lb_end))
297 	printk_spew("segment: [0x%016lx, 0x%016lx, 0x%016lx)\n",
300 	/* Slice off a piece at the beginning
301 	 * that doesn't conflict with coreboot.
303 	if (start < lb_start) {
305 		unsigned long len = lb_start - start;
306 		new = malloc(sizeof(*new));
/* Advance the original segment past the sliced-off prefix. */
311 		seg->s_offset += len;
312 		if (seg->s_filesz > len) {
314 			seg->s_filesz -= len;
319 		/* Order by stream offset */
321 		new->prev = seg->prev;
322 		seg->prev->next = new;
324 		/* Order by original program header order */
325 		new->phdr_next = seg;
326 		new->phdr_prev = seg->phdr_prev;
327 		seg->phdr_prev->phdr_next = new;
328 		seg->phdr_prev = new;
330 		/* compute the new value of start */
333 		printk_spew("   early: [0x%016lx, 0x%016lx, 0x%016lx)\n",
335 			new->s_addr + new->s_filesz,
336 			new->s_addr + new->s_memsz);
339 	/* Slice off a piece at the end
340 	 * that doesn't conflict with coreboot
343 		unsigned long len = lb_end - start;
345 		new = malloc(sizeof(*new));
/* The suffix node starts past the conflicting middle piece. */
350 		new->s_offset += len;
351 		if (seg->s_filesz > len) {
353 			new->s_filesz -= len;
357 		/* Order by stream offset */
358 		new->next = seg->next;
360 		seg->next->prev = new;
362 		/* Order by original program header order */
363 		new->phdr_next = seg->phdr_next;
364 		new->phdr_prev = seg;
365 		seg->phdr_next->phdr_prev = new;
366 		seg->phdr_next = new;
368 		printk_spew("   late: [0x%016lx, 0x%016lx, 0x%016lx)\n",
370 			new->s_addr + new->s_filesz,
371 			new->s_addr + new->s_memsz);
374 	/* Now retarget this segment onto the bounce buffer */
375 	seg->s_addr = buffer + (seg->s_addr - lb_start);
377 	printk_spew(" bounce: [0x%016lx, 0x%016lx, 0x%016lx)\n",
379 		seg->s_addr + seg->s_filesz,
380 		seg->s_addr + seg->s_memsz);
/* Build the segment lists from the ELF program headers.  Only non-empty
 * PT_LOAD entries are kept; each becomes a struct segment inserted into the
 * stream-offset-ordered list (insertion sort via the `next` chain) and
 * appended to the phdr-order list.  Each segment is then validated against
 * the memory map and, if it would overwrite coreboot, retargeted onto the
 * bounce buffer.  Returns nonzero on success (return statements are on
 * lines elided from this chunk). */
384 static int build_elf_segment_list(
385 	struct segment *head,
386 	unsigned long bounce_buffer, struct lb_memory *mem,
387 	Elf_phdr *phdr, int headers)
/* head is a sentinel node: both circular lists start out self-linked. */
391 	memset(head, 0, sizeof(*head));
392 	head->phdr_next = head->phdr_prev = head;
393 	head->next = head->prev = head;
394 	for(i = 0; i < headers; i++) {
396 		/* Ignore data that I don't need to handle */
397 		if (phdr[i].p_type != PT_LOAD) {
398 			printk_debug("Dropping non PT_LOAD segment\n");
401 		if (phdr[i].p_memsz == 0) {
402 			printk_debug("Dropping empty segment\n");
/* NOTE(review): malloc result used unchecked — confirm allocator policy. */
405 		new = malloc(sizeof(*new));
406 		new->s_addr = phdr[i].p_paddr;
407 		new->s_memsz = phdr[i].p_memsz;
408 		new->s_offset = phdr[i].p_offset;
409 		new->s_filesz = phdr[i].p_filesz;
410 		printk_debug("New segment addr 0x%lx size 0x%lx offset 0x%lx filesize 0x%lx\n",
411 			new->s_addr, new->s_memsz, new->s_offset, new->s_filesz);
412 		/* Clean up the values */
413 		if (new->s_filesz > new->s_memsz)  {
414 			new->s_filesz = new->s_memsz;
416 		printk_debug("(cleaned up) New segment addr 0x%lx size 0x%lx offset 0x%lx filesize 0x%lx\n",
417 			new->s_addr, new->s_memsz, new->s_offset, new->s_filesz);
/* Find the insertion point that keeps the stream list offset-sorted. */
418 		for(ptr = head->next; ptr != head; ptr = ptr->next) {
419 			if (new->s_offset < ptr->s_offset)
422 		/* Order by stream offset */
424 		new->prev = ptr->prev;
425 		ptr->prev->next = new;
427 		/* Order by original program header order */
428 		new->phdr_next = head;
429 		new->phdr_prev = head->phdr_prev;
430 		head->phdr_prev->phdr_next = new;
431 		head->phdr_prev = new;
433 		/* Verify the memory addresses in the segment are valid */
434 		if (!valid_area(mem, bounce_buffer, new->s_addr, new->s_memsz))
437 		/* Modify the segment to load onto the bounce_buffer if necessary.
439 		relocate_segment(bounce_buffer, new);
/* Stream each segment into place, walking the stream-offset-ordered list so
 * the input is consumed strictly forward.  For every segment: skip unread
 * stream bytes up to its offset, copy any part that already sits in the
 * initial header buffer, stream_read() the remainder, and zero the bss tail
 * between filesz and memsz.  Returns nonzero on success (return statements
 * are on lines elided from this chunk). */
446 static int load_elf_segments(
447 	struct segment *head, unsigned char *header, unsigned long header_size)
449 	unsigned long offset;
453 	for(ptr = head->next; ptr != head; ptr = ptr->next) {
454 		unsigned long start_offset;
455 		unsigned long skip_bytes, read_bytes;
456 		unsigned char *dest, *middle, *end;
457 		byte_offset_t result;
458 		printk_debug("Loading Segment: addr: 0x%016lx memsz: 0x%016lx filesz: 0x%016lx\n",
459 			ptr->s_addr, ptr->s_memsz, ptr->s_filesz);
461 		/* Compute the boundaries of the segment */
462 		dest = (unsigned char *)(ptr->s_addr);
463 		end = dest + ptr->s_memsz;
464 		middle = dest + ptr->s_filesz;
465 		start_offset = ptr->s_offset;
466 		/* Ignore s_offset if I have a pure bss segment */
467 		if (ptr->s_filesz == 0) {
468 			start_offset = offset;
471 		printk_spew("[ 0x%016lx, %016lx, 0x%016lx) <- %016lx\n",
473 			(unsigned long)middle,
475 			(unsigned long)start_offset);
477 		/* Skip initial buffer unused bytes */
/* Data still inside the header buffer costs nothing to "skip". */
478 		if (offset < header_size) {
479 			if (start_offset < header_size) {
480 				offset = start_offset;
482 				offset = header_size;
486 		/* Skip the unused bytes */
487 		skip_bytes = start_offset - offset;
489 			((result = stream_skip(skip_bytes)) != skip_bytes)) {
490 			printk_err("ERROR: Skip of %ld bytes skipped %ld bytes\n",
494 		offset = start_offset;
496 		/* Copy data from the initial buffer */
497 		if (offset < header_size) {
/* Clamp the copy to what the header buffer actually holds. */
499 			if ((ptr->s_filesz + start_offset) > header_size) {
500 				len = header_size - start_offset;
505 			memcpy(dest, &header[start_offset], len);
509 		/* Read the segment into memory */
510 		read_bytes = middle - dest;
512 			((result = stream_read(dest, read_bytes)) != read_bytes)) {
513 			printk_err("ERROR: Read of %ld bytes read %ld bytes...\n",
517 		offset += ptr->s_filesz;
519 		/* Zero the extra bytes between middle & end */
521 			printk_debug("Clearing Segment: addr: 0x%016lx memsz: 0x%016lx\n",
522 				(unsigned long)middle, (unsigned long)(end - middle));
524 			/* Zero the extra bytes */
525 			memset(middle, 0, end - middle);
/* Run every queued verification callback against the loaded image.  For
 * each callback, resolve the note descriptor's stream offset to its final
 * load address by locating the segment whose file data covers it, then
 * invoke the callback; iteration stops as soon as one callback fails.
 * Returns the accumulated ok flag (return statement on an elided line). */
533 static int verify_loaded_image(
534 	struct verify_callback *vcb,
535 	Elf_ehdr *ehdr, Elf_phdr *phdr,
542 	for(; ok && vcb ; vcb = vcb->next) {
543 		/* Find where the note is loaded */
544 		/* The whole note must be loaded intact
545 		 * so an address of 0 for the descriptor is impossible
548 		for(ptr = head->next; ptr != head; ptr = ptr->next) {
549 			unsigned long desc_addr;
550 			desc_addr = ptr->s_addr + vcb->desc_offset - ptr->s_offset;
/* Only segments whose *file* data (not bss) covers the descriptor count. */
551 			if ((desc_addr >= ptr->s_addr) &&
552 				(desc_addr < (ptr->s_addr + ptr->s_filesz))) {
553 				vcb->desc_addr = desc_addr;
556 		ok = vcb->callback(vcb, ehdr, phdr, head);
/* Load and launch an ELF image whose first header_size bytes are already in
 * `header`.  Pipeline: reserve a bounce buffer, parse the ELF/program
 * headers, digest PT_NOTE callbacks, build and load the segment lists,
 * verify the result, then shut down the stream and jump to the entry point
 * (jmp_to_elf_entry() is expected to return only on failure; error-path
 * returns are on lines elided from this chunk). */
561 int elfload(struct lb_memory *mem,
562 	unsigned char *header, unsigned long header_size)
568 	struct verify_callback *cb_chain;
569 	unsigned long bounce_buffer;
571 	/* Find a bounce buffer so I can load to coreboot's current location */
572 	bounce_buffer = get_bounce_buffer(mem);
573 	if (!bounce_buffer) {
574 		printk_err("Could not find a bounce buffer...\n");
578 	ehdr = (Elf_ehdr *)header;
579 	entry = (void *)(ehdr->e_entry);
580 	phdr = (Elf_phdr *)(&header[ehdr->e_phoff]);
582 	/* Digest elf note information... */
/* Notes are only honored when the whole PT_NOTE segment fits in `header`. */
584 	if ((phdr[0].p_type == PT_NOTE) &&
585 		((phdr[0].p_offset + phdr[0].p_filesz) < header_size)) {
586 		cb_chain = process_elf_notes(header,
587 			phdr[0].p_offset, phdr[0].p_filesz);
590 	/* Preprocess the elf segments */
591 	if (!build_elf_segment_list(&head,
592 		bounce_buffer, mem, phdr, ehdr->e_phnum))
595 	/* Load the segments */
596 	if (!load_elf_segments(&head, header, header_size))
599 	printk_spew("Loaded segments\n");
600 	/* Verify the loaded image */
601 	if (!verify_loaded_image(cb_chain, ehdr, phdr, &head))
604 	printk_spew("verified segments\n");
605 	/* Shutdown the stream device */
608 	printk_spew("closed down stream\n");
609 	/* Reset to booting from this image as late as possible */
612 	printk_debug("Jumping to boot code at %p\n", entry);
616 	jmp_to_elf_entry(entry, bounce_buffer, bounce_size);
/* Entry point: find and boot an ELF payload from the boot stream.
 * Reads the first ELF_HEAD_SIZE bytes, scans them at 16-byte granularity
 * for a plausible ELF header (magic plus sanity checks, including that all
 * program headers fit in the buffered prefix), and hands the candidate to
 * elfload().  Falls through to an error message when no image can be
 * loaded (intervening error-path lines are elided from this chunk). */
623 int elfboot(struct lb_memory *mem)
626 	static unsigned char header[ELF_HEAD_SIZE];
631 	printk_debug("\nelfboot: Attempting to load payload.\n");
634 	if (stream_init() < 0) {
635 		printk_err("Could not initialize driver...\n");
639 	/* Read in the initial ELF_HEAD_SIZE bytes */
640 	if (stream_read(header, ELF_HEAD_SIZE) != ELF_HEAD_SIZE) {
641 		printk_err("Read failed...\n");
644 	/* Scan for an elf header */
646 	for(i = 0; i < ELF_HEAD_SIZE - (sizeof(Elf_ehdr) + sizeof(Elf_phdr)); i+=16) {
647 		ehdr = (Elf_ehdr *)(&header[i]);
648 		if (memcmp(ehdr->e_ident, ELFMAG, 4) != 0) {
649 			printk_debug("No header at %d\n", i);
652 		printk_debug("Found ELF candidate at offset %d\n", i);
653 		/* Sanity check the elf header */
654 		if ((ehdr->e_type == ET_EXEC) &&
655 			elf_check_arch(ehdr) &&
656 			(ehdr->e_ident[EI_VERSION] == EV_CURRENT) &&
657 			(ehdr->e_version == EV_CURRENT) &&
658 			(ehdr->e_ehsize == sizeof(Elf_ehdr)) &&
			/* BUG FIX: was `=` (assignment).  That both made the test
			 * unconditionally true (sizeof is nonzero) and silently
			 * overwrote e_phentsize in the candidate header, defeating
			 * this sanity check entirely. */
659 			(ehdr->e_phentsize == sizeof(Elf_phdr)) &&
660 			(ehdr->e_phoff < (ELF_HEAD_SIZE - i)) &&
661 			((ehdr->e_phoff + (ehdr->e_phentsize * ehdr->e_phnum)) <=
662 			(ELF_HEAD_SIZE - i))) {
668 	printk_debug("header_offset is %d\n", header_offset);
669 	if (header_offset == -1) {
673 	printk_debug("Try to load at offset 0x%x\n", header_offset);
674 	result = elfload(mem,
675 		header + header_offset , ELF_HEAD_SIZE - header_offset);
678 	/* Shutdown the stream device */
681 	printk_err("Can not load ELF Image.\n");