/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2003 Eric W. Biederman <ebiederm@xmission.com>
 * Copyright (C) 2009 Ron Minnich <rminnich@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA, 02110-1301 USA
 */
21 #include <console/console.h>
22 #include <part/fallback_boot.h>
24 #include <boot/elf_boot.h>
25 #include <boot/coreboot_tables.h>
26 #include <ip_checksum.h>
27 #include <stream/read_bytes.h>
/* Byte-swap a 32-bit big-endian (network order) value on little-endian
 * hosts; a no-op would be used on big-endian builds.
 * Fix: every use of the argument is now parenthesized.  The original
 * expansion used bare `x&0xff`, so an expression argument such as
 * ntohl(a | b << 16) was mis-grouped by operator precedence and
 * produced a wrong result.  Pass unsigned values: the payload header
 * fields swapped here are u32s.
 */
#ifndef CONFIG_BIG_ENDIAN
#define ntohl(x) ( (((x) & 0xff) << 24) | (((x) & 0xff00) << 8) | \
	(((x) & 0xff0000) >> 8) | (((x) & 0xff000000) >> 24) )
#endif
/* Maximum physical address we can use for the coreboot bounce buffer. */
46 extern unsigned char _ram_seg;
47 extern unsigned char _eram_seg;
/* One loadable chunk of the payload.  Segments live on two circular
 * doubly-linked lists rooted at a `head` node: next/prev order them by
 * stream (source) offset, phdr_next/phdr_prev keep the original
 * program-header order.  All addresses/sizes are physical.
 *
 * NOTE(review): the extract showed only the middle of this struct; the
 * opener, the next/prev/compression members (all referenced by
 * relocate_segment()/build_self_segment_list() below) and the closing
 * brace were missing and have been restored.
 */
struct segment {
	struct segment *next;		/* stream-offset ordering ring */
	struct segment *prev;
	struct segment *phdr_next;	/* original program-header ordering ring */
	struct segment *phdr_prev;
	unsigned long s_dstaddr;	/* physical destination (load) address */
	unsigned long s_srcaddr;	/* source address of the data in ROM */
	unsigned long s_memsz;		/* in-memory size; bytes past s_filesz are zeroed */
	unsigned long s_filesz;		/* size of the (possibly compressed) file data */
	int compression;		/* CBFS_COMPRESS_* algorithm for this segment */
};
61 struct verify_callback {
62 struct verify_callback *next;
63 int (*callback)(struct verify_callback *vcb,
64 Elf_ehdr *ehdr, Elf_phdr *phdr, struct segment *head);
65 unsigned long desc_offset;
66 unsigned long desc_addr;
69 struct ip_checksum_vcb {
70 struct verify_callback data;
71 unsigned short ip_checksum;
74 void * cbfs_load_payload(struct lb_memory *lb_mem, const char *name)
76 int selfboot(struct lb_memory *mem, struct cbfs_payload *payload);
77 struct cbfs_payload *payload;
79 payload = (struct cbfs_payload *)cbfs_find_file(name, CBFS_TYPE_PAYLOAD);
82 printk_debug("Got a payload\n");
84 selfboot(lb_mem, payload);
85 printk_emerg("SELFBOOT RETURNED!\n");
/* The problem:
 * Static executables all want to share the same addresses
 * in memory because only a few addresses are reliably present on
 * a machine, and implementing general relocation is hard.
 *
 * The solution:
 * - Allocate a buffer the size of the coreboot image plus additional
 *   required space.
 * - Anything that would overwrite coreboot copy into the lower part of
 *   the buffer.
 * - After loading an ELF image copy coreboot to the top of the buffer.
 * - Then jump to the loaded image.
 *
 * Benefits:
 * - Nearly arbitrary standalone executables can be loaded.
 * - Coreboot is preserved, so it can be returned to.
 * - The implementation is still relatively simple,
 *   and much simpler than the general case implemented in kexec.
 */
111 static unsigned long bounce_size, bounce_buffer;
113 static void get_bounce_buffer(struct lb_memory *mem, unsigned long bounce_size)
115 unsigned long lb_size;
116 unsigned long mem_entries;
117 unsigned long buffer;
119 lb_size = (unsigned long)(&_eram_seg - &_ram_seg);
120 /* Double coreboot size so I have somewhere to place a copy to return to */
121 lb_size = bounce_size + lb_size;
122 mem_entries = (mem->size - sizeof(*mem))/sizeof(mem->map[0]);
124 for(i = 0; i < mem_entries; i++) {
125 unsigned long mstart, mend;
127 unsigned long tbuffer;
128 if (mem->map[i].type != LB_MEM_RAM)
130 if (unpack_lb64(mem->map[i].start) > MAX_ADDR)
132 if (unpack_lb64(mem->map[i].size) < lb_size)
134 mstart = unpack_lb64(mem->map[i].start);
135 msize = MAX_ADDR - mstart +1;
136 if (msize > unpack_lb64(mem->map[i].size))
137 msize = unpack_lb64(mem->map[i].size);
138 mend = mstart + msize;
139 tbuffer = mend - lb_size;
140 if (tbuffer < buffer)
144 bounce_buffer = buffer;
147 static int valid_area(struct lb_memory *mem, unsigned long buffer,
148 unsigned long start, unsigned long len)
150 /* Check through all of the memory segments and ensure
151 * the segment that was passed in is completely contained
155 unsigned long end = start + len;
156 unsigned long mem_entries = (mem->size - sizeof(*mem))/sizeof(mem->map[0]);
158 /* See if I conflict with the bounce buffer */
163 /* Walk through the table of valid memory ranges and see if I
166 for(i = 0; i < mem_entries; i++) {
167 uint64_t mstart, mend;
169 mtype = mem->map[i].type;
170 mstart = unpack_lb64(mem->map[i].start);
171 mend = mstart + unpack_lb64(mem->map[i].size);
172 if ((mtype == LB_MEM_RAM) && (start < mend) && (end > mstart)) {
175 if ((mtype == LB_MEM_TABLE) && (start < mend) && (end > mstart)) {
176 printk_err("Payload is overwriting Coreboot tables.\n");
180 if (i == mem_entries) {
181 printk_err("No matching ram area found for range:\n");
182 printk_err(" [0x%016lx, 0x%016lx)\n", start, end);
183 printk_err("Ram areas\n");
184 for(i = 0; i < mem_entries; i++) {
185 uint64_t mstart, mend;
187 mtype = mem->map[i].type;
188 mstart = unpack_lb64(mem->map[i].start);
189 mend = mstart + unpack_lb64(mem->map[i].size);
190 printk_err(" [0x%016lx, 0x%016lx) %s\n",
191 (unsigned long)mstart,
193 (mtype == LB_MEM_RAM)?"RAM":"Reserved");
/* Physical extent [lb_start, lb_end) of the running coreboot image,
 * taken from the linker-provided _ram_seg/_eram_seg symbols. */
static const unsigned long lb_start = (unsigned long)&_ram_seg;
static const unsigned long lb_end = (unsigned long)&_eram_seg;
204 static int overlaps_coreboot(struct segment *seg)
206 unsigned long start, end;
207 start = seg->s_dstaddr;
208 end = start + seg->s_memsz;
209 return !((end <= lb_start) || (start >= lb_end));
/* Retarget a segment that would overwrite the running coreboot image
 * so it loads into the bounce buffer instead.  Uncompressed segments
 * are first split: pieces lying below lb_start / above lb_end are
 * spun off as new list nodes that still load in place, and only the
 * overlapping middle is redirected (1:1) into the bounce buffer.
 *
 * NOTE(review): this extract is missing lines from the pristine file —
 * the function braces, the early `return`, the declaration and
 * initialization (`*new = *seg;` plus size bookkeeping) of the
 * split-off `struct segment *new` nodes, several list links and the
 * closing `else` arms.  The statements below are preserved exactly as
 * found; only comments were added/terminated.
 */
static void relocate_segment(unsigned long buffer, struct segment *seg)
	/* Modify all segments that want to load onto coreboot
	 * to load onto the bounce buffer instead.
	 */
	unsigned long start, middle, end;
	printk_spew("lb: [0x%016lx, 0x%016lx)\n",
	/* I don't conflict with coreboot so get out of here */
	if (!overlaps_coreboot(seg))
	start = seg->s_dstaddr;
	middle = start + seg->s_filesz;	/* end of the file-backed bytes */
	end = start + seg->s_memsz;	/* end of the whole segment (incl. zero-fill) */
	printk_spew("segment: [0x%016lx, 0x%016lx, 0x%016lx)\n",
	/* Only uncompressed segments can be split: for compressed data the
	 * source bytes do not map 1:1 onto destination addresses. */
	if (seg->compression == CBFS_COMPRESS_NONE) {
		/* Slice off a piece at the beginning
		 * that doesn't conflict with coreboot.
		 */
		if (start < lb_start) {
			unsigned long len = lb_start - start;
			new = malloc(sizeof(*new));
			/* The original keeps the overlapping tail; advance it past
			 * the `len` bytes given to the new leading node. */
			seg->s_dstaddr += len;
			seg->s_srcaddr += len;
			if (seg->s_filesz > len) {
				seg->s_filesz -= len;
			/* Order by stream offset */
			new->prev = seg->prev;
			seg->prev->next = new;
			/* Order by original program header order */
			new->phdr_next = seg;
			new->phdr_prev = seg->phdr_prev;
			seg->phdr_prev->phdr_next = new;
			seg->phdr_prev = new;
			/* compute the new value of start */
			start = seg->s_dstaddr;
			printk_spew("   early: [0x%016lx, 0x%016lx, 0x%016lx)\n",
				new->s_dstaddr + new->s_filesz,
				new->s_dstaddr + new->s_memsz);
		/* Slice off a piece at the end
		 * that doesn't conflict with coreboot
		 */
			unsigned long len = lb_end - start;
			new = malloc(sizeof(*new));
			/* Here `new` is the trailing (non-overlapping) node. */
			new->s_dstaddr += len;
			new->s_srcaddr += len;
			if (seg->s_filesz > len) {
				new->s_filesz -= len;
			/* Order by stream offset */
			new->next = seg->next;
			seg->next->prev = new;
			/* Order by original program header order */
			new->phdr_next = seg->phdr_next;
			new->phdr_prev = seg;
			seg->phdr_next->phdr_prev = new;
			seg->phdr_next = new;
			printk_spew("   late: [0x%016lx, 0x%016lx, 0x%016lx)\n",
				new->s_dstaddr + new->s_filesz,
				new->s_dstaddr + new->s_memsz);
	/* Now retarget this segment onto the bounce buffer */
	/* sort of explanation: the buffer is a 1:1 mapping to coreboot.
	 * so you will make the dstaddr be this buffer, and it will get copied
	 * later to where coreboot lives.
	 */
	seg->s_dstaddr = buffer + (seg->s_dstaddr - lb_start);
	printk_spew(" bounce: [0x%016lx, 0x%016lx, 0x%016lx)\n",
		seg->s_dstaddr + seg->s_filesz,
		seg->s_dstaddr + seg->s_memsz);
/* Parse the cbfs payload's segment table into the doubly-linked
 * `struct segment` rings rooted at `head`: next/prev ordered for the
 * load pass, phdr_next/phdr_prev in original table order.  The table
 * fields are stored big-endian, hence the ntohl() conversions.  The
 * payload's entry point is written through `entry`; by convention the
 * ENTRY record terminates the table.
 *
 * NOTE(review): this extract is missing lines from the pristine file —
 * the function braces, the declarations of `new`/`ptr`, the table-walk
 * loop header and the `break`/`continue`/advance statements, and the
 * final return.  The statements below are preserved exactly as found;
 * only comments were added/terminated.
 */
static int build_self_segment_list(
	struct segment *head,
	struct lb_memory *mem,
	struct cbfs_payload *payload, u32 *entry)
	struct cbfs_payload_segment *segment, *first_segment;
	/* Start with both rings empty: the head node links to itself. */
	memset(head, 0, sizeof(*head));
	head->phdr_next = head->phdr_prev = head;
	head->next = head->prev = head;
	first_segment = segment = &payload->segments;
		printk_debug("Loading segment from rom address 0x%p\n", segment);
		switch(segment->type) {
		case PAYLOAD_SEGMENT_PARAMS:
			/* Parameter blocks carry metadata only; nothing is loaded. */
			printk_debug(" parameter section (skipped)\n");
		case PAYLOAD_SEGMENT_CODE:
		case PAYLOAD_SEGMENT_DATA:
			printk_debug(" %s (compression=%x)\n",
				segment->type == PAYLOAD_SEGMENT_CODE ? "code" : "data",
				ntohl(segment->compression));
			new = malloc(sizeof(*new));
			new->s_dstaddr = ntohl((u32) segment->load_addr);
			new->s_memsz = ntohl(segment->mem_len);
			new->compression = ntohl(segment->compression);
			/* Source data lives at a table-relative offset in ROM. */
			new->s_srcaddr = (u32) ((unsigned char *) first_segment) + ntohl(segment->offset);
			new->s_filesz = ntohl(segment->len);
			printk_debug(" New segment dstaddr 0x%lx memsize 0x%lx srcaddr 0x%lx filesize 0x%lx\n",
				new->s_dstaddr, new->s_memsz, new->s_srcaddr, new->s_filesz);
			/* Clean up the values */
			if (new->s_filesz > new->s_memsz) {
				/* Never copy more than fits in memory. */
				new->s_filesz = new->s_memsz;
			printk_debug(" (cleaned up) New segment addr 0x%lx size 0x%lx offset 0x%lx filesize 0x%lx\n",
				new->s_dstaddr, new->s_memsz, new->s_srcaddr, new->s_filesz);
		case PAYLOAD_SEGMENT_BSS:
			/* BSS: no file data, only a zero-filled memory range. */
			printk_debug(" BSS 0x%p (%d byte)\n", (void *) ntohl((u32) segment->load_addr),
				ntohl(segment->mem_len));
			new = malloc(sizeof(*new));
			new->s_dstaddr = ntohl((u32) segment->load_addr);
			new->s_memsz = ntohl(segment->mem_len);
		case PAYLOAD_SEGMENT_ENTRY:
			printk_debug(" Entry Point 0x%p\n", (void *) ntohl((u32) segment->load_addr));
			*entry = ntohl((u32) segment->load_addr);
			/* Per definition, a payload always has the entry point
			 * as last segment. Thus, we use the occurence of the
			 * entry point as break condition for the loop.
			 * Can we actually just look at the number of section?
			 */
			/* We found something that we don't know about. Throw
			 * hands into the sky and run away!
			 */
			printk_emerg("Bad segment type %x\n", segment->type);
		/* Find this node's position in the stream-offset-ordered ring.
		 * NOTE(review): comparing s_srcaddr against a byte-swapped
		 * load_addr looks suspicious — verify against the pristine
		 * file before relying on this ordering. */
		for(ptr = head->next; ptr != head; ptr = ptr->next) {
			if (new->s_srcaddr < ntohl((u32) segment->load_addr))
		/* Order by stream offset */
		new->prev = ptr->prev;
		ptr->prev->next = new;
		/* Order by original program header order */
		new->phdr_next = head;
		new->phdr_prev = head->phdr_prev;
		head->phdr_prev->phdr_next = new;
		head->phdr_prev = new;
/* Load every segment on the list into memory: size and acquire the
 * bounce buffer, validate all destination ranges, then per segment
 * relocate (if it overlaps coreboot), decompress/copy the file data,
 * and zero the s_filesz..s_memsz tail.  Returns nonzero on success.
 *
 * NOTE(review): this extract is missing lines from the pristine file —
 * the function braces, the declaration of `ptr`/`len`/`tmp`, the error
 * `return 0;` paths, the `break`s after each decompression case and
 * the closing braces.  The statements below are preserved exactly as
 * found; only comments were added/terminated.
 */
static int load_self_segments(
	struct segment *head,
	struct lb_memory *mem,
	struct cbfs_payload *payload)
	/* The buffer must cover at least the coreboot image itself, plus
	 * however far past lb_start any overlapping segment reaches. */
	unsigned long required_bounce_size = lb_end - lb_start;
	for(ptr = head->next; ptr != head; ptr = ptr->next) {
		if (!overlaps_coreboot(ptr)) continue;
		unsigned long bounce = ptr->s_dstaddr + ptr->s_memsz - lb_start;
		if (bounce > required_bounce_size) required_bounce_size = bounce;
	get_bounce_buffer(mem, required_bounce_size);
	if (!bounce_buffer) {
		printk_err("Could not find a bounce buffer...\n");
	for(ptr = head->next; ptr != head; ptr = ptr->next) {
		/* Verify the memory addresses in the segment are valid */
		if (!valid_area(mem, bounce_buffer, ptr->s_dstaddr, ptr->s_memsz))
	for(ptr = head->next; ptr != head; ptr = ptr->next) {
		unsigned char *dest, *src;
		printk_debug("Loading Segment: addr: 0x%016lx memsz: 0x%016lx filesz: 0x%016lx\n",
			ptr->s_dstaddr, ptr->s_memsz, ptr->s_filesz);
		/* Modify the segment to load onto the bounce_buffer if necessary.
		 */
		relocate_segment(bounce_buffer, ptr);
		printk_debug("Post relocation: addr: 0x%016lx memsz: 0x%016lx filesz: 0x%016lx\n",
			ptr->s_dstaddr, ptr->s_memsz, ptr->s_filesz);
		/* Compute the boundaries of the segment */
		dest = (unsigned char *)(ptr->s_dstaddr);
		src = (unsigned char *)(ptr->s_srcaddr);
		/* Copy data from the initial buffer */
			unsigned char *middle, *end;
			switch(ptr->compression) {
#if CONFIG_COMPRESSED_PAYLOAD_LZMA==1
			case CBFS_COMPRESS_LZMA: {
				printk_debug("using LZMA\n");
				/* Local prototype; decompressor linked elsewhere. */
				unsigned long ulzma(unsigned char *src, unsigned char *dst);
				len = ulzma(src, dest);
#endif
#if CONFIG_COMPRESSED_PAYLOAD_NRV2B==1
			case CBFS_COMPRESS_NRV2B: {
				printk_debug("using NRV2B\n");
				/* Local prototype; decompressor linked elsewhere. */
				unsigned long unrv2b(u8 *src, u8 *dst, unsigned long *ilen_p);
				len = unrv2b(src, dest, &tmp);
#endif
			case CBFS_COMPRESS_NONE: {
				printk_debug("it's not compressed!\n");
				memcpy(dest, src, len);
				/* default: unknown algorithm — report and fail. */
				printk_info( "CBFS: Unknown compression type %d\n", ptr->compression);
			end = dest + ptr->s_memsz;
			printk_spew("[ 0x%016lx, %016lx, 0x%016lx) <- %016lx\n",
				(unsigned long)middle,
			/* Zero the extra bytes between middle & end */
				printk_debug("Clearing Segment: addr: 0x%016lx memsz: 0x%016lx\n",
					(unsigned long)middle, (unsigned long)(end - middle));
				/* Zero the extra bytes */
				memset(middle, 0, end - middle);
/* Boot a SELF (cbfs) payload: build the segment list, load the
 * segments into place (bouncing anything that overlaps coreboot), and
 * jump to the payload's entry point.  jmp_to_elf_entry() does not
 * return on success; the bounce buffer lets the payload hand control
 * back to coreboot.
 *
 * NOTE(review): this extract is missing lines from the pristine file —
 * the function braces, the locals `head` and `entry`, and the failure
 * `return 0;` paths after each `if`.  The statements below are
 * preserved exactly as found; only comments were added.
 */
int selfboot(struct lb_memory *mem, struct cbfs_payload *payload)
	/* Preprocess the self segments */
	if (!build_self_segment_list(&head, mem, payload, &entry))
	/* Load the segments */
	if (!load_self_segments(&head, mem, payload))
	printk_spew("Loaded segments\n");
	/* Reset to booting from this image as late as possible */
	printk_debug("Jumping to boot code at %x\n", entry);
	jmp_to_elf_entry((void*)entry, bounce_buffer, bounce_size);