Move decompression further down the code flow, so that - where
[coreboot.git] / src / boot / elfboot.c
1 /*
2  * This file is part of the coreboot project.
3  *
4  * Copyright (C) 2003 Eric W. Biederman <ebiederm@xmission.com>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; version 2 of the License.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA, 02110-1301 USA
18  */
19
20 #include <console/console.h>
21 #include <part/fallback_boot.h>
22 #include <boot/elf.h>
23 #include <boot/elf_boot.h>
24 #include <boot/coreboot_tables.h>
25 #include <ip_checksum.h>
26 #include <stream/read_bytes.h>
27 #include <stdint.h>
28 #include <stdlib.h>
29 #include <string.h>
30
31 /* Maximum physical address we can use for the coreboot bounce buffer.
32  */
33 #ifndef MAX_ADDR
34 #define MAX_ADDR -1UL
35 #endif
36
37 extern unsigned char _ram_seg;
38 extern unsigned char _eram_seg;
39
/* One loadable chunk of the ELF image.  Segment nodes are threaded
 * onto two circular doubly-linked lists at once: next/prev keeps them
 * sorted by stream offset (the order data is read from the device),
 * while phdr_next/phdr_prev preserves the original program header
 * order (used when recomputing the image checksum).
 */
struct segment {
	struct segment *next;		/* list in stream-offset order */
	struct segment *prev;
	struct segment *phdr_next;	/* list in program header order */
	struct segment *phdr_prev;
	unsigned long s_addr;		/* destination physical address */
	unsigned long s_memsz;		/* size in memory (>= s_filesz) */
	unsigned long s_offset;		/* offset of the data in the stream */
	unsigned long s_filesz;		/* bytes of file data to load */
};
50
/* A post-load verification step, produced while digesting ELF notes.
 * callback() returns nonzero on success.  desc_offset is the offset of
 * the note descriptor within the image stream; desc_addr is filled in
 * with the descriptor's loaded address (0 when the note data was not
 * loaded) before the callback is invoked.
 */
struct verify_callback {
	struct verify_callback *next;
	int (*callback)(struct verify_callback *vcb, 
		Elf_ehdr *ehdr, Elf_phdr *phdr, struct segment *head);
	unsigned long desc_offset;
	unsigned long desc_addr;
};
58
/* Verification state for an EIN_PROGRAM_CHECKSUM note: the expected
 * IP-style checksum of the loaded image, as recorded at build time.
 */
struct ip_checksum_vcb {
	struct verify_callback data;
	unsigned short ip_checksum;
};
63
/* Callback verifying an EIN_PROGRAM_CHECKSUM note.
 *
 * Recomputes an IP-style checksum over the ELF header, the program
 * headers, and every loaded segment (walked in original program
 * header order) and compares it against the checksum stored in the
 * note when the image was built.
 *
 * Returns nonzero when the checksums match.
 */
int verify_ip_checksum(
	struct verify_callback *vcb, 
	Elf_ehdr *ehdr, Elf_phdr *phdr, struct segment *head)
{
	struct ip_checksum_vcb *cb;
	struct segment *ptr;
	unsigned long bytes;
	unsigned long checksum;
	unsigned char buff[2], *n_desc;
	cb = (struct ip_checksum_vcb *)vcb;
	/* Temporarily zero the checksum field in the loaded image so its
	 * own value won't get in the way of verifying the checksum; the
	 * saved bytes are restored below.
	 */
	n_desc = 0;
	if (vcb->desc_addr) {
		n_desc = (unsigned char *)(vcb->desc_addr);
		memcpy(buff, n_desc, 2);
		memset(n_desc, 0, 2);
	}
	/* Fold in each piece, passing the running byte count so pieces
	 * combine at the correct offset parity.
	 */
	bytes = 0;
	checksum = compute_ip_checksum(ehdr, sizeof(*ehdr));
	bytes += sizeof(*ehdr);
	checksum = add_ip_checksums(bytes, checksum, 
		compute_ip_checksum(phdr, ehdr->e_phnum*sizeof(*phdr)));
	bytes += ehdr->e_phnum*sizeof(*phdr);
	for(ptr = head->phdr_next; ptr != head; ptr = ptr->phdr_next) {
		checksum = add_ip_checksums(bytes, checksum,
			compute_ip_checksum((void *)ptr->s_addr, ptr->s_memsz));
		bytes += ptr->s_memsz;
	}
	/* Restore the checksum bytes zeroed above */
	if (n_desc != 0) {
		memcpy(n_desc, buff, 2);
	}
	if (checksum != cb->ip_checksum) {
		printk_err("Image checksum: %04x != computed checksum: %04lx\n",
			cb->ip_checksum, checksum);
	}
	return checksum == cb->ip_checksum;
}
103
104 /* The problem:  
105  * Static executables all want to share the same addresses
106  * in memory because only a few addresses are reliably present on
107  * a machine, and implementing general relocation is hard.
108  *
109  * The solution:
110  * - Allocate a buffer twice the size of the coreboot image.
111  * - Anything that would overwrite coreboot copy into the lower half of
112  *   the buffer. 
113  * - After loading an ELF image copy coreboot to the upper half of the
114  *   buffer.
115  * - Then jump to the loaded image.
116  * 
117  * Benefits:
118  * - Nearly arbitrary standalone executables can be loaded.
119  * - Coreboot is preserved, so it can be returned to.
120  * - The implementation is still relatively simple,
121  *   and much simpler than the general case implemented in kexec.
122  * 
123  */
124
125 static unsigned long get_bounce_buffer(struct lb_memory *mem)
126 {
127         unsigned long lb_size;
128         unsigned long mem_entries;
129         unsigned long buffer;
130         int i;
131         lb_size = (unsigned long)(&_eram_seg - &_ram_seg);
132         /* Double coreboot size so I have somewhere to place a copy to return to */
133         lb_size = lb_size + lb_size;
134         mem_entries = (mem->size - sizeof(*mem))/sizeof(mem->map[0]);
135         buffer = 0;
136         for(i = 0; i < mem_entries; i++) {
137                 unsigned long mstart, mend;
138                 unsigned long msize;
139                 unsigned long tbuffer;
140                 if (mem->map[i].type != LB_MEM_RAM)
141                         continue;
142                 if (unpack_lb64(mem->map[i].start) > MAX_ADDR)
143                         continue;
144                 if (unpack_lb64(mem->map[i].size) < lb_size)
145                         continue;
146                 mstart = unpack_lb64(mem->map[i].start);
147                 msize = MAX_ADDR - mstart +1;
148                 if (msize > unpack_lb64(mem->map[i].size))
149                         msize = unpack_lb64(mem->map[i].size);
150                 mend = mstart + msize;
151                 tbuffer = mend - lb_size;
152                 if (tbuffer < buffer) 
153                         continue;
154                 buffer = tbuffer;
155         }
156         return buffer;
157 }
158
159
160 static struct verify_callback *process_elf_notes(
161         unsigned char *header, 
162         unsigned long offset, unsigned long length)
163 {
164         struct verify_callback *cb_chain;
165         unsigned char *note, *end;
166         unsigned char *program, *version;
167
168         cb_chain = 0;
169         note = header + offset;
170         end = note + length;
171         program = version = 0;
172         while(note < end) {
173                 Elf_Nhdr *hdr;
174                 unsigned char *n_name, *n_desc, *next;
175                 hdr = (Elf_Nhdr *)note;
176                 n_name = note + sizeof(*hdr);
177                 n_desc = n_name + ((hdr->n_namesz + 3) & ~3);
178                 next = n_desc + ((hdr->n_descsz + 3) & ~3);
179                 if (next > end) {
180                         break;
181                 }
182                 if ((hdr->n_namesz == sizeof(ELF_NOTE_BOOT)) && 
183                         (memcmp(n_name, ELF_NOTE_BOOT, sizeof(ELF_NOTE_BOOT)) == 0)) {
184                         switch(hdr->n_type) {
185                         case EIN_PROGRAM_NAME:
186                                 if (n_desc[hdr->n_descsz -1] == 0) {
187                                         program = n_desc;
188                                 }
189                                 break;
190                         case EIN_PROGRAM_VERSION:
191                                 if (n_desc[hdr->n_descsz -1] == 0) {
192                                         version = n_desc;
193                                 }
194                                 break;
195                         case EIN_PROGRAM_CHECKSUM:
196                         {
197                                 struct ip_checksum_vcb *cb;
198                                 cb = malloc(sizeof(*cb));
199                                 cb->ip_checksum = *((uint16_t *)n_desc);
200                                 cb->data.callback = verify_ip_checksum;
201                                 cb->data.next = cb_chain;
202                                 cb->data.desc_offset = n_desc - header;
203                                 cb_chain = &cb->data;
204                                 break;
205                         }
206                         }
207                 }
208                 printk_spew("n_type: %08x n_name(%d): %-*.*s n_desc(%d): %-*.*s\n", 
209                         hdr->n_type,
210                         hdr->n_namesz, hdr->n_namesz, hdr->n_namesz, n_name,
211                         hdr->n_descsz,hdr->n_descsz, hdr->n_descsz, n_desc);
212                 note = next;
213         }
214         if (program && version) {
215                 printk_info("Loading %s version: %s\n",
216                         program, version);
217         }
218         return cb_chain;
219 }
220
221 static int valid_area(struct lb_memory *mem, unsigned long buffer,
222         unsigned long start, unsigned long len)
223 {
224         /* Check through all of the memory segments and ensure
225          * the segment that was passed in is completely contained
226          * in RAM.
227          */
228         int i;
229         unsigned long end = start + len;
230         unsigned long mem_entries = (mem->size - sizeof(*mem))/sizeof(mem->map[0]);
231
232         /* See if I conflict with the bounce buffer */
233         if (end >= buffer) {
234                 return 0;
235         }
236
237         /* Walk through the table of valid memory ranges and see if I
238          * have a match.
239          */
240         for(i = 0; i < mem_entries; i++) {
241                 uint64_t mstart, mend;
242                 uint32_t mtype;
243                 mtype = mem->map[i].type;
244                 mstart = unpack_lb64(mem->map[i].start);
245                 mend = mstart + unpack_lb64(mem->map[i].size);
246                 if ((mtype == LB_MEM_RAM) && (start < mend) && (end > mstart)) {
247                         break;
248                 }
249                 if ((mtype == LB_MEM_TABLE) && (start < mend) && (end > mstart)) {
250                         printk_err("Payload is overwriting Coreboot tables.\n");
251                         break;
252                 }
253         }
254         if (i == mem_entries) {
255                 printk_err("No matching ram area found for range:\n");
256                 printk_err("  [0x%016lx, 0x%016lx)\n", start, end);
257                 printk_err("Ram areas\n");
258                 for(i = 0; i < mem_entries; i++) {
259                         uint64_t mstart, mend;
260                         uint32_t mtype;
261                         mtype = mem->map[i].type;
262                         mstart = unpack_lb64(mem->map[i].start);
263                         mend = mstart + unpack_lb64(mem->map[i].size);
264                         printk_err("  [0x%016lx, 0x%016lx) %s\n",
265                                 (unsigned long)mstart, 
266                                 (unsigned long)mend, 
267                                 (mtype == LB_MEM_RAM)?"RAM":"Reserved");
268                         
269                 }
270                 return 0;
271         }
272         return 1;
273 }
274
/* Retarget any part of 'seg' that would overwrite the running
 * coreboot image onto the bounce buffer instead.
 *
 * The segment may be split into up to three pieces: the parts before
 * and after coreboot keep their original addresses (new list nodes
 * are spliced around 'seg' for them), while the overlapping middle
 * piece is moved to 'buffer' at the same relative position within
 * the coreboot image span.
 */
static void relocate_segment(unsigned long buffer, struct segment *seg)
{
	/* Modify all segments that want to load onto coreboot
	 * to load onto the bounce buffer instead.
	 */
	unsigned long lb_start = (unsigned long)&_ram_seg;
	unsigned long lb_end = (unsigned long)&_eram_seg;
	unsigned long start, middle, end;

	printk_spew("lb: [0x%016lx, 0x%016lx)\n", 
		lb_start, lb_end);

	start = seg->s_addr;
	middle = start + seg->s_filesz;
	end = start + seg->s_memsz;
	/* I don't conflict with coreboot so get out of here */
	if ((end <= lb_start) || (start >= lb_end))
		return;

	printk_spew("segment: [0x%016lx, 0x%016lx, 0x%016lx)\n", 
		start, middle, end);

	/* Slice off a piece at the beginning
	 * that doesn't conflict with coreboot.
	 */
	if (start < lb_start) {
		struct segment *new;
		unsigned long len = lb_start - start;
		/* NOTE(review): malloc is used unchecked here, as elsewhere
		 * in this file -- a failure would fault on the store below.
		 */
		new = malloc(sizeof(*new));
		*new = *seg;
		/* 'new' keeps the leading len bytes; 'seg' is advanced
		 * past them.
		 */
		new->s_memsz = len;
		seg->s_memsz -= len;
		seg->s_addr += len;
		seg->s_offset += len;
		/* Split the file data between the two pieces */
		if (seg->s_filesz > len) {
			new->s_filesz = len;
			seg->s_filesz -= len;
		} else {
			seg->s_filesz = 0;
		}

		/* Order by stream offset */
		new->next = seg;
		new->prev = seg->prev;
		seg->prev->next = new;
		seg->prev = new;
		/* Order by original program header order */
		new->phdr_next = seg;
		new->phdr_prev = seg->phdr_prev;
		seg->phdr_prev->phdr_next = new;
		seg->phdr_prev = new;

		/* compute the new value of start */
		start = seg->s_addr;
		
		printk_spew("   early: [0x%016lx, 0x%016lx, 0x%016lx)\n", 
			new->s_addr, 
			new->s_addr + new->s_filesz,
			new->s_addr + new->s_memsz);
	}
	
	/* Slice off a piece at the end 
	 * that doesn't conflict with coreboot 
	 */
	if (end > lb_end) {
		unsigned long len = lb_end - start;
		struct segment *new;
		new = malloc(sizeof(*new));
		*new = *seg;
		/* 'seg' keeps the first len bytes; 'new' takes the tail
		 * beyond lb_end.
		 */
		seg->s_memsz = len;
		new->s_memsz -= len;
		new->s_addr += len;
		new->s_offset += len;
		if (seg->s_filesz > len) {
			seg->s_filesz = len;
			new->s_filesz -= len;
		} else {
			new->s_filesz = 0;
		}
		/* Order by stream offset */
		new->next = seg->next;
		new->prev = seg;
		seg->next->prev = new;
		seg->next = new;
		/* Order by original program header order */
		new->phdr_next = seg->phdr_next;
		new->phdr_prev = seg;
		seg->phdr_next->phdr_prev = new;
		seg->phdr_next = new;

		printk_spew("   late: [0x%016lx, 0x%016lx, 0x%016lx)\n", 
			new->s_addr, 
			new->s_addr + new->s_filesz,
			new->s_addr + new->s_memsz);
		
	}
	/* Now retarget this segment onto the bounce buffer */
	seg->s_addr = buffer + (seg->s_addr - lb_start);

	printk_spew(" bounce: [0x%016lx, 0x%016lx, 0x%016lx)\n", 
		seg->s_addr, 
		seg->s_addr + seg->s_filesz, 
		seg->s_addr + seg->s_memsz);
}
379
380
381 static int build_elf_segment_list(
382         struct segment *head, 
383         unsigned long bounce_buffer, struct lb_memory *mem,
384         Elf_phdr *phdr, int headers)
385 {
386         struct segment *ptr;
387         int i;
388         memset(head, 0, sizeof(*head));
389         head->phdr_next = head->phdr_prev = head;
390         head->next = head->prev = head;
391         for(i = 0; i < headers; i++) {
392                 struct segment *new;
393                 /* Ignore data that I don't need to handle */
394                 if (phdr[i].p_type != PT_LOAD) {
395                         printk_debug("Dropping non PT_LOAD segment\n");
396                         continue;
397                 }
398                 if (phdr[i].p_memsz == 0) {
399                         printk_debug("Dropping empty segment\n");
400                         continue;
401                 }
402                 new = malloc(sizeof(*new));
403                 new->s_addr = phdr[i].p_paddr;
404                 new->s_memsz = phdr[i].p_memsz;
405                 new->s_offset = phdr[i].p_offset;
406                 new->s_filesz = phdr[i].p_filesz;
407                 printk_debug("New segment addr 0x%lx size 0x%lx offset 0x%lx filesize 0x%lx\n",
408                         new->s_addr, new->s_memsz, new->s_offset, new->s_filesz);
409                 /* Clean up the values */
410                 if (new->s_filesz > new->s_memsz)  {
411                         new->s_filesz = new->s_memsz;
412                 }
413                 printk_debug("(cleaned up) New segment addr 0x%lx size 0x%lx offset 0x%lx filesize 0x%lx\n",
414                         new->s_addr, new->s_memsz, new->s_offset, new->s_filesz);
415                 for(ptr = head->next; ptr != head; ptr = ptr->next) {
416                         if (new->s_offset < ptr->s_offset)
417                                 break;
418                 }
419                 /* Order by stream offset */
420                 new->next = ptr;
421                 new->prev = ptr->prev;
422                 ptr->prev->next = new;
423                 ptr->prev = new;
424                 /* Order by original program header order */
425                 new->phdr_next = head;
426                 new->phdr_prev = head->phdr_prev;
427                 head->phdr_prev->phdr_next  = new;
428                 head->phdr_prev = new;
429
430                 /* Verify the memory addresses in the segment are valid */
431                 if (!valid_area(mem, bounce_buffer, new->s_addr, new->s_memsz)) 
432                         goto out;
433
434                 /* Modify the segment to load onto the bounce_buffer if necessary.
435                  */
436                 relocate_segment(bounce_buffer, new);
437         }
438         return 1;
439  out:
440         return 0;
441 }
442
/* Stream the file data for every segment into memory, in
 * stream-offset order, zero-filling the bss tail of each segment.
 *
 * head        - list produced by build_elf_segment_list()
 * header      - the already-buffered first bytes of the image
 * header_size - number of valid bytes in header[]
 *
 * Bytes that fall within header[] were already consumed from the
 * stream device, so they are memcpy'd rather than re-read.
 * Returns nonzero on success.
 */
static int load_elf_segments(
	struct segment *head, unsigned char *header, unsigned long header_size)
{
	unsigned long offset;	/* current position within the image stream */
	struct segment *ptr;
	
	offset = 0;
	for(ptr = head->next; ptr != head; ptr = ptr->next) {
		unsigned long start_offset;
		unsigned long skip_bytes, read_bytes;
		unsigned char *dest, *middle, *end;
		byte_offset_t result;
		printk_debug("Loading Segment: addr: 0x%016lx memsz: 0x%016lx filesz: 0x%016lx\n",
			ptr->s_addr, ptr->s_memsz, ptr->s_filesz);
		
		/* Compute the boundaries of the segment:
		 * [dest, middle) receives file data,
		 * [middle, end) is zero-filled bss.
		 */
		dest = (unsigned char *)(ptr->s_addr);
		end = dest + ptr->s_memsz;
		middle = dest + ptr->s_filesz;
		start_offset = ptr->s_offset;
		/* Ignore s_offset if I have a pure bss segment */
		if (ptr->s_filesz == 0) {
			start_offset = offset;
		}
		
		printk_spew("[ 0x%016lx, %016lx, 0x%016lx) <- %016lx\n",
			(unsigned long)dest,
			(unsigned long)middle,
			(unsigned long)end,
			(unsigned long)start_offset);
		
		/* Skip initial buffer unused bytes: anything still inside
		 * header[] was already consumed from the stream, so only
		 * 'offset' advances here, never the stream itself.
		 */
		if (offset < header_size) {
			if (start_offset < header_size) {
				offset = start_offset;
			} else {
				offset = header_size;
			}
		}
		
		/* Skip the unused bytes in the stream proper */
		skip_bytes = start_offset - offset;
		if (skip_bytes && 
			((result = stream_skip(skip_bytes)) != skip_bytes)) {
			printk_err("ERROR: Skip of %ld bytes skipped %ld bytes\n",
				skip_bytes, result);
			goto out;
		}
		offset = start_offset;
		
		/* Copy the part of the segment (if any) that lies within
		 * the already-buffered header bytes.
		 */
		if (offset < header_size) {
			size_t len;
			if ((ptr->s_filesz + start_offset) > header_size) {
				len = header_size - start_offset;
			}
			else {
				len = ptr->s_filesz;
			}
			memcpy(dest, &header[start_offset], len);
			dest += len;
		}
		
		/* Read the rest of the segment from the stream device */
		read_bytes = middle - dest;
		if (read_bytes && 
			((result = stream_read(dest, read_bytes)) != read_bytes)) {
			printk_err("ERROR: Read of %ld bytes read %ld bytes...\n",
				read_bytes, result);
			goto out;
		}
		offset += ptr->s_filesz;
		
		/* Zero the extra bytes between middle & end */
		if (middle < end) {
			printk_debug("Clearing Segment: addr: 0x%016lx memsz: 0x%016lx\n",
				(unsigned long)middle, (unsigned long)(end - middle));
			
			/* Zero the extra bytes */
			memset(middle, 0, end - middle);
		}
	}
	return 1;
 out:
	return 0;
}
529
530 static int verify_loaded_image(
531         struct verify_callback *vcb,
532         Elf_ehdr *ehdr, Elf_phdr *phdr,
533         struct segment *head
534         )
535 {
536         struct segment *ptr;
537         int ok;
538         ok = 1;
539         for(; ok && vcb ; vcb = vcb->next) {
540                 /* Find where the note is loaded */
541                 /* The whole note must be loaded intact
542                  * so an address of 0 for the descriptor is impossible
543                  */
544                 vcb->desc_addr = 0; 
545                 for(ptr = head->next; ptr != head; ptr = ptr->next) {
546                         unsigned long desc_addr;
547                         desc_addr = ptr->s_addr + vcb->desc_offset - ptr->s_offset;
548                         if ((desc_addr >= ptr->s_addr) &&
549                                 (desc_addr < (ptr->s_addr + ptr->s_filesz))) {
550                                 vcb->desc_addr = desc_addr;
551                         }
552                 }
553                 ok = vcb->callback(vcb, ehdr, phdr, head);
554         }
555         return ok;
556 }
557
/* Load and start the ELF image whose first header_size bytes are
 * already buffered in header[]; the rest of the image is read from
 * the stream device.
 *
 * Returns 0 on failure.  On success, control jumps to the loaded
 * image; 1 is returned only if/when that image itself returns.
 */
int elfload(struct lb_memory *mem,
	unsigned char *header, unsigned long header_size)
{
	Elf_ehdr *ehdr;
	Elf_phdr *phdr;
	void *entry;
	struct segment head;
	struct verify_callback *cb_chain;
	unsigned long bounce_buffer;

	/* Find a bounce buffer so I can load to coreboot's current location */
	bounce_buffer = get_bounce_buffer(mem);
	if (!bounce_buffer) {
		printk_err("Could not find a bounce buffer...\n");
		goto out;
	}

	ehdr = (Elf_ehdr *)header;
	entry = (void *)(ehdr->e_entry);
	phdr = (Elf_phdr *)(&header[ehdr->e_phoff]);

	/* Digest elf note information from a leading PT_NOTE segment,
	 * but only when the whole note lies within the buffered bytes.
	 * NOTE(review): the '<' rejects a note ending exactly at
	 * header_size; '<=' looks intended -- confirm before changing.
	 */
	cb_chain = 0;
	if ((phdr[0].p_type == PT_NOTE) && 
		((phdr[0].p_offset + phdr[0].p_filesz) < header_size)) {
		cb_chain = process_elf_notes(header,
			phdr[0].p_offset, phdr[0].p_filesz);
	}

	/* Preprocess the elf segments: validate their address ranges
	 * and retarget anything overlapping coreboot onto the bounce
	 * buffer.
	 */
	if (!build_elf_segment_list(&head, 
		bounce_buffer, mem, phdr, ehdr->e_phnum))
		goto out;

	/* Load the segments */
	if (!load_elf_segments(&head, header, header_size))
		goto out;

	printk_spew("Loaded segments\n");
	/* Verify the loaded image (e.g. note checksums) */
	if (!verify_loaded_image(cb_chain, ehdr, phdr, &head)) 
		goto out;

	printk_spew("verified segments\n");
	/* Shutdown the stream device */
	stream_fini();
	
	printk_spew("closed down stream\n");
	/* Reset to booting from this image as late as possible */
	boot_successful();

	printk_debug("Jumping to boot code at %p\n", entry);
	post_code(0xfe);

	/* Jump to kernel */
	jmp_to_elf_entry(entry, bounce_buffer);
	return 1;

 out:
	return 0;
}
619
620 int elfboot(struct lb_memory *mem)
621 {
622         Elf_ehdr *ehdr;
623         static unsigned char header[ELF_HEAD_SIZE];
624         int header_offset;
625         int i, result;
626
627         result = 0;
628         printk_debug("\nelfboot: Attempting to load payload.\n");
629         post_code(0xf8);
630
631         if (stream_init() < 0) {
632                 printk_err("Could not initialize driver...\n");
633                 goto out;
634         }
635
636         /* Read in the initial ELF_HEAD_SIZE bytes */
637         if (stream_read(header, ELF_HEAD_SIZE) != ELF_HEAD_SIZE) {
638                 printk_err("Read failed...\n");
639                 goto out;
640         }
641         /* Scan for an elf header */
642         header_offset = -1;
643         for(i = 0; i < ELF_HEAD_SIZE - (sizeof(Elf_ehdr) + sizeof(Elf_phdr)); i+=16) {
644                 ehdr = (Elf_ehdr *)(&header[i]);
645                 if (memcmp(ehdr->e_ident, ELFMAG, 4) != 0) {
646                         printk_debug("No header at %d\n", i);
647                         continue;
648                 }
649                 printk_debug("Found ELF candidate at offset %d\n", i);
650                 /* Sanity check the elf header */
651                 if ((ehdr->e_type == ET_EXEC) &&
652                         elf_check_arch(ehdr) &&
653                         (ehdr->e_ident[EI_VERSION] == EV_CURRENT) &&
654                         (ehdr->e_version == EV_CURRENT) &&
655                         (ehdr->e_ehsize == sizeof(Elf_ehdr)) &&
656                         (ehdr->e_phentsize = sizeof(Elf_phdr)) &&
657                         (ehdr->e_phoff < (ELF_HEAD_SIZE - i)) &&
658                         ((ehdr->e_phoff + (ehdr->e_phentsize * ehdr->e_phnum)) <= 
659                                 (ELF_HEAD_SIZE - i))) {
660                         header_offset = i;
661                         break;
662                 }
663                 ehdr = 0;
664         }
665         printk_debug("header_offset is %d\n", header_offset);
666         if (header_offset == -1) {
667                 goto out;
668         }
669
670         printk_debug("Try to load at offset 0x%x\n", header_offset);
671         result = elfload(mem, 
672                 header + header_offset , ELF_HEAD_SIZE - header_offset);
673  out:
674         if (!result) {
675                 /* Shutdown the stream device */
676                 stream_fini();
677
678                 printk_err("Can not load ELF Image.\n");
679
680                 post_code(0xff);
681         }
682         return 0;
683
684 }