/* src/boot/elfboot.c */
/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2003 Eric W. Biederman <ebiederm@xmission.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA, 02110-1301 USA
 */

#include <console/console.h>
#include <part/fallback_boot.h>
#include <boot/elf.h>
#include <boot/elf_boot.h>
#include <boot/coreboot_tables.h>
#include <ip_checksum.h>
#include <stream/read_bytes.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Maximum physical address we can use for the coreboot bounce buffer. */
#ifndef MAX_ADDR
#define MAX_ADDR -1UL
#endif

extern unsigned char _ram_seg;
extern unsigned char _eram_seg;

struct segment {
        struct segment *next;
        struct segment *prev;
        struct segment *phdr_next;
        struct segment *phdr_prev;
        unsigned long s_addr;
        unsigned long s_memsz;
        unsigned long s_offset;
        unsigned long s_filesz;
};

struct verify_callback {
        struct verify_callback *next;
        int (*callback)(struct verify_callback *vcb,
                Elf_ehdr *ehdr, Elf_phdr *phdr, struct segment *head);
        unsigned long desc_offset;
        unsigned long desc_addr;
};

struct ip_checksum_vcb {
        struct verify_callback data;
        unsigned short ip_checksum;
};

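/* Recompute the IP-style checksum over the ELF header, the program headers
 * and every loaded segment, temporarily zeroing the in-image copy of the
 * checksum note so it does not affect the result, and compare it against
 * the checksum recorded in the image's checksum note.
 */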
int verify_ip_checksum(
        struct verify_callback *vcb,
        Elf_ehdr *ehdr, Elf_phdr *phdr, struct segment *head)
{
        struct ip_checksum_vcb *cb;
        struct segment *ptr;
        unsigned long bytes;
        unsigned long checksum;
        unsigned char buff[2], *n_desc;
        cb = (struct ip_checksum_vcb *)vcb;
        /* Zero the checksum bytes in the loaded image so their value
         * won't get in the way of verifying the checksum.
         */
        n_desc = 0;
        if (vcb->desc_addr) {
                n_desc = (unsigned char *)(vcb->desc_addr);
                memcpy(buff, n_desc, 2);
                memset(n_desc, 0, 2);
        }
        bytes = 0;
        checksum = compute_ip_checksum(ehdr, sizeof(*ehdr));
        bytes += sizeof(*ehdr);
        checksum = add_ip_checksums(bytes, checksum,
                compute_ip_checksum(phdr, ehdr->e_phnum*sizeof(*phdr)));
        bytes += ehdr->e_phnum*sizeof(*phdr);
        for(ptr = head->phdr_next; ptr != head; ptr = ptr->phdr_next) {
                checksum = add_ip_checksums(bytes, checksum,
                        compute_ip_checksum((void *)ptr->s_addr, ptr->s_memsz));
                bytes += ptr->s_memsz;
        }
        if (n_desc != 0) {
                memcpy(n_desc, buff, 2);
        }
        if (checksum != cb->ip_checksum) {
                printk_err("Image checksum: %04x != computed checksum: %04lx\n",
                        cb->ip_checksum, checksum);
        }
        return checksum == cb->ip_checksum;
}

/* The problem:
 * Static executables all want to share the same addresses
 * in memory because only a few addresses are reliably present on
 * a machine, and implementing general relocation is hard.
 *
 * The solution:
 * - Allocate a buffer twice the size of the coreboot image.
 * - Anything that would overwrite coreboot is copied into the lower half of
 *   the buffer.
 * - After loading an ELF image, copy coreboot to the upper half of the
 *   buffer.
 * - Then jump to the loaded image.
 *
 * Benefits:
 * - Nearly arbitrary standalone executables can be loaded.
 * - Coreboot is preserved, so it can be returned to.
 * - The implementation is still relatively simple,
 *   and much simpler than the general case implemented in kexec.
 */

static unsigned long bounce_size;

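/* Find the highest region of usable RAM below MAX_ADDR that is large enough
 * to hold two copies of coreboot, record the size of one copy in
 * bounce_size, and return the base address of the bounce buffer
 * (or 0 if no suitable region exists).
 */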
static unsigned long get_bounce_buffer(struct lb_memory *mem)
{
        unsigned long lb_size;
        unsigned long mem_entries;
        unsigned long buffer;
        int i;
        lb_size = (unsigned long)(&_eram_seg - &_ram_seg);
        /* Double coreboot size so I have somewhere to place a copy to return to */
        bounce_size = lb_size;
        lb_size = bounce_size + lb_size;
        mem_entries = (mem->size - sizeof(*mem))/sizeof(mem->map[0]);
        buffer = 0;
        for(i = 0; i < mem_entries; i++) {
                unsigned long mstart, mend;
                unsigned long msize;
                unsigned long tbuffer;
                if (mem->map[i].type != LB_MEM_RAM)
                        continue;
                if (unpack_lb64(mem->map[i].start) > MAX_ADDR)
                        continue;
                if (unpack_lb64(mem->map[i].size) < lb_size)
                        continue;
                mstart = unpack_lb64(mem->map[i].start);
                msize = MAX_ADDR - mstart + 1;
                if (msize > unpack_lb64(mem->map[i].size))
                        msize = unpack_lb64(mem->map[i].size);
                mend = mstart + msize;
                tbuffer = mend - lb_size;
                if (tbuffer < buffer)
                        continue;
                buffer = tbuffer;
        }
        return buffer;
}

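/* Walk the ELF note segment found in the initial header buffer, record the
 * program name and version for the boot message, and build a chain of
 * verification callbacks (currently only the image checksum note) to be run
 * after the segments have been loaded.
 */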
static struct verify_callback *process_elf_notes(
        unsigned char *header,
        unsigned long offset, unsigned long length)
{
        struct verify_callback *cb_chain;
        unsigned char *note, *end;
        unsigned char *program, *version;

        cb_chain = 0;
        note = header + offset;
        end = note + length;
        program = version = 0;
        while(note < end) {
                Elf_Nhdr *hdr;
                unsigned char *n_name, *n_desc, *next;
                hdr = (Elf_Nhdr *)note;
                n_name = note + sizeof(*hdr);
                n_desc = n_name + ((hdr->n_namesz + 3) & ~3);
                next = n_desc + ((hdr->n_descsz + 3) & ~3);
                if (next > end) {
                        break;
                }
                if ((hdr->n_namesz == sizeof(ELF_NOTE_BOOT)) &&
                        (memcmp(n_name, ELF_NOTE_BOOT, sizeof(ELF_NOTE_BOOT)) == 0)) {
                        switch(hdr->n_type) {
                        case EIN_PROGRAM_NAME:
                                if (n_desc[hdr->n_descsz - 1] == 0) {
                                        program = n_desc;
                                }
                                break;
                        case EIN_PROGRAM_VERSION:
                                if (n_desc[hdr->n_descsz - 1] == 0) {
                                        version = n_desc;
                                }
                                break;
                        case EIN_PROGRAM_CHECKSUM:
                        {
                                struct ip_checksum_vcb *cb;
                                cb = malloc(sizeof(*cb));
                                cb->ip_checksum = *((uint16_t *)n_desc);
                                cb->data.callback = verify_ip_checksum;
                                cb->data.next = cb_chain;
                                cb->data.desc_offset = n_desc - header;
                                cb_chain = &cb->data;
                                break;
                        }
                        }
                }
                printk_spew("n_type: %08x n_name(%d): %-*.*s n_desc(%d): %-*.*s\n",
                        hdr->n_type,
                        hdr->n_namesz, hdr->n_namesz, hdr->n_namesz, n_name,
                        hdr->n_descsz, hdr->n_descsz, hdr->n_descsz, n_desc);
                note = next;
        }
        if (program && version) {
                printk_info("Loading %s version: %s\n",
                        program, version);
        }
        return cb_chain;
}

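/* Check that the address range [start, start + len) does not run into the
 * bounce buffer and overlaps a usable RAM region in the coreboot memory
 * table; warn if it would land on the coreboot tables.
 */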
static int valid_area(struct lb_memory *mem, unsigned long buffer,
        unsigned long start, unsigned long len)
{
        /* Check through all of the memory segments and ensure
         * the segment that was passed in is completely contained
         * in RAM.
         */
        int i;
        unsigned long end = start + len;
        unsigned long mem_entries = (mem->size - sizeof(*mem))/sizeof(mem->map[0]);

        /* See if I conflict with the bounce buffer */
        if (end >= buffer) {
                return 0;
        }

        /* Walk through the table of valid memory ranges and see if I
         * have a match.
         */
        for(i = 0; i < mem_entries; i++) {
                uint64_t mstart, mend;
                uint32_t mtype;
                mtype = mem->map[i].type;
                mstart = unpack_lb64(mem->map[i].start);
                mend = mstart + unpack_lb64(mem->map[i].size);
                if ((mtype == LB_MEM_RAM) && (start < mend) && (end > mstart)) {
                        break;
                }
                if ((mtype == LB_MEM_TABLE) && (start < mend) && (end > mstart)) {
                        printk_err("Payload is overwriting the coreboot tables.\n");
                        break;
                }
        }
        if (i == mem_entries) {
                printk_err("No matching RAM area found for range:\n");
                printk_err("  [0x%016lx, 0x%016lx)\n", start, end);
                printk_err("RAM areas:\n");
                for(i = 0; i < mem_entries; i++) {
                        uint64_t mstart, mend;
                        uint32_t mtype;
                        mtype = mem->map[i].type;
                        mstart = unpack_lb64(mem->map[i].start);
                        mend = mstart + unpack_lb64(mem->map[i].size);
                        printk_err("  [0x%016lx, 0x%016lx) %s\n",
                                (unsigned long)mstart,
                                (unsigned long)mend,
                                (mtype == LB_MEM_RAM)?"RAM":"Reserved");
                }
                return 0;
        }
        return 1;
}

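/* If a segment overlaps the memory coreboot currently occupies, split off
 * the non-overlapping head and/or tail into separate segments and retarget
 * the overlapping piece onto the bounce buffer, so coreboot is not
 * overwritten while the image is loaded.
 */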
static void relocate_segment(unsigned long buffer, struct segment *seg)
{
        /* Modify all segments that want to load onto coreboot
         * to load onto the bounce buffer instead.
         */
        unsigned long lb_start = (unsigned long)&_ram_seg;
        unsigned long lb_end = (unsigned long)&_eram_seg;
        unsigned long start, middle, end;

        printk_spew("lb: [0x%016lx, 0x%016lx)\n",
                lb_start, lb_end);

        start = seg->s_addr;
        middle = start + seg->s_filesz;
        end = start + seg->s_memsz;
        /* I don't conflict with coreboot so get out of here */
        if ((end <= lb_start) || (start >= lb_end))
                return;

        printk_spew("segment: [0x%016lx, 0x%016lx, 0x%016lx)\n",
                start, middle, end);

        /* Slice off a piece at the beginning
         * that doesn't conflict with coreboot.
         */
        if (start < lb_start) {
                struct segment *new;
                unsigned long len = lb_start - start;
                new = malloc(sizeof(*new));
                *new = *seg;
                new->s_memsz = len;
                seg->s_memsz -= len;
                seg->s_addr += len;
                seg->s_offset += len;
                if (seg->s_filesz > len) {
                        new->s_filesz = len;
                        seg->s_filesz -= len;
                } else {
                        seg->s_filesz = 0;
                }

                /* Order by stream offset */
                new->next = seg;
                new->prev = seg->prev;
                seg->prev->next = new;
                seg->prev = new;
                /* Order by original program header order */
                new->phdr_next = seg;
                new->phdr_prev = seg->phdr_prev;
                seg->phdr_prev->phdr_next = new;
                seg->phdr_prev = new;

                /* compute the new value of start */
                start = seg->s_addr;

                printk_spew("   early: [0x%016lx, 0x%016lx, 0x%016lx)\n",
                        new->s_addr,
                        new->s_addr + new->s_filesz,
                        new->s_addr + new->s_memsz);
        }

        /* Slice off a piece at the end
         * that doesn't conflict with coreboot.
         */
        if (end > lb_end) {
                unsigned long len = lb_end - start;
                struct segment *new;
                new = malloc(sizeof(*new));
                *new = *seg;
                seg->s_memsz = len;
                new->s_memsz -= len;
                new->s_addr += len;
                new->s_offset += len;
                if (seg->s_filesz > len) {
                        seg->s_filesz = len;
                        new->s_filesz -= len;
                } else {
                        new->s_filesz = 0;
                }
                /* Order by stream offset */
                new->next = seg->next;
                new->prev = seg;
                seg->next->prev = new;
                seg->next = new;
                /* Order by original program header order */
                new->phdr_next = seg->phdr_next;
                new->phdr_prev = seg;
                seg->phdr_next->phdr_prev = new;
                seg->phdr_next = new;

                printk_spew("   late: [0x%016lx, 0x%016lx, 0x%016lx)\n",
                        new->s_addr,
                        new->s_addr + new->s_filesz,
                        new->s_addr + new->s_memsz);
        }
        /* Now retarget this segment onto the bounce buffer */
        seg->s_addr = buffer + (seg->s_addr - lb_start);

        printk_spew(" bounce: [0x%016lx, 0x%016lx, 0x%016lx)\n",
                seg->s_addr,
                seg->s_addr + seg->s_filesz,
                seg->s_addr + seg->s_memsz);
}

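/* Convert the PT_LOAD program headers into a doubly linked list of segment
 * structures, ordered both by file offset and by original program header
 * order, validating each target range and relocating segments that collide
 * with coreboot onto the bounce buffer.
 */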
static int build_elf_segment_list(
        struct segment *head,
        unsigned long bounce_buffer, struct lb_memory *mem,
        Elf_phdr *phdr, int headers)
{
        struct segment *ptr;
        int i;
        memset(head, 0, sizeof(*head));
        head->phdr_next = head->phdr_prev = head;
        head->next = head->prev = head;
        for(i = 0; i < headers; i++) {
                struct segment *new;
                /* Ignore data that I don't need to handle */
                if (phdr[i].p_type != PT_LOAD) {
                        printk_debug("Dropping non-PT_LOAD segment\n");
                        continue;
                }
                if (phdr[i].p_memsz == 0) {
                        printk_debug("Dropping empty segment\n");
                        continue;
                }
                new = malloc(sizeof(*new));
                new->s_addr = phdr[i].p_paddr;
                new->s_memsz = phdr[i].p_memsz;
                new->s_offset = phdr[i].p_offset;
                new->s_filesz = phdr[i].p_filesz;
                printk_debug("New segment addr 0x%lx size 0x%lx offset 0x%lx filesize 0x%lx\n",
                        new->s_addr, new->s_memsz, new->s_offset, new->s_filesz);
                /* Clean up the values */
                if (new->s_filesz > new->s_memsz) {
                        new->s_filesz = new->s_memsz;
                }
                printk_debug("(cleaned up) New segment addr 0x%lx size 0x%lx offset 0x%lx filesize 0x%lx\n",
                        new->s_addr, new->s_memsz, new->s_offset, new->s_filesz);
                for(ptr = head->next; ptr != head; ptr = ptr->next) {
                        if (new->s_offset < ptr->s_offset)
                                break;
                }
                /* Order by stream offset */
                new->next = ptr;
                new->prev = ptr->prev;
                ptr->prev->next = new;
                ptr->prev = new;
                /* Order by original program header order */
                new->phdr_next = head;
                new->phdr_prev = head->phdr_prev;
                head->phdr_prev->phdr_next = new;
                head->phdr_prev = new;

                /* Verify the memory addresses in the segment are valid */
                if (!valid_area(mem, bounce_buffer, new->s_addr, new->s_memsz))
                        goto out;

                /* Modify the segment to load onto the bounce_buffer if necessary. */
                relocate_segment(bounce_buffer, new);
        }
        return 1;
 out:
        return 0;
}

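/* Walk the segment list in stream order and load each segment: data still
 * present in the initial header buffer is copied directly, the rest is read
 * from the stream device, and any bss area beyond the file data is zeroed.
 */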
static int load_elf_segments(
        struct segment *head, unsigned char *header, unsigned long header_size)
{
        unsigned long offset;
        struct segment *ptr;

        offset = 0;
        for(ptr = head->next; ptr != head; ptr = ptr->next) {
                unsigned long start_offset;
                unsigned long skip_bytes, read_bytes;
                unsigned char *dest, *middle, *end;
                byte_offset_t result;
                printk_debug("Loading Segment: addr: 0x%016lx memsz: 0x%016lx filesz: 0x%016lx\n",
                        ptr->s_addr, ptr->s_memsz, ptr->s_filesz);

                /* Compute the boundaries of the segment */
                dest = (unsigned char *)(ptr->s_addr);
                end = dest + ptr->s_memsz;
                middle = dest + ptr->s_filesz;
                start_offset = ptr->s_offset;
                /* Ignore s_offset if I have a pure bss segment */
                if (ptr->s_filesz == 0) {
                        start_offset = offset;
                }

                printk_spew("[ 0x%016lx, 0x%016lx, 0x%016lx) <- 0x%016lx\n",
                        (unsigned long)dest,
                        (unsigned long)middle,
                        (unsigned long)end,
                        (unsigned long)start_offset);

                /* Skip unused bytes in the initial buffer */
                if (offset < header_size) {
                        if (start_offset < header_size) {
                                offset = start_offset;
                        } else {
                                offset = header_size;
                        }
                }

                /* Skip the unused bytes */
                skip_bytes = start_offset - offset;
                if (skip_bytes &&
                        ((result = stream_skip(skip_bytes)) != skip_bytes)) {
                        printk_err("ERROR: Skip of %ld bytes skipped %ld bytes\n",
                                skip_bytes, result);
                        goto out;
                }
                offset = start_offset;

                /* Copy data from the initial buffer */
                if (offset < header_size) {
                        size_t len;
                        if ((ptr->s_filesz + start_offset) > header_size) {
                                len = header_size - start_offset;
                        } else {
                                len = ptr->s_filesz;
                        }
                        memcpy(dest, &header[start_offset], len);
                        dest += len;
                }

                /* Read the rest of the segment from the stream device */
                read_bytes = middle - dest;
                if (read_bytes &&
                        ((result = stream_read(dest, read_bytes)) != read_bytes)) {
                        printk_err("ERROR: Read of %ld bytes read %ld bytes...\n",
                                read_bytes, result);
                        goto out;
                }
                offset += ptr->s_filesz;

                /* Zero the extra bytes between middle & end */
                if (middle < end) {
                        printk_debug("Clearing Segment: addr: 0x%016lx memsz: 0x%016lx\n",
                                (unsigned long)middle, (unsigned long)(end - middle));

                        memset(middle, 0, end - middle);
                }
        }
        return 1;
 out:
        return 0;
}

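/* Run every registered verification callback against the loaded image.
 * For each callback, locate the in-memory address of its note descriptor
 * (if the containing segment was loaded) before invoking it; stop at the
 * first failure and return the result.
 */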
static int verify_loaded_image(
        struct verify_callback *vcb,
        Elf_ehdr *ehdr, Elf_phdr *phdr,
        struct segment *head)
{
        struct segment *ptr;
        int ok;
        ok = 1;
        for(; ok && vcb; vcb = vcb->next) {
                /* Find where the note is loaded */
                /* The whole note must be loaded intact
                 * so an address of 0 for the descriptor is impossible
                 */
                vcb->desc_addr = 0;
                for(ptr = head->next; ptr != head; ptr = ptr->next) {
                        unsigned long desc_addr;
                        desc_addr = ptr->s_addr + vcb->desc_offset - ptr->s_offset;
                        if ((desc_addr >= ptr->s_addr) &&
                                (desc_addr < (ptr->s_addr + ptr->s_filesz))) {
                                vcb->desc_addr = desc_addr;
                        }
                }
                ok = vcb->callback(vcb, ehdr, phdr, head);
        }
        return ok;
}

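/* Load an ELF image whose first bytes are already in header: find a bounce
 * buffer, digest the ELF notes, build and load the segment list, verify the
 * loaded image, shut down the stream device and jump to the entry point.
 */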
int elfload(struct lb_memory *mem,
        unsigned char *header, unsigned long header_size)
{
        Elf_ehdr *ehdr;
        Elf_phdr *phdr;
        void *entry;
        struct segment head;
        struct verify_callback *cb_chain;
        unsigned long bounce_buffer;

        /* Find a bounce buffer so I can load to coreboot's current location */
        bounce_buffer = get_bounce_buffer(mem);
        if (!bounce_buffer) {
                printk_err("Could not find a bounce buffer...\n");
                goto out;
        }

        ehdr = (Elf_ehdr *)header;
        entry = (void *)(ehdr->e_entry);
        phdr = (Elf_phdr *)(&header[ehdr->e_phoff]);

        /* Digest the ELF note information... */
        cb_chain = 0;
        if ((phdr[0].p_type == PT_NOTE) &&
                ((phdr[0].p_offset + phdr[0].p_filesz) < header_size)) {
                cb_chain = process_elf_notes(header,
                        phdr[0].p_offset, phdr[0].p_filesz);
        }

        /* Preprocess the ELF segments */
        if (!build_elf_segment_list(&head,
                bounce_buffer, mem, phdr, ehdr->e_phnum))
                goto out;

        /* Load the segments */
        if (!load_elf_segments(&head, header, header_size))
                goto out;

        printk_spew("Loaded segments\n");
        /* Verify the loaded image */
        if (!verify_loaded_image(cb_chain, ehdr, phdr, &head))
                goto out;

        printk_spew("verified segments\n");
        /* Shut down the stream device */
        stream_fini();

        printk_spew("closed down stream\n");
        /* Reset to booting from this image as late as possible */
        boot_successful();

        printk_debug("Jumping to boot code at %p\n", entry);
        post_code(0xfe);

        /* Jump to kernel */
        jmp_to_elf_entry(entry, bounce_buffer, bounce_size);
        return 1;

 out:
        return 0;
}

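/* Payload entry point: initialize the stream device, read the first
 * ELF_HEAD_SIZE bytes, scan them on 16-byte boundaries for a sane ELF
 * executable header and, if one is found, hand it to elfload().
 */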
int elfboot(struct lb_memory *mem)
{
        Elf_ehdr *ehdr;
        static unsigned char header[ELF_HEAD_SIZE];
        int header_offset;
        int i, result;

        result = 0;
        printk_debug("\nelfboot: Attempting to load payload.\n");
        post_code(0xf8);

        if (stream_init() < 0) {
                printk_err("Could not initialize driver...\n");
                goto out;
        }

        /* Read in the initial ELF_HEAD_SIZE bytes */
        if (stream_read(header, ELF_HEAD_SIZE) != ELF_HEAD_SIZE) {
                printk_err("Read failed...\n");
                goto out;
        }
        /* Scan for an ELF header */
        header_offset = -1;
        for(i = 0; i < ELF_HEAD_SIZE - (sizeof(Elf_ehdr) + sizeof(Elf_phdr)); i += 16) {
                ehdr = (Elf_ehdr *)(&header[i]);
                if (memcmp(ehdr->e_ident, ELFMAG, 4) != 0) {
                        printk_debug("No header at %d\n", i);
                        continue;
                }
                printk_debug("Found ELF candidate at offset %d\n", i);
                /* Sanity check the ELF header */
                if ((ehdr->e_type == ET_EXEC) &&
                        elf_check_arch(ehdr) &&
                        (ehdr->e_ident[EI_VERSION] == EV_CURRENT) &&
                        (ehdr->e_version == EV_CURRENT) &&
                        (ehdr->e_ehsize == sizeof(Elf_ehdr)) &&
                        (ehdr->e_phentsize == sizeof(Elf_phdr)) &&
                        (ehdr->e_phoff < (ELF_HEAD_SIZE - i)) &&
                        ((ehdr->e_phoff + (ehdr->e_phentsize * ehdr->e_phnum)) <=
                                (ELF_HEAD_SIZE - i))) {
                        header_offset = i;
                        break;
                }
                ehdr = 0;
        }
        printk_debug("header_offset is %d\n", header_offset);
        if (header_offset == -1) {
                goto out;
        }

        printk_debug("Try to load at offset 0x%x\n", header_offset);
        result = elfload(mem,
                header + header_offset, ELF_HEAD_SIZE - header_offset);
 out:
        if (!result) {
                /* Shut down the stream device */
                stream_fini();

                printk_err("Cannot load ELF image.\n");

                post_code(0xff);
        }
        return 0;
}