1 #include <console/console.h>
2 #include <ip_checksum.h>
3 #include <boot/coreboot_tables.h>
4 #include "coreboot_table.h"
7 #include <device/device.h>
10 struct lb_header *lb_table_init(unsigned long addr)
12 struct lb_header *header;
14 /* 16 byte align the address */
18 header = (void *)addr;
19 header->signature[0] = 'L';
20 header->signature[1] = 'B';
21 header->signature[2] = 'I';
22 header->signature[3] = 'O';
23 header->header_bytes = sizeof(*header);
24 header->header_checksum = 0;
25 header->table_bytes = 0;
26 header->table_checksum = 0;
27 header->table_entries = 0;
31 struct lb_record *lb_first_record(struct lb_header *header)
33 struct lb_record *rec;
34 rec = (void *)(((char *)header) + sizeof(*header));
38 struct lb_record *lb_last_record(struct lb_header *header)
40 struct lb_record *rec;
41 rec = (void *)(((char *)header) + sizeof(*header) + header->table_bytes);
45 struct lb_record *lb_next_record(struct lb_record *rec)
47 rec = (void *)(((char *)rec) + rec->size);
51 struct lb_record *lb_new_record(struct lb_header *header)
53 struct lb_record *rec;
54 rec = lb_last_record(header);
55 if (header->table_entries) {
56 header->table_bytes += rec->size;
58 rec = lb_last_record(header);
59 header->table_entries++;
60 rec->tag = LB_TAG_UNUSED;
61 rec->size = sizeof(*rec);
66 struct lb_memory *lb_memory(struct lb_header *header)
68 struct lb_record *rec;
69 struct lb_memory *mem;
70 rec = lb_new_record(header);
71 mem = (struct lb_memory *)rec;
72 mem->tag = LB_TAG_MEMORY;
73 mem->size = sizeof(*mem);
/* Record the legacy serial console port, if one is configured.
 * Returns the new record, or NULL when no TTYS0_BASE is defined.
 */
struct lb_serial *lb_serial(struct lb_header *header)
{
#if defined(TTYS0_BASE)
	struct lb_record *rec;
	struct lb_serial *serial;
	rec = lb_new_record(header);
	serial = (struct lb_serial *)rec;
	serial->tag = LB_TAG_SERIAL;
	serial->size = sizeof(*serial);
	serial->ioport = TTYS0_BASE;
	/* NOTE(review): upstream also records the configured baud rate here
	 * (serial->baud = TTYS0_BAUD) — confirm the field/macro in this tree.
	 */
	serial->baud = TTYS0_BAUD;
	return serial;
#else
	return NULL;
#endif
}
93 struct lb_mainboard *lb_mainboard(struct lb_header *header)
95 struct lb_record *rec;
96 struct lb_mainboard *mainboard;
97 rec = lb_new_record(header);
98 mainboard = (struct lb_mainboard *)rec;
99 mainboard->tag = LB_TAG_MAINBOARD;
101 mainboard->size = (sizeof(*mainboard) +
102 strlen(mainboard_vendor) + 1 +
103 strlen(mainboard_part_number) + 1 +
106 mainboard->vendor_idx = 0;
107 mainboard->part_number_idx = strlen(mainboard_vendor) + 1;
109 memcpy(mainboard->strings + mainboard->vendor_idx,
110 mainboard_vendor, strlen(mainboard_vendor) + 1);
111 memcpy(mainboard->strings + mainboard->part_number_idx,
112 mainboard_part_number, strlen(mainboard_part_number) + 1);
117 struct cmos_checksum *lb_cmos_checksum(struct lb_header *header)
119 struct lb_record *rec;
120 struct cmos_checksum *cmos_checksum;
121 rec = lb_new_record(header);
122 cmos_checksum = (struct cmos_checksum *)rec;
123 cmos_checksum->tag = LB_TAG_OPTION_CHECKSUM;
125 cmos_checksum->size = (sizeof(*cmos_checksum));
127 cmos_checksum->range_start = LB_CKS_RANGE_START * 8;
128 cmos_checksum->range_end = ( LB_CKS_RANGE_END * 8 ) + 7;
129 cmos_checksum->location = LB_CKS_LOC * 8;
130 cmos_checksum->type = CHECKSUM_PCBIOS;
132 return cmos_checksum;
135 void lb_strings(struct lb_header *header)
137 static const struct {
141 { LB_TAG_VERSION, coreboot_version, },
142 { LB_TAG_EXTRA_VERSION, coreboot_extra_version, },
143 { LB_TAG_BUILD, coreboot_build, },
144 { LB_TAG_COMPILE_TIME, coreboot_compile_time, },
145 { LB_TAG_COMPILE_BY, coreboot_compile_by, },
146 { LB_TAG_COMPILE_HOST, coreboot_compile_host, },
147 { LB_TAG_COMPILE_DOMAIN, coreboot_compile_domain, },
148 { LB_TAG_COMPILER, coreboot_compiler, },
149 { LB_TAG_LINKER, coreboot_linker, },
150 { LB_TAG_ASSEMBLER, coreboot_assembler, },
153 for(i = 0; i < sizeof(strings)/sizeof(strings[0]); i++) {
154 struct lb_string *rec;
156 rec = (struct lb_string *)lb_new_record(header);
157 len = strlen(strings[i].string);
158 rec->tag = strings[i].tag;
159 rec->size = (sizeof(*rec) + len + 1 + 3) & ~3;
160 memcpy(rec->string, strings[i].string, len+1);
165 void lb_memory_range(struct lb_memory *mem,
166 uint32_t type, uint64_t start, uint64_t size)
169 entries = (mem->size - sizeof(*mem))/sizeof(mem->map[0]);
170 mem->map[entries].start = pack_lb64(start);
171 mem->map[entries].size = pack_lb64(size);
172 mem->map[entries].type = type;
173 mem->size += sizeof(mem->map[0]);
176 static void lb_reserve_table_memory(struct lb_header *head)
178 struct lb_record *last_rec;
179 struct lb_memory *mem;
184 last_rec = lb_last_record(head);
186 start = (unsigned long)head;
187 end = (unsigned long)last_rec;
188 entries = (mem->size - sizeof(*mem))/sizeof(mem->map[0]);
189 /* Resize the right two memory areas so this table is in
190 * a reserved area of memory. Everything has been carefully
191 * setup so that is all we need to do.
193 for(i = 0; i < entries; i++ ) {
194 uint64_t map_start = unpack_lb64(mem->map[i].start);
195 uint64_t map_end = map_start + unpack_lb64(mem->map[i].size);
196 /* Does this area need to be expanded? */
197 if (map_end == start) {
198 mem->map[i].size = pack_lb64(end - map_start);
200 /* Does this area need to be contracted? */
201 else if (map_start == start) {
202 mem->map[i].start = pack_lb64(end);
203 mem->map[i].size = pack_lb64(map_end - end);
208 unsigned long lb_table_fini(struct lb_header *head)
210 struct lb_record *rec, *first_rec;
211 rec = lb_last_record(head);
212 if (head->table_entries) {
213 head->table_bytes += rec->size;
215 lb_reserve_table_memory(head);
216 first_rec = lb_first_record(head);
217 head->table_checksum = compute_ip_checksum(first_rec, head->table_bytes);
218 head->header_checksum = 0;
219 head->header_checksum = compute_ip_checksum(head, sizeof(*head));
220 printk_debug("Wrote coreboot table at: %p - %p checksum %lx\n",
221 head, rec, head->table_checksum);
222 return (unsigned long)rec;
225 static void lb_cleanup_memory_ranges(struct lb_memory *mem)
229 entries = (mem->size - sizeof(*mem))/sizeof(mem->map[0]);
231 /* Sort the lb memory ranges */
232 for(i = 0; i < entries; i++) {
233 uint64_t entry_start = unpack_lb64(mem->map[i].start);
234 for(j = i; j < entries; j++) {
235 uint64_t temp_start = unpack_lb64(mem->map[j].start);
236 if (temp_start < entry_start) {
237 struct lb_memory_range tmp;
239 mem->map[i] = mem->map[j];
245 /* Merge adjacent entries */
246 for(i = 0; (i + 1) < entries; i++) {
247 uint64_t start, end, nstart, nend;
248 if (mem->map[i].type != mem->map[i + 1].type) {
251 start = unpack_lb64(mem->map[i].start);
252 end = start + unpack_lb64(mem->map[i].size);
253 nstart = unpack_lb64(mem->map[i + 1].start);
254 nend = nstart + unpack_lb64(mem->map[i + 1].size);
255 if ((start <= nstart) && (end > nstart)) {
256 if (start > nstart) {
262 /* Record the new region size */
263 mem->map[i].start = pack_lb64(start);
264 mem->map[i].size = pack_lb64(end - start);
266 /* Delete the entry I have merged with */
267 memmove(&mem->map[i + 1], &mem->map[i + 2],
268 ((entries - i - 2) * sizeof(mem->map[0])));
269 mem->size -= sizeof(mem->map[0]);
271 /* See if I can merge with the next entry as well */
277 static void lb_remove_memory_range(struct lb_memory *mem,
278 uint64_t start, uint64_t size)
285 entries = (mem->size - sizeof(*mem))/sizeof(mem->map[0]);
287 /* Remove a reserved area from the memory map */
288 for(i = 0; i < entries; i++) {
289 uint64_t map_start = unpack_lb64(mem->map[i].start);
290 uint64_t map_end = map_start + unpack_lb64(mem->map[i].size);
291 if ((start <= map_start) && (end >= map_end)) {
292 /* Remove the completely covered range */
293 memmove(&mem->map[i], &mem->map[i + 1],
294 ((entries - i - 1) * sizeof(mem->map[0])));
295 mem->size -= sizeof(mem->map[0]);
297 /* Since the index will disappear revisit what will appear here */
300 else if ((start > map_start) && (end < map_end)) {
301 /* Split the memory range */
302 memmove(&mem->map[i + 1], &mem->map[i],
303 ((entries - i) * sizeof(mem->map[0])));
304 mem->size += sizeof(mem->map[0]);
306 /* Update the first map entry */
307 mem->map[i].size = pack_lb64(start - map_start);
308 /* Update the second map entry */
309 mem->map[i + 1].start = pack_lb64(end);
310 mem->map[i + 1].size = pack_lb64(map_end - end);
311 /* Don't bother with this map entry again */
314 else if ((start <= map_start) && (end > map_start)) {
315 /* Shrink the start of the memory range */
316 mem->map[i].start = pack_lb64(end);
317 mem->map[i].size = pack_lb64(map_end - end);
319 else if ((start < map_end) && (start > map_start)) {
320 /* Shrink the end of the memory range */
321 mem->map[i].size = pack_lb64(start - map_start);
/* Add a range of the given type, first punching a hole for it so it
 * never overlaps existing entries, then re-sorting/merging the map.
 */
static void lb_add_memory_range(struct lb_memory *mem,
	uint32_t type, uint64_t start, uint64_t size)
{
	lb_remove_memory_range(mem, start, size);
	lb_memory_range(mem, type, start, size);
	lb_cleanup_memory_ranges(mem);
}
/* Routines to extract parts of the coreboot table, or information
 * from the coreboot table, after we have written it.
 * Currently get_lb_mem relies on a global; we can change the
 * implementation if that ever becomes a problem.
 */
/* The memory map most recently recorded by build_lb_mem(); 0 until then. */
static struct lb_memory *mem_ranges = 0;

/* Accessor for the recorded memory map (may return 0 before the table
 * has been built).
 */
struct lb_memory *get_lb_mem(void)
{
	return mem_ranges;
}
345 static void build_lb_mem_range(void *gp, struct device *dev, struct resource *res)
347 struct lb_memory *mem = gp;
348 lb_memory_range(mem, LB_MEM_RAM, res->base, res->size);
351 static struct lb_memory *build_lb_mem(struct lb_header *head)
353 struct lb_memory *mem;
355 /* Record where the lb memory ranges will live */
356 mem = lb_memory(head);
359 /* Build the raw table of memory */
360 search_global_resources(
361 IORESOURCE_MEM | IORESOURCE_CACHEABLE, IORESOURCE_MEM | IORESOURCE_CACHEABLE,
362 build_lb_mem_range, mem);
363 lb_cleanup_memory_ranges(mem);
367 unsigned long write_coreboot_table(
368 unsigned long low_table_start, unsigned long low_table_end,
369 unsigned long rom_table_start, unsigned long rom_table_end)
371 unsigned long table_size;
372 struct lb_header *head;
373 struct lb_memory *mem;
375 if(low_table_end > (0x1000 - sizeof(struct lb_header))) { /* after 4K */
376 /* We need to put lbtable on to [0xf0000,0x100000) */
377 head = lb_table_init(rom_table_end);
378 rom_table_end = (unsigned long)head;
380 head = lb_table_init(low_table_end);
381 low_table_end = (unsigned long)head;
384 printk_debug("Adjust low_table_end from 0x%08x to ", low_table_end);
385 low_table_end += 0xfff; // 4K aligned
386 low_table_end &= ~0xfff;
387 printk_debug("0x%08x \n", low_table_end);
389 /* The Linux kernel assumes this region is reserved */
390 printk_debug("Adjust rom_table_end from 0x%08x to ", rom_table_end);
391 rom_table_end += 0xffff; // 64K align
392 rom_table_end &= ~0xffff;
393 printk_debug("0x%08x \n", rom_table_end);
395 #if (HAVE_OPTION_TABLE == 1)
397 struct lb_record *rec_dest, *rec_src;
398 /* Write the option config table... */
399 rec_dest = lb_new_record(head);
400 rec_src = (struct lb_record *)(void *)&option_table;
401 memcpy(rec_dest, rec_src, rec_src->size);
402 /* Create cmos checksum entry in coreboot table */
403 lb_cmos_checksum(head);
406 /* Record where RAM is located */
407 mem = build_lb_mem(head);
409 /* Record the mptable and the the lb_table (This will be adjusted later) */
410 lb_add_memory_range(mem, LB_MEM_TABLE,
411 low_table_start, low_table_end - low_table_start);
413 /* Record the pirq table, acpi tables, and maybe the mptable */
414 table_size=rom_table_end-rom_table_start;
415 lb_add_memory_range(mem, LB_MEM_TABLE,
416 rom_table_start, table_size<0x10000?0x10000:table_size);
419 * I assume that there is always memory at immediately after
420 * the low_table_end. This means that after I setup the coreboot table.
421 * I can trivially fixup the reserved memory ranges to hold the correct
422 * size of the coreboot table.
425 /* Record our motherboard */
427 /* Record the serial port, if present */
429 /* Record our various random string information */
432 /* Remember where my valid memory ranges are */
433 return lb_table_fini(head);