Print this page
loader: pass args along to unix in C
There's no reason why we can't pass the args gotten from the bootloader to
unix in C.
Note: loader's _start sets the ATAG pointer to 0x100. This change simply
propagates it to unix.
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/armv6/loader/fakeloader.c
+++ new/usr/src/uts/armv6/loader/fakeloader.c
1 1 /*
2 2 * This file and its contents are supplied under the terms of the
3 3 * Common Development and Distribution License ("CDDL"), version 1.0.
↓ open down ↓ |
3 lines elided |
↑ open up ↑ |
4 4 * You may only use this file in accordance with the terms of version
5 5 * 1.0 of the CDDL.
6 6 *
7 7 * A full copy of the text of the CDDL should have accompanied this
8 8 * source. A copy of the CDDL is also available via the Internet at
9 9 * http://www.illumos.org/license/CDDL.
10 10 */
11 11
12 12 /*
13 13 * Copyright (c) 2014 Joyent, Inc. All rights reserved.
14 + * Copyright (c) 2015 Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
14 15 */
15 16
16 17 #include "fakeloader.h"
17 18
18 19 #include <sys/types.h>
19 20 #include <sys/param.h>
20 21 #include <sys/elf.h>
21 22 #include <sys/atag.h>
22 23 #include <sys/sysmacros.h>
23 24 #include <sys/machparam.h>
24 25
25 26 #include <vm/pte.h>
26 27
27 28 /*
28 29 * This is the stock ARM fake uniboot loader.
29 30 *
30 31 * Here's what we have to do:
31 32 * o Read the atag header and find the combined archive header
32 33 * o Determine the set of mappings we need to add for the following:
33 34 * - unix
34 35 * - boot_archive
35 36 * - atags
36 37 * o Enable unaligned access
37 38 * o Enable the caches + virtual memory
38 39 *
39 40 * There are several important constraints that we have here:
40 41 *
41 42 * o We cannot use any .data! Several loaders that come before us are broken
42 43 * and only provide us with the ability to map our .text and potentially our
43 44 * .bss. We should strive to avoid even that if we can.
44 45 */
45 46
46 47 #ifdef DEBUG
47 48 #define FAKELOAD_DPRINTF(x) fakeload_puts(x)
48 49 #else
49 50 #define FAKELOAD_DPRINTF(x)
50 51 #endif /* DEBUG */
51 52
52 53 /*
53 54 * XXX ASSUMES WE HAVE Free memory following the boot archive
54 55 */
55 56 static uintptr_t freemem;
56 57 static uintptr_t pt_arena;
57 58 static uintptr_t pt_arena_max;
58 59 static uint32_t *pt_addr;
59 60 static int nl2pages;
60 61
61 62 /* Simple copy routines */
/*
 * memmove-style copy of n bytes from s to d that is safe for overlapping
 * regions.  The original code only copied forward when the two regions
 * were fully disjoint (dest + n < src); an overlapping copy with
 * dest < src < dest + n fell into the reverse branch, which overwrites
 * the tail of the source before reading it.  Copying forward whenever
 * dest < src is always safe and fixes that case.
 */
void
bcopy(const void *s, void *d, size_t n)
{
	const char *src = s;
	char *dest = d;

	if (n == 0 || s == d)
		return;

	if (dest < src) {
		/* dest starts before src: a forward copy can't clobber
		 * unread source bytes, even if the regions overlap. */
		for (; n > 0; n--, src++, dest++)
			*dest = *src;
	} else {
		/* dest starts after src: copy backwards from the end so
		 * overlapping source bytes are read before being written. */
		src += n - 1;
		dest += n - 1;
		for (; n > 0; n--, src--, dest--)
			*dest = *src;
	}
}
83 84
/*
 * Zero n bytes starting at s, one byte at a time (no libc here).
 */
void
bzero(void *s, size_t n)
{
	char *p = s;

	while (n-- > 0)
		*p++ = 0;
}
94 95
/*
 * Emit a NUL-terminated string one character at a time through the
 * backend console.
 */
static void
fakeload_puts(const char *str)
{
	const char *p;

	for (p = str; *p != '\0'; p++)
		fakeload_backend_putc(*p);
}
103 104
/*
 * Print the panic reason and spin; there is nothing to return to in a
 * bare-metal loader.
 */
static void
fakeload_panic(const char *reason)
{
	fakeload_puts("panic!\n");
	fakeload_puts(reason);
	fakeload_puts("\n");
	fakeload_puts("spinning forever... goodbye...\n");

	/* Hang forever — never return. */
	while (1)
		;
}
114 115
115 116 static void
116 117 fakeload_ultostr(unsigned long value)
117 118 {
118 119 char buf[16];
119 120 ulong_t t, val = (ulong_t)value;
120 121 char c;
121 122 char *ptr = &(buf[14]);
122 123 buf[15] = '\0';
123 124
124 125 do {
125 126 c = (char)('0' + val - 16 * (t = (val >> 4)));
126 127 if (c > '9')
127 128 c += 'A' - '9' - 1;
128 129 *--ptr = c;
129 130 } while ((val = t) != 0);
130 131
131 132 *--ptr = 'x';
132 133 *--ptr = '0';
133 134 fakeload_puts(ptr);
134 135 }
135 136
136 137 static void
137 138 fakeload_selfmap(atag_header_t *chain)
138 139 {
139 140 atag_illumos_mapping_t aim;
140 141
141 142 aim.aim_header.ah_size = ATAG_ILLUMOS_MAPPING_SIZE;
142 143 aim.aim_header.ah_tag = ATAG_ILLUMOS_MAPPING;
143 144 aim.aim_paddr = 0x7000;
144 145 aim.aim_vaddr = aim.aim_paddr;
145 146 aim.aim_plen = 0x3000;
146 147 aim.aim_vlen = aim.aim_plen;
147 148 aim.aim_mapflags = PF_R | PF_X | PF_LOADER;
148 149 atag_append(chain, &aim.aim_header);
149 150 }
150 151
151 152 static void
152 153 fakeload_map_1mb(uintptr_t pa, uintptr_t va, int prot)
153 154 {
154 155 int entry;
155 156 armpte_t *pte;
156 157 arm_l1s_t *l1e;
157 158
158 159 entry = ARMPT_VADDR_TO_L1E(va);
159 160 pte = &pt_addr[entry];
160 161 if (ARMPT_L1E_ISVALID(*pte))
161 162 fakeload_panic("armboot_mmu: asked to map a mapped region!\n");
162 163 l1e = (arm_l1s_t *)pte;
163 164 *pte = 0;
164 165 l1e->al_type = ARMPT_L1_TYPE_SECT;
165 166 /* Assume it's not device memory */
166 167 l1e->al_bbit = 1;
167 168 l1e->al_cbit = 1;
168 169 l1e->al_tex = 1;
169 170 l1e->al_sbit = 1;
170 171
171 172 if (!(prot & PF_X))
172 173 l1e->al_xn = 1;
173 174 l1e->al_domain = 0;
174 175
175 176 if (prot & PF_W) {
176 177 l1e->al_ap2 = 1;
177 178 l1e->al_ap = 1;
178 179 } else {
179 180 l1e->al_ap2 = 0;
180 181 l1e->al_ap = 1;
181 182 }
182 183 l1e->al_ngbit = 0;
183 184 l1e->al_issuper = 0;
184 185 l1e->al_addr = ARMPT_PADDR_TO_L1SECT(pa);
185 186 }
186 187
187 188 /*
188 189 * Set freemem to be 1 MB aligned at the end of boot archive. While the L1 Page
189 190 * table only needs to be 16 KB aligned, we opt for 1 MB alignment so that way
190 191 * we can map it and all the other L2 page tables we might need. If we don't do
191 192 * this, it'll become problematic for unix to actually modify this.
192 193 */
193 194 static void
194 195 fakeload_pt_arena_init(const atag_initrd_t *aii)
195 196 {
196 197 int entry, i;
197 198 armpte_t *pte;
198 199 arm_l1s_t *l1e;
199 200
200 201 pt_arena = aii->ai_start + aii->ai_size;
201 202 if (pt_arena & MMU_PAGEOFFSET1M) {
202 203 pt_arena &= MMU_PAGEMASK1M;
203 204 pt_arena += MMU_PAGESIZE1M;
204 205 }
205 206 pt_arena_max = pt_arena + 4 * MMU_PAGESIZE1M;
206 207 freemem = pt_arena_max;
207 208
208 209 /* Set up the l1 page table by first invalidating it */
209 210 pt_addr = (armpte_t *)pt_arena;
210 211 pt_arena += ARMPT_L1_SIZE;
211 212 bzero(pt_addr, ARMPT_L1_SIZE);
212 213 for (i = 0; i < 4; i++)
213 214 fakeload_map_1mb((uintptr_t)pt_addr + i * MMU_PAGESIZE1M,
214 215 (uintptr_t)pt_addr + i * MMU_PAGESIZE1M,
215 216 PF_R | PF_W);
216 217 }
217 218
/*
 * Validate the fake-uniboot archive at addr and generate mapping records
 * for it.  We're passed the atag chain, the address of the combined
 * archive header, and the status atag to fill in.
 *
 * For every PT_LOAD segment of unix, plus the boot archive itself, an
 * ATAG_ILLUMOS_MAPPING record is appended to the chain.  unix's text and
 * data extents are recorded in *aisp, and unix's ELF entry point is
 * returned.
 */
static uintptr_t
fakeload_archive_mappings(atag_header_t *chain, const void *addr,
    atag_illumos_status_t *aisp)
{
	atag_illumos_mapping_t aim;
	fakeloader_hdr_t *hdr;
	Elf32_Ehdr *ehdr;
	Elf32_Phdr *phdr;
	int nhdrs, i;
	uintptr_t ret;
	uintptr_t text = 0, data = 0;
	size_t textln = 0, dataln = 0;

	hdr = (fakeloader_hdr_t *)addr;

	/* Sanity check the fake-uniboot header magic bytes. */
	if (hdr->fh_magic[0] != FH_MAGIC0)
		fakeload_panic("fh_magic[0] is wrong!\n");
	if (hdr->fh_magic[1] != FH_MAGIC1)
		fakeload_panic("fh_magic[1] is wrong!\n");
	if (hdr->fh_magic[2] != FH_MAGIC2)
		fakeload_panic("fh_magic[2] is wrong!\n");
	if (hdr->fh_magic[3] != FH_MAGIC3)
		fakeload_panic("fh_magic[3] is wrong!\n");

	/* Both unix and the boot archive must actually be present. */
	if (hdr->fh_unix_size == 0)
		fakeload_panic("hdr unix size is zero\n");
	if (hdr->fh_unix_offset == 0)
		fakeload_panic("hdr unix offset is zero\n");
	if (hdr->fh_archive_size == 0)
		fakeload_panic("hdr archive size is zero\n");
	if (hdr->fh_archive_offset == 0)
		fakeload_panic("hdr archive_offset is zero\n");

	ehdr = (Elf32_Ehdr *)((uintptr_t)addr + hdr->fh_unix_offset);

	/* Verify unix is a 32-bit LSB ARM executable built for illumos. */
	if (ehdr->e_ident[EI_MAG0] != ELFMAG0)
		fakeload_panic("magic[0] wrong");
	if (ehdr->e_ident[EI_MAG1] != ELFMAG1)
		fakeload_panic("magic[1] wrong");
	if (ehdr->e_ident[EI_MAG2] != ELFMAG2)
		fakeload_panic("magic[2] wrong");
	if (ehdr->e_ident[EI_MAG3] != ELFMAG3)
		fakeload_panic("magic[3] wrong");
	if (ehdr->e_ident[EI_CLASS] != ELFCLASS32)
		fakeload_panic("wrong elfclass");
	if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB)
		fakeload_panic("wrong encoding");
	if (ehdr->e_ident[EI_OSABI] != ELFOSABI_SOLARIS)
		fakeload_panic("wrong os abi");
	if (ehdr->e_ident[EI_ABIVERSION] != EAV_SUNW_CURRENT)
		fakeload_panic("wrong abi version");
	if (ehdr->e_type != ET_EXEC)
		fakeload_panic("unix is not an executable");
	if (ehdr->e_machine != EM_ARM)
		fakeload_panic("unix is not an ARM Executible");
	if (ehdr->e_version != EV_CURRENT)
		fakeload_panic("wrong version");
	if (ehdr->e_phnum == 0)
		fakeload_panic("no program headers");
	ret = ehdr->e_entry;

	FAKELOAD_DPRINTF("validated unix's headers\n");

	/* Walk the program headers, mapping each loadable segment. */
	nhdrs = ehdr->e_phnum;
	phdr = (Elf32_Phdr *)((uintptr_t)addr + hdr->fh_unix_offset +
	    ehdr->e_phoff);
	for (i = 0; i < nhdrs; i++, phdr++) {
		if (phdr->p_type != PT_LOAD) {
			fakeload_puts("skipping non-PT_LOAD header\n");
			continue;
		}

		if (phdr->p_filesz == 0 || phdr->p_memsz == 0) {
			fakeload_puts("skipping PT_LOAD with 0 file/mem\n");
			continue;
		}

		/*
		 * Create a mapping record for this in the atags.
		 */
		aim.aim_header.ah_size = ATAG_ILLUMOS_MAPPING_SIZE;
		aim.aim_header.ah_tag = ATAG_ILLUMOS_MAPPING;
		/* Physical source: segment's file bytes inside the archive. */
		aim.aim_paddr = (uintptr_t)addr + hdr->fh_unix_offset +
		    phdr->p_offset;
		aim.aim_plen = phdr->p_filesz;
		aim.aim_vaddr = phdr->p_vaddr;
		aim.aim_vlen = phdr->p_memsz;
		/* Round up vlen to be a multiple of 4k */
		if (aim.aim_vlen & 0xfff) {
			aim.aim_vlen &= ~0xfff;
			aim.aim_vlen += 0x1000;
		}
		aim.aim_mapflags = phdr->p_flags;
		atag_append(chain, &aim.aim_header);

		/*
		 * When built with highvecs we need to account for the fact that
		 * _edata, _etext and _end are built assuming that the highvecs
		 * are normally part of our segments. ld is not doing anything
		 * wrong, but this breaks the assumptions that krtld currently
		 * has. As such, unix will use this information to overwrite the
		 * normal entry points that krtld uses in a similar style to
		 * SPARC.
		 */
		if (aim.aim_vaddr != 0xffff0000) {
			/* Writable segment => data; otherwise text. */
			if ((phdr->p_flags & PF_W) != 0) {
				data = aim.aim_vaddr;
				dataln = aim.aim_vlen;
			} else {
				text = aim.aim_vaddr;
				textln = aim.aim_vlen;
			}
		}
	}

	/* Record unix's text/data extents for the status atag. */
	aisp->ais_stext = text;
	aisp->ais_etext = text + textln;
	aisp->ais_sdata = data;
	aisp->ais_edata = data + dataln;

	/* 1:1 map the boot archive */
	aim.aim_header.ah_size = ATAG_ILLUMOS_MAPPING_SIZE;
	aim.aim_header.ah_tag = ATAG_ILLUMOS_MAPPING;
	aim.aim_paddr = (uintptr_t)addr + hdr->fh_archive_offset;
	aim.aim_plen = hdr->fh_archive_size;
	aim.aim_vaddr = aim.aim_paddr;
	aim.aim_vlen = aim.aim_plen;
	aim.aim_mapflags = PF_R | PF_W | PF_X;
	atag_append(chain, &aim.aim_header);
	aisp->ais_archive = aim.aim_paddr;
	aisp->ais_archivelen = aim.aim_plen;

	/* Return unix's ELF entry point. */
	return (ret);
}
356 357
357 358 static void
358 359 fakeload_mkatags(atag_header_t *chain)
359 360 {
360 361 atag_illumos_status_t ais;
361 362 atag_illumos_mapping_t aim;
362 363
363 364 bzero(&ais, sizeof (ais));
364 365 bzero(&aim, sizeof (aim));
365 366
366 367 ais.ais_header.ah_size = ATAG_ILLUMOS_STATUS_SIZE;
367 368 ais.ais_header.ah_tag = ATAG_ILLUMOS_STATUS;
368 369 atag_append(chain, &ais.ais_header);
369 370 aim.aim_header.ah_size = ATAG_ILLUMOS_MAPPING_SIZE;
370 371 aim.aim_header.ah_tag = ATAG_ILLUMOS_MAPPING;
371 372 atag_append(chain, &aim.aim_header);
372 373 }
373 374
374 375 static uintptr_t
375 376 fakeload_alloc_l2pt(void)
376 377 {
377 378 uintptr_t ret;
378 379
379 380 if (pt_arena & ARMPT_L2_MASK) {
380 381 ret = pt_arena;
381 382 ret &= ~ARMPT_L2_MASK;
382 383 ret += ARMPT_L2_SIZE;
383 384 pt_arena = ret + ARMPT_L2_SIZE;
384 385 } else {
385 386 ret = pt_arena;
386 387 pt_arena = ret + ARMPT_L2_SIZE;
387 388 }
388 389 if (pt_arena >= pt_arena_max) {
389 390 fakeload_puts("pt_arena, max\n");
390 391 fakeload_ultostr(pt_arena);
391 392 fakeload_puts("\n");
392 393 fakeload_ultostr(pt_arena_max);
393 394 fakeload_puts("\n");
394 395 fakeload_puts("l2pts alloced\n");
395 396 fakeload_ultostr(nl2pages);
396 397 fakeload_puts("\n");
397 398 fakeload_panic("ran out of page tables!");
398 399 }
399 400
400 401 bzero((void *)ret, ARMPT_L2_SIZE);
401 402 nl2pages++;
402 403 return (ret);
403 404 }
404 405
/*
 * Finally, do all the dirty work. Let's create some page tables. The L1 page
 * table is full of 1 MB mappings by default. The L2 Page table is 1k in size
 * and covers that 1 MB. We're going to always create L2 page tables for now
 * which will use 4k and 64k pages.
 *
 * Maps [pstart, pstart + len) physical to [vstart, vstart + len) virtual
 * in the L1 table at pt, with PF_* protection bits in prot.  All three of
 * pstart, vstart and len must be 4k aligned.  Panics on any overlap with
 * an existing mapping.
 */
static void
fakeload_map(armpte_t *pt, uintptr_t pstart, uintptr_t vstart, size_t len,
    uint32_t prot)
{
	int entry, chunksize;
	armpte_t *pte, *l2pt;
	arm_l1pt_t *l1pt;

	/*
	 * Make sure both pstart + vstart are 4k aligned, along with len.
	 */
	if (pstart & MMU_PAGEOFFSET)
		fakeload_panic("pstart is not 4k aligned");
	if (vstart & MMU_PAGEOFFSET)
		fakeload_panic("vstart is not 4k aligned");
	if (len & MMU_PAGEOFFSET)
		fakeload_panic("len is not 4k aligned");

	/*
	 * We're going to logically deal with each 1 MB chunk at a time.
	 */
	while (len > 0) {
		/* Clamp this chunk to the end of the current 1 MB section. */
		if (vstart & MMU_PAGEOFFSET1M) {
			chunksize = MIN(len, MMU_PAGESIZE1M -
			    (vstart & MMU_PAGEOFFSET1M));
		} else {
			chunksize = MIN(len, MMU_PAGESIZE1M);
		}

		entry = ARMPT_VADDR_TO_L1E(vstart);
		pte = &pt[entry];

		if (!ARMPT_L1E_ISVALID(*pte)) {
			uintptr_t l2table;

			/*
			 * Fast path: an exactly 1 MB, 1 MB-aligned request
			 * can use a single L1 section entry instead of an
			 * L2 table.
			 */
			if (!(vstart & MMU_PAGEOFFSET1M) &&
			    !(pstart & MMU_PAGEOFFSET1M) &&
			    len == MMU_PAGESIZE1M) {
				fakeload_map_1mb(pstart, vstart, prot);
				vstart += MMU_PAGESIZE1M;
				pstart += MMU_PAGESIZE1M;
				len -= MMU_PAGESIZE1M;
				continue;
			}

			/* Hang a freshly allocated L2 table off this entry. */
			l2table = fakeload_alloc_l2pt();
			*pte = 0;
			l1pt = (arm_l1pt_t *)pte;
			l1pt->al_type = ARMPT_L1_TYPE_L2PT;
			l1pt->al_ptaddr = ARMPT_ADDR_TO_L1PTADDR(l2table);
		} else if ((*pte & ARMPT_L1_TYPE_MASK) != ARMPT_L1_TYPE_L2PT) {
			fakeload_panic("encountered l1 entry that's not a "
			    "pointer to a level 2 table\n");
		} else {
			l1pt = (arm_l1pt_t *)pte;
		}

		/* Now that we have the l1pt fill in l2 entries */
		l2pt = (void *)(l1pt->al_ptaddr << ARMPT_L1PT_TO_L2_SHIFT);
		len -= chunksize;
		while (chunksize > 0) {
			arm_l2e_t *l2pte;

			entry = ARMPT_VADDR_TO_L2E(vstart);
			pte = &l2pt[entry];

#ifdef MAP_DEBUG
			fakeload_puts("4k page pa->va, l2root, entry\n");
			fakeload_ultostr(pstart);
			fakeload_puts("->");
			fakeload_ultostr(vstart);
			fakeload_puts(", ");
			fakeload_ultostr((uintptr_t)l2pt);
			fakeload_puts(", ");
			fakeload_ultostr(entry);
			fakeload_puts("\n");
#endif

			if ((*pte & ARMPT_L2_TYPE_MASK) !=
			    ARMPT_L2_TYPE_INVALID)
				fakeload_panic("found existing l2 page table, "
				    "overlap in requested mappings detected!");
			/* Map vaddr to our paddr! */
			l2pte = ((arm_l2e_t *)pte);
			*pte = 0;
			/* Execute-never unless the mapping is executable. */
			if (!(prot & PF_X))
				l2pte->ale_xn = 1;
			l2pte->ale_ident = 1;
			if (prot & PF_DEVICE) {
				/* Device memory attributes (uncached). */
				l2pte->ale_bbit = 1;
				l2pte->ale_cbit = 0;
				l2pte->ale_tex = 0;
				l2pte->ale_sbit = 1;
			} else {
				/* Normal cacheable memory attributes. */
				l2pte->ale_bbit = 1;
				l2pte->ale_cbit = 1;
				l2pte->ale_tex = 1;
				l2pte->ale_sbit = 1;
			}
			/*
			 * NOTE(review): ale_ap2 is set for writable
			 * mappings; on ARMv6/v7 AP[2] is conventionally the
			 * write-disable bit — verify against the arm_l2e_t
			 * layout in <vm/pte.h>.
			 */
			if (prot & PF_W) {
				l2pte->ale_ap2 = 1;
				l2pte->ale_ap = 1;
			} else {
				l2pte->ale_ap2 = 0;
				l2pte->ale_ap = 1;
			}
			l2pte->ale_ngbit = 0;
			l2pte->ale_addr = ARMPT_PADDR_TO_L2ADDR(pstart);

			/* Advance one 4k page. */
			chunksize -= MMU_PAGESIZE;
			vstart += MMU_PAGESIZE;
			pstart += MMU_PAGESIZE;
		}
	}
}
526 527
/*
 * Install the mapping described by a single ATAG_ILLUMOS_MAPPING record
 * into the page tables at pt.  If the physical payload needs padding
 * (vlen > plen, e.g. BSS) or isn't page aligned, it is first relocated
 * into freemem; aimp->aim_paddr is updated in place to the new location.
 */
static void
fakeload_create_map(armpte_t *pt, atag_illumos_mapping_t *aimp)
{
#ifdef MAP_DEBUG
	fakeload_puts("paddr->vaddr\n");
	fakeload_ultostr(aimp->aim_paddr);
	fakeload_puts("->");
	fakeload_ultostr(aimp->aim_vaddr);
	fakeload_puts("\n");
	fakeload_puts("plen-vlen\n");
	fakeload_ultostr(aimp->aim_plen);
	fakeload_puts("-");
	fakeload_ultostr(aimp->aim_vlen);
	fakeload_puts("\n");
#endif /* MAP_DEBUG */

	/*
	 * Can we map this in place or do we need to basically allocate a new
	 * region and bcopy everything into place for proper alignment?
	 *
	 * Criteria for this: we have a vlen > plen. plen is not page aligned.
	 */
	if (aimp->aim_vlen > aimp->aim_plen ||
	    (aimp->aim_paddr & MMU_PAGEOFFSET) != 0) {
		uintptr_t start;

		/* PF_NORELOC mappings must be mappable in place. */
		if (aimp->aim_mapflags & PF_NORELOC)
			fakeload_panic("tried to reloc unrelocatable mapping");
#ifdef MAP_DEBUG
		FAKELOAD_DPRINTF("reloacting paddr\n");
#endif
		/* Round freemem up to the next page boundary. */
		start = freemem;
		if (start & MMU_PAGEOFFSET) {
			start &= MMU_PAGEMASK;
			start += MMU_PAGESIZE;
		}
		bcopy((void *)aimp->aim_paddr, (void *)start,
		    aimp->aim_plen);
		/* Zero-fill the tail beyond the copied file data. */
		if (aimp->aim_vlen > aimp->aim_plen) {
			bzero((void *)(start + aimp->aim_plen),
			    aimp->aim_vlen - aimp->aim_plen);
		}
		aimp->aim_paddr = start;
		freemem = start + aimp->aim_vlen;
#ifdef MAP_DEBUG
		fakeload_puts("new paddr: ");
		fakeload_ultostr(start);
		fakeload_puts("\n");
#endif /* MAP_DEBUG */
	}

	/*
	 * Now that everything has been set up, go ahead and map the new region.
	 */
	fakeload_map(pt, aimp->aim_paddr, aimp->aim_vaddr, aimp->aim_vlen,
	    aimp->aim_mapflags);
#ifdef MAP_DEBUG
	FAKELOAD_DPRINTF("\n");
#endif /* MAP_DEBUG */
}
587 588
/*
 * Main C entry point, called from the loader's assembly _start.  ident
 * and ident2 are the first two register arguments handed over by the boot
 * firmware and atag is the ATAG list pointer (per the commit note, _start
 * sets it to 0x100).  All three are propagated to unix via
 * fakeload_exec() at the end.
 *
 * Overall flow: find the boot archive, build the atag records and page
 * tables, enable unaligned access, flush/disable caches, turn on the MMU,
 * re-enable caches, and jump to unix.
 */
void
fakeload_init(void *ident, void *ident2, void *atag)
{
	atag_header_t *hdr;
	atag_header_t *chain = (atag_header_t *)atag;
	const atag_initrd_t *initrd;
	atag_illumos_status_t *aisp;
	atag_illumos_mapping_t *aimp;
	uintptr_t unix_start;

	fakeload_backend_init();
	fakeload_puts("Hello from the loader\n");
	/* The combined unix + boot archive arrives as the initial ramdisk. */
	initrd = (atag_initrd_t *)atag_find(chain, ATAG_INITRD2);
	if (initrd == NULL)
		fakeload_panic("missing the initial ramdisk\n");

	/*
	 * Create the status atag header and the initial mapping record for the
	 * atags. We'll hold onto both of these.
	 */
	fakeload_mkatags(chain);
	aisp = (atag_illumos_status_t *)atag_find(chain, ATAG_ILLUMOS_STATUS);
	if (aisp == NULL)
		fakeload_panic("can't find ATAG_ILLUMOS_STATUS");
	aimp = (atag_illumos_mapping_t *)atag_find(chain, ATAG_ILLUMOS_MAPPING);
	if (aimp == NULL)
		fakeload_panic("can't find ATAG_ILLUMOS_MAPPING");
	FAKELOAD_DPRINTF("created proto atags\n");

	/* Carve the page-table arena out past the end of the archive. */
	fakeload_pt_arena_init(initrd);

	/* Identity-map the loader itself. */
	fakeload_selfmap(chain);

	/*
	 * Map the boot archive and all of unix
	 */
	unix_start = fakeload_archive_mappings(chain,
	    (const void *)(uintptr_t)initrd->ai_start, aisp);
	FAKELOAD_DPRINTF("filled out unix and the archive's mappings\n");

	/*
	 * Fill in the atag mapping header for the atags themselves. 1:1 map it.
	 */
	aimp->aim_paddr = (uintptr_t)chain & ~0xfff;
	aimp->aim_plen = atag_length(chain) & ~0xfff;
	aimp->aim_plen += 0x1000;
	aimp->aim_vaddr = aimp->aim_paddr;
	aimp->aim_vlen = aimp->aim_plen;
	/* PF_NORELOC: the chain must stay where the firmware put it. */
	aimp->aim_mapflags = PF_R | PF_W | PF_NORELOC;

	/*
	 * Let the backend add mappings
	 */
	fakeload_backend_addmaps(chain);

	/*
	 * Turn on unaligned access
	 */
	FAKELOAD_DPRINTF("turning on unaligned access\n");
	fakeload_unaligned_enable();
	FAKELOAD_DPRINTF("successfully enabled unaligned access\n");

	/*
	 * To turn on the MMU we need to do the following:
	 * o Program all relevant CP15 registers
	 * o Program 1st and 2nd level page tables
	 * o Invalidate and Disable the I/D-cache
	 * o Fill in the last bits of the ATAG_ILLUMOS_STATUS atag
	 * o Turn on the MMU in SCTLR
	 * o Jump to unix
	 */

	/* Last bits of the atag */
	aisp->ais_freemem = freemem;
	aisp->ais_version = 1;
	aisp->ais_ptbase = (uintptr_t)pt_addr;

	/*
	 * Our initial page table is a series of 1 MB sections. While we really
	 * should map 4k pages, for the moment we're just going to map 1 MB
	 * regions, yay team!
	 */
	hdr = chain;
	FAKELOAD_DPRINTF("creating mappings\n");
	/* Walk the chain and install every mapping record we accumulated. */
	while (hdr != NULL) {
		if (hdr->ah_tag == ATAG_ILLUMOS_MAPPING)
			fakeload_create_map(pt_addr,
			    (atag_illumos_mapping_t *)hdr);
		hdr = atag_next(hdr);
	}

	/*
	 * Now that we've mapped everything, update the status atag.
	 */
	aisp->ais_freeused = freemem - aisp->ais_freemem;
	aisp->ais_pt_arena = pt_arena;
	aisp->ais_pt_arena_max = pt_arena_max;

	/* Cache disable */
	FAKELOAD_DPRINTF("Flushing and disabling caches\n");
	armv6_dcache_flush();
	armv6_dcache_disable();
	armv6_dcache_inval();
	armv6_icache_disable();
	armv6_icache_inval();

	/* Program the page tables */
	FAKELOAD_DPRINTF("programming cp15 regs\n");
	fakeload_pt_setup((uintptr_t)pt_addr);


	/* MMU Enable */
	FAKELOAD_DPRINTF("see you on the other side\n");
	fakeload_mmu_enable();

	FAKELOAD_DPRINTF("why helo thar\n");

	/* Re-enable caches */
	armv6_dcache_enable();
	armv6_icache_enable();

	/* we should never come back */
	fakeload_exec(ident, ident2, chain, unix_start);
	fakeload_panic("hit the end of the world\n");
}
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX