Print this page
unix: enable caches in locore
The loader should really be as simple as possible to be as small as
possible. It should configure the machine so that unix can make certain
assumptions but it should leave more complex initialization to unix.
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/armv6/loader/fakeloader.c
+++ new/usr/src/uts/armv6/loader/fakeloader.c
1 1 /*
2 2 * This file and its contents are supplied under the terms of the
3 3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 4 * You may only use this file in accordance with the terms of version
5 5 * 1.0 of the CDDL.
6 6 *
7 7 * A full copy of the text of the CDDL should have accompanied this
8 8 * source. A copy of the CDDL is also available via the Internet at
9 9 * http://www.illumos.org/license/CDDL.
10 10 */
11 11
12 12 /*
13 13 * Copyright (c) 2014 Joyent, Inc. All rights reserved.
14 14 * Copyright (c) 2015 Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
15 15 */
16 16
17 17 #include "fakeloader.h"
18 18
19 19 #include <sys/types.h>
20 20 #include <sys/param.h>
21 21 #include <sys/elf.h>
22 22 #include <sys/atag.h>
23 23 #include <sys/sysmacros.h>
24 24 #include <sys/machparam.h>
25 25
26 26 #include <vm/pte.h>
27 27
↓ open down ↓ |
27 lines elided |
↑ open up ↑ |
/*
 * This is the stock ARM fake uniboot loader.
 *
 * Here's what we have to do:
 *  o Read the atag header and find the combined archive header
 *  o Determine the set of mappings we need to add for the following:
 *    - unix
 *    - boot_archive
 *    - atags
 *  o Enable unaligned access
 *  o Enable virtual memory
 *
 * There are several important constraints that we have here:
 *
 * o We cannot use any .data! Several loaders that come before us are broken
 *   and only provide us with the ability to map our .text and potentially our
 *   .bss. We should strive to avoid even that if we can.
 */

/* Debug output is compiled out entirely unless built with -DDEBUG. */
#ifdef DEBUG
#define	FAKELOAD_DPRINTF(x)	fakeload_puts(x)
#else
#define	FAKELOAD_DPRINTF(x)
#endif /* DEBUG */

/*
 * XXX ASSUMES WE HAVE Free memory following the boot archive
 */
static uintptr_t freemem;	/* next free paddr; bumped by relocations */
static uintptr_t pt_arena;	/* allocation cursor for L1/L2 page tables */
static uintptr_t pt_arena_max;	/* exclusive upper bound of the pt arena */
static uint32_t *pt_addr;	/* base of the L1 page table */
static int nl2pages;		/* number of L2 tables handed out (debug) */
61 61
/* Simple copy routines */

/*
 * Copy n bytes from s to d with memmove()-like handling of overlapping
 * regions.  Note the argument order: source first, destination second.
 *
 * Bug fix: the old test (dest < src && dest + n < src) only copied forward
 * when the regions were completely disjoint.  An overlapping copy with
 * dest < src fell into the backward branch, which overwrites the tail of
 * the source before those bytes have been read.  Copying forward is safe
 * for every dest < src case, overlap or not, so that is the whole test.
 */
void
bcopy(const void *s, void *d, size_t n)
{
	const char *src = s;
	char *dest = d;

	if (n == 0 || s == d)
		return;

	if (dest < src) {
		/*
		 * dest starts below src: a forward copy reads each source
		 * byte before it can be overwritten.
		 */
		for (; n > 0; n--, src++, dest++)
			*dest = *src;
	} else {
		/* dest starts above src: copy backwards to be safe. */
		src += n - 1;
		dest += n - 1;
		for (; n > 0; n--, src--, dest--)
			*dest = *src;
	}
}
84 84
/*
 * Zero n bytes starting at s.  We cannot rely on a C library here, so this
 * is a plain byte-at-a-time loop.
 */
void
bzero(void *s, size_t n)
{
	char *p = s;

	for (; n != 0; n--)
		*p++ = 0;
}
95 95
/*
 * Write a NUL-terminated string to the console, one character at a time,
 * via the platform backend.
 */
static void
fakeload_puts(const char *str)
{
	const char *p;

	for (p = str; *p != '\0'; p++)
		fakeload_backend_putc(*p);
}
104 104
/*
 * Report a fatal error on the console and hang forever; there is nothing
 * sensible to return to in the loader.
 */
static void
fakeload_panic(const char *reason)
{
	const char *msgs[] = {
		"panic!\n",
		reason,
		"\n",
		"spinning forever... goodbye...\n"
	};
	unsigned int i;

	for (i = 0; i < sizeof (msgs) / sizeof (msgs[0]); i++)
		fakeload_puts(msgs[i]);

	/* Never returns. */
	for (;;)
		;
}
115 115
116 116 static void
117 117 fakeload_ultostr(unsigned long value)
118 118 {
119 119 char buf[16];
120 120 ulong_t t, val = (ulong_t)value;
121 121 char c;
122 122 char *ptr = &(buf[14]);
123 123 buf[15] = '\0';
124 124
125 125 do {
126 126 c = (char)('0' + val - 16 * (t = (val >> 4)));
127 127 if (c > '9')
128 128 c += 'A' - '9' - 1;
129 129 *--ptr = c;
130 130 } while ((val = t) != 0);
131 131
132 132 *--ptr = 'x';
133 133 *--ptr = '0';
134 134 fakeload_puts(ptr);
135 135 }
136 136
137 137 static void
138 138 fakeload_selfmap(atag_header_t *chain)
139 139 {
140 140 atag_illumos_mapping_t aim;
141 141
142 142 aim.aim_header.ah_size = ATAG_ILLUMOS_MAPPING_SIZE;
143 143 aim.aim_header.ah_tag = ATAG_ILLUMOS_MAPPING;
144 144 aim.aim_paddr = 0x7000;
145 145 aim.aim_vaddr = aim.aim_paddr;
146 146 aim.aim_plen = 0x3000;
147 147 aim.aim_vlen = aim.aim_plen;
148 148 aim.aim_mapflags = PF_R | PF_X | PF_LOADER;
149 149 atag_append(chain, &aim.aim_header);
150 150 }
151 151
/*
 * Install a single 1 MB "section" mapping from pa to va directly in the L1
 * page table.  The entry is marked as normal (cacheable) memory; prot
 * supplies the PF_X / PF_W permission bits.  Panics if va already has a
 * valid L1 entry.
 */
static void
fakeload_map_1mb(uintptr_t pa, uintptr_t va, int prot)
{
	int entry;
	armpte_t *pte;
	arm_l1s_t *l1e;

	/* Index of the L1 descriptor covering va */
	entry = ARMPT_VADDR_TO_L1E(va);
	pte = &pt_addr[entry];
	if (ARMPT_L1E_ISVALID(*pte))
		fakeload_panic("armboot_mmu: asked to map a mapped region!\n");
	l1e = (arm_l1s_t *)pte;
	*pte = 0;
	l1e->al_type = ARMPT_L1_TYPE_SECT;
	/* Assume it's not device memory */
	l1e->al_bbit = 1;
	l1e->al_cbit = 1;
	l1e->al_tex = 1;
	l1e->al_sbit = 1;

	if (!(prot & PF_X))
		l1e->al_xn = 1;	/* non-executable unless PF_X is set */
	l1e->al_domain = 0;

	/*
	 * Access permissions.  NOTE(review): both branches set al_ap = 1 and
	 * differ only in al_ap2 (1 for PF_W, 0 otherwise) -- assuming
	 * vm/pte.h encodes writability this way; confirm against the ARMv6
	 * AP/APX definitions, where APX=1 traditionally means read-only.
	 */
	if (prot & PF_W) {
		l1e->al_ap2 = 1;
		l1e->al_ap = 1;
	} else {
		l1e->al_ap2 = 0;
		l1e->al_ap = 1;
	}
	l1e->al_ngbit = 0;
	l1e->al_issuper = 0;
	l1e->al_addr = ARMPT_PADDR_TO_L1SECT(pa);
}
187 187
/*
 * Set freemem to be 1 MB aligned at the end of boot archive. While the L1 Page
 * table only needs to be 16 KB aligned, we opt for 1 MB alignment so that way
 * we can map it and all the other L2 page tables we might need. If we don't do
 * this, it'll become problematic for unix to actually modify this.
 */
static void
fakeload_pt_arena_init(const atag_initrd_t *aii)
{
	/* NOTE(review): entry, pte and l1e are unused in this function */
	int entry, i;
	armpte_t *pte;
	arm_l1s_t *l1e;

	/* Round the end of the initrd up to the next 1 MB boundary. */
	pt_arena = aii->ai_start + aii->ai_size;
	if (pt_arena & MMU_PAGEOFFSET1M) {
		pt_arena &= MMU_PAGEMASK1M;
		pt_arena += MMU_PAGESIZE1M;
	}
	/* Reserve 4 MB for page tables; general free memory follows it. */
	pt_arena_max = pt_arena + 4 * MMU_PAGESIZE1M;
	freemem = pt_arena_max;

	/* Set up the l1 page table by first invalidating it */
	pt_addr = (armpte_t *)pt_arena;
	pt_arena += ARMPT_L1_SIZE;
	bzero(pt_addr, ARMPT_L1_SIZE);
	/* 1:1 map the whole 4 MB arena so the tables can be edited later. */
	for (i = 0; i < 4; i++)
		fakeload_map_1mb((uintptr_t)pt_addr + i * MMU_PAGESIZE1M,
		    (uintptr_t)pt_addr + i * MMU_PAGESIZE1M,
		    PF_R | PF_W);
}
218 218
/*
 * This is our general entry point for mapping the combined archive.  addr
 * points at the fakeloader header in memory.  We validate that header and
 * unix's ELF headers, append an ATAG_ILLUMOS_MAPPING record for each of
 * unix's PT_LOAD segments plus a 1:1 mapping for the boot archive, fill in
 * the text/data/archive bounds of aisp, and return unix's ELF entry point.
 */
static uintptr_t
fakeload_archive_mappings(atag_header_t *chain, const void *addr,
    atag_illumos_status_t *aisp)
{
	atag_illumos_mapping_t aim;
	fakeloader_hdr_t *hdr;
	Elf32_Ehdr *ehdr;
	Elf32_Phdr *phdr;
	int nhdrs, i;
	uintptr_t ret;
	uintptr_t text = 0, data = 0;
	size_t textln = 0, dataln = 0;

	hdr = (fakeloader_hdr_t *)addr;

	/* Sanity check the fakeloader header magic bytes. */
	if (hdr->fh_magic[0] != FH_MAGIC0)
		fakeload_panic("fh_magic[0] is wrong!\n");
	if (hdr->fh_magic[1] != FH_MAGIC1)
		fakeload_panic("fh_magic[1] is wrong!\n");
	if (hdr->fh_magic[2] != FH_MAGIC2)
		fakeload_panic("fh_magic[2] is wrong!\n");
	if (hdr->fh_magic[3] != FH_MAGIC3)
		fakeload_panic("fh_magic[3] is wrong!\n");

	/* Both unix and the boot archive must be present. */
	if (hdr->fh_unix_size == 0)
		fakeload_panic("hdr unix size is zero\n");
	if (hdr->fh_unix_offset == 0)
		fakeload_panic("hdr unix offset is zero\n");
	if (hdr->fh_archive_size == 0)
		fakeload_panic("hdr archive size is zero\n");
	if (hdr->fh_archive_offset == 0)
		fakeload_panic("hdr archive_offset is zero\n");

	ehdr = (Elf32_Ehdr *)((uintptr_t)addr + hdr->fh_unix_offset);

	/* Verify unix is a 32-bit LSB ARM executable for this ABI. */
	if (ehdr->e_ident[EI_MAG0] != ELFMAG0)
		fakeload_panic("magic[0] wrong");
	if (ehdr->e_ident[EI_MAG1] != ELFMAG1)
		fakeload_panic("magic[1] wrong");
	if (ehdr->e_ident[EI_MAG2] != ELFMAG2)
		fakeload_panic("magic[2] wrong");
	if (ehdr->e_ident[EI_MAG3] != ELFMAG3)
		fakeload_panic("magic[3] wrong");
	if (ehdr->e_ident[EI_CLASS] != ELFCLASS32)
		fakeload_panic("wrong elfclass");
	if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB)
		fakeload_panic("wrong encoding");
	if (ehdr->e_ident[EI_OSABI] != ELFOSABI_SOLARIS)
		fakeload_panic("wrong os abi");
	if (ehdr->e_ident[EI_ABIVERSION] != EAV_SUNW_CURRENT)
		fakeload_panic("wrong abi version");
	if (ehdr->e_type != ET_EXEC)
		fakeload_panic("unix is not an executable");
	if (ehdr->e_machine != EM_ARM)
		fakeload_panic("unix is not an ARM Executible");
	if (ehdr->e_version != EV_CURRENT)
		fakeload_panic("wrong version");
	if (ehdr->e_phnum == 0)
		fakeload_panic("no program headers");
	ret = ehdr->e_entry;

	FAKELOAD_DPRINTF("validated unix's headers\n");

	/* Walk unix's program headers and record each loadable segment. */
	nhdrs = ehdr->e_phnum;
	phdr = (Elf32_Phdr *)((uintptr_t)addr + hdr->fh_unix_offset +
	    ehdr->e_phoff);
	for (i = 0; i < nhdrs; i++, phdr++) {
		if (phdr->p_type != PT_LOAD) {
			fakeload_puts("skipping non-PT_LOAD header\n");
			continue;
		}

		if (phdr->p_filesz == 0 || phdr->p_memsz == 0) {
			fakeload_puts("skipping PT_LOAD with 0 file/mem\n");
			continue;
		}

		/*
		 * Create a mapping record for this in the atags.
		 */
		aim.aim_header.ah_size = ATAG_ILLUMOS_MAPPING_SIZE;
		aim.aim_header.ah_tag = ATAG_ILLUMOS_MAPPING;
		aim.aim_paddr = (uintptr_t)addr + hdr->fh_unix_offset +
		    phdr->p_offset;
		aim.aim_plen = phdr->p_filesz;
		aim.aim_vaddr = phdr->p_vaddr;
		aim.aim_vlen = phdr->p_memsz;
		/* Round up vlen to be a multiple of 4k */
		if (aim.aim_vlen & 0xfff) {
			aim.aim_vlen &= ~0xfff;
			aim.aim_vlen += 0x1000;
		}
		aim.aim_mapflags = phdr->p_flags;
		atag_append(chain, &aim.aim_header);

		/*
		 * When built with highvecs we need to account for the fact that
		 * _edata, _etext and _end are built assuming that the highvecs
		 * are normally part of our segments. ld is not doing anything
		 * wrong, but this breaks the assumptions that krtld currently
		 * has. As such, unix will use this information to overwrite the
		 * normal entry points that krtld uses in a similar style to
		 * SPARC.
		 */
		if (aim.aim_vaddr != 0xffff0000) {
			/* Writable segment -> data; otherwise text. */
			if ((phdr->p_flags & PF_W) != 0) {
				data = aim.aim_vaddr;
				dataln = aim.aim_vlen;
			} else {
				text = aim.aim_vaddr;
				textln = aim.aim_vlen;
			}
		}
	}

	/* Record the discovered text/data bounds for unix's benefit. */
	aisp->ais_stext = text;
	aisp->ais_etext = text + textln;
	aisp->ais_sdata = data;
	aisp->ais_edata = data + dataln;

	/* 1:1 map the boot archive */
	aim.aim_header.ah_size = ATAG_ILLUMOS_MAPPING_SIZE;
	aim.aim_header.ah_tag = ATAG_ILLUMOS_MAPPING;
	aim.aim_paddr = (uintptr_t)addr + hdr->fh_archive_offset;
	aim.aim_plen = hdr->fh_archive_size;
	aim.aim_vaddr = aim.aim_paddr;
	aim.aim_vlen = aim.aim_plen;
	aim.aim_mapflags = PF_R | PF_W | PF_X;
	atag_append(chain, &aim.aim_header);
	aisp->ais_archive = aim.aim_paddr;
	aisp->ais_archivelen = aim.aim_plen;

	return (ret);
}
357 357
358 358 static void
359 359 fakeload_mkatags(atag_header_t *chain)
360 360 {
361 361 atag_illumos_status_t ais;
362 362 atag_illumos_mapping_t aim;
363 363
364 364 bzero(&ais, sizeof (ais));
365 365 bzero(&aim, sizeof (aim));
366 366
367 367 ais.ais_header.ah_size = ATAG_ILLUMOS_STATUS_SIZE;
368 368 ais.ais_header.ah_tag = ATAG_ILLUMOS_STATUS;
369 369 atag_append(chain, &ais.ais_header);
370 370 aim.aim_header.ah_size = ATAG_ILLUMOS_MAPPING_SIZE;
371 371 aim.aim_header.ah_tag = ATAG_ILLUMOS_MAPPING;
372 372 atag_append(chain, &aim.aim_header);
373 373 }
374 374
375 375 static uintptr_t
376 376 fakeload_alloc_l2pt(void)
377 377 {
378 378 uintptr_t ret;
379 379
380 380 if (pt_arena & ARMPT_L2_MASK) {
381 381 ret = pt_arena;
382 382 ret &= ~ARMPT_L2_MASK;
383 383 ret += ARMPT_L2_SIZE;
384 384 pt_arena = ret + ARMPT_L2_SIZE;
385 385 } else {
386 386 ret = pt_arena;
387 387 pt_arena = ret + ARMPT_L2_SIZE;
388 388 }
389 389 if (pt_arena >= pt_arena_max) {
390 390 fakeload_puts("pt_arena, max\n");
391 391 fakeload_ultostr(pt_arena);
392 392 fakeload_puts("\n");
393 393 fakeload_ultostr(pt_arena_max);
394 394 fakeload_puts("\n");
395 395 fakeload_puts("l2pts alloced\n");
396 396 fakeload_ultostr(nl2pages);
397 397 fakeload_puts("\n");
398 398 fakeload_panic("ran out of page tables!");
399 399 }
400 400
401 401 bzero((void *)ret, ARMPT_L2_SIZE);
402 402 nl2pages++;
403 403 return (ret);
404 404 }
405 405
/*
 * Finally, do all the dirty work. Let's create some page tables. The L1 page
 * table is full of 1 MB mappings by default. The L2 Page table is 1k in size
 * and covers that 1 MB. We're going to always create L2 page tables for now
 * which will use 4k and 64k pages.
 *
 * Maps [pstart, pstart + len) to [vstart, vstart + len) in the page table
 * rooted at pt, with permissions taken from prot.  All three of pstart,
 * vstart and len must be 4k aligned.  Overlap with an existing mapping
 * panics.
 */
static void
fakeload_map(armpte_t *pt, uintptr_t pstart, uintptr_t vstart, size_t len,
    uint32_t prot)
{
	int entry, chunksize;
	armpte_t *pte, *l2pt;
	arm_l1pt_t *l1pt;

	/*
	 * Make sure both pstart + vstart are 4k aligned, along with len.
	 */
	if (pstart & MMU_PAGEOFFSET)
		fakeload_panic("pstart is not 4k aligned");
	if (vstart & MMU_PAGEOFFSET)
		fakeload_panic("vstart is not 4k aligned");
	if (len & MMU_PAGEOFFSET)
		fakeload_panic("len is not 4k aligned");

	/*
	 * We're going to logically deal with each 1 MB chunk at a time.
	 */
	while (len > 0) {
		/* Clamp this chunk to the end of the current 1 MB section. */
		if (vstart & MMU_PAGEOFFSET1M) {
			chunksize = MIN(len, MMU_PAGESIZE1M -
			    (vstart & MMU_PAGEOFFSET1M));
		} else {
			chunksize = MIN(len, MMU_PAGESIZE1M);
		}

		entry = ARMPT_VADDR_TO_L1E(vstart);
		pte = &pt[entry];

		if (!ARMPT_L1E_ISVALID(*pte)) {
			uintptr_t l2table;

			/*
			 * Fast path: an exactly 1 MB, 1 MB-aligned request
			 * can use a single L1 section entry -- no L2 table.
			 */
			if (!(vstart & MMU_PAGEOFFSET1M) &&
			    !(pstart & MMU_PAGEOFFSET1M) &&
			    len == MMU_PAGESIZE1M) {
				fakeload_map_1mb(pstart, vstart, prot);
				vstart += MMU_PAGESIZE1M;
				pstart += MMU_PAGESIZE1M;
				len -= MMU_PAGESIZE1M;
				continue;
			}

			/* Otherwise hang a fresh L2 table off this L1 slot. */
			l2table = fakeload_alloc_l2pt();
			*pte = 0;
			l1pt = (arm_l1pt_t *)pte;
			l1pt->al_type = ARMPT_L1_TYPE_L2PT;
			l1pt->al_ptaddr = ARMPT_ADDR_TO_L1PTADDR(l2table);
		} else if ((*pte & ARMPT_L1_TYPE_MASK) != ARMPT_L1_TYPE_L2PT) {
			fakeload_panic("encountered l1 entry that's not a "
			    "pointer to a level 2 table\n");
		} else {
			/* Reuse the L2 table that's already installed. */
			l1pt = (arm_l1pt_t *)pte;
		}

		/* Now that we have the l1pt fill in l2 entries */
		l2pt = (void *)(l1pt->al_ptaddr << ARMPT_L1PT_TO_L2_SHIFT);
		len -= chunksize;
		while (chunksize > 0) {
			arm_l2e_t *l2pte;

			entry = ARMPT_VADDR_TO_L2E(vstart);
			pte = &l2pt[entry];

#ifdef MAP_DEBUG
			fakeload_puts("4k page pa->va, l2root, entry\n");
			fakeload_ultostr(pstart);
			fakeload_puts("->");
			fakeload_ultostr(vstart);
			fakeload_puts(", ");
			fakeload_ultostr((uintptr_t)l2pt);
			fakeload_puts(", ");
			fakeload_ultostr(entry);
			fakeload_puts("\n");
#endif

			if ((*pte & ARMPT_L2_TYPE_MASK) !=
			    ARMPT_L2_TYPE_INVALID)
				fakeload_panic("found existing l2 page table, "
				    "overlap in requested mappings detected!");
			/* Map vaddr to our paddr! */
			l2pte = ((arm_l2e_t *)pte);
			*pte = 0;
			if (!(prot & PF_X))
				l2pte->ale_xn = 1;
			l2pte->ale_ident = 1;
			if (prot & PF_DEVICE) {
				/* Device memory: uncached */
				l2pte->ale_bbit = 1;
				l2pte->ale_cbit = 0;
				l2pte->ale_tex = 0;
				l2pte->ale_sbit = 1;
			} else {
				/* Normal memory: cacheable */
				l2pte->ale_bbit = 1;
				l2pte->ale_cbit = 1;
				l2pte->ale_tex = 1;
				l2pte->ale_sbit = 1;
			}
			/* Same ap2/ap permission encoding as in the L1 path */
			if (prot & PF_W) {
				l2pte->ale_ap2 = 1;
				l2pte->ale_ap = 1;
			} else {
				l2pte->ale_ap2 = 0;
				l2pte->ale_ap = 1;
			}
			l2pte->ale_ngbit = 0;
			l2pte->ale_addr = ARMPT_PADDR_TO_L2ADDR(pstart);

			chunksize -= MMU_PAGESIZE;
			vstart += MMU_PAGESIZE;
			pstart += MMU_PAGESIZE;
		}
	}
}
527 527
528 528 static void
529 529 fakeload_create_map(armpte_t *pt, atag_illumos_mapping_t *aimp)
530 530 {
531 531 #ifdef MAP_DEBUG
532 532 fakeload_puts("paddr->vaddr\n");
533 533 fakeload_ultostr(aimp->aim_paddr);
534 534 fakeload_puts("->");
535 535 fakeload_ultostr(aimp->aim_vaddr);
536 536 fakeload_puts("\n");
537 537 fakeload_puts("plen-vlen\n");
538 538 fakeload_ultostr(aimp->aim_plen);
539 539 fakeload_puts("-");
540 540 fakeload_ultostr(aimp->aim_vlen);
541 541 fakeload_puts("\n");
542 542 #endif /* MAP_DEBUG */
543 543
544 544 /*
545 545 * Can we map this in place or do we need to basically allocate a new
546 546 * region and bcopy everything into place for proper alignment?
547 547 *
548 548 * Criteria for this: we have a vlen > plen. plen is not page aligned.
549 549 */
550 550 if (aimp->aim_vlen > aimp->aim_plen ||
551 551 (aimp->aim_paddr & MMU_PAGEOFFSET) != 0) {
552 552 uintptr_t start;
553 553
554 554 if (aimp->aim_mapflags & PF_NORELOC)
555 555 fakeload_panic("tried to reloc unrelocatable mapping");
556 556 #ifdef MAP_DEBUG
557 557 FAKELOAD_DPRINTF("reloacting paddr\n");
558 558 #endif
559 559 start = freemem;
560 560 if (start & MMU_PAGEOFFSET) {
561 561 start &= MMU_PAGEMASK;
562 562 start += MMU_PAGESIZE;
563 563 }
564 564 bcopy((void *)aimp->aim_paddr, (void *)start,
565 565 aimp->aim_plen);
566 566 if (aimp->aim_vlen > aimp->aim_plen) {
567 567 bzero((void *)(start + aimp->aim_plen),
568 568 aimp->aim_vlen - aimp->aim_plen);
569 569 }
570 570 aimp->aim_paddr = start;
571 571 freemem = start + aimp->aim_vlen;
572 572 #ifdef MAP_DEBUG
573 573 fakeload_puts("new paddr: ");
574 574 fakeload_ultostr(start);
575 575 fakeload_puts("\n");
576 576 #endif /* MAP_DEBUG */
577 577 }
578 578
579 579 /*
580 580 * Now that everything has been set up, go ahead and map the new region.
581 581 */
582 582 fakeload_map(pt, aimp->aim_paddr, aimp->aim_vaddr, aimp->aim_vlen,
583 583 aimp->aim_mapflags);
584 584 #ifdef MAP_DEBUG
585 585 FAKELOAD_DPRINTF("\n");
586 586 #endif /* MAP_DEBUG */
587 587 }
588 588
/*
 * Loader entry point, called from the platform start code.  Builds the
 * mapping atags, constructs the initial page tables, disables the caches,
 * turns on the MMU, and hands control to unix.  ident/ident2 are passed
 * through untouched to fakeload_exec(); atag is the boot atag chain.
 */
void
fakeload_init(void *ident, void *ident2, void *atag)
{
	atag_header_t *hdr;
	atag_header_t *chain = (atag_header_t *)atag;
	const atag_initrd_t *initrd;
	atag_illumos_status_t *aisp;
	atag_illumos_mapping_t *aimp;
	uintptr_t unix_start;

	fakeload_backend_init();
	fakeload_puts("Hello from the loader\n");
	/* The combined unix + boot archive arrives as the initrd. */
	initrd = (atag_initrd_t *)atag_find(chain, ATAG_INITRD2);
	if (initrd == NULL)
		fakeload_panic("missing the initial ramdisk\n");

	/*
	 * Create the status atag header and the initial mapping record for the
	 * atags. We'll hold onto both of these.
	 */
	fakeload_mkatags(chain);
	aisp = (atag_illumos_status_t *)atag_find(chain, ATAG_ILLUMOS_STATUS);
	if (aisp == NULL)
		fakeload_panic("can't find ATAG_ILLUMOS_STATUS");
	aimp = (atag_illumos_mapping_t *)atag_find(chain, ATAG_ILLUMOS_MAPPING);
	if (aimp == NULL)
		fakeload_panic("can't find ATAG_ILLUMOS_MAPPING");
	FAKELOAD_DPRINTF("created proto atags\n");

	/* Carve out the page-table arena past the end of the archive. */
	fakeload_pt_arena_init(initrd);

	/* Keep the loader itself mapped once the MMU comes on. */
	fakeload_selfmap(chain);

	/*
	 * Map the boot archive and all of unix
	 */
	unix_start = fakeload_archive_mappings(chain,
	    (const void *)(uintptr_t)initrd->ai_start, aisp);
	FAKELOAD_DPRINTF("filled out unix and the archive's mappings\n");

	/*
	 * Fill in the atag mapping header for the atags themselves. 1:1 map it.
	 */
	aimp->aim_paddr = (uintptr_t)chain & ~0xfff;
	aimp->aim_plen = atag_length(chain) & ~0xfff;
	aimp->aim_plen += 0x1000;
	aimp->aim_vaddr = aimp->aim_paddr;
	aimp->aim_vlen = aimp->aim_plen;
	aimp->aim_mapflags = PF_R | PF_W | PF_NORELOC;

	/*
	 * Let the backend add mappings
	 */
	fakeload_backend_addmaps(chain);

	/*
	 * Turn on unaligned access
	 */
	FAKELOAD_DPRINTF("turning on unaligned access\n");
	fakeload_unaligned_enable();
	FAKELOAD_DPRINTF("successfully enabled unaligned access\n");

	/*
	 * To turn on the MMU we need to do the following:
	 * o Program all relevant CP15 registers
	 * o Program 1st and 2nd level page tables
	 * o Invalidate and Disable the I/D-cache
	 * o Fill in the last bits of the ATAG_ILLUMOS_STATUS atag
	 * o Turn on the MMU in SCTLR
	 * o Jump to unix
	 */

	/* Last bits of the atag */
	aisp->ais_freemem = freemem;
	aisp->ais_version = 1;
	aisp->ais_ptbase = (uintptr_t)pt_addr;

	/*
	 * Our initial page table is a series of 1 MB sections. While we really
	 * should map 4k pages, for the moment we're just going to map 1 MB
	 * regions, yay team!
	 */
	hdr = chain;
	FAKELOAD_DPRINTF("creating mappings\n");
	while (hdr != NULL) {
		if (hdr->ah_tag == ATAG_ILLUMOS_MAPPING)
			fakeload_create_map(pt_addr,
			    (atag_illumos_mapping_t *)hdr);
		hdr = atag_next(hdr);
	}

	/*
	 * Now that we've mapped everything, update the status atag.
	 */
	aisp->ais_freeused = freemem - aisp->ais_freemem;
	aisp->ais_pt_arena = pt_arena;
	aisp->ais_pt_arena_max = pt_arena_max;

	/* Cache disable */
	FAKELOAD_DPRINTF("Flushing and disabling caches\n");
	armv6_dcache_flush();
	armv6_dcache_disable();
	armv6_dcache_inval();
	armv6_icache_disable();
	armv6_icache_inval();

	/* Program the page tables */
	FAKELOAD_DPRINTF("programming cp15 regs\n");
	fakeload_pt_setup((uintptr_t)pt_addr);


	/* MMU Enable */
	FAKELOAD_DPRINTF("see you on the other side\n");
	fakeload_mmu_enable();

	FAKELOAD_DPRINTF("why helo thar\n");

	/*
	 * The caches are deliberately left disabled here; per this change,
	 * unix enables them itself in locore.
	 */

	/* we should never come back */
	fakeload_exec(ident, ident2, chain, unix_start);
	fakeload_panic("hit the end of the world\n");
}
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX