Print this page
loader: allow 1MB device maps
There's no reason we shouldn't allow 1MB PTEs for use on device memory.
loader: map as much as possible using 1MB pages
Chances are that we never actually executed this bit of code since all the
maps we ever deal with are either very short or much larger than 1MB.
unix: enable caches in locore
The loader should really be as simple as possible to be as small as
possible. It should configure the machine so that unix can make certain
assumptions but it should leave more complex initialization to unix.
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/armv6/loader/fakeloader.c
+++ new/usr/src/uts/armv6/loader/fakeloader.c
1 1 /*
2 2 * This file and its contents are supplied under the terms of the
3 3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 4 * You may only use this file in accordance with the terms of version
5 5 * 1.0 of the CDDL.
6 6 *
7 7 * A full copy of the text of the CDDL should have accompanied this
8 8 * source. A copy of the CDDL is also available via the Internet at
9 9 * http://www.illumos.org/license/CDDL.
10 10 */
11 11
12 12 /*
13 13 * Copyright (c) 2014 Joyent, Inc. All rights reserved.
14 14 * Copyright (c) 2015 Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
15 15 */
16 16
17 17 #include "fakeloader.h"
18 18
19 19 #include <sys/types.h>
20 20 #include <sys/param.h>
21 21 #include <sys/elf.h>
22 22 #include <sys/atag.h>
23 23 #include <sys/sysmacros.h>
24 24 #include <sys/machparam.h>
25 25
26 26 #include <vm/pte.h>
27 27
↓ open down ↓ |
27 lines elided |
↑ open up ↑ |
/*
 * The stock ARM fake uniboot loader.
 *
 * Here's what we have to do:
 *  o Read the atag header and find the combined archive header
 *  o Determine the set of mappings we need to add for the following:
 *    - unix
 *    - boot_archive
 *    - atags
 *  o Enable unaligned access
 *  o Enable virtual memory
 *
 * There are several important constraints that we have here:
 *
 *  o We cannot use any .data!  Several loaders that come before us are
 *    broken and only provide us with the ability to map our .text and
 *    potentially our .bss.  We should strive to avoid even that if we can.
 */

#ifdef DEBUG
#define	FAKELOAD_DPRINTF(x)	fakeload_puts(x)
#else
#define	FAKELOAD_DPRINTF(x)
#endif /* DEBUG */
52 52
/*
 * XXX ASSUMES WE HAVE Free memory following the boot archive
 */
static uintptr_t freemem;	/* next free physical address for relocation */
static uintptr_t pt_arena;	/* allocation cursor for page tables */
static uintptr_t pt_arena_max;	/* exclusive upper bound of the pt arena */
static uint32_t *pt_addr;	/* base of the L1 page table */
static int nl2pages;		/* count of L2 page tables handed out */
61 61
/* Simple copy routines */

/*
 * Copy n bytes from s to d with memmove semantics: the regions may
 * overlap in either direction.
 *
 * Bug fix: the original condition was (dest < src && dest + n < src),
 * which sent the "dest below src but overlapping" case to the backward
 * copy.  A backward copy in that case reads source bytes that have
 * already been overwritten.  Copying forward is safe for every dest < src
 * arrangement, overlapping or not, so that is the only test we need.
 */
void
bcopy(const void *s, void *d, size_t n)
{
	const char *src = s;
	char *dest = d;

	if (n == 0 || s == d)
		return;

	if (dest < src) {
		/* dest is below src: a forward copy never clobbers unread
		 * source bytes */
		for (; n > 0; n--, src++, dest++)
			*dest = *src;
	} else {
		/* dest is above src: copy backward for the same reason */
		src += n - 1;
		dest += n - 1;
		for (; n > 0; n--, src--, dest--)
			*dest = *src;
	}
}
84 84
/*
 * Zero n bytes starting at s, one byte at a time.
 */
void
bzero(void *s, size_t n)
{
	char *p = s;

	while (n-- > 0)
		*p++ = 0;
}
95 95
/*
 * Write a NUL-terminated string to the console, one character at a time,
 * via the backend's putc routine.
 */
static void
fakeload_puts(const char *str)
{
	const char *p;

	for (p = str; *p != '\0'; p++)
		fakeload_backend_putc(*p);
}
104 104
/*
 * Report an unrecoverable failure and hang.  There is nowhere to return
 * to this early in boot, so we just spin forever.
 */
static void
fakeload_panic(const char *reason)
{
	const char *msgs[] = {
		"panic!\n",
		reason,
		"\n",
		"spinning forever... goodbye...\n"
	};
	int i;

	for (i = 0; i < 4; i++)
		fakeload_puts(msgs[i]);

	for (;;)
		;
}
115 115
116 116 static void
117 117 fakeload_ultostr(unsigned long value)
118 118 {
119 119 char buf[16];
120 120 ulong_t t, val = (ulong_t)value;
121 121 char c;
122 122 char *ptr = &(buf[14]);
123 123 buf[15] = '\0';
124 124
125 125 do {
126 126 c = (char)('0' + val - 16 * (t = (val >> 4)));
127 127 if (c > '9')
128 128 c += 'A' - '9' - 1;
129 129 *--ptr = c;
130 130 } while ((val = t) != 0);
131 131
132 132 *--ptr = 'x';
133 133 *--ptr = '0';
134 134 fakeload_puts(ptr);
135 135 }
136 136
137 137 static void
138 138 fakeload_selfmap(atag_header_t *chain)
139 139 {
140 140 atag_illumos_mapping_t aim;
141 141
142 142 aim.aim_header.ah_size = ATAG_ILLUMOS_MAPPING_SIZE;
143 143 aim.aim_header.ah_tag = ATAG_ILLUMOS_MAPPING;
144 144 aim.aim_paddr = 0x7000;
145 145 aim.aim_vaddr = aim.aim_paddr;
146 146 aim.aim_plen = 0x3000;
147 147 aim.aim_vlen = aim.aim_plen;
148 148 aim.aim_mapflags = PF_R | PF_X | PF_LOADER;
149 149 atag_append(chain, &aim.aim_header);
150 150 }
151 151
152 152 static void
153 153 fakeload_map_1mb(uintptr_t pa, uintptr_t va, int prot)
154 154 {
155 155 int entry;
↓ open down ↓ |
107 lines elided |
↑ open up ↑ |
156 156 armpte_t *pte;
157 157 arm_l1s_t *l1e;
158 158
159 159 entry = ARMPT_VADDR_TO_L1E(va);
160 160 pte = &pt_addr[entry];
161 161 if (ARMPT_L1E_ISVALID(*pte))
162 162 fakeload_panic("armboot_mmu: asked to map a mapped region!\n");
163 163 l1e = (arm_l1s_t *)pte;
164 164 *pte = 0;
165 165 l1e->al_type = ARMPT_L1_TYPE_SECT;
166 - /* Assume it's not device memory */
167 - l1e->al_bbit = 1;
168 - l1e->al_cbit = 1;
169 - l1e->al_tex = 1;
170 - l1e->al_sbit = 1;
166 +
167 + if (prot & PF_DEVICE) {
168 + l1e->al_bbit = 1;
169 + l1e->al_cbit = 0;
170 + l1e->al_tex = 0;
171 + l1e->al_sbit = 1;
172 + } else {
173 + l1e->al_bbit = 1;
174 + l1e->al_cbit = 1;
175 + l1e->al_tex = 1;
176 + l1e->al_sbit = 1;
177 + }
171 178
172 179 if (!(prot & PF_X))
173 180 l1e->al_xn = 1;
174 181 l1e->al_domain = 0;
175 182
176 183 if (prot & PF_W) {
177 184 l1e->al_ap2 = 1;
178 185 l1e->al_ap = 1;
179 186 } else {
180 187 l1e->al_ap2 = 0;
181 188 l1e->al_ap = 1;
182 189 }
183 190 l1e->al_ngbit = 0;
184 191 l1e->al_issuper = 0;
185 192 l1e->al_addr = ARMPT_PADDR_TO_L1SECT(pa);
186 193 }
187 194
188 195 /*
189 196 * Set freemem to be 1 MB aligned at the end of boot archive. While the L1 Page
190 197 * table only needs to be 16 KB aligned, we opt for 1 MB alignment so that way
191 198 * we can map it and all the other L2 page tables we might need. If we don't do
192 199 * this, it'll become problematic for unix to actually modify this.
193 200 */
194 201 static void
195 202 fakeload_pt_arena_init(const atag_initrd_t *aii)
196 203 {
197 204 int entry, i;
198 205 armpte_t *pte;
199 206 arm_l1s_t *l1e;
200 207
201 208 pt_arena = aii->ai_start + aii->ai_size;
202 209 if (pt_arena & MMU_PAGEOFFSET1M) {
203 210 pt_arena &= MMU_PAGEMASK1M;
204 211 pt_arena += MMU_PAGESIZE1M;
205 212 }
206 213 pt_arena_max = pt_arena + 4 * MMU_PAGESIZE1M;
207 214 freemem = pt_arena_max;
208 215
209 216 /* Set up the l1 page table by first invalidating it */
210 217 pt_addr = (armpte_t *)pt_arena;
211 218 pt_arena += ARMPT_L1_SIZE;
212 219 bzero(pt_addr, ARMPT_L1_SIZE);
213 220 for (i = 0; i < 4; i++)
214 221 fakeload_map_1mb((uintptr_t)pt_addr + i * MMU_PAGESIZE1M,
215 222 (uintptr_t)pt_addr + i * MMU_PAGESIZE1M,
216 223 PF_R | PF_W);
217 224 }
218 225
219 226 /*
220 227 * This is our generally entry point. We're passed in the entry point of the
221 228 * header.
222 229 */
223 230 static uintptr_t
224 231 fakeload_archive_mappings(atag_header_t *chain, const void *addr,
225 232 atag_illumos_status_t *aisp)
226 233 {
227 234 atag_illumos_mapping_t aim;
228 235 fakeloader_hdr_t *hdr;
229 236 Elf32_Ehdr *ehdr;
230 237 Elf32_Phdr *phdr;
231 238 int nhdrs, i;
232 239 uintptr_t ret;
233 240 uintptr_t text = 0, data = 0;
234 241 size_t textln = 0, dataln = 0;
235 242
236 243 hdr = (fakeloader_hdr_t *)addr;
237 244
238 245 if (hdr->fh_magic[0] != FH_MAGIC0)
239 246 fakeload_panic("fh_magic[0] is wrong!\n");
240 247 if (hdr->fh_magic[1] != FH_MAGIC1)
241 248 fakeload_panic("fh_magic[1] is wrong!\n");
242 249 if (hdr->fh_magic[2] != FH_MAGIC2)
243 250 fakeload_panic("fh_magic[2] is wrong!\n");
244 251 if (hdr->fh_magic[3] != FH_MAGIC3)
245 252 fakeload_panic("fh_magic[3] is wrong!\n");
246 253
247 254 if (hdr->fh_unix_size == 0)
248 255 fakeload_panic("hdr unix size is zero\n");
249 256 if (hdr->fh_unix_offset == 0)
250 257 fakeload_panic("hdr unix offset is zero\n");
251 258 if (hdr->fh_archive_size == 0)
252 259 fakeload_panic("hdr archive size is zero\n");
253 260 if (hdr->fh_archive_offset == 0)
254 261 fakeload_panic("hdr archive_offset is zero\n");
255 262
256 263 ehdr = (Elf32_Ehdr *)((uintptr_t)addr + hdr->fh_unix_offset);
257 264
258 265 if (ehdr->e_ident[EI_MAG0] != ELFMAG0)
259 266 fakeload_panic("magic[0] wrong");
260 267 if (ehdr->e_ident[EI_MAG1] != ELFMAG1)
261 268 fakeload_panic("magic[1] wrong");
262 269 if (ehdr->e_ident[EI_MAG2] != ELFMAG2)
263 270 fakeload_panic("magic[2] wrong");
264 271 if (ehdr->e_ident[EI_MAG3] != ELFMAG3)
265 272 fakeload_panic("magic[3] wrong");
266 273 if (ehdr->e_ident[EI_CLASS] != ELFCLASS32)
267 274 fakeload_panic("wrong elfclass");
268 275 if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB)
269 276 fakeload_panic("wrong encoding");
270 277 if (ehdr->e_ident[EI_OSABI] != ELFOSABI_SOLARIS)
271 278 fakeload_panic("wrong os abi");
272 279 if (ehdr->e_ident[EI_ABIVERSION] != EAV_SUNW_CURRENT)
273 280 fakeload_panic("wrong abi version");
274 281 if (ehdr->e_type != ET_EXEC)
275 282 fakeload_panic("unix is not an executable");
276 283 if (ehdr->e_machine != EM_ARM)
277 284 fakeload_panic("unix is not an ARM Executible");
278 285 if (ehdr->e_version != EV_CURRENT)
279 286 fakeload_panic("wrong version");
280 287 if (ehdr->e_phnum == 0)
281 288 fakeload_panic("no program headers");
282 289 ret = ehdr->e_entry;
283 290
284 291 FAKELOAD_DPRINTF("validated unix's headers\n");
285 292
286 293 nhdrs = ehdr->e_phnum;
287 294 phdr = (Elf32_Phdr *)((uintptr_t)addr + hdr->fh_unix_offset +
288 295 ehdr->e_phoff);
289 296 for (i = 0; i < nhdrs; i++, phdr++) {
290 297 if (phdr->p_type != PT_LOAD) {
291 298 fakeload_puts("skipping non-PT_LOAD header\n");
292 299 continue;
293 300 }
294 301
295 302 if (phdr->p_filesz == 0 || phdr->p_memsz == 0) {
296 303 fakeload_puts("skipping PT_LOAD with 0 file/mem\n");
297 304 continue;
298 305 }
299 306
300 307 /*
301 308 * Create a mapping record for this in the atags.
302 309 */
303 310 aim.aim_header.ah_size = ATAG_ILLUMOS_MAPPING_SIZE;
304 311 aim.aim_header.ah_tag = ATAG_ILLUMOS_MAPPING;
305 312 aim.aim_paddr = (uintptr_t)addr + hdr->fh_unix_offset +
306 313 phdr->p_offset;
307 314 aim.aim_plen = phdr->p_filesz;
308 315 aim.aim_vaddr = phdr->p_vaddr;
309 316 aim.aim_vlen = phdr->p_memsz;
310 317 /* Round up vlen to be a multiple of 4k */
311 318 if (aim.aim_vlen & 0xfff) {
312 319 aim.aim_vlen &= ~0xfff;
313 320 aim.aim_vlen += 0x1000;
314 321 }
315 322 aim.aim_mapflags = phdr->p_flags;
316 323 atag_append(chain, &aim.aim_header);
317 324
318 325 /*
319 326 * When built with highvecs we need to account for the fact that
320 327 * _edata, _etext and _end are built assuming that the highvecs
321 328 * are normally part of our segments. ld is not doing anything
322 329 * wrong, but this breaks the assumptions that krtld currently
323 330 * has. As such, unix will use this information to overwrite the
324 331 * normal entry points that krtld uses in a similar style to
325 332 * SPARC.
326 333 */
327 334 if (aim.aim_vaddr != 0xffff0000) {
328 335 if ((phdr->p_flags & PF_W) != 0) {
329 336 data = aim.aim_vaddr;
330 337 dataln = aim.aim_vlen;
331 338 } else {
332 339 text = aim.aim_vaddr;
333 340 textln = aim.aim_vlen;
334 341 }
335 342 }
336 343 }
337 344
338 345 aisp->ais_stext = text;
339 346 aisp->ais_etext = text + textln;
340 347 aisp->ais_sdata = data;
341 348 aisp->ais_edata = data + dataln;
342 349
343 350 /* 1:1 map the boot archive */
344 351 aim.aim_header.ah_size = ATAG_ILLUMOS_MAPPING_SIZE;
345 352 aim.aim_header.ah_tag = ATAG_ILLUMOS_MAPPING;
346 353 aim.aim_paddr = (uintptr_t)addr + hdr->fh_archive_offset;
347 354 aim.aim_plen = hdr->fh_archive_size;
348 355 aim.aim_vaddr = aim.aim_paddr;
349 356 aim.aim_vlen = aim.aim_plen;
350 357 aim.aim_mapflags = PF_R | PF_W | PF_X;
351 358 atag_append(chain, &aim.aim_header);
352 359 aisp->ais_archive = aim.aim_paddr;
353 360 aisp->ais_archivelen = aim.aim_plen;
354 361
355 362 return (ret);
356 363 }
357 364
358 365 static void
359 366 fakeload_mkatags(atag_header_t *chain)
360 367 {
361 368 atag_illumos_status_t ais;
362 369 atag_illumos_mapping_t aim;
363 370
364 371 bzero(&ais, sizeof (ais));
365 372 bzero(&aim, sizeof (aim));
366 373
367 374 ais.ais_header.ah_size = ATAG_ILLUMOS_STATUS_SIZE;
368 375 ais.ais_header.ah_tag = ATAG_ILLUMOS_STATUS;
369 376 atag_append(chain, &ais.ais_header);
370 377 aim.aim_header.ah_size = ATAG_ILLUMOS_MAPPING_SIZE;
371 378 aim.aim_header.ah_tag = ATAG_ILLUMOS_MAPPING;
372 379 atag_append(chain, &aim.aim_header);
373 380 }
374 381
375 382 static uintptr_t
376 383 fakeload_alloc_l2pt(void)
377 384 {
378 385 uintptr_t ret;
379 386
380 387 if (pt_arena & ARMPT_L2_MASK) {
381 388 ret = pt_arena;
382 389 ret &= ~ARMPT_L2_MASK;
383 390 ret += ARMPT_L2_SIZE;
384 391 pt_arena = ret + ARMPT_L2_SIZE;
385 392 } else {
386 393 ret = pt_arena;
387 394 pt_arena = ret + ARMPT_L2_SIZE;
388 395 }
389 396 if (pt_arena >= pt_arena_max) {
390 397 fakeload_puts("pt_arena, max\n");
391 398 fakeload_ultostr(pt_arena);
392 399 fakeload_puts("\n");
393 400 fakeload_ultostr(pt_arena_max);
394 401 fakeload_puts("\n");
395 402 fakeload_puts("l2pts alloced\n");
396 403 fakeload_ultostr(nl2pages);
397 404 fakeload_puts("\n");
398 405 fakeload_panic("ran out of page tables!");
399 406 }
400 407
401 408 bzero((void *)ret, ARMPT_L2_SIZE);
402 409 nl2pages++;
403 410 return (ret);
404 411 }
405 412
406 413 /*
407 414 * Finally, do all the dirty work. Let's create some page tables. The L1 page
408 415 * table is full of 1 MB mappings by default. The L2 Page table is 1k in size
409 416 * and covers that 1 MB. We're going to always create L2 page tables for now
410 417 * which will use 4k and 64k pages.
411 418 */
412 419 static void
413 420 fakeload_map(armpte_t *pt, uintptr_t pstart, uintptr_t vstart, size_t len,
414 421 uint32_t prot)
415 422 {
416 423 int entry, chunksize;
417 424 armpte_t *pte, *l2pt;
418 425 arm_l1pt_t *l1pt;
419 426
420 427 /*
421 428 * Make sure both pstart + vstart are 4k aligned, along with len.
422 429 */
423 430 if (pstart & MMU_PAGEOFFSET)
424 431 fakeload_panic("pstart is not 4k aligned");
425 432 if (vstart & MMU_PAGEOFFSET)
426 433 fakeload_panic("vstart is not 4k aligned");
427 434 if (len & MMU_PAGEOFFSET)
428 435 fakeload_panic("len is not 4k aligned");
429 436
430 437 /*
431 438 * We're going to logically deal with each 1 MB chunk at a time.
432 439 */
433 440 while (len > 0) {
434 441 if (vstart & MMU_PAGEOFFSET1M) {
435 442 chunksize = MIN(len, MMU_PAGESIZE1M -
436 443 (vstart & MMU_PAGEOFFSET1M));
437 444 } else {
438 445 chunksize = MIN(len, MMU_PAGESIZE1M);
↓ open down ↓ |
258 lines elided |
↑ open up ↑ |
439 446 }
440 447
441 448 entry = ARMPT_VADDR_TO_L1E(vstart);
442 449 pte = &pt[entry];
443 450
444 451 if (!ARMPT_L1E_ISVALID(*pte)) {
445 452 uintptr_t l2table;
446 453
447 454 if (!(vstart & MMU_PAGEOFFSET1M) &&
448 455 !(pstart & MMU_PAGEOFFSET1M) &&
449 - len == MMU_PAGESIZE1M) {
456 + len >= MMU_PAGESIZE1M) {
450 457 fakeload_map_1mb(pstart, vstart, prot);
451 458 vstart += MMU_PAGESIZE1M;
452 459 pstart += MMU_PAGESIZE1M;
453 460 len -= MMU_PAGESIZE1M;
454 461 continue;
455 462 }
456 463
457 464 l2table = fakeload_alloc_l2pt();
458 465 *pte = 0;
459 466 l1pt = (arm_l1pt_t *)pte;
460 467 l1pt->al_type = ARMPT_L1_TYPE_L2PT;
461 468 l1pt->al_ptaddr = ARMPT_ADDR_TO_L1PTADDR(l2table);
462 469 } else if ((*pte & ARMPT_L1_TYPE_MASK) != ARMPT_L1_TYPE_L2PT) {
463 470 fakeload_panic("encountered l1 entry that's not a "
464 471 "pointer to a level 2 table\n");
465 472 } else {
466 473 l1pt = (arm_l1pt_t *)pte;
467 474 }
468 475
469 476 /* Now that we have the l1pt fill in l2 entries */
470 477 l2pt = (void *)(l1pt->al_ptaddr << ARMPT_L1PT_TO_L2_SHIFT);
471 478 len -= chunksize;
472 479 while (chunksize > 0) {
473 480 arm_l2e_t *l2pte;
474 481
475 482 entry = ARMPT_VADDR_TO_L2E(vstart);
476 483 pte = &l2pt[entry];
477 484
478 485 #ifdef MAP_DEBUG
479 486 fakeload_puts("4k page pa->va, l2root, entry\n");
480 487 fakeload_ultostr(pstart);
481 488 fakeload_puts("->");
482 489 fakeload_ultostr(vstart);
483 490 fakeload_puts(", ");
484 491 fakeload_ultostr((uintptr_t)l2pt);
485 492 fakeload_puts(", ");
486 493 fakeload_ultostr(entry);
487 494 fakeload_puts("\n");
488 495 #endif
489 496
490 497 if ((*pte & ARMPT_L2_TYPE_MASK) !=
491 498 ARMPT_L2_TYPE_INVALID)
492 499 fakeload_panic("found existing l2 page table, "
493 500 "overlap in requested mappings detected!");
494 501 /* Map vaddr to our paddr! */
495 502 l2pte = ((arm_l2e_t *)pte);
496 503 *pte = 0;
497 504 if (!(prot & PF_X))
498 505 l2pte->ale_xn = 1;
499 506 l2pte->ale_ident = 1;
500 507 if (prot & PF_DEVICE) {
501 508 l2pte->ale_bbit = 1;
502 509 l2pte->ale_cbit = 0;
503 510 l2pte->ale_tex = 0;
504 511 l2pte->ale_sbit = 1;
505 512 } else {
506 513 l2pte->ale_bbit = 1;
507 514 l2pte->ale_cbit = 1;
508 515 l2pte->ale_tex = 1;
509 516 l2pte->ale_sbit = 1;
510 517 }
511 518 if (prot & PF_W) {
512 519 l2pte->ale_ap2 = 1;
513 520 l2pte->ale_ap = 1;
514 521 } else {
515 522 l2pte->ale_ap2 = 0;
516 523 l2pte->ale_ap = 1;
517 524 }
518 525 l2pte->ale_ngbit = 0;
519 526 l2pte->ale_addr = ARMPT_PADDR_TO_L2ADDR(pstart);
520 527
521 528 chunksize -= MMU_PAGESIZE;
522 529 vstart += MMU_PAGESIZE;
523 530 pstart += MMU_PAGESIZE;
524 531 }
525 532 }
526 533 }
527 534
528 535 static void
529 536 fakeload_create_map(armpte_t *pt, atag_illumos_mapping_t *aimp)
530 537 {
531 538 #ifdef MAP_DEBUG
532 539 fakeload_puts("paddr->vaddr\n");
533 540 fakeload_ultostr(aimp->aim_paddr);
534 541 fakeload_puts("->");
535 542 fakeload_ultostr(aimp->aim_vaddr);
536 543 fakeload_puts("\n");
537 544 fakeload_puts("plen-vlen\n");
538 545 fakeload_ultostr(aimp->aim_plen);
539 546 fakeload_puts("-");
540 547 fakeload_ultostr(aimp->aim_vlen);
541 548 fakeload_puts("\n");
542 549 #endif /* MAP_DEBUG */
543 550
544 551 /*
545 552 * Can we map this in place or do we need to basically allocate a new
546 553 * region and bcopy everything into place for proper alignment?
547 554 *
548 555 * Criteria for this: we have a vlen > plen. plen is not page aligned.
549 556 */
550 557 if (aimp->aim_vlen > aimp->aim_plen ||
551 558 (aimp->aim_paddr & MMU_PAGEOFFSET) != 0) {
552 559 uintptr_t start;
553 560
554 561 if (aimp->aim_mapflags & PF_NORELOC)
555 562 fakeload_panic("tried to reloc unrelocatable mapping");
556 563 #ifdef MAP_DEBUG
557 564 FAKELOAD_DPRINTF("reloacting paddr\n");
558 565 #endif
559 566 start = freemem;
560 567 if (start & MMU_PAGEOFFSET) {
561 568 start &= MMU_PAGEMASK;
562 569 start += MMU_PAGESIZE;
563 570 }
564 571 bcopy((void *)aimp->aim_paddr, (void *)start,
565 572 aimp->aim_plen);
566 573 if (aimp->aim_vlen > aimp->aim_plen) {
567 574 bzero((void *)(start + aimp->aim_plen),
568 575 aimp->aim_vlen - aimp->aim_plen);
569 576 }
570 577 aimp->aim_paddr = start;
571 578 freemem = start + aimp->aim_vlen;
572 579 #ifdef MAP_DEBUG
573 580 fakeload_puts("new paddr: ");
574 581 fakeload_ultostr(start);
575 582 fakeload_puts("\n");
576 583 #endif /* MAP_DEBUG */
577 584 }
578 585
579 586 /*
580 587 * Now that everything has been set up, go ahead and map the new region.
581 588 */
582 589 fakeload_map(pt, aimp->aim_paddr, aimp->aim_vaddr, aimp->aim_vlen,
583 590 aimp->aim_mapflags);
584 591 #ifdef MAP_DEBUG
585 592 FAKELOAD_DPRINTF("\n");
586 593 #endif /* MAP_DEBUG */
587 594 }
588 595
589 596 void
590 597 fakeload_init(void *ident, void *ident2, void *atag)
591 598 {
592 599 atag_header_t *hdr;
593 600 atag_header_t *chain = (atag_header_t *)atag;
594 601 const atag_initrd_t *initrd;
595 602 atag_illumos_status_t *aisp;
596 603 atag_illumos_mapping_t *aimp;
597 604 uintptr_t unix_start;
598 605
599 606 fakeload_backend_init();
600 607 fakeload_puts("Hello from the loader\n");
601 608 initrd = (atag_initrd_t *)atag_find(chain, ATAG_INITRD2);
602 609 if (initrd == NULL)
603 610 fakeload_panic("missing the initial ramdisk\n");
604 611
605 612 /*
606 613 * Create the status atag header and the initial mapping record for the
607 614 * atags. We'll hold onto both of these.
608 615 */
609 616 fakeload_mkatags(chain);
610 617 aisp = (atag_illumos_status_t *)atag_find(chain, ATAG_ILLUMOS_STATUS);
611 618 if (aisp == NULL)
612 619 fakeload_panic("can't find ATAG_ILLUMOS_STATUS");
613 620 aimp = (atag_illumos_mapping_t *)atag_find(chain, ATAG_ILLUMOS_MAPPING);
614 621 if (aimp == NULL)
615 622 fakeload_panic("can't find ATAG_ILLUMOS_MAPPING");
616 623 FAKELOAD_DPRINTF("created proto atags\n");
617 624
618 625 fakeload_pt_arena_init(initrd);
619 626
620 627 fakeload_selfmap(chain);
621 628
622 629 /*
623 630 * Map the boot archive and all of unix
624 631 */
625 632 unix_start = fakeload_archive_mappings(chain,
626 633 (const void *)(uintptr_t)initrd->ai_start, aisp);
627 634 FAKELOAD_DPRINTF("filled out unix and the archive's mappings\n");
628 635
629 636 /*
630 637 * Fill in the atag mapping header for the atags themselves. 1:1 map it.
631 638 */
632 639 aimp->aim_paddr = (uintptr_t)chain & ~0xfff;
633 640 aimp->aim_plen = atag_length(chain) & ~0xfff;
634 641 aimp->aim_plen += 0x1000;
635 642 aimp->aim_vaddr = aimp->aim_paddr;
636 643 aimp->aim_vlen = aimp->aim_plen;
637 644 aimp->aim_mapflags = PF_R | PF_W | PF_NORELOC;
638 645
639 646 /*
640 647 * Let the backend add mappings
641 648 */
642 649 fakeload_backend_addmaps(chain);
643 650
644 651 /*
645 652 * Turn on unaligned access
646 653 */
647 654 FAKELOAD_DPRINTF("turning on unaligned access\n");
648 655 fakeload_unaligned_enable();
649 656 FAKELOAD_DPRINTF("successfully enabled unaligned access\n");
650 657
651 658 /*
652 659 * To turn on the MMU we need to do the following:
653 660 * o Program all relevant CP15 registers
654 661 * o Program 1st and 2nd level page tables
655 662 * o Invalidate and Disable the I/D-cache
656 663 * o Fill in the last bits of the ATAG_ILLUMOS_STATUS atag
657 664 * o Turn on the MMU in SCTLR
658 665 * o Jump to unix
659 666 */
660 667
661 668 /* Last bits of the atag */
662 669 aisp->ais_freemem = freemem;
663 670 aisp->ais_version = 1;
664 671 aisp->ais_ptbase = (uintptr_t)pt_addr;
665 672
666 673 /*
667 674 * Our initial page table is a series of 1 MB sections. While we really
668 675 * should map 4k pages, for the moment we're just going to map 1 MB
669 676 * regions, yay team!
670 677 */
671 678 hdr = chain;
672 679 FAKELOAD_DPRINTF("creating mappings\n");
673 680 while (hdr != NULL) {
674 681 if (hdr->ah_tag == ATAG_ILLUMOS_MAPPING)
675 682 fakeload_create_map(pt_addr,
676 683 (atag_illumos_mapping_t *)hdr);
677 684 hdr = atag_next(hdr);
678 685 }
679 686
680 687 /*
681 688 * Now that we've mapped everything, update the status atag.
682 689 */
683 690 aisp->ais_freeused = freemem - aisp->ais_freemem;
684 691 aisp->ais_pt_arena = pt_arena;
685 692 aisp->ais_pt_arena_max = pt_arena_max;
686 693
687 694 /* Cache disable */
688 695 FAKELOAD_DPRINTF("Flushing and disabling caches\n");
689 696 armv6_dcache_flush();
690 697 armv6_dcache_disable();
691 698 armv6_dcache_inval();
692 699 armv6_icache_disable();
693 700 armv6_icache_inval();
694 701
↓ open down ↓ |
235 lines elided |
↑ open up ↑ |
695 702 /* Program the page tables */
696 703 FAKELOAD_DPRINTF("programming cp15 regs\n");
697 704 fakeload_pt_setup((uintptr_t)pt_addr);
698 705
699 706
700 707 /* MMU Enable */
701 708 FAKELOAD_DPRINTF("see you on the other side\n");
702 709 fakeload_mmu_enable();
703 710
704 711 FAKELOAD_DPRINTF("why helo thar\n");
705 -
706 - /* Renable caches */
707 - armv6_dcache_enable();
708 - armv6_icache_enable();
709 712
710 713 /* we should never come back */
711 714 fakeload_exec(ident, ident2, chain, unix_start);
712 715 fakeload_panic("hit the end of the world\n");
713 716 }
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX