1 /*
   2  * This file and its contents are supplied under the terms of the
   3  * Common Development and Distribution License ("CDDL"), version 1.0.
   4  * You may only use this file in accordance with the terms of version
   5  * 1.0 of the CDDL.
   6  *
   7  * A full copy of the text of the CDDL should have accompanied this
   8  * source.  A copy of the CDDL is also available via the Internet at
   9  * http://www.illumos.org/license/CDDL.
  10  */
  11 
  12 /*
  13  * Copyright (c) 2014 Joyent, Inc.  All rights reserved.
  14  * Copyright (c) 2015 Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
  15  */
  16 
  17 #include "fakeloader.h"
  18 
  19 #include <sys/types.h>
  20 #include <sys/param.h>
  21 #include <sys/elf.h>
  22 #include <sys/atag.h>
  23 #include <sys/sysmacros.h>
  24 #include <sys/machparam.h>
  25 
  26 #include <vm/pte.h>
  27 
  28 /*
  29  * This is the stock ARM fake uniboot loader.
  30  *
  31  * Here's what we have to do:
  32  *   o Read the atag header and find the combined archive header
  33  *   o Determine the set of mappings we need to add for the following:
  34  *              - unix
  35  *              - boot_archive
  36  *              - atags
  37  *   o Enable unaligned access
  38  *   o Enable the caches + virtual memory
  39  *
  40  * There are several important constraints that we have here:
  41  *
  42  *   o We cannot use any .data! Several loaders that come before us are broken
  43  *     and only provide us with the ability to map our .text and potentially our
  44  *     .bss. We should strive to avoid even that if we can.
  45  */
  46 
  47 #ifdef  DEBUG
  48 #define FAKELOAD_DPRINTF(x)     fakeload_puts(x)
  49 #else
  50 #define FAKELOAD_DPRINTF(x)
  51 #endif  /* DEBUG */
  52 
/*
 * XXX Assumes that free memory immediately follows the boot archive.
 */
  56 static uintptr_t freemem;
  57 static uintptr_t pt_arena;
  58 static uintptr_t pt_arena_max;
  59 static uint32_t *pt_addr;
  60 static int nl2pages;
  61 
  62 /* Simple copy routines */
  63 void
  64 bcopy(const void *s, void *d, size_t n)
  65 {
  66         const char *src = s;
  67         char *dest = d;
  68 
  69         if (n == 0 || s == d)
  70                 return;
  71 
  72         if (dest < src && dest + n < src) {
  73                 /* dest overlaps with the start of src, copy forward */
  74                 for (; n > 0; n--, src++, dest++)
  75                         *dest = *src;
  76         } else {
  77                 /* src overlaps with start of dest or no overlap, copy rev */
  78                 src += n - 1;
  79                 dest += n - 1;
  80                 for (; n > 0; n--, src--, dest--)
  81                         *dest = *src;
  82         }
  83 }
  84 
  85 void
  86 bzero(void *s, size_t n)
  87 {
  88         char *c = s;
  89         while (n > 0) {
  90                 *c = 0;
  91                 c++;
  92                 n--;
  93         }
  94 }
  95 
  96 static void
  97 fakeload_puts(const char *str)
  98 {
  99         while (*str != '\0') {
 100                 fakeload_backend_putc(*str);
 101                 str++;
 102         }
 103 }
 104 
 105 static void
 106 fakeload_panic(const char *reason)
 107 {
 108         fakeload_puts("panic!\n");
 109         fakeload_puts(reason);
 110         fakeload_puts("\n");
 111         fakeload_puts("spinning forever... goodbye...\n");
 112         for (;;)
 113                 ;
 114 }
 115 
 116 static void
 117 fakeload_ultostr(unsigned long value)
 118 {
 119         char buf[16];
 120         ulong_t t, val = (ulong_t)value;
 121         char c;
 122         char *ptr = &(buf[14]);
 123         buf[15] = '\0';
 124 
 125         do {
 126                 c = (char)('0' + val - 16 * (t = (val >> 4)));
 127                 if (c > '9')
 128                         c += 'A' - '9' - 1;
 129                 *--ptr = c;
 130         } while ((val = t) != 0);
 131 
 132         *--ptr = 'x';
 133         *--ptr = '0';
 134         fakeload_puts(ptr);
 135 }
 136 
 137 static void
 138 fakeload_selfmap(atag_header_t *chain)
 139 {
 140         atag_illumos_mapping_t aim;
 141 
 142         aim.aim_header.ah_size = ATAG_ILLUMOS_MAPPING_SIZE;
 143         aim.aim_header.ah_tag = ATAG_ILLUMOS_MAPPING;
 144         aim.aim_paddr = 0x7000;
 145         aim.aim_vaddr = aim.aim_paddr;
 146         aim.aim_plen = 0x3000;
 147         aim.aim_vlen = aim.aim_plen;
 148         aim.aim_mapflags = PF_R | PF_X | PF_LOADER;
 149         atag_append(chain, &aim.aim_header);
 150 }
 151 
/*
 * Install a single 1 MB section mapping (pa -> va) directly in the L1
 * page table at pt_addr. Panics if the L1 entry is already valid, since
 * that would silently shadow an existing mapping.
 *
 * NOTE(review): memory attribute bits (bbit/cbit/tex/sbit) are set for
 * normal cacheable memory — there is no PF_DEVICE handling here, unlike
 * the 4k path in fakeload_map(); confirm callers never need device
 * sections.
 */
static void
fakeload_map_1mb(uintptr_t pa, uintptr_t va, int prot)
{
	int entry;
	armpte_t *pte;
	arm_l1s_t *l1e;

	entry = ARMPT_VADDR_TO_L1E(va);
	pte = &pt_addr[entry];
	if (ARMPT_L1E_ISVALID(*pte))
		fakeload_panic("armboot_mmu: asked to map a mapped region!\n");
	/* Clear the descriptor, then build it up field by field */
	l1e = (arm_l1s_t *)pte;
	*pte = 0;
	l1e->al_type = ARMPT_L1_TYPE_SECT;
	/* Assume it's not device memory */
	l1e->al_bbit = 1;
	l1e->al_cbit = 1;
	l1e->al_tex = 1;
	l1e->al_sbit = 1;

	/* Mark execute-never unless the caller asked for PF_X */
	if (!(prot & PF_X))
		l1e->al_xn = 1;
	l1e->al_domain = 0;

	/* AP bits: ap2=0/ap=1 is read-write is not the case here —
	 * ap2=1/ap=1 encodes read-only, ap2=0/ap=1 read-write */
	if (prot & PF_W) {
		l1e->al_ap2 = 1;
		l1e->al_ap = 1;
	} else {
		l1e->al_ap2 = 0;
		l1e->al_ap = 1;
	}
	l1e->al_ngbit = 0;
	l1e->al_issuper = 0;
	l1e->al_addr = ARMPT_PADDR_TO_L1SECT(pa);
}
 187 
 188 /*
 189  * Set freemem to be 1 MB aligned at the end of boot archive. While the L1 Page
 190  * table only needs to be 16 KB aligned, we opt for 1 MB alignment so that way
 191  * we can map it and all the other L2 page tables we might need. If we don't do
 192  * this, it'll become problematic for unix to actually modify this.
 193  */
 194 static void
 195 fakeload_pt_arena_init(const atag_initrd_t *aii)
 196 {
 197         int entry, i;
 198         armpte_t *pte;
 199         arm_l1s_t *l1e;
 200 
 201         pt_arena = aii->ai_start + aii->ai_size;
 202         if (pt_arena & MMU_PAGEOFFSET1M) {
 203                 pt_arena &= MMU_PAGEMASK1M;
 204                 pt_arena += MMU_PAGESIZE1M;
 205         }
 206         pt_arena_max = pt_arena + 4 * MMU_PAGESIZE1M;
 207         freemem = pt_arena_max;
 208 
 209         /* Set up the l1 page table by first invalidating it */
 210         pt_addr = (armpte_t *)pt_arena;
 211         pt_arena += ARMPT_L1_SIZE;
 212         bzero(pt_addr, ARMPT_L1_SIZE);
 213         for (i = 0; i < 4; i++)
 214                 fakeload_map_1mb((uintptr_t)pt_addr + i * MMU_PAGESIZE1M,
 215                     (uintptr_t)pt_addr + i * MMU_PAGESIZE1M,
 216                     PF_R | PF_W);
 217 }
 218 
/*
 * This is our general entry point for processing the archive. We're passed
 * the address of the fakeloader header; we emit mapping atags for unix's
 * loadable segments and the boot archive, and return unix's entry point.
 */
static uintptr_t
fakeload_archive_mappings(atag_header_t *chain, const void *addr,
    atag_illumos_status_t *aisp)
{
	atag_illumos_mapping_t aim;
	fakeloader_hdr_t *hdr;
	Elf32_Ehdr *ehdr;
	Elf32_Phdr *phdr;
	int nhdrs, i;
	uintptr_t ret;
	uintptr_t text = 0, data = 0;
	size_t textln = 0, dataln = 0;

	hdr = (fakeloader_hdr_t *)addr;

	/* Validate the fakeloader magic bytes */
	if (hdr->fh_magic[0] != FH_MAGIC0)
		fakeload_panic("fh_magic[0] is wrong!\n");
	if (hdr->fh_magic[1] != FH_MAGIC1)
		fakeload_panic("fh_magic[1] is wrong!\n");
	if (hdr->fh_magic[2] != FH_MAGIC2)
		fakeload_panic("fh_magic[2] is wrong!\n");
	if (hdr->fh_magic[3] != FH_MAGIC3)
		fakeload_panic("fh_magic[3] is wrong!\n");

	/* Both unix and the boot archive must actually be present */
	if (hdr->fh_unix_size == 0)
		fakeload_panic("hdr unix size is zero\n");
	if (hdr->fh_unix_offset == 0)
		fakeload_panic("hdr unix offset is zero\n");
	if (hdr->fh_archive_size == 0)
		fakeload_panic("hdr archive size is zero\n");
	if (hdr->fh_archive_offset == 0)
		fakeload_panic("hdr archive_offset is zero\n");

	ehdr = (Elf32_Ehdr *)((uintptr_t)addr + hdr->fh_unix_offset);

	/* Sanity-check unix's ELF header: 32-bit LSB ARM executable */
	if (ehdr->e_ident[EI_MAG0] != ELFMAG0)
		fakeload_panic("magic[0] wrong");
	if (ehdr->e_ident[EI_MAG1] != ELFMAG1)
		fakeload_panic("magic[1] wrong");
	if (ehdr->e_ident[EI_MAG2] != ELFMAG2)
		fakeload_panic("magic[2] wrong");
	if (ehdr->e_ident[EI_MAG3] != ELFMAG3)
		fakeload_panic("magic[3] wrong");
	if (ehdr->e_ident[EI_CLASS] != ELFCLASS32)
		fakeload_panic("wrong elfclass");
	if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB)
		fakeload_panic("wrong encoding");
	if (ehdr->e_ident[EI_OSABI] != ELFOSABI_SOLARIS)
		fakeload_panic("wrong os abi");
	if (ehdr->e_ident[EI_ABIVERSION] != EAV_SUNW_CURRENT)
		fakeload_panic("wrong abi version");
	if (ehdr->e_type != ET_EXEC)
		fakeload_panic("unix is not an executable");
	if (ehdr->e_machine != EM_ARM)
		fakeload_panic("unix is not an ARM Executible");
	if (ehdr->e_version != EV_CURRENT)
		fakeload_panic("wrong version");
	if (ehdr->e_phnum == 0)
		fakeload_panic("no program headers");
	/* This is what we eventually return: unix's entry point */
	ret = ehdr->e_entry;

	FAKELOAD_DPRINTF("validated unix's headers\n");

	/* Walk the program headers and emit a mapping atag per PT_LOAD */
	nhdrs = ehdr->e_phnum;
	phdr = (Elf32_Phdr *)((uintptr_t)addr + hdr->fh_unix_offset +
	    ehdr->e_phoff);
	for (i = 0; i < nhdrs; i++, phdr++) {
		if (phdr->p_type != PT_LOAD) {
			fakeload_puts("skipping non-PT_LOAD header\n");
			continue;
		}

		if (phdr->p_filesz == 0 || phdr->p_memsz == 0) {
			fakeload_puts("skipping PT_LOAD with 0 file/mem\n");
			continue;
		}

		/*
		 * Create a mapping record for this in the atags.
		 */
		aim.aim_header.ah_size = ATAG_ILLUMOS_MAPPING_SIZE;
		aim.aim_header.ah_tag = ATAG_ILLUMOS_MAPPING;
		/* Physical source is the segment's bytes inside the archive */
		aim.aim_paddr = (uintptr_t)addr + hdr->fh_unix_offset +
		    phdr->p_offset;
		aim.aim_plen = phdr->p_filesz;
		aim.aim_vaddr = phdr->p_vaddr;
		aim.aim_vlen = phdr->p_memsz;
		/* Round up vlen to be a multiple of 4k */
		if (aim.aim_vlen & 0xfff) {
			aim.aim_vlen &= ~0xfff;
			aim.aim_vlen += 0x1000;
		}
		aim.aim_mapflags = phdr->p_flags;
		atag_append(chain, &aim.aim_header);

		/*
		 * When built with highvecs we need to account for the fact that
		 * _edata, _etext and _end are built assuming that the highvecs
		 * are normally part of our segments. ld is not doing anything
		 * wrong, but this breaks the assumptions that krtld currently
		 * has. As such, unix will use this information to overwrite the
		 * normal entry points that krtld uses in a similar style to
		 * SPARC.
		 */
		if (aim.aim_vaddr != 0xffff0000) {
			/* Writable segment => data; otherwise text */
			if ((phdr->p_flags & PF_W) != 0) {
				data = aim.aim_vaddr;
				dataln = aim.aim_vlen;
			} else {
				text = aim.aim_vaddr;
				textln = aim.aim_vlen;
			}
		}
	}

	/* Record segment extents in the status atag for unix/krtld */
	aisp->ais_stext = text;
	aisp->ais_etext = text + textln;
	aisp->ais_sdata = data;
	aisp->ais_edata = data + dataln;

	/* 1:1 map the boot archive */
	aim.aim_header.ah_size = ATAG_ILLUMOS_MAPPING_SIZE;
	aim.aim_header.ah_tag = ATAG_ILLUMOS_MAPPING;
	aim.aim_paddr = (uintptr_t)addr + hdr->fh_archive_offset;
	aim.aim_plen = hdr->fh_archive_size;
	aim.aim_vaddr = aim.aim_paddr;
	aim.aim_vlen = aim.aim_plen;
	aim.aim_mapflags = PF_R | PF_W | PF_X;
	atag_append(chain, &aim.aim_header);
	aisp->ais_archive = aim.aim_paddr;
	aisp->ais_archivelen = aim.aim_plen;

	return (ret);
}
 357 
 358 static void
 359 fakeload_mkatags(atag_header_t *chain)
 360 {
 361         atag_illumos_status_t ais;
 362         atag_illumos_mapping_t aim;
 363 
 364         bzero(&ais, sizeof (ais));
 365         bzero(&aim, sizeof (aim));
 366 
 367         ais.ais_header.ah_size = ATAG_ILLUMOS_STATUS_SIZE;
 368         ais.ais_header.ah_tag = ATAG_ILLUMOS_STATUS;
 369         atag_append(chain, &ais.ais_header);
 370         aim.aim_header.ah_size = ATAG_ILLUMOS_MAPPING_SIZE;
 371         aim.aim_header.ah_tag = ATAG_ILLUMOS_MAPPING;
 372         atag_append(chain, &aim.aim_header);
 373 }
 374 
 375 static uintptr_t
 376 fakeload_alloc_l2pt(void)
 377 {
 378         uintptr_t ret;
 379 
 380         if (pt_arena & ARMPT_L2_MASK) {
 381                 ret = pt_arena;
 382                 ret &= ~ARMPT_L2_MASK;
 383                 ret += ARMPT_L2_SIZE;
 384                 pt_arena = ret + ARMPT_L2_SIZE;
 385         } else {
 386                 ret = pt_arena;
 387                 pt_arena = ret + ARMPT_L2_SIZE;
 388         }
 389         if (pt_arena >= pt_arena_max) {
 390                 fakeload_puts("pt_arena, max\n");
 391                 fakeload_ultostr(pt_arena);
 392                 fakeload_puts("\n");
 393                 fakeload_ultostr(pt_arena_max);
 394                 fakeload_puts("\n");
 395                 fakeload_puts("l2pts alloced\n");
 396                 fakeload_ultostr(nl2pages);
 397                 fakeload_puts("\n");
 398                 fakeload_panic("ran out of page tables!");
 399         }
 400 
 401         bzero((void *)ret, ARMPT_L2_SIZE);
 402         nl2pages++;
 403         return (ret);
 404 }
 405 
 406 /*
 407  * Finally, do all the dirty work. Let's create some page tables. The L1 page
 408  * table is full of 1 MB mappings by default. The L2 Page table is 1k in size
 409  * and covers that 1 MB. We're going to always create L2 page tables for now
 410  * which will use 4k and 64k pages.
 411  */
static void
fakeload_map(armpte_t *pt, uintptr_t pstart, uintptr_t vstart, size_t len,
    uint32_t prot)
{
	int entry, chunksize;
	armpte_t *pte, *l2pt;
	arm_l1pt_t *l1pt;

	/*
	 * Make sure both pstart + vstart are 4k aligned, along with len.
	 */
	if (pstart & MMU_PAGEOFFSET)
		fakeload_panic("pstart is not 4k aligned");
	if (vstart & MMU_PAGEOFFSET)
		fakeload_panic("vstart is not 4k aligned");
	if (len & MMU_PAGEOFFSET)
		fakeload_panic("len is not 4k aligned");

	/*
	 * We're going to logically deal with each 1 MB chunk at a time.
	 */
	while (len > 0) {
		/* Clamp the chunk to the end of the current 1 MB region */
		if (vstart & MMU_PAGEOFFSET1M) {
			chunksize = MIN(len, MMU_PAGESIZE1M -
			    (vstart & MMU_PAGEOFFSET1M));
		} else {
			chunksize = MIN(len, MMU_PAGESIZE1M);
		}

		entry = ARMPT_VADDR_TO_L1E(vstart);
		pte = &pt[entry];

		if (!ARMPT_L1E_ISVALID(*pte)) {
			uintptr_t l2table;

			/*
			 * Fast path: an exactly 1 MB, 1 MB-aligned request
			 * gets a single L1 section instead of an L2 table.
			 */
			if (!(vstart & MMU_PAGEOFFSET1M) &&
			    !(pstart & MMU_PAGEOFFSET1M) &&
			    len == MMU_PAGESIZE1M) {
				fakeload_map_1mb(pstart, vstart, prot);
				vstart += MMU_PAGESIZE1M;
				pstart += MMU_PAGESIZE1M;
				len -= MMU_PAGESIZE1M;
				continue;
			}

			/* Otherwise hang a fresh L2 table off this L1 slot */
			l2table = fakeload_alloc_l2pt();
			*pte = 0;
			l1pt = (arm_l1pt_t *)pte;
			l1pt->al_type = ARMPT_L1_TYPE_L2PT;
			l1pt->al_ptaddr = ARMPT_ADDR_TO_L1PTADDR(l2table);
		} else if ((*pte & ARMPT_L1_TYPE_MASK) != ARMPT_L1_TYPE_L2PT) {
			fakeload_panic("encountered l1 entry that's not a "
			    "pointer to a level 2 table\n");
		} else {
			/* Reuse the existing L2 table for this region */
			l1pt = (arm_l1pt_t *)pte;
		}

		/* Now that we have the l1pt fill in l2 entries */
		l2pt = (void *)(l1pt->al_ptaddr << ARMPT_L1PT_TO_L2_SHIFT);
		len -= chunksize;
		/* Fill the chunk with 4k small-page entries */
		while (chunksize > 0) {
			arm_l2e_t *l2pte;

			entry = ARMPT_VADDR_TO_L2E(vstart);
			pte = &l2pt[entry];

#ifdef	MAP_DEBUG
			fakeload_puts("4k page pa->va, l2root, entry\n");
			fakeload_ultostr(pstart);
			fakeload_puts("->");
			fakeload_ultostr(vstart);
			fakeload_puts(", ");
			fakeload_ultostr((uintptr_t)l2pt);
			fakeload_puts(", ");
			fakeload_ultostr(entry);
			fakeload_puts("\n");
#endif

			/* A valid entry here means two mappings collide */
			if ((*pte & ARMPT_L2_TYPE_MASK) !=
			    ARMPT_L2_TYPE_INVALID)
				fakeload_panic("found existing l2 page table, "
				    "overlap in requested mappings detected!");
			/* Map vaddr to our paddr! */
			l2pte = ((arm_l2e_t *)pte);
			*pte = 0;
			if (!(prot & PF_X))
				l2pte->ale_xn = 1;
			l2pte->ale_ident = 1;
			/* Memory attributes: device vs. normal cacheable */
			if (prot & PF_DEVICE) {
				l2pte->ale_bbit = 1;
				l2pte->ale_cbit = 0;
				l2pte->ale_tex = 0;
				l2pte->ale_sbit = 1;
			} else {
				l2pte->ale_bbit = 1;
				l2pte->ale_cbit = 1;
				l2pte->ale_tex = 1;
				l2pte->ale_sbit = 1;
			}
			/* Access permissions: read-write vs. read-only */
			if (prot & PF_W) {
				l2pte->ale_ap2 = 1;
				l2pte->ale_ap = 1;
			} else {
				l2pte->ale_ap2 = 0;
				l2pte->ale_ap = 1;
			}
			l2pte->ale_ngbit = 0;
			l2pte->ale_addr = ARMPT_PADDR_TO_L2ADDR(pstart);

			chunksize -= MMU_PAGESIZE;
			vstart += MMU_PAGESIZE;
			pstart += MMU_PAGESIZE;
		}
	}
}
 527 
 528 static void
 529 fakeload_create_map(armpte_t *pt, atag_illumos_mapping_t *aimp)
 530 {
 531 #ifdef MAP_DEBUG
 532         fakeload_puts("paddr->vaddr\n");
 533         fakeload_ultostr(aimp->aim_paddr);
 534         fakeload_puts("->");
 535         fakeload_ultostr(aimp->aim_vaddr);
 536         fakeload_puts("\n");
 537         fakeload_puts("plen-vlen\n");
 538         fakeload_ultostr(aimp->aim_plen);
 539         fakeload_puts("-");
 540         fakeload_ultostr(aimp->aim_vlen);
 541         fakeload_puts("\n");
 542 #endif /* MAP_DEBUG */
 543 
 544         /*
 545          * Can we map this in place or do we need to basically allocate a new
 546          * region and bcopy everything into place for proper alignment?
 547          *
 548          * Criteria for this: we have a vlen > plen. plen is not page aligned.
 549          */
 550         if (aimp->aim_vlen > aimp->aim_plen ||
 551             (aimp->aim_paddr & MMU_PAGEOFFSET) != 0) {
 552                 uintptr_t start;
 553 
 554                 if (aimp->aim_mapflags & PF_NORELOC)
 555                         fakeload_panic("tried to reloc unrelocatable mapping");
 556 #ifdef  MAP_DEBUG
 557                 FAKELOAD_DPRINTF("reloacting paddr\n");
 558 #endif
 559                 start = freemem;
 560                 if (start & MMU_PAGEOFFSET) {
 561                         start &= MMU_PAGEMASK;
 562                         start += MMU_PAGESIZE;
 563                 }
 564                 bcopy((void *)aimp->aim_paddr, (void *)start,
 565                     aimp->aim_plen);
 566                 if (aimp->aim_vlen > aimp->aim_plen) {
 567                         bzero((void *)(start + aimp->aim_plen),
 568                             aimp->aim_vlen - aimp->aim_plen);
 569                 }
 570                 aimp->aim_paddr = start;
 571                 freemem = start + aimp->aim_vlen;
 572 #ifdef MAP_DEBUG
 573                 fakeload_puts("new paddr: ");
 574                 fakeload_ultostr(start);
 575                 fakeload_puts("\n");
 576 #endif /* MAP_DEBUG */
 577         }
 578 
 579         /*
 580          * Now that everything has been set up, go ahead and map the new region.
 581          */
 582         fakeload_map(pt, aimp->aim_paddr, aimp->aim_vaddr, aimp->aim_vlen,
 583             aimp->aim_mapflags);
 584 #ifdef MAP_DEBUG
 585         FAKELOAD_DPRINTF("\n");
 586 #endif /* MAP_DEBUG */
 587 }
 588 
/*
 * Main entry point of the fake loader. Builds the mapping atags for
 * unix, the boot archive, the loader itself and the atag chain, sets up
 * the page tables, then enables the MMU and caches before jumping to
 * unix via fakeload_exec(). This function never returns.
 */
void
fakeload_init(void *ident, void *ident2, void *atag)
{
	atag_header_t *hdr;
	atag_header_t *chain = (atag_header_t *)atag;
	const atag_initrd_t *initrd;
	atag_illumos_status_t *aisp;
	atag_illumos_mapping_t *aimp;
	uintptr_t unix_start;

	fakeload_backend_init();
	fakeload_puts("Hello from the loader\n");
	/* The boot archive arrives as an ATAG_INITRD2 record */
	initrd = (atag_initrd_t *)atag_find(chain, ATAG_INITRD2);
	if (initrd == NULL)
		fakeload_panic("missing the initial ramdisk\n");

	/*
	 * Create the status atag header and the initial mapping record for the
	 * atags. We'll hold onto both of these.
	 */
	fakeload_mkatags(chain);
	aisp = (atag_illumos_status_t *)atag_find(chain, ATAG_ILLUMOS_STATUS);
	if (aisp == NULL)
		fakeload_panic("can't find ATAG_ILLUMOS_STATUS");
	aimp = (atag_illumos_mapping_t *)atag_find(chain, ATAG_ILLUMOS_MAPPING);
	if (aimp == NULL)
		fakeload_panic("can't find ATAG_ILLUMOS_MAPPING");
	FAKELOAD_DPRINTF("created proto atags\n");

	/* Set up the page-table arena past the end of the boot archive */
	fakeload_pt_arena_init(initrd);

	/* Record a mapping for the loader's own text */
	fakeload_selfmap(chain);

	/*
	 * Map the boot archive and all of unix
	 */
	unix_start = fakeload_archive_mappings(chain,
	    (const void *)(uintptr_t)initrd->ai_start, aisp);
	FAKELOAD_DPRINTF("filled out unix and the archive's mappings\n");

	/*
	 * Fill in the atag mapping header for the atags themselves. 1:1 map it.
	 */
	aimp->aim_paddr = (uintptr_t)chain & ~0xfff;
	aimp->aim_plen = atag_length(chain) & ~0xfff;
	aimp->aim_plen += 0x1000;
	aimp->aim_vaddr = aimp->aim_paddr;
	aimp->aim_vlen = aimp->aim_plen;
	aimp->aim_mapflags = PF_R | PF_W | PF_NORELOC;

	/*
	 * Let the backend add mappings
	 */
	fakeload_backend_addmaps(chain);

	/*
	 * Turn on unaligned access
	 */
	FAKELOAD_DPRINTF("turning on unaligned access\n");
	fakeload_unaligned_enable();
	FAKELOAD_DPRINTF("successfully enabled unaligned access\n");

	/*
	 * To turn on the MMU we need to do the following:
	 *  o Program all relevant CP15 registers
	 *  o Program 1st and 2nd level page tables
	 *  o Invalidate and Disable the I/D-cache
	 *  o Fill in the last bits of the ATAG_ILLUMOS_STATUS atag
	 *  o Turn on the MMU in SCTLR
	 *  o Jump to unix
	 */

	/* Last bits of the atag */
	aisp->ais_freemem = freemem;
	aisp->ais_version = 1;
	aisp->ais_ptbase = (uintptr_t)pt_addr;

	/*
	 * Our initial page table is a series of 1 MB sections. While we really
	 * should map 4k pages, for the moment we're just going to map 1 MB
	 * regions, yay team!
	 */
	hdr = chain;
	FAKELOAD_DPRINTF("creating mappings\n");
	/* Walk the chain and realize every mapping record we accumulated */
	while (hdr != NULL) {
		if (hdr->ah_tag == ATAG_ILLUMOS_MAPPING)
			fakeload_create_map(pt_addr,
			    (atag_illumos_mapping_t *)hdr);
		hdr = atag_next(hdr);
	}

	/*
	 * Now that we've mapped everything, update the status atag.
	 */
	aisp->ais_freeused = freemem - aisp->ais_freemem;
	aisp->ais_pt_arena = pt_arena;
	aisp->ais_pt_arena_max = pt_arena_max;

	/* Cache disable */
	FAKELOAD_DPRINTF("Flushing and disabling caches\n");
	armv6_dcache_flush();
	armv6_dcache_disable();
	armv6_dcache_inval();
	armv6_icache_disable();
	armv6_icache_inval();

	/* Program the page tables */
	FAKELOAD_DPRINTF("programming cp15 regs\n");
	fakeload_pt_setup((uintptr_t)pt_addr);


	/* MMU Enable */
	FAKELOAD_DPRINTF("see you on the other side\n");
	fakeload_mmu_enable();

	FAKELOAD_DPRINTF("why helo thar\n");

	/* Renable caches */
	armv6_dcache_enable();
	armv6_icache_enable();

	/* we should never come back */
	fakeload_exec(ident, ident2, chain, unix_start);
	fakeload_panic("hit the end of the world\n");
}