1 /*
   2  * This file and its contents are supplied under the terms of the
   3  * Common Development and Distribution License ("CDDL"), version 1.0.
   4  * You may only use this file in accordance with the terms of version
   5  * 1.0 of the CDDL.
   6  *
   7  * A full copy of the text of the CDDL should have accompanied this
   8  * source.  A copy of the CDDL is also available via the Internet at
   9  * http://www.illumos.org/license/CDDL.
  10  */
  11 
  12 /*
  13  * Copyright (c) 2014 Joyent, Inc.  All rights reserved.
  14  */
  15 
  16 #include "fakeloader.h"
  17 
  18 #include <sys/types.h>
  19 #include <sys/param.h>
  20 #include <sys/elf.h>
  21 #include <sys/atag.h>
  22 #include <sys/sysmacros.h>
  23 #include <sys/machparam.h>
  24 
  25 #include <vm/pte.h>
  26 
  27 /*
  28  * This is the stock ARM fake uniboot loader.
  29  *
  30  * Here's what we have to do:
  31  *   o Read the atag header and find the combined archive header
  32  *   o Determine the set of mappings we need to add for the following:
  33  *              - unix
  34  *              - boot_archive
  35  *              - atags
  36  *   o Enable unaligned access
  37  *   o Enable the caches + virtual memory
  38  *
  39  * There are several important constraints that we have here:
  40  *
  41  *   o We cannot use any .data! Several loaders that come before us are broken
  42  *     and only provide us with the ability to map our .text and potentially our
  43  *     .bss. We should strive to avoid even that if we can.
  44  */
  45 
  46 #ifdef  DEBUG
  47 #define FAKELOAD_DPRINTF(x)     fakeload_puts(x)
  48 #else
  49 #define FAKELOAD_DPRINTF(x)
  50 #endif  /* DEBUG */
  51 
  52 /*
  53  * XXX ASSUMES WE HAVE Free memory following the boot archive
  54  */
  55 static uintptr_t freemem;
  56 static uintptr_t pt_arena;
  57 static uintptr_t pt_arena_max;
  58 static uint32_t *pt_addr;
  59 static int nl2pages;
  60 
  61 /* Simple copy routines */
  62 void
  63 bcopy(const void *s, void *d, size_t n)
  64 {
  65         const char *src = s;
  66         char *dest = d;
  67 
  68         if (n == 0 || s == d)
  69                 return;
  70 
  71         if (dest < src && dest + n < src) {
  72                 /* dest overlaps with the start of src, copy forward */
  73                 for (; n > 0; n--, src++, dest++)
  74                         *dest = *src;
  75         } else {
  76                 /* src overlaps with start of dest or no overlap, copy rev */
  77                 src += n - 1;
  78                 dest += n - 1;
  79                 for (; n > 0; n--, src--, dest--)
  80                         *dest = *src;
  81         }
  82 }
  83 
  84 void
  85 bzero(void *s, size_t n)
  86 {
  87         char *c = s;
  88         while (n > 0) {
  89                 *c = 0;
  90                 c++;
  91                 n--;
  92         }
  93 }
  94 
  95 static void
  96 fakeload_puts(const char *str)
  97 {
  98         while (*str != '\0') {
  99                 fakeload_backend_putc(*str);
 100                 str++;
 101         }
 102 }
 103 
 104 static void
 105 fakeload_panic(const char *reason)
 106 {
 107         fakeload_puts("panic!\n");
 108         fakeload_puts(reason);
 109         fakeload_puts("\n");
 110         fakeload_puts("spinning forever... goodbye...\n");
 111         for (;;)
 112                 ;
 113 }
 114 
 115 static void
 116 fakeload_ultostr(unsigned long value)
 117 {
 118         char buf[16];
 119         ulong_t t, val = (ulong_t)value;
 120         char c;
 121         char *ptr = &(buf[14]);
 122         buf[15] = '\0';
 123 
 124         do {
 125                 c = (char)('0' + val - 16 * (t = (val >> 4)));
 126                 if (c > '9')
 127                         c += 'A' - '9' - 1;
 128                 *--ptr = c;
 129         } while ((val = t) != 0);
 130 
 131         *--ptr = 'x';
 132         *--ptr = '0';
 133         fakeload_puts(ptr);
 134 }
 135 
 136 static void
 137 fakeload_selfmap(atag_header_t *chain)
 138 {
 139         atag_illumos_mapping_t aim;
 140 
 141         aim.aim_header.ah_size = ATAG_ILLUMOS_MAPPING_SIZE;
 142         aim.aim_header.ah_tag = ATAG_ILLUMOS_MAPPING;
 143         aim.aim_paddr = 0x7000;
 144         aim.aim_vaddr = aim.aim_paddr;
 145         aim.aim_plen = 0x3000;
 146         aim.aim_vlen = aim.aim_plen;
 147         aim.aim_mapflags = PF_R | PF_X | PF_LOADER;
 148         atag_append(chain, &aim.aim_header);
 149 }
 150 
 151 static void
 152 fakeload_map_1mb(uintptr_t pa, uintptr_t va, int prot)
 153 {
 154         int entry;
 155         armpte_t *pte;
 156         arm_l1s_t *l1e;
 157 
 158         entry = ARMPT_VADDR_TO_L1E(va);
 159         pte = &pt_addr[entry];
 160         if (ARMPT_L1E_ISVALID(*pte))
 161                 fakeload_panic("armboot_mmu: asked to map a mapped region!\n");
 162         l1e = (arm_l1s_t *)pte;
 163         *pte = 0;
 164         l1e->al_type = ARMPT_L1_TYPE_SECT;
 165         /* Assume it's not device memory */
 166         l1e->al_bbit = 1;
 167         l1e->al_cbit = 1;
 168         l1e->al_tex = 1;
 169         l1e->al_sbit = 1;
 170 
 171         if (!(prot & PF_X))
 172                 l1e->al_xn = 1;
 173         l1e->al_domain = 0;
 174 
 175         if (prot & PF_W) {
 176                 l1e->al_ap2 = 1;
 177                 l1e->al_ap = 1;
 178         } else {
 179                 l1e->al_ap2 = 0;
 180                 l1e->al_ap = 1;
 181         }
 182         l1e->al_ngbit = 0;
 183         l1e->al_issuper = 0;
 184         l1e->al_addr = ARMPT_PADDR_TO_L1SECT(pa);
 185 }
 186 
 187 /*
 188  * Set freemem to be 1 MB aligned at the end of boot archive. While the L1 Page
 189  * table only needs to be 16 KB aligned, we opt for 1 MB alignment so that way
 190  * we can map it and all the other L2 page tables we might need. If we don't do
 191  * this, it'll become problematic for unix to actually modify this.
 192  */
 193 static void
 194 fakeload_pt_arena_init(const atag_initrd_t *aii)
 195 {
 196         int entry, i;
 197         armpte_t *pte;
 198         arm_l1s_t *l1e;
 199 
 200         pt_arena = aii->ai_start + aii->ai_size;
 201         if (pt_arena & MMU_PAGEOFFSET1M) {
 202                 pt_arena &= MMU_PAGEMASK1M;
 203                 pt_arena += MMU_PAGESIZE1M;
 204         }
 205         pt_arena_max = pt_arena + 4 * MMU_PAGESIZE1M;
 206         freemem = pt_arena_max;
 207 
 208         /* Set up the l1 page table by first invalidating it */
 209         pt_addr = (armpte_t *)pt_arena;
 210         pt_arena += ARMPT_L1_SIZE;
 211         bzero(pt_addr, ARMPT_L1_SIZE);
 212         for (i = 0; i < 4; i++)
 213                 fakeload_map_1mb((uintptr_t)pt_addr + i * MMU_PAGESIZE1M,
 214                     (uintptr_t)pt_addr + i * MMU_PAGESIZE1M,
 215                     PF_R | PF_W);
 216 }
 217 
 218 /*
 219  * This is our generally entry point. We're passed in the entry point of the
 220  * header.
 221  */
 222 static uintptr_t
 223 fakeload_archive_mappings(atag_header_t *chain, const void *addr,
 224     atag_illumos_status_t *aisp)
 225 {
 226         atag_illumos_mapping_t aim;
 227         fakeloader_hdr_t *hdr;
 228         Elf32_Ehdr *ehdr;
 229         Elf32_Phdr *phdr;
 230         int nhdrs, i;
 231         uintptr_t ret;
 232         uintptr_t text = 0, data = 0;
 233         size_t textln = 0, dataln = 0;
 234 
 235         hdr = (fakeloader_hdr_t *)addr;
 236 
 237         if (hdr->fh_magic[0] != FH_MAGIC0)
 238                 fakeload_panic("fh_magic[0] is wrong!\n");
 239         if (hdr->fh_magic[1] != FH_MAGIC1)
 240                 fakeload_panic("fh_magic[1] is wrong!\n");
 241         if (hdr->fh_magic[2] != FH_MAGIC2)
 242                 fakeload_panic("fh_magic[2] is wrong!\n");
 243         if (hdr->fh_magic[3] != FH_MAGIC3)
 244                 fakeload_panic("fh_magic[3] is wrong!\n");
 245 
 246         if (hdr->fh_unix_size == 0)
 247                 fakeload_panic("hdr unix size is zero\n");
 248         if (hdr->fh_unix_offset == 0)
 249                 fakeload_panic("hdr unix offset is zero\n");
 250         if (hdr->fh_archive_size == 0)
 251                 fakeload_panic("hdr archive size is zero\n");
 252         if (hdr->fh_archive_offset == 0)
 253                 fakeload_panic("hdr archive_offset is zero\n");
 254 
 255         ehdr = (Elf32_Ehdr *)((uintptr_t)addr + hdr->fh_unix_offset);
 256 
 257         if (ehdr->e_ident[EI_MAG0] != ELFMAG0)
 258                 fakeload_panic("magic[0] wrong");
 259         if (ehdr->e_ident[EI_MAG1] != ELFMAG1)
 260                 fakeload_panic("magic[1] wrong");
 261         if (ehdr->e_ident[EI_MAG2] != ELFMAG2)
 262                 fakeload_panic("magic[2] wrong");
 263         if (ehdr->e_ident[EI_MAG3] != ELFMAG3)
 264                 fakeload_panic("magic[3] wrong");
 265         if (ehdr->e_ident[EI_CLASS] != ELFCLASS32)
 266                 fakeload_panic("wrong elfclass");
 267         if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB)
 268                 fakeload_panic("wrong encoding");
 269         if (ehdr->e_ident[EI_OSABI] != ELFOSABI_SOLARIS)
 270                 fakeload_panic("wrong os abi");
 271         if (ehdr->e_ident[EI_ABIVERSION] != EAV_SUNW_CURRENT)
 272                 fakeload_panic("wrong abi version");
 273         if (ehdr->e_type != ET_EXEC)
 274                 fakeload_panic("unix is not an executable");
 275         if (ehdr->e_machine != EM_ARM)
 276                 fakeload_panic("unix is not an ARM Executible");
 277         if (ehdr->e_version != EV_CURRENT)
 278                 fakeload_panic("wrong version");
 279         if (ehdr->e_phnum == 0)
 280                 fakeload_panic("no program headers");
 281         ret = ehdr->e_entry;
 282 
 283         FAKELOAD_DPRINTF("validated unix's headers\n");
 284 
 285         nhdrs = ehdr->e_phnum;
 286         phdr = (Elf32_Phdr *)((uintptr_t)addr + hdr->fh_unix_offset +
 287             ehdr->e_phoff);
 288         for (i = 0; i < nhdrs; i++, phdr++) {
 289                 if (phdr->p_type != PT_LOAD) {
 290                         fakeload_puts("skipping non-PT_LOAD header\n");
 291                         continue;
 292                 }
 293 
 294                 if (phdr->p_filesz == 0 || phdr->p_memsz == 0) {
 295                         fakeload_puts("skipping PT_LOAD with 0 file/mem\n");
 296                         continue;
 297                 }
 298 
 299                 /*
 300                  * Create a mapping record for this in the atags.
 301                  */
 302                 aim.aim_header.ah_size = ATAG_ILLUMOS_MAPPING_SIZE;
 303                 aim.aim_header.ah_tag = ATAG_ILLUMOS_MAPPING;
 304                 aim.aim_paddr = (uintptr_t)addr + hdr->fh_unix_offset +
 305                     phdr->p_offset;
 306                 aim.aim_plen = phdr->p_filesz;
 307                 aim.aim_vaddr = phdr->p_vaddr;
 308                 aim.aim_vlen = phdr->p_memsz;
 309                 /* Round up vlen to be a multiple of 4k */
 310                 if (aim.aim_vlen & 0xfff) {
 311                         aim.aim_vlen &= ~0xfff;
 312                         aim.aim_vlen += 0x1000;
 313                 }
 314                 aim.aim_mapflags = phdr->p_flags;
 315                 atag_append(chain, &aim.aim_header);
 316 
 317                 /*
 318                  * When built with highvecs we need to account for the fact that
 319                  * _edata, _etext and _end are built assuming that the highvecs
 320                  * are normally part of our segments. ld is not doing anything
 321                  * wrong, but this breaks the assumptions that krtld currently
 322                  * has. As such, unix will use this information to overwrite the
 323                  * normal entry points that krtld uses in a similar style to
 324                  * SPARC.
 325                  */
 326                 if (aim.aim_vaddr != 0xffff0000) {
 327                         if ((phdr->p_flags & PF_W) != 0) {
 328                                 data = aim.aim_vaddr;
 329                                 dataln = aim.aim_vlen;
 330                         } else {
 331                                 text = aim.aim_vaddr;
 332                                 textln = aim.aim_vlen;
 333                         }
 334                 }
 335         }
 336 
 337         aisp->ais_stext = text;
 338         aisp->ais_etext = text + textln;
 339         aisp->ais_sdata = data;
 340         aisp->ais_edata = data + dataln;
 341 
 342         /* 1:1 map the boot archive */
 343         aim.aim_header.ah_size = ATAG_ILLUMOS_MAPPING_SIZE;
 344         aim.aim_header.ah_tag = ATAG_ILLUMOS_MAPPING;
 345         aim.aim_paddr = (uintptr_t)addr + hdr->fh_archive_offset;
 346         aim.aim_plen = hdr->fh_archive_size;
 347         aim.aim_vaddr = aim.aim_paddr;
 348         aim.aim_vlen = aim.aim_plen;
 349         aim.aim_mapflags = PF_R | PF_W | PF_X;
 350         atag_append(chain, &aim.aim_header);
 351         aisp->ais_archive = aim.aim_paddr;
 352         aisp->ais_archivelen = aim.aim_plen;
 353 
 354         return (ret);
 355 }
 356 
 357 static void
 358 fakeload_mkatags(atag_header_t *chain)
 359 {
 360         atag_illumos_status_t ais;
 361         atag_illumos_mapping_t aim;
 362 
 363         bzero(&ais, sizeof (ais));
 364         bzero(&aim, sizeof (aim));
 365 
 366         ais.ais_header.ah_size = ATAG_ILLUMOS_STATUS_SIZE;
 367         ais.ais_header.ah_tag = ATAG_ILLUMOS_STATUS;
 368         atag_append(chain, &ais.ais_header);
 369         aim.aim_header.ah_size = ATAG_ILLUMOS_MAPPING_SIZE;
 370         aim.aim_header.ah_tag = ATAG_ILLUMOS_MAPPING;
 371         atag_append(chain, &aim.aim_header);
 372 }
 373 
 374 static uintptr_t
 375 fakeload_alloc_l2pt(void)
 376 {
 377         uintptr_t ret;
 378 
 379         if (pt_arena & ARMPT_L2_MASK) {
 380                 ret = pt_arena;
 381                 ret &= ~ARMPT_L2_MASK;
 382                 ret += ARMPT_L2_SIZE;
 383                 pt_arena = ret + ARMPT_L2_SIZE;
 384         } else {
 385                 ret = pt_arena;
 386                 pt_arena = ret + ARMPT_L2_SIZE;
 387         }
 388         if (pt_arena >= pt_arena_max) {
 389                 fakeload_puts("pt_arena, max\n");
 390                 fakeload_ultostr(pt_arena);
 391                 fakeload_puts("\n");
 392                 fakeload_ultostr(pt_arena_max);
 393                 fakeload_puts("\n");
 394                 fakeload_puts("l2pts alloced\n");
 395                 fakeload_ultostr(nl2pages);
 396                 fakeload_puts("\n");
 397                 fakeload_panic("ran out of page tables!");
 398         }
 399 
 400         bzero((void *)ret, ARMPT_L2_SIZE);
 401         nl2pages++;
 402         return (ret);
 403 }
 404 
 405 /*
 406  * Finally, do all the dirty work. Let's create some page tables. The L1 page
 407  * table is full of 1 MB mappings by default. The L2 Page table is 1k in size
 408  * and covers that 1 MB. We're going to always create L2 page tables for now
 409  * which will use 4k and 64k pages.
 410  */
 411 static void
 412 fakeload_map(armpte_t *pt, uintptr_t pstart, uintptr_t vstart, size_t len,
 413     uint32_t prot)
 414 {
 415         int entry, chunksize;
 416         armpte_t *pte, *l2pt;
 417         arm_l1pt_t *l1pt;
 418 
 419         /*
 420          * Make sure both pstart + vstart are 4k aligned, along with len.
 421          */
 422         if (pstart & MMU_PAGEOFFSET)
 423                 fakeload_panic("pstart is not 4k aligned");
 424         if (vstart & MMU_PAGEOFFSET)
 425                 fakeload_panic("vstart is not 4k aligned");
 426         if (len & MMU_PAGEOFFSET)
 427                 fakeload_panic("len is not 4k aligned");
 428 
 429         /*
 430          * We're going to logically deal with each 1 MB chunk at a time.
 431          */
 432         while (len > 0) {
 433                 if (vstart & MMU_PAGEOFFSET1M) {
 434                         chunksize = MIN(len, MMU_PAGESIZE1M -
 435                             (vstart & MMU_PAGEOFFSET1M));
 436                 } else {
 437                         chunksize = MIN(len, MMU_PAGESIZE1M);
 438                 }
 439 
 440                 entry = ARMPT_VADDR_TO_L1E(vstart);
 441                 pte = &pt[entry];
 442 
 443                 if (!ARMPT_L1E_ISVALID(*pte)) {
 444                         uintptr_t l2table;
 445 
 446                         if (!(vstart & MMU_PAGEOFFSET1M) &&
 447                             !(pstart & MMU_PAGEOFFSET1M) &&
 448                             len == MMU_PAGESIZE1M) {
 449                                 fakeload_map_1mb(pstart, vstart, prot);
 450                                 vstart += MMU_PAGESIZE1M;
 451                                 pstart += MMU_PAGESIZE1M;
 452                                 len -= MMU_PAGESIZE1M;
 453                                 continue;
 454                         }
 455 
 456                         l2table = fakeload_alloc_l2pt();
 457                         *pte = 0;
 458                         l1pt = (arm_l1pt_t *)pte;
 459                         l1pt->al_type = ARMPT_L1_TYPE_L2PT;
 460                         l1pt->al_ptaddr = ARMPT_ADDR_TO_L1PTADDR(l2table);
 461                 } else if ((*pte & ARMPT_L1_TYPE_MASK) != ARMPT_L1_TYPE_L2PT) {
 462                         fakeload_panic("encountered l1 entry that's not a "
 463                             "pointer to a level 2 table\n");
 464                 } else {
 465                         l1pt = (arm_l1pt_t *)pte;
 466                 }
 467 
 468                 /* Now that we have the l1pt fill in l2 entries */
 469                 l2pt = (void *)(l1pt->al_ptaddr << ARMPT_L1PT_TO_L2_SHIFT);
 470                 len -= chunksize;
 471                 while (chunksize > 0) {
 472                         arm_l2e_t *l2pte;
 473 
 474                         entry = ARMPT_VADDR_TO_L2E(vstart);
 475                         pte = &l2pt[entry];
 476 
 477 #ifdef  MAP_DEBUG
 478                         fakeload_puts("4k page pa->va, l2root, entry\n");
 479                         fakeload_ultostr(pstart);
 480                         fakeload_puts("->");
 481                         fakeload_ultostr(vstart);
 482                         fakeload_puts(", ");
 483                         fakeload_ultostr((uintptr_t)l2pt);
 484                         fakeload_puts(", ");
 485                         fakeload_ultostr(entry);
 486                         fakeload_puts("\n");
 487 #endif
 488 
 489                         if ((*pte & ARMPT_L2_TYPE_MASK) !=
 490                             ARMPT_L2_TYPE_INVALID)
 491                                 fakeload_panic("found existing l2 page table, "
 492                                     "overlap in requested mappings detected!");
 493                         /* Map vaddr to our paddr! */
 494                         l2pte = ((arm_l2e_t *)pte);
 495                         *pte = 0;
 496                         if (!(prot & PF_X))
 497                                 l2pte->ale_xn = 1;
 498                         l2pte->ale_ident = 1;
 499                         if (prot & PF_DEVICE) {
 500                                 l2pte->ale_bbit = 1;
 501                                 l2pte->ale_cbit = 0;
 502                                 l2pte->ale_tex = 0;
 503                                 l2pte->ale_sbit = 1;
 504                         } else {
 505                                 l2pte->ale_bbit = 1;
 506                                 l2pte->ale_cbit = 1;
 507                                 l2pte->ale_tex = 1;
 508                                 l2pte->ale_sbit = 1;
 509                         }
 510                         if (prot & PF_W) {
 511                                 l2pte->ale_ap2 = 1;
 512                                 l2pte->ale_ap = 1;
 513                         } else {
 514                                 l2pte->ale_ap2 = 0;
 515                                 l2pte->ale_ap = 1;
 516                         }
 517                         l2pte->ale_ngbit = 0;
 518                         l2pte->ale_addr = ARMPT_PADDR_TO_L2ADDR(pstart);
 519 
 520                         chunksize -= MMU_PAGESIZE;
 521                         vstart += MMU_PAGESIZE;
 522                         pstart += MMU_PAGESIZE;
 523                 }
 524         }
 525 }
 526 
 527 static void
 528 fakeload_create_map(armpte_t *pt, atag_illumos_mapping_t *aimp)
 529 {
 530 #ifdef MAP_DEBUG
 531         fakeload_puts("paddr->vaddr\n");
 532         fakeload_ultostr(aimp->aim_paddr);
 533         fakeload_puts("->");
 534         fakeload_ultostr(aimp->aim_vaddr);
 535         fakeload_puts("\n");
 536         fakeload_puts("plen-vlen\n");
 537         fakeload_ultostr(aimp->aim_plen);
 538         fakeload_puts("-");
 539         fakeload_ultostr(aimp->aim_vlen);
 540         fakeload_puts("\n");
 541 #endif /* MAP_DEBUG */
 542 
 543         /*
 544          * Can we map this in place or do we need to basically allocate a new
 545          * region and bcopy everything into place for proper alignment?
 546          *
 547          * Criteria for this: we have a vlen > plen. plen is not page aligned.
 548          */
 549         if (aimp->aim_vlen > aimp->aim_plen ||
 550             (aimp->aim_paddr & MMU_PAGEOFFSET) != 0) {
 551                 uintptr_t start;
 552 
 553                 if (aimp->aim_mapflags & PF_NORELOC)
 554                         fakeload_panic("tried to reloc unrelocatable mapping");
 555 #ifdef  MAP_DEBUG
 556                 FAKELOAD_DPRINTF("reloacting paddr\n");
 557 #endif
 558                 start = freemem;
 559                 if (start & MMU_PAGEOFFSET) {
 560                         start &= MMU_PAGEMASK;
 561                         start += MMU_PAGESIZE;
 562                 }
 563                 bcopy((void *)aimp->aim_paddr, (void *)start,
 564                     aimp->aim_plen);
 565                 if (aimp->aim_vlen > aimp->aim_plen) {
 566                         bzero((void *)(start + aimp->aim_plen),
 567                             aimp->aim_vlen - aimp->aim_plen);
 568                 }
 569                 aimp->aim_paddr = start;
 570                 freemem = start + aimp->aim_vlen;
 571 #ifdef MAP_DEBUG
 572                 fakeload_puts("new paddr: ");
 573                 fakeload_ultostr(start);
 574                 fakeload_puts("\n");
 575 #endif /* MAP_DEBUG */
 576         }
 577 
 578         /*
 579          * Now that everything has been set up, go ahead and map the new region.
 580          */
 581         fakeload_map(pt, aimp->aim_paddr, aimp->aim_vaddr, aimp->aim_vlen,
 582             aimp->aim_mapflags);
 583 #ifdef MAP_DEBUG
 584         FAKELOAD_DPRINTF("\n");
 585 #endif /* MAP_DEBUG */
 586 }
 587 
/*
 * Main C entry point for the fake loader. "atag" points at the ATAG chain
 * handed to us by the previous boot stage; "ident" and "ident2" are the
 * other two registers from that stage (presumably a machine/board
 * identifier -- unused here; confirm against the backend's locore).
 *
 * We locate the boot archive, build mapping atags for everything that must
 * be visible to unix, construct page tables, turn on the MMU, and jump to
 * unix. This function never returns.
 */
void
fakeload_init(void *ident, void *ident2, void *atag)
{
        atag_header_t *hdr;
        atag_header_t *chain = (atag_header_t *)atag;
        const atag_initrd_t *initrd;
        atag_illumos_status_t *aisp;
        atag_illumos_mapping_t *aimp;
        uintptr_t unix_start;

        fakeload_backend_init();
        fakeload_puts("Hello from the loader\n");
        /* The combined boot archive arrives as the INITRD2 atag. */
        initrd = (atag_initrd_t *)atag_find(chain, ATAG_INITRD2);
        if (initrd == NULL)
                fakeload_panic("missing the initial ramdisk\n");

        /*
         * Create the status atag header and the initial mapping record for the
         * atags. We'll hold onto both of these.
         */
        fakeload_mkatags(chain);
        aisp = (atag_illumos_status_t *)atag_find(chain, ATAG_ILLUMOS_STATUS);
        if (aisp == NULL)
                fakeload_panic("can't find ATAG_ILLUMOS_STATUS");
        aimp = (atag_illumos_mapping_t *)atag_find(chain, ATAG_ILLUMOS_MAPPING);
        if (aimp == NULL)
                fakeload_panic("can't find ATAG_ILLUMOS_MAPPING");
        FAKELOAD_DPRINTF("created proto atags\n");

        /* Set up the page table arena and the zeroed L1 table. */
        fakeload_pt_arena_init(initrd);

        /* Add a mapping record covering the loader's own text. */
        fakeload_selfmap(chain);

        /*
         * Map the boot archive and all of unix
         */
        unix_start = fakeload_archive_mappings(chain,
            (const void *)(uintptr_t)initrd->ai_start, aisp);
        FAKELOAD_DPRINTF("filled out unix and the archive's mappings\n");

        /*
         * Fill in the atag mapping header for the atags themselves. 1:1 map it.
         * paddr is rounded down to a page and the length rounded out; note
         * this always adds a full extra page, even when the chain length is
         * already a multiple of 4k.
         */
        aimp->aim_paddr = (uintptr_t)chain & ~0xfff;
        aimp->aim_plen = atag_length(chain) & ~0xfff;
        aimp->aim_plen += 0x1000;
        aimp->aim_vaddr = aimp->aim_paddr;
        aimp->aim_vlen = aimp->aim_plen;
        aimp->aim_mapflags = PF_R | PF_W | PF_NORELOC;

        /*
         * Let the backend add mappings
         */
        fakeload_backend_addmaps(chain);

        /*
         * Turn on unaligned access
         */
        FAKELOAD_DPRINTF("turning on unaligned access\n");
        fakeload_unaligned_enable();
        FAKELOAD_DPRINTF("successfully enabled unaligned access\n");

        /*
         * To turn on the MMU we need to do the following:
         *  o Program all relevant CP15 registers
         *  o Program 1st and 2nd level page tables
         *  o Invalidate and Disable the I/D-cache
         *  o Fill in the last bits of the ATAG_ILLUMOS_STATUS atag
         *  o Turn on the MMU in SCTLR
         *  o Jump to unix
         */

        /* Last bits of the atag */
        aisp->ais_freemem = freemem;
        aisp->ais_version = 1;
        aisp->ais_ptbase = (uintptr_t)pt_addr;

        /*
         * Our initial page table is a series of 1 MB sections. While we really
         * should map 4k pages, for the moment we're just going to map 1 MB
         * regions, yay team!
         */
        hdr = chain;
        FAKELOAD_DPRINTF("creating mappings\n");
        /* Walk the chain and enter every ATAG_ILLUMOS_MAPPING record. */
        while (hdr != NULL) {
                if (hdr->ah_tag == ATAG_ILLUMOS_MAPPING)
                        fakeload_create_map(pt_addr,
                            (atag_illumos_mapping_t *)hdr);
                hdr = atag_next(hdr);
        }

        /*
         * Now that we've mapped everything, update the status atag.
         */
        aisp->ais_freeused = freemem - aisp->ais_freemem;
        aisp->ais_pt_arena = pt_arena;
        aisp->ais_pt_arena_max = pt_arena_max;

        /* Cache disable */
        FAKELOAD_DPRINTF("Flushing and disabling caches\n");
        armv6_dcache_flush();
        armv6_dcache_disable();
        armv6_dcache_inval();
        armv6_icache_disable();
        armv6_icache_inval();

        /* Program the page tables */
        FAKELOAD_DPRINTF("programming cp15 regs\n");
        fakeload_pt_setup((uintptr_t)pt_addr);


        /* MMU Enable */
        FAKELOAD_DPRINTF("see you on the other side\n");
        fakeload_mmu_enable();

        FAKELOAD_DPRINTF("why helo thar\n");

        /* Renable caches */
        armv6_dcache_enable();
        armv6_icache_enable();

        /* we should never come back */
        fakeload_exec(unix_start);
        fakeload_panic("hit the end of the world\n");
}