/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright (c) 2014 Joyent, Inc.  All rights reserved.
 * Copyright (c) 2015 Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
 */

#include "fakeloader.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/elf.h>
#include <sys/atag.h>
#include <sys/sysmacros.h>
#include <sys/machparam.h>

#include <vm/pte.h>

/*
 * This is the stock ARM fake uniboot loader.
 *
 * Here's what we have to do:
 *   o Read the atag header and find the combined archive header
 *   o Determine the set of mappings we need to add for the following:
 *              - unix
 *              - boot_archive
 *              - atags
 *   o Enable unaligned access
 *   o Enable virtual memory
 *
 * There are several important constraints that we have here:
 *
 *   o We cannot use any .data! Several loaders that come before us are broken
 *     and only provide us with the ability to map our .text and potentially our
 *     .bss. We should strive to avoid even that if we can.
 */

#ifdef  DEBUG
#define FAKELOAD_DPRINTF(x)     fakeload_puts(x)
#else
#define FAKELOAD_DPRINTF(x)
#endif  /* DEBUG */

/*
 * XXX Assumes we have free memory following the boot archive.
 */
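/*
 * pt_addr points at the L1 page table. pt_arena is a simple bump allocator
 * for L2 page tables, bounded by pt_arena_max, and freemem tracks the first
 * free byte past everything we have placed so far. nl2pages counts the L2
 * tables we have handed out, for diagnostics.
 */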
static uintptr_t freemem;
static uintptr_t pt_arena;
static uintptr_t pt_arena_max;
static uint32_t *pt_addr;
static int nl2pages;

/* Simple copy routines */
void
bcopy(const void *s, void *d, size_t n)
{
        const char *src = s;
        char *dest = d;

        if (n == 0 || s == d)
                return;

        if (dest < src) {
                /* dest starts below src; a forward copy cannot clobber src */
                for (; n > 0; n--, src++, dest++)
                        *dest = *src;
        } else {
                /* dest starts inside or above src, so copy in reverse */
                src += n - 1;
                dest += n - 1;
                for (; n > 0; n--, src--, dest--)
                        *dest = *src;
        }
}

void
bzero(void *s, size_t n)
{
        char *c = s;
        while (n > 0) {
                *c = 0;
                c++;
                n--;
        }
}

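/*
 * Write a NUL-terminated string to the console, one character at a time, via
 * the backend's putc routine.
 */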
static void
fakeload_puts(const char *str)
{
        while (*str != '\0') {
                fakeload_backend_putc(*str);
                str++;
        }
}

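/*
 * Report a fatal error on the console and spin forever; there is nothing we
 * can safely return to at this point.
 */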
static void
fakeload_panic(const char *reason)
{
        fakeload_puts("panic!\n");
        fakeload_puts(reason);
        fakeload_puts("\n");
        fakeload_puts("spinning forever... goodbye...\n");
        for (;;)
                ;
}

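/*
 * Print an unsigned long on the console as 0x-prefixed hexadecimal.
 */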
static void
fakeload_ultostr(unsigned long value)
{
        char buf[16];
        ulong_t t, val = (ulong_t)value;
        char c;
        char *ptr = &(buf[14]);
        buf[15] = '\0';

        do {
                c = (char)('0' + val - 16 * (t = (val >> 4)));
                if (c > '9')
                        c += 'A' - '9' - 1;
                *--ptr = c;
        } while ((val = t) != 0);

        *--ptr = 'x';
        *--ptr = '0';
        fakeload_puts(ptr);
}

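/*
 * Append a 1:1 mapping atag that covers the loader itself (0x7000 for 0x3000
 * bytes) so that our own text stays mapped once the MMU is enabled.
 */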
static void
fakeload_selfmap(atag_header_t *chain)
{
        atag_illumos_mapping_t aim;

        aim.aim_header.ah_size = ATAG_ILLUMOS_MAPPING_SIZE;
        aim.aim_header.ah_tag = ATAG_ILLUMOS_MAPPING;
        aim.aim_paddr = 0x7000;
        aim.aim_vaddr = aim.aim_paddr;
        aim.aim_plen = 0x3000;
        aim.aim_vlen = aim.aim_plen;
        aim.aim_mapflags = PF_R | PF_X | PF_LOADER;
        atag_append(chain, &aim.aim_header);
}

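/*
 * Install a 1 MB section mapping from va to pa directly in the L1 table,
 * marking the region as device or normal memory and applying the requested
 * protections.
 */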
static void
fakeload_map_1mb(uintptr_t pa, uintptr_t va, int prot)
{
        int entry;
        armpte_t *pte;
        arm_l1s_t *l1e;

        entry = ARMPT_VADDR_TO_L1E(va);
        pte = &pt_addr[entry];
        if (ARMPT_L1E_ISVALID(*pte))
                fakeload_panic("armboot_mmu: asked to map a mapped region!\n");
        l1e = (arm_l1s_t *)pte;
        *pte = 0;
        l1e->al_type = ARMPT_L1_TYPE_SECT;

        if (prot & PF_DEVICE) {
                l1e->al_bbit = 1;
                l1e->al_cbit = 0;
                l1e->al_tex = 0;
                l1e->al_sbit = 1;
        } else {
                l1e->al_bbit = 1;
                l1e->al_cbit = 1;
                l1e->al_tex = 1;
                l1e->al_sbit = 1;
        }

        if (!(prot & PF_X))
                l1e->al_xn = 1;
        l1e->al_domain = 0;

        if (prot & PF_W) {
                l1e->al_ap2 = 1;
                l1e->al_ap = 1;
        } else {
                l1e->al_ap2 = 0;
                l1e->al_ap = 1;
        }
        l1e->al_ngbit = 0;
        l1e->al_issuper = 0;
        l1e->al_addr = ARMPT_PADDR_TO_L1SECT(pa);
}

/*
 * Carve out the page table arena, 1 MB aligned, at the end of the boot
 * archive, and start freemem after it. While the L1 page table only needs to
 * be 16 KB aligned, we opt for 1 MB alignment so that we can map it along
 * with all the other L2 page tables we might need. If we don't do this, it
 * becomes problematic for unix to modify the page tables later.
 */
static void
fakeload_pt_arena_init(const atag_initrd_t *aii)
{
        int entry, i;
        armpte_t *pte;
        arm_l1s_t *l1e;

        pt_arena = aii->ai_start + aii->ai_size;
        if (pt_arena & MMU_PAGEOFFSET1M) {
                pt_arena &= MMU_PAGEMASK1M;
                pt_arena += MMU_PAGESIZE1M;
        }
        pt_arena_max = pt_arena + 4 * MMU_PAGESIZE1M;
        freemem = pt_arena_max;

        /* Set up the l1 page table by first invalidating it */
        pt_addr = (armpte_t *)pt_arena;
        pt_arena += ARMPT_L1_SIZE;
        bzero(pt_addr, ARMPT_L1_SIZE);
        for (i = 0; i < 4; i++)
                fakeload_map_1mb((uintptr_t)pt_addr + i * MMU_PAGESIZE1M,
                    (uintptr_t)pt_addr + i * MMU_PAGESIZE1M,
                    PF_R | PF_W);
}

/*
 * Build the mapping atags for unix and the boot archive. We're passed the
 * address of the combined archive header; we validate it, create a mapping
 * record for each loadable segment and for the archive itself, and return
 * unix's entry point.
 */
static uintptr_t
fakeload_archive_mappings(atag_header_t *chain, const void *addr,
    atag_illumos_status_t *aisp)
{
        atag_illumos_mapping_t aim;
        fakeloader_hdr_t *hdr;
        Elf32_Ehdr *ehdr;
        Elf32_Phdr *phdr;
        int nhdrs, i;
        uintptr_t ret;
        uintptr_t text = 0, data = 0;
        size_t textln = 0, dataln = 0;

        hdr = (fakeloader_hdr_t *)addr;

        if (hdr->fh_magic[0] != FH_MAGIC0)
                fakeload_panic("fh_magic[0] is wrong!\n");
        if (hdr->fh_magic[1] != FH_MAGIC1)
                fakeload_panic("fh_magic[1] is wrong!\n");
        if (hdr->fh_magic[2] != FH_MAGIC2)
                fakeload_panic("fh_magic[2] is wrong!\n");
        if (hdr->fh_magic[3] != FH_MAGIC3)
                fakeload_panic("fh_magic[3] is wrong!\n");

        if (hdr->fh_unix_size == 0)
                fakeload_panic("hdr unix size is zero\n");
        if (hdr->fh_unix_offset == 0)
                fakeload_panic("hdr unix offset is zero\n");
        if (hdr->fh_archive_size == 0)
                fakeload_panic("hdr archive size is zero\n");
        if (hdr->fh_archive_offset == 0)
                fakeload_panic("hdr archive_offset is zero\n");

        ehdr = (Elf32_Ehdr *)((uintptr_t)addr + hdr->fh_unix_offset);

        if (ehdr->e_ident[EI_MAG0] != ELFMAG0)
                fakeload_panic("magic[0] wrong");
        if (ehdr->e_ident[EI_MAG1] != ELFMAG1)
                fakeload_panic("magic[1] wrong");
        if (ehdr->e_ident[EI_MAG2] != ELFMAG2)
                fakeload_panic("magic[2] wrong");
        if (ehdr->e_ident[EI_MAG3] != ELFMAG3)
                fakeload_panic("magic[3] wrong");
        if (ehdr->e_ident[EI_CLASS] != ELFCLASS32)
                fakeload_panic("wrong elfclass");
        if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB)
                fakeload_panic("wrong encoding");
        if (ehdr->e_ident[EI_OSABI] != ELFOSABI_SOLARIS)
                fakeload_panic("wrong os abi");
        if (ehdr->e_ident[EI_ABIVERSION] != EAV_SUNW_CURRENT)
                fakeload_panic("wrong abi version");
        if (ehdr->e_type != ET_EXEC)
                fakeload_panic("unix is not an executable");
        if (ehdr->e_machine != EM_ARM)
                fakeload_panic("unix is not an ARM executable");
        if (ehdr->e_version != EV_CURRENT)
                fakeload_panic("wrong version");
        if (ehdr->e_phnum == 0)
                fakeload_panic("no program headers");
        ret = ehdr->e_entry;

        FAKELOAD_DPRINTF("validated unix's headers\n");

        nhdrs = ehdr->e_phnum;
        phdr = (Elf32_Phdr *)((uintptr_t)addr + hdr->fh_unix_offset +
            ehdr->e_phoff);
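        /*
         * Walk each program header and record a mapping atag for every
         * PT_LOAD segment, keeping track of where text and data land as we
         * go.
         */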
        for (i = 0; i < nhdrs; i++, phdr++) {
                if (phdr->p_type != PT_LOAD) {
                        fakeload_puts("skipping non-PT_LOAD header\n");
                        continue;
                }

                if (phdr->p_filesz == 0 || phdr->p_memsz == 0) {
                        fakeload_puts("skipping PT_LOAD with 0 file/mem\n");
                        continue;
                }

                /*
                 * Create a mapping record for this in the atags.
                 */
                aim.aim_header.ah_size = ATAG_ILLUMOS_MAPPING_SIZE;
                aim.aim_header.ah_tag = ATAG_ILLUMOS_MAPPING;
                aim.aim_paddr = (uintptr_t)addr + hdr->fh_unix_offset +
                    phdr->p_offset;
                aim.aim_plen = phdr->p_filesz;
                aim.aim_vaddr = phdr->p_vaddr;
                aim.aim_vlen = phdr->p_memsz;
                /* Round up vlen to be a multiple of 4k */
                if (aim.aim_vlen & 0xfff) {
                        aim.aim_vlen &= ~0xfff;
                        aim.aim_vlen += 0x1000;
                }
                aim.aim_mapflags = phdr->p_flags;
                atag_append(chain, &aim.aim_header);

                /*
                 * When built with highvecs we need to account for the fact that
                 * _edata, _etext and _end are built assuming that the highvecs
                 * are normally part of our segments. ld is not doing anything
                 * wrong, but this breaks the assumptions that krtld currently
                 * has. As such, unix will use this information to overwrite the
                 * normal entry points that krtld uses in a similar style to
                 * SPARC.
                 */
                if (aim.aim_vaddr != 0xffff0000) {
                        if ((phdr->p_flags & PF_W) != 0) {
                                data = aim.aim_vaddr;
                                dataln = aim.aim_vlen;
                        } else {
                                text = aim.aim_vaddr;
                                textln = aim.aim_vlen;
                        }
                }
        }

        aisp->ais_stext = text;
        aisp->ais_etext = text + textln;
        aisp->ais_sdata = data;
        aisp->ais_edata = data + dataln;

        /* 1:1 map the boot archive */
        aim.aim_header.ah_size = ATAG_ILLUMOS_MAPPING_SIZE;
        aim.aim_header.ah_tag = ATAG_ILLUMOS_MAPPING;
        aim.aim_paddr = (uintptr_t)addr + hdr->fh_archive_offset;
        aim.aim_plen = hdr->fh_archive_size;
        aim.aim_vaddr = aim.aim_paddr;
        aim.aim_vlen = aim.aim_plen;
        aim.aim_mapflags = PF_R | PF_W | PF_X;
        atag_append(chain, &aim.aim_header);
        aisp->ais_archive = aim.aim_paddr;
        aisp->ais_archivelen = aim.aim_plen;

        return (ret);
}

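/*
 * Append empty status and mapping atags to the chain. fakeload_init() looks
 * these up and fills them in once the mappings and page tables have been
 * built.
 */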
static void
fakeload_mkatags(atag_header_t *chain)
{
        atag_illumos_status_t ais;
        atag_illumos_mapping_t aim;

        bzero(&ais, sizeof (ais));
        bzero(&aim, sizeof (aim));

        ais.ais_header.ah_size = ATAG_ILLUMOS_STATUS_SIZE;
        ais.ais_header.ah_tag = ATAG_ILLUMOS_STATUS;
        atag_append(chain, &ais.ais_header);
        aim.aim_header.ah_size = ATAG_ILLUMOS_MAPPING_SIZE;
        aim.aim_header.ah_tag = ATAG_ILLUMOS_MAPPING;
        atag_append(chain, &aim.aim_header);
}

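/*
 * Hand out one zeroed, ARMPT_L2_SIZE-aligned L2 page table from the arena,
 * panicking if we run past pt_arena_max.
 */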
static uintptr_t
fakeload_alloc_l2pt(void)
{
        uintptr_t ret;

        if (pt_arena & ARMPT_L2_MASK) {
                ret = pt_arena;
                ret &= ~ARMPT_L2_MASK;
                ret += ARMPT_L2_SIZE;
                pt_arena = ret + ARMPT_L2_SIZE;
        } else {
                ret = pt_arena;
                pt_arena = ret + ARMPT_L2_SIZE;
        }
        if (pt_arena >= pt_arena_max) {
                fakeload_puts("pt_arena, max\n");
                fakeload_ultostr(pt_arena);
                fakeload_puts("\n");
                fakeload_ultostr(pt_arena_max);
                fakeload_puts("\n");
                fakeload_puts("l2pts alloced\n");
                fakeload_ultostr(nl2pages);
                fakeload_puts("\n");
                fakeload_panic("ran out of page tables!");
        }

        bzero((void *)ret, ARMPT_L2_SIZE);
        nl2pages++;
        return (ret);
}

/*
 * Finally, do all the dirty work. Let's create some page tables. The L1 page
 * table is full of 1 MB mappings by default. Each L2 page table is 1 KB in
 * size and covers one of those 1 MB regions. For now we always create L2 page
 * tables, which can use 4 KB and 64 KB pages.
 */
static void
fakeload_map(armpte_t *pt, uintptr_t pstart, uintptr_t vstart, size_t len,
    uint32_t prot)
{
        int entry, chunksize;
        armpte_t *pte, *l2pt;
        arm_l1pt_t *l1pt;

        /*
         * Make sure pstart and vstart are both 4 KB aligned, along with len.
         */
        if (pstart & MMU_PAGEOFFSET)
                fakeload_panic("pstart is not 4k aligned");
        if (vstart & MMU_PAGEOFFSET)
                fakeload_panic("vstart is not 4k aligned");
        if (len & MMU_PAGEOFFSET)
                fakeload_panic("len is not 4k aligned");

        /*
         * Deal with the mapping logically, one 1 MB chunk at a time.
         */
        while (len > 0) {
                if (vstart & MMU_PAGEOFFSET1M) {
                        chunksize = MIN(len, MMU_PAGESIZE1M -
                            (vstart & MMU_PAGEOFFSET1M));
                } else {
                        chunksize = MIN(len, MMU_PAGESIZE1M);
                }

                entry = ARMPT_VADDR_TO_L1E(vstart);
                pte = &pt[entry];

                if (!ARMPT_L1E_ISVALID(*pte)) {
                        uintptr_t l2table;

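                        /*
                         * If this chunk is 1 MB aligned in both the physical
                         * and virtual spaces and at least 1 MB long, use a
                         * section mapping and skip the L2 table entirely.
                         */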
                        if (!(vstart & MMU_PAGEOFFSET1M) &&
                            !(pstart & MMU_PAGEOFFSET1M) &&
                            len >= MMU_PAGESIZE1M) {
                                fakeload_map_1mb(pstart, vstart, prot);
                                vstart += MMU_PAGESIZE1M;
                                pstart += MMU_PAGESIZE1M;
                                len -= MMU_PAGESIZE1M;
                                continue;
                        }

                        l2table = fakeload_alloc_l2pt();
                        *pte = 0;
                        l1pt = (arm_l1pt_t *)pte;
                        l1pt->al_type = ARMPT_L1_TYPE_L2PT;
                        l1pt->al_ptaddr = ARMPT_ADDR_TO_L1PTADDR(l2table);
                } else if ((*pte & ARMPT_L1_TYPE_MASK) != ARMPT_L1_TYPE_L2PT) {
                        fakeload_panic("encountered l1 entry that's not a "
                            "pointer to a level 2 table\n");
                } else {
                        l1pt = (arm_l1pt_t *)pte;
                }

                /* Now that we have the l1pt fill in l2 entries */
                l2pt = (void *)(l1pt->al_ptaddr << ARMPT_L1PT_TO_L2_SHIFT);
                len -= chunksize;
                while (chunksize > 0) {
                        arm_l2e_t *l2pte;

                        entry = ARMPT_VADDR_TO_L2E(vstart);
                        pte = &l2pt[entry];

#ifdef  MAP_DEBUG
                        fakeload_puts("4k page pa->va, l2root, entry\n");
                        fakeload_ultostr(pstart);
                        fakeload_puts("->");
                        fakeload_ultostr(vstart);
                        fakeload_puts(", ");
                        fakeload_ultostr((uintptr_t)l2pt);
                        fakeload_puts(", ");
                        fakeload_ultostr(entry);
                        fakeload_puts("\n");
#endif

                        if ((*pte & ARMPT_L2_TYPE_MASK) !=
                            ARMPT_L2_TYPE_INVALID)
                                fakeload_panic("found existing l2 page table, "
                                    "overlap in requested mappings detected!");
                        /* Map vaddr to our paddr! */
                        l2pte = ((arm_l2e_t *)pte);
                        *pte = 0;
                        if (!(prot & PF_X))
                                l2pte->ale_xn = 1;
                        l2pte->ale_ident = 1;
                        if (prot & PF_DEVICE) {
                                l2pte->ale_bbit = 1;
                                l2pte->ale_cbit = 0;
                                l2pte->ale_tex = 0;
                                l2pte->ale_sbit = 1;
                        } else {
                                l2pte->ale_bbit = 1;
                                l2pte->ale_cbit = 1;
                                l2pte->ale_tex = 1;
                                l2pte->ale_sbit = 1;
                        }
                        if (prot & PF_W) {
                                l2pte->ale_ap2 = 1;
                                l2pte->ale_ap = 1;
                        } else {
                                l2pte->ale_ap2 = 0;
                                l2pte->ale_ap = 1;
                        }
                        l2pte->ale_ngbit = 0;
                        l2pte->ale_addr = ARMPT_PADDR_TO_L2ADDR(pstart);

                        chunksize -= MMU_PAGESIZE;
                        vstart += MMU_PAGESIZE;
                        pstart += MMU_PAGESIZE;
                }
        }
}

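/*
 * Map a single mapping atag into the page tables. If the segment needs more
 * virtual space than it has physical data, or its physical address is not
 * page aligned, relocate it into freemem and zero-fill the tail first.
 */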
static void
fakeload_create_map(armpte_t *pt, atag_illumos_mapping_t *aimp)
{
#ifdef MAP_DEBUG
        fakeload_puts("paddr->vaddr\n");
        fakeload_ultostr(aimp->aim_paddr);
        fakeload_puts("->");
        fakeload_ultostr(aimp->aim_vaddr);
        fakeload_puts("\n");
        fakeload_puts("plen-vlen\n");
        fakeload_ultostr(aimp->aim_plen);
        fakeload_puts("-");
        fakeload_ultostr(aimp->aim_vlen);
        fakeload_puts("\n");
#endif /* MAP_DEBUG */

        /*
         * Can we map this in place, or do we need to allocate a new region and
         * bcopy everything into place for proper alignment?
         *
         * Criteria for this: vlen > plen, or paddr is not page aligned.
         */
        if (aimp->aim_vlen > aimp->aim_plen ||
            (aimp->aim_paddr & MMU_PAGEOFFSET) != 0) {
                uintptr_t start;

                if (aimp->aim_mapflags & PF_NORELOC)
                        fakeload_panic("tried to reloc unrelocatable mapping");
#ifdef  MAP_DEBUG
                FAKELOAD_DPRINTF("relocating paddr\n");
#endif
                start = freemem;
                if (start & MMU_PAGEOFFSET) {
                        start &= MMU_PAGEMASK;
                        start += MMU_PAGESIZE;
                }
                bcopy((void *)aimp->aim_paddr, (void *)start,
                    aimp->aim_plen);
                if (aimp->aim_vlen > aimp->aim_plen) {
                        bzero((void *)(start + aimp->aim_plen),
                            aimp->aim_vlen - aimp->aim_plen);
                }
                aimp->aim_paddr = start;
                freemem = start + aimp->aim_vlen;
#ifdef MAP_DEBUG
                fakeload_puts("new paddr: ");
                fakeload_ultostr(start);
                fakeload_puts("\n");
#endif /* MAP_DEBUG */
        }

        /*
         * Now that everything has been set up, go ahead and map the new region.
         */
        fakeload_map(pt, aimp->aim_paddr, aimp->aim_vaddr, aimp->aim_vlen,
            aimp->aim_mapflags);
#ifdef MAP_DEBUG
        FAKELOAD_DPRINTF("\n");
#endif /* MAP_DEBUG */
}

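/*
 * This is the C entry point for the loader. We're handed two identification
 * values from the previous boot stage along with a pointer to the atag chain,
 * and we pass all of them, plus unix's entry point, on to fakeload_exec().
 */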
void
fakeload_init(void *ident, void *ident2, void *atag)
{
        atag_header_t *hdr;
        atag_header_t *chain = (atag_header_t *)atag;
        const atag_initrd_t *initrd;
        atag_illumos_status_t *aisp;
        atag_illumos_mapping_t *aimp;
        uintptr_t unix_start;

        fakeload_backend_init();
        fakeload_puts("Hello from the loader\n");
        initrd = (atag_initrd_t *)atag_find(chain, ATAG_INITRD2);
        if (initrd == NULL)
                fakeload_panic("missing the initial ramdisk\n");

        /*
         * Create the status atag header and the initial mapping record for the
         * atags. We'll hold onto both of these.
         */
        fakeload_mkatags(chain);
        aisp = (atag_illumos_status_t *)atag_find(chain, ATAG_ILLUMOS_STATUS);
        if (aisp == NULL)
                fakeload_panic("can't find ATAG_ILLUMOS_STATUS");
        aimp = (atag_illumos_mapping_t *)atag_find(chain, ATAG_ILLUMOS_MAPPING);
        if (aimp == NULL)
                fakeload_panic("can't find ATAG_ILLUMOS_MAPPING");
        FAKELOAD_DPRINTF("created proto atags\n");

        fakeload_pt_arena_init(initrd);

        fakeload_selfmap(chain);

        /*
         * Map the boot archive and all of unix
         */
        unix_start = fakeload_archive_mappings(chain,
            (const void *)(uintptr_t)initrd->ai_start, aisp);
        FAKELOAD_DPRINTF("filled out unix and the archive's mappings\n");

        /*
         * Fill in the atag mapping header for the atags themselves. 1:1 map it.
         */
        aimp->aim_paddr = (uintptr_t)chain & ~0xfff;
        aimp->aim_plen = atag_length(chain) & ~0xfff;
        aimp->aim_plen += 0x1000;
        aimp->aim_vaddr = aimp->aim_paddr;
        aimp->aim_vlen = aimp->aim_plen;
        aimp->aim_mapflags = PF_R | PF_W | PF_NORELOC;

        /*
         * Let the backend add mappings
         */
        fakeload_backend_addmaps(chain);

        /*
         * Turn on unaligned access
         */
        FAKELOAD_DPRINTF("turning on unaligned access\n");
        fakeload_unaligned_enable();
        FAKELOAD_DPRINTF("successfully enabled unaligned access\n");

        /*
         * To turn on the MMU we need to do the following:
         *  o Program all relevant CP15 registers
         *  o Program 1st and 2nd level page tables
         *  o Invalidate and Disable the I/D-cache
         *  o Fill in the last bits of the ATAG_ILLUMOS_STATUS atag
         *  o Turn on the MMU in SCTLR
         *  o Jump to unix
         */

        /* Last bits of the atag */
        aisp->ais_freemem = freemem;
        aisp->ais_version = 1;
        aisp->ais_ptbase = (uintptr_t)pt_addr;

        /*
         * Walk the atag chain and map every mapping record into our initial
         * page table. Aligned 1 MB chunks become section mappings; everything
         * else is mapped with 4 KB pages through L2 tables.
         */
        hdr = chain;
        FAKELOAD_DPRINTF("creating mappings\n");
        while (hdr != NULL) {
                if (hdr->ah_tag == ATAG_ILLUMOS_MAPPING)
                        fakeload_create_map(pt_addr,
                            (atag_illumos_mapping_t *)hdr);
                hdr = atag_next(hdr);
        }

        /*
         * Now that we've mapped everything, update the status atag.
         */
        aisp->ais_freeused = freemem - aisp->ais_freemem;
        aisp->ais_pt_arena = pt_arena;
        aisp->ais_pt_arena_max = pt_arena_max;

        /* Cache disable */
        FAKELOAD_DPRINTF("Flushing and disabling caches\n");
        armv6_dcache_flush();
        armv6_dcache_disable();
        armv6_dcache_inval();
        armv6_icache_disable();
        armv6_icache_inval();

        /* Program the page tables */
        FAKELOAD_DPRINTF("programming cp15 regs\n");
        fakeload_pt_setup((uintptr_t)pt_addr);


        /* MMU Enable */
        FAKELOAD_DPRINTF("see you on the other side\n");
        fakeload_mmu_enable();

        FAKELOAD_DPRINTF("why helo thar\n");

        /* we should never come back */
        fakeload_exec(ident, ident2, chain, unix_start);
        fakeload_panic("hit the end of the world\n");
}