Print this page
loader: allow 1MB device maps
There's no reason we shouldn't allow 1MB PTEs for use on device memory.
loader: map as much as possible using 1MB pages
Chances are that we never actually executed this bit of code since all the
maps we ever deal with are either very short or much larger than 1MB.
unix: enable caches in locore
The loader should really be as simple as possible so that it stays as small
as possible.  It should configure the machine so that unix can make certain
assumptions, but it should leave more complex initialization to unix.


  18 
  19 #include <sys/types.h>
  20 #include <sys/param.h>
  21 #include <sys/elf.h>
  22 #include <sys/atag.h>
  23 #include <sys/sysmacros.h>
  24 #include <sys/machparam.h>
  25 
  26 #include <vm/pte.h>
  27 
  28 /*
  29  * This is the stock ARM fake uniboot loader.
  30  *
  31  * Here's what we have to do:
  32  *   o Read the atag header and find the combined archive header
  33  *   o Determine the set of mappings we need to add for the following:
  34  *              - unix
  35  *              - boot_archive
  36  *              - atags
  37  *   o Enable unaligned access
  38  *   o Enable the caches + virtual memory
  39  *
  40  * There are several important constraints that we have here:
  41  *
  42  *   o We cannot use any .data! Several loaders that come before us are broken
  43  *     and only provide us with the ability to map our .text and potentially our
  44  *     .bss. We should strive to avoid even that if we can.
  45  */
  46 
  47 #ifdef  DEBUG
  48 #define FAKELOAD_DPRINTF(x)     fakeload_puts(x)
  49 #else
  50 #define FAKELOAD_DPRINTF(x)
  51 #endif  /* DEBUG */
  52 
  53 /*
  54  * XXX ASSUMES WE HAVE Free memory following the boot archive
  55  */
  56 static uintptr_t freemem;
  57 static uintptr_t pt_arena;
  58 static uintptr_t pt_arena_max;


 146         aim.aim_plen = 0x3000;
 147         aim.aim_vlen = aim.aim_plen;
 148         aim.aim_mapflags = PF_R | PF_X | PF_LOADER;
 149         atag_append(chain, &aim.aim_header);
 150 }
 151 
 152 static void
 153 fakeload_map_1mb(uintptr_t pa, uintptr_t va, int prot)
 154 {
 155         int entry;
 156         armpte_t *pte;
 157         arm_l1s_t *l1e;
 158 
 159         entry = ARMPT_VADDR_TO_L1E(va);
 160         pte = &pt_addr[entry];
 161         if (ARMPT_L1E_ISVALID(*pte))
 162                 fakeload_panic("armboot_mmu: asked to map a mapped region!\n");
 163         l1e = (arm_l1s_t *)pte;
 164         *pte = 0;
 165         l1e->al_type = ARMPT_L1_TYPE_SECT;
 166         /* Assume it's not device memory */






 167         l1e->al_bbit = 1;
 168         l1e->al_cbit = 1;
 169         l1e->al_tex = 1;
 170         l1e->al_sbit = 1;

 171 
 172         if (!(prot & PF_X))
 173                 l1e->al_xn = 1;
 174         l1e->al_domain = 0;
 175 
 176         if (prot & PF_W) {
 177                 l1e->al_ap2 = 1;
 178                 l1e->al_ap = 1;
 179         } else {
 180                 l1e->al_ap2 = 0;
 181                 l1e->al_ap = 1;
 182         }
 183         l1e->al_ngbit = 0;
 184         l1e->al_issuper = 0;
 185         l1e->al_addr = ARMPT_PADDR_TO_L1SECT(pa);
 186 }
 187 
 188 /*
 189  * Set freemem to be 1 MB aligned at the end of boot archive. While the L1 Page
 190  * table only needs to be 16 KB aligned, we opt for 1 MB alignment so that way


 429 
 430         /*
 431          * We're going to logically deal with each 1 MB chunk at a time.
 432          */
 433         while (len > 0) {
 434                 if (vstart & MMU_PAGEOFFSET1M) {
 435                         chunksize = MIN(len, MMU_PAGESIZE1M -
 436                             (vstart & MMU_PAGEOFFSET1M));
 437                 } else {
 438                         chunksize = MIN(len, MMU_PAGESIZE1M);
 439                 }
 440 
 441                 entry = ARMPT_VADDR_TO_L1E(vstart);
 442                 pte = &pt[entry];
 443 
 444                 if (!ARMPT_L1E_ISVALID(*pte)) {
 445                         uintptr_t l2table;
 446 
 447                         if (!(vstart & MMU_PAGEOFFSET1M) &&
 448                             !(pstart & MMU_PAGEOFFSET1M) &&
 449                             len == MMU_PAGESIZE1M) {
 450                                 fakeload_map_1mb(pstart, vstart, prot);
 451                                 vstart += MMU_PAGESIZE1M;
 452                                 pstart += MMU_PAGESIZE1M;
 453                                 len -= MMU_PAGESIZE1M;
 454                                 continue;
 455                         }
 456 
 457                         l2table = fakeload_alloc_l2pt();
 458                         *pte = 0;
 459                         l1pt = (arm_l1pt_t *)pte;
 460                         l1pt->al_type = ARMPT_L1_TYPE_L2PT;
 461                         l1pt->al_ptaddr = ARMPT_ADDR_TO_L1PTADDR(l2table);
 462                 } else if ((*pte & ARMPT_L1_TYPE_MASK) != ARMPT_L1_TYPE_L2PT) {
 463                         fakeload_panic("encountered l1 entry that's not a "
 464                             "pointer to a level 2 table\n");
 465                 } else {
 466                         l1pt = (arm_l1pt_t *)pte;
 467                 }
 468 
 469                 /* Now that we have the l1pt fill in l2 entries */


 685         aisp->ais_pt_arena_max = pt_arena_max;
 686 
 687         /* Cache disable */
 688         FAKELOAD_DPRINTF("Flushing and disabling caches\n");
 689         armv6_dcache_flush();
 690         armv6_dcache_disable();
 691         armv6_dcache_inval();
 692         armv6_icache_disable();
 693         armv6_icache_inval();
 694 
 695         /* Program the page tables */
 696         FAKELOAD_DPRINTF("programming cp15 regs\n");
 697         fakeload_pt_setup((uintptr_t)pt_addr);
 698 
 699 
 700         /* MMU Enable */
 701         FAKELOAD_DPRINTF("see you on the other side\n");
 702         fakeload_mmu_enable();
 703 
 704         FAKELOAD_DPRINTF("why helo thar\n");
 705 
 706         /* Renable caches */
 707         armv6_dcache_enable();
 708         armv6_icache_enable();
 709 
 710         /* we should never come back */
 711         fakeload_exec(ident, ident2, chain, unix_start);
 712         fakeload_panic("hit the end of the world\n");
 713 }


  18 
  19 #include <sys/types.h>
  20 #include <sys/param.h>
  21 #include <sys/elf.h>
  22 #include <sys/atag.h>
  23 #include <sys/sysmacros.h>
  24 #include <sys/machparam.h>
  25 
  26 #include <vm/pte.h>
  27 
  28 /*
  29  * This is the stock ARM fake uniboot loader.
  30  *
  31  * Here's what we have to do:
  32  *   o Read the atag header and find the combined archive header
  33  *   o Determine the set of mappings we need to add for the following:
  34  *              - unix
  35  *              - boot_archive
  36  *              - atags
  37  *   o Enable unaligned access
  38  *   o Enable virtual memory
  39  *
  40  * There are several important constraints that we have here:
  41  *
  42  *   o We cannot use any .data! Several loaders that come before us are broken
  43  *     and only provide us with the ability to map our .text and potentially our
  44  *     .bss. We should strive to avoid even that if we can.
  45  */
  46 
  47 #ifdef  DEBUG
  48 #define FAKELOAD_DPRINTF(x)     fakeload_puts(x)
  49 #else
  50 #define FAKELOAD_DPRINTF(x)
  51 #endif  /* DEBUG */
  52 
  53 /*
  54  * XXX ASSUMES WE HAVE Free memory following the boot archive
  55  */
  56 static uintptr_t freemem;
  57 static uintptr_t pt_arena;
  58 static uintptr_t pt_arena_max;


 146         aim.aim_plen = 0x3000;
 147         aim.aim_vlen = aim.aim_plen;
 148         aim.aim_mapflags = PF_R | PF_X | PF_LOADER;
 149         atag_append(chain, &aim.aim_header);
 150 }
 151 
 152 static void
 153 fakeload_map_1mb(uintptr_t pa, uintptr_t va, int prot)
 154 {
 155         int entry;
 156         armpte_t *pte;
 157         arm_l1s_t *l1e;
 158 
 159         entry = ARMPT_VADDR_TO_L1E(va);
 160         pte = &pt_addr[entry];
 161         if (ARMPT_L1E_ISVALID(*pte))
 162                 fakeload_panic("armboot_mmu: asked to map a mapped region!\n");
 163         l1e = (arm_l1s_t *)pte;
 164         *pte = 0;
 165         l1e->al_type = ARMPT_L1_TYPE_SECT;
 166 
 167         if (prot & PF_DEVICE) {
 168                 l1e->al_bbit = 1;
 169                 l1e->al_cbit = 0;
 170                 l1e->al_tex = 0;
 171                 l1e->al_sbit = 1;
 172         } else {
 173                 l1e->al_bbit = 1;
 174                 l1e->al_cbit = 1;
 175                 l1e->al_tex = 1;
 176                 l1e->al_sbit = 1;
 177         }
 178 
 179         if (!(prot & PF_X))
 180                 l1e->al_xn = 1;
 181         l1e->al_domain = 0;
 182 
 183         if (prot & PF_W) {
 184                 l1e->al_ap2 = 1;
 185                 l1e->al_ap = 1;
 186         } else {
 187                 l1e->al_ap2 = 0;
 188                 l1e->al_ap = 1;
 189         }
 190         l1e->al_ngbit = 0;
 191         l1e->al_issuper = 0;
 192         l1e->al_addr = ARMPT_PADDR_TO_L1SECT(pa);
 193 }
 194 
 195 /*
 196  * Set freemem to be 1 MB aligned at the end of boot archive. While the L1 Page
 197  * table only needs to be 16 KB aligned, we opt for 1 MB alignment so that way


 436 
 437         /*
 438          * We're going to logically deal with each 1 MB chunk at a time.
 439          */
 440         while (len > 0) {
 441                 if (vstart & MMU_PAGEOFFSET1M) {
 442                         chunksize = MIN(len, MMU_PAGESIZE1M -
 443                             (vstart & MMU_PAGEOFFSET1M));
 444                 } else {
 445                         chunksize = MIN(len, MMU_PAGESIZE1M);
 446                 }
 447 
 448                 entry = ARMPT_VADDR_TO_L1E(vstart);
 449                 pte = &pt[entry];
 450 
 451                 if (!ARMPT_L1E_ISVALID(*pte)) {
 452                         uintptr_t l2table;
 453 
 454                         if (!(vstart & MMU_PAGEOFFSET1M) &&
 455                             !(pstart & MMU_PAGEOFFSET1M) &&
 456                             len >= MMU_PAGESIZE1M) {
 457                                 fakeload_map_1mb(pstart, vstart, prot);
 458                                 vstart += MMU_PAGESIZE1M;
 459                                 pstart += MMU_PAGESIZE1M;
 460                                 len -= MMU_PAGESIZE1M;
 461                                 continue;
 462                         }
 463 
 464                         l2table = fakeload_alloc_l2pt();
 465                         *pte = 0;
 466                         l1pt = (arm_l1pt_t *)pte;
 467                         l1pt->al_type = ARMPT_L1_TYPE_L2PT;
 468                         l1pt->al_ptaddr = ARMPT_ADDR_TO_L1PTADDR(l2table);
 469                 } else if ((*pte & ARMPT_L1_TYPE_MASK) != ARMPT_L1_TYPE_L2PT) {
 470                         fakeload_panic("encountered l1 entry that's not a "
 471                             "pointer to a level 2 table\n");
 472                 } else {
 473                         l1pt = (arm_l1pt_t *)pte;
 474                 }
 475 
 476                 /* Now that we have the l1pt fill in l2 entries */


 692         aisp->ais_pt_arena_max = pt_arena_max;
 693 
 694         /* Cache disable */
 695         FAKELOAD_DPRINTF("Flushing and disabling caches\n");
 696         armv6_dcache_flush();
 697         armv6_dcache_disable();
 698         armv6_dcache_inval();
 699         armv6_icache_disable();
 700         armv6_icache_inval();
 701 
 702         /* Program the page tables */
 703         FAKELOAD_DPRINTF("programming cp15 regs\n");
 704         fakeload_pt_setup((uintptr_t)pt_addr);
 705 
 706 
 707         /* MMU Enable */
 708         FAKELOAD_DPRINTF("see you on the other side\n");
 709         fakeload_mmu_enable();
 710 
 711         FAKELOAD_DPRINTF("why helo thar\n");




 712 
 713         /* we should never come back */
 714         fakeload_exec(ident, ident2, chain, unix_start);
 715         fakeload_panic("hit the end of the world\n");
 716 }