patch vm-cleanup

--- old/usr/src/uts/i86pc/vm/hat_i86.c
+++ new/usr/src/uts/i86pc/vm/hat_i86.c
(1116 lines elided)
1117 1117  int
1118 1118  hat_dup(hat_t *old, hat_t *new, caddr_t addr, size_t len, uint_t flag)
1119 1119  {
1120 1120          ASSERT((uintptr_t)addr < kernelbase);
1121 1121          ASSERT(new != kas.a_hat);
1122 1122          ASSERT(old != kas.a_hat);
1123 1123          return (0);
1124 1124  }
1125 1125  
1126 1126  /*
1127      - * Allocate any hat resources required for a process being swapped in.
1128      - */
1129      -/*ARGSUSED*/
1130      -void
1131      -hat_swapin(hat_t *hat)
1132      -{
1133      -        /* do nothing - we let everything fault back in */
1134      -}
1135      -
1136      -/*
1137      - * Unload all translations associated with an address space of a process
1138      - * that is being swapped out.
1139      - */
1140      -void
1141      -hat_swapout(hat_t *hat)
1142      -{
1143      -        uintptr_t       vaddr = (uintptr_t)0;
1144      -        uintptr_t       eaddr = _userlimit;
1145      -        htable_t        *ht = NULL;
1146      -        level_t         l;
1147      -
1148      -        XPV_DISALLOW_MIGRATE();
1149      -        /*
1150      -         * We can't just call hat_unload(hat, 0, _userlimit...)  here, because
1151      -         * seg_spt and shared pagetables can't be swapped out.
1152      -         * Take a look at segspt_shmswapout() - it's a big no-op.
1153      -         *
1154      -         * Instead we'll walk through all the address space and unload
1155      -         * any mappings which we are sure are not shared, not locked.
1156      -         */
1157      -        ASSERT(IS_PAGEALIGNED(vaddr));
1158      -        ASSERT(IS_PAGEALIGNED(eaddr));
1159      -        ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1160      -        if ((uintptr_t)hat->hat_as->a_userlimit < eaddr)
1161      -                eaddr = (uintptr_t)hat->hat_as->a_userlimit;
1162      -
1163      -        while (vaddr < eaddr) {
1164      -                (void) htable_walk(hat, &ht, &vaddr, eaddr);
1165      -                if (ht == NULL)
1166      -                        break;
1167      -
1168      -                ASSERT(!IN_VA_HOLE(vaddr));
1169      -
1170      -                /*
1171      -                 * If the page table is shared skip its entire range.
1172      -                 */
1173      -                l = ht->ht_level;
1174      -                if (ht->ht_flags & HTABLE_SHARED_PFN) {
1175      -                        vaddr = ht->ht_vaddr + LEVEL_SIZE(l + 1);
1176      -                        htable_release(ht);
1177      -                        ht = NULL;
1178      -                        continue;
1179      -                }
1180      -
1181      -                /*
1182      -                 * If the page table has no locked entries, unload this one.
1183      -                 */
1184      -                if (ht->ht_lock_cnt == 0)
1185      -                        hat_unload(hat, (caddr_t)vaddr, LEVEL_SIZE(l),
1186      -                            HAT_UNLOAD_UNMAP);
1187      -
1188      -                /*
1189      -                 * If we have a level 0 page table with locked entries,
1190      -                 * skip the entire page table, otherwise skip just one entry.
1191      -                 */
1192      -                if (ht->ht_lock_cnt > 0 && l == 0)
1193      -                        vaddr = ht->ht_vaddr + LEVEL_SIZE(1);
1194      -                else
1195      -                        vaddr += LEVEL_SIZE(l);
1196      -        }
1197      -        if (ht)
1198      -                htable_release(ht);
1199      -
1200      -        /*
1201      -         * We're in swapout because the system is low on memory, so
1202      -         * go back and flush all the htables off the cached list.
1203      -         */
1204      -        htable_purge_hat(hat);
1205      -        XPV_ALLOW_MIGRATE();
1206      -}
1207      -
1208      -/*
1209 1127   * returns number of bytes that have valid mappings in hat.
1210 1128   */
1211 1129  size_t
1212 1130  hat_get_mapped_size(hat_t *hat)
1213 1131  {
1214 1132          size_t total = 0;
1215 1133          int l;
1216 1134  
1217 1135          for (l = 0; l <= mmu.max_page_level; l++)
1218 1136                  total += (hat->hat_pages_mapped[l] << LEVEL_SHIFT(l));
(3245 lines elided)
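
Below is a minimal standalone sketch (not illumos code) of the range-skipping arithmetic used by the removed hat_swapout() above: a page table marked HTABLE_SHARED_PFN is skipped as a whole by advancing to the end of the range it covers (one level up from its own mappings), while an ordinary entry advances by the size of a single mapping at its level. The level shifts assume the usual x86 4KB/2MB/1GB/512GB paging geometry, and the struct fields are hypothetical stand-ins for htable_t, not the kernel definitions.

#include <stdint.h>
#include <stdio.h>

/*
 * Bytes covered by one entry at each page-table level, assuming the usual
 * x86 4-level paging geometry (4KB / 2MB / 1GB / 512GB).
 */
static const int level_shift[] = { 12, 21, 30, 39 };
#define LEVEL_SIZE(l)   ((uintptr_t)1 << level_shift[(l)])

/* Hypothetical stand-in for the htable_t fields the walk consults. */
struct fake_htable {
        uintptr_t ht_vaddr;     /* lowest address the table maps */
        int       ht_level;     /* level of the mappings it holds */
        int       shared;       /* stand-in for HTABLE_SHARED_PFN */
};

/*
 * Return the next virtual address the walk continues from: skip the whole
 * range of a shared table (one level up), otherwise advance one entry.
 */
static uintptr_t
next_vaddr(uintptr_t vaddr, const struct fake_htable *ht)
{
        if (ht->shared)
                return (ht->ht_vaddr + LEVEL_SIZE(ht->ht_level + 1));
        return (vaddr + LEVEL_SIZE(ht->ht_level));
}

int
main(void)
{
        struct fake_htable shared = { 0x400000, 0, 1 };
        struct fake_htable normal = { 0x400000, 0, 0 };

        /* shared level-0 table: jump 2MB to 0x600000; normal: 4KB to 0x401000 */
        printf("shared -> 0x%lx\n", (unsigned long)next_vaddr(0x400000, &shared));
        printf("normal -> 0x%lx\n", (unsigned long)next_vaddr(0x400000, &normal));
        return (0);
}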
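A similar sketch of the per-level accounting behind hat_get_mapped_size(): each level's mapped-page count is converted to bytes by shifting it by that level's page-size shift, then the levels are summed. Again the shift values and the struct are illustrative stand-ins for hat_t and LEVEL_SHIFT(), assuming 4KB base pages with 2MB and 1GB large pages.

#include <stdio.h>
#include <stddef.h>

#define MAX_PAGE_LEVEL  2

/* Hypothetical stand-in for LEVEL_SHIFT(): 4KB, 2MB, 1GB pages. */
static const int level_shift[MAX_PAGE_LEVEL + 1] = { 12, 21, 30 };

/* Stand-in for the hat_t field the loop reads. */
struct fake_hat {
        size_t pages_mapped[MAX_PAGE_LEVEL + 1];    /* pages mapped per level */
};

/* Sum the bytes covered by mappings at every level, as in hat_get_mapped_size(). */
static size_t
mapped_size(const struct fake_hat *hat)
{
        size_t total = 0;
        int l;

        for (l = 0; l <= MAX_PAGE_LEVEL; l++)
                total += hat->pages_mapped[l] << level_shift[l];
        return (total);
}

int
main(void)
{
        /* 100 small (4KB) pages plus 3 large (2MB) pages mapped */
        struct fake_hat h = { .pages_mapped = { 100, 3, 0 } };

        printf("%zu bytes mapped\n", mapped_size(&h));  /* 6701056 */
        return (0);
}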