6345 remove xhat support

*** 3854,3864 ****
  	anon_sync_obj_t an_cookie;
  	enum seg_rw arw;
  	int alloc_failed = 0;
  	int adjszc_chk;
  	struct vattr va;
- 	int xhat = 0;
  	page_t *pplist;
  	pfn_t pfn;
  	int physcontig;
  	int upgrdfail;
  	int segvn_anypgsz_vnode = 0; /* for now map vnode with 2 page sizes */
--- 3854,3863 ----
*** 3904,3917 ****
  	} else {
  		prot = svd->prot;
  		/* caller has already done segment level protection check. */
  	}
  
- 	if (seg->s_as->a_hat != hat) {
- 		xhat = 1;
- 	}
- 
  	if (rw == S_WRITE && segtype == MAP_PRIVATE) {
  		SEGVN_VMSTAT_FLTVNPAGES(2);
  		arw = S_READ;
  	} else {
  		arw = rw;
--- 3903,3912 ----
*** 4263,4291 ****
  			if (PP_ISMIGRATE(ppa[0])) {
  				page_migrate(seg, a, ppa, pages);
  			}
  			SEGVN_UPDATE_MODBITS(ppa, pages, rw,
  			    prot, vpprot);
- 			if (!xhat) {
  				hat_memload_array_region(hat, a, pgsz,
  				    ppa, prot & vpprot, hat_flag,
  				    svd->rcookie);
- 			} else {
- 				/*
- 				 * avoid large xhat mappings to FS
- 				 * pages so that hat_page_demote()
- 				 * doesn't need to check for xhat
- 				 * large mappings.
- 				 * Don't use regions with xhats.
- 				 */
- 				for (i = 0; i < pages; i++) {
- 					hat_memload(hat,
- 					    a + (i << PAGESHIFT),
- 					    ppa[i], prot & vpprot,
- 					    hat_flag);
- 				}
- 			}
  
  			if (!(hat_flag & HAT_LOAD_LOCK)) {
  				for (i = 0; i < pages; i++) {
  					page_unlock(ppa[i]);
  				}
--- 4258,4270 ----
*** 4335,4345 ****
  
  			/*
  			 * check if we should use smallest mapping size.
  			 */
  			upgrdfail = 0;
! 			if (szc == 0 || xhat ||
  			    (pszc >= szc &&
  			    !IS_P2ALIGNED(pfn, pages)) ||
  			    (pszc < szc &&
  			    !segvn_full_szcpages(ppa, szc, &upgrdfail,
  			    &pszc))) {
--- 4314,4324 ----
  
  			/*
  			 * check if we should use smallest mapping size.
  			 */
  			upgrdfail = 0;
! 			if (szc == 0 ||
  			    (pszc >= szc &&
  			    !IS_P2ALIGNED(pfn, pages)) ||
  			    (pszc < szc &&
  			    !segvn_full_szcpages(ppa, szc, &upgrdfail,
  			    &pszc))) {
*** 4367,4377 ****
  					ANON_LOCK_EXIT(&amp->a_rwlock);
  				}
  				ierr = -1;
  				break;
  			}
! 			if (szc != 0 && !xhat && !upgrdfail) {
  				segvn_faultvnmpss_align_err5++;
  			}
  			SEGVN_VMSTAT_FLTVNPAGES(34);
  			if (pplist != NULL) {
  				page_free_replacement_page(pplist);
--- 4346,4356 ----
  					ANON_LOCK_EXIT(&amp->a_rwlock);
  				}
  				ierr = -1;
  				break;
  			}
! 			if (szc != 0 && !upgrdfail) {
  				segvn_faultvnmpss_align_err5++;
  			}
  			SEGVN_VMSTAT_FLTVNPAGES(34);
  			if (pplist != NULL) {
  				page_free_replacement_page(pplist);