3839 u_offset_t off = svd->offset + (uintptr_t)(a - seg->s_base);
3840 ulong_t aindx = svd->anon_index + seg_page(seg, a);
3841 struct vpage *vpage = (svd->vpage != NULL) ?
3842 &svd->vpage[seg_page(seg, a)] : NULL;
3843 vnode_t *vp = svd->vp;
3844 page_t **ppa;
3845 uint_t pszc;
3846 size_t ppgsz;
3847 pgcnt_t ppages;
3848 faultcode_t err = 0;
3849 int ierr;
3850 int vop_size_err = 0;
3851 uint_t protchk, prot, vpprot;
3852 ulong_t i;
3853 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
3854 anon_sync_obj_t an_cookie;
3855 enum seg_rw arw;
3856 int alloc_failed = 0;
3857 int adjszc_chk;
3858 struct vattr va;
3859 int xhat = 0;
3860 page_t *pplist;
3861 pfn_t pfn;
3862 int physcontig;
3863 int upgrdfail;
3864 int segvn_anypgsz_vnode = 0; /* for now map vnode with 2 page sizes */
3865 int tron = (svd->tr_state == SEGVN_TR_ON);
3866
3867 ASSERT(szc != 0);
3868 ASSERT(vp != NULL);
3869 ASSERT(brkcow == 0 || amp != NULL);
3870 ASSERT(tron == 0 || amp != NULL);
3871 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */
3872 ASSERT(!(svd->flags & MAP_NORESERVE));
3873 ASSERT(type != F_SOFTUNLOCK);
3874 ASSERT(IS_P2ALIGNED(a, maxpgsz));
3875 ASSERT(amp == NULL || IS_P2ALIGNED(aindx, maxpages));
3876 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
3877 ASSERT(seg->s_szc < NBBY * sizeof (int));
3878 ASSERT(type != F_SOFTLOCK || lpgeaddr - a == maxpgsz);
3879 ASSERT(svd->tr_state != SEGVN_TR_INIT);
3888 if (svd->pageprot) {
3889 switch (rw) {
3890 case S_READ:
3891 protchk = PROT_READ;
3892 break;
3893 case S_WRITE:
3894 protchk = PROT_WRITE;
3895 break;
3896 case S_EXEC:
3897 protchk = PROT_EXEC;
3898 break;
3899 case S_OTHER:
3900 default:
3901 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
3902 break;
3903 }
3904 } else {
3905 prot = svd->prot;
3906 /* caller has already done segment level protection check. */
3907 }
3908
3909 if (seg->s_as->a_hat != hat) {
3910 xhat = 1;
3911 }
3912
3913 if (rw == S_WRITE && segtype == MAP_PRIVATE) {
3914 SEGVN_VMSTAT_FLTVNPAGES(2);
3915 arw = S_READ;
3916 } else {
3917 arw = rw;
3918 }
3919
3920 ppa = kmem_alloc(ppasize, KM_SLEEP);
3921
3922 VM_STAT_COND_ADD(amp != NULL, segvnvmstats.fltvnpages[3]);
3923
3924 for (;;) {
3925 adjszc_chk = 0;
3926 for (; a < lpgeaddr; a += pgsz, off += pgsz, aindx += pages) {
3927 if (adjszc_chk) {
3928 while (szc < seg->s_szc) {
3929 uintptr_t e;
3930 uint_t tszc;
3931 tszc = segvn_anypgsz_vnode ? szc + 1 :
3932 seg->s_szc;
4248 off + (i << PAGESHIFT));
4249 }
4250 #endif /* DEBUG */
4251 /*
4252 * All pages are of szc we need and they are
4253 * all locked so they can't change szc. load
4254 * translations.
4255 *
4256 * if page got promoted since last check
4257 * we don't need pplist.
4258 */
4259 if (pplist != NULL) {
4260 page_free_replacement_page(pplist);
4261 page_create_putback(pages);
4262 }
4263 if (PP_ISMIGRATE(ppa[0])) {
4264 page_migrate(seg, a, ppa, pages);
4265 }
4266 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4267 prot, vpprot);
4268 if (!xhat) {
4269 hat_memload_array_region(hat, a, pgsz,
4270 ppa, prot & vpprot, hat_flag,
4271 svd->rcookie);
4272 } else {
4273 /*
4274 * avoid large xhat mappings to FS
4275 * pages so that hat_page_demote()
4276 * doesn't need to check for xhat
4277 * large mappings.
4278 * Don't use regions with xhats.
4279 */
4280 for (i = 0; i < pages; i++) {
4281 hat_memload(hat,
4282 a + (i << PAGESHIFT),
4283 ppa[i], prot & vpprot,
4284 hat_flag);
4285 }
4286 }
4287
4288 if (!(hat_flag & HAT_LOAD_LOCK)) {
4289 for (i = 0; i < pages; i++) {
4290 page_unlock(ppa[i]);
4291 }
4292 }
4293 if (amp != NULL) {
4294 anon_array_exit(&an_cookie);
4295 ANON_LOCK_EXIT(&amp->a_rwlock);
4296 }
4297 goto next;
4298 }
4299
4300 /*
4301 * See if upsize is possible.
4302 */
4303 if (pszc > szc && szc < seg->s_szc &&
4304 (segvn_anypgsz_vnode || pszc >= seg->s_szc)) {
4305 pgcnt_t aphase;
4306 uint_t pszc1 = MIN(pszc, seg->s_szc);
4320 page_free_replacement_page(pl);
4321 page_create_putback(pages);
4322 }
4323 for (i = 0; i < pages; i++) {
4324 page_unlock(ppa[i]);
4325 }
4326 if (amp != NULL) {
4327 anon_array_exit(&an_cookie);
4328 ANON_LOCK_EXIT(&amp->a_rwlock);
4329 }
4330 pszc = pszc1;
4331 ierr = -2;
4332 break;
4333 }
4334 }
4335
4336 /*
4337 * check if we should use smallest mapping size.
4338 */
4339 upgrdfail = 0;
4340 if (szc == 0 || xhat ||
4341 (pszc >= szc &&
4342 !IS_P2ALIGNED(pfn, pages)) ||
4343 (pszc < szc &&
4344 !segvn_full_szcpages(ppa, szc, &upgrdfail,
4345 &pszc))) {
4346
4347 if (upgrdfail && type != F_SOFTLOCK) {
4348 /*
4349 * segvn_full_szcpages failed to lock
4350 * all pages EXCL. Size down.
4351 */
4352 ASSERT(pszc < szc);
4353
4354 SEGVN_VMSTAT_FLTVNPAGES(33);
4355
4356 if (pplist != NULL) {
4357 page_t *pl = pplist;
4358 page_free_replacement_page(pl);
4359 page_create_putback(pages);
4360 }
4361
4362 for (i = 0; i < pages; i++) {
4363 page_unlock(ppa[i]);
4364 }
4365 if (amp != NULL) {
4366 anon_array_exit(&an_cookie);
4367 ANON_LOCK_EXIT(&amp->a_rwlock);
4368 }
4369 ierr = -1;
4370 break;
4371 }
4372 if (szc != 0 && !xhat && !upgrdfail) {
4373 segvn_faultvnmpss_align_err5++;
4374 }
4375 SEGVN_VMSTAT_FLTVNPAGES(34);
4376 if (pplist != NULL) {
4377 page_free_replacement_page(pplist);
4378 page_create_putback(pages);
4379 }
4380 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4381 prot, vpprot);
4382 if (upgrdfail && segvn_anypgsz_vnode) {
4383 /* SOFTLOCK case */
4384 hat_memload_array_region(hat, a, pgsz,
4385 ppa, prot & vpprot, hat_flag,
4386 svd->rcookie);
4387 } else {
4388 for (i = 0; i < pages; i++) {
4389 hat_memload_region(hat,
4390 a + (i << PAGESHIFT),
4391 ppa[i], prot & vpprot,
4392 hat_flag, svd->rcookie);
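
The sentinel values stored in ierr above drive the retry policy of the outer for (;;) loop; the code that actually adjusts szc and pgsz falls in one of the elided spans, but the convention is visible here: ierr = -1 requests a retry at the next smaller page size ("Size down"), while ierr = -2, together with the pszc handed back through segvn_full_szcpages() or taken from the pages themselves, requests a retry at one specific size. The following compilable sketch reproduces that control-flow pattern; try_map_at_szc() and retry_fault() are hypothetical stand-ins for the inner loop body and the fault routine, not kernel interfaces.

#include <stdio.h>

/*
 * Hypothetical stand-in for the body of the inner mapping loop.
 * Returns 0 on success, -1 to request a retry at the next smaller
 * page size code, or -2 to request a retry at the specific size
 * code stored in *hintp (mirroring how pszc is handed back above).
 */
static int
try_map_at_szc(unsigned int szc, unsigned int *hintp)
{
	if (szc == 1)
		return (0);		/* pretend only szc 1 maps cleanly */
	if (szc == 3) {
		*hintp = 2;
		return (-2);		/* pages already have szc 2: use it */
	}
	return (-1);			/* generic failure: size down */
}

/* Sketch of the outer retry loop built around those sentinels. */
static int
retry_fault(unsigned int szc)
{
	unsigned int hint;

	for (;;) {
		int ierr = try_map_at_szc(szc, &hint);

		if (ierr == 0)
			return (0);	/* translations loaded */
		if (ierr == -2 && hint != szc) {
			szc = hint;	/* retry at the suggested size */
			continue;
		}
		if (szc == 0)
			return (-1);	/* base pages failed: real error */
		szc--;			/* size down and try again */
	}
}

int
main(void)
{
	printf("retry_fault(3) -> %d\n", retry_fault(3));
	return (0);
}

The second copy of the excerpt, below, shows the same code after the xhat special cases are removed: the foreign-HAT check that set xhat, the per-page hat_memload() fallback, and the xhat terms in the mapping-size tests are gone, leaving the unconditional hat_memload_array_region() path.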
3839 u_offset_t off = svd->offset + (uintptr_t)(a - seg->s_base);
3840 ulong_t aindx = svd->anon_index + seg_page(seg, a);
3841 struct vpage *vpage = (svd->vpage != NULL) ?
3842 &svd->vpage[seg_page(seg, a)] : NULL;
3843 vnode_t *vp = svd->vp;
3844 page_t **ppa;
3845 uint_t pszc;
3846 size_t ppgsz;
3847 pgcnt_t ppages;
3848 faultcode_t err = 0;
3849 int ierr;
3850 int vop_size_err = 0;
3851 uint_t protchk, prot, vpprot;
3852 ulong_t i;
3853 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
3854 anon_sync_obj_t an_cookie;
3855 enum seg_rw arw;
3856 int alloc_failed = 0;
3857 int adjszc_chk;
3858 struct vattr va;
3859 page_t *pplist;
3860 pfn_t pfn;
3861 int physcontig;
3862 int upgrdfail;
3863 int segvn_anypgsz_vnode = 0; /* for now map vnode with 2 page sizes */
3864 int tron = (svd->tr_state == SEGVN_TR_ON);
3865
3866 ASSERT(szc != 0);
3867 ASSERT(vp != NULL);
3868 ASSERT(brkcow == 0 || amp != NULL);
3869 ASSERT(tron == 0 || amp != NULL);
3870 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */
3871 ASSERT(!(svd->flags & MAP_NORESERVE));
3872 ASSERT(type != F_SOFTUNLOCK);
3873 ASSERT(IS_P2ALIGNED(a, maxpgsz));
3874 ASSERT(amp == NULL || IS_P2ALIGNED(aindx, maxpages));
3875 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
3876 ASSERT(seg->s_szc < NBBY * sizeof (int));
3877 ASSERT(type != F_SOFTLOCK || lpgeaddr - a == maxpgsz);
3878 ASSERT(svd->tr_state != SEGVN_TR_INIT);
3887 if (svd->pageprot) {
3888 switch (rw) {
3889 case S_READ:
3890 protchk = PROT_READ;
3891 break;
3892 case S_WRITE:
3893 protchk = PROT_WRITE;
3894 break;
3895 case S_EXEC:
3896 protchk = PROT_EXEC;
3897 break;
3898 case S_OTHER:
3899 default:
3900 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
3901 break;
3902 }
3903 } else {
3904 prot = svd->prot;
3905 /* caller has already done segment level protection check. */
3906 }
3907
3908 if (rw == S_WRITE && segtype == MAP_PRIVATE) {
3909 SEGVN_VMSTAT_FLTVNPAGES(2);
3910 arw = S_READ;
3911 } else {
3912 arw = rw;
3913 }
3914
3915 ppa = kmem_alloc(ppasize, KM_SLEEP);
3916
3917 VM_STAT_COND_ADD(amp != NULL, segvnvmstats.fltvnpages[3]);
3918
3919 for (;;) {
3920 adjszc_chk = 0;
3921 for (; a < lpgeaddr; a += pgsz, off += pgsz, aindx += pages) {
3922 if (adjszc_chk) {
3923 while (szc < seg->s_szc) {
3924 uintptr_t e;
3925 uint_t tszc;
3926 tszc = segvn_anypgsz_vnode ? szc + 1 :
3927 seg->s_szc;
4243 off + (i << PAGESHIFT));
4244 }
4245 #endif /* DEBUG */
4246 /*
4247 * All pages are of szc we need and they are
4248 * all locked so they can't change szc. load
4249 * translations.
4250 *
4251 * if page got promoted since last check
4252 * we don't need pplist.
4253 */
4254 if (pplist != NULL) {
4255 page_free_replacement_page(pplist);
4256 page_create_putback(pages);
4257 }
4258 if (PP_ISMIGRATE(ppa[0])) {
4259 page_migrate(seg, a, ppa, pages);
4260 }
4261 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4262 prot, vpprot);
4263 hat_memload_array_region(hat, a, pgsz,
4264 ppa, prot & vpprot, hat_flag,
4265 svd->rcookie);
4266
4267 if (!(hat_flag & HAT_LOAD_LOCK)) {
4268 for (i = 0; i < pages; i++) {
4269 page_unlock(ppa[i]);
4270 }
4271 }
4272 if (amp != NULL) {
4273 anon_array_exit(&an_cookie);
4274 ANON_LOCK_EXIT(&amp->a_rwlock);
4275 }
4276 goto next;
4277 }
4278
4279 /*
4280 * See if upsize is possible.
4281 */
4282 if (pszc > szc && szc < seg->s_szc &&
4283 (segvn_anypgsz_vnode || pszc >= seg->s_szc)) {
4284 pgcnt_t aphase;
4285 uint_t pszc1 = MIN(pszc, seg->s_szc);
4299 page_free_replacement_page(pl);
4300 page_create_putback(pages);
4301 }
4302 for (i = 0; i < pages; i++) {
4303 page_unlock(ppa[i]);
4304 }
4305 if (amp != NULL) {
4306 anon_array_exit(&an_cookie);
4307 ANON_LOCK_EXIT(&amp->a_rwlock);
4308 }
4309 pszc = pszc1;
4310 ierr = -2;
4311 break;
4312 }
4313 }
4314
4315 /*
4316 * check if we should use smallest mapping size.
4317 */
4318 upgrdfail = 0;
4319 if (szc == 0 ||
4320 (pszc >= szc &&
4321 !IS_P2ALIGNED(pfn, pages)) ||
4322 (pszc < szc &&
4323 !segvn_full_szcpages(ppa, szc, &upgrdfail,
4324 &pszc))) {
4325
4326 if (upgrdfail && type != F_SOFTLOCK) {
4327 /*
4328 * segvn_full_szcpages failed to lock
4329 * all pages EXCL. Size down.
4330 */
4331 ASSERT(pszc < szc);
4332
4333 SEGVN_VMSTAT_FLTVNPAGES(33);
4334
4335 if (pplist != NULL) {
4336 page_t *pl = pplist;
4337 page_free_replacement_page(pl);
4338 page_create_putback(pages);
4339 }
4340
4341 for (i = 0; i < pages; i++) {
4342 page_unlock(ppa[i]);
4343 }
4344 if (amp != NULL) {
4345 anon_array_exit(&an_cookie);
4346 ANON_LOCK_EXIT(&amp->a_rwlock);
4347 }
4348 ierr = -1;
4349 break;
4350 }
4351 if (szc != 0 && !upgrdfail) {
4352 segvn_faultvnmpss_align_err5++;
4353 }
4354 SEGVN_VMSTAT_FLTVNPAGES(34);
4355 if (pplist != NULL) {
4356 page_free_replacement_page(pplist);
4357 page_create_putback(pages);
4358 }
4359 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4360 prot, vpprot);
4361 if (upgrdfail && segvn_anypgsz_vnode) {
4362 /* SOFTLOCK case */
4363 hat_memload_array_region(hat, a, pgsz,
4364 ppa, prot & vpprot, hat_flag,
4365 svd->rcookie);
4366 } else {
4367 for (i = 0; i < pages; i++) {
4368 hat_memload_region(hat,
4369 a + (i << PAGESHIFT),
4370 ppa[i], prot & vpprot,
4371 hat_flag, svd->rcookie);
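
In both copies, the "smallest mapping size" test turns on IS_P2ALIGNED(pfn, pages): even when every constituent page already carries a large enough pszc, the large translation can only be loaded if the starting physical frame number is a multiple of the number of base pages the large page spans. The sketch below demonstrates the test using the IS_P2ALIGNED definition from <sys/sysmacros.h>; the 2M-large-page/4K-base-page figures are illustrative assumptions, not values taken from this code.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* From <sys/sysmacros.h>: true when v is aligned to power-of-two a. */
#define	IS_P2ALIGNED(v, a) ((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)

int
main(void)
{
	uintptr_t pfn = 0x2000;	/* hypothetical starting frame number */
	size_t pages = 512;	/* 4K base pages per 2M large page */

	/*
	 * Mirrors the check above: an unaligned pfn forces the
	 * fault code to fall back to base-page translations.
	 */
	if (IS_P2ALIGNED(pfn, pages))
		printf("pfn %#lx can anchor a %zu-page mapping\n",
		    (unsigned long)pfn, pages);
	else
		printf("pfn %#lx forces base-page mappings\n",
		    (unsigned long)pfn);
	return (0);
}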