		case S_WRITE:
			protchk = PROT_WRITE;
			break;
		case S_EXEC:
			protchk = PROT_EXEC;
			break;
		case S_OTHER:
		default:
			protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
			break;
		}

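		/*
		 * With per-page protections, the vpage entry governs this
		 * page; otherwise the segment-wide protections apply.
		 */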
		prot = VPP_PROT(vpage);
		if ((prot & protchk) == 0)
			return (FC_PROT);	/* illegal access type */
	} else {
		prot = svd->prot;
	}

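	/*
	 * F_SOFTLOCK pins the translation until a matching F_SOFTUNLOCK;
	 * the count lets unmap and the AS_UNMAPWAIT logic wait for the
	 * segment's softlocks to drain.
	 */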
	if (type == F_SOFTLOCK) {
		atomic_inc_ulong((ulong_t *)&svd->softlockcnt);
	}

	/*
	 * Always acquire the anon array lock to prevent 2 threads from
	 * allocating separate anon slots for the same "addr".
	 */

	if ((amp = svd->amp) != NULL) {
		ASSERT(RW_READ_HELD(&amp->a_rwlock));
		anon_index = svd->anon_index + seg_page(seg, addr);
		anon_array_enter(amp, anon_index, &cookie);
		anon_lock = 1;
	}

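	/*
	 * No backing vnode but an anon map: the fault is on a page of an
	 * anonymous segment, so the anon slot (if any) holds the page.
	 */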
	if (svd->vp == NULL && amp != NULL) {
		if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) {
			/*
			 * Allocate a (normally) writable anonymous page of
			 * zeroes. If no advance reservations, reserve now.
			 */
			/* ... lines 2727-3046 omitted ... */
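		/*
		 * On a write, note the modification now since the page is
		 * mapped writable; on a read of a clean page, strip
		 * PROT_WRITE from the mapping so the first write faults
		 * and the mod bit gets set.
		 */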
		if (rw == S_WRITE)
			hat_setmod(pp);
		else if (!hat_ismod(pp))
			prot &= ~PROT_WRITE;
	}

	ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
	hat_memload(hat, addr, pp, prot, hat_flag);

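	/*
	 * Unless the fault is F_SOFTLOCK (HAT_LOAD_LOCK), the page lock
	 * is no longer needed once the translation is loaded.
	 */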
	if (!(hat_flag & HAT_LOAD_LOCK))
		page_unlock(pp);

	ASSERT(anon_lock);
	anon_array_exit(&cookie);
	return (0);
out:
	if (anon_lock)
		anon_array_exit(&cookie);

	if (type == F_SOFTLOCK) {
		atomic_dec_ulong((ulong_t *)&svd->softlockcnt);
	}
	return (FC_MAKE_ERR(err));
}

/*
 * Relocate a group of smaller targ pages into one large repl page. All targ
 * pages must be complete pages smaller than the replacement page.
 * It's assumed that no page's szc can change since they are all PAGESIZE or
 * complete large pages locked SHARED.
 */
static void
segvn_relocate_pages(page_t **targ, page_t *replacement)
{
	page_t *pp;
	pgcnt_t repl_npgs, curnpgs;
	pgcnt_t i;
	uint_t repl_szc = replacement->p_szc;
	page_t *first_repl = replacement;
	page_t *repl;
	spgcnt_t npgs;

	/* ... lines 3088-8874 omitted (rest of this function and several others) ... */
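	/*
	 * Unlock path of segvn_pagelock(): locked pages are either handed
	 * straight to the reclaim callback or marked inactive in the
	 * per-AS page cache via seg_pinactive(), leaving them for
	 * seg_pasync_thread to reclaim later.
	 */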
			paddr = addr;
		}
		(*preclaim_callback)(ptag, paddr, len, *ppp, rw, 0);
	} else {
		ASSERT((*ppp)[npages] == PCACHE_SHWLIST ||
		    IS_SWAPFSVP((*ppp)[npages]->p_vnode));
		len = lpgeaddr - lpgaddr;
		npages = btop(len);
		seg_pinactive(seg, pamp, paddr, len,
		    *ppp - adjustpages, rw, pflags, preclaim_callback);
	}

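	/*
	 * For a shared amp the segment softlock count was charged when
	 * these pages were locked; release the same number of holds now.
	 */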
	if (pamp != NULL) {
		ASSERT(svd->type == MAP_SHARED);
		ASSERT(svd->softlockcnt >= npages);
		atomic_add_long((ulong_t *)&svd->softlockcnt, -npages);
	}

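	/*
	 * softlockcnt_sbase and softlockcnt_send track pagelock requests
	 * that cover the segment's first and last pages; this unlock
	 * retires one of each kind it charged.
	 */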
	if (sftlck_sbase) {
		ASSERT(svd->softlockcnt_sbase > 0);
		atomic_dec_ulong((ulong_t *)&svd->softlockcnt_sbase);
	}
	if (sftlck_send) {
		ASSERT(svd->softlockcnt_send > 0);
		atomic_dec_ulong((ulong_t *)&svd->softlockcnt_send);
	}

	/*
	 * If someone is blocked while unmapping, we purge the segment
	 * page cache and thus reclaim the pplist synchronously without
	 * waiting for seg_pasync_thread. This speeds up unmapping in
	 * cases where munmap(2) is called while raw async i/o is still
	 * in progress, or where a thread exits on a data fault in a
	 * multithreaded application.
	 */
	if (AS_ISUNMAPWAIT(seg->s_as)) {
		if (svd->softlockcnt == 0) {
			mutex_enter(&seg->s_as->a_contents);
			if (AS_ISUNMAPWAIT(seg->s_as)) {
				AS_CLRUNMAPWAIT(seg->s_as);
				cv_broadcast(&seg->s_as->a_cv);
			}
			mutex_exit(&seg->s_as->a_contents);
		} else if (pamp == NULL) {
			/* ... lines 8919-8975 omitted ... */
				if ((VPP_PROT(vp) & protchk) == 0) {
					error = EACCES;
					goto out;
				}
			}
		}
	}

	/*
	 * Try to find the pages in the segment page cache.
	 */
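	/*
	 * A hit returns a list of pages still locked from a previous
	 * pagelock call; only the softlock accounting needs to be
	 * re-charged before handing the list back.
	 */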
	pplist = seg_plookup(seg, pamp, paddr, lpgeaddr - lpgaddr, rw, pflags);
	if (pplist != NULL) {
		if (pamp != NULL) {
			npages = btop((uintptr_t)(lpgeaddr - lpgaddr));
			ASSERT(svd->type == MAP_SHARED);
			atomic_add_long((ulong_t *)&svd->softlockcnt,
			    npages);
		}
		if (sftlck_sbase) {
			atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase);
		}
		if (sftlck_send) {
			atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send);
		}
		SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
		*ppp = pplist + adjustpages;
		TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_HIT_END,
		    "segvn_pagelock: cache hit seg %p addr %p", seg, addr);
		return (0);
	}

	/*
	 * For MAP_SHARED segments we already verified above that segment
	 * protections allow this pagelock operation.
	 */
	if (pamp == NULL) {
		ASSERT(svd->type == MAP_PRIVATE);
		if (svd->pageprot == 0) {
			if ((svd->prot & protchk) == 0) {
				error = EACCES;
				goto out;
			}
			if (svd->prot & PROT_WRITE) {
				/* ... lines 9020-9168 omitted ... */
			ASSERT(anlock);
			anon_array_exit(&cookie);
			anlock = 0;
		}
		*pplist++ = pp;
	}
	if (anlock) {		/* Ensure the lock is dropped */
		anon_array_exit(&cookie);
	}
	ANON_LOCK_EXIT(&amp->a_rwlock);

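	/*
	 * The whole range [addr, addr + len) was locked: charge the
	 * softlock counters, optionally insert the list into the page
	 * cache, and return it to the caller.
	 */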
	if (a >= addr + len) {
		atomic_add_long((ulong_t *)&svd->softlockcnt, npages);
		if (pamp != NULL) {
			ASSERT(svd->type == MAP_SHARED);
			atomic_add_long((ulong_t *)&pamp->a_softlockcnt,
			    npages);
			wlen = len;
		}
		if (sftlck_sbase) {
			atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase);
		}
		if (sftlck_send) {
			atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send);
		}
		if (use_pcache) {
			(void) seg_pinsert(seg, pamp, paddr, len, wlen, pl,
			    rw, pflags, preclaim_callback);
		}
		SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
		TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_FILL_END,
		    "segvn_pagelock: cache fill seg %p addr %p", seg, addr);
		return (0);
	}

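	/*
	 * The loop stopped short of addr + len: unlock the pages locked
	 * so far and free the page list before taking the error path.
	 */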
	pplist = pl;
	np = ((uintptr_t)(a - addr)) >> PAGESHIFT;
	while (np > (uint_t)0) {
		ASSERT(PAGE_LOCKED(*pplist));
		page_unlock(*pplist);
		np--;
		pplist++;
	}
	kmem_free(pl, sizeof (page_t *) * (npages + 1));