109 struct seg_pcache *p_hprev;
110 kmutex_t p_hmutex; /* protects hash bucket */
111 };
112
113 /*
114 * A parameter to control a maximum number of bytes that can be
115 * purged from pcache at a time.
116 */
117 #define P_MAX_APURGE_BYTES (1024 * 1024 * 1024)
118
119 /*
120 * log2(fraction of pcache to reclaim at a time).
121 */
122 #define P_SHRINK_SHFT (5)
123
124 /*
125 * The following variables can be tuned via /etc/system.
126 * A value of 0 for the sizing tunables requests a default that is
127 * computed at initialization time (see seg_pinit()).
128 */
127
128 int segpcache_enabled = 1; /* if 1, shadow lists are cached */
129 pgcnt_t segpcache_maxwindow = 0; /* max # of pages that can be cached */
130 ulong_t segpcache_hashsize_win = 0; /* # of non wired buckets */
131 ulong_t segpcache_hashsize_wired = 0; /* # of wired buckets */
132 int segpcache_reap_sec = 1; /* reap check rate in secs */
133 clock_t segpcache_reap_ticks = 0; /* reap interval in ticks */
134 int segpcache_pcp_maxage_sec = 1; /* pcp max age in secs */
135 clock_t segpcache_pcp_maxage_ticks = 0; /* pcp max age in ticks */
136 int segpcache_shrink_shift = P_SHRINK_SHFT; /* log2 reap fraction */
137 pgcnt_t segpcache_maxapurge_bytes = P_MAX_APURGE_BYTES; /* max purge bytes */
138
139 static kmutex_t seg_pcache_mtx; /* protects seg_pdisabled counter */
140 static kmutex_t seg_pasync_mtx; /* protects async thread scheduling */
141 static kcondvar_t seg_pasync_cv; /* NOTE(review): presumably signalled to wake the async reclaim thread -- confirm against the thread code */
142
/*
 * Cache-line align the three hot control structures declared below so
 * they don't false-share with neighbouring data.
 */
143 #pragma align 64(pctrl1)
144 #pragma align 64(pctrl2)
145 #pragma align 64(pctrl3)
146
147 /*
148 * Keep frequently used variables together in one cache line.
149 */
150 static struct p_ctrl1 {
151 uint_t p_disabled; /* if not 0, caching temporarily off */
152 pgcnt_t p_maxwin; /* max # of pages that can be cached */
153 size_t p_hashwin_sz; /* # of non wired buckets */
154 struct seg_phash *p_htabwin; /* hash table for non wired entries */
155 size_t p_hashwired_sz; /* # of wired buckets */
156 struct seg_phash_wired *p_htabwired; /* hash table for wired entries */
157 kmem_cache_t *p_kmcache; /* kmem cache for seg_pcache structs */
158 #ifdef _LP64
159 ulong_t pad[1]; /* pad to a full 64-byte cache line (see #pragma align above) */
160 #endif /* _LP64 */
161 } pctrl1;
162
163 static struct p_ctrl2 {
164 kmutex_t p_mem_mtx; /* protects window counter and p_halinks */
165 pgcnt_t p_locked_win; /* # pages from window */
166 pgcnt_t p_locked; /* # of pages cached by pagelock */
167 uchar_t p_ahcur; /* current active links for insert/delete (index 0 or 1) */
168 uchar_t p_athr_on; /* async reclaim thread is running. */
169 pcache_link_t p_ahhead[2]; /* active buckets linkages */
170 } pctrl2;
171
172 static struct p_ctrl3 {
173 clock_t p_pcp_maxage; /* max pcp age in ticks */
174 ulong_t p_athr_empty_ahb; /* athread walk stats */
175 ulong_t p_athr_full_ahb; /* athread walk stats */
176 pgcnt_t p_maxapurge_npages; /* max pages to purge at a time */
177 int p_shrink_shft; /* reap shift factor */
178 #ifdef _LP64
179 ulong_t pad[3]; /* pad to a full cache line (see #pragma align above) */
180 #endif /* _LP64 */
181 } pctrl3;
182
/*
 * Flat aliases for the cache-line packed pctrl fields above, so the rest
 * of the file can use the historical variable names.
 */
183 #define seg_pdisabled pctrl1.p_disabled
184 #define seg_pmaxwindow pctrl1.p_maxwin
185 #define seg_phashsize_win pctrl1.p_hashwin_sz
186 #define seg_phashtab_win pctrl1.p_htabwin
187 #define seg_phashsize_wired pctrl1.p_hashwired_sz
188 #define seg_phashtab_wired pctrl1.p_htabwired
189 #define seg_pkmcache pctrl1.p_kmcache
190 #define seg_pmem_mtx pctrl2.p_mem_mtx
191 #define seg_plocked_window pctrl2.p_locked_win
192 #define seg_plocked pctrl2.p_locked
193 #define seg_pahcur pctrl2.p_ahcur
194 #define seg_pathr_on pctrl2.p_athr_on
195 #define seg_pahhead pctrl2.p_ahhead
196 #define seg_pmax_pcpage pctrl3.p_pcp_maxage
197 #define seg_pathr_empty_ahb pctrl3.p_athr_empty_ahb
198 #define seg_pathr_full_ahb pctrl3.p_athr_full_ahb
199 #define seg_pshrink_shift pctrl3.p_shrink_shft
200 #define seg_pmaxapurge_npages pctrl3.p_maxapurge_npages
201
/*
 * Hash sizes are forced to powers of two at init time (see the ISP2()
 * checks in seg_pinit()), so size - 1 is a valid bucket mask.
 */
202 #define P_HASHWIN_MASK (seg_phashsize_win - 1)
203 #define P_HASHWIRED_MASK (seg_phashsize_wired - 1)
204 #define P_BASESHIFT (6)
739 seg_pinsert_check(struct seg *seg, struct anon_map *amp, caddr_t addr,
740 size_t len, uint_t flags)
741 {
742 ASSERT(seg != NULL);
743
744 #ifdef DEBUG
/* fault injection: sporadically fail to exercise callers' failure paths */
745 if (p_insert_chk_mtbf && !(gethrtime() % p_insert_chk_mtbf)) {
746 return (SEGP_FAIL);
747 }
748 #endif
749
/* caching temporarily off (pctrl1.p_disabled non zero) */
750 if (seg_pdisabled) {
751 return (SEGP_FAIL);
752 }
753 ASSERT(seg_phashsize_win != 0);
754
/* wired shadow lists are not subject to the window limit below */
755 if (IS_PFLAGS_WIRED(flags)) {
756 return (SEGP_SUCCESS);
757 }
758
/* refuse if caching btop(len) more pages would exceed the max window */
759 if (seg_plocked_window + btop(len) > seg_pmaxwindow) {
760 return (SEGP_FAIL);
761 }
762
/* don't grow pcache when free memory has dropped below desfree */
763 if (freemem < desfree) {
764 return (SEGP_FAIL);
765 }
766
767 return (SEGP_SUCCESS);
768 }
769
770 #ifdef DEBUG
/* DEBUG-only fault-injection knob for the insert path (0 = disabled) */
771 static uint32_t p_insert_mtbf = 0;
772 #endif
773
774 /*
775 * Insert address range with shadow list into pagelock cache if there's no
776 * shadow list already cached for this address range. If the cache is off or
777 * caching is temporarily disabled or the allowed 'window' is exceeded return
778 * SEGP_FAIL. Otherwise return SEGP_SUCCESS.
779 *
780 * For non wired shadow lists (segvn case) include address in the hashing
781 * function to avoid linking all the entries from the same segment or amp on
782 * the same bucket. amp is used instead of seg if amp is not NULL. Non wired
812 ASSERT(rw == S_READ || rw == S_WRITE);
813 ASSERT(rw == S_READ || wlen == len);
814 ASSERT(rw == S_WRITE || wlen <= len);
815 ASSERT(amp == NULL || wlen == len);
816
817 #ifdef DEBUG
/* fault injection: sporadically fail inserts in DEBUG kernels */
818 if (p_insert_mtbf && !(gethrtime() % p_insert_mtbf)) {
819 return (SEGP_FAIL);
820 }
821 #endif
822
823 if (seg_pdisabled) {
824 return (SEGP_FAIL);
825 }
826 ASSERT(seg_phashsize_win != 0);
827
828 ASSERT((len & PAGEOFFSET) == 0);
829 npages = btop(len);
/*
 * Commit the page counters under seg_pmem_mtx before the potentially
 * blocking KM_SLEEP allocation below; the non wired window limit is
 * checked under the lock before the counters are updated.
 */
830 mutex_enter(&seg_pmem_mtx);
831 if (!IS_PFLAGS_WIRED(flags)) {
832 if (seg_plocked_window + npages > seg_pmaxwindow) {
833 mutex_exit(&seg_pmem_mtx);
834 return (SEGP_FAIL);
835 }
836 seg_plocked_window += npages;
837 }
838 seg_plocked += npages;
839 mutex_exit(&seg_pmem_mtx);
840
841 pcp = kmem_cache_alloc(seg_pkmcache, KM_SLEEP);
842 /*
843 * If amp is not NULL set htag0 to amp otherwise set it to seg.
844 */
845 if (amp == NULL) {
846 pcp->p_htag0 = (void *)seg;
/* low 16 bits carry the caller's flags; SEGP_AMP marks amp-tagged entries */
847 pcp->p_flags = flags & 0xffff;
848 } else {
849 pcp->p_htag0 = (void *)amp;
850 pcp->p_flags = (flags & 0xffff) | SEGP_AMP;
851 }
852 pcp->p_addr = addr;
853 pcp->p_len = len;
854 pcp->p_wlen = wlen;
855 pcp->p_pp = pp;
931
932 /*
933 * purge entries from the pagelock cache if not active
934 * and not recently used.
935 *
936 * force != 0 additionally walks the wired hash table (see the else
937 * branch below); force == 0 applies the memory-pressure policy first.
938 */
936 static void
937 seg_ppurge_async(int force)
938 {
939 struct seg_pcache *delcallb_list = NULL;
940 struct seg_pcache *pcp;
941 struct seg_phash *hp;
942 pgcnt_t npages = 0;
943 pgcnt_t npages_window = 0;
944 pgcnt_t npgs_to_purge;
945 pgcnt_t npgs_purged = 0;
946 int hlinks = 0;
947 int hlix;
948 pcache_link_t *hlinkp;
949 pcache_link_t *hlnextp = NULL;
950 int lowmem;
951 int trim;
952
953 ASSERT(seg_phashsize_win != 0);
954
955 /*
956 * if the cache is off or empty, return
957 */
958 if (seg_plocked == 0 || (!force && seg_plocked_window == 0)) {
959 return;
960 }
961
962 if (!force) {
963 lowmem = 0;
964 trim = 0;
/*
 * Decide whether we are under memory pressure (lowmem) using
 * progressively weaker freemem thresholds paired with
 * progressively larger window occupancy requirements:
 * fmem <= 1.25 * desfree always counts as lowmem; up to 7/8 of
 * lotsfree requires the window to hold >= 1/2 of initial
 * available memory; below lotsfree requires >= 3/4.
 */
965 if (freemem < lotsfree + needfree) {
966 spgcnt_t fmem = MAX((spgcnt_t)(freemem - needfree), 0);
967 if (fmem <= 5 * (desfree >> 2)) {
968 lowmem = 1;
969 } else if (fmem <= 7 * (lotsfree >> 3)) {
970 if (seg_plocked_window >=
971 (availrmem_initial >> 1)) {
972 lowmem = 1;
973 }
974 } else if (fmem < lotsfree) {
975 if (seg_plocked_window >=
976 3 * (availrmem_initial >> 2)) {
977 lowmem = 1;
978 }
979 }
980 }
/* trim when the window is at 7/8 of its configured maximum */
981 if (seg_plocked_window >= 7 * (seg_pmaxwindow >> 3)) {
982 trim = 1;
983 }
984 if (!lowmem && !trim) {
985 return;
986 }
/* reclaim 1/2^seg_pshrink_shift of the window per pass ... */
987 npgs_to_purge = seg_plocked_window >>
988 seg_pshrink_shift;
/* ... but never more than the configured purge cap (lowmem may
 * raise the cap to desfree pages) */
989 if (lowmem) {
990 npgs_to_purge = MIN(npgs_to_purge,
991 MAX(seg_pmaxapurge_npages, desfree));
992 } else {
993 npgs_to_purge = MIN(npgs_to_purge,
994 seg_pmaxapurge_npages);
995 }
996 if (npgs_to_purge == 0) {
997 return;
998 }
999 } else {
1000 struct seg_phash_wired *hpw;
1001
1002 ASSERT(seg_phashsize_wired != 0);
1003
1004 for (hpw = seg_phashtab_wired;
1092 plinkp->p_lnext->p_lprev =
1093 plinkp->p_lprev;
/* unlink the entry from its hash bucket */
1094 pcp->p_hprev->p_hnext = pcp->p_hnext;
1095 pcp->p_hnext->p_hprev = pcp->p_hprev;
1096 mutex_exit(pmtx);
/* chain reclaimed entries through p_hprev for the callback pass below */
1097 pcp->p_hprev = delcallb_list;
1098 delcallb_list = pcp;
1099 npgs_purged += btop(pcp->p_len);
1100 }
/* bucket emptied (sentinel points at itself): drop it from the active list */
1101 if (hp->p_hnext == (struct seg_pcache *)hp) {
1102 seg_premove_abuck(hp, 1);
1103 }
1104 mutex_exit(&hp->p_hmutex);
/* nothing left in the window to reclaim */
1105 if (npgs_purged >= seg_plocked_window) {
1106 break;
1107 }
1108 if (!force) {
1109 if (npgs_purged >= npgs_to_purge) {
1110 break;
1111 }
/* in pure lowmem mode, re-check free memory every 16 buckets walked */
1112 if (!trim && !(seg_pathr_full_ahb & 15)) {
1113 ASSERT(lowmem);
1114 if (freemem >= lotsfree + needfree) {
1115 break;
1116 }
1117 }
1118 }
1119 }
1120
1121 if (hlinkp == &seg_pahhead[hlix]) {
1122 /*
1123 * We processed the entire hlix active bucket list
1124 * but didn't find enough pages to reclaim.
1125 * Switch the lists and walk the other list
1126 * if we haven't done it yet.
1127 */
1128 mutex_enter(&seg_pmem_mtx);
1129 ASSERT(seg_pathr_on);
1130 ASSERT(seg_pahcur == !hlix);
1131 seg_pahcur = hlix;
1132 mutex_exit(&seg_pmem_mtx);
/* default: 8 wired buckets per MB of memory, at least 1024, capped at 256K */
1453 if (physmegs < 20 * 1024) {
1454 segpcache_hashsize_wired = MAX(1024, physmegs << 3);
1455 } else {
1456 segpcache_hashsize_wired = 256 * 1024;
1457 }
1458 }
/* round up to a power of two so P_HASHWIRED_MASK is a valid mask */
1459 if (!ISP2(segpcache_hashsize_wired)) {
1460 segpcache_hashsize_wired = 1 <<
1461 highbit(segpcache_hashsize_wired);
1462 }
1463 seg_phashsize_wired = segpcache_hashsize_wired;
1464 seg_phashtab_wired = kmem_zalloc(
1465 seg_phashsize_wired * sizeof (struct seg_phash_wired), KM_SLEEP);
/* each empty bucket's list head points at itself (circular sentinel) */
1466 for (i = 0; i < seg_phashsize_wired; i++) {
1467 hp = (struct seg_phash *)&seg_phashtab_wired[i];
1468 hp->p_hnext = (struct seg_pcache *)hp;
1469 hp->p_hprev = (struct seg_pcache *)hp;
1470 mutex_init(&hp->p_hmutex, NULL, MUTEX_DEFAULT, NULL);
1471 }
1472
/* scale the default non wired window with physical memory size */
1473 if (segpcache_maxwindow == 0) {
1474 if (physmegs < 64) {
1475 /* 3% of memory */
1476 segpcache_maxwindow = availrmem >> 5;
1477 } else if (physmegs < 512) {
1478 /* 12% of memory */
1479 segpcache_maxwindow = availrmem >> 3;
1480 } else if (physmegs < 1024) {
1481 /* 25% of memory */
1482 segpcache_maxwindow = availrmem >> 2;
1483 } else if (physmegs < 2048) {
1484 /* 50% of memory */
1485 segpcache_maxwindow = availrmem >> 1;
1486 } else {
1487 /* no limit */
1488 segpcache_maxwindow = (pgcnt_t)-1;
1489 }
1490 }
1491 seg_pmaxwindow = segpcache_maxwindow;
1492 seg_pinit_mem_config();
1493 }
1494
1495 /*
1496 * called by pageout if memory is low
1497 */
1498 void
1499 seg_preap(void)
1500 {
1501 /*
1502 * if the cache is off or empty, return
1503 * (only the non wired window is reaped here)
1504 */
1504 if (seg_plocked_window == 0) {
1505 return;
1506 }
1507 ASSERT(seg_phashsize_win != 0);
1508
1509 /*
1510 * If somebody is already purging pcache
1511 * just return.
|
109 struct seg_pcache *p_hprev;
110 kmutex_t p_hmutex; /* protects hash bucket */
111 };
112
113 /*
114 * A parameter to control a maximum number of bytes that can be
115 * purged from pcache at a time.
116 */
117 #define P_MAX_APURGE_BYTES (1024 * 1024 * 1024)
118
119 /*
120 * log2(fraction of pcache to reclaim at a time).
121 */
122 #define P_SHRINK_SHFT (5)
123
124 /*
125 * The following variables can be tuned via /etc/system.
126 * A value of 0 for the sizing tunables requests a default that is
127 * computed at initialization time (see seg_pinit()).
128 */
127
128 int segpcache_enabled = 1; /* if 1, shadow lists are cached */
129 ulong_t segpcache_hashsize_win = 0; /* # of non wired buckets */
130 ulong_t segpcache_hashsize_wired = 0; /* # of wired buckets */
131 int segpcache_reap_sec = 1; /* reap check rate in secs */
132 clock_t segpcache_reap_ticks = 0; /* reap interval in ticks */
133 int segpcache_pcp_maxage_sec = 1; /* pcp max age in secs */
134 clock_t segpcache_pcp_maxage_ticks = 0; /* pcp max age in ticks */
135 int segpcache_shrink_shift = P_SHRINK_SHFT; /* log2 reap fraction */
136 pgcnt_t segpcache_maxapurge_bytes = P_MAX_APURGE_BYTES; /* max purge bytes */
137
138 static kmutex_t seg_pcache_mtx; /* protects seg_pdisabled counter */
139 static kmutex_t seg_pasync_mtx; /* protects async thread scheduling */
140 static kcondvar_t seg_pasync_cv; /* NOTE(review): presumably signalled to wake the async reclaim thread -- confirm against the thread code */
141
/*
 * Cache-line align the three hot control structures declared below.
 */
142 #pragma align 64(pctrl1)
143 #pragma align 64(pctrl2)
144 #pragma align 64(pctrl3)
145
146 /*
147 * Keep frequently used variables together in one cache line.
148 */
149 static struct p_ctrl1 {
150 uint_t p_disabled; /* if not 0, caching temporarily off */
151 size_t p_hashwin_sz; /* # of non wired buckets */
152 struct seg_phash *p_htabwin; /* hash table for non wired entries */
153 size_t p_hashwired_sz; /* # of wired buckets */
154 struct seg_phash_wired *p_htabwired; /* hash table for wired entries */
155 kmem_cache_t *p_kmcache; /* kmem cache for seg_pcache structs */
156 #ifdef _LP64
157 ulong_t pad[2]; /* pad to a full 64-byte cache line (see #pragma align above) */
158 #endif /* _LP64 */
159 } pctrl1;
160
161 static struct p_ctrl2 {
162 kmutex_t p_mem_mtx; /* protects window counter and p_halinks */
163 pgcnt_t p_locked_win; /* # pages from window */
164 pgcnt_t p_locked; /* # of pages cached by pagelock */
165 uchar_t p_ahcur; /* current active links for insert/delete (index 0 or 1) */
166 uchar_t p_athr_on; /* async reclaim thread is running. */
167 pcache_link_t p_ahhead[2]; /* active buckets linkages */
168 } pctrl2;
169
170 static struct p_ctrl3 {
171 clock_t p_pcp_maxage; /* max pcp age in ticks */
172 ulong_t p_athr_empty_ahb; /* athread walk stats */
173 ulong_t p_athr_full_ahb; /* athread walk stats */
174 pgcnt_t p_maxapurge_npages; /* max pages to purge at a time */
175 int p_shrink_shft; /* reap shift factor */
176 #ifdef _LP64
177 ulong_t pad[3]; /* pad to a full cache line (see #pragma align above) */
178 #endif /* _LP64 */
179 } pctrl3;
180
/*
 * Flat aliases for the cache-line packed pctrl fields above, so the rest
 * of the file can use the historical variable names.
 */
181 #define seg_pdisabled pctrl1.p_disabled
182 #define seg_phashsize_win pctrl1.p_hashwin_sz
183 #define seg_phashtab_win pctrl1.p_htabwin
184 #define seg_phashsize_wired pctrl1.p_hashwired_sz
185 #define seg_phashtab_wired pctrl1.p_htabwired
186 #define seg_pkmcache pctrl1.p_kmcache
187 #define seg_pmem_mtx pctrl2.p_mem_mtx
188 #define seg_plocked_window pctrl2.p_locked_win
189 #define seg_plocked pctrl2.p_locked
190 #define seg_pahcur pctrl2.p_ahcur
191 #define seg_pathr_on pctrl2.p_athr_on
192 #define seg_pahhead pctrl2.p_ahhead
193 #define seg_pmax_pcpage pctrl3.p_pcp_maxage
194 #define seg_pathr_empty_ahb pctrl3.p_athr_empty_ahb
195 #define seg_pathr_full_ahb pctrl3.p_athr_full_ahb
196 #define seg_pshrink_shift pctrl3.p_shrink_shft
197 #define seg_pmaxapurge_npages pctrl3.p_maxapurge_npages
198
/*
 * Hash sizes are forced to powers of two at init time (see the ISP2()
 * checks in seg_pinit()), so size - 1 is a valid bucket mask.
 */
199 #define P_HASHWIN_MASK (seg_phashsize_win - 1)
200 #define P_HASHWIRED_MASK (seg_phashsize_wired - 1)
201 #define P_BASESHIFT (6)
736 seg_pinsert_check(struct seg *seg, struct anon_map *amp, caddr_t addr,
737 size_t len, uint_t flags)
738 {
739 ASSERT(seg != NULL);
740
741 #ifdef DEBUG
/* fault injection: sporadically fail to exercise callers' failure paths */
742 if (p_insert_chk_mtbf && !(gethrtime() % p_insert_chk_mtbf)) {
743 return (SEGP_FAIL);
744 }
745 #endif
746
/* caching temporarily off (pctrl1.p_disabled non zero) */
747 if (seg_pdisabled) {
748 return (SEGP_FAIL);
749 }
750 ASSERT(seg_phashsize_win != 0);
751
/* wired shadow lists are always accepted */
752 if (IS_PFLAGS_WIRED(flags)) {
753 return (SEGP_SUCCESS);
754 }
755
/* don't grow pcache when free memory has dropped below desfree */
756 if (freemem < desfree) {
757 return (SEGP_FAIL);
758 }
759
760 return (SEGP_SUCCESS);
761 }
762
763 #ifdef DEBUG
/* DEBUG-only fault-injection knob for the insert path (0 = disabled) */
764 static uint32_t p_insert_mtbf = 0;
765 #endif
766
767 /*
768 * Insert address range with shadow list into pagelock cache if there's no
769 * shadow list already cached for this address range. If the cache is off or
770 * caching is temporarily disabled or the allowed 'window' is exceeded return
771 * SEGP_FAIL. Otherwise return SEGP_SUCCESS.
772 *
773 * For non wired shadow lists (segvn case) include address in the hashing
774 * function to avoid linking all the entries from the same segment or amp on
775 * the same bucket. amp is used instead of seg if amp is not NULL. Non wired
805 ASSERT(rw == S_READ || rw == S_WRITE);
806 ASSERT(rw == S_READ || wlen == len);
807 ASSERT(rw == S_WRITE || wlen <= len);
808 ASSERT(amp == NULL || wlen == len);
809
810 #ifdef DEBUG
/* fault injection: sporadically fail inserts in DEBUG kernels */
811 if (p_insert_mtbf && !(gethrtime() % p_insert_mtbf)) {
812 return (SEGP_FAIL);
813 }
814 #endif
815
816 if (seg_pdisabled) {
817 return (SEGP_FAIL);
818 }
819 ASSERT(seg_phashsize_win != 0);
820
821 ASSERT((len & PAGEOFFSET) == 0);
822 npages = btop(len);
/*
 * NOTE(review): the window counter grows here without any upper bound
 * check under the lock -- confirm a limit is enforced elsewhere (the
 * seg_pinsert_check() above only tests freemem, not window occupancy).
 */
823 mutex_enter(&seg_pmem_mtx);
824 if (!IS_PFLAGS_WIRED(flags)) {
825 seg_plocked_window += npages;
826 }
827 seg_plocked += npages;
828 mutex_exit(&seg_pmem_mtx);
829
830 pcp = kmem_cache_alloc(seg_pkmcache, KM_SLEEP);
831 /*
832 * If amp is not NULL set htag0 to amp otherwise set it to seg.
833 */
834 if (amp == NULL) {
835 pcp->p_htag0 = (void *)seg;
/* low 16 bits carry the caller's flags; SEGP_AMP marks amp-tagged entries */
836 pcp->p_flags = flags & 0xffff;
837 } else {
838 pcp->p_htag0 = (void *)amp;
839 pcp->p_flags = (flags & 0xffff) | SEGP_AMP;
840 }
841 pcp->p_addr = addr;
842 pcp->p_len = len;
843 pcp->p_wlen = wlen;
844 pcp->p_pp = pp;
920
921 /*
922 * purge entries from the pagelock cache if not active
923 * and not recently used.
924 *
925 * force != 0 additionally walks the wired hash table (see the else
926 * branch below); force == 0 applies the memory-pressure policy first.
927 */
925 static void
926 seg_ppurge_async(int force)
927 {
928 struct seg_pcache *delcallb_list = NULL;
929 struct seg_pcache *pcp;
930 struct seg_phash *hp;
931 pgcnt_t npages = 0;
932 pgcnt_t npages_window = 0;
933 pgcnt_t npgs_to_purge;
934 pgcnt_t npgs_purged = 0;
935 int hlinks = 0;
936 int hlix;
937 pcache_link_t *hlinkp;
938 pcache_link_t *hlnextp = NULL;
939 int lowmem;
940
941 ASSERT(seg_phashsize_win != 0);
942
943 /*
944 * if the cache is off or empty, return
945 */
946 if (seg_plocked == 0 || (!force && seg_plocked_window == 0)) {
947 return;
948 }
949
950 if (!force) {
951 lowmem = 0;
/*
 * Decide whether we are under memory pressure using progressively
 * weaker freemem thresholds paired with progressively larger
 * window occupancy requirements.
 */
952 if (freemem < lotsfree + needfree) {
953 spgcnt_t fmem = MAX((spgcnt_t)(freemem - needfree), 0);
954 if (fmem <= 5 * (desfree >> 2)) {
955 lowmem = 1;
956 } else if (fmem <= 7 * (lotsfree >> 3)) {
957 if (seg_plocked_window >=
958 (availrmem_initial >> 1)) {
959 lowmem = 1;
960 }
961 } else if (fmem < lotsfree) {
962 if (seg_plocked_window >=
963 3 * (availrmem_initial >> 2)) {
964 lowmem = 1;
965 }
966 }
967 }
968 if (!lowmem) {
969 return;
970 }
/* reclaim 1/2^seg_pshrink_shift of the window, capped below */
971 npgs_to_purge = seg_plocked_window >>
972 seg_pshrink_shift;
/*
 * NOTE(review): lowmem is always non-zero at this point (the !lowmem
 * case returned just above), so the else branch below is unreachable
 * dead code.
 */
973 if (lowmem) {
974 npgs_to_purge = MIN(npgs_to_purge,
975 MAX(seg_pmaxapurge_npages, desfree));
976 } else {
977 npgs_to_purge = MIN(npgs_to_purge,
978 seg_pmaxapurge_npages);
979 }
980 if (npgs_to_purge == 0) {
981 return;
982 }
983 } else {
984 struct seg_phash_wired *hpw;
985
986 ASSERT(seg_phashsize_wired != 0);
987
988 for (hpw = seg_phashtab_wired;
1076 plinkp->p_lnext->p_lprev =
1077 plinkp->p_lprev;
/* unlink the entry from its hash bucket */
1078 pcp->p_hprev->p_hnext = pcp->p_hnext;
1079 pcp->p_hnext->p_hprev = pcp->p_hprev;
1080 mutex_exit(pmtx);
/* chain reclaimed entries through p_hprev for the callback pass below */
1081 pcp->p_hprev = delcallb_list;
1082 delcallb_list = pcp;
1083 npgs_purged += btop(pcp->p_len);
1084 }
/* bucket emptied (sentinel points at itself): drop it from the active list */
1085 if (hp->p_hnext == (struct seg_pcache *)hp) {
1086 seg_premove_abuck(hp, 1);
1087 }
1088 mutex_exit(&hp->p_hmutex);
/* nothing left in the window to reclaim */
1089 if (npgs_purged >= seg_plocked_window) {
1090 break;
1091 }
1092 if (!force) {
1093 if (npgs_purged >= npgs_to_purge) {
1094 break;
1095 }
/* re-check free memory every 16 buckets walked */
1096 if (!(seg_pathr_full_ahb & 15)) {
1097 ASSERT(lowmem);
1098 if (freemem >= lotsfree + needfree) {
1099 break;
1100 }
1101 }
1102 }
1103 }
1104
1105 if (hlinkp == &seg_pahhead[hlix]) {
1106 /*
1107 * We processed the entire hlix active bucket list
1108 * but didn't find enough pages to reclaim.
1109 * Switch the lists and walk the other list
1110 * if we haven't done it yet.
1111 */
1112 mutex_enter(&seg_pmem_mtx);
1113 ASSERT(seg_pathr_on);
1114 ASSERT(seg_pahcur == !hlix);
1115 seg_pahcur = hlix;
1116 mutex_exit(&seg_pmem_mtx);
/* default: 8 wired buckets per MB of memory, at least 1024, capped at 256K */
1437 if (physmegs < 20 * 1024) {
1438 segpcache_hashsize_wired = MAX(1024, physmegs << 3);
1439 } else {
1440 segpcache_hashsize_wired = 256 * 1024;
1441 }
1442 }
/* round up to a power of two so P_HASHWIRED_MASK is a valid mask */
1443 if (!ISP2(segpcache_hashsize_wired)) {
1444 segpcache_hashsize_wired = 1 <<
1445 highbit(segpcache_hashsize_wired);
1446 }
1447 seg_phashsize_wired = segpcache_hashsize_wired;
1448 seg_phashtab_wired = kmem_zalloc(
1449 seg_phashsize_wired * sizeof (struct seg_phash_wired), KM_SLEEP);
/* each empty bucket's list head points at itself (circular sentinel) */
1450 for (i = 0; i < seg_phashsize_wired; i++) {
1451 hp = (struct seg_phash *)&seg_phashtab_wired[i];
1452 hp->p_hnext = (struct seg_pcache *)hp;
1453 hp->p_hprev = (struct seg_pcache *)hp;
1454 mutex_init(&hp->p_hmutex, NULL, MUTEX_DEFAULT, NULL);
1455 }
1456
1457 seg_pinit_mem_config();
1458 }
1459
1460 /*
1461 * called by pageout if memory is low
1462 */
1463 void
1464 seg_preap(void)
1465 {
1466 /*
1467 * if the cache is off or empty, return
1468 * (only the non wired window is reaped here)
1469 */
1469 if (seg_plocked_window == 0) {
1470 return;
1471 }
1472 ASSERT(seg_phashsize_win != 0);
1473
1474 /*
1475 * If somebody is already purging pcache
1476 * just return.
|