76 * Private seg op routines
77 */
/* seg ops implemented by this driver (installed in segkp_ops below) */
static void	segkp_dump(struct seg *seg);
static int	segkp_checkprot(struct seg *seg, caddr_t addr, size_t len,
			uint_t prot);
static int	segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
static int	segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
			struct page ***page, enum lock_type type,
			enum seg_rw rw);
/* hash-table maintenance for segkp_data entries (hash protected by segkp_lock) */
static void	segkp_insert(struct seg *seg, struct segkp_data *kpd);
static void	segkp_delete(struct seg *seg, struct segkp_data *kpd);
/* common allocation/release paths shared by the public segkp entry points */
static caddr_t	segkp_get_internal(struct seg *seg, size_t len, uint_t flags,
			struct segkp_data **tkpd, struct anon_map *amp);
static void	segkp_release_internal(struct seg *seg,
			struct segkp_data *kpd, size_t len);
/* HAT-level load/unlock helpers for a resource's pages */
static int	segkp_unlock(struct hat *hat, struct seg *seg, caddr_t vaddr,
			size_t len, struct segkp_data *kpd, uint_t flags);
static int	segkp_load(struct hat *hat, struct seg *seg, caddr_t vaddr,
			size_t len, struct segkp_data *kpd, uint_t flags);
/* look up the segkp_data covering vaddr, if any */
static struct segkp_data *segkp_find(struct seg *seg, caddr_t vaddr);
static int	segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
97
/*
 * Lock used to protect the hash table(s) and caches.
 */
static kmutex_t	segkp_lock;

/*
 * The segkp caches (indexed up to SEGKP_MAX_CACHE entries).
 */
static struct segkp_cache segkp_cache[SEGKP_MAX_CACHE];

/*
 * When there are fewer than red_minavail bytes left on the stack,
 * segkp_map_red() will map in the redzone (if called).  5000 seems
 * to work reasonably well...  Tunable (not static) so it can be
 * adjusted from the debugger or /etc/system.
 */
long		red_minavail = 5000;
114
/*
 * will be set to 1 for 32 bit x86 systems only, in startup.c
 *
 * [NOTE(review): source lines appear to be missing here — the fragment
 * below belongs to a deep-stack diagnostic: when very little stack
 * remains (see RED_DEEP_THRESHOLD), the time and the calling thread
 * are recorded]
 * in red_deep_hires and red_deep_thread respectively.
 */
/* stack-headroom threshold (bytes) for the red_deep_* diagnostics */
#define	RED_DEEP_THRESHOLD	2000

hrtime_t	red_deep_hires;		/* hi-res time of deep-stack event */
kthread_t	*red_deep_thread;	/* thread involved in deep-stack event */

/* redzone statistics — presumably updated by segkp_map_red(); verify there */
uint32_t	red_nmapped;
uint32_t	red_closest = UINT_MAX;
uint32_t	red_ndoubles;

pgcnt_t anon_segkp_pages_locked;	/* See vm/anon.h */
pgcnt_t anon_segkp_pages_resv;	/* anon reserved by seg_kp */
141
/*
 * seg driver operations vector for segkp.  Only the ops listed are
 * implemented; the remaining seg_ops members are zero-initialized by
 * the designated initializer.
 */
static struct seg_ops segkp_ops = {
	.fault		= segkp_fault,
	.checkprot	= segkp_checkprot,
	.kluster	= segkp_kluster,
	.dump		= segkp_dump,
	.pagelock	= segkp_pagelock,
	.getmemid	= segkp_getmemid,
};
150
151
/* registers the memory-config (DR) callbacks for segkp; defined below */
static void segkpinit_mem_config(struct seg *);

/* incremented when a memory delete begins (segkp_mem_config_pre_del()) */
static uint32_t	segkp_indel;
155
156 /*
157 * Allocate the segment specific private data struct and fill it in
158 * with the per kp segment mutex, anon ptr. array and hash table.
159 */
160 int
161 segkp_create(struct seg *seg)
162 {
163 struct segkp_segdata *kpsd;
164 size_t np;
165
166 ASSERT(seg != NULL && seg->s_as == &kas);
167 ASSERT(RW_WRITE_HELD(&seg->s_as->a_lock));
168
1342 addr = kpd->kp_base;
1343 eaddr = addr + kpd->kp_len;
1344 while (addr < eaddr) {
1345 ASSERT(seg->s_as == &kas);
1346 pfn = hat_getpfnum(seg->s_as->a_hat, addr);
1347 if (pfn != PFN_INVALID)
1348 dump_addpage(seg->s_as, addr, pfn);
1349 addr += PAGESIZE;
1350 dump_timeleft = dump_timeout;
1351 }
1352 }
1353 }
1354 }
1355
1356 /*ARGSUSED*/
1357 static int
1358 segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
1359 struct page ***ppp, enum lock_type type, enum seg_rw rw)
1360 {
1361 return (ENOTSUP);
1362 }
1363
1364 /*ARGSUSED*/
1365 static int
1366 segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
1367 {
1368 return (ENODEV);
1369 }
1370
1371 #include <sys/mem_config.h>
1372
/*
 * Memory add (DR) callback: segkp requires no action when memory is
 * added, so this is intentionally empty.
 */
/*ARGSUSED*/
static void
segkp_mem_config_post_add(void *arg, pgcnt_t delta_pages)
{}
1377
1378 /*
1379 * During memory delete, turn off caches so that pages are not held.
1380 * A better solution may be to unlock the pages while they are
1381 * in the cache so that they may be collected naturally.
1382 */
1383
1384 /*ARGSUSED*/
1385 static int
1386 segkp_mem_config_pre_del(void *arg, pgcnt_t delta_pages)
1387 {
1388 atomic_inc_32(&segkp_indel);
|
76 * Private seg op routines
77 */
/* seg ops implemented by this driver (installed in segkp_ops below) */
static void	segkp_dump(struct seg *seg);
static int	segkp_checkprot(struct seg *seg, caddr_t addr, size_t len,
			uint_t prot);
static int	segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
static int	segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
			struct page ***page, enum lock_type type,
			enum seg_rw rw);
/* hash-table maintenance for segkp_data entries (hash protected by segkp_lock) */
static void	segkp_insert(struct seg *seg, struct segkp_data *kpd);
static void	segkp_delete(struct seg *seg, struct segkp_data *kpd);
/* common allocation/release paths shared by the public segkp entry points */
static caddr_t	segkp_get_internal(struct seg *seg, size_t len, uint_t flags,
			struct segkp_data **tkpd, struct anon_map *amp);
static void	segkp_release_internal(struct seg *seg,
			struct segkp_data *kpd, size_t len);
/* HAT-level load/unlock helpers for a resource's pages */
static int	segkp_unlock(struct hat *hat, struct seg *seg, caddr_t vaddr,
			size_t len, struct segkp_data *kpd, uint_t flags);
static int	segkp_load(struct hat *hat, struct seg *seg, caddr_t vaddr,
			size_t len, struct segkp_data *kpd, uint_t flags);
/* look up the segkp_data covering vaddr, if any */
static struct segkp_data *segkp_find(struct seg *seg, caddr_t vaddr);
96
/*
 * Lock used to protect the hash table(s) and caches.
 */
static kmutex_t	segkp_lock;

/*
 * The segkp caches (indexed up to SEGKP_MAX_CACHE entries).
 */
static struct segkp_cache segkp_cache[SEGKP_MAX_CACHE];

/*
 * When there are fewer than red_minavail bytes left on the stack,
 * segkp_map_red() will map in the redzone (if called).  5000 seems
 * to work reasonably well...  Tunable (not static) so it can be
 * adjusted from the debugger or /etc/system.
 */
long		red_minavail = 5000;
113
/*
 * will be set to 1 for 32 bit x86 systems only, in startup.c
 *
 * [NOTE(review): source lines appear to be missing here — the fragment
 * below belongs to a deep-stack diagnostic: when very little stack
 * remains (see RED_DEEP_THRESHOLD), the time and the calling thread
 * are recorded]
 * in red_deep_hires and red_deep_thread respectively.
 */
/* stack-headroom threshold (bytes) for the red_deep_* diagnostics */
#define	RED_DEEP_THRESHOLD	2000

hrtime_t	red_deep_hires;		/* hi-res time of deep-stack event */
kthread_t	*red_deep_thread;	/* thread involved in deep-stack event */

/* redzone statistics — presumably updated by segkp_map_red(); verify there */
uint32_t	red_nmapped;
uint32_t	red_closest = UINT_MAX;
uint32_t	red_ndoubles;

pgcnt_t anon_segkp_pages_locked;	/* See vm/anon.h */
pgcnt_t anon_segkp_pages_resv;	/* anon reserved by seg_kp */
140
/*
 * seg driver operations vector for segkp.  Only the ops listed are
 * implemented; the remaining seg_ops members are zero-initialized by
 * the designated initializer.
 */
static struct seg_ops segkp_ops = {
	.fault		= segkp_fault,
	.checkprot	= segkp_checkprot,
	.kluster	= segkp_kluster,
	.dump		= segkp_dump,
	.pagelock	= segkp_pagelock,
};
148
149
/* registers the memory-config (DR) callbacks for segkp; defined below */
static void segkpinit_mem_config(struct seg *);

/* incremented when a memory delete begins (segkp_mem_config_pre_del()) */
static uint32_t	segkp_indel;
153
154 /*
155 * Allocate the segment specific private data struct and fill it in
156 * with the per kp segment mutex, anon ptr. array and hash table.
157 */
158 int
159 segkp_create(struct seg *seg)
160 {
161 struct segkp_segdata *kpsd;
162 size_t np;
163
164 ASSERT(seg != NULL && seg->s_as == &kas);
165 ASSERT(RW_WRITE_HELD(&seg->s_as->a_lock));
166
1340 addr = kpd->kp_base;
1341 eaddr = addr + kpd->kp_len;
1342 while (addr < eaddr) {
1343 ASSERT(seg->s_as == &kas);
1344 pfn = hat_getpfnum(seg->s_as->a_hat, addr);
1345 if (pfn != PFN_INVALID)
1346 dump_addpage(seg->s_as, addr, pfn);
1347 addr += PAGESIZE;
1348 dump_timeleft = dump_timeout;
1349 }
1350 }
1351 }
1352 }
1353
/*
 * Page locking is not supported for segkp segments; always fails
 * with ENOTSUP.
 */
/*ARGSUSED*/
static int
segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
	struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	return (ENOTSUP);
}
1361
1362 #include <sys/mem_config.h>
1363
/*
 * Memory add (DR) callback: segkp requires no action when memory is
 * added, so this is intentionally empty.
 */
/*ARGSUSED*/
static void
segkp_mem_config_post_add(void *arg, pgcnt_t delta_pages)
{}
1368
1369 /*
1370 * During memory delete, turn off caches so that pages are not held.
1371 * A better solution may be to unlock the pages while they are
1372 * in the cache so that they may be collected naturally.
1373 */
1374
1375 /*ARGSUSED*/
1376 static int
1377 segkp_mem_config_pre_del(void *arg, pgcnt_t delta_pages)
1378 {
1379 atomic_inc_32(&segkp_indel);
|