 */
static void	segkp_dump(struct seg *seg);
static int	segkp_checkprot(struct seg *seg, caddr_t addr, size_t len,
			uint_t prot);
static int	segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
static int	segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
			struct page ***page, enum lock_type type,
			enum seg_rw rw);
static void	segkp_insert(struct seg *seg, struct segkp_data *kpd);
static void	segkp_delete(struct seg *seg, struct segkp_data *kpd);
static caddr_t	segkp_get_internal(struct seg *seg, size_t len, uint_t flags,
			struct segkp_data **tkpd, struct anon_map *amp);
static void	segkp_release_internal(struct seg *seg,
			struct segkp_data *kpd, size_t len);
static int	segkp_unlock(struct hat *hat, struct seg *seg, caddr_t vaddr,
			size_t len, struct segkp_data *kpd, uint_t flags);
static int	segkp_load(struct hat *hat, struct seg *seg, caddr_t vaddr,
			size_t len, struct segkp_data *kpd, uint_t flags);
static struct segkp_data *segkp_find(struct seg *seg, caddr_t vaddr);
static int	segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
static int	segkp_capable(struct seg *seg, segcapability_t capability);

/*
 * Lock used to protect the hash table(s) and caches.
 */
static kmutex_t	segkp_lock;

/*
 * The segkp caches
 */
static struct segkp_cache segkp_cache[SEGKP_MAX_CACHE];

/*
 * When there are fewer than red_minavail bytes left on the stack,
 * segkp_map_red() will map in the redzone (if called).  5000 seems
 * to work reasonably well...
 */
long		red_minavail = 5000;
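
/*
 * Example (illustrative only, not part of the original file): a kernel
 * code path that is about to recurse deeply could consult red_minavail
 * to decide when to map the redzone.  segkp_map_red() is the interface
 * declared in <sys/seg_kp.h>; the guard function itself is hypothetical.
 */
static void
example_stack_guard(void)
{
	char marker;		/* lives at (roughly) the current SP */

	/* Stacks grow down, so bytes left = current SP - stack base. */
	if ((caddr_t)&marker - curthread->t_stkbase < red_minavail) {
		/*
		 * Map the redzone page now so that a later overflow
		 * faults in a controlled fashion instead of silently
		 * corrupting memory adjacent to the stack.
		 */
		(void) segkp_map_red();
	}
}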

/*
 * will be set to 1 for 32 bit x86 systems only, in startup.c
 */
int	segkp_fromheap = 0;
ulong_t *segkp_bitmap;

/*
 * If segkp_map_red() is called with the redzone already mapped and
 * with less than RED_DEEP_THRESHOLD bytes available on the stack,
 * then the stack situation has become quite serious; if much more stack
 * is consumed, we have the potential of scrogging the next thread/LWP
 * structure.  To help debug the "can't happen" panics which may
 * result from this condition, we record hrtime_t's and PCs.
 */
#define	RED_DEEP_THRESHOLD	2000

hrtime_t	red_deep_hires;
kthread_t	*red_deep_thread;

uint32_t	red_nmapped;
uint32_t	red_closest = UINT_MAX;
uint32_t	red_ndoubles;
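
/*
 * Illustrative sketch only (the helper is hypothetical; the authoritative
 * logic lives in segkp_map_red() later in this file): roughly how the
 * counters above could be maintained when a redzone fault is handled.
 * `room' is the number of bytes between the stack pointer and t_stkbase.
 */
static void
example_record_red_stats(size_t room, int already_mapped)
{
	if (!already_mapped) {
		/* First time the redzone was needed on this stack. */
		atomic_inc_32(&red_nmapped);

		/* Remember the closest approach to the stack base. */
		while (room < red_closest) {
			(void) atomic_cas_32(&red_closest, red_closest,
			    (uint32_t)room);
		}
		return;
	}

	/* Redzone already mapped and we are still sinking: record who. */
	if (room < RED_DEEP_THRESHOLD) {
		red_deep_hires = gethrtime();
		red_deep_thread = curthread;
	}
	atomic_inc_32(&red_ndoubles);
}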

pgcnt_t anon_segkp_pages_locked;	/* See vm/anon.h */
pgcnt_t anon_segkp_pages_resv;		/* anon reserved by seg_kp */

static struct seg_ops segkp_ops = {
	.fault		= segkp_fault,
	.checkprot	= segkp_checkprot,
	.kluster	= segkp_kluster,
	.dump		= segkp_dump,
	.pagelock	= segkp_pagelock,
	.getmemid	= segkp_getmemid,
	.capable	= segkp_capable,
};
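
/*
 * For context: the generic VM layer reaches these entry points through
 * the SEGOP_*() dispatch macros in <vm/seg.h>; a fault on a segkp
 * address, for example, arrives at segkp_fault() via
 * SEGOP_FAULT(hat, seg, addr, len, type, rw).  Entries left out of the
 * initializer above remain NULL and must not be invoked on a segkp
 * segment.
 */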

static void	segkpinit_mem_config(struct seg *);

static uint32_t	segkp_indel;

/*
 * Allocate the segment specific private data struct and fill it in
 * with the per kp segment mutex, anon ptr. array and hash table.
 */
int
segkp_create(struct seg *seg)
{
	struct segkp_segdata *kpsd;
	size_t	np;

	ASSERT(seg != NULL && seg->s_as == &kas);
	ASSERT(RW_WRITE_HELD(&seg->s_as->a_lock));

				addr += PAGESIZE;
				dump_timeleft = dump_timeout;
			}
		}
	}
}

/*ARGSUSED*/
static int
segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	return (ENOTSUP);
}

/*ARGSUSED*/
static int
segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	return (ENODEV);
}

/*ARGSUSED*/
static int
segkp_capable(struct seg *seg, segcapability_t capability)
{
	return (0);
}

#include <sys/mem_config.h>

/*ARGSUSED*/
static void
segkp_mem_config_post_add(void *arg, pgcnt_t delta_pages)
{}

/*
 * During memory delete, turn off caches so that pages are not held.
 * A better solution may be to unlock the pages while they are
 * in the cache so that they may be collected naturally.
 */

/*ARGSUSED*/
static int
segkp_mem_config_pre_del(void *arg, pgcnt_t delta_pages)
{
	atomic_inc_32(&segkp_indel);
	segkp_cache_free();
	return (0);
}
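
/*
 * Illustrative sketch (hypothetical helper; the real check sits in
 * segkp_release()): while segkp_indel is nonzero, a released resource
 * should bypass the per-size freelists and be torn down immediately, so
 * that no pages linger in a cache during the memory delete.
 */
static void
example_release_or_cache(struct seg *seg, struct segkp_data *kpd,
    struct segkp_cache *freelist)
{
	mutex_enter(&segkp_lock);
	if (!segkp_indel && freelist->kpf_count < freelist->kpf_max) {
		/* Caching is on: hang the buffer on the freelist. */
		kpd->kp_next = freelist->kpf_list;
		freelist->kpf_list = kpd;
		freelist->kpf_count++;
		mutex_exit(&segkp_lock);
		return;
	}
	mutex_exit(&segkp_lock);

	/* Memory delete in progress (or cache full): free it now. */
	segkp_release_internal(seg, kpd, kpd->kp_len);
}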