 */
static void segkp_dump(struct seg *seg);
static int segkp_checkprot(struct seg *seg, caddr_t addr, size_t len,
    uint_t prot);
static int segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
static int segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***page, enum lock_type type, enum seg_rw rw);
static void segkp_insert(struct seg *seg, struct segkp_data *kpd);
static void segkp_delete(struct seg *seg, struct segkp_data *kpd);
static caddr_t segkp_get_internal(struct seg *seg, size_t len, uint_t flags,
    struct segkp_data **tkpd, struct anon_map *amp);
static void segkp_release_internal(struct seg *seg,
    struct segkp_data *kpd, size_t len);
static int segkp_unlock(struct hat *hat, struct seg *seg, caddr_t vaddr,
    size_t len, struct segkp_data *kpd, uint_t flags);
static int segkp_load(struct hat *hat, struct seg *seg, caddr_t vaddr,
    size_t len, struct segkp_data *kpd, uint_t flags);
static struct segkp_data *segkp_find(struct seg *seg, caddr_t vaddr);
static int segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
static lgrp_mem_policy_info_t *segkp_getpolicy(struct seg *seg,
    caddr_t addr);
static int segkp_capable(struct seg *seg, segcapability_t capability);

/*
 * Lock used to protect the hash table(s) and caches.
 */
static kmutex_t segkp_lock;

/*
 * The segkp caches
 */
static struct segkp_cache segkp_cache[SEGKP_MAX_CACHE];

/*
 * When there are fewer than red_minavail bytes left on the stack,
 * segkp_map_red() will map in the redzone (if called). 5000 seems
 * to work reasonably well...
 */
long red_minavail = 5000;

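/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * that suspects it is running low on kernel stack can map the redzone
 * page explicitly.  This assumes the segkp_map_red()/segkp_unmap_red()
 * pair declared in <vm/seg_kp.h>:
 *
 *	if (segkp_map_red()) {
 *		(do the deep-stack work with the extra page mapped)
 *		segkp_unmap_red();
 *	}
 *
 * segkp_map_red() returns nonzero only when it actually mapped the
 * redzone, in which case the caller must unmap it again once the
 * danger has passed.
 */
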
/*
 * If segkp_map_red() is called with the redzone already mapped and
 * with less than RED_DEEP_THRESHOLD bytes available on the stack,
 * then the stack situation has become quite serious; if much more
 * stack is consumed, we have the potential of overwriting the next
 * thread/LWP structure.  To help debug the "can't happen" panics which
 * may result from this condition, we record hrestime and the calling
 * thread in red_deep_hires and red_deep_thread respectively.
 */
#define RED_DEEP_THRESHOLD 2000

hrtime_t red_deep_hires;	/* hrestime at deepest red-stack excursion */
kthread_t *red_deep_thread;	/* thread that made the deepest excursion */

uint32_t red_nmapped;		/* # of times the redzone has been mapped */
uint32_t red_closest = UINT_MAX; /* smallest stack margin seen so far */
uint32_t red_ndoubles;		/* calls with the redzone already mapped */

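/*
 * Sketch of how the counters above might be maintained (an assumption
 * about segkp_map_red() internals, shown only to clarify what each
 * variable records; "stack_left" is a hypothetical name for the bytes
 * remaining on the caller's stack):
 *
 *	atomic_inc_32(&red_nmapped);
 *	if (stack_left < red_closest)
 *		red_closest = (uint32_t)stack_left;
 *
 * and, on a call that finds the redzone already mapped with fewer than
 * RED_DEEP_THRESHOLD bytes remaining:
 *
 *	atomic_inc_32(&red_ndoubles);
 *	red_deep_hires = hrestime.tv_nsec;
 *	red_deep_thread = curthread;
 */
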
pgcnt_t anon_segkp_pages_locked;	/* See vm/anon.h */
pgcnt_t anon_segkp_pages_resv;		/* anon reserved by seg_kp */

static struct seg_ops segkp_ops = {
	.fault		= segkp_fault,
	.checkprot	= segkp_checkprot,
	.kluster	= segkp_kluster,
	.dump		= segkp_dump,
	.pagelock	= segkp_pagelock,
	.getmemid	= segkp_getmemid,
	.getpolicy	= segkp_getpolicy,
	.capable	= segkp_capable,
};

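/*
 * For context (a sketch of the generic segment-layer dispatch, not
 * code from this file): the VM system calls through this ops vector
 * via the SEGOP_* macros in <vm/seg.h>, so a fault on a segkp address
 * reaches segkp_fault() roughly as
 *
 *	error = SEGOP_FAULT(hat, seg, addr, len, type, rw);
 *
 * which expands to an indirect call through seg->s_ops.  Entries not
 * set above remain NULL and are presumably never invoked for a segkp
 * segment.
 */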

static void segkpinit_mem_config(struct seg *);

static uint32_t segkp_indel;	/* nonzero while a memory delete is pending */

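/*
 * Usage sketch (an approximation of the boot-time code, not part of
 * this file; segkp, segkp_base, and segkp_size are hypothetical names
 * for the segment and its range): segkp_create() is called once at
 * startup, after the segment is attached to the kernel address space
 * and while kas.a_lock is still write-held, which is what the ASSERTs
 * below verify:
 *
 *	rw_enter(&kas.a_lock, RW_WRITER);
 *	if (seg_attach(&kas, segkp_base, segkp_size, segkp) < 0)
 *		cmn_err(CE_PANIC, "startup: cannot attach segkp");
 *	if (segkp_create(segkp) != 0)
 *		cmn_err(CE_PANIC, "startup: segkp_create failed");
 *	rw_exit(&kas.a_lock);
 */
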
/*
 * Allocate the segment-specific private data struct and fill it in
 * with the per-kp-segment mutex, anon pointer array, and hash table.
 */
int
segkp_create(struct seg *seg)
{
	struct segkp_segdata *kpsd;
	size_t np;

	ASSERT(seg != NULL && seg->s_as == &kas);
	ASSERT(RW_WRITE_HELD(&seg->s_as->a_lock));
				addr += PAGESIZE;
				dump_timeleft = dump_timeout;
			}
		}
	}
}

/*ARGSUSED*/
static int
segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	return (ENOTSUP);
}

/*ARGSUSED*/
static int
segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	return (ENODEV);
}

/*ARGSUSED*/
static lgrp_mem_policy_info_t *
segkp_getpolicy(struct seg *seg, caddr_t addr)
{
	return (NULL);
}

/*ARGSUSED*/
static int
segkp_capable(struct seg *seg, segcapability_t capability)
{
	return (0);
}

#include <sys/mem_config.h>

/*ARGSUSED*/
static void
segkp_mem_config_post_add(void *arg, pgcnt_t delta_pages)
{}

/*
 * During memory delete, turn off caches so that pages are not held.
 * A better solution may be to unlock the pages while they are
 * in the cache so that they may be collected naturally.
 */
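
/*
 * Sketch of how these hooks are typically wired up (assuming the
 * kphysm callback interface from <sys/mem_config.h>; the delete-side
 * handlers named here are the counterparts one would expect next to
 * segkp_mem_config_post_add()):
 *
 *	static kphysm_setup_vector_t segkp_mem_config_vec = {
 *		KPHYSM_SETUP_VECTOR_VERSION,
 *		segkp_mem_config_post_add,
 *		segkp_mem_config_pre_del,
 *		segkp_mem_config_post_del,
 *	};
 *
 *	(void) kphysm_setup_func_register(&segkp_mem_config_vec,
 *	    (void *)seg);
 */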