remove whole-process swapping
Long before Unix supported paging, it reclaimed memory by swapping entire
processes out. The code is still there and, in theory, runs when we get
*extremely* low on memory. In practice it never runs, since the definition
of low-on-memory is antiquated. (XXX: define what antiquated means)
You can check the number of swapin/swapout events with kstat:
$ kstat -p ::vm:swapin ::vm:swapout
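
On a system where the swapper has never run, both counters stay at zero. The
output below is illustrative (shown for one CPU; values hypothetical):

$ kstat -p ::vm:swapin ::vm:swapout
cpu:0:vm:swapin	0
cpu:0:vm:swapout	0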
--- old/usr/src/uts/sfmmu/vm/hat_sfmmu.c
+++ new/usr/src/uts/sfmmu/vm/hat_sfmmu.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24 /*
25 25 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
26 26 */
27 27
28 28 /*
29 29 * VM - Hardware Address Translation management for Spitfire MMU.
30 30 *
31 31 * This file implements the machine specific hardware translation
32 32 * needed by the VM system. The machine independent interface is
33 33 * described in <vm/hat.h> while the machine dependent interface
34 34 * and data structures are described in <vm/hat_sfmmu.h>.
35 35 *
36 36 * The hat layer manages the address translation hardware as a cache
37 37 * driven by calls from the higher levels in the VM system.
38 38 */
39 39
40 40 #include <sys/types.h>
41 41 #include <sys/kstat.h>
42 42 #include <vm/hat.h>
43 43 #include <vm/hat_sfmmu.h>
44 44 #include <vm/page.h>
45 45 #include <sys/pte.h>
46 46 #include <sys/systm.h>
47 47 #include <sys/mman.h>
48 48 #include <sys/sysmacros.h>
49 49 #include <sys/machparam.h>
50 50 #include <sys/vtrace.h>
51 51 #include <sys/kmem.h>
52 52 #include <sys/mmu.h>
53 53 #include <sys/cmn_err.h>
54 54 #include <sys/cpu.h>
55 55 #include <sys/cpuvar.h>
56 56 #include <sys/debug.h>
57 57 #include <sys/lgrp.h>
58 58 #include <sys/archsystm.h>
59 59 #include <sys/machsystm.h>
60 60 #include <sys/vmsystm.h>
61 61 #include <vm/as.h>
62 62 #include <vm/seg.h>
63 63 #include <vm/seg_kp.h>
64 64 #include <vm/seg_kmem.h>
65 65 #include <vm/seg_kpm.h>
66 66 #include <vm/rm.h>
67 67 #include <sys/t_lock.h>
68 68 #include <sys/obpdefs.h>
69 69 #include <sys/vm_machparam.h>
70 70 #include <sys/var.h>
71 71 #include <sys/trap.h>
72 72 #include <sys/machtrap.h>
73 73 #include <sys/scb.h>
74 74 #include <sys/bitmap.h>
75 75 #include <sys/machlock.h>
76 76 #include <sys/membar.h>
77 77 #include <sys/atomic.h>
78 78 #include <sys/cpu_module.h>
79 79 #include <sys/prom_debug.h>
80 80 #include <sys/ksynch.h>
81 81 #include <sys/mem_config.h>
82 82 #include <sys/mem_cage.h>
83 83 #include <vm/vm_dep.h>
84 84 #include <sys/fpu/fpusystm.h>
85 85 #include <vm/mach_kpm.h>
86 86 #include <sys/callb.h>
87 87
88 88 #ifdef DEBUG
89 89 #define SFMMU_VALIDATE_HMERID(hat, rid, saddr, len) \
90 90 if (SFMMU_IS_SHMERID_VALID(rid)) { \
91 91 caddr_t _eaddr = (saddr) + (len); \
92 92 sf_srd_t *_srdp; \
93 93 sf_region_t *_rgnp; \
94 94 ASSERT((rid) < SFMMU_MAX_HME_REGIONS); \
95 95 ASSERT(SF_RGNMAP_TEST(hat->sfmmu_hmeregion_map, rid)); \
96 96 ASSERT((hat) != ksfmmup); \
97 97 _srdp = (hat)->sfmmu_srdp; \
98 98 ASSERT(_srdp != NULL); \
99 99 ASSERT(_srdp->srd_refcnt != 0); \
100 100 _rgnp = _srdp->srd_hmergnp[(rid)]; \
101 101 ASSERT(_rgnp != NULL && _rgnp->rgn_id == rid); \
102 102 ASSERT(_rgnp->rgn_refcnt != 0); \
103 103 ASSERT(!(_rgnp->rgn_flags & SFMMU_REGION_FREE)); \
104 104 ASSERT((_rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == \
105 105 SFMMU_REGION_HME); \
106 106 ASSERT((saddr) >= _rgnp->rgn_saddr); \
107 107 ASSERT((saddr) < _rgnp->rgn_saddr + _rgnp->rgn_size); \
108 108 ASSERT(_eaddr > _rgnp->rgn_saddr); \
109 109 ASSERT(_eaddr <= _rgnp->rgn_saddr + _rgnp->rgn_size); \
110 110 }
111 111
112 112 #define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid) \
113 113 { \
114 114 caddr_t _hsva; \
115 115 caddr_t _heva; \
116 116 caddr_t _rsva; \
117 117 caddr_t _reva; \
118 118 int _ttesz = get_hblk_ttesz(hmeblkp); \
119 119 int _flagtte; \
120 120 ASSERT((srdp)->srd_refcnt != 0); \
121 121 ASSERT((rid) < SFMMU_MAX_HME_REGIONS); \
122 122 ASSERT((rgnp)->rgn_id == rid); \
123 123 ASSERT(!((rgnp)->rgn_flags & SFMMU_REGION_FREE)); \
124 124 ASSERT(((rgnp)->rgn_flags & SFMMU_REGION_TYPE_MASK) == \
125 125 SFMMU_REGION_HME); \
126 126 ASSERT(_ttesz <= (rgnp)->rgn_pgszc); \
127 127 _hsva = (caddr_t)get_hblk_base(hmeblkp); \
128 128 _heva = get_hblk_endaddr(hmeblkp); \
129 129 _rsva = (caddr_t)P2ALIGN( \
130 130 (uintptr_t)(rgnp)->rgn_saddr, HBLK_MIN_BYTES); \
131 131 _reva = (caddr_t)P2ROUNDUP( \
132 132 (uintptr_t)((rgnp)->rgn_saddr + (rgnp)->rgn_size), \
133 133 HBLK_MIN_BYTES); \
134 134 ASSERT(_hsva >= _rsva); \
135 135 ASSERT(_hsva < _reva); \
136 136 ASSERT(_heva > _rsva); \
137 137 ASSERT(_heva <= _reva); \
138 138 _flagtte = (_ttesz < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ : \
139 139 _ttesz; \
140 140 ASSERT(rgnp->rgn_hmeflags & (0x1 << _flagtte)); \
141 141 }
142 142
143 143 #else /* DEBUG */
144 144 #define SFMMU_VALIDATE_HMERID(hat, rid, addr, len)
145 145 #define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid)
146 146 #endif /* DEBUG */
147 147
148 148 #if defined(SF_ERRATA_57)
149 149 extern caddr_t errata57_limit;
150 150 #endif
151 151
152 152 #define HME8BLK_SZ_RND ((roundup(HME8BLK_SZ, sizeof (int64_t))) / \
153 153 (sizeof (int64_t)))
154 154 #define HBLK_RESERVE ((struct hme_blk *)hblk_reserve)
155 155
156 156 #define HBLK_RESERVE_CNT 128
157 157 #define HBLK_RESERVE_MIN 20
158 158
159 159 static struct hme_blk *freehblkp;
160 160 static kmutex_t freehblkp_lock;
161 161 static int freehblkcnt;
162 162
163 163 static int64_t hblk_reserve[HME8BLK_SZ_RND];
164 164 static kmutex_t hblk_reserve_lock;
165 165 static kthread_t *hblk_reserve_thread;
166 166
167 167 static nucleus_hblk8_info_t nucleus_hblk8;
168 168 static nucleus_hblk1_info_t nucleus_hblk1;
169 169
170 170 /*
171 171 * Data to manage per-cpu hmeblk pending queues; hmeblks are queued here
172 172 * after the initial phase of removing an hmeblk from the hash chain. See
173 173 * the comment in sfmmu_hblk_hash_rm() for details.
174 174 */
175 175 static cpu_hme_pend_t *cpu_hme_pend;
176 176 static uint_t cpu_hme_pend_thresh;
177 177 /*
178 178 * SFMMU specific hat functions
179 179 */
180 180 void hat_pagecachectl(struct page *, int);
181 181
182 182 /* flags for hat_pagecachectl */
183 183 #define HAT_CACHE 0x1
184 184 #define HAT_UNCACHE 0x2
185 185 #define HAT_TMPNC 0x4
186 186
187 187 /*
188 188 * Flag to allow the creation of non-cacheable translations
189 189 * to system memory. It is off by default. At the moment this
190 190 * flag is used by the ecache error injector. The error injector
191 191 * will turn it on when creating such a translation then shut it
192 192 * off when it's finished.
193 193 */
194 194
195 195 int sfmmu_allow_nc_trans = 0;
196 196
197 197 /*
198 198 * Flag to disable large page support.
199 199 * value of 1 => disable all large pages.
200 200 * bits 1, 2, and 3 are to disable 64K, 512K and 4M pages respectively.
201 201 *
202 202 * For example, use the value 0x4 to disable 512K pages.
203 203 *
204 204 */
205 205 #define LARGE_PAGES_OFF 0x1
206 206
207 207 /*
208 208 * The disable_large_pages and disable_ism_large_pages variables control
209 209 * hat_memload_array and the page sizes to be used by ISM and the kernel.
210 210 *
211 211 * The disable_auto_data_large_pages and disable_auto_text_large_pages variables
212 212 * are only used to control which OOB pages to use at upper VM segment creation
213 213 * time, and are set in hat_init_pagesizes and used in the map_pgsz* routines.
214 214 * Their values may come from platform or CPU specific code to disable page
215 215 * sizes that should not be used.
216 216 *
217 217 * WARNING: 512K pages are currently not supported for ISM/DISM.
218 218 */
219 219 uint_t disable_large_pages = 0;
220 220 uint_t disable_ism_large_pages = (1 << TTE512K);
221 221 uint_t disable_auto_data_large_pages = 0;
222 222 uint_t disable_auto_text_large_pages = 0;
223 223
224 224 /*
225 225 * Private sfmmu data structures for hat management
226 226 */
227 227 static struct kmem_cache *sfmmuid_cache;
228 228 static struct kmem_cache *mmuctxdom_cache;
229 229
230 230 /*
231 231 * Private sfmmu data structures for tsb management
232 232 */
233 233 static struct kmem_cache *sfmmu_tsbinfo_cache;
234 234 static struct kmem_cache *sfmmu_tsb8k_cache;
235 235 static struct kmem_cache *sfmmu_tsb_cache[NLGRPS_MAX];
236 236 static vmem_t *kmem_bigtsb_arena;
237 237 static vmem_t *kmem_tsb_arena;
238 238
239 239 /*
240 240 * sfmmu static variables for hmeblk resource management.
241 241 */
242 242 static vmem_t *hat_memload1_arena; /* HAT translation arena for sfmmu1_cache */
243 243 static struct kmem_cache *sfmmu8_cache;
244 244 static struct kmem_cache *sfmmu1_cache;
245 245 static struct kmem_cache *pa_hment_cache;
246 246
247 247 static kmutex_t ism_mlist_lock; /* mutex for ism mapping list */
248 248 /*
249 249 * private data for ism
250 250 */
251 251 static struct kmem_cache *ism_blk_cache;
252 252 static struct kmem_cache *ism_ment_cache;
253 253 #define ISMID_STARTADDR NULL
254 254
255 255 /*
256 256 * Region management data structures and function declarations.
257 257 */
258 258
259 259 static void sfmmu_leave_srd(sfmmu_t *);
260 260 static int sfmmu_srdcache_constructor(void *, void *, int);
261 261 static void sfmmu_srdcache_destructor(void *, void *);
262 262 static int sfmmu_rgncache_constructor(void *, void *, int);
263 263 static void sfmmu_rgncache_destructor(void *, void *);
264 264 static int sfrgnmap_isnull(sf_region_map_t *);
265 265 static int sfhmergnmap_isnull(sf_hmeregion_map_t *);
266 266 static int sfmmu_scdcache_constructor(void *, void *, int);
267 267 static void sfmmu_scdcache_destructor(void *, void *);
268 268 static void sfmmu_rgn_cb_noop(caddr_t, caddr_t, caddr_t,
269 269 size_t, void *, u_offset_t);
270 270
271 271 static uint_t srd_hashmask = SFMMU_MAX_SRD_BUCKETS - 1;
272 272 static sf_srd_bucket_t *srd_buckets;
273 273 static struct kmem_cache *srd_cache;
274 274 static uint_t srd_rgn_hashmask = SFMMU_MAX_REGION_BUCKETS - 1;
275 275 static struct kmem_cache *region_cache;
276 276 static struct kmem_cache *scd_cache;
277 277
278 278 #ifdef sun4v
279 279 int use_bigtsb_arena = 1;
280 280 #else
281 281 int use_bigtsb_arena = 0;
282 282 #endif
283 283
284 284 /* External /etc/system tunable, for turning on&off the shctx support */
285 285 int disable_shctx = 0;
286 286 /* Internal variable, set by MD if the HW supports shctx feature */
287 287 int shctx_on = 0;
288 288
289 289 #ifdef DEBUG
290 290 static void check_scd_sfmmu_list(sfmmu_t **, sfmmu_t *, int);
291 291 #endif
292 292 static void sfmmu_to_scd_list(sfmmu_t **, sfmmu_t *);
293 293 static void sfmmu_from_scd_list(sfmmu_t **, sfmmu_t *);
294 294
295 295 static sf_scd_t *sfmmu_alloc_scd(sf_srd_t *, sf_region_map_t *);
296 296 static void sfmmu_find_scd(sfmmu_t *);
297 297 static void sfmmu_join_scd(sf_scd_t *, sfmmu_t *);
298 298 static void sfmmu_finish_join_scd(sfmmu_t *);
299 299 static void sfmmu_leave_scd(sfmmu_t *, uchar_t);
300 300 static void sfmmu_destroy_scd(sf_srd_t *, sf_scd_t *, sf_region_map_t *);
301 301 static int sfmmu_alloc_scd_tsbs(sf_srd_t *, sf_scd_t *);
302 302 static void sfmmu_free_scd_tsbs(sfmmu_t *);
303 303 static void sfmmu_tsb_inv_ctx(sfmmu_t *);
304 304 static int find_ism_rid(sfmmu_t *, sfmmu_t *, caddr_t, uint_t *);
305 305 static void sfmmu_ism_hatflags(sfmmu_t *, int);
306 306 static int sfmmu_srd_lock_held(sf_srd_t *);
307 307 static void sfmmu_remove_scd(sf_scd_t **, sf_scd_t *);
308 308 static void sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *);
309 309 static void sfmmu_link_scd_to_regions(sf_srd_t *, sf_scd_t *);
310 310 static void sfmmu_unlink_scd_from_regions(sf_srd_t *, sf_scd_t *);
311 311 static void sfmmu_link_to_hmeregion(sfmmu_t *, sf_region_t *);
312 312 static void sfmmu_unlink_from_hmeregion(sfmmu_t *, sf_region_t *);
313 313
314 314 /*
315 315 * ``hat_lock'' is a hashed mutex lock for protecting sfmmu TSB lists,
316 316 * HAT flags, synchronizing TLB/TSB coherency, and context management.
317 317 * The lock is hashed on the sfmmup since the case where we need to lock
318 318 * all processes is rare but does occur (e.g. we need to unload a shared
319 319 * mapping from all processes using the mapping). We have a lot of buckets,
320 320 * and each slab of sfmmu_t's can use about a quarter of them, giving us
321 321 * a fairly good distribution without wasting too much space and overhead
322 322 * when we have to grab them all.
323 323 */
324 324 #define SFMMU_NUM_LOCK 128 /* must be power of two */
325 325 hatlock_t hat_lock[SFMMU_NUM_LOCK];
326 326
327 327 /*
328 328 * Hash algorithm optimized for a small number of slabs.
329 329 * 7 is (highbit((sizeof sfmmu_t)) - 1)
330 330 * This hash algorithm is based upon the knowledge that sfmmu_t's come from a
331 331 * kmem_cache, and thus they will be sequential within that cache. In
332 332 * addition, each new slab will have a different "color" up to cache_maxcolor,
333 333 * which skews the hashing for each successive slab allocated.
334 334 * If the size of sfmmu_t changed to a larger size, this algorithm may need
335 335 * to be revisited.
336 336 */
337 337 #define TSB_HASH_SHIFT_BITS (7)
338 338 #define PTR_HASH(x) ((uintptr_t)x >> TSB_HASH_SHIFT_BITS)
339 339
340 340 #ifdef DEBUG
341 341 int tsb_hash_debug = 0;
342 342 #define TSB_HASH(sfmmup) \
343 343 (tsb_hash_debug ? &hat_lock[0] : \
344 344 &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)])
345 345 #else /* DEBUG */
346 346 #define TSB_HASH(sfmmup) &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)]
347 347 #endif /* DEBUG */
348 348
349 349
350 350 /* sfmmu_replace_tsb() return codes. */
351 351 typedef enum tsb_replace_rc {
352 352 TSB_SUCCESS,
353 353 TSB_ALLOCFAIL,
354 354 TSB_LOSTRACE,
355 355 TSB_ALREADY_SWAPPED,
356 356 TSB_CANTGROW
357 357 } tsb_replace_rc_t;
358 358
359 359 /*
360 360 * Flags for TSB allocation routines.
361 361 */
362 362 #define TSB_ALLOC 0x01
363 363 #define TSB_FORCEALLOC 0x02
364 364 #define TSB_GROW 0x04
365 365 #define TSB_SHRINK 0x08
366 366 #define TSB_SWAPIN 0x10
367 367
368 368 /*
369 369 * Support for HAT callbacks.
370 370 */
371 371 #define SFMMU_MAX_RELOC_CALLBACKS 10
372 372 int sfmmu_max_cb_id = SFMMU_MAX_RELOC_CALLBACKS;
373 373 static id_t sfmmu_cb_nextid = 0;
374 374 static id_t sfmmu_tsb_cb_id;
375 375 struct sfmmu_callback *sfmmu_cb_table;
376 376
377 377 kmutex_t kpr_mutex;
378 378 kmutex_t kpr_suspendlock;
379 379 kthread_t *kreloc_thread;
380 380
381 381 /*
382 382 * Enable VA->PA translation sanity checking on DEBUG kernels.
383 383 * Disabled by default. This is incompatible with some
384 384 * drivers (error injector, RSM) so if it breaks you get
385 385 * to keep both pieces.
386 386 */
387 387 int hat_check_vtop = 0;
388 388
389 389 /*
390 390 * Private sfmmu routines (prototypes)
391 391 */
392 392 static struct hme_blk *sfmmu_shadow_hcreate(sfmmu_t *, caddr_t, int, uint_t);
393 393 static struct hme_blk *sfmmu_hblk_alloc(sfmmu_t *, caddr_t,
394 394 struct hmehash_bucket *, uint_t, hmeblk_tag, uint_t,
395 395 uint_t);
396 396 static caddr_t sfmmu_hblk_unload(struct hat *, struct hme_blk *, caddr_t,
397 397 caddr_t, demap_range_t *, uint_t);
398 398 static caddr_t sfmmu_hblk_sync(struct hat *, struct hme_blk *, caddr_t,
399 399 caddr_t, int);
400 400 static void sfmmu_hblk_free(struct hme_blk **);
401 401 static void sfmmu_hblks_list_purge(struct hme_blk **, int);
402 402 static uint_t sfmmu_get_free_hblk(struct hme_blk **, uint_t);
403 403 static uint_t sfmmu_put_free_hblk(struct hme_blk *, uint_t);
404 404 static struct hme_blk *sfmmu_hblk_steal(int);
405 405 static int sfmmu_steal_this_hblk(struct hmehash_bucket *,
406 406 struct hme_blk *, uint64_t, struct hme_blk *);
407 407 static caddr_t sfmmu_hblk_unlock(struct hme_blk *, caddr_t, caddr_t);
408 408
409 409 static void hat_do_memload_array(struct hat *, caddr_t, size_t,
410 410 struct page **, uint_t, uint_t, uint_t);
411 411 static void hat_do_memload(struct hat *, caddr_t, struct page *,
412 412 uint_t, uint_t, uint_t);
413 413 static void sfmmu_memload_batchsmall(struct hat *, caddr_t, page_t **,
414 414 uint_t, uint_t, pgcnt_t, uint_t);
415 415 void sfmmu_tteload(struct hat *, tte_t *, caddr_t, page_t *,
416 416 uint_t);
417 417 static int sfmmu_tteload_array(sfmmu_t *, tte_t *, caddr_t, page_t **,
418 418 uint_t, uint_t);
419 419 static struct hmehash_bucket *sfmmu_tteload_acquire_hashbucket(sfmmu_t *,
420 420 caddr_t, int, uint_t);
421 421 static struct hme_blk *sfmmu_tteload_find_hmeblk(sfmmu_t *,
422 422 struct hmehash_bucket *, caddr_t, uint_t, uint_t,
423 423 uint_t);
424 424 static int sfmmu_tteload_addentry(sfmmu_t *, struct hme_blk *, tte_t *,
425 425 caddr_t, page_t **, uint_t, uint_t);
426 426 static void sfmmu_tteload_release_hashbucket(struct hmehash_bucket *);
427 427
428 428 static int sfmmu_pagearray_setup(caddr_t, page_t **, tte_t *, int);
429 429 static pfn_t sfmmu_uvatopfn(caddr_t, sfmmu_t *, tte_t *);
430 430 void sfmmu_memtte(tte_t *, pfn_t, uint_t, int);
431 431 #ifdef VAC
432 432 static void sfmmu_vac_conflict(struct hat *, caddr_t, page_t *);
433 433 static int sfmmu_vacconflict_array(caddr_t, page_t *, int *);
434 434 int tst_tnc(page_t *pp, pgcnt_t);
435 435 void conv_tnc(page_t *pp, int);
436 436 #endif
437 437
438 438 static void sfmmu_get_ctx(sfmmu_t *);
439 439 static void sfmmu_free_sfmmu(sfmmu_t *);
440 440
441 441 static void sfmmu_ttesync(struct hat *, caddr_t, tte_t *, page_t *);
442 442 static void sfmmu_chgattr(struct hat *, caddr_t, size_t, uint_t, int);
443 443
444 444 cpuset_t sfmmu_pageunload(page_t *, struct sf_hment *, int);
445 445 static void hat_pagereload(struct page *, struct page *);
446 446 static cpuset_t sfmmu_pagesync(page_t *, struct sf_hment *, uint_t);
447 447 #ifdef VAC
448 448 void sfmmu_page_cache_array(page_t *, int, int, pgcnt_t);
449 449 static void sfmmu_page_cache(page_t *, int, int, int);
450 450 #endif
451 451
452 452 cpuset_t sfmmu_rgntlb_demap(caddr_t, sf_region_t *,
453 453 struct hme_blk *, int);
454 454 static void sfmmu_tlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
455 455 pfn_t, int, int, int, int);
456 456 static void sfmmu_ismtlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
457 457 pfn_t, int);
458 458 static void sfmmu_tlb_demap(caddr_t, sfmmu_t *, struct hme_blk *, int, int);
459 459 static void sfmmu_tlb_range_demap(demap_range_t *);
460 460 static void sfmmu_invalidate_ctx(sfmmu_t *);
461 461 static void sfmmu_sync_mmustate(sfmmu_t *);
462 462
463 463 static void sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t);
464 464 static int sfmmu_tsbinfo_alloc(struct tsb_info **, int, int, uint_t,
465 465 sfmmu_t *);
466 466 static void sfmmu_tsb_free(struct tsb_info *);
467 467 static void sfmmu_tsbinfo_free(struct tsb_info *);
468 468 static int sfmmu_init_tsbinfo(struct tsb_info *, int, int, uint_t,
469 469 sfmmu_t *);
470 470 static void sfmmu_tsb_chk_reloc(sfmmu_t *, hatlock_t *);
471 471 static void sfmmu_tsb_swapin(sfmmu_t *, hatlock_t *);
472 472 static int sfmmu_select_tsb_szc(pgcnt_t);
473 473 static void sfmmu_mod_tsb(sfmmu_t *, caddr_t, tte_t *, int);
474 474 #define sfmmu_load_tsb(sfmmup, vaddr, tte, szc) \
475 475 sfmmu_mod_tsb(sfmmup, vaddr, tte, szc)
476 476 #define sfmmu_unload_tsb(sfmmup, vaddr, szc) \
477 477 sfmmu_mod_tsb(sfmmup, vaddr, NULL, szc)
478 478 static void sfmmu_copy_tsb(struct tsb_info *, struct tsb_info *);
479 479 static tsb_replace_rc_t sfmmu_replace_tsb(sfmmu_t *, struct tsb_info *, uint_t,
480 480 hatlock_t *, uint_t);
481 481 static void sfmmu_size_tsb(sfmmu_t *, int, uint64_t, uint64_t, int);
482 482
483 483 #ifdef VAC
484 484 void sfmmu_cache_flush(pfn_t, int);
485 485 void sfmmu_cache_flushcolor(int, pfn_t);
486 486 #endif
487 487 static caddr_t sfmmu_hblk_chgattr(sfmmu_t *, struct hme_blk *, caddr_t,
488 488 caddr_t, demap_range_t *, uint_t, int);
489 489
490 490 static uint64_t sfmmu_vtop_attr(uint_t, int mode, tte_t *);
491 491 static uint_t sfmmu_ptov_attr(tte_t *);
492 492 static caddr_t sfmmu_hblk_chgprot(sfmmu_t *, struct hme_blk *, caddr_t,
493 493 caddr_t, demap_range_t *, uint_t);
494 494 static uint_t sfmmu_vtop_prot(uint_t, uint_t *);
495 495 static int sfmmu_idcache_constructor(void *, void *, int);
496 496 static void sfmmu_idcache_destructor(void *, void *);
497 497 static int sfmmu_hblkcache_constructor(void *, void *, int);
498 498 static void sfmmu_hblkcache_destructor(void *, void *);
499 499 static void sfmmu_hblkcache_reclaim(void *);
500 500 static void sfmmu_shadow_hcleanup(sfmmu_t *, struct hme_blk *,
501 501 struct hmehash_bucket *);
502 502 static void sfmmu_hblk_hash_rm(struct hmehash_bucket *, struct hme_blk *,
503 503 struct hme_blk *, struct hme_blk **, int);
504 504 static void sfmmu_hblk_hash_add(struct hmehash_bucket *, struct hme_blk *,
505 505 uint64_t);
506 506 static struct hme_blk *sfmmu_check_pending_hblks(int);
507 507 static void sfmmu_free_hblks(sfmmu_t *, caddr_t, caddr_t, int);
508 508 static void sfmmu_cleanup_rhblk(sf_srd_t *, caddr_t, uint_t, int);
509 509 static void sfmmu_unload_hmeregion_va(sf_srd_t *, uint_t, caddr_t, caddr_t,
510 510 int, caddr_t *);
511 511 static void sfmmu_unload_hmeregion(sf_srd_t *, sf_region_t *);
512 512
513 513 static void sfmmu_rm_large_mappings(page_t *, int);
514 514
515 515 static void hat_lock_init(void);
516 516 static void hat_kstat_init(void);
517 517 static int sfmmu_kstat_percpu_update(kstat_t *ksp, int rw);
518 518 static void sfmmu_set_scd_rttecnt(sf_srd_t *, sf_scd_t *);
519 519 static int sfmmu_is_rgnva(sf_srd_t *, caddr_t, ulong_t, ulong_t);
520 520 static void sfmmu_check_page_sizes(sfmmu_t *, int);
521 521 int fnd_mapping_sz(page_t *);
522 522 static void iment_add(struct ism_ment *, struct hat *);
523 523 static void iment_sub(struct ism_ment *, struct hat *);
524 524 static pgcnt_t ism_tsb_entries(sfmmu_t *, int szc);
525 525 extern void sfmmu_setup_tsbinfo(sfmmu_t *);
526 526 extern void sfmmu_clear_utsbinfo(void);
527 527
528 528 static void sfmmu_ctx_wrap_around(mmu_ctx_t *, boolean_t);
529 529
530 530 extern int vpm_enable;
531 531
532 532 /* kpm globals */
533 533 #ifdef DEBUG
534 534 /*
535 535 * Enable trap level tsbmiss handling
536 536 */
537 537 int kpm_tsbmtl = 1;
538 538
539 539 /*
540 540 * Flush the TLB on kpm mapout. Note: Xcalls are used (again) for the
541 541 * required TLB shootdowns in this case, so handle w/ care. Off by default.
542 542 */
543 543 int kpm_tlb_flush;
544 544 #endif /* DEBUG */
545 545
546 546 static void *sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *, size_t, int);
547 547
548 548 #ifdef DEBUG
549 549 static void sfmmu_check_hblk_flist();
550 550 #endif
551 551
552 552 /*
553 553 * Semi-private sfmmu data structures. Some of them are initialized in
554 554 * startup or in hat_init. Some of them are private but accessed by
555 555 * assembly code or mach_sfmmu.c
556 556 */
557 557 struct hmehash_bucket *uhme_hash; /* user hmeblk hash table */
558 558 struct hmehash_bucket *khme_hash; /* kernel hmeblk hash table */
559 559 uint64_t uhme_hash_pa; /* PA of uhme_hash */
560 560 uint64_t khme_hash_pa; /* PA of khme_hash */
561 561 int uhmehash_num; /* # of buckets in user hash table */
562 562 int khmehash_num; /* # of buckets in kernel hash table */
563 563
564 564 uint_t max_mmu_ctxdoms = 0; /* max context domains in the system */
565 565 mmu_ctx_t **mmu_ctxs_tbl; /* global array of context domains */
566 566 uint64_t mmu_saved_gnum = 0; /* to init incoming MMUs' gnums */
567 567
568 568 #define DEFAULT_NUM_CTXS_PER_MMU 8192
569 569 static uint_t nctxs = DEFAULT_NUM_CTXS_PER_MMU;
570 570
571 571 int cache; /* describes system cache */
572 572
573 573 caddr_t ktsb_base; /* kernel 8k-indexed tsb base address */
574 574 uint64_t ktsb_pbase; /* kernel 8k-indexed tsb phys address */
575 575 int ktsb_szcode; /* kernel 8k-indexed tsb size code */
576 576 int ktsb_sz; /* kernel 8k-indexed tsb size */
577 577
578 578 caddr_t ktsb4m_base; /* kernel 4m-indexed tsb base address */
579 579 uint64_t ktsb4m_pbase; /* kernel 4m-indexed tsb phys address */
580 580 int ktsb4m_szcode; /* kernel 4m-indexed tsb size code */
581 581 int ktsb4m_sz; /* kernel 4m-indexed tsb size */
582 582
583 583 uint64_t kpm_tsbbase; /* kernel seg_kpm 4M TSB base address */
584 584 int kpm_tsbsz; /* kernel seg_kpm 4M TSB size code */
585 585 uint64_t kpmsm_tsbbase; /* kernel seg_kpm 8K TSB base address */
586 586 int kpmsm_tsbsz; /* kernel seg_kpm 8K TSB size code */
587 587
588 588 #ifndef sun4v
589 589 int utsb_dtlb_ttenum = -1; /* index in TLB for utsb locked TTE */
590 590 int utsb4m_dtlb_ttenum = -1; /* index in TLB for 4M TSB TTE */
591 591 int dtlb_resv_ttenum; /* index in TLB of first reserved TTE */
592 592 caddr_t utsb_vabase; /* reserved kernel virtual memory */
593 593 caddr_t utsb4m_vabase; /* for trap handler TSB accesses */
594 594 #endif /* sun4v */
595 595 uint64_t tsb_alloc_bytes = 0; /* bytes allocated to TSBs */
596 596 vmem_t *kmem_tsb_default_arena[NLGRPS_MAX]; /* For dynamic TSBs */
597 597 vmem_t *kmem_bigtsb_default_arena[NLGRPS_MAX]; /* dynamic 256M TSBs */
598 598
599 599 /*
600 600 * Size to use for TSB slabs. Future platforms that support page sizes
601 601 * larger than 4M may wish to change these values, and provide their own
602 602 * assembly macros for building and decoding the TSB base register contents.
603 603 * Note disable_large_pages will override the value set here.
604 604 */
605 605 static uint_t tsb_slab_ttesz = TTE4M;
606 606 size_t tsb_slab_size = MMU_PAGESIZE4M;
607 607 uint_t tsb_slab_shift = MMU_PAGESHIFT4M;
608 608 /* PFN mask for TTE */
609 609 size_t tsb_slab_mask = MMU_PAGEOFFSET4M >> MMU_PAGESHIFT;
610 610
611 611 /*
612 612 * Size to use for big TSB slabs. These are used only when 256M tsb arenas
613 613 * exist.
614 614 */
615 615 static uint_t bigtsb_slab_ttesz = TTE256M;
616 616 static size_t bigtsb_slab_size = MMU_PAGESIZE256M;
617 617 static uint_t bigtsb_slab_shift = MMU_PAGESHIFT256M;
618 618 /* 256M page alignment for 8K pfn */
619 619 static size_t bigtsb_slab_mask = MMU_PAGEOFFSET256M >> MMU_PAGESHIFT;
620 620
621 621 /* largest TSB size to grow to, will be smaller on smaller memory systems */
622 622 static int tsb_max_growsize = 0;
623 623
624 624 /*
625 625 * Tunable parameters dealing with TSB policies.
626 626 */
627 627
628 628 /*
629 629 * This undocumented tunable forces all 8K TSBs to be allocated from
630 630 * the kernel heap rather than from the kmem_tsb_default_arena arenas.
631 631 */
632 632 #ifdef DEBUG
633 633 int tsb_forceheap = 0;
634 634 #endif /* DEBUG */
635 635
636 636 /*
637 637 * Decide whether to use per-lgroup arenas, or one global set of
638 638 * TSB arenas. The default is not to break up per-lgroup, since
639 639 * most platforms don't recognize any tangible benefit from it.
640 640 */
641 641 int tsb_lgrp_affinity = 0;
642 642
643 643 /*
644 644 * Used for growing the TSB based on the process RSS.
645 645 * tsb_rss_factor is based on the smallest TSB, and is
646 646 * shifted by the TSB size to determine if we need to grow.
647 647 * The default will grow the TSB if the number of TTEs for
648 648 * this page size exceeds 75% of the number of TSB entries,
649 649 * which should _almost_ eliminate all conflict misses
650 650 * (at the expense of using up lots and lots of memory).
651 651 */
652 652 #define TSB_RSS_FACTOR (TSB_ENTRIES(TSB_MIN_SZCODE) * 0.75)
653 653 #define SFMMU_RSS_TSBSIZE(tsbszc) (tsb_rss_factor << tsbszc)
654 654 #define SELECT_TSB_SIZECODE(pgcnt) ( \
655 655 (enable_tsb_rss_sizing)? sfmmu_select_tsb_szc(pgcnt) : \
656 656 default_tsb_size)
657 657 #define TSB_OK_SHRINK() \
658 658 (tsb_alloc_bytes > tsb_alloc_hiwater || freemem < desfree)
659 659 #define TSB_OK_GROW() \
660 660 (tsb_alloc_bytes < tsb_alloc_hiwater && freemem > desfree)
661 661
662 662 int enable_tsb_rss_sizing = 1;
663 663 int tsb_rss_factor = (int)TSB_RSS_FACTOR;
664 664
665 665 /* which TSB size code to use for new address spaces or if rss sizing off */
666 666 int default_tsb_size = TSB_8K_SZCODE;
667 667
668 668 static uint64_t tsb_alloc_hiwater; /* limit TSB reserved memory */
669 669 uint64_t tsb_alloc_hiwater_factor; /* tsb_alloc_hiwater = physmem / this */
670 670 #define TSB_ALLOC_HIWATER_FACTOR_DEFAULT 32
671 671
672 672 #ifdef DEBUG
673 673 static int tsb_random_size = 0; /* set to 1 to test random tsb sizes on alloc */
674 674 static int tsb_grow_stress = 0; /* if set to 1, keep replacing TSB w/ random */
675 675 static int tsb_alloc_mtbf = 0; /* fail allocation every n attempts */
676 676 static int tsb_alloc_fail_mtbf = 0;
677 677 static int tsb_alloc_count = 0;
678 678 #endif /* DEBUG */
679 679
680 680 /* if set to 1, will remap valid TTEs when growing TSB. */
681 681 int tsb_remap_ttes = 1;
682 682
683 683 /*
684 684 * If we have more than this many mappings, allocate a second TSB.
685 685 * This default is chosen because the I/D fully associative TLBs are
686 686 * assumed to have at least 8 available entries. Platforms with a
687 687 * larger fully-associative TLB could probably override the default.
688 688 */
689 689
690 690 #ifdef sun4v
691 691 int tsb_sectsb_threshold = 0;
692 692 #else
693 693 int tsb_sectsb_threshold = 8;
694 694 #endif
695 695
696 696 /*
697 697 * kstat data
698 698 */
699 699 struct sfmmu_global_stat sfmmu_global_stat;
700 700 struct sfmmu_tsbsize_stat sfmmu_tsbsize_stat;
701 701
702 702 /*
703 703 * Global data
704 704 */
705 705 sfmmu_t *ksfmmup; /* kernel's hat id */
706 706
707 707 #ifdef DEBUG
708 708 static void chk_tte(tte_t *, tte_t *, tte_t *, struct hme_blk *);
709 709 #endif
710 710
711 711 /* sfmmu locking operations */
712 712 static kmutex_t *sfmmu_mlspl_enter(struct page *, int);
713 713 static int sfmmu_mlspl_held(struct page *, int);
714 714
715 715 kmutex_t *sfmmu_page_enter(page_t *);
716 716 void sfmmu_page_exit(kmutex_t *);
717 717 int sfmmu_page_spl_held(struct page *);
718 718
719 719 /* sfmmu internal locking operations - accessed directly */
720 720 static void sfmmu_mlist_reloc_enter(page_t *, page_t *,
721 721 kmutex_t **, kmutex_t **);
722 722 static void sfmmu_mlist_reloc_exit(kmutex_t *, kmutex_t *);
723 723 static hatlock_t *
724 724 sfmmu_hat_enter(sfmmu_t *);
725 725 static hatlock_t *
726 726 sfmmu_hat_tryenter(sfmmu_t *);
727 727 static void sfmmu_hat_exit(hatlock_t *);
728 728 static void sfmmu_hat_lock_all(void);
729 729 static void sfmmu_hat_unlock_all(void);
730 730 static void sfmmu_ismhat_enter(sfmmu_t *, int);
731 731 static void sfmmu_ismhat_exit(sfmmu_t *, int);
732 732
733 733 kpm_hlk_t *kpmp_table;
734 734 uint_t kpmp_table_sz; /* must be a power of 2 */
735 735 uchar_t kpmp_shift;
736 736
737 737 kpm_shlk_t *kpmp_stable;
738 738 uint_t kpmp_stable_sz; /* must be a power of 2 */
739 739
740 740 /*
741 741 * SPL_TABLE_SIZE is 2 * NCPU, but no smaller than 128.
742 742 * SPL_SHIFT is log2(SPL_TABLE_SIZE).
743 743 */
744 744 #if ((2*NCPU_P2) > 128)
745 745 #define SPL_SHIFT ((unsigned)(NCPU_LOG2 + 1))
746 746 #else
747 747 #define SPL_SHIFT 7U
748 748 #endif
749 749 #define SPL_TABLE_SIZE (1U << SPL_SHIFT)
750 750 #define SPL_MASK (SPL_TABLE_SIZE - 1)
751 751
752 752 /*
753 753 * We shift by PP_SHIFT to take care of the low-order 0 bits of a page_t
754 754 * and by multiples of SPL_SHIFT to get as many varied bits as we can.
755 755 */
756 756 #define SPL_INDEX(pp) \
757 757 ((((uintptr_t)(pp) >> PP_SHIFT) ^ \
758 758 ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT)) ^ \
759 759 ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT * 2)) ^ \
760 760 ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT * 3))) & \
761 761 SPL_MASK)
762 762
763 763 #define SPL_HASH(pp) \
764 764 (&sfmmu_page_lock[SPL_INDEX(pp)].pad_mutex)
765 765
766 766 static pad_mutex_t sfmmu_page_lock[SPL_TABLE_SIZE];
767 767
768 768 /* Array of mutexes protecting a page's mapping list and p_nrm field. */
769 769
770 770 #define MML_TABLE_SIZE SPL_TABLE_SIZE
771 771 #define MLIST_HASH(pp) (&mml_table[SPL_INDEX(pp)].pad_mutex)
772 772
773 773 static pad_mutex_t mml_table[MML_TABLE_SIZE];
774 774
775 775 /*
776 776 * hat_unload_callback() will group together callbacks in order
777 777 * to avoid xt_sync() calls. This is the maximum size of the group.
778 778 */
779 779 #define MAX_CB_ADDR 32
780 780
781 781 tte_t hw_tte;
782 782 static ulong_t sfmmu_dmr_maxbit = DMR_MAXBIT;
783 783
784 784 static char *mmu_ctx_kstat_names[] = {
785 785 "mmu_ctx_tsb_exceptions",
786 786 "mmu_ctx_tsb_raise_exception",
787 787 "mmu_ctx_wrap_around",
788 788 };
789 789
790 790 /*
791 791 * Wrapper for vmem_xalloc since vmem_create only allows limited
792 792 * parameters for vm_source_alloc functions. This function allows us
793 793 * to specify alignment consistent with the size of the object being
794 794 * allocated.
795 795 */
796 796 static void *
797 797 sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t size, int vmflag)
798 798 {
799 799 return (vmem_xalloc(vmp, size, size, 0, 0, NULL, NULL, vmflag));
800 800 }
801 801
802 802 /* Common code for setting tsb_alloc_hiwater. */
803 803 #define SFMMU_SET_TSB_ALLOC_HIWATER(pages) tsb_alloc_hiwater = \
804 804 ptob(pages) / tsb_alloc_hiwater_factor
805 805
806 806 /*
807 807 * Set tsb_max_growsize to allow at most all of physical memory to be mapped by
808 808 * a single TSB. physmem is the number of physical pages so we need physmem 8K
809 809 * TTEs to represent all those physical pages. We round this up by using
810 810 * 1<<highbit(). To figure out which size code to use, remember that the size
811 811 * code is just an amount to shift the smallest TSB size to get the size of
812 812 * this TSB. So we subtract that size, TSB_START_SIZE, from highbit() (or
813 813 * highbit() - 1) to get the size code for the smallest TSB that can represent
814 814 * all of physical memory, while erring on the side of too much.
815 815 *
816 816 * Restrict tsb_max_growsize to make sure that:
817 817 * 1) TSBs can't grow larger than the TSB slab size
818 818 * 2) TSBs can't grow larger than UTSB_MAX_SZCODE.
819 819 */
820 820 #define SFMMU_SET_TSB_MAX_GROWSIZE(pages) { \
821 821 int _i, _szc, _slabszc, _tsbszc; \
822 822 \
823 823 _i = highbit(pages); \
824 824 if ((1 << (_i - 1)) == (pages)) \
825 825 _i--; /* 2^n case, round down */ \
826 826 _szc = _i - TSB_START_SIZE; \
827 827 _slabszc = bigtsb_slab_shift - (TSB_START_SIZE + TSB_ENTRY_SHIFT); \
828 828 _tsbszc = MIN(_szc, _slabszc); \
829 829 tsb_max_growsize = MIN(_tsbszc, UTSB_MAX_SZCODE); \
830 830 }
831 831
832 832 /*
833 833 * Given a pointer to an sfmmu and a TTE size code, return a pointer to the
834 834 * tsb_info which handles that TTE size.
835 835 */
836 836 #define SFMMU_GET_TSBINFO(tsbinfop, sfmmup, tte_szc) { \
837 837 (tsbinfop) = (sfmmup)->sfmmu_tsb; \
838 838 ASSERT(((tsbinfop)->tsb_flags & TSB_SHAREDCTX) || \
839 839 sfmmu_hat_lock_held(sfmmup)); \
840 840 if ((tte_szc) >= TTE4M) { \
841 841 ASSERT((tsbinfop) != NULL); \
842 842 (tsbinfop) = (tsbinfop)->tsb_next; \
843 843 } \
844 844 }
845 845
846 846 /*
847 847 * Macro to use to unload entries from the TSB.
848 848 * It has knowledge of which page sizes get replicated in the TSB
849 849 * and will call the appropriate unload routine for the appropriate size.
850 850 */
851 851 #define SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, ismhat) \
852 852 { \
853 853 int ttesz = get_hblk_ttesz(hmeblkp); \
854 854 if (ttesz == TTE8K || ttesz == TTE4M) { \
855 855 sfmmu_unload_tsb(sfmmup, addr, ttesz); \
856 856 } else { \
857 857 caddr_t sva = ismhat ? addr : \
858 858 (caddr_t)get_hblk_base(hmeblkp); \
859 859 caddr_t eva = sva + get_hblk_span(hmeblkp); \
860 860 ASSERT(addr >= sva && addr < eva); \
861 861 sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz); \
862 862 } \
863 863 }
864 864
865 865
866 866 /* Update tsb_alloc_hiwater after memory is configured. */
867 867 /*ARGSUSED*/
868 868 static void
869 869 sfmmu_update_post_add(void *arg, pgcnt_t delta_pages)
870 870 {
871 871 /* Assumes physmem has already been updated. */
872 872 SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
873 873 SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
874 874 }
875 875
876 876 /*
877 877 * Update tsb_alloc_hiwater before memory is deleted. We'll do nothing here
878 878 * and update tsb_alloc_hiwater and tsb_max_growsize after the memory is
879 879 * deleted.
880 880 */
881 881 /*ARGSUSED*/
882 882 static int
883 883 sfmmu_update_pre_del(void *arg, pgcnt_t delta_pages)
884 884 {
885 885 return (0);
886 886 }
887 887
888 888 /* Update tsb_alloc_hiwater after memory fails to be unconfigured. */
889 889 /*ARGSUSED*/
890 890 static void
891 891 sfmmu_update_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
892 892 {
893 893 /*
894 894 * Whether the delete was cancelled or not, just go ahead and update
895 895 * tsb_alloc_hiwater and tsb_max_growsize.
896 896 */
897 897 SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
898 898 SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
899 899 }
900 900
901 901 static kphysm_setup_vector_t sfmmu_update_vec = {
902 902 KPHYSM_SETUP_VECTOR_VERSION, /* version */
903 903 sfmmu_update_post_add, /* post_add */
904 904 sfmmu_update_pre_del, /* pre_del */
905 905 sfmmu_update_post_del /* post_del */
906 906 };
907 907
908 908
909 909 /*
910 910 * HME_BLK HASH PRIMITIVES
911 911 */
912 912
913 913 /*
914 914 * Enter a hme on the mapping list for page pp.
915 915 * When large pages are more prevalent in the system we might want to
916 916 * keep the mapping list in ascending order by the hment size. For now,
917 917 * small pages are more frequent, so don't slow it down.
918 918 */
919 919 #define HME_ADD(hme, pp) \
920 920 { \
921 921 ASSERT(sfmmu_mlist_held(pp)); \
922 922 \
923 923 hme->hme_prev = NULL; \
924 924 hme->hme_next = pp->p_mapping; \
925 925 hme->hme_page = pp; \
926 926 if (pp->p_mapping) { \
927 927 ((struct sf_hment *)(pp->p_mapping))->hme_prev = hme;\
928 928 ASSERT(pp->p_share > 0); \
929 929 } else { \
930 930 /* EMPTY */ \
931 931 ASSERT(pp->p_share == 0); \
932 932 } \
933 933 pp->p_mapping = hme; \
934 934 pp->p_share++; \
935 935 }
936 936
937 937 /*
938 938 * Remove a hme from the mapping list for page pp.
939 939 * If we are unmapping a large translation, we need to make sure that the
940 940 * change is reflected in the corresponding bit of the p_index field.
941 941 */
942 942 #define HME_SUB(hme, pp) \
943 943 { \
944 944 ASSERT(sfmmu_mlist_held(pp)); \
945 945 ASSERT(hme->hme_page == pp || IS_PAHME(hme)); \
946 946 \
947 947 if (pp->p_mapping == NULL) { \
948 948 panic("hme_remove - no mappings"); \
949 949 } \
950 950 \
951 951 membar_stst(); /* ensure previous stores finish */ \
952 952 \
953 953 ASSERT(pp->p_share > 0); \
954 954 pp->p_share--; \
955 955 \
956 956 if (hme->hme_prev) { \
957 957 ASSERT(pp->p_mapping != hme); \
958 958 ASSERT(hme->hme_prev->hme_page == pp || \
959 959 IS_PAHME(hme->hme_prev)); \
960 960 hme->hme_prev->hme_next = hme->hme_next; \
961 961 } else { \
962 962 ASSERT(pp->p_mapping == hme); \
963 963 pp->p_mapping = hme->hme_next; \
964 964 ASSERT((pp->p_mapping == NULL) ? \
965 965 (pp->p_share == 0) : 1); \
966 966 } \
967 967 \
968 968 if (hme->hme_next) { \
969 969 ASSERT(hme->hme_next->hme_page == pp || \
970 970 IS_PAHME(hme->hme_next)); \
971 971 hme->hme_next->hme_prev = hme->hme_prev; \
972 972 } \
973 973 \
974 974 /* zero out the entry */ \
975 975 hme->hme_next = NULL; \
976 976 hme->hme_prev = NULL; \
977 977 hme->hme_page = NULL; \
978 978 \
979 979 if (hme_size(hme) > TTE8K) { \
980 980 /* remove mappings for remainder of large pg */ \
981 981 sfmmu_rm_large_mappings(pp, hme_size(hme)); \
982 982 } \
983 983 }
984 984
985 985 /*
986 986 * This function returns the hment given the hme_blk and a vaddr.
987 987 * It assumes addr has already been checked to belong to hme_blk's
988 988 * range.
989 989 */
990 990 #define HBLKTOHME(hment, hmeblkp, addr) \
991 991 { \
992 992 int index; \
993 993 HBLKTOHME_IDX(hment, hmeblkp, addr, index) \
994 994 }
995 995
996 996 /*
997 997 * Version of HBLKTOHME that also returns the index in hmeblkp
998 998 * of the hment.
999 999 */
1000 1000 #define HBLKTOHME_IDX(hment, hmeblkp, addr, idx) \
1001 1001 { \
1002 1002 ASSERT(in_hblk_range((hmeblkp), (addr))); \
1003 1003 \
1004 1004 if (get_hblk_ttesz(hmeblkp) == TTE8K) { \
1005 1005 idx = (((uintptr_t)(addr) >> MMU_PAGESHIFT) & (NHMENTS-1)); \
1006 1006 } else \
1007 1007 idx = 0; \
1008 1008 \
1009 1009 (hment) = &(hmeblkp)->hblk_hme[idx]; \
1010 1010 }
1011 1011
1012 1012 /*
1013 1013 * Disable any page sizes not supported by the CPU
1014 1014 */
1015 1015 void
1016 1016 hat_init_pagesizes()
1017 1017 {
1018 1018 int i;
1019 1019
1020 1020 mmu_exported_page_sizes = 0;
1021 1021 for (i = TTE8K; i < max_mmu_page_sizes; i++) {
1022 1022
1023 1023 szc_2_userszc[i] = (uint_t)-1;
1024 1024 userszc_2_szc[i] = (uint_t)-1;
1025 1025
1026 1026 if ((mmu_exported_pagesize_mask & (1 << i)) == 0) {
1027 1027 disable_large_pages |= (1 << i);
1028 1028 } else {
1029 1029 szc_2_userszc[i] = mmu_exported_page_sizes;
1030 1030 userszc_2_szc[mmu_exported_page_sizes] = i;
1031 1031 mmu_exported_page_sizes++;
1032 1032 }
1033 1033 }
1034 1034
1035 1035 disable_ism_large_pages |= disable_large_pages;
1036 1036 disable_auto_data_large_pages = disable_large_pages;
1037 1037 disable_auto_text_large_pages = disable_large_pages;
1038 1038
1039 1039 /*
1040 1040 * Initialize mmu-specific large page sizes.
1041 1041 */
1042 1042 if (&mmu_large_pages_disabled) {
1043 1043 disable_large_pages |= mmu_large_pages_disabled(HAT_LOAD);
1044 1044 disable_ism_large_pages |=
1045 1045 mmu_large_pages_disabled(HAT_LOAD_SHARE);
1046 1046 disable_auto_data_large_pages |=
1047 1047 mmu_large_pages_disabled(HAT_AUTO_DATA);
1048 1048 disable_auto_text_large_pages |=
1049 1049 mmu_large_pages_disabled(HAT_AUTO_TEXT);
1050 1050 }
1051 1051 }
1052 1052
1053 1053 /*
1054 1054 * Initialize the hardware address translation structures.
1055 1055 */
1056 1056 void
1057 1057 hat_init(void)
1058 1058 {
1059 1059 int i;
1060 1060 uint_t sz;
1061 1061 size_t size;
1062 1062
1063 1063 hat_lock_init();
1064 1064 hat_kstat_init();
1065 1065
1066 1066 /*
1067 1067 * Hardware-only bits in a TTE
1068 1068 */
1069 1069 MAKE_TTE_MASK(&hw_tte);
1070 1070
1071 1071 hat_init_pagesizes();
1072 1072
1073 1073 /* Initialize the hash locks */
1074 1074 for (i = 0; i < khmehash_num; i++) {
1075 1075 mutex_init(&khme_hash[i].hmehash_mutex, NULL,
1076 1076 MUTEX_DEFAULT, NULL);
1077 1077 khme_hash[i].hmeh_nextpa = HMEBLK_ENDPA;
1078 1078 }
1079 1079 for (i = 0; i < uhmehash_num; i++) {
1080 1080 mutex_init(&uhme_hash[i].hmehash_mutex, NULL,
1081 1081 MUTEX_DEFAULT, NULL);
1082 1082 uhme_hash[i].hmeh_nextpa = HMEBLK_ENDPA;
1083 1083 }
1084 1084 khmehash_num--; /* make sure counter starts from 0 */
1085 1085 uhmehash_num--; /* make sure counter starts from 0 */
1086 1086
1087 1087 /*
1088 1088 * Allocate context domain structures.
1089 1089 *
1090 1090 * A platform may choose to modify max_mmu_ctxdoms in
1091 1091 * set_platform_defaults(). If a platform does not define
1092 1092 * a set_platform_defaults() or does not choose to modify
1093 1093 * max_mmu_ctxdoms, it gets one MMU context domain for every CPU.
1094 1094 *
1095 1095 * For all platforms that have CPUs sharing MMUs, this
1096 1096 * value must be defined.
1097 1097 */
1098 1098 if (max_mmu_ctxdoms == 0)
1099 1099 max_mmu_ctxdoms = max_ncpus;
1100 1100
1101 1101 size = max_mmu_ctxdoms * sizeof (mmu_ctx_t *);
1102 1102 mmu_ctxs_tbl = kmem_zalloc(size, KM_SLEEP);
1103 1103
1104 1104 /* mmu_ctx_t is 64 bytes aligned */
1105 1105 mmuctxdom_cache = kmem_cache_create("mmuctxdom_cache",
1106 1106 sizeof (mmu_ctx_t), 64, NULL, NULL, NULL, NULL, NULL, 0);
1107 1107 /*
1108 1108 * MMU context domain initialization for the Boot CPU.
1109 1109 * This needs the context domains array allocated above.
1110 1110 */
1111 1111 mutex_enter(&cpu_lock);
1112 1112 sfmmu_cpu_init(CPU);
1113 1113 mutex_exit(&cpu_lock);
1114 1114
1115 1115 /*
1116 1116 * Initialize the ism mapping list lock.
1117 1117 */
1118 1118
1119 1119 mutex_init(&ism_mlist_lock, NULL, MUTEX_DEFAULT, NULL);
1120 1120
1121 1121 /*
1122 1122 * Each sfmmu structure carries an array of MMU context info
1123 1123 * structures, one per context domain. The size of this array depends
1124 1124 * on the maximum number of context domains. So, the size of the
1125 1125 * sfmmu structure varies per platform.
1126 1126 *
1127 1127 * sfmmu is allocated from static arena, because trap
1128 1128 * handler at TL > 0 is not allowed to touch kernel relocatable
1129 1129 * memory. sfmmu's alignment is changed to 64 bytes from
1130 1130 * default 8 bytes, as the lower 6 bits will be used to pass
1131 1131 * pgcnt to vtag_flush_pgcnt_tl1.
1132 1132 */
1133 1133 size = sizeof (sfmmu_t) + sizeof (sfmmu_ctx_t) * (max_mmu_ctxdoms - 1);
1134 1134
1135 1135 sfmmuid_cache = kmem_cache_create("sfmmuid_cache", size,
1136 1136 64, sfmmu_idcache_constructor, sfmmu_idcache_destructor,
1137 1137 NULL, NULL, static_arena, 0);
1138 1138
1139 1139 sfmmu_tsbinfo_cache = kmem_cache_create("sfmmu_tsbinfo_cache",
1140 1140 sizeof (struct tsb_info), 0, NULL, NULL, NULL, NULL, NULL, 0);
1141 1141
1142 1142 /*
1143 1143 * Since we only use the tsb8k cache to "borrow" pages for TSBs
1144 1144 * from the heap when low on memory or when TSB_FORCEALLOC is
1145 1145 * specified, don't use magazines to cache them--we want to return
1146 1146 * them to the system as quickly as possible.
1147 1147 */
1148 1148 sfmmu_tsb8k_cache = kmem_cache_create("sfmmu_tsb8k_cache",
1149 1149 MMU_PAGESIZE, MMU_PAGESIZE, NULL, NULL, NULL, NULL,
1150 1150 static_arena, KMC_NOMAGAZINE);
1151 1151
1152 1152 /*
1153 1153 * Set tsb_alloc_hiwater to 1/tsb_alloc_hiwater_factor of physical
1154 1154 * memory, which corresponds to the old static reserve for TSBs.
1155 1155 * tsb_alloc_hiwater_factor defaults to 32. This caps the amount of
1156 1156 * memory we'll allocate for TSB slabs; beyond this point TSB
1157 1157 * allocations will be taken from the kernel heap (via
1158 1158 * sfmmu_tsb8k_cache) and will be throttled as would any other kmem
1159 1159 * consumer.
1160 1160 */
1161 1161 if (tsb_alloc_hiwater_factor == 0) {
1162 1162 tsb_alloc_hiwater_factor = TSB_ALLOC_HIWATER_FACTOR_DEFAULT;
1163 1163 }
1164 1164 SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
1165 1165
1166 1166 for (sz = tsb_slab_ttesz; sz > 0; sz--) {
1167 1167 if (!(disable_large_pages & (1 << sz)))
1168 1168 break;
1169 1169 }
1170 1170
1171 1171 if (sz < tsb_slab_ttesz) {
1172 1172 tsb_slab_ttesz = sz;
1173 1173 tsb_slab_shift = MMU_PAGESHIFT + (sz << 1) + sz;
1174 1174 tsb_slab_size = 1 << tsb_slab_shift;
1175 1175 tsb_slab_mask = (1 << (tsb_slab_shift - MMU_PAGESHIFT)) - 1;
1176 1176 use_bigtsb_arena = 0;
1177 1177 } else if (use_bigtsb_arena &&
1178 1178 (disable_large_pages & (1 << bigtsb_slab_ttesz))) {
1179 1179 use_bigtsb_arena = 0;
1180 1180 }
1181 1181
1182 1182 if (!use_bigtsb_arena) {
1183 1183 bigtsb_slab_shift = tsb_slab_shift;
1184 1184 }
1185 1185 SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
1186 1186
1187 1187 /*
1188 1188 * On smaller memory systems, allocate TSB memory in smaller chunks
1189 1189 * than the default 4M slab size. We also honor disable_large_pages
1190 1190 * here.
1191 1191 *
1192 1192 * The trap handlers need to be patched with the final slab shift,
1193 1193 * since they need to be able to construct the TSB pointer at runtime.
1194 1194 */
1195 1195 if ((tsb_max_growsize <= TSB_512K_SZCODE) &&
1196 1196 !(disable_large_pages & (1 << TTE512K))) {
1197 1197 tsb_slab_ttesz = TTE512K;
1198 1198 tsb_slab_shift = MMU_PAGESHIFT512K;
1199 1199 tsb_slab_size = MMU_PAGESIZE512K;
1200 1200 tsb_slab_mask = MMU_PAGEOFFSET512K >> MMU_PAGESHIFT;
1201 1201 use_bigtsb_arena = 0;
1202 1202 }
1203 1203
1204 1204 if (!use_bigtsb_arena) {
1205 1205 bigtsb_slab_ttesz = tsb_slab_ttesz;
1206 1206 bigtsb_slab_shift = tsb_slab_shift;
1207 1207 bigtsb_slab_size = tsb_slab_size;
1208 1208 bigtsb_slab_mask = tsb_slab_mask;
1209 1209 }
1210 1210
1211 1211
1212 1212 /*
1213 1213 * Set up memory callback to update tsb_alloc_hiwater and
1214 1214 * tsb_max_growsize.
1215 1215 */
1216 1216 i = kphysm_setup_func_register(&sfmmu_update_vec, (void *) 0);
1217 1217 ASSERT(i == 0);
1218 1218
1219 1219 /*
1220 1220 * kmem_tsb_arena is the source from which large TSB slabs are
1221 1221 * drawn. The quantum of this arena corresponds to the largest
1222 1222 * TSB size we can dynamically allocate for user processes.
1223 1223 * Currently it must also be a supported page size since we
1224 1224 * use exactly one translation entry to map each slab page.
1225 1225 *
1226 1226 * The per-lgroup kmem_tsb_default_arena arenas are the arenas from
1227 1227 * which most TSBs are allocated. Since most TSB allocations are
1228 1228 * typically 8K we have a kmem cache we stack on top of each
1229 1229 * kmem_tsb_default_arena to speed up those allocations.
1230 1230 *
1231 1231 * Note the two-level scheme of arenas is required only
1232 1232 * because vmem_create doesn't allow us to specify alignment
1233 1233 * requirements. If this ever changes the code could be
1234 1234 * simplified to use only one level of arenas.
1235 1235 *
1236 1236 * If 256M page support exists on sun4v, 256MB kmem_bigtsb_arena
1237 1237 * will be provided in addition to the 4M kmem_tsb_arena.
1238 1238 */
1239 1239 if (use_bigtsb_arena) {
1240 1240 kmem_bigtsb_arena = vmem_create("kmem_bigtsb", NULL, 0,
1241 1241 bigtsb_slab_size, sfmmu_vmem_xalloc_aligned_wrapper,
1242 1242 vmem_xfree, heap_arena, 0, VM_SLEEP);
1243 1243 }
1244 1244
1245 1245 kmem_tsb_arena = vmem_create("kmem_tsb", NULL, 0, tsb_slab_size,
1246 1246 sfmmu_vmem_xalloc_aligned_wrapper,
1247 1247 vmem_xfree, heap_arena, 0, VM_SLEEP);
1248 1248
1249 1249 if (tsb_lgrp_affinity) {
1250 1250 char s[50];
1251 1251 for (i = 0; i < NLGRPS_MAX; i++) {
1252 1252 if (use_bigtsb_arena) {
1253 1253 (void) sprintf(s, "kmem_bigtsb_lgrp%d", i);
1254 1254 kmem_bigtsb_default_arena[i] = vmem_create(s,
1255 1255 NULL, 0, 2 * tsb_slab_size,
1256 1256 sfmmu_tsb_segkmem_alloc,
1257 1257 sfmmu_tsb_segkmem_free, kmem_bigtsb_arena,
1258 1258 0, VM_SLEEP | VM_BESTFIT);
1259 1259 }
1260 1260
1261 1261 (void) sprintf(s, "kmem_tsb_lgrp%d", i);
1262 1262 kmem_tsb_default_arena[i] = vmem_create(s,
1263 1263 NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc,
1264 1264 sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0,
1265 1265 VM_SLEEP | VM_BESTFIT);
1266 1266
1267 1267 (void) sprintf(s, "sfmmu_tsb_lgrp%d_cache", i);
1268 1268 sfmmu_tsb_cache[i] = kmem_cache_create(s,
1269 1269 PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL,
1270 1270 kmem_tsb_default_arena[i], 0);
1271 1271 }
1272 1272 } else {
1273 1273 if (use_bigtsb_arena) {
1274 1274 kmem_bigtsb_default_arena[0] =
1275 1275 vmem_create("kmem_bigtsb_default", NULL, 0,
1276 1276 2 * tsb_slab_size, sfmmu_tsb_segkmem_alloc,
1277 1277 sfmmu_tsb_segkmem_free, kmem_bigtsb_arena, 0,
1278 1278 VM_SLEEP | VM_BESTFIT);
1279 1279 }
1280 1280
1281 1281 kmem_tsb_default_arena[0] = vmem_create("kmem_tsb_default",
1282 1282 NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc,
1283 1283 sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0,
1284 1284 VM_SLEEP | VM_BESTFIT);
1285 1285 sfmmu_tsb_cache[0] = kmem_cache_create("sfmmu_tsb_cache",
1286 1286 PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL,
1287 1287 kmem_tsb_default_arena[0], 0);
1288 1288 }
1289 1289
1290 1290 sfmmu8_cache = kmem_cache_create("sfmmu8_cache", HME8BLK_SZ,
1291 1291 HMEBLK_ALIGN, sfmmu_hblkcache_constructor,
1292 1292 sfmmu_hblkcache_destructor,
1293 1293 sfmmu_hblkcache_reclaim, (void *)HME8BLK_SZ,
1294 1294 hat_memload_arena, KMC_NOHASH);
1295 1295
1296 1296 hat_memload1_arena = vmem_create("hat_memload1", NULL, 0, PAGESIZE,
1297 1297 segkmem_alloc_permanent, segkmem_free, heap_arena, 0,
1298 1298 VMC_DUMPSAFE | VM_SLEEP);
1299 1299
1300 1300 sfmmu1_cache = kmem_cache_create("sfmmu1_cache", HME1BLK_SZ,
1301 1301 HMEBLK_ALIGN, sfmmu_hblkcache_constructor,
1302 1302 sfmmu_hblkcache_destructor,
1303 1303 NULL, (void *)HME1BLK_SZ,
1304 1304 hat_memload1_arena, KMC_NOHASH);
1305 1305
1306 1306 pa_hment_cache = kmem_cache_create("pa_hment_cache", PAHME_SZ,
1307 1307 0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH);
1308 1308
1309 1309 ism_blk_cache = kmem_cache_create("ism_blk_cache",
1310 1310 sizeof (ism_blk_t), ecache_alignsize, NULL, NULL,
1311 1311 NULL, NULL, static_arena, KMC_NOHASH);
1312 1312
1313 1313 ism_ment_cache = kmem_cache_create("ism_ment_cache",
1314 1314 sizeof (ism_ment_t), 0, NULL, NULL,
1315 1315 NULL, NULL, NULL, 0);
1316 1316
1317 1317 /*
1318 1318 * We grab the first hat for the kernel.
1319 1319 */
1320 1320 AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
1321 1321 kas.a_hat = hat_alloc(&kas);
1322 1322 AS_LOCK_EXIT(&kas, &kas.a_lock);
1323 1323
1324 1324 /*
1325 1325 * Initialize hblk_reserve.
1326 1326 */
1327 1327 ((struct hme_blk *)hblk_reserve)->hblk_nextpa =
1328 1328 va_to_pa((caddr_t)hblk_reserve);
1329 1329
1330 1330 #ifndef UTSB_PHYS
1331 1331 /*
1332 1332 * Reserve some kernel virtual address space for the locked TTEs
1333 1333 * that allow us to probe the TSB from TL>0.
1334 1334 */
1335 1335 utsb_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size,
1336 1336 0, 0, NULL, NULL, VM_SLEEP);
1337 1337 utsb4m_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size,
1338 1338 0, 0, NULL, NULL, VM_SLEEP);
1339 1339 #endif
1340 1340
1341 1341 #ifdef VAC
1342 1342 /*
1343 1343 * The big page VAC handling code assumes VAC
1344 1344 * will not be bigger than the smallest big
1345 1345 * page, which is 64K.
1346 1346 */
1347 1347 if (TTEPAGES(TTE64K) < CACHE_NUM_COLOR) {
1348 1348 cmn_err(CE_PANIC, "VAC too big!");
1349 1349 }
1350 1350 #endif
1351 1351
1352 1352 uhme_hash_pa = va_to_pa(uhme_hash);
1353 1353 khme_hash_pa = va_to_pa(khme_hash);
1354 1354
1355 1355 /*
1356 1356 * Initialize relocation locks. kpr_suspendlock is held
1357 1357 * at PIL_MAX to prevent interrupts from pinning the holder
1358 1358 * of a suspended TTE which may access it, leading to a
1359 1359 * deadlock condition.
1360 1360 */
1361 1361 mutex_init(&kpr_mutex, NULL, MUTEX_DEFAULT, NULL);
1362 1362 mutex_init(&kpr_suspendlock, NULL, MUTEX_SPIN, (void *)PIL_MAX);
1363 1363
1364 1364 /*
1365 1365 * If shared context support is disabled via /etc/system,
1366 1366 * set shctx_on to 0 here if it was set to 1 earlier in the boot
1367 1367 * sequence by the cpu module initialization code.
1368 1368 */
1369 1369 if (shctx_on && disable_shctx) {
1370 1370 shctx_on = 0;
1371 1371 }
1372 1372
1373 1373 if (shctx_on) {
1374 1374 srd_buckets = kmem_zalloc(SFMMU_MAX_SRD_BUCKETS *
1375 1375 sizeof (srd_buckets[0]), KM_SLEEP);
1376 1376 for (i = 0; i < SFMMU_MAX_SRD_BUCKETS; i++) {
1377 1377 mutex_init(&srd_buckets[i].srdb_lock, NULL,
1378 1378 MUTEX_DEFAULT, NULL);
1379 1379 }
1380 1380
1381 1381 srd_cache = kmem_cache_create("srd_cache", sizeof (sf_srd_t),
1382 1382 0, sfmmu_srdcache_constructor, sfmmu_srdcache_destructor,
1383 1383 NULL, NULL, NULL, 0);
1384 1384 region_cache = kmem_cache_create("region_cache",
1385 1385 sizeof (sf_region_t), 0, sfmmu_rgncache_constructor,
1386 1386 sfmmu_rgncache_destructor, NULL, NULL, NULL, 0);
1387 1387 scd_cache = kmem_cache_create("scd_cache", sizeof (sf_scd_t),
1388 1388 0, sfmmu_scdcache_constructor, sfmmu_scdcache_destructor,
1389 1389 NULL, NULL, NULL, 0);
1390 1390 }
1391 1391
1392 1392 /*
1393 1393 * Pre-allocate hrm_hashtab before enabling the collection of
1394 1394 * refmod statistics. Allocating on the fly would mean running
1395 1395 * the risk of recursive mutex enters or
1396 1396 * deadlocks.
1397 1397 */
1398 1398 hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
1399 1399 KM_SLEEP);
1400 1400
1401 1401 /* Allocate per-cpu pending freelist of hmeblks */
1402 1402 cpu_hme_pend = kmem_zalloc((NCPU * sizeof (cpu_hme_pend_t)) + 64,
1403 1403 KM_SLEEP);
1404 1404 cpu_hme_pend = (cpu_hme_pend_t *)P2ROUNDUP(
1405 1405 (uintptr_t)cpu_hme_pend, 64);
1406 1406
1407 1407 for (i = 0; i < NCPU; i++) {
1408 1408 mutex_init(&cpu_hme_pend[i].chp_mutex, NULL, MUTEX_DEFAULT,
1409 1409 NULL);
1410 1410 }
1411 1411
1412 1412 if (cpu_hme_pend_thresh == 0) {
1413 1413 cpu_hme_pend_thresh = CPU_HME_PEND_THRESH;
1414 1414 }
1415 1415 }
1416 1416
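The cpu_hme_pend allocation above over-allocates by 64 bytes and rounds the
pointer up with P2ROUNDUP so each per-CPU mutex starts on its own cache line,
avoiding false sharing. A minimal user-level sketch of the same trick,
assuming a 64-byte line; the names here are illustrative, not kernel
interfaces:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Round x up to the next multiple of 64 (what P2ROUNDUP(x, 64) does). */
    #define	ALIGN64(x)	(((uintptr_t)(x) + 63) & ~(uintptr_t)63)

    /*
     * Allocate size bytes starting on a 64-byte boundary by padding the
     * request. The raw pointer is discarded, so this only suits
     * allocations that are never freed -- as with cpu_hme_pend.
     */
    static void *
    alloc_cacheline_aligned(size_t size)
    {
    	void *raw = calloc(1, size + 64);

    	return (raw == NULL ? NULL : (void *)ALIGN64(raw));
    }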
1417 1417 /*
1418 1418 * Initialize locking for the hat layer, called early during boot.
1419 1419 */
1420 1420 static void
1421 1421 hat_lock_init()
1422 1422 {
1423 1423 int i;
1424 1424
1425 1425 /*
1426 1426 * initialize the array of mutexes protecting a page's mapping
1427 1427 * list and p_nrm field.
1428 1428 */
1429 1429 for (i = 0; i < MML_TABLE_SIZE; i++)
1430 1430 mutex_init(&mml_table[i].pad_mutex, NULL, MUTEX_DEFAULT, NULL);
1431 1431
1432 1432 if (kpm_enable) {
1433 1433 for (i = 0; i < kpmp_table_sz; i++) {
1434 1434 mutex_init(&kpmp_table[i].khl_mutex, NULL,
1435 1435 MUTEX_DEFAULT, NULL);
1436 1436 }
1437 1437 }
1438 1438
1439 1439 /*
1440 1440 * Initialize array of mutex locks that protects sfmmu fields and
1441 1441 * TSB lists.
1442 1442 */
1443 1443 for (i = 0; i < SFMMU_NUM_LOCK; i++)
1444 1444 mutex_init(HATLOCK_MUTEXP(&hat_lock[i]), NULL, MUTEX_DEFAULT,
1445 1445 NULL);
1446 1446 }
1447 1447
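mml_table and hat_lock above are classic lock striping: an array of mutexes
indexed by a hash of the protected object (a page or an sfmmu), trading a
little memory for far less contention than one global lock. A hedged
user-level sketch of the pattern, with illustrative names and sizes:

    #include <pthread.h>
    #include <stdint.h>

    #define	NSTRIPES	128	/* power of two, like MML_TABLE_SIZE */

    static pthread_mutex_t stripes[NSTRIPES];

    /* Call once at startup, as hat_lock_init() does for mml_table. */
    static void
    stripes_init(void)
    {
    	int i;

    	for (i = 0; i < NSTRIPES; i++)
    		(void) pthread_mutex_init(&stripes[i], NULL);
    }

    /* Hash an object's address to its stripe. */
    static pthread_mutex_t *
    stripe_for(uintptr_t obj)
    {
    	return (&stripes[(obj >> 6) & (NSTRIPES - 1)]);
    }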
1448 1448 #define SFMMU_KERNEL_MAXVA \
1449 1449 (kmem64_base ? (uintptr_t)kmem64_end : (SYSLIMIT))
1450 1450
1451 1451 /*
1452 1452 * Allocate a hat structure.
1453 1453 * Called when an address space first uses a hat.
1454 1454 */
1455 1455 struct hat *
1456 1456 hat_alloc(struct as *as)
1457 1457 {
1458 1458 sfmmu_t *sfmmup;
1459 1459 int i;
1460 1460 uint64_t cnum;
1461 1461 extern uint_t get_color_start(struct as *);
1462 1462
1463 1463 ASSERT(AS_WRITE_HELD(as, &as->a_lock));
1464 1464 sfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP);
1465 1465 sfmmup->sfmmu_as = as;
1466 1466 sfmmup->sfmmu_flags = 0;
1467 1467 sfmmup->sfmmu_tteflags = 0;
1468 1468 sfmmup->sfmmu_rtteflags = 0;
1469 1469 LOCK_INIT_CLEAR(&sfmmup->sfmmu_ctx_lock);
1470 1470
1471 1471 if (as == &kas) {
1472 1472 ksfmmup = sfmmup;
1473 1473 sfmmup->sfmmu_cext = 0;
1474 1474 cnum = KCONTEXT;
1475 1475
1476 1476 sfmmup->sfmmu_clrstart = 0;
1477 1477 sfmmup->sfmmu_tsb = NULL;
1478 1478 /*
1479 1479 * hat_kern_setup() will call sfmmu_init_ktsbinfo()
1480 1480 		 * to set up tsb_info for ksfmmup.
1481 1481 */
1482 1482 } else {
1483 1483
1484 1484 /*
1485 1485 		 * Just set to invalid ctx. When it faults, it will
1486 1486 		 * get a valid ctx. This avoids the situation where
1487 1487 		 * we get a ctx, but it gets stolen, and then we
1488 1488 		 * fault when we try to run and so have to get
1489 1489 		 * another ctx.
1490 1490 */
1491 1491 sfmmup->sfmmu_cext = 0;
1492 1492 cnum = INVALID_CONTEXT;
1493 1493
1494 1494 /* initialize original physical page coloring bin */
1495 1495 sfmmup->sfmmu_clrstart = get_color_start(as);
1496 1496 #ifdef DEBUG
1497 1497 if (tsb_random_size) {
1498 1498 uint32_t randval = (uint32_t)gettick() >> 4;
1499 1499 int size = randval % (tsb_max_growsize + 1);
1500 1500
1501 1501 			/* choose a random tsb size for stress testing */
1502 1502 (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, size,
1503 1503 TSB8K|TSB64K|TSB512K, 0, sfmmup);
1504 1504 } else
1505 1505 #endif /* DEBUG */
1506 1506 (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb,
1507 1507 default_tsb_size,
1508 1508 TSB8K|TSB64K|TSB512K, 0, sfmmup);
1509 1509 sfmmup->sfmmu_flags = HAT_SWAPPED | HAT_ALLCTX_INVALID;
1510 1510 ASSERT(sfmmup->sfmmu_tsb != NULL);
1511 1511 }
1512 1512
1513 1513 ASSERT(max_mmu_ctxdoms > 0);
1514 1514 for (i = 0; i < max_mmu_ctxdoms; i++) {
1515 1515 sfmmup->sfmmu_ctxs[i].cnum = cnum;
1516 1516 sfmmup->sfmmu_ctxs[i].gnum = 0;
1517 1517 }
1518 1518
1519 1519 for (i = 0; i < max_mmu_page_sizes; i++) {
1520 1520 sfmmup->sfmmu_ttecnt[i] = 0;
1521 1521 sfmmup->sfmmu_scdrttecnt[i] = 0;
1522 1522 sfmmup->sfmmu_ismttecnt[i] = 0;
1523 1523 sfmmup->sfmmu_scdismttecnt[i] = 0;
1524 1524 sfmmup->sfmmu_pgsz[i] = TTE8K;
1525 1525 }
1526 1526 sfmmup->sfmmu_tsb0_4minflcnt = 0;
1527 1527 sfmmup->sfmmu_iblk = NULL;
1528 1528 sfmmup->sfmmu_ismhat = 0;
1529 1529 sfmmup->sfmmu_scdhat = 0;
1530 1530 sfmmup->sfmmu_ismblkpa = (uint64_t)-1;
1531 1531 if (sfmmup == ksfmmup) {
1532 1532 CPUSET_ALL(sfmmup->sfmmu_cpusran);
1533 1533 } else {
1534 1534 CPUSET_ZERO(sfmmup->sfmmu_cpusran);
1535 1535 }
1536 1536 sfmmup->sfmmu_free = 0;
1537 1537 sfmmup->sfmmu_rmstat = 0;
1538 1538 sfmmup->sfmmu_clrbin = sfmmup->sfmmu_clrstart;
1539 1539 cv_init(&sfmmup->sfmmu_tsb_cv, NULL, CV_DEFAULT, NULL);
1540 1540 sfmmup->sfmmu_srdp = NULL;
1541 1541 SF_RGNMAP_ZERO(sfmmup->sfmmu_region_map);
1542 1542 bzero(sfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE);
1543 1543 sfmmup->sfmmu_scdp = NULL;
1544 1544 sfmmup->sfmmu_scd_link.next = NULL;
1545 1545 sfmmup->sfmmu_scd_link.prev = NULL;
1546 1546 return (sfmmup);
1547 1547 }
1548 1548
1549 1549 /*
1550 1550 * Create per-MMU context domain kstats for a given MMU ctx.
1551 1551 */
1552 1552 static void
1553 1553 sfmmu_mmu_kstat_create(mmu_ctx_t *mmu_ctxp)
1554 1554 {
1555 1555 mmu_ctx_stat_t stat;
1556 1556 kstat_t *mmu_kstat;
1557 1557
1558 1558 ASSERT(MUTEX_HELD(&cpu_lock));
1559 1559 ASSERT(mmu_ctxp->mmu_kstat == NULL);
1560 1560
1561 1561 mmu_kstat = kstat_create("unix", mmu_ctxp->mmu_idx, "mmu_ctx",
1562 1562 "hat", KSTAT_TYPE_NAMED, MMU_CTX_NUM_STATS, KSTAT_FLAG_VIRTUAL);
1563 1563
1564 1564 if (mmu_kstat == NULL) {
1565 1565 cmn_err(CE_WARN, "kstat_create for MMU %d failed",
1566 1566 mmu_ctxp->mmu_idx);
1567 1567 } else {
1568 1568 mmu_kstat->ks_data = mmu_ctxp->mmu_kstat_data;
1569 1569 for (stat = 0; stat < MMU_CTX_NUM_STATS; stat++)
1570 1570 kstat_named_init(&mmu_ctxp->mmu_kstat_data[stat],
1571 1571 mmu_ctx_kstat_names[stat], KSTAT_DATA_INT64);
1572 1572 mmu_ctxp->mmu_kstat = mmu_kstat;
1573 1573 kstat_install(mmu_kstat);
1574 1574 }
1575 1575 }
1576 1576
1577 1577 /*
1578 1578 * plat_cpuid_to_mmu_ctx_info() is a platform interface that returns MMU
1579 1579 * context domain information for a given CPU. If a platform does not
1580 1580 * specify that interface, then the function below is used instead to return
1581 1581 * default information. The defaults are as follows:
1582 1582 *
1583 1583 * - The number of MMU context IDs supported on any CPU in the
1584 1584 * system is 8K.
1585 1585 * - There is one MMU context domain per CPU.
1586 1586 */
1587 1587 /*ARGSUSED*/
1588 1588 static void
1589 1589 sfmmu_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *infop)
1590 1590 {
1591 1591 infop->mmu_nctxs = nctxs;
1592 1592 infop->mmu_idx = cpu[cpuid]->cpu_seqid;
1593 1593 }
1594 1594
1595 1595 /*
1596 1596 * Called during CPU initialization to set the MMU context-related information
1597 1597 * for a CPU.
1598 1598 *
1599 1599 * cpu_lock serializes accesses to mmu_ctxs and mmu_saved_gnum.
1600 1600 */
1601 1601 void
1602 1602 sfmmu_cpu_init(cpu_t *cp)
1603 1603 {
1604 1604 mmu_ctx_info_t info;
1605 1605 mmu_ctx_t *mmu_ctxp;
1606 1606
1607 1607 ASSERT(MUTEX_HELD(&cpu_lock));
1608 1608
1609 1609 if (&plat_cpuid_to_mmu_ctx_info == NULL)
1610 1610 sfmmu_cpuid_to_mmu_ctx_info(cp->cpu_id, &info);
1611 1611 else
1612 1612 plat_cpuid_to_mmu_ctx_info(cp->cpu_id, &info);
1613 1613
1614 1614 ASSERT(info.mmu_idx < max_mmu_ctxdoms);
1615 1615
1616 1616 if ((mmu_ctxp = mmu_ctxs_tbl[info.mmu_idx]) == NULL) {
1617 1617 /* Each mmu_ctx is cacheline aligned. */
1618 1618 mmu_ctxp = kmem_cache_alloc(mmuctxdom_cache, KM_SLEEP);
1619 1619 bzero(mmu_ctxp, sizeof (mmu_ctx_t));
1620 1620
1621 1621 mutex_init(&mmu_ctxp->mmu_lock, NULL, MUTEX_SPIN,
1622 1622 (void *)ipltospl(DISP_LEVEL));
1623 1623 mmu_ctxp->mmu_idx = info.mmu_idx;
1624 1624 mmu_ctxp->mmu_nctxs = info.mmu_nctxs;
1625 1625 /*
1626 1626 		 * Globally, for the lifetime of a system,
1627 1627 		 * gnum must always increase.
1628 1628 * mmu_saved_gnum is protected by the cpu_lock.
1629 1629 */
1630 1630 mmu_ctxp->mmu_gnum = mmu_saved_gnum + 1;
1631 1631 mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS;
1632 1632
1633 1633 sfmmu_mmu_kstat_create(mmu_ctxp);
1634 1634
1635 1635 mmu_ctxs_tbl[info.mmu_idx] = mmu_ctxp;
1636 1636 } else {
1637 1637 ASSERT(mmu_ctxp->mmu_idx == info.mmu_idx);
1638 1638 ASSERT(mmu_ctxp->mmu_nctxs <= info.mmu_nctxs);
1639 1639 }
1640 1640
1641 1641 /*
1642 1642 * The mmu_lock is acquired here to prevent races with
1643 1643 * the wrap-around code.
1644 1644 */
1645 1645 mutex_enter(&mmu_ctxp->mmu_lock);
1646 1646
1647 1647
1648 1648 mmu_ctxp->mmu_ncpus++;
1649 1649 CPUSET_ADD(mmu_ctxp->mmu_cpuset, cp->cpu_id);
1650 1650 CPU_MMU_IDX(cp) = info.mmu_idx;
1651 1651 CPU_MMU_CTXP(cp) = mmu_ctxp;
1652 1652
1653 1653 mutex_exit(&mmu_ctxp->mmu_lock);
1654 1654 }
1655 1655
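The &plat_cpuid_to_mmu_ctx_info == NULL test in sfmmu_cpu_init() above relies
on the platform hook being a weak symbol: when no platform module defines it,
the reference resolves to address NULL and the generic default runs instead.
A sketch of that pattern under the same assumption (ELF weak symbols, as with
GCC or Sun Studio); plat_get_info and friends are made-up names:

    #pragma weak plat_get_info

    /* Weak reference: resolves to NULL if nothing defines it at link time. */
    extern void plat_get_info(int cpu, int *infop);

    static void
    default_get_info(int cpu, int *infop)
    {
    	*infop = cpu;			/* generic one-domain-per-CPU default */
    }

    void
    get_info(int cpu, int *infop)
    {
    	if (&plat_get_info == NULL)	/* no platform override linked in */
    		default_get_info(cpu, infop);
    	else
    		plat_get_info(cpu, infop);
    }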
1656 1656 static void
1657 1657 sfmmu_ctxdom_free(mmu_ctx_t *mmu_ctxp)
1658 1658 {
1659 1659 ASSERT(MUTEX_HELD(&cpu_lock));
1660 1660 ASSERT(!MUTEX_HELD(&mmu_ctxp->mmu_lock));
1661 1661
1662 1662 mutex_destroy(&mmu_ctxp->mmu_lock);
1663 1663
1664 1664 if (mmu_ctxp->mmu_kstat)
1665 1665 kstat_delete(mmu_ctxp->mmu_kstat);
1666 1666
1667 1667 /* mmu_saved_gnum is protected by the cpu_lock. */
1668 1668 if (mmu_saved_gnum < mmu_ctxp->mmu_gnum)
1669 1669 mmu_saved_gnum = mmu_ctxp->mmu_gnum;
1670 1670
1671 1671 kmem_cache_free(mmuctxdom_cache, mmu_ctxp);
1672 1672 }
1673 1673
1674 1674 /*
1675 1675 * Called to perform MMU context-related cleanup for a CPU.
1676 1676 */
1677 1677 void
1678 1678 sfmmu_cpu_cleanup(cpu_t *cp)
1679 1679 {
1680 1680 mmu_ctx_t *mmu_ctxp;
1681 1681
1682 1682 ASSERT(MUTEX_HELD(&cpu_lock));
1683 1683
1684 1684 mmu_ctxp = CPU_MMU_CTXP(cp);
1685 1685 ASSERT(mmu_ctxp != NULL);
1686 1686
1687 1687 /*
1688 1688 * The mmu_lock is acquired here to prevent races with
1689 1689 * the wrap-around code.
1690 1690 */
1691 1691 mutex_enter(&mmu_ctxp->mmu_lock);
1692 1692
1693 1693 CPU_MMU_CTXP(cp) = NULL;
1694 1694
1695 1695 CPUSET_DEL(mmu_ctxp->mmu_cpuset, cp->cpu_id);
1696 1696 if (--mmu_ctxp->mmu_ncpus == 0) {
1697 1697 mmu_ctxs_tbl[mmu_ctxp->mmu_idx] = NULL;
1698 1698 mutex_exit(&mmu_ctxp->mmu_lock);
1699 1699 sfmmu_ctxdom_free(mmu_ctxp);
1700 1700 return;
1701 1701 }
1702 1702
1703 1703 mutex_exit(&mmu_ctxp->mmu_lock);
1704 1704 }
1705 1705
1706 1706 uint_t
1707 1707 sfmmu_ctxdom_nctxs(int idx)
1708 1708 {
1709 1709 return (mmu_ctxs_tbl[idx]->mmu_nctxs);
1710 1710 }
1711 1711
1712 1712 #ifdef sun4v
1713 1713 /*
1714 1714 * sfmmu_ctxdoms_* is an interface provided to help keep context domains
1715 1715  * consistent after suspend/resume on systems that can resume on different
1716 1716  * hardware than they were suspended on.
1717 1717 *
1718 1718 * sfmmu_ctxdom_lock(void) locks all context domains and prevents new contexts
1719 1719 * from being allocated. It acquires all hat_locks, which blocks most access to
1720 1720 * context data, except for a few cases that are handled separately or are
1721 1721 * harmless. It wraps each domain to increment gnum and invalidate on-CPU
1722 1722 * contexts, and forces cnum to its max. As a result of this call all user
1723 1723 * threads that are running on CPUs trap and try to perform wrap around but
1724 1724  * can't because hat_locks are taken. Threads not on CPUs but started by the
1725 1725  * scheduler go to sfmmu_alloc_ctx() to acquire a context without checking
1726 1726  * hat_lock, but fail because cnum == nctxs, and therefore also trap and block
1727 1727  * on hat_lock trying to wrap. sfmmu_ctxdoms_lock() must be called before CPUs
1728 1728  * are paused, else it could deadlock acquiring locks held by paused CPUs.
1729 1729 *
1730 1730  * sfmmu_ctxdoms_remove() removes context domains from every CPU and records
1731 1731 * the CPUs that had them. It must be called after CPUs have been paused. This
1732 1732 * ensures that no threads are in sfmmu_alloc_ctx() accessing domain data,
1733 1733 * because pause_cpus sends a mondo interrupt to every CPU, and sfmmu_alloc_ctx
1734 1734 * runs with interrupts disabled. When CPUs are later resumed, they may enter
1735 1735 * sfmmu_alloc_ctx, but it will check for CPU_MMU_CTXP = NULL and immediately
1736 1736 * return failure. Or, they will be blocked trying to acquire hat_lock. Thus
1737 1737 * after sfmmu_ctxdoms_remove returns, we are guaranteed that no one is
1738 1738 * accessing the old context domains.
1739 1739 *
1740 1740 * sfmmu_ctxdoms_update(void) frees space used by old context domains and
1741 1741 * allocates new context domains based on hardware layout. It initializes
1742 1742  * every CPU that had a context domain before migration to have one again.
1743 1743 * sfmmu_ctxdoms_update must be called after CPUs are resumed, else it
1744 1744 * could deadlock acquiring locks held by paused CPUs.
1745 1745 *
1746 1746 * sfmmu_ctxdoms_unlock(void) releases all hat_locks after which user threads
1747 1747 * acquire new context ids and continue execution.
1748 1748 *
1749 1749  * Therefore these functions should be called in the following order:
1750 1750  *	suspend_routine()
1751 1751  *		sfmmu_ctxdoms_lock()
1752 1752  *		pause_cpus()
1753 1753  *		suspend()
1754 1754  *			if (suspend failed)
1755 1755  *				sfmmu_ctxdoms_unlock()
1756 1756  *		...
1757 1757  *		sfmmu_ctxdoms_remove()
1758 1758  *		resume_cpus()
1759 1759  *		sfmmu_ctxdoms_update()
1760 1760  *		sfmmu_ctxdoms_unlock()
1761 1761 */
1762 1762 static cpuset_t sfmmu_ctxdoms_pset;
1763 1763
1764 1764 void
1765 1765 sfmmu_ctxdoms_remove()
1766 1766 {
1767 1767 processorid_t id;
1768 1768 cpu_t *cp;
1769 1769
1770 1770 /*
1771 1771 * Record the CPUs that have domains in sfmmu_ctxdoms_pset, so they can
1772 1772 * be restored post-migration. A CPU may be powered off and not have a
1773 1773 * domain, for example.
1774 1774 */
1775 1775 CPUSET_ZERO(sfmmu_ctxdoms_pset);
1776 1776
1777 1777 for (id = 0; id < NCPU; id++) {
1778 1778 if ((cp = cpu[id]) != NULL && CPU_MMU_CTXP(cp) != NULL) {
1779 1779 CPUSET_ADD(sfmmu_ctxdoms_pset, id);
1780 1780 CPU_MMU_CTXP(cp) = NULL;
1781 1781 }
1782 1782 }
1783 1783 }
1784 1784
1785 1785 void
1786 1786 sfmmu_ctxdoms_lock(void)
1787 1787 {
1788 1788 int idx;
1789 1789 mmu_ctx_t *mmu_ctxp;
1790 1790
1791 1791 sfmmu_hat_lock_all();
1792 1792
1793 1793 /*
1794 1794 * At this point, no thread can be in sfmmu_ctx_wrap_around, because
1795 1795 * hat_lock is always taken before calling it.
1796 1796 *
1797 1797 * For each domain, set mmu_cnum to max so no more contexts can be
1798 1798 * allocated, and wrap to flush on-CPU contexts and force threads to
1799 1799 * acquire a new context when we later drop hat_lock after migration.
1800 1800 * Setting mmu_cnum may race with sfmmu_alloc_ctx which also sets cnum,
1801 1801 * but the latter uses CAS and will miscompare and not overwrite it.
1802 1802 */
1803 1803 kpreempt_disable(); /* required by sfmmu_ctx_wrap_around */
1804 1804 for (idx = 0; idx < max_mmu_ctxdoms; idx++) {
1805 1805 if ((mmu_ctxp = mmu_ctxs_tbl[idx]) != NULL) {
1806 1806 mutex_enter(&mmu_ctxp->mmu_lock);
1807 1807 mmu_ctxp->mmu_cnum = mmu_ctxp->mmu_nctxs;
1808 1808 /* make sure updated cnum visible */
1809 1809 membar_enter();
1810 1810 mutex_exit(&mmu_ctxp->mmu_lock);
1811 1811 sfmmu_ctx_wrap_around(mmu_ctxp, B_FALSE);
1812 1812 }
1813 1813 }
1814 1814 kpreempt_enable();
1815 1815 }
1816 1816
1817 1817 void
1818 1818 sfmmu_ctxdoms_unlock(void)
1819 1819 {
1820 1820 sfmmu_hat_unlock_all();
1821 1821 }
1822 1822
1823 1823 void
1824 1824 sfmmu_ctxdoms_update(void)
1825 1825 {
1826 1826 processorid_t id;
1827 1827 cpu_t *cp;
1828 1828 uint_t idx;
1829 1829 mmu_ctx_t *mmu_ctxp;
1830 1830
1831 1831 /*
1832 1832 	 * Free all context domains. As a side effect, this increases
1833 1833 * mmu_saved_gnum to the maximum gnum over all domains, which is used to
1834 1834 * init gnum in the new domains, which therefore will be larger than the
1835 1835 * sfmmu gnum for any process, guaranteeing that every process will see
1836 1836 * a new generation and allocate a new context regardless of what new
1837 1837 * domain it runs in.
1838 1838 */
1839 1839 mutex_enter(&cpu_lock);
1840 1840
1841 1841 for (idx = 0; idx < max_mmu_ctxdoms; idx++) {
1842 1842 if (mmu_ctxs_tbl[idx] != NULL) {
1843 1843 mmu_ctxp = mmu_ctxs_tbl[idx];
1844 1844 mmu_ctxs_tbl[idx] = NULL;
1845 1845 sfmmu_ctxdom_free(mmu_ctxp);
1846 1846 }
1847 1847 }
1848 1848
1849 1849 for (id = 0; id < NCPU; id++) {
1850 1850 if (CPU_IN_SET(sfmmu_ctxdoms_pset, id) &&
1851 1851 (cp = cpu[id]) != NULL)
1852 1852 sfmmu_cpu_init(cp);
1853 1853 }
1854 1854 mutex_exit(&cpu_lock);
1855 1855 }
1856 1856 #endif
1857 1857
1858 1858 /*
1859 1859  * hat_setup() makes an address space context the current active one.
1860 1860  * In sfmmu this translates to setting the secondary context register
1861 1861  * with the corresponding context number.
1862 1862 */
1863 1863 void
1864 1864 hat_setup(struct hat *sfmmup, int allocflag)
1865 1865 {
1866 1866 hatlock_t *hatlockp;
1867 1867
1868 1868 /* Init needs some special treatment. */
1869 1869 if (allocflag == HAT_INIT) {
1870 1870 /*
1871 1871 * Make sure that we have
1872 1872 * 1. a TSB
1873 1873 * 2. a valid ctx that doesn't get stolen after this point.
1874 1874 */
1875 1875 hatlockp = sfmmu_hat_enter(sfmmup);
1876 1876
1877 1877 /*
1878 1878 * Swap in the TSB. hat_init() allocates tsbinfos without
1879 1879 * TSBs, but we need one for init, since the kernel does some
1880 1880 * special things to set up its stack and needs the TSB to
1881 1881 * resolve page faults.
1882 1882 */
1883 1883 sfmmu_tsb_swapin(sfmmup, hatlockp);
1884 1884
1885 1885 sfmmu_get_ctx(sfmmup);
1886 1886
1887 1887 sfmmu_hat_exit(hatlockp);
1888 1888 } else {
1889 1889 ASSERT(allocflag == HAT_ALLOC);
1890 1890
1891 1891 hatlockp = sfmmu_hat_enter(sfmmup);
1892 1892 kpreempt_disable();
1893 1893
1894 1894 CPUSET_ADD(sfmmup->sfmmu_cpusran, CPU->cpu_id);
1895 1895 /*
1896 1896 * sfmmu_setctx_sec takes <pgsz|cnum> as a parameter,
1897 1897 * pagesize bits don't matter in this case since we are passing
1898 1898 * INVALID_CONTEXT to it.
1899 1899 * Compatibility Note: hw takes care of MMU_SCONTEXT1
1900 1900 */
1901 1901 sfmmu_setctx_sec(INVALID_CONTEXT);
1902 1902 sfmmu_clear_utsbinfo();
1903 1903
1904 1904 kpreempt_enable();
1905 1905 sfmmu_hat_exit(hatlockp);
1906 1906 }
1907 1907 }
1908 1908
1909 1909 /*
1910 1910 * Free all the translation resources for the specified address space.
1911 1911 * Called from as_free when an address space is being destroyed.
1912 1912 */
1913 1913 void
1914 1914 hat_free_start(struct hat *sfmmup)
1915 1915 {
1916 1916 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
1917 1917 ASSERT(sfmmup != ksfmmup);
1918 1918
1919 1919 sfmmup->sfmmu_free = 1;
1920 1920 if (sfmmup->sfmmu_scdp != NULL) {
1921 1921 sfmmu_leave_scd(sfmmup, 0);
1922 1922 }
1923 1923
1924 1924 ASSERT(sfmmup->sfmmu_scdp == NULL);
1925 1925 }
1926 1926
1927 1927 void
1928 1928 hat_free_end(struct hat *sfmmup)
1929 1929 {
1930 1930 int i;
1931 1931
1932 1932 ASSERT(sfmmup->sfmmu_free == 1);
1933 1933 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
1934 1934 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0);
1935 1935 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0);
1936 1936 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
1937 1937 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
1938 1938 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
1939 1939
1940 1940 if (sfmmup->sfmmu_rmstat) {
1941 1941 hat_freestat(sfmmup->sfmmu_as, NULL);
1942 1942 }
1943 1943
1944 1944 while (sfmmup->sfmmu_tsb != NULL) {
1945 1945 struct tsb_info *next = sfmmup->sfmmu_tsb->tsb_next;
1946 1946 sfmmu_tsbinfo_free(sfmmup->sfmmu_tsb);
1947 1947 sfmmup->sfmmu_tsb = next;
1948 1948 }
1949 1949
1950 1950 if (sfmmup->sfmmu_srdp != NULL) {
1951 1951 sfmmu_leave_srd(sfmmup);
1952 1952 ASSERT(sfmmup->sfmmu_srdp == NULL);
1953 1953 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
1954 1954 if (sfmmup->sfmmu_hmeregion_links[i] != NULL) {
1955 1955 kmem_free(sfmmup->sfmmu_hmeregion_links[i],
1956 1956 SFMMU_L2_HMERLINKS_SIZE);
1957 1957 sfmmup->sfmmu_hmeregion_links[i] = NULL;
1958 1958 }
1959 1959 }
1960 1960 }
1961 1961 sfmmu_free_sfmmu(sfmmup);
1962 1962
1963 1963 #ifdef DEBUG
1964 1964 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
1965 1965 ASSERT(sfmmup->sfmmu_hmeregion_links[i] == NULL);
1966 1966 }
1967 1967 #endif
1968 1968
1969 1969 kmem_cache_free(sfmmuid_cache, sfmmup);
1970 1970 }
1971 1971
1972 1972 /*
1973 - * Set up any translation structures, for the specified address space,
1974 - * that are needed or preferred when the process is being swapped in.
1975 - */
1976 -/* ARGSUSED */
1977 -void
1978 -hat_swapin(struct hat *hat)
1979 -{
1980 -}
1981 -
1982 -/*
1983 - * Free all of the translation resources, for the specified address space,
1984 - * that can be freed while the process is swapped out. Called from as_swapout.
1985 - * Also, free up the ctx that this process was using.
1986 - */
1987 -void
1988 -hat_swapout(struct hat *sfmmup)
1989 -{
1990 - struct hmehash_bucket *hmebp;
1991 - struct hme_blk *hmeblkp;
1992 - struct hme_blk *pr_hblk = NULL;
1993 - struct hme_blk *nx_hblk;
1994 - int i;
1995 - struct hme_blk *list = NULL;
1996 - hatlock_t *hatlockp;
1997 - struct tsb_info *tsbinfop;
1998 - struct free_tsb {
1999 - struct free_tsb *next;
2000 - struct tsb_info *tsbinfop;
2001 - }; /* free list of TSBs */
2002 - struct free_tsb *freelist, *last, *next;
2003 -
2004 - SFMMU_STAT(sf_swapout);
2005 -
2006 - /*
2007 - * There is no way to go from an as to all its translations in sfmmu.
2008 - * Here is one of the times when we take the big hit and traverse
2009 - * the hash looking for hme_blks to free up. Not only do we free up
2010      -	 * this as's hme_blks but all those that are free.  We are obviously
2011 - * swapping because we need memory so let's free up as much
2012 - * as we can.
2013 - *
2014 - * Note that we don't flush TLB/TSB here -- it's not necessary
2015 - * because:
2016 - * 1) we free the ctx we're using and throw away the TSB(s);
2017 - * 2) processes aren't runnable while being swapped out.
2018 - */
2019 - ASSERT(sfmmup != KHATID);
2020 - for (i = 0; i <= UHMEHASH_SZ; i++) {
2021 - hmebp = &uhme_hash[i];
2022 - SFMMU_HASH_LOCK(hmebp);
2023 - hmeblkp = hmebp->hmeblkp;
2024 - pr_hblk = NULL;
2025 - while (hmeblkp) {
2026 -
2027 - if ((hmeblkp->hblk_tag.htag_id == sfmmup) &&
2028 - !hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) {
2029 - ASSERT(!hmeblkp->hblk_shared);
2030 - (void) sfmmu_hblk_unload(sfmmup, hmeblkp,
2031 - (caddr_t)get_hblk_base(hmeblkp),
2032 - get_hblk_endaddr(hmeblkp),
2033 - NULL, HAT_UNLOAD);
2034 - }
2035 - nx_hblk = hmeblkp->hblk_next;
2036 - if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
2037 - ASSERT(!hmeblkp->hblk_lckcnt);
2038 - sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
2039 - &list, 0);
2040 - } else {
2041 - pr_hblk = hmeblkp;
2042 - }
2043 - hmeblkp = nx_hblk;
2044 - }
2045 - SFMMU_HASH_UNLOCK(hmebp);
2046 - }
2047 -
2048 - sfmmu_hblks_list_purge(&list, 0);
2049 -
2050 - /*
2051 - * Now free up the ctx so that others can reuse it.
2052 - */
2053 - hatlockp = sfmmu_hat_enter(sfmmup);
2054 -
2055 - sfmmu_invalidate_ctx(sfmmup);
2056 -
2057 - /*
2058 - * Free TSBs, but not tsbinfos, and set SWAPPED flag.
2059 - * If TSBs were never swapped in, just return.
2060 - * This implies that we don't support partial swapping
2061 - * of TSBs -- either all are swapped out, or none are.
2062 - *
2063 - * We must hold the HAT lock here to prevent racing with another
2064 - * thread trying to unmap TTEs from the TSB or running the post-
2065 - * relocator after relocating the TSB's memory. Unfortunately, we
2066 - * can't free memory while holding the HAT lock or we could
2067 - * deadlock, so we build a list of TSBs to be freed after marking
2068 - * the tsbinfos as swapped out and free them after dropping the
2069 - * lock.
2070 - */
2071 - if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
2072 - sfmmu_hat_exit(hatlockp);
2073 - return;
2074 - }
2075 -
2076 - SFMMU_FLAGS_SET(sfmmup, HAT_SWAPPED);
2077 - last = freelist = NULL;
2078 - for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
2079 - tsbinfop = tsbinfop->tsb_next) {
2080 - ASSERT((tsbinfop->tsb_flags & TSB_SWAPPED) == 0);
2081 -
2082 - /*
2083 - * Cast the TSB into a struct free_tsb and put it on the free
2084 - * list.
2085 - */
2086 - if (freelist == NULL) {
2087 - last = freelist = (struct free_tsb *)tsbinfop->tsb_va;
2088 - } else {
2089 - last->next = (struct free_tsb *)tsbinfop->tsb_va;
2090 - last = last->next;
2091 - }
2092 - last->next = NULL;
2093 - last->tsbinfop = tsbinfop;
2094 - tsbinfop->tsb_flags |= TSB_SWAPPED;
2095 - /*
2096 - * Zero out the TTE to clear the valid bit.
2097 - * Note we can't use a value like 0xbad because we want to
2098 - * ensure diagnostic bits are NEVER set on TTEs that might
2099 - * be loaded. The intent is to catch any invalid access
2100 - * to the swapped TSB, such as a thread running with a valid
2101 - * context without first calling sfmmu_tsb_swapin() to
2102 - * allocate TSB memory.
2103 - */
2104 - tsbinfop->tsb_tte.ll = 0;
2105 - }
2106 -
2107 - /* Now we can drop the lock and free the TSB memory. */
2108 - sfmmu_hat_exit(hatlockp);
2109 - for (; freelist != NULL; freelist = next) {
2110 - next = freelist->next;
2111 - sfmmu_tsb_free(freelist->tsbinfop);
2112 - }
2113 -}
2114 -
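The deleted hat_swapout() above used a trick worth noting before it goes: it
threaded its free list through the TSB memory itself (casting each tsb_va to
struct free_tsb), so nothing had to be allocated while the HAT lock was held,
and the real frees happened only after dropping the lock. A minimal sketch of
the same idea, with hypothetical names:

    struct free_node {
    	struct free_node *next;
    	void *tag;		/* what to free once the lock is dropped */
    };

    /*
     * Under the lock: reuse the first bytes of buf as the list node.
     * buf must be at least sizeof (struct free_node) bytes.
     */
    static struct free_node *
    defer_free(struct free_node *head, void *buf, void *tag)
    {
    	struct free_node *n = buf;

    	n->next = head;
    	n->tag = tag;
    	return (n);
    }

    /* After dropping the lock: walk the list and really free things. */
    static void
    drain_deferred(struct free_node *head, void (*freefn)(void *))
    {
    	while (head != NULL) {
    		struct free_node *next = head->next;

    		freefn(head->tag);
    		head = next;
    	}
    }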
2115 -/*
2116 1973  * Duplicate the translations of one as into another, new as.
2117 1974 */
2118 1975 /* ARGSUSED */
2119 1976 int
2120 1977 hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len,
2121 1978 uint_t flag)
2122 1979 {
2123 1980 sf_srd_t *srdp;
2124 1981 sf_scd_t *scdp;
2125 1982 int i;
2126 1983 extern uint_t get_color_start(struct as *);
2127 1984
2128 1985 ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW) ||
2129 1986 (flag == HAT_DUP_SRD));
2130 1987 ASSERT(hat != ksfmmup);
2131 1988 ASSERT(newhat != ksfmmup);
2132 1989 ASSERT(flag != HAT_DUP_ALL || hat->sfmmu_srdp == newhat->sfmmu_srdp);
2133 1990
2134 1991 if (flag == HAT_DUP_COW) {
2135 1992 panic("hat_dup: HAT_DUP_COW not supported");
2136 1993 }
2137 1994
2138 1995 if (flag == HAT_DUP_SRD && ((srdp = hat->sfmmu_srdp) != NULL)) {
2139 1996 ASSERT(srdp->srd_evp != NULL);
2140 1997 VN_HOLD(srdp->srd_evp);
2141 1998 ASSERT(srdp->srd_refcnt > 0);
2142 1999 newhat->sfmmu_srdp = srdp;
2143 2000 atomic_inc_32((volatile uint_t *)&srdp->srd_refcnt);
2144 2001 }
2145 2002
2146 2003 /*
2147 2004 * HAT_DUP_ALL flag is used after as duplication is done.
2148 2005 */
2149 2006 if (flag == HAT_DUP_ALL && ((srdp = newhat->sfmmu_srdp) != NULL)) {
2150 2007 ASSERT(newhat->sfmmu_srdp->srd_refcnt >= 2);
2151 2008 newhat->sfmmu_rtteflags = hat->sfmmu_rtteflags;
2152 2009 if (hat->sfmmu_flags & HAT_4MTEXT_FLAG) {
2153 2010 newhat->sfmmu_flags |= HAT_4MTEXT_FLAG;
2154 2011 }
2155 2012
2156 2013 /* check if need to join scd */
2157 2014 if ((scdp = hat->sfmmu_scdp) != NULL &&
2158 2015 newhat->sfmmu_scdp != scdp) {
2159 2016 int ret;
2160 2017 SF_RGNMAP_IS_SUBSET(&newhat->sfmmu_region_map,
2161 2018 &scdp->scd_region_map, ret);
2162 2019 ASSERT(ret);
2163 2020 sfmmu_join_scd(scdp, newhat);
2164 2021 ASSERT(newhat->sfmmu_scdp == scdp &&
2165 2022 scdp->scd_refcnt >= 2);
2166 2023 for (i = 0; i < max_mmu_page_sizes; i++) {
2167 2024 newhat->sfmmu_ismttecnt[i] =
2168 2025 hat->sfmmu_ismttecnt[i];
2169 2026 newhat->sfmmu_scdismttecnt[i] =
2170 2027 hat->sfmmu_scdismttecnt[i];
2171 2028 }
2172 2029 }
2173 2030
2174 2031 sfmmu_check_page_sizes(newhat, 1);
2175 2032 }
2176 2033
2177 2034 if (flag == HAT_DUP_ALL && consistent_coloring == 0 &&
2178 2035 update_proc_pgcolorbase_after_fork != 0) {
2179 2036 hat->sfmmu_clrbin = get_color_start(hat->sfmmu_as);
2180 2037 }
2181 2038 return (0);
2182 2039 }
2183 2040
2184 2041 void
2185 2042 hat_memload(struct hat *hat, caddr_t addr, struct page *pp,
2186 2043 uint_t attr, uint_t flags)
2187 2044 {
2188 2045 hat_do_memload(hat, addr, pp, attr, flags,
2189 2046 SFMMU_INVALID_SHMERID);
2190 2047 }
2191 2048
2192 2049 void
2193 2050 hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
2194 2051 uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
2195 2052 {
2196 2053 uint_t rid;
2197 2054 if (rcookie == HAT_INVALID_REGION_COOKIE) {
2198 2055 hat_do_memload(hat, addr, pp, attr, flags,
2199 2056 SFMMU_INVALID_SHMERID);
2200 2057 return;
2201 2058 }
2202 2059 rid = (uint_t)((uint64_t)rcookie);
2203 2060 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
2204 2061 hat_do_memload(hat, addr, pp, attr, flags, rid);
2205 2062 }
2206 2063
2207 2064 /*
2208 2065 * Set up addr to map to page pp with protection prot.
2209 2066 * As an optimization we also load the TSB with the
2210 2067 * corresponding tte but it is no big deal if the tte gets kicked out.
2211 2068 */
2212 2069 static void
2213 2070 hat_do_memload(struct hat *hat, caddr_t addr, struct page *pp,
2214 2071 uint_t attr, uint_t flags, uint_t rid)
2215 2072 {
2216 2073 tte_t tte;
2217 2074
2218 2075
2219 2076 ASSERT(hat != NULL);
2220 2077 ASSERT(PAGE_LOCKED(pp));
2221 2078 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
2222 2079 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
2223 2080 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2224 2081 SFMMU_VALIDATE_HMERID(hat, rid, addr, MMU_PAGESIZE);
2225 2082
2226 2083 if (PP_ISFREE(pp)) {
2227 2084 panic("hat_memload: loading a mapping to free page %p",
2228 2085 (void *)pp);
2229 2086 }
2230 2087
2231 2088 ASSERT((hat == ksfmmup) ||
2232 2089 AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock));
2233 2090
2234 2091 if (flags & ~SFMMU_LOAD_ALLFLAG)
2235 2092 cmn_err(CE_NOTE, "hat_memload: unsupported flags %d",
2236 2093 flags & ~SFMMU_LOAD_ALLFLAG);
2237 2094
2238 2095 if (hat->sfmmu_rmstat)
2239 2096 hat_resvstat(MMU_PAGESIZE, hat->sfmmu_as, addr);
2240 2097
2241 2098 #if defined(SF_ERRATA_57)
2242 2099 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2243 2100 (addr < errata57_limit) && (attr & PROT_EXEC) &&
2244 2101 !(flags & HAT_LOAD_SHARE)) {
2245 2102 cmn_err(CE_WARN, "hat_memload: illegal attempt to make user "
2246 2103 " page executable");
2247 2104 attr &= ~PROT_EXEC;
2248 2105 }
2249 2106 #endif
2250 2107
2251 2108 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K);
2252 2109 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, flags, rid);
2253 2110
2254 2111 /*
2255 2112 * Check TSB and TLB page sizes.
2256 2113 */
2257 2114 if ((flags & HAT_LOAD_SHARE) == 0) {
2258 2115 sfmmu_check_page_sizes(hat, 1);
2259 2116 }
2260 2117 }
2261 2118
2262 2119 /*
2263 2120 * hat_devload can be called to map real memory (e.g.
2264 2121  * /dev/kmem) and even though hat_devload will determine pfn is
2265 2122  * for memory, it will be unable to get a shared lock on the
2266 2123  * page (because someone else has it exclusively) and will
2267 2124  * pass pp = NULL.  If tteload doesn't get a non-NULL
2268 2125 * page pointer it can't cache memory.
2269 2126 */
2270 2127 void
2271 2128 hat_devload(struct hat *hat, caddr_t addr, size_t len, pfn_t pfn,
2272 2129 uint_t attr, int flags)
2273 2130 {
2274 2131 tte_t tte;
2275 2132 struct page *pp = NULL;
2276 2133 int use_lgpg = 0;
2277 2134
2278 2135 ASSERT(hat != NULL);
2279 2136
2280 2137 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
2281 2138 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2282 2139 ASSERT((hat == ksfmmup) ||
2283 2140 AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock));
2284 2141 if (len == 0)
2285 2142 panic("hat_devload: zero len");
2286 2143 if (flags & ~SFMMU_LOAD_ALLFLAG)
2287 2144 cmn_err(CE_NOTE, "hat_devload: unsupported flags %d",
2288 2145 flags & ~SFMMU_LOAD_ALLFLAG);
2289 2146
2290 2147 #if defined(SF_ERRATA_57)
2291 2148 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2292 2149 (addr < errata57_limit) && (attr & PROT_EXEC) &&
2293 2150 !(flags & HAT_LOAD_SHARE)) {
2294 2151 cmn_err(CE_WARN, "hat_devload: illegal attempt to make user "
2295 2152 " page executable");
2296 2153 attr &= ~PROT_EXEC;
2297 2154 }
2298 2155 #endif
2299 2156
2300 2157 /*
2301 2158 * If it's a memory page find its pp
2302 2159 */
2303 2160 if (!(flags & HAT_LOAD_NOCONSIST) && pf_is_memory(pfn)) {
2304 2161 pp = page_numtopp_nolock(pfn);
2305 2162 if (pp == NULL) {
2306 2163 flags |= HAT_LOAD_NOCONSIST;
2307 2164 } else {
2308 2165 if (PP_ISFREE(pp)) {
2309 2166 				panic("hat_devload: loading "
2310 2167 "a mapping to free page %p",
2311 2168 (void *)pp);
2312 2169 }
2313 2170 if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) {
2314 2171 				panic("hat_devload: loading a mapping "
2315 2172 "to unlocked relocatable page %p",
2316 2173 (void *)pp);
2317 2174 }
2318 2175 ASSERT(len == MMU_PAGESIZE);
2319 2176 }
2320 2177 }
2321 2178
2322 2179 if (hat->sfmmu_rmstat)
2323 2180 hat_resvstat(len, hat->sfmmu_as, addr);
2324 2181
2325 2182 if (flags & HAT_LOAD_NOCONSIST) {
2326 2183 attr |= SFMMU_UNCACHEVTTE;
2327 2184 use_lgpg = 1;
2328 2185 }
2329 2186 if (!pf_is_memory(pfn)) {
2330 2187 attr |= SFMMU_UNCACHEPTTE | HAT_NOSYNC;
2331 2188 use_lgpg = 1;
2332 2189 switch (attr & HAT_ORDER_MASK) {
2333 2190 case HAT_STRICTORDER:
2334 2191 case HAT_UNORDERED_OK:
2335 2192 /*
2336 2193 * we set the side effect bit for all non
2337 2194 * memory mappings unless merging is ok
2338 2195 */
2339 2196 attr |= SFMMU_SIDEFFECT;
2340 2197 break;
2341 2198 case HAT_MERGING_OK:
2342 2199 case HAT_LOADCACHING_OK:
2343 2200 case HAT_STORECACHING_OK:
2344 2201 break;
2345 2202 default:
2346 2203 panic("hat_devload: bad attr");
2347 2204 break;
2348 2205 }
2349 2206 }
2350 2207 while (len) {
2351 2208 if (!use_lgpg) {
2352 2209 sfmmu_memtte(&tte, pfn, attr, TTE8K);
2353 2210 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2354 2211 flags, SFMMU_INVALID_SHMERID);
2355 2212 len -= MMU_PAGESIZE;
2356 2213 addr += MMU_PAGESIZE;
2357 2214 pfn++;
2358 2215 continue;
2359 2216 }
2360 2217 /*
2361 2218 * try to use large pages, check va/pa alignments
2362 2219 * Note that 32M/256M page sizes are not (yet) supported.
2363 2220 */
2364 2221 if ((len >= MMU_PAGESIZE4M) &&
2365 2222 !((uintptr_t)addr & MMU_PAGEOFFSET4M) &&
2366 2223 !(disable_large_pages & (1 << TTE4M)) &&
2367 2224 !(mmu_ptob(pfn) & MMU_PAGEOFFSET4M)) {
2368 2225 sfmmu_memtte(&tte, pfn, attr, TTE4M);
2369 2226 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2370 2227 flags, SFMMU_INVALID_SHMERID);
2371 2228 len -= MMU_PAGESIZE4M;
2372 2229 addr += MMU_PAGESIZE4M;
2373 2230 pfn += MMU_PAGESIZE4M / MMU_PAGESIZE;
2374 2231 } else if ((len >= MMU_PAGESIZE512K) &&
2375 2232 !((uintptr_t)addr & MMU_PAGEOFFSET512K) &&
2376 2233 !(disable_large_pages & (1 << TTE512K)) &&
2377 2234 !(mmu_ptob(pfn) & MMU_PAGEOFFSET512K)) {
2378 2235 sfmmu_memtte(&tte, pfn, attr, TTE512K);
2379 2236 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2380 2237 flags, SFMMU_INVALID_SHMERID);
2381 2238 len -= MMU_PAGESIZE512K;
2382 2239 addr += MMU_PAGESIZE512K;
2383 2240 pfn += MMU_PAGESIZE512K / MMU_PAGESIZE;
2384 2241 } else if ((len >= MMU_PAGESIZE64K) &&
2385 2242 !((uintptr_t)addr & MMU_PAGEOFFSET64K) &&
2386 2243 !(disable_large_pages & (1 << TTE64K)) &&
2387 2244 !(mmu_ptob(pfn) & MMU_PAGEOFFSET64K)) {
2388 2245 sfmmu_memtte(&tte, pfn, attr, TTE64K);
2389 2246 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2390 2247 flags, SFMMU_INVALID_SHMERID);
2391 2248 len -= MMU_PAGESIZE64K;
2392 2249 addr += MMU_PAGESIZE64K;
2393 2250 pfn += MMU_PAGESIZE64K / MMU_PAGESIZE;
2394 2251 } else {
2395 2252 sfmmu_memtte(&tte, pfn, attr, TTE8K);
2396 2253 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2397 2254 flags, SFMMU_INVALID_SHMERID);
2398 2255 len -= MMU_PAGESIZE;
2399 2256 addr += MMU_PAGESIZE;
2400 2257 pfn++;
2401 2258 }
2402 2259 }
2403 2260
2404 2261 /*
2405 2262 * Check TSB and TLB page sizes.
2406 2263 */
2407 2264 if ((flags & HAT_LOAD_SHARE) == 0) {
2408 2265 sfmmu_check_page_sizes(hat, 1);
2409 2266 }
2410 2267 }
2411 2268
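The large-page cascade in hat_devload above reduces to one rule: pick the
largest page size for which the remaining length, the virtual address, and
the physical address are all suitably aligned, falling back to 8K. A
condensed sketch of that selection using sun4u sizes and omitting the
disable_large_pages mask; pick_mapsz is a made-up name:

    #include <stddef.h>
    #include <stdint.h>

    /* Candidate mapping sizes, largest first: 4M, 512K, 64K, 8K. */
    static const size_t pgszs[] = {
    	4 * 1024 * 1024, 512 * 1024, 64 * 1024, 8 * 1024
    };

    static size_t
    pick_mapsz(uintptr_t va, uint64_t pa, size_t len)
    {
    	int i;

    	for (i = 0; i < 3; i++) {
    		size_t sz = pgszs[i];

    		if (len >= sz && (va & (sz - 1)) == 0 &&
    		    (pa & (sz - 1)) == 0)
    			return (sz);
    	}
    	return (pgszs[3]);	/* an 8K mapping always applies */
    }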
2412 2269 void
2413 2270 hat_memload_array(struct hat *hat, caddr_t addr, size_t len,
2414 2271 struct page **pps, uint_t attr, uint_t flags)
2415 2272 {
2416 2273 hat_do_memload_array(hat, addr, len, pps, attr, flags,
2417 2274 SFMMU_INVALID_SHMERID);
2418 2275 }
2419 2276
2420 2277 void
2421 2278 hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
2422 2279 struct page **pps, uint_t attr, uint_t flags,
2423 2280 hat_region_cookie_t rcookie)
2424 2281 {
2425 2282 uint_t rid;
2426 2283 if (rcookie == HAT_INVALID_REGION_COOKIE) {
2427 2284 hat_do_memload_array(hat, addr, len, pps, attr, flags,
2428 2285 SFMMU_INVALID_SHMERID);
2429 2286 return;
2430 2287 }
2431 2288 rid = (uint_t)((uint64_t)rcookie);
2432 2289 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
2433 2290 hat_do_memload_array(hat, addr, len, pps, attr, flags, rid);
2434 2291 }
2435 2292
2436 2293 /*
2437 2294  * Map the largest extent possible out of the page array. The array may NOT
2438 2295  * be in order. The largest possible mapping a page can have
2439 2296  * is specified in its p_szc field, which cannot change
2440 2297  * as long as there are any mappings (large or small)
2441 2298  * to any of the pages that make up the large page (i.e. any
2442 2299  * promotion/demotion of page size is not up to the hat but up to
2443 2300  * the page free list manager). For a large mapping to be created,
2444 2301  * the array should consist of properly aligned contiguous pages
2445 2302  * that are part of a big page.
2446 2303 */
2447 2304 static void
2448 2305 hat_do_memload_array(struct hat *hat, caddr_t addr, size_t len,
2449 2306 struct page **pps, uint_t attr, uint_t flags, uint_t rid)
2450 2307 {
2451 2308 int ttesz;
2452 2309 size_t mapsz;
2453 2310 pgcnt_t numpg, npgs;
2454 2311 tte_t tte;
2455 2312 page_t *pp;
2456 2313 uint_t large_pages_disable;
2457 2314
2458 2315 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
2459 2316 SFMMU_VALIDATE_HMERID(hat, rid, addr, len);
2460 2317
2461 2318 if (hat->sfmmu_rmstat)
2462 2319 hat_resvstat(len, hat->sfmmu_as, addr);
2463 2320
2464 2321 #if defined(SF_ERRATA_57)
2465 2322 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2466 2323 (addr < errata57_limit) && (attr & PROT_EXEC) &&
2467 2324 !(flags & HAT_LOAD_SHARE)) {
2468 2325 cmn_err(CE_WARN, "hat_memload_array: illegal attempt to make "
2469 2326 "user page executable");
2470 2327 attr &= ~PROT_EXEC;
2471 2328 }
2472 2329 #endif
2473 2330
2474 2331 /* Get number of pages */
2475 2332 npgs = len >> MMU_PAGESHIFT;
2476 2333
2477 2334 if (flags & HAT_LOAD_SHARE) {
2478 2335 large_pages_disable = disable_ism_large_pages;
2479 2336 } else {
2480 2337 large_pages_disable = disable_large_pages;
2481 2338 }
2482 2339
2483 2340 if (npgs < NHMENTS || large_pages_disable == LARGE_PAGES_OFF) {
2484 2341 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs,
2485 2342 rid);
2486 2343 return;
2487 2344 }
2488 2345
2489 2346 while (npgs >= NHMENTS) {
2490 2347 pp = *pps;
2491 2348 for (ttesz = pp->p_szc; ttesz != TTE8K; ttesz--) {
2492 2349 /*
2493 2350 * Check if this page size is disabled.
2494 2351 */
2495 2352 if (large_pages_disable & (1 << ttesz))
2496 2353 continue;
2497 2354
2498 2355 numpg = TTEPAGES(ttesz);
2499 2356 mapsz = numpg << MMU_PAGESHIFT;
2500 2357 if ((npgs >= numpg) &&
2501 2358 IS_P2ALIGNED(addr, mapsz) &&
2502 2359 IS_P2ALIGNED(pp->p_pagenum, numpg)) {
2503 2360 /*
2504 2361 * At this point we have enough pages and
2505 2362 * we know the virtual address and the pfn
2506 2363 * are properly aligned. We still need
2507 2364 * to check for physical contiguity but since
2508 2365 * it is very likely that this is the case
2509 2366 * we will assume they are so and undo
2510 2367 * the request if necessary. It would
2511 2368 * be great if we could get a hint flag
2512 2369 * like HAT_CONTIG which would tell us
2513 2370 				 * the pages are contiguous for sure.
2514 2371 */
2515 2372 sfmmu_memtte(&tte, (*pps)->p_pagenum,
2516 2373 attr, ttesz);
2517 2374 if (!sfmmu_tteload_array(hat, &tte, addr,
2518 2375 pps, flags, rid)) {
2519 2376 break;
2520 2377 }
2521 2378 }
2522 2379 }
2523 2380 if (ttesz == TTE8K) {
2524 2381 /*
2525 2382 			 * We were not able to map the array using a large
2526 2383 			 * page; batch an hmeblk, or a fraction of one, at a time.
2527 2384 */
2528 2385 numpg = ((uintptr_t)addr >> MMU_PAGESHIFT)
2529 2386 & (NHMENTS-1);
2530 2387 numpg = NHMENTS - numpg;
2531 2388 ASSERT(numpg <= npgs);
2532 2389 mapsz = numpg * MMU_PAGESIZE;
2533 2390 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags,
2534 2391 numpg, rid);
2535 2392 }
2536 2393 addr += mapsz;
2537 2394 npgs -= numpg;
2538 2395 pps += numpg;
2539 2396 }
2540 2397
2541 2398 if (npgs) {
2542 2399 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs,
2543 2400 rid);
2544 2401 }
2545 2402
2546 2403 /*
2547 2404 * Check TSB and TLB page sizes.
2548 2405 */
2549 2406 if ((flags & HAT_LOAD_SHARE) == 0) {
2550 2407 sfmmu_check_page_sizes(hat, 1);
2551 2408 }
2552 2409 }
2553 2410
2554 2411 /*
2555 2412 * Function tries to batch 8K pages into the same hme blk.
2556 2413 */
2557 2414 static void
2558 2415 sfmmu_memload_batchsmall(struct hat *hat, caddr_t vaddr, page_t **pps,
2559 2416 uint_t attr, uint_t flags, pgcnt_t npgs, uint_t rid)
2560 2417 {
2561 2418 tte_t tte;
2562 2419 page_t *pp;
2563 2420 struct hmehash_bucket *hmebp;
2564 2421 struct hme_blk *hmeblkp;
2565 2422 int index;
2566 2423
2567 2424 while (npgs) {
2568 2425 /*
2569 2426 * Acquire the hash bucket.
2570 2427 */
2571 2428 hmebp = sfmmu_tteload_acquire_hashbucket(hat, vaddr, TTE8K,
2572 2429 rid);
2573 2430 ASSERT(hmebp);
2574 2431
2575 2432 /*
2576 2433 * Find the hment block.
2577 2434 */
2578 2435 hmeblkp = sfmmu_tteload_find_hmeblk(hat, hmebp, vaddr,
2579 2436 TTE8K, flags, rid);
2580 2437 ASSERT(hmeblkp);
2581 2438
2582 2439 do {
2583 2440 /*
2584 2441 * Make the tte.
2585 2442 */
2586 2443 pp = *pps;
2587 2444 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K);
2588 2445
2589 2446 /*
2590 2447 * Add the translation.
2591 2448 */
2592 2449 (void) sfmmu_tteload_addentry(hat, hmeblkp, &tte,
2593 2450 vaddr, pps, flags, rid);
2594 2451
2595 2452 /*
2596 2453 			 * Go to the next page.
2597 2454 */
2598 2455 pps++;
2599 2456 npgs--;
2600 2457
2601 2458 /*
2602 2459 			 * Go to the next address.
2603 2460 */
2604 2461 vaddr += MMU_PAGESIZE;
2605 2462
2606 2463 /*
2607 2464 			 * Don't cross over into a different hment block.
2608 2465 */
2609 2466 index = (int)(((uintptr_t)vaddr >> MMU_PAGESHIFT) &
2610 2467 (NHMENTS-1));
2611 2468
2612 2469 } while (index != 0 && npgs != 0);
2613 2470
2614 2471 /*
2615 2472 * Release the hash bucket.
2616 2473 */
2617 2474
2618 2475 sfmmu_tteload_release_hashbucket(hmebp);
2619 2476 }
2620 2477 }
2621 2478
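The do/while in sfmmu_memload_batchsmall above holds one hash bucket while
loading consecutive 8K entries, stopping as soon as the address rolls into
the next NHMENTS-aligned block (the index wraps to 0) or the pages run out.
The loop shape, stripped to its control flow with illustrative constants:

    #include <stdint.h>

    #define	PAGESHIFT	13	/* 8K pages */
    #define	NSLOTS		8	/* stands in for NHMENTS */

    static void
    process_in_blocks(uintptr_t va, int npgs)
    {
    	while (npgs != 0) {
    		int idx;

    		/* acquire per-block state for the block containing va */
    		do {
    			/* handle one page at va */
    			va += (uintptr_t)1 << PAGESHIFT;
    			npgs--;
    			idx = (int)((va >> PAGESHIFT) & (NSLOTS - 1));
    		} while (idx != 0 && npgs != 0);
    		/* release per-block state before the next block */
    	}
    }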
2622 2479 /*
2623 2480 * Construct a tte for a page:
2624 2481 *
2625 2482 * tte_valid = 1
2626 2483 * tte_size2 = size & TTE_SZ2_BITS (Panther and Olympus-C only)
2627 2484 * tte_size = size
2628 2485 * tte_nfo = attr & HAT_NOFAULT
2629 2486 * tte_ie = attr & HAT_STRUCTURE_LE
2630 2487 * tte_hmenum = hmenum
2631 2488 * tte_pahi = pp->p_pagenum >> TTE_PASHIFT;
2632 2489 * tte_palo = pp->p_pagenum & TTE_PALOMASK;
2633 2490 * tte_ref = 1 (optimization)
2634 2491 * tte_wr_perm = attr & PROT_WRITE;
2635 2492 * tte_no_sync = attr & HAT_NOSYNC
2636 2493 * tte_lock = attr & SFMMU_LOCKTTE
2637 2494 * tte_cp = !(attr & SFMMU_UNCACHEPTTE)
2638 2495 * tte_cv = !(attr & SFMMU_UNCACHEVTTE)
2639 2496 * tte_e = attr & SFMMU_SIDEFFECT
2640 2497 * tte_priv = !(attr & PROT_USER)
2641 2498 * tte_hwwr = if nosync is set and it is writable we set the mod bit (opt)
2642 2499 * tte_glb = 0
2643 2500 */
2644 2501 void
2645 2502 sfmmu_memtte(tte_t *ttep, pfn_t pfn, uint_t attr, int tte_sz)
2646 2503 {
2647 2504 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2648 2505
2649 2506 ttep->tte_inthi = MAKE_TTE_INTHI(pfn, attr, tte_sz, 0 /* hmenum */);
2650 2507 ttep->tte_intlo = MAKE_TTE_INTLO(pfn, attr, tte_sz, 0 /* hmenum */);
2651 2508
2652 2509 if (TTE_IS_NOSYNC(ttep)) {
2653 2510 TTE_SET_REF(ttep);
2654 2511 if (TTE_IS_WRITABLE(ttep)) {
2655 2512 TTE_SET_MOD(ttep);
2656 2513 }
2657 2514 }
2658 2515 if (TTE_IS_NFO(ttep) && TTE_IS_EXECUTABLE(ttep)) {
2659 2516 panic("sfmmu_memtte: can't set both NFO and EXEC bits");
2660 2517 }
2661 2518 }
2662 2519
2663 2520 /*
2664 2521 * This function will add a translation to the hme_blk and allocate the
2665 2522 * hme_blk if one does not exist.
2666 2523 * If a page structure is specified then it will add the
2667 2524 * corresponding hment to the mapping list.
2668 2525 * It will also update the hmenum field for the tte.
2669 2526 *
2670 2527 * Currently this function is only used for kernel mappings.
2671 2528 * So pass invalid region to sfmmu_tteload_array().
2672 2529 */
2673 2530 void
2674 2531 sfmmu_tteload(struct hat *sfmmup, tte_t *ttep, caddr_t vaddr, page_t *pp,
2675 2532 uint_t flags)
2676 2533 {
2677 2534 ASSERT(sfmmup == ksfmmup);
2678 2535 (void) sfmmu_tteload_array(sfmmup, ttep, vaddr, &pp, flags,
2679 2536 SFMMU_INVALID_SHMERID);
2680 2537 }
2681 2538
2682 2539 /*
2683 2540 * Load (ttep != NULL) or unload (ttep == NULL) one entry in the TSB.
2684 2541 * Assumes that a particular page size may only be resident in one TSB.
2685 2542 */
2686 2543 static void
2687 2544 sfmmu_mod_tsb(sfmmu_t *sfmmup, caddr_t vaddr, tte_t *ttep, int ttesz)
2688 2545 {
2689 2546 struct tsb_info *tsbinfop = NULL;
2690 2547 uint64_t tag;
2691 2548 struct tsbe *tsbe_addr;
2692 2549 uint64_t tsb_base;
2693 2550 uint_t tsb_size;
2694 2551 int vpshift = MMU_PAGESHIFT;
2695 2552 int phys = 0;
2696 2553
2697 2554 if (sfmmup == ksfmmup) { /* No support for 32/256M ksfmmu pages */
2698 2555 phys = ktsb_phys;
2699 2556 if (ttesz >= TTE4M) {
2700 2557 #ifndef sun4v
2701 2558 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M));
2702 2559 #endif
2703 2560 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base;
2704 2561 tsb_size = ktsb4m_szcode;
2705 2562 } else {
2706 2563 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base;
2707 2564 tsb_size = ktsb_szcode;
2708 2565 }
2709 2566 } else {
2710 2567 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz);
2711 2568
2712 2569 /*
2713 2570 * If there isn't a TSB for this page size, or the TSB is
2714 2571 * swapped out, there is nothing to do. Note that the latter
2715 2572 * case seems impossible but can occur if hat_pageunload()
2716 2573 * is called on an ISM mapping while the process is swapped
2717 2574 * out.
2718 2575 */
2719 2576 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED))
2720 2577 return;
2721 2578
2722 2579 /*
2723 2580 * If another thread is in the middle of relocating a TSB
2724 2581 * we can't unload the entry so set a flag so that the
2725 2582 * TSB will be flushed before it can be accessed by the
2726 2583 * process.
2727 2584 */
2728 2585 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) {
2729 2586 if (ttep == NULL)
2730 2587 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED;
2731 2588 return;
2732 2589 }
2733 2590 #if defined(UTSB_PHYS)
2734 2591 phys = 1;
2735 2592 tsb_base = (uint64_t)tsbinfop->tsb_pa;
2736 2593 #else
2737 2594 tsb_base = (uint64_t)tsbinfop->tsb_va;
2738 2595 #endif
2739 2596 tsb_size = tsbinfop->tsb_szc;
2740 2597 }
2741 2598 if (ttesz >= TTE4M)
2742 2599 vpshift = MMU_PAGESHIFT4M;
2743 2600
2744 2601 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size);
2745 2602 tag = sfmmu_make_tsbtag(vaddr);
2746 2603
2747 2604 if (ttep == NULL) {
2748 2605 sfmmu_unload_tsbe(tsbe_addr, tag, phys);
2749 2606 } else {
2750 2607 if (ttesz >= TTE4M) {
2751 2608 SFMMU_STAT(sf_tsb_load4m);
2752 2609 } else {
2753 2610 SFMMU_STAT(sf_tsb_load8k);
2754 2611 }
2755 2612
2756 2613 sfmmu_load_tsbe(tsbe_addr, tag, ttep, phys);
2757 2614 }
2758 2615 }
2759 2616
2760 2617 /*
2761 2618 * Unmap all entries from [start, end) matching the given page size.
2762 2619 *
2763 2620 * This function is used primarily to unmap replicated 64K or 512K entries
2764 2621 * from the TSB that are inserted using the base page size TSB pointer, but
2765 2622 * it may also be called to unmap a range of addresses from the TSB.
2766 2623 */
2767 2624 void
2768 2625 sfmmu_unload_tsb_range(sfmmu_t *sfmmup, caddr_t start, caddr_t end, int ttesz)
2769 2626 {
2770 2627 struct tsb_info *tsbinfop;
2771 2628 uint64_t tag;
2772 2629 struct tsbe *tsbe_addr;
2773 2630 caddr_t vaddr;
2774 2631 uint64_t tsb_base;
2775 2632 int vpshift, vpgsz;
2776 2633 uint_t tsb_size;
2777 2634 int phys = 0;
2778 2635
2779 2636 /*
2780 2637 * Assumptions:
2781 2638 * If ttesz == 8K, 64K or 512K, we walk through the range 8K
2782 2639 * at a time shooting down any valid entries we encounter.
2783 2640 *
2784 2641 * If ttesz >= 4M we walk the range 4M at a time shooting
2785 2642 * down any valid mappings we find.
2786 2643 */
2787 2644 if (sfmmup == ksfmmup) {
2788 2645 phys = ktsb_phys;
2789 2646 if (ttesz >= TTE4M) {
2790 2647 #ifndef sun4v
2791 2648 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M));
2792 2649 #endif
2793 2650 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base;
2794 2651 tsb_size = ktsb4m_szcode;
2795 2652 } else {
2796 2653 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base;
2797 2654 tsb_size = ktsb_szcode;
2798 2655 }
2799 2656 } else {
2800 2657 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz);
2801 2658
2802 2659 /*
2803 2660 * If there isn't a TSB for this page size, or the TSB is
2804 2661 * swapped out, there is nothing to do. Note that the latter
2805 2662 * case seems impossible but can occur if hat_pageunload()
2806 2663 * is called on an ISM mapping while the process is swapped
2807 2664 * out.
2808 2665 */
2809 2666 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED))
2810 2667 return;
2811 2668
2812 2669 /*
2813 2670 * If another thread is in the middle of relocating a TSB
2814 2671 * we can't unload the entry so set a flag so that the
2815 2672 * TSB will be flushed before it can be accessed by the
2816 2673 * process.
2817 2674 */
2818 2675 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) {
2819 2676 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED;
2820 2677 return;
2821 2678 }
2822 2679 #if defined(UTSB_PHYS)
2823 2680 phys = 1;
2824 2681 tsb_base = (uint64_t)tsbinfop->tsb_pa;
2825 2682 #else
2826 2683 tsb_base = (uint64_t)tsbinfop->tsb_va;
2827 2684 #endif
2828 2685 tsb_size = tsbinfop->tsb_szc;
2829 2686 }
2830 2687 if (ttesz >= TTE4M) {
2831 2688 vpshift = MMU_PAGESHIFT4M;
2832 2689 vpgsz = MMU_PAGESIZE4M;
2833 2690 } else {
2834 2691 vpshift = MMU_PAGESHIFT;
2835 2692 vpgsz = MMU_PAGESIZE;
2836 2693 }
2837 2694
2838 2695 for (vaddr = start; vaddr < end; vaddr += vpgsz) {
2839 2696 tag = sfmmu_make_tsbtag(vaddr);
2840 2697 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size);
2841 2698 sfmmu_unload_tsbe(tsbe_addr, tag, phys);
2842 2699 }
2843 2700 }
2844 2701
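Both functions above locate a TSB entry the same way: the TSB is a
direct-mapped, power-of-two table, and sfmmu_get_tsbe indexes it with the
virtual page number masked to the table size. A hedged sketch of that lookup,
assuming the sun4u minimum of 512 entries doubled per size code; the entry
layout here is illustrative, not the real hardware TSB format:

    #include <stddef.h>
    #include <stdint.h>

    struct tsbe {
    	uint64_t tag;
    	uint64_t data;
    };

    static struct tsbe *
    tsb_entry(struct tsbe *tsb, int szcode, uintptr_t va, int vpshift)
    {
    	size_t nentries = (size_t)512 << szcode;	/* power of two */

    	/* index by virtual page number, wrapped to the table size */
    	return (&tsb[(va >> vpshift) & (nentries - 1)]);
    }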
2845 2702 /*
2846 2703 * Select the optimum TSB size given the number of mappings
2847 2704 * that need to be cached.
2848 2705 */
2849 2706 static int
2850 2707 sfmmu_select_tsb_szc(pgcnt_t pgcnt)
2851 2708 {
2852 2709 int szc = 0;
2853 2710
2854 2711 #ifdef DEBUG
2855 2712 if (tsb_grow_stress) {
2856 2713 uint32_t randval = (uint32_t)gettick() >> 4;
2857 2714 return (randval % (tsb_max_growsize + 1));
2858 2715 }
2859 2716 #endif /* DEBUG */
2860 2717
2861 2718 while ((szc < tsb_max_growsize) && (pgcnt > SFMMU_RSS_TSBSIZE(szc)))
2862 2719 szc++;
2863 2720 return (szc);
2864 2721 }
2865 2722
2866 2723 /*
2867 2724 * This function will add a translation to the hme_blk and allocate the
2868 2725 * hme_blk if one does not exist.
2869 2726 * If a page structure is specified then it will add the
2870 2727 * corresponding hment to the mapping list.
2871 2728 * It will also update the hmenum field for the tte.
2872 2729 * Furthermore, it attempts to create a large page translation
2873 2730  * for <addr,hat> at page array pps. It assumes addr and the first
2874 2731  * pp are correctly aligned. It returns 0 if successful and 1 otherwise.
2875 2732 */
2876 2733 static int
2877 2734 sfmmu_tteload_array(sfmmu_t *sfmmup, tte_t *ttep, caddr_t vaddr,
2878 2735 page_t **pps, uint_t flags, uint_t rid)
2879 2736 {
2880 2737 struct hmehash_bucket *hmebp;
2881 2738 struct hme_blk *hmeblkp;
2882 2739 int ret;
2883 2740 uint_t size;
2884 2741
2885 2742 /*
2886 2743 * Get mapping size.
2887 2744 */
2888 2745 size = TTE_CSZ(ttep);
2889 2746 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size)));
2890 2747
2891 2748 /*
2892 2749 * Acquire the hash bucket.
2893 2750 */
2894 2751 hmebp = sfmmu_tteload_acquire_hashbucket(sfmmup, vaddr, size, rid);
2895 2752 ASSERT(hmebp);
2896 2753
2897 2754 /*
2898 2755 * Find the hment block.
2899 2756 */
2900 2757 hmeblkp = sfmmu_tteload_find_hmeblk(sfmmup, hmebp, vaddr, size, flags,
2901 2758 rid);
2902 2759 ASSERT(hmeblkp);
2903 2760
2904 2761 /*
2905 2762 * Add the translation.
2906 2763 */
2907 2764 ret = sfmmu_tteload_addentry(sfmmup, hmeblkp, ttep, vaddr, pps, flags,
2908 2765 rid);
2909 2766
2910 2767 /*
2911 2768 * Release the hash bucket.
2912 2769 */
2913 2770 sfmmu_tteload_release_hashbucket(hmebp);
2914 2771
2915 2772 return (ret);
2916 2773 }
2917 2774
2918 2775 /*
2919 2776 * Function locks and returns a pointer to the hash bucket for vaddr and size.
2920 2777 */
2921 2778 static struct hmehash_bucket *
2922 2779 sfmmu_tteload_acquire_hashbucket(sfmmu_t *sfmmup, caddr_t vaddr, int size,
2923 2780 uint_t rid)
2924 2781 {
2925 2782 struct hmehash_bucket *hmebp;
2926 2783 int hmeshift;
2927 2784 void *htagid = sfmmutohtagid(sfmmup, rid);
2928 2785
2929 2786 ASSERT(htagid != NULL);
2930 2787
2931 2788 hmeshift = HME_HASH_SHIFT(size);
2932 2789
2933 2790 hmebp = HME_HASH_FUNCTION(htagid, vaddr, hmeshift);
2934 2791
2935 2792 SFMMU_HASH_LOCK(hmebp);
2936 2793
2937 2794 return (hmebp);
2938 2795 }
2939 2796
2940 2797 /*
2941 2798 * Function returns a pointer to an hmeblk in the hash bucket, hmebp. If the
2942 2799  * hmeblk doesn't exist for the [sfmmup, vaddr & size] signature, a hmeblk is
2943 2800 * allocated.
2944 2801 */
2945 2802 static struct hme_blk *
2946 2803 sfmmu_tteload_find_hmeblk(sfmmu_t *sfmmup, struct hmehash_bucket *hmebp,
2947 2804 caddr_t vaddr, uint_t size, uint_t flags, uint_t rid)
2948 2805 {
2949 2806 hmeblk_tag hblktag;
2950 2807 int hmeshift;
2951 2808 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL;
2952 2809
2953 2810 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size));
2954 2811
2955 2812 hblktag.htag_id = sfmmutohtagid(sfmmup, rid);
2956 2813 ASSERT(hblktag.htag_id != NULL);
2957 2814 hmeshift = HME_HASH_SHIFT(size);
2958 2815 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
2959 2816 hblktag.htag_rehash = HME_HASH_REHASH(size);
2960 2817 hblktag.htag_rid = rid;
2961 2818
2962 2819 ttearray_realloc:
2963 2820
2964 2821 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
2965 2822
2966 2823 /*
2967 2824 	 * We block until hblk_reserve_lock is released; it's held by
2968 2825 	 * the thread temporarily using hblk_reserve, until hblk_reserve is
2969 2826 	 * replaced by an hblk from sfmmu8_cache.
2970 2827 */
2971 2828 if (hmeblkp == (struct hme_blk *)hblk_reserve &&
2972 2829 hblk_reserve_thread != curthread) {
2973 2830 SFMMU_HASH_UNLOCK(hmebp);
2974 2831 mutex_enter(&hblk_reserve_lock);
2975 2832 mutex_exit(&hblk_reserve_lock);
2976 2833 SFMMU_STAT(sf_hblk_reserve_hit);
2977 2834 SFMMU_HASH_LOCK(hmebp);
2978 2835 goto ttearray_realloc;
2979 2836 }
2980 2837
2981 2838 if (hmeblkp == NULL) {
2982 2839 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size,
2983 2840 hblktag, flags, rid);
2984 2841 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
2985 2842 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
2986 2843 } else {
2987 2844 /*
2988 2845 * It is possible for 8k and 64k hblks to collide since they
2989 2846 * have the same rehash value. This is because we
2990 2847 * lazily free hblks and 8K/64K blks could be lingering.
2991 2848 		 * If we find a size mismatch we free the block and try again.
2992 2849 */
2993 2850 if (get_hblk_ttesz(hmeblkp) != size) {
2994 2851 ASSERT(!hmeblkp->hblk_vcnt);
2995 2852 ASSERT(!hmeblkp->hblk_hmecnt);
2996 2853 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
2997 2854 &list, 0);
2998 2855 goto ttearray_realloc;
2999 2856 }
3000 2857 if (hmeblkp->hblk_shw_bit) {
3001 2858 /*
3002 2859 * if the hblk was previously used as a shadow hblk then
3003 2860 * we will change it to a normal hblk
3004 2861 */
3005 2862 ASSERT(!hmeblkp->hblk_shared);
3006 2863 if (hmeblkp->hblk_shw_mask) {
3007 2864 sfmmu_shadow_hcleanup(sfmmup, hmeblkp, hmebp);
3008 2865 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
3009 2866 goto ttearray_realloc;
3010 2867 } else {
3011 2868 hmeblkp->hblk_shw_bit = 0;
3012 2869 }
3013 2870 }
3014 2871 SFMMU_STAT(sf_hblk_hit);
3015 2872 }
3016 2873
3017 2874 /*
3018 2875 * hat_memload() should never call kmem_cache_free() for kernel hmeblks;
3019 2876 * see block comment showing the stacktrace in sfmmu_hblk_alloc();
3020 2877 * set the flag parameter to 1 so that sfmmu_hblks_list_purge() will
3021 2878 * just add these hmeblks to the per-cpu pending queue.
3022 2879 */
3023 2880 sfmmu_hblks_list_purge(&list, 1);
3024 2881
3025 2882 ASSERT(get_hblk_ttesz(hmeblkp) == size);
3026 2883 ASSERT(!hmeblkp->hblk_shw_bit);
3027 2884 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
3028 2885 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
3029 2886 ASSERT(hmeblkp->hblk_tag.htag_rid == rid);
3030 2887
3031 2888 return (hmeblkp);
3032 2889 }
3033 2890
3034 2891 /*
3035 2892 * Function adds a tte entry into the hmeblk. It returns 0 if successful and 1
3036 2893 * otherwise.
3037 2894 */
3038 2895 static int
3039 2896 sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep,
3040 2897 caddr_t vaddr, page_t **pps, uint_t flags, uint_t rid)
3041 2898 {
3042 2899 page_t *pp = *pps;
3043 2900 int hmenum, size, remap;
3044 2901 tte_t tteold, flush_tte;
3045 2902 #ifdef DEBUG
3046 2903 tte_t orig_old;
3047 2904 #endif /* DEBUG */
3048 2905 struct sf_hment *sfhme;
3049 2906 kmutex_t *pml, *pmtx;
3050 2907 hatlock_t *hatlockp;
3051 2908 int myflt;
3052 2909
3053 2910 /*
3054 2911 * remove this panic when we decide to let user virtual address
3055 2912 * space be >= USERLIMIT.
3056 2913 */
3057 2914 if (!TTE_IS_PRIVILEGED(ttep) && vaddr >= (caddr_t)USERLIMIT)
3058 2915 panic("user addr %p in kernel space", (void *)vaddr);
3059 2916 #if defined(TTE_IS_GLOBAL)
3060 2917 if (TTE_IS_GLOBAL(ttep))
3061 2918 panic("sfmmu_tteload: creating global tte");
3062 2919 #endif
3063 2920
3064 2921 #ifdef DEBUG
3065 2922 if (pf_is_memory(sfmmu_ttetopfn(ttep, vaddr)) &&
3066 2923 !TTE_IS_PCACHEABLE(ttep) && !sfmmu_allow_nc_trans)
3067 2924 panic("sfmmu_tteload: non cacheable memory tte");
3068 2925 #endif /* DEBUG */
3069 2926
3070 2927 /* don't simulate dirty bit for writeable ISM/DISM mappings */
3071 2928 if ((flags & HAT_LOAD_SHARE) && TTE_IS_WRITABLE(ttep)) {
3072 2929 TTE_SET_REF(ttep);
3073 2930 TTE_SET_MOD(ttep);
3074 2931 }
3075 2932
3076 2933 if ((flags & HAT_LOAD_SHARE) || !TTE_IS_REF(ttep) ||
3077 2934 !TTE_IS_MOD(ttep)) {
3078 2935 /*
3079 2936 * Don't load the TSB for the dummy as used by ISM. Also don't
3080 2937 * preload the TSB if the TTE isn't writable, since we're likely to
3081 2938 * fault on it again -- preloading can be fairly expensive.
3082 2939 */
3083 2940 flags |= SFMMU_NO_TSBLOAD;
3084 2941 }
3085 2942
3086 2943 size = TTE_CSZ(ttep);
3087 2944 switch (size) {
3088 2945 case TTE8K:
3089 2946 SFMMU_STAT(sf_tteload8k);
3090 2947 break;
3091 2948 case TTE64K:
3092 2949 SFMMU_STAT(sf_tteload64k);
3093 2950 break;
3094 2951 case TTE512K:
3095 2952 SFMMU_STAT(sf_tteload512k);
3096 2953 break;
3097 2954 case TTE4M:
3098 2955 SFMMU_STAT(sf_tteload4m);
3099 2956 break;
3100 2957 case (TTE32M):
3101 2958 SFMMU_STAT(sf_tteload32m);
3102 2959 ASSERT(mmu_page_sizes == max_mmu_page_sizes);
3103 2960 break;
3104 2961 case (TTE256M):
3105 2962 SFMMU_STAT(sf_tteload256m);
3106 2963 ASSERT(mmu_page_sizes == max_mmu_page_sizes);
3107 2964 break;
3108 2965 }
3109 2966
3110 2967 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size)));
3111 2968 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size));
3112 2969 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
3113 2970 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
3114 2971
3115 2972 HBLKTOHME_IDX(sfhme, hmeblkp, vaddr, hmenum);
3116 2973
3117 2974 /*
3118 2975 * Need to grab mlist lock here so that pageunload
3119 2976 * will not change tte behind us.
3120 2977 */
3121 2978 if (pp) {
3122 2979 pml = sfmmu_mlist_enter(pp);
3123 2980 }
3124 2981
3125 2982 sfmmu_copytte(&sfhme->hme_tte, &tteold);
3126 2983 /*
3127 2984 * Look for the corresponding hment and, if valid, verify
3128 2985 * that the pfns are equal.
3129 2986 */
3130 2987 remap = TTE_IS_VALID(&tteold);
3131 2988 if (remap) {
3132 2989 pfn_t new_pfn, old_pfn;
3133 2990
3134 2991 old_pfn = TTE_TO_PFN(vaddr, &tteold);
3135 2992 new_pfn = TTE_TO_PFN(vaddr, ttep);
3136 2993
3137 2994 if (flags & HAT_LOAD_REMAP) {
3138 2995 /* make sure we are remapping same type of pages */
3139 2996 if (pf_is_memory(old_pfn) != pf_is_memory(new_pfn)) {
3140 2997 panic("sfmmu_tteload - tte remap io<->memory");
3141 2998 }
3142 2999 if (old_pfn != new_pfn &&
3143 3000 (pp != NULL || sfhme->hme_page != NULL)) {
3144 3001 panic("sfmmu_tteload - tte remap pp != NULL");
3145 3002 }
3146 3003 } else if (old_pfn != new_pfn) {
3147 3004 panic("sfmmu_tteload - tte remap, hmeblkp 0x%p",
3148 3005 (void *)hmeblkp);
3149 3006 }
3150 3007 ASSERT(TTE_CSZ(&tteold) == TTE_CSZ(ttep));
3151 3008 }
3152 3009
3153 3010 if (pp) {
3154 3011 if (size == TTE8K) {
3155 3012 #ifdef VAC
3156 3013 /*
3157 3014 * Handle VAC consistency
3158 3015 */
3159 3016 if (!remap && (cache & CACHE_VAC) && !PP_ISNC(pp)) {
3160 3017 sfmmu_vac_conflict(sfmmup, vaddr, pp);
3161 3018 }
3162 3019 #endif
3163 3020
3164 3021 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) {
3165 3022 pmtx = sfmmu_page_enter(pp);
3166 3023 PP_CLRRO(pp);
3167 3024 sfmmu_page_exit(pmtx);
3168 3025 } else if (!PP_ISMAPPED(pp) &&
3169 3026 (!TTE_IS_WRITABLE(ttep)) && !(PP_ISMOD(pp))) {
3170 3027 pmtx = sfmmu_page_enter(pp);
3171 3028 if (!(PP_ISMOD(pp))) {
3172 3029 PP_SETRO(pp);
3173 3030 }
3174 3031 sfmmu_page_exit(pmtx);
3175 3032 }
3176 3033
3177 3034 } else if (sfmmu_pagearray_setup(vaddr, pps, ttep, remap)) {
3178 3035 /*
3179 3036 * sfmmu_pagearray_setup failed so return
3180 3037 */
3181 3038 sfmmu_mlist_exit(pml);
3182 3039 return (1);
3183 3040 }
3184 3041 }
3185 3042
3186 3043 /*
3187 3044 * Make sure hment is not on a mapping list.
3188 3045 */
3189 3046 ASSERT(remap || (sfhme->hme_page == NULL));
3190 3047
3191 3048 /* if it is not a remap then hme->next better be NULL */
3192 3049 ASSERT((!remap) ? sfhme->hme_next == NULL : 1);
3193 3050
3194 3051 if (flags & HAT_LOAD_LOCK) {
3195 3052 if ((hmeblkp->hblk_lckcnt + 1) >= MAX_HBLK_LCKCNT) {
3196 3053 panic("too high lckcnt-hmeblk %p",
3197 3054 (void *)hmeblkp);
3198 3055 }
3199 3056 atomic_inc_32(&hmeblkp->hblk_lckcnt);
3200 3057
3201 3058 HBLK_STACK_TRACE(hmeblkp, HBLK_LOCK);
3202 3059 }
3203 3060
3204 3061 #ifdef VAC
3205 3062 if (pp && PP_ISNC(pp)) {
3206 3063 /*
3207 3064 * If the physical page is marked uncacheable, e.g.
3208 3065 * by a vac conflict, make sure the new mapping is also
3209 3066 * uncacheable.
3210 3067 */
3211 3068 TTE_CLR_VCACHEABLE(ttep);
3212 3069 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR);
3213 3070 }
3214 3071 #endif
3215 3072 ttep->tte_hmenum = hmenum;
3216 3073
3217 3074 #ifdef DEBUG
3218 3075 orig_old = tteold;
3219 3076 #endif /* DEBUG */
3220 3077
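/*
 * A note on the retry loop below (a sketch of the contract inferred
 * from the uses of sfmmu_modifytte_try() in this file, not a formal
 * interface definition): it attempts to atomically replace the hme
 * TTE, whose expected current value is tteold, with *ttep, and
 * returns a negative value if the TTE changed underneath us; the
 * loop simply retries until the update succeeds.
 */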
3221 3078 while (sfmmu_modifytte_try(&tteold, ttep, &sfhme->hme_tte) < 0) {
3222 3079 if ((sfmmup == KHATID) &&
3223 3080 (flags & (HAT_LOAD_LOCK | HAT_LOAD_REMAP))) {
3224 3081 sfmmu_copytte(&sfhme->hme_tte, &tteold);
3225 3082 }
3226 3083 #ifdef DEBUG
3227 3084 chk_tte(&orig_old, &tteold, ttep, hmeblkp);
3228 3085 #endif /* DEBUG */
3229 3086 }
3230 3087 ASSERT(TTE_IS_VALID(&sfhme->hme_tte));
3231 3088
3232 3089 if (!TTE_IS_VALID(&tteold)) {
3233 3090
3234 3091 atomic_inc_16(&hmeblkp->hblk_vcnt);
3235 3092 if (rid == SFMMU_INVALID_SHMERID) {
3236 3093 atomic_inc_ulong(&sfmmup->sfmmu_ttecnt[size]);
3237 3094 } else {
3238 3095 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
3239 3096 sf_region_t *rgnp = srdp->srd_hmergnp[rid];
3240 3097 /*
3241 3098 * We already accounted for region ttecnt's in sfmmu
3242 3099 * during hat_join_region() processing. Here we
3243 3100 * only update ttecnt's in the region structure.
3244 3101 */
3245 3102 atomic_inc_ulong(&rgnp->rgn_ttecnt[size]);
3246 3103 }
3247 3104 }
3248 3105
3249 3106 myflt = (astosfmmu(curthread->t_procp->p_as) == sfmmup);
3250 3107 if (size > TTE8K && (flags & HAT_LOAD_SHARE) == 0 &&
3251 3108 sfmmup != ksfmmup) {
3252 3109 uchar_t tteflag = 1 << size;
3253 3110 if (rid == SFMMU_INVALID_SHMERID) {
3254 3111 if (!(sfmmup->sfmmu_tteflags & tteflag)) {
3255 3112 hatlockp = sfmmu_hat_enter(sfmmup);
3256 3113 sfmmup->sfmmu_tteflags |= tteflag;
3257 3114 sfmmu_hat_exit(hatlockp);
3258 3115 }
3259 3116 } else if (!(sfmmup->sfmmu_rtteflags & tteflag)) {
3260 3117 hatlockp = sfmmu_hat_enter(sfmmup);
3261 3118 sfmmup->sfmmu_rtteflags |= tteflag;
3262 3119 sfmmu_hat_exit(hatlockp);
3263 3120 }
3264 3121 /*
3265 3122 * Update the current CPU tsbmiss area, so the current thread
3266 3123 * won't need to take the tsbmiss for the new pagesize.
3267 3124 * The other threads in the process will update their tsb
3268 3125 * miss area lazily in sfmmu_tsbmiss_exception() when they
3269 3126 * fail to find the translation for a newly added pagesize.
3270 3127 */
3271 3128 if (size > TTE64K && myflt) {
3272 3129 struct tsbmiss *tsbmp;
3273 3130 kpreempt_disable();
3274 3131 tsbmp = &tsbmiss_area[CPU->cpu_id];
3275 3132 if (rid == SFMMU_INVALID_SHMERID) {
3276 3133 if (!(tsbmp->uhat_tteflags & tteflag)) {
3277 3134 tsbmp->uhat_tteflags |= tteflag;
3278 3135 }
3279 3136 } else {
3280 3137 if (!(tsbmp->uhat_rtteflags & tteflag)) {
3281 3138 tsbmp->uhat_rtteflags |= tteflag;
3282 3139 }
3283 3140 }
3284 3141 kpreempt_enable();
3285 3142 }
3286 3143 }
3287 3144
3288 3145 if (size >= TTE4M && (flags & HAT_LOAD_TEXT) &&
3289 3146 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) {
3290 3147 hatlockp = sfmmu_hat_enter(sfmmup);
3291 3148 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG);
3292 3149 sfmmu_hat_exit(hatlockp);
3293 3150 }
3294 3151
3295 3152 flush_tte.tte_intlo = (tteold.tte_intlo ^ ttep->tte_intlo) &
3296 3153 hw_tte.tte_intlo;
3297 3154 flush_tte.tte_inthi = (tteold.tte_inthi ^ ttep->tte_inthi) &
3298 3155 hw_tte.tte_inthi;
3299 3156
3300 3157 if (remap && (flush_tte.tte_inthi || flush_tte.tte_intlo)) {
3301 3158 /*
3302 3159 * If remap and new tte differs from old tte we need
3303 3160 * to sync the mod bit and flush TLB/TSB. We don't
3304 3161 * need to sync ref bit because we currently always set
3305 3162 * ref bit in tteload.
3306 3163 */
3307 3164 ASSERT(TTE_IS_REF(ttep));
3308 3165 if (TTE_IS_MOD(&tteold)) {
3309 3166 sfmmu_ttesync(sfmmup, vaddr, &tteold, pp);
3310 3167 }
3311 3168 /*
3312 3169 * hwtte bits shouldn't change for SRD hmeblks as long as SRD
3313 3170 * hmes are only used for read-only text. Adding this code for
3314 3171 * completeness and future use of shared hmeblks with writable
3315 3172 * mappings of VMODSORT vnodes.
3316 3173 */
3317 3174 if (hmeblkp->hblk_shared) {
3318 3175 cpuset_t cpuset = sfmmu_rgntlb_demap(vaddr,
3319 3176 sfmmup->sfmmu_srdp->srd_hmergnp[rid], hmeblkp, 1);
3320 3177 xt_sync(cpuset);
3321 3178 SFMMU_STAT_ADD(sf_region_remap_demap, 1);
3322 3179 } else {
3323 3180 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 0);
3324 3181 xt_sync(sfmmup->sfmmu_cpusran);
3325 3182 }
3326 3183 }
3327 3184
3328 3185 if ((flags & SFMMU_NO_TSBLOAD) == 0) {
3329 3186 /*
3330 3187 * We only preload 8K and 4M mappings into the TSB, since
3331 3188 * 64K and 512K mappings are replicated and hence don't
3332 3189 * have a single, unique TSB entry. Ditto for 32M/256M.
3333 3190 */
3334 3191 if (size == TTE8K || size == TTE4M) {
3335 3192 sf_scd_t *scdp;
3336 3193 hatlockp = sfmmu_hat_enter(sfmmup);
3337 3194 /*
3338 3195 * Don't preload private TSB if the mapping is used
3339 3196 * by the shctx in the SCD.
3340 3197 */
3341 3198 scdp = sfmmup->sfmmu_scdp;
3342 3199 if (rid == SFMMU_INVALID_SHMERID || scdp == NULL ||
3343 3200 !SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) {
3344 3201 sfmmu_load_tsb(sfmmup, vaddr, &sfhme->hme_tte,
3345 3202 size);
3346 3203 }
3347 3204 sfmmu_hat_exit(hatlockp);
3348 3205 }
3349 3206 }
3350 3207 if (pp) {
3351 3208 if (!remap) {
3352 3209 HME_ADD(sfhme, pp);
3353 3210 atomic_inc_16(&hmeblkp->hblk_hmecnt);
3354 3211 ASSERT(hmeblkp->hblk_hmecnt > 0);
3355 3212
3356 3213 /*
3357 3214 * Cannot ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS)
3358 3215 * see pageunload() for comment.
3359 3216 */
3360 3217 }
3361 3218 sfmmu_mlist_exit(pml);
3362 3219 }
3363 3220
3364 3221 return (0);
3365 3222 }
3366 3223 /*
3367 3224 * Function unlocks hash bucket.
3368 3225 */
3369 3226 static void
3370 3227 sfmmu_tteload_release_hashbucket(struct hmehash_bucket *hmebp)
3371 3228 {
3372 3229 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
3373 3230 SFMMU_HASH_UNLOCK(hmebp);
3374 3231 }
3375 3232
3376 3233 /*
3377 3234 * Function that checks and sets up the page array for a large
3378 3235 * translation. Will set the p_vcolor, p_index, p_ro fields.
3379 3236 * Assumes addr and pfnum of the first page are properly aligned.
3380 3237 * Will check for physical contiguity. If the check fails it
3381 3238 * returns nonzero.
3382 3239 */
3383 3240 static int
3384 3241 sfmmu_pagearray_setup(caddr_t addr, page_t **pps, tte_t *ttep, int remap)
3385 3242 {
3386 3243 int i, index, ttesz;
3387 3244 pfn_t pfnum;
3388 3245 pgcnt_t npgs;
3389 3246 page_t *pp, *pp1;
3390 3247 kmutex_t *pmtx;
3391 3248 #ifdef VAC
3392 3249 int osz;
3393 3250 int cflags = 0;
3394 3251 int vac_err = 0;
3395 3252 #endif
3396 3253 int newidx = 0;
3397 3254
3398 3255 ttesz = TTE_CSZ(ttep);
3399 3256
3400 3257 ASSERT(ttesz > TTE8K);
3401 3258
3402 3259 npgs = TTEPAGES(ttesz);
3403 3260 index = PAGESZ_TO_INDEX(ttesz);
3404 3261
3405 3262 pfnum = (*pps)->p_pagenum;
3406 3263 ASSERT(IS_P2ALIGNED(pfnum, npgs));
3407 3264
3408 3265 /*
3409 3266 * Save the first pp so we can do HAT_TMPNC at the end.
3410 3267 */
3411 3268 pp1 = *pps;
3412 3269 #ifdef VAC
3413 3270 osz = fnd_mapping_sz(pp1);
3414 3271 #endif
3415 3272
3416 3273 for (i = 0; i < npgs; i++, pps++) {
3417 3274 pp = *pps;
3418 3275 ASSERT(PAGE_LOCKED(pp));
3419 3276 ASSERT(pp->p_szc >= ttesz);
3420 3277 ASSERT(pp->p_szc == pp1->p_szc);
3421 3278 ASSERT(sfmmu_mlist_held(pp));
3422 3279
3423 3280 /*
3424 3281 * XXX is it possible to maintain P_RO on the root only?
3425 3282 */
3426 3283 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) {
3427 3284 pmtx = sfmmu_page_enter(pp);
3428 3285 PP_CLRRO(pp);
3429 3286 sfmmu_page_exit(pmtx);
3430 3287 } else if (!PP_ISMAPPED(pp) && !TTE_IS_WRITABLE(ttep) &&
3431 3288 !PP_ISMOD(pp)) {
3432 3289 pmtx = sfmmu_page_enter(pp);
3433 3290 if (!(PP_ISMOD(pp))) {
3434 3291 PP_SETRO(pp);
3435 3292 }
3436 3293 sfmmu_page_exit(pmtx);
3437 3294 }
3438 3295
3439 3296 /*
3440 3297 * If this is a remap we skip vac & contiguity checks.
3441 3298 */
3442 3299 if (remap)
3443 3300 continue;
3444 3301
3445 3302 /*
3446 3303 * set p_vcolor and detect any vac conflicts.
3447 3304 */
3448 3305 #ifdef VAC
3449 3306 if (vac_err == 0) {
3450 3307 vac_err = sfmmu_vacconflict_array(addr, pp, &cflags);
3451 3308
3452 3309 }
3453 3310 #endif
3454 3311
3455 3312 /*
3456 3313 * Save current index in case we need to undo it.
3457 3314 * Note: "PAGESZ_TO_INDEX(sz) (1 << (sz))"
3458 3315 * "SFMMU_INDEX_SHIFT 6"
3459 3316 * "SFMMU_INDEX_MASK ((1 << SFMMU_INDEX_SHIFT) - 1)"
3460 3317 * "PP_MAPINDEX(p_index) (p_index & SFMMU_INDEX_MASK)"
3461 3318 *
3462 3319 * So: index = PAGESZ_TO_INDEX(ttesz);
3463 3320 * if ttesz == 1 then index = 0x2
3464 3321 * 2 then index = 0x4
3465 3322 * 3 then index = 0x8
3466 3323 * 4 then index = 0x10
3467 3324 * 5 then index = 0x20
3468 3325 * The code below checks if it's a new pagesize (i.e., newidx)
3469 3326 * in case we need to take it back out of p_index,
3470 3327 * and then or's the new index into the existing index.
3471 3328 */
3472 3329 if ((PP_MAPINDEX(pp) & index) == 0)
3473 3330 newidx = 1;
3474 3331 pp->p_index = (PP_MAPINDEX(pp) | index);
3475 3332
3476 3333 /*
3477 3334 * contiguity check
3478 3335 */
3479 3336 if (pp->p_pagenum != pfnum) {
3480 3337 /*
3481 3338 * If we fail the contiguity test then
3482 3339 * the only thing we need to fix is the p_index field.
3483 3340 * We might get a few extra flushes, but since this
3484 3341 * path is rare, that is ok. The p_ro field will
3485 3342 * get automatically fixed on the next tteload to
3486 3343 * the page. No TNC bit is set yet.
3487 3344 */
3488 3345 while (i >= 0) {
3489 3346 pp = *pps;
3490 3347 if (newidx)
3491 3348 pp->p_index = (PP_MAPINDEX(pp) &
3492 3349 ~index);
3493 3350 pps--;
3494 3351 i--;
3495 3352 }
3496 3353 return (1);
3497 3354 }
3498 3355 pfnum++;
3499 3356 addr += MMU_PAGESIZE;
3500 3357 }
3501 3358
3502 3359 #ifdef VAC
3503 3360 if (vac_err) {
3504 3361 if (ttesz > osz) {
3505 3362 /*
3506 3363 * There are some smaller mappings that cause vac
3507 3364 * conflicts. Convert all existing small mappings to
3508 3365 * TNC.
3509 3366 */
3510 3367 SFMMU_STAT_ADD(sf_uncache_conflict, npgs);
3511 3368 sfmmu_page_cache_array(pp1, HAT_TMPNC, CACHE_FLUSH,
3512 3369 npgs);
3513 3370 } else {
3514 3371 /* EMPTY */
3515 3372 /*
3516 3373 * If a big page mapping already exists,
3517 3374 * that means the whole existing big page
3518 3375 * has the TNC setting already. No need to convert to
3519 3376 * TNC again.
3520 3377 */
3521 3378 ASSERT(PP_ISTNC(pp1));
3522 3379 }
3523 3380 }
3524 3381 #endif /* VAC */
3525 3382
3526 3383 return (0);
3527 3384 }
3528 3385
3529 3386 #ifdef VAC
3530 3387 /*
3531 3388 * Routine that detects vac consistency for a large page. It also
3532 3389 * sets virtual color for all pp's for this big mapping.
3533 3390 */
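/*
 * Background (general VAC behavior, not specific to this routine): on
 * a virtually-addressed cache, two mappings of the same physical page
 * that differ in virtual color can leave stale data in the cache.
 * addr_to_vcolor() derives the color from the virtual address; when
 * colors clash we either flush and recolor (if there are no other
 * users) or mark the page temporarily non-cacheable (TNC).
 */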
3534 3391 static int
3535 3392 sfmmu_vacconflict_array(caddr_t addr, page_t *pp, int *cflags)
3536 3393 {
3537 3394 int vcolor, ocolor;
3538 3395
3539 3396 ASSERT(sfmmu_mlist_held(pp));
3540 3397
3541 3398 if (PP_ISNC(pp)) {
3542 3399 return (HAT_TMPNC);
3543 3400 }
3544 3401
3545 3402 vcolor = addr_to_vcolor(addr);
3546 3403 if (PP_NEWPAGE(pp)) {
3547 3404 PP_SET_VCOLOR(pp, vcolor);
3548 3405 return (0);
3549 3406 }
3550 3407
3551 3408 ocolor = PP_GET_VCOLOR(pp);
3552 3409 if (ocolor == vcolor) {
3553 3410 return (0);
3554 3411 }
3555 3412
3556 3413 if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) {
3557 3414 /*
3558 3415 * The previous user of the page had a different color,
3559 3416 * but since there are no current users
3560 3417 * we just flush the cache and change the color.
3561 3418 * As an optimization for large pages we flush the
3562 3419 * entire cache of that color and set a flag.
3563 3420 */
3564 3421 SFMMU_STAT(sf_pgcolor_conflict);
3565 3422 if (!CacheColor_IsFlushed(*cflags, ocolor)) {
3566 3423 CacheColor_SetFlushed(*cflags, ocolor);
3567 3424 sfmmu_cache_flushcolor(ocolor, pp->p_pagenum);
3568 3425 }
3569 3426 PP_SET_VCOLOR(pp, vcolor);
3570 3427 return (0);
3571 3428 }
3572 3429
3573 3430 /*
3574 3431 * We got a real conflict with a current mapping.
3575 3432 * Set flags to start uncaching all mappings
3576 3433 * and return failure, so we restart looping
3577 3434 * over the pp array from the beginning.
3578 3435 */
3579 3436 return (HAT_TMPNC);
3580 3437 }
3581 3438 #endif /* VAC */
3582 3439
3583 3440 /*
3584 3441 * Creates a large page shadow hmeblk for a tte.
3585 3442 * The purpose of this routine is to allow us to do quick unloads because
3586 3443 * the vm layer can easily pass a very large but sparsely populated range.
3587 3444 */
3588 3445 static struct hme_blk *
3589 3446 sfmmu_shadow_hcreate(sfmmu_t *sfmmup, caddr_t vaddr, int ttesz, uint_t flags)
3590 3447 {
3591 3448 struct hmehash_bucket *hmebp;
3592 3449 hmeblk_tag hblktag;
3593 3450 int hmeshift, size, vshift;
3594 3451 uint_t shw_mask, newshw_mask;
3595 3452 struct hme_blk *hmeblkp;
3596 3453
3597 3454 ASSERT(sfmmup != KHATID);
3598 3455 if (mmu_page_sizes == max_mmu_page_sizes) {
3599 3456 ASSERT(ttesz < TTE256M);
3600 3457 } else {
3601 3458 ASSERT(ttesz < TTE4M);
3602 3459 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
3603 3460 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
3604 3461 }
3605 3462
3606 3463 if (ttesz == TTE8K) {
3607 3464 size = TTE512K;
3608 3465 } else {
3609 3466 size = ++ttesz;
3610 3467 }
3611 3468
3612 3469 hblktag.htag_id = sfmmup;
3613 3470 hmeshift = HME_HASH_SHIFT(size);
3614 3471 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
3615 3472 hblktag.htag_rehash = HME_HASH_REHASH(size);
3616 3473 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3617 3474 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift);
3618 3475
3619 3476 SFMMU_HASH_LOCK(hmebp);
3620 3477
3621 3478 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
3622 3479 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve);
3623 3480 if (hmeblkp == NULL) {
3624 3481 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size,
3625 3482 hblktag, flags, SFMMU_INVALID_SHMERID);
3626 3483 }
3627 3484 ASSERT(hmeblkp);
3628 3485 if (!hmeblkp->hblk_shw_mask) {
3629 3486 /*
3630 3487 * If this is an unused hblk, it was just allocated or could
3631 3488 * potentially be a previous large page hblk, so we need to
3632 3489 * set the shadow bit.
3633 3490 */
3634 3491 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt);
3635 3492 hmeblkp->hblk_shw_bit = 1;
3636 3493 } else if (hmeblkp->hblk_shw_bit == 0) {
3637 3494 panic("sfmmu_shadow_hcreate: shw bit not set in hmeblkp 0x%p",
3638 3495 (void *)hmeblkp);
3639 3496 }
3640 3497 ASSERT(hmeblkp->hblk_shw_bit == 1);
3641 3498 ASSERT(!hmeblkp->hblk_shared);
3642 3499 vshift = vaddr_to_vshift(hblktag, vaddr, size);
3643 3500 ASSERT(vshift < 8);
3644 3501 /*
3645 3502 * Atomically set shw mask bit
3646 3503 */
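/*
 * atomic_cas_32() returns the value found in memory: the swap
 * succeeded only if that equals the shw_mask we read, which is
 * exactly what the loop condition checks (after the call,
 * newshw_mask holds the returned old value).
 */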
3647 3504 do {
3648 3505 shw_mask = hmeblkp->hblk_shw_mask;
3649 3506 newshw_mask = shw_mask | (1 << vshift);
3650 3507 newshw_mask = atomic_cas_32(&hmeblkp->hblk_shw_mask, shw_mask,
3651 3508 newshw_mask);
3652 3509 } while (newshw_mask != shw_mask);
3653 3510
3654 3511 SFMMU_HASH_UNLOCK(hmebp);
3655 3512
3656 3513 return (hmeblkp);
3657 3514 }
3658 3515
3659 3516 /*
3660 3517 * This routine cleans up a previous shadow hmeblk and changes it to
3661 3518 * a regular hblk. This happens rarely, but it is possible
3662 3519 * when a process wants to use large pages and there are hblks still
3663 3520 * lying around from the previous as (address space) that used them.
3664 3521 * The alternative was to clean up the shadow hblks at unload time,
3665 3522 * but since so few user processes actually use large pages, it is
3666 3523 * better to be lazy and clean up at this time.
3667 3524 */
3668 3525 static void
3669 3526 sfmmu_shadow_hcleanup(sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
3670 3527 struct hmehash_bucket *hmebp)
3671 3528 {
3672 3529 caddr_t addr, endaddr;
3673 3530 int hashno, size;
3674 3531
3675 3532 ASSERT(hmeblkp->hblk_shw_bit);
3676 3533 ASSERT(!hmeblkp->hblk_shared);
3677 3534
3678 3535 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
3679 3536
3680 3537 if (!hmeblkp->hblk_shw_mask) {
3681 3538 hmeblkp->hblk_shw_bit = 0;
3682 3539 return;
3683 3540 }
3684 3541 addr = (caddr_t)get_hblk_base(hmeblkp);
3685 3542 endaddr = get_hblk_endaddr(hmeblkp);
3686 3543 size = get_hblk_ttesz(hmeblkp);
3687 3544 hashno = size - 1;
3688 3545 ASSERT(hashno > 0);
3689 3546 SFMMU_HASH_UNLOCK(hmebp);
3690 3547
3691 3548 sfmmu_free_hblks(sfmmup, addr, endaddr, hashno);
3692 3549
3693 3550 SFMMU_HASH_LOCK(hmebp);
3694 3551 }
3695 3552
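/*
 * Walks [addr, endaddr) at the given rehash level, freeing hmeblks in
 * the traversed hash chains whose vcnt and hmecnt have dropped to
 * zero, and recursively cleaning up any matching shadow hmeblks it
 * encounters (see sfmmu_shadow_hcleanup()).
 */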
3696 3553 static void
3697 3554 sfmmu_free_hblks(sfmmu_t *sfmmup, caddr_t addr, caddr_t endaddr,
3698 3555 int hashno)
3699 3556 {
3700 3557 int hmeshift, shadow = 0;
3701 3558 hmeblk_tag hblktag;
3702 3559 struct hmehash_bucket *hmebp;
3703 3560 struct hme_blk *hmeblkp;
3704 3561 struct hme_blk *nx_hblk, *pr_hblk, *list = NULL;
3705 3562
3706 3563 ASSERT(hashno > 0);
3707 3564 hblktag.htag_id = sfmmup;
3708 3565 hblktag.htag_rehash = hashno;
3709 3566 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3710 3567
3711 3568 hmeshift = HME_HASH_SHIFT(hashno);
3712 3569
3713 3570 while (addr < endaddr) {
3714 3571 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3715 3572 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
3716 3573 SFMMU_HASH_LOCK(hmebp);
3717 3574 /* inline HME_HASH_SEARCH */
3718 3575 hmeblkp = hmebp->hmeblkp;
3719 3576 pr_hblk = NULL;
3720 3577 while (hmeblkp) {
3721 3578 if (HTAGS_EQ(hmeblkp->hblk_tag, hblktag)) {
3722 3579 /* found hme_blk */
3723 3580 ASSERT(!hmeblkp->hblk_shared);
3724 3581 if (hmeblkp->hblk_shw_bit) {
3725 3582 if (hmeblkp->hblk_shw_mask) {
3726 3583 shadow = 1;
3727 3584 sfmmu_shadow_hcleanup(sfmmup,
3728 3585 hmeblkp, hmebp);
3729 3586 break;
3730 3587 } else {
3731 3588 hmeblkp->hblk_shw_bit = 0;
3732 3589 }
3733 3590 }
3734 3591
3735 3592 /*
3736 3593 * hblk_hmecnt and hblk_vcnt could be nonzero
3737 3594 * since hblk_unload() does not guarantee that.
3738 3595 *
3739 3596 * XXX - this could cause tteload() to spin
3740 3597 * where sfmmu_shadow_hcleanup() is called.
3741 3598 */
3742 3599 }
3743 3600
3744 3601 nx_hblk = hmeblkp->hblk_next;
3745 3602 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
3746 3603 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3747 3604 &list, 0);
3748 3605 } else {
3749 3606 pr_hblk = hmeblkp;
3750 3607 }
3751 3608 hmeblkp = nx_hblk;
3752 3609 }
3753 3610
3754 3611 SFMMU_HASH_UNLOCK(hmebp);
3755 3612
3756 3613 if (shadow) {
3757 3614 /*
3758 3615 * We found another shadow hblk and cleaned up its
3759 3616 * children. We need to go back and clean up
3760 3617 * the original hblk, so we don't advance
3761 3618 * addr.
3762 3619 */
3763 3620 shadow = 0;
3764 3621 } else {
3765 3622 addr = (caddr_t)roundup((uintptr_t)addr + 1,
3766 3623 (1 << hmeshift));
3767 3624 }
3768 3625 }
3769 3626 sfmmu_hblks_list_purge(&list, 0);
3770 3627 }
3771 3628
3772 3629 /*
3773 3630 * This routine's job is to delete stale invalid shared hmeregion hmeblks
3774 3631 * that may still linger after pageunload.
3775 3632 */
3776 3633 static void
3777 3634 sfmmu_cleanup_rhblk(sf_srd_t *srdp, caddr_t addr, uint_t rid, int ttesz)
3778 3635 {
3779 3636 int hmeshift;
3780 3637 hmeblk_tag hblktag;
3781 3638 struct hmehash_bucket *hmebp;
3782 3639 struct hme_blk *hmeblkp;
3783 3640 struct hme_blk *pr_hblk;
3784 3641 struct hme_blk *list = NULL;
3785 3642
3786 3643 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
3787 3644 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
3788 3645
3789 3646 hmeshift = HME_HASH_SHIFT(ttesz);
3790 3647 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3791 3648 hblktag.htag_rehash = ttesz;
3792 3649 hblktag.htag_rid = rid;
3793 3650 hblktag.htag_id = srdp;
3794 3651 hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift);
3795 3652
3796 3653 SFMMU_HASH_LOCK(hmebp);
3797 3654 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
3798 3655 if (hmeblkp != NULL) {
3799 3656 ASSERT(hmeblkp->hblk_shared);
3800 3657 ASSERT(!hmeblkp->hblk_shw_bit);
3801 3658 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
3802 3659 panic("sfmmu_cleanup_rhblk: valid hmeblk");
3803 3660 }
3804 3661 ASSERT(!hmeblkp->hblk_lckcnt);
3805 3662 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3806 3663 &list, 0);
3807 3664 }
3808 3665 SFMMU_HASH_UNLOCK(hmebp);
3809 3666 sfmmu_hblks_list_purge(&list, 0);
3810 3667 }
3811 3668
3812 3669 /* ARGSUSED */
3813 3670 static void
3814 3671 sfmmu_rgn_cb_noop(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr,
3815 3672 size_t r_size, void *r_obj, u_offset_t r_objoff)
3816 3673 {
3817 3674 }
3818 3675
3819 3676 /*
3820 3677 * Searches for an hmeblk which maps addr, then unloads this mapping
3821 3678 * and updates *eaddrp, if the hmeblk is found.
3822 3679 */
3823 3680 static void
3824 3681 sfmmu_unload_hmeregion_va(sf_srd_t *srdp, uint_t rid, caddr_t addr,
3825 3682 caddr_t eaddr, int ttesz, caddr_t *eaddrp)
3826 3683 {
3827 3684 int hmeshift;
3828 3685 hmeblk_tag hblktag;
3829 3686 struct hmehash_bucket *hmebp;
3830 3687 struct hme_blk *hmeblkp;
3831 3688 struct hme_blk *pr_hblk;
3832 3689 struct hme_blk *list = NULL;
3833 3690
3834 3691 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
3835 3692 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
3836 3693 ASSERT(ttesz >= HBLK_MIN_TTESZ);
3837 3694
3838 3695 hmeshift = HME_HASH_SHIFT(ttesz);
3839 3696 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3840 3697 hblktag.htag_rehash = ttesz;
3841 3698 hblktag.htag_rid = rid;
3842 3699 hblktag.htag_id = srdp;
3843 3700 hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift);
3844 3701
3845 3702 SFMMU_HASH_LOCK(hmebp);
3846 3703 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
3847 3704 if (hmeblkp != NULL) {
3848 3705 ASSERT(hmeblkp->hblk_shared);
3849 3706 ASSERT(!hmeblkp->hblk_lckcnt);
3850 3707 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
3851 3708 *eaddrp = sfmmu_hblk_unload(NULL, hmeblkp, addr,
3852 3709 eaddr, NULL, HAT_UNLOAD);
3853 3710 ASSERT(*eaddrp > addr);
3854 3711 }
3855 3712 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt);
3856 3713 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3857 3714 &list, 0);
3858 3715 }
3859 3716 SFMMU_HASH_UNLOCK(hmebp);
3860 3717 sfmmu_hblks_list_purge(&list, 0);
3861 3718 }
3862 3719
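/*
 * Unloads all mappings for an hme region. For each hmeblk size
 * present in the region (per rgn_hmeflags), the shared hmeblks
 * covering [rgn_saddr, rgn_saddr + rgn_size) are unloaded and freed,
 * and the region callback (if any) is invoked once per contiguous
 * range actually unloaded, tracked via cbsaddr/cbeaddr below.
 */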
3863 3720 static void
3864 3721 sfmmu_unload_hmeregion(sf_srd_t *srdp, sf_region_t *rgnp)
3865 3722 {
3866 3723 int ttesz = rgnp->rgn_pgszc;
3867 3724 size_t rsz = rgnp->rgn_size;
3868 3725 caddr_t rsaddr = rgnp->rgn_saddr;
3869 3726 caddr_t readdr = rsaddr + rsz;
3870 3727 caddr_t rhsaddr;
3871 3728 caddr_t va;
3872 3729 uint_t rid = rgnp->rgn_id;
3873 3730 caddr_t cbsaddr;
3874 3731 caddr_t cbeaddr;
3875 3732 hat_rgn_cb_func_t rcbfunc;
3876 3733 ulong_t cnt;
3877 3734
3878 3735 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
3879 3736 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
3880 3737
3881 3738 ASSERT(IS_P2ALIGNED(rsaddr, TTEBYTES(ttesz)));
3882 3739 ASSERT(IS_P2ALIGNED(rsz, TTEBYTES(ttesz)));
3883 3740 if (ttesz < HBLK_MIN_TTESZ) {
3884 3741 ttesz = HBLK_MIN_TTESZ;
3885 3742 rhsaddr = (caddr_t)P2ALIGN((uintptr_t)rsaddr, HBLK_MIN_BYTES);
3886 3743 } else {
3887 3744 rhsaddr = rsaddr;
3888 3745 }
3889 3746
3890 3747 if ((rcbfunc = rgnp->rgn_cb_function) == NULL) {
3891 3748 rcbfunc = sfmmu_rgn_cb_noop;
3892 3749 }
3893 3750
3894 3751 while (ttesz >= HBLK_MIN_TTESZ) {
3895 3752 cbsaddr = rsaddr;
3896 3753 cbeaddr = rsaddr;
3897 3754 if (!(rgnp->rgn_hmeflags & (1 << ttesz))) {
3898 3755 ttesz--;
3899 3756 continue;
3900 3757 }
3901 3758 cnt = 0;
3902 3759 va = rsaddr;
3903 3760 while (va < readdr) {
3904 3761 ASSERT(va >= rhsaddr);
3905 3762 if (va != cbeaddr) {
3906 3763 if (cbeaddr != cbsaddr) {
3907 3764 ASSERT(cbeaddr > cbsaddr);
3908 3765 (*rcbfunc)(cbsaddr, cbeaddr,
3909 3766 rsaddr, rsz, rgnp->rgn_obj,
3910 3767 rgnp->rgn_objoff);
3911 3768 }
3912 3769 cbsaddr = va;
3913 3770 cbeaddr = va;
3914 3771 }
3915 3772 sfmmu_unload_hmeregion_va(srdp, rid, va, readdr,
3916 3773 ttesz, &cbeaddr);
3917 3774 cnt++;
3918 3775 va = rhsaddr + (cnt << TTE_PAGE_SHIFT(ttesz));
3919 3776 }
3920 3777 if (cbeaddr != cbsaddr) {
3921 3778 ASSERT(cbeaddr > cbsaddr);
3922 3779 (*rcbfunc)(cbsaddr, cbeaddr, rsaddr,
3923 3780 rsz, rgnp->rgn_obj,
3924 3781 rgnp->rgn_objoff);
3925 3782 }
3926 3783 ttesz--;
3927 3784 }
3928 3785 }
3929 3786
3930 3787 /*
3931 3788 * Release one hardware address translation lock on the given address range.
3932 3789 */
3933 3790 void
3934 3791 hat_unlock(struct hat *sfmmup, caddr_t addr, size_t len)
3935 3792 {
3936 3793 struct hmehash_bucket *hmebp;
3937 3794 hmeblk_tag hblktag;
3938 3795 int hmeshift, hashno = 1;
3939 3796 struct hme_blk *hmeblkp, *list = NULL;
3940 3797 caddr_t endaddr;
3941 3798
3942 3799 ASSERT(sfmmup != NULL);
3943 3800
3944 3801 ASSERT((sfmmup == ksfmmup) ||
3945 3802 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
3946 3803 ASSERT((len & MMU_PAGEOFFSET) == 0);
3947 3804 endaddr = addr + len;
3948 3805 hblktag.htag_id = sfmmup;
3949 3806 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3950 3807
3951 3808 /*
3952 3809 * Spitfire supports 4 page sizes.
3953 3810 * Most pages are expected to be of the smallest page size (8K) and
3954 3811 * these will not need to be rehashed. 64K pages also don't need to be
3955 3812 * rehashed because an hmeblk spans 64K of address space. 512K pages
3956 3813 * might need 1 rehash and 4M pages might need 2 rehashes.
3957 3814 */
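/*
 * For example, an address mapped by a 4M TTE misses at hashno 1
 * (8K/64K hmeblks) and hashno 2 (512K) before hitting at hashno 3
 * (4M) -- the "2 rehashes" mentioned above.
 */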
3958 3815 while (addr < endaddr) {
3959 3816 hmeshift = HME_HASH_SHIFT(hashno);
3960 3817 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3961 3818 hblktag.htag_rehash = hashno;
3962 3819 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
3963 3820
3964 3821 SFMMU_HASH_LOCK(hmebp);
3965 3822
3966 3823 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
3967 3824 if (hmeblkp != NULL) {
3968 3825 ASSERT(!hmeblkp->hblk_shared);
3969 3826 /*
3970 3827 * If we encounter a shadow hmeblk then
3971 3828 * we know there are no valid hmeblks mapping
3972 3829 * this address at this size or larger.
3973 3830 * Just increment address by the smallest
3974 3831 * page size.
3975 3832 */
3976 3833 if (hmeblkp->hblk_shw_bit) {
3977 3834 addr += MMU_PAGESIZE;
3978 3835 } else {
3979 3836 addr = sfmmu_hblk_unlock(hmeblkp, addr,
3980 3837 endaddr);
3981 3838 }
3982 3839 SFMMU_HASH_UNLOCK(hmebp);
3983 3840 hashno = 1;
3984 3841 continue;
3985 3842 }
3986 3843 SFMMU_HASH_UNLOCK(hmebp);
3987 3844
3988 3845 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
3989 3846 /*
3990 3847 * We have traversed the whole list and rehashed
3991 3848 * if necessary without finding the address to unlock,
3992 3849 * which should never happen.
3993 3850 */
3994 3851 panic("sfmmu_unlock: addr not found. "
3995 3852 "addr %p hat %p", (void *)addr, (void *)sfmmup);
3996 3853 } else {
3997 3854 hashno++;
3998 3855 }
3999 3856 }
4000 3857
4001 3858 sfmmu_hblks_list_purge(&list, 0);
4002 3859 }
4003 3860
4004 3861 void
4005 3862 hat_unlock_region(struct hat *sfmmup, caddr_t addr, size_t len,
4006 3863 hat_region_cookie_t rcookie)
4007 3864 {
4008 3865 sf_srd_t *srdp;
4009 3866 sf_region_t *rgnp;
4010 3867 int ttesz;
4011 3868 uint_t rid;
4012 3869 caddr_t eaddr;
4013 3870 caddr_t va;
4014 3871 int hmeshift;
4015 3872 hmeblk_tag hblktag;
4016 3873 struct hmehash_bucket *hmebp;
4017 3874 struct hme_blk *hmeblkp;
4018 3875 struct hme_blk *pr_hblk;
4019 3876 struct hme_blk *list;
4020 3877
4021 3878 if (rcookie == HAT_INVALID_REGION_COOKIE) {
4022 3879 hat_unlock(sfmmup, addr, len);
4023 3880 return;
4024 3881 }
4025 3882
4026 3883 ASSERT(sfmmup != NULL);
4027 3884 ASSERT(sfmmup != ksfmmup);
4028 3885
4029 3886 srdp = sfmmup->sfmmu_srdp;
4030 3887 rid = (uint_t)((uint64_t)rcookie);
4031 3888 VERIFY3U(rid, <, SFMMU_MAX_HME_REGIONS);
4032 3889 eaddr = addr + len;
4033 3890 va = addr;
4034 3891 list = NULL;
4035 3892 rgnp = srdp->srd_hmergnp[rid];
4036 3893 SFMMU_VALIDATE_HMERID(sfmmup, rid, addr, len);
4037 3894
4038 3895 ASSERT(IS_P2ALIGNED(addr, TTEBYTES(rgnp->rgn_pgszc)));
4039 3896 ASSERT(IS_P2ALIGNED(len, TTEBYTES(rgnp->rgn_pgszc)));
4040 3897 if (rgnp->rgn_pgszc < HBLK_MIN_TTESZ) {
4041 3898 ttesz = HBLK_MIN_TTESZ;
4042 3899 } else {
4043 3900 ttesz = rgnp->rgn_pgszc;
4044 3901 }
4045 3902 while (va < eaddr) {
4046 3903 while (ttesz < rgnp->rgn_pgszc &&
4047 3904 IS_P2ALIGNED(va, TTEBYTES(ttesz + 1))) {
4048 3905 ttesz++;
4049 3906 }
4050 3907 while (ttesz >= HBLK_MIN_TTESZ) {
4051 3908 if (!(rgnp->rgn_hmeflags & (1 << ttesz))) {
4052 3909 ttesz--;
4053 3910 continue;
4054 3911 }
4055 3912 hmeshift = HME_HASH_SHIFT(ttesz);
4056 3913 hblktag.htag_bspage = HME_HASH_BSPAGE(va, hmeshift);
4057 3914 hblktag.htag_rehash = ttesz;
4058 3915 hblktag.htag_rid = rid;
4059 3916 hblktag.htag_id = srdp;
4060 3917 hmebp = HME_HASH_FUNCTION(srdp, va, hmeshift);
4061 3918 SFMMU_HASH_LOCK(hmebp);
4062 3919 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk,
4063 3920 &list);
4064 3921 if (hmeblkp == NULL) {
4065 3922 SFMMU_HASH_UNLOCK(hmebp);
4066 3923 ttesz--;
4067 3924 continue;
4068 3925 }
4069 3926 ASSERT(hmeblkp->hblk_shared);
4070 3927 va = sfmmu_hblk_unlock(hmeblkp, va, eaddr);
4071 3928 ASSERT(va >= eaddr ||
4072 3929 IS_P2ALIGNED((uintptr_t)va, TTEBYTES(ttesz)));
4073 3930 SFMMU_HASH_UNLOCK(hmebp);
4074 3931 break;
4075 3932 }
4076 3933 if (ttesz < HBLK_MIN_TTESZ) {
4077 3934 panic("hat_unlock_region: addr not found "
4078 3935 "addr %p hat %p", (void *)va, (void *)sfmmup);
4079 3936 }
4080 3937 }
4081 3938 sfmmu_hblks_list_purge(&list, 0);
4082 3939 }
4083 3940
4084 3941 /*
4085 3942 * Function to unlock a range of addresses in an hmeblk. It returns the
4086 3943 * next address that needs to be unlocked.
4087 3944 * Should be called with the hash lock held.
4088 3945 */
4089 3946 static caddr_t
4090 3947 sfmmu_hblk_unlock(struct hme_blk *hmeblkp, caddr_t addr, caddr_t endaddr)
4091 3948 {
4092 3949 struct sf_hment *sfhme;
4093 3950 tte_t tteold, ttemod;
4094 3951 int ttesz, ret;
4095 3952
4096 3953 ASSERT(in_hblk_range(hmeblkp, addr));
4097 3954 ASSERT(hmeblkp->hblk_shw_bit == 0);
4098 3955
4099 3956 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
4100 3957 ttesz = get_hblk_ttesz(hmeblkp);
4101 3958
4102 3959 HBLKTOHME(sfhme, hmeblkp, addr);
4103 3960 while (addr < endaddr) {
4104 3961 readtte:
4105 3962 sfmmu_copytte(&sfhme->hme_tte, &tteold);
4106 3963 if (TTE_IS_VALID(&tteold)) {
4107 3964
4108 3965 ttemod = tteold;
4109 3966
4110 3967 ret = sfmmu_modifytte_try(&tteold, &ttemod,
4111 3968 &sfhme->hme_tte);
4112 3969
4113 3970 if (ret < 0)
4114 3971 goto readtte;
4115 3972
4116 3973 if (hmeblkp->hblk_lckcnt == 0)
4117 3974 panic("zero hblk lckcnt");
4118 3975
4119 3976 if (((uintptr_t)addr + TTEBYTES(ttesz)) >
4120 3977 (uintptr_t)endaddr)
4121 3978 panic("can't unlock large tte");
4122 3979
4123 3980 ASSERT(hmeblkp->hblk_lckcnt > 0);
4124 3981 atomic_dec_32(&hmeblkp->hblk_lckcnt);
4125 3982 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
4126 3983 } else {
4127 3984 panic("sfmmu_hblk_unlock: invalid tte");
4128 3985 }
4129 3986 addr += TTEBYTES(ttesz);
4130 3987 sfhme++;
4131 3988 }
4132 3989 return (addr);
4133 3990 }
4134 3991
4135 3992 /*
4136 3993 * Physical Address Mapping Framework
4137 3994 *
4138 3995 * General rules:
4139 3996 *
4140 3997 * (1) Applies only to seg_kmem memory pages. To make things easier,
4141 3998 * seg_kpm addresses are also accepted by the routines, but nothing
4142 3999 * is done with them since by definition their PA mappings are static.
4143 4000 * (2) hat_add_callback() may only be called while holding the page lock
4144 4001 * SE_SHARED or SE_EXCL of the underlying page (e.g., as_pagelock()),
4145 4002 * or passing HAC_PAGELOCK flag.
4146 4003 * (3) prehandler() and posthandler() may not call hat_add_callback() or
4147 4004 * hat_delete_callback(), nor should they allocate memory. Post quiesce
4148 4005 * callbacks may not sleep or acquire adaptive mutex locks.
4149 4006 * (4) Either prehandler() or posthandler() (but not both) may be specified
4150 4007 * as being NULL. Specifying an errhandler() is optional.
4151 4008 *
4152 4009 * Details of using the framework:
4153 4010 *
4154 4011 * registering a callback (hat_register_callback())
4155 4012 *
4156 4013 * Pass prehandler, posthandler, errhandler addresses
4157 4014 * as described below. If capture_cpus argument is nonzero,
4158 4015 * suspend callback to the prehandler will occur with CPUs
4159 4016 * captured and executing xc_loop() and CPUs will remain
4160 4017 * captured until after the posthandler suspend callback
4161 4018 * occurs.
4162 4019 *
4163 4020 * adding a callback (hat_add_callback())
4164 4021 *
4165 4022 * as_pagelock();
4166 4023 * hat_add_callback();
4167 4024 * save returned pfn in private data structures or program registers;
4168 4025 * as_pageunlock();
4169 4026 *
4170 4027 * prehandler()
4171 4028 *
4172 4029 * Stop all accesses by physical address to this memory page.
4173 4030 * Called twice: the first, PRESUSPEND, runs in a context where it
4174 4031 * is safe to acquire adaptive locks. The second, SUSPEND, is called
4175 4032 * at high PIL with CPUs captured, so adaptive locks may NOT be
4176 4033 * acquired (and all spin locks must be XCALL_PIL or higher locks).
4177 4034 *
4178 4035 * May return the following errors:
4179 4036 * EIO: A fatal error has occurred. This will result in panic.
4180 4037 * EAGAIN: The page cannot be suspended. This will fail the
4181 4038 * relocation.
4182 4039 * 0: Success.
4183 4040 *
4184 4041 * posthandler()
4185 4042 *
4186 4043 * Save new pfn in private data structures or program registers;
4187 4044 * not allowed to fail (non-zero return values will result in panic).
4188 4045 *
4189 4046 * errhandler()
4190 4047 *
4191 4048 * called when an error occurs related to the callback. Currently
4192 4049 * the only such error is HAT_CB_ERR_LEAKED which indicates that
4193 4050 * a page is being freed, but there are still outstanding callback(s)
4194 4051 * registered on the page.
4195 4052 *
4196 4053 * removing a callback (hat_delete_callback(); e.g., prior to freeing memory)
4197 4054 *
4198 4055 * stop using physical address
4199 4056 * hat_delete_callback();
4200 4057 *
4201 4058 */
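/*
 * Illustrative sketch of the lifecycle described above. The "xx"
 * names, key, and state layout are hypothetical; only
 * hat_register_callback(), hat_add_callback(), hat_delete_callback()
 * and the as_pagelock()/as_pageunlock() steps come from the protocol
 * documented here:
 *
 *	static id_t xx_cb_id;
 *	static struct xx_state { pfn_t pfn; void *cookie; } xx;
 *
 *	xx_cb_id = hat_register_callback(XX_KEY, xx_prehandler,
 *	    xx_posthandler, NULL, 1);
 *
 *	as_pagelock(as, &pplist, va, MMU_PAGESIZE, S_WRITE);
 *	(void) hat_add_callback(xx_cb_id, va, MMU_PAGESIZE, 0,
 *	    &xx, &xx.pfn, &xx.cookie);
 *	as_pageunlock(as, pplist, va, MMU_PAGESIZE, S_WRITE);
 *
 *	(program the device with xx.pfn; if the page is relocated,
 *	xx_prehandler() quiesces access and xx_posthandler() records
 *	the new pfn)
 *
 *	hat_delete_callback(va, MMU_PAGESIZE, &xx, 0, xx.cookie);
 */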
4202 4059
4203 4060 /*
4204 4061 * Register a callback class. Each subsystem should do this once and
4205 4062 * cache the id_t returned for use in setting up and tearing down callbacks.
4206 4063 *
4207 4064 * There is no facility for removing callback IDs once they are created;
4208 4065 * the "key" should be unique for each module, so in case a module is unloaded
4209 4066 * and subsequently re-loaded, we can recycle the module's previous entry.
4210 4067 */
4211 4068 id_t
4212 4069 hat_register_callback(int key,
4213 4070 int (*prehandler)(caddr_t, uint_t, uint_t, void *),
4214 4071 int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t),
4215 4072 int (*errhandler)(caddr_t, uint_t, uint_t, void *),
4216 4073 int capture_cpus)
4217 4074 {
4218 4075 id_t id;
4219 4076
4220 4077 /*
4221 4078 * Search the table for a pre-existing callback associated with
4222 4079 * the identifier "key". If one exists, we re-use that entry in
4223 4080 * the table for this instance, otherwise we assign the next
4224 4081 * available table slot.
4225 4082 */
4226 4083 for (id = 0; id < sfmmu_max_cb_id; id++) {
4227 4084 if (sfmmu_cb_table[id].key == key)
4228 4085 break;
4229 4086 }
4230 4087
4231 4088 if (id == sfmmu_max_cb_id) {
4232 4089 id = sfmmu_cb_nextid++;
4233 4090 if (id >= sfmmu_max_cb_id)
4234 4091 panic("hat_register_callback: out of callback IDs");
4235 4092 }
4236 4093
4237 4094 ASSERT(prehandler != NULL || posthandler != NULL);
4238 4095
4239 4096 sfmmu_cb_table[id].key = key;
4240 4097 sfmmu_cb_table[id].prehandler = prehandler;
4241 4098 sfmmu_cb_table[id].posthandler = posthandler;
4242 4099 sfmmu_cb_table[id].errhandler = errhandler;
4243 4100 sfmmu_cb_table[id].capture_cpus = capture_cpus;
4244 4101
4245 4102 return (id);
4246 4103 }
4247 4104
4248 4105 #define HAC_COOKIE_NONE (void *)-1
4249 4106
4250 4107 /*
4251 4108 * Add relocation callbacks to the specified addr/len which will be called
4252 4109 * when relocating the associated page. See the description of pre and
4253 4110 * posthandler above for more details.
4254 4111 *
4255 4112 * If HAC_PAGELOCK is included in flags, the underlying memory page is
4256 4113 * locked internally so the caller must be able to deal with the callback
4257 4114 * running even before this function has returned. If HAC_PAGELOCK is not
4258 4115 * set, it is assumed that the underlying memory pages are locked.
4259 4116 *
4260 4117 * Since the caller must track the individual page boundaries anyway,
4261 4118 * we only allow a callback to be added to a single page (large
4262 4119 * or small). Thus [addr, addr + len) MUST be contained within a single
4263 4120 * page.
4264 4121 *
4265 4122 * Registering multiple callbacks on the same [addr, addr+len) is supported,
4266 4123 * _provided_that_ a unique parameter is specified for each callback.
4267 4124 * If multiple callbacks are registered on the same range the callback will
4268 4125 * be invoked with each unique parameter. Registering the same callback with
4269 4126 * the same argument more than once will result in corrupted kernel state.
4270 4127 *
4271 4128 * Returns the pfn of the underlying kernel page in *rpfn
4272 4129 * on success, or PFN_INVALID on failure.
4273 4130 *
4274 4131 * cookiep (if passed) provides storage space for an opaque cookie
4275 4132 * to return later to hat_delete_callback(). This cookie makes the callback
4276 4133 * deletion significantly quicker by avoiding a potentially lengthy hash
4277 4134 * search.
4278 4135 *
4279 4136 * Returns values:
4280 4137 * 0: success
4281 4138 * ENOMEM: memory allocation failure (e.g. flags was passed as HAC_NOSLEEP)
4282 4139 * EINVAL: callback ID is not valid
4283 4140 * ENXIO: ["vaddr", "vaddr" + len) is not mapped in the kernel's address
4284 4141 * space
4285 4142 * ERANGE: ["vaddr", "vaddr" + len) crosses a page boundary
4286 4143 */
4287 4144 int
4288 4145 hat_add_callback(id_t callback_id, caddr_t vaddr, uint_t len, uint_t flags,
4289 4146 void *pvt, pfn_t *rpfn, void **cookiep)
4290 4147 {
4291 4148 struct hmehash_bucket *hmebp;
4292 4149 hmeblk_tag hblktag;
4293 4150 struct hme_blk *hmeblkp;
4294 4151 int hmeshift, hashno;
4295 4152 caddr_t saddr, eaddr, baseaddr;
4296 4153 struct pa_hment *pahmep;
4297 4154 struct sf_hment *sfhmep, *osfhmep;
4298 4155 kmutex_t *pml;
4299 4156 tte_t tte;
4300 4157 page_t *pp;
4301 4158 vnode_t *vp;
4302 4159 u_offset_t off;
4303 4160 pfn_t pfn;
4304 4161 int kmflags = (flags & HAC_SLEEP)? KM_SLEEP : KM_NOSLEEP;
4305 4162 int locked = 0;
4306 4163
4307 4164 /*
4308 4165 * For KPM mappings, just return the physical address since we
4309 4166 * don't need to register any callbacks.
4310 4167 */
4311 4168 if (IS_KPM_ADDR(vaddr)) {
4312 4169 uint64_t paddr;
4313 4170 SFMMU_KPM_VTOP(vaddr, paddr);
4314 4171 *rpfn = btop(paddr);
4315 4172 if (cookiep != NULL)
4316 4173 *cookiep = HAC_COOKIE_NONE;
4317 4174 return (0);
4318 4175 }
4319 4176
4320 4177 if (callback_id < (id_t)0 || callback_id >= sfmmu_cb_nextid) {
4321 4178 *rpfn = PFN_INVALID;
4322 4179 return (EINVAL);
4323 4180 }
4324 4181
4325 4182 if ((pahmep = kmem_cache_alloc(pa_hment_cache, kmflags)) == NULL) {
4326 4183 *rpfn = PFN_INVALID;
4327 4184 return (ENOMEM);
4328 4185 }
4329 4186
4330 4187 sfhmep = &pahmep->sfment;
4331 4188
4332 4189 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK);
4333 4190 eaddr = saddr + len;
4334 4191
4335 4192 rehash:
4336 4193 /* Find the mapping(s) for this page */
4337 4194 for (hashno = TTE64K, hmeblkp = NULL;
4338 4195 hmeblkp == NULL && hashno <= mmu_hashcnt;
4339 4196 hashno++) {
4340 4197 hmeshift = HME_HASH_SHIFT(hashno);
4341 4198 hblktag.htag_id = ksfmmup;
4342 4199 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
4343 4200 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift);
4344 4201 hblktag.htag_rehash = hashno;
4345 4202 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift);
4346 4203
4347 4204 SFMMU_HASH_LOCK(hmebp);
4348 4205
4349 4206 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
4350 4207
4351 4208 if (hmeblkp == NULL)
4352 4209 SFMMU_HASH_UNLOCK(hmebp);
4353 4210 }
4354 4211
4355 4212 if (hmeblkp == NULL) {
4356 4213 kmem_cache_free(pa_hment_cache, pahmep);
4357 4214 *rpfn = PFN_INVALID;
4358 4215 return (ENXIO);
4359 4216 }
4360 4217
4361 4218 ASSERT(!hmeblkp->hblk_shared);
4362 4219
4363 4220 HBLKTOHME(osfhmep, hmeblkp, saddr);
4364 4221 sfmmu_copytte(&osfhmep->hme_tte, &tte);
4365 4222
4366 4223 if (!TTE_IS_VALID(&tte)) {
4367 4224 SFMMU_HASH_UNLOCK(hmebp);
4368 4225 kmem_cache_free(pa_hment_cache, pahmep);
4369 4226 *rpfn = PFN_INVALID;
4370 4227 return (ENXIO);
4371 4228 }
4372 4229
4373 4230 /*
4374 4231 * Make sure the boundaries for the callback fall within this
4375 4232 * single mapping.
4376 4233 */
4377 4234 baseaddr = (caddr_t)get_hblk_base(hmeblkp);
4378 4235 ASSERT(saddr >= baseaddr);
4379 4236 if (eaddr > saddr + TTEBYTES(TTE_CSZ(&tte))) {
4380 4237 SFMMU_HASH_UNLOCK(hmebp);
4381 4238 kmem_cache_free(pa_hment_cache, pahmep);
4382 4239 *rpfn = PFN_INVALID;
4383 4240 return (ERANGE);
4384 4241 }
4385 4242
4386 4243 pfn = sfmmu_ttetopfn(&tte, vaddr);
4387 4244
4388 4245 /*
4389 4246 * The pfn may not have a page_t underneath in which case we
4390 4247 * just return it. This can happen if we are doing I/O to a
4391 4248 * static portion of the kernel's address space, for instance.
4392 4249 */
4393 4250 pp = osfhmep->hme_page;
4394 4251 if (pp == NULL) {
4395 4252 SFMMU_HASH_UNLOCK(hmebp);
4396 4253 kmem_cache_free(pa_hment_cache, pahmep);
4397 4254 *rpfn = pfn;
4398 4255 if (cookiep)
4399 4256 *cookiep = HAC_COOKIE_NONE;
4400 4257 return (0);
4401 4258 }
4402 4259 ASSERT(pp == PP_PAGEROOT(pp));
4403 4260
4404 4261 vp = pp->p_vnode;
4405 4262 off = pp->p_offset;
4406 4263
4407 4264 pml = sfmmu_mlist_enter(pp);
4408 4265
4409 4266 if (flags & HAC_PAGELOCK) {
4410 4267 if (!page_trylock(pp, SE_SHARED)) {
4411 4268 /*
4412 4269 * Somebody is holding SE_EXCL lock. Might
4413 4270 * even be hat_page_relocate(). Drop all
4414 4271 * our locks, lookup the page in &kvp, and
4415 4272 * retry. If it doesn't exist in &kvp and &zvp,
4416 4273 * then we must be dealing with a kernel mapped
4417 4274 * page which doesn't actually belong to
4418 4275 * segkmem so we punt.
4419 4276 */
4420 4277 sfmmu_mlist_exit(pml);
4421 4278 SFMMU_HASH_UNLOCK(hmebp);
4422 4279 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED);
4423 4280
4424 4281 /* check zvp before giving up */
4425 4282 if (pp == NULL)
4426 4283 pp = page_lookup(&zvp, (u_offset_t)saddr,
4427 4284 SE_SHARED);
4428 4285
4429 4286 /* Okay, we didn't find it, give up */
4430 4287 if (pp == NULL) {
4431 4288 kmem_cache_free(pa_hment_cache, pahmep);
4432 4289 *rpfn = pfn;
4433 4290 if (cookiep)
4434 4291 *cookiep = HAC_COOKIE_NONE;
4435 4292 return (0);
4436 4293 }
4437 4294 page_unlock(pp);
4438 4295 goto rehash;
4439 4296 }
4440 4297 locked = 1;
4441 4298 }
4442 4299
4443 4300 if (!PAGE_LOCKED(pp) && !panicstr)
4444 4301 panic("hat_add_callback: page 0x%p not locked", (void *)pp);
4445 4302
4446 4303 if (osfhmep->hme_page != pp || pp->p_vnode != vp ||
4447 4304 pp->p_offset != off) {
4448 4305 /*
4449 4306 * The page moved before we got our hands on it. Drop
4450 4307 * all the locks and try again.
4451 4308 */
4452 4309 ASSERT((flags & HAC_PAGELOCK) != 0);
4453 4310 sfmmu_mlist_exit(pml);
4454 4311 SFMMU_HASH_UNLOCK(hmebp);
4455 4312 page_unlock(pp);
4456 4313 locked = 0;
4457 4314 goto rehash;
4458 4315 }
4459 4316
4460 4317 if (!VN_ISKAS(vp)) {
4461 4318 /*
4462 4319 * This is not a segkmem page but another page which
4463 4320 * has been kernel mapped. It had better have at least
4464 4321 * a share lock on it. Return the pfn.
4465 4322 */
4466 4323 sfmmu_mlist_exit(pml);
4467 4324 SFMMU_HASH_UNLOCK(hmebp);
4468 4325 if (locked)
4469 4326 page_unlock(pp);
4470 4327 kmem_cache_free(pa_hment_cache, pahmep);
4471 4328 ASSERT(PAGE_LOCKED(pp));
4472 4329 *rpfn = pfn;
4473 4330 if (cookiep)
4474 4331 *cookiep = HAC_COOKIE_NONE;
4475 4332 return (0);
4476 4333 }
4477 4334
4478 4335 /*
4479 4336 * Setup this pa_hment and link its embedded dummy sf_hment into
4480 4337 * the mapping list.
4481 4338 */
4482 4339 pp->p_share++;
4483 4340 pahmep->cb_id = callback_id;
4484 4341 pahmep->addr = vaddr;
4485 4342 pahmep->len = len;
4486 4343 pahmep->refcnt = 1;
4487 4344 pahmep->flags = 0;
4488 4345 pahmep->pvt = pvt;
4489 4346
4490 4347 sfhmep->hme_tte.ll = 0;
4491 4348 sfhmep->hme_data = pahmep;
4492 4349 sfhmep->hme_prev = osfhmep;
4493 4350 sfhmep->hme_next = osfhmep->hme_next;
4494 4351
4495 4352 if (osfhmep->hme_next)
4496 4353 osfhmep->hme_next->hme_prev = sfhmep;
4497 4354
4498 4355 osfhmep->hme_next = sfhmep;
4499 4356
4500 4357 sfmmu_mlist_exit(pml);
4501 4358 SFMMU_HASH_UNLOCK(hmebp);
4502 4359
4503 4360 if (locked)
4504 4361 page_unlock(pp);
4505 4362
4506 4363 *rpfn = pfn;
4507 4364 if (cookiep)
4508 4365 *cookiep = (void *)pahmep;
4509 4366
4510 4367 return (0);
4511 4368 }
4512 4369
4513 4370 /*
4514 4371 * Remove the relocation callbacks from the specified addr/len.
4515 4372 */
4516 4373 void
4517 4374 hat_delete_callback(caddr_t vaddr, uint_t len, void *pvt, uint_t flags,
4518 4375 void *cookie)
4519 4376 {
4520 4377 struct hmehash_bucket *hmebp;
4521 4378 hmeblk_tag hblktag;
4522 4379 struct hme_blk *hmeblkp;
4523 4380 int hmeshift, hashno;
4524 4381 caddr_t saddr;
4525 4382 struct pa_hment *pahmep;
4526 4383 struct sf_hment *sfhmep, *osfhmep;
4527 4384 kmutex_t *pml;
4528 4385 tte_t tte;
4529 4386 page_t *pp;
4530 4387 vnode_t *vp;
4531 4388 u_offset_t off;
4532 4389 int locked = 0;
4533 4390
4534 4391 /*
4535 4392 * If the cookie is HAC_COOKIE_NONE then there is no pa_hment to
4536 4393 * remove so just return.
4537 4394 */
4538 4395 if (cookie == HAC_COOKIE_NONE || IS_KPM_ADDR(vaddr))
4539 4396 return;
4540 4397
4541 4398 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK);
4542 4399
4543 4400 rehash:
4544 4401 /* Find the mapping(s) for this page */
4545 4402 for (hashno = TTE64K, hmeblkp = NULL;
4546 4403 hmeblkp == NULL && hashno <= mmu_hashcnt;
4547 4404 hashno++) {
4548 4405 hmeshift = HME_HASH_SHIFT(hashno);
4549 4406 hblktag.htag_id = ksfmmup;
4550 4407 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
4551 4408 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift);
4552 4409 hblktag.htag_rehash = hashno;
4553 4410 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift);
4554 4411
4555 4412 SFMMU_HASH_LOCK(hmebp);
4556 4413
4557 4414 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
4558 4415
4559 4416 if (hmeblkp == NULL)
4560 4417 SFMMU_HASH_UNLOCK(hmebp);
4561 4418 }
4562 4419
4563 4420 if (hmeblkp == NULL)
4564 4421 return;
4565 4422
4566 4423 ASSERT(!hmeblkp->hblk_shared);
4567 4424
4568 4425 HBLKTOHME(osfhmep, hmeblkp, saddr);
4569 4426
4570 4427 sfmmu_copytte(&osfhmep->hme_tte, &tte);
4571 4428 if (!TTE_IS_VALID(&tte)) {
4572 4429 SFMMU_HASH_UNLOCK(hmebp);
4573 4430 return;
4574 4431 }
4575 4432
4576 4433 pp = osfhmep->hme_page;
4577 4434 if (pp == NULL) {
4578 4435 SFMMU_HASH_UNLOCK(hmebp);
4579 4436 ASSERT(cookie == NULL);
4580 4437 return;
4581 4438 }
4582 4439
4583 4440 vp = pp->p_vnode;
4584 4441 off = pp->p_offset;
4585 4442
4586 4443 pml = sfmmu_mlist_enter(pp);
4587 4444
4588 4445 if (flags & HAC_PAGELOCK) {
4589 4446 if (!page_trylock(pp, SE_SHARED)) {
4590 4447 /*
4591 4448 * Somebody is holding SE_EXCL lock. Might
4592 4449 * even be hat_page_relocate(). Drop all
4593 4450 * our locks, lookup the page in &kvp, and
4594 4451 * retry. If it doesn't exist in &kvp and &zvp,
4595 4452 * then we must be dealing with a kernel mapped
4596 4453 * page which doesn't actually belong to
4597 4454 * segkmem so we punt.
4598 4455 */
4599 4456 sfmmu_mlist_exit(pml);
4600 4457 SFMMU_HASH_UNLOCK(hmebp);
4601 4458 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED);
4602 4459 /* check zvp before giving up */
4603 4460 if (pp == NULL)
4604 4461 pp = page_lookup(&zvp, (u_offset_t)saddr,
4605 4462 SE_SHARED);
4606 4463
4607 4464 if (pp == NULL) {
4608 4465 ASSERT(cookie == NULL);
4609 4466 return;
4610 4467 }
4611 4468 page_unlock(pp);
4612 4469 goto rehash;
4613 4470 }
4614 4471 locked = 1;
4615 4472 }
4616 4473
4617 4474 ASSERT(PAGE_LOCKED(pp));
4618 4475
4619 4476 if (osfhmep->hme_page != pp || pp->p_vnode != vp ||
4620 4477 pp->p_offset != off) {
4621 4478 /*
4622 4479 * The page moved before we got our hands on it. Drop
4623 4480 * all the locks and try again.
4624 4481 */
4625 4482 ASSERT((flags & HAC_PAGELOCK) != 0);
4626 4483 sfmmu_mlist_exit(pml);
4627 4484 SFMMU_HASH_UNLOCK(hmebp);
4628 4485 page_unlock(pp);
4629 4486 locked = 0;
4630 4487 goto rehash;
4631 4488 }
4632 4489
4633 4490 if (!VN_ISKAS(vp)) {
4634 4491 /*
4635 4492 * This is not a segkmem page but another page which
4636 4493 * has been kernel mapped.
4637 4494 */
4638 4495 sfmmu_mlist_exit(pml);
4639 4496 SFMMU_HASH_UNLOCK(hmebp);
4640 4497 if (locked)
4641 4498 page_unlock(pp);
4642 4499 ASSERT(cookie == NULL);
4643 4500 return;
4644 4501 }
4645 4502
4646 4503 if (cookie != NULL) {
4647 4504 pahmep = (struct pa_hment *)cookie;
4648 4505 sfhmep = &pahmep->sfment;
4649 4506 } else {
4650 4507 for (sfhmep = pp->p_mapping; sfhmep != NULL;
4651 4508 sfhmep = sfhmep->hme_next) {
4652 4509
4653 4510 /*
4654 4511 * skip va<->pa mappings
4655 4512 */
4656 4513 if (!IS_PAHME(sfhmep))
4657 4514 continue;
4658 4515
4659 4516 pahmep = sfhmep->hme_data;
4660 4517 ASSERT(pahmep != NULL);
4661 4518
4662 4519 /*
4663 4520 * if pa_hment matches, remove it
4664 4521 */
4665 4522 if ((pahmep->pvt == pvt) &&
4666 4523 (pahmep->addr == vaddr) &&
4667 4524 (pahmep->len == len)) {
4668 4525 break;
4669 4526 }
4670 4527 }
4671 4528 }
4672 4529
4673 4530 if (sfhmep == NULL) {
4674 4531 if (!panicstr) {
4675 4532 panic("hat_delete_callback: pa_hment not found, pp %p",
4676 4533 (void *)pp);
4677 4534 }
4678 4535 return;
4679 4536 }
4680 4537
4681 4538 /*
4682 4539 * Note: at this point a valid kernel mapping must still be
4683 4540 * present on this page.
4684 4541 */
4685 4542 pp->p_share--;
4686 4543 if (pp->p_share <= 0)
4687 4544 panic("hat_delete_callback: zero p_share");
4688 4545
4689 4546 if (--pahmep->refcnt == 0) {
4690 4547 if (pahmep->flags != 0)
4691 4548 panic("hat_delete_callback: pa_hment is busy");
4692 4549
4693 4550 /*
4694 4551 * Remove sfhmep from the mapping list for the page.
4695 4552 */
4696 4553 if (sfhmep->hme_prev) {
4697 4554 sfhmep->hme_prev->hme_next = sfhmep->hme_next;
4698 4555 } else {
4699 4556 pp->p_mapping = sfhmep->hme_next;
4700 4557 }
4701 4558
4702 4559 if (sfhmep->hme_next)
4703 4560 sfhmep->hme_next->hme_prev = sfhmep->hme_prev;
4704 4561
4705 4562 sfmmu_mlist_exit(pml);
4706 4563 SFMMU_HASH_UNLOCK(hmebp);
4707 4564
4708 4565 if (locked)
4709 4566 page_unlock(pp);
4710 4567
4711 4568 kmem_cache_free(pa_hment_cache, pahmep);
4712 4569 return;
4713 4570 }
4714 4571
4715 4572 sfmmu_mlist_exit(pml);
4716 4573 SFMMU_HASH_UNLOCK(hmebp);
4717 4574 if (locked)
4718 4575 page_unlock(pp);
4719 4576 }
4720 4577
4721 4578 /*
4722 4579 * hat_probe returns 1 if the translation for the address 'addr' is
4723 4580 * loaded, zero otherwise.
4724 4581 *
4725 4582  * hat_probe should be used only for advisory purposes because it may
4726 4583 * occasionally return the wrong value. The implementation must guarantee that
4727 4584 * returning the wrong value is a very rare event. hat_probe is used
4728 4585 * to implement optimizations in the segment drivers.
4729 4586 *
4730 4587 */
4731 4588 int
4732 4589 hat_probe(struct hat *sfmmup, caddr_t addr)
4733 4590 {
4734 4591 pfn_t pfn;
4735 4592 tte_t tte;
4736 4593
4737 4594 ASSERT(sfmmup != NULL);
4738 4595
4739 4596 ASSERT((sfmmup == ksfmmup) ||
4740 4597 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
4741 4598
4742 4599 if (sfmmup == ksfmmup) {
4743 4600 while ((pfn = sfmmu_vatopfn(addr, sfmmup, &tte))
4744 4601 == PFN_SUSPENDED) {
4745 4602 sfmmu_vatopfn_suspended(addr, sfmmup, &tte);
4746 4603 }
4747 4604 } else {
4748 4605 pfn = sfmmu_uvatopfn(addr, sfmmup, NULL);
4749 4606 }
4750 4607
4751 4608 if (pfn != PFN_INVALID)
4752 4609 return (1);
4753 4610 else
4754 4611 return (0);
4755 4612 }
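
hat_probe's advisory contract means a caller must remain correct even when the answer is stale. The following user-level sketch (probe_stub and preload_translation are hypothetical stand-ins, not kernel interfaces) shows the shape of such advisory use: a rare wrong answer costs at most an extra fault later, never correctness.

    #include <stdio.h>
    #include <stdint.h>

    /*
     * Hypothetical stand-in for hat_probe(): returns 1 if a translation
     * for addr is (probably) loaded, 0 otherwise. Arbitrary toy predicate.
     */
    static int
    probe_stub(uintptr_t addr)
    {
        return ((addr & 0x1000) != 0);
    }

    static void
    preload_translation(uintptr_t addr)
    {
        printf("preloading %#lx\n", (unsigned long)addr);
    }

    int
    main(void)
    {
        uintptr_t va;

        /*
         * Advisory use: only preload when the probe says "not loaded".
         * A rare wrong answer means one extra fault later, nothing more.
         */
        for (va = 0x10000; va < 0x14000; va += 0x1000) {
            if (probe_stub(va) == 0)
                preload_translation(va);
        }
        return (0);
    }
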
4756 4613
4757 4614 ssize_t
4758 4615 hat_getpagesize(struct hat *sfmmup, caddr_t addr)
4759 4616 {
4760 4617 tte_t tte;
4761 4618
4762 4619 if (sfmmup == ksfmmup) {
4763 4620 if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4764 4621 return (-1);
4765 4622 }
4766 4623 } else {
4767 4624 if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4768 4625 return (-1);
4769 4626 }
4770 4627 }
4771 4628
4772 4629 ASSERT(TTE_IS_VALID(&tte));
4773 4630 return (TTEBYTES(TTE_CSZ(&tte)));
4774 4631 }
4775 4632
4776 4633 uint_t
4777 4634 hat_getattr(struct hat *sfmmup, caddr_t addr, uint_t *attr)
4778 4635 {
4779 4636 tte_t tte;
4780 4637
4781 4638 if (sfmmup == ksfmmup) {
4782 4639 if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4783 4640 tte.ll = 0;
4784 4641 }
4785 4642 } else {
4786 4643 if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4787 4644 tte.ll = 0;
4788 4645 }
4789 4646 }
4790 4647 if (TTE_IS_VALID(&tte)) {
4791 4648 *attr = sfmmu_ptov_attr(&tte);
4792 4649 return (0);
4793 4650 }
4794 4651 *attr = 0;
4795 4652 return ((uint_t)0xffffffff);
4796 4653 }
4797 4654
4798 4655 /*
4799 4656  * Enables additional attributes on the specified address range (i.e. logical OR)
4800 4657 */
4801 4658 void
4802 4659 hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4803 4660 {
4804 4661 ASSERT(hat->sfmmu_as != NULL);
4805 4662
4806 4663 sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR);
4807 4664 }
4808 4665
4809 4666 /*
4810 4667  * Assigns attributes to the specified address range; all the attributes
4811 4668  * are specified (the existing set is replaced).
4812 4669 */
4813 4670 void
4814 4671 hat_chgattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4815 4672 {
4816 4673 ASSERT(hat->sfmmu_as != NULL);
4817 4674
4818 4675 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CHGATTR);
4819 4676 }
4820 4677
4821 4678 /*
4822 4679  * Remove attributes on the specified address range (i.e. logical NAND)
4823 4680 */
4824 4681 void
4825 4682 hat_clrattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4826 4683 {
4827 4684 ASSERT(hat->sfmmu_as != NULL);
4828 4685
4829 4686 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CLRATTR);
4830 4687 }
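
The three entry points above differ only in the mode they pass to sfmmu_chgattr: SETATTR ORs attribute bits in, CLRATTR NANDs them out, and CHGATTR replaces the whole set. A minimal standalone sketch of those three combining rules on a plain bitmask (the MODE_* names and bit values are illustrative, not the kernel's):

    #include <stdio.h>

    #define MODE_SET 0  /* stand-in for SFMMU_SETATTR */
    #define MODE_CLR 1  /* stand-in for SFMMU_CLRATTR */
    #define MODE_CHG 2  /* stand-in for SFMMU_CHGATTR */

    /* Combine 'attr' into 'cur' the way the hat_*attr entry points do. */
    static unsigned int
    apply_attr(unsigned int cur, unsigned int attr, int mode)
    {
        switch (mode) {
        case MODE_SET:
            return (cur | attr);    /* logical OR */
        case MODE_CLR:
            return (cur & ~attr);   /* logical NAND */
        case MODE_CHG:
            return (attr);          /* full replacement */
        }
        return (cur);
    }

    int
    main(void)
    {
        unsigned int prot = 0x1 | 0x2;  /* say, READ|WRITE */

        printf("%#x\n", apply_attr(prot, 0x4, MODE_SET)); /* 0x7 */
        printf("%#x\n", apply_attr(prot, 0x2, MODE_CLR)); /* 0x1 */
        printf("%#x\n", apply_attr(prot, 0x4, MODE_CHG)); /* 0x4 */
        return (0);
    }
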
4831 4688
4832 4689 /*
4833 4690 * Change attributes on an address range to that specified by attr and mode.
4834 4691 */
4835 4692 static void
4836 4693 sfmmu_chgattr(struct hat *sfmmup, caddr_t addr, size_t len, uint_t attr,
4837 4694 int mode)
4838 4695 {
4839 4696 struct hmehash_bucket *hmebp;
4840 4697 hmeblk_tag hblktag;
4841 4698 int hmeshift, hashno = 1;
4842 4699 struct hme_blk *hmeblkp, *list = NULL;
4843 4700 caddr_t endaddr;
4844 4701 cpuset_t cpuset;
4845 4702 demap_range_t dmr;
4846 4703
4847 4704 CPUSET_ZERO(cpuset);
4848 4705
4849 4706 ASSERT((sfmmup == ksfmmup) ||
4850 4707 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
4851 4708 ASSERT((len & MMU_PAGEOFFSET) == 0);
4852 4709 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0);
4853 4710
4854 4711 if ((attr & PROT_USER) && (mode != SFMMU_CLRATTR) &&
4855 4712 ((addr + len) > (caddr_t)USERLIMIT)) {
4856 4713 panic("user addr %p in kernel space",
4857 4714 (void *)addr);
4858 4715 }
4859 4716
4860 4717 endaddr = addr + len;
4861 4718 hblktag.htag_id = sfmmup;
4862 4719 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
4863 4720 DEMAP_RANGE_INIT(sfmmup, &dmr);
4864 4721
4865 4722 while (addr < endaddr) {
4866 4723 hmeshift = HME_HASH_SHIFT(hashno);
4867 4724 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
4868 4725 hblktag.htag_rehash = hashno;
4869 4726 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
4870 4727
4871 4728 SFMMU_HASH_LOCK(hmebp);
4872 4729
4873 4730 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
4874 4731 if (hmeblkp != NULL) {
4875 4732 ASSERT(!hmeblkp->hblk_shared);
4876 4733 /*
4877 4734 * We've encountered a shadow hmeblk so skip the range
4878 4735 * of the next smaller mapping size.
4879 4736 */
4880 4737 if (hmeblkp->hblk_shw_bit) {
4881 4738 ASSERT(sfmmup != ksfmmup);
4882 4739 ASSERT(hashno > 1);
4883 4740 addr = (caddr_t)P2END((uintptr_t)addr,
4884 4741 TTEBYTES(hashno - 1));
4885 4742 } else {
4886 4743 addr = sfmmu_hblk_chgattr(sfmmup,
4887 4744 hmeblkp, addr, endaddr, &dmr, attr, mode);
4888 4745 }
4889 4746 SFMMU_HASH_UNLOCK(hmebp);
4890 4747 hashno = 1;
4891 4748 continue;
4892 4749 }
4893 4750 SFMMU_HASH_UNLOCK(hmebp);
4894 4751
4895 4752 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
4896 4753 /*
4897 4754 * We have traversed the whole list and rehashed
4898 4755 			 * This is ok, so we increment the address by the
4899 4756 			 * smallest hmeblk range (for kernel mappings, or user
4900 4757 			 * mappings with no large pages) or by the largest
4901 4758 			 * hmeblk range (for user mappings with large pages,
4902 4759 			 * to account for shadow hmeblks), and continue.
4903 4760 * user mappings with large pages and continue.
4904 4761 */
4905 4762 if (sfmmup == ksfmmup)
4906 4763 addr = (caddr_t)P2END((uintptr_t)addr,
4907 4764 TTEBYTES(1));
4908 4765 else
4909 4766 addr = (caddr_t)P2END((uintptr_t)addr,
4910 4767 TTEBYTES(hashno));
4911 4768 hashno = 1;
4912 4769 } else {
4913 4770 hashno++;
4914 4771 }
4915 4772 }
4916 4773
4917 4774 sfmmu_hblks_list_purge(&list, 0);
4918 4775 DEMAP_RANGE_FLUSH(&dmr);
4919 4776 cpuset = sfmmup->sfmmu_cpusran;
4920 4777 xt_sync(cpuset);
4921 4778 }
4922 4779
4923 4780 /*
4924 4781  * This function changes attributes on a range of addresses in an hmeblk. It
4925 4782  * returns the next address whose attributes need to be changed.
4926 4783 * It should be called with the hash lock held.
4927 4784 * XXX It should be possible to optimize chgattr by not flushing every time but
4928 4785 * on the other hand:
4929 4786 * 1. do one flush crosscall.
4930 4787 * 2. only flush if we are increasing permissions (make sure this will work)
4931 4788 */
4932 4789 static caddr_t
4933 4790 sfmmu_hblk_chgattr(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
4934 4791 caddr_t endaddr, demap_range_t *dmrp, uint_t attr, int mode)
4935 4792 {
4936 4793 tte_t tte, tteattr, tteflags, ttemod;
4937 4794 struct sf_hment *sfhmep;
4938 4795 int ttesz;
4939 4796 struct page *pp = NULL;
4940 4797 kmutex_t *pml, *pmtx;
4941 4798 int ret;
4942 4799 int use_demap_range;
4943 4800 #if defined(SF_ERRATA_57)
4944 4801 int check_exec;
4945 4802 #endif
4946 4803
4947 4804 ASSERT(in_hblk_range(hmeblkp, addr));
4948 4805 ASSERT(hmeblkp->hblk_shw_bit == 0);
4949 4806 ASSERT(!hmeblkp->hblk_shared);
4950 4807
4951 4808 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
4952 4809 ttesz = get_hblk_ttesz(hmeblkp);
4953 4810
4954 4811 /*
4955 4812 * Flush the current demap region if addresses have been
4956 4813 * skipped or the page size doesn't match.
4957 4814 */
4958 4815 use_demap_range = (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp));
4959 4816 if (use_demap_range) {
4960 4817 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
4961 4818 } else if (dmrp != NULL) {
4962 4819 DEMAP_RANGE_FLUSH(dmrp);
4963 4820 }
4964 4821
4965 4822 tteattr.ll = sfmmu_vtop_attr(attr, mode, &tteflags);
4966 4823 #if defined(SF_ERRATA_57)
4967 4824 check_exec = (sfmmup != ksfmmup) &&
4968 4825 AS_TYPE_64BIT(sfmmup->sfmmu_as) &&
4969 4826 TTE_IS_EXECUTABLE(&tteattr);
4970 4827 #endif
4971 4828 HBLKTOHME(sfhmep, hmeblkp, addr);
4972 4829 while (addr < endaddr) {
4973 4830 sfmmu_copytte(&sfhmep->hme_tte, &tte);
4974 4831 if (TTE_IS_VALID(&tte)) {
4975 4832 if ((tte.ll & tteflags.ll) == tteattr.ll) {
4976 4833 /*
4977 4834 * if the new attr is the same as old
4978 4835 * continue
4979 4836 */
4980 4837 goto next_addr;
4981 4838 }
4982 4839 if (!TTE_IS_WRITABLE(&tteattr)) {
4983 4840 /*
4984 4841 				 * make sure we clear the hw modify bit if we
4985 4842 				 * are removing write protection
4986 4843 */
4987 4844 tteflags.tte_intlo |= TTE_HWWR_INT;
4988 4845 }
4989 4846
4990 4847 pml = NULL;
4991 4848 pp = sfhmep->hme_page;
4992 4849 if (pp) {
4993 4850 pml = sfmmu_mlist_enter(pp);
4994 4851 }
4995 4852
4996 4853 if (pp != sfhmep->hme_page) {
4997 4854 /*
4998 4855 * tte must have been unloaded.
4999 4856 */
5000 4857 ASSERT(pml);
5001 4858 sfmmu_mlist_exit(pml);
5002 4859 continue;
5003 4860 }
5004 4861
5005 4862 ASSERT(pp == NULL || sfmmu_mlist_held(pp));
5006 4863
5007 4864 ttemod = tte;
5008 4865 ttemod.ll = (ttemod.ll & ~tteflags.ll) | tteattr.ll;
5009 4866 ASSERT(TTE_TO_TTEPFN(&ttemod) == TTE_TO_TTEPFN(&tte));
5010 4867
5011 4868 #if defined(SF_ERRATA_57)
5012 4869 if (check_exec && addr < errata57_limit)
5013 4870 ttemod.tte_exec_perm = 0;
5014 4871 #endif
5015 4872 ret = sfmmu_modifytte_try(&tte, &ttemod,
5016 4873 &sfhmep->hme_tte);
5017 4874
5018 4875 if (ret < 0) {
5019 4876 /* tte changed underneath us */
5020 4877 if (pml) {
5021 4878 sfmmu_mlist_exit(pml);
5022 4879 }
5023 4880 continue;
5024 4881 }
5025 4882
5026 4883 if (tteflags.tte_intlo & TTE_HWWR_INT) {
5027 4884 /*
5028 4885 * need to sync if we are clearing modify bit.
5029 4886 */
5030 4887 sfmmu_ttesync(sfmmup, addr, &tte, pp);
5031 4888 }
5032 4889
5033 4890 if (pp && PP_ISRO(pp)) {
5034 4891 if (tteattr.tte_intlo & TTE_WRPRM_INT) {
5035 4892 pmtx = sfmmu_page_enter(pp);
5036 4893 PP_CLRRO(pp);
5037 4894 sfmmu_page_exit(pmtx);
5038 4895 }
5039 4896 }
5040 4897
5041 4898 if (ret > 0 && use_demap_range) {
5042 4899 DEMAP_RANGE_MARKPG(dmrp, addr);
5043 4900 } else if (ret > 0) {
5044 4901 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
5045 4902 }
5046 4903
5047 4904 if (pml) {
5048 4905 sfmmu_mlist_exit(pml);
5049 4906 }
5050 4907 }
5051 4908 next_addr:
5052 4909 addr += TTEBYTES(ttesz);
5053 4910 sfhmep++;
5054 4911 DEMAP_RANGE_NEXTPG(dmrp);
5055 4912 }
5056 4913 return (addr);
5057 4914 }
5058 4915
5059 4916 /*
5060 4917 * This routine converts virtual attributes to physical ones. It will
5061 4918 * update the tteflags field with the tte mask corresponding to the attributes
5062 4919 * affected and it returns the new attributes. It will also clear the modify
5063 4920 * bit if we are taking away write permission. This is necessary since the
5064 4921 * modify bit is the hardware permission bit and we need to clear it in order
5065 4922 * to detect write faults.
5066 4923 */
5067 4924 static uint64_t
5068 4925 sfmmu_vtop_attr(uint_t attr, int mode, tte_t *ttemaskp)
5069 4926 {
5070 4927 tte_t ttevalue;
5071 4928
5072 4929 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
5073 4930
5074 4931 switch (mode) {
5075 4932 case SFMMU_CHGATTR:
5076 4933 /* all attributes specified */
5077 4934 ttevalue.tte_inthi = MAKE_TTEATTR_INTHI(attr);
5078 4935 ttevalue.tte_intlo = MAKE_TTEATTR_INTLO(attr);
5079 4936 ttemaskp->tte_inthi = TTEINTHI_ATTR;
5080 4937 ttemaskp->tte_intlo = TTEINTLO_ATTR;
5081 4938 break;
5082 4939 case SFMMU_SETATTR:
5083 4940 ASSERT(!(attr & ~HAT_PROT_MASK));
5084 4941 ttemaskp->ll = 0;
5085 4942 ttevalue.ll = 0;
5086 4943 /*
5087 4944 * a valid tte implies exec and read for sfmmu
5088 4945 * so no need to do anything about them.
5089 4946 		 * since privileged access implies user access
5090 4947 * PROT_USER doesn't make sense either.
5091 4948 */
5092 4949 if (attr & PROT_WRITE) {
5093 4950 ttemaskp->tte_intlo |= TTE_WRPRM_INT;
5094 4951 ttevalue.tte_intlo |= TTE_WRPRM_INT;
5095 4952 }
5096 4953 break;
5097 4954 case SFMMU_CLRATTR:
5098 4955 /* attributes will be nand with current ones */
5099 4956 if (attr & ~(PROT_WRITE | PROT_USER)) {
5100 4957 panic("sfmmu: attr %x not supported", attr);
5101 4958 }
5102 4959 ttemaskp->ll = 0;
5103 4960 ttevalue.ll = 0;
5104 4961 if (attr & PROT_WRITE) {
5105 4962 /* clear both writable and modify bit */
5106 4963 ttemaskp->tte_intlo |= TTE_WRPRM_INT | TTE_HWWR_INT;
5107 4964 }
5108 4965 if (attr & PROT_USER) {
5109 4966 ttemaskp->tte_intlo |= TTE_PRIV_INT;
5110 4967 ttevalue.tte_intlo |= TTE_PRIV_INT;
5111 4968 }
5112 4969 break;
5113 4970 default:
5114 4971 panic("sfmmu_vtop_attr: bad mode %x", mode);
5115 4972 }
5116 4973 ASSERT(TTE_TO_TTEPFN(&ttevalue) == 0);
5117 4974 return (ttevalue.ll);
5118 4975 }
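
The (mask, value) pair produced here drives the tte update new = (old & ~mask) | value that sfmmu_hblk_chgattr performs above. A self-contained sketch of that mask/value idiom, assuming nothing beyond 64-bit words:

    #include <stdio.h>
    #include <stdint.h>

    /*
     * Apply a (mask, value) pair to a tte-like word: only the bits set
     * in 'mask' change, and they take the corresponding bits of 'value';
     * every other bit is preserved.
     */
    static uint64_t
    apply_mask_value(uint64_t old, uint64_t mask, uint64_t value)
    {
        return ((old & ~mask) | (value & mask));
    }

    int
    main(void)
    {
        uint64_t tte = 0xabcd0000ULL;
        uint64_t mask = 0x0000ff00ULL;  /* say, the permission bits */
        uint64_t value = 0x00001200ULL; /* their new contents */

        /* prints 0xabcd1200: untouched bits kept, masked bits replaced */
        printf("%#llx\n",
            (unsigned long long)apply_mask_value(tte, mask, value));
        return (0);
    }
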
5119 4976
5120 4977 static uint_t
5121 4978 sfmmu_ptov_attr(tte_t *ttep)
5122 4979 {
5123 4980 uint_t attr;
5124 4981
5125 4982 ASSERT(TTE_IS_VALID(ttep));
5126 4983
5127 4984 attr = PROT_READ;
5128 4985
5129 4986 if (TTE_IS_WRITABLE(ttep)) {
5130 4987 attr |= PROT_WRITE;
5131 4988 }
5132 4989 if (TTE_IS_EXECUTABLE(ttep)) {
5133 4990 attr |= PROT_EXEC;
5134 4991 }
5135 4992 if (!TTE_IS_PRIVILEGED(ttep)) {
5136 4993 attr |= PROT_USER;
5137 4994 }
5138 4995 if (TTE_IS_NFO(ttep)) {
5139 4996 attr |= HAT_NOFAULT;
5140 4997 }
5141 4998 if (TTE_IS_NOSYNC(ttep)) {
5142 4999 attr |= HAT_NOSYNC;
5143 5000 }
5144 5001 if (TTE_IS_SIDEFFECT(ttep)) {
5145 5002 attr |= SFMMU_SIDEFFECT;
5146 5003 }
5147 5004 if (!TTE_IS_VCACHEABLE(ttep)) {
5148 5005 attr |= SFMMU_UNCACHEVTTE;
5149 5006 }
5150 5007 if (!TTE_IS_PCACHEABLE(ttep)) {
5151 5008 attr |= SFMMU_UNCACHEPTTE;
5152 5009 }
5153 5010 return (attr);
5154 5011 }
5155 5012
5156 5013 /*
5157 5014 * hat_chgprot is a deprecated hat call. New segment drivers
5158 5015 * should store all attributes and use hat_*attr calls.
5159 5016 *
5160 5017 * Change the protections in the virtual address range
5161 5018 * given to the specified virtual protection. If vprot is ~PROT_WRITE,
5162 5019 * then remove write permission, leaving the other
5163 5020 * permissions unchanged. If vprot is ~PROT_USER, remove user permissions.
5164 5021 *
5165 5022 */
5166 5023 void
5167 5024 hat_chgprot(struct hat *sfmmup, caddr_t addr, size_t len, uint_t vprot)
5168 5025 {
5169 5026 struct hmehash_bucket *hmebp;
5170 5027 hmeblk_tag hblktag;
5171 5028 int hmeshift, hashno = 1;
5172 5029 struct hme_blk *hmeblkp, *list = NULL;
5173 5030 caddr_t endaddr;
5174 5031 cpuset_t cpuset;
5175 5032 demap_range_t dmr;
5176 5033
5177 5034 ASSERT((len & MMU_PAGEOFFSET) == 0);
5178 5035 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0);
5179 5036
5180 5037 ASSERT(sfmmup->sfmmu_as != NULL);
5181 5038
5182 5039 CPUSET_ZERO(cpuset);
5183 5040
5184 5041 if ((vprot != (uint_t)~PROT_WRITE) && (vprot & PROT_USER) &&
5185 5042 ((addr + len) > (caddr_t)USERLIMIT)) {
5186 5043 panic("user addr %p vprot %x in kernel space",
5187 5044 (void *)addr, vprot);
5188 5045 }
5189 5046 endaddr = addr + len;
5190 5047 hblktag.htag_id = sfmmup;
5191 5048 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
5192 5049 DEMAP_RANGE_INIT(sfmmup, &dmr);
5193 5050
5194 5051 while (addr < endaddr) {
5195 5052 hmeshift = HME_HASH_SHIFT(hashno);
5196 5053 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
5197 5054 hblktag.htag_rehash = hashno;
5198 5055 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
5199 5056
5200 5057 SFMMU_HASH_LOCK(hmebp);
5201 5058
5202 5059 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
5203 5060 if (hmeblkp != NULL) {
5204 5061 ASSERT(!hmeblkp->hblk_shared);
5205 5062 /*
5206 5063 * We've encountered a shadow hmeblk so skip the range
5207 5064 * of the next smaller mapping size.
5208 5065 */
5209 5066 if (hmeblkp->hblk_shw_bit) {
5210 5067 ASSERT(sfmmup != ksfmmup);
5211 5068 ASSERT(hashno > 1);
5212 5069 addr = (caddr_t)P2END((uintptr_t)addr,
5213 5070 TTEBYTES(hashno - 1));
5214 5071 } else {
5215 5072 addr = sfmmu_hblk_chgprot(sfmmup, hmeblkp,
5216 5073 addr, endaddr, &dmr, vprot);
5217 5074 }
5218 5075 SFMMU_HASH_UNLOCK(hmebp);
5219 5076 hashno = 1;
5220 5077 continue;
5221 5078 }
5222 5079 SFMMU_HASH_UNLOCK(hmebp);
5223 5080
5224 5081 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
5225 5082 /*
5226 5083 * We have traversed the whole list and rehashed
5227 5084 * if necessary without finding the address to chgprot.
5228 5085 			 * This is ok, so we increment the address by the
5229 5086 			 * smallest hmeblk range for kernel mappings, or by the
5230 5087 			 * largest hmeblk range (to account for shadow hmeblks)
5231 5088 			 * for user mappings, and continue.
5232 5089 */
5233 5090 if (sfmmup == ksfmmup)
5234 5091 addr = (caddr_t)P2END((uintptr_t)addr,
5235 5092 TTEBYTES(1));
5236 5093 else
5237 5094 addr = (caddr_t)P2END((uintptr_t)addr,
5238 5095 TTEBYTES(hashno));
5239 5096 hashno = 1;
5240 5097 } else {
5241 5098 hashno++;
5242 5099 }
5243 5100 }
5244 5101
5245 5102 sfmmu_hblks_list_purge(&list, 0);
5246 5103 DEMAP_RANGE_FLUSH(&dmr);
5247 5104 cpuset = sfmmup->sfmmu_cpusran;
5248 5105 xt_sync(cpuset);
5249 5106 }
5250 5107
5251 5108 /*
5252 5109  * This function changes protections on a range of addresses in an hmeblk. It
5253 5110  * returns the next address whose protections need to be changed.
5254 5111  * It should be called with the hash lock held.
5255 5112  * XXX It should be possible to optimize chgprot by not flushing every time but
5256 5113 * on the other hand:
5257 5114 * 1. do one flush crosscall.
5258 5115 * 2. only flush if we are increasing permissions (make sure this will work)
5259 5116 */
5260 5117 static caddr_t
5261 5118 sfmmu_hblk_chgprot(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
5262 5119 caddr_t endaddr, demap_range_t *dmrp, uint_t vprot)
5263 5120 {
5264 5121 uint_t pprot;
5265 5122 tte_t tte, ttemod;
5266 5123 struct sf_hment *sfhmep;
5267 5124 uint_t tteflags;
5268 5125 int ttesz;
5269 5126 struct page *pp = NULL;
5270 5127 kmutex_t *pml, *pmtx;
5271 5128 int ret;
5272 5129 int use_demap_range;
5273 5130 #if defined(SF_ERRATA_57)
5274 5131 int check_exec;
5275 5132 #endif
5276 5133
5277 5134 ASSERT(in_hblk_range(hmeblkp, addr));
5278 5135 ASSERT(hmeblkp->hblk_shw_bit == 0);
5279 5136 ASSERT(!hmeblkp->hblk_shared);
5280 5137
5281 5138 #ifdef DEBUG
5282 5139 if (get_hblk_ttesz(hmeblkp) != TTE8K &&
5283 5140 (endaddr < get_hblk_endaddr(hmeblkp))) {
5284 5141 panic("sfmmu_hblk_chgprot: partial chgprot of large page");
5285 5142 }
5286 5143 #endif /* DEBUG */
5287 5144
5288 5145 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
5289 5146 ttesz = get_hblk_ttesz(hmeblkp);
5290 5147
5291 5148 pprot = sfmmu_vtop_prot(vprot, &tteflags);
5292 5149 #if defined(SF_ERRATA_57)
5293 5150 check_exec = (sfmmup != ksfmmup) &&
5294 5151 AS_TYPE_64BIT(sfmmup->sfmmu_as) &&
5295 5152 ((vprot & PROT_EXEC) == PROT_EXEC);
5296 5153 #endif
5297 5154 HBLKTOHME(sfhmep, hmeblkp, addr);
5298 5155
5299 5156 /*
5300 5157 * Flush the current demap region if addresses have been
5301 5158 * skipped or the page size doesn't match.
5302 5159 */
5303 5160 use_demap_range = (TTEBYTES(ttesz) == MMU_PAGESIZE);
5304 5161 if (use_demap_range) {
5305 5162 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
5306 5163 } else if (dmrp != NULL) {
5307 5164 DEMAP_RANGE_FLUSH(dmrp);
5308 5165 }
5309 5166
5310 5167 while (addr < endaddr) {
5311 5168 sfmmu_copytte(&sfhmep->hme_tte, &tte);
5312 5169 if (TTE_IS_VALID(&tte)) {
5313 5170 if (TTE_GET_LOFLAGS(&tte, tteflags) == pprot) {
5314 5171 /*
5315 5172 * if the new protection is the same as old
5316 5173 * continue
5317 5174 */
5318 5175 goto next_addr;
5319 5176 }
5320 5177 pml = NULL;
5321 5178 pp = sfhmep->hme_page;
5322 5179 if (pp) {
5323 5180 pml = sfmmu_mlist_enter(pp);
5324 5181 }
5325 5182 if (pp != sfhmep->hme_page) {
5326 5183 /*
5327 5184 				 * tte must have been unloaded
5328 5185 				 * underneath us. Recheck.
5329 5186 */
5330 5187 ASSERT(pml);
5331 5188 sfmmu_mlist_exit(pml);
5332 5189 continue;
5333 5190 }
5334 5191
5335 5192 ASSERT(pp == NULL || sfmmu_mlist_held(pp));
5336 5193
5337 5194 ttemod = tte;
5338 5195 TTE_SET_LOFLAGS(&ttemod, tteflags, pprot);
5339 5196 #if defined(SF_ERRATA_57)
5340 5197 if (check_exec && addr < errata57_limit)
5341 5198 ttemod.tte_exec_perm = 0;
5342 5199 #endif
5343 5200 ret = sfmmu_modifytte_try(&tte, &ttemod,
5344 5201 &sfhmep->hme_tte);
5345 5202
5346 5203 if (ret < 0) {
5347 5204 /* tte changed underneath us */
5348 5205 if (pml) {
5349 5206 sfmmu_mlist_exit(pml);
5350 5207 }
5351 5208 continue;
5352 5209 }
5353 5210
5354 5211 if (tteflags & TTE_HWWR_INT) {
5355 5212 /*
5356 5213 * need to sync if we are clearing modify bit.
5357 5214 */
5358 5215 sfmmu_ttesync(sfmmup, addr, &tte, pp);
5359 5216 }
5360 5217
5361 5218 if (pp && PP_ISRO(pp)) {
5362 5219 if (pprot & TTE_WRPRM_INT) {
5363 5220 pmtx = sfmmu_page_enter(pp);
5364 5221 PP_CLRRO(pp);
5365 5222 sfmmu_page_exit(pmtx);
5366 5223 }
5367 5224 }
5368 5225
5369 5226 if (ret > 0 && use_demap_range) {
5370 5227 DEMAP_RANGE_MARKPG(dmrp, addr);
5371 5228 } else if (ret > 0) {
5372 5229 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
5373 5230 }
5374 5231
5375 5232 if (pml) {
5376 5233 sfmmu_mlist_exit(pml);
5377 5234 }
5378 5235 }
5379 5236 next_addr:
5380 5237 addr += TTEBYTES(ttesz);
5381 5238 sfhmep++;
5382 5239 DEMAP_RANGE_NEXTPG(dmrp);
5383 5240 }
5384 5241 return (addr);
5385 5242 }
5386 5243
5387 5244 /*
5388 5245 * This routine is deprecated and should only be used by hat_chgprot.
5389 5246 * The correct routine is sfmmu_vtop_attr.
5390 5247 * This routine converts virtual page protections to physical ones. It will
5391 5248 * update the tteflags field with the tte mask corresponding to the protections
5392 5249 * affected and it returns the new protections. It will also clear the modify
5393 5250 * bit if we are taking away write permission. This is necessary since the
5394 5251 * modify bit is the hardware permission bit and we need to clear it in order
5395 5252 * to detect write faults.
5396 5253 * It accepts the following special protections:
5397 5254 * ~PROT_WRITE = remove write permissions.
5398 5255 * ~PROT_USER = remove user permissions.
5399 5256 */
5400 5257 static uint_t
5401 5258 sfmmu_vtop_prot(uint_t vprot, uint_t *tteflagsp)
5402 5259 {
5403 5260 if (vprot == (uint_t)~PROT_WRITE) {
5404 5261 *tteflagsp = TTE_WRPRM_INT | TTE_HWWR_INT;
5405 5262 return (0); /* will cause wrprm to be cleared */
5406 5263 }
5407 5264 if (vprot == (uint_t)~PROT_USER) {
5408 5265 *tteflagsp = TTE_PRIV_INT;
5409 5266 return (0); /* will cause privprm to be cleared */
5410 5267 }
5411 5268 if ((vprot == 0) || (vprot == PROT_USER) ||
5412 5269 ((vprot & PROT_ALL) != vprot)) {
5413 5270 panic("sfmmu_vtop_prot -- bad prot %x", vprot);
5414 5271 }
5415 5272
5416 5273 switch (vprot) {
5417 5274 case (PROT_READ):
5418 5275 case (PROT_EXEC):
5419 5276 case (PROT_EXEC | PROT_READ):
5420 5277 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT;
5421 5278 return (TTE_PRIV_INT); /* set prv and clr wrt */
5422 5279 case (PROT_WRITE):
5423 5280 case (PROT_WRITE | PROT_READ):
5424 5281 case (PROT_EXEC | PROT_WRITE):
5425 5282 case (PROT_EXEC | PROT_WRITE | PROT_READ):
5426 5283 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT;
5427 5284 return (TTE_PRIV_INT | TTE_WRPRM_INT); /* set prv and wrt */
5428 5285 case (PROT_USER | PROT_READ):
5429 5286 case (PROT_USER | PROT_EXEC):
5430 5287 case (PROT_USER | PROT_EXEC | PROT_READ):
5431 5288 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT;
5432 5289 return (0); /* clr prv and wrt */
5433 5290 case (PROT_USER | PROT_WRITE):
5434 5291 case (PROT_USER | PROT_WRITE | PROT_READ):
5435 5292 case (PROT_USER | PROT_EXEC | PROT_WRITE):
5436 5293 case (PROT_USER | PROT_EXEC | PROT_WRITE | PROT_READ):
5437 5294 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT;
5438 5295 return (TTE_WRPRM_INT); /* clr prv and set wrt */
5439 5296 default:
5440 5297 panic("sfmmu_vtop_prot -- bad prot %x", vprot);
5441 5298 }
5442 5299 return (0);
5443 5300 }
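
The switch above encodes a small truth table: TTE_PRIV_INT is set when PROT_USER is absent, TTE_WRPRM_INT when PROT_WRITE is present, and TTE_HWWR_INT joins the mask whenever write permission is being removed, so the hardware modify bit is cleared too. A hedged sketch of that rule set for the ordinary PROT_* combinations (the T_* bit values are stand-ins; the real constants are MMU-defined):

    #include <stdio.h>

    /* Illustrative bit values only. */
    #define PROT_WRITE  0x2
    #define PROT_USER   0x8
    #define T_PRIV      0x1  /* stand-in for TTE_PRIV_INT */
    #define T_WRPRM     0x2  /* stand-in for TTE_WRPRM_INT */
    #define T_HWWR      0x4  /* stand-in for TTE_HWWR_INT */

    /*
     * Mirror of the rule set behind the big switch (ordinary PROT_*
     * combinations only, not the ~PROT_WRITE / ~PROT_USER special cases).
     */
    static unsigned int
    vtop_prot(unsigned int vprot, unsigned int *maskp)
    {
        unsigned int val = 0, mask = T_PRIV | T_WRPRM;

        if ((vprot & PROT_USER) == 0)
            val |= T_PRIV;      /* privileged-only mapping */
        if (vprot & PROT_WRITE)
            val |= T_WRPRM;     /* writable */
        else
            mask |= T_HWWR;     /* clr write: also clr hw mod bit */
        *maskp = mask;
        return (val);
    }

    int
    main(void)
    {
        unsigned int mask, val;

        val = vtop_prot(PROT_WRITE, &mask); /* kernel read/write */
        printf("mask=%#x val=%#x\n", mask, val);
        val = vtop_prot(PROT_USER, &mask);  /* user read-only */
        printf("mask=%#x val=%#x\n", mask, val);
        return (0);
    }
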
5444 5301
5445 5302 /*
5446 5303 * Alternate unload for very large virtual ranges. With a true 64 bit VA,
5447 5304 * the normal algorithm would take too long for a very large VA range with
5448 5305 * few real mappings. This routine just walks thru all HMEs in the global
5449 5306 * hash table to find and remove mappings.
5450 5307 */
5451 5308 static void
5452 5309 hat_unload_large_virtual(
5453 5310 struct hat *sfmmup,
5454 5311 caddr_t startaddr,
5455 5312 size_t len,
5456 5313 uint_t flags,
5457 5314 hat_callback_t *callback)
5458 5315 {
5459 5316 struct hmehash_bucket *hmebp;
5460 5317 struct hme_blk *hmeblkp;
5461 5318 struct hme_blk *pr_hblk = NULL;
5462 5319 struct hme_blk *nx_hblk;
5463 5320 struct hme_blk *list = NULL;
5464 5321 int i;
5465 5322 demap_range_t dmr, *dmrp;
5466 5323 cpuset_t cpuset;
5467 5324 caddr_t endaddr = startaddr + len;
5468 5325 caddr_t sa;
5469 5326 caddr_t ea;
5470 5327 caddr_t cb_sa[MAX_CB_ADDR];
5471 5328 caddr_t cb_ea[MAX_CB_ADDR];
5472 5329 int addr_cnt = 0;
5473 5330 int a = 0;
5474 5331
5475 5332 if (sfmmup->sfmmu_free) {
5476 5333 dmrp = NULL;
5477 5334 } else {
5478 5335 dmrp = &dmr;
5479 5336 DEMAP_RANGE_INIT(sfmmup, dmrp);
5480 5337 }
5481 5338
5482 5339 /*
5483 5340 * Loop through all the hash buckets of HME blocks looking for matches.
5484 5341 */
5485 5342 for (i = 0; i <= UHMEHASH_SZ; i++) {
5486 5343 hmebp = &uhme_hash[i];
5487 5344 SFMMU_HASH_LOCK(hmebp);
5488 5345 hmeblkp = hmebp->hmeblkp;
5489 5346 pr_hblk = NULL;
5490 5347 while (hmeblkp) {
5491 5348 nx_hblk = hmeblkp->hblk_next;
5492 5349
5493 5350 /*
5494 5351 * skip if not this context, if a shadow block or
5495 5352 * if the mapping is not in the requested range
5496 5353 */
5497 5354 if (hmeblkp->hblk_tag.htag_id != sfmmup ||
5498 5355 hmeblkp->hblk_shw_bit ||
5499 5356 (sa = (caddr_t)get_hblk_base(hmeblkp)) >= endaddr ||
5500 5357 (ea = get_hblk_endaddr(hmeblkp)) <= startaddr) {
5501 5358 pr_hblk = hmeblkp;
5502 5359 goto next_block;
5503 5360 }
5504 5361
5505 5362 ASSERT(!hmeblkp->hblk_shared);
5506 5363 /*
5507 5364 * unload if there are any current valid mappings
5508 5365 */
5509 5366 if (hmeblkp->hblk_vcnt != 0 ||
5510 5367 hmeblkp->hblk_hmecnt != 0)
5511 5368 (void) sfmmu_hblk_unload(sfmmup, hmeblkp,
5512 5369 sa, ea, dmrp, flags);
5513 5370
5514 5371 /*
5515 5372 * on unmap we also release the HME block itself, once
5516 5373 * all mappings are gone.
5517 5374 */
5518 5375 if ((flags & HAT_UNLOAD_UNMAP) != 0 &&
5519 5376 !hmeblkp->hblk_vcnt &&
5520 5377 !hmeblkp->hblk_hmecnt) {
5521 5378 ASSERT(!hmeblkp->hblk_lckcnt);
5522 5379 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
5523 5380 &list, 0);
5524 5381 } else {
5525 5382 pr_hblk = hmeblkp;
5526 5383 }
5527 5384
5528 5385 if (callback == NULL)
5529 5386 goto next_block;
5530 5387
5531 5388 /*
5532 5389 * HME blocks may span more than one page, but we may be
5533 5390 * unmapping only one page, so check for a smaller range
5534 5391 * for the callback
5535 5392 */
5536 5393 if (sa < startaddr)
5537 5394 sa = startaddr;
5538 5395 if (--ea > endaddr)
5539 5396 ea = endaddr - 1;
5540 5397
5541 5398 cb_sa[addr_cnt] = sa;
5542 5399 cb_ea[addr_cnt] = ea;
5543 5400 if (++addr_cnt == MAX_CB_ADDR) {
5544 5401 if (dmrp != NULL) {
5545 5402 DEMAP_RANGE_FLUSH(dmrp);
5546 5403 cpuset = sfmmup->sfmmu_cpusran;
5547 5404 xt_sync(cpuset);
5548 5405 }
5549 5406
5550 5407 for (a = 0; a < MAX_CB_ADDR; ++a) {
5551 5408 callback->hcb_start_addr = cb_sa[a];
5552 5409 callback->hcb_end_addr = cb_ea[a];
5553 5410 callback->hcb_function(callback);
5554 5411 }
5555 5412 addr_cnt = 0;
5556 5413 }
5557 5414
5558 5415 next_block:
5559 5416 hmeblkp = nx_hblk;
5560 5417 }
5561 5418 SFMMU_HASH_UNLOCK(hmebp);
5562 5419 }
5563 5420
5564 5421 sfmmu_hblks_list_purge(&list, 0);
5565 5422 if (dmrp != NULL) {
5566 5423 DEMAP_RANGE_FLUSH(dmrp);
5567 5424 cpuset = sfmmup->sfmmu_cpusran;
5568 5425 xt_sync(cpuset);
5569 5426 }
5570 5427
5571 5428 for (a = 0; a < addr_cnt; ++a) {
5572 5429 callback->hcb_start_addr = cb_sa[a];
5573 5430 callback->hcb_end_addr = cb_ea[a];
5574 5431 callback->hcb_function(callback);
5575 5432 }
5576 5433
5577 5434 /*
5578 5435 * Check TSB and TLB page sizes if the process isn't exiting.
5579 5436 */
5580 5437 if (!sfmmup->sfmmu_free)
5581 5438 sfmmu_check_page_sizes(sfmmup, 0);
5582 5439 }
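
To keep cross-call traffic down, the routine above batches up to MAX_CB_ADDR (start, end) pairs, pays for one demap flush plus xt_sync per batch, and only then runs the callbacks. A standalone sketch of that batching pattern (MAX_BATCH, do_sync, and do_callback are stand-ins):

    #include <stdio.h>
    #include <stddef.h>

    #define MAX_BATCH 4  /* stand-in for MAX_CB_ADDR */

    static void
    do_sync(void)
    {
        printf("-- one expensive flush + sync --\n");
    }

    static void
    do_callback(size_t sa, size_t ea)
    {
        printf("callback [%#lx, %#lx)\n", (unsigned long)sa,
            (unsigned long)ea);
    }

    int
    main(void)
    {
        size_t sa[MAX_BATCH], ea[MAX_BATCH];
        size_t va;
        int n = 0, i;

        /* Accumulate ranges; pay for the sync once per full batch. */
        for (va = 0; va < 10 * 0x1000; va += 0x1000) {
            sa[n] = va;
            ea[n] = va + 0x1000;
            if (++n == MAX_BATCH) {
                do_sync();
                for (i = 0; i < n; i++)
                    do_callback(sa[i], ea[i]);
                n = 0;
            }
        }
        /* Drain the final partial batch, as the routine's tail does. */
        if (n > 0) {
            do_sync();
            for (i = 0; i < n; i++)
                do_callback(sa[i], ea[i]);
        }
        return (0);
    }
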
5583 5440
5584 5441 /*
5585 5442 * Unload all the mappings in the range [addr..addr+len). addr and len must
5586 5443 * be MMU_PAGESIZE aligned.
5587 5444 */
5588 5445
5589 5446 extern struct seg *segkmap;
5590 5447 #define ISSEGKMAP(sfmmup, addr) (sfmmup == ksfmmup && \
5591 5448 segkmap->s_base <= (addr) && (addr) < (segkmap->s_base + segkmap->s_size))
5592 5449
5593 5450
5594 5451 void
5595 5452 hat_unload_callback(
5596 5453 struct hat *sfmmup,
5597 5454 caddr_t addr,
5598 5455 size_t len,
5599 5456 uint_t flags,
5600 5457 hat_callback_t *callback)
5601 5458 {
5602 5459 struct hmehash_bucket *hmebp;
5603 5460 hmeblk_tag hblktag;
5604 5461 int hmeshift, hashno, iskernel;
5605 5462 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL;
5606 5463 caddr_t endaddr;
5607 5464 cpuset_t cpuset;
5608 5465 int addr_count = 0;
5609 5466 int a;
5610 5467 caddr_t cb_start_addr[MAX_CB_ADDR];
5611 5468 caddr_t cb_end_addr[MAX_CB_ADDR];
5612 5469 int issegkmap = ISSEGKMAP(sfmmup, addr);
5613 5470 demap_range_t dmr, *dmrp;
5614 5471
5615 5472 ASSERT(sfmmup->sfmmu_as != NULL);
5616 5473
5617 5474 ASSERT((sfmmup == ksfmmup) || (flags & HAT_UNLOAD_OTHER) || \
5618 5475 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
5619 5476
5620 5477 ASSERT(sfmmup != NULL);
5621 5478 ASSERT((len & MMU_PAGEOFFSET) == 0);
5622 5479 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
5623 5480
5624 5481 /*
5625 5482 * Probing through a large VA range (say 63 bits) will be slow, even
5626 5483 * at 4 Meg steps between the probes. So, when the virtual address range
5627 5484 * is very large, search the HME entries for what to unload.
5628 5485 *
5629 5486 * len >> TTE_PAGE_SHIFT(TTE4M) is the # of 4Meg probes we'd need
5630 5487 *
5631 5488 * UHMEHASH_SZ is number of hash buckets to examine
5632 5489 *
5633 5490 */
5634 5491 if (sfmmup != KHATID && (len >> TTE_PAGE_SHIFT(TTE4M)) > UHMEHASH_SZ) {
5635 5492 hat_unload_large_virtual(sfmmup, addr, len, flags, callback);
5636 5493 return;
5637 5494 }
5638 5495
5639 5496 CPUSET_ZERO(cpuset);
5640 5497
5641 5498 /*
5642 5499 * If the process is exiting, we can save a lot of fuss since
5643 5500 * we'll flush the TLB when we free the ctx anyway.
5644 5501 */
5645 5502 if (sfmmup->sfmmu_free) {
5646 5503 dmrp = NULL;
5647 5504 } else {
5648 5505 dmrp = &dmr;
5649 5506 DEMAP_RANGE_INIT(sfmmup, dmrp);
5650 5507 }
5651 5508
5652 5509 endaddr = addr + len;
5653 5510 hblktag.htag_id = sfmmup;
5654 5511 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
5655 5512
5656 5513 /*
5657 5514 * It is likely for the vm to call unload over a wide range of
5658 5515 * addresses that are actually very sparsely populated by
5659 5516 * translations. In order to speed this up the sfmmu hat supports
5660 5517 * the concept of shadow hmeblks. Dummy large page hmeblks that
5661 5518 * correspond to actual small translations are allocated at tteload
5662 5519 * time and are referred to as shadow hmeblks. Now, during unload
5663 5520 * time, we first check if we have a shadow hmeblk for that
5664 5521 * translation. The absence of one means the corresponding address
5665 5522 * range is empty and can be skipped.
5666 5523 *
5667 5524 * The kernel is an exception to above statement and that is why
5668 5525 * we don't use shadow hmeblks and hash starting from the smallest
5669 5526 * page size.
5670 5527 */
5671 5528 if (sfmmup == KHATID) {
5672 5529 iskernel = 1;
5673 5530 hashno = TTE64K;
5674 5531 } else {
5675 5532 iskernel = 0;
5676 5533 if (mmu_page_sizes == max_mmu_page_sizes) {
5677 5534 hashno = TTE256M;
5678 5535 } else {
5679 5536 hashno = TTE4M;
5680 5537 }
5681 5538 }
5682 5539 while (addr < endaddr) {
5683 5540 hmeshift = HME_HASH_SHIFT(hashno);
5684 5541 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
5685 5542 hblktag.htag_rehash = hashno;
5686 5543 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
5687 5544
5688 5545 SFMMU_HASH_LOCK(hmebp);
5689 5546
5690 5547 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
5691 5548 if (hmeblkp == NULL) {
5692 5549 /*
5693 5550 			 * didn't find an hmeblk. skip the appropriate
5694 5551 * address range.
5695 5552 */
5696 5553 SFMMU_HASH_UNLOCK(hmebp);
5697 5554 if (iskernel) {
5698 5555 if (hashno < mmu_hashcnt) {
5699 5556 hashno++;
5700 5557 continue;
5701 5558 } else {
5702 5559 hashno = TTE64K;
5703 5560 addr = (caddr_t)roundup((uintptr_t)addr
5704 5561 + 1, MMU_PAGESIZE64K);
5705 5562 continue;
5706 5563 }
5707 5564 }
5708 5565 addr = (caddr_t)roundup((uintptr_t)addr + 1,
5709 5566 (1 << hmeshift));
5710 5567 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5711 5568 ASSERT(hashno == TTE64K);
5712 5569 continue;
5713 5570 }
5714 5571 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5715 5572 hashno = TTE512K;
5716 5573 continue;
5717 5574 }
5718 5575 if (mmu_page_sizes == max_mmu_page_sizes) {
5719 5576 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5720 5577 hashno = TTE4M;
5721 5578 continue;
5722 5579 }
5723 5580 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5724 5581 hashno = TTE32M;
5725 5582 continue;
5726 5583 }
5727 5584 hashno = TTE256M;
5728 5585 continue;
5729 5586 } else {
5730 5587 hashno = TTE4M;
5731 5588 continue;
5732 5589 }
5733 5590 }
5734 5591 ASSERT(hmeblkp);
5735 5592 ASSERT(!hmeblkp->hblk_shared);
5736 5593 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
5737 5594 /*
5738 5595 * If the valid count is zero we can skip the range
5739 5596 * mapped by this hmeblk.
5740 5597 * We free hblks in the case of HAT_UNMAP. HAT_UNMAP
5741 5598 * is used by segment drivers as a hint
5742 5599 * that the mapping resource won't be used any longer.
5743 5600 * The best example of this is during exit().
5744 5601 */
5745 5602 addr = (caddr_t)roundup((uintptr_t)addr + 1,
5746 5603 get_hblk_span(hmeblkp));
5747 5604 if ((flags & HAT_UNLOAD_UNMAP) ||
5748 5605 (iskernel && !issegkmap)) {
5749 5606 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
5750 5607 &list, 0);
5751 5608 }
5752 5609 SFMMU_HASH_UNLOCK(hmebp);
5753 5610
5754 5611 if (iskernel) {
5755 5612 hashno = TTE64K;
5756 5613 continue;
5757 5614 }
5758 5615 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5759 5616 ASSERT(hashno == TTE64K);
5760 5617 continue;
5761 5618 }
5762 5619 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5763 5620 hashno = TTE512K;
5764 5621 continue;
5765 5622 }
5766 5623 if (mmu_page_sizes == max_mmu_page_sizes) {
5767 5624 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5768 5625 hashno = TTE4M;
5769 5626 continue;
5770 5627 }
5771 5628 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5772 5629 hashno = TTE32M;
5773 5630 continue;
5774 5631 }
5775 5632 hashno = TTE256M;
5776 5633 continue;
5777 5634 } else {
5778 5635 hashno = TTE4M;
5779 5636 continue;
5780 5637 }
5781 5638 }
5782 5639 if (hmeblkp->hblk_shw_bit) {
5783 5640 /*
5784 5641 			 * If we encounter a shadow hmeblk we know there are
5785 5642 			 * smaller sized hmeblks mapping the same address space.
5786 5643 * Decrement the hash size and rehash.
5787 5644 */
5788 5645 ASSERT(sfmmup != KHATID);
5789 5646 hashno--;
5790 5647 SFMMU_HASH_UNLOCK(hmebp);
5791 5648 continue;
5792 5649 }
5793 5650
5794 5651 /*
5795 5652 * track callback address ranges.
5796 5653 * only start a new range when it's not contiguous
5797 5654 */
5798 5655 if (callback != NULL) {
5799 5656 if (addr_count > 0 &&
5800 5657 addr == cb_end_addr[addr_count - 1])
5801 5658 --addr_count;
5802 5659 else
5803 5660 cb_start_addr[addr_count] = addr;
5804 5661 }
5805 5662
5806 5663 addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr,
5807 5664 dmrp, flags);
5808 5665
5809 5666 if (callback != NULL)
5810 5667 cb_end_addr[addr_count++] = addr;
5811 5668
5812 5669 if (((flags & HAT_UNLOAD_UNMAP) || (iskernel && !issegkmap)) &&
5813 5670 !hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
5814 5671 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 0);
5815 5672 }
5816 5673 SFMMU_HASH_UNLOCK(hmebp);
5817 5674
5818 5675 /*
5819 5676 * Notify our caller as to exactly which pages
5820 5677 * have been unloaded. We do these in clumps,
5821 5678 * to minimize the number of xt_sync()s that need to occur.
5822 5679 */
5823 5680 if (callback != NULL && addr_count == MAX_CB_ADDR) {
5824 5681 if (dmrp != NULL) {
5825 5682 DEMAP_RANGE_FLUSH(dmrp);
5826 5683 cpuset = sfmmup->sfmmu_cpusran;
5827 5684 xt_sync(cpuset);
5828 5685 }
5829 5686
5830 5687 for (a = 0; a < MAX_CB_ADDR; ++a) {
5831 5688 callback->hcb_start_addr = cb_start_addr[a];
5832 5689 callback->hcb_end_addr = cb_end_addr[a];
5833 5690 callback->hcb_function(callback);
5834 5691 }
5835 5692 addr_count = 0;
5836 5693 }
5837 5694 if (iskernel) {
5838 5695 hashno = TTE64K;
5839 5696 continue;
5840 5697 }
5841 5698 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5842 5699 ASSERT(hashno == TTE64K);
5843 5700 continue;
5844 5701 }
5845 5702 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5846 5703 hashno = TTE512K;
5847 5704 continue;
5848 5705 }
5849 5706 if (mmu_page_sizes == max_mmu_page_sizes) {
5850 5707 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5851 5708 hashno = TTE4M;
5852 5709 continue;
5853 5710 }
5854 5711 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5855 5712 hashno = TTE32M;
5856 5713 continue;
5857 5714 }
5858 5715 hashno = TTE256M;
5859 5716 } else {
5860 5717 hashno = TTE4M;
5861 5718 }
5862 5719 }
5863 5720
5864 5721 sfmmu_hblks_list_purge(&list, 0);
5865 5722 if (dmrp != NULL) {
5866 5723 DEMAP_RANGE_FLUSH(dmrp);
5867 5724 cpuset = sfmmup->sfmmu_cpusran;
5868 5725 xt_sync(cpuset);
5869 5726 }
5870 5727 if (callback && addr_count != 0) {
5871 5728 for (a = 0; a < addr_count; ++a) {
5872 5729 callback->hcb_start_addr = cb_start_addr[a];
5873 5730 callback->hcb_end_addr = cb_end_addr[a];
5874 5731 callback->hcb_function(callback);
5875 5732 }
5876 5733 }
5877 5734
5878 5735 /*
5879 5736 * Check TSB and TLB page sizes if the process isn't exiting.
5880 5737 */
5881 5738 if (!sfmmup->sfmmu_free)
5882 5739 sfmmu_check_page_sizes(sfmmup, 0);
5883 5740 }
5884 5741
5885 5742 /*
5886 5743 * Unload all the mappings in the range [addr..addr+len). addr and len must
5887 5744 * be MMU_PAGESIZE aligned.
5888 5745 */
5889 5746 void
5890 5747 hat_unload(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags)
5891 5748 {
5892 5749 hat_unload_callback(sfmmup, addr, len, flags, NULL);
5893 5750 }
5894 5751
5895 5752
5896 5753 /*
5897 5754 * Find the largest mapping size for this page.
5898 5755 */
5899 5756 int
5900 5757 fnd_mapping_sz(page_t *pp)
5901 5758 {
5902 5759 int sz;
5903 5760 int p_index;
5904 5761
5905 5762 p_index = PP_MAPINDEX(pp);
5906 5763
5907 5764 sz = 0;
5908 5765 p_index >>= 1; /* don't care about 8K bit */
5909 5766 for (; p_index; p_index >>= 1) {
5910 5767 sz++;
5911 5768 }
5912 5769
5913 5770 return (sz);
5914 5771 }
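
fnd_mapping_sz scans the page's mapping-index bitmask: bit 0 stands for 8K mappings and each higher bit for the next larger page size, so the result is the position of the highest set bit once bit 0 is discarded. The same scan as a standalone sketch:

    #include <stdio.h>

    /*
     * Position of the highest set bit of p_index, ignoring bit 0 (the
     * 8K bit). A result of 0 means only 8K mappings exist.
     */
    static int
    fnd_mapping_sz_sketch(int p_index)
    {
        int sz = 0;

        p_index >>= 1;  /* don't care about the 8K bit */
        for (; p_index; p_index >>= 1)
            sz++;
        return (sz);
    }

    int
    main(void)
    {
        printf("%d\n", fnd_mapping_sz_sketch(0x1)); /* 8K only   -> 0 */
        printf("%d\n", fnd_mapping_sz_sketch(0x5)); /* 8K + 512K -> 2 */
        printf("%d\n", fnd_mapping_sz_sketch(0x9)); /* 8K + 4M   -> 3 */
        return (0);
    }
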
5915 5772
5916 5773 /*
5917 5774 * This function unloads a range of addresses for an hmeblk.
5918 5775 * It returns the next address to be unloaded.
5919 5776 * It should be called with the hash lock held.
5920 5777 */
5921 5778 static caddr_t
5922 5779 sfmmu_hblk_unload(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
5923 5780 caddr_t endaddr, demap_range_t *dmrp, uint_t flags)
5924 5781 {
5925 5782 tte_t tte, ttemod;
5926 5783 struct sf_hment *sfhmep;
5927 5784 int ttesz;
5928 5785 long ttecnt;
5929 5786 page_t *pp;
5930 5787 kmutex_t *pml;
5931 5788 int ret;
5932 5789 int use_demap_range;
5933 5790
5934 5791 ASSERT(in_hblk_range(hmeblkp, addr));
5935 5792 ASSERT(!hmeblkp->hblk_shw_bit);
5936 5793 ASSERT(sfmmup != NULL || hmeblkp->hblk_shared);
5937 5794 ASSERT(sfmmup == NULL || !hmeblkp->hblk_shared);
5938 5795 ASSERT(dmrp == NULL || !hmeblkp->hblk_shared);
5939 5796
5940 5797 #ifdef DEBUG
5941 5798 if (get_hblk_ttesz(hmeblkp) != TTE8K &&
5942 5799 (endaddr < get_hblk_endaddr(hmeblkp))) {
5943 5800 panic("sfmmu_hblk_unload: partial unload of large page");
5944 5801 }
5945 5802 #endif /* DEBUG */
5946 5803
5947 5804 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
5948 5805 ttesz = get_hblk_ttesz(hmeblkp);
5949 5806
5950 5807 use_demap_range = ((dmrp == NULL) ||
5951 5808 (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp)));
5952 5809
5953 5810 if (use_demap_range) {
5954 5811 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
5955 5812 } else if (dmrp != NULL) {
5956 5813 DEMAP_RANGE_FLUSH(dmrp);
5957 5814 }
5958 5815 ttecnt = 0;
5959 5816 HBLKTOHME(sfhmep, hmeblkp, addr);
5960 5817
5961 5818 while (addr < endaddr) {
5962 5819 pml = NULL;
5963 5820 sfmmu_copytte(&sfhmep->hme_tte, &tte);
5964 5821 if (TTE_IS_VALID(&tte)) {
5965 5822 pp = sfhmep->hme_page;
5966 5823 if (pp != NULL) {
5967 5824 pml = sfmmu_mlist_enter(pp);
5968 5825 }
5969 5826
5970 5827 /*
5971 5828 * Verify if hme still points to 'pp' now that
5972 5829 * we have p_mapping lock.
5973 5830 */
5974 5831 if (sfhmep->hme_page != pp) {
5975 5832 if (pp != NULL && sfhmep->hme_page != NULL) {
5976 5833 ASSERT(pml != NULL);
5977 5834 sfmmu_mlist_exit(pml);
5978 5835 /* Re-start this iteration. */
5979 5836 continue;
5980 5837 }
5981 5838 ASSERT((pp != NULL) &&
5982 5839 (sfhmep->hme_page == NULL));
5983 5840 goto tte_unloaded;
5984 5841 }
5985 5842
5986 5843 /*
5987 5844 * This point on we have both HASH and p_mapping
5988 5845 * lock.
5989 5846 */
5990 5847 ASSERT(pp == sfhmep->hme_page);
5991 5848 ASSERT(pp == NULL || sfmmu_mlist_held(pp));
5992 5849
5993 5850 /*
5994 5851 * We need to loop on modify tte because it is
5995 5852 * possible for pagesync to come along and
5996 5853 * change the software bits beneath us.
5997 5854 *
5998 5855 * Page_unload can also invalidate the tte after
5999 5856 * we read tte outside of p_mapping lock.
6000 5857 */
6001 5858 again:
6002 5859 ttemod = tte;
6003 5860
6004 5861 TTE_SET_INVALID(&ttemod);
6005 5862 ret = sfmmu_modifytte_try(&tte, &ttemod,
6006 5863 &sfhmep->hme_tte);
6007 5864
6008 5865 if (ret <= 0) {
6009 5866 if (TTE_IS_VALID(&tte)) {
6010 5867 ASSERT(ret < 0);
6011 5868 goto again;
6012 5869 }
6013 5870 if (pp != NULL) {
6014 5871 panic("sfmmu_hblk_unload: pp = 0x%p "
6015 5872 "tte became invalid under mlist"
6016 5873 " lock = 0x%p", (void *)pp,
6017 5874 (void *)pml);
6018 5875 }
6019 5876 continue;
6020 5877 }
6021 5878
6022 5879 if (!(flags & HAT_UNLOAD_NOSYNC)) {
6023 5880 sfmmu_ttesync(sfmmup, addr, &tte, pp);
6024 5881 }
6025 5882
6026 5883 /*
6027 5884 			 * Ok, we invalidated the tte. Do the rest of the job.
6028 5885 */
6029 5886 ttecnt++;
6030 5887
6031 5888 if (flags & HAT_UNLOAD_UNLOCK) {
6032 5889 ASSERT(hmeblkp->hblk_lckcnt > 0);
6033 5890 atomic_dec_32(&hmeblkp->hblk_lckcnt);
6034 5891 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
6035 5892 }
6036 5893
6037 5894 /*
6038 5895 * Normally we would need to flush the page
6039 5896 * from the virtual cache at this point in
6040 5897 * order to prevent a potential cache alias
6041 5898 * inconsistency.
6042 5899 * The particular scenario we need to worry
6043 5900 * about is:
6044 5901 * Given: va1 and va2 are two virtual address
6045 5902 * that alias and map the same physical
6046 5903 * address.
6047 5904 * 1. mapping exists from va1 to pa and data
6048 5905 * has been read into the cache.
6049 5906 * 2. unload va1.
6050 5907 * 3. load va2 and modify data using va2.
6051 5908 * 4 unload va2.
6052 5909 * 5. load va1 and reference data. Unless we
6053 5910 * flush the data cache when we unload we will
6054 5911 * get stale data.
6055 5912 * Fortunately, page coloring eliminates the
6056 5913 * above scenario by remembering the color a
6057 5914 * physical page was last or is currently
6058 5915 * mapped to. Now, we delay the flush until
6059 5916 * the loading of translations. Only when the
6060 5917 * new translation is of a different color
6061 5918 * are we forced to flush.
6062 5919 */
6063 5920 if (use_demap_range) {
6064 5921 /*
6065 5922 * Mark this page as needing a demap.
6066 5923 */
6067 5924 DEMAP_RANGE_MARKPG(dmrp, addr);
6068 5925 } else {
6069 5926 ASSERT(sfmmup != NULL);
6070 5927 ASSERT(!hmeblkp->hblk_shared);
6071 5928 sfmmu_tlb_demap(addr, sfmmup, hmeblkp,
6072 5929 sfmmup->sfmmu_free, 0);
6073 5930 }
6074 5931
6075 5932 if (pp) {
6076 5933 /*
6077 5934 * Remove the hment from the mapping list
6078 5935 */
6079 5936 ASSERT(hmeblkp->hblk_hmecnt > 0);
6080 5937
6081 5938 /*
6082 5939 * Again, we cannot
6083 5940 * ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS);
6084 5941 */
6085 5942 HME_SUB(sfhmep, pp);
6086 5943 membar_stst();
6087 5944 atomic_dec_16(&hmeblkp->hblk_hmecnt);
6088 5945 }
6089 5946
6090 5947 ASSERT(hmeblkp->hblk_vcnt > 0);
6091 5948 atomic_dec_16(&hmeblkp->hblk_vcnt);
6092 5949
6093 5950 ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
6094 5951 !hmeblkp->hblk_lckcnt);
6095 5952
6096 5953 #ifdef VAC
6097 5954 if (pp && (pp->p_nrm & (P_KPMC | P_KPMS | P_TNC))) {
6098 5955 if (PP_ISTNC(pp)) {
6099 5956 /*
6100 5957 					 * If page was temporarily
6101 5958 * uncached, try to recache
6102 5959 * it. Note that HME_SUB() was
6103 5960 * called above so p_index and
6104 5961 * mlist had been updated.
6105 5962 */
6106 5963 conv_tnc(pp, ttesz);
6107 5964 } else if (pp->p_mapping == NULL) {
6108 5965 ASSERT(kpm_enable);
6109 5966 /*
6110 5967 * Page is marked to be in VAC conflict
6111 5968 * to an existing kpm mapping and/or is
6112 5969 * kpm mapped using only the regular
6113 5970 * pagesize.
6114 5971 */
6115 5972 sfmmu_kpm_hme_unload(pp);
6116 5973 }
6117 5974 }
6118 5975 #endif /* VAC */
6119 5976 } else if ((pp = sfhmep->hme_page) != NULL) {
6120 5977 /*
6121 5978 * TTE is invalid but the hme
6122 5979 * still exists. let pageunload
6123 5980 			 * still exists. Let pageunload
6124 5981 */
6125 5982 ASSERT(pml == NULL);
6126 5983 pml = sfmmu_mlist_enter(pp);
6127 5984 if (sfhmep->hme_page != NULL) {
6128 5985 sfmmu_mlist_exit(pml);
6129 5986 continue;
6130 5987 }
6131 5988 ASSERT(sfhmep->hme_page == NULL);
6132 5989 } else if (hmeblkp->hblk_hmecnt != 0) {
6133 5990 /*
6134 5991 			 * pageunload may not have finished decrementing
6135 5992 * hblk_vcnt and hblk_hmecnt. Find page_t if any and
6136 5993 * wait for pageunload to finish. Rely on pageunload
6137 5994 * to decrement hblk_hmecnt after hblk_vcnt.
6138 5995 */
6139 5996 pfn_t pfn = TTE_TO_TTEPFN(&tte);
6140 5997 ASSERT(pml == NULL);
6141 5998 if (pf_is_memory(pfn)) {
6142 5999 pp = page_numtopp_nolock(pfn);
6143 6000 if (pp != NULL) {
6144 6001 pml = sfmmu_mlist_enter(pp);
6145 6002 sfmmu_mlist_exit(pml);
6146 6003 pml = NULL;
6147 6004 }
6148 6005 }
6149 6006 }
6150 6007
6151 6008 tte_unloaded:
6152 6009 /*
6153 6010 * At this point, the tte we are looking at
6154 6011 * should be unloaded, and hme has been unlinked
6155 6012 * from page too. This is important because in
6156 6013 * pageunload, it does ttesync() then HME_SUB.
6157 6014 * We need to make sure HME_SUB has been completed
6158 6015 * so we know ttesync() has been completed. Otherwise,
6159 6016 		 * at exit time, after return from the hat layer, VM will
6160 6017 		 * release the as structure, which hat_setstat() (called
6161 6018 		 * by ttesync()) needs.
6162 6019 */
6163 6020 #ifdef DEBUG
6164 6021 {
6165 6022 tte_t dtte;
6166 6023
6167 6024 ASSERT(sfhmep->hme_page == NULL);
6168 6025
6169 6026 sfmmu_copytte(&sfhmep->hme_tte, &dtte);
6170 6027 ASSERT(!TTE_IS_VALID(&dtte));
6171 6028 }
6172 6029 #endif
6173 6030
6174 6031 if (pml) {
6175 6032 sfmmu_mlist_exit(pml);
6176 6033 }
6177 6034
6178 6035 addr += TTEBYTES(ttesz);
6179 6036 sfhmep++;
6180 6037 DEMAP_RANGE_NEXTPG(dmrp);
6181 6038 }
6182 6039 /*
6183 6040 * For shared hmeblks this routine is only called when region is freed
6184 6041 * and no longer referenced. So no need to decrement ttecnt
6185 6042 * in the region structure here.
6186 6043 */
6187 6044 if (ttecnt > 0 && sfmmup != NULL) {
6188 6045 atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -ttecnt);
6189 6046 }
6190 6047 return (addr);
6191 6048 }
6192 6049
6193 6050 /*
6194 6051 * Invalidate a virtual address range for the local CPU.
6195 6052 * For best performance ensure that the va range is completely
6196 6053 * mapped, otherwise the entire TLB will be flushed.
6197 6054 */
6198 6055 void
6199 6056 hat_flush_range(struct hat *sfmmup, caddr_t va, size_t size)
6200 6057 {
6201 6058 ssize_t sz;
6202 6059 caddr_t endva = va + size;
6203 6060
6204 6061 while (va < endva) {
6205 6062 sz = hat_getpagesize(sfmmup, va);
6206 6063 if (sz < 0) {
6207 6064 vtag_flushall();
6208 6065 break;
6209 6066 }
6210 6067 vtag_flushpage(va, (uint64_t)sfmmup);
6211 6068 va += sz;
6212 6069 }
6213 6070 }
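
The loop above advances by whatever page size actually maps each address and falls back to flushing the whole TLB as soon as it hits an unmapped hole. A self-contained sketch of that walk with stubbed lookups and flushes (getpagesize_stub, flush_page, and flush_all are hypothetical):

    #include <stdio.h>
    #include <stddef.h>

    /* Hypothetical: page size backing 'va', or -1 when unmapped. */
    static long
    getpagesize_stub(size_t va)
    {
        return ((va < 0x6000) ? 0x2000 : -1);  /* 8K pages below 0x6000 */
    }

    static void
    flush_page(size_t va)
    {
        printf("flush page %#zx\n", va);
    }

    static void
    flush_all(void)
    {
        printf("flush entire TLB\n");
    }

    static void
    flush_range(size_t va, size_t size)
    {
        size_t end = va + size;

        while (va < end) {
            long sz = getpagesize_stub(va);

            if (sz < 0) {       /* hole: cheaper to flush everything */
                flush_all();
                break;
            }
            flush_page(va);
            va += (size_t)sz;   /* step by the actual mapping size */
        }
    }

    int
    main(void)
    {
        flush_range(0x2000, 0x6000); /* two page flushes, then full flush */
        return (0);
    }
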
6214 6071
6215 6072 /*
6216 6073 * Synchronize all the mappings in the range [addr..addr+len).
6217 6074 * Can be called with clearflag having two states:
6218 6075 * HAT_SYNC_DONTZERO means just return the rm stats
6219 6076 * HAT_SYNC_ZERORM means zero rm bits in the tte and return the stats
6220 6077 */
6221 6078 void
6222 6079 hat_sync(struct hat *sfmmup, caddr_t addr, size_t len, uint_t clearflag)
6223 6080 {
6224 6081 struct hmehash_bucket *hmebp;
6225 6082 hmeblk_tag hblktag;
6226 6083 int hmeshift, hashno = 1;
6227 6084 struct hme_blk *hmeblkp, *list = NULL;
6228 6085 caddr_t endaddr;
6229 6086 cpuset_t cpuset;
6230 6087
6231 6088 ASSERT((sfmmup == ksfmmup) ||
6232 6089 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
6233 6090 ASSERT((len & MMU_PAGEOFFSET) == 0);
6234 6091 ASSERT((clearflag == HAT_SYNC_DONTZERO) ||
6235 6092 (clearflag == HAT_SYNC_ZERORM));
6236 6093
6237 6094 CPUSET_ZERO(cpuset);
6238 6095
6239 6096 endaddr = addr + len;
6240 6097 hblktag.htag_id = sfmmup;
6241 6098 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
6242 6099
6243 6100 /*
6244 6101 * Spitfire supports 4 page sizes.
6245 6102 * Most pages are expected to be of the smallest page
6246 6103 * size (8K) and these will not need to be rehashed. 64K
6247 6104 	 * pages also don't need to be rehashed because an hmeblk
6248 6105 	 * spans 64K of address space. 512K pages might need 1 rehash
6249 6106 	 * and 4M pages 2 rehashes.
6250 6107 */
6251 6108 while (addr < endaddr) {
6252 6109 hmeshift = HME_HASH_SHIFT(hashno);
6253 6110 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
6254 6111 hblktag.htag_rehash = hashno;
6255 6112 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
6256 6113
6257 6114 SFMMU_HASH_LOCK(hmebp);
6258 6115
6259 6116 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
6260 6117 if (hmeblkp != NULL) {
6261 6118 ASSERT(!hmeblkp->hblk_shared);
6262 6119 /*
6263 6120 * We've encountered a shadow hmeblk so skip the range
6264 6121 * of the next smaller mapping size.
6265 6122 */
6266 6123 if (hmeblkp->hblk_shw_bit) {
6267 6124 ASSERT(sfmmup != ksfmmup);
6268 6125 ASSERT(hashno > 1);
6269 6126 addr = (caddr_t)P2END((uintptr_t)addr,
6270 6127 TTEBYTES(hashno - 1));
6271 6128 } else {
6272 6129 addr = sfmmu_hblk_sync(sfmmup, hmeblkp,
6273 6130 addr, endaddr, clearflag);
6274 6131 }
6275 6132 SFMMU_HASH_UNLOCK(hmebp);
6276 6133 hashno = 1;
6277 6134 continue;
6278 6135 }
6279 6136 SFMMU_HASH_UNLOCK(hmebp);
6280 6137
6281 6138 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
6282 6139 /*
6283 6140 * We have traversed the whole list and rehashed
6284 6141 * if necessary without finding the address to sync.
6285 6142 			 * This is ok, so we increment the address by the
6286 6143 			 * smallest hmeblk range for kernel mappings, or by the
6287 6144 			 * largest hmeblk range (to account for shadow hmeblks)
6288 6145 			 * for user mappings, and continue.
6289 6146 */
6290 6147 if (sfmmup == ksfmmup)
6291 6148 addr = (caddr_t)P2END((uintptr_t)addr,
6292 6149 TTEBYTES(1));
6293 6150 else
6294 6151 addr = (caddr_t)P2END((uintptr_t)addr,
6295 6152 TTEBYTES(hashno));
6296 6153 hashno = 1;
6297 6154 } else {
6298 6155 hashno++;
6299 6156 }
6300 6157 }
6301 6158 sfmmu_hblks_list_purge(&list, 0);
6302 6159 cpuset = sfmmup->sfmmu_cpusran;
6303 6160 xt_sync(cpuset);
6304 6161 }
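
The clearflag argument selects between merely reading the ref/mod state (HAT_SYNC_DONTZERO) and reading it while atomically clearing it in the tte (HAT_SYNC_ZERORM, implemented above via the sfmmu_modifytte_try retry loop). A minimal sketch of that read-versus-read-and-clear distinction using C11 atomics in place of the tte machinery (names and bit values are illustrative):

    #include <stdio.h>
    #include <stdatomic.h>

    #define RM_REF 0x1
    #define RM_MOD 0x2

    /*
     * Harvest the ref/mod bits of a tte-like word; optionally clear
     * them atomically, the way HAT_SYNC_ZERORM does.
     */
    static unsigned int
    harvest_rm(_Atomic unsigned int *tte, int zero_rm)
    {
        if (zero_rm)
            return (atomic_fetch_and(tte,
                ~(unsigned int)(RM_REF | RM_MOD)) & (RM_REF | RM_MOD));
        return (atomic_load(tte) & (RM_REF | RM_MOD));
    }

    int
    main(void)
    {
        _Atomic unsigned int tte = 0x10 | RM_REF | RM_MOD;

        printf("peek: rm=%#x\n", harvest_rm(&tte, 0)); /* bits still set */
        printf("zero: rm=%#x\n", harvest_rm(&tte, 1)); /* read + cleared */
        printf("peek: rm=%#x\n", harvest_rm(&tte, 0)); /* now 0 */
        return (0);
    }
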
6305 6162
6306 6163 static caddr_t
6307 6164 sfmmu_hblk_sync(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
6308 6165 caddr_t endaddr, int clearflag)
6309 6166 {
6310 6167 tte_t tte, ttemod;
6311 6168 struct sf_hment *sfhmep;
6312 6169 int ttesz;
6313 6170 struct page *pp;
6314 6171 kmutex_t *pml;
6315 6172 int ret;
6316 6173
6317 6174 ASSERT(hmeblkp->hblk_shw_bit == 0);
6318 6175 ASSERT(!hmeblkp->hblk_shared);
6319 6176
6320 6177 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
6321 6178
6322 6179 ttesz = get_hblk_ttesz(hmeblkp);
6323 6180 HBLKTOHME(sfhmep, hmeblkp, addr);
6324 6181
6325 6182 while (addr < endaddr) {
6326 6183 sfmmu_copytte(&sfhmep->hme_tte, &tte);
6327 6184 if (TTE_IS_VALID(&tte)) {
6328 6185 pml = NULL;
6329 6186 pp = sfhmep->hme_page;
6330 6187 if (pp) {
6331 6188 pml = sfmmu_mlist_enter(pp);
6332 6189 }
6333 6190 if (pp != sfhmep->hme_page) {
6334 6191 /*
6335 6192 * tte most have been unloaded
6336 6193 * underneath us. Recheck
6337 6194 */
6338 6195 ASSERT(pml);
6339 6196 sfmmu_mlist_exit(pml);
6340 6197 continue;
6341 6198 }
6342 6199
6343 6200 ASSERT(pp == NULL || sfmmu_mlist_held(pp));
6344 6201
6345 6202 if (clearflag == HAT_SYNC_ZERORM) {
6346 6203 ttemod = tte;
6347 6204 TTE_CLR_RM(&ttemod);
6348 6205 ret = sfmmu_modifytte_try(&tte, &ttemod,
6349 6206 &sfhmep->hme_tte);
6350 6207 if (ret < 0) {
6351 6208 if (pml) {
6352 6209 sfmmu_mlist_exit(pml);
6353 6210 }
6354 6211 continue;
6355 6212 }
6356 6213
6357 6214 if (ret > 0) {
6358 6215 sfmmu_tlb_demap(addr, sfmmup,
6359 6216 hmeblkp, 0, 0);
6360 6217 }
6361 6218 }
6362 6219 sfmmu_ttesync(sfmmup, addr, &tte, pp);
6363 6220 if (pml) {
6364 6221 sfmmu_mlist_exit(pml);
6365 6222 }
6366 6223 }
6367 6224 addr += TTEBYTES(ttesz);
6368 6225 sfhmep++;
6369 6226 }
6370 6227 return (addr);
6371 6228 }
6372 6229
6373 6230 /*
6374 6231 * This function will sync a tte to the page struct and it will
6375 6232 * update the hat stats. Currently it allows us to pass a NULL pp
6376 6233 * and we will simply update the stats. We may want to change this
6377 6234 * so we only keep stats for pages backed by pp's.
6378 6235 */
6379 6236 static void
6380 6237 sfmmu_ttesync(struct hat *sfmmup, caddr_t addr, tte_t *ttep, page_t *pp)
6381 6238 {
6382 6239 uint_t rm = 0;
6383 6240 int sz;
6384 6241 pgcnt_t npgs;
6385 6242
6386 6243 ASSERT(TTE_IS_VALID(ttep));
6387 6244
6388 6245 if (TTE_IS_NOSYNC(ttep)) {
6389 6246 return;
6390 6247 }
6391 6248
6392 6249 if (TTE_IS_REF(ttep)) {
6393 6250 rm = P_REF;
6394 6251 }
6395 6252 if (TTE_IS_MOD(ttep)) {
6396 6253 rm |= P_MOD;
6397 6254 }
6398 6255
6399 6256 if (rm == 0) {
6400 6257 return;
6401 6258 }
6402 6259
6403 6260 sz = TTE_CSZ(ttep);
6404 6261 if (sfmmup != NULL && sfmmup->sfmmu_rmstat) {
6405 6262 int i;
6406 6263 caddr_t vaddr = addr;
6407 6264
6408 6265 for (i = 0; i < TTEPAGES(sz); i++, vaddr += MMU_PAGESIZE) {
6409 6266 hat_setstat(sfmmup->sfmmu_as, vaddr, MMU_PAGESIZE, rm);
6410 6267 }
6411 6268
6412 6269 }
6413 6270
6414 6271 /*
6415 6272 * XXX I want to use cas to update nrm bits but they
6416 6273 * currently belong in common/vm and not in hat where
6417 6274 * they should be.
6418 6275 * The nrm bits are protected by the same mutex as
6419 6276 * the one that protects the page's mapping list.
6420 6277 */
6421 6278 if (!pp)
6422 6279 return;
6423 6280 ASSERT(sfmmu_mlist_held(pp));
6424 6281 /*
6425 6282 * If the tte is for a large page, we need to sync all the
6426 6283 * pages covered by the tte.
6427 6284 */
6428 6285 if (sz != TTE8K) {
6429 6286 ASSERT(pp->p_szc != 0);
6430 6287 pp = PP_GROUPLEADER(pp, sz);
6431 6288 ASSERT(sfmmu_mlist_held(pp));
6432 6289 }
6433 6290
6434 6291 /* Get number of pages from tte size. */
6435 6292 npgs = TTEPAGES(sz);
6436 6293
6437 6294 do {
6438 6295 ASSERT(pp);
6439 6296 ASSERT(sfmmu_mlist_held(pp));
6440 6297 if (((rm & P_REF) != 0 && !PP_ISREF(pp)) ||
6441 6298 ((rm & P_MOD) != 0 && !PP_ISMOD(pp)))
6442 6299 hat_page_setattr(pp, rm);
6443 6300
6444 6301 /*
6445 6302 * Are we done? If not, we must have a large mapping.
6446 6303 * For large mappings we need to sync the rest of the pages
6447 6304 * covered by this tte; goto the next page.
6448 6305 */
6449 6306 } while (--npgs > 0 && (pp = PP_PAGENEXT(pp)));
6450 6307 }
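
For a large-page tte the harvested ref/mod bits must reach every constituent 8K page_t, which is why the routine walks npgs = TTEPAGES(sz) pages starting from the group leader. A sketch of that fan-out over a plain array (the flag values and the eight-page count are illustrative):

    #include <stdio.h>

    #define P_REF 0x1
    #define P_MOD 0x2

    /*
     * Propagate ref/mod bits harvested from one large-page tte to each
     * constituent small page, as sfmmu_ttesync does via PP_PAGENEXT().
     */
    static void
    sync_rm(unsigned char *pages, int npgs, unsigned int rm)
    {
        int i;

        for (i = 0; i < npgs; i++)
            pages[i] |= rm;
    }

    int
    main(void)
    {
        unsigned char pages[8] = { 0 };  /* a 64K tte covers 8 x 8K pages */
        int i;

        sync_rm(pages, 8, P_REF | P_MOD);
        for (i = 0; i < 8; i++)
            printf("page %d: rm=%#x\n", i, pages[i]);
        return (0);
    }
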
6451 6308
6452 6309 /*
6453 6310 * Execute pre-callback handler of each pa_hment linked to pp
6454 6311 *
6455 6312 * Inputs:
6456 6313 * flag: either HAT_PRESUSPEND or HAT_SUSPEND.
6457 6314 * capture_cpus: pointer to return value (below)
6458 6315 *
6459 6316 * Returns:
6460 6317 * Propagates the subsystem callback return values back to the caller;
6461 6318 * returns 0 on success. If capture_cpus is non-NULL, the value returned
6462 6319 * is zero if all of the pa_hments are of a type that do not require
6463 6320 * capturing CPUs prior to suspending the mapping, else it is 1.
6464 6321 */
6465 6322 static int
6466 6323 hat_pageprocess_precallbacks(struct page *pp, uint_t flag, int *capture_cpus)
6467 6324 {
6468 6325 struct sf_hment *sfhmep;
6469 6326 struct pa_hment *pahmep;
6470 6327 int (*f)(caddr_t, uint_t, uint_t, void *);
6471 6328 int ret;
6472 6329 id_t id;
6473 6330 int locked = 0;
6474 6331 kmutex_t *pml;
6475 6332
6476 6333 ASSERT(PAGE_EXCL(pp));
6477 6334 if (!sfmmu_mlist_held(pp)) {
6478 6335 pml = sfmmu_mlist_enter(pp);
6479 6336 locked = 1;
6480 6337 }
6481 6338
6482 6339 if (capture_cpus)
6483 6340 *capture_cpus = 0;
6484 6341
6485 6342 top:
6486 6343 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
6487 6344 /*
6488 6345 * skip sf_hments corresponding to VA<->PA mappings;
6489 6346 * for pa_hment's, hme_tte.ll is zero
6490 6347 */
6491 6348 if (!IS_PAHME(sfhmep))
6492 6349 continue;
6493 6350
6494 6351 pahmep = sfhmep->hme_data;
6495 6352 ASSERT(pahmep != NULL);
6496 6353
6497 6354 /*
6498 6355 * skip if pre-handler has been called earlier in this loop
6499 6356 */
6500 6357 if (pahmep->flags & flag)
6501 6358 continue;
6502 6359
6503 6360 id = pahmep->cb_id;
6504 6361 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid);
6505 6362 if (capture_cpus && sfmmu_cb_table[id].capture_cpus != 0)
6506 6363 *capture_cpus = 1;
6507 6364 if ((f = sfmmu_cb_table[id].prehandler) == NULL) {
6508 6365 pahmep->flags |= flag;
6509 6366 continue;
6510 6367 }
6511 6368
6512 6369 /*
6513 6370 * Drop the mapping list lock to avoid locking order issues.
6514 6371 */
6515 6372 if (locked)
6516 6373 sfmmu_mlist_exit(pml);
6517 6374
6518 6375 ret = f(pahmep->addr, pahmep->len, flag, pahmep->pvt);
6519 6376 if (ret != 0)
6520 6377 return (ret); /* caller must do the cleanup */
6521 6378
6522 6379 if (locked) {
6523 6380 pml = sfmmu_mlist_enter(pp);
6524 6381 pahmep->flags |= flag;
6525 6382 goto top;
6526 6383 }
6527 6384
6528 6385 pahmep->flags |= flag;
6529 6386 }
6530 6387
6531 6388 if (locked)
6532 6389 sfmmu_mlist_exit(pml);
6533 6390
6534 6391 return (0);
6535 6392 }
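
The pre-callback loop above is an instance of a drop-the-lock-and-rescan
idiom: the mapping list lock cannot be held across a handler that may block,
so the lock is dropped, the handler runs, and the scan restarts from the
head; the per-entry flag is what makes the rescan terminate. A stand-alone
sketch of the idiom with pthreads (all names here are invented for the
model):

    #include <pthread.h>
    #include <stdio.h>

    struct node {
            struct node *next;
            unsigned flags;         /* marks entries already processed */
    };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

    static void
    handler(struct node *n)
    {
            (void) n;               /* may block; runs without list_lock */
    }

    static void
    process_all(struct node *head, unsigned flag)
    {
            struct node *n;

            pthread_mutex_lock(&list_lock);
    top:
            for (n = head; n != NULL; n = n->next) {
                    if (n->flags & flag)    /* handled on an earlier pass */
                            continue;
                    pthread_mutex_unlock(&list_lock);
                    handler(n);
                    pthread_mutex_lock(&list_lock);
                    n->flags |= flag;
                    goto top;               /* list may have changed */
            }
            pthread_mutex_unlock(&list_lock);
    }

    int
    main(void)
    {
            struct node b = { NULL, 0 }, a = { &b, 0 };

            process_all(&a, 0x1);
            printf("a=%x b=%x\n", a.flags, b.flags);
            return (0);
    }
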
6536 6393
6537 6394 /*
6538 6395 * Execute post-callback handler of each pa_hment linked to pp
6539 6396 *
6540 6397 * Same overall assumptions and restrictions apply as for
6541 6398 * hat_pageprocess_precallbacks().
6542 6399 */
6543 6400 static void
6544 6401 hat_pageprocess_postcallbacks(struct page *pp, uint_t flag)
6545 6402 {
6546 6403 pfn_t pgpfn = pp->p_pagenum;
6547 6404 pfn_t pgmask = btop(page_get_pagesize(pp->p_szc)) - 1;
6548 6405 pfn_t newpfn;
6549 6406 struct sf_hment *sfhmep;
6550 6407 struct pa_hment *pahmep;
6551 6408 int (*f)(caddr_t, uint_t, uint_t, void *, pfn_t);
6552 6409 id_t id;
6553 6410 int locked = 0;
6554 6411 kmutex_t *pml;
6555 6412
6556 6413 ASSERT(PAGE_EXCL(pp));
6557 6414 if (!sfmmu_mlist_held(pp)) {
6558 6415 pml = sfmmu_mlist_enter(pp);
6559 6416 locked = 1;
6560 6417 }
6561 6418
6562 6419 top:
6563 6420 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
6564 6421 /*
6565 6422 * skip sf_hments corresponding to VA<->PA mappings;
6566 6423 * for pa_hment's, hme_tte.ll is zero
6567 6424 */
6568 6425 if (!IS_PAHME(sfhmep))
6569 6426 continue;
6570 6427
6571 6428 pahmep = sfhmep->hme_data;
6572 6429 ASSERT(pahmep != NULL);
6573 6430
6574 6431 if ((pahmep->flags & flag) == 0)
6575 6432 continue;
6576 6433
6577 6434 pahmep->flags &= ~flag;
6578 6435
6579 6436 id = pahmep->cb_id;
6580 6437 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid);
6581 6438 if ((f = sfmmu_cb_table[id].posthandler) == NULL)
6582 6439 continue;
6583 6440
6584 6441 /*
6585 6442 * Convert the base page PFN into the constituent PFN
6586 6443 * which is needed by the callback handler.
6587 6444 */
6588 6445 newpfn = pgpfn | (btop((uintptr_t)pahmep->addr) & pgmask);
6589 6446
6590 6447 /*
6591 6448 * Drop the mapping list lock to avoid locking order issues.
6592 6449 */
6593 6450 if (locked)
6594 6451 sfmmu_mlist_exit(pml);
6595 6452
6596 6453 if (f(pahmep->addr, pahmep->len, flag, pahmep->pvt, newpfn)
6597 6454 != 0)
6598 6455 panic("sfmmu: posthandler failed");
6599 6456
6600 6457 if (locked) {
6601 6458 pml = sfmmu_mlist_enter(pp);
6602 6459 goto top;
6603 6460 }
6604 6461 }
6605 6462
6606 6463 if (locked)
6607 6464 sfmmu_mlist_exit(pml);
6608 6465 }
6609 6466
6610 6467 /*
6611 6468 * Suspend locked kernel mapping
6612 6469 */
6613 6470 void
6614 6471 hat_pagesuspend(struct page *pp)
6615 6472 {
6616 6473 struct sf_hment *sfhmep;
6617 6474 sfmmu_t *sfmmup;
6618 6475 tte_t tte, ttemod;
6619 6476 struct hme_blk *hmeblkp;
6620 6477 caddr_t addr;
6621 6478 int index, cons;
6622 6479 cpuset_t cpuset;
6623 6480
6624 6481 ASSERT(PAGE_EXCL(pp));
6625 6482 ASSERT(sfmmu_mlist_held(pp));
6626 6483
6627 6484 mutex_enter(&kpr_suspendlock);
6628 6485
6629 6486 /*
6630 6487 * We're about to suspend a kernel mapping so mark this thread as
6631 6488 * non-traceable by DTrace. This prevents us from running into issues
6632 6489 * with probe context trying to touch a suspended page
6633 6490 * in the relocation codepath itself.
6634 6491 */
6635 6492 curthread->t_flag |= T_DONTDTRACE;
6636 6493
6637 6494 index = PP_MAPINDEX(pp);
6638 6495 cons = TTE8K;
6639 6496
6640 6497 retry:
6641 6498 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
6642 6499
6643 6500 if (IS_PAHME(sfhmep))
6644 6501 continue;
6645 6502
6646 6503 if (get_hblk_ttesz(sfmmu_hmetohblk(sfhmep)) != cons)
6647 6504 continue;
6648 6505
6649 6506 /*
6650 6507 * Loop until we successfully set the suspend bit in
6651 6508 * the TTE.
6652 6509 */
6653 6510 again:
6654 6511 sfmmu_copytte(&sfhmep->hme_tte, &tte);
6655 6512 ASSERT(TTE_IS_VALID(&tte));
6656 6513
6657 6514 ttemod = tte;
6658 6515 TTE_SET_SUSPEND(&ttemod);
6659 6516 if (sfmmu_modifytte_try(&tte, &ttemod,
6660 6517 &sfhmep->hme_tte) < 0)
6661 6518 goto again;
6662 6519
6663 6520 /*
6664 6521 * Invalidate TSB entry
6665 6522 */
6666 6523 hmeblkp = sfmmu_hmetohblk(sfhmep);
6667 6524
6668 6525 sfmmup = hblktosfmmu(hmeblkp);
6669 6526 ASSERT(sfmmup == ksfmmup);
6670 6527 ASSERT(!hmeblkp->hblk_shared);
6671 6528
6672 6529 addr = tte_to_vaddr(hmeblkp, tte);
6673 6530
6674 6531 /*
6675 6532 * No need to make sure that the TSB for this sfmmu is
6676 6533 * not being relocated since it is ksfmmup and thus it
6677 6534 * will never be relocated.
6678 6535 */
6679 6536 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
6680 6537
6681 6538 /*
6682 6539 * Update xcall stats
6683 6540 */
6684 6541 cpuset = cpu_ready_set;
6685 6542 CPUSET_DEL(cpuset, CPU->cpu_id);
6686 6543
6687 6544 /* LINTED: constant in conditional context */
6688 6545 SFMMU_XCALL_STATS(ksfmmup);
6689 6546
6690 6547 /*
6691 6548 	 * Flush TLB entry on remote CPUs
6692 6549 */
6693 6550 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr,
6694 6551 (uint64_t)ksfmmup);
6695 6552 xt_sync(cpuset);
6696 6553
6697 6554 /*
6698 6555 * Flush TLB entry on local CPU
6699 6556 */
6700 6557 vtag_flushpage(addr, (uint64_t)ksfmmup);
6701 6558 }
6702 6559
6703 6560 while (index != 0) {
6704 6561 index = index >> 1;
6705 6562 if (index != 0)
6706 6563 cons++;
6707 6564 if (index & 0x1) {
6708 6565 pp = PP_GROUPLEADER(pp, cons);
6709 6566 goto retry;
6710 6567 }
6711 6568 }
6712 6569 }
6713 6570
6714 6571 #ifdef DEBUG
6715 6572
6716 6573 #define N_PRLE 1024
6717 6574 struct prle {
6718 6575 page_t *targ;
6719 6576 page_t *repl;
6720 6577 int status;
6721 6578 int pausecpus;
6722 6579 hrtime_t whence;
6723 6580 };
6724 6581
6725 6582 static struct prle page_relocate_log[N_PRLE];
6726 6583 static int prl_entry;
6727 6584 static kmutex_t prl_mutex;
6728 6585
6729 6586 #define PAGE_RELOCATE_LOG(t, r, s, p) \
6730 6587 mutex_enter(&prl_mutex); \
6731 6588 page_relocate_log[prl_entry].targ = *(t); \
6732 6589 page_relocate_log[prl_entry].repl = *(r); \
6733 6590 page_relocate_log[prl_entry].status = (s); \
6734 6591 page_relocate_log[prl_entry].pausecpus = (p); \
6735 6592 page_relocate_log[prl_entry].whence = gethrtime(); \
6736 6593 prl_entry = (prl_entry == (N_PRLE - 1))? 0 : prl_entry + 1; \
6737 6594 mutex_exit(&prl_mutex);
6738 6595
6739 6596 #else /* !DEBUG */
6740 6597 #define PAGE_RELOCATE_LOG(t, r, s, p)
6741 6598 #endif
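
PAGE_RELOCATE_LOG expands to several statements; every use in this file is a
full statement inside a braced block, so it is safe as written, but the
conventional hardening for statement-like macros is a do { } while (0)
wrapper. A tiny user-space illustration of why the wrapper matters:

    #include <stdio.h>

    /* safe in any statement position, including unbraced if/else */
    #define LOG2(a, b)                              \
            do {                                    \
                    printf("a=%d\n", (a));          \
                    printf("b=%d\n", (b));          \
            } while (0)

    int
    main(void)
    {
            int x = 1;

            if (x)
                    LOG2(1, 2);     /* a bare two-statement macro here */
            else                    /* would break the if/else pairing */
                    LOG2(3, 4);
            return (0);
    }
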
6742 6599
6743 6600 /*
6744 6601 * Core Kernel Page Relocation Algorithm
6745 6602 *
6746 6603 * Input:
6747 6604 *
6748 6605 * target : constituent pages are SE_EXCL locked.
6749 6606 * replacement: constituent pages are SE_EXCL locked.
6750 6607 *
6751 6608 * Output:
6752 6609 *
6753 6610 * nrelocp: number of pages relocated
6754 6611 */
6755 6612 int
6756 6613 hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp)
6757 6614 {
6758 6615 page_t *targ, *repl;
6759 6616 page_t *tpp, *rpp;
6760 6617 kmutex_t *low, *high;
6761 6618 spgcnt_t npages, i;
6762 6619 page_t *pl = NULL;
6763 6620 int old_pil;
6764 6621 cpuset_t cpuset;
6765 6622 int cap_cpus;
6766 6623 int ret;
6767 6624 #ifdef VAC
6768 6625 int cflags = 0;
6769 6626 #endif
6770 6627
6771 6628 if (!kcage_on || PP_ISNORELOC(*target)) {
6772 6629 PAGE_RELOCATE_LOG(target, replacement, EAGAIN, -1);
6773 6630 return (EAGAIN);
6774 6631 }
6775 6632
6776 6633 mutex_enter(&kpr_mutex);
6777 6634 kreloc_thread = curthread;
6778 6635
6779 6636 targ = *target;
6780 6637 repl = *replacement;
6781 6638 ASSERT(repl != NULL);
6782 6639 ASSERT(targ->p_szc == repl->p_szc);
6783 6640
6784 6641 npages = page_get_pagecnt(targ->p_szc);
6785 6642
6786 6643 /*
6787 6644 * unload VA<->PA mappings that are not locked
6788 6645 */
6789 6646 tpp = targ;
6790 6647 for (i = 0; i < npages; i++) {
6791 6648 (void) hat_pageunload(tpp, SFMMU_KERNEL_RELOC);
6792 6649 tpp++;
6793 6650 }
6794 6651
6795 6652 /*
6796 6653 * Do "presuspend" callbacks, in a context from which we can still
6797 6654 * block as needed. Note that we don't hold the mapping list lock
6798 6655 * of "targ" at this point due to potential locking order issues;
6799 6656 	 * we assume that, between the hat_pageunload() above and our
6800 6657 	 * holding of the SE_EXCL lock, the mapping list *cannot* change at
6801 6658 	 * this point.
6802 6659 */
6803 6660 ret = hat_pageprocess_precallbacks(targ, HAT_PRESUSPEND, &cap_cpus);
6804 6661 if (ret != 0) {
6805 6662 /*
6806 6663 * EIO translates to fatal error, for all others cleanup
6807 6664 * and return EAGAIN.
6808 6665 */
6809 6666 ASSERT(ret != EIO);
6810 6667 hat_pageprocess_postcallbacks(targ, HAT_POSTUNSUSPEND);
6811 6668 PAGE_RELOCATE_LOG(target, replacement, ret, -1);
6812 6669 kreloc_thread = NULL;
6813 6670 mutex_exit(&kpr_mutex);
6814 6671 return (EAGAIN);
6815 6672 }
6816 6673
6817 6674 /*
6818 6675 * acquire p_mapping list lock for both the target and replacement
6819 6676 * root pages.
6820 6677 *
6821 6678 * low and high refer to the need to grab the mlist locks in a
6822 6679 * specific order in order to prevent race conditions. Thus the
6823 6680 * lower lock must be grabbed before the higher lock.
6824 6681 *
6825 6682 	 * This will block hat_unload() from accessing the p_mapping list. Since
6826 6683 * we have SE_EXCL lock, hat_memload and hat_pageunload will be
6827 6684 * blocked. Thus, no one else will be accessing the p_mapping list
6828 6685 * while we suspend and reload the locked mapping below.
6829 6686 */
6830 6687 tpp = targ;
6831 6688 rpp = repl;
6832 6689 sfmmu_mlist_reloc_enter(tpp, rpp, &low, &high);
6833 6690
6834 6691 kpreempt_disable();
6835 6692
6836 6693 /*
6837 6694 * We raise our PIL to 13 so that we don't get captured by
6838 6695 * another CPU or pinned by an interrupt thread. We can't go to
6839 6696 * PIL 14 since the nexus driver(s) may need to interrupt at
6840 6697 * that level in the case of IOMMU pseudo mappings.
6841 6698 */
6842 6699 cpuset = cpu_ready_set;
6843 6700 CPUSET_DEL(cpuset, CPU->cpu_id);
6844 6701 if (!cap_cpus || CPUSET_ISNULL(cpuset)) {
6845 6702 old_pil = splr(XCALL_PIL);
6846 6703 } else {
6847 6704 old_pil = -1;
6848 6705 xc_attention(cpuset);
6849 6706 }
6850 6707 ASSERT(getpil() == XCALL_PIL);
6851 6708
6852 6709 /*
6853 6710 * Now do suspend callbacks. In the case of an IOMMU mapping
6854 6711 * this will suspend all DMA activity to the page while it is
6855 6712 * being relocated. Since we are well above LOCK_LEVEL and CPUs
6856 6713 * may be captured at this point we should have acquired any needed
6857 6714 * locks in the presuspend callback.
6858 6715 */
6859 6716 ret = hat_pageprocess_precallbacks(targ, HAT_SUSPEND, NULL);
6860 6717 if (ret != 0) {
6861 6718 repl = targ;
6862 6719 goto suspend_fail;
6863 6720 }
6864 6721
6865 6722 /*
6866 6723 * Raise the PIL yet again, this time to block all high-level
6867 6724 * interrupts on this CPU. This is necessary to prevent an
6868 6725 * interrupt routine from pinning the thread which holds the
6869 6726 * mapping suspended and then touching the suspended page.
6870 6727 *
6871 6728 * Once the page is suspended we also need to be careful to
6872 6729 * avoid calling any functions which touch any seg_kmem memory
6873 6730 * since that memory may be backed by the very page we are
6874 6731 * relocating in here!
6875 6732 */
6876 6733 hat_pagesuspend(targ);
6877 6734
6878 6735 /*
6879 6736 * Now that we are confident everybody has stopped using this page,
6880 6737 * copy the page contents. Note we use a physical copy to prevent
6881 6738 * locking issues and to avoid fpRAS because we can't handle it in
6882 6739 * this context.
6883 6740 */
6884 6741 for (i = 0; i < npages; i++, tpp++, rpp++) {
6885 6742 #ifdef VAC
6886 6743 /*
6887 6744 * If the replacement has a different vcolor than
6888 6745 		 * the one being replaced, we need to handle VAC
6889 6746 		 * consistency for it just as if we were setting up
6890 6747 		 * a new mapping to it.
6891 6748 */
6892 6749 if ((PP_GET_VCOLOR(rpp) != NO_VCOLOR) &&
6893 6750 (tpp->p_vcolor != rpp->p_vcolor) &&
6894 6751 !CacheColor_IsFlushed(cflags, PP_GET_VCOLOR(rpp))) {
6895 6752 CacheColor_SetFlushed(cflags, PP_GET_VCOLOR(rpp));
6896 6753 sfmmu_cache_flushcolor(PP_GET_VCOLOR(rpp),
6897 6754 rpp->p_pagenum);
6898 6755 }
6899 6756 #endif
6900 6757 /*
6901 6758 * Copy the contents of the page.
6902 6759 */
6903 6760 ppcopy_kernel(tpp, rpp);
6904 6761 }
6905 6762
6906 6763 tpp = targ;
6907 6764 rpp = repl;
6908 6765 for (i = 0; i < npages; i++, tpp++, rpp++) {
6909 6766 /*
6910 6767 * Copy attributes. VAC consistency was handled above,
6911 6768 * if required.
6912 6769 */
6913 6770 rpp->p_nrm = tpp->p_nrm;
6914 6771 tpp->p_nrm = 0;
6915 6772 rpp->p_index = tpp->p_index;
6916 6773 tpp->p_index = 0;
6917 6774 #ifdef VAC
6918 6775 rpp->p_vcolor = tpp->p_vcolor;
6919 6776 #endif
6920 6777 }
6921 6778
6922 6779 /*
6923 6780 * First, unsuspend the page, if we set the suspend bit, and transfer
6924 6781 * the mapping list from the target page to the replacement page.
6925 6782 * Next process postcallbacks; since pa_hment's are linked only to the
6926 6783 * p_mapping list of root page, we don't iterate over the constituent
6927 6784 * pages.
6928 6785 */
6929 6786 hat_pagereload(targ, repl);
6930 6787
6931 6788 suspend_fail:
6932 6789 hat_pageprocess_postcallbacks(repl, HAT_UNSUSPEND);
6933 6790
6934 6791 /*
6935 6792 * Now lower our PIL and release any captured CPUs since we
6936 6793 * are out of the "danger zone". After this it will again be
6937 6794 * safe to acquire adaptive mutex locks, or to drop them...
6938 6795 */
6939 6796 if (old_pil != -1) {
6940 6797 splx(old_pil);
6941 6798 } else {
6942 6799 xc_dismissed(cpuset);
6943 6800 }
6944 6801
6945 6802 kpreempt_enable();
6946 6803
6947 6804 sfmmu_mlist_reloc_exit(low, high);
6948 6805
6949 6806 /*
6950 6807 * Postsuspend callbacks should drop any locks held across
6951 6808 * the suspend callbacks. As before, we don't hold the mapping
6952 6809 	 * list lock at this point. Our assumption is that the mapping
6953 6810 * list still can't change due to our holding SE_EXCL lock and
6954 6811 * there being no unlocked mappings left. Hence the restriction
6955 6812 	 * on calling context to hat_delete_callback().
6956 6813 */
6957 6814 hat_pageprocess_postcallbacks(repl, HAT_POSTUNSUSPEND);
6958 6815 if (ret != 0) {
6959 6816 /*
6960 6817 * The second presuspend call failed: we got here through
6961 6818 * the suspend_fail label above.
6962 6819 */
6963 6820 ASSERT(ret != EIO);
6964 6821 PAGE_RELOCATE_LOG(target, replacement, ret, cap_cpus);
6965 6822 kreloc_thread = NULL;
6966 6823 mutex_exit(&kpr_mutex);
6967 6824 return (EAGAIN);
6968 6825 }
6969 6826
6970 6827 /*
6971 6828 * Now that we're out of the performance critical section we can
6972 6829 * take care of updating the hash table, since we still
6973 6830 * hold all the pages locked SE_EXCL at this point we
6974 6831 * needn't worry about things changing out from under us.
6975 6832 */
6976 6833 tpp = targ;
6977 6834 rpp = repl;
6978 6835 for (i = 0; i < npages; i++, tpp++, rpp++) {
6979 6836
6980 6837 /*
6981 6838 * replace targ with replacement in page_hash table
6982 6839 */
6983 6840 targ = tpp;
6984 6841 page_relocate_hash(rpp, targ);
6985 6842
6986 6843 /*
6987 6844 * concatenate target; caller of platform_page_relocate()
6988 6845 * expects target to be concatenated after returning.
6989 6846 */
6990 6847 ASSERT(targ->p_next == targ);
6991 6848 ASSERT(targ->p_prev == targ);
6992 6849 page_list_concat(&pl, &targ);
6993 6850 }
6994 6851
6995 6852 ASSERT(*target == pl);
6996 6853 *nrelocp = npages;
6997 6854 PAGE_RELOCATE_LOG(target, replacement, 0, cap_cpus);
6998 6855 kreloc_thread = NULL;
6999 6856 mutex_exit(&kpr_mutex);
7000 6857 return (0);
7001 6858 }
7002 6859
7003 6860 /*
7004 6861 * Called when stray pa_hments are found attached to a page which is
7005 6862 * being freed. Notify the subsystem which attached the pa_hment of
7006 6863 * the error if it registered a suitable handler, else panic.
7007 6864 */
7008 6865 static void
7009 6866 sfmmu_pahment_leaked(struct pa_hment *pahmep)
7010 6867 {
7011 6868 id_t cb_id = pahmep->cb_id;
7012 6869
7013 6870 ASSERT(cb_id >= (id_t)0 && cb_id < sfmmu_cb_nextid);
7014 6871 if (sfmmu_cb_table[cb_id].errhandler != NULL) {
7015 6872 if (sfmmu_cb_table[cb_id].errhandler(pahmep->addr, pahmep->len,
7016 6873 HAT_CB_ERR_LEAKED, pahmep->pvt) == 0)
7017 6874 return; /* non-fatal */
7018 6875 }
7019 6876 panic("pa_hment leaked: 0x%p", (void *)pahmep);
7020 6877 }
7021 6878
7022 6879 /*
7023 6880 * Remove all mappings to page 'pp'.
7024 6881 */
7025 6882 int
7026 6883 hat_pageunload(struct page *pp, uint_t forceflag)
7027 6884 {
7028 6885 struct page *origpp = pp;
7029 6886 struct sf_hment *sfhme, *tmphme;
7030 6887 struct hme_blk *hmeblkp;
7031 6888 kmutex_t *pml;
7032 6889 #ifdef VAC
7033 6890 kmutex_t *pmtx;
7034 6891 #endif
7035 6892 cpuset_t cpuset, tset;
7036 6893 int index, cons;
7037 6894 int pa_hments;
7038 6895
7039 6896 ASSERT(PAGE_EXCL(pp));
7040 6897
7041 6898 tmphme = NULL;
7042 6899 pa_hments = 0;
7043 6900 CPUSET_ZERO(cpuset);
7044 6901
7045 6902 pml = sfmmu_mlist_enter(pp);
7046 6903
7047 6904 #ifdef VAC
7048 6905 if (pp->p_kpmref)
7049 6906 sfmmu_kpm_pageunload(pp);
7050 6907 ASSERT(!PP_ISMAPPED_KPM(pp));
7051 6908 #endif
7052 6909 /*
7053 6910 * Clear vpm reference. Since the page is exclusively locked
7054 6911 * vpm cannot be referencing it.
7055 6912 */
7056 6913 if (vpm_enable) {
7057 6914 pp->p_vpmref = 0;
7058 6915 }
7059 6916
7060 6917 index = PP_MAPINDEX(pp);
7061 6918 cons = TTE8K;
7062 6919 retry:
7063 6920 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7064 6921 tmphme = sfhme->hme_next;
7065 6922
7066 6923 if (IS_PAHME(sfhme)) {
7067 6924 ASSERT(sfhme->hme_data != NULL);
7068 6925 pa_hments++;
7069 6926 continue;
7070 6927 }
7071 6928
7072 6929 hmeblkp = sfmmu_hmetohblk(sfhme);
7073 6930
7074 6931 /*
7075 6932 		 * If there are kernel mappings, don't unload them; they will
7076 6933 * be suspended.
7077 6934 */
7078 6935 if (forceflag == SFMMU_KERNEL_RELOC && hmeblkp->hblk_lckcnt &&
7079 6936 hmeblkp->hblk_tag.htag_id == ksfmmup)
7080 6937 continue;
7081 6938
7082 6939 tset = sfmmu_pageunload(pp, sfhme, cons);
7083 6940 CPUSET_OR(cpuset, tset);
7084 6941 }
7085 6942
7086 6943 while (index != 0) {
7087 6944 index = index >> 1;
7088 6945 if (index != 0)
7089 6946 cons++;
7090 6947 if (index & 0x1) {
7091 6948 /* Go to leading page */
7092 6949 pp = PP_GROUPLEADER(pp, cons);
7093 6950 ASSERT(sfmmu_mlist_held(pp));
7094 6951 goto retry;
7095 6952 }
7096 6953 }
7097 6954
7098 6955 /*
7099 6956 * cpuset may be empty if the page was only mapped by segkpm,
7100 6957 * in which case we won't actually cross-trap.
7101 6958 */
7102 6959 xt_sync(cpuset);
7103 6960
7104 6961 /*
7105 6962 * The page should have no mappings at this point, unless
7106 6963 * we were called from hat_page_relocate() in which case we
7107 6964 * leave the locked mappings which will be suspended later.
7108 6965 */
7109 6966 ASSERT(!PP_ISMAPPED(origpp) || pa_hments ||
7110 6967 (forceflag == SFMMU_KERNEL_RELOC));
7111 6968
7112 6969 #ifdef VAC
7113 6970 if (PP_ISTNC(pp)) {
7114 6971 if (cons == TTE8K) {
7115 6972 pmtx = sfmmu_page_enter(pp);
7116 6973 PP_CLRTNC(pp);
7117 6974 sfmmu_page_exit(pmtx);
7118 6975 } else {
7119 6976 conv_tnc(pp, cons);
7120 6977 }
7121 6978 }
7122 6979 #endif /* VAC */
7123 6980
7124 6981 if (pa_hments && forceflag != SFMMU_KERNEL_RELOC) {
7125 6982 /*
7126 6983 * Unlink any pa_hments and free them, calling back
7127 6984 * the responsible subsystem to notify it of the error.
7128 6985 * This can occur in situations such as drivers leaking
7129 6986 * DMA handles: naughty, but common enough that we'd like
7130 6987 * to keep the system running rather than bringing it
7131 6988 * down with an obscure error like "pa_hment leaked"
7132 6989 * which doesn't aid the user in debugging their driver.
7133 6990 */
7134 6991 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7135 6992 tmphme = sfhme->hme_next;
7136 6993 if (IS_PAHME(sfhme)) {
7137 6994 struct pa_hment *pahmep = sfhme->hme_data;
7138 6995 sfmmu_pahment_leaked(pahmep);
7139 6996 HME_SUB(sfhme, pp);
7140 6997 kmem_cache_free(pa_hment_cache, pahmep);
7141 6998 }
7142 6999 }
7143 7000
7144 7001 ASSERT(!PP_ISMAPPED(origpp));
7145 7002 }
7146 7003
7147 7004 sfmmu_mlist_exit(pml);
7148 7005
7149 7006 return (0);
7150 7007 }
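
The index/cons walk that closes hat_pageunload() (and recurs throughout this
file) decodes PP_MAPINDEX(): after the first shift, bit i of the index marks
a large mapping at size code i, and each set bit restarts the scan at that
size's group leader. A user-space model of the decode (the size-code names
are the usual sfmmu ones; the bit layout is inferred from the loops above):

    #include <stdio.h>

    static const char *szname[] = {
            "8K", "64K", "512K", "4M", "32M", "256M"
    };

    int
    main(void)
    {
            unsigned index = 0x0a;  /* example: 64K and 4M mappings */
            int cons = 0;           /* TTE8K */

            while (index != 0) {
                    index >>= 1;
                    if (index != 0)
                            cons++;
                    if (index & 0x1)        /* kernel: goto group leader */
                            printf("large mapping at %s\n", szname[cons]);
            }
            return (0);
    }
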
7151 7008
7152 7009 cpuset_t
7153 7010 sfmmu_pageunload(page_t *pp, struct sf_hment *sfhme, int cons)
7154 7011 {
7155 7012 struct hme_blk *hmeblkp;
7156 7013 sfmmu_t *sfmmup;
7157 7014 tte_t tte, ttemod;
7158 7015 #ifdef DEBUG
7159 7016 tte_t orig_old;
7160 7017 #endif /* DEBUG */
7161 7018 caddr_t addr;
7162 7019 int ttesz;
7163 7020 int ret;
7164 7021 cpuset_t cpuset;
7165 7022
7166 7023 ASSERT(pp != NULL);
7167 7024 ASSERT(sfmmu_mlist_held(pp));
7168 7025 ASSERT(!PP_ISKAS(pp));
7169 7026
7170 7027 CPUSET_ZERO(cpuset);
7171 7028
7172 7029 hmeblkp = sfmmu_hmetohblk(sfhme);
7173 7030
7174 7031 readtte:
7175 7032 sfmmu_copytte(&sfhme->hme_tte, &tte);
7176 7033 if (TTE_IS_VALID(&tte)) {
7177 7034 sfmmup = hblktosfmmu(hmeblkp);
7178 7035 ttesz = get_hblk_ttesz(hmeblkp);
7179 7036 /*
7180 7037 * Only unload mappings of 'cons' size.
7181 7038 */
7182 7039 if (ttesz != cons)
7183 7040 return (cpuset);
7184 7041
7185 7042 /*
7186 7043 * Note that we have p_mapping lock, but no hash lock here.
7187 7044 * hblk_unload() has to have both hash lock AND p_mapping
7188 7045 * lock before it tries to modify tte. So, the tte could
7189 7046 * not become invalid in the sfmmu_modifytte_try() below.
7190 7047 */
7191 7048 ttemod = tte;
7192 7049 #ifdef DEBUG
7193 7050 orig_old = tte;
7194 7051 #endif /* DEBUG */
7195 7052
7196 7053 TTE_SET_INVALID(&ttemod);
7197 7054 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
7198 7055 if (ret < 0) {
7199 7056 #ifdef DEBUG
7200 7057 /* only R/M bits can change. */
7201 7058 chk_tte(&orig_old, &tte, &ttemod, hmeblkp);
7202 7059 #endif /* DEBUG */
7203 7060 goto readtte;
7204 7061 }
7205 7062
7206 7063 if (ret == 0) {
7207 7064 panic("pageunload: cas failed?");
7208 7065 }
7209 7066
7210 7067 addr = tte_to_vaddr(hmeblkp, tte);
7211 7068
7212 7069 if (hmeblkp->hblk_shared) {
7213 7070 sf_srd_t *srdp = (sf_srd_t *)sfmmup;
7214 7071 uint_t rid = hmeblkp->hblk_tag.htag_rid;
7215 7072 sf_region_t *rgnp;
7216 7073 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7217 7074 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7218 7075 ASSERT(srdp != NULL);
7219 7076 rgnp = srdp->srd_hmergnp[rid];
7220 7077 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
7221 7078 cpuset = sfmmu_rgntlb_demap(addr, rgnp, hmeblkp, 1);
7222 7079 sfmmu_ttesync(NULL, addr, &tte, pp);
7223 7080 ASSERT(rgnp->rgn_ttecnt[ttesz] > 0);
7224 7081 atomic_dec_ulong(&rgnp->rgn_ttecnt[ttesz]);
7225 7082 } else {
7226 7083 sfmmu_ttesync(sfmmup, addr, &tte, pp);
7227 7084 atomic_dec_ulong(&sfmmup->sfmmu_ttecnt[ttesz]);
7228 7085
7229 7086 /*
7230 7087 * We need to flush the page from the virtual cache
7231 7088 * in order to prevent a virtual cache alias
7232 7089 * inconsistency. The particular scenario we need
7233 7090 * to worry about is:
7234 7091 * Given: va1 and va2 are two virtual address that
7235 7092 * alias and will map the same physical address.
7236 7093 * 1. mapping exists from va1 to pa and data has
7237 7094 * been read into the cache.
7238 7095 * 2. unload va1.
7239 7096 * 3. load va2 and modify data using va2.
7240 7097 * 4 unload va2.
7241 7098 * 5. load va1 and reference data. Unless we flush
7242 7099 * the data cache when we unload we will get
7243 7100 * stale data.
7244 7101 * This scenario is taken care of by using virtual
7245 7102 * page coloring.
7246 7103 */
7247 7104 if (sfmmup->sfmmu_ismhat) {
7248 7105 /*
7249 7106 * Flush TSBs, TLBs and caches
7250 7107 * of every process
7251 7108 * sharing this ism segment.
7252 7109 */
7253 7110 sfmmu_hat_lock_all();
7254 7111 mutex_enter(&ism_mlist_lock);
7255 7112 kpreempt_disable();
7256 7113 sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp,
7257 7114 pp->p_pagenum, CACHE_NO_FLUSH);
7258 7115 kpreempt_enable();
7259 7116 mutex_exit(&ism_mlist_lock);
7260 7117 sfmmu_hat_unlock_all();
7261 7118 cpuset = cpu_ready_set;
7262 7119 } else {
7263 7120 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
7264 7121 cpuset = sfmmup->sfmmu_cpusran;
7265 7122 }
7266 7123 }
7267 7124
7268 7125 /*
7269 7126 * Hme_sub has to run after ttesync() and a_rss update.
7270 7127 * See hblk_unload().
7271 7128 */
7272 7129 HME_SUB(sfhme, pp);
7273 7130 membar_stst();
7274 7131
7275 7132 /*
7276 7133 		 * We cannot ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS)
7277 7134 		 * since pteload may have done a HME_ADD() right after
7278 7135 		 * we did the HME_SUB() above. Hmecnt is now maintained
7279 7136 		 * by cas only; no lock guarantees its value. The only
7280 7137 		 * guarantee we have is that the hmecnt should not drop
7281 7138 		 * below what it should be, so the hblk will not be taken
7282 7139 		 * away. It's also important that we decrement the hmecnt
7283 7140 		 * after we are done with hmeblkp so that this hmeblk
7284 7141 		 * won't be stolen.
7285 7142 */
7286 7143 ASSERT(hmeblkp->hblk_hmecnt > 0);
7287 7144 ASSERT(hmeblkp->hblk_vcnt > 0);
7288 7145 atomic_dec_16(&hmeblkp->hblk_vcnt);
7289 7146 atomic_dec_16(&hmeblkp->hblk_hmecnt);
7290 7147 /*
7291 7148 * This is bug 4063182.
7292 7149 * XXX: fixme
7293 7150 * ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
7294 7151 * !hmeblkp->hblk_lckcnt);
7295 7152 */
7296 7153 } else {
7297 7154 panic("invalid tte? pp %p &tte %p",
7298 7155 (void *)pp, (void *)&tte);
7299 7156 }
7300 7157
7301 7158 return (cpuset);
7302 7159 }
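
Every tte update in this file follows the same copy/modify/try cycle seen in
sfmmu_pageunload(): copy the tte, build the modified image, and let
sfmmu_modifytte_try() race the hardware R/M updates, re-reading on a lost
cas. A user-space model of the cycle with C11 atomics (the tte bit layout is
invented for the model; the retry-on-failure semantics follow the comments
above):

    #include <stdatomic.h>
    #include <stdio.h>

    #define TTE_REF 0x1             /* invented bits, model only */
    #define TTE_MOD 0x2

    static _Atomic unsigned hme_tte = TTE_REF | TTE_MOD;

    int
    main(void)
    {
            unsigned tte, ttemod;

            do {
                    tte = atomic_load(&hme_tte);            /* copytte */
                    ttemod = tte & ~(TTE_REF | TTE_MOD);    /* clear R/M */
                    /*
                     * A failed exchange means another cpu (or the MMU)
                     * changed the tte underneath us: re-read and retry,
                     * as the negative modifytte_try return demands.
                     */
            } while (!atomic_compare_exchange_weak(&hme_tte, &tte, ttemod));

            if (tte & (TTE_REF | TTE_MOD))
                    printf("cleared R/M; a TLB demap would follow\n");
            return (0);
    }
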
7303 7160
7304 7161 /*
7305 7162 * While relocating a kernel page, this function will move the mappings
7306 7163 * from tpp to dpp and modify any associated data with these mappings.
7307 7164 * It also unsuspends the suspended kernel mapping.
7308 7165 */
7309 7166 static void
7310 7167 hat_pagereload(struct page *tpp, struct page *dpp)
7311 7168 {
7312 7169 struct sf_hment *sfhme;
7313 7170 tte_t tte, ttemod;
7314 7171 int index, cons;
7315 7172
7316 7173 ASSERT(getpil() == PIL_MAX);
7317 7174 ASSERT(sfmmu_mlist_held(tpp));
7318 7175 ASSERT(sfmmu_mlist_held(dpp));
7319 7176
7320 7177 index = PP_MAPINDEX(tpp);
7321 7178 cons = TTE8K;
7322 7179
7323 7180 /* Update real mappings to the page */
7324 7181 retry:
7325 7182 for (sfhme = tpp->p_mapping; sfhme != NULL; sfhme = sfhme->hme_next) {
7326 7183 if (IS_PAHME(sfhme))
7327 7184 continue;
7328 7185 sfmmu_copytte(&sfhme->hme_tte, &tte);
7329 7186 ttemod = tte;
7330 7187
7331 7188 /*
7332 7189 * replace old pfn with new pfn in TTE
7333 7190 */
7334 7191 PFN_TO_TTE(ttemod, dpp->p_pagenum);
7335 7192
7336 7193 /*
7337 7194 * clear suspend bit
7338 7195 */
7339 7196 ASSERT(TTE_IS_SUSPEND(&ttemod));
7340 7197 TTE_CLR_SUSPEND(&ttemod);
7341 7198
7342 7199 if (sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte) < 0)
7343 7200 panic("hat_pagereload(): sfmmu_modifytte_try() failed");
7344 7201
7345 7202 /*
7346 7203 * set hme_page point to new page
7347 7204 */
7348 7205 sfhme->hme_page = dpp;
7349 7206 }
7350 7207
7351 7208 /*
7352 7209 * move p_mapping list from old page to new page
7353 7210 */
7354 7211 dpp->p_mapping = tpp->p_mapping;
7355 7212 tpp->p_mapping = NULL;
7356 7213 dpp->p_share = tpp->p_share;
7357 7214 tpp->p_share = 0;
7358 7215
7359 7216 while (index != 0) {
7360 7217 index = index >> 1;
7361 7218 if (index != 0)
7362 7219 cons++;
7363 7220 if (index & 0x1) {
7364 7221 tpp = PP_GROUPLEADER(tpp, cons);
7365 7222 dpp = PP_GROUPLEADER(dpp, cons);
7366 7223 goto retry;
7367 7224 }
7368 7225 }
7369 7226
7370 7227 curthread->t_flag &= ~T_DONTDTRACE;
7371 7228 mutex_exit(&kpr_suspendlock);
7372 7229 }
7373 7230
7374 7231 uint_t
7375 7232 hat_pagesync(struct page *pp, uint_t clearflag)
7376 7233 {
7377 7234 struct sf_hment *sfhme, *tmphme = NULL;
7378 7235 struct hme_blk *hmeblkp;
7379 7236 kmutex_t *pml;
7380 7237 cpuset_t cpuset, tset;
7381 7238 int index, cons;
7382 7239 extern ulong_t po_share;
7383 7240 page_t *save_pp = pp;
7384 7241 int stop_on_sh = 0;
7385 7242 uint_t shcnt;
7386 7243
7387 7244 CPUSET_ZERO(cpuset);
7388 7245
7389 7246 if (PP_ISRO(pp) && (clearflag & HAT_SYNC_STOPON_MOD)) {
7390 7247 return (PP_GENERIC_ATTR(pp));
7391 7248 }
7392 7249
7393 7250 if ((clearflag & HAT_SYNC_ZERORM) == 0) {
7394 7251 if ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(pp)) {
7395 7252 return (PP_GENERIC_ATTR(pp));
7396 7253 }
7397 7254 if ((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(pp)) {
7398 7255 return (PP_GENERIC_ATTR(pp));
7399 7256 }
7400 7257 if (clearflag & HAT_SYNC_STOPON_SHARED) {
7401 7258 if (pp->p_share > po_share) {
7402 7259 hat_page_setattr(pp, P_REF);
7403 7260 return (PP_GENERIC_ATTR(pp));
7404 7261 }
7405 7262 stop_on_sh = 1;
7406 7263 shcnt = 0;
7407 7264 }
7408 7265 }
7409 7266
7410 7267 clearflag &= ~HAT_SYNC_STOPON_SHARED;
7411 7268 pml = sfmmu_mlist_enter(pp);
7412 7269 index = PP_MAPINDEX(pp);
7413 7270 cons = TTE8K;
7414 7271 retry:
7415 7272 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7416 7273 /*
7417 7274 * We need to save the next hment on the list since
7418 7275 * it is possible for pagesync to remove an invalid hment
7419 7276 * from the list.
7420 7277 */
7421 7278 tmphme = sfhme->hme_next;
7422 7279 if (IS_PAHME(sfhme))
7423 7280 continue;
7424 7281 /*
7425 7282 * If we are looking for large mappings and this hme doesn't
7426 7283 * reach the range we are seeking, just ignore it.
7427 7284 */
7428 7285 hmeblkp = sfmmu_hmetohblk(sfhme);
7429 7286
7430 7287 if (hme_size(sfhme) < cons)
7431 7288 continue;
7432 7289
7433 7290 if (stop_on_sh) {
7434 7291 if (hmeblkp->hblk_shared) {
7435 7292 sf_srd_t *srdp = hblktosrd(hmeblkp);
7436 7293 uint_t rid = hmeblkp->hblk_tag.htag_rid;
7437 7294 sf_region_t *rgnp;
7438 7295 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7439 7296 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7440 7297 ASSERT(srdp != NULL);
7441 7298 rgnp = srdp->srd_hmergnp[rid];
7442 7299 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp,
7443 7300 rgnp, rid);
7444 7301 shcnt += rgnp->rgn_refcnt;
7445 7302 } else {
7446 7303 shcnt++;
7447 7304 }
7448 7305 if (shcnt > po_share) {
7449 7306 /*
7450 7307 * tell the pager to spare the page this time
7451 7308 * around.
7452 7309 */
7453 7310 hat_page_setattr(save_pp, P_REF);
7454 7311 index = 0;
7455 7312 break;
7456 7313 }
7457 7314 }
7458 7315 tset = sfmmu_pagesync(pp, sfhme,
7459 7316 clearflag & ~HAT_SYNC_STOPON_RM);
7460 7317 CPUSET_OR(cpuset, tset);
7461 7318
7462 7319 /*
7463 7320 * If clearflag is HAT_SYNC_DONTZERO, break out as soon
7464 7321 * as the "ref" or "mod" is set or share cnt exceeds po_share.
7465 7322 */
7466 7323 if ((clearflag & ~HAT_SYNC_STOPON_RM) == HAT_SYNC_DONTZERO &&
7467 7324 (((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp)) ||
7468 7325 ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)))) {
7469 7326 index = 0;
7470 7327 break;
7471 7328 }
7472 7329 }
7473 7330
7474 7331 while (index) {
7475 7332 index = index >> 1;
7476 7333 cons++;
7477 7334 if (index & 0x1) {
7478 7335 /* Go to leading page */
7479 7336 pp = PP_GROUPLEADER(pp, cons);
7480 7337 goto retry;
7481 7338 }
7482 7339 }
7483 7340
7484 7341 xt_sync(cpuset);
7485 7342 sfmmu_mlist_exit(pml);
7486 7343 return (PP_GENERIC_ATTR(save_pp));
7487 7344 }
7488 7345
7489 7346 /*
7490 7347 * Get all the hardware dependent attributes for a page struct
7491 7348 */
7492 7349 static cpuset_t
7493 7350 sfmmu_pagesync(struct page *pp, struct sf_hment *sfhme,
7494 7351 uint_t clearflag)
7495 7352 {
7496 7353 caddr_t addr;
7497 7354 tte_t tte, ttemod;
7498 7355 struct hme_blk *hmeblkp;
7499 7356 int ret;
7500 7357 sfmmu_t *sfmmup;
7501 7358 cpuset_t cpuset;
7502 7359
7503 7360 ASSERT(pp != NULL);
7504 7361 ASSERT(sfmmu_mlist_held(pp));
7505 7362 ASSERT((clearflag == HAT_SYNC_DONTZERO) ||
7506 7363 (clearflag == HAT_SYNC_ZERORM));
7507 7364
7508 7365 SFMMU_STAT(sf_pagesync);
7509 7366
7510 7367 CPUSET_ZERO(cpuset);
7511 7368
7512 7369 sfmmu_pagesync_retry:
7513 7370
7514 7371 sfmmu_copytte(&sfhme->hme_tte, &tte);
7515 7372 if (TTE_IS_VALID(&tte)) {
7516 7373 hmeblkp = sfmmu_hmetohblk(sfhme);
7517 7374 sfmmup = hblktosfmmu(hmeblkp);
7518 7375 addr = tte_to_vaddr(hmeblkp, tte);
7519 7376 if (clearflag == HAT_SYNC_ZERORM) {
7520 7377 ttemod = tte;
7521 7378 TTE_CLR_RM(&ttemod);
7522 7379 ret = sfmmu_modifytte_try(&tte, &ttemod,
7523 7380 &sfhme->hme_tte);
7524 7381 if (ret < 0) {
7525 7382 /*
7526 7383 * cas failed and the new value is not what
7527 7384 * we want.
7528 7385 */
7529 7386 goto sfmmu_pagesync_retry;
7530 7387 }
7531 7388
7532 7389 if (ret > 0) {
7533 7390 /* we win the cas */
7534 7391 if (hmeblkp->hblk_shared) {
7535 7392 sf_srd_t *srdp = (sf_srd_t *)sfmmup;
7536 7393 uint_t rid =
7537 7394 hmeblkp->hblk_tag.htag_rid;
7538 7395 sf_region_t *rgnp;
7539 7396 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7540 7397 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7541 7398 ASSERT(srdp != NULL);
7542 7399 rgnp = srdp->srd_hmergnp[rid];
7543 7400 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
7544 7401 srdp, rgnp, rid);
7545 7402 cpuset = sfmmu_rgntlb_demap(addr,
7546 7403 rgnp, hmeblkp, 1);
7547 7404 } else {
7548 7405 sfmmu_tlb_demap(addr, sfmmup, hmeblkp,
7549 7406 0, 0);
7550 7407 cpuset = sfmmup->sfmmu_cpusran;
7551 7408 }
7552 7409 }
7553 7410 }
7554 7411 sfmmu_ttesync(hmeblkp->hblk_shared ? NULL : sfmmup, addr,
7555 7412 &tte, pp);
7556 7413 }
7557 7414 return (cpuset);
7558 7415 }
7559 7416
7560 7417 /*
7561 7418  * Remove write permission from all mappings to a page, so that
7562 7419 * we can detect the next modification of it. This requires modifying
7563 7420 * the TTE then invalidating (demap) any TLB entry using that TTE.
7564 7421 * This code is similar to sfmmu_pagesync().
7565 7422 */
7566 7423 static cpuset_t
7567 7424 sfmmu_pageclrwrt(struct page *pp, struct sf_hment *sfhme)
7568 7425 {
7569 7426 caddr_t addr;
7570 7427 tte_t tte;
7571 7428 tte_t ttemod;
7572 7429 struct hme_blk *hmeblkp;
7573 7430 int ret;
7574 7431 sfmmu_t *sfmmup;
7575 7432 cpuset_t cpuset;
7576 7433
7577 7434 ASSERT(pp != NULL);
7578 7435 ASSERT(sfmmu_mlist_held(pp));
7579 7436
7580 7437 CPUSET_ZERO(cpuset);
7581 7438 SFMMU_STAT(sf_clrwrt);
7582 7439
7583 7440 retry:
7584 7441
7585 7442 sfmmu_copytte(&sfhme->hme_tte, &tte);
7586 7443 if (TTE_IS_VALID(&tte) && TTE_IS_WRITABLE(&tte)) {
7587 7444 hmeblkp = sfmmu_hmetohblk(sfhme);
7588 7445 sfmmup = hblktosfmmu(hmeblkp);
7589 7446 addr = tte_to_vaddr(hmeblkp, tte);
7590 7447
7591 7448 ttemod = tte;
7592 7449 TTE_CLR_WRT(&ttemod);
7593 7450 TTE_CLR_MOD(&ttemod);
7594 7451 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
7595 7452
7596 7453 /*
7597 7454 		 * If the cas failed and the new value is not what
7598 7455 		 * we want, retry.
7599 7456 */
7600 7457 if (ret < 0)
7601 7458 goto retry;
7602 7459
7603 7460 /* we win the cas */
7604 7461 if (ret > 0) {
7605 7462 if (hmeblkp->hblk_shared) {
7606 7463 sf_srd_t *srdp = (sf_srd_t *)sfmmup;
7607 7464 uint_t rid = hmeblkp->hblk_tag.htag_rid;
7608 7465 sf_region_t *rgnp;
7609 7466 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7610 7467 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7611 7468 ASSERT(srdp != NULL);
7612 7469 rgnp = srdp->srd_hmergnp[rid];
7613 7470 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
7614 7471 srdp, rgnp, rid);
7615 7472 cpuset = sfmmu_rgntlb_demap(addr,
7616 7473 rgnp, hmeblkp, 1);
7617 7474 } else {
7618 7475 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
7619 7476 cpuset = sfmmup->sfmmu_cpusran;
7620 7477 }
7621 7478 }
7622 7479 }
7623 7480
7624 7481 return (cpuset);
7625 7482 }
7626 7483
7627 7484 /*
7628 7485 * Walk all mappings of a page, removing write permission and clearing the
7629 7486 * ref/mod bits. This code is similar to hat_pagesync()
7630 7487 */
7631 7488 static void
7632 7489 hat_page_clrwrt(page_t *pp)
7633 7490 {
7634 7491 struct sf_hment *sfhme;
7635 7492 struct sf_hment *tmphme = NULL;
7636 7493 kmutex_t *pml;
7637 7494 cpuset_t cpuset;
7638 7495 cpuset_t tset;
7639 7496 int index;
7640 7497 int cons;
7641 7498
7642 7499 CPUSET_ZERO(cpuset);
7643 7500
7644 7501 pml = sfmmu_mlist_enter(pp);
7645 7502 index = PP_MAPINDEX(pp);
7646 7503 cons = TTE8K;
7647 7504 retry:
7648 7505 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7649 7506 tmphme = sfhme->hme_next;
7650 7507
7651 7508 /*
7652 7509 * If we are looking for large mappings and this hme doesn't
7653 7510 		 * reach the range we are seeking, just ignore it.
7654 7511 */
7655 7512
7656 7513 if (hme_size(sfhme) < cons)
7657 7514 continue;
7658 7515
7659 7516 tset = sfmmu_pageclrwrt(pp, sfhme);
7660 7517 CPUSET_OR(cpuset, tset);
7661 7518 }
7662 7519
7663 7520 while (index) {
7664 7521 index = index >> 1;
7665 7522 cons++;
7666 7523 if (index & 0x1) {
7667 7524 /* Go to leading page */
7668 7525 pp = PP_GROUPLEADER(pp, cons);
7669 7526 goto retry;
7670 7527 }
7671 7528 }
7672 7529
7673 7530 xt_sync(cpuset);
7674 7531 sfmmu_mlist_exit(pml);
7675 7532 }
7676 7533
7677 7534 /*
7678 7535 * Set the given REF/MOD/RO bits for the given page.
7679 7536 * For a vnode with a sorted v_pages list, we need to change
7680 7537 * the attributes and the v_pages list together under page_vnode_mutex.
7681 7538 */
7682 7539 void
7683 7540 hat_page_setattr(page_t *pp, uint_t flag)
7684 7541 {
7685 7542 vnode_t *vp = pp->p_vnode;
7686 7543 page_t **listp;
7687 7544 kmutex_t *pmtx;
7688 7545 kmutex_t *vphm = NULL;
7689 7546 int noshuffle;
7690 7547
7691 7548 noshuffle = flag & P_NSH;
7692 7549 flag &= ~P_NSH;
7693 7550
7694 7551 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
7695 7552
7696 7553 /*
7697 7554 * nothing to do if attribute already set
7698 7555 */
7699 7556 if ((pp->p_nrm & flag) == flag)
7700 7557 return;
7701 7558
7702 7559 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) &&
7703 7560 !noshuffle) {
7704 7561 vphm = page_vnode_mutex(vp);
7705 7562 mutex_enter(vphm);
7706 7563 }
7707 7564
7708 7565 pmtx = sfmmu_page_enter(pp);
7709 7566 pp->p_nrm |= flag;
7710 7567 sfmmu_page_exit(pmtx);
7711 7568
7712 7569 if (vphm != NULL) {
7713 7570 /*
7714 7571 		 * Some file systems examine v_pages for NULL without
7715 7572 		 * grabbing the vphm mutex, so we must not let it become
7716 7573 		 * NULL when pp is the only page on the list.
7717 7574 */
7718 7575 if (pp->p_vpnext != pp) {
7719 7576 page_vpsub(&vp->v_pages, pp);
7720 7577 if (vp->v_pages != NULL)
7721 7578 listp = &vp->v_pages->p_vpprev->p_vpnext;
7722 7579 else
7723 7580 listp = &vp->v_pages;
7724 7581 page_vpadd(listp, pp);
7725 7582 }
7726 7583 mutex_exit(vphm);
7727 7584 }
7728 7585 }
7729 7586
7730 7587 void
7731 7588 hat_page_clrattr(page_t *pp, uint_t flag)
7732 7589 {
7733 7590 vnode_t *vp = pp->p_vnode;
7734 7591 kmutex_t *pmtx;
7735 7592
7736 7593 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
7737 7594
7738 7595 pmtx = sfmmu_page_enter(pp);
7739 7596
7740 7597 /*
7741 7598 * Caller is expected to hold page's io lock for VMODSORT to work
7742 7599 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod
7743 7600 * bit is cleared.
7744 7601 	 * We don't assert this, to avoid tripping some existing third-party
7745 7602 	 * code. The dirty page is moved back to the top of the v_pages list
7746 7603 * after IO is done in pvn_write_done().
7747 7604 */
7748 7605 pp->p_nrm &= ~flag;
7749 7606 sfmmu_page_exit(pmtx);
7750 7607
7751 7608 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
7752 7609
7753 7610 /*
7754 7611 * VMODSORT works by removing write permissions and getting
7755 7612 * a fault when a page is made dirty. At this point
7756 7613 * we need to remove write permission from all mappings
7757 7614 * to this page.
7758 7615 */
7759 7616 hat_page_clrwrt(pp);
7760 7617 }
7761 7618 }
7762 7619
7763 7620 uint_t
7764 7621 hat_page_getattr(page_t *pp, uint_t flag)
7765 7622 {
7766 7623 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
7767 7624 return ((uint_t)(pp->p_nrm & flag));
7768 7625 }
7769 7626
7770 7627 /*
7771 7628 * DEBUG kernels: verify that a kernel va<->pa translation
7772 7629 * is safe by checking the underlying page_t is in a page
7773 7630 * relocation-safe state.
7774 7631 */
7775 7632 #ifdef DEBUG
7776 7633 void
7777 7634 sfmmu_check_kpfn(pfn_t pfn)
7778 7635 {
7779 7636 page_t *pp;
7780 7637 int index, cons;
7781 7638
7782 7639 if (hat_check_vtop == 0)
7783 7640 return;
7784 7641
7785 7642 if (kvseg.s_base == NULL || panicstr)
7786 7643 return;
7787 7644
7788 7645 pp = page_numtopp_nolock(pfn);
7789 7646 if (!pp)
7790 7647 return;
7791 7648
7792 7649 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp))
7793 7650 return;
7794 7651
7795 7652 /*
7796 7653 * Handed a large kernel page, we dig up the root page since we
7797 7654 * know the root page might have the lock also.
7798 7655 */
7799 7656 if (pp->p_szc != 0) {
7800 7657 index = PP_MAPINDEX(pp);
7801 7658 cons = TTE8K;
7802 7659 again:
7803 7660 while (index != 0) {
7804 7661 index >>= 1;
7805 7662 if (index != 0)
7806 7663 cons++;
7807 7664 if (index & 0x1) {
7808 7665 pp = PP_GROUPLEADER(pp, cons);
7809 7666 goto again;
7810 7667 }
7811 7668 }
7812 7669 }
7813 7670
7814 7671 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp))
7815 7672 return;
7816 7673
7817 7674 /*
7818 7675 * Pages need to be locked or allocated "permanent" (either from
7819 7676 * static_arena arena or explicitly setting PG_NORELOC when calling
7820 7677 * page_create_va()) for VA->PA translations to be valid.
7821 7678 */
7822 7679 if (!PP_ISNORELOC(pp))
7823 7680 panic("Illegal VA->PA translation, pp 0x%p not permanent",
7824 7681 (void *)pp);
7825 7682 else
7826 7683 panic("Illegal VA->PA translation, pp 0x%p not locked",
7827 7684 (void *)pp);
7828 7685 }
7829 7686 #endif /* DEBUG */
7830 7687
7831 7688 /*
7832 7689 * Returns a page frame number for a given virtual address.
7833 7690 * Returns PFN_INVALID to indicate an invalid mapping
7834 7691 */
7835 7692 pfn_t
7836 7693 hat_getpfnum(struct hat *hat, caddr_t addr)
7837 7694 {
7838 7695 pfn_t pfn;
7839 7696 tte_t tte;
7840 7697
7841 7698 /*
7842 7699 * We would like to
7843 7700 * ASSERT(AS_LOCK_HELD(as, &as->a_lock));
7844 7701 * but we can't because the iommu driver will call this
7845 7702 * routine at interrupt time and it can't grab the as lock
7846 7703 * or it will deadlock: A thread could have the as lock
7847 7704 * and be waiting for io. The io can't complete
7848 7705 * because the interrupt thread is blocked trying to grab
7849 7706 * the as lock.
7850 7707 */
7851 7708
7852 7709 if (hat == ksfmmup) {
7853 7710 if (IS_KMEM_VA_LARGEPAGE(addr)) {
7854 7711 ASSERT(segkmem_lpszc > 0);
7855 7712 pfn = sfmmu_kvaszc2pfn(addr, segkmem_lpszc);
7856 7713 if (pfn != PFN_INVALID) {
7857 7714 sfmmu_check_kpfn(pfn);
7858 7715 return (pfn);
7859 7716 }
7860 7717 } else if (segkpm && IS_KPM_ADDR(addr)) {
7861 7718 return (sfmmu_kpm_vatopfn(addr));
7862 7719 }
7863 7720 while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte))
7864 7721 == PFN_SUSPENDED) {
7865 7722 sfmmu_vatopfn_suspended(addr, ksfmmup, &tte);
7866 7723 }
7867 7724 sfmmu_check_kpfn(pfn);
7868 7725 return (pfn);
7869 7726 } else {
7870 7727 return (sfmmu_uvatopfn(addr, hat, NULL));
7871 7728 }
7872 7729 }
7873 7730
7874 7731 /*
7875 7732 * This routine will return both pfn and tte for the vaddr.
7876 7733 */
7877 7734 static pfn_t
7878 7735 sfmmu_uvatopfn(caddr_t vaddr, struct hat *sfmmup, tte_t *ttep)
7879 7736 {
7880 7737 struct hmehash_bucket *hmebp;
7881 7738 hmeblk_tag hblktag;
7882 7739 int hmeshift, hashno = 1;
7883 7740 struct hme_blk *hmeblkp = NULL;
7884 7741 tte_t tte;
7885 7742
7886 7743 struct sf_hment *sfhmep;
7887 7744 pfn_t pfn;
7888 7745
7889 7746 /* support for ISM */
7890 7747 ism_map_t *ism_map;
7891 7748 ism_blk_t *ism_blkp;
7892 7749 int i;
7893 7750 sfmmu_t *ism_hatid = NULL;
7894 7751 sfmmu_t *locked_hatid = NULL;
7895 7752 sfmmu_t *sv_sfmmup = sfmmup;
7896 7753 caddr_t sv_vaddr = vaddr;
7897 7754 sf_srd_t *srdp;
7898 7755
7899 7756 if (ttep == NULL) {
7900 7757 ttep = &tte;
7901 7758 } else {
7902 7759 ttep->ll = 0;
7903 7760 }
7904 7761
7905 7762 ASSERT(sfmmup != ksfmmup);
7906 7763 SFMMU_STAT(sf_user_vtop);
7907 7764 /*
7908 7765 * Set ism_hatid if vaddr falls in a ISM segment.
7909 7766 */
7910 7767 ism_blkp = sfmmup->sfmmu_iblk;
7911 7768 if (ism_blkp != NULL) {
7912 7769 sfmmu_ismhat_enter(sfmmup, 0);
7913 7770 locked_hatid = sfmmup;
7914 7771 }
7915 7772 while (ism_blkp != NULL && ism_hatid == NULL) {
7916 7773 ism_map = ism_blkp->iblk_maps;
7917 7774 for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) {
7918 7775 if (vaddr >= ism_start(ism_map[i]) &&
7919 7776 vaddr < ism_end(ism_map[i])) {
7920 7777 sfmmup = ism_hatid = ism_map[i].imap_ismhat;
7921 7778 vaddr = (caddr_t)(vaddr -
7922 7779 ism_start(ism_map[i]));
7923 7780 break;
7924 7781 }
7925 7782 }
7926 7783 ism_blkp = ism_blkp->iblk_next;
7927 7784 }
7928 7785 if (locked_hatid) {
7929 7786 sfmmu_ismhat_exit(locked_hatid, 0);
7930 7787 }
7931 7788
7932 7789 hblktag.htag_id = sfmmup;
7933 7790 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
7934 7791 do {
7935 7792 hmeshift = HME_HASH_SHIFT(hashno);
7936 7793 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
7937 7794 hblktag.htag_rehash = hashno;
7938 7795 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift);
7939 7796
7940 7797 SFMMU_HASH_LOCK(hmebp);
7941 7798
7942 7799 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
7943 7800 if (hmeblkp != NULL) {
7944 7801 ASSERT(!hmeblkp->hblk_shared);
7945 7802 HBLKTOHME(sfhmep, hmeblkp, vaddr);
7946 7803 sfmmu_copytte(&sfhmep->hme_tte, ttep);
7947 7804 SFMMU_HASH_UNLOCK(hmebp);
7948 7805 if (TTE_IS_VALID(ttep)) {
7949 7806 pfn = TTE_TO_PFN(vaddr, ttep);
7950 7807 return (pfn);
7951 7808 }
7952 7809 break;
7953 7810 }
7954 7811 SFMMU_HASH_UNLOCK(hmebp);
7955 7812 hashno++;
7956 7813 } while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt));
7957 7814
7958 7815 if (SF_HMERGNMAP_ISNULL(sv_sfmmup)) {
7959 7816 return (PFN_INVALID);
7960 7817 }
7961 7818 srdp = sv_sfmmup->sfmmu_srdp;
7962 7819 ASSERT(srdp != NULL);
7963 7820 ASSERT(srdp->srd_refcnt != 0);
7964 7821 hblktag.htag_id = srdp;
7965 7822 hashno = 1;
7966 7823 do {
7967 7824 hmeshift = HME_HASH_SHIFT(hashno);
7968 7825 hblktag.htag_bspage = HME_HASH_BSPAGE(sv_vaddr, hmeshift);
7969 7826 hblktag.htag_rehash = hashno;
7970 7827 hmebp = HME_HASH_FUNCTION(srdp, sv_vaddr, hmeshift);
7971 7828
7972 7829 SFMMU_HASH_LOCK(hmebp);
7973 7830 for (hmeblkp = hmebp->hmeblkp; hmeblkp != NULL;
7974 7831 hmeblkp = hmeblkp->hblk_next) {
7975 7832 uint_t rid;
7976 7833 sf_region_t *rgnp;
7977 7834 caddr_t rsaddr;
7978 7835 caddr_t readdr;
7979 7836
7980 7837 if (!HTAGS_EQ_SHME(hmeblkp->hblk_tag, hblktag,
7981 7838 sv_sfmmup->sfmmu_hmeregion_map)) {
7982 7839 continue;
7983 7840 }
7984 7841 ASSERT(hmeblkp->hblk_shared);
7985 7842 rid = hmeblkp->hblk_tag.htag_rid;
7986 7843 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7987 7844 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7988 7845 rgnp = srdp->srd_hmergnp[rid];
7989 7846 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
7990 7847 HBLKTOHME(sfhmep, hmeblkp, sv_vaddr);
7991 7848 sfmmu_copytte(&sfhmep->hme_tte, ttep);
7992 7849 rsaddr = rgnp->rgn_saddr;
7993 7850 readdr = rsaddr + rgnp->rgn_size;
7994 7851 #ifdef DEBUG
7995 7852 if (TTE_IS_VALID(ttep) ||
7996 7853 get_hblk_ttesz(hmeblkp) > TTE8K) {
7997 7854 caddr_t eva = tte_to_evaddr(hmeblkp, ttep);
7998 7855 ASSERT(eva > sv_vaddr);
7999 7856 ASSERT(sv_vaddr >= rsaddr);
8000 7857 ASSERT(sv_vaddr < readdr);
8001 7858 ASSERT(eva <= readdr);
8002 7859 }
8003 7860 #endif /* DEBUG */
8004 7861 /*
8005 7862 * Continue the search if we
8006 7863 * found an invalid 8K tte outside of the area
8007 7864 * covered by this hmeblk's region.
8008 7865 */
8009 7866 if (TTE_IS_VALID(ttep)) {
8010 7867 SFMMU_HASH_UNLOCK(hmebp);
8011 7868 pfn = TTE_TO_PFN(sv_vaddr, ttep);
8012 7869 return (pfn);
8013 7870 } else if (get_hblk_ttesz(hmeblkp) > TTE8K ||
8014 7871 (sv_vaddr >= rsaddr && sv_vaddr < readdr)) {
8015 7872 SFMMU_HASH_UNLOCK(hmebp);
8016 7873 pfn = PFN_INVALID;
8017 7874 return (pfn);
8018 7875 }
8019 7876 }
8020 7877 SFMMU_HASH_UNLOCK(hmebp);
8021 7878 hashno++;
8022 7879 } while (hashno <= mmu_hashcnt);
8023 7880 return (PFN_INVALID);
8024 7881 }
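
The ISM redirection at the top of sfmmu_uvatopfn() rebases the fault address
to be segment-relative before searching the ISM hat. A toy version of the
rebasing (the map fields are simplified stand-ins for ism_map_t):

    #include <stdio.h>
    #include <stdint.h>

    struct ism_map { uintptr_t start, end; int hat_id; };

    /* return the hat to search; rebase *vaddr if it lies in an ISM map */
    static int
    ism_lookup(struct ism_map *maps, int n, uintptr_t *vaddr, int cur_hat)
    {
            for (int i = 0; i < n; i++) {
                    if (*vaddr >= maps[i].start && *vaddr < maps[i].end) {
                            *vaddr -= maps[i].start;  /* segment-relative */
                            return (maps[i].hat_id);
                    }
            }
            return (cur_hat);
    }

    int
    main(void)
    {
            struct ism_map maps[] = { { 0x10000, 0x20000, 42 } };
            uintptr_t va = 0x14000;
            int hat = ism_lookup(maps, 1, &va, 0);

            printf("hat=%d va=0x%lx\n", hat, (unsigned long)va);
            return (0);
    }
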
8025 7882
8026 7883
8027 7884 /*
8028 7885  * For compatibility with AT&T and later optimizations
8029 7886 */
8030 7887 /* ARGSUSED */
8031 7888 void
8032 7889 hat_map(struct hat *hat, caddr_t addr, size_t len, uint_t flags)
8033 7890 {
8034 7891 ASSERT(hat != NULL);
8035 7892 }
8036 7893
8037 7894 /*
8038 7895 * Return the number of mappings to a particular page. This number is an
8039 7896 * approximation of the number of people sharing the page.
8040 7897 *
8041 7898 * shared hmeblks or ism hmeblks are counted as 1 mapping here.
8042 7899  * hat_page_checkshare() can be used to compare a threshold to a share
8043 7900  * count that reflects the number of region sharers, albeit at higher cost.
8044 7901 */
8045 7902 ulong_t
8046 7903 hat_page_getshare(page_t *pp)
8047 7904 {
8048 7905 page_t *spp = pp; /* start page */
8049 7906 kmutex_t *pml;
8050 7907 ulong_t cnt;
8051 7908 int index, sz = TTE64K;
8052 7909
8053 7910 /*
8054 7911 * We need to grab the mlist lock to make sure any outstanding
8055 7912 * load/unloads complete. Otherwise we could return zero
8056 7913 	 * even though the unload(s) haven't finished yet.
8057 7914 */
8058 7915 pml = sfmmu_mlist_enter(spp);
8059 7916 cnt = spp->p_share;
8060 7917
8061 7918 #ifdef VAC
8062 7919 if (kpm_enable)
8063 7920 cnt += spp->p_kpmref;
8064 7921 #endif
8065 7922 if (vpm_enable && pp->p_vpmref) {
8066 7923 cnt += 1;
8067 7924 }
8068 7925
8069 7926 /*
8070 7927 * If we have any large mappings, we count the number of
8071 7928 * mappings that this large page is part of.
8072 7929 */
8073 7930 index = PP_MAPINDEX(spp);
8074 7931 index >>= 1;
8075 7932 while (index) {
8076 7933 pp = PP_GROUPLEADER(spp, sz);
8077 7934 if ((index & 0x1) && pp != spp) {
8078 7935 cnt += pp->p_share;
8079 7936 spp = pp;
8080 7937 }
8081 7938 index >>= 1;
8082 7939 sz++;
8083 7940 }
8084 7941 sfmmu_mlist_exit(pml);
8085 7942 return (cnt);
8086 7943 }
8087 7944
8088 7945 /*
8089 7946 * Return 1 if the number of mappings exceeds sh_thresh. Return 0
8090 7947 * otherwise. Count shared hmeblks by region's refcnt.
8091 7948 */
8092 7949 int
8093 7950 hat_page_checkshare(page_t *pp, ulong_t sh_thresh)
8094 7951 {
8095 7952 kmutex_t *pml;
8096 7953 ulong_t cnt = 0;
8097 7954 int index, sz = TTE8K;
8098 7955 struct sf_hment *sfhme, *tmphme = NULL;
8099 7956 struct hme_blk *hmeblkp;
8100 7957
8101 7958 pml = sfmmu_mlist_enter(pp);
8102 7959
8103 7960 #ifdef VAC
8104 7961 if (kpm_enable)
8105 7962 cnt = pp->p_kpmref;
8106 7963 #endif
8107 7964
8108 7965 if (vpm_enable && pp->p_vpmref) {
8109 7966 cnt += 1;
8110 7967 }
8111 7968
8112 7969 if (pp->p_share + cnt > sh_thresh) {
8113 7970 sfmmu_mlist_exit(pml);
8114 7971 return (1);
8115 7972 }
8116 7973
8117 7974 index = PP_MAPINDEX(pp);
8118 7975
8119 7976 again:
8120 7977 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
8121 7978 tmphme = sfhme->hme_next;
8122 7979 if (IS_PAHME(sfhme)) {
8123 7980 continue;
8124 7981 }
8125 7982
8126 7983 hmeblkp = sfmmu_hmetohblk(sfhme);
8127 7984 if (hme_size(sfhme) != sz) {
8128 7985 continue;
8129 7986 }
8130 7987
8131 7988 if (hmeblkp->hblk_shared) {
8132 7989 sf_srd_t *srdp = hblktosrd(hmeblkp);
8133 7990 uint_t rid = hmeblkp->hblk_tag.htag_rid;
8134 7991 sf_region_t *rgnp;
8135 7992 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
8136 7993 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
8137 7994 ASSERT(srdp != NULL);
8138 7995 rgnp = srdp->srd_hmergnp[rid];
8139 7996 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp,
8140 7997 rgnp, rid);
8141 7998 cnt += rgnp->rgn_refcnt;
8142 7999 } else {
8143 8000 cnt++;
8144 8001 }
8145 8002 if (cnt > sh_thresh) {
8146 8003 sfmmu_mlist_exit(pml);
8147 8004 return (1);
8148 8005 }
8149 8006 }
8150 8007
8151 8008 index >>= 1;
8152 8009 sz++;
8153 8010 while (index) {
8154 8011 pp = PP_GROUPLEADER(pp, sz);
8155 8012 ASSERT(sfmmu_mlist_held(pp));
8156 8013 if (index & 0x1) {
8157 8014 goto again;
8158 8015 }
8159 8016 index >>= 1;
8160 8017 sz++;
8161 8018 }
8162 8019 sfmmu_mlist_exit(pml);
8163 8020 return (0);
8164 8021 }
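
hat_page_checkshare() weighs a shared hmeblk by its region's refcnt, since
one shared hmeblk stands in for every process attached to the region. A
user-space model of the thresholded count and its early exit:

    #include <stdio.h>

    struct mapping {
            int shared;                  /* shared hmeblk? */
            unsigned long rgn_refcnt;    /* region sharers if shared */
    };

    static int
    checkshare_model(const struct mapping *m, int n, unsigned long thresh)
    {
            unsigned long cnt = 0;

            for (int i = 0; i < n; i++) {
                    cnt += m[i].shared ? m[i].rgn_refcnt : 1;
                    if (cnt > thresh)
                            return (1);  /* early exit, as above */
            }
            return (0);
    }

    int
    main(void)
    {
            struct mapping maps[] = { { 0, 0 }, { 1, 12 }, { 0, 0 } };

            printf("%d\n", checkshare_model(maps, 3, 8));   /* prints 1 */
            return (0);
    }
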
8165 8022
8166 8023 /*
8167 8024 * Unload all large mappings to the pp and reset the p_szc field of every
8168 8025 * constituent page according to the remaining mappings.
8169 8026 *
8170 8027 * pp must be locked SE_EXCL. Even though no other constituent pages are
8171 8028 * locked it's legal to unload the large mappings to the pp because all
8172 8029 * constituent pages of large locked mappings have to be locked SE_SHARED.
8173 8030 * This means if we have SE_EXCL lock on one of constituent pages none of the
8174 8031 * large mappings to pp are locked.
8175 8032 *
8176 8033 * Decrease p_szc field starting from the last constituent page and ending
8177 8034 * with the root page. This method is used because other threads rely on the
8178 8035  * root's p_szc to find the lock to synchronize on. After a root page_t's p_szc
8179 8036 * is demoted then other threads will succeed in sfmmu_mlspl_enter(). This
8180 8037 * ensures that p_szc changes of the constituent pages appears atomic for all
8181 8038 * threads that use sfmmu_mlspl_enter() to examine p_szc field.
8182 8039 *
8183 8040 * This mechanism is only used for file system pages where it's not always
8184 8041 * possible to get SE_EXCL locks on all constituent pages to demote the size
8185 8042 * code (as is done for anonymous or kernel large pages).
8186 8043 *
8187 8044 * See more comments in front of sfmmu_mlspl_enter().
8188 8045 */
8189 8046 void
8190 8047 hat_page_demote(page_t *pp)
8191 8048 {
8192 8049 int index;
8193 8050 int sz;
8194 8051 cpuset_t cpuset;
8195 8052 int sync = 0;
8196 8053 page_t *rootpp;
8197 8054 struct sf_hment *sfhme;
8198 8055 struct sf_hment *tmphme = NULL;
8199 8056 struct hme_blk *hmeblkp;
8200 8057 uint_t pszc;
8201 8058 page_t *lastpp;
8202 8059 cpuset_t tset;
8203 8060 pgcnt_t npgs;
8204 8061 kmutex_t *pml;
8205 8062 kmutex_t *pmtx = NULL;
8206 8063
8207 8064 ASSERT(PAGE_EXCL(pp));
8208 8065 ASSERT(!PP_ISFREE(pp));
8209 8066 ASSERT(!PP_ISKAS(pp));
8210 8067 ASSERT(page_szc_lock_assert(pp));
8211 8068 pml = sfmmu_mlist_enter(pp);
8212 8069
8213 8070 pszc = pp->p_szc;
8214 8071 if (pszc == 0) {
8215 8072 goto out;
8216 8073 }
8217 8074
8218 8075 index = PP_MAPINDEX(pp) >> 1;
8219 8076
8220 8077 if (index) {
8221 8078 CPUSET_ZERO(cpuset);
8222 8079 sz = TTE64K;
8223 8080 sync = 1;
8224 8081 }
8225 8082
8226 8083 while (index) {
8227 8084 if (!(index & 0x1)) {
8228 8085 index >>= 1;
8229 8086 sz++;
8230 8087 continue;
8231 8088 }
8232 8089 ASSERT(sz <= pszc);
8233 8090 rootpp = PP_GROUPLEADER(pp, sz);
8234 8091 for (sfhme = rootpp->p_mapping; sfhme; sfhme = tmphme) {
8235 8092 tmphme = sfhme->hme_next;
8236 8093 ASSERT(!IS_PAHME(sfhme));
8237 8094 hmeblkp = sfmmu_hmetohblk(sfhme);
8238 8095 if (hme_size(sfhme) != sz) {
8239 8096 continue;
8240 8097 }
8241 8098 tset = sfmmu_pageunload(rootpp, sfhme, sz);
8242 8099 CPUSET_OR(cpuset, tset);
8243 8100 }
8244 8101 if (index >>= 1) {
8245 8102 sz++;
8246 8103 }
8247 8104 }
8248 8105
8249 8106 ASSERT(!PP_ISMAPPED_LARGE(pp));
8250 8107
8251 8108 if (sync) {
8252 8109 xt_sync(cpuset);
8253 8110 #ifdef VAC
8254 8111 if (PP_ISTNC(pp)) {
8255 8112 conv_tnc(rootpp, sz);
8256 8113 }
8257 8114 #endif /* VAC */
8258 8115 }
8259 8116
8260 8117 pmtx = sfmmu_page_enter(pp);
8261 8118
8262 8119 ASSERT(pp->p_szc == pszc);
8263 8120 rootpp = PP_PAGEROOT(pp);
8264 8121 ASSERT(rootpp->p_szc == pszc);
8265 8122 lastpp = PP_PAGENEXT_N(rootpp, TTEPAGES(pszc) - 1);
8266 8123
8267 8124 while (lastpp != rootpp) {
8268 8125 sz = PP_MAPINDEX(lastpp) ? fnd_mapping_sz(lastpp) : 0;
8269 8126 ASSERT(sz < pszc);
8270 8127 npgs = (sz == 0) ? 1 : TTEPAGES(sz);
8271 8128 ASSERT(P2PHASE(lastpp->p_pagenum, npgs) == npgs - 1);
8272 8129 while (--npgs > 0) {
8273 8130 lastpp->p_szc = (uchar_t)sz;
8274 8131 lastpp = PP_PAGEPREV(lastpp);
8275 8132 }
8276 8133 if (sz) {
8277 8134 /*
8278 8135  * make sure all updates to the constituent pages'
8279 8136  * p_szc fields are globally visible before the
8280 8137  * current root's p_szc is updated.
8281 8138 */
8282 8139 membar_producer();
8283 8140 }
8284 8141 lastpp->p_szc = sz;
8285 8142 ASSERT(IS_P2ALIGNED(lastpp->p_pagenum, TTEPAGES(sz)));
8286 8143 if (lastpp != rootpp) {
8287 8144 lastpp = PP_PAGEPREV(lastpp);
8288 8145 }
8289 8146 }
8290 8147 if (sz == 0) {
8291 8148 /* the loop above doesn't cover this case */
8292 8149 rootpp->p_szc = 0;
8293 8150 }
8294 8151 out:
8295 8152 ASSERT(pp->p_szc == 0);
8296 8153 if (pmtx != NULL) {
8297 8154 sfmmu_page_exit(pmtx);
8298 8155 }
8299 8156 sfmmu_mlist_exit(pml);
8300 8157 }
8301 8158
8302 8159 /*
8303 8160 * Refresh the HAT ismttecnt[] element for size szc.
8304 8161 * Caller must have set ISM busy flag to prevent mapping
8305 8162 * lists from changing while we're traversing them.
8306 8163 */
8307 8164 pgcnt_t
8308 8165 ism_tsb_entries(sfmmu_t *sfmmup, int szc)
8309 8166 {
8310 8167 ism_blk_t *ism_blkp = sfmmup->sfmmu_iblk;
8311 8168 ism_map_t *ism_map;
8312 8169 pgcnt_t npgs = 0;
8313 8170 pgcnt_t npgs_scd = 0;
8314 8171 int j;
8315 8172 sf_scd_t *scdp;
8316 8173 uchar_t rid;
8317 8174
8318 8175 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
8319 8176 scdp = sfmmup->sfmmu_scdp;
8320 8177
8321 8178 for (; ism_blkp != NULL; ism_blkp = ism_blkp->iblk_next) {
8322 8179 ism_map = ism_blkp->iblk_maps;
8323 8180 		for (j = 0; j < ISM_MAP_SLOTS && ism_map[j].imap_ismhat; j++) {
8324 8181 rid = ism_map[j].imap_rid;
8325 8182 ASSERT(rid == SFMMU_INVALID_ISMRID ||
8326 8183 rid < sfmmup->sfmmu_srdp->srd_next_ismrid);
8327 8184
8328 8185 if (scdp != NULL && rid != SFMMU_INVALID_ISMRID &&
8329 8186 SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) {
8330 8187 /* ISM is in sfmmup's SCD */
8331 8188 npgs_scd +=
8332 8189 ism_map[j].imap_ismhat->sfmmu_ttecnt[szc];
8333 8190 } else {
8334 8191 				/* ISM is not in sfmmup's SCD */
8335 8192 npgs +=
8336 8193 ism_map[j].imap_ismhat->sfmmu_ttecnt[szc];
8337 8194 }
8338 8195 }
8339 8196 }
8340 8197 sfmmup->sfmmu_ismttecnt[szc] = npgs;
8341 8198 sfmmup->sfmmu_scdismttecnt[szc] = npgs_scd;
8342 8199 return (npgs);
8343 8200 }
8344 8201
8345 8202 /*
8346 8203 * Yield the memory claim requirement for an address space.
8347 8204 *
8348 8205 * This is currently implemented as the number of bytes that have active
8349 8206 * hardware translations that have page structures. Therefore, it can
8350 8207 * underestimate the traditional resident set size, eg, if the
8351 8208 * physical page is present and the hardware translation is missing;
8352 8209 * and it can overestimate the rss, eg, if there are active
8353 8210 * translations to a frame buffer with page structs.
8354 8211 * Also, it does not take sharing into account.
8355 8212 *
8356 8213 * Note that we don't acquire locks here since this function is most often
8357 8214 * called from the clock thread.
8358 8215 */
8359 8216 size_t
8360 8217 hat_get_mapped_size(struct hat *hat)
8361 8218 {
8362 8219 size_t assize = 0;
8363 8220 int i;
8364 8221
8365 8222 if (hat == NULL)
8366 8223 return (0);
8367 8224
8368 8225 for (i = 0; i < mmu_page_sizes; i++)
8369 8226 assize += ((pgcnt_t)hat->sfmmu_ttecnt[i] +
8370 8227 (pgcnt_t)hat->sfmmu_scdrttecnt[i]) * TTEBYTES(i);
8371 8228
8372 8229 if (hat->sfmmu_iblk == NULL)
8373 8230 return (assize);
8374 8231
8375 8232 for (i = 0; i < mmu_page_sizes; i++)
8376 8233 assize += ((pgcnt_t)hat->sfmmu_ismttecnt[i] +
8377 8234 (pgcnt_t)hat->sfmmu_scdismttecnt[i]) * TTEBYTES(i);
8378 8235
8379 8236 return (assize);
8380 8237 }
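A worked example of the arithmetic (counts invented for illustration): a hat with sfmmu_ttecnt[TTE8K] == 100 and sfmmu_ttecnt[TTE4M] == 2, no SCD counts and no ISM block, would report 100 * 8192 + 2 * 4194304 = 9207808 bytes, roughly 8.8MB, even though the traditional RSS could be larger or smaller for the reasons given in the block comment.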
8381 8238
8382 8239 int
8383 8240 hat_stats_enable(struct hat *hat)
8384 8241 {
8385 8242 hatlock_t *hatlockp;
8386 8243
8387 8244 hatlockp = sfmmu_hat_enter(hat);
8388 8245 hat->sfmmu_rmstat++;
8389 8246 sfmmu_hat_exit(hatlockp);
8390 8247 return (1);
8391 8248 }
8392 8249
8393 8250 void
8394 8251 hat_stats_disable(struct hat *hat)
8395 8252 {
8396 8253 hatlock_t *hatlockp;
8397 8254
8398 8255 hatlockp = sfmmu_hat_enter(hat);
8399 8256 hat->sfmmu_rmstat--;
8400 8257 sfmmu_hat_exit(hatlockp);
8401 8258 }
8402 8259
8403 8260 /*
8404 8261 * Routines for entering or removing ourselves from the
8405 8262 * ism_hat's mapping list. This is used for both private and
8406 8263 * SCD hats.
8407 8264 */
8408 8265 static void
8409 8266 iment_add(struct ism_ment *iment, struct hat *ism_hat)
8410 8267 {
8411 8268 ASSERT(MUTEX_HELD(&ism_mlist_lock));
8412 8269
8413 8270 iment->iment_prev = NULL;
8414 8271 iment->iment_next = ism_hat->sfmmu_iment;
8415 8272 if (ism_hat->sfmmu_iment) {
8416 8273 ism_hat->sfmmu_iment->iment_prev = iment;
8417 8274 }
8418 8275 ism_hat->sfmmu_iment = iment;
8419 8276 }
8420 8277
8421 8278 static void
8422 8279 iment_sub(struct ism_ment *iment, struct hat *ism_hat)
8423 8280 {
8424 8281 ASSERT(MUTEX_HELD(&ism_mlist_lock));
8425 8282
8426 8283 if (ism_hat->sfmmu_iment == NULL) {
8427 8284 panic("ism map entry remove - no entries");
8428 8285 }
8429 8286
8430 8287 if (iment->iment_prev) {
8431 8288 ASSERT(ism_hat->sfmmu_iment != iment);
8432 8289 iment->iment_prev->iment_next = iment->iment_next;
8433 8290 } else {
8434 8291 ASSERT(ism_hat->sfmmu_iment == iment);
8435 8292 ism_hat->sfmmu_iment = iment->iment_next;
8436 8293 }
8437 8294
8438 8295 if (iment->iment_next) {
8439 8296 iment->iment_next->iment_prev = iment->iment_prev;
8440 8297 }
8441 8298
8442 8299 /*
8443 8300 * zero out the entry
8444 8301 */
8445 8302 iment->iment_next = NULL;
8446 8303 iment->iment_prev = NULL;
8447 8304 iment->iment_hat = NULL;
8448 8305 iment->iment_base_va = 0;
8449 8306 }
8450 8307
8451 8308 /*
8452 8309  * Hat_share()/unshare() return a (non-zero) error
8453 8310 * when saddr and daddr are not properly aligned.
8454 8311 *
8455 8312 * The top level mapping element determines the alignment
8456 8313 * requirement for saddr and daddr, depending on different
8457 8314 * architectures.
8458 8315 *
8459 8316 * When hat_share()/unshare() are not supported,
8460 8317 * HATOP_SHARE()/UNSHARE() return 0
8461 8318 */
8462 8319 int
8463 8320 hat_share(struct hat *sfmmup, caddr_t addr,
8464 8321 struct hat *ism_hatid, caddr_t sptaddr, size_t len, uint_t ismszc)
8465 8322 {
8466 8323 ism_blk_t *ism_blkp;
8467 8324 ism_blk_t *new_iblk;
8468 8325 ism_map_t *ism_map;
8469 8326 ism_ment_t *ism_ment;
8470 8327 int i, added;
8471 8328 hatlock_t *hatlockp;
8472 8329 int reload_mmu = 0;
8473 8330 uint_t ismshift = page_get_shift(ismszc);
8474 8331 size_t ismpgsz = page_get_pagesize(ismszc);
8475 8332 uint_t ismmask = (uint_t)ismpgsz - 1;
8476 8333 size_t sh_size = ISM_SHIFT(ismshift, len);
8477 8334 ushort_t ismhatflag;
8478 8335 hat_region_cookie_t rcookie;
8479 8336 sf_scd_t *old_scdp;
8480 8337
8481 8338 #ifdef DEBUG
8482 8339 caddr_t eaddr = addr + len;
8483 8340 #endif /* DEBUG */
8484 8341
8485 8342 ASSERT(ism_hatid != NULL && sfmmup != NULL);
8486 8343 ASSERT(sptaddr == ISMID_STARTADDR);
8487 8344 /*
8488 8345 * Check the alignment.
8489 8346 */
8490 8347 if (!ISM_ALIGNED(ismshift, addr) || !ISM_ALIGNED(ismshift, sptaddr))
8491 8348 return (EINVAL);
8492 8349
8493 8350 /*
8494 8351 * Check size alignment.
8495 8352 */
8496 8353 if (!ISM_ALIGNED(ismshift, len))
8497 8354 return (EINVAL);
8498 8355
8499 8356 /*
8500 8357 * Allocate ism_ment for the ism_hat's mapping list, and an
8501 8358 * ism map blk in case we need one. We must do our
8502 8359 * allocations before acquiring locks to prevent a deadlock
8503 8360 * in the kmem allocator on the mapping list lock.
8504 8361 */
8505 8362 new_iblk = kmem_cache_alloc(ism_blk_cache, KM_SLEEP);
8506 8363 ism_ment = kmem_cache_alloc(ism_ment_cache, KM_SLEEP);
8507 8364
8508 8365 /*
8509 8366 * Serialize ISM mappings with the ISM busy flag, and also the
8510 8367 * trap handlers.
8511 8368 */
8512 8369 sfmmu_ismhat_enter(sfmmup, 0);
8513 8370
8514 8371 /*
8515 8372 * Allocate an ism map blk if necessary.
8516 8373 */
8517 8374 if (sfmmup->sfmmu_iblk == NULL) {
8518 8375 sfmmup->sfmmu_iblk = new_iblk;
8519 8376 bzero(new_iblk, sizeof (*new_iblk));
8520 8377 new_iblk->iblk_nextpa = (uint64_t)-1;
8521 8378 membar_stst(); /* make sure next ptr visible to all CPUs */
8522 8379 sfmmup->sfmmu_ismblkpa = va_to_pa((caddr_t)new_iblk);
8523 8380 reload_mmu = 1;
8524 8381 new_iblk = NULL;
8525 8382 }
8526 8383
8527 8384 #ifdef DEBUG
8528 8385 /*
8529 8386 * Make sure mapping does not already exist.
8530 8387 */
8531 8388 ism_blkp = sfmmup->sfmmu_iblk;
8532 8389 while (ism_blkp != NULL) {
8533 8390 ism_map = ism_blkp->iblk_maps;
8534 8391 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
8535 8392 			if ((addr >= ism_start(ism_map[i]) &&
8536 8393 			    addr < ism_end(ism_map[i])) ||
8537 8394 			    (eaddr > ism_start(ism_map[i]) &&
8538 8395 			    eaddr <= ism_end(ism_map[i]))) {
8539 8396 panic("sfmmu_share: Already mapped!");
8540 8397 }
8541 8398 }
8542 8399 ism_blkp = ism_blkp->iblk_next;
8543 8400 }
8544 8401 #endif /* DEBUG */
8545 8402
8546 8403 ASSERT(ismszc >= TTE4M);
8547 8404 if (ismszc == TTE4M) {
8548 8405 ismhatflag = HAT_4M_FLAG;
8549 8406 } else if (ismszc == TTE32M) {
8550 8407 ismhatflag = HAT_32M_FLAG;
8551 8408 } else if (ismszc == TTE256M) {
8552 8409 ismhatflag = HAT_256M_FLAG;
8553 8410 }
8554 8411 /*
8555 8412 * Add mapping to first available mapping slot.
8556 8413 */
8557 8414 ism_blkp = sfmmup->sfmmu_iblk;
8558 8415 added = 0;
8559 8416 while (!added) {
8560 8417 ism_map = ism_blkp->iblk_maps;
8561 8418 for (i = 0; i < ISM_MAP_SLOTS; i++) {
8562 8419 if (ism_map[i].imap_ismhat == NULL) {
8563 8420
8564 8421 ism_map[i].imap_ismhat = ism_hatid;
8565 8422 ism_map[i].imap_vb_shift = (uchar_t)ismshift;
8566 8423 ism_map[i].imap_rid = SFMMU_INVALID_ISMRID;
8567 8424 ism_map[i].imap_hatflags = ismhatflag;
8568 8425 ism_map[i].imap_sz_mask = ismmask;
8569 8426 /*
8570 8427 				 * imap_seg is checked in ISM_CHECK to see if it is
8571 8428 				 * non-NULL; if so, the other fields are assumed valid.
8572 8429 */
8573 8430 membar_stst();
8574 8431 ism_map[i].imap_seg = (uintptr_t)addr | sh_size;
8575 8432 ism_map[i].imap_ment = ism_ment;
8576 8433
8577 8434 /*
8578 8435 * Now add ourselves to the ism_hat's
8579 8436 * mapping list.
8580 8437 */
8581 8438 ism_ment->iment_hat = sfmmup;
8582 8439 ism_ment->iment_base_va = addr;
8583 8440 ism_hatid->sfmmu_ismhat = 1;
8584 8441 mutex_enter(&ism_mlist_lock);
8585 8442 iment_add(ism_ment, ism_hatid);
8586 8443 mutex_exit(&ism_mlist_lock);
8587 8444 added = 1;
8588 8445 break;
8589 8446 }
8590 8447 }
8591 8448 if (!added && ism_blkp->iblk_next == NULL) {
8592 8449 ism_blkp->iblk_next = new_iblk;
8593 8450 new_iblk = NULL;
8594 8451 bzero(ism_blkp->iblk_next,
8595 8452 sizeof (*ism_blkp->iblk_next));
8596 8453 ism_blkp->iblk_next->iblk_nextpa = (uint64_t)-1;
8597 8454 membar_stst();
8598 8455 ism_blkp->iblk_nextpa =
8599 8456 va_to_pa((caddr_t)ism_blkp->iblk_next);
8600 8457 }
8601 8458 ism_blkp = ism_blkp->iblk_next;
8602 8459 }
8603 8460
8604 8461 /*
8605 8462 * After calling hat_join_region, sfmmup may join a new SCD or
8606 8463 * move from the old scd to a new scd, in which case, we want to
8607 8464 * shrink the sfmmup's private tsb size, i.e., pass shrink to
8608 8465 * sfmmu_check_page_sizes at the end of this routine.
8609 8466 */
8610 8467 old_scdp = sfmmup->sfmmu_scdp;
8611 8468
8612 8469 rcookie = hat_join_region(sfmmup, addr, len, (void *)ism_hatid, 0,
8613 8470 PROT_ALL, ismszc, NULL, HAT_REGION_ISM);
8614 8471 if (rcookie != HAT_INVALID_REGION_COOKIE) {
8615 8472 ism_map[i].imap_rid = (uchar_t)((uint64_t)rcookie);
8616 8473 }
8617 8474 /*
8618 8475 * Update our counters for this sfmmup's ism mappings.
8619 8476 */
8620 8477 for (i = 0; i <= ismszc; i++) {
8621 8478 if (!(disable_ism_large_pages & (1 << i)))
8622 8479 (void) ism_tsb_entries(sfmmup, i);
8623 8480 }
8624 8481
8625 8482 /*
8626 8483  * For ISM and DISM we do not support 512K pages, so we only
8627 8484 * search the 4M and 8K/64K hashes for 4 pagesize cpus, and search the
8628 8485 * 256M or 32M, and 4M and 8K/64K hashes for 6 pagesize cpus.
8629 8486 *
8630 8487 * Need to set 32M/256M ISM flags to make sure
8631 8488 * sfmmu_check_page_sizes() enables them on Panther.
8632 8489 */
8633 8490 ASSERT((disable_ism_large_pages & (1 << TTE512K)) != 0);
8634 8491
8635 8492 switch (ismszc) {
8636 8493 case TTE256M:
8637 8494 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_ISM)) {
8638 8495 hatlockp = sfmmu_hat_enter(sfmmup);
8639 8496 SFMMU_FLAGS_SET(sfmmup, HAT_256M_ISM);
8640 8497 sfmmu_hat_exit(hatlockp);
8641 8498 }
8642 8499 break;
8643 8500 case TTE32M:
8644 8501 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_ISM)) {
8645 8502 hatlockp = sfmmu_hat_enter(sfmmup);
8646 8503 SFMMU_FLAGS_SET(sfmmup, HAT_32M_ISM);
8647 8504 sfmmu_hat_exit(hatlockp);
8648 8505 }
8649 8506 break;
8650 8507 default:
8651 8508 break;
8652 8509 }
8653 8510
8654 8511 /*
8655 8512 * If we updated the ismblkpa for this HAT we must make
8656 8513 * sure all CPUs running this process reload their tsbmiss area.
8657 8514 * Otherwise they will fail to load the mappings in the tsbmiss
8658 8515 * handler and will loop calling pagefault().
8659 8516 */
8660 8517 if (reload_mmu) {
8661 8518 hatlockp = sfmmu_hat_enter(sfmmup);
8662 8519 sfmmu_sync_mmustate(sfmmup);
8663 8520 sfmmu_hat_exit(hatlockp);
8664 8521 }
8665 8522
8666 8523 sfmmu_ismhat_exit(sfmmup, 0);
8667 8524
8668 8525 /*
8669 8526 * Free up ismblk if we didn't use it.
8670 8527 */
8671 8528 if (new_iblk != NULL)
8672 8529 kmem_cache_free(ism_blk_cache, new_iblk);
8673 8530
8674 8531 /*
8675 8532 * Check TSB and TLB page sizes.
8676 8533 */
8677 8534 if (sfmmup->sfmmu_scdp != NULL && old_scdp != sfmmup->sfmmu_scdp) {
8678 8535 sfmmu_check_page_sizes(sfmmup, 0);
8679 8536 } else {
8680 8537 sfmmu_check_page_sizes(sfmmup, 1);
8681 8538 }
8682 8539 return (0);
8683 8540 }
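The EINVAL alignment checks at the top of hat_share() boil down to power-of-two masking against the ISM page size. A standalone sketch under assumptions (the ISM_PGSZ constant and the simplified ism_aligned() are invented for illustration; the real test is the ISM_ALIGNED() macro working off the shift from page_get_shift()):

#include <stdint.h>
#include <stdio.h>

#define	ISM_PGSZ	(4UL * 1024 * 1024)	/* a TTE4M ISM page size */

/* illustration only: models ISM_ALIGNED(), not the real macro */
static int
ism_aligned(uintptr_t v)
{
	return ((v & (ISM_PGSZ - 1)) == 0);
}

int
main(void)
{
	(void) printf("%d %d\n",
	    ism_aligned(0x40000000UL),	/* 1: a 4M multiple */
	    ism_aligned(0x40001000UL));	/* 0: hat_share() gives EINVAL */
	return (0);
}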
8684 8541
8685 8542 /*
8686 8543 * hat_unshare removes exactly one ism_map from
8687 8544 * this process's as. It expects multiple calls
8688 8545 * to hat_unshare for multiple shm segments.
8689 8546 */
8690 8547 void
8691 8548 hat_unshare(struct hat *sfmmup, caddr_t addr, size_t len, uint_t ismszc)
8692 8549 {
8693 8550 ism_map_t *ism_map;
8694 8551 ism_ment_t *free_ment = NULL;
8695 8552 ism_blk_t *ism_blkp;
8696 8553 struct hat *ism_hatid;
8697 8554 int found, i;
8698 8555 hatlock_t *hatlockp;
8699 8556 struct tsb_info *tsbinfo;
8700 8557 uint_t ismshift = page_get_shift(ismszc);
8701 8558 size_t sh_size = ISM_SHIFT(ismshift, len);
8702 8559 uchar_t ism_rid;
8703 8560 sf_scd_t *old_scdp;
8704 8561
8705 8562 ASSERT(ISM_ALIGNED(ismshift, addr));
8706 8563 ASSERT(ISM_ALIGNED(ismshift, len));
8707 8564 ASSERT(sfmmup != NULL);
8708 8565 ASSERT(sfmmup != ksfmmup);
8709 8566
8710 8567 ASSERT(sfmmup->sfmmu_as != NULL);
8711 8568
8712 8569 /*
8713 8570 * Make sure that during the entire time ISM mappings are removed,
8714 8571 * the trap handlers serialize behind us, and that no one else
8715 8572 * can be mucking with ISM mappings. This also lets us get away
8716 8573 * with not doing expensive cross calls to flush the TLB -- we
8717 8574 * just discard the context, flush the entire TSB, and call it
8718 8575 * a day.
8719 8576 */
8720 8577 sfmmu_ismhat_enter(sfmmup, 0);
8721 8578
8722 8579 /*
8723 8580 * Remove the mapping.
8724 8581 *
8725 8582 * We can't have any holes in the ism map.
8726 8583 * The tsb miss code while searching the ism map will
8727 8584  * stop on an empty map slot. So we must move
8728 8585  * every entry past the hole up by one, if any exist.
8729 8586 *
8730 8587 * Also empty ism map blks are not freed until the
8731 8588  * process exits. This is to prevent an MT race condition
8732 8589 * between sfmmu_unshare() and sfmmu_tsbmiss_exception().
8733 8590 */
8734 8591 found = 0;
8735 8592 ism_blkp = sfmmup->sfmmu_iblk;
8736 8593 while (!found && ism_blkp != NULL) {
8737 8594 ism_map = ism_blkp->iblk_maps;
8738 8595 for (i = 0; i < ISM_MAP_SLOTS; i++) {
8739 8596 if (addr == ism_start(ism_map[i]) &&
8740 8597 sh_size == (size_t)(ism_size(ism_map[i]))) {
8741 8598 found = 1;
8742 8599 break;
8743 8600 }
8744 8601 }
8745 8602 if (!found)
8746 8603 ism_blkp = ism_blkp->iblk_next;
8747 8604 }
8748 8605
8749 8606 if (found) {
8750 8607 ism_hatid = ism_map[i].imap_ismhat;
8751 8608 ism_rid = ism_map[i].imap_rid;
8752 8609 ASSERT(ism_hatid != NULL);
8753 8610 ASSERT(ism_hatid->sfmmu_ismhat == 1);
8754 8611
8755 8612 /*
8756 8613 * After hat_leave_region, the sfmmup may leave SCD,
8757 8614 * in which case, we want to grow the private tsb size when
8758 8615 * calling sfmmu_check_page_sizes at the end of the routine.
8759 8616 */
8760 8617 old_scdp = sfmmup->sfmmu_scdp;
8761 8618 /*
8762 8619 * Then remove ourselves from the region.
8763 8620 */
8764 8621 if (ism_rid != SFMMU_INVALID_ISMRID) {
8765 8622 hat_leave_region(sfmmup, (void *)((uint64_t)ism_rid),
8766 8623 HAT_REGION_ISM);
8767 8624 }
8768 8625
8769 8626 /*
8770 8627 * And now guarantee that any other cpu
8771 8628 * that tries to process an ISM miss
8772 8629 * will go to tl=0.
8773 8630 */
8774 8631 hatlockp = sfmmu_hat_enter(sfmmup);
8775 8632 sfmmu_invalidate_ctx(sfmmup);
8776 8633 sfmmu_hat_exit(hatlockp);
8777 8634
8778 8635 /*
8779 8636 * Remove ourselves from the ism mapping list.
8780 8637 */
8781 8638 mutex_enter(&ism_mlist_lock);
8782 8639 iment_sub(ism_map[i].imap_ment, ism_hatid);
8783 8640 mutex_exit(&ism_mlist_lock);
8784 8641 free_ment = ism_map[i].imap_ment;
8785 8642
8786 8643 /*
8787 8644 * We delete the ism map by copying
8788 8645 * the next map over the current one.
8789 8646 * We will take the next one in the maps
8790 8647 * array or from the next ism_blk.
8791 8648 */
8792 8649 while (ism_blkp != NULL) {
8793 8650 ism_map = ism_blkp->iblk_maps;
8794 8651 while (i < (ISM_MAP_SLOTS - 1)) {
8795 8652 ism_map[i] = ism_map[i + 1];
8796 8653 i++;
8797 8654 }
8798 8655 /* i == (ISM_MAP_SLOTS - 1) */
8799 8656 ism_blkp = ism_blkp->iblk_next;
8800 8657 if (ism_blkp != NULL) {
8801 8658 ism_map[i] = ism_blkp->iblk_maps[0];
8802 8659 i = 0;
8803 8660 } else {
8804 8661 ism_map[i].imap_seg = 0;
8805 8662 ism_map[i].imap_vb_shift = 0;
8806 8663 ism_map[i].imap_rid = SFMMU_INVALID_ISMRID;
8807 8664 ism_map[i].imap_hatflags = 0;
8808 8665 ism_map[i].imap_sz_mask = 0;
8809 8666 ism_map[i].imap_ismhat = NULL;
8810 8667 ism_map[i].imap_ment = NULL;
8811 8668 }
8812 8669 }
8813 8670
8814 8671 /*
8815 8672 * Now flush entire TSB for the process, since
8816 8673 * demapping page by page can be too expensive.
8817 8674 * We don't have to flush the TLB here anymore
8818 8675 * since we switch to a new TLB ctx instead.
8819 8676 * Also, there is no need to flush if the process
8820 8677 * is exiting since the TSB will be freed later.
8821 8678 */
8822 8679 if (!sfmmup->sfmmu_free) {
8823 8680 hatlockp = sfmmu_hat_enter(sfmmup);
8824 8681 for (tsbinfo = sfmmup->sfmmu_tsb; tsbinfo != NULL;
8825 8682 tsbinfo = tsbinfo->tsb_next) {
8826 8683 if (tsbinfo->tsb_flags & TSB_SWAPPED)
8827 8684 continue;
8828 8685 if (tsbinfo->tsb_flags & TSB_RELOC_FLAG) {
8829 8686 tsbinfo->tsb_flags |=
8830 8687 TSB_FLUSH_NEEDED;
8831 8688 continue;
8832 8689 }
8833 8690
8834 8691 sfmmu_inv_tsb(tsbinfo->tsb_va,
8835 8692 TSB_BYTES(tsbinfo->tsb_szc));
8836 8693 }
8837 8694 sfmmu_hat_exit(hatlockp);
8838 8695 }
8839 8696 }
8840 8697
8841 8698 /*
8842 8699 * Update our counters for this sfmmup's ism mappings.
8843 8700 */
8844 8701 for (i = 0; i <= ismszc; i++) {
8845 8702 if (!(disable_ism_large_pages & (1 << i)))
8846 8703 (void) ism_tsb_entries(sfmmup, i);
8847 8704 }
8848 8705
8849 8706 sfmmu_ismhat_exit(sfmmup, 0);
8850 8707
8851 8708 /*
8852 8709 * We must do our freeing here after dropping locks
8853 8710 * to prevent a deadlock in the kmem allocator on the
8854 8711 * mapping list lock.
8855 8712 */
8856 8713 if (free_ment != NULL)
8857 8714 kmem_cache_free(ism_ment_cache, free_ment);
8858 8715
8859 8716 /*
8860 8717 * Check TSB and TLB page sizes if the process isn't exiting.
8861 8718 */
8862 8719 if (!sfmmup->sfmmu_free) {
8863 8720 if (found && old_scdp != NULL && sfmmup->sfmmu_scdp == NULL) {
8864 8721 sfmmu_check_page_sizes(sfmmup, 1);
8865 8722 } else {
8866 8723 sfmmu_check_page_sizes(sfmmup, 0);
8867 8724 }
8868 8725 }
8869 8726 }
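The "no holes" invariant that hat_unshare() maintains, because the tsb miss code stops scanning at the first empty slot, is at bottom an array compaction. A userland model of the single-block case (SLOTS and the int payloads are invented; the real code additionally pulls the first entry of the next ism_blk across block boundaries):

#include <stdio.h>

#define	SLOTS	4

static void
delete_slot(int *map, int i)
{
	for (; i < SLOTS - 1; i++)
		map[i] = map[i + 1];	/* copy the next map over this one */
	map[SLOTS - 1] = 0;		/* 0 stands for an empty slot */
}

int
main(void)
{
	int map[SLOTS] = { 11, 22, 33, 0 };
	int i;

	delete_slot(map, 1);		/* remove entry 22 */
	for (i = 0; i < SLOTS; i++)
		(void) printf("%d ", map[i]);	/* prints: 11 33 0 0 */
	(void) printf("\n");
	return (0);
}

A scanner that stops at the first zero still visits every live entry, which is why the kernel code never leaves a gap mid-array.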
8870 8727
8871 8728 /* ARGSUSED */
8872 8729 static int
8873 8730 sfmmu_idcache_constructor(void *buf, void *cdrarg, int kmflags)
8874 8731 {
8875 8732 /* void *buf is sfmmu_t pointer */
8876 8733 bzero(buf, sizeof (sfmmu_t));
8877 8734
8878 8735 return (0);
8879 8736 }
8880 8737
8881 8738 /* ARGSUSED */
8882 8739 static void
8883 8740 sfmmu_idcache_destructor(void *buf, void *cdrarg)
8884 8741 {
8885 8742 /* void *buf is sfmmu_t pointer */
8886 8743 }
8887 8744
8888 8745 /*
8889 8746  * set up kmem hmeblks by bzeroing all members and initializing the nextpa
8890 8747 * field to be the pa of this hmeblk
8891 8748 */
8892 8749 /* ARGSUSED */
8893 8750 static int
8894 8751 sfmmu_hblkcache_constructor(void *buf, void *cdrarg, int kmflags)
8895 8752 {
8896 8753 struct hme_blk *hmeblkp;
8897 8754
8898 8755 bzero(buf, (size_t)cdrarg);
8899 8756 hmeblkp = (struct hme_blk *)buf;
8900 8757 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp);
8901 8758
8902 8759 #ifdef HBLK_TRACE
8903 8760 mutex_init(&hmeblkp->hblk_audit_lock, NULL, MUTEX_DEFAULT, NULL);
8904 8761 #endif /* HBLK_TRACE */
8905 8762
8906 8763 return (0);
8907 8764 }
8908 8765
8909 8766 /* ARGSUSED */
8910 8767 static void
8911 8768 sfmmu_hblkcache_destructor(void *buf, void *cdrarg)
8912 8769 {
8913 8770
8914 8771 #ifdef HBLK_TRACE
8915 8772
8916 8773 struct hme_blk *hmeblkp;
8917 8774
8918 8775 hmeblkp = (struct hme_blk *)buf;
8919 8776 mutex_destroy(&hmeblkp->hblk_audit_lock);
8920 8777
8921 8778 #endif /* HBLK_TRACE */
8922 8779 }
8923 8780
8924 8781 #define SFMMU_CACHE_RECLAIM_SCAN_RATIO 8
8925 8782 static int sfmmu_cache_reclaim_scan_ratio = SFMMU_CACHE_RECLAIM_SCAN_RATIO;
8926 8783 /*
8927 8784 * The kmem allocator will callback into our reclaim routine when the system
8928 8785  * is running low on memory. We traverse the hash and free up all unused but
8929 8786 * still cached hme_blks. We also traverse the free list and free them up
8930 8787 * as well.
8931 8788 */
8932 8789 /*ARGSUSED*/
8933 8790 static void
8934 8791 sfmmu_hblkcache_reclaim(void *cdrarg)
8935 8792 {
8936 8793 int i;
8937 8794 struct hmehash_bucket *hmebp;
8938 8795 struct hme_blk *hmeblkp, *nx_hblk, *pr_hblk = NULL;
8939 8796 static struct hmehash_bucket *uhmehash_reclaim_hand;
8940 8797 static struct hmehash_bucket *khmehash_reclaim_hand;
8941 8798 struct hme_blk *list = NULL, *last_hmeblkp;
8942 8799 cpuset_t cpuset = cpu_ready_set;
8943 8800 cpu_hme_pend_t *cpuhp;
8944 8801
8945 8802 /* Free up hmeblks on the cpu pending lists */
8946 8803 for (i = 0; i < NCPU; i++) {
8947 8804 cpuhp = &cpu_hme_pend[i];
8948 8805 if (cpuhp->chp_listp != NULL) {
8949 8806 mutex_enter(&cpuhp->chp_mutex);
8950 8807 if (cpuhp->chp_listp == NULL) {
8951 8808 mutex_exit(&cpuhp->chp_mutex);
8952 8809 continue;
8953 8810 }
8954 8811 for (last_hmeblkp = cpuhp->chp_listp;
8955 8812 last_hmeblkp->hblk_next != NULL;
8956 8813 last_hmeblkp = last_hmeblkp->hblk_next)
8957 8814 ;
8958 8815 last_hmeblkp->hblk_next = list;
8959 8816 list = cpuhp->chp_listp;
8960 8817 cpuhp->chp_listp = NULL;
8961 8818 cpuhp->chp_count = 0;
8962 8819 mutex_exit(&cpuhp->chp_mutex);
8963 8820 }
8964 8821
8965 8822 }
8966 8823
8967 8824 if (list != NULL) {
8968 8825 kpreempt_disable();
8969 8826 CPUSET_DEL(cpuset, CPU->cpu_id);
8970 8827 xt_sync(cpuset);
8971 8828 xt_sync(cpuset);
8972 8829 kpreempt_enable();
8973 8830 sfmmu_hblk_free(&list);
8974 8831 list = NULL;
8975 8832 }
8976 8833
8977 8834 hmebp = uhmehash_reclaim_hand;
8978 8835 if (hmebp == NULL || hmebp > &uhme_hash[UHMEHASH_SZ])
8979 8836 uhmehash_reclaim_hand = hmebp = uhme_hash;
8980 8837 uhmehash_reclaim_hand += UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio;
8981 8838
8982 8839 for (i = UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) {
8983 8840 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) {
8984 8841 hmeblkp = hmebp->hmeblkp;
8985 8842 pr_hblk = NULL;
8986 8843 while (hmeblkp) {
8987 8844 nx_hblk = hmeblkp->hblk_next;
8988 8845 if (!hmeblkp->hblk_vcnt &&
8989 8846 !hmeblkp->hblk_hmecnt) {
8990 8847 sfmmu_hblk_hash_rm(hmebp, hmeblkp,
8991 8848 pr_hblk, &list, 0);
8992 8849 } else {
8993 8850 pr_hblk = hmeblkp;
8994 8851 }
8995 8852 hmeblkp = nx_hblk;
8996 8853 }
8997 8854 SFMMU_HASH_UNLOCK(hmebp);
8998 8855 }
8999 8856 if (hmebp++ == &uhme_hash[UHMEHASH_SZ])
9000 8857 hmebp = uhme_hash;
9001 8858 }
9002 8859
9003 8860 hmebp = khmehash_reclaim_hand;
9004 8861 if (hmebp == NULL || hmebp > &khme_hash[KHMEHASH_SZ])
9005 8862 khmehash_reclaim_hand = hmebp = khme_hash;
9006 8863 khmehash_reclaim_hand += KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio;
9007 8864
9008 8865 for (i = KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) {
9009 8866 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) {
9010 8867 hmeblkp = hmebp->hmeblkp;
9011 8868 pr_hblk = NULL;
9012 8869 while (hmeblkp) {
9013 8870 nx_hblk = hmeblkp->hblk_next;
9014 8871 if (!hmeblkp->hblk_vcnt &&
9015 8872 !hmeblkp->hblk_hmecnt) {
9016 8873 sfmmu_hblk_hash_rm(hmebp, hmeblkp,
9017 8874 pr_hblk, &list, 0);
9018 8875 } else {
9019 8876 pr_hblk = hmeblkp;
9020 8877 }
9021 8878 hmeblkp = nx_hblk;
9022 8879 }
9023 8880 SFMMU_HASH_UNLOCK(hmebp);
9024 8881 }
9025 8882 if (hmebp++ == &khme_hash[KHMEHASH_SZ])
9026 8883 hmebp = khme_hash;
9027 8884 }
9028 8885 sfmmu_hblks_list_purge(&list, 0);
9029 8886 }
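The reclaim hand above amortizes the hash walk: each kmem callback scans UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio buckets starting where the previous call left off, so with the default ratio of 8 a full pass over either hash costs eight callbacks. A toy model of the clock hand (HASH_SZ is invented; the real code also re-checks the hand against &uhme_hash[UHMEHASH_SZ] and wraps mid-scan):

#include <stdio.h>

#define	HASH_SZ	64			/* invented bucket count */
#define	RATIO	8			/* default scan ratio */

static int hand;			/* persists across callbacks */

static void
reclaim_cb(void)
{
	int n = HASH_SZ / RATIO;

	(void) printf("scan buckets [%d, %d)\n", hand, hand + n);
	hand = (hand + n) % HASH_SZ;
}

int
main(void)
{
	int i;

	for (i = 0; i < RATIO; i++)
		reclaim_cb();		/* eight calls cover all buckets */
	return (0);
}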
9030 8887
9031 8888 /*
9032 8889 * sfmmu_get_ppvcolor should become a vm_machdep or hatop interface.
9033 8890  * The same goes for sfmmu_get_addrvcolor().
9034 8891 *
9035 8892 * This function will return the virtual color for the specified page. The
9036 8893  * virtual color corresponds to this page's current mapping or its last mapping.
9037 8894 * It is used by memory allocators to choose addresses with the correct
9038 8895 * alignment so vac consistency is automatically maintained. If the page
9039 8896 * has no color it returns -1.
9040 8897 */
9041 8898 /*ARGSUSED*/
9042 8899 int
9043 8900 sfmmu_get_ppvcolor(struct page *pp)
9044 8901 {
9045 8902 #ifdef VAC
9046 8903 int color;
9047 8904
9048 8905 if (!(cache & CACHE_VAC) || PP_NEWPAGE(pp)) {
9049 8906 return (-1);
9050 8907 }
9051 8908 color = PP_GET_VCOLOR(pp);
9052 8909 ASSERT(color < mmu_btop(shm_alignment));
9053 8910 return (color);
9054 8911 #else
9055 8912 return (-1);
9056 8913 #endif /* VAC */
9057 8914 }
9058 8915
9059 8916 /*
9060 8917 * This function will return the desired alignment for vac consistency
9061 8918 * (vac color) given a virtual address. If no vac is present it returns -1.
9062 8919 */
9063 8920 /*ARGSUSED*/
9064 8921 int
9065 8922 sfmmu_get_addrvcolor(caddr_t vaddr)
9066 8923 {
9067 8924 #ifdef VAC
9068 8925 if (cache & CACHE_VAC) {
9069 8926 return (addr_to_vcolor(vaddr));
9070 8927 } else {
9071 8928 return (-1);
9072 8929 }
9073 8930 #else
9074 8931 return (-1);
9075 8932 #endif /* VAC */
9076 8933 }
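For readers unfamiliar with VAC coloring: the color of an address is, in effect, its page-index bits below the cache span, so two virtual addresses alias in a direct-mapped VAC exactly when they have the same color. A sketch under assumed parameters (the 64K VAC span, 8K page size, and addr_color() are illustrative inventions; the real computation is the addr_to_vcolor() macro):

#include <stdint.h>
#include <stdio.h>

#define	PAGESHIFT	13			/* 8K pages */
#define	VAC_SPAN	(64 * 1024)		/* hypothetical VAC size */
#define	NCOLORS		(VAC_SPAN >> PAGESHIFT)	/* 8 colors */

static int
addr_color(uintptr_t va)
{
	return ((int)((va >> PAGESHIFT) & (NCOLORS - 1)));
}

int
main(void)
{
	/* 0x2000 and 0x12000 are one VAC span apart: same color */
	(void) printf("%d %d %d\n", addr_color(0x2000UL),
	    addr_color(0x4000UL), addr_color(0x12000UL));	/* 1 2 1 */
	return (0);
}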
9077 8934
9078 8935 #ifdef VAC
9079 8936 /*
9080 8937 * Check for conflicts.
9081 8938  * A conflict exists if the new and existing mappings do not match in
9082 8939  * their "shm_alignment" fields. If conflicts exist, the existing mappings
9083 8940 * are flushed unless one of them is locked. If one of them is locked, then
9084 8941 * the mappings are flushed and converted to non-cacheable mappings.
9085 8942 */
9086 8943 static void
9087 8944 sfmmu_vac_conflict(struct hat *hat, caddr_t addr, page_t *pp)
9088 8945 {
9089 8946 struct hat *tmphat;
9090 8947 struct sf_hment *sfhmep, *tmphme = NULL;
9091 8948 struct hme_blk *hmeblkp;
9092 8949 int vcolor;
9093 8950 tte_t tte;
9094 8951
9095 8952 ASSERT(sfmmu_mlist_held(pp));
9096 8953 ASSERT(!PP_ISNC(pp)); /* page better be cacheable */
9097 8954
9098 8955 vcolor = addr_to_vcolor(addr);
9099 8956 if (PP_NEWPAGE(pp)) {
9100 8957 PP_SET_VCOLOR(pp, vcolor);
9101 8958 return;
9102 8959 }
9103 8960
9104 8961 if (PP_GET_VCOLOR(pp) == vcolor) {
9105 8962 return;
9106 8963 }
9107 8964
9108 8965 if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) {
9109 8966 /*
9110 8967 * Previous user of page had a different color
9111 8968 * but since there are no current users
9112 8969 * we just flush the cache and change the color.
9113 8970 */
9114 8971 SFMMU_STAT(sf_pgcolor_conflict);
9115 8972 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
9116 8973 PP_SET_VCOLOR(pp, vcolor);
9117 8974 return;
9118 8975 }
9119 8976
9120 8977 /*
9121 8978 * If we get here we have a vac conflict with a current
9122 8979 * mapping. VAC conflict policy is as follows.
9123 8980 * - The default is to unload the other mappings unless:
9124 8981 * - If we have a large mapping we uncache the page.
9125 8982 * We need to uncache the rest of the large page too.
9126 8983 * - If any of the mappings are locked we uncache the page.
9127 8984 * - If the requested mapping is inconsistent
9128 8985 * with another mapping and that mapping
9129 8986 * is in the same address space we have to
9130 8987 * make it non-cached. The default thing
9131 8988 * to do is unload the inconsistent mapping
9132 8989 * but if they are in the same address space
9133 8990 * we run the risk of unmapping the pc or the
9134 8991 * stack which we will use as we return to the user,
9135 8992 * in which case we can then fault on the thing
9136 8993 * we just unloaded and get into an infinite loop.
9137 8994 */
9138 8995 if (PP_ISMAPPED_LARGE(pp)) {
9139 8996 int sz;
9140 8997
9141 8998 /*
9142 8999 * Existing mapping is for big pages. We don't unload
9143 9000 * existing big mappings to satisfy new mappings.
9144 9001 * Always convert all mappings to TNC.
9145 9002 */
9146 9003 sz = fnd_mapping_sz(pp);
9147 9004 pp = PP_GROUPLEADER(pp, sz);
9148 9005 SFMMU_STAT_ADD(sf_uncache_conflict, TTEPAGES(sz));
9149 9006 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH,
9150 9007 TTEPAGES(sz));
9151 9008
9152 9009 return;
9153 9010 }
9154 9011
9155 9012 /*
9156 9013 	 * check if any mapping is in the same as (address space) or is
9157 9014 	 * locked, since in either case we need to uncache.
9158 9015 */
9159 9016 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
9160 9017 tmphme = sfhmep->hme_next;
9161 9018 if (IS_PAHME(sfhmep))
9162 9019 continue;
9163 9020 hmeblkp = sfmmu_hmetohblk(sfhmep);
9164 9021 tmphat = hblktosfmmu(hmeblkp);
9165 9022 sfmmu_copytte(&sfhmep->hme_tte, &tte);
9166 9023 ASSERT(TTE_IS_VALID(&tte));
9167 9024 if (hmeblkp->hblk_shared || tmphat == hat ||
9168 9025 hmeblkp->hblk_lckcnt) {
9169 9026 /*
9170 9027 * We have an uncache conflict
9171 9028 */
9172 9029 SFMMU_STAT(sf_uncache_conflict);
9173 9030 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1);
9174 9031 return;
9175 9032 }
9176 9033 }
9177 9034
9178 9035 /*
9179 9036 * We have an unload conflict
9180 9037 * We have already checked for LARGE mappings, therefore
9181 9038 * the remaining mapping(s) must be TTE8K.
9182 9039 */
9183 9040 SFMMU_STAT(sf_unload_conflict);
9184 9041
9185 9042 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
9186 9043 tmphme = sfhmep->hme_next;
9187 9044 if (IS_PAHME(sfhmep))
9188 9045 continue;
9189 9046 hmeblkp = sfmmu_hmetohblk(sfhmep);
9190 9047 ASSERT(!hmeblkp->hblk_shared);
9191 9048 (void) sfmmu_pageunload(pp, sfhmep, TTE8K);
9192 9049 }
9193 9050
9194 9051 if (PP_ISMAPPED_KPM(pp))
9195 9052 sfmmu_kpm_vac_unload(pp, addr);
9196 9053
9197 9054 /*
9198 9055 * Unloads only do TLB flushes so we need to flush the
9199 9056 * cache here.
9200 9057 */
9201 9058 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
9202 9059 PP_SET_VCOLOR(pp, vcolor);
9203 9060 }
9204 9061
9205 9062 /*
9206 9063 * Whenever a mapping is unloaded and the page is in TNC state,
9207 9064 * we see if the page can be made cacheable again. 'pp' is
9208 9065 * the page that we just unloaded a mapping from, the size
9209 9066 * of mapping that was unloaded is 'ottesz'.
9210 9067 * Remark:
9211 9068  * The recache policy for mpss pages can cause a performance problem
9212 9069 * under the following circumstances:
9213 9070 * . A large page in uncached mode has just been unmapped.
9214 9071 * . All constituent pages are TNC due to a conflicting small mapping.
9215 9072 * . There are many other, non conflicting, small mappings around for
9216 9073 * a lot of the constituent pages.
9217 9074 * . We're called w/ the "old" groupleader page and the old ottesz,
9218 9075  * but this is irrelevant, since we're no longer "PP_ISMAPPED_LARGE", so
9219 9076 * we end up w/ TTE8K or npages == 1.
9220 9077 * . We call tst_tnc w/ the old groupleader only, and if there is no
9221 9078 * conflict, we re-cache only this page.
9222 9079 * . All other small mappings are not checked and will be left in TNC mode.
9223 9080 * The problem is not very serious because:
9224 9081 * . mpss is actually only defined for heap and stack, so the probability
9225 9082 * is not very high that a large page mapping exists in parallel to a small
9226 9083 * one (this is possible, but seems to be bad programming style in the
9227 9084  * application).
9228 9085 * . The problem gets a little bit more serious, when those TNC pages
9229 9086 * have to be mapped into kernel space, e.g. for networking.
9230 9087 * . When VAC alias conflicts occur in applications, this is regarded
9231 9088 * as an application bug. So if kstat's show them, the appl should
9232 9089 * be changed anyway.
9233 9090 */
9234 9091 void
9235 9092 conv_tnc(page_t *pp, int ottesz)
9236 9093 {
9237 9094 int cursz, dosz;
9238 9095 pgcnt_t curnpgs, dopgs;
9239 9096 pgcnt_t pg64k;
9240 9097 page_t *pp2;
9241 9098
9242 9099 /*
9243 9100 * Determine how big a range we check for TNC and find
9244 9101 * leader page. cursz is the size of the biggest
9245 9102 * mapping that still exist on 'pp'.
9246 9103 */
9247 9104 if (PP_ISMAPPED_LARGE(pp)) {
9248 9105 cursz = fnd_mapping_sz(pp);
9249 9106 } else {
9250 9107 cursz = TTE8K;
9251 9108 }
9252 9109
9253 9110 if (ottesz >= cursz) {
9254 9111 dosz = ottesz;
9255 9112 pp2 = pp;
9256 9113 } else {
9257 9114 dosz = cursz;
9258 9115 pp2 = PP_GROUPLEADER(pp, dosz);
9259 9116 }
9260 9117
9261 9118 pg64k = TTEPAGES(TTE64K);
9262 9119 dopgs = TTEPAGES(dosz);
9263 9120
9264 9121 ASSERT(dopgs == 1 || ((dopgs & (pg64k - 1)) == 0));
9265 9122
9266 9123 while (dopgs != 0) {
9267 9124 curnpgs = TTEPAGES(cursz);
9268 9125 if (tst_tnc(pp2, curnpgs)) {
9269 9126 SFMMU_STAT_ADD(sf_recache, curnpgs);
9270 9127 sfmmu_page_cache_array(pp2, HAT_CACHE, CACHE_NO_FLUSH,
9271 9128 curnpgs);
9272 9129 }
9273 9130
9274 9131 ASSERT(dopgs >= curnpgs);
9275 9132 dopgs -= curnpgs;
9276 9133
9277 9134 if (dopgs == 0) {
9278 9135 break;
9279 9136 }
9280 9137
9281 9138 pp2 = PP_PAGENEXT_N(pp2, curnpgs);
9282 9139 if (((dopgs & (pg64k - 1)) == 0) && PP_ISMAPPED_LARGE(pp2)) {
9283 9140 cursz = fnd_mapping_sz(pp2);
9284 9141 } else {
9285 9142 cursz = TTE8K;
9286 9143 }
9287 9144 }
9288 9145 }
9289 9146
9290 9147 /*
9291 9148 * Returns 1 if page(s) can be converted from TNC to cacheable setting,
9292 9149 * returns 0 otherwise. Note that oaddr argument is valid for only
9293 9150  * returns 0 otherwise. Note that the oaddr argument is valid only for
9294 9151  * 8K pages.
9295 9152 int
9296 9153 tst_tnc(page_t *pp, pgcnt_t npages)
9297 9154 {
9298 9155 struct sf_hment *sfhme;
9299 9156 struct hme_blk *hmeblkp;
9300 9157 tte_t tte;
9301 9158 caddr_t vaddr;
9302 9159 int clr_valid = 0;
9303 9160 int color, color1, bcolor;
9304 9161 int i, ncolors;
9305 9162
9306 9163 ASSERT(pp != NULL);
9307 9164 ASSERT(!(cache & CACHE_WRITEBACK));
9308 9165
9309 9166 if (npages > 1) {
9310 9167 ncolors = CACHE_NUM_COLOR;
9311 9168 }
9312 9169
9313 9170 for (i = 0; i < npages; i++) {
9314 9171 ASSERT(sfmmu_mlist_held(pp));
9315 9172 ASSERT(PP_ISTNC(pp));
9316 9173 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR);
9317 9174
9318 9175 if (PP_ISPNC(pp)) {
9319 9176 return (0);
9320 9177 }
9321 9178
9322 9179 clr_valid = 0;
9323 9180 if (PP_ISMAPPED_KPM(pp)) {
9324 9181 caddr_t kpmvaddr;
9325 9182
9326 9183 ASSERT(kpm_enable);
9327 9184 kpmvaddr = hat_kpm_page2va(pp, 1);
9328 9185 ASSERT(!(npages > 1 && IS_KPM_ALIAS_RANGE(kpmvaddr)));
9329 9186 color1 = addr_to_vcolor(kpmvaddr);
9330 9187 clr_valid = 1;
9331 9188 }
9332 9189
9333 9190 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) {
9334 9191 if (IS_PAHME(sfhme))
9335 9192 continue;
9336 9193 hmeblkp = sfmmu_hmetohblk(sfhme);
9337 9194
9338 9195 sfmmu_copytte(&sfhme->hme_tte, &tte);
9339 9196 ASSERT(TTE_IS_VALID(&tte));
9340 9197
9341 9198 vaddr = tte_to_vaddr(hmeblkp, tte);
9342 9199 color = addr_to_vcolor(vaddr);
9343 9200
9344 9201 if (npages > 1) {
9345 9202 /*
9346 9203 * If there is a big mapping, make sure
9347 9204 * 8K mapping is consistent with the big
9348 9205 * mapping.
9349 9206 */
9350 9207 bcolor = i % ncolors;
9351 9208 if (color != bcolor) {
9352 9209 return (0);
9353 9210 }
9354 9211 }
9355 9212 if (!clr_valid) {
9356 9213 clr_valid = 1;
9357 9214 color1 = color;
9358 9215 }
9359 9216
9360 9217 if (color1 != color) {
9361 9218 return (0);
9362 9219 }
9363 9220 }
9364 9221
9365 9222 pp = PP_PAGENEXT(pp);
9366 9223 }
9367 9224
9368 9225 return (1);
9369 9226 }
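Continuing the hypothetical 8-color VAC from the earlier sketch: a TNC page mapped at 0x2000 and 0x12000 has color 1 at every mapping, so tst_tnc() returns 1 and the page may be recached; add a third mapping at 0x4000 (color 2) and the color1 != color test fails, tst_tnc() returns 0, and the page stays TNC.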
9370 9227
9371 9228 void
9372 9229 sfmmu_page_cache_array(page_t *pp, int flags, int cache_flush_flag,
9373 9230 pgcnt_t npages)
9374 9231 {
9375 9232 kmutex_t *pmtx;
9376 9233 int i, ncolors, bcolor;
9377 9234 kpm_hlk_t *kpmp;
9378 9235 cpuset_t cpuset;
9379 9236
9380 9237 ASSERT(pp != NULL);
9381 9238 ASSERT(!(cache & CACHE_WRITEBACK));
9382 9239
9383 9240 kpmp = sfmmu_kpm_kpmp_enter(pp, npages);
9384 9241 pmtx = sfmmu_page_enter(pp);
9385 9242
9386 9243 /*
9387 9244 * Fast path caching single unmapped page
9388 9245 */
9389 9246 if (npages == 1 && !PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp) &&
9390 9247 flags == HAT_CACHE) {
9391 9248 PP_CLRTNC(pp);
9392 9249 PP_CLRPNC(pp);
9393 9250 sfmmu_page_exit(pmtx);
9394 9251 sfmmu_kpm_kpmp_exit(kpmp);
9395 9252 return;
9396 9253 }
9397 9254
9398 9255 /*
9399 9256 * We need to capture all cpus in order to change cacheability
9400 9257 * because we can't allow one cpu to access the same physical
9401 9258 	 * page using a cacheable and a non-cacheable mapping at the same
9402 9259 	 * time. Since we may end up walking the ism mapping list, we
9403 9260 	 * have to grab its lock now because we can't do so after all the
9404 9261 	 * cpus have been captured.
9405 9262 */
9406 9263 sfmmu_hat_lock_all();
9407 9264 mutex_enter(&ism_mlist_lock);
9408 9265 kpreempt_disable();
9409 9266 cpuset = cpu_ready_set;
9410 9267 xc_attention(cpuset);
9411 9268
9412 9269 if (npages > 1) {
9413 9270 /*
9414 9271 * Make sure all colors are flushed since the
9415 9272 * sfmmu_page_cache() only flushes one color-
9416 9273 * it does not know big pages.
9417 9274 */
9418 9275 ncolors = CACHE_NUM_COLOR;
9419 9276 if (flags & HAT_TMPNC) {
9420 9277 for (i = 0; i < ncolors; i++) {
9421 9278 sfmmu_cache_flushcolor(i, pp->p_pagenum);
9422 9279 }
9423 9280 cache_flush_flag = CACHE_NO_FLUSH;
9424 9281 }
9425 9282 }
9426 9283
9427 9284 for (i = 0; i < npages; i++) {
9428 9285
9429 9286 ASSERT(sfmmu_mlist_held(pp));
9430 9287
9431 9288 if (!(flags == HAT_TMPNC && PP_ISTNC(pp))) {
9432 9289
9433 9290 if (npages > 1) {
9434 9291 bcolor = i % ncolors;
9435 9292 } else {
9436 9293 bcolor = NO_VCOLOR;
9437 9294 }
9438 9295
9439 9296 sfmmu_page_cache(pp, flags, cache_flush_flag,
9440 9297 bcolor);
9441 9298 }
9442 9299
9443 9300 pp = PP_PAGENEXT(pp);
9444 9301 }
9445 9302
9446 9303 xt_sync(cpuset);
9447 9304 xc_dismissed(cpuset);
9448 9305 mutex_exit(&ism_mlist_lock);
9449 9306 sfmmu_hat_unlock_all();
9450 9307 sfmmu_page_exit(pmtx);
9451 9308 sfmmu_kpm_kpmp_exit(kpmp);
9452 9309 kpreempt_enable();
9453 9310 }
9454 9311
9455 9312 /*
9456 9313 * This function changes the virtual cacheability of all mappings to a
9457 9314 * particular page. When changing from uncache to cacheable the mappings will
9458 9315 * only be changed if all of them have the same virtual color.
9459 9316 * We need to flush the cache in all cpus. It is possible that
9460 9317  * a process referenced a page as cacheable but has since exited
9461 9318  * and cleared the mapping list. We still have to flush it, but have no
9462 9319  * state, so flushing all cpus is the only alternative.
9463 9320 */
9464 9321 static void
9465 9322 sfmmu_page_cache(page_t *pp, int flags, int cache_flush_flag, int bcolor)
9466 9323 {
9467 9324 struct sf_hment *sfhme;
9468 9325 struct hme_blk *hmeblkp;
9469 9326 sfmmu_t *sfmmup;
9470 9327 tte_t tte, ttemod;
9471 9328 caddr_t vaddr;
9472 9329 int ret, color;
9473 9330 pfn_t pfn;
9474 9331
9475 9332 color = bcolor;
9476 9333 pfn = pp->p_pagenum;
9477 9334
9478 9335 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) {
9479 9336
9480 9337 if (IS_PAHME(sfhme))
9481 9338 continue;
9482 9339 hmeblkp = sfmmu_hmetohblk(sfhme);
9483 9340
9484 9341 sfmmu_copytte(&sfhme->hme_tte, &tte);
9485 9342 ASSERT(TTE_IS_VALID(&tte));
9486 9343 vaddr = tte_to_vaddr(hmeblkp, tte);
9487 9344 color = addr_to_vcolor(vaddr);
9488 9345
9489 9346 #ifdef DEBUG
9490 9347 if ((flags & HAT_CACHE) && bcolor != NO_VCOLOR) {
9491 9348 ASSERT(color == bcolor);
9492 9349 }
9493 9350 #endif
9494 9351
9495 9352 ASSERT(flags != HAT_TMPNC || color == PP_GET_VCOLOR(pp));
9496 9353
9497 9354 ttemod = tte;
9498 9355 if (flags & (HAT_UNCACHE | HAT_TMPNC)) {
9499 9356 TTE_CLR_VCACHEABLE(&ttemod);
9500 9357 } else { /* flags & HAT_CACHE */
9501 9358 TTE_SET_VCACHEABLE(&ttemod);
9502 9359 }
9503 9360 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
9504 9361 if (ret < 0) {
9505 9362 /*
9506 9363 * Since all cpus are captured modifytte should not
9507 9364 * fail.
9508 9365 */
9509 9366 panic("sfmmu_page_cache: write to tte failed");
9510 9367 }
9511 9368
9512 9369 sfmmup = hblktosfmmu(hmeblkp);
9513 9370 if (cache_flush_flag == CACHE_FLUSH) {
9514 9371 /*
9515 9372 * Flush TSBs, TLBs and caches
9516 9373 */
9517 9374 if (hmeblkp->hblk_shared) {
9518 9375 sf_srd_t *srdp = (sf_srd_t *)sfmmup;
9519 9376 uint_t rid = hmeblkp->hblk_tag.htag_rid;
9520 9377 sf_region_t *rgnp;
9521 9378 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
9522 9379 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
9523 9380 ASSERT(srdp != NULL);
9524 9381 rgnp = srdp->srd_hmergnp[rid];
9525 9382 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
9526 9383 srdp, rgnp, rid);
9527 9384 (void) sfmmu_rgntlb_demap(vaddr, rgnp,
9528 9385 hmeblkp, 0);
9529 9386 sfmmu_cache_flush(pfn, addr_to_vcolor(vaddr));
9530 9387 } else if (sfmmup->sfmmu_ismhat) {
9531 9388 if (flags & HAT_CACHE) {
9532 9389 SFMMU_STAT(sf_ism_recache);
9533 9390 } else {
9534 9391 SFMMU_STAT(sf_ism_uncache);
9535 9392 }
9536 9393 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp,
9537 9394 pfn, CACHE_FLUSH);
9538 9395 } else {
9539 9396 sfmmu_tlbcache_demap(vaddr, sfmmup, hmeblkp,
9540 9397 pfn, 0, FLUSH_ALL_CPUS, CACHE_FLUSH, 1);
9541 9398 }
9542 9399
9543 9400 /*
9544 9401 * all cache entries belonging to this pfn are
9545 9402 * now flushed.
9546 9403 */
9547 9404 cache_flush_flag = CACHE_NO_FLUSH;
9548 9405 } else {
9549 9406 /*
9550 9407 * Flush only TSBs and TLBs.
9551 9408 */
9552 9409 if (hmeblkp->hblk_shared) {
9553 9410 sf_srd_t *srdp = (sf_srd_t *)sfmmup;
9554 9411 uint_t rid = hmeblkp->hblk_tag.htag_rid;
9555 9412 sf_region_t *rgnp;
9556 9413 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
9557 9414 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
9558 9415 ASSERT(srdp != NULL);
9559 9416 rgnp = srdp->srd_hmergnp[rid];
9560 9417 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
9561 9418 srdp, rgnp, rid);
9562 9419 (void) sfmmu_rgntlb_demap(vaddr, rgnp,
9563 9420 hmeblkp, 0);
9564 9421 } else if (sfmmup->sfmmu_ismhat) {
9565 9422 if (flags & HAT_CACHE) {
9566 9423 SFMMU_STAT(sf_ism_recache);
9567 9424 } else {
9568 9425 SFMMU_STAT(sf_ism_uncache);
9569 9426 }
9570 9427 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp,
9571 9428 pfn, CACHE_NO_FLUSH);
9572 9429 } else {
9573 9430 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 1);
9574 9431 }
9575 9432 }
9576 9433 }
9577 9434
9578 9435 if (PP_ISMAPPED_KPM(pp))
9579 9436 sfmmu_kpm_page_cache(pp, flags, cache_flush_flag);
9580 9437
9581 9438 switch (flags) {
9582 9439
9583 9440 default:
9584 9441 		panic("sfmmu_page_cache: unknown flags");
9585 9442 break;
9586 9443
9587 9444 case HAT_CACHE:
9588 9445 PP_CLRTNC(pp);
9589 9446 PP_CLRPNC(pp);
9590 9447 PP_SET_VCOLOR(pp, color);
9591 9448 break;
9592 9449
9593 9450 case HAT_TMPNC:
9594 9451 PP_SETTNC(pp);
9595 9452 PP_SET_VCOLOR(pp, NO_VCOLOR);
9596 9453 break;
9597 9454
9598 9455 case HAT_UNCACHE:
9599 9456 PP_SETPNC(pp);
9600 9457 PP_CLRTNC(pp);
9601 9458 PP_SET_VCOLOR(pp, NO_VCOLOR);
9602 9459 break;
9603 9460 }
9604 9461 }
9605 9462 #endif /* VAC */
9606 9463
9607 9464
9608 9465 /*
9609 9466 * Wrapper routine used to return a context.
9610 9467 *
9611 9468 * It's the responsibility of the caller to guarantee that the
9612 9469 * process serializes on calls here by taking the HAT lock for
9613 9470 * the hat.
9614 9471 *
9615 9472 */
9616 9473 static void
9617 9474 sfmmu_get_ctx(sfmmu_t *sfmmup)
9618 9475 {
9619 9476 mmu_ctx_t *mmu_ctxp;
9620 9477 uint_t pstate_save;
9621 9478 int ret;
9622 9479
9623 9480 ASSERT(sfmmu_hat_lock_held(sfmmup));
9624 9481 ASSERT(sfmmup != ksfmmup);
9625 9482
9626 9483 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID)) {
9627 9484 sfmmu_setup_tsbinfo(sfmmup);
9628 9485 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ALLCTX_INVALID);
9629 9486 }
9630 9487
9631 9488 kpreempt_disable();
9632 9489
9633 9490 mmu_ctxp = CPU_MMU_CTXP(CPU);
9634 9491 ASSERT(mmu_ctxp);
9635 9492 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms);
9636 9493 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]);
9637 9494
9638 9495 /*
9639 9496 	 * Do a wrap-around if cnum reaches the max # of cnums supported by an MMU.
9640 9497 */
9641 9498 if (mmu_ctxp->mmu_cnum == mmu_ctxp->mmu_nctxs)
9642 9499 sfmmu_ctx_wrap_around(mmu_ctxp, B_TRUE);
9643 9500
9644 9501 /*
9645 9502 * Let the MMU set up the page sizes to use for
9646 9503 * this context in the TLB. Don't program 2nd dtlb for ism hat.
9647 9504 */
9648 9505 if ((&mmu_set_ctx_page_sizes) && (sfmmup->sfmmu_ismhat == 0)) {
9649 9506 mmu_set_ctx_page_sizes(sfmmup);
9650 9507 }
9651 9508
9652 9509 /*
9653 9510 * sfmmu_alloc_ctx and sfmmu_load_mmustate will be performed with
9654 9511 	 * interrupts disabled to prevent a race condition with the wrap-around
9655 9512 	 * ctx invalidation. In sun4v, ctx invalidation also involves
9656 9513 	 * a HV call to set the number of TSBs to 0. If interrupts are
9657 9514 	 * re-enabled before sfmmu_load_mmustate is complete, TSBs may
9658 9515 	 * become assigned to INVALID_CONTEXT. This is not allowed.
9659 9516 */
9660 9517 pstate_save = sfmmu_disable_intrs();
9661 9518
9662 9519 if (sfmmu_alloc_ctx(sfmmup, 1, CPU, SFMMU_PRIVATE) &&
9663 9520 sfmmup->sfmmu_scdp != NULL) {
9664 9521 sf_scd_t *scdp = sfmmup->sfmmu_scdp;
9665 9522 sfmmu_t *scsfmmup = scdp->scd_sfmmup;
9666 9523 ret = sfmmu_alloc_ctx(scsfmmup, 1, CPU, SFMMU_SHARED);
9667 9524 /* debug purpose only */
9668 9525 ASSERT(!ret || scsfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum
9669 9526 != INVALID_CONTEXT);
9670 9527 }
9671 9528 sfmmu_load_mmustate(sfmmup);
9672 9529
9673 9530 sfmmu_enable_intrs(pstate_save);
9674 9531
9675 9532 kpreempt_enable();
9676 9533 }
9677 9534
9678 9535 /*
9679 9536 * When all cnums are used up in a MMU, cnum will wrap around to the
9680 9537 * next generation and start from 2.
9681 9538 */
9682 9539 static void
9683 9540 sfmmu_ctx_wrap_around(mmu_ctx_t *mmu_ctxp, boolean_t reset_cnum)
9684 9541 {
9685 9542
9686 9543 	/* caller must have disabled preemption */
9687 9544 ASSERT(curthread->t_preempt >= 1);
9688 9545 ASSERT(mmu_ctxp != NULL);
9689 9546
9690 9547 /* acquire Per-MMU (PM) spin lock */
9691 9548 mutex_enter(&mmu_ctxp->mmu_lock);
9692 9549
9693 9550 /* re-check to see if wrap-around is needed */
9694 9551 if (mmu_ctxp->mmu_cnum < mmu_ctxp->mmu_nctxs)
9695 9552 goto done;
9696 9553
9697 9554 SFMMU_MMU_STAT(mmu_wrap_around);
9698 9555
9699 9556 /* update gnum */
9700 9557 ASSERT(mmu_ctxp->mmu_gnum != 0);
9701 9558 mmu_ctxp->mmu_gnum++;
9702 9559 if (mmu_ctxp->mmu_gnum == 0 ||
9703 9560 mmu_ctxp->mmu_gnum > MAX_SFMMU_GNUM_VAL) {
9704 9561 		cmn_err(CE_PANIC, "mmu_gnum of mmu_ctx 0x%p is out of bounds.",
9705 9562 (void *)mmu_ctxp);
9706 9563 }
9707 9564
9708 9565 if (mmu_ctxp->mmu_ncpus > 1) {
9709 9566 cpuset_t cpuset;
9710 9567
9711 9568 membar_enter(); /* make sure updated gnum visible */
9712 9569
9713 9570 SFMMU_XCALL_STATS(NULL);
9714 9571
9715 9572 /* xcall to others on the same MMU to invalidate ctx */
9716 9573 cpuset = mmu_ctxp->mmu_cpuset;
9717 9574 ASSERT(CPU_IN_SET(cpuset, CPU->cpu_id) || !reset_cnum);
9718 9575 CPUSET_DEL(cpuset, CPU->cpu_id);
9719 9576 CPUSET_AND(cpuset, cpu_ready_set);
9720 9577
9721 9578 /*
9722 9579 * Pass in INVALID_CONTEXT as the first parameter to
9723 9580 * sfmmu_raise_tsb_exception, which invalidates the context
9724 9581 * of any process running on the CPUs in the MMU.
9725 9582 */
9726 9583 xt_some(cpuset, sfmmu_raise_tsb_exception,
9727 9584 INVALID_CONTEXT, INVALID_CONTEXT);
9728 9585 xt_sync(cpuset);
9729 9586
9730 9587 SFMMU_MMU_STAT(mmu_tsb_raise_exception);
9731 9588 }
9732 9589
9733 9590 if (sfmmu_getctx_sec() != INVALID_CONTEXT) {
9734 9591 sfmmu_setctx_sec(INVALID_CONTEXT);
9735 9592 sfmmu_clear_utsbinfo();
9736 9593 }
9737 9594
9738 9595 /*
9739 9596 	 * No xcall is needed here. For sun4u systems all CPUs in a context
9740 9597 	 * domain share a single physical MMU, therefore it's enough to flush
9741 9598 	 * the TLB on the local CPU. On sun4v systems we use 1 global context
9742 9599 	 * domain and flush all remote TLBs in the sfmmu_raise_tsb_exception
9743 9600 	 * handler. Note that vtag_flushall_uctxs() is called
9744 9601 	 * for Ultra II machines, where the equivalent flushall functionality
9745 9602 * is implemented in SW, and only user ctx TLB entries are flushed.
9746 9603 */
9747 9604 if (&vtag_flushall_uctxs != NULL) {
9748 9605 vtag_flushall_uctxs();
9749 9606 } else {
9750 9607 vtag_flushall();
9751 9608 }
9752 9609
9753 9610 /* reset mmu cnum, skips cnum 0 and 1 */
9754 9611 if (reset_cnum == B_TRUE)
9755 9612 mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS;
9756 9613
9757 9614 done:
9758 9615 mutex_exit(&mmu_ctxp->mmu_lock);
9759 9616 }
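The wrap-around scheme is easiest to see as a (gnum, cnum) pair: a cached context is usable only while its generation matches the MMU's current generation, and a wrap bumps the generation and restarts cnum above the locked contexts. A toy allocator under invented sizes (an NCTXS of 16 here; real MMUs supply thousands of contexts, and the kernel version also xcalls other CPUs and flushes the TLB, as above):

#include <stdio.h>

#define	NCTXS		16	/* invented stand-in for mmu_nctxs */
#define	LOCKED_CTXS	2	/* cnum 0 and 1 are never handed out */

static unsigned gnum = 1, next_cnum = LOCKED_CTXS;

static void
alloc_ctx(unsigned *g, unsigned *c)
{
	if (next_cnum == NCTXS) {	/* wrap-around */
		gnum++;			/* every stale (gnum, cnum) dies */
		next_cnum = LOCKED_CTXS;
	}
	*g = gnum;
	*c = next_cnum++;
}

int
main(void)
{
	unsigned g, c;
	int i;

	for (i = 0; i < 20; i++)
		alloc_ctx(&g, &c);
	(void) printf("gnum %u cnum %u\n", g, c);	/* gnum 2 cnum 7 */
	return (0);
}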
9760 9617
9761 9618
9762 9619 /*
9763 9620  * For a multi-threaded process, set the process context to INVALID_CONTEXT
9764 9621  * so that it faults and reloads the MMU state from TL=0. For a single-
9765 9622  * threaded process, we can just load the MMU state directly without having to
9766 9623 * set context invalid. Caller must hold the hat lock since we don't
9767 9624 * acquire it here.
9768 9625 */
9769 9626 static void
9770 9627 sfmmu_sync_mmustate(sfmmu_t *sfmmup)
9771 9628 {
9772 9629 uint_t cnum;
9773 9630 uint_t pstate_save;
9774 9631
9775 9632 ASSERT(sfmmup != ksfmmup);
9776 9633 ASSERT(sfmmu_hat_lock_held(sfmmup));
9777 9634
9778 9635 kpreempt_disable();
9779 9636
9780 9637 /*
9781 9638 	 * We check whether the passed-in sfmmup is the same as the
9782 9639 	 * currently running proc. This makes sure the current proc
9783 9640 * stays single-threaded if it already is.
9784 9641 */
9785 9642 if ((sfmmup == curthread->t_procp->p_as->a_hat) &&
9786 9643 (curthread->t_procp->p_lwpcnt == 1)) {
9787 9644 /* single-thread */
9788 9645 cnum = sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum;
9789 9646 if (cnum != INVALID_CONTEXT) {
9790 9647 uint_t curcnum;
9791 9648 /*
9792 9649 * Disable interrupts to prevent race condition
9793 9650 * with sfmmu_ctx_wrap_around ctx invalidation.
9794 9651 * In sun4v, ctx invalidation involves setting
9795 9652 * TSB to NULL, hence, interrupts should be disabled
9796 9653 			 * until after sfmmu_load_mmustate is completed.
9797 9654 */
9798 9655 pstate_save = sfmmu_disable_intrs();
9799 9656 curcnum = sfmmu_getctx_sec();
9800 9657 if (curcnum == cnum)
9801 9658 sfmmu_load_mmustate(sfmmup);
9802 9659 sfmmu_enable_intrs(pstate_save);
9803 9660 ASSERT(curcnum == cnum || curcnum == INVALID_CONTEXT);
9804 9661 }
9805 9662 } else {
9806 9663 /*
9807 9664 * multi-thread
9808 9665 * or when sfmmup is not the same as the curproc.
9809 9666 */
9810 9667 sfmmu_invalidate_ctx(sfmmup);
9811 9668 }
9812 9669
9813 9670 kpreempt_enable();
9814 9671 }
9815 9672
9816 9673
9817 9674 /*
9818 9675 * Replace the specified TSB with a new TSB. This function gets called when
9819 - * we grow, shrink or swapin a TSB. When swapping in a TSB (TSB_SWAPIN), the
9676 + * we grow or shrink a TSB. When swapping in a TSB (TSB_SWAPIN), the
9820 9677 * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB
9821 9678 * (8K).
9822 9679 *
9823 9680 * Caller must hold the HAT lock, but should assume any tsb_info
9824 9681 * pointers it has are no longer valid after calling this function.
9825 9682 *
9826 9683 * Return values:
9827 9684 * TSB_ALLOCFAIL Failed to allocate a TSB, due to memory constraints
9828 9685 * TSB_LOSTRACE HAT is busy, i.e. another thread is already doing
9829 9686 * something to this tsbinfo/TSB
9830 9687 * TSB_SUCCESS Operation succeeded
9831 9688 */
9832 9689 static tsb_replace_rc_t
9833 9690 sfmmu_replace_tsb(sfmmu_t *sfmmup, struct tsb_info *old_tsbinfo, uint_t szc,
9834 9691 hatlock_t *hatlockp, uint_t flags)
9835 9692 {
9836 9693 struct tsb_info *new_tsbinfo = NULL;
9837 9694 struct tsb_info *curtsb, *prevtsb;
9838 9695 uint_t tte_sz_mask;
9839 9696 int i;
9840 9697
9841 9698 ASSERT(sfmmup != ksfmmup);
9842 9699 ASSERT(sfmmup->sfmmu_ismhat == 0);
9843 9700 ASSERT(sfmmu_hat_lock_held(sfmmup));
9844 9701 ASSERT(szc <= tsb_max_growsize);
9845 9702
9846 9703 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_BUSY))
9847 9704 return (TSB_LOSTRACE);
9848 9705
9849 9706 /*
9850 9707 * Find the tsb_info ahead of this one in the list, and
9851 9708 * also make sure that the tsb_info passed in really
9852 9709 * exists!
9853 9710 */
9854 9711 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb;
9855 9712 curtsb != old_tsbinfo && curtsb != NULL;
9856 9713 prevtsb = curtsb, curtsb = curtsb->tsb_next)
9857 9714 ;
9858 9715 ASSERT(curtsb != NULL);
9859 9716
9860 9717 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
9861 9718 /*
9862 9719 * The process is swapped out, so just set the new size
9863 9720 * code. When it swaps back in, we'll allocate a new one
9864 9721 * of the new chosen size.
9865 9722 */
9866 9723 curtsb->tsb_szc = szc;
9867 9724 return (TSB_SUCCESS);
9868 9725 }
9869 9726 SFMMU_FLAGS_SET(sfmmup, HAT_BUSY);
9870 9727
9871 9728 tte_sz_mask = old_tsbinfo->tsb_ttesz_mask;
9872 9729
9873 9730 /*
9874 9731 * All initialization is done inside of sfmmu_tsbinfo_alloc().
9875 9732 * If we fail to allocate a TSB, exit.
9876 9733 *
9877 9734 * If tsb grows with new tsb size > 4M and old tsb size < 4M,
9878 9735 * then try 4M slab after the initial alloc fails.
9879 9736 *
9880 9737 * If tsb swapin with tsb size > 4M, then try 4M after the
9881 9738 * initial alloc fails.
9882 9739 */
9883 9740 sfmmu_hat_exit(hatlockp);
9884 9741 if (sfmmu_tsbinfo_alloc(&new_tsbinfo, szc,
9885 9742 tte_sz_mask, flags, sfmmup) &&
9886 9743 (!(flags & (TSB_GROW | TSB_SWAPIN)) || (szc <= TSB_4M_SZCODE) ||
9887 9744 (!(flags & TSB_SWAPIN) &&
9888 9745 (old_tsbinfo->tsb_szc >= TSB_4M_SZCODE)) ||
9889 9746 sfmmu_tsbinfo_alloc(&new_tsbinfo, TSB_4M_SZCODE,
9890 9747 tte_sz_mask, flags, sfmmup))) {
9891 9748 (void) sfmmu_hat_enter(sfmmup);
9892 9749 if (!(flags & TSB_SWAPIN))
9893 9750 SFMMU_STAT(sf_tsb_resize_failures);
9894 9751 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
9895 9752 return (TSB_ALLOCFAIL);
9896 9753 }
9897 9754 (void) sfmmu_hat_enter(sfmmup);
9898 9755
9899 9756 /*
9900 9757 * Re-check to make sure somebody else didn't muck with us while we
9901 9758 * didn't hold the HAT lock. If the process swapped out, fine, just
9902 9759 * exit; this can happen if we try to shrink the TSB from the context
9903 9760 * of another process (such as on an ISM unmap), though it is rare.
9904 9761 */
9905 9762 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
9906 9763 SFMMU_STAT(sf_tsb_resize_failures);
9907 9764 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
9908 9765 sfmmu_hat_exit(hatlockp);
9909 9766 sfmmu_tsbinfo_free(new_tsbinfo);
9910 9767 (void) sfmmu_hat_enter(sfmmup);
9911 9768 return (TSB_LOSTRACE);
9912 9769 }
9913 9770
9914 9771 #ifdef DEBUG
9915 9772 /* Reverify that the tsb_info still exists.. for debugging only */
9916 9773 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb;
9917 9774 curtsb != old_tsbinfo && curtsb != NULL;
9918 9775 prevtsb = curtsb, curtsb = curtsb->tsb_next)
9919 9776 ;
9920 9777 ASSERT(curtsb != NULL);
9921 9778 #endif /* DEBUG */
9922 9779
9923 9780 /*
9924 9781 * Quiesce any CPUs running this process on their next TLB miss
9925 9782 * so they atomically see the new tsb_info. We temporarily set the
9926 9783 * context to invalid context so new threads that come on processor
9927 9784 * after we do the xcall to cpusran will also serialize behind the
9928 9785 * HAT lock on TLB miss and will see the new TSB. Since this short
9929 9786 * race with a new thread coming on processor is relatively rare,
9930 9787 * this synchronization mechanism should be cheaper than always
9931 9788 * pausing all CPUs for the duration of the setup, which is what
9932 9789 	 * the old implementation did. This is particularly true if we are
9933 9790 * copying a huge chunk of memory around during that window.
9934 9791 *
9935 9792 * The memory barriers are to make sure things stay consistent
9936 9793 * with resume() since it does not hold the HAT lock while
9937 9794 * walking the list of tsb_info structures.
9938 9795 */
9939 9796 if ((flags & TSB_SWAPIN) != TSB_SWAPIN) {
9940 9797 /* The TSB is either growing or shrinking. */
9941 9798 sfmmu_invalidate_ctx(sfmmup);
9942 9799 } else {
9943 9800 /*
9944 9801 * It is illegal to swap in TSBs from a process other
9945 9802 		 * than the process being swapped in. This in turn
9946 9803 * implies we do not have a valid MMU context here
9947 9804 * since a process needs one to resolve translation
9948 9805 * misses.
9949 9806 */
9950 9807 ASSERT(curthread->t_procp->p_as->a_hat == sfmmup);
9951 9808 }
9952 9809
9953 9810 #ifdef DEBUG
9954 9811 ASSERT(max_mmu_ctxdoms > 0);
9955 9812
9956 9813 /*
9957 9814 * Process should have INVALID_CONTEXT on all MMUs
9958 9815 */
9959 9816 for (i = 0; i < max_mmu_ctxdoms; i++) {
9960 9817
9961 9818 ASSERT(sfmmup->sfmmu_ctxs[i].cnum == INVALID_CONTEXT);
9962 9819 }
9963 9820 #endif
9964 9821
9965 9822 new_tsbinfo->tsb_next = old_tsbinfo->tsb_next;
9966 9823 membar_stst(); /* strict ordering required */
9967 9824 if (prevtsb)
9968 9825 prevtsb->tsb_next = new_tsbinfo;
9969 9826 else
9970 9827 sfmmup->sfmmu_tsb = new_tsbinfo;
9971 9828 membar_enter(); /* make sure new TSB globally visible */
9972 9829
9973 9830 /*
9974 9831 * We need to migrate TSB entries from the old TSB to the new TSB
9975 9832 * if tsb_remap_ttes is set and the TSB is growing.
9976 9833 */
9977 9834 if (tsb_remap_ttes && ((flags & TSB_GROW) == TSB_GROW))
9978 9835 sfmmu_copy_tsb(old_tsbinfo, new_tsbinfo);
9979 9836
9980 9837 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
9981 9838
9982 9839 /*
9983 9840 * Drop the HAT lock to free our old tsb_info.
9984 9841 */
9985 9842 sfmmu_hat_exit(hatlockp);
9986 9843
9987 9844 if ((flags & TSB_GROW) == TSB_GROW) {
9988 9845 SFMMU_STAT(sf_tsb_grow);
9989 9846 } else if ((flags & TSB_SHRINK) == TSB_SHRINK) {
9990 9847 SFMMU_STAT(sf_tsb_shrink);
9991 9848 }
9992 9849
9993 9850 sfmmu_tsbinfo_free(old_tsbinfo);
9994 9851
9995 9852 (void) sfmmu_hat_enter(sfmmup);
9996 9853 return (TSB_SUCCESS);
9997 9854 }
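
Review note on the synchronization above: resume() walks the tsb_info list
without the HAT lock, so the new element must be fully initialized before it
is linked in. Below is a minimal sketch of that publish idiom in isolation,
using the same illumos barriers (membar_stst()/membar_enter()) as the code
above; the node/list names are invented for illustration, not the sfmmu
structures themselves.

    /*
     * Sketch: link "new" so a lockless, forward-walking reader never
     * observes a half-built node. Invented types and names.
     */
    struct node {
            struct node *next;
            /* ... payload fields ... */
    };

    static void
    publish_node(struct node **headp, struct node *prev, struct node *new)
    {
            new->next = (prev != NULL) ? prev->next : *headp;
            membar_stst();          /* payload/next stores before the link */
            if (prev != NULL)
                    prev->next = new;
            else
                    *headp = new;
            membar_enter();         /* make the new node globally visible */
    }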
9998 9855
9999 9856 /*
10000 9857 * This function will re-program hat pgsz array, and invalidate the
10001 9858 * process' context, forcing the process to switch to another
10002 9859 * context on the next TLB miss, and therefore start using the
10003 9860 * TLB that is reprogrammed for the new page sizes.
10004 9861 */
10005 9862 void
10006 9863 sfmmu_reprog_pgsz_arr(sfmmu_t *sfmmup, uint8_t *tmp_pgsz)
10007 9864 {
10008 9865 int i;
10009 9866 hatlock_t *hatlockp = NULL;
10010 9867
10011 9868 hatlockp = sfmmu_hat_enter(sfmmup);
10012 9869 /* USIII+-IV+ optimization, requires hat lock */
10013 9870 if (tmp_pgsz) {
10014 9871 for (i = 0; i < mmu_page_sizes; i++)
10015 9872 sfmmup->sfmmu_pgsz[i] = tmp_pgsz[i];
10016 9873 }
10017 9874 SFMMU_STAT(sf_tlb_reprog_pgsz);
10018 9875
10019 9876 sfmmu_invalidate_ctx(sfmmup);
10020 9877
10021 9878 sfmmu_hat_exit(hatlockp);
10022 9879 }
10023 9880
10024 9881 /*
10025 9882 * The scd_rttecnt field in the SCD must be updated to take account of the
10026 9883 * regions which it contains.
10027 9884 */
10028 9885 static void
10029 9886 sfmmu_set_scd_rttecnt(sf_srd_t *srdp, sf_scd_t *scdp)
10030 9887 {
10031 9888 uint_t rid;
10032 9889 uint_t i, j;
10033 9890 ulong_t w;
10034 9891 sf_region_t *rgnp;
10035 9892
10036 9893 ASSERT(srdp != NULL);
10037 9894
10038 9895 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) {
10039 9896 if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
10040 9897 continue;
10041 9898 }
10042 9899
10043 9900 j = 0;
10044 9901 while (w) {
10045 9902 if (!(w & 0x1)) {
10046 9903 j++;
10047 9904 w >>= 1;
10048 9905 continue;
10049 9906 }
10050 9907 rid = (i << BT_ULSHIFT) | j;
10051 9908 j++;
10052 9909 w >>= 1;
10053 9910
10054 9911 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
10055 9912 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
10056 9913 rgnp = srdp->srd_hmergnp[rid];
10057 9914 ASSERT(rgnp->rgn_refcnt > 0);
10058 9915 ASSERT(rgnp->rgn_id == rid);
10059 9916
10060 9917 scdp->scd_rttecnt[rgnp->rgn_pgszc] +=
10061 9918 rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc);
10062 9919
10063 9920 /*
10064 9921 * Maintain the tsb0 inflation cnt for the regions
10065 9922 * in the SCD.
10066 9923 */
10067 9924 if (rgnp->rgn_pgszc >= TTE4M) {
10068 9925 scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt +=
10069 9926 rgnp->rgn_size >>
10070 9927 (TTE_PAGE_SHIFT(TTE8K) + 2);
10071 9928 }
10072 9929 }
10073 9930 }
10074 9931 }
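
The rid recovery above is the stock bitmap-walk idiom: the word index
supplies the high bits and the bit position the low bits. A standalone
sketch of the same loop follows, assuming 64-bit words with a shift of 6
(BT_ULSHIFT in <sys/bitmap.h> on a 64-bit kernel); process_rid() is an
invented placeholder.

    /* Sketch: visit every set bit of a bitmap and recover its index. */
    #define	SKETCH_BT_ULSHIFT	6	/* log2(bits per word), assumed */

    static void
    walk_bitmap(const ulong_t *bitmap, uint_t nwords)
    {
            uint_t i, j;
            ulong_t w;

            for (i = 0; i < nwords; i++) {
                    for (j = 0, w = bitmap[i]; w != 0; j++, w >>= 1) {
                            if (w & 0x1)
                                    process_rid((i << SKETCH_BT_ULSHIFT) | j);
                    }
            }
    }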
10075 9932
10076 9933 /*
10077 9934 * This function assumes that there are either four or six supported page
10078 9935 * sizes and at most two programmable TLBs, so we need to decide which
10079 9936 * page sizes are most important and then tell the MMU layer so it
10080 9937 * can adjust the TLB page sizes accordingly (if supported).
10081 9938 *
10082 9939 * If these assumptions change, this function will need to be
10083 9940 * updated to support whatever the new limits are.
10084 9941 *
10085 9942 * The growing flag is nonzero if we are growing the address space,
10086 9943 * and zero if it is shrinking. This allows us to decide whether
10087 9944 * to grow or shrink our TSB, depending upon available memory
10088 9945 * conditions.
10089 9946 */
10090 9947 static void
10091 9948 sfmmu_check_page_sizes(sfmmu_t *sfmmup, int growing)
10092 9949 {
10093 9950 uint64_t ttecnt[MMU_PAGE_SIZES];
10094 9951 uint64_t tte8k_cnt, tte4m_cnt;
10095 9952 uint8_t i;
10096 9953 int sectsb_thresh;
10097 9954
10098 9955 /*
10099 9956 * Kernel threads, processes with small address spaces not using
10100 9957 * large pages, and dummy ISM HATs need not apply.
10101 9958 */
10102 9959 if (sfmmup == ksfmmup || sfmmup->sfmmu_ismhat != NULL)
10103 9960 return;
10104 9961
10105 9962 if (!SFMMU_LGPGS_INUSE(sfmmup) &&
10106 9963 sfmmup->sfmmu_ttecnt[TTE8K] <= tsb_rss_factor)
10107 9964 return;
10108 9965
10109 9966 for (i = 0; i < mmu_page_sizes; i++) {
10110 9967 ttecnt[i] = sfmmup->sfmmu_ttecnt[i] +
10111 9968 sfmmup->sfmmu_ismttecnt[i];
10112 9969 }
10113 9970
10114 9971 /* Check pagesizes in use, and possibly reprogram DTLB. */
10115 9972 if (&mmu_check_page_sizes)
10116 9973 mmu_check_page_sizes(sfmmup, ttecnt);
10117 9974
10118 9975 /*
10119 9976 * Calculate the number of 8k ttes to represent the span of these
10120 9977 * pages.
10121 9978 */
10122 9979 tte8k_cnt = ttecnt[TTE8K] +
10123 9980 (ttecnt[TTE64K] << (MMU_PAGESHIFT64K - MMU_PAGESHIFT)) +
10124 9981 (ttecnt[TTE512K] << (MMU_PAGESHIFT512K - MMU_PAGESHIFT));
10125 9982 if (mmu_page_sizes == max_mmu_page_sizes) {
10126 9983 tte4m_cnt = ttecnt[TTE4M] +
10127 9984 (ttecnt[TTE32M] << (MMU_PAGESHIFT32M - MMU_PAGESHIFT4M)) +
10128 9985 (ttecnt[TTE256M] << (MMU_PAGESHIFT256M - MMU_PAGESHIFT4M));
10129 9986 } else {
10130 9987 tte4m_cnt = ttecnt[TTE4M];
10131 9988 }
10132 9989
10133 9990 /*
10134 9991 * Inflate tte8k_cnt to allow for region large page allocation failure.
10135 9992 */
10136 9993 tte8k_cnt += sfmmup->sfmmu_tsb0_4minflcnt;
10137 9994
10138 9995 /*
10139 9996 	 * Inflate TSB sizes by a factor of 2 if this process
10140 9997 	 * uses 4M text pages, to minimize extra conflict misses
10141 9998 	 * in the first TSB; without counting text pages, an
10142 9999 	 * 8K TSB may become too small.
10143 10000 *
10144 10001 * Also double the size of the second TSB to minimize
10145 10002 * extra conflict misses due to competition between 4M text pages
10146 10003 * and data pages.
10147 10004 *
10148 10005 * We need to adjust the second TSB allocation threshold by the
10149 10006 * inflation factor, since there is no point in creating a second
10150 10007 * TSB when we know all the mappings can fit in the I/D TLBs.
10151 10008 */
10152 10009 sectsb_thresh = tsb_sectsb_threshold;
10153 10010 if (sfmmup->sfmmu_flags & HAT_4MTEXT_FLAG) {
10154 10011 tte8k_cnt <<= 1;
10155 10012 tte4m_cnt <<= 1;
10156 10013 sectsb_thresh <<= 1;
10157 10014 }
10158 10015
10159 10016 /*
10160 10017 * Check to see if our TSB is the right size; we may need to
10161 10018 * grow or shrink it. If the process is small, our work is
10162 10019 * finished at this point.
10163 10020 */
10164 10021 if (tte8k_cnt <= tsb_rss_factor && tte4m_cnt <= sectsb_thresh) {
10165 10022 return;
10166 10023 }
10167 10024 sfmmu_size_tsb(sfmmup, growing, tte8k_cnt, tte4m_cnt, sectsb_thresh);
10168 10025 }
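
Since every page size is a power-of-two multiple of the 8K base page, the
equivalent-count arithmetic above is all shifts: one 64K tte covers
2^(16-13) = 8 base pages, one 512K tte covers 2^(19-13) = 64. A hedged
arithmetic sketch of the first-TSB fold, with the usual sun4u page shifts
(8K = 2^13, 64K = 2^16, 512K = 2^19) written out as assumptions:

    /* Sketch: fold 8K/64K/512K tte counts into 8K-page equivalents. */
    static uint64_t
    tte8k_equivalents(uint64_t t8k, uint64_t t64k, uint64_t t512k)
    {
            return (t8k + (t64k << (16 - 13)) + (t512k << (19 - 13)));
    }

For example, 1000 resident 64K pages contribute 8000 base-page equivalents
toward the TSB sizing decision.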
10169 10026
10170 10027 static void
10171 10028 sfmmu_size_tsb(sfmmu_t *sfmmup, int growing, uint64_t tte8k_cnt,
10172 10029 uint64_t tte4m_cnt, int sectsb_thresh)
10173 10030 {
10174 10031 int tsb_bits;
10175 10032 uint_t tsb_szc;
10176 10033 struct tsb_info *tsbinfop;
10177 10034 hatlock_t *hatlockp = NULL;
10178 10035
10179 10036 hatlockp = sfmmu_hat_enter(sfmmup);
10180 10037 ASSERT(hatlockp != NULL);
10181 10038 tsbinfop = sfmmup->sfmmu_tsb;
10182 10039 ASSERT(tsbinfop != NULL);
10183 10040
10184 10041 /*
10185 10042 * If we're growing, select the size based on RSS. If we're
10186 10043 * shrinking, leave some room so we don't have to turn around and
10187 10044 * grow again immediately.
10188 10045 */
10189 10046 if (growing)
10190 10047 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt);
10191 10048 else
10192 10049 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt << 1);
10193 10050
10194 10051 if (!growing && (tsb_szc < tsbinfop->tsb_szc) &&
10195 10052 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) {
10196 10053 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc,
10197 10054 hatlockp, TSB_SHRINK);
10198 10055 } else if (growing && tsb_szc > tsbinfop->tsb_szc && TSB_OK_GROW()) {
10199 10056 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc,
10200 10057 hatlockp, TSB_GROW);
10201 10058 }
10202 10059 tsbinfop = sfmmup->sfmmu_tsb;
10203 10060
10204 10061 /*
10205 10062 * With the TLB and first TSB out of the way, we need to see if
10206 10063 * we need a second TSB for 4M pages. If we managed to reprogram
10207 10064 * the TLB page sizes above, the process will start using this new
10208 10065 * TSB right away; otherwise, it will start using it on the next
10209 10066 * context switch. Either way, it's no big deal so there's no
10210 10067 * synchronization with the trap handlers here unless we grow the
10211 10068 * TSB (in which case it's required to prevent using the old one
10212 10069 * after it's freed). Note: second tsb is required for 32M/256M
10213 10070 * page sizes.
10214 10071 */
10215 10072 if (tte4m_cnt > sectsb_thresh) {
10216 10073 /*
10217 10074 * If we're growing, select the size based on RSS. If we're
10218 10075 * shrinking, leave some room so we don't have to turn
10219 10076 * around and grow again immediately.
10220 10077 */
10221 10078 if (growing)
10222 10079 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt);
10223 10080 else
10224 10081 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt << 1);
10225 10082 if (tsbinfop->tsb_next == NULL) {
10226 10083 struct tsb_info *newtsb;
10227 10084 int allocflags = SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)?
10228 10085 0 : TSB_ALLOC;
10229 10086
10230 10087 sfmmu_hat_exit(hatlockp);
10231 10088
10232 10089 /*
10233 10090 * Try to allocate a TSB for 4[32|256]M pages. If we
10234 10091 * can't get the size we want, retry w/a minimum sized
10235 10092 * TSB. If that still didn't work, give up; we can
10236 10093 * still run without one.
10237 10094 */
10238 10095 tsb_bits = (mmu_page_sizes == max_mmu_page_sizes)?
10239 10096 TSB4M|TSB32M|TSB256M:TSB4M;
10240 10097 if ((sfmmu_tsbinfo_alloc(&newtsb, tsb_szc, tsb_bits,
10241 10098 allocflags, sfmmup)) &&
10242 10099 (tsb_szc <= TSB_4M_SZCODE ||
10243 10100 sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE,
10244 10101 tsb_bits, allocflags, sfmmup)) &&
10245 10102 sfmmu_tsbinfo_alloc(&newtsb, TSB_MIN_SZCODE,
10246 10103 tsb_bits, allocflags, sfmmup)) {
10247 10104 return;
10248 10105 }
10249 10106
10250 10107 hatlockp = sfmmu_hat_enter(sfmmup);
10251 10108
10252 10109 sfmmu_invalidate_ctx(sfmmup);
10253 10110
10254 10111 if (sfmmup->sfmmu_tsb->tsb_next == NULL) {
10255 10112 sfmmup->sfmmu_tsb->tsb_next = newtsb;
10256 10113 SFMMU_STAT(sf_tsb_sectsb_create);
10257 10114 sfmmu_hat_exit(hatlockp);
10258 10115 return;
10259 10116 } else {
10260 10117 /*
10261 10118 * It's annoying, but possible for us
10262 10119 				 * to get here... we dropped the HAT lock
10263 10120 * because of locking order in the kmem
10264 10121 * allocator, and while we were off getting
10265 10122 * our memory, some other thread decided to
10266 10123 * do us a favor and won the race to get a
10267 10124 * second TSB for this process. Sigh.
10268 10125 */
10269 10126 sfmmu_hat_exit(hatlockp);
10270 10127 sfmmu_tsbinfo_free(newtsb);
10271 10128 return;
10272 10129 }
10273 10130 }
10274 10131
10275 10132 /*
10276 10133 * We have a second TSB, see if it's big enough.
10277 10134 */
10278 10135 tsbinfop = tsbinfop->tsb_next;
10279 10136
10280 10137 /*
10281 10138 * Check to see if our second TSB is the right size;
10282 10139 * we may need to grow or shrink it.
10283 10140 * To prevent thrashing (e.g. growing the TSB on a
10284 10141 * subsequent map operation), only try to shrink if
10285 10142 * the TSB reach exceeds twice the virtual address
10286 10143 * space size.
10287 10144 */
10288 10145 if (!growing && (tsb_szc < tsbinfop->tsb_szc) &&
10289 10146 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) {
10290 10147 (void) sfmmu_replace_tsb(sfmmup, tsbinfop,
10291 10148 tsb_szc, hatlockp, TSB_SHRINK);
10292 10149 } else if (growing && tsb_szc > tsbinfop->tsb_szc &&
10293 10150 TSB_OK_GROW()) {
10294 10151 (void) sfmmu_replace_tsb(sfmmup, tsbinfop,
10295 10152 tsb_szc, hatlockp, TSB_GROW);
10296 10153 }
10297 10154 }
10298 10155
10299 10156 sfmmu_hat_exit(hatlockp);
10300 10157 }
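
Note the hysteresis in the size selection above: growth sizes the TSB for
the current count, but a shrink is attempted only against twice the count,
so the RSS must fall well below a size boundary before the TSB moves down.
A minimal sketch of that policy, where size_for() is an invented stand-in
for SELECT_TSB_SIZECODE():

    /* Sketch: 2x headroom on shrink prevents grow/shrink ping-pong. */
    static uint_t
    pick_tsb_szc(int growing, uint64_t cnt)
    {
            return (growing ? size_for(cnt) : size_for(cnt << 1));
    }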
10301 10158
10302 10159 /*
10303 10160  * Free up an sfmmu.
10304 10161  * Since the sfmmu is currently embedded in the hat struct, we simply zero
10305 10162  * out our fields and free up the ism map blk list if any.
10306 10163 */
10307 10164 static void
10308 10165 sfmmu_free_sfmmu(sfmmu_t *sfmmup)
10309 10166 {
10310 10167 ism_blk_t *blkp, *nx_blkp;
10311 10168 #ifdef DEBUG
10312 10169 ism_map_t *map;
10313 10170 int i;
10314 10171 #endif
10315 10172
10316 10173 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
10317 10174 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0);
10318 10175 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0);
10319 10176 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
10320 10177 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
10321 10178 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
10322 10179 ASSERT(SF_RGNMAP_ISNULL(sfmmup));
10323 10180
10324 10181 sfmmup->sfmmu_free = 0;
10325 10182 sfmmup->sfmmu_ismhat = 0;
10326 10183
10327 10184 blkp = sfmmup->sfmmu_iblk;
10328 10185 sfmmup->sfmmu_iblk = NULL;
10329 10186
10330 10187 while (blkp) {
10331 10188 #ifdef DEBUG
10332 10189 map = blkp->iblk_maps;
10333 10190 for (i = 0; i < ISM_MAP_SLOTS; i++) {
10334 10191 ASSERT(map[i].imap_seg == 0);
10335 10192 ASSERT(map[i].imap_ismhat == NULL);
10336 10193 ASSERT(map[i].imap_ment == NULL);
10337 10194 }
10338 10195 #endif
10339 10196 nx_blkp = blkp->iblk_next;
10340 10197 blkp->iblk_next = NULL;
10341 10198 blkp->iblk_nextpa = (uint64_t)-1;
10342 10199 kmem_cache_free(ism_blk_cache, blkp);
10343 10200 blkp = nx_blkp;
10344 10201 }
10345 10202 }
10346 10203
10347 10204 /*
10348 10205  * Locking primitives accessed by HATLOCK macros
10349 10206 */
10350 10207
10351 10208 #define SFMMU_SPL_MTX (0x0)
10352 10209 #define SFMMU_ML_MTX (0x1)
10353 10210
10354 10211 #define SFMMU_MLSPL_MTX(type, pg) (((type) == SFMMU_SPL_MTX) ? \
10355 10212 SPL_HASH(pg) : MLIST_HASH(pg))
10356 10213
10357 10214 kmutex_t *
10358 10215 sfmmu_page_enter(struct page *pp)
10359 10216 {
10360 10217 return (sfmmu_mlspl_enter(pp, SFMMU_SPL_MTX));
10361 10218 }
10362 10219
10363 10220 void
10364 10221 sfmmu_page_exit(kmutex_t *spl)
10365 10222 {
10366 10223 mutex_exit(spl);
10367 10224 }
10368 10225
10369 10226 int
10370 10227 sfmmu_page_spl_held(struct page *pp)
10371 10228 {
10372 10229 return (sfmmu_mlspl_held(pp, SFMMU_SPL_MTX));
10373 10230 }
10374 10231
10375 10232 kmutex_t *
10376 10233 sfmmu_mlist_enter(struct page *pp)
10377 10234 {
10378 10235 return (sfmmu_mlspl_enter(pp, SFMMU_ML_MTX));
10379 10236 }
10380 10237
10381 10238 void
10382 10239 sfmmu_mlist_exit(kmutex_t *mml)
10383 10240 {
10384 10241 mutex_exit(mml);
10385 10242 }
10386 10243
10387 10244 int
10388 10245 sfmmu_mlist_held(struct page *pp)
10389 10246 {
10390 10247
10391 10248 return (sfmmu_mlspl_held(pp, SFMMU_ML_MTX));
10392 10249 }
10393 10250
10394 10251 /*
10395 10252 * Common code for sfmmu_mlist_enter() and sfmmu_page_enter(). For
10396 10253 * sfmmu_mlist_enter() case mml_table lock array is used and for
10397 10254 * sfmmu_page_enter() sfmmu_page_lock lock array is used.
10398 10255 *
10399 10256  * The lock is taken on a root page so that it protects an operation on all
10400 10257  * constituent pages of the large page that pp belongs to.
10401 10258 *
10402 10259 * The routine takes a lock from the appropriate array. The lock is determined
10403 10260 * by hashing the root page. After taking the lock this routine checks if the
10404 10261  * root page has the same size code that was used to determine the root (i.e.
10405 10262  * that the root hasn't changed). If the root page has the expected p_szc we
10406 10263 * have the right lock and it's returned to the caller. If root's p_szc
10407 10264 * decreased we release the lock and retry from the beginning. This case can
10408 10265 * happen due to hat_page_demote() decreasing p_szc between our load of p_szc
10409 10266 * value and taking the lock. The number of retries due to p_szc decrease is
10410 10267 * limited by the maximum p_szc value. If p_szc is 0 we return the lock
10411 10268 * determined by hashing pp itself.
10412 10269 *
10413 10270 * If our caller doesn't hold a SE_SHARED or SE_EXCL lock on pp it's also
10414 10271 * possible that p_szc can increase. To increase p_szc a thread has to lock
10415 10272 * all constituent pages EXCL and do hat_pageunload() on all of them. All the
10416 10273 * callers that don't hold a page locked recheck if hmeblk through which pp
10417 10274 * was found still maps this pp. If it doesn't map it anymore returned lock
10418 10275 * is immediately dropped. Therefore if sfmmu_mlspl_enter() hits the case of
10419 10276 * p_szc increase after taking the lock it returns this lock without further
10420 10277 * retries because in this case the caller doesn't care about which lock was
10421 10278 * taken. The caller will drop it right away.
10422 10279 *
10423 10280 * After the routine returns it's guaranteed that hat_page_demote() can't
10424 10281  * change the p_szc field of any constituent page of the large page pp belongs
10425 10282  * to, as long as pp was either locked at least SHARED prior to this call or
10426 10283 * the caller finds that hment that pointed to this pp still references this
10427 10284 * pp (this also assumes that the caller holds hme hash bucket lock so that
10428 10285 * the same pp can't be remapped into the same hmeblk after it was unmapped by
10429 10286 * hat_pageunload()).
10430 10287 */
10431 10288 static kmutex_t *
10432 10289 sfmmu_mlspl_enter(struct page *pp, int type)
10433 10290 {
10434 10291 kmutex_t *mtx;
10435 10292 uint_t prev_rszc = UINT_MAX;
10436 10293 page_t *rootpp;
10437 10294 uint_t szc;
10438 10295 uint_t rszc;
10439 10296 uint_t pszc = pp->p_szc;
10440 10297
10441 10298 ASSERT(pp != NULL);
10442 10299
10443 10300 again:
10444 10301 if (pszc == 0) {
10445 10302 mtx = SFMMU_MLSPL_MTX(type, pp);
10446 10303 mutex_enter(mtx);
10447 10304 return (mtx);
10448 10305 }
10449 10306
10450 10307 /* The lock lives in the root page */
10451 10308 rootpp = PP_GROUPLEADER(pp, pszc);
10452 10309 mtx = SFMMU_MLSPL_MTX(type, rootpp);
10453 10310 mutex_enter(mtx);
10454 10311
10455 10312 /*
10456 10313 * Return mml in the following 3 cases:
10457 10314 *
10458 10315 * 1) If pp itself is root since if its p_szc decreased before we took
10459 10316 * the lock pp is still the root of smaller szc page. And if its p_szc
10460 10317 * increased it doesn't matter what lock we return (see comment in
10461 10318 * front of this routine).
10462 10319 *
10463 10320 * 2) If pp's not root but rootpp is the root of a rootpp->p_szc size
10464 10321 * large page we have the right lock since any previous potential
10465 10322 * hat_page_demote() is done demoting from greater than current root's
10466 10323 * p_szc because hat_page_demote() changes root's p_szc last. No
10467 10324 * further hat_page_demote() can start or be in progress since it
10468 10325 * would need the same lock we currently hold.
10469 10326 *
10470 10327 * 3) If rootpp's p_szc increased since previous iteration it doesn't
10471 10328 * matter what lock we return (see comment in front of this routine).
10472 10329 */
10473 10330 if (pp == rootpp || (rszc = rootpp->p_szc) == pszc ||
10474 10331 rszc >= prev_rszc) {
10475 10332 return (mtx);
10476 10333 }
10477 10334
10478 10335 /*
10479 10336 * hat_page_demote() could have decreased root's p_szc.
10480 10337 * In this case pp's p_szc must also be smaller than pszc.
10481 10338 * Retry.
10482 10339 */
10483 10340 if (rszc < pszc) {
10484 10341 szc = pp->p_szc;
10485 10342 if (szc < pszc) {
10486 10343 mutex_exit(mtx);
10487 10344 pszc = szc;
10488 10345 goto again;
10489 10346 }
10490 10347 /*
10491 10348 * pp's p_szc increased after it was decreased.
10492 10349 * page cannot be mapped. Return current lock. The caller
10493 10350 * will drop it right away.
10494 10351 */
10495 10352 return (mtx);
10496 10353 }
10497 10354
10498 10355 /*
10499 10356 * root's p_szc is greater than pp's p_szc.
10500 10357 * hat_page_demote() is not done with all pages
10501 10358 * yet. Wait for it to complete.
10502 10359 */
10503 10360 mutex_exit(mtx);
10504 10361 rootpp = PP_GROUPLEADER(rootpp, rszc);
10505 10362 mtx = SFMMU_MLSPL_MTX(type, rootpp);
10506 10363 mutex_enter(mtx);
10507 10364 mutex_exit(mtx);
10508 10365 prev_rszc = rszc;
10509 10366 goto again;
10510 10367 }
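
The loop above is a general pattern for locking by a mutable key: sample the
key (p_szc), take the lock it hashes to, then revalidate the key under the
lock and retry on a mismatch. A simplified sketch of just that skeleton (the
real routine additionally distinguishes key increases from decreases);
obj_t, key_of(), and lock_for() are invented names:

    /* Sketch: sample key, lock, revalidate, retry on change. */
    static kmutex_t *
    lock_by_mutable_key(obj_t *op)
    {
            kmutex_t *mtx;
            uint_t key;

            for (;;) {
                    key = key_of(op);       /* unlocked sample */
                    mtx = lock_for(key);
                    mutex_enter(mtx);
                    if (key_of(op) == key)  /* still hashes to mtx? */
                            return (mtx);
                    mutex_exit(mtx);        /* raced; resample and retry */
            }
    }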
10511 10368
10512 10369 static int
10513 10370 sfmmu_mlspl_held(struct page *pp, int type)
10514 10371 {
10515 10372 kmutex_t *mtx;
10516 10373
10517 10374 ASSERT(pp != NULL);
10518 10375 /* The lock lives in the root page */
10519 10376 pp = PP_PAGEROOT(pp);
10520 10377 ASSERT(pp != NULL);
10521 10378
10522 10379 mtx = SFMMU_MLSPL_MTX(type, pp);
10523 10380 return (MUTEX_HELD(mtx));
10524 10381 }
10525 10382
10526 10383 static uint_t
10527 10384 sfmmu_get_free_hblk(struct hme_blk **hmeblkpp, uint_t critical)
10528 10385 {
10529 10386 struct hme_blk *hblkp;
10530 10387
10531 10388
10532 10389 if (freehblkp != NULL) {
10533 10390 mutex_enter(&freehblkp_lock);
10534 10391 if (freehblkp != NULL) {
10535 10392 /*
10536 10393 			 * If the current thread owns hblk_reserve, or this is
10537 10394 			 * a critical request from sfmmu_hblk_steal(),
10538 10395 			 * let it succeed even if freehblkcnt is really low.
10539 10396 */
10540 10397 if (freehblkcnt <= HBLK_RESERVE_MIN && !critical) {
10541 10398 SFMMU_STAT(sf_get_free_throttle);
10542 10399 mutex_exit(&freehblkp_lock);
10543 10400 return (0);
10544 10401 }
10545 10402 freehblkcnt--;
10546 10403 *hmeblkpp = freehblkp;
10547 10404 hblkp = *hmeblkpp;
10548 10405 freehblkp = hblkp->hblk_next;
10549 10406 mutex_exit(&freehblkp_lock);
10550 10407 hblkp->hblk_next = NULL;
10551 10408 SFMMU_STAT(sf_get_free_success);
10552 10409
10553 10410 ASSERT(hblkp->hblk_hmecnt == 0);
10554 10411 ASSERT(hblkp->hblk_vcnt == 0);
10555 10412 ASSERT(hblkp->hblk_nextpa == va_to_pa((caddr_t)hblkp));
10556 10413
10557 10414 return (1);
10558 10415 }
10559 10416 mutex_exit(&freehblkp_lock);
10560 10417 }
10561 10418
10562 10419 /* Check cpu hblk pending queues */
10563 10420 if ((*hmeblkpp = sfmmu_check_pending_hblks(TTE8K)) != NULL) {
10564 10421 hblkp = *hmeblkpp;
10565 10422 hblkp->hblk_next = NULL;
10566 10423 hblkp->hblk_nextpa = va_to_pa((caddr_t)hblkp);
10567 10424
10568 10425 ASSERT(hblkp->hblk_hmecnt == 0);
10569 10426 ASSERT(hblkp->hblk_vcnt == 0);
10570 10427
10571 10428 return (1);
10572 10429 }
10573 10430
10574 10431 SFMMU_STAT(sf_get_free_fail);
10575 10432 return (0);
10576 10433 }
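
The freehblkp handling above is double-checked locking: an unlocked peek
filters out the common empty case, and the test is repeated under the mutex
before the list is touched. The same pop in miniature, with illustrative
pool names standing in for freehblkp/freehblkp_lock:

    /* Sketch: unlocked peek, then recheck under the lock before popping. */
    static item_t *pool_head;
    static kmutex_t pool_lock;

    static item_t *
    pool_get(void)
    {
            item_t *ip = NULL;

            if (pool_head != NULL) {        /* cheap, unlocked peek */
                    mutex_enter(&pool_lock);
                    if (pool_head != NULL) { /* recheck under the lock */
                            ip = pool_head;
                            pool_head = ip->next;
                            ip->next = NULL;
                    }
                    mutex_exit(&pool_lock);
            }
            return (ip);
    }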
10577 10434
10578 10435 static uint_t
10579 10436 sfmmu_put_free_hblk(struct hme_blk *hmeblkp, uint_t critical)
10580 10437 {
10581 10438 struct hme_blk *hblkp;
10582 10439
10583 10440 ASSERT(hmeblkp->hblk_hmecnt == 0);
10584 10441 ASSERT(hmeblkp->hblk_vcnt == 0);
10585 10442 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp));
10586 10443
10587 10444 /*
10588 10445 * If the current thread is mapping into kernel space,
10589 10446 	 * let it succeed even if freehblkcnt is at its max
10590 10447 * so that it will avoid freeing it to kmem.
10591 10448 * This will prevent stack overflow due to
10592 10449 * possible recursion since kmem_cache_free()
10593 10450 * might require creation of a slab which
10594 10451 * in turn needs an hmeblk to map that slab;
10595 10452 * let's break this vicious chain at the first
10596 10453 * opportunity.
10597 10454 */
10598 10455 if (freehblkcnt < HBLK_RESERVE_CNT || critical) {
10599 10456 mutex_enter(&freehblkp_lock);
10600 10457 if (freehblkcnt < HBLK_RESERVE_CNT || critical) {
10601 10458 SFMMU_STAT(sf_put_free_success);
10602 10459 freehblkcnt++;
10603 10460 hmeblkp->hblk_next = freehblkp;
10604 10461 freehblkp = hmeblkp;
10605 10462 mutex_exit(&freehblkp_lock);
10606 10463 return (1);
10607 10464 }
10608 10465 mutex_exit(&freehblkp_lock);
10609 10466 }
10610 10467
10611 10468 /*
10612 10469 * Bring down freehblkcnt to HBLK_RESERVE_CNT. We are here
10613 10470 * only if freehblkcnt is at least HBLK_RESERVE_CNT *and*
10614 10471 * we are not in the process of mapping into kernel space.
10615 10472 */
10616 10473 ASSERT(!critical);
10617 10474 while (freehblkcnt > HBLK_RESERVE_CNT) {
10618 10475 mutex_enter(&freehblkp_lock);
10619 10476 if (freehblkcnt > HBLK_RESERVE_CNT) {
10620 10477 freehblkcnt--;
10621 10478 hblkp = freehblkp;
10622 10479 freehblkp = hblkp->hblk_next;
10623 10480 mutex_exit(&freehblkp_lock);
10624 10481 ASSERT(get_hblk_cache(hblkp) == sfmmu8_cache);
10625 10482 kmem_cache_free(sfmmu8_cache, hblkp);
10626 10483 continue;
10627 10484 }
10628 10485 mutex_exit(&freehblkp_lock);
10629 10486 }
10630 10487 SFMMU_STAT(sf_put_free_fail);
10631 10488 return (0);
10632 10489 }
10633 10490
10634 10491 static void
10635 10492 sfmmu_hblk_swap(struct hme_blk *new)
10636 10493 {
10637 10494 struct hme_blk *old, *hblkp, *prev;
10638 10495 uint64_t newpa;
10639 10496 caddr_t base, vaddr, endaddr;
10640 10497 struct hmehash_bucket *hmebp;
10641 10498 struct sf_hment *osfhme, *nsfhme;
10642 10499 page_t *pp;
10643 10500 kmutex_t *pml;
10644 10501 tte_t tte;
10645 10502 struct hme_blk *list = NULL;
10646 10503
10647 10504 #ifdef DEBUG
10648 10505 hmeblk_tag hblktag;
10649 10506 struct hme_blk *found;
10650 10507 #endif
10651 10508 old = HBLK_RESERVE;
10652 10509 ASSERT(!old->hblk_shared);
10653 10510
10654 10511 /*
10655 10512 * save pa before bcopy clobbers it
10656 10513 */
10657 10514 newpa = new->hblk_nextpa;
10658 10515
10659 10516 base = (caddr_t)get_hblk_base(old);
10660 10517 endaddr = base + get_hblk_span(old);
10661 10518
10662 10519 /*
10663 10520 * acquire hash bucket lock.
10664 10521 */
10665 10522 hmebp = sfmmu_tteload_acquire_hashbucket(ksfmmup, base, TTE8K,
10666 10523 SFMMU_INVALID_SHMERID);
10667 10524
10668 10525 /*
10669 10526 * copy contents from old to new
10670 10527 */
10671 10528 bcopy((void *)old, (void *)new, HME8BLK_SZ);
10672 10529
10673 10530 /*
10674 10531 * add new to hash chain
10675 10532 */
10676 10533 sfmmu_hblk_hash_add(hmebp, new, newpa);
10677 10534
10678 10535 /*
10679 10536 * search hash chain for hblk_reserve; this needs to be performed
10680 10537 * after adding new, otherwise prev won't correspond to the hblk which
10681 10538 * is prior to old in hash chain when we call sfmmu_hblk_hash_rm to
10682 10539 * remove old later.
10683 10540 */
10684 10541 for (prev = NULL,
10685 10542 hblkp = hmebp->hmeblkp; hblkp != NULL && hblkp != old;
10686 10543 prev = hblkp, hblkp = hblkp->hblk_next)
10687 10544 ;
10688 10545
10689 10546 if (hblkp != old)
10690 10547 panic("sfmmu_hblk_swap: hblk_reserve not found");
10691 10548
10692 10549 /*
10693 10550 * p_mapping list is still pointing to hments in hblk_reserve;
10694 10551 * fix up p_mapping list so that they point to hments in new.
10695 10552 *
10696 10553 * Since all these mappings are created by hblk_reserve_thread
10697 10554 * on the way and it's using at least one of the buffers from each of
10698 10555 * the newly minted slabs, there is no danger of any of these
10699 10556 * mappings getting unloaded by another thread.
10700 10557 *
10701 10558 * tsbmiss could only modify ref/mod bits of hments in old/new.
10702 10559 * Since all of these hments hold mappings established by segkmem
10703 10560 * and mappings in segkmem are setup with HAT_NOSYNC, ref/mod bits
10704 10561 * have no meaning for the mappings in hblk_reserve. hments in
10705 10562 * old and new are identical except for ref/mod bits.
10706 10563 */
10707 10564 for (vaddr = base; vaddr < endaddr; vaddr += TTEBYTES(TTE8K)) {
10708 10565
10709 10566 HBLKTOHME(osfhme, old, vaddr);
10710 10567 sfmmu_copytte(&osfhme->hme_tte, &tte);
10711 10568
10712 10569 if (TTE_IS_VALID(&tte)) {
10713 10570 if ((pp = osfhme->hme_page) == NULL)
10714 10571 panic("sfmmu_hblk_swap: page not mapped");
10715 10572
10716 10573 pml = sfmmu_mlist_enter(pp);
10717 10574
10718 10575 if (pp != osfhme->hme_page)
10719 10576 panic("sfmmu_hblk_swap: mapping changed");
10720 10577
10721 10578 HBLKTOHME(nsfhme, new, vaddr);
10722 10579
10723 10580 HME_ADD(nsfhme, pp);
10724 10581 HME_SUB(osfhme, pp);
10725 10582
10726 10583 sfmmu_mlist_exit(pml);
10727 10584 }
10728 10585 }
10729 10586
10730 10587 /*
10731 10588 * remove old from hash chain
10732 10589 */
10733 10590 sfmmu_hblk_hash_rm(hmebp, old, prev, &list, 1);
10734 10591
10735 10592 #ifdef DEBUG
10736 10593
10737 10594 hblktag.htag_id = ksfmmup;
10738 10595 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
10739 10596 hblktag.htag_bspage = HME_HASH_BSPAGE(base, HME_HASH_SHIFT(TTE8K));
10740 10597 hblktag.htag_rehash = HME_HASH_REHASH(TTE8K);
10741 10598 HME_HASH_FAST_SEARCH(hmebp, hblktag, found);
10742 10599
10743 10600 if (found != new)
10744 10601 panic("sfmmu_hblk_swap: new hblk not found");
10745 10602 #endif
10746 10603
10747 10604 SFMMU_HASH_UNLOCK(hmebp);
10748 10605
10749 10606 /*
10750 10607 * Reset hblk_reserve
10751 10608 */
10752 10609 bzero((void *)old, HME8BLK_SZ);
10753 10610 old->hblk_nextpa = va_to_pa((caddr_t)old);
10754 10611 }
10755 10612
10756 10613 /*
10757 10614 * Grab the mlist mutex for both pages passed in.
10758 10615 *
10759 10616 * low and high will be returned as pointers to the mutexes for these pages.
10760 10617 * low refers to the mutex residing in the lower bin of the mlist hash, while
10761 10618 * high refers to the mutex residing in the higher bin of the mlist hash. This
10762 10619 * is due to the locking order restrictions on the same thread grabbing
10763 10620 * multiple mlist mutexes. The low lock must be acquired before the high lock.
10764 10621 *
10765 10622 * If both pages hash to the same mutex, only grab that single mutex, and
10766 10623 * high will be returned as NULL
10767 10624 * If the pages hash to different bins in the hash, grab the lower addressed
10768 10625 * lock first and then the higher addressed lock in order to follow the locking
10769 10626 * rules involved with the same thread grabbing multiple mlist mutexes.
10770 10627 * low and high will both have non-NULL values.
10771 10628 */
10772 10629 static void
10773 10630 sfmmu_mlist_reloc_enter(struct page *targ, struct page *repl,
10774 10631 kmutex_t **low, kmutex_t **high)
10775 10632 {
10776 10633 kmutex_t *mml_targ, *mml_repl;
10777 10634
10778 10635 /*
10779 10636 * no need to do the dance around szc as in sfmmu_mlist_enter()
10780 10637 * because this routine is only called by hat_page_relocate() and all
10781 10638 * targ and repl pages are already locked EXCL so szc can't change.
10782 10639 */
10783 10640
10784 10641 mml_targ = MLIST_HASH(PP_PAGEROOT(targ));
10785 10642 mml_repl = MLIST_HASH(PP_PAGEROOT(repl));
10786 10643
10787 10644 if (mml_targ == mml_repl) {
10788 10645 *low = mml_targ;
10789 10646 *high = NULL;
10790 10647 } else {
10791 10648 if (mml_targ < mml_repl) {
10792 10649 *low = mml_targ;
10793 10650 *high = mml_repl;
10794 10651 } else {
10795 10652 *low = mml_repl;
10796 10653 *high = mml_targ;
10797 10654 }
10798 10655 }
10799 10656
10800 10657 mutex_enter(*low);
10801 10658 if (*high)
10802 10659 mutex_enter(*high);
10803 10660 }
10804 10661
10805 10662 static void
10806 10663 sfmmu_mlist_reloc_exit(kmutex_t *low, kmutex_t *high)
10807 10664 {
10808 10665 if (high)
10809 10666 mutex_exit(high);
10810 10667 mutex_exit(low);
10811 10668 }
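
The low/high discipline above is the standard deadlock-avoidance rule for
taking two locks of the same class: order them (here by address within the
mlist hash array) and always acquire the lower one first. In sketch form:

    /* Sketch: acquire two same-class mutexes in address order. */
    static void
    lock_pair(kmutex_t *a, kmutex_t *b)
    {
            if (a == b) {
                    mutex_enter(a);         /* both hash to one lock */
            } else if (a < b) {
                    mutex_enter(a);
                    mutex_enter(b);
            } else {
                    mutex_enter(b);
                    mutex_enter(a);
            }
    }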
10812 10669
10813 10670 static hatlock_t *
10814 10671 sfmmu_hat_enter(sfmmu_t *sfmmup)
10815 10672 {
10816 10673 hatlock_t *hatlockp;
10817 10674
10818 10675 if (sfmmup != ksfmmup) {
10819 10676 hatlockp = TSB_HASH(sfmmup);
10820 10677 mutex_enter(HATLOCK_MUTEXP(hatlockp));
10821 10678 return (hatlockp);
10822 10679 }
10823 10680 return (NULL);
10824 10681 }
10825 10682
10826 10683 static hatlock_t *
10827 10684 sfmmu_hat_tryenter(sfmmu_t *sfmmup)
10828 10685 {
10829 10686 hatlock_t *hatlockp;
10830 10687
10831 10688 if (sfmmup != ksfmmup) {
10832 10689 hatlockp = TSB_HASH(sfmmup);
10833 10690 if (mutex_tryenter(HATLOCK_MUTEXP(hatlockp)) == 0)
10834 10691 return (NULL);
10835 10692 return (hatlockp);
10836 10693 }
10837 10694 return (NULL);
10838 10695 }
10839 10696
10840 10697 static void
10841 10698 sfmmu_hat_exit(hatlock_t *hatlockp)
10842 10699 {
10843 10700 if (hatlockp != NULL)
10844 10701 mutex_exit(HATLOCK_MUTEXP(hatlockp));
10845 10702 }
10846 10703
10847 10704 static void
10848 10705 sfmmu_hat_lock_all(void)
10849 10706 {
10850 10707 int i;
10851 10708 for (i = 0; i < SFMMU_NUM_LOCK; i++)
10852 10709 mutex_enter(HATLOCK_MUTEXP(&hat_lock[i]));
10853 10710 }
10854 10711
10855 10712 static void
10856 10713 sfmmu_hat_unlock_all(void)
10857 10714 {
10858 10715 int i;
10859 10716 for (i = SFMMU_NUM_LOCK - 1; i >= 0; i--)
10860 10717 mutex_exit(HATLOCK_MUTEXP(&hat_lock[i]));
10861 10718 }
10862 10719
10863 10720 int
10864 10721 sfmmu_hat_lock_held(sfmmu_t *sfmmup)
10865 10722 {
10866 10723 ASSERT(sfmmup != ksfmmup);
10867 10724 return (MUTEX_HELD(HATLOCK_MUTEXP(TSB_HASH(sfmmup))));
10868 10725 }
10869 10726
10870 10727 /*
10871 10728 * Locking primitives to provide consistency between ISM unmap
10872 10729 * and other operations. Since ISM unmap can take a long time, we
10873 10730 * use HAT_ISMBUSY flag (protected by the hatlock) to avoid creating
10874 10731 * contention on the hatlock buckets while ISM segments are being
10875 10732 * unmapped. The tradeoff is that the flags don't prevent priority
10876 10733 * inversion from occurring, so we must request kernel priority in
10877 10734 * case we have to sleep to keep from getting buried while holding
10878 10735 * the HAT_ISMBUSY flag set, which in turn could block other kernel
10879 10736 * threads from running (for example, in sfmmu_uvatopfn()).
10880 10737 */
10881 10738 static void
10882 10739 sfmmu_ismhat_enter(sfmmu_t *sfmmup, int hatlock_held)
10883 10740 {
10884 10741 hatlock_t *hatlockp;
10885 10742
10886 10743 THREAD_KPRI_REQUEST();
10887 10744 if (!hatlock_held)
10888 10745 hatlockp = sfmmu_hat_enter(sfmmup);
10889 10746 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY))
10890 10747 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp));
10891 10748 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY);
10892 10749 if (!hatlock_held)
10893 10750 sfmmu_hat_exit(hatlockp);
10894 10751 }
10895 10752
10896 10753 static void
10897 10754 sfmmu_ismhat_exit(sfmmu_t *sfmmup, int hatlock_held)
10898 10755 {
10899 10756 hatlock_t *hatlockp;
10900 10757
10901 10758 if (!hatlock_held)
10902 10759 hatlockp = sfmmu_hat_enter(sfmmup);
10903 10760 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
10904 10761 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY);
10905 10762 cv_broadcast(&sfmmup->sfmmu_tsb_cv);
10906 10763 if (!hatlock_held)
10907 10764 sfmmu_hat_exit(hatlockp);
10908 10765 THREAD_KPRI_RELEASE();
10909 10766 }
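
The ISM busy protocol above is a textbook flag-plus-condvar gate: waiters
sleep on the cv while the flag is set; the holder clears the flag and
broadcasts. Stripped of the hatlock and kpri details, the shape is roughly
as below; the lock/cv/flags parameters are illustrative stand-ins.

    /* Sketch of the busy-flag gate used by the ISM busy protocol. */
    static void
    busy_enter(kmutex_t *lock, kcondvar_t *cv, uint_t *flagsp)
    {
            mutex_enter(lock);
            while (*flagsp & 0x1)           /* someone else holds the gate */
                    cv_wait(cv, lock);
            *flagsp |= 0x1;                 /* claim it */
            mutex_exit(lock);
    }

    static void
    busy_exit(kmutex_t *lock, kcondvar_t *cv, uint_t *flagsp)
    {
            mutex_enter(lock);
            *flagsp &= ~0x1;
            cv_broadcast(cv);               /* wake every waiter */
            mutex_exit(lock);
    }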
10910 10767
10911 10768 /*
10912 10769 *
10913 10770 * Algorithm:
10914 10771 *
10915 10772 * (1) if segkmem is not ready, allocate hblk from an array of pre-alloc'ed
10916 10773 * hblks.
10917 10774 *
10918 10775 * (2) if we are allocating an hblk for mapping a slab in sfmmu_cache,
10919 10776 *
10920 10777 * (a) try to return an hblk from reserve pool of free hblks;
10921 10778 * (b) if the reserve pool is empty, acquire hblk_reserve_lock
10922 10779 * and return hblk_reserve.
10923 10780 *
10924 10781 * (3) call kmem_cache_alloc() to allocate hblk;
10925 10782 *
10926 10783 * (a) if hblk_reserve_lock is held by the current thread,
10927 10784 * atomically replace hblk_reserve by the hblk that is
10928 10785 * returned by kmem_cache_alloc; release hblk_reserve_lock
10929 10786 * and call kmem_cache_alloc() again.
10930 10787 * (b) if reserve pool is not full, add the hblk that is
10931 10788 * returned by kmem_cache_alloc to reserve pool and
10932 10789 * call kmem_cache_alloc again.
10933 10790 *
10934 10791 */
10935 10792 static struct hme_blk *
10936 10793 sfmmu_hblk_alloc(sfmmu_t *sfmmup, caddr_t vaddr,
10937 10794 struct hmehash_bucket *hmebp, uint_t size, hmeblk_tag hblktag,
10938 10795 uint_t flags, uint_t rid)
10939 10796 {
10940 10797 struct hme_blk *hmeblkp = NULL;
10941 10798 struct hme_blk *newhblkp;
10942 10799 struct hme_blk *shw_hblkp = NULL;
10943 10800 struct kmem_cache *sfmmu_cache = NULL;
10944 10801 uint64_t hblkpa;
10945 10802 ulong_t index;
10946 10803 uint_t owner; /* set to 1 if using hblk_reserve */
10947 10804 uint_t forcefree;
10948 10805 int sleep;
10949 10806 sf_srd_t *srdp;
10950 10807 sf_region_t *rgnp;
10951 10808
10952 10809 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
10953 10810 ASSERT(hblktag.htag_rid == rid);
10954 10811 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size));
10955 10812 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) ||
10956 10813 IS_P2ALIGNED(vaddr, TTEBYTES(size)));
10957 10814
10958 10815 /*
10959 10816 * If segkmem is not created yet, allocate from static hmeblks
10960 10817 * created at the end of startup_modules(). See the block comment
10961 10818 * in startup_modules() describing how we estimate the number of
10962 10819 * static hmeblks that will be needed during re-map.
10963 10820 */
10964 10821 if (!hblk_alloc_dynamic) {
10965 10822
10966 10823 ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
10967 10824
10968 10825 if (size == TTE8K) {
10969 10826 index = nucleus_hblk8.index;
10970 10827 if (index >= nucleus_hblk8.len) {
10971 10828 /*
10972 10829 * If we panic here, see startup_modules() to
10973 10830 * make sure that we are calculating the
10974 10831 * number of hblk8's that we need correctly.
10975 10832 */
10976 10833 prom_panic("no nucleus hblk8 to allocate");
10977 10834 }
10978 10835 hmeblkp =
10979 10836 (struct hme_blk *)&nucleus_hblk8.list[index];
10980 10837 nucleus_hblk8.index++;
10981 10838 SFMMU_STAT(sf_hblk8_nalloc);
10982 10839 } else {
10983 10840 index = nucleus_hblk1.index;
10984 10841 if (nucleus_hblk1.index >= nucleus_hblk1.len) {
10985 10842 /*
10986 10843 * If we panic here, see startup_modules().
10987 10844 * Most likely you need to update the
10988 10845 * calculation of the number of hblk1 elements
10989 10846 * that the kernel needs to boot.
10990 10847 */
10991 10848 prom_panic("no nucleus hblk1 to allocate");
10992 10849 }
10993 10850 hmeblkp =
10994 10851 (struct hme_blk *)&nucleus_hblk1.list[index];
10995 10852 nucleus_hblk1.index++;
10996 10853 SFMMU_STAT(sf_hblk1_nalloc);
10997 10854 }
10998 10855
10999 10856 goto hblk_init;
11000 10857 }
11001 10858
11002 10859 SFMMU_HASH_UNLOCK(hmebp);
11003 10860
11004 10861 if (sfmmup != KHATID && !SFMMU_IS_SHMERID_VALID(rid)) {
11005 10862 if (mmu_page_sizes == max_mmu_page_sizes) {
11006 10863 if (size < TTE256M)
11007 10864 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr,
11008 10865 size, flags);
11009 10866 } else {
11010 10867 if (size < TTE4M)
11011 10868 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr,
11012 10869 size, flags);
11013 10870 }
11014 10871 } else if (SFMMU_IS_SHMERID_VALID(rid)) {
11015 10872 /*
11016 10873 * Shared hmes use per region bitmaps in rgn_hmeflag
11017 10874 * rather than shadow hmeblks to keep track of the
11018 10875 * mapping sizes which have been allocated for the region.
11019 10876 		 * Here we clean up old invalid hmeblks with this rid,
11020 10877 * which may be left around by pageunload().
11021 10878 */
11022 10879 int ttesz;
11023 10880 caddr_t va;
11024 10881 caddr_t eva = vaddr + TTEBYTES(size);
11025 10882
11026 10883 ASSERT(sfmmup != KHATID);
11027 10884
11028 10885 srdp = sfmmup->sfmmu_srdp;
11029 10886 ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
11030 10887 rgnp = srdp->srd_hmergnp[rid];
11031 10888 ASSERT(rgnp != NULL && rgnp->rgn_id == rid);
11032 10889 ASSERT(rgnp->rgn_refcnt != 0);
11033 10890 ASSERT(size <= rgnp->rgn_pgszc);
11034 10891
11035 10892 ttesz = HBLK_MIN_TTESZ;
11036 10893 do {
11037 10894 if (!(rgnp->rgn_hmeflags & (0x1 << ttesz))) {
11038 10895 continue;
11039 10896 }
11040 10897
11041 10898 if (ttesz > size && ttesz != HBLK_MIN_TTESZ) {
11042 10899 sfmmu_cleanup_rhblk(srdp, vaddr, rid, ttesz);
11043 10900 } else if (ttesz < size) {
11044 10901 for (va = vaddr; va < eva;
11045 10902 va += TTEBYTES(ttesz)) {
11046 10903 sfmmu_cleanup_rhblk(srdp, va, rid,
11047 10904 ttesz);
11048 10905 }
11049 10906 }
11050 10907 } while (++ttesz <= rgnp->rgn_pgszc);
11051 10908 }
11052 10909
11053 10910 fill_hblk:
11054 10911 owner = (hblk_reserve_thread == curthread) ? 1 : 0;
11055 10912
11056 10913 if (owner && size == TTE8K) {
11057 10914
11058 10915 ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
11059 10916 /*
11060 10917 * We are really in a tight spot. We already own
11061 10918 * hblk_reserve and we need another hblk. In anticipation
11062 10919 * of this kind of scenario, we specifically set aside
11063 10920 * HBLK_RESERVE_MIN number of hblks to be used exclusively
11064 10921 * by owner of hblk_reserve.
11065 10922 */
11066 10923 SFMMU_STAT(sf_hblk_recurse_cnt);
11067 10924
11068 10925 if (!sfmmu_get_free_hblk(&hmeblkp, 1))
11069 10926 panic("sfmmu_hblk_alloc: reserve list is empty");
11070 10927
11071 10928 goto hblk_verify;
11072 10929 }
11073 10930
11074 10931 ASSERT(!owner);
11075 10932
11076 10933 if ((flags & HAT_NO_KALLOC) == 0) {
11077 10934
11078 10935 sfmmu_cache = ((size == TTE8K) ? sfmmu8_cache : sfmmu1_cache);
11079 10936 sleep = ((sfmmup == KHATID) ? KM_NOSLEEP : KM_SLEEP);
11080 10937
11081 10938 if ((hmeblkp = kmem_cache_alloc(sfmmu_cache, sleep)) == NULL) {
11082 10939 hmeblkp = sfmmu_hblk_steal(size);
11083 10940 } else {
11084 10941 /*
11085 10942 * if we are the owner of hblk_reserve,
11086 10943 * swap hblk_reserve with hmeblkp and
11087 10944 * start a fresh life. Hope things go
11088 10945 * better this time.
11089 10946 */
11090 10947 if (hblk_reserve_thread == curthread) {
11091 10948 ASSERT(sfmmu_cache == sfmmu8_cache);
11092 10949 sfmmu_hblk_swap(hmeblkp);
11093 10950 hblk_reserve_thread = NULL;
11094 10951 mutex_exit(&hblk_reserve_lock);
11095 10952 goto fill_hblk;
11096 10953 }
11097 10954 /*
11098 10955 * let's donate this hblk to our reserve list if
11099 10956 * we are not mapping kernel range
11100 10957 */
11101 10958 if (size == TTE8K && sfmmup != KHATID) {
11102 10959 if (sfmmu_put_free_hblk(hmeblkp, 0))
11103 10960 goto fill_hblk;
11104 10961 }
11105 10962 }
11106 10963 } else {
11107 10964 /*
11108 10965 * We are here to map the slab in sfmmu8_cache; let's
11109 10966 * check if we could tap our reserve list; if successful,
11110 10967 * this will avoid the pain of going thru sfmmu_hblk_swap
11111 10968 */
11112 10969 SFMMU_STAT(sf_hblk_slab_cnt);
11113 10970 if (!sfmmu_get_free_hblk(&hmeblkp, 0)) {
11114 10971 /*
11115 10972 * let's start hblk_reserve dance
11116 10973 */
11117 10974 SFMMU_STAT(sf_hblk_reserve_cnt);
11118 10975 owner = 1;
11119 10976 mutex_enter(&hblk_reserve_lock);
11120 10977 hmeblkp = HBLK_RESERVE;
11121 10978 hblk_reserve_thread = curthread;
11122 10979 }
11123 10980 }
11124 10981
11125 10982 hblk_verify:
11126 10983 ASSERT(hmeblkp != NULL);
11127 10984 set_hblk_sz(hmeblkp, size);
11128 10985 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp));
11129 10986 SFMMU_HASH_LOCK(hmebp);
11130 10987 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp);
11131 10988 if (newhblkp != NULL) {
11132 10989 SFMMU_HASH_UNLOCK(hmebp);
11133 10990 if (hmeblkp != HBLK_RESERVE) {
11134 10991 /*
11135 10992 * This is really tricky!
11136 10993 *
11137 10994 * vmem_alloc(vmem_seg_arena)
11138 10995 * vmem_alloc(vmem_internal_arena)
11139 10996 * segkmem_alloc(heap_arena)
11140 10997 * vmem_alloc(heap_arena)
11141 10998 * page_create()
11142 10999 * hat_memload()
11143 11000 * kmem_cache_free()
11144 11001 * kmem_cache_alloc()
11145 11002 * kmem_slab_create()
11146 11003 * vmem_alloc(kmem_internal_arena)
11147 11004 * segkmem_alloc(heap_arena)
11148 11005 * vmem_alloc(heap_arena)
11149 11006 * page_create()
11150 11007 * hat_memload()
11151 11008 * kmem_cache_free()
11152 11009 * ...
11153 11010 *
11154 11011 * Thus, hat_memload() could call kmem_cache_free
11155 11012 			 * enough times that we could easily
11156 11013 * hit the bottom of the stack or run out of reserve
11157 11014 * list of vmem_seg structs. So, we must donate
11158 11015 * this hblk to reserve list if it's allocated
11159 11016 * from sfmmu8_cache *and* mapping kernel range.
11160 11017 * We don't need to worry about freeing hmeblk1's
11161 11018 * to kmem since they don't map any kmem slabs.
11162 11019 *
11163 11020 * Note: When segkmem supports largepages, we must
11164 11021 * free hmeblk1's to reserve list as well.
11165 11022 */
11166 11023 forcefree = (sfmmup == KHATID) ? 1 : 0;
11167 11024 if (size == TTE8K &&
11168 11025 sfmmu_put_free_hblk(hmeblkp, forcefree)) {
11169 11026 goto re_verify;
11170 11027 }
11171 11028 ASSERT(sfmmup != KHATID);
11172 11029 kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp);
11173 11030 } else {
11174 11031 /*
11175 11032 * Hey! we don't need hblk_reserve any more.
11176 11033 */
11177 11034 ASSERT(owner);
11178 11035 hblk_reserve_thread = NULL;
11179 11036 mutex_exit(&hblk_reserve_lock);
11180 11037 owner = 0;
11181 11038 }
11182 11039 re_verify:
11183 11040 /*
11184 11041 * let's check if the goodies are still present
11185 11042 */
11186 11043 SFMMU_HASH_LOCK(hmebp);
11187 11044 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp);
11188 11045 if (newhblkp != NULL) {
11189 11046 /*
11190 11047 * return newhblkp if it's not hblk_reserve;
11191 11048 * if newhblkp is hblk_reserve, return it
11192 11049 * _only if_ we are the owner of hblk_reserve.
11193 11050 */
11194 11051 if (newhblkp != HBLK_RESERVE || owner) {
11195 11052 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) ||
11196 11053 newhblkp->hblk_shared);
11197 11054 ASSERT(SFMMU_IS_SHMERID_VALID(rid) ||
11198 11055 !newhblkp->hblk_shared);
11199 11056 return (newhblkp);
11200 11057 } else {
11201 11058 /*
11202 11059 * we just hit hblk_reserve in the hash and
11203 11060 * we are not the owner of that;
11204 11061 *
11205 11062 * block until hblk_reserve_thread completes
11206 11063 * swapping hblk_reserve and try the dance
11207 11064 * once again.
11208 11065 */
11209 11066 SFMMU_HASH_UNLOCK(hmebp);
11210 11067 mutex_enter(&hblk_reserve_lock);
11211 11068 mutex_exit(&hblk_reserve_lock);
11212 11069 SFMMU_STAT(sf_hblk_reserve_hit);
11213 11070 goto fill_hblk;
11214 11071 }
11215 11072 } else {
11216 11073 /*
11217 11074 * it's no more! try the dance once again.
11218 11075 */
11219 11076 SFMMU_HASH_UNLOCK(hmebp);
11220 11077 goto fill_hblk;
11221 11078 }
11222 11079 }
11223 11080
11224 11081 hblk_init:
11225 11082 if (SFMMU_IS_SHMERID_VALID(rid)) {
11226 11083 uint16_t tteflag = 0x1 <<
11227 11084 ((size < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ : size);
11228 11085
11229 11086 if (!(rgnp->rgn_hmeflags & tteflag)) {
11230 11087 atomic_or_16(&rgnp->rgn_hmeflags, tteflag);
11231 11088 }
11232 11089 hmeblkp->hblk_shared = 1;
11233 11090 } else {
11234 11091 hmeblkp->hblk_shared = 0;
11235 11092 }
11236 11093 set_hblk_sz(hmeblkp, size);
11237 11094 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
11238 11095 hmeblkp->hblk_next = (struct hme_blk *)NULL;
11239 11096 hmeblkp->hblk_tag = hblktag;
11240 11097 hmeblkp->hblk_shadow = shw_hblkp;
11241 11098 hblkpa = hmeblkp->hblk_nextpa;
11242 11099 hmeblkp->hblk_nextpa = HMEBLK_ENDPA;
11243 11100
11244 11101 ASSERT(get_hblk_ttesz(hmeblkp) == size);
11245 11102 ASSERT(get_hblk_span(hmeblkp) == HMEBLK_SPAN(size));
11246 11103 ASSERT(hmeblkp->hblk_hmecnt == 0);
11247 11104 ASSERT(hmeblkp->hblk_vcnt == 0);
11248 11105 ASSERT(hmeblkp->hblk_lckcnt == 0);
11249 11106 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp));
11250 11107 sfmmu_hblk_hash_add(hmebp, hmeblkp, hblkpa);
11251 11108 return (hmeblkp);
11252 11109 }
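
Beneath the reserve-pool machinery, sfmmu_hblk_alloc() follows the
optimistic allocate-then-recheck pattern: drop the bucket lock to allocate
(allocation can sleep and can recurse back into the hat layer), then
re-search the bucket and discard the new block if another thread won the
race. A minimal sketch of that skeleton; bucket_t, bucket_find(),
bucket_insert(), and item_cache are invented names, and unlike the real
code this sketch drops the bucket lock before returning:

    /* Sketch: allocate outside the bucket lock, recheck on reacquire. */
    static item_t *
    lookup_or_insert(bucket_t *bp, uint64_t key)
    {
            item_t *ip, *newp;

            newp = kmem_cache_alloc(item_cache, KM_SLEEP); /* no lock held */
            mutex_enter(&bp->b_lock);
            if ((ip = bucket_find(bp, key)) != NULL) {
                    /* another thread won the race; ours is surplus */
                    mutex_exit(&bp->b_lock);
                    kmem_cache_free(item_cache, newp);
                    return (ip);
            }
            bucket_insert(bp, newp, key);
            mutex_exit(&bp->b_lock);
            return (newp);
    }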
11253 11110
11254 11111 /*
11255 11112 * This function cleans up the hme_blk and returns it to the free list.
11256 11113 */
11257 11114 /* ARGSUSED */
11258 11115 static void
11259 11116 sfmmu_hblk_free(struct hme_blk **listp)
11260 11117 {
11261 11118 struct hme_blk *hmeblkp, *next_hmeblkp;
11262 11119 int size;
11263 11120 uint_t critical;
11264 11121 uint64_t hblkpa;
11265 11122
11266 11123 ASSERT(*listp != NULL);
11267 11124
11268 11125 hmeblkp = *listp;
11269 11126 while (hmeblkp != NULL) {
11270 11127 next_hmeblkp = hmeblkp->hblk_next;
11271 11128 ASSERT(!hmeblkp->hblk_hmecnt);
11272 11129 ASSERT(!hmeblkp->hblk_vcnt);
11273 11130 ASSERT(!hmeblkp->hblk_lckcnt);
11274 11131 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve);
11275 11132 ASSERT(hmeblkp->hblk_shared == 0);
11276 11133 ASSERT(hmeblkp->hblk_shw_bit == 0);
11277 11134 ASSERT(hmeblkp->hblk_shadow == NULL);
11278 11135
11279 11136 hblkpa = va_to_pa((caddr_t)hmeblkp);
11280 11137 ASSERT(hblkpa != (uint64_t)-1);
11281 11138 critical = (hblktosfmmu(hmeblkp) == KHATID) ? 1 : 0;
11282 11139
11283 11140 size = get_hblk_ttesz(hmeblkp);
11284 11141 hmeblkp->hblk_next = NULL;
11285 11142 hmeblkp->hblk_nextpa = hblkpa;
11286 11143
11287 11144 if (hmeblkp->hblk_nuc_bit == 0) {
11288 11145
11289 11146 if (size != TTE8K ||
11290 11147 !sfmmu_put_free_hblk(hmeblkp, critical))
11291 11148 kmem_cache_free(get_hblk_cache(hmeblkp),
11292 11149 hmeblkp);
11293 11150 }
11294 11151 hmeblkp = next_hmeblkp;
11295 11152 }
11296 11153 }
11297 11154
11298 11155 #define BUCKETS_TO_SEARCH_BEFORE_UNLOAD 30
11299 11156 #define SFMMU_HBLK_STEAL_THRESHOLD 5
11300 11157
11301 11158 static uint_t sfmmu_hblk_steal_twice;
11302 11159 static uint_t sfmmu_hblk_steal_count, sfmmu_hblk_steal_unload_count;
11303 11160
11304 11161 /*
11305 11162 * Steal a hmeblk from user or kernel hme hash lists.
11306 11163  * For an 8K tte, grab one from the reserve pool (freehblkp) before
11307 11164  * proceeding to steal; if we fail to steal after SFMMU_HBLK_STEAL_THRESHOLD
11308 11165  * attempts, tap into the critical reserve of freehblkp.
11309 11166  * Note: we keep looping in this routine until we find one.
11310 11167 */
11311 11168 static struct hme_blk *
11312 11169 sfmmu_hblk_steal(int size)
11313 11170 {
11314 11171 static struct hmehash_bucket *uhmehash_steal_hand = NULL;
11315 11172 struct hmehash_bucket *hmebp;
11316 11173 struct hme_blk *hmeblkp = NULL, *pr_hblk;
11317 11174 uint64_t hblkpa;
11318 11175 int i;
11319 11176 uint_t loop_cnt = 0, critical;
11320 11177
11321 11178 for (;;) {
11322 11179 /* Check cpu hblk pending queues */
11323 11180 if ((hmeblkp = sfmmu_check_pending_hblks(size)) != NULL) {
11324 11181 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp);
11325 11182 ASSERT(hmeblkp->hblk_hmecnt == 0);
11326 11183 ASSERT(hmeblkp->hblk_vcnt == 0);
11327 11184 return (hmeblkp);
11328 11185 }
11329 11186
11330 11187 if (size == TTE8K) {
11331 11188 critical =
11332 11189 (++loop_cnt > SFMMU_HBLK_STEAL_THRESHOLD) ? 1 : 0;
11333 11190 if (sfmmu_get_free_hblk(&hmeblkp, critical))
11334 11191 return (hmeblkp);
11335 11192 }
11336 11193
11337 11194 hmebp = (uhmehash_steal_hand == NULL) ? uhme_hash :
11338 11195 uhmehash_steal_hand;
11339 11196 ASSERT(hmebp >= uhme_hash && hmebp <= &uhme_hash[UHMEHASH_SZ]);
11340 11197
11341 11198 for (i = 0; hmeblkp == NULL && i <= UHMEHASH_SZ +
11342 11199 BUCKETS_TO_SEARCH_BEFORE_UNLOAD; i++) {
11343 11200 SFMMU_HASH_LOCK(hmebp);
11344 11201 hmeblkp = hmebp->hmeblkp;
11345 11202 hblkpa = hmebp->hmeh_nextpa;
11346 11203 pr_hblk = NULL;
11347 11204 while (hmeblkp) {
11348 11205 /*
11349 11206 				 * Check if it is an hmeblk that is not locked
11350 11207 				 * and not shared. Skip shadow hmeblks with
11351 11208 				 * shadow_mask set, i.e. valid count non-zero.
11352 11209 */
11353 11210 if ((get_hblk_ttesz(hmeblkp) == size) &&
11354 11211 (hmeblkp->hblk_shw_bit == 0 ||
11355 11212 hmeblkp->hblk_vcnt == 0) &&
11356 11213 (hmeblkp->hblk_lckcnt == 0)) {
11357 11214 /*
11358 11215 * there is a high probability that we
11359 11216 * will find a free one. search some
11360 11217 * buckets for a free hmeblk initially
11361 11218 * before unloading a valid hmeblk.
11362 11219 */
11363 11220 if ((hmeblkp->hblk_vcnt == 0 &&
11364 11221 hmeblkp->hblk_hmecnt == 0) || (i >=
11365 11222 BUCKETS_TO_SEARCH_BEFORE_UNLOAD)) {
11366 11223 if (sfmmu_steal_this_hblk(hmebp,
11367 11224 hmeblkp, hblkpa, pr_hblk)) {
11368 11225 /*
11369 11226 * Hblk is unloaded
11370 11227 * successfully
11371 11228 */
11372 11229 break;
11373 11230 }
11374 11231 }
11375 11232 }
11376 11233 pr_hblk = hmeblkp;
11377 11234 hblkpa = hmeblkp->hblk_nextpa;
11378 11235 hmeblkp = hmeblkp->hblk_next;
11379 11236 }
11380 11237
11381 11238 SFMMU_HASH_UNLOCK(hmebp);
11382 11239 if (hmebp++ == &uhme_hash[UHMEHASH_SZ])
11383 11240 hmebp = uhme_hash;
11384 11241 }
11385 11242 uhmehash_steal_hand = hmebp;
11386 11243
11387 11244 if (hmeblkp != NULL)
11388 11245 break;
11389 11246
11390 11247 /*
11391 11248 * in the worst case, look for a free one in the kernel
11392 11249 * hash table.
11393 11250 */
11394 11251 for (i = 0, hmebp = khme_hash; i <= KHMEHASH_SZ; i++) {
11395 11252 SFMMU_HASH_LOCK(hmebp);
11396 11253 hmeblkp = hmebp->hmeblkp;
11397 11254 hblkpa = hmebp->hmeh_nextpa;
11398 11255 pr_hblk = NULL;
11399 11256 while (hmeblkp) {
11400 11257 /*
11401 11258 				 * check if it is a free hmeblk
11402 11259 */
11403 11260 if ((get_hblk_ttesz(hmeblkp) == size) &&
11404 11261 (hmeblkp->hblk_lckcnt == 0) &&
11405 11262 (hmeblkp->hblk_vcnt == 0) &&
11406 11263 (hmeblkp->hblk_hmecnt == 0)) {
11407 11264 if (sfmmu_steal_this_hblk(hmebp,
11408 11265 hmeblkp, hblkpa, pr_hblk)) {
11409 11266 break;
11410 11267 } else {
11411 11268 /*
11412 11269 * Cannot fail since we have
11413 11270 * hash lock.
11414 11271 */
11415 11272 panic("fail to steal?");
11416 11273 }
11417 11274 }
11418 11275
11419 11276 pr_hblk = hmeblkp;
11420 11277 hblkpa = hmeblkp->hblk_nextpa;
11421 11278 hmeblkp = hmeblkp->hblk_next;
11422 11279 }
11423 11280
11424 11281 SFMMU_HASH_UNLOCK(hmebp);
11425 11282 if (hmebp++ == &khme_hash[KHMEHASH_SZ])
11426 11283 hmebp = khme_hash;
11427 11284 }
11428 11285
11429 11286 if (hmeblkp != NULL)
11430 11287 break;
11431 11288 sfmmu_hblk_steal_twice++;
11432 11289 }
11433 11290 return (hmeblkp);
11434 11291 }
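
sfmmu_steal_this_hblk() below clears the stolen block's bit in its shadow
parent's mask with a compare-and-swap retry loop, so a concurrent update to
another bit of the same word is never lost. The bare idiom, using the same
atomic_cas_32() as the code below:

    /* Sketch: atomically clear bit "vshift" in *maskp via CAS retry. */
    static void
    atomic_clear_bit32(volatile uint32_t *maskp, uint_t vshift)
    {
            uint32_t omask, nmask;

            do {
                    omask = *maskp;
                    nmask = omask & ~(1U << vshift);
            } while (atomic_cas_32(maskp, omask, nmask) != omask);
    }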
11435 11292
11436 11293 /*
11437 11294 * This routine does the real work of preparing an hblk to be "stolen":
11438 11295 * unloading the mappings, updating shadow counts, and so on.
11439 11296 * It returns 1 if the block is ready to be reused (stolen), or 0
11440 11297 * if the block cannot be stolen yet because pageunload is still
11441 11298 * working on this hblk.
11442 11299 */
11443 11300 static int
11444 11301 sfmmu_steal_this_hblk(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
11445 11302 uint64_t hblkpa, struct hme_blk *pr_hblk)
11446 11303 {
11447 11304 int shw_size, vshift;
11448 11305 struct hme_blk *shw_hblkp;
11449 11306 caddr_t vaddr;
11450 11307 uint_t shw_mask, newshw_mask;
11451 11308 struct hme_blk *list = NULL;
11452 11309
11453 11310 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
11454 11311
11455 11312 /*
11456 11313 * check if the hmeblk is free, unload if necessary
11457 11314 */
11458 11315 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
11459 11316 sfmmu_t *sfmmup;
11460 11317 demap_range_t dmr;
11461 11318
11462 11319 sfmmup = hblktosfmmu(hmeblkp);
11463 11320 if (hmeblkp->hblk_shared || sfmmup->sfmmu_ismhat) {
11464 11321 return (0);
11465 11322 }
11466 11323 DEMAP_RANGE_INIT(sfmmup, &dmr);
11467 11324 (void) sfmmu_hblk_unload(sfmmup, hmeblkp,
11468 11325 (caddr_t)get_hblk_base(hmeblkp),
11469 11326 get_hblk_endaddr(hmeblkp), &dmr, HAT_UNLOAD);
11470 11327 DEMAP_RANGE_FLUSH(&dmr);
11471 11328 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
11472 11329 /*
11473 11330 * Pageunload is working on the same hblk.
11474 11331 */
11475 11332 return (0);
11476 11333 }
11477 11334
11478 11335 sfmmu_hblk_steal_unload_count++;
11479 11336 }
11480 11337
11481 11338 ASSERT(hmeblkp->hblk_lckcnt == 0);
11482 11339 ASSERT(hmeblkp->hblk_vcnt == 0 && hmeblkp->hblk_hmecnt == 0);
11483 11340
11484 11341 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 1);
11485 11342 hmeblkp->hblk_nextpa = hblkpa;
11486 11343
11487 11344 shw_hblkp = hmeblkp->hblk_shadow;
11488 11345 if (shw_hblkp) {
11489 11346 ASSERT(!hmeblkp->hblk_shared);
11490 11347 shw_size = get_hblk_ttesz(shw_hblkp);
11491 11348 vaddr = (caddr_t)get_hblk_base(hmeblkp);
11492 11349 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size);
11493 11350 ASSERT(vshift < 8);
11494 11351 /*
11495 11352 * Atomically clear shadow mask bit
11496 11353 */
11497 11354 do {
11498 11355 shw_mask = shw_hblkp->hblk_shw_mask;
11499 11356 ASSERT(shw_mask & (1 << vshift));
11500 11357 newshw_mask = shw_mask & ~(1 << vshift);
11501 11358 newshw_mask = atomic_cas_32(&shw_hblkp->hblk_shw_mask,
11502 11359 shw_mask, newshw_mask);
11503 11360 } while (newshw_mask != shw_mask);
11504 11361 hmeblkp->hblk_shadow = NULL;
11505 11362 }
11506 11363
11507 11364 /*
11508 11365 * remove shadow bit if we are stealing an unused shadow hmeblk.
11509 11366	 * sfmmu_hblk_alloc needs it that way, and will set the shadow bit later if
11510 11367 * we are indeed allocating a shadow hmeblk.
11511 11368 */
11512 11369 hmeblkp->hblk_shw_bit = 0;
11513 11370
11514 11371 if (hmeblkp->hblk_shared) {
11515 11372 sf_srd_t *srdp;
11516 11373 sf_region_t *rgnp;
11517 11374 uint_t rid;
11518 11375
11519 11376 srdp = hblktosrd(hmeblkp);
11520 11377 ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
11521 11378 rid = hmeblkp->hblk_tag.htag_rid;
11522 11379 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
11523 11380 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
11524 11381 rgnp = srdp->srd_hmergnp[rid];
11525 11382 ASSERT(rgnp != NULL);
11526 11383 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
11527 11384 hmeblkp->hblk_shared = 0;
11528 11385 }
11529 11386
11530 11387 sfmmu_hblk_steal_count++;
11531 11388 SFMMU_STAT(sf_steal_count);
11532 11389
11533 11390 return (1);
11534 11391 }
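
The do { } while () loop in sfmmu_steal_this_hblk() that clears a bit of hblk_shw_mask is a standard compare-and-swap retry loop: atomic_cas_32() returns the value that was actually in memory, so the loop retries until the value it read matches the value the update was computed from. A minimal standalone sketch of the same pattern, using the illumos atomic_cas_32() interface (the function name here is illustrative and not part of this change):

	#include <sys/atomic.h>

	/*
	 * Atomically clear one bit in *maskp with a CAS retry loop,
	 * mirroring the hblk_shw_mask update above.
	 */
	static void
	clear_mask_bit(volatile uint32_t *maskp, int bit)
	{
		uint32_t old, new;

		do {
			old = *maskp;
			new = old & ~(1U << bit);
			/* atomic_cas_32() returns the prior contents */
		} while (atomic_cas_32(maskp, old, new) != old);
	}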
11535 11392
11536 11393 struct hme_blk *
11537 11394 sfmmu_hmetohblk(struct sf_hment *sfhme)
11538 11395 {
11539 11396 struct hme_blk *hmeblkp;
11540 11397 struct sf_hment *sfhme0;
11541 11398 struct hme_blk *hblk_dummy = 0;
11542 11399
11543 11400 /*
11544 11401 * No dummy sf_hments, please.
11545 11402 */
11546 11403 ASSERT(sfhme->hme_tte.ll != 0);
11547 11404
11548 11405 sfhme0 = sfhme - sfhme->hme_tte.tte_hmenum;
11549 11406 hmeblkp = (struct hme_blk *)((uintptr_t)sfhme0 -
11550 11407 (uintptr_t)&hblk_dummy->hblk_hme[0]);
11551 11408
11552 11409 return (hmeblkp);
11553 11410 }
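
sfmmu_hmetohblk() recovers the enclosing hme_blk from a pointer to one of its embedded sf_hments: it backs up to hblk_hme[0] using the hment index kept in the TTE, then subtracts the offset of hblk_hme[0] within the structure (hblk_dummy, a null pointer, is just a portable way to compute that offset). A self-contained userland sketch of the same container-of idiom, with hypothetical structure names:

	#include <stdio.h>
	#include <stdint.h>

	struct item { int payload; };

	struct block {
		int header;
		struct item items[8];
	};

	/* Recover the enclosing block from a pointer to items[idx]. */
	static struct block *
	item_to_block(struct item *ip, int idx)
	{
		struct block *dummy = 0;
		struct item *first = ip - idx;	/* back up to items[0] */

		return ((struct block *)((uintptr_t)first -
		    (uintptr_t)&dummy->items[0]));
	}

	int
	main(void)
	{
		struct block b;

		printf("%d\n", item_to_block(&b.items[3], 3) == &b);
		return (0);
	}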
11554 11411
11555 11412 /*
11556 11413 * On swapin, get appropriately sized TSB(s) and clear the HAT_SWAPPED flag.
11557 11414 * If we can't get appropriately sized TSB(s), try for 8K TSB(s) using
11558 11415 * KM_SLEEP allocation.
11561 11418 */
11562 11419 static void
11563 11420 sfmmu_tsb_swapin(sfmmu_t *sfmmup, hatlock_t *hatlockp)
11564 11421 {
11565 11422 struct tsb_info *tsbinfop, *next;
11566 11423 tsb_replace_rc_t rc;
11567 11424 boolean_t gotfirst = B_FALSE;
11568 11425
11569 11426 ASSERT(sfmmup != ksfmmup);
11570 11427 ASSERT(sfmmu_hat_lock_held(sfmmup));
11571 11428
11572 11429 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPIN)) {
11573 11430 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp));
11574 11431 }
11575 11432
11576 11433 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
11577 11434 SFMMU_FLAGS_SET(sfmmup, HAT_SWAPIN);
11578 11435 } else {
11579 11436 return;
11580 11437 }
11581 11438
11582 11439 ASSERT(sfmmup->sfmmu_tsb != NULL);
11583 11440
11584 11441 /*
11585 11442	 * Loop over all tsbinfos, replacing them with ones that actually have
11586 11443 * a TSB. If any of the replacements ever fail, bail out of the loop.
11587 11444 */
11588 11445 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; tsbinfop = next) {
11589 11446 ASSERT(tsbinfop->tsb_flags & TSB_SWAPPED);
11590 11447 next = tsbinfop->tsb_next;
11591 11448 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, tsbinfop->tsb_szc,
11592 11449 hatlockp, TSB_SWAPIN);
11593 11450 if (rc != TSB_SUCCESS) {
11594 11451 break;
11595 11452 }
11596 11453 gotfirst = B_TRUE;
11597 11454 }
11598 11455
11599 11456 switch (rc) {
11600 11457 case TSB_SUCCESS:
11601 11458 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN);
11602 11459 cv_broadcast(&sfmmup->sfmmu_tsb_cv);
11603 11460 return;
11604 11461 case TSB_LOSTRACE:
11605 11462 break;
11606 11463 case TSB_ALLOCFAIL:
11607 11464 break;
11608 11465 default:
11609 11466 panic("sfmmu_replace_tsb returned unrecognized failure code "
11610 11467 "%d", rc);
11611 11468 }
11612 11469
11613 11470 /*
11614 11471 * In this case, we failed to get one of our TSBs. If we failed to
11615 11472 * get the first TSB, get one of minimum size (8KB). Walk the list
11616 11473 * and throw away the tsbinfos, starting where the allocation failed;
11617 11474 * we can get by with just one TSB as long as we don't leave the
11618 11475 * SWAPPED tsbinfo structures lying around.
11619 11476 */
11620 11477 tsbinfop = sfmmup->sfmmu_tsb;
11621 11478 next = tsbinfop->tsb_next;
11622 11479 tsbinfop->tsb_next = NULL;
11623 11480
11624 11481 sfmmu_hat_exit(hatlockp);
11625 11482 for (tsbinfop = next; tsbinfop != NULL; tsbinfop = next) {
11626 11483 next = tsbinfop->tsb_next;
11627 11484 sfmmu_tsbinfo_free(tsbinfop);
11628 11485 }
11629 11486 hatlockp = sfmmu_hat_enter(sfmmup);
11630 11487
11631 11488 /*
11632 11489 * If we don't have any TSBs, get a single 8K TSB for 8K, 64K and 512K
11633 11490 * pages.
11634 11491 */
11635 11492 if (!gotfirst) {
11636 11493 tsbinfop = sfmmup->sfmmu_tsb;
11637 11494 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, TSB_MIN_SZCODE,
11638 11495 hatlockp, TSB_SWAPIN | TSB_FORCEALLOC);
11639 11496 ASSERT(rc == TSB_SUCCESS);
11640 11497 }
11641 11498
11642 11499 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN);
11643 11500 cv_broadcast(&sfmmup->sfmmu_tsb_cv);
11644 11501 }
11645 11502
11646 11503 static int
11647 11504 sfmmu_is_rgnva(sf_srd_t *srdp, caddr_t addr, ulong_t w, ulong_t bmw)
11648 11505 {
11649 11506 ulong_t bix = 0;
11650 11507 uint_t rid;
11651 11508 sf_region_t *rgnp;
11652 11509
11653 11510 ASSERT(srdp != NULL);
11654 11511 ASSERT(srdp->srd_refcnt != 0);
11655 11512
11656 11513 w <<= BT_ULSHIFT;
11657 11514 while (bmw) {
11658 11515 if (!(bmw & 0x1)) {
11659 11516 bix++;
11660 11517 bmw >>= 1;
11661 11518 continue;
11662 11519 }
11663 11520 rid = w | bix;
11664 11521 rgnp = srdp->srd_hmergnp[rid];
11665 11522 ASSERT(rgnp->rgn_refcnt > 0);
11666 11523 ASSERT(rgnp->rgn_id == rid);
11667 11524 if (addr < rgnp->rgn_saddr ||
11668 11525 addr >= (rgnp->rgn_saddr + rgnp->rgn_size)) {
11669 11526 bix++;
11670 11527 bmw >>= 1;
11671 11528 } else {
11672 11529 return (1);
11673 11530 }
11674 11531 }
11675 11532 return (0);
11676 11533 }
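
sfmmu_is_rgnva() scans the set bits of a single bitmap word: w is the word index shifted up into a region-id base, and each set bit's position is OR-ed in to form the rid that indexes srd_hmergnp[]. The bit walk in isolation, assuming (for illustration only) a shift of 6, i.e. 64 bits per word:

	#include <stdio.h>

	#define	ULSHIFT	6	/* assumed log2(bits per word) */

	/* Call cb(rid) for each region id whose bit is set in word w. */
	static void
	walk_word(unsigned long w, unsigned long bmw,
	    void (*cb)(unsigned int))
	{
		unsigned long bix = 0;

		w <<= ULSHIFT;		/* word index -> base region id */
		while (bmw) {
			if (bmw & 0x1)
				cb((unsigned int)(w | bix));
			bix++;
			bmw >>= 1;
		}
	}

	static void
	print_rid(unsigned int rid)
	{
		printf("rid %u\n", rid);
	}

	int
	main(void)
	{
		walk_word(2, 0x5, print_rid);	/* rid 128, rid 130 */
		return (0);
	}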
11677 11534
11678 11535 /*
11679 11536  * Handle exceptions for the low level tsb_handler.
11680 11537  *
11681 11538  * There are many scenarios that could land us here:
11682 11539  *
11683 11540  * If the context is invalid we land here. The context can be invalid
11684 11541  * for 3 reasons: 1) we couldn't allocate a new context and now need to
11685 11542  * perform a wrap-around operation in order to allocate a new context;
11686 11543  * 2) the context was invalidated to change pagesize programming; 3) the
11687 11544  * ISM or TSB configuration is changing for this process and we are
11688 11545  * forced in here to do a synchronization operation. If the context is
11689 11546  * valid we can be here from the window trap handler, in which case we
11690 11547  * just call trap() to handle the fault.
11691 11548 *
11692 11549 * Note that the process will run in INVALID_CONTEXT before
11693 11550 * faulting into here and subsequently loading the MMU registers
11694 11551 * (including the TSB base register) associated with this process.
11695 11552 * For this reason, the trap handlers must all test for
11696 11553 * INVALID_CONTEXT before attempting to access any registers other
11697 11554 * than the context registers.
11698 11555 */
11699 11556 void
11700 11557 sfmmu_tsbmiss_exception(struct regs *rp, uintptr_t tagaccess, uint_t traptype)
11701 11558 {
11702 11559 sfmmu_t *sfmmup, *shsfmmup;
11703 11560 uint_t ctxtype;
11704 11561 klwp_id_t lwp;
11705 11562 char lwp_save_state;
11706 11563 hatlock_t *hatlockp, *shatlockp;
11707 11564 struct tsb_info *tsbinfop;
11708 11565 struct tsbmiss *tsbmp;
11709 11566 sf_scd_t *scdp;
11710 11567
11711 11568 SFMMU_STAT(sf_tsb_exceptions);
11712 11569 SFMMU_MMU_STAT(mmu_tsb_exceptions);
11713 11570 sfmmup = astosfmmu(curthread->t_procp->p_as);
11714 11571 /*
11715 11572	 * Note that in sun4u, the tagaccess register contains the ctxnum
11716 11573	 * while sun4v passes the ctxtype in the tagaccess register.
11717 11574 */
11718 11575 ctxtype = tagaccess & TAGACC_CTX_MASK;
11719 11576
11720 11577 ASSERT(sfmmup != ksfmmup && ctxtype != KCONTEXT);
11721 11578 ASSERT(sfmmup->sfmmu_ismhat == 0);
11722 11579 ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED) ||
11723 11580 ctxtype == INVALID_CONTEXT);
11724 11581
11725 11582 if (ctxtype != INVALID_CONTEXT && traptype != T_DATA_PROT) {
11726 11583 /*
11727 11584		 * We may land here because the shme bitmap and pagesize
11728 11585		 * flags are updated lazily in the tsbmiss area on other cpus.
11729 11586		 * If we detect here that the tsbmiss area is out of sync with
11730 11587		 * the sfmmu, update it and retry the trapped instruction.
11731 11588 * Otherwise call trap().
11732 11589 */
11733 11590 int ret = 0;
11734 11591 uchar_t tteflag_mask = (1 << TTE64K) | (1 << TTE8K);
11735 11592 caddr_t addr = (caddr_t)(tagaccess & TAGACC_VADDR_MASK);
11736 11593
11737 11594 /*
11738 11595 * Must set lwp state to LWP_SYS before
11739 11596 * trying to acquire any adaptive lock
11740 11597 */
11741 11598 lwp = ttolwp(curthread);
11742 11599 ASSERT(lwp);
11743 11600 lwp_save_state = lwp->lwp_state;
11744 11601 lwp->lwp_state = LWP_SYS;
11745 11602
11746 11603 hatlockp = sfmmu_hat_enter(sfmmup);
11747 11604 kpreempt_disable();
11748 11605 tsbmp = &tsbmiss_area[CPU->cpu_id];
11749 11606 ASSERT(sfmmup == tsbmp->usfmmup);
11750 11607 if (((tsbmp->uhat_tteflags ^ sfmmup->sfmmu_tteflags) &
11751 11608 ~tteflag_mask) ||
11752 11609 ((tsbmp->uhat_rtteflags ^ sfmmup->sfmmu_rtteflags) &
11753 11610 ~tteflag_mask)) {
11754 11611 tsbmp->uhat_tteflags = sfmmup->sfmmu_tteflags;
11755 11612 tsbmp->uhat_rtteflags = sfmmup->sfmmu_rtteflags;
11756 11613 ret = 1;
11757 11614 }
11758 11615 if (sfmmup->sfmmu_srdp != NULL) {
11759 11616 ulong_t *sm = sfmmup->sfmmu_hmeregion_map.bitmap;
11760 11617 ulong_t *tm = tsbmp->shmermap;
11761 11618 ulong_t i;
11762 11619 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) {
11763 11620 ulong_t d = tm[i] ^ sm[i];
11764 11621 if (d) {
11765 11622 if (d & sm[i]) {
11766 11623 if (!ret && sfmmu_is_rgnva(
11767 11624 sfmmup->sfmmu_srdp,
11768 11625 addr, i, d & sm[i])) {
11769 11626 ret = 1;
11770 11627 }
11771 11628 }
11772 11629 tm[i] = sm[i];
11773 11630 }
11774 11631 }
11775 11632 }
11776 11633 kpreempt_enable();
11777 11634 sfmmu_hat_exit(hatlockp);
11778 11635 lwp->lwp_state = lwp_save_state;
11779 11636 if (ret) {
11780 11637 return;
11781 11638 }
11782 11639 } else if (ctxtype == INVALID_CONTEXT) {
11783 11640 /*
11784 11641 * First, make sure we come out of here with a valid ctx,
11785 11642 * since if we don't get one we'll simply loop on the
11786 11643 * faulting instruction.
11787 11644 *
11788 11645 * If the ISM mappings are changing, the TSB is relocated,
11789 11646 * the process is swapped, the process is joining SCD or
11790 11647 * leaving SCD or shared regions we serialize behind the
11791 11648 * controlling thread with hat lock, sfmmu_flags and
11792 11649 * sfmmu_tsb_cv condition variable.
11793 11650 */
11794 11651
11795 11652 /*
11796 11653 * Must set lwp state to LWP_SYS before
11797 11654 * trying to acquire any adaptive lock
11798 11655 */
11799 11656 lwp = ttolwp(curthread);
11800 11657 ASSERT(lwp);
11801 11658 lwp_save_state = lwp->lwp_state;
11802 11659 lwp->lwp_state = LWP_SYS;
11803 11660
11804 11661 hatlockp = sfmmu_hat_enter(sfmmup);
11805 11662 retry:
11806 11663 if ((scdp = sfmmup->sfmmu_scdp) != NULL) {
11807 11664 shsfmmup = scdp->scd_sfmmup;
11808 11665 ASSERT(shsfmmup != NULL);
11809 11666
11810 11667 for (tsbinfop = shsfmmup->sfmmu_tsb; tsbinfop != NULL;
11811 11668 tsbinfop = tsbinfop->tsb_next) {
11812 11669 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) {
11813 11670 /* drop the private hat lock */
11814 11671 sfmmu_hat_exit(hatlockp);
11815 11672 /* acquire the shared hat lock */
11816 11673 shatlockp = sfmmu_hat_enter(shsfmmup);
11817 11674 /*
11818 11675 * recheck to see if anything changed
11819 11676 * after we drop the private hat lock.
11820 11677 */
11821 11678 if (sfmmup->sfmmu_scdp == scdp &&
11822 11679 shsfmmup == scdp->scd_sfmmup) {
11823 11680 sfmmu_tsb_chk_reloc(shsfmmup,
11824 11681 shatlockp);
11825 11682 }
11826 11683 sfmmu_hat_exit(shatlockp);
11827 11684 hatlockp = sfmmu_hat_enter(sfmmup);
11828 11685 goto retry;
11829 11686 }
11830 11687 }
11831 11688 }
11832 11689
11833 11690 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
11834 11691 tsbinfop = tsbinfop->tsb_next) {
11835 11692 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) {
11836 11693 cv_wait(&sfmmup->sfmmu_tsb_cv,
11837 11694 HATLOCK_MUTEXP(hatlockp));
11838 11695 goto retry;
11839 11696 }
11840 11697 }
11841 11698
11842 11699 /*
11843 11700 * Wait for ISM maps to be updated.
11844 11701 */
11845 11702 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) {
11846 11703 cv_wait(&sfmmup->sfmmu_tsb_cv,
11847 11704 HATLOCK_MUTEXP(hatlockp));
11848 11705 goto retry;
11849 11706 }
11850 11707
11851 11708 /* Is this process joining an SCD? */
11852 11709 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
11853 11710 /*
11854 11711 * Flush private TSB and setup shared TSB.
11855 11712 * sfmmu_finish_join_scd() does not drop the
11856 11713 * hat lock.
11857 11714 */
11858 11715 sfmmu_finish_join_scd(sfmmup);
11859 11716 SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD);
11860 11717 }
11861 11718
11862 11719 /*
11863 11720 * If we're swapping in, get TSB(s). Note that we must do
11864 11721 * this before we get a ctx or load the MMU state. Once
11865 11722 * we swap in we have to recheck to make sure the TSB(s) and
11866 11723 * ISM mappings didn't change while we slept.
11867 11724 */
11868 11725 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
11869 11726 sfmmu_tsb_swapin(sfmmup, hatlockp);
11870 11727 goto retry;
11871 11728 }
11872 11729
11873 11730 sfmmu_get_ctx(sfmmup);
11874 11731
11875 11732 sfmmu_hat_exit(hatlockp);
11876 11733 /*
11877 11734 * Must restore lwp_state if not calling
11878 11735 * trap() for further processing. Restore
11879 11736 * it anyway.
11880 11737 */
11881 11738 lwp->lwp_state = lwp_save_state;
11882 11739 return;
11883 11740 }
11884 11741 trap(rp, (caddr_t)tagaccess, traptype, 0);
11885 11742 }
11886 11743
11887 11744 static void
11888 11745 sfmmu_tsb_chk_reloc(sfmmu_t *sfmmup, hatlock_t *hatlockp)
11889 11746 {
11890 11747 struct tsb_info *tp;
11891 11748
11892 11749 ASSERT(sfmmu_hat_lock_held(sfmmup));
11893 11750
11894 11751 for (tp = sfmmup->sfmmu_tsb; tp != NULL; tp = tp->tsb_next) {
11895 11752 if (tp->tsb_flags & TSB_RELOC_FLAG) {
11896 11753 cv_wait(&sfmmup->sfmmu_tsb_cv,
11897 11754 HATLOCK_MUTEXP(hatlockp));
11898 11755 break;
11899 11756 }
11900 11757 }
11901 11758 }
11902 11759
11903 11760 /*
11904 11761  * sfmmu_vatopfn_suspended is called from GET_TTE when TL=0 and the
11905 11762  * TTE_SUSPENDED bit is set in the tte. We block on acquiring a page
11906 11763  * lock rather than spinning, to avoid send-mondo timeouts with
11907 11764 * interrupts enabled. When the lock is acquired it is immediately
11908 11765 * released and we return back to sfmmu_vatopfn just after
11909 11766 * the GET_TTE call.
11910 11767 */
11911 11768 void
11912 11769 sfmmu_vatopfn_suspended(caddr_t vaddr, sfmmu_t *sfmmu, tte_t *ttep)
11913 11770 {
11914 11771 struct page **pp;
11915 11772
11916 11773 (void) as_pagelock(sfmmu->sfmmu_as, &pp, vaddr, TTE_CSZ(ttep), S_WRITE);
11917 11774 as_pageunlock(sfmmu->sfmmu_as, pp, vaddr, TTE_CSZ(ttep), S_WRITE);
11918 11775 }
11919 11776
11920 11777 /*
11921 11778  * sfmmu_tsbmiss_suspended is called from GET_TTE when TL>0 and the
11922 11779  * TTE_SUSPENDED bit is set in the tte. We do this so that we can handle
11923 11780 * cross traps which cannot be handled while spinning in the
11924 11781 * trap handlers. Simply enter and exit the kpr_suspendlock spin
11925 11782 * mutex, which is held by the holder of the suspend bit, and then
11926 11783 * retry the trapped instruction after unwinding.
11927 11784 */
11928 11785 /*ARGSUSED*/
11929 11786 void
11930 11787 sfmmu_tsbmiss_suspended(struct regs *rp, uintptr_t tagacc, uint_t traptype)
11931 11788 {
11932 11789 ASSERT(curthread != kreloc_thread);
11933 11790 mutex_enter(&kpr_suspendlock);
11934 11791 mutex_exit(&kpr_suspendlock);
11935 11792 }
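
The enter/exit pair above uses kpr_suspendlock purely as a rendezvous: the holder of the suspend bit owns the mutex for the whole suspension window, so mutex_enter() blocks until the suspension is over, and there is nothing to do inside the critical section. A userland sketch of the same trick, using pthreads and an illustrative lock name:

	#include <pthread.h>

	static pthread_mutex_t suspend_lock = PTHREAD_MUTEX_INITIALIZER;

	/*
	 * Block until the suspender (which holds suspend_lock for the
	 * duration of the suspension) drops the lock; acquiring and
	 * immediately releasing it is the entire synchronization.
	 */
	static void
	wait_for_resume(void)
	{
		(void) pthread_mutex_lock(&suspend_lock);
		(void) pthread_mutex_unlock(&suspend_lock);
	}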
11936 11793
11937 11794 /*
11938 11795 * This routine could be optimized to reduce the number of xcalls by flushing
11939 11796  * the entire TLB if the region reference count is above some threshold, but
11940 11797  * the tradeoff will depend on the size of the TLB. So for now flush the
11941 11798  * specific page one context at a time.
11942 11799 *
11943 11800 * If uselocks is 0 then it's called after all cpus were captured and all the
11944 11801  * hat locks were taken. In this case we don't take the region lock, relying
11945 11802  * instead on the order of region list update operations in hat_join_region(),
11946 11803  * hat_leave_region() and hat_dup_region(). The ordering in those routines
11947 11804  * guarantees that the list is always forward walkable and reaches active
11948 11805  * sfmmus regardless of where xc_attention() captures a cpu.
11949 11806 */
11950 11807 cpuset_t
11951 11808 sfmmu_rgntlb_demap(caddr_t addr, sf_region_t *rgnp,
11952 11809 struct hme_blk *hmeblkp, int uselocks)
11953 11810 {
11954 11811 sfmmu_t *sfmmup;
11955 11812 cpuset_t cpuset;
11956 11813 cpuset_t rcpuset;
11957 11814 hatlock_t *hatlockp;
11958 11815 uint_t rid = rgnp->rgn_id;
11959 11816 sf_rgn_link_t *rlink;
11960 11817 sf_scd_t *scdp;
11961 11818
11962 11819 ASSERT(hmeblkp->hblk_shared);
11963 11820 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
11964 11821 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
11965 11822
11966 11823 CPUSET_ZERO(rcpuset);
11967 11824 if (uselocks) {
11968 11825 mutex_enter(&rgnp->rgn_mutex);
11969 11826 }
11970 11827 sfmmup = rgnp->rgn_sfmmu_head;
11971 11828 while (sfmmup != NULL) {
11972 11829 if (uselocks) {
11973 11830 hatlockp = sfmmu_hat_enter(sfmmup);
11974 11831 }
11975 11832
11976 11833 /*
11977 11834 * When an SCD is created the SCD hat is linked on the sfmmu
11978 11835 * region lists for each hme region which is part of the
11979 11836		 * SCD. If we find an SCD hat when walking these lists,
11980 11837		 * we flush the shared TSBs; if we find a private hat
11981 11838		 * which is part of an SCD, but where the region
11982 11839		 * is not part of the SCD, we flush the private TSBs.
11983 11840 */
11984 11841 if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL &&
11985 11842 !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
11986 11843 scdp = sfmmup->sfmmu_scdp;
11987 11844 if (SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) {
11988 11845 if (uselocks) {
11989 11846 sfmmu_hat_exit(hatlockp);
11990 11847 }
11991 11848 goto next;
11992 11849 }
11993 11850 }
11994 11851
11995 11852 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
11996 11853
11997 11854 kpreempt_disable();
11998 11855 cpuset = sfmmup->sfmmu_cpusran;
11999 11856 CPUSET_AND(cpuset, cpu_ready_set);
12000 11857 CPUSET_DEL(cpuset, CPU->cpu_id);
12001 11858 SFMMU_XCALL_STATS(sfmmup);
12002 11859 xt_some(cpuset, vtag_flushpage_tl1,
12003 11860 (uint64_t)addr, (uint64_t)sfmmup);
12004 11861 vtag_flushpage(addr, (uint64_t)sfmmup);
12005 11862 if (uselocks) {
12006 11863 sfmmu_hat_exit(hatlockp);
12007 11864 }
12008 11865 kpreempt_enable();
12009 11866 CPUSET_OR(rcpuset, cpuset);
12010 11867
12011 11868 next:
12012 11869 /* LINTED: constant in conditional context */
12013 11870 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0);
12014 11871 ASSERT(rlink != NULL);
12015 11872 sfmmup = rlink->next;
12016 11873 }
12017 11874 if (uselocks) {
12018 11875 mutex_exit(&rgnp->rgn_mutex);
12019 11876 }
12020 11877 return (rcpuset);
12021 11878 }
12022 11879
12023 11880 /*
12024 11881  * This routine takes an sfmmu pointer and the va for an address in an
12025 11882 * ISM region as input and returns the corresponding region id in ism_rid.
12026 11883 * The return value of 1 indicates that a region has been found and ism_rid
12027 11884 * is valid, otherwise 0 is returned.
12028 11885 */
12029 11886 static int
12030 11887 find_ism_rid(sfmmu_t *sfmmup, sfmmu_t *ism_sfmmup, caddr_t va, uint_t *ism_rid)
12031 11888 {
12032 11889 ism_blk_t *ism_blkp;
12033 11890 int i;
12034 11891 ism_map_t *ism_map;
12035 11892 #ifdef DEBUG
12036 11893 struct hat *ism_hatid;
12037 11894 #endif
12038 11895 ASSERT(sfmmu_hat_lock_held(sfmmup));
12039 11896
12040 11897 ism_blkp = sfmmup->sfmmu_iblk;
12041 11898 while (ism_blkp != NULL) {
12042 11899 ism_map = ism_blkp->iblk_maps;
12043 11900 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
12044 11901 if ((va >= ism_start(ism_map[i])) &&
12045 11902 (va < ism_end(ism_map[i]))) {
12046 11903
12047 11904 *ism_rid = ism_map[i].imap_rid;
12048 11905 #ifdef DEBUG
12049 11906 ism_hatid = ism_map[i].imap_ismhat;
12050 11907 ASSERT(ism_hatid == ism_sfmmup);
12051 11908 ASSERT(ism_hatid->sfmmu_ismhat);
12052 11909 #endif
12053 11910 return (1);
12054 11911 }
12055 11912 }
12056 11913 ism_blkp = ism_blkp->iblk_next;
12057 11914 }
12058 11915 return (0);
12059 11916 }
12060 11917
12061 11918 /*
12062 11919  * Special routine to flush out ism mappings: TSBs, TLBs and D-caches.
12063 11920  * This routine may be called with all cpus captured. Therefore, the
12064 11921 * caller is responsible for holding all locks and disabling kernel
12065 11922 * preemption.
12066 11923 */
12067 11924 /* ARGSUSED */
12068 11925 static void
12069 11926 sfmmu_ismtlbcache_demap(caddr_t addr, sfmmu_t *ism_sfmmup,
12070 11927 struct hme_blk *hmeblkp, pfn_t pfnum, int cache_flush_flag)
12071 11928 {
12072 11929 cpuset_t cpuset;
12073 11930 caddr_t va;
12074 11931 ism_ment_t *ment;
12075 11932 sfmmu_t *sfmmup;
12076 11933 #ifdef VAC
12077 11934 int vcolor;
12078 11935 #endif
12079 11936
12080 11937 sf_scd_t *scdp;
12081 11938 uint_t ism_rid;
12082 11939
12083 11940 ASSERT(!hmeblkp->hblk_shared);
12084 11941 /*
12085 11942 * Walk the ism_hat's mapping list and flush the page
12086 11943 * from every hat sharing this ism_hat. This routine
12087 11944	 * may be called while all cpus have been captured.
12088 11945 * Therefore we can't attempt to grab any locks. For now
12089 11946 * this means we will protect the ism mapping list under
12090 11947 * a single lock which will be grabbed by the caller.
12091 11948	 * If hat_share/unshare scalability becomes a performance
12092 11949 * problem then we may need to re-think ism mapping list locking.
12093 11950 */
12094 11951 ASSERT(ism_sfmmup->sfmmu_ismhat);
12095 11952 ASSERT(MUTEX_HELD(&ism_mlist_lock));
12096 11953 addr = addr - ISMID_STARTADDR;
12097 11954
12098 11955 for (ment = ism_sfmmup->sfmmu_iment; ment; ment = ment->iment_next) {
12099 11956
12100 11957 sfmmup = ment->iment_hat;
12101 11958
12102 11959 va = ment->iment_base_va;
12103 11960 va = (caddr_t)((uintptr_t)va + (uintptr_t)addr);
12104 11961
12105 11962 /*
12106 11963 * When an SCD is created the SCD hat is linked on the ism
12107 11964 * mapping lists for each ISM segment which is part of the
12108 11965		 * SCD. If we find an SCD hat when walking these lists,
12109 11966		 * we flush the shared TSBs; if we find a private hat
12110 11967		 * which is part of an SCD, but where the region
12111 11968		 * corresponding to this va is not part of the SCD, we
12112 11969		 * flush the private TSBs.
12113 11970 */
12114 11971 if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL &&
12115 11972 !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD) &&
12116 11973 !SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) {
12117 11974 if (!find_ism_rid(sfmmup, ism_sfmmup, va,
12118 11975 &ism_rid)) {
12119 11976 cmn_err(CE_PANIC,
12120 11977 "can't find matching ISM rid!");
12121 11978 }
12122 11979
12123 11980 scdp = sfmmup->sfmmu_scdp;
12124 11981 if (SFMMU_IS_ISMRID_VALID(ism_rid) &&
12125 11982 SF_RGNMAP_TEST(scdp->scd_ismregion_map,
12126 11983 ism_rid)) {
12127 11984 continue;
12128 11985 }
12129 11986 }
12130 11987 SFMMU_UNLOAD_TSB(va, sfmmup, hmeblkp, 1);
12131 11988
12132 11989 cpuset = sfmmup->sfmmu_cpusran;
12133 11990 CPUSET_AND(cpuset, cpu_ready_set);
12134 11991 CPUSET_DEL(cpuset, CPU->cpu_id);
12135 11992 SFMMU_XCALL_STATS(sfmmup);
12136 11993 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)va,
12137 11994 (uint64_t)sfmmup);
12138 11995 vtag_flushpage(va, (uint64_t)sfmmup);
12139 11996
12140 11997 #ifdef VAC
12141 11998 /*
12142 11999 * Flush D$
12143 12000 * When flushing D$ we must flush all
12144 12001 * cpu's. See sfmmu_cache_flush().
12145 12002 */
12146 12003 if (cache_flush_flag == CACHE_FLUSH) {
12147 12004 cpuset = cpu_ready_set;
12148 12005 CPUSET_DEL(cpuset, CPU->cpu_id);
12149 12006
12150 12007 SFMMU_XCALL_STATS(sfmmup);
12151 12008 vcolor = addr_to_vcolor(va);
12152 12009 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
12153 12010 vac_flushpage(pfnum, vcolor);
12154 12011 }
12155 12012 #endif /* VAC */
12156 12013 }
12157 12014 }
12158 12015
12159 12016 /*
12160 12017  * Demaps the TSB and CPU caches, and flushes the TLBs on all CPUs, for
12161 12018  * a particular virtual address and ctx. If noflush is set we do not
12162 12019 * flush the TLB/TSB. This function may or may not be called with the
12163 12020 * HAT lock held.
12164 12021 */
12165 12022 static void
12166 12023 sfmmu_tlbcache_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
12167 12024 pfn_t pfnum, int tlb_noflush, int cpu_flag, int cache_flush_flag,
12168 12025 int hat_lock_held)
12169 12026 {
12170 12027 #ifdef VAC
12171 12028 int vcolor;
12172 12029 #endif
12173 12030 cpuset_t cpuset;
12174 12031 hatlock_t *hatlockp;
12175 12032
12176 12033 ASSERT(!hmeblkp->hblk_shared);
12177 12034
12178 12035 #if defined(lint) && !defined(VAC)
12179 12036 pfnum = pfnum;
12180 12037 cpu_flag = cpu_flag;
12181 12038 cache_flush_flag = cache_flush_flag;
12182 12039 #endif
12183 12040
12184 12041 /*
12185 12042 * There is no longer a need to protect against ctx being
12186 12043 * stolen here since we don't store the ctx in the TSB anymore.
12187 12044 */
12188 12045 #ifdef VAC
12189 12046 vcolor = addr_to_vcolor(addr);
12190 12047 #endif
12191 12048
12192 12049 /*
12193 12050 * We must hold the hat lock during the flush of TLB,
12194 12051 * to avoid a race with sfmmu_invalidate_ctx(), where
12195 12052 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT,
12196 12053 * causing TLB demap routine to skip flush on that MMU.
12197 12054 * If the context on a MMU has already been set to
12198 12055 * INVALID_CONTEXT, we just get an extra flush on
12199 12056 * that MMU.
12200 12057 */
12201 12058 if (!hat_lock_held && !tlb_noflush)
12202 12059 hatlockp = sfmmu_hat_enter(sfmmup);
12203 12060
12204 12061 kpreempt_disable();
12205 12062 if (!tlb_noflush) {
12206 12063 /*
12207 12064 * Flush the TSB and TLB.
12208 12065 */
12209 12066 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
12210 12067
12211 12068 cpuset = sfmmup->sfmmu_cpusran;
12212 12069 CPUSET_AND(cpuset, cpu_ready_set);
12213 12070 CPUSET_DEL(cpuset, CPU->cpu_id);
12214 12071
12215 12072 SFMMU_XCALL_STATS(sfmmup);
12216 12073
12217 12074 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr,
12218 12075 (uint64_t)sfmmup);
12219 12076
12220 12077 vtag_flushpage(addr, (uint64_t)sfmmup);
12221 12078 }
12222 12079
12223 12080 if (!hat_lock_held && !tlb_noflush)
12224 12081 sfmmu_hat_exit(hatlockp);
12225 12082
12226 12083 #ifdef VAC
12227 12084 /*
12228 12085 * Flush the D$
12229 12086 *
12230 12087 * Even if the ctx is stolen, we need to flush the
12231 12088 * cache. Our ctx stealer only flushes the TLBs.
12232 12089 */
12233 12090 if (cache_flush_flag == CACHE_FLUSH) {
12234 12091 if (cpu_flag & FLUSH_ALL_CPUS) {
12235 12092 cpuset = cpu_ready_set;
12236 12093 } else {
12237 12094 cpuset = sfmmup->sfmmu_cpusran;
12238 12095 CPUSET_AND(cpuset, cpu_ready_set);
12239 12096 }
12240 12097 CPUSET_DEL(cpuset, CPU->cpu_id);
12241 12098 SFMMU_XCALL_STATS(sfmmup);
12242 12099 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
12243 12100 vac_flushpage(pfnum, vcolor);
12244 12101 }
12245 12102 #endif /* VAC */
12246 12103 kpreempt_enable();
12247 12104 }
12248 12105
12249 12106 /*
12250 12107 * Demaps the TSB and flushes all TLBs on all cpus for a particular virtual
12251 12108 * address and ctx. If noflush is set we do not currently do anything.
12252 12109 * This function may or may not be called with the HAT lock held.
12253 12110 */
12254 12111 static void
12255 12112 sfmmu_tlb_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
12256 12113 int tlb_noflush, int hat_lock_held)
12257 12114 {
12258 12115 cpuset_t cpuset;
12259 12116 hatlock_t *hatlockp;
12260 12117
12261 12118 ASSERT(!hmeblkp->hblk_shared);
12262 12119
12263 12120 /*
12264 12121 * If the process is exiting we have nothing to do.
12265 12122 */
12266 12123 if (tlb_noflush)
12267 12124 return;
12268 12125
12269 12126 /*
12270 12127 * Flush TSB.
12271 12128 */
12272 12129 if (!hat_lock_held)
12273 12130 hatlockp = sfmmu_hat_enter(sfmmup);
12274 12131 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
12275 12132
12276 12133 kpreempt_disable();
12277 12134
12278 12135 cpuset = sfmmup->sfmmu_cpusran;
12279 12136 CPUSET_AND(cpuset, cpu_ready_set);
12280 12137 CPUSET_DEL(cpuset, CPU->cpu_id);
12281 12138
12282 12139 SFMMU_XCALL_STATS(sfmmup);
12283 12140 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, (uint64_t)sfmmup);
12284 12141
12285 12142 vtag_flushpage(addr, (uint64_t)sfmmup);
12286 12143
12287 12144 if (!hat_lock_held)
12288 12145 sfmmu_hat_exit(hatlockp);
12289 12146
12290 12147 kpreempt_enable();
12291 12148
12292 12149 }
12293 12150
12294 12151 /*
12295 12152 * Special case of sfmmu_tlb_demap for MMU_PAGESIZE hblks. Use the xcall
12296 12153 * call handler that can flush a range of pages to save on xcalls.
12297 12154 */
12298 12155 static int sfmmu_xcall_save;
12299 12156
12300 12157 /*
12301 12158  * This routine is never used for demapping addresses backed by SRD hmeblks.
12302 12159 */
12303 12160 static void
12304 12161 sfmmu_tlb_range_demap(demap_range_t *dmrp)
12305 12162 {
12306 12163 sfmmu_t *sfmmup = dmrp->dmr_sfmmup;
12307 12164 hatlock_t *hatlockp;
12308 12165 cpuset_t cpuset;
12309 12166 uint64_t sfmmu_pgcnt;
12310 12167 pgcnt_t pgcnt = 0;
12311 12168 int pgunload = 0;
12312 12169 int dirtypg = 0;
12313 12170 caddr_t addr = dmrp->dmr_addr;
12314 12171 caddr_t eaddr;
12315 12172 uint64_t bitvec = dmrp->dmr_bitvec;
12316 12173
12317 12174 ASSERT(bitvec & 1);
12318 12175
12319 12176 /*
12320 12177 * Flush TSB and calculate number of pages to flush.
12321 12178 */
12322 12179 while (bitvec != 0) {
12323 12180 dirtypg = 0;
12324 12181 /*
12325 12182 * Find the first page to flush and then count how many
12326 12183 * pages there are after it that also need to be flushed.
12327 12184 * This way the number of TSB flushes is minimized.
12328 12185 */
12329 12186 while ((bitvec & 1) == 0) {
12330 12187 pgcnt++;
12331 12188 addr += MMU_PAGESIZE;
12332 12189 bitvec >>= 1;
12333 12190 }
12334 12191 while (bitvec & 1) {
12335 12192 dirtypg++;
12336 12193 bitvec >>= 1;
12337 12194 }
12338 12195 eaddr = addr + ptob(dirtypg);
12339 12196 hatlockp = sfmmu_hat_enter(sfmmup);
12340 12197 sfmmu_unload_tsb_range(sfmmup, addr, eaddr, TTE8K);
12341 12198 sfmmu_hat_exit(hatlockp);
12342 12199 pgunload += dirtypg;
12343 12200 addr = eaddr;
12344 12201 pgcnt += dirtypg;
12345 12202 }
12346 12203
12347 12204 ASSERT((pgcnt<<MMU_PAGESHIFT) <= dmrp->dmr_endaddr - dmrp->dmr_addr);
12348 12205 if (sfmmup->sfmmu_free == 0) {
12349 12206 addr = dmrp->dmr_addr;
12350 12207 bitvec = dmrp->dmr_bitvec;
12351 12208
12352 12209 /*
12353 12210 * make sure it has SFMMU_PGCNT_SHIFT bits only,
12354 12211		 * as it will be used to pack the argument for xt_some
12355 12212 */
12356 12213 ASSERT((pgcnt > 0) &&
12357 12214 (pgcnt <= (1 << SFMMU_PGCNT_SHIFT)));
12358 12215
12359 12216 /*
12360 12217		 * Encode pgcnt as (pgcnt - 1), and pass (pgcnt - 1) in
12361 12218		 * the low 6 bits of sfmmup. This is doable since pgcnt
12362 12219		 * is always >= 1.
12363 12220 */
12364 12221 ASSERT(!((uint64_t)sfmmup & SFMMU_PGCNT_MASK));
12365 12222 sfmmu_pgcnt = (uint64_t)sfmmup |
12366 12223 ((pgcnt - 1) & SFMMU_PGCNT_MASK);
12367 12224
12368 12225 /*
12369 12226 * We must hold the hat lock during the flush of TLB,
12370 12227 * to avoid a race with sfmmu_invalidate_ctx(), where
12371 12228 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT,
12372 12229 * causing TLB demap routine to skip flush on that MMU.
12373 12230 * If the context on a MMU has already been set to
12374 12231 * INVALID_CONTEXT, we just get an extra flush on
12375 12232 * that MMU.
12376 12233 */
12377 12234 hatlockp = sfmmu_hat_enter(sfmmup);
12378 12235 kpreempt_disable();
12379 12236
12380 12237 cpuset = sfmmup->sfmmu_cpusran;
12381 12238 CPUSET_AND(cpuset, cpu_ready_set);
12382 12239 CPUSET_DEL(cpuset, CPU->cpu_id);
12383 12240
12384 12241 SFMMU_XCALL_STATS(sfmmup);
12385 12242 xt_some(cpuset, vtag_flush_pgcnt_tl1, (uint64_t)addr,
12386 12243 sfmmu_pgcnt);
12387 12244
12388 12245 for (; bitvec != 0; bitvec >>= 1) {
12389 12246 if (bitvec & 1)
12390 12247 vtag_flushpage(addr, (uint64_t)sfmmup);
12391 12248 addr += MMU_PAGESIZE;
12392 12249 }
12393 12250 kpreempt_enable();
12394 12251 sfmmu_hat_exit(hatlockp);
12395 12252
12396 12253 sfmmu_xcall_save += (pgunload-1);
12397 12254 }
12398 12255 dmrp->dmr_bitvec = 0;
12399 12256 }
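
The xcall argument built above packs (pgcnt - 1) into the low SFMMU_PGCNT_SHIFT bits of the sfmmup pointer; this works only because the pointer is aligned strictly enough that those bits are guaranteed zero, which the ASSERT checks. A standalone sketch of the pack/unpack arithmetic, taking the 6-bit width from the comment above (the macro and function names are illustrative):

	#include <assert.h>
	#include <stdint.h>

	#define	PGCNT_SHIFT	6
	#define	PGCNT_MASK	((1ULL << PGCNT_SHIFT) - 1)

	/* Pack a page count (1..64) into the low bits of an aligned pointer. */
	static uint64_t
	pack_arg(void *p, uint64_t pgcnt)
	{
		assert(((uintptr_t)p & PGCNT_MASK) == 0);	/* low bits free */
		assert(pgcnt >= 1 && pgcnt <= (1ULL << PGCNT_SHIFT));
		return ((uintptr_t)p | ((pgcnt - 1) & PGCNT_MASK));
	}

	/* Recover both halves on the receiving side. */
	static void
	unpack_arg(uint64_t arg, void **pp, uint64_t *pgcntp)
	{
		*pp = (void *)(uintptr_t)(arg & ~PGCNT_MASK);
		*pgcntp = (arg & PGCNT_MASK) + 1;
	}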
12400 12257
12401 12258 /*
12402 12259 * In cases where we need to synchronize with TLB/TSB miss trap
12403 12260 * handlers, _and_ need to flush the TLB, it's a lot easier to
12404 12261 * throw away the context from the process than to do a
12405 12262 * special song and dance to keep things consistent for the
12406 12263 * handlers.
12407 12264 *
12408 12265 * Since the process suddenly ends up without a context and our caller
12409 12266 * holds the hat lock, threads that fault after this function is called
12410 12267 * will pile up on the lock. We can then do whatever we need to
12411 12268 * atomically from the context of the caller. The first blocked thread
12412 12269 * to resume executing will get the process a new context, and the
12413 12270 * process will resume executing.
12414 12271 *
12415 12272 * One added advantage of this approach is that on MMUs that
12416 12273 * support a "flush all" operation, we will delay the flush until
12417 12274 * cnum wrap-around, and then flush the TLB one time. This
12418 12275 * is rather rare, so it's a lot less expensive than making 8000
12419 12276 * x-calls to flush the TLB 8000 times.
12420 12277 *
12421 12278 * A per-process (PP) lock is used to synchronize ctx allocations in
12422 12279 * resume() and ctx invalidations here.
12423 12280 */
12424 12281 static void
12425 12282 sfmmu_invalidate_ctx(sfmmu_t *sfmmup)
12426 12283 {
12427 12284 cpuset_t cpuset;
12428 12285 int cnum, currcnum;
12429 12286 mmu_ctx_t *mmu_ctxp;
12430 12287 int i;
12431 12288 uint_t pstate_save;
12432 12289
12433 12290 SFMMU_STAT(sf_ctx_inv);
12434 12291
12435 12292 ASSERT(sfmmu_hat_lock_held(sfmmup));
12436 12293 ASSERT(sfmmup != ksfmmup);
12437 12294
12438 12295 kpreempt_disable();
12439 12296
12440 12297 mmu_ctxp = CPU_MMU_CTXP(CPU);
12441 12298 ASSERT(mmu_ctxp);
12442 12299 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms);
12443 12300 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]);
12444 12301
12445 12302 currcnum = sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum;
12446 12303
12447 12304 pstate_save = sfmmu_disable_intrs();
12448 12305
12449 12306 lock_set(&sfmmup->sfmmu_ctx_lock); /* acquire PP lock */
12450 12307 /* set HAT cnum invalid across all context domains. */
12451 12308 for (i = 0; i < max_mmu_ctxdoms; i++) {
12452 12309
12453 12310 cnum = sfmmup->sfmmu_ctxs[i].cnum;
12454 12311 if (cnum == INVALID_CONTEXT) {
12455 12312 continue;
12456 12313 }
12457 12314
12458 12315 sfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT;
12459 12316 }
12460 12317 membar_enter(); /* make sure globally visible to all CPUs */
12461 12318 lock_clear(&sfmmup->sfmmu_ctx_lock); /* release PP lock */
12462 12319
12463 12320 sfmmu_enable_intrs(pstate_save);
12464 12321
12465 12322 cpuset = sfmmup->sfmmu_cpusran;
12466 12323 CPUSET_DEL(cpuset, CPU->cpu_id);
12467 12324 CPUSET_AND(cpuset, cpu_ready_set);
12468 12325 if (!CPUSET_ISNULL(cpuset)) {
12469 12326 SFMMU_XCALL_STATS(sfmmup);
12470 12327 xt_some(cpuset, sfmmu_raise_tsb_exception,
12471 12328 (uint64_t)sfmmup, INVALID_CONTEXT);
12472 12329 xt_sync(cpuset);
12473 12330 SFMMU_STAT(sf_tsb_raise_exception);
12474 12331 SFMMU_MMU_STAT(mmu_tsb_raise_exception);
12475 12332 }
12476 12333
12477 12334 /*
12478 12335 * If the hat to-be-invalidated is the same as the current
12479 12336	 * process on the local CPU, we need to invalidate
12480 12337 * this CPU context as well.
12481 12338 */
12482 12339 if ((sfmmu_getctx_sec() == currcnum) &&
12483 12340 (currcnum != INVALID_CONTEXT)) {
12484 12341 /* sets shared context to INVALID too */
12485 12342 sfmmu_setctx_sec(INVALID_CONTEXT);
12486 12343 sfmmu_clear_utsbinfo();
12487 12344 }
12488 12345
12489 12346 SFMMU_FLAGS_SET(sfmmup, HAT_ALLCTX_INVALID);
12490 12347
12491 12348 kpreempt_enable();
12492 12349
12493 12350 /*
12494 12351 * we hold the hat lock, so nobody should allocate a context
12495 12352 * for us yet
12496 12353 */
12497 12354 ASSERT(sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum == INVALID_CONTEXT);
12498 12355 }
12499 12356
12500 12357 #ifdef VAC
12501 12358 /*
12502 12359  * We need to flush the cache on all cpus. It is possible that
12503 12360  * a process referenced a page as cacheable but has since exited
12504 12361  * and cleared the mapping list. We still need to flush it but have
12505 12362  * no state, so flushing all cpus is the only alternative.
12506 12363 */
12507 12364 void
12508 12365 sfmmu_cache_flush(pfn_t pfnum, int vcolor)
12509 12366 {
12510 12367 cpuset_t cpuset;
12511 12368
12512 12369 kpreempt_disable();
12513 12370 cpuset = cpu_ready_set;
12514 12371 CPUSET_DEL(cpuset, CPU->cpu_id);
12515 12372 SFMMU_XCALL_STATS(NULL); /* account to any ctx */
12516 12373 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
12517 12374 xt_sync(cpuset);
12518 12375 vac_flushpage(pfnum, vcolor);
12519 12376 kpreempt_enable();
12520 12377 }
12521 12378
12522 12379 void
12523 12380 sfmmu_cache_flushcolor(int vcolor, pfn_t pfnum)
12524 12381 {
12525 12382 cpuset_t cpuset;
12526 12383
12527 12384 ASSERT(vcolor >= 0);
12528 12385
12529 12386 kpreempt_disable();
12530 12387 cpuset = cpu_ready_set;
12531 12388 CPUSET_DEL(cpuset, CPU->cpu_id);
12532 12389 SFMMU_XCALL_STATS(NULL); /* account to any ctx */
12533 12390 xt_some(cpuset, vac_flushcolor_tl1, vcolor, pfnum);
12534 12391 xt_sync(cpuset);
12535 12392 vac_flushcolor(vcolor, pfnum);
12536 12393 kpreempt_enable();
12537 12394 }
12538 12395 #endif /* VAC */
12539 12396
12540 12397 /*
12541 12398 * We need to prevent processes from accessing the TSB using a cached physical
12542 12399 * address. It's alright if they try to access the TSB via virtual address
12543 12400 * since they will just fault on that virtual address once the mapping has
12544 12401 * been suspended.
12545 12402 */
12546 12403 #pragma weak sendmondo_in_recover
12547 12404
12548 12405 /* ARGSUSED */
12549 12406 static int
12550 12407 sfmmu_tsb_pre_relocator(caddr_t va, uint_t tsbsz, uint_t flags, void *tsbinfo)
12551 12408 {
12552 12409 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo;
12553 12410 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu;
12554 12411 hatlock_t *hatlockp;
12555 12412 sf_scd_t *scdp;
12556 12413
12557 12414 if (flags != HAT_PRESUSPEND)
12558 12415 return (0);
12559 12416
12560 12417 /*
12561 12418	 * If the tsb is a shared TSB with TSB_SHAREDCTX set, sfmmup must
12562 12419	 * be a shared hat; in that case set the SCD's tsbinfo flag.
12563 12420	 * If the tsb is not shared, sfmmup is a private hat; set
12564 12421	 * its private tsbinfo flag.
12565 12422 */
12566 12423 hatlockp = sfmmu_hat_enter(sfmmup);
12567 12424 tsbinfop->tsb_flags |= TSB_RELOC_FLAG;
12568 12425
12569 12426 if (!(tsbinfop->tsb_flags & TSB_SHAREDCTX)) {
12570 12427 sfmmu_tsb_inv_ctx(sfmmup);
12571 12428 sfmmu_hat_exit(hatlockp);
12572 12429 } else {
12573 12430 /* release lock on the shared hat */
12574 12431 sfmmu_hat_exit(hatlockp);
12575 12432 /* sfmmup is a shared hat */
12576 12433 ASSERT(sfmmup->sfmmu_scdhat);
12577 12434 scdp = sfmmup->sfmmu_scdp;
12578 12435 ASSERT(scdp != NULL);
12579 12436 /* get private hat from the scd list */
12580 12437 mutex_enter(&scdp->scd_mutex);
12581 12438 sfmmup = scdp->scd_sf_list;
12582 12439 while (sfmmup != NULL) {
12583 12440 hatlockp = sfmmu_hat_enter(sfmmup);
12584 12441 /*
12585 12442 * We do not call sfmmu_tsb_inv_ctx here because
12586 12443			 * the sendmondo_in_recover check is only needed for
12587 12444 * sun4u.
12588 12445 */
12589 12446 sfmmu_invalidate_ctx(sfmmup);
12590 12447 sfmmu_hat_exit(hatlockp);
12591 12448 sfmmup = sfmmup->sfmmu_scd_link.next;
12592 12449
12593 12450 }
12594 12451 mutex_exit(&scdp->scd_mutex);
12595 12452 }
12596 12453 return (0);
12597 12454 }
12598 12455
12599 12456 static void
12600 12457 sfmmu_tsb_inv_ctx(sfmmu_t *sfmmup)
12601 12458 {
12602 12459 extern uint32_t sendmondo_in_recover;
12603 12460
12604 12461 ASSERT(sfmmu_hat_lock_held(sfmmup));
12605 12462
12606 12463 /*
12607 12464 * For Cheetah+ Erratum 25:
12608 12465 * Wait for any active recovery to finish. We can't risk
12609 12466 * relocating the TSB of the thread running mondo_recover_proc()
12610 12467 * since, if we did that, we would deadlock. The scenario we are
12611 12468 * trying to avoid is as follows:
12612 12469 *
12613 12470 * THIS CPU RECOVER CPU
12614 12471 * -------- -----------
12615 12472 * Begins recovery, walking through TSB
12616 12473 * hat_pagesuspend() TSB TTE
12617 12474 * TLB miss on TSB TTE, spins at TL1
12618 12475 * xt_sync()
12619 12476 * send_mondo_timeout()
12620 12477 * mondo_recover_proc()
12621 12478 * ((deadlocked))
12622 12479 *
12623 12480 * The second half of the workaround is that mondo_recover_proc()
12624 12481 * checks to see if the tsb_info has the RELOC flag set, and if it
12625 12482 * does, it skips over that TSB without ever touching tsbinfop->tsb_va
12626 12483 * and hence avoiding the TLB miss that could result in a deadlock.
12627 12484 */
12628 12485 if (&sendmondo_in_recover) {
12629 12486 membar_enter(); /* make sure RELOC flag visible */
12630 12487 while (sendmondo_in_recover) {
12631 12488 drv_usecwait(1);
12632 12489 membar_consumer();
12633 12490 }
12634 12491 }
12635 12492
12636 12493 sfmmu_invalidate_ctx(sfmmup);
12637 12494 }
12638 12495
12639 12496 /* ARGSUSED */
12640 12497 static int
12641 12498 sfmmu_tsb_post_relocator(caddr_t va, uint_t tsbsz, uint_t flags,
12642 12499 void *tsbinfo, pfn_t newpfn)
12643 12500 {
12644 12501 hatlock_t *hatlockp;
12645 12502 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo;
12646 12503 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu;
12647 12504
12648 12505 if (flags != HAT_POSTUNSUSPEND)
12649 12506 return (0);
12650 12507
12651 12508 hatlockp = sfmmu_hat_enter(sfmmup);
12652 12509
12653 12510 SFMMU_STAT(sf_tsb_reloc);
12654 12511
12655 12512 /*
12656 12513 * The process may have swapped out while we were relocating one
12657 12514 * of its TSBs. If so, don't bother doing the setup since the
12658 12515 * process can't be using the memory anymore.
12659 12516 */
12660 12517 if ((tsbinfop->tsb_flags & TSB_SWAPPED) == 0) {
12661 12518 ASSERT(va == tsbinfop->tsb_va);
12662 12519 sfmmu_tsbinfo_setup_phys(tsbinfop, newpfn);
12663 12520
12664 12521 if (tsbinfop->tsb_flags & TSB_FLUSH_NEEDED) {
12665 12522 sfmmu_inv_tsb(tsbinfop->tsb_va,
12666 12523 TSB_BYTES(tsbinfop->tsb_szc));
12667 12524 tsbinfop->tsb_flags &= ~TSB_FLUSH_NEEDED;
12668 12525 }
12669 12526 }
12670 12527
12671 12528 membar_exit();
12672 12529 tsbinfop->tsb_flags &= ~TSB_RELOC_FLAG;
12673 12530 cv_broadcast(&sfmmup->sfmmu_tsb_cv);
12674 12531
12675 12532 sfmmu_hat_exit(hatlockp);
12676 12533
12677 12534 return (0);
12678 12535 }
12679 12536
12680 12537 /*
12681 12538 * Allocate and initialize a tsb_info structure. Note that we may or may not
12682 12539 * allocate a TSB here, depending on the flags passed in.
12683 12540 */
12684 12541 static int
12685 12542 sfmmu_tsbinfo_alloc(struct tsb_info **tsbinfopp, int tsb_szc, int tte_sz_mask,
12686 12543 uint_t flags, sfmmu_t *sfmmup)
12687 12544 {
12688 12545 int err;
12689 12546
12690 12547 *tsbinfopp = (struct tsb_info *)kmem_cache_alloc(
12691 12548 sfmmu_tsbinfo_cache, KM_SLEEP);
12692 12549
12693 12550 if ((err = sfmmu_init_tsbinfo(*tsbinfopp, tte_sz_mask,
12694 12551 tsb_szc, flags, sfmmup)) != 0) {
12695 12552 kmem_cache_free(sfmmu_tsbinfo_cache, *tsbinfopp);
12696 12553 SFMMU_STAT(sf_tsb_allocfail);
12697 12554 *tsbinfopp = NULL;
12698 12555 return (err);
12699 12556 }
12700 12557 SFMMU_STAT(sf_tsb_alloc);
12701 12558
12702 12559 /*
12703 12560 * Bump the TSB size counters for this TSB size.
12704 12561 */
12705 12562 (*(((int *)&sfmmu_tsbsize_stat) + tsb_szc))++;
12706 12563 return (0);
12707 12564 }
12708 12565
12709 12566 static void
12710 12567 sfmmu_tsb_free(struct tsb_info *tsbinfo)
12711 12568 {
12712 12569 caddr_t tsbva = tsbinfo->tsb_va;
12713 12570 uint_t tsb_size = TSB_BYTES(tsbinfo->tsb_szc);
12714 12571 struct kmem_cache *kmem_cachep = tsbinfo->tsb_cache;
12715 12572 vmem_t *vmp = tsbinfo->tsb_vmp;
12716 12573
12717 12574 /*
12718 12575 * If we allocated this TSB from relocatable kernel memory, then we
12719 12576 * need to uninstall the callback handler.
12720 12577 */
12721 12578 if (tsbinfo->tsb_cache != sfmmu_tsb8k_cache) {
12722 12579 uintptr_t slab_mask;
12723 12580 caddr_t slab_vaddr;
12724 12581 page_t **ppl;
12725 12582 int ret;
12726 12583
12727 12584 ASSERT(tsb_size <= MMU_PAGESIZE4M || use_bigtsb_arena);
12728 12585 if (tsb_size > MMU_PAGESIZE4M)
12729 12586 slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT;
12730 12587 else
12731 12588 slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT;
12732 12589 slab_vaddr = (caddr_t)((uintptr_t)tsbva & slab_mask);
12733 12590
12734 12591 ret = as_pagelock(&kas, &ppl, slab_vaddr, PAGESIZE, S_WRITE);
12735 12592 ASSERT(ret == 0);
12736 12593 hat_delete_callback(tsbva, (uint_t)tsb_size, (void *)tsbinfo,
12737 12594 0, NULL);
12738 12595 as_pageunlock(&kas, ppl, slab_vaddr, PAGESIZE, S_WRITE);
12739 12596 }
12740 12597
12741 12598 if (kmem_cachep != NULL) {
12742 12599 kmem_cache_free(kmem_cachep, tsbva);
12743 12600 } else {
12744 12601 vmem_xfree(vmp, (void *)tsbva, tsb_size);
12745 12602 }
12746 12603 tsbinfo->tsb_va = (caddr_t)0xbad00bad;
12747 12604 atomic_add_64(&tsb_alloc_bytes, -(int64_t)tsb_size);
12748 12605 }
12749 12606
12750 12607 static void
12751 12608 sfmmu_tsbinfo_free(struct tsb_info *tsbinfo)
12752 12609 {
12753 12610 if ((tsbinfo->tsb_flags & TSB_SWAPPED) == 0) {
12754 12611 sfmmu_tsb_free(tsbinfo);
12755 12612 }
12756 12613 kmem_cache_free(sfmmu_tsbinfo_cache, tsbinfo);
12757 12614
12758 12615 }
12759 12616
12760 12617 /*
12761 12618 * Setup all the references to physical memory for this tsbinfo.
12762 12619 * The underlying page(s) must be locked.
12763 12620 */
12764 12621 static void
12765 12622 sfmmu_tsbinfo_setup_phys(struct tsb_info *tsbinfo, pfn_t pfn)
12766 12623 {
12767 12624 ASSERT(pfn != PFN_INVALID);
12768 12625 ASSERT(pfn == va_to_pfn(tsbinfo->tsb_va));
12769 12626
12770 12627 #ifndef sun4v
12771 12628 if (tsbinfo->tsb_szc == 0) {
12772 12629 sfmmu_memtte(&tsbinfo->tsb_tte, pfn,
12773 12630 PROT_WRITE|PROT_READ, TTE8K);
12774 12631 } else {
12775 12632 /*
12776 12633 * Round down PA and use a large mapping; the handlers will
12777 12634 * compute the TSB pointer at the correct offset into the
12778 12635 * big virtual page. NOTE: this assumes all TSBs larger
12779 12636 * than 8K must come from physically contiguous slabs of
12780 12637 * size tsb_slab_size.
12781 12638 */
12782 12639 sfmmu_memtte(&tsbinfo->tsb_tte, pfn & ~tsb_slab_mask,
12783 12640 PROT_WRITE|PROT_READ, tsb_slab_ttesz);
12784 12641 }
12785 12642 tsbinfo->tsb_pa = ptob(pfn);
12786 12643
12787 12644 TTE_SET_LOCKED(&tsbinfo->tsb_tte); /* lock the tte into dtlb */
12788 12645 TTE_SET_MOD(&tsbinfo->tsb_tte); /* enable writes */
12789 12646
12790 12647 ASSERT(TTE_IS_PRIVILEGED(&tsbinfo->tsb_tte));
12791 12648 ASSERT(TTE_IS_LOCKED(&tsbinfo->tsb_tte));
12792 12649 #else /* sun4v */
12793 12650 tsbinfo->tsb_pa = ptob(pfn);
12794 12651 #endif /* sun4v */
12795 12652 }
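
In the non-sun4v large-TSB case above, the pfn is rounded down to a slab boundary with pfn & ~tsb_slab_mask and the miss handlers add the offset within the big mapping back in. The mask arithmetic, sketched with an assumed 4M slab and byte addresses (the kernel's tsb_slab_mask is platform-sized and applies to pfns rather than byte addresses):

	#include <stdio.h>
	#include <stdint.h>

	#define	SLAB_SHIFT	22	/* assumed 4M slab, for illustration */
	#define	SLAB_MASK	((1ULL << SLAB_SHIFT) - 1)

	int
	main(void)
	{
		uint64_t pa = 0x12345678ULL;
		uint64_t base = pa & ~SLAB_MASK;	/* mapping base */
		uint64_t off = pa & SLAB_MASK;		/* offset in slab */

		printf("base=0x%llx off=0x%llx\n",
		    (unsigned long long)base, (unsigned long long)off);
		return (0);
	}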
12796 12653
12797 12654
12798 12655 /*
12799 12656 * Returns zero on success, ENOMEM if over the high water mark,
12800 12657 * or EAGAIN if the caller needs to retry with a smaller TSB
12801 12658 * size (or specify TSB_FORCEALLOC if the allocation can't fail).
12802 12659 *
12803 12660 * This call cannot fail to allocate a TSB if TSB_FORCEALLOC
12804 12661 * is specified and the TSB requested is PAGESIZE, though it
12805 12662 * may sleep waiting for memory if sufficient memory is not
12806 12663 * available.
12807 12664 */
12808 12665 static int
12809 12666 sfmmu_init_tsbinfo(struct tsb_info *tsbinfo, int tteszmask,
12810 12667 int tsbcode, uint_t flags, sfmmu_t *sfmmup)
12811 12668 {
12812 12669 caddr_t vaddr = NULL;
12813 12670 caddr_t slab_vaddr;
12814 12671 uintptr_t slab_mask;
12815 12672 int tsbbytes = TSB_BYTES(tsbcode);
12816 12673 int lowmem = 0;
12817 12674 struct kmem_cache *kmem_cachep = NULL;
12818 12675 vmem_t *vmp = NULL;
12819 12676 lgrp_id_t lgrpid = LGRP_NONE;
12820 12677 pfn_t pfn;
12821 12678 uint_t cbflags = HAC_SLEEP;
12822 12679 page_t **pplist;
12823 12680 int ret;
12824 12681
12825 12682 ASSERT(tsbbytes <= MMU_PAGESIZE4M || use_bigtsb_arena);
12826 12683 if (tsbbytes > MMU_PAGESIZE4M)
12827 12684 slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT;
12828 12685 else
12829 12686 slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT;
12830 12687
12831 12688 if (flags & (TSB_FORCEALLOC | TSB_SWAPIN | TSB_GROW | TSB_SHRINK))
12832 12689 flags |= TSB_ALLOC;
12833 12690
12834 12691 ASSERT((flags & TSB_FORCEALLOC) == 0 || tsbcode == TSB_MIN_SZCODE);
12835 12692
12836 12693 tsbinfo->tsb_sfmmu = sfmmup;
12837 12694
12838 12695 /*
12839 12696 * If not allocating a TSB, set up the tsbinfo, set TSB_SWAPPED, and
12840 12697 * return.
12841 12698 */
12842 12699 if ((flags & TSB_ALLOC) == 0) {
12843 12700 tsbinfo->tsb_szc = tsbcode;
12844 12701 tsbinfo->tsb_ttesz_mask = tteszmask;
12845 12702 tsbinfo->tsb_va = (caddr_t)0xbadbadbeef;
12846 12703 tsbinfo->tsb_pa = -1;
12847 12704 tsbinfo->tsb_tte.ll = 0;
12848 12705 tsbinfo->tsb_next = NULL;
12849 12706 tsbinfo->tsb_flags = TSB_SWAPPED;
12850 12707 tsbinfo->tsb_cache = NULL;
12851 12708 tsbinfo->tsb_vmp = NULL;
12852 12709 return (0);
12853 12710 }
12854 12711
12855 12712 #ifdef DEBUG
12856 12713 /*
12857 12714 * For debugging:
12858 12715 * Randomly force allocation failures every tsb_alloc_mtbf
12859 12716 * tries if TSB_FORCEALLOC is not specified. This will
12860 12717 * return ENOMEM if tsb_alloc_mtbf is odd, or EAGAIN if
12861 12718 * it is even, to allow testing of both failure paths...
12862 12719 */
12863 12720 if (tsb_alloc_mtbf && ((flags & TSB_FORCEALLOC) == 0) &&
12864 12721 (tsb_alloc_count++ == tsb_alloc_mtbf)) {
12865 12722 tsb_alloc_count = 0;
12866 12723 tsb_alloc_fail_mtbf++;
12867 12724 return ((tsb_alloc_mtbf & 1)? ENOMEM : EAGAIN);
12868 12725 }
12869 12726 #endif /* DEBUG */
12870 12727
12871 12728 /*
12872 12729 * Enforce high water mark if we are not doing a forced allocation
12873 12730 * and are not shrinking a process' TSB.
12874 12731 */
12875 12732 if ((flags & TSB_SHRINK) == 0 &&
12876 12733 (tsbbytes + tsb_alloc_bytes) > tsb_alloc_hiwater) {
12877 12734 if ((flags & TSB_FORCEALLOC) == 0)
12878 12735 return (ENOMEM);
12879 12736 lowmem = 1;
12880 12737 }
12881 12738
12882 12739 /*
12883 12740 * Allocate from the correct location based upon the size of the TSB
12884 12741 * compared to the base page size, and what memory conditions dictate.
12885 12742 * Note we always do nonblocking allocations from the TSB arena since
12886 12743 * we don't want memory fragmentation to cause processes to block
12887 12744 * indefinitely waiting for memory; until the kernel algorithms that
12888 12745 * coalesce large pages are improved this is our best option.
12889 12746 *
12890 12747 * Algorithm:
12891 12748 * If allocating a "large" TSB (>8K), allocate from the
12892 12749 * appropriate kmem_tsb_default_arena vmem arena
12893 12750 * else if low on memory or the TSB_FORCEALLOC flag is set or
12894 12751 * tsb_forceheap is set
12895 12752 * Allocate from kernel heap via sfmmu_tsb8k_cache with
12896 12753 * KM_SLEEP (never fails)
12897 12754 * else
12898 12755 * Allocate from appropriate sfmmu_tsb_cache with
12899 12756 * KM_NOSLEEP
12900 12757 * endif
12901 12758 */
12902 12759 if (tsb_lgrp_affinity)
12903 12760 lgrpid = lgrp_home_id(curthread);
12904 12761 if (lgrpid == LGRP_NONE)
12905 12762 lgrpid = 0; /* use lgrp of boot CPU */
12906 12763
12907 12764 if (tsbbytes > MMU_PAGESIZE) {
12908 12765 if (tsbbytes > MMU_PAGESIZE4M) {
12909 12766 vmp = kmem_bigtsb_default_arena[lgrpid];
12910 12767 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes,
12911 12768 0, 0, NULL, NULL, VM_NOSLEEP);
12912 12769 } else {
12913 12770 vmp = kmem_tsb_default_arena[lgrpid];
12914 12771 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes,
12915 12772 0, 0, NULL, NULL, VM_NOSLEEP);
12916 12773 }
12917 12774 #ifdef DEBUG
12918 12775 } else if (lowmem || (flags & TSB_FORCEALLOC) || tsb_forceheap) {
12919 12776 #else /* !DEBUG */
12920 12777 } else if (lowmem || (flags & TSB_FORCEALLOC)) {
12921 12778 #endif /* DEBUG */
12922 12779 kmem_cachep = sfmmu_tsb8k_cache;
12923 12780 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_SLEEP);
12924 12781 ASSERT(vaddr != NULL);
12925 12782 } else {
12926 12783 kmem_cachep = sfmmu_tsb_cache[lgrpid];
12927 12784 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_NOSLEEP);
12928 12785 }
12929 12786
12930 12787 tsbinfo->tsb_cache = kmem_cachep;
12931 12788 tsbinfo->tsb_vmp = vmp;
12932 12789
12933 12790 if (vaddr == NULL) {
12934 12791 return (EAGAIN);
12935 12792 }
12936 12793
12937 12794 atomic_add_64(&tsb_alloc_bytes, (int64_t)tsbbytes);
12938 12795 kmem_cachep = tsbinfo->tsb_cache;
12939 12796
12940 12797 /*
12941 12798 * If we are allocating from outside the cage, then we need to
12942 12799 * register a relocation callback handler. Note that for now
12943 12800 * since pseudo mappings always hang off of the slab's root page,
12944 12801 * we need only lock the first 8K of the TSB slab. This is a bit
12945 12802 * hacky but it is good for performance.
12946 12803 */
12947 12804 if (kmem_cachep != sfmmu_tsb8k_cache) {
12948 12805 slab_vaddr = (caddr_t)((uintptr_t)vaddr & slab_mask);
12949 12806 ret = as_pagelock(&kas, &pplist, slab_vaddr, PAGESIZE, S_WRITE);
12950 12807 ASSERT(ret == 0);
12951 12808 ret = hat_add_callback(sfmmu_tsb_cb_id, vaddr, (uint_t)tsbbytes,
12952 12809 cbflags, (void *)tsbinfo, &pfn, NULL);
12953 12810
12954 12811 /*
12955 12812 		 * If we could not successfully add the callback function,
12956 12813 		 * free up the resources and return an error condition.
12957 12814 */
12958 12815 if (ret != 0) {
12959 12816 if (kmem_cachep) {
12960 12817 kmem_cache_free(kmem_cachep, vaddr);
12961 12818 } else {
12962 12819 vmem_xfree(vmp, (void *)vaddr, tsbbytes);
12963 12820 }
12964 12821 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE,
12965 12822 S_WRITE);
12966 12823 return (EAGAIN);
12967 12824 }
12968 12825 } else {
12969 12826 /*
12970 12827 		 * Since allocation of 8K TSBs from the heap is rare and occurs
12971 12828 		 * during memory pressure, we allocate them from permanent
12972 12829 * memory rather than using callbacks to get the PFN.
12973 12830 */
12974 12831 pfn = hat_getpfnum(kas.a_hat, vaddr);
12975 12832 }
12976 12833
12977 12834 tsbinfo->tsb_va = vaddr;
12978 12835 tsbinfo->tsb_szc = tsbcode;
12979 12836 tsbinfo->tsb_ttesz_mask = tteszmask;
12980 12837 tsbinfo->tsb_next = NULL;
12981 12838 tsbinfo->tsb_flags = 0;
12982 12839
12983 12840 sfmmu_tsbinfo_setup_phys(tsbinfo, pfn);
12984 12841
12985 12842 sfmmu_inv_tsb(vaddr, tsbbytes);
12986 12843
12987 12844 if (kmem_cachep != sfmmu_tsb8k_cache) {
12988 12845 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, S_WRITE);
12989 12846 }
12990 12847
12991 12848 return (0);
12992 12849 }
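
The DEBUG-only tsb_alloc_mtbf knob at the top of this function is a small fault-injection hook: every Nth allocation is failed on purpose, and the parity of N picks the errno the caller sees. A minimal user-level reduction of the idiom, with illustrative names (alloc_mtbf, maybe_inject_failure) that are not in the kernel:

#include <errno.h>

static unsigned int alloc_count;
unsigned int alloc_mtbf;	/* 0 = off; poke from a debugger to enable */

static int
maybe_inject_failure(void)
{
	/* fail once every (alloc_mtbf + 1) calls; an odd knob -> ENOMEM */
	if (alloc_mtbf != 0 && alloc_count++ == alloc_mtbf) {
		alloc_count = 0;
		return ((alloc_mtbf & 1) ? ENOMEM : EAGAIN);
	}
	return (0);
}

Callers treat a nonzero return exactly like a real allocation failure, which exercises both the ENOMEM and EAGAIN recovery paths without needing actual memory pressure.
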
12993 12850
12994 12851 /*
12995 12852 * Initialize per cpu tsb and per cpu tsbmiss_area
12996 12853 */
12997 12854 void
12998 12855 sfmmu_init_tsbs(void)
12999 12856 {
13000 12857 int i;
13001 12858 struct tsbmiss *tsbmissp;
13002 12859 struct kpmtsbm *kpmtsbmp;
13003 12860 #ifndef sun4v
13004 12861 extern int dcache_line_mask;
13005 12862 #endif /* sun4v */
13006 12863 extern uint_t vac_colors;
13007 12864
13008 12865 /*
13009 12866 * Init. tsb miss area.
13010 12867 */
13011 12868 tsbmissp = tsbmiss_area;
13012 12869
13013 12870 for (i = 0; i < NCPU; tsbmissp++, i++) {
13014 12871 /*
13015 12872 		 * Initialize the tsbmiss area.
13016 12873 * Do this for all possible CPUs as some may be added
13017 12874 * while the system is running. There is no cost to this.
13018 12875 */
13019 12876 tsbmissp->ksfmmup = ksfmmup;
13020 12877 #ifndef sun4v
13021 12878 tsbmissp->dcache_line_mask = (uint16_t)dcache_line_mask;
13022 12879 #endif /* sun4v */
13023 12880 tsbmissp->khashstart =
13024 12881 (struct hmehash_bucket *)va_to_pa((caddr_t)khme_hash);
13025 12882 tsbmissp->uhashstart =
13026 12883 (struct hmehash_bucket *)va_to_pa((caddr_t)uhme_hash);
13027 12884 tsbmissp->khashsz = khmehash_num;
13028 12885 tsbmissp->uhashsz = uhmehash_num;
13029 12886 }
13030 12887
13031 12888 sfmmu_tsb_cb_id = hat_register_callback('T'<<16 | 'S' << 8 | 'B',
13032 12889 sfmmu_tsb_pre_relocator, sfmmu_tsb_post_relocator, NULL, 0);
13033 12890
13034 12891 if (kpm_enable == 0)
13035 12892 return;
13036 12893
13037 12894 /* -- Begin KPM specific init -- */
13038 12895
13039 12896 if (kpm_smallpages) {
13040 12897 /*
13041 12898 * If we're using base pagesize pages for seg_kpm
13042 12899 * mappings, we use the kernel TSB since we can't afford
13043 12900 * to allocate a second huge TSB for these mappings.
13044 12901 */
13045 12902 kpm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base;
13046 12903 kpm_tsbsz = ktsb_szcode;
13047 12904 kpmsm_tsbbase = kpm_tsbbase;
13048 12905 kpmsm_tsbsz = kpm_tsbsz;
13049 12906 } else {
13050 12907 /*
13051 12908 		 * In the VAC conflict case, just put the entries in the
13052 12909 * kernel 8K indexed TSB for now so we can find them.
13053 12910 * This could really be changed in the future if we feel
13054 12911 * the need...
13055 12912 */
13056 12913 kpmsm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base;
13057 12914 kpmsm_tsbsz = ktsb_szcode;
13058 12915 kpm_tsbbase = ktsb_phys? ktsb4m_pbase : (uint64_t)ktsb4m_base;
13059 12916 kpm_tsbsz = ktsb4m_szcode;
13060 12917 }
13061 12918
13062 12919 kpmtsbmp = kpmtsbm_area;
13063 12920 for (i = 0; i < NCPU; kpmtsbmp++, i++) {
13064 12921 /*
13065 12922 * Initialize the kpmtsbm area.
13066 12923 * Do this for all possible CPUs as some may be added
13067 12924 * while the system is running. There is no cost to this.
13068 12925 */
13069 12926 kpmtsbmp->vbase = kpm_vbase;
13070 12927 kpmtsbmp->vend = kpm_vbase + kpm_size * vac_colors;
13071 12928 kpmtsbmp->sz_shift = kpm_size_shift;
13072 12929 kpmtsbmp->kpmp_shift = kpmp_shift;
13073 12930 kpmtsbmp->kpmp2pshft = (uchar_t)kpmp2pshft;
13074 12931 if (kpm_smallpages == 0) {
13075 12932 kpmtsbmp->kpmp_table_sz = kpmp_table_sz;
13076 12933 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_table);
13077 12934 } else {
13078 12935 kpmtsbmp->kpmp_table_sz = kpmp_stable_sz;
13079 12936 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_stable);
13080 12937 }
13081 12938 kpmtsbmp->msegphashpa = va_to_pa(memseg_phash);
13082 12939 kpmtsbmp->flags = KPMTSBM_ENABLE_FLAG;
13083 12940 #ifdef DEBUG
13084 12941 kpmtsbmp->flags |= (kpm_tsbmtl) ? KPMTSBM_TLTSBM_FLAG : 0;
13085 12942 #endif /* DEBUG */
13086 12943 if (ktsb_phys)
13087 12944 kpmtsbmp->flags |= KPMTSBM_TSBPHYS_FLAG;
13088 12945 }
13089 12946
13090 12947 /* -- End KPM specific init -- */
13091 12948 }
13092 12949
13093 12950 /* Skip sfmmu_tsbinfo_alloc() here to avoid kmem_alloc - no real reason */
13094 12951 struct tsb_info ktsb_info[2];
13095 12952
13096 12953 /*
13097 12954 * Called from hat_kern_setup() to setup the tsb_info for ksfmmup.
13098 12955 */
13099 12956 void
13100 12957 sfmmu_init_ktsbinfo()
13101 12958 {
13102 12959 ASSERT(ksfmmup != NULL);
13103 12960 ASSERT(ksfmmup->sfmmu_tsb == NULL);
13104 12961 /*
13105 12962 	 * Allocate tsbinfos for the kernel and copy in data
13106 12963 	 * to make debugging and sun4v setup easier.
13107 12964 */
13108 12965 ktsb_info[0].tsb_sfmmu = ksfmmup;
13109 12966 ktsb_info[0].tsb_szc = ktsb_szcode;
13110 12967 ktsb_info[0].tsb_ttesz_mask = TSB8K|TSB64K|TSB512K;
13111 12968 ktsb_info[0].tsb_va = ktsb_base;
13112 12969 ktsb_info[0].tsb_pa = ktsb_pbase;
13113 12970 ktsb_info[0].tsb_flags = 0;
13114 12971 ktsb_info[0].tsb_tte.ll = 0;
13115 12972 ktsb_info[0].tsb_cache = NULL;
13116 12973
13117 12974 ktsb_info[1].tsb_sfmmu = ksfmmup;
13118 12975 ktsb_info[1].tsb_szc = ktsb4m_szcode;
13119 12976 ktsb_info[1].tsb_ttesz_mask = TSB4M;
13120 12977 ktsb_info[1].tsb_va = ktsb4m_base;
13121 12978 ktsb_info[1].tsb_pa = ktsb4m_pbase;
13122 12979 ktsb_info[1].tsb_flags = 0;
13123 12980 ktsb_info[1].tsb_tte.ll = 0;
13124 12981 ktsb_info[1].tsb_cache = NULL;
13125 12982
13126 12983 /* Link them into ksfmmup. */
13127 12984 ktsb_info[0].tsb_next = &ktsb_info[1];
13128 12985 ktsb_info[1].tsb_next = NULL;
13129 12986 ksfmmup->sfmmu_tsb = &ktsb_info[0];
13130 12987
13131 12988 sfmmu_setup_tsbinfo(ksfmmup);
13132 12989 }
13133 12990
13134 12991 /*
13135 12992 * Cache the last value returned from va_to_pa(). If the VA specified
13136 12993  * in the current call to cached_va_to_pa() maps to the same page as the
13137 12994  * previous call to cached_va_to_pa(), then compute the PA using the
13138 12995  * cached info, else call va_to_pa().
13139 12996 *
13140 12997 * Note: this function is neither MT-safe nor consistent in the presence
13141 12998 * of multiple, interleaved threads. This function was created to enable
13142 12999 * an optimization used during boot (at a point when there's only one thread
13143 13000 * executing on the "boot CPU", and before startup_vm() has been called).
13144 13001 */
13145 13002 static uint64_t
13146 13003 cached_va_to_pa(void *vaddr)
13147 13004 {
13148 13005 static uint64_t prev_vaddr_base = 0;
13149 13006 static uint64_t prev_pfn = 0;
13150 13007
13151 13008 if ((((uint64_t)vaddr) & MMU_PAGEMASK) == prev_vaddr_base) {
13152 13009 return (prev_pfn | ((uint64_t)vaddr & MMU_PAGEOFFSET));
13153 13010 } else {
13154 13011 uint64_t pa = va_to_pa(vaddr);
13155 13012
13156 13013 if (pa != ((uint64_t)-1)) {
13157 13014 /*
13158 13015 * Computed physical address is valid. Cache its
13159 13016 * related info for the next cached_va_to_pa() call.
13160 13017 */
13161 13018 prev_pfn = pa & MMU_PAGEMASK;
13162 13019 prev_vaddr_base = ((uint64_t)vaddr) & MMU_PAGEMASK;
13163 13020 }
13164 13021
13165 13022 return (pa);
13166 13023 }
13167 13024 }
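
Stripped of the failure handling, cached_va_to_pa() is a one-entry cache keyed on the 8K page base. A user-level sketch of the same idiom, assuming 8K pages, with translate() standing in for va_to_pa() and the failed-translation path elided:

#include <stdint.h>

#define	EX_PAGESHIFT	13				/* 8K pages */
#define	EX_PAGEOFFSET	(((uint64_t)1 << EX_PAGESHIFT) - 1)
#define	EX_PAGEMASK	(~EX_PAGEOFFSET)

extern uint64_t translate(void *);	/* stand-in for va_to_pa() */

uint64_t
cached_translate(void *vaddr)
{
	static uint64_t prev_base = ~(uint64_t)0;	/* never matches */
	static uint64_t prev_pfn;
	uint64_t va = (uint64_t)(uintptr_t)vaddr;

	if ((va & EX_PAGEMASK) == prev_base)		/* hit: same page */
		return (prev_pfn | (va & EX_PAGEOFFSET));

	prev_pfn = translate(vaddr) & EX_PAGEMASK;	/* miss: refill */
	prev_base = va & EX_PAGEMASK;
	return (prev_pfn | (va & EX_PAGEOFFSET));
}

The payoff shows up in sfmmu_init_nucleus_hblks() below, which translates many consecutive hmeblk addresses that usually fall within the same 8K page, so most calls are cache hits.
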
13168 13025
13169 13026 /*
13170 13027  * Carve up our nucleus hblk region. We may allocate more hblks than
13171 13028  * asked for due to rounding errors, but we are guaranteed to have at least
13172 13029 * enough space to allocate the requested number of hblk8's and hblk1's.
13173 13030 */
13174 13031 void
13175 13032 sfmmu_init_nucleus_hblks(caddr_t addr, size_t size, int nhblk8, int nhblk1)
13176 13033 {
13177 13034 struct hme_blk *hmeblkp;
13178 13035 size_t hme8blk_sz, hme1blk_sz;
13179 13036 size_t i;
13180 13037 size_t hblk8_bound;
13181 13038 ulong_t j = 0, k = 0;
13182 13039
13183 13040 ASSERT(addr != NULL && size != 0);
13184 13041
13185 13042 /* Need to use proper structure alignment */
13186 13043 hme8blk_sz = roundup(HME8BLK_SZ, sizeof (int64_t));
13187 13044 hme1blk_sz = roundup(HME1BLK_SZ, sizeof (int64_t));
13188 13045
13189 13046 nucleus_hblk8.list = (void *)addr;
13190 13047 nucleus_hblk8.index = 0;
13191 13048
13192 13049 /*
13193 13050 * Use as much memory as possible for hblk8's since we
13194 13051 * expect all bop_alloc'ed memory to be allocated in 8k chunks.
13195 13052 * We need to hold back enough space for the hblk1's which
13196 13053 * we'll allocate next.
13197 13054 */
13198 13055 hblk8_bound = size - (nhblk1 * hme1blk_sz) - hme8blk_sz;
13199 13056 for (i = 0; i <= hblk8_bound; i += hme8blk_sz, j++) {
13200 13057 hmeblkp = (struct hme_blk *)addr;
13201 13058 addr += hme8blk_sz;
13202 13059 hmeblkp->hblk_nuc_bit = 1;
13203 13060 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp);
13204 13061 }
13205 13062 nucleus_hblk8.len = j;
13206 13063 ASSERT(j >= nhblk8);
13207 13064 SFMMU_STAT_ADD(sf_hblk8_ncreate, j);
13208 13065
13209 13066 nucleus_hblk1.list = (void *)addr;
13210 13067 nucleus_hblk1.index = 0;
13211 13068 for (; i <= (size - hme1blk_sz); i += hme1blk_sz, k++) {
13212 13069 hmeblkp = (struct hme_blk *)addr;
13213 13070 addr += hme1blk_sz;
13214 13071 hmeblkp->hblk_nuc_bit = 1;
13215 13072 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp);
13216 13073 }
13217 13074 ASSERT(k >= nhblk1);
13218 13075 nucleus_hblk1.len = k;
13219 13076 SFMMU_STAT_ADD(sf_hblk1_ncreate, k);
13220 13077 }
13221 13078
13222 13079 /*
13223 13080 * This function is currently not supported on this platform. For what
13224 13081 * it's supposed to do, see hat.c and hat_srmmu.c
13225 13082 */
13226 13083 /* ARGSUSED */
13227 13084 faultcode_t
13228 13085 hat_softlock(struct hat *hat, caddr_t addr, size_t *lenp, page_t **ppp,
13229 13086 uint_t flags)
13230 13087 {
13231 13088 return (FC_NOSUPPORT);
13232 13089 }
13233 13090
13234 13091 /*
13235 13092  * Searches the mapping list of the page for a mapping of the same size. If
13236 13093  * none is found, the corresponding bit is cleared in the p_index field. When
13237 13094  * large pages are more prevalent in the system, we can maintain the mapping
13238 13095  * list in order and we won't have to traverse the list each time. Just check
13239 13096  * the next and prev entries, and if both are of a different size, we clear
13240 13096  * the bit.
13240 13097 */
13241 13098 static void
13242 13099 sfmmu_rm_large_mappings(page_t *pp, int ttesz)
13243 13100 {
13244 13101 struct sf_hment *sfhmep;
13245 13102 struct hme_blk *hmeblkp;
13246 13103 int index;
13247 13104 pgcnt_t npgs;
13248 13105
13249 13106 ASSERT(ttesz > TTE8K);
13250 13107
13251 13108 ASSERT(sfmmu_mlist_held(pp));
13252 13109
13253 13110 ASSERT(PP_ISMAPPED_LARGE(pp));
13254 13111
13255 13112 /*
13256 13113 	 * Traverse the mapping list looking for another mapping of the same
13257 13114 	 * size, since we only want to clear the index field if all mappings
13258 13115 	 * of that size are gone.
13259 13116 */
13260 13117
13261 13118 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
13262 13119 if (IS_PAHME(sfhmep))
13263 13120 continue;
13264 13121 hmeblkp = sfmmu_hmetohblk(sfhmep);
13265 13122 if (hme_size(sfhmep) == ttesz) {
13266 13123 /*
13267 13124 			 * Another mapping of the same size; don't clear the index.
13268 13125 */
13269 13126 return;
13270 13127 }
13271 13128 }
13272 13129
13273 13130 /*
13274 13131 * Clear the p_index bit for large page.
13275 13132 */
13276 13133 index = PAGESZ_TO_INDEX(ttesz);
13277 13134 npgs = TTEPAGES(ttesz);
13278 13135 while (npgs-- > 0) {
13279 13136 ASSERT(pp->p_index & index);
13280 13137 pp->p_index &= ~index;
13281 13138 pp = PP_PAGENEXT(pp);
13282 13139 }
13283 13140 }
13284 13141
13285 13142 /*
13286 13143 * return supported features
13287 13144 */
13288 13145 /* ARGSUSED */
13289 13146 int
13290 13147 hat_supported(enum hat_features feature, void *arg)
13291 13148 {
13292 13149 switch (feature) {
13293 13150 case HAT_SHARED_PT:
13294 13151 case HAT_DYNAMIC_ISM_UNMAP:
13295 13152 case HAT_VMODSORT:
13296 13153 return (1);
13297 13154 case HAT_SHARED_REGIONS:
13298 13155 if (shctx_on)
13299 13156 return (1);
13300 13157 else
13301 13158 return (0);
13302 13159 default:
13303 13160 return (0);
13304 13161 }
13305 13162 }
13306 13163
13307 13164 void
13308 13165 hat_enter(struct hat *hat)
13309 13166 {
13310 13167 hatlock_t *hatlockp;
13311 13168
13312 13169 if (hat != ksfmmup) {
13313 13170 hatlockp = TSB_HASH(hat);
13314 13171 mutex_enter(HATLOCK_MUTEXP(hatlockp));
13315 13172 }
13316 13173 }
13317 13174
13318 13175 void
13319 13176 hat_exit(struct hat *hat)
13320 13177 {
13321 13178 hatlock_t *hatlockp;
13322 13179
13323 13180 if (hat != ksfmmup) {
13324 13181 hatlockp = TSB_HASH(hat);
13325 13182 mutex_exit(HATLOCK_MUTEXP(hatlockp));
13326 13183 }
13327 13184 }
13328 13185
13329 13186 /*ARGSUSED*/
13330 13187 void
13331 13188 hat_reserve(struct as *as, caddr_t addr, size_t len)
13332 13189 {
13333 13190 }
13334 13191
13335 13192 static void
13336 13193 hat_kstat_init(void)
13337 13194 {
13338 13195 kstat_t *ksp;
13339 13196
13340 13197 ksp = kstat_create("unix", 0, "sfmmu_global_stat", "hat",
13341 13198 KSTAT_TYPE_RAW, sizeof (struct sfmmu_global_stat),
13342 13199 KSTAT_FLAG_VIRTUAL);
13343 13200 if (ksp) {
13344 13201 ksp->ks_data = (void *) &sfmmu_global_stat;
13345 13202 kstat_install(ksp);
13346 13203 }
13347 13204 ksp = kstat_create("unix", 0, "sfmmu_tsbsize_stat", "hat",
13348 13205 KSTAT_TYPE_RAW, sizeof (struct sfmmu_tsbsize_stat),
13349 13206 KSTAT_FLAG_VIRTUAL);
13350 13207 if (ksp) {
13351 13208 ksp->ks_data = (void *) &sfmmu_tsbsize_stat;
13352 13209 kstat_install(ksp);
13353 13210 }
13354 13211 ksp = kstat_create("unix", 0, "sfmmu_percpu_stat", "hat",
13355 13212 KSTAT_TYPE_RAW, sizeof (struct sfmmu_percpu_stat) * NCPU,
13356 13213 KSTAT_FLAG_WRITABLE);
13357 13214 if (ksp) {
13358 13215 ksp->ks_update = sfmmu_kstat_percpu_update;
13359 13216 kstat_install(ksp);
13360 13217 }
13361 13218 }
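
For reference, the kstats installed here can be read back from userland through libkstat; a minimal sketch (compile with -lkstat, most error handling elided):

#include <stdio.h>
#include <kstat.h>

int
main(void)
{
	kstat_ctl_t *kc = kstat_open();
	kstat_t *ksp;

	if (kc == NULL)
		return (1);
	ksp = kstat_lookup(kc, "unix", 0, "sfmmu_global_stat");
	if (ksp != NULL && kstat_read(kc, ksp, NULL) != -1) {
		/* ks_data points at the raw struct sfmmu_global_stat */
		(void) printf("raw kstat, %lu bytes\n",
		    (ulong_t)ksp->ks_data_size);
	}
	(void) kstat_close(kc);
	return (0);
}

Note that sfmmu_percpu_stat is created with KSTAT_FLAG_WRITABLE: writing it clears the per-CPU counters, which is exactly what sfmmu_kstat_percpu_update() below implements on KSTAT_WRITE.
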
13362 13219
13363 13220 /* ARGSUSED */
13364 13221 static int
13365 13222 sfmmu_kstat_percpu_update(kstat_t *ksp, int rw)
13366 13223 {
13367 13224 struct sfmmu_percpu_stat *cpu_kstat = ksp->ks_data;
13368 13225 struct tsbmiss *tsbm = tsbmiss_area;
13369 13226 struct kpmtsbm *kpmtsbm = kpmtsbm_area;
13370 13227 int i;
13371 13228
13372 13229 ASSERT(cpu_kstat);
13373 13230 if (rw == KSTAT_READ) {
13374 13231 for (i = 0; i < NCPU; cpu_kstat++, tsbm++, kpmtsbm++, i++) {
13375 13232 cpu_kstat->sf_itlb_misses = 0;
13376 13233 cpu_kstat->sf_dtlb_misses = 0;
13377 13234 cpu_kstat->sf_utsb_misses = tsbm->utsb_misses -
13378 13235 tsbm->uprot_traps;
13379 13236 cpu_kstat->sf_ktsb_misses = tsbm->ktsb_misses +
13380 13237 kpmtsbm->kpm_tsb_misses - tsbm->kprot_traps;
13381 13238 cpu_kstat->sf_tsb_hits = 0;
13382 13239 cpu_kstat->sf_umod_faults = tsbm->uprot_traps;
13383 13240 cpu_kstat->sf_kmod_faults = tsbm->kprot_traps;
13384 13241 }
13385 13242 } else {
13386 13243 /* KSTAT_WRITE is used to clear stats */
13387 13244 for (i = 0; i < NCPU; tsbm++, kpmtsbm++, i++) {
13388 13245 tsbm->utsb_misses = 0;
13389 13246 tsbm->ktsb_misses = 0;
13390 13247 tsbm->uprot_traps = 0;
13391 13248 tsbm->kprot_traps = 0;
13392 13249 kpmtsbm->kpm_dtlb_misses = 0;
13393 13250 kpmtsbm->kpm_tsb_misses = 0;
13394 13251 }
13395 13252 }
13396 13253 return (0);
13397 13254 }
13398 13255
13399 13256 #ifdef DEBUG
13400 13257
13401 13258 tte_t *gorig[NCPU], *gcur[NCPU], *gnew[NCPU];
13402 13259
13403 13260 /*
13404 13261 * A tte checker. *orig_old is the value we read before cas.
13405 13262 * *cur is the value returned by cas.
13406 13263 * *new is the desired value when we do the cas.
13407 13264 *
13408 13265 * *hmeblkp is currently unused.
13409 13266 */
13410 13267
13411 13268 /* ARGSUSED */
13412 13269 void
13413 13270 chk_tte(tte_t *orig_old, tte_t *cur, tte_t *new, struct hme_blk *hmeblkp)
13414 13271 {
13415 13272 pfn_t i, j, k;
13416 13273 int cpuid = CPU->cpu_id;
13417 13274
13418 13275 gorig[cpuid] = orig_old;
13419 13276 gcur[cpuid] = cur;
13420 13277 gnew[cpuid] = new;
13421 13278
13422 13279 #ifdef lint
13423 13280 hmeblkp = hmeblkp;
13424 13281 #endif
13425 13282
13426 13283 if (TTE_IS_VALID(orig_old)) {
13427 13284 if (TTE_IS_VALID(cur)) {
13428 13285 i = TTE_TO_TTEPFN(orig_old);
13429 13286 j = TTE_TO_TTEPFN(cur);
13430 13287 k = TTE_TO_TTEPFN(new);
13431 13288 if (i != j) {
13432 13289 /* remap error? */
13433 13290 panic("chk_tte: bad pfn, 0x%lx, 0x%lx", i, j);
13434 13291 }
13435 13292
13436 13293 if (i != k) {
13437 13294 /* remap error? */
13438 13295 panic("chk_tte: bad pfn2, 0x%lx, 0x%lx", i, k);
13439 13296 }
13440 13297 } else {
13441 13298 if (TTE_IS_VALID(new)) {
13442 13299 panic("chk_tte: invalid cur? ");
13443 13300 }
13444 13301
13445 13302 i = TTE_TO_TTEPFN(orig_old);
13446 13303 k = TTE_TO_TTEPFN(new);
13447 13304 if (i != k) {
13448 13305 panic("chk_tte: bad pfn3, 0x%lx, 0x%lx", i, k);
13449 13306 }
13450 13307 }
13451 13308 } else {
13452 13309 if (TTE_IS_VALID(cur)) {
13453 13310 j = TTE_TO_TTEPFN(cur);
13454 13311 if (TTE_IS_VALID(new)) {
13455 13312 k = TTE_TO_TTEPFN(new);
13456 13313 if (j != k) {
13457 13314 panic("chk_tte: bad pfn4, 0x%lx, 0x%lx",
13458 13315 j, k);
13459 13316 }
13460 13317 } else {
13461 13318 panic("chk_tte: why here?");
13462 13319 }
13463 13320 } else {
13464 13321 if (!TTE_IS_VALID(new)) {
13465 13322 panic("chk_tte: why here2 ?");
13466 13323 }
13467 13324 }
13468 13325 }
13469 13326 }
13470 13327
13471 13328 #endif /* DEBUG */
13472 13329
13473 13330 extern void prefetch_tsbe_read(struct tsbe *);
13474 13331 extern void prefetch_tsbe_write(struct tsbe *);
13475 13332
13476 13333
13477 13334 /*
13478 13335 * We want to prefetch 7 cache lines ahead for our read prefetch. This gives
13479 13336 * us optimal performance on Cheetah+. You can only have 8 outstanding
13480 13337 * prefetches at any one time, so we opted for 7 read prefetches and 1 write
13481 13338  * prefetch to make the best use of the prefetch capability.
13482 13339 */
13483 13340 #define TSBE_PREFETCH_STRIDE (7)
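
As a quick check on the stride arithmetic (the TSBE and P$ line sizes come from the comment inside sfmmu_copy_tsb() below; this standalone sketch spells out the constants):

enum {
	EX_TSBE_BYTES	  = 16,			/* sizeof (struct tsbe) */
	EX_PCACHE_LINE	  = 64,			/* P$ line size */
	EX_TSBES_PER_LINE = EX_PCACHE_LINE / EX_TSBE_BYTES,	/* 4 */
	/*
	 * sfmmu_copy_tsb() issues a read prefetch once every
	 * EX_TSBES_PER_LINE entries, and none within
	 * 4 * (TSBE_PREFETCH_STRIDE + 1) = 32 entries of the end of
	 * the old TSB (the last_prefetch cutoff below).
	 */
	EX_READ_PF_CUTOFF = EX_TSBES_PER_LINE * (7 + 1)		/* 32 */
};
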
13484 13341
13485 13342 void
13486 13343 sfmmu_copy_tsb(struct tsb_info *old_tsbinfo, struct tsb_info *new_tsbinfo)
13487 13344 {
13488 13345 int old_bytes = TSB_BYTES(old_tsbinfo->tsb_szc);
13489 13346 int new_bytes = TSB_BYTES(new_tsbinfo->tsb_szc);
13490 13347 int old_entries = TSB_ENTRIES(old_tsbinfo->tsb_szc);
13491 13348 int new_entries = TSB_ENTRIES(new_tsbinfo->tsb_szc);
13492 13349 struct tsbe *old;
13493 13350 struct tsbe *new;
13494 13351 struct tsbe *new_base = (struct tsbe *)new_tsbinfo->tsb_va;
13495 13352 uint64_t va;
13496 13353 int new_offset;
13497 13354 int i;
13498 13355 int vpshift;
13499 13356 int last_prefetch;
13500 13357
13501 13358 if (old_bytes == new_bytes) {
13502 13359 bcopy(old_tsbinfo->tsb_va, new_tsbinfo->tsb_va, new_bytes);
13503 13360 } else {
13504 13361
13505 13362 /*
13506 13363 		 * A TSBE is 16 bytes, which means there are four TSBEs per
13507 13364 		 * P$ line (64 bytes), so we prefetch once every 4 TSBEs.
13508 13365 */
13509 13366 old = (struct tsbe *)old_tsbinfo->tsb_va;
13510 13367 last_prefetch = old_entries - (4*(TSBE_PREFETCH_STRIDE+1));
13511 13368 for (i = 0; i < old_entries; i++, old++) {
13512 13369 if (((i & (4-1)) == 0) && (i < last_prefetch))
13513 13370 prefetch_tsbe_read(old);
13514 13371 if (!old->tte_tag.tag_invalid) {
13515 13372 /*
13516 13373 * We have a valid TTE to remap. Check the
13517 13374 * size. We won't remap 64K or 512K TTEs
13518 13375 * because they span more than one TSB entry
13519 13376 * and are indexed using an 8K virt. page.
13520 13377 * Ditto for 32M and 256M TTEs.
13521 13378 */
13522 13379 if (TTE_CSZ(&old->tte_data) == TTE64K ||
13523 13380 TTE_CSZ(&old->tte_data) == TTE512K)
13524 13381 continue;
13525 13382 if (mmu_page_sizes == max_mmu_page_sizes) {
13526 13383 if (TTE_CSZ(&old->tte_data) == TTE32M ||
13527 13384 TTE_CSZ(&old->tte_data) == TTE256M)
13528 13385 continue;
13529 13386 }
13530 13387
13531 13388 /* clear the lower 22 bits of the va */
13532 13389 va = *(uint64_t *)old << 22;
13533 13390 /* turn va into a virtual pfn */
13534 13391 va >>= 22 - TSB_START_SIZE;
13535 13392 /*
13536 13393 * or in bits from the offset in the tsb
13537 13394 * to get the real virtual pfn. These
13538 13395 * correspond to bits [21:13] in the va
13539 13396 */
13540 13397 vpshift =
13541 13398 TTE_BSZS_SHIFT(TTE_CSZ(&old->tte_data)) &
13542 13399 0x1ff;
13543 13400 va |= (i << vpshift);
13544 13401 va >>= vpshift;
13545 13402 new_offset = va & (new_entries - 1);
13546 13403 new = new_base + new_offset;
13547 13404 prefetch_tsbe_write(new);
13548 13405 *new = *old;
13549 13406 }
13550 13407 }
13551 13408 }
13552 13409 }
13553 13410
13554 13411 /*
13555 13412 * unused in sfmmu
13556 13413 */
13557 13414 void
13558 13415 hat_dump(void)
13559 13416 {
13560 13417 }
13561 13418
13562 13419 /*
13563 13420 * Called when a thread is exiting and we have switched to the kernel address
13564 13421 * space. Perform the same VM initialization resume() uses when switching
13565 13422 * processes.
13566 13423 *
13567 13424 * Note that sfmmu_load_mmustate() is currently a no-op for kernel threads, but
13568 13425 * we call it anyway in case the semantics change in the future.
13569 13426 */
13570 13427 /*ARGSUSED*/
13571 13428 void
13572 13429 hat_thread_exit(kthread_t *thd)
13573 13430 {
13574 13431 uint_t pgsz_cnum;
13575 13432 uint_t pstate_save;
13576 13433
13577 13434 ASSERT(thd->t_procp->p_as == &kas);
13578 13435
13579 13436 pgsz_cnum = KCONTEXT;
13580 13437 #ifdef sun4u
13581 13438 pgsz_cnum |= (ksfmmup->sfmmu_cext << CTXREG_EXT_SHIFT);
13582 13439 #endif
13583 13440
13584 13441 /*
13585 13442 * Note that sfmmu_load_mmustate() is currently a no-op for
13586 13443 	 * kernel threads. We need to disable interrupts here
13587 13444 	 * simply because sfmmu_load_mmustate() panics if it is
13588 13445 	 * called with interrupts enabled.
13589 13446 */
13590 13447 pstate_save = sfmmu_disable_intrs();
13591 13448
13592 13449 /* Compatibility Note: hw takes care of MMU_SCONTEXT1 */
13593 13450 sfmmu_setctx_sec(pgsz_cnum);
13594 13451 sfmmu_load_mmustate(ksfmmup);
13595 13452 sfmmu_enable_intrs(pstate_save);
13596 13453 }
13597 13454
13598 13455
13599 13456 /*
13600 13457 * SRD support
13601 13458 */
13602 13459 #define SRD_HASH_FUNCTION(vp) (((((uintptr_t)(vp)) >> 4) ^ \
13603 13460 (((uintptr_t)(vp)) >> 11)) & \
13604 13461 srd_hashmask)
13605 13462
13606 13463 /*
13607 13464 * Attach the process to the srd struct associated with the exec vnode
13608 13465 * from which the process is started.
13609 13466 */
13610 13467 void
13611 13468 hat_join_srd(struct hat *sfmmup, vnode_t *evp)
13612 13469 {
13613 13470 uint_t hash = SRD_HASH_FUNCTION(evp);
13614 13471 sf_srd_t *srdp;
13615 13472 sf_srd_t *newsrdp;
13616 13473
13617 13474 ASSERT(sfmmup != ksfmmup);
13618 13475 ASSERT(sfmmup->sfmmu_srdp == NULL);
13619 13476
13620 13477 if (!shctx_on) {
13621 13478 return;
13622 13479 }
13623 13480
13624 13481 VN_HOLD(evp);
13625 13482
13626 13483 if (srd_buckets[hash].srdb_srdp != NULL) {
13627 13484 mutex_enter(&srd_buckets[hash].srdb_lock);
13628 13485 for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL;
13629 13486 srdp = srdp->srd_hash) {
13630 13487 if (srdp->srd_evp == evp) {
13631 13488 ASSERT(srdp->srd_refcnt >= 0);
13632 13489 sfmmup->sfmmu_srdp = srdp;
13633 13490 atomic_inc_32(
13634 13491 (volatile uint_t *)&srdp->srd_refcnt);
13635 13492 mutex_exit(&srd_buckets[hash].srdb_lock);
13636 13493 return;
13637 13494 }
13638 13495 }
13639 13496 mutex_exit(&srd_buckets[hash].srdb_lock);
13640 13497 }
13641 13498 newsrdp = kmem_cache_alloc(srd_cache, KM_SLEEP);
13642 13499 ASSERT(newsrdp->srd_next_ismrid == 0 && newsrdp->srd_next_hmerid == 0);
13643 13500
13644 13501 newsrdp->srd_evp = evp;
13645 13502 newsrdp->srd_refcnt = 1;
13646 13503 newsrdp->srd_hmergnfree = NULL;
13647 13504 newsrdp->srd_ismrgnfree = NULL;
13648 13505
13649 13506 mutex_enter(&srd_buckets[hash].srdb_lock);
13650 13507 for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL;
13651 13508 srdp = srdp->srd_hash) {
13652 13509 if (srdp->srd_evp == evp) {
13653 13510 ASSERT(srdp->srd_refcnt >= 0);
13654 13511 sfmmup->sfmmu_srdp = srdp;
13655 13512 atomic_inc_32((volatile uint_t *)&srdp->srd_refcnt);
13656 13513 mutex_exit(&srd_buckets[hash].srdb_lock);
13657 13514 kmem_cache_free(srd_cache, newsrdp);
13658 13515 return;
13659 13516 }
13660 13517 }
13661 13518 newsrdp->srd_hash = srd_buckets[hash].srdb_srdp;
13662 13519 srd_buckets[hash].srdb_srdp = newsrdp;
13663 13520 sfmmup->sfmmu_srdp = newsrdp;
13664 13521
13665 13522 mutex_exit(&srd_buckets[hash].srdb_lock);
13666 13523
13667 13524 }
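
hat_join_srd() is an instance of a classic lookup-or-insert pattern: an optimistic search under the bucket lock, a sleeping allocation with no locks held, then a second search before the insert to handle the race. A self-contained user-level reduction, using pthreads and an int key in place of the srd hash (the kernel bumps srd_refcnt atomically rather than under the lock, and error handling is elided):

#include <pthread.h>
#include <stdlib.h>

typedef struct node {
	struct node	*next;
	int		key;
	unsigned int	refcnt;
} node_t;

typedef struct bucket {
	pthread_mutex_t	lock;
	node_t		*head;
} bucket_t;

static node_t *
find(bucket_t *b, int key)		/* caller holds b->lock */
{
	node_t *np;

	for (np = b->head; np != NULL; np = np->next)
		if (np->key == key)
			return (np);
	return (NULL);
}

node_t *
lookup_or_insert(bucket_t *b, int key)
{
	node_t *np, *newp;

	(void) pthread_mutex_lock(&b->lock);
	if ((np = find(b, key)) != NULL) {
		np->refcnt++;
		(void) pthread_mutex_unlock(&b->lock);
		return (np);
	}
	(void) pthread_mutex_unlock(&b->lock);

	/* may sleep; must not hold the bucket lock (cf. KM_SLEEP) */
	newp = calloc(1, sizeof (*newp));
	newp->key = key;
	newp->refcnt = 1;

	(void) pthread_mutex_lock(&b->lock);
	if ((np = find(b, key)) != NULL) {	/* lost the race */
		np->refcnt++;
		(void) pthread_mutex_unlock(&b->lock);
		free(newp);
		return (np);
	}
	newp->next = b->head;
	b->head = newp;
	(void) pthread_mutex_unlock(&b->lock);
	return (newp);
}

Dropping the lock around the allocation is what forces the second search: another thread may have inserted the same key in between, in which case the freshly allocated node is simply freed, exactly as hat_join_srd() frees its speculative newsrdp.
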
13668 13525
13669 13526 static void
13670 13527 sfmmu_leave_srd(sfmmu_t *sfmmup)
13671 13528 {
13672 13529 vnode_t *evp;
13673 13530 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
13674 13531 uint_t hash;
13675 13532 sf_srd_t **prev_srdpp;
13676 13533 sf_region_t *rgnp;
13677 13534 sf_region_t *nrgnp;
13678 13535 #ifdef DEBUG
13679 13536 int rgns = 0;
13680 13537 #endif
13681 13538 int i;
13682 13539
13683 13540 ASSERT(sfmmup != ksfmmup);
13684 13541 ASSERT(srdp != NULL);
13685 13542 ASSERT(srdp->srd_refcnt > 0);
13686 13543 ASSERT(sfmmup->sfmmu_scdp == NULL);
13687 13544 ASSERT(sfmmup->sfmmu_free == 1);
13688 13545
13689 13546 sfmmup->sfmmu_srdp = NULL;
13690 13547 evp = srdp->srd_evp;
13691 13548 ASSERT(evp != NULL);
13692 13549 if (atomic_dec_32_nv((volatile uint_t *)&srdp->srd_refcnt)) {
13693 13550 VN_RELE(evp);
13694 13551 return;
13695 13552 }
13696 13553
13697 13554 hash = SRD_HASH_FUNCTION(evp);
13698 13555 mutex_enter(&srd_buckets[hash].srdb_lock);
13699 13556 for (prev_srdpp = &srd_buckets[hash].srdb_srdp;
13700 13557 (srdp = *prev_srdpp) != NULL; prev_srdpp = &srdp->srd_hash) {
13701 13558 if (srdp->srd_evp == evp) {
13702 13559 break;
13703 13560 }
13704 13561 }
13705 13562 if (srdp == NULL || srdp->srd_refcnt) {
13706 13563 mutex_exit(&srd_buckets[hash].srdb_lock);
13707 13564 VN_RELE(evp);
13708 13565 return;
13709 13566 }
13710 13567 *prev_srdpp = srdp->srd_hash;
13711 13568 mutex_exit(&srd_buckets[hash].srdb_lock);
13712 13569
13713 13570 ASSERT(srdp->srd_refcnt == 0);
13714 13571 VN_RELE(evp);
13715 13572
13716 13573 #ifdef DEBUG
13717 13574 for (i = 0; i < SFMMU_MAX_REGION_BUCKETS; i++) {
13718 13575 ASSERT(srdp->srd_rgnhash[i] == NULL);
13719 13576 }
13720 13577 #endif /* DEBUG */
13721 13578
13722 13579 	/* free each hme region in the srd */
13723 13580 for (rgnp = srdp->srd_hmergnfree; rgnp != NULL; rgnp = nrgnp) {
13724 13581 nrgnp = rgnp->rgn_next;
13725 13582 ASSERT(rgnp->rgn_id < srdp->srd_next_hmerid);
13726 13583 ASSERT(rgnp->rgn_refcnt == 0);
13727 13584 ASSERT(rgnp->rgn_sfmmu_head == NULL);
13728 13585 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE);
13729 13586 ASSERT(rgnp->rgn_hmeflags == 0);
13730 13587 ASSERT(srdp->srd_hmergnp[rgnp->rgn_id] == rgnp);
13731 13588 #ifdef DEBUG
13732 13589 for (i = 0; i < MMU_PAGE_SIZES; i++) {
13733 13590 ASSERT(rgnp->rgn_ttecnt[i] == 0);
13734 13591 }
13735 13592 rgns++;
13736 13593 #endif /* DEBUG */
13737 13594 kmem_cache_free(region_cache, rgnp);
13738 13595 }
13739 13596 ASSERT(rgns == srdp->srd_next_hmerid);
13740 13597
13741 13598 #ifdef DEBUG
13742 13599 rgns = 0;
13743 13600 #endif
13744 13601 	/* free each ism rgn in the srd */
13745 13602 for (rgnp = srdp->srd_ismrgnfree; rgnp != NULL; rgnp = nrgnp) {
13746 13603 nrgnp = rgnp->rgn_next;
13747 13604 ASSERT(rgnp->rgn_id < srdp->srd_next_ismrid);
13748 13605 ASSERT(rgnp->rgn_refcnt == 0);
13749 13606 ASSERT(rgnp->rgn_sfmmu_head == NULL);
13750 13607 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE);
13751 13608 ASSERT(srdp->srd_ismrgnp[rgnp->rgn_id] == rgnp);
13752 13609 #ifdef DEBUG
13753 13610 for (i = 0; i < MMU_PAGE_SIZES; i++) {
13754 13611 ASSERT(rgnp->rgn_ttecnt[i] == 0);
13755 13612 }
13756 13613 rgns++;
13757 13614 #endif /* DEBUG */
13758 13615 kmem_cache_free(region_cache, rgnp);
13759 13616 }
13760 13617 ASSERT(rgns == srdp->srd_next_ismrid);
13761 13618 ASSERT(srdp->srd_ismbusyrgns == 0);
13762 13619 ASSERT(srdp->srd_hmebusyrgns == 0);
13763 13620
13764 13621 srdp->srd_next_ismrid = 0;
13765 13622 srdp->srd_next_hmerid = 0;
13766 13623
13767 13624 bzero((void *)srdp->srd_ismrgnp,
13768 13625 sizeof (sf_region_t *) * SFMMU_MAX_ISM_REGIONS);
13769 13626 bzero((void *)srdp->srd_hmergnp,
13770 13627 sizeof (sf_region_t *) * SFMMU_MAX_HME_REGIONS);
13771 13628
13772 13629 ASSERT(srdp->srd_scdp == NULL);
13773 13630 kmem_cache_free(srd_cache, srdp);
13774 13631 }
13775 13632
13776 13633 /* ARGSUSED */
13777 13634 static int
13778 13635 sfmmu_srdcache_constructor(void *buf, void *cdrarg, int kmflags)
13779 13636 {
13780 13637 sf_srd_t *srdp = (sf_srd_t *)buf;
13781 13638 bzero(buf, sizeof (*srdp));
13782 13639
13783 13640 mutex_init(&srdp->srd_mutex, NULL, MUTEX_DEFAULT, NULL);
13784 13641 mutex_init(&srdp->srd_scd_mutex, NULL, MUTEX_DEFAULT, NULL);
13785 13642 return (0);
13786 13643 }
13787 13644
13788 13645 /* ARGSUSED */
13789 13646 static void
13790 13647 sfmmu_srdcache_destructor(void *buf, void *cdrarg)
13791 13648 {
13792 13649 sf_srd_t *srdp = (sf_srd_t *)buf;
13793 13650
13794 13651 mutex_destroy(&srdp->srd_mutex);
13795 13652 mutex_destroy(&srdp->srd_scd_mutex);
13796 13653 }
13797 13654
13798 13655 /*
13799 13656 * The caller makes sure hat_join_region()/hat_leave_region() can't be called
13800 13657 * at the same time for the same process and address range. This is ensured by
13801 13658  * the fact that the address space is locked as writer when a process joins the
13802 13659 * regions. Therefore there's no need to hold an srd lock during the entire
13803 13660 * execution of hat_join_region()/hat_leave_region().
13804 13661 */
13805 13662
13806 13663 #define RGN_HASH_FUNCTION(obj) (((((uintptr_t)(obj)) >> 4) ^ \
13807 13664 (((uintptr_t)(obj)) >> 11)) & \
13808 13665 srd_rgn_hashmask)
13809 13666 /*
13810 13667 * This routine implements the shared context functionality required when
13811 13668 * attaching a segment to an address space. It must be called from
13812 13669 * hat_share() for D(ISM) segments and from segvn_create() for segments
13813 13670 * with the MAP_PRIVATE and MAP_TEXT flags set. It returns a region_cookie
13814 13671 * which is saved in the private segment data for hme segments and
13815 13672 * the ism_map structure for ism segments.
13816 13673 */
13817 13674 hat_region_cookie_t
13818 13675 hat_join_region(struct hat *sfmmup,
13819 13676 caddr_t r_saddr,
13820 13677 size_t r_size,
13821 13678 void *r_obj,
13822 13679 u_offset_t r_objoff,
13823 13680 uchar_t r_perm,
13824 13681 uchar_t r_pgszc,
13825 13682 hat_rgn_cb_func_t r_cb_function,
13826 13683 uint_t flags)
13827 13684 {
13828 13685 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
13829 13686 uint_t rhash;
13830 13687 uint_t rid;
13831 13688 hatlock_t *hatlockp;
13832 13689 sf_region_t *rgnp;
13833 13690 sf_region_t *new_rgnp = NULL;
13834 13691 int i;
13835 13692 uint16_t *nextidp;
13836 13693 sf_region_t **freelistp;
13837 13694 int maxids;
13838 13695 sf_region_t **rarrp;
13839 13696 uint16_t *busyrgnsp;
13840 13697 ulong_t rttecnt;
13841 13698 uchar_t tteflag;
13842 13699 uchar_t r_type = flags & HAT_REGION_TYPE_MASK;
13843 13700 int text = (r_type == HAT_REGION_TEXT);
13844 13701
13845 13702 if (srdp == NULL || r_size == 0) {
13846 13703 return (HAT_INVALID_REGION_COOKIE);
13847 13704 }
13848 13705
13849 13706 ASSERT(sfmmup != ksfmmup);
13850 13707 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
13851 13708 ASSERT(srdp->srd_refcnt > 0);
13852 13709 ASSERT(!(flags & ~HAT_REGION_TYPE_MASK));
13853 13710 ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM);
13854 13711 ASSERT(r_pgszc < mmu_page_sizes);
13855 13712 if (!IS_P2ALIGNED(r_saddr, TTEBYTES(r_pgszc)) ||
13856 13713 !IS_P2ALIGNED(r_size, TTEBYTES(r_pgszc))) {
13857 13714 panic("hat_join_region: region addr or size is not aligned\n");
13858 13715 }
13859 13716
13860 13717
13861 13718 r_type = (r_type == HAT_REGION_ISM) ? SFMMU_REGION_ISM :
13862 13719 SFMMU_REGION_HME;
13863 13720 /*
13864 13721 	 * Currently we only support shared hmes for the read-only
13865 13722 	 * main text region.
13866 13723 */
13867 13724 if (r_type == SFMMU_REGION_HME && ((r_obj != srdp->srd_evp) ||
13868 13725 (r_perm & PROT_WRITE))) {
13869 13726 return (HAT_INVALID_REGION_COOKIE);
13870 13727 }
13871 13728
13872 13729 rhash = RGN_HASH_FUNCTION(r_obj);
13873 13730
13874 13731 if (r_type == SFMMU_REGION_ISM) {
13875 13732 nextidp = &srdp->srd_next_ismrid;
13876 13733 freelistp = &srdp->srd_ismrgnfree;
13877 13734 maxids = SFMMU_MAX_ISM_REGIONS;
13878 13735 rarrp = srdp->srd_ismrgnp;
13879 13736 busyrgnsp = &srdp->srd_ismbusyrgns;
13880 13737 } else {
13881 13738 nextidp = &srdp->srd_next_hmerid;
13882 13739 freelistp = &srdp->srd_hmergnfree;
13883 13740 maxids = SFMMU_MAX_HME_REGIONS;
13884 13741 rarrp = srdp->srd_hmergnp;
13885 13742 busyrgnsp = &srdp->srd_hmebusyrgns;
13886 13743 }
13887 13744
13888 13745 mutex_enter(&srdp->srd_mutex);
13889 13746
13890 13747 for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL;
13891 13748 rgnp = rgnp->rgn_hash) {
13892 13749 if (rgnp->rgn_saddr == r_saddr && rgnp->rgn_size == r_size &&
13893 13750 rgnp->rgn_obj == r_obj && rgnp->rgn_objoff == r_objoff &&
13894 13751 rgnp->rgn_perm == r_perm && rgnp->rgn_pgszc == r_pgszc) {
13895 13752 break;
13896 13753 }
13897 13754 }
13898 13755
13899 13756 rfound:
13900 13757 if (rgnp != NULL) {
13901 13758 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
13902 13759 ASSERT(rgnp->rgn_cb_function == r_cb_function);
13903 13760 ASSERT(rgnp->rgn_refcnt >= 0);
13904 13761 rid = rgnp->rgn_id;
13905 13762 ASSERT(rid < maxids);
13906 13763 ASSERT(rarrp[rid] == rgnp);
13907 13764 ASSERT(rid < *nextidp);
13908 13765 atomic_inc_32((volatile uint_t *)&rgnp->rgn_refcnt);
13909 13766 mutex_exit(&srdp->srd_mutex);
13910 13767 if (new_rgnp != NULL) {
13911 13768 kmem_cache_free(region_cache, new_rgnp);
13912 13769 }
13913 13770 if (r_type == SFMMU_REGION_HME) {
13914 13771 int myjoin =
13915 13772 (sfmmup == astosfmmu(curthread->t_procp->p_as));
13916 13773
13917 13774 sfmmu_link_to_hmeregion(sfmmup, rgnp);
13918 13775 /*
13919 13776 			 * The bitmap should be updated after linking the sfmmu
13920 13777 			 * onto the region list so that pageunload() doesn't
13921 13778 			 * skip the TSB/TLB flush. As soon as the bitmap is
13922 13779 			 * updated, another thread in this process can start
13923 13780 			 * accessing this region.
13924 13781 */
13925 13782 /*
13926 13783 * Normally ttecnt accounting is done as part of
13927 13784 * pagefault handling. But a process may not take any
13928 13785 * pagefaults on shared hmeblks created by some other
13929 13786 			 * process. To compensate for this, assume that the
13930 13787 * entire region will end up faulted in using
13931 13788 * the region's pagesize.
13932 13789 *
13933 13790 */
13934 13791 if (r_pgszc > TTE8K) {
13935 13792 tteflag = 1 << r_pgszc;
13936 13793 if (disable_large_pages & tteflag) {
13937 13794 tteflag = 0;
13938 13795 }
13939 13796 } else {
13940 13797 tteflag = 0;
13941 13798 }
13942 13799 if (tteflag && !(sfmmup->sfmmu_rtteflags & tteflag)) {
13943 13800 hatlockp = sfmmu_hat_enter(sfmmup);
13944 13801 sfmmup->sfmmu_rtteflags |= tteflag;
13945 13802 sfmmu_hat_exit(hatlockp);
13946 13803 }
13947 13804 hatlockp = sfmmu_hat_enter(sfmmup);
13948 13805
13949 13806 /*
13950 13807 * Preallocate 1/4 of ttecnt's in 8K TSB for >= 4M
13951 13808 * region to allow for large page allocation failure.
13952 13809 */
13953 13810 if (r_pgszc >= TTE4M) {
13954 13811 sfmmup->sfmmu_tsb0_4minflcnt +=
13955 13812 r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2);
13956 13813 }
13957 13814
13958 13815 /* update sfmmu_ttecnt with the shme rgn ttecnt */
13959 13816 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc);
13960 13817 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc],
13961 13818 rttecnt);
13962 13819
13963 13820 if (text && r_pgszc >= TTE4M &&
13964 13821 (tteflag || ((disable_large_pages >> TTE4M) &
13965 13822 ((1 << (r_pgszc - TTE4M + 1)) - 1))) &&
13966 13823 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) {
13967 13824 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG);
13968 13825 }
13969 13826
13970 13827 sfmmu_hat_exit(hatlockp);
13971 13828 /*
13972 13829 * On Panther we need to make sure TLB is programmed
13973 13830 * to accept 32M/256M pages. Call
13974 13831 * sfmmu_check_page_sizes() now to make sure TLB is
13975 13832 		 * set up before making hmeregions visible to other
13976 13833 * threads.
13977 13834 */
13978 13835 sfmmu_check_page_sizes(sfmmup, 1);
13979 13836 hatlockp = sfmmu_hat_enter(sfmmup);
13980 13837 SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid);
13981 13838
13982 13839 /*
13983 13840 			 * If the context is invalid, the tsb miss exception
13984 13841 			 * code will call sfmmu_check_page_sizes() and update
13985 13842 			 * the tsbmiss area later.
13986 13843 */
13987 13844 kpreempt_disable();
13988 13845 if (myjoin &&
13989 13846 (sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum
13990 13847 != INVALID_CONTEXT)) {
13991 13848 struct tsbmiss *tsbmp;
13992 13849
13993 13850 tsbmp = &tsbmiss_area[CPU->cpu_id];
13994 13851 ASSERT(sfmmup == tsbmp->usfmmup);
13995 13852 BT_SET(tsbmp->shmermap, rid);
13996 13853 if (r_pgszc > TTE64K) {
13997 13854 tsbmp->uhat_rtteflags |= tteflag;
13998 13855 }
13999 13856
14000 13857 }
14001 13858 kpreempt_enable();
14002 13859
14003 13860 sfmmu_hat_exit(hatlockp);
14004 13861 ASSERT((hat_region_cookie_t)((uint64_t)rid) !=
14005 13862 HAT_INVALID_REGION_COOKIE);
14006 13863 } else {
14007 13864 hatlockp = sfmmu_hat_enter(sfmmup);
14008 13865 SF_RGNMAP_ADD(sfmmup->sfmmu_ismregion_map, rid);
14009 13866 sfmmu_hat_exit(hatlockp);
14010 13867 }
14011 13868 ASSERT(rid < maxids);
14012 13869
14013 13870 if (r_type == SFMMU_REGION_ISM) {
14014 13871 sfmmu_find_scd(sfmmup);
14015 13872 }
14016 13873 return ((hat_region_cookie_t)((uint64_t)rid));
14017 13874 }
14018 13875
14019 13876 ASSERT(new_rgnp == NULL);
14020 13877
14021 13878 if (*busyrgnsp >= maxids) {
14022 13879 mutex_exit(&srdp->srd_mutex);
14023 13880 return (HAT_INVALID_REGION_COOKIE);
14024 13881 }
14025 13882
14026 13883 ASSERT(MUTEX_HELD(&srdp->srd_mutex));
14027 13884 if (*freelistp != NULL) {
14028 13885 rgnp = *freelistp;
14029 13886 *freelistp = rgnp->rgn_next;
14030 13887 ASSERT(rgnp->rgn_id < *nextidp);
14031 13888 ASSERT(rgnp->rgn_id < maxids);
14032 13889 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE);
14033 13890 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK)
14034 13891 == r_type);
14035 13892 ASSERT(rarrp[rgnp->rgn_id] == rgnp);
14036 13893 ASSERT(rgnp->rgn_hmeflags == 0);
14037 13894 } else {
14038 13895 /*
14039 13896 * release local locks before memory allocation.
14040 13897 */
14041 13898 mutex_exit(&srdp->srd_mutex);
14042 13899
14043 13900 new_rgnp = kmem_cache_alloc(region_cache, KM_SLEEP);
14044 13901
14045 13902 mutex_enter(&srdp->srd_mutex);
14046 13903 for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL;
14047 13904 rgnp = rgnp->rgn_hash) {
14048 13905 if (rgnp->rgn_saddr == r_saddr &&
14049 13906 rgnp->rgn_size == r_size &&
14050 13907 rgnp->rgn_obj == r_obj &&
14051 13908 rgnp->rgn_objoff == r_objoff &&
14052 13909 rgnp->rgn_perm == r_perm &&
14053 13910 rgnp->rgn_pgszc == r_pgszc) {
14054 13911 break;
14055 13912 }
14056 13913 }
14057 13914 if (rgnp != NULL) {
14058 13915 goto rfound;
14059 13916 }
14060 13917
14061 13918 if (*nextidp >= maxids) {
14062 13919 mutex_exit(&srdp->srd_mutex);
14063 13920 goto fail;
14064 13921 }
14065 13922 rgnp = new_rgnp;
14066 13923 new_rgnp = NULL;
14067 13924 rgnp->rgn_id = (*nextidp)++;
14068 13925 ASSERT(rgnp->rgn_id < maxids);
14069 13926 ASSERT(rarrp[rgnp->rgn_id] == NULL);
14070 13927 rarrp[rgnp->rgn_id] = rgnp;
14071 13928 }
14072 13929
14073 13930 ASSERT(rgnp->rgn_sfmmu_head == NULL);
14074 13931 ASSERT(rgnp->rgn_hmeflags == 0);
14075 13932 #ifdef DEBUG
14076 13933 for (i = 0; i < MMU_PAGE_SIZES; i++) {
14077 13934 ASSERT(rgnp->rgn_ttecnt[i] == 0);
14078 13935 }
14079 13936 #endif
14080 13937 rgnp->rgn_saddr = r_saddr;
14081 13938 rgnp->rgn_size = r_size;
14082 13939 rgnp->rgn_obj = r_obj;
14083 13940 rgnp->rgn_objoff = r_objoff;
14084 13941 rgnp->rgn_perm = r_perm;
14085 13942 rgnp->rgn_pgszc = r_pgszc;
14086 13943 rgnp->rgn_flags = r_type;
14087 13944 rgnp->rgn_refcnt = 0;
14088 13945 rgnp->rgn_cb_function = r_cb_function;
14089 13946 rgnp->rgn_hash = srdp->srd_rgnhash[rhash];
14090 13947 srdp->srd_rgnhash[rhash] = rgnp;
14091 13948 (*busyrgnsp)++;
14092 13949 ASSERT(*busyrgnsp <= maxids);
14093 13950 goto rfound;
14094 13951
14095 13952 fail:
14096 13953 ASSERT(new_rgnp != NULL);
14097 13954 kmem_cache_free(region_cache, new_rgnp);
14098 13955 return (HAT_INVALID_REGION_COOKIE);
14099 13956 }
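
To make the cookie contract concrete, here is a hypothetical HME-region caller; only the hat_*() names, the HAT_REGION_TEXT flag, and the PROT_* bits come from this file, everything else (the function, its arguments) is illustrative:

#include <vm/hat.h>
#include <sys/mman.h>

hat_region_cookie_t
example_join_text_region(struct hat *hat, caddr_t saddr, size_t size,
    vnode_t *evp, uchar_t pgszc)
{
	hat_region_cookie_t rcookie;

	/*
	 * saddr and size must be TTEBYTES(pgszc)-aligned (misalignment
	 * panics), and r_perm must not include PROT_WRITE for an HME
	 * region, or the join is refused.
	 */
	rcookie = hat_join_region(hat, saddr, size, (void *)evp,
	    0,				/* r_objoff */
	    PROT_READ | PROT_EXEC,	/* r_perm */
	    pgszc,
	    NULL,			/* r_cb_function */
	    HAT_REGION_TEXT);

	/*
	 * HAT_INVALID_REGION_COOKIE means sharing is not possible
	 * (shctx off, no SRD, region table full, ...); callers fall
	 * back to private mappings.  A valid cookie must eventually be
	 * returned via hat_leave_region(hat, rcookie, HAT_REGION_TEXT).
	 */
	return (rcookie);
}
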
14100 13957
14101 13958 /*
14102 13959 * This function implements the shared context functionality required
14103 13960 * when detaching a segment from an address space. It must be called
14104 13961  * from hat_unshare() for all D(ISM) segments and from segvn_unmap()
14105 13962  * for segments with a valid region_cookie.
14106 13963  * It will also be called from all seg_vn routines which change a
14107 13964  * segment's attributes such as segvn_setprot(), segvn_setpagesize(),
14108 13965  * segvn_clrszc() & segvn_advise(), as well as in the case of a COW fault
14109 13966 * from segvn_fault().
14110 13967 */
14111 13968 void
14112 13969 hat_leave_region(struct hat *sfmmup, hat_region_cookie_t rcookie, uint_t flags)
14113 13970 {
14114 13971 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14115 13972 sf_scd_t *scdp;
14116 13973 uint_t rhash;
14117 13974 uint_t rid = (uint_t)((uint64_t)rcookie);
14118 13975 hatlock_t *hatlockp = NULL;
14119 13976 sf_region_t *rgnp;
14120 13977 sf_region_t **prev_rgnpp;
14121 13978 sf_region_t *cur_rgnp;
14122 13979 void *r_obj;
14123 13980 int i;
14124 13981 caddr_t r_saddr;
14125 13982 caddr_t r_eaddr;
14126 13983 size_t r_size;
14127 13984 uchar_t r_pgszc;
14128 13985 uchar_t r_type = flags & HAT_REGION_TYPE_MASK;
14129 13986
14130 13987 ASSERT(sfmmup != ksfmmup);
14131 13988 ASSERT(srdp != NULL);
14132 13989 ASSERT(srdp->srd_refcnt > 0);
14133 13990 ASSERT(!(flags & ~HAT_REGION_TYPE_MASK));
14134 13991 ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM);
14135 13992 ASSERT(!sfmmup->sfmmu_free || sfmmup->sfmmu_scdp == NULL);
14136 13993
14137 13994 r_type = (r_type == HAT_REGION_ISM) ? SFMMU_REGION_ISM :
14138 13995 SFMMU_REGION_HME;
14139 13996
14140 13997 if (r_type == SFMMU_REGION_ISM) {
14141 13998 ASSERT(SFMMU_IS_ISMRID_VALID(rid));
14142 13999 ASSERT(rid < SFMMU_MAX_ISM_REGIONS);
14143 14000 rgnp = srdp->srd_ismrgnp[rid];
14144 14001 } else {
14145 14002 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14146 14003 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
14147 14004 rgnp = srdp->srd_hmergnp[rid];
14148 14005 }
14149 14006 ASSERT(rgnp != NULL);
14150 14007 ASSERT(rgnp->rgn_id == rid);
14151 14008 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
14152 14009 ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE));
14153 14010 ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
14154 14011
14155 14012 if (sfmmup->sfmmu_free) {
14156 14013 ulong_t rttecnt;
14157 14014 r_pgszc = rgnp->rgn_pgszc;
14158 14015 r_size = rgnp->rgn_size;
14159 14016
14160 14017 ASSERT(sfmmup->sfmmu_scdp == NULL);
14161 14018 if (r_type == SFMMU_REGION_ISM) {
14162 14019 SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid);
14163 14020 } else {
14164 14021 /* update shme rgns ttecnt in sfmmu_ttecnt */
14165 14022 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc);
14166 14023 ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt);
14167 14024
14168 14025 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc],
14169 14026 -rttecnt);
14170 14027
14171 14028 SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid);
14172 14029 }
14173 14030 } else if (r_type == SFMMU_REGION_ISM) {
14174 14031 hatlockp = sfmmu_hat_enter(sfmmup);
14175 14032 ASSERT(rid < srdp->srd_next_ismrid);
14176 14033 SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid);
14177 14034 scdp = sfmmup->sfmmu_scdp;
14178 14035 if (scdp != NULL &&
14179 14036 SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) {
14180 14037 sfmmu_leave_scd(sfmmup, r_type);
14181 14038 ASSERT(sfmmu_hat_lock_held(sfmmup));
14182 14039 }
14183 14040 sfmmu_hat_exit(hatlockp);
14184 14041 } else {
14185 14042 ulong_t rttecnt;
14186 14043 r_pgszc = rgnp->rgn_pgszc;
14187 14044 r_saddr = rgnp->rgn_saddr;
14188 14045 r_size = rgnp->rgn_size;
14189 14046 r_eaddr = r_saddr + r_size;
14190 14047
14191 14048 ASSERT(r_type == SFMMU_REGION_HME);
14192 14049 hatlockp = sfmmu_hat_enter(sfmmup);
14193 14050 ASSERT(rid < srdp->srd_next_hmerid);
14194 14051 SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid);
14195 14052
14196 14053 /*
14197 14054 		 * If the region is part of an SCD, call sfmmu_leave_scd().
14198 14055 		 * Otherwise, if the process is not exiting and has a valid
14199 14056 		 * context, just drop the context on the floor to lose stale
14200 14057 		 * TLB entries and force an update of the tsb miss area to
14201 14058 		 * reflect the new region map. After that, clean our TSB entries.
14202 14059 */
14203 14060 scdp = sfmmup->sfmmu_scdp;
14204 14061 if (scdp != NULL &&
14205 14062 SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) {
14206 14063 sfmmu_leave_scd(sfmmup, r_type);
14207 14064 ASSERT(sfmmu_hat_lock_held(sfmmup));
14208 14065 }
14209 14066 sfmmu_invalidate_ctx(sfmmup);
14210 14067
14211 14068 i = TTE8K;
14212 14069 while (i < mmu_page_sizes) {
14213 14070 if (rgnp->rgn_ttecnt[i] != 0) {
14214 14071 sfmmu_unload_tsb_range(sfmmup, r_saddr,
14215 14072 r_eaddr, i);
14216 14073 if (i < TTE4M) {
14217 14074 i = TTE4M;
14218 14075 continue;
14219 14076 } else {
14220 14077 break;
14221 14078 }
14222 14079 }
14223 14080 i++;
14224 14081 }
14225 14082 /* Remove the preallocated 1/4 8k ttecnt for 4M regions. */
14226 14083 if (r_pgszc >= TTE4M) {
14227 14084 rttecnt = r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2);
14228 14085 ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >=
14229 14086 rttecnt);
14230 14087 sfmmup->sfmmu_tsb0_4minflcnt -= rttecnt;
14231 14088 }
14232 14089
14233 14090 /* update shme rgns ttecnt in sfmmu_ttecnt */
14234 14091 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc);
14235 14092 ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt);
14236 14093 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc], -rttecnt);
14237 14094
14238 14095 sfmmu_hat_exit(hatlockp);
14239 14096 if (scdp != NULL && sfmmup->sfmmu_scdp == NULL) {
14240 14097 /* sfmmup left the scd, grow private tsb */
14241 14098 sfmmu_check_page_sizes(sfmmup, 1);
14242 14099 } else {
14243 14100 sfmmu_check_page_sizes(sfmmup, 0);
14244 14101 }
14245 14102 }
14246 14103
14247 14104 if (r_type == SFMMU_REGION_HME) {
14248 14105 sfmmu_unlink_from_hmeregion(sfmmup, rgnp);
14249 14106 }
14250 14107
14251 14108 r_obj = rgnp->rgn_obj;
14252 14109 if (atomic_dec_32_nv((volatile uint_t *)&rgnp->rgn_refcnt)) {
14253 14110 return;
14254 14111 }
14255 14112
14256 14113 /*
14257 14114 	 * Looks like nobody uses this region anymore. Free it.
14258 14115 */
14259 14116 rhash = RGN_HASH_FUNCTION(r_obj);
14260 14117 mutex_enter(&srdp->srd_mutex);
14261 14118 for (prev_rgnpp = &srdp->srd_rgnhash[rhash];
14262 14119 (cur_rgnp = *prev_rgnpp) != NULL;
14263 14120 prev_rgnpp = &cur_rgnp->rgn_hash) {
14264 14121 if (cur_rgnp == rgnp && cur_rgnp->rgn_refcnt == 0) {
14265 14122 break;
14266 14123 }
14267 14124 }
14268 14125
14269 14126 if (cur_rgnp == NULL) {
14270 14127 mutex_exit(&srdp->srd_mutex);
14271 14128 return;
14272 14129 }
14273 14130
14274 14131 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
14275 14132 *prev_rgnpp = rgnp->rgn_hash;
14276 14133 if (r_type == SFMMU_REGION_ISM) {
14277 14134 rgnp->rgn_flags |= SFMMU_REGION_FREE;
14278 14135 ASSERT(rid < srdp->srd_next_ismrid);
14279 14136 rgnp->rgn_next = srdp->srd_ismrgnfree;
14280 14137 srdp->srd_ismrgnfree = rgnp;
14281 14138 ASSERT(srdp->srd_ismbusyrgns > 0);
14282 14139 srdp->srd_ismbusyrgns--;
14283 14140 mutex_exit(&srdp->srd_mutex);
14284 14141 return;
14285 14142 }
14286 14143 mutex_exit(&srdp->srd_mutex);
14287 14144
14288 14145 /*
14289 14146 * Destroy region's hmeblks.
14290 14147 */
14291 14148 sfmmu_unload_hmeregion(srdp, rgnp);
14292 14149
14293 14150 rgnp->rgn_hmeflags = 0;
14294 14151
14295 14152 ASSERT(rgnp->rgn_sfmmu_head == NULL);
14296 14153 ASSERT(rgnp->rgn_id == rid);
14297 14154 for (i = 0; i < MMU_PAGE_SIZES; i++) {
14298 14155 rgnp->rgn_ttecnt[i] = 0;
14299 14156 }
14300 14157 rgnp->rgn_flags |= SFMMU_REGION_FREE;
14301 14158 mutex_enter(&srdp->srd_mutex);
14302 14159 ASSERT(rid < srdp->srd_next_hmerid);
14303 14160 rgnp->rgn_next = srdp->srd_hmergnfree;
14304 14161 srdp->srd_hmergnfree = rgnp;
14305 14162 ASSERT(srdp->srd_hmebusyrgns > 0);
14306 14163 srdp->srd_hmebusyrgns--;
14307 14164 mutex_exit(&srdp->srd_mutex);
14308 14165 }
14309 14166
14310 14167 /*
14311 14168 * For now only called for hmeblk regions and not for ISM regions.
14312 14169 */
14313 14170 void
14314 14171 hat_dup_region(struct hat *sfmmup, hat_region_cookie_t rcookie)
14315 14172 {
14316 14173 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14317 14174 uint_t rid = (uint_t)((uint64_t)rcookie);
14318 14175 sf_region_t *rgnp;
14319 14176 sf_rgn_link_t *rlink;
14320 14177 sf_rgn_link_t *hrlink;
14321 14178 ulong_t rttecnt;
14322 14179
14323 14180 ASSERT(sfmmup != ksfmmup);
14324 14181 ASSERT(srdp != NULL);
14325 14182 ASSERT(srdp->srd_refcnt > 0);
14326 14183
14327 14184 ASSERT(rid < srdp->srd_next_hmerid);
14328 14185 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14329 14186 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
14330 14187
14331 14188 rgnp = srdp->srd_hmergnp[rid];
14332 14189 ASSERT(rgnp->rgn_refcnt > 0);
14333 14190 ASSERT(rgnp->rgn_id == rid);
14334 14191 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == SFMMU_REGION_HME);
14335 14192 ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE));
14336 14193
14337 14194 atomic_inc_32((volatile uint_t *)&rgnp->rgn_refcnt);
14338 14195
14339 14196 /* LINTED: constant in conditional context */
14340 14197 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 0);
14341 14198 ASSERT(rlink != NULL);
14342 14199 mutex_enter(&rgnp->rgn_mutex);
14343 14200 ASSERT(rgnp->rgn_sfmmu_head != NULL);
14344 14201 /* LINTED: constant in conditional context */
14345 14202 SFMMU_HMERID2RLINKP(rgnp->rgn_sfmmu_head, rid, hrlink, 0, 0);
14346 14203 ASSERT(hrlink != NULL);
14347 14204 ASSERT(hrlink->prev == NULL);
14348 14205 rlink->next = rgnp->rgn_sfmmu_head;
14349 14206 rlink->prev = NULL;
14350 14207 hrlink->prev = sfmmup;
14351 14208 /*
14352 14209 * make sure rlink's next field is correct
14353 14210 * before making this link visible.
14354 14211 */
14355 14212 membar_stst();
14356 14213 rgnp->rgn_sfmmu_head = sfmmup;
14357 14214 mutex_exit(&rgnp->rgn_mutex);
14358 14215
14359 14216 /* update sfmmu_ttecnt with the shme rgn ttecnt */
14360 14217 rttecnt = rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc);
14361 14218 atomic_add_long(&sfmmup->sfmmu_ttecnt[rgnp->rgn_pgszc], rttecnt);
14362 14219 /* update tsb0 inflation count */
14363 14220 if (rgnp->rgn_pgszc >= TTE4M) {
14364 14221 sfmmup->sfmmu_tsb0_4minflcnt +=
14365 14222 rgnp->rgn_size >> (TTE_PAGE_SHIFT(TTE8K) + 2);
14366 14223 }
14367 14224 /*
14368 14225 * Update regionid bitmask without hat lock since no other thread
14369 14226 * can update this region bitmask right now.
14370 14227 */
14371 14228 SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid);
14372 14229 }
14373 14230
14374 14231 /* ARGSUSED */
14375 14232 static int
14376 14233 sfmmu_rgncache_constructor(void *buf, void *cdrarg, int kmflags)
14377 14234 {
14378 14235 sf_region_t *rgnp = (sf_region_t *)buf;
14379 14236 bzero(buf, sizeof (*rgnp));
14380 14237
14381 14238 mutex_init(&rgnp->rgn_mutex, NULL, MUTEX_DEFAULT, NULL);
14382 14239
14383 14240 return (0);
14384 14241 }
14385 14242
14386 14243 /* ARGSUSED */
14387 14244 static void
14388 14245 sfmmu_rgncache_destructor(void *buf, void *cdrarg)
14389 14246 {
14390 14247 sf_region_t *rgnp = (sf_region_t *)buf;
14391 14248 mutex_destroy(&rgnp->rgn_mutex);
14392 14249 }
14393 14250
14394 14251 static int
14395 14252 sfrgnmap_isnull(sf_region_map_t *map)
14396 14253 {
14397 14254 int i;
14398 14255
14399 14256 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
14400 14257 if (map->bitmap[i] != 0) {
14401 14258 return (0);
14402 14259 }
14403 14260 }
14404 14261 return (1);
14405 14262 }
14406 14263
14407 14264 static int
14408 14265 sfhmergnmap_isnull(sf_hmeregion_map_t *map)
14409 14266 {
14410 14267 int i;
14411 14268
14412 14269 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) {
14413 14270 if (map->bitmap[i] != 0) {
14414 14271 return (0);
14415 14272 }
14416 14273 }
14417 14274 return (1);
14418 14275 }
14419 14276
14420 14277 #ifdef DEBUG
14421 14278 static void
14422 14279 check_scd_sfmmu_list(sfmmu_t **headp, sfmmu_t *sfmmup, int onlist)
14423 14280 {
14424 14281 sfmmu_t *sp;
14425 14282 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14426 14283
14427 14284 for (sp = *headp; sp != NULL; sp = sp->sfmmu_scd_link.next) {
14428 14285 ASSERT(srdp == sp->sfmmu_srdp);
14429 14286 if (sp == sfmmup) {
14430 14287 if (onlist) {
14431 14288 return;
14432 14289 } else {
14433 14290 panic("shctx: sfmmu 0x%p found on scd"
14434 14291 "list 0x%p", (void *)sfmmup,
14435 14292 (void *)*headp);
14436 14293 }
14437 14294 }
14438 14295 }
14439 14296 if (onlist) {
14440 14297 panic("shctx: sfmmu 0x%p not found on scd list 0x%p",
14441 14298 (void *)sfmmup, (void *)*headp);
14442 14299 } else {
14443 14300 return;
14444 14301 }
14445 14302 }
14446 14303 #else /* DEBUG */
14447 14304 #define check_scd_sfmmu_list(headp, sfmmup, onlist)
14448 14305 #endif /* DEBUG */
14449 14306
14450 14307 /*
14451 14308 * Removes an sfmmu from the SCD sfmmu list.
14452 14309 */
14453 14310 static void
14454 14311 sfmmu_from_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup)
14455 14312 {
14456 14313 ASSERT(sfmmup->sfmmu_srdp != NULL);
14457 14314 check_scd_sfmmu_list(headp, sfmmup, 1);
14458 14315 if (sfmmup->sfmmu_scd_link.prev != NULL) {
14459 14316 ASSERT(*headp != sfmmup);
14460 14317 sfmmup->sfmmu_scd_link.prev->sfmmu_scd_link.next =
14461 14318 sfmmup->sfmmu_scd_link.next;
14462 14319 } else {
14463 14320 ASSERT(*headp == sfmmup);
14464 14321 *headp = sfmmup->sfmmu_scd_link.next;
14465 14322 }
14466 14323 if (sfmmup->sfmmu_scd_link.next != NULL) {
14467 14324 sfmmup->sfmmu_scd_link.next->sfmmu_scd_link.prev =
14468 14325 sfmmup->sfmmu_scd_link.prev;
14469 14326 }
14470 14327 }
14471 14328
14472 14329
14473 14330 /*
14474 14331 * Adds an sfmmu to the start of the queue.
14475 14332 */
14476 14333 static void
14477 14334 sfmmu_to_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup)
14478 14335 {
14479 14336 check_scd_sfmmu_list(headp, sfmmup, 0);
14480 14337 sfmmup->sfmmu_scd_link.prev = NULL;
14481 14338 sfmmup->sfmmu_scd_link.next = *headp;
14482 14339 if (*headp != NULL)
14483 14340 (*headp)->sfmmu_scd_link.prev = sfmmup;
14484 14341 *headp = sfmmup;
14485 14342 }
14486 14343
14487 14344 /*
14488 14345  * Remove an scd from the queue.
14489 14346 */
14490 14347 static void
14491 14348 sfmmu_remove_scd(sf_scd_t **headp, sf_scd_t *scdp)
14492 14349 {
14493 14350 if (scdp->scd_prev != NULL) {
14494 14351 ASSERT(*headp != scdp);
14495 14352 scdp->scd_prev->scd_next = scdp->scd_next;
14496 14353 } else {
14497 14354 ASSERT(*headp == scdp);
14498 14355 *headp = scdp->scd_next;
14499 14356 }
14500 14357
14501 14358 if (scdp->scd_next != NULL) {
14502 14359 scdp->scd_next->scd_prev = scdp->scd_prev;
14503 14360 }
14504 14361 }
14505 14362
14506 14363 /*
14507 14364 * Add an scd to the start of the queue.
14508 14365 */
14509 14366 static void
14510 14367 sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *scdp)
14511 14368 {
14512 14369 scdp->scd_prev = NULL;
14513 14370 scdp->scd_next = *headp;
14514 14371 if (*headp != NULL) {
14515 14372 (*headp)->scd_prev = scdp;
14516 14373 }
14517 14374 *headp = scdp;
14518 14375 }
14519 14376
14520 14377 static int
14521 14378 sfmmu_alloc_scd_tsbs(sf_srd_t *srdp, sf_scd_t *scdp)
14522 14379 {
14523 14380 uint_t rid;
14524 14381 uint_t i;
14525 14382 uint_t j;
14526 14383 ulong_t w;
14527 14384 sf_region_t *rgnp;
14528 14385 ulong_t tte8k_cnt = 0;
14529 14386 ulong_t tte4m_cnt = 0;
14530 14387 uint_t tsb_szc;
14531 14388 sfmmu_t *scsfmmup = scdp->scd_sfmmup;
14532 14389 sfmmu_t *ism_hatid;
14533 14390 struct tsb_info *newtsb;
14534 14391 int szc;
14535 14392
14536 14393 ASSERT(srdp != NULL);
14537 14394
14538 14395 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
14539 14396 if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
14540 14397 continue;
14541 14398 }
14542 14399 j = 0;
14543 14400 while (w) {
14544 14401 if (!(w & 0x1)) {
14545 14402 j++;
14546 14403 w >>= 1;
14547 14404 continue;
14548 14405 }
14549 14406 rid = (i << BT_ULSHIFT) | j;
14550 14407 j++;
14551 14408 w >>= 1;
14552 14409
14553 14410 if (rid < SFMMU_MAX_HME_REGIONS) {
14554 14411 rgnp = srdp->srd_hmergnp[rid];
14555 14412 ASSERT(rgnp->rgn_id == rid);
14556 14413 ASSERT(rgnp->rgn_refcnt > 0);
14557 14414
14558 14415 if (rgnp->rgn_pgszc < TTE4M) {
14559 14416 tte8k_cnt += rgnp->rgn_size >>
14560 14417 TTE_PAGE_SHIFT(TTE8K);
14561 14418 } else {
14562 14419 ASSERT(rgnp->rgn_pgszc >= TTE4M);
14563 14420 tte4m_cnt += rgnp->rgn_size >>
14564 14421 TTE_PAGE_SHIFT(TTE4M);
14565 14422 /*
14566 14423 * Inflate SCD tsb0 by preallocating
14567 14424 * 1/4 8k ttecnt for 4M regions to
14568 14425 * allow for lgpg alloc failure.
14569 14426 */
14570 14427 tte8k_cnt += rgnp->rgn_size >>
14571 14428 (TTE_PAGE_SHIFT(TTE8K) + 2);
14572 14429 }
14573 14430 } else {
14574 14431 rid -= SFMMU_MAX_HME_REGIONS;
14575 14432 rgnp = srdp->srd_ismrgnp[rid];
14576 14433 ASSERT(rgnp->rgn_id == rid);
14577 14434 ASSERT(rgnp->rgn_refcnt > 0);
14578 14435
14579 14436 ism_hatid = (sfmmu_t *)rgnp->rgn_obj;
14580 14437 ASSERT(ism_hatid->sfmmu_ismhat);
14581 14438
14582 14439 for (szc = 0; szc < TTE4M; szc++) {
14583 14440 tte8k_cnt +=
14584 14441 ism_hatid->sfmmu_ttecnt[szc] <<
14585 14442 TTE_BSZS_SHIFT(szc);
14586 14443 }
14587 14444
14588 14445 ASSERT(rgnp->rgn_pgszc >= TTE4M);
14589 14446 if (rgnp->rgn_pgszc >= TTE4M) {
14590 14447 tte4m_cnt += rgnp->rgn_size >>
14591 14448 TTE_PAGE_SHIFT(TTE4M);
14592 14449 }
14593 14450 }
14594 14451 }
14595 14452 }
14596 14453
14597 14454 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt);
14598 14455
14599 14456 /* Allocate both the SCD TSBs here. */
14600 14457 if (sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb,
14601 14458 tsb_szc, TSB8K|TSB64K|TSB512K, TSB_ALLOC, scsfmmup) &&
14602 14459 (tsb_szc <= TSB_4M_SZCODE ||
14603 14460 sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb,
14604 14461 TSB_4M_SZCODE, TSB8K|TSB64K|TSB512K,
14605 14462 TSB_ALLOC, scsfmmup))) {
14606 14463
14607 14464 SFMMU_STAT(sf_scd_1sttsb_allocfail);
14608 14465 return (TSB_ALLOCFAIL);
14609 14466 } else {
14610 14467 scsfmmup->sfmmu_tsb->tsb_flags |= TSB_SHAREDCTX;
14611 14468
14612 14469 if (tte4m_cnt) {
14613 14470 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt);
14614 14471 if (sfmmu_tsbinfo_alloc(&newtsb, tsb_szc,
14615 14472 TSB4M|TSB32M|TSB256M, TSB_ALLOC, scsfmmup) &&
14616 14473 (tsb_szc <= TSB_4M_SZCODE ||
14617 14474 sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE,
14618 14475 TSB4M|TSB32M|TSB256M,
14619 14476 TSB_ALLOC, scsfmmup))) {
14620 14477 /*
14621 14478 * If we fail to allocate the 2nd shared tsb,
14622 14479 * just free the 1st tsb, return failure.
14623 14480 */
14624 14481 sfmmu_tsbinfo_free(scsfmmup->sfmmu_tsb);
14625 14482 SFMMU_STAT(sf_scd_2ndtsb_allocfail);
14626 14483 return (TSB_ALLOCFAIL);
14627 14484 } else {
14628 14485 ASSERT(scsfmmup->sfmmu_tsb->tsb_next == NULL);
14629 14486 newtsb->tsb_flags |= TSB_SHAREDCTX;
14630 14487 scsfmmup->sfmmu_tsb->tsb_next = newtsb;
14631 14488 SFMMU_STAT(sf_scd_2ndtsb_alloc);
14632 14489 }
14633 14490 }
14634 14491 SFMMU_STAT(sf_scd_1sttsb_alloc);
14635 14492 }
14636 14493 return (TSB_SUCCESS);
14637 14494 }
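
The region walk above is the standard sfmmu bitmap idiom: scan each word of
the SCD region map, shift set bits out one at a time, and rebuild the region
id from the word index and the bit position. A minimal standalone sketch of
the same decoding, assuming a hypothetical visit_rid() callback (BT_ULSHIFT
comes from <sys/bitmap.h>, 6 for 64-bit words):

/*
 * Sketch only: hand every region id encoded in map[0..nwords-1]
 * to visit_rid(), mirroring the loops in sfmmu_alloc_scd_tsbs().
 */
static void
walk_region_map(ulong_t *map, uint_t nwords, void (*visit_rid)(uint_t))
{
	uint_t i, j, rid;
	ulong_t w;

	for (i = 0; i < nwords; i++) {
		if ((w = map[i]) == 0)
			continue;		/* no set bits in this word */
		for (j = 0; w != 0; j++, w >>= 1) {
			if (w & 0x1) {
				rid = (i << BT_ULSHIFT) | j;
				visit_rid(rid);
			}
		}
	}
}
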
14638 14495
14639 14496 static void
14640 14497 sfmmu_free_scd_tsbs(sfmmu_t *scd_sfmmu)
14641 14498 {
14642 14499 while (scd_sfmmu->sfmmu_tsb != NULL) {
14643 14500 struct tsb_info *next = scd_sfmmu->sfmmu_tsb->tsb_next;
14644 14501 sfmmu_tsbinfo_free(scd_sfmmu->sfmmu_tsb);
14645 14502 scd_sfmmu->sfmmu_tsb = next;
14646 14503 }
14647 14504 }
14648 14505
14649 14506 /*
14650 14507 * Link the sfmmu onto the hme region list.
14651 14508 */
14652 14509 void
14653 14510 sfmmu_link_to_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp)
14654 14511 {
14655 14512 uint_t rid;
14656 14513 sf_rgn_link_t *rlink;
14657 14514 sfmmu_t *head;
14658 14515 sf_rgn_link_t *hrlink;
14659 14516
14660 14517 rid = rgnp->rgn_id;
14661 14518 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14662 14519
14663 14520 /* LINTED: constant in conditional context */
14664 14521 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 1);
14665 14522 ASSERT(rlink != NULL);
14666 14523 mutex_enter(&rgnp->rgn_mutex);
14667 14524 if ((head = rgnp->rgn_sfmmu_head) == NULL) {
14668 14525 rlink->next = NULL;
14669 14526 rlink->prev = NULL;
14670 14527 /*
14671 14528 * make sure rlink's next field is NULL
14672 14529 * before making this link visible.
14673 14530 */
14674 14531 membar_stst();
14675 14532 rgnp->rgn_sfmmu_head = sfmmup;
14676 14533 } else {
14677 14534 /* LINTED: constant in conditional context */
14678 14535 SFMMU_HMERID2RLINKP(head, rid, hrlink, 0, 0);
14679 14536 ASSERT(hrlink != NULL);
14680 14537 ASSERT(hrlink->prev == NULL);
14681 14538 rlink->next = head;
14682 14539 rlink->prev = NULL;
14683 14540 hrlink->prev = sfmmup;
14684 14541 /*
14685 14542 * make sure rlink's next field is correct
14686 14543 * before making this link visible.
14687 14544 */
14688 14545 membar_stst();
14689 14546 rgnp->rgn_sfmmu_head = sfmmup;
14690 14547 }
14691 14548 mutex_exit(&rgnp->rgn_mutex);
14692 14549 }
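
Both branches above use the same lock-free publication pattern: initialize
the new link completely, execute membar_stst() so those stores are ordered
before the store that makes the node reachable, and only then swing
rgn_sfmmu_head. The forward walkers in the TLB demap cross-call can
therefore traverse the list without taking rgn_mutex. The bare pattern, as a
sketch with an illustrative node type:

typedef struct node {
	struct node *next;
	struct node *prev;
} node_t;

/* Sketch only: publish `np' at the head of a list readers walk lock-free. */
static void
publish_head(node_t **headp, node_t *np)
{
	np->next = *headp;	/* 1. fill in the node completely */
	np->prev = NULL;
	membar_stst();		/* 2. order the inits before the publish */
	*headp = np;		/* 3. readers now find a fully formed node */
}
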
14693 14550
14694 14551 /*
14695 14552 * Unlink the sfmmu from the hme region list.
14696 14553 */
14697 14554 void
14698 14555 sfmmu_unlink_from_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp)
14699 14556 {
14700 14557 uint_t rid;
14701 14558 sf_rgn_link_t *rlink;
14702 14559
14703 14560 rid = rgnp->rgn_id;
14704 14561 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14705 14562
14706 14563 /* LINTED: constant in conditional context */
14707 14564 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0);
14708 14565 ASSERT(rlink != NULL);
14709 14566 mutex_enter(&rgnp->rgn_mutex);
14710 14567 if (rgnp->rgn_sfmmu_head == sfmmup) {
14711 14568 sfmmu_t *next = rlink->next;
14712 14569 rgnp->rgn_sfmmu_head = next;
14713 14570 /*
14714 14571 * if we are stopped by xc_attention() after this
14715 14572 * point the forward link walking in
14716 14573 * sfmmu_rgntlb_demap() will work correctly since the
14717 14574 * head correctly points to the next element.
14718 14575 */
14719 14576 membar_stst();
14720 14577 rlink->next = NULL;
14721 14578 ASSERT(rlink->prev == NULL);
14722 14579 if (next != NULL) {
14723 14580 sf_rgn_link_t *nrlink;
14724 14581 /* LINTED: constant in conditional context */
14725 14582 SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0);
14726 14583 ASSERT(nrlink != NULL);
14727 14584 ASSERT(nrlink->prev == sfmmup);
14728 14585 nrlink->prev = NULL;
14729 14586 }
14730 14587 } else {
14731 14588 sfmmu_t *next = rlink->next;
14732 14589 sfmmu_t *prev = rlink->prev;
14733 14590 sf_rgn_link_t *prlink;
14734 14591
14735 14592 ASSERT(prev != NULL);
14736 14593 /* LINTED: constant in conditional context */
14737 14594 SFMMU_HMERID2RLINKP(prev, rid, prlink, 0, 0);
14738 14595 ASSERT(prlink != NULL);
14739 14596 ASSERT(prlink->next == sfmmup);
14740 14597 prlink->next = next;
14741 14598 /*
14742 14599 * if we are stopped by xc_attention()
14743 14600 * after this point the forward link walking
14744 14601 * will work correctly since the prev element
14745 14602 * correctly points to the next element.
14746 14603 */
14747 14604 membar_stst();
14748 14605 rlink->next = NULL;
14749 14606 rlink->prev = NULL;
14750 14607 if (next != NULL) {
14751 14608 sf_rgn_link_t *nrlink;
14752 14609 /* LINTED: constant in conditional context */
14753 14610 SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0);
14754 14611 ASSERT(nrlink != NULL);
14755 14612 ASSERT(nrlink->prev == sfmmup);
14756 14613 nrlink->prev = prev;
14757 14614 }
14758 14615 }
14759 14616 mutex_exit(&rgnp->rgn_mutex);
14760 14617 }
14761 14618
14762 14619 /*
14763 14620 * Link scd sfmmu onto ism or hme region list for each region in the
14764 14621 * scd region map.
14765 14622 */
14766 14623 void
14767 14624 sfmmu_link_scd_to_regions(sf_srd_t *srdp, sf_scd_t *scdp)
14768 14625 {
14769 14626 uint_t rid;
14770 14627 uint_t i;
14771 14628 uint_t j;
14772 14629 ulong_t w;
14773 14630 sf_region_t *rgnp;
14774 14631 sfmmu_t *scsfmmup;
14775 14632
14776 14633 scsfmmup = scdp->scd_sfmmup;
14777 14634 ASSERT(scsfmmup->sfmmu_scdhat);
14778 14635 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
14779 14636 if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
14780 14637 continue;
14781 14638 }
14782 14639 j = 0;
14783 14640 while (w) {
14784 14641 if (!(w & 0x1)) {
14785 14642 j++;
14786 14643 w >>= 1;
14787 14644 continue;
14788 14645 }
14789 14646 rid = (i << BT_ULSHIFT) | j;
14790 14647 j++;
14791 14648 w >>= 1;
14792 14649
14793 14650 if (rid < SFMMU_MAX_HME_REGIONS) {
14794 14651 rgnp = srdp->srd_hmergnp[rid];
14795 14652 ASSERT(rgnp->rgn_id == rid);
14796 14653 ASSERT(rgnp->rgn_refcnt > 0);
14797 14654 sfmmu_link_to_hmeregion(scsfmmup, rgnp);
14798 14655 } else {
14799 14656 sfmmu_t *ism_hatid = NULL;
14800 14657 ism_ment_t *ism_ment;
14801 14658 rid -= SFMMU_MAX_HME_REGIONS;
14802 14659 rgnp = srdp->srd_ismrgnp[rid];
14803 14660 ASSERT(rgnp->rgn_id == rid);
14804 14661 ASSERT(rgnp->rgn_refcnt > 0);
14805 14662
14806 14663 ism_hatid = (sfmmu_t *)rgnp->rgn_obj;
14807 14664 ASSERT(ism_hatid->sfmmu_ismhat);
14808 14665 ism_ment = &scdp->scd_ism_links[rid];
14809 14666 ism_ment->iment_hat = scsfmmup;
14810 14667 ism_ment->iment_base_va = rgnp->rgn_saddr;
14811 14668 mutex_enter(&ism_mlist_lock);
14812 14669 iment_add(ism_ment, ism_hatid);
14813 14670 mutex_exit(&ism_mlist_lock);
14814 14671
14815 14672 }
14816 14673 }
14817 14674 }
14818 14675 }
14819 14676 /*
14820 14677 * Unlink scd sfmmu from ism or hme region list for each region in the
14821 14678 * scd region map.
14822 14679 */
14823 14680 void
14824 14681 sfmmu_unlink_scd_from_regions(sf_srd_t *srdp, sf_scd_t *scdp)
14825 14682 {
14826 14683 uint_t rid;
14827 14684 uint_t i;
14828 14685 uint_t j;
14829 14686 ulong_t w;
14830 14687 sf_region_t *rgnp;
14831 14688 sfmmu_t *scsfmmup;
14832 14689
14833 14690 scsfmmup = scdp->scd_sfmmup;
14834 14691 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
14835 14692 if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
14836 14693 continue;
14837 14694 }
14838 14695 j = 0;
14839 14696 while (w) {
14840 14697 if (!(w & 0x1)) {
14841 14698 j++;
14842 14699 w >>= 1;
14843 14700 continue;
14844 14701 }
14845 14702 rid = (i << BT_ULSHIFT) | j;
14846 14703 j++;
14847 14704 w >>= 1;
14848 14705
14849 14706 if (rid < SFMMU_MAX_HME_REGIONS) {
14850 14707 rgnp = srdp->srd_hmergnp[rid];
14851 14708 ASSERT(rgnp->rgn_id == rid);
14852 14709 ASSERT(rgnp->rgn_refcnt > 0);
14853 14710 sfmmu_unlink_from_hmeregion(scsfmmup,
14854 14711 rgnp);
14855 14712
14856 14713 } else {
14857 14714 sfmmu_t *ism_hatid = NULL;
14858 14715 ism_ment_t *ism_ment;
14859 14716 rid -= SFMMU_MAX_HME_REGIONS;
14860 14717 rgnp = srdp->srd_ismrgnp[rid];
14861 14718 ASSERT(rgnp->rgn_id == rid);
14862 14719 ASSERT(rgnp->rgn_refcnt > 0);
14863 14720
14864 14721 ism_hatid = (sfmmu_t *)rgnp->rgn_obj;
14865 14722 ASSERT(ism_hatid->sfmmu_ismhat);
14866 14723 ism_ment = &scdp->scd_ism_links[rid];
14867 14724 ASSERT(ism_ment->iment_hat == scdp->scd_sfmmup);
14868 14725 ASSERT(ism_ment->iment_base_va ==
14869 14726 rgnp->rgn_saddr);
14870 14727 mutex_enter(&ism_mlist_lock);
14871 14728 iment_sub(ism_ment, ism_hatid);
14872 14729 mutex_exit(&ism_mlist_lock);
14873 14730
14874 14731 }
14875 14732 }
14876 14733 }
14877 14734 }
14878 14735 /*
14879 14736  * Allocates and initialises a new SCD structure. This is called with
14880 14737 * the srd_scd_mutex held and returns with the reference count
14881 14738 * initialised to 1.
14882 14739 */
14883 14740 static sf_scd_t *
14884 14741 sfmmu_alloc_scd(sf_srd_t *srdp, sf_region_map_t *new_map)
14885 14742 {
14886 14743 sf_scd_t *new_scdp;
14887 14744 sfmmu_t *scsfmmup;
14888 14745 int i;
14889 14746
14890 14747 ASSERT(MUTEX_HELD(&srdp->srd_scd_mutex));
14891 14748 new_scdp = kmem_cache_alloc(scd_cache, KM_SLEEP);
14892 14749
14893 14750 scsfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP);
14894 14751 new_scdp->scd_sfmmup = scsfmmup;
14895 14752 scsfmmup->sfmmu_srdp = srdp;
14896 14753 scsfmmup->sfmmu_scdp = new_scdp;
14897 14754 scsfmmup->sfmmu_tsb0_4minflcnt = 0;
14898 14755 scsfmmup->sfmmu_scdhat = 1;
14899 14756 CPUSET_ALL(scsfmmup->sfmmu_cpusran);
14900 14757 bzero(scsfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE);
14901 14758
14902 14759 ASSERT(max_mmu_ctxdoms > 0);
14903 14760 for (i = 0; i < max_mmu_ctxdoms; i++) {
14904 14761 scsfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT;
14905 14762 scsfmmup->sfmmu_ctxs[i].gnum = 0;
14906 14763 }
14907 14764
14908 14765 for (i = 0; i < MMU_PAGE_SIZES; i++) {
14909 14766 new_scdp->scd_rttecnt[i] = 0;
14910 14767 }
14911 14768
14912 14769 new_scdp->scd_region_map = *new_map;
14913 14770 new_scdp->scd_refcnt = 1;
14914 14771 if (sfmmu_alloc_scd_tsbs(srdp, new_scdp) != TSB_SUCCESS) {
14915 14772 kmem_cache_free(scd_cache, new_scdp);
14916 14773 kmem_cache_free(sfmmuid_cache, scsfmmup);
14917 14774 return (NULL);
14918 14775 }
14919 14776 if (&mmu_init_scd) {
14920 14777 mmu_init_scd(new_scdp);
14921 14778 }
14922 14779 return (new_scdp);
14923 14780 }
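
The "if (&mmu_init_scd)" guard near the end of the function above is the
usual sfmmu weak-symbol test: mmu_init_scd is an optional platform hook, and
its address is nonzero only when the platform module supplies an
implementation, so the hook is invoked only where it exists.
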
14924 14781
14925 14782 /*
14926 14783 * The first phase of a process joining an SCD. The hat structure is
14927 14784 * linked to the SCD queue and then the HAT_JOIN_SCD sfmmu flag is set
14928 14785 * and a cross-call with context invalidation is used to cause the
14929 14786 * remaining work to be carried out in the sfmmu_tsbmiss_exception()
14930 14787 * routine.
14931 14788 */
14932 14789 static void
14933 14790 sfmmu_join_scd(sf_scd_t *scdp, sfmmu_t *sfmmup)
14934 14791 {
14935 14792 hatlock_t *hatlockp;
14936 14793 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14937 14794 int i;
14938 14795 sf_scd_t *old_scdp;
14939 14796
14940 14797 ASSERT(srdp != NULL);
14941 14798 ASSERT(scdp != NULL);
14942 14799 ASSERT(scdp->scd_refcnt > 0);
14943 14800 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
14944 14801
14945 14802 if ((old_scdp = sfmmup->sfmmu_scdp) != NULL) {
14946 14803 ASSERT(old_scdp != scdp);
14947 14804
14948 14805 mutex_enter(&old_scdp->scd_mutex);
14949 14806 sfmmu_from_scd_list(&old_scdp->scd_sf_list, sfmmup);
14950 14807 mutex_exit(&old_scdp->scd_mutex);
14951 14808 /*
14952 14809 * sfmmup leaves the old scd. Update sfmmu_ttecnt to
14953 14810 * include the shme rgn ttecnt for rgns that
14954 14811 * were in the old SCD
14955 14812 */
14956 14813 for (i = 0; i < mmu_page_sizes; i++) {
14957 14814 ASSERT(sfmmup->sfmmu_scdrttecnt[i] ==
14958 14815 old_scdp->scd_rttecnt[i]);
14959 14816 atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
14960 14817 sfmmup->sfmmu_scdrttecnt[i]);
14961 14818 }
14962 14819 }
14963 14820
14964 14821 /*
14965 14822 * Move sfmmu to the scd lists.
14966 14823 */
14967 14824 mutex_enter(&scdp->scd_mutex);
14968 14825 sfmmu_to_scd_list(&scdp->scd_sf_list, sfmmup);
14969 14826 mutex_exit(&scdp->scd_mutex);
14970 14827 SF_SCD_INCR_REF(scdp);
14971 14828
14972 14829 hatlockp = sfmmu_hat_enter(sfmmup);
14973 14830 /*
14974 14831 * For a multi-thread process, we must stop
14975 14832 * all the other threads before joining the scd.
14976 14833 */
14977 14834
14978 14835 SFMMU_FLAGS_SET(sfmmup, HAT_JOIN_SCD);
14979 14836
14980 14837 sfmmu_invalidate_ctx(sfmmup);
14981 14838 sfmmup->sfmmu_scdp = scdp;
14982 14839
14983 14840 /*
14984 14841 * Copy scd_rttecnt into sfmmup's sfmmu_scdrttecnt, and update
14985 14842 * sfmmu_ttecnt to not include the rgn ttecnt just joined in SCD.
14986 14843 */
14987 14844 for (i = 0; i < mmu_page_sizes; i++) {
14988 14845 sfmmup->sfmmu_scdrttecnt[i] = scdp->scd_rttecnt[i];
14989 14846 ASSERT(sfmmup->sfmmu_ttecnt[i] >= scdp->scd_rttecnt[i]);
14990 14847 atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
14991 14848 -sfmmup->sfmmu_scdrttecnt[i]);
14992 14849 }
14993 14850 /* update tsb0 inflation count */
14994 14851 if (old_scdp != NULL) {
14995 14852 sfmmup->sfmmu_tsb0_4minflcnt +=
14996 14853 old_scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt;
14997 14854 }
14998 14855 ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >=
14999 14856 scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt);
15000 14857 sfmmup->sfmmu_tsb0_4minflcnt -= scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt;
15001 14858
15002 14859 sfmmu_hat_exit(hatlockp);
15003 14860
15004 14861 if (old_scdp != NULL) {
15005 14862 SF_SCD_DECR_REF(srdp, old_scdp);
15006 14863 }
15007 14864
15008 14865 }
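
The join moves the shared-region translation counts out of the hat's private
accounting: scd_rttecnt[] is copied into sfmmu_scdrttecnt[] and the same
amounts are subtracted from sfmmu_ttecnt[], so the per-pagesize total of
private plus shared counts is conserved. A worked example with hypothetical
numbers: if a hat tracks 10000 8K ttes, 4000 of which belong to regions
covered by the SCD (scd_rttecnt[TTE8K] == 4000), then after the join

	sfmmu_scdrttecnt[TTE8K] == 4000		(copied from the SCD)
	sfmmu_ttecnt[TTE8K]     == 6000		(10000 - 4000, private only)

and sfmmu_leave_scd() adds the 4000 back, restoring the original total.
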
15009 14866
15010 14867 /*
15011 14868 * This routine is called by a process to become part of an SCD. It is called
15012 14869 * from sfmmu_tsbmiss_exception() once most of the initial work has been
15013 14870 * done by sfmmu_join_scd(). This routine must not drop the hat lock.
15014 14871 */
15015 14872 static void
15016 14873 sfmmu_finish_join_scd(sfmmu_t *sfmmup)
15017 14874 {
15018 14875 struct tsb_info *tsbinfop;
15019 14876
15020 14877 ASSERT(sfmmu_hat_lock_held(sfmmup));
15021 14878 ASSERT(sfmmup->sfmmu_scdp != NULL);
15022 14879 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD));
15023 14880 ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
15024 14881 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID));
15025 14882
15026 14883 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
15027 14884 tsbinfop = tsbinfop->tsb_next) {
15028 14885 if (tsbinfop->tsb_flags & TSB_SWAPPED) {
15029 14886 continue;
15030 14887 }
15031 14888 ASSERT(!(tsbinfop->tsb_flags & TSB_RELOC_FLAG));
15032 14889
15033 14890 sfmmu_inv_tsb(tsbinfop->tsb_va,
15034 14891 TSB_BYTES(tsbinfop->tsb_szc));
15035 14892 }
15036 14893
15037 14894 /* Set HAT_CTX1_FLAG for all SCD ISMs */
15038 14895 sfmmu_ism_hatflags(sfmmup, 1);
15039 14896
15040 14897 SFMMU_STAT(sf_join_scd);
15041 14898 }
15042 14899
15043 14900 /*
15044 14901 * This routine is called in order to check if there is an SCD which matches
15045 14902  * This routine is called to check if there is an SCD which matches
15046 14903  * the process's region map; if not, a new SCD may be created.
15047 14904 static void
15048 14905 sfmmu_find_scd(sfmmu_t *sfmmup)
15049 14906 {
15050 14907 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
15051 14908 sf_scd_t *scdp, *new_scdp;
15052 14909 int ret;
15053 14910
15054 14911 ASSERT(srdp != NULL);
15055 14912 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
15056 14913
15057 14914 mutex_enter(&srdp->srd_scd_mutex);
15058 14915 for (scdp = srdp->srd_scdp; scdp != NULL;
15059 14916 scdp = scdp->scd_next) {
15060 14917 SF_RGNMAP_EQUAL(&scdp->scd_region_map,
15061 14918 &sfmmup->sfmmu_region_map, ret);
15062 14919 if (ret == 1) {
15063 14920 SF_SCD_INCR_REF(scdp);
15064 14921 mutex_exit(&srdp->srd_scd_mutex);
15065 14922 sfmmu_join_scd(scdp, sfmmup);
15066 14923 ASSERT(scdp->scd_refcnt >= 2);
15067 14924 atomic_dec_32((volatile uint32_t *)&scdp->scd_refcnt);
15068 14925 return;
15069 14926 } else {
15070 14927 /*
15071 14928 * If the sfmmu region map is a subset of the scd
15072 14929 * region map, then the assumption is that this process
15073 14930 * will continue attaching to ISM segments until the
15074 14931 * region maps are equal.
15075 14932 */
15076 14933 SF_RGNMAP_IS_SUBSET(&scdp->scd_region_map,
15077 14934 &sfmmup->sfmmu_region_map, ret);
15078 14935 if (ret == 1) {
15079 14936 mutex_exit(&srdp->srd_scd_mutex);
15080 14937 return;
15081 14938 }
15082 14939 }
15083 14940 }
15084 14941
15085 14942 ASSERT(scdp == NULL);
15086 14943 /*
15087 14944 * No matching SCD has been found, create a new one.
15088 14945 */
15089 14946 if ((new_scdp = sfmmu_alloc_scd(srdp, &sfmmup->sfmmu_region_map)) ==
15090 14947 NULL) {
15091 14948 mutex_exit(&srdp->srd_scd_mutex);
15092 14949 return;
15093 14950 }
15094 14951
15095 14952 /*
15096 14953 * sfmmu_alloc_scd() returns with a ref count of 1 on the scd.
15097 14954 */
15098 14955
15099 14956 /* Set scd_rttecnt for shme rgns in SCD */
15100 14957 sfmmu_set_scd_rttecnt(srdp, new_scdp);
15101 14958
15102 14959 /*
15103 14960 * Link scd onto srd_scdp list and scd sfmmu onto region/iment lists.
15104 14961 */
15105 14962 sfmmu_link_scd_to_regions(srdp, new_scdp);
15106 14963 sfmmu_add_scd(&srdp->srd_scdp, new_scdp);
15107 14964 SFMMU_STAT_ADD(sf_create_scd, 1);
15108 14965
15109 14966 mutex_exit(&srdp->srd_scd_mutex);
15110 14967 sfmmu_join_scd(new_scdp, sfmmup);
15111 14968 ASSERT(new_scdp->scd_refcnt >= 2);
15112 14969 atomic_dec_32((volatile uint32_t *)&new_scdp->scd_refcnt);
15113 14970 }
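
Note the reference handoff in the lookup above: SF_SCD_INCR_REF() pins the
SCD while srd_scd_mutex is still held, sfmmu_join_scd() then takes the
joiner's own reference, and the temporary hold is dropped with
atomic_dec_32(). A minimal sketch of the pattern, assuming hypothetical
lookup()/attach() helpers and an obj_hold()/obj_rele() pair:

	mutex_enter(&list_lock);
	obj = lookup(key);
	if (obj != NULL)
		obj_hold(obj);		/* pin before dropping the lock */
	mutex_exit(&list_lock);

	if (obj != NULL) {
		attach(obj);		/* takes its own reference */
		obj_rele(obj);		/* drop the temporary lookup hold */
	}
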
15114 14971
15115 14972 /*
15116 14973 * This routine is called by a process to remove itself from an SCD. It is
15117 14974  * either called when the process has detached from a segment or from
15118 14975 * hat_free_start() as a result of calling exit.
15119 14976 */
15120 14977 static void
15121 14978 sfmmu_leave_scd(sfmmu_t *sfmmup, uchar_t r_type)
15122 14979 {
15123 14980 sf_scd_t *scdp = sfmmup->sfmmu_scdp;
15124 14981 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
15125 14982 hatlock_t *hatlockp = TSB_HASH(sfmmup);
15126 14983 int i;
15127 14984
15128 14985 ASSERT(scdp != NULL);
15129 14986 ASSERT(srdp != NULL);
15130 14987
15131 14988 if (sfmmup->sfmmu_free) {
15132 14989 /*
15133 14990 * If the process is part of an SCD the sfmmu is unlinked
15134 14991 * from scd_sf_list.
15135 14992 */
15136 14993 mutex_enter(&scdp->scd_mutex);
15137 14994 sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup);
15138 14995 mutex_exit(&scdp->scd_mutex);
15139 14996 /*
15140 14997 * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that
15141 14998 * are about to leave the SCD
15142 14999 */
15143 15000 for (i = 0; i < mmu_page_sizes; i++) {
15144 15001 ASSERT(sfmmup->sfmmu_scdrttecnt[i] ==
15145 15002 scdp->scd_rttecnt[i]);
15146 15003 atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
15147 15004 sfmmup->sfmmu_scdrttecnt[i]);
15148 15005 sfmmup->sfmmu_scdrttecnt[i] = 0;
15149 15006 }
15150 15007 sfmmup->sfmmu_scdp = NULL;
15151 15008
15152 15009 SF_SCD_DECR_REF(srdp, scdp);
15153 15010 return;
15154 15011 }
15155 15012
15156 15013 ASSERT(r_type != SFMMU_REGION_ISM ||
15157 15014 SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
15158 15015 ASSERT(scdp->scd_refcnt);
15159 15016 ASSERT(!sfmmup->sfmmu_free);
15160 15017 ASSERT(sfmmu_hat_lock_held(sfmmup));
15161 15018 ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
15162 15019
15163 15020 /*
15164 15021 * Wait for ISM maps to be updated.
15165 15022 */
15166 15023 if (r_type != SFMMU_REGION_ISM) {
15167 15024 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY) &&
15168 15025 sfmmup->sfmmu_scdp != NULL) {
15169 15026 cv_wait(&sfmmup->sfmmu_tsb_cv,
15170 15027 HATLOCK_MUTEXP(hatlockp));
15171 15028 }
15172 15029
15173 15030 if (sfmmup->sfmmu_scdp == NULL) {
15174 15031 sfmmu_hat_exit(hatlockp);
15175 15032 return;
15176 15033 }
15177 15034 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY);
15178 15035 }
15179 15036
15180 15037 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
15181 15038 SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD);
15182 15039 /*
15183 15040 		 * Since HAT_JOIN_SCD was set, our context
15184 15041 * is still invalid.
15185 15042 */
15186 15043 } else {
15187 15044 /*
15188 15045 * For a multi-thread process, we must stop
15189 15046 * all the other threads before leaving the scd.
15190 15047 */
15191 15048
15192 15049 sfmmu_invalidate_ctx(sfmmup);
15193 15050 }
15194 15051
15195 15052 	/* Clear all the rids for ISM, delete flags, etc. */
15196 15053 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
15197 15054 sfmmu_ism_hatflags(sfmmup, 0);
15198 15055
15199 15056 /*
15200 15057 * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that
15201 15058 * are in SCD before this sfmmup leaves the SCD.
15202 15059 */
15203 15060 for (i = 0; i < mmu_page_sizes; i++) {
15204 15061 ASSERT(sfmmup->sfmmu_scdrttecnt[i] ==
15205 15062 scdp->scd_rttecnt[i]);
15206 15063 atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
15207 15064 sfmmup->sfmmu_scdrttecnt[i]);
15208 15065 sfmmup->sfmmu_scdrttecnt[i] = 0;
15209 15066 /* update ismttecnt to include SCD ism before hat leaves SCD */
15210 15067 sfmmup->sfmmu_ismttecnt[i] += sfmmup->sfmmu_scdismttecnt[i];
15211 15068 sfmmup->sfmmu_scdismttecnt[i] = 0;
15212 15069 }
15213 15070 /* update tsb0 inflation count */
15214 15071 sfmmup->sfmmu_tsb0_4minflcnt += scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt;
15215 15072
15216 15073 if (r_type != SFMMU_REGION_ISM) {
15217 15074 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY);
15218 15075 }
15219 15076 sfmmup->sfmmu_scdp = NULL;
15220 15077
15221 15078 sfmmu_hat_exit(hatlockp);
15222 15079
15223 15080 /*
15224 15081 	 * Unlink sfmmu from scd_sf_list; this can be done without holding
15225 15082 * the hat lock as we hold the sfmmu_as lock which prevents
15226 15083 * hat_join_region from adding this thread to the scd again. Other
15227 15084 * threads check if sfmmu_scdp is NULL under hat lock and if it's NULL
15228 15085 * they won't get here, since sfmmu_leave_scd() clears sfmmu_scdp
15229 15086 * while holding the hat lock.
15230 15087 */
15231 15088 mutex_enter(&scdp->scd_mutex);
15232 15089 sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup);
15233 15090 mutex_exit(&scdp->scd_mutex);
15234 15091 SFMMU_STAT(sf_leave_scd);
15235 15092
15236 15093 SF_SCD_DECR_REF(srdp, scdp);
15237 15094 hatlockp = sfmmu_hat_enter(sfmmup);
15238 15095
15239 15096 }
15240 15097
15241 15098 /*
15242 15099 * Unlink and free up an SCD structure with a reference count of 0.
15243 15100 */
15244 15101 static void
15245 15102 sfmmu_destroy_scd(sf_srd_t *srdp, sf_scd_t *scdp, sf_region_map_t *scd_rmap)
15246 15103 {
15247 15104 sfmmu_t *scsfmmup;
15248 15105 sf_scd_t *sp;
15249 15106 hatlock_t *shatlockp;
15250 15107 int i, ret;
15251 15108
15252 15109 mutex_enter(&srdp->srd_scd_mutex);
15253 15110 for (sp = srdp->srd_scdp; sp != NULL; sp = sp->scd_next) {
15254 15111 if (sp == scdp)
15255 15112 break;
15256 15113 }
15257 15114 if (sp == NULL || sp->scd_refcnt) {
15258 15115 mutex_exit(&srdp->srd_scd_mutex);
15259 15116 return;
15260 15117 }
15261 15118
15262 15119 /*
15263 15120 * It is possible that the scd has been freed and reallocated with a
15264 15121 * different region map while we've been waiting for the srd_scd_mutex.
15265 15122 */
15266 15123 SF_RGNMAP_EQUAL(scd_rmap, &sp->scd_region_map, ret);
15267 15124 if (ret != 1) {
15268 15125 mutex_exit(&srdp->srd_scd_mutex);
15269 15126 return;
15270 15127 }
15271 15128
15272 15129 ASSERT(scdp->scd_sf_list == NULL);
15273 15130 /*
15274 15131 * Unlink scd from srd_scdp list.
15275 15132 */
15276 15133 sfmmu_remove_scd(&srdp->srd_scdp, scdp);
15277 15134 mutex_exit(&srdp->srd_scd_mutex);
15278 15135
15279 15136 sfmmu_unlink_scd_from_regions(srdp, scdp);
15280 15137
15281 15138 /* Clear shared context tsb and release ctx */
15282 15139 scsfmmup = scdp->scd_sfmmup;
15283 15140
15284 15141 /*
15285 15142 	 * create a barrier so that the scd will not be destroyed
15286 15143 	 * if another thread still holds the same shared hat lock.
15287 15144 * E.g., sfmmu_tsbmiss_exception() needs to acquire the
15288 15145 * shared hat lock before checking the shared tsb reloc flag.
15289 15146 */
15290 15147 shatlockp = sfmmu_hat_enter(scsfmmup);
15291 15148 sfmmu_hat_exit(shatlockp);
15292 15149
15293 15150 sfmmu_free_scd_tsbs(scsfmmup);
15294 15151
15295 15152 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
15296 15153 if (scsfmmup->sfmmu_hmeregion_links[i] != NULL) {
15297 15154 kmem_free(scsfmmup->sfmmu_hmeregion_links[i],
15298 15155 SFMMU_L2_HMERLINKS_SIZE);
15299 15156 scsfmmup->sfmmu_hmeregion_links[i] = NULL;
15300 15157 }
15301 15158 }
15302 15159 kmem_cache_free(sfmmuid_cache, scsfmmup);
15303 15160 kmem_cache_free(scd_cache, scdp);
15304 15161 SFMMU_STAT(sf_destroy_scd);
15305 15162 }
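
The hat-lock enter/exit pair above acts as a lock barrier: acquiring and
immediately releasing the mutex cannot complete until any thread that took
the shared hat lock before the SCD was unlinked has dropped it, so the
teardown that follows cannot race such a holder. The bare pattern, as a
sketch:

	mutex_enter(&obj->lock);	/* waits out any earlier holder */
	mutex_exit(&obj->lock);
	free_obj(obj);			/* safe: obj was already unlinked,
					 * so no new holder can find it */
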
15306 15163
15307 15164 /*
15308 15165 * Modifies the HAT_CTX1_FLAG for each of the ISM segments which correspond to
15309 15166  * bits which are set in the SCD's ISM region map. This flag indicates to
15310 15167  * the tsbmiss handler that mappings for these segments should be loaded using
15311 15168 * the shared context.
15312 15169 */
15313 15170 static void
15314 15171 sfmmu_ism_hatflags(sfmmu_t *sfmmup, int addflag)
15315 15172 {
15316 15173 sf_scd_t *scdp = sfmmup->sfmmu_scdp;
15317 15174 ism_blk_t *ism_blkp;
15318 15175 ism_map_t *ism_map;
15319 15176 int i, rid;
15320 15177
15321 15178 ASSERT(sfmmup->sfmmu_iblk != NULL);
15322 15179 ASSERT(scdp != NULL);
15323 15180 /*
15324 15181 	 * Note that the caller either set the HAT_ISMBUSY flag or checked
15325 15182 * under hat lock that HAT_ISMBUSY was not set by another thread.
15326 15183 */
15327 15184 ASSERT(sfmmu_hat_lock_held(sfmmup));
15328 15185
15329 15186 ism_blkp = sfmmup->sfmmu_iblk;
15330 15187 while (ism_blkp != NULL) {
15331 15188 ism_map = ism_blkp->iblk_maps;
15332 15189 		for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
15333 15190 rid = ism_map[i].imap_rid;
15334 15191 if (rid == SFMMU_INVALID_ISMRID) {
15335 15192 continue;
15336 15193 }
15337 15194 ASSERT(rid >= 0 && rid < SFMMU_MAX_ISM_REGIONS);
15338 15195 if (SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid) &&
15339 15196 addflag) {
15340 15197 ism_map[i].imap_hatflags |=
15341 15198 HAT_CTX1_FLAG;
15342 15199 } else {
15343 15200 ism_map[i].imap_hatflags &=
15344 15201 ~HAT_CTX1_FLAG;
15345 15202 }
15346 15203 }
15347 15204 ism_blkp = ism_blkp->iblk_next;
15348 15205 }
15349 15206 }
15350 15207
15351 15208 static int
15352 15209 sfmmu_srd_lock_held(sf_srd_t *srdp)
15353 15210 {
15354 15211 return (MUTEX_HELD(&srdp->srd_mutex));
15355 15212 }
15356 15213
15357 15214 /* ARGSUSED */
15358 15215 static int
15359 15216 sfmmu_scdcache_constructor(void *buf, void *cdrarg, int kmflags)
15360 15217 {
15361 15218 sf_scd_t *scdp = (sf_scd_t *)buf;
15362 15219
15363 15220 bzero(buf, sizeof (sf_scd_t));
15364 15221 mutex_init(&scdp->scd_mutex, NULL, MUTEX_DEFAULT, NULL);
15365 15222 return (0);
15366 15223 }
15367 15224
15368 15225 /* ARGSUSED */
15369 15226 static void
15370 15227 sfmmu_scdcache_destructor(void *buf, void *cdrarg)
15371 15228 {
15372 15229 sf_scd_t *scdp = (sf_scd_t *)buf;
15373 15230
15374 15231 mutex_destroy(&scdp->scd_mutex);
15375 15232 }
15376 15233
15377 15234 /*
15378 15235 * The listp parameter is a pointer to a list of hmeblks which are partially
15379 15236  * freed as a result of calling sfmmu_hblk_hash_rm(); the last phase of the
15380 15237 * freeing process is to cross-call all cpus to ensure that there are no
15381 15238 * remaining cached references.
15382 15239 *
15383 15240 * If the local generation number is less than the global then we can free
15384 15241 * hmeblks which are already on the pending queue as another cpu has completed
15385 15242 * the cross-call.
15386 15243 *
15387 15244 * We cross-call to make sure that there are no threads on other cpus accessing
15388 15245  * these hmeblks and then complete the process of freeing them under the
15389 15246  * following conditions (any one suffices):
15390 15247  * The total number of pending hmeblks is greater than the threshold
15391 15248  * The reserve list has fewer than HBLK_RESERVE_CNT hmeblks
15392 15249  * At least 1 second has passed since the last cross-call
15393 15250 *
15394 15251 * Otherwise, we add the hmeblks to the per-cpu pending queue.
15395 15252 */
15396 15253 static void
15397 15254 sfmmu_hblks_list_purge(struct hme_blk **listp, int dontfree)
15398 15255 {
15399 15256 struct hme_blk *hblkp, *pr_hblkp = NULL;
15400 15257 int count = 0;
15401 15258 cpuset_t cpuset = cpu_ready_set;
15402 15259 cpu_hme_pend_t *cpuhp;
15403 15260 timestruc_t now;
15404 15261 int one_second_expired = 0;
15405 15262
15406 15263 gethrestime_lasttick(&now);
15407 15264
15408 15265 for (hblkp = *listp; hblkp != NULL; hblkp = hblkp->hblk_next) {
15409 15266 ASSERT(hblkp->hblk_shw_bit == 0);
15410 15267 ASSERT(hblkp->hblk_shared == 0);
15411 15268 count++;
15412 15269 pr_hblkp = hblkp;
15413 15270 }
15414 15271
15415 15272 cpuhp = &cpu_hme_pend[CPU->cpu_seqid];
15416 15273 mutex_enter(&cpuhp->chp_mutex);
15417 15274
15418 15275 if ((cpuhp->chp_count + count) == 0) {
15419 15276 mutex_exit(&cpuhp->chp_mutex);
15420 15277 return;
15421 15278 }
15422 15279
15423 15280 if ((now.tv_sec - cpuhp->chp_timestamp) > 1) {
15424 15281 one_second_expired = 1;
15425 15282 }
15426 15283
15427 15284 if (!dontfree && (freehblkcnt < HBLK_RESERVE_CNT ||
15428 15285 (cpuhp->chp_count + count) > cpu_hme_pend_thresh ||
15429 15286 one_second_expired)) {
15430 15287 /* Append global list to local */
15431 15288 if (pr_hblkp == NULL) {
15432 15289 *listp = cpuhp->chp_listp;
15433 15290 } else {
15434 15291 pr_hblkp->hblk_next = cpuhp->chp_listp;
15435 15292 }
15436 15293 cpuhp->chp_listp = NULL;
15437 15294 cpuhp->chp_count = 0;
15438 15295 cpuhp->chp_timestamp = now.tv_sec;
15439 15296 mutex_exit(&cpuhp->chp_mutex);
15440 15297
15441 15298 kpreempt_disable();
15442 15299 CPUSET_DEL(cpuset, CPU->cpu_id);
15443 15300 xt_sync(cpuset);
15444 15301 xt_sync(cpuset);
15445 15302 kpreempt_enable();
15446 15303
15447 15304 /*
15448 15305 * At this stage we know that no trap handlers on other
15449 15306 * cpus can have references to hmeblks on the list.
15450 15307 */
15451 15308 sfmmu_hblk_free(listp);
15452 15309 } else if (*listp != NULL) {
15453 15310 pr_hblkp->hblk_next = cpuhp->chp_listp;
15454 15311 cpuhp->chp_listp = *listp;
15455 15312 cpuhp->chp_count += count;
15456 15313 *listp = NULL;
15457 15314 mutex_exit(&cpuhp->chp_mutex);
15458 15315 } else {
15459 15316 mutex_exit(&cpuhp->chp_mutex);
15460 15317 }
15461 15318 }
15462 15319
15463 15320 /*
15464 15321  * Add an hmeblk to the hash list.
15465 15322 */
15466 15323 void
15467 15324 sfmmu_hblk_hash_add(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
15468 15325 uint64_t hblkpa)
15469 15326 {
15470 15327 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
15471 15328 #ifdef DEBUG
15472 15329 if (hmebp->hmeblkp == NULL) {
15473 15330 ASSERT(hmebp->hmeh_nextpa == HMEBLK_ENDPA);
15474 15331 }
15475 15332 #endif /* DEBUG */
15476 15333
15477 15334 hmeblkp->hblk_nextpa = hmebp->hmeh_nextpa;
15478 15335 /*
15479 15336 * Since the TSB miss handler now does not lock the hash chain before
15480 15337  * walking it, make sure that the hmeblk's nextpa is globally visible
15481 15338 * before we make the hmeblk globally visible by updating the chain root
15482 15339 * pointer in the hash bucket.
15483 15340 */
15484 15341 membar_producer();
15485 15342 hmebp->hmeh_nextpa = hblkpa;
15486 15343 hmeblkp->hblk_next = hmebp->hmeblkp;
15487 15344 hmebp->hmeblkp = hmeblkp;
15488 15345
15489 15346 }
15490 15347
15491 15348 /*
15492 15349 * This function is the first part of a 2 part process to remove an hmeblk
15493 15350 * from the hash chain. In this phase we unlink the hmeblk from the hash chain
15494 15351 * but leave the next physical pointer unchanged. The hmeblk is then linked onto
15495 15352 * a per-cpu pending list using the virtual address pointer.
15496 15353 *
15497 15354 * TSB miss trap handlers that start after this phase will no longer see
15498 15355 * this hmeblk. TSB miss handlers that still cache this hmeblk in a register
15499 15356  * can still use it for further chain traversal because we haven't yet modified
15500 15357 * the next physical pointer or freed it.
15501 15358 *
15502 15359 * In the second phase of hmeblk removal we'll issue a barrier xcall before
15503 15360 * we reuse or free this hmeblk. This will make sure all lingering references to
15504 15361 * the hmeblk after first phase disappear before we finally reclaim it.
15505 15362 * This scheme eliminates the need for TSB miss handlers to lock hmeblk chains
15506 15363 * during their traversal.
15507 15364 *
15508 15365 * The hmehash_mutex must be held when calling this function.
15509 15366 *
15510 15367 * Input:
15511 15368 * hmebp - hme hash bucket pointer
15512 15369 * hmeblkp - address of hmeblk to be removed
15513 15370 * pr_hblk - virtual address of previous hmeblkp
15514 15371 * listp - pointer to list of hmeblks linked by virtual address
15515 15372 * free_now flag - indicates that a complete removal from the hash chains
15516 15373 * is necessary.
15517 15374 *
15518 15375  * Using the free_now flag is inefficient, as a cross-call is required to
15519 15376  * remove a single hmeblk from the hash chain, but it is necessary when
15520 15377  * hmeblks are in short supply.
15521 15378 */
15522 15379 void
15523 15380 sfmmu_hblk_hash_rm(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
15524 15381 struct hme_blk *pr_hblk, struct hme_blk **listp,
15525 15382 int free_now)
15526 15383 {
15527 15384 int shw_size, vshift;
15528 15385 struct hme_blk *shw_hblkp;
15529 15386 uint_t shw_mask, newshw_mask;
15530 15387 caddr_t vaddr;
15531 15388 int size;
15532 15389 cpuset_t cpuset = cpu_ready_set;
15533 15390
15534 15391 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
15535 15392
15536 15393 if (hmebp->hmeblkp == hmeblkp) {
15537 15394 hmebp->hmeh_nextpa = hmeblkp->hblk_nextpa;
15538 15395 hmebp->hmeblkp = hmeblkp->hblk_next;
15539 15396 } else {
15540 15397 pr_hblk->hblk_nextpa = hmeblkp->hblk_nextpa;
15541 15398 pr_hblk->hblk_next = hmeblkp->hblk_next;
15542 15399 }
15543 15400
15544 15401 size = get_hblk_ttesz(hmeblkp);
15545 15402 shw_hblkp = hmeblkp->hblk_shadow;
15546 15403 if (shw_hblkp) {
15547 15404 ASSERT(hblktosfmmu(hmeblkp) != KHATID);
15548 15405 ASSERT(!hmeblkp->hblk_shared);
15549 15406 #ifdef DEBUG
15550 15407 if (mmu_page_sizes == max_mmu_page_sizes) {
15551 15408 ASSERT(size < TTE256M);
15552 15409 } else {
15553 15410 ASSERT(size < TTE4M);
15554 15411 }
15555 15412 #endif /* DEBUG */
15556 15413
15557 15414 shw_size = get_hblk_ttesz(shw_hblkp);
15558 15415 vaddr = (caddr_t)get_hblk_base(hmeblkp);
15559 15416 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size);
15560 15417 ASSERT(vshift < 8);
15561 15418 /*
15562 15419 * Atomically clear shadow mask bit
15563 15420 */
15564 15421 do {
15565 15422 shw_mask = shw_hblkp->hblk_shw_mask;
15566 15423 ASSERT(shw_mask & (1 << vshift));
15567 15424 newshw_mask = shw_mask & ~(1 << vshift);
15568 15425 newshw_mask = atomic_cas_32(&shw_hblkp->hblk_shw_mask,
15569 15426 shw_mask, newshw_mask);
15570 15427 } while (newshw_mask != shw_mask);
15571 15428 hmeblkp->hblk_shadow = NULL;
15572 15429 }
15573 15430 hmeblkp->hblk_shw_bit = 0;
15574 15431
15575 15432 if (hmeblkp->hblk_shared) {
15576 15433 #ifdef DEBUG
15577 15434 sf_srd_t *srdp;
15578 15435 sf_region_t *rgnp;
15579 15436 uint_t rid;
15580 15437
15581 15438 srdp = hblktosrd(hmeblkp);
15582 15439 ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
15583 15440 rid = hmeblkp->hblk_tag.htag_rid;
15584 15441 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
15585 15442 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
15586 15443 rgnp = srdp->srd_hmergnp[rid];
15587 15444 ASSERT(rgnp != NULL);
15588 15445 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
15589 15446 #endif /* DEBUG */
15590 15447 hmeblkp->hblk_shared = 0;
15591 15448 }
15592 15449 if (free_now) {
15593 15450 kpreempt_disable();
15594 15451 CPUSET_DEL(cpuset, CPU->cpu_id);
15595 15452 xt_sync(cpuset);
15596 15453 xt_sync(cpuset);
15597 15454 kpreempt_enable();
15598 15455
15599 15456 hmeblkp->hblk_nextpa = HMEBLK_ENDPA;
15600 15457 hmeblkp->hblk_next = NULL;
15601 15458 } else {
15602 15459 		/* Add hmeblkp to listp for processing later. */
15603 15460 hmeblkp->hblk_next = *listp;
15604 15461 *listp = hmeblkp;
15605 15462 }
15606 15463 }
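
The shadow-mask update above is a standard compare-and-swap retry loop: read
the mask, compute the value with the bit cleared, and retry if
atomic_cas_32() reports that another cpu changed the word in between. The
same loop as a standalone sketch:

/* Sketch only: atomically clear bit `b' in *maskp. */
static void
atomic_clear_bit32(volatile uint32_t *maskp, uint_t b)
{
	uint32_t old, new;

	do {
		old = *maskp;
		new = old & ~(1U << b);
	} while (atomic_cas_32(maskp, old, new) != old);
}
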
15607 15464
15608 15465 /*
15609 15466 * This routine is called when memory is in short supply and returns a free
15610 15467 * hmeblk of the requested size from the cpu pending lists.
15611 15468 */
15612 15469 static struct hme_blk *
15613 15470 sfmmu_check_pending_hblks(int size)
15614 15471 {
15615 15472 int i;
15616 15473 struct hme_blk *hmeblkp = NULL, *last_hmeblkp;
15617 15474 int found_hmeblk;
15618 15475 cpuset_t cpuset = cpu_ready_set;
15619 15476 cpu_hme_pend_t *cpuhp;
15620 15477
15621 15478 /* Flush cpu hblk pending queues */
15622 15479 for (i = 0; i < NCPU; i++) {
15623 15480 cpuhp = &cpu_hme_pend[i];
15624 15481 if (cpuhp->chp_listp != NULL) {
15625 15482 mutex_enter(&cpuhp->chp_mutex);
15626 15483 if (cpuhp->chp_listp == NULL) {
15627 15484 mutex_exit(&cpuhp->chp_mutex);
15628 15485 continue;
15629 15486 }
15630 15487 found_hmeblk = 0;
15631 15488 last_hmeblkp = NULL;
15632 15489 for (hmeblkp = cpuhp->chp_listp; hmeblkp != NULL;
15633 15490 hmeblkp = hmeblkp->hblk_next) {
15634 15491 if (get_hblk_ttesz(hmeblkp) == size) {
15635 15492 if (last_hmeblkp == NULL) {
15636 15493 cpuhp->chp_listp =
15637 15494 hmeblkp->hblk_next;
15638 15495 } else {
15639 15496 last_hmeblkp->hblk_next =
15640 15497 hmeblkp->hblk_next;
15641 15498 }
15642 15499 ASSERT(cpuhp->chp_count > 0);
15643 15500 cpuhp->chp_count--;
15644 15501 found_hmeblk = 1;
15645 15502 break;
15646 15503 } else {
15647 15504 last_hmeblkp = hmeblkp;
15648 15505 }
15649 15506 }
15650 15507 mutex_exit(&cpuhp->chp_mutex);
15651 15508
15652 15509 if (found_hmeblk) {
15653 15510 kpreempt_disable();
15654 15511 CPUSET_DEL(cpuset, CPU->cpu_id);
15655 15512 xt_sync(cpuset);
15656 15513 xt_sync(cpuset);
15657 15514 kpreempt_enable();
15658 15515 return (hmeblkp);
15659 15516 }
15660 15517 }
15661 15518 }
15662 15519 return (NULL);
15663 15520 }
5834 lines elided