remove xhat
The xhat infrastructure was added to support hardware such as the zulu
graphics card - hardware with on-board MMUs. The VM used the xhat code
to keep the CPU's and Zulu's page tables in sync. Since the only xhat user
was zulu (which is gone), we can safely remove it, simplifying the whole VM
subsystem.
Assorted notes:
- AS_BUSY flag was used solely by xhat
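For context, xhat let additional translators hang off an address space so that HAT
operations could be fanned out to an external MMU as well as the CPU's. A rough
schematic of that fan-out pattern (hypothetical names and signatures, not the actual
xhat interface):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical sketch of a layered-HAT fan-out; not the real xhat API. */
struct ext_hat {
	struct ext_hat	*next;		/* list attached to the address space */
	void		(*memload)(void *, uintptr_t, unsigned);
	void		*priv;		/* e.g. device MMU state */
};

/* mirror a CPU mapping into every attached translator */
static void
demo_memload_all(struct ext_hat *list, uintptr_t va, unsigned flags)
{
	struct ext_hat *x;

	for (x = list; x != NULL; x = x->next)
		x->memload(x->priv, va, flags);
}

static void
demo_dev_memload(void *priv, uintptr_t va, unsigned flags)
{
	(void) priv;
	printf("device MMU: load va 0x%lx flags 0x%x\n", (unsigned long)va, flags);
}

int
main(void)
{
	struct ext_hat dev = { NULL, demo_dev_memload, NULL };

	demo_memload_all(&dev, 0x10000, 0);
	return (0);
}

With zulu gone there are no such translators left, so every HAT call can go straight
to the sfmmu code below.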
--- old/usr/src/uts/sfmmu/vm/hat_sfmmu.c
+++ new/usr/src/uts/sfmmu/vm/hat_sfmmu.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24 /*
25 25 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
26 26 */
27 27
28 28 /*
29 29 * VM - Hardware Address Translation management for Spitfire MMU.
30 30 *
31 31 * This file implements the machine specific hardware translation
32 32 * needed by the VM system. The machine independent interface is
33 33 * described in <vm/hat.h> while the machine dependent interface
34 34 * and data structures are described in <vm/hat_sfmmu.h>.
35 35 *
36 36 * The hat layer manages the address translation hardware as a cache
37 37 * driven by calls from the higher levels in the VM system.
38 38 */
39 39
40 40 #include <sys/types.h>
41 41 #include <sys/kstat.h>
42 42 #include <vm/hat.h>
43 43 #include <vm/hat_sfmmu.h>
44 44 #include <vm/page.h>
45 45 #include <sys/pte.h>
46 46 #include <sys/systm.h>
47 47 #include <sys/mman.h>
48 48 #include <sys/sysmacros.h>
49 49 #include <sys/machparam.h>
50 50 #include <sys/vtrace.h>
51 51 #include <sys/kmem.h>
52 52 #include <sys/mmu.h>
53 53 #include <sys/cmn_err.h>
54 54 #include <sys/cpu.h>
55 55 #include <sys/cpuvar.h>
56 56 #include <sys/debug.h>
57 57 #include <sys/lgrp.h>
58 58 #include <sys/archsystm.h>
59 59 #include <sys/machsystm.h>
60 60 #include <sys/vmsystm.h>
61 61 #include <vm/as.h>
62 62 #include <vm/seg.h>
63 63 #include <vm/seg_kp.h>
64 64 #include <vm/seg_kmem.h>
65 65 #include <vm/seg_kpm.h>
66 66 #include <vm/rm.h>
67 67 #include <sys/t_lock.h>
68 68 #include <sys/obpdefs.h>
69 69 #include <sys/vm_machparam.h>
70 70 #include <sys/var.h>
71 71 #include <sys/trap.h>
72 72 #include <sys/machtrap.h>
73 73 #include <sys/scb.h>
74 74 #include <sys/bitmap.h>
75 75 #include <sys/machlock.h>
76 76 #include <sys/membar.h>
77 77 #include <sys/atomic.h>
78 78 #include <sys/cpu_module.h>
79 79 #include <sys/prom_debug.h>
80 80 #include <sys/ksynch.h>
81 81 #include <sys/mem_config.h>
82 82 #include <sys/mem_cage.h>
83 83 #include <vm/vm_dep.h>
84 -#include <vm/xhat_sfmmu.h>
85 84 #include <sys/fpu/fpusystm.h>
86 85 #include <vm/mach_kpm.h>
87 86 #include <sys/callb.h>
88 87
89 88 #ifdef DEBUG
90 89 #define SFMMU_VALIDATE_HMERID(hat, rid, saddr, len) \
91 90 if (SFMMU_IS_SHMERID_VALID(rid)) { \
92 91 caddr_t _eaddr = (saddr) + (len); \
93 92 sf_srd_t *_srdp; \
94 93 sf_region_t *_rgnp; \
95 94 ASSERT((rid) < SFMMU_MAX_HME_REGIONS); \
96 95 ASSERT(SF_RGNMAP_TEST(hat->sfmmu_hmeregion_map, rid)); \
97 96 ASSERT((hat) != ksfmmup); \
98 97 _srdp = (hat)->sfmmu_srdp; \
99 98 ASSERT(_srdp != NULL); \
100 99 ASSERT(_srdp->srd_refcnt != 0); \
101 100 _rgnp = _srdp->srd_hmergnp[(rid)]; \
102 101 ASSERT(_rgnp != NULL && _rgnp->rgn_id == rid); \
103 102 ASSERT(_rgnp->rgn_refcnt != 0); \
104 103 ASSERT(!(_rgnp->rgn_flags & SFMMU_REGION_FREE)); \
105 104 ASSERT((_rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == \
106 105 SFMMU_REGION_HME); \
107 106 ASSERT((saddr) >= _rgnp->rgn_saddr); \
108 107 ASSERT((saddr) < _rgnp->rgn_saddr + _rgnp->rgn_size); \
109 108 ASSERT(_eaddr > _rgnp->rgn_saddr); \
110 109 ASSERT(_eaddr <= _rgnp->rgn_saddr + _rgnp->rgn_size); \
111 110 }
112 111
113 112 #define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid) \
114 113 { \
115 114 caddr_t _hsva; \
116 115 caddr_t _heva; \
117 116 caddr_t _rsva; \
118 117 caddr_t _reva; \
119 118 int _ttesz = get_hblk_ttesz(hmeblkp); \
120 119 int _flagtte; \
121 120 ASSERT((srdp)->srd_refcnt != 0); \
122 121 ASSERT((rid) < SFMMU_MAX_HME_REGIONS); \
123 122 ASSERT((rgnp)->rgn_id == rid); \
124 123 ASSERT(!((rgnp)->rgn_flags & SFMMU_REGION_FREE)); \
125 124 ASSERT(((rgnp)->rgn_flags & SFMMU_REGION_TYPE_MASK) == \
126 125 SFMMU_REGION_HME); \
127 126 ASSERT(_ttesz <= (rgnp)->rgn_pgszc); \
128 127 _hsva = (caddr_t)get_hblk_base(hmeblkp); \
129 128 _heva = get_hblk_endaddr(hmeblkp); \
130 129 _rsva = (caddr_t)P2ALIGN( \
131 130 (uintptr_t)(rgnp)->rgn_saddr, HBLK_MIN_BYTES); \
132 131 _reva = (caddr_t)P2ROUNDUP( \
133 132 (uintptr_t)((rgnp)->rgn_saddr + (rgnp)->rgn_size), \
134 133 HBLK_MIN_BYTES); \
135 134 ASSERT(_hsva >= _rsva); \
136 135 ASSERT(_hsva < _reva); \
137 136 ASSERT(_heva > _rsva); \
138 137 ASSERT(_heva <= _reva); \
139 138 _flagtte = (_ttesz < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ : \
140 139 _ttesz; \
141 140 ASSERT(rgnp->rgn_hmeflags & (0x1 << _flagtte)); \
142 141 }
143 142
144 143 #else /* DEBUG */
145 144 #define SFMMU_VALIDATE_HMERID(hat, rid, addr, len)
146 145 #define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid)
147 146 #endif /* DEBUG */
148 147
149 148 #if defined(SF_ERRATA_57)
150 149 extern caddr_t errata57_limit;
151 150 #endif
152 151
153 152 #define HME8BLK_SZ_RND ((roundup(HME8BLK_SZ, sizeof (int64_t))) / \
154 153 (sizeof (int64_t)))
155 154 #define HBLK_RESERVE ((struct hme_blk *)hblk_reserve)
156 155
157 156 #define HBLK_RESERVE_CNT 128
158 157 #define HBLK_RESERVE_MIN 20
159 158
160 159 static struct hme_blk *freehblkp;
161 160 static kmutex_t freehblkp_lock;
162 161 static int freehblkcnt;
163 162
164 163 static int64_t hblk_reserve[HME8BLK_SZ_RND];
165 164 static kmutex_t hblk_reserve_lock;
166 165 static kthread_t *hblk_reserve_thread;
167 166
168 167 static nucleus_hblk8_info_t nucleus_hblk8;
169 168 static nucleus_hblk1_info_t nucleus_hblk1;
170 169
171 170 /*
172 171 * Data to manage per-cpu hmeblk pending queues, hmeblks are queued here
173 172 * after the initial phase of removing an hmeblk from the hash chain, see
174 173 * the detailed comment in sfmmu_hblk_hash_rm() for further details.
175 174 */
176 175 static cpu_hme_pend_t *cpu_hme_pend;
177 176 static uint_t cpu_hme_pend_thresh;
178 177 /*
179 178 * SFMMU specific hat functions
180 179 */
181 180 void hat_pagecachectl(struct page *, int);
182 181
183 182 /* flags for hat_pagecachectl */
184 183 #define HAT_CACHE 0x1
185 184 #define HAT_UNCACHE 0x2
186 185 #define HAT_TMPNC 0x4
187 186
188 187 /*
189 188 * Flag to allow the creation of non-cacheable translations
190 189 * to system memory. It is off by default. At the moment this
191 190 * flag is used by the ecache error injector. The error injector
192 191 * will turn it on when creating such a translation then shut it
193 192 * off when it's finished.
194 193 */
195 194
196 195 int sfmmu_allow_nc_trans = 0;
197 196
198 197 /*
199 198 * Flag to disable large page support.
200 199 * value of 1 => disable all large pages.
201 200 * bits 1, 2, and 3 are to disable 64K, 512K and 4M pages respectively.
202 201 *
203 202 * For example, use the value 0x4 to disable 512K pages.
204 203 *
205 204 */
206 205 #define LARGE_PAGES_OFF 0x1
207 206
208 207 /*
209 208 * The disable_large_pages and disable_ism_large_pages variables control
210 209 * hat_memload_array and the page sizes to be used by ISM and the kernel.
211 210 *
212 211 * The disable_auto_data_large_pages and disable_auto_text_large_pages variables
213 212 * are only used to control which OOB pages to use at upper VM segment creation
214 213 * time, and are set in hat_init_pagesizes and used in the map_pgsz* routines.
215 214 * Their values may come from platform or CPU specific code to disable page
216 215 * sizes that should not be used.
217 216 *
218 217 * WARNING: 512K pages are currently not supported for ISM/DISM.
219 218 */
220 219 uint_t disable_large_pages = 0;
221 220 uint_t disable_ism_large_pages = (1 << TTE512K);
222 221 uint_t disable_auto_data_large_pages = 0;
223 222 uint_t disable_auto_text_large_pages = 0;
224 223
225 224 /*
226 225 * Private sfmmu data structures for hat management
227 226 */
228 227 static struct kmem_cache *sfmmuid_cache;
229 228 static struct kmem_cache *mmuctxdom_cache;
230 229
231 230 /*
232 231 * Private sfmmu data structures for tsb management
233 232 */
234 233 static struct kmem_cache *sfmmu_tsbinfo_cache;
235 234 static struct kmem_cache *sfmmu_tsb8k_cache;
236 235 static struct kmem_cache *sfmmu_tsb_cache[NLGRPS_MAX];
237 236 static vmem_t *kmem_bigtsb_arena;
238 237 static vmem_t *kmem_tsb_arena;
239 238
240 239 /*
241 240 * sfmmu static variables for hmeblk resource management.
242 241 */
243 242 static vmem_t *hat_memload1_arena; /* HAT translation arena for sfmmu1_cache */
244 243 static struct kmem_cache *sfmmu8_cache;
245 244 static struct kmem_cache *sfmmu1_cache;
246 245 static struct kmem_cache *pa_hment_cache;
247 246
248 247 static kmutex_t ism_mlist_lock; /* mutex for ism mapping list */
249 248 /*
250 249 * private data for ism
251 250 */
252 251 static struct kmem_cache *ism_blk_cache;
253 252 static struct kmem_cache *ism_ment_cache;
254 253 #define ISMID_STARTADDR NULL
255 254
256 255 /*
257 256 * Region management data structures and function declarations.
258 257 */
259 258
260 259 static void sfmmu_leave_srd(sfmmu_t *);
261 260 static int sfmmu_srdcache_constructor(void *, void *, int);
262 261 static void sfmmu_srdcache_destructor(void *, void *);
263 262 static int sfmmu_rgncache_constructor(void *, void *, int);
264 263 static void sfmmu_rgncache_destructor(void *, void *);
265 264 static int sfrgnmap_isnull(sf_region_map_t *);
266 265 static int sfhmergnmap_isnull(sf_hmeregion_map_t *);
267 266 static int sfmmu_scdcache_constructor(void *, void *, int);
268 267 static void sfmmu_scdcache_destructor(void *, void *);
269 268 static void sfmmu_rgn_cb_noop(caddr_t, caddr_t, caddr_t,
270 269 size_t, void *, u_offset_t);
271 270
272 271 static uint_t srd_hashmask = SFMMU_MAX_SRD_BUCKETS - 1;
273 272 static sf_srd_bucket_t *srd_buckets;
274 273 static struct kmem_cache *srd_cache;
275 274 static uint_t srd_rgn_hashmask = SFMMU_MAX_REGION_BUCKETS - 1;
276 275 static struct kmem_cache *region_cache;
277 276 static struct kmem_cache *scd_cache;
278 277
279 278 #ifdef sun4v
280 279 int use_bigtsb_arena = 1;
281 280 #else
282 281 int use_bigtsb_arena = 0;
283 282 #endif
284 283
285 284 /* External /etc/system tunable, for turning on&off the shctx support */
286 285 int disable_shctx = 0;
287 286 /* Internal variable, set by MD if the HW supports shctx feature */
288 287 int shctx_on = 0;
289 288
290 289 #ifdef DEBUG
291 290 static void check_scd_sfmmu_list(sfmmu_t **, sfmmu_t *, int);
292 291 #endif
293 292 static void sfmmu_to_scd_list(sfmmu_t **, sfmmu_t *);
294 293 static void sfmmu_from_scd_list(sfmmu_t **, sfmmu_t *);
295 294
296 295 static sf_scd_t *sfmmu_alloc_scd(sf_srd_t *, sf_region_map_t *);
297 296 static void sfmmu_find_scd(sfmmu_t *);
298 297 static void sfmmu_join_scd(sf_scd_t *, sfmmu_t *);
299 298 static void sfmmu_finish_join_scd(sfmmu_t *);
300 299 static void sfmmu_leave_scd(sfmmu_t *, uchar_t);
301 300 static void sfmmu_destroy_scd(sf_srd_t *, sf_scd_t *, sf_region_map_t *);
302 301 static int sfmmu_alloc_scd_tsbs(sf_srd_t *, sf_scd_t *);
303 302 static void sfmmu_free_scd_tsbs(sfmmu_t *);
304 303 static void sfmmu_tsb_inv_ctx(sfmmu_t *);
305 304 static int find_ism_rid(sfmmu_t *, sfmmu_t *, caddr_t, uint_t *);
306 305 static void sfmmu_ism_hatflags(sfmmu_t *, int);
307 306 static int sfmmu_srd_lock_held(sf_srd_t *);
308 307 static void sfmmu_remove_scd(sf_scd_t **, sf_scd_t *);
309 308 static void sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *);
310 309 static void sfmmu_link_scd_to_regions(sf_srd_t *, sf_scd_t *);
311 310 static void sfmmu_unlink_scd_from_regions(sf_srd_t *, sf_scd_t *);
312 311 static void sfmmu_link_to_hmeregion(sfmmu_t *, sf_region_t *);
313 312 static void sfmmu_unlink_from_hmeregion(sfmmu_t *, sf_region_t *);
314 313
315 314 /*
316 315 * ``hat_lock'' is a hashed mutex lock for protecting sfmmu TSB lists,
317 316 * HAT flags, synchronizing TLB/TSB coherency, and context management.
318 317 * The lock is hashed on the sfmmup since the case where we need to lock
319 318 * all processes is rare but does occur (e.g. we need to unload a shared
320 319 * mapping from all processes using the mapping). We have a lot of buckets,
321 320 * and each slab of sfmmu_t's can use about a quarter of them, giving us
322 321 * a fairly good distribution without wasting too much space and overhead
323 322 * when we have to grab them all.
324 323 */
325 324 #define SFMMU_NUM_LOCK 128 /* must be power of two */
326 325 hatlock_t hat_lock[SFMMU_NUM_LOCK];
327 326
328 327 /*
329 328 * Hash algorithm optimized for a small number of slabs.
330 329 * 7 is (highbit((sizeof sfmmu_t)) - 1)
331 330 * This hash algorithm is based upon the knowledge that sfmmu_t's come from a
332 331 * kmem_cache, and thus they will be sequential within that cache. In
333 332 * addition, each new slab will have a different "color" up to cache_maxcolor
334 333 * which will skew the hashing for each successive slab which is allocated.
335 334 * If the size of sfmmu_t changed to a larger size, this algorithm may need
336 335 * to be revisited.
337 336 */
338 337 #define TSB_HASH_SHIFT_BITS (7)
339 338 #define PTR_HASH(x) ((uintptr_t)x >> TSB_HASH_SHIFT_BITS)
340 339
341 340 #ifdef DEBUG
342 341 int tsb_hash_debug = 0;
343 342 #define TSB_HASH(sfmmup) \
344 343 (tsb_hash_debug ? &hat_lock[0] : \
345 344 &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)])
346 345 #else /* DEBUG */
347 346 #define TSB_HASH(sfmmup) &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)]
348 347 #endif /* DEBUG */
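/*
 * Illustrative sketch (not part of this file): how TSB_HASH picks a
 * bucket.  The shift (7) and SFMMU_NUM_LOCK (128) come from the
 * definitions above; the two sfmmu_t addresses are hypothetical, picked
 * close together to show that nearby sfmmu_t's from the same kmem slab
 * spread across different buckets.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_HASH_SHIFT	7	/* TSB_HASH_SHIFT_BITS */
#define DEMO_NUM_LOCK	128	/* SFMMU_NUM_LOCK, power of two */

int
main(void)
{
	uintptr_t a = 0x30001234000;	/* hypothetical sfmmu_t addresses */
	uintptr_t b = 0x30001234080;

	printf("bucket(a) = %lu\n",
	    (unsigned long)((a >> DEMO_HASH_SHIFT) & (DEMO_NUM_LOCK - 1)));
	printf("bucket(b) = %lu\n",
	    (unsigned long)((b >> DEMO_HASH_SHIFT) & (DEMO_NUM_LOCK - 1)));
	return (0);
}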
349 348
350 349
351 350 /* sfmmu_replace_tsb() return codes. */
352 351 typedef enum tsb_replace_rc {
353 352 TSB_SUCCESS,
354 353 TSB_ALLOCFAIL,
355 354 TSB_LOSTRACE,
356 355 TSB_ALREADY_SWAPPED,
357 356 TSB_CANTGROW
358 357 } tsb_replace_rc_t;
359 358
360 359 /*
361 360 * Flags for TSB allocation routines.
362 361 */
363 362 #define TSB_ALLOC 0x01
364 363 #define TSB_FORCEALLOC 0x02
365 364 #define TSB_GROW 0x04
366 365 #define TSB_SHRINK 0x08
367 366 #define TSB_SWAPIN 0x10
368 367
369 368 /*
370 369 * Support for HAT callbacks.
371 370 */
372 371 #define SFMMU_MAX_RELOC_CALLBACKS 10
373 372 int sfmmu_max_cb_id = SFMMU_MAX_RELOC_CALLBACKS;
374 373 static id_t sfmmu_cb_nextid = 0;
375 374 static id_t sfmmu_tsb_cb_id;
376 375 struct sfmmu_callback *sfmmu_cb_table;
377 376
378 377 kmutex_t kpr_mutex;
379 378 kmutex_t kpr_suspendlock;
380 379 kthread_t *kreloc_thread;
381 380
382 381 /*
383 382 * Enable VA->PA translation sanity checking on DEBUG kernels.
384 383 * Disabled by default. This is incompatible with some
385 384 * drivers (error injector, RSM) so if it breaks you get
386 385 * to keep both pieces.
387 386 */
388 387 int hat_check_vtop = 0;
389 388
390 389 /*
391 390 * Private sfmmu routines (prototypes)
392 391 */
393 392 static struct hme_blk *sfmmu_shadow_hcreate(sfmmu_t *, caddr_t, int, uint_t);
394 393 static struct hme_blk *sfmmu_hblk_alloc(sfmmu_t *, caddr_t,
395 394 struct hmehash_bucket *, uint_t, hmeblk_tag, uint_t,
396 395 uint_t);
397 396 static caddr_t sfmmu_hblk_unload(struct hat *, struct hme_blk *, caddr_t,
398 397 caddr_t, demap_range_t *, uint_t);
399 398 static caddr_t sfmmu_hblk_sync(struct hat *, struct hme_blk *, caddr_t,
400 399 caddr_t, int);
401 400 static void sfmmu_hblk_free(struct hme_blk **);
402 401 static void sfmmu_hblks_list_purge(struct hme_blk **, int);
403 402 static uint_t sfmmu_get_free_hblk(struct hme_blk **, uint_t);
404 403 static uint_t sfmmu_put_free_hblk(struct hme_blk *, uint_t);
405 404 static struct hme_blk *sfmmu_hblk_steal(int);
406 405 static int sfmmu_steal_this_hblk(struct hmehash_bucket *,
407 406 struct hme_blk *, uint64_t, struct hme_blk *);
408 407 static caddr_t sfmmu_hblk_unlock(struct hme_blk *, caddr_t, caddr_t);
409 408
410 409 static void hat_do_memload_array(struct hat *, caddr_t, size_t,
411 410 struct page **, uint_t, uint_t, uint_t);
412 411 static void hat_do_memload(struct hat *, caddr_t, struct page *,
413 412 uint_t, uint_t, uint_t);
414 413 static void sfmmu_memload_batchsmall(struct hat *, caddr_t, page_t **,
415 414 uint_t, uint_t, pgcnt_t, uint_t);
416 415 void sfmmu_tteload(struct hat *, tte_t *, caddr_t, page_t *,
417 416 uint_t);
418 417 static int sfmmu_tteload_array(sfmmu_t *, tte_t *, caddr_t, page_t **,
419 418 uint_t, uint_t);
420 419 static struct hmehash_bucket *sfmmu_tteload_acquire_hashbucket(sfmmu_t *,
421 420 caddr_t, int, uint_t);
422 421 static struct hme_blk *sfmmu_tteload_find_hmeblk(sfmmu_t *,
423 422 struct hmehash_bucket *, caddr_t, uint_t, uint_t,
424 423 uint_t);
425 424 static int sfmmu_tteload_addentry(sfmmu_t *, struct hme_blk *, tte_t *,
426 425 caddr_t, page_t **, uint_t, uint_t);
427 426 static void sfmmu_tteload_release_hashbucket(struct hmehash_bucket *);
428 427
429 428 static int sfmmu_pagearray_setup(caddr_t, page_t **, tte_t *, int);
430 429 static pfn_t sfmmu_uvatopfn(caddr_t, sfmmu_t *, tte_t *);
431 430 void sfmmu_memtte(tte_t *, pfn_t, uint_t, int);
432 431 #ifdef VAC
433 432 static void sfmmu_vac_conflict(struct hat *, caddr_t, page_t *);
434 433 static int sfmmu_vacconflict_array(caddr_t, page_t *, int *);
435 434 int tst_tnc(page_t *pp, pgcnt_t);
436 435 void conv_tnc(page_t *pp, int);
437 436 #endif
438 437
439 438 static void sfmmu_get_ctx(sfmmu_t *);
440 439 static void sfmmu_free_sfmmu(sfmmu_t *);
441 440
442 441 static void sfmmu_ttesync(struct hat *, caddr_t, tte_t *, page_t *);
443 442 static void sfmmu_chgattr(struct hat *, caddr_t, size_t, uint_t, int);
444 443
445 444 cpuset_t sfmmu_pageunload(page_t *, struct sf_hment *, int);
446 445 static void hat_pagereload(struct page *, struct page *);
447 446 static cpuset_t sfmmu_pagesync(page_t *, struct sf_hment *, uint_t);
448 447 #ifdef VAC
449 448 void sfmmu_page_cache_array(page_t *, int, int, pgcnt_t);
450 449 static void sfmmu_page_cache(page_t *, int, int, int);
451 450 #endif
452 451
453 452 cpuset_t sfmmu_rgntlb_demap(caddr_t, sf_region_t *,
454 453 struct hme_blk *, int);
455 454 static void sfmmu_tlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
456 455 pfn_t, int, int, int, int);
457 456 static void sfmmu_ismtlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
458 457 pfn_t, int);
459 458 static void sfmmu_tlb_demap(caddr_t, sfmmu_t *, struct hme_blk *, int, int);
460 459 static void sfmmu_tlb_range_demap(demap_range_t *);
461 460 static void sfmmu_invalidate_ctx(sfmmu_t *);
462 461 static void sfmmu_sync_mmustate(sfmmu_t *);
463 462
464 463 static void sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t);
465 464 static int sfmmu_tsbinfo_alloc(struct tsb_info **, int, int, uint_t,
466 465 sfmmu_t *);
467 466 static void sfmmu_tsb_free(struct tsb_info *);
468 467 static void sfmmu_tsbinfo_free(struct tsb_info *);
469 468 static int sfmmu_init_tsbinfo(struct tsb_info *, int, int, uint_t,
470 469 sfmmu_t *);
471 470 static void sfmmu_tsb_chk_reloc(sfmmu_t *, hatlock_t *);
472 471 static void sfmmu_tsb_swapin(sfmmu_t *, hatlock_t *);
473 472 static int sfmmu_select_tsb_szc(pgcnt_t);
474 473 static void sfmmu_mod_tsb(sfmmu_t *, caddr_t, tte_t *, int);
475 474 #define sfmmu_load_tsb(sfmmup, vaddr, tte, szc) \
476 475 sfmmu_mod_tsb(sfmmup, vaddr, tte, szc)
477 476 #define sfmmu_unload_tsb(sfmmup, vaddr, szc) \
478 477 sfmmu_mod_tsb(sfmmup, vaddr, NULL, szc)
479 478 static void sfmmu_copy_tsb(struct tsb_info *, struct tsb_info *);
480 479 static tsb_replace_rc_t sfmmu_replace_tsb(sfmmu_t *, struct tsb_info *, uint_t,
481 480 hatlock_t *, uint_t);
482 481 static void sfmmu_size_tsb(sfmmu_t *, int, uint64_t, uint64_t, int);
483 482
484 483 #ifdef VAC
485 484 void sfmmu_cache_flush(pfn_t, int);
486 485 void sfmmu_cache_flushcolor(int, pfn_t);
487 486 #endif
488 487 static caddr_t sfmmu_hblk_chgattr(sfmmu_t *, struct hme_blk *, caddr_t,
489 488 caddr_t, demap_range_t *, uint_t, int);
490 489
491 490 static uint64_t sfmmu_vtop_attr(uint_t, int mode, tte_t *);
492 491 static uint_t sfmmu_ptov_attr(tte_t *);
493 492 static caddr_t sfmmu_hblk_chgprot(sfmmu_t *, struct hme_blk *, caddr_t,
494 493 caddr_t, demap_range_t *, uint_t);
495 494 static uint_t sfmmu_vtop_prot(uint_t, uint_t *);
496 495 static int sfmmu_idcache_constructor(void *, void *, int);
497 496 static void sfmmu_idcache_destructor(void *, void *);
498 497 static int sfmmu_hblkcache_constructor(void *, void *, int);
499 498 static void sfmmu_hblkcache_destructor(void *, void *);
500 499 static void sfmmu_hblkcache_reclaim(void *);
501 500 static void sfmmu_shadow_hcleanup(sfmmu_t *, struct hme_blk *,
502 501 struct hmehash_bucket *);
503 502 static void sfmmu_hblk_hash_rm(struct hmehash_bucket *, struct hme_blk *,
504 503 struct hme_blk *, struct hme_blk **, int);
505 504 static void sfmmu_hblk_hash_add(struct hmehash_bucket *, struct hme_blk *,
506 505 uint64_t);
507 506 static struct hme_blk *sfmmu_check_pending_hblks(int);
508 507 static void sfmmu_free_hblks(sfmmu_t *, caddr_t, caddr_t, int);
509 508 static void sfmmu_cleanup_rhblk(sf_srd_t *, caddr_t, uint_t, int);
510 509 static void sfmmu_unload_hmeregion_va(sf_srd_t *, uint_t, caddr_t, caddr_t,
511 510 int, caddr_t *);
512 511 static void sfmmu_unload_hmeregion(sf_srd_t *, sf_region_t *);
513 512
514 513 static void sfmmu_rm_large_mappings(page_t *, int);
515 514
516 515 static void hat_lock_init(void);
517 516 static void hat_kstat_init(void);
518 517 static int sfmmu_kstat_percpu_update(kstat_t *ksp, int rw);
519 518 static void sfmmu_set_scd_rttecnt(sf_srd_t *, sf_scd_t *);
520 519 static int sfmmu_is_rgnva(sf_srd_t *, caddr_t, ulong_t, ulong_t);
521 520 static void sfmmu_check_page_sizes(sfmmu_t *, int);
522 521 int fnd_mapping_sz(page_t *);
523 522 static void iment_add(struct ism_ment *, struct hat *);
524 523 static void iment_sub(struct ism_ment *, struct hat *);
525 524 static pgcnt_t ism_tsb_entries(sfmmu_t *, int szc);
526 525 extern void sfmmu_setup_tsbinfo(sfmmu_t *);
527 526 extern void sfmmu_clear_utsbinfo(void);
528 527
529 528 static void sfmmu_ctx_wrap_around(mmu_ctx_t *, boolean_t);
530 529
531 530 extern int vpm_enable;
532 531
533 532 /* kpm globals */
534 533 #ifdef DEBUG
535 534 /*
536 535 * Enable trap level tsbmiss handling
537 536 */
538 537 int kpm_tsbmtl = 1;
539 538
540 539 /*
541 540 * Flush the TLB on kpm mapout. Note: Xcalls are used (again) for the
542 541 * required TLB shootdowns in this case, so handle w/ care. Off by default.
543 542 */
544 543 int kpm_tlb_flush;
545 544 #endif /* DEBUG */
546 545
547 546 static void *sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *, size_t, int);
548 547
549 548 #ifdef DEBUG
550 549 static void sfmmu_check_hblk_flist();
551 550 #endif
552 551
553 552 /*
554 553 * Semi-private sfmmu data structures. Some of them are initialize in
555 554 * startup or in hat_init. Some of them are private but accessed by
556 555 * assembly code or mach_sfmmu.c
557 556 */
558 557 struct hmehash_bucket *uhme_hash; /* user hmeblk hash table */
559 558 struct hmehash_bucket *khme_hash; /* kernel hmeblk hash table */
560 559 uint64_t uhme_hash_pa; /* PA of uhme_hash */
561 560 uint64_t khme_hash_pa; /* PA of khme_hash */
562 561 int uhmehash_num; /* # of buckets in user hash table */
563 562 int khmehash_num; /* # of buckets in kernel hash table */
564 563
565 564 uint_t max_mmu_ctxdoms = 0; /* max context domains in the system */
566 565 mmu_ctx_t **mmu_ctxs_tbl; /* global array of context domains */
567 566 uint64_t mmu_saved_gnum = 0; /* to init incoming MMUs' gnums */
568 567
569 568 #define DEFAULT_NUM_CTXS_PER_MMU 8192
570 569 static uint_t nctxs = DEFAULT_NUM_CTXS_PER_MMU;
571 570
572 571 int cache; /* describes system cache */
573 572
574 573 caddr_t ktsb_base; /* kernel 8k-indexed tsb base address */
575 574 uint64_t ktsb_pbase; /* kernel 8k-indexed tsb phys address */
576 575 int ktsb_szcode; /* kernel 8k-indexed tsb size code */
577 576 int ktsb_sz; /* kernel 8k-indexed tsb size */
578 577
579 578 caddr_t ktsb4m_base; /* kernel 4m-indexed tsb base address */
580 579 uint64_t ktsb4m_pbase; /* kernel 4m-indexed tsb phys address */
581 580 int ktsb4m_szcode; /* kernel 4m-indexed tsb size code */
582 581 int ktsb4m_sz; /* kernel 4m-indexed tsb size */
583 582
584 583 uint64_t kpm_tsbbase; /* kernel seg_kpm 4M TSB base address */
585 584 int kpm_tsbsz; /* kernel seg_kpm 4M TSB size code */
586 585 uint64_t kpmsm_tsbbase; /* kernel seg_kpm 8K TSB base address */
587 586 int kpmsm_tsbsz; /* kernel seg_kpm 8K TSB size code */
588 587
589 588 #ifndef sun4v
590 589 int utsb_dtlb_ttenum = -1; /* index in TLB for utsb locked TTE */
591 590 int utsb4m_dtlb_ttenum = -1; /* index in TLB for 4M TSB TTE */
592 591 int dtlb_resv_ttenum; /* index in TLB of first reserved TTE */
593 592 caddr_t utsb_vabase; /* reserved kernel virtual memory */
594 593 caddr_t utsb4m_vabase; /* for trap handler TSB accesses */
595 594 #endif /* sun4v */
596 595 uint64_t tsb_alloc_bytes = 0; /* bytes allocated to TSBs */
597 596 vmem_t *kmem_tsb_default_arena[NLGRPS_MAX]; /* For dynamic TSBs */
598 597 vmem_t *kmem_bigtsb_default_arena[NLGRPS_MAX]; /* dynamic 256M TSBs */
599 598
600 599 /*
601 600 * Size to use for TSB slabs. Future platforms that support page sizes
602 601 * larger than 4M may wish to change these values, and provide their own
603 602 * assembly macros for building and decoding the TSB base register contents.
604 603 * Note disable_large_pages will override the value set here.
605 604 */
606 605 static uint_t tsb_slab_ttesz = TTE4M;
607 606 size_t tsb_slab_size = MMU_PAGESIZE4M;
608 607 uint_t tsb_slab_shift = MMU_PAGESHIFT4M;
609 608 /* PFN mask for TTE */
610 609 size_t tsb_slab_mask = MMU_PAGEOFFSET4M >> MMU_PAGESHIFT;
611 610
612 611 /*
613 612 * Size to use for TSB slabs. These are used only when 256M tsb arenas
614 613 * exist.
615 614 */
616 615 static uint_t bigtsb_slab_ttesz = TTE256M;
617 616 static size_t bigtsb_slab_size = MMU_PAGESIZE256M;
618 617 static uint_t bigtsb_slab_shift = MMU_PAGESHIFT256M;
619 618 /* 256M page alignment for 8K pfn */
620 619 static size_t bigtsb_slab_mask = MMU_PAGEOFFSET256M >> MMU_PAGESHIFT;
621 620
622 621 /* largest TSB size to grow to, will be smaller on smaller memory systems */
623 622 static int tsb_max_growsize = 0;
624 623
625 624 /*
626 625 * Tunable parameters dealing with TSB policies.
627 626 */
628 627
629 628 /*
630 629 * This undocumented tunable forces all 8K TSBs to be allocated from
631 630 * the kernel heap rather than from the kmem_tsb_default_arena arenas.
632 631 */
633 632 #ifdef DEBUG
634 633 int tsb_forceheap = 0;
635 634 #endif /* DEBUG */
636 635
637 636 /*
638 637 * Decide whether to use per-lgroup arenas, or one global set of
639 638 * TSB arenas. The default is not to break up per-lgroup, since
640 639 * most platforms don't recognize any tangible benefit from it.
641 640 */
642 641 int tsb_lgrp_affinity = 0;
643 642
644 643 /*
645 644 * Used for growing the TSB based on the process RSS.
646 645 * tsb_rss_factor is based on the smallest TSB, and is
647 646 * shifted by the TSB size to determine if we need to grow.
648 647 * The default will grow the TSB if the number of TTEs for
649 648 * this page size exceeds 75% of the number of TSB entries,
650 649 * which should _almost_ eliminate all conflict misses
651 650 * (at the expense of using up lots and lots of memory).
652 651 */
653 652 #define TSB_RSS_FACTOR (TSB_ENTRIES(TSB_MIN_SZCODE) * 0.75)
654 653 #define SFMMU_RSS_TSBSIZE(tsbszc) (tsb_rss_factor << tsbszc)
655 654 #define SELECT_TSB_SIZECODE(pgcnt) ( \
656 655 (enable_tsb_rss_sizing)? sfmmu_select_tsb_szc(pgcnt) : \
657 656 default_tsb_size)
658 657 #define TSB_OK_SHRINK() \
659 658 (tsb_alloc_bytes > tsb_alloc_hiwater || freemem < desfree)
660 659 #define TSB_OK_GROW() \
661 660 (tsb_alloc_bytes < tsb_alloc_hiwater && freemem > desfree)
662 661
663 662 int enable_tsb_rss_sizing = 1;
664 663 int tsb_rss_factor = (int)TSB_RSS_FACTOR;
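/*
 * Illustrative arithmetic only (not part of this file): the per-size-code
 * grow threshold implied by SFMMU_RSS_TSBSIZE(), assuming the smallest
 * TSB holds 512 entries, so tsb_rss_factor == 512 * 0.75 == 384.
 */
#include <stdio.h>

int
main(void)
{
	int demo_rss_factor = 384;	/* assumed TSB_ENTRIES(TSB_MIN_SZCODE) * 0.75 */
	int szc;

	for (szc = 0; szc <= 4; szc++)
		printf("szc %d: grow once resident TTEs exceed %d\n",
		    szc, demo_rss_factor << szc);
	return (0);
}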
665 664
666 665 /* which TSB size code to use for new address spaces or if rss sizing off */
667 666 int default_tsb_size = TSB_8K_SZCODE;
668 667
669 668 static uint64_t tsb_alloc_hiwater; /* limit TSB reserved memory */
670 669 uint64_t tsb_alloc_hiwater_factor; /* tsb_alloc_hiwater = physmem / this */
671 670 #define TSB_ALLOC_HIWATER_FACTOR_DEFAULT 32
672 671
673 672 #ifdef DEBUG
674 673 static int tsb_random_size = 0; /* set to 1 to test random tsb sizes on alloc */
675 674 static int tsb_grow_stress = 0; /* if set to 1, keep replacing TSB w/ random */
676 675 static int tsb_alloc_mtbf = 0; /* fail allocation every n attempts */
677 676 static int tsb_alloc_fail_mtbf = 0;
678 677 static int tsb_alloc_count = 0;
679 678 #endif /* DEBUG */
680 679
681 680 /* if set to 1, will remap valid TTEs when growing TSB. */
682 681 int tsb_remap_ttes = 1;
683 682
684 683 /*
685 684 * If we have more than this many mappings, allocate a second TSB.
686 685 * This default is chosen because the I/D fully associative TLBs are
687 686 * assumed to have at least 8 available entries. Platforms with a
688 687 * larger fully-associative TLB could probably override the default.
689 688 */
690 689
691 690 #ifdef sun4v
692 691 int tsb_sectsb_threshold = 0;
693 692 #else
694 693 int tsb_sectsb_threshold = 8;
695 694 #endif
696 695
697 696 /*
698 697 * kstat data
699 698 */
700 699 struct sfmmu_global_stat sfmmu_global_stat;
701 700 struct sfmmu_tsbsize_stat sfmmu_tsbsize_stat;
702 701
703 702 /*
704 703 * Global data
705 704 */
706 705 sfmmu_t *ksfmmup; /* kernel's hat id */
707 706
708 707 #ifdef DEBUG
709 708 static void chk_tte(tte_t *, tte_t *, tte_t *, struct hme_blk *);
710 709 #endif
711 710
712 711 /* sfmmu locking operations */
713 712 static kmutex_t *sfmmu_mlspl_enter(struct page *, int);
714 713 static int sfmmu_mlspl_held(struct page *, int);
715 714
716 715 kmutex_t *sfmmu_page_enter(page_t *);
717 716 void sfmmu_page_exit(kmutex_t *);
718 717 int sfmmu_page_spl_held(struct page *);
719 718
720 719 /* sfmmu internal locking operations - accessed directly */
721 720 static void sfmmu_mlist_reloc_enter(page_t *, page_t *,
722 721 kmutex_t **, kmutex_t **);
723 722 static void sfmmu_mlist_reloc_exit(kmutex_t *, kmutex_t *);
724 723 static hatlock_t *
725 724 sfmmu_hat_enter(sfmmu_t *);
726 725 static hatlock_t *
727 726 sfmmu_hat_tryenter(sfmmu_t *);
728 727 static void sfmmu_hat_exit(hatlock_t *);
729 728 static void sfmmu_hat_lock_all(void);
730 729 static void sfmmu_hat_unlock_all(void);
731 730 static void sfmmu_ismhat_enter(sfmmu_t *, int);
732 731 static void sfmmu_ismhat_exit(sfmmu_t *, int);
733 732
734 733 kpm_hlk_t *kpmp_table;
735 734 uint_t kpmp_table_sz; /* must be a power of 2 */
736 735 uchar_t kpmp_shift;
737 736
738 737 kpm_shlk_t *kpmp_stable;
739 738 uint_t kpmp_stable_sz; /* must be a power of 2 */
740 739
741 740 /*
742 741 * SPL_TABLE_SIZE is 2 * NCPU, but no smaller than 128.
743 742 * SPL_SHIFT is log2(SPL_TABLE_SIZE).
744 743 */
745 744 #if ((2*NCPU_P2) > 128)
746 745 #define SPL_SHIFT ((unsigned)(NCPU_LOG2 + 1))
747 746 #else
748 747 #define SPL_SHIFT 7U
749 748 #endif
750 749 #define SPL_TABLE_SIZE (1U << SPL_SHIFT)
751 750 #define SPL_MASK (SPL_TABLE_SIZE - 1)
752 751
753 752 /*
754 753 * We shift by PP_SHIFT to take care of the low-order 0 bits of a page_t
755 754 * and by multiples of SPL_SHIFT to get as many varied bits as we can.
756 755 */
757 756 #define SPL_INDEX(pp) \
758 757 ((((uintptr_t)(pp) >> PP_SHIFT) ^ \
759 758 ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT)) ^ \
760 759 ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT * 2)) ^ \
761 760 ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT * 3))) & \
762 761 SPL_MASK)
763 762
764 763 #define SPL_HASH(pp) \
765 764 (&sfmmu_page_lock[SPL_INDEX(pp)].pad_mutex)
766 765
767 766 static pad_mutex_t sfmmu_page_lock[SPL_TABLE_SIZE];
768 767
769 768 /* Array of mutexes protecting a page's mapping list and p_nrm field. */
770 769
771 770 #define MML_TABLE_SIZE SPL_TABLE_SIZE
772 771 #define MLIST_HASH(pp) (&mml_table[SPL_INDEX(pp)].pad_mutex)
773 772
774 773 static pad_mutex_t mml_table[MML_TABLE_SIZE];
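/*
 * Illustrative sketch (not part of this file): SPL_INDEX folds several
 * shifted copies of the page_t address together and masks the result to
 * the table size.  SPL_SHIFT of 7 matches SPL_TABLE_SIZE == 128 above;
 * PP_SHIFT and the page_t address are assumptions made for the demo.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_PP_SHIFT	7	/* assumed low-order zero bits of a page_t */
#define DEMO_SPL_SHIFT	7
#define DEMO_SPL_MASK	(128 - 1)

int
main(void)
{
	uintptr_t pp = 0x70001234500;	/* hypothetical page_t address */
	unsigned long idx = (unsigned long)(
	    ((pp >> DEMO_PP_SHIFT) ^
	    (pp >> (DEMO_PP_SHIFT + DEMO_SPL_SHIFT)) ^
	    (pp >> (DEMO_PP_SHIFT + DEMO_SPL_SHIFT * 2)) ^
	    (pp >> (DEMO_PP_SHIFT + DEMO_SPL_SHIFT * 3))) & DEMO_SPL_MASK);

	printf("sfmmu_page_lock / mml_table index = %lu\n", idx);
	return (0);
}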
775 774
776 775 /*
777 776 * hat_unload_callback() will group together callbacks in order
778 777 * to avoid xt_sync() calls. This is the maximum size of the group.
779 778 */
780 779 #define MAX_CB_ADDR 32
781 780
782 781 tte_t hw_tte;
783 782 static ulong_t sfmmu_dmr_maxbit = DMR_MAXBIT;
784 783
785 784 static char *mmu_ctx_kstat_names[] = {
786 785 "mmu_ctx_tsb_exceptions",
787 786 "mmu_ctx_tsb_raise_exception",
788 787 "mmu_ctx_wrap_around",
789 788 };
790 789
791 790 /*
792 791 * Wrapper for vmem_xalloc since vmem_create only allows limited
793 792 * parameters for vm_source_alloc functions. This function allows us
794 793 * to specify alignment consistent with the size of the object being
795 794 * allocated.
796 795 */
797 796 static void *
798 797 sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t size, int vmflag)
799 798 {
800 799 return (vmem_xalloc(vmp, size, size, 0, 0, NULL, NULL, vmflag));
801 800 }
802 801
803 802 /* Common code for setting tsb_alloc_hiwater. */
804 803 #define SFMMU_SET_TSB_ALLOC_HIWATER(pages) tsb_alloc_hiwater = \
805 804 ptob(pages) / tsb_alloc_hiwater_factor
806 805
807 806 /*
808 807 * Set tsb_max_growsize to allow at most all of physical memory to be mapped by
809 808 * a single TSB. physmem is the number of physical pages so we need physmem 8K
810 809 * TTEs to represent all those physical pages. We round this up by using
811 810 * 1<<highbit(). To figure out which size code to use, remember that the size
812 811 * code is just an amount to shift the smallest TSB size to get the size of
813 812 * this TSB. So we subtract that size, TSB_START_SIZE, from highbit() (or
814 813 * highbit() - 1) to get the size code for the smallest TSB that can represent
815 814 * all of physical memory, while erring on the side of too much.
816 815 *
817 816 * Restrict tsb_max_growsize to make sure that:
818 817 * 1) TSBs can't grow larger than the TSB slab size
819 818 * 2) TSBs can't grow larger than UTSB_MAX_SZCODE.
820 819 */
821 820 #define SFMMU_SET_TSB_MAX_GROWSIZE(pages) { \
822 821 int _i, _szc, _slabszc, _tsbszc; \
823 822 \
824 823 _i = highbit(pages); \
825 824 if ((1 << (_i - 1)) == (pages)) \
826 825 _i--; /* 2^n case, round down */ \
827 826 _szc = _i - TSB_START_SIZE; \
828 827 _slabszc = bigtsb_slab_shift - (TSB_START_SIZE + TSB_ENTRY_SHIFT); \
829 828 _tsbszc = MIN(_szc, _slabszc); \
830 829 tsb_max_growsize = MIN(_tsbszc, UTSB_MAX_SZCODE); \
831 830 }
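/*
 * Worked example only (not part of this file): the effect of the hiwater
 * and max-growsize macros above on a 16GB machine with 8K pages
 * (physmem == 2^21).  Assumes 16-byte TSB entries (TSB_ENTRY_SHIFT == 4)
 * and TSB_START_SIZE == 9, i.e. the smallest TSB holds 2^9 entries.
 */
#include <stdio.h>

static int
demo_highbit(unsigned long v)	/* stand-in for highbit(): 1-indexed MSB */
{
	int h = 0;

	while (v != 0) {
		v >>= 1;
		h++;
	}
	return (h);
}

int
main(void)
{
	unsigned long pages = 1UL << 21;		/* 16GB of 8K pages */
	unsigned long hiwater = (pages << 13) / 32;	/* ptob(pages) / factor */
	int i = demo_highbit(pages);

	if ((1UL << (i - 1)) == pages)
		i--;					/* exact power of two */

	printf("tsb_alloc_hiwater = %lu MB\n", hiwater >> 20);		/* 512 */
	printf("szc of a TSB covering all of physmem = %d\n", i - 9);	/* 12 */
	return (0);
}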
832 831
833 832 /*
834 833 * Given a pointer to an sfmmu and a TTE size code, return a pointer to the
835 834 * tsb_info which handles that TTE size.
836 835 */
837 836 #define SFMMU_GET_TSBINFO(tsbinfop, sfmmup, tte_szc) { \
838 837 (tsbinfop) = (sfmmup)->sfmmu_tsb; \
839 838 ASSERT(((tsbinfop)->tsb_flags & TSB_SHAREDCTX) || \
840 839 sfmmu_hat_lock_held(sfmmup)); \
841 840 if ((tte_szc) >= TTE4M) { \
842 841 ASSERT((tsbinfop) != NULL); \
843 842 (tsbinfop) = (tsbinfop)->tsb_next; \
844 843 } \
845 844 }
846 845
847 846 /*
848 847 * Macro to use to unload entries from the TSB.
849 848 * It has knowledge of which page sizes get replicated in the TSB
850 849 * and will call the appropriate unload routine for the appropriate size.
851 850 */
852 851 #define SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, ismhat) \
853 852 { \
854 853 int ttesz = get_hblk_ttesz(hmeblkp); \
855 854 if (ttesz == TTE8K || ttesz == TTE4M) { \
856 855 sfmmu_unload_tsb(sfmmup, addr, ttesz); \
857 856 } else { \
858 857 caddr_t sva = ismhat ? addr : \
859 858 (caddr_t)get_hblk_base(hmeblkp); \
860 859 caddr_t eva = sva + get_hblk_span(hmeblkp); \
861 860 ASSERT(addr >= sva && addr < eva); \
862 861 sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz); \
863 862 } \
864 863 }
865 864
866 865
867 866 /* Update tsb_alloc_hiwater after memory is configured. */
868 867 /*ARGSUSED*/
869 868 static void
870 869 sfmmu_update_post_add(void *arg, pgcnt_t delta_pages)
871 870 {
872 871 /* Assumes physmem has already been updated. */
873 872 SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
874 873 SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
875 874 }
876 875
877 876 /*
878 877 * Update tsb_alloc_hiwater before memory is deleted. We'll do nothing here
879 878 * and update tsb_alloc_hiwater and tsb_max_growsize after the memory is
880 879 * deleted.
881 880 */
882 881 /*ARGSUSED*/
883 882 static int
884 883 sfmmu_update_pre_del(void *arg, pgcnt_t delta_pages)
885 884 {
886 885 return (0);
887 886 }
888 887
889 888 /* Update tsb_alloc_hiwater after memory fails to be unconfigured. */
890 889 /*ARGSUSED*/
891 890 static void
892 891 sfmmu_update_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
893 892 {
894 893 /*
895 894 * Whether the delete was cancelled or not, just go ahead and update
896 895 * tsb_alloc_hiwater and tsb_max_growsize.
897 896 */
898 897 SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
899 898 SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
900 899 }
901 900
902 901 static kphysm_setup_vector_t sfmmu_update_vec = {
903 902 KPHYSM_SETUP_VECTOR_VERSION, /* version */
904 903 sfmmu_update_post_add, /* post_add */
905 904 sfmmu_update_pre_del, /* pre_del */
906 905 sfmmu_update_post_del /* post_del */
907 906 };
908 907
909 908
910 909 /*
911 910 * HME_BLK HASH PRIMITIVES
912 911 */
913 912
914 913 /*
915 914 * Enter a hme on the mapping list for page pp.
916 915 * When large pages are more prevalent in the system we might want to
917 916 * keep the mapping list in ascending order by the hment size. For now,
918 917 * small pages are more frequent, so don't slow it down.
919 918 */
920 919 #define HME_ADD(hme, pp) \
921 920 { \
922 921 ASSERT(sfmmu_mlist_held(pp)); \
923 922 \
924 923 hme->hme_prev = NULL; \
925 924 hme->hme_next = pp->p_mapping; \
926 925 hme->hme_page = pp; \
927 926 if (pp->p_mapping) { \
928 927 ((struct sf_hment *)(pp->p_mapping))->hme_prev = hme;\
929 928 ASSERT(pp->p_share > 0); \
930 929 } else { \
931 930 /* EMPTY */ \
932 931 ASSERT(pp->p_share == 0); \
933 932 } \
934 933 pp->p_mapping = hme; \
935 934 pp->p_share++; \
936 935 }
937 936
938 937 /*
939 938 * Remove a hme from the mapping list for page pp.
940 939 * If we are unmapping a large translation, we need to make sure that the
941 940 * change is reflected in the corresponding bit of the p_index field.
942 941 */
943 942 #define HME_SUB(hme, pp) \
944 943 { \
945 944 ASSERT(sfmmu_mlist_held(pp)); \
946 945 ASSERT(hme->hme_page == pp || IS_PAHME(hme)); \
947 946 \
948 947 if (pp->p_mapping == NULL) { \
949 948 panic("hme_remove - no mappings"); \
950 949 } \
951 950 \
952 951 membar_stst(); /* ensure previous stores finish */ \
953 952 \
954 953 ASSERT(pp->p_share > 0); \
955 954 pp->p_share--; \
956 955 \
957 956 if (hme->hme_prev) { \
958 957 ASSERT(pp->p_mapping != hme); \
959 958 ASSERT(hme->hme_prev->hme_page == pp || \
960 959 IS_PAHME(hme->hme_prev)); \
961 960 hme->hme_prev->hme_next = hme->hme_next; \
962 961 } else { \
963 962 ASSERT(pp->p_mapping == hme); \
964 963 pp->p_mapping = hme->hme_next; \
965 964 ASSERT((pp->p_mapping == NULL) ? \
966 965 (pp->p_share == 0) : 1); \
967 966 } \
968 967 \
969 968 if (hme->hme_next) { \
970 969 ASSERT(hme->hme_next->hme_page == pp || \
971 970 IS_PAHME(hme->hme_next)); \
972 971 hme->hme_next->hme_prev = hme->hme_prev; \
973 972 } \
974 973 \
975 974 /* zero out the entry */ \
976 975 hme->hme_next = NULL; \
977 976 hme->hme_prev = NULL; \
978 977 hme->hme_page = NULL; \
979 978 \
980 979 if (hme_size(hme) > TTE8K) { \
981 980 /* remove mappings for remainder of large pg */ \
982 981 sfmmu_rm_large_mappings(pp, hme_size(hme)); \
983 982 } \
984 983 }
985 984
986 985 /*
987 986 * This function returns the hment given the hme_blk and a vaddr.
988 987 * It assumes addr has already been checked to belong to hme_blk's
989 988 * range.
990 989 */
991 990 #define HBLKTOHME(hment, hmeblkp, addr) \
992 991 { \
993 992 int index; \
994 993 HBLKTOHME_IDX(hment, hmeblkp, addr, index) \
995 994 }
996 995
997 996 /*
998 997 * Version of HBLKTOHME that also returns the index in hmeblkp
999 998 * of the hment.
1000 999 */
1001 1000 #define HBLKTOHME_IDX(hment, hmeblkp, addr, idx) \
1002 1001 { \
1003 1002 ASSERT(in_hblk_range((hmeblkp), (addr))); \
1004 1003 \
1005 1004 if (get_hblk_ttesz(hmeblkp) == TTE8K) { \
1006 1005 idx = (((uintptr_t)(addr) >> MMU_PAGESHIFT) & (NHMENTS-1)); \
1007 1006 } else \
1008 1007 idx = 0; \
1009 1008 \
1010 1009 (hment) = &(hmeblkp)->hblk_hme[idx]; \
1011 1010 }
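/*
 * Worked example only (not part of this file): for an 8K-TTE hmeblk the
 * hment index comes from the page-number bits of the address within the
 * block.  Assumes 8K MMU pages (MMU_PAGESHIFT == 13) and NHMENTS == 8.
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uintptr_t addr = 0x1000a000;		/* hypothetical mapped VA */
	int idx = (int)((addr >> 13) & (8 - 1));

	printf("hblk_hme index = %d\n", idx);	/* 5: sixth 8K page in the block */
	return (0);
}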
1012 1011
1013 1012 /*
1014 1013 * Disable any page sizes not supported by the CPU
1015 1014 */
1016 1015 void
1017 1016 hat_init_pagesizes()
1018 1017 {
1019 1018 int i;
1020 1019
1021 1020 mmu_exported_page_sizes = 0;
1022 1021 for (i = TTE8K; i < max_mmu_page_sizes; i++) {
1023 1022
1024 1023 szc_2_userszc[i] = (uint_t)-1;
1025 1024 userszc_2_szc[i] = (uint_t)-1;
1026 1025
1027 1026 if ((mmu_exported_pagesize_mask & (1 << i)) == 0) {
1028 1027 disable_large_pages |= (1 << i);
1029 1028 } else {
1030 1029 szc_2_userszc[i] = mmu_exported_page_sizes;
1031 1030 userszc_2_szc[mmu_exported_page_sizes] = i;
1032 1031 mmu_exported_page_sizes++;
1033 1032 }
1034 1033 }
1035 1034
1036 1035 disable_ism_large_pages |= disable_large_pages;
1037 1036 disable_auto_data_large_pages = disable_large_pages;
1038 1037 disable_auto_text_large_pages = disable_large_pages;
1039 1038
1040 1039 /*
1041 1040 * Initialize mmu-specific large page sizes.
1042 1041 */
1043 1042 if (&mmu_large_pages_disabled) {
1044 1043 disable_large_pages |= mmu_large_pages_disabled(HAT_LOAD);
1045 1044 disable_ism_large_pages |=
1046 1045 mmu_large_pages_disabled(HAT_LOAD_SHARE);
1047 1046 disable_auto_data_large_pages |=
1048 1047 mmu_large_pages_disabled(HAT_AUTO_DATA);
1049 1048 disable_auto_text_large_pages |=
1050 1049 mmu_large_pages_disabled(HAT_AUTO_TEXT);
1051 1050 }
1052 1051 }
1053 1052
1054 1053 /*
1055 1054 * Initialize the hardware address translation structures.
1056 1055 */
1057 1056 void
1058 1057 hat_init(void)
1059 1058 {
1060 1059 int i;
1061 1060 uint_t sz;
1062 1061 size_t size;
1063 1062
1064 1063 hat_lock_init();
1065 1064 hat_kstat_init();
1066 1065
1067 1066 /*
1068 1067 * Hardware-only bits in a TTE
1069 1068 */
1070 1069 MAKE_TTE_MASK(&hw_tte);
1071 1070
1072 1071 hat_init_pagesizes();
1073 1072
1074 1073 /* Initialize the hash locks */
1075 1074 for (i = 0; i < khmehash_num; i++) {
1076 1075 mutex_init(&khme_hash[i].hmehash_mutex, NULL,
1077 1076 MUTEX_DEFAULT, NULL);
1078 1077 khme_hash[i].hmeh_nextpa = HMEBLK_ENDPA;
1079 1078 }
1080 1079 for (i = 0; i < uhmehash_num; i++) {
1081 1080 mutex_init(&uhme_hash[i].hmehash_mutex, NULL,
1082 1081 MUTEX_DEFAULT, NULL);
1083 1082 uhme_hash[i].hmeh_nextpa = HMEBLK_ENDPA;
1084 1083 }
1085 1084 khmehash_num--; /* make sure counter starts from 0 */
1086 1085 uhmehash_num--; /* make sure counter starts from 0 */
1087 1086
1088 1087 /*
1089 1088 * Allocate context domain structures.
1090 1089 *
1091 1090 * A platform may choose to modify max_mmu_ctxdoms in
1092 1091 * set_platform_defaults(). If a platform does not define
1093 1092 * a set_platform_defaults() or does not choose to modify
1094 1093 * max_mmu_ctxdoms, it gets one MMU context domain for every CPU.
1095 1094 *
1096 1095 * For all platforms that have CPUs sharing MMUs, this
1097 1096 * value must be defined.
1098 1097 */
1099 1098 if (max_mmu_ctxdoms == 0)
1100 1099 max_mmu_ctxdoms = max_ncpus;
1101 1100
1102 1101 size = max_mmu_ctxdoms * sizeof (mmu_ctx_t *);
1103 1102 mmu_ctxs_tbl = kmem_zalloc(size, KM_SLEEP);
1104 1103
1105 1104 /* mmu_ctx_t is 64 bytes aligned */
1106 1105 mmuctxdom_cache = kmem_cache_create("mmuctxdom_cache",
1107 1106 sizeof (mmu_ctx_t), 64, NULL, NULL, NULL, NULL, NULL, 0);
1108 1107 /*
1109 1108 * MMU context domain initialization for the Boot CPU.
1110 1109 * This needs the context domains array allocated above.
1111 1110 */
1112 1111 mutex_enter(&cpu_lock);
1113 1112 sfmmu_cpu_init(CPU);
1114 1113 mutex_exit(&cpu_lock);
1115 1114
1116 1115 /*
1117 1116 * Initialize ism mapping list lock.
1118 1117 */
1119 1118
1120 1119 mutex_init(&ism_mlist_lock, NULL, MUTEX_DEFAULT, NULL);
1121 1120
1122 1121 /*
1123 1122 * Each sfmmu structure carries an array of MMU context info
1124 1123 * structures, one per context domain. The size of this array depends
1125 1124 * on the maximum number of context domains. So, the size of the
1126 1125 * sfmmu structure varies per platform.
1127 1126 *
1128 1127 * sfmmu is allocated from static arena, because trap
1129 1128 * handler at TL > 0 is not allowed to touch kernel relocatable
1130 1129 * memory. sfmmu's alignment is changed to 64 bytes from
1131 1130 * default 8 bytes, as the lower 6 bits will be used to pass
1132 1131 * pgcnt to vtag_flush_pgcnt_tl1.
1133 1132 */
1134 1133 size = sizeof (sfmmu_t) + sizeof (sfmmu_ctx_t) * (max_mmu_ctxdoms - 1);
1135 1134
1136 1135 sfmmuid_cache = kmem_cache_create("sfmmuid_cache", size,
1137 1136 64, sfmmu_idcache_constructor, sfmmu_idcache_destructor,
1138 1137 NULL, NULL, static_arena, 0);
1139 1138
1140 1139 sfmmu_tsbinfo_cache = kmem_cache_create("sfmmu_tsbinfo_cache",
1141 1140 sizeof (struct tsb_info), 0, NULL, NULL, NULL, NULL, NULL, 0);
1142 1141
1143 1142 /*
1144 1143 * Since we only use the tsb8k cache to "borrow" pages for TSBs
1145 1144 * from the heap when low on memory or when TSB_FORCEALLOC is
1146 1145 * specified, don't use magazines to cache them--we want to return
1147 1146 * them to the system as quickly as possible.
1148 1147 */
1149 1148 sfmmu_tsb8k_cache = kmem_cache_create("sfmmu_tsb8k_cache",
1150 1149 MMU_PAGESIZE, MMU_PAGESIZE, NULL, NULL, NULL, NULL,
1151 1150 static_arena, KMC_NOMAGAZINE);
1152 1151
1153 1152 /*
1154 1153 * Set tsb_alloc_hiwater to 1/tsb_alloc_hiwater_factor of physical
1155 1154 * memory, which corresponds to the old static reserve for TSBs.
1156 1155 * tsb_alloc_hiwater_factor defaults to 32. This caps the amount of
1157 1156 * memory we'll allocate for TSB slabs; beyond this point TSB
1158 1157 * allocations will be taken from the kernel heap (via
1159 1158 * sfmmu_tsb8k_cache) and will be throttled as would any other kmem
1160 1159 * consumer.
1161 1160 */
1162 1161 if (tsb_alloc_hiwater_factor == 0) {
1163 1162 tsb_alloc_hiwater_factor = TSB_ALLOC_HIWATER_FACTOR_DEFAULT;
1164 1163 }
1165 1164 SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
1166 1165
1167 1166 for (sz = tsb_slab_ttesz; sz > 0; sz--) {
1168 1167 if (!(disable_large_pages & (1 << sz)))
1169 1168 break;
1170 1169 }
1171 1170
1172 1171 if (sz < tsb_slab_ttesz) {
1173 1172 tsb_slab_ttesz = sz;
1174 1173 tsb_slab_shift = MMU_PAGESHIFT + (sz << 1) + sz;
1175 1174 tsb_slab_size = 1 << tsb_slab_shift;
1176 1175 tsb_slab_mask = (1 << (tsb_slab_shift - MMU_PAGESHIFT)) - 1;
1177 1176 use_bigtsb_arena = 0;
1178 1177 } else if (use_bigtsb_arena &&
1179 1178 (disable_large_pages & (1 << bigtsb_slab_ttesz))) {
1180 1179 use_bigtsb_arena = 0;
1181 1180 }
1182 1181
1183 1182 if (!use_bigtsb_arena) {
1184 1183 bigtsb_slab_shift = tsb_slab_shift;
1185 1184 }
1186 1185 SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
1187 1186
1188 1187 /*
1189 1188 * On smaller memory systems, allocate TSB memory in smaller chunks
1190 1189 * than the default 4M slab size. We also honor disable_large_pages
1191 1190 * here.
1192 1191 *
1193 1192 * The trap handlers need to be patched with the final slab shift,
1194 1193 * since they need to be able to construct the TSB pointer at runtime.
1195 1194 */
1196 1195 if ((tsb_max_growsize <= TSB_512K_SZCODE) &&
1197 1196 !(disable_large_pages & (1 << TTE512K))) {
1198 1197 tsb_slab_ttesz = TTE512K;
1199 1198 tsb_slab_shift = MMU_PAGESHIFT512K;
1200 1199 tsb_slab_size = MMU_PAGESIZE512K;
1201 1200 tsb_slab_mask = MMU_PAGEOFFSET512K >> MMU_PAGESHIFT;
1202 1201 use_bigtsb_arena = 0;
1203 1202 }
1204 1203
1205 1204 if (!use_bigtsb_arena) {
1206 1205 bigtsb_slab_ttesz = tsb_slab_ttesz;
1207 1206 bigtsb_slab_shift = tsb_slab_shift;
1208 1207 bigtsb_slab_size = tsb_slab_size;
1209 1208 bigtsb_slab_mask = tsb_slab_mask;
1210 1209 }
1211 1210
1212 1211
1213 1212 /*
1214 1213 * Set up memory callback to update tsb_alloc_hiwater and
1215 1214 * tsb_max_growsize.
1216 1215 */
1217 1216 i = kphysm_setup_func_register(&sfmmu_update_vec, (void *) 0);
1218 1217 ASSERT(i == 0);
1219 1218
1220 1219 /*
1221 1220 * kmem_tsb_arena is the source from which large TSB slabs are
1222 1221 * drawn. The quantum of this arena corresponds to the largest
1223 1222 * TSB size we can dynamically allocate for user processes.
1224 1223 * Currently it must also be a supported page size since we
1225 1224 * use exactly one translation entry to map each slab page.
1226 1225 *
1227 1226 * The per-lgroup kmem_tsb_default_arena arenas are the arenas from
1228 1227 * which most TSBs are allocated. Since most TSB allocations are
1229 1228 * typically 8K we have a kmem cache we stack on top of each
1230 1229 * kmem_tsb_default_arena to speed up those allocations.
1231 1230 *
1232 1231 * Note the two-level scheme of arenas is required only
1233 1232 * because vmem_create doesn't allow us to specify alignment
1234 1233 * requirements. If this ever changes the code could be
1235 1234 * simplified to use only one level of arenas.
1236 1235 *
1237 1236 * If 256M page support exists on sun4v, 256MB kmem_bigtsb_arena
1238 1237 * will be provided in addition to the 4M kmem_tsb_arena.
1239 1238 */
1240 1239 if (use_bigtsb_arena) {
1241 1240 kmem_bigtsb_arena = vmem_create("kmem_bigtsb", NULL, 0,
1242 1241 bigtsb_slab_size, sfmmu_vmem_xalloc_aligned_wrapper,
1243 1242 vmem_xfree, heap_arena, 0, VM_SLEEP);
1244 1243 }
1245 1244
1246 1245 kmem_tsb_arena = vmem_create("kmem_tsb", NULL, 0, tsb_slab_size,
1247 1246 sfmmu_vmem_xalloc_aligned_wrapper,
1248 1247 vmem_xfree, heap_arena, 0, VM_SLEEP);
1249 1248
1250 1249 if (tsb_lgrp_affinity) {
1251 1250 char s[50];
1252 1251 for (i = 0; i < NLGRPS_MAX; i++) {
1253 1252 if (use_bigtsb_arena) {
1254 1253 (void) sprintf(s, "kmem_bigtsb_lgrp%d", i);
1255 1254 kmem_bigtsb_default_arena[i] = vmem_create(s,
1256 1255 NULL, 0, 2 * tsb_slab_size,
1257 1256 sfmmu_tsb_segkmem_alloc,
1258 1257 sfmmu_tsb_segkmem_free, kmem_bigtsb_arena,
1259 1258 0, VM_SLEEP | VM_BESTFIT);
1260 1259 }
1261 1260
1262 1261 (void) sprintf(s, "kmem_tsb_lgrp%d", i);
1263 1262 kmem_tsb_default_arena[i] = vmem_create(s,
1264 1263 NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc,
1265 1264 sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0,
1266 1265 VM_SLEEP | VM_BESTFIT);
1267 1266
1268 1267 (void) sprintf(s, "sfmmu_tsb_lgrp%d_cache", i);
1269 1268 sfmmu_tsb_cache[i] = kmem_cache_create(s,
1270 1269 PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL,
1271 1270 kmem_tsb_default_arena[i], 0);
1272 1271 }
1273 1272 } else {
1274 1273 if (use_bigtsb_arena) {
1275 1274 kmem_bigtsb_default_arena[0] =
1276 1275 vmem_create("kmem_bigtsb_default", NULL, 0,
1277 1276 2 * tsb_slab_size, sfmmu_tsb_segkmem_alloc,
1278 1277 sfmmu_tsb_segkmem_free, kmem_bigtsb_arena, 0,
1279 1278 VM_SLEEP | VM_BESTFIT);
1280 1279 }
1281 1280
1282 1281 kmem_tsb_default_arena[0] = vmem_create("kmem_tsb_default",
1283 1282 NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc,
1284 1283 sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0,
1285 1284 VM_SLEEP | VM_BESTFIT);
1286 1285 sfmmu_tsb_cache[0] = kmem_cache_create("sfmmu_tsb_cache",
1287 1286 PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL,
1288 1287 kmem_tsb_default_arena[0], 0);
1289 1288 }
1290 1289
1291 1290 sfmmu8_cache = kmem_cache_create("sfmmu8_cache", HME8BLK_SZ,
1292 1291 HMEBLK_ALIGN, sfmmu_hblkcache_constructor,
1293 1292 sfmmu_hblkcache_destructor,
1294 1293 sfmmu_hblkcache_reclaim, (void *)HME8BLK_SZ,
1295 1294 hat_memload_arena, KMC_NOHASH);
1296 1295
1297 1296 hat_memload1_arena = vmem_create("hat_memload1", NULL, 0, PAGESIZE,
1298 1297 segkmem_alloc_permanent, segkmem_free, heap_arena, 0,
1299 1298 VMC_DUMPSAFE | VM_SLEEP);
1300 1299
1301 1300 sfmmu1_cache = kmem_cache_create("sfmmu1_cache", HME1BLK_SZ,
1302 1301 HMEBLK_ALIGN, sfmmu_hblkcache_constructor,
1303 1302 sfmmu_hblkcache_destructor,
1304 1303 NULL, (void *)HME1BLK_SZ,
1305 1304 hat_memload1_arena, KMC_NOHASH);
1306 1305
1307 1306 pa_hment_cache = kmem_cache_create("pa_hment_cache", PAHME_SZ,
1308 1307 0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH);
1309 1308
1310 1309 ism_blk_cache = kmem_cache_create("ism_blk_cache",
1311 1310 sizeof (ism_blk_t), ecache_alignsize, NULL, NULL,
1312 1311 NULL, NULL, static_arena, KMC_NOHASH);
1313 1312
1314 1313 ism_ment_cache = kmem_cache_create("ism_ment_cache",
1315 1314 sizeof (ism_ment_t), 0, NULL, NULL,
1316 1315 NULL, NULL, NULL, 0);
1317 1316
1318 1317 /*
1319 1318 * We grab the first hat for the kernel,
1320 1319 */
1321 1320 AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
1322 1321 kas.a_hat = hat_alloc(&kas);
1323 1322 AS_LOCK_EXIT(&kas, &kas.a_lock);
1324 1323
1325 1324 /*
1326 1325 * Initialize hblk_reserve.
1327 1326 */
1328 1327 ((struct hme_blk *)hblk_reserve)->hblk_nextpa =
1329 1328 va_to_pa((caddr_t)hblk_reserve);
1330 1329
1331 1330 #ifndef UTSB_PHYS
1332 1331 /*
1333 1332 * Reserve some kernel virtual address space for the locked TTEs
1334 1333 * that allow us to probe the TSB from TL>0.
1335 1334 */
1336 1335 utsb_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size,
1337 1336 0, 0, NULL, NULL, VM_SLEEP);
1338 1337 utsb4m_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size,
1339 1338 0, 0, NULL, NULL, VM_SLEEP);
1340 1339 #endif
1341 1340
1342 1341 #ifdef VAC
1343 1342 /*
1344 1343 * The big page VAC handling code assumes VAC
1345 1344 * will not be bigger than the smallest big
1346 1345 * page- which is 64K.
1347 1346 */
1348 1347 if (TTEPAGES(TTE64K) < CACHE_NUM_COLOR) {
1349 1348 cmn_err(CE_PANIC, "VAC too big!");
1350 1349 }
1351 1350 #endif
1352 1351
1353 - (void) xhat_init();
1354 -
1355 1352 uhme_hash_pa = va_to_pa(uhme_hash);
1356 1353 khme_hash_pa = va_to_pa(khme_hash);
1357 1354
1358 1355 /*
1359 1356 * Initialize relocation locks. kpr_suspendlock is held
1360 1357 * at PIL_MAX to prevent interrupts from pinning the holder
1361 1358 * of a suspended TTE which may access it leading to a
1362 1359 * deadlock condition.
1363 1360 */
1364 1361 mutex_init(&kpr_mutex, NULL, MUTEX_DEFAULT, NULL);
1365 1362 mutex_init(&kpr_suspendlock, NULL, MUTEX_SPIN, (void *)PIL_MAX);
1366 1363
1367 1364 /*
1368 1365 * If Shared context support is disabled via /etc/system
1369 1366 * set shctx_on to 0 here if it was set to 1 earlier in boot
1370 1367 * sequence by cpu module initialization code.
1371 1368 */
1372 1369 if (shctx_on && disable_shctx) {
1373 1370 shctx_on = 0;
1374 1371 }
1375 1372
1376 1373 if (shctx_on) {
1377 1374 srd_buckets = kmem_zalloc(SFMMU_MAX_SRD_BUCKETS *
1378 1375 sizeof (srd_buckets[0]), KM_SLEEP);
1379 1376 for (i = 0; i < SFMMU_MAX_SRD_BUCKETS; i++) {
1380 1377 mutex_init(&srd_buckets[i].srdb_lock, NULL,
1381 1378 MUTEX_DEFAULT, NULL);
1382 1379 }
1383 1380
1384 1381 srd_cache = kmem_cache_create("srd_cache", sizeof (sf_srd_t),
1385 1382 0, sfmmu_srdcache_constructor, sfmmu_srdcache_destructor,
1386 1383 NULL, NULL, NULL, 0);
1387 1384 region_cache = kmem_cache_create("region_cache",
1388 1385 sizeof (sf_region_t), 0, sfmmu_rgncache_constructor,
1389 1386 sfmmu_rgncache_destructor, NULL, NULL, NULL, 0);
1390 1387 scd_cache = kmem_cache_create("scd_cache", sizeof (sf_scd_t),
1391 1388 0, sfmmu_scdcache_constructor, sfmmu_scdcache_destructor,
1392 1389 NULL, NULL, NULL, 0);
1393 1390 }
1394 1391
1395 1392 /*
1396 1393 * Pre-allocate hrm_hashtab before enabling the collection of
1397 1394 * refmod statistics. Allocating on the fly would mean us
1398 1395 * running the risk of suffering recursive mutex enters or
1399 1396 * deadlocks.
1400 1397 */
1401 1398 hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
1402 1399 KM_SLEEP);
1403 1400
1404 1401 /* Allocate per-cpu pending freelist of hmeblks */
1405 1402 cpu_hme_pend = kmem_zalloc((NCPU * sizeof (cpu_hme_pend_t)) + 64,
1406 1403 KM_SLEEP);
1407 1404 cpu_hme_pend = (cpu_hme_pend_t *)P2ROUNDUP(
1408 1405 (uintptr_t)cpu_hme_pend, 64);
1409 1406
1410 1407 for (i = 0; i < NCPU; i++) {
1411 1408 mutex_init(&cpu_hme_pend[i].chp_mutex, NULL, MUTEX_DEFAULT,
1412 1409 NULL);
1413 1410 }
1414 1411
1415 1412 if (cpu_hme_pend_thresh == 0) {
1416 1413 cpu_hme_pend_thresh = CPU_HME_PEND_THRESH;
1417 1414 }
1418 1415 }
1419 1416
1420 1417 /*
1421 1418 * Initialize locking for the hat layer, called early during boot.
1422 1419 */
1423 1420 static void
1424 1421 hat_lock_init()
1425 1422 {
1426 1423 int i;
1427 1424
1428 1425 /*
1429 1426 * initialize the array of mutexes protecting a page's mapping
1430 1427 * list and p_nrm field.
1431 1428 */
1432 1429 for (i = 0; i < MML_TABLE_SIZE; i++)
1433 1430 mutex_init(&mml_table[i].pad_mutex, NULL, MUTEX_DEFAULT, NULL);
1434 1431
1435 1432 if (kpm_enable) {
1436 1433 for (i = 0; i < kpmp_table_sz; i++) {
1437 1434 mutex_init(&kpmp_table[i].khl_mutex, NULL,
1438 1435 MUTEX_DEFAULT, NULL);
1439 1436 }
1440 1437 }
1441 1438
1442 1439 /*
1443 1440 * Initialize array of mutex locks that protects sfmmu fields and
1444 1441 * TSB lists.
1445 1442 */
1446 1443 for (i = 0; i < SFMMU_NUM_LOCK; i++)
1447 1444 mutex_init(HATLOCK_MUTEXP(&hat_lock[i]), NULL, MUTEX_DEFAULT,
1448 1445 NULL);
1449 1446 }
1450 1447
1451 1448 #define SFMMU_KERNEL_MAXVA \
1452 1449 (kmem64_base ? (uintptr_t)kmem64_end : (SYSLIMIT))
1453 1450
1454 1451 /*
1455 1452 * Allocate a hat structure.
1456 1453 * Called when an address space first uses a hat.
1457 1454 */
1458 1455 struct hat *
1459 1456 hat_alloc(struct as *as)
1460 1457 {
1461 1458 sfmmu_t *sfmmup;
1462 1459 int i;
1463 1460 uint64_t cnum;
1464 1461 extern uint_t get_color_start(struct as *);
1465 1462
1466 1463 ASSERT(AS_WRITE_HELD(as, &as->a_lock));
1467 1464 sfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP);
1468 1465 sfmmup->sfmmu_as = as;
1469 1466 sfmmup->sfmmu_flags = 0;
1470 1467 sfmmup->sfmmu_tteflags = 0;
1471 1468 sfmmup->sfmmu_rtteflags = 0;
1472 1469 LOCK_INIT_CLEAR(&sfmmup->sfmmu_ctx_lock);
1473 1470
1474 1471 if (as == &kas) {
1475 1472 ksfmmup = sfmmup;
1476 1473 sfmmup->sfmmu_cext = 0;
1477 1474 cnum = KCONTEXT;
1478 1475
1479 1476 sfmmup->sfmmu_clrstart = 0;
1480 1477 sfmmup->sfmmu_tsb = NULL;
1481 1478 /*
1482 1479 * hat_kern_setup() will call sfmmu_init_ktsbinfo()
1483 1480 * to setup tsb_info for ksfmmup.
1484 1481 */
1485 1482 } else {
1486 1483
1487 1484 /*
1488 1485 * Just set to invalid ctx. When it faults, it will
1489 1486 * get a valid ctx. This would avoid the situation
1490 1487 * where we get a ctx, but it gets stolen and then
1491 1488 * we fault when we try to run and so have to get
1492 1489 * another ctx.
1493 1490 */
1494 1491 sfmmup->sfmmu_cext = 0;
1495 1492 cnum = INVALID_CONTEXT;
1496 1493
1497 1494 /* initialize original physical page coloring bin */
1498 1495 sfmmup->sfmmu_clrstart = get_color_start(as);
1499 1496 #ifdef DEBUG
1500 1497 if (tsb_random_size) {
1501 1498 uint32_t randval = (uint32_t)gettick() >> 4;
1502 1499 int size = randval % (tsb_max_growsize + 1);
1503 1500
1504 1501 /* chose a random tsb size for stress testing */
1505 1502 (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, size,
1506 1503 TSB8K|TSB64K|TSB512K, 0, sfmmup);
1507 1504 } else
1508 1505 #endif /* DEBUG */
1509 1506 (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb,
1510 1507 default_tsb_size,
1511 1508 TSB8K|TSB64K|TSB512K, 0, sfmmup);
1512 1509 sfmmup->sfmmu_flags = HAT_SWAPPED | HAT_ALLCTX_INVALID;
1513 1510 ASSERT(sfmmup->sfmmu_tsb != NULL);
1514 1511 }
1515 1512
1516 1513 ASSERT(max_mmu_ctxdoms > 0);
1517 1514 for (i = 0; i < max_mmu_ctxdoms; i++) {
1518 1515 sfmmup->sfmmu_ctxs[i].cnum = cnum;
1519 1516 sfmmup->sfmmu_ctxs[i].gnum = 0;
1520 1517 }
1521 1518
1522 1519 for (i = 0; i < max_mmu_page_sizes; i++) {
1523 1520 sfmmup->sfmmu_ttecnt[i] = 0;
1524 1521 sfmmup->sfmmu_scdrttecnt[i] = 0;
1525 1522 sfmmup->sfmmu_ismttecnt[i] = 0;
1526 1523 sfmmup->sfmmu_scdismttecnt[i] = 0;
1527 1524 sfmmup->sfmmu_pgsz[i] = TTE8K;
1528 1525 }
1529 1526 sfmmup->sfmmu_tsb0_4minflcnt = 0;
1530 1527 sfmmup->sfmmu_iblk = NULL;
1531 1528 sfmmup->sfmmu_ismhat = 0;
[ 167 lines elided ]
1532 1529 sfmmup->sfmmu_scdhat = 0;
1533 1530 sfmmup->sfmmu_ismblkpa = (uint64_t)-1;
1534 1531 if (sfmmup == ksfmmup) {
1535 1532 CPUSET_ALL(sfmmup->sfmmu_cpusran);
1536 1533 } else {
1537 1534 CPUSET_ZERO(sfmmup->sfmmu_cpusran);
1538 1535 }
1539 1536 sfmmup->sfmmu_free = 0;
1540 1537 sfmmup->sfmmu_rmstat = 0;
1541 1538 sfmmup->sfmmu_clrbin = sfmmup->sfmmu_clrstart;
1542 - sfmmup->sfmmu_xhat_provider = NULL;
1543 1539 cv_init(&sfmmup->sfmmu_tsb_cv, NULL, CV_DEFAULT, NULL);
1544 1540 sfmmup->sfmmu_srdp = NULL;
1545 1541 SF_RGNMAP_ZERO(sfmmup->sfmmu_region_map);
1546 1542 bzero(sfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE);
1547 1543 sfmmup->sfmmu_scdp = NULL;
1548 1544 sfmmup->sfmmu_scd_link.next = NULL;
1549 1545 sfmmup->sfmmu_scd_link.prev = NULL;
1550 1546 return (sfmmup);
1551 1547 }
1552 1548
1553 1549 /*
1554 1550 * Create per-MMU context domain kstats for a given MMU ctx.
1555 1551 */
1556 1552 static void
1557 1553 sfmmu_mmu_kstat_create(mmu_ctx_t *mmu_ctxp)
1558 1554 {
1559 1555 mmu_ctx_stat_t stat;
1560 1556 kstat_t *mmu_kstat;
1561 1557
1562 1558 ASSERT(MUTEX_HELD(&cpu_lock));
1563 1559 ASSERT(mmu_ctxp->mmu_kstat == NULL);
1564 1560
1565 1561 mmu_kstat = kstat_create("unix", mmu_ctxp->mmu_idx, "mmu_ctx",
1566 1562 "hat", KSTAT_TYPE_NAMED, MMU_CTX_NUM_STATS, KSTAT_FLAG_VIRTUAL);
1567 1563
1568 1564 if (mmu_kstat == NULL) {
1569 1565 cmn_err(CE_WARN, "kstat_create for MMU %d failed",
1570 1566 mmu_ctxp->mmu_idx);
1571 1567 } else {
1572 1568 mmu_kstat->ks_data = mmu_ctxp->mmu_kstat_data;
1573 1569 for (stat = 0; stat < MMU_CTX_NUM_STATS; stat++)
1574 1570 kstat_named_init(&mmu_ctxp->mmu_kstat_data[stat],
1575 1571 mmu_ctx_kstat_names[stat], KSTAT_DATA_INT64);
1576 1572 mmu_ctxp->mmu_kstat = mmu_kstat;
1577 1573 kstat_install(mmu_kstat);
1578 1574 }
1579 1575 }
1580 1576
1581 1577 /*
1582 1578 * plat_cpuid_to_mmu_ctx_info() is a platform interface that returns MMU
1583 1579 * context domain information for a given CPU. If a platform does not
1584 1580 * specify that interface, then the function below is used instead to return
1585 1581 * default information. The defaults are as follows:
1586 1582 *
1587 1583 * - The number of MMU context IDs supported on any CPU in the
1588 1584 * system is 8K.
1589 1585 * - There is one MMU context domain per CPU.
1590 1586 */
1591 1587 /*ARGSUSED*/
1592 1588 static void
1593 1589 sfmmu_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *infop)
1594 1590 {
1595 1591 infop->mmu_nctxs = nctxs;
1596 1592 infop->mmu_idx = cpu[cpuid]->cpu_seqid;
1597 1593 }
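
The default above pairs each CPU with its own context domain. As a purely illustrative sketch (not part of this changeset; the grouping policy and constants are assumptions), a platform module could supply plat_cpuid_to_mmu_ctx_info() so that several CPUs share one domain:

/*
 * Hypothetical platform override, for illustration only: four CPUs
 * share each MMU context domain and each domain exposes 8K context
 * IDs.  The /4 grouping is an assumption, not taken from any real
 * platform.
 */
void
plat_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *infop)
{
	infop->mmu_nctxs = 8 * 1024;
	infop->mmu_idx = cpu[cpuid]->cpu_seqid / 4;
}

With such an override, sfmmu_cpu_init() below would find four CPUs landing in the same mmu_ctxs_tbl[] slot and sharing that domain's context ID space.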
1598 1594
1599 1595 /*
1600 1596 * Called during CPU initialization to set the MMU context-related information
1601 1597 * for a CPU.
1602 1598 *
1603 1599 * cpu_lock serializes accesses to mmu_ctxs and mmu_saved_gnum.
1604 1600 */
1605 1601 void
1606 1602 sfmmu_cpu_init(cpu_t *cp)
1607 1603 {
1608 1604 mmu_ctx_info_t info;
1609 1605 mmu_ctx_t *mmu_ctxp;
1610 1606
1611 1607 ASSERT(MUTEX_HELD(&cpu_lock));
1612 1608
1613 1609 if (&plat_cpuid_to_mmu_ctx_info == NULL)
1614 1610 sfmmu_cpuid_to_mmu_ctx_info(cp->cpu_id, &info);
1615 1611 else
1616 1612 plat_cpuid_to_mmu_ctx_info(cp->cpu_id, &info);
1617 1613
1618 1614 ASSERT(info.mmu_idx < max_mmu_ctxdoms);
1619 1615
1620 1616 if ((mmu_ctxp = mmu_ctxs_tbl[info.mmu_idx]) == NULL) {
1621 1617 /* Each mmu_ctx is cacheline aligned. */
1622 1618 mmu_ctxp = kmem_cache_alloc(mmuctxdom_cache, KM_SLEEP);
1623 1619 bzero(mmu_ctxp, sizeof (mmu_ctx_t));
1624 1620
1625 1621 mutex_init(&mmu_ctxp->mmu_lock, NULL, MUTEX_SPIN,
1626 1622 (void *)ipltospl(DISP_LEVEL));
1627 1623 mmu_ctxp->mmu_idx = info.mmu_idx;
1628 1624 mmu_ctxp->mmu_nctxs = info.mmu_nctxs;
1629 1625 /*
1630 1626 * Globally for lifetime of a system,
1631 1627 * gnum must always increase.
1632 1628 * mmu_saved_gnum is protected by the cpu_lock.
1633 1629 */
1634 1630 mmu_ctxp->mmu_gnum = mmu_saved_gnum + 1;
1635 1631 mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS;
1636 1632
1637 1633 sfmmu_mmu_kstat_create(mmu_ctxp);
1638 1634
1639 1635 mmu_ctxs_tbl[info.mmu_idx] = mmu_ctxp;
1640 1636 } else {
1641 1637 ASSERT(mmu_ctxp->mmu_idx == info.mmu_idx);
1642 1638 ASSERT(mmu_ctxp->mmu_nctxs <= info.mmu_nctxs);
1643 1639 }
1644 1640
1645 1641 /*
1646 1642 * The mmu_lock is acquired here to prevent races with
1647 1643 * the wrap-around code.
1648 1644 */
1649 1645 mutex_enter(&mmu_ctxp->mmu_lock);
1650 1646
1651 1647
1652 1648 mmu_ctxp->mmu_ncpus++;
1653 1649 CPUSET_ADD(mmu_ctxp->mmu_cpuset, cp->cpu_id);
1654 1650 CPU_MMU_IDX(cp) = info.mmu_idx;
1655 1651 CPU_MMU_CTXP(cp) = mmu_ctxp;
1656 1652
1657 1653 mutex_exit(&mmu_ctxp->mmu_lock);
1658 1654 }
1659 1655
1660 1656 static void
1661 1657 sfmmu_ctxdom_free(mmu_ctx_t *mmu_ctxp)
1662 1658 {
1663 1659 ASSERT(MUTEX_HELD(&cpu_lock));
1664 1660 ASSERT(!MUTEX_HELD(&mmu_ctxp->mmu_lock));
1665 1661
1666 1662 mutex_destroy(&mmu_ctxp->mmu_lock);
1667 1663
1668 1664 if (mmu_ctxp->mmu_kstat)
1669 1665 kstat_delete(mmu_ctxp->mmu_kstat);
1670 1666
1671 1667 /* mmu_saved_gnum is protected by the cpu_lock. */
1672 1668 if (mmu_saved_gnum < mmu_ctxp->mmu_gnum)
1673 1669 mmu_saved_gnum = mmu_ctxp->mmu_gnum;
1674 1670
1675 1671 kmem_cache_free(mmuctxdom_cache, mmu_ctxp);
1676 1672 }
1677 1673
1678 1674 /*
1679 1675 * Called to perform MMU context-related cleanup for a CPU.
1680 1676 */
1681 1677 void
1682 1678 sfmmu_cpu_cleanup(cpu_t *cp)
1683 1679 {
1684 1680 mmu_ctx_t *mmu_ctxp;
1685 1681
1686 1682 ASSERT(MUTEX_HELD(&cpu_lock));
1687 1683
1688 1684 mmu_ctxp = CPU_MMU_CTXP(cp);
1689 1685 ASSERT(mmu_ctxp != NULL);
1690 1686
1691 1687 /*
1692 1688 * The mmu_lock is acquired here to prevent races with
1693 1689 * the wrap-around code.
1694 1690 */
1695 1691 mutex_enter(&mmu_ctxp->mmu_lock);
1696 1692
1697 1693 CPU_MMU_CTXP(cp) = NULL;
1698 1694
1699 1695 CPUSET_DEL(mmu_ctxp->mmu_cpuset, cp->cpu_id);
1700 1696 if (--mmu_ctxp->mmu_ncpus == 0) {
1701 1697 mmu_ctxs_tbl[mmu_ctxp->mmu_idx] = NULL;
1702 1698 mutex_exit(&mmu_ctxp->mmu_lock);
1703 1699 sfmmu_ctxdom_free(mmu_ctxp);
1704 1700 return;
1705 1701 }
1706 1702
1707 1703 mutex_exit(&mmu_ctxp->mmu_lock);
1708 1704 }
1709 1705
1710 1706 uint_t
1711 1707 sfmmu_ctxdom_nctxs(int idx)
1712 1708 {
1713 1709 return (mmu_ctxs_tbl[idx]->mmu_nctxs);
1714 1710 }
1715 1711
1716 1712 #ifdef sun4v
1717 1713 /*
1718 1714 * sfmmu_ctxdoms_* is an interface provided to help keep context domains
1719 1715 * consistent after suspend/resume on systems that can resume on different
1720 1716 * hardware than they were suspended on.
1721 1717 *
1722 1718 * sfmmu_ctxdoms_lock(void) locks all context domains and prevents new contexts
1723 1719 * from being allocated. It acquires all hat_locks, which blocks most access to
1724 1720 * context data, except for a few cases that are handled separately or are
1725 1721 * harmless. It wraps each domain to increment gnum and invalidate on-CPU
1726 1722 * contexts, and forces cnum to its max. As a result of this call all user
1727 1723 * threads that are running on CPUs trap and try to perform wrap around but
1728 1724 * can't because hat_locks are taken. Threads that were not on CPUs but started
1729 1725 * by the scheduler go to sfmmu_alloc_ctx() to acquire a context without checking
1730 1726 * hat_lock, but fail, because cnum == nctxs, and therefore also trap and block
1731 1727 * on hat_lock trying to wrap. sfmmu_ctxdom_lock() must be called before CPUs
1732 1728 * are paused, else it could deadlock acquiring locks held by paused CPUs.
1733 1729 *
1734 1730 * sfmmu_ctxdoms_remove() removes context domains from every CPU and records
1735 1731 * the CPUs that had them. It must be called after CPUs have been paused. This
1736 1732 * ensures that no threads are in sfmmu_alloc_ctx() accessing domain data,
1737 1733 * because pause_cpus sends a mondo interrupt to every CPU, and sfmmu_alloc_ctx
1738 1734 * runs with interrupts disabled. When CPUs are later resumed, they may enter
1739 1735 * sfmmu_alloc_ctx, but it will check for CPU_MMU_CTXP = NULL and immediately
1740 1736 * return failure. Or, they will be blocked trying to acquire hat_lock. Thus
1741 1737 * after sfmmu_ctxdoms_remove returns, we are guaranteed that no one is
1742 1738 * accessing the old context domains.
1743 1739 *
1744 1740 * sfmmu_ctxdoms_update(void) frees space used by old context domains and
1745 1741 * allocates new context domains based on hardware layout. It initializes
1746 1742 * every CPU that had a context domain before migration to have one again.
1747 1743 * sfmmu_ctxdoms_update must be called after CPUs are resumed, else it
1748 1744 * could deadlock acquiring locks held by paused CPUs.
1749 1745 *
1750 1746 * sfmmu_ctxdoms_unlock(void) releases all hat_locks after which user threads
1751 1747 * acquire new context ids and continue execution.
1752 1748 *
1753 1749 * Therefore, the functions should be called in the following order:
1754 1750 * suspend_routine()
1755 1751 * sfmmu_ctxdoms_lock()
1756 1752 * pause_cpus()
1757 1753 * suspend()
1758 1754 * if (suspend failed)
1759 1755 * sfmmu_ctxdoms_unlock()
1760 1756 * ...
1761 1757 * sfmmu_ctxdoms_remove()
1762 1758 * resume_cpus()
1763 1759 * sfmmu_ctxdoms_update()
1764 1760 * sfmmu_ctxdoms_unlock()
1765 1761 */
1766 1762 static cpuset_t sfmmu_ctxdoms_pset;
1767 1763
1768 1764 void
1769 1765 sfmmu_ctxdoms_remove()
1770 1766 {
1771 1767 processorid_t id;
1772 1768 cpu_t *cp;
1773 1769
1774 1770 /*
1775 1771 * Record the CPUs that have domains in sfmmu_ctxdoms_pset, so they can
1776 1772 * be restored post-migration. A CPU may be powered off and not have a
1777 1773 * domain, for example.
1778 1774 */
1779 1775 CPUSET_ZERO(sfmmu_ctxdoms_pset);
1780 1776
1781 1777 for (id = 0; id < NCPU; id++) {
1782 1778 if ((cp = cpu[id]) != NULL && CPU_MMU_CTXP(cp) != NULL) {
1783 1779 CPUSET_ADD(sfmmu_ctxdoms_pset, id);
1784 1780 CPU_MMU_CTXP(cp) = NULL;
1785 1781 }
1786 1782 }
1787 1783 }
1788 1784
1789 1785 void
1790 1786 sfmmu_ctxdoms_lock(void)
1791 1787 {
1792 1788 int idx;
1793 1789 mmu_ctx_t *mmu_ctxp;
1794 1790
1795 1791 sfmmu_hat_lock_all();
1796 1792
1797 1793 /*
1798 1794 * At this point, no thread can be in sfmmu_ctx_wrap_around, because
1799 1795 * hat_lock is always taken before calling it.
1800 1796 *
1801 1797 * For each domain, set mmu_cnum to max so no more contexts can be
1802 1798 * allocated, and wrap to flush on-CPU contexts and force threads to
1803 1799 * acquire a new context when we later drop hat_lock after migration.
1804 1800 * Setting mmu_cnum may race with sfmmu_alloc_ctx which also sets cnum,
1805 1801 * but the latter uses CAS and will miscompare and not overwrite it.
1806 1802 */
1807 1803 kpreempt_disable(); /* required by sfmmu_ctx_wrap_around */
1808 1804 for (idx = 0; idx < max_mmu_ctxdoms; idx++) {
1809 1805 if ((mmu_ctxp = mmu_ctxs_tbl[idx]) != NULL) {
1810 1806 mutex_enter(&mmu_ctxp->mmu_lock);
1811 1807 mmu_ctxp->mmu_cnum = mmu_ctxp->mmu_nctxs;
1812 1808 /* make sure updated cnum visible */
1813 1809 membar_enter();
1814 1810 mutex_exit(&mmu_ctxp->mmu_lock);
1815 1811 sfmmu_ctx_wrap_around(mmu_ctxp, B_FALSE);
1816 1812 }
1817 1813 }
1818 1814 kpreempt_enable();
1819 1815 }
1820 1816
1821 1817 void
1822 1818 sfmmu_ctxdoms_unlock(void)
1823 1819 {
1824 1820 sfmmu_hat_unlock_all();
1825 1821 }
1826 1822
1827 1823 void
1828 1824 sfmmu_ctxdoms_update(void)
1829 1825 {
1830 1826 processorid_t id;
1831 1827 cpu_t *cp;
1832 1828 uint_t idx;
1833 1829 mmu_ctx_t *mmu_ctxp;
1834 1830
1835 1831 /*
1836 1832 * Free all context domains. As a side effect, this increases
1837 1833 * mmu_saved_gnum to the maximum gnum over all domains, which is used to
1838 1834 * init gnum in the new domains, which therefore will be larger than the
1839 1835 * sfmmu gnum for any process, guaranteeing that every process will see
1840 1836 * a new generation and allocate a new context regardless of what new
1841 1837 * domain it runs in.
1842 1838 */
1843 1839 mutex_enter(&cpu_lock);
1844 1840
1845 1841 for (idx = 0; idx < max_mmu_ctxdoms; idx++) {
1846 1842 if (mmu_ctxs_tbl[idx] != NULL) {
1847 1843 mmu_ctxp = mmu_ctxs_tbl[idx];
1848 1844 mmu_ctxs_tbl[idx] = NULL;
1849 1845 sfmmu_ctxdom_free(mmu_ctxp);
1850 1846 }
1851 1847 }
1852 1848
1853 1849 for (id = 0; id < NCPU; id++) {
1854 1850 if (CPU_IN_SET(sfmmu_ctxdoms_pset, id) &&
1855 1851 (cp = cpu[id]) != NULL)
1856 1852 sfmmu_cpu_init(cp);
1857 1853 }
1858 1854 mutex_exit(&cpu_lock);
1859 1855 }
1860 1856 #endif
1861 1857
1862 1858 /*
1863 1859 * Hat_setup, makes an address space context the current active one.
1864 1860 * In sfmmu this translates to setting the secondary context with the
1865 1861 * corresponding context.
1866 1862 */
1867 1863 void
1868 1864 hat_setup(struct hat *sfmmup, int allocflag)
1869 1865 {
1870 1866 hatlock_t *hatlockp;
1871 1867
1872 1868 /* Init needs some special treatment. */
1873 1869 if (allocflag == HAT_INIT) {
1874 1870 /*
1875 1871 * Make sure that we have
1876 1872 * 1. a TSB
1877 1873 * 2. a valid ctx that doesn't get stolen after this point.
1878 1874 */
1879 1875 hatlockp = sfmmu_hat_enter(sfmmup);
1880 1876
1881 1877 /*
1882 1878 * Swap in the TSB. hat_init() allocates tsbinfos without
1883 1879 * TSBs, but we need one for init, since the kernel does some
1884 1880 * special things to set up its stack and needs the TSB to
1885 1881 * resolve page faults.
1886 1882 */
1887 1883 sfmmu_tsb_swapin(sfmmup, hatlockp);
1888 1884
1889 1885 sfmmu_get_ctx(sfmmup);
1890 1886
1891 1887 sfmmu_hat_exit(hatlockp);
1892 1888 } else {
1893 1889 ASSERT(allocflag == HAT_ALLOC);
1894 1890
1895 1891 hatlockp = sfmmu_hat_enter(sfmmup);
1896 1892 kpreempt_disable();
1897 1893
1898 1894 CPUSET_ADD(sfmmup->sfmmu_cpusran, CPU->cpu_id);
1899 1895 /*
1900 1896 * sfmmu_setctx_sec takes <pgsz|cnum> as a parameter,
1901 1897 * pagesize bits don't matter in this case since we are passing
1902 1898 * INVALID_CONTEXT to it.
1903 1899 * Compatibility Note: hw takes care of MMU_SCONTEXT1
1904 1900 */
1905 1901 sfmmu_setctx_sec(INVALID_CONTEXT);
1906 1902 sfmmu_clear_utsbinfo();
1907 1903
1908 1904 kpreempt_enable();
1909 1905 sfmmu_hat_exit(hatlockp);
1910 1906 }
1911 1907 }
[ 359 lines elided ]
1912 1908
1913 1909 /*
1914 1910 * Free all the translation resources for the specified address space.
1915 1911 * Called from as_free when an address space is being destroyed.
1916 1912 */
1917 1913 void
1918 1914 hat_free_start(struct hat *sfmmup)
1919 1915 {
1920 1916 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
1921 1917 ASSERT(sfmmup != ksfmmup);
1922 - ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
1923 1918
1924 1919 sfmmup->sfmmu_free = 1;
1925 1920 if (sfmmup->sfmmu_scdp != NULL) {
1926 1921 sfmmu_leave_scd(sfmmup, 0);
1927 1922 }
1928 1923
1929 1924 ASSERT(sfmmup->sfmmu_scdp == NULL);
1930 1925 }
1931 1926
1932 1927 void
1933 1928 hat_free_end(struct hat *sfmmup)
1934 1929 {
1935 1930 int i;
1936 1931
1937 - ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
1938 1932 ASSERT(sfmmup->sfmmu_free == 1);
1939 1933 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
1940 1934 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0);
1941 1935 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0);
1942 1936 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
1943 1937 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
1944 1938 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
1945 1939
1946 1940 if (sfmmup->sfmmu_rmstat) {
1947 1941 hat_freestat(sfmmup->sfmmu_as, NULL);
1948 1942 }
1949 1943
1950 1944 while (sfmmup->sfmmu_tsb != NULL) {
1951 1945 struct tsb_info *next = sfmmup->sfmmu_tsb->tsb_next;
1952 1946 sfmmu_tsbinfo_free(sfmmup->sfmmu_tsb);
1953 1947 sfmmup->sfmmu_tsb = next;
1954 1948 }
1955 1949
1956 1950 if (sfmmup->sfmmu_srdp != NULL) {
1957 1951 sfmmu_leave_srd(sfmmup);
1958 1952 ASSERT(sfmmup->sfmmu_srdp == NULL);
1959 1953 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
1960 1954 if (sfmmup->sfmmu_hmeregion_links[i] != NULL) {
1961 1955 kmem_free(sfmmup->sfmmu_hmeregion_links[i],
1962 1956 SFMMU_L2_HMERLINKS_SIZE);
1963 1957 sfmmup->sfmmu_hmeregion_links[i] = NULL;
1964 1958 }
1965 1959 }
1966 1960 }
1967 1961 sfmmu_free_sfmmu(sfmmup);
1968 1962
1969 1963 #ifdef DEBUG
1970 1964 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
1971 1965 ASSERT(sfmmup->sfmmu_hmeregion_links[i] == NULL);
1972 1966 }
1973 1967 #endif
1974 1968
1975 1969 kmem_cache_free(sfmmuid_cache, sfmmup);
[ 28 lines elided ]
1976 1970 }
1977 1971
1978 1972 /*
1979 1973 * Set up any translation structures, for the specified address space,
1980 1974 * that are needed or preferred when the process is being swapped in.
1981 1975 */
1982 1976 /* ARGSUSED */
1983 1977 void
1984 1978 hat_swapin(struct hat *hat)
1985 1979 {
1986 - ASSERT(hat->sfmmu_xhat_provider == NULL);
1987 1980 }
1988 1981
1989 1982 /*
1990 1983 * Free all of the translation resources, for the specified address space,
1991 1984 * that can be freed while the process is swapped out. Called from as_swapout.
1992 1985 * Also, free up the ctx that this process was using.
1993 1986 */
1994 1987 void
1995 1988 hat_swapout(struct hat *sfmmup)
1996 1989 {
1997 1990 struct hmehash_bucket *hmebp;
1998 1991 struct hme_blk *hmeblkp;
1999 1992 struct hme_blk *pr_hblk = NULL;
2000 1993 struct hme_blk *nx_hblk;
[ 4 lines elided ]
2001 1994 int i;
2002 1995 struct hme_blk *list = NULL;
2003 1996 hatlock_t *hatlockp;
2004 1997 struct tsb_info *tsbinfop;
2005 1998 struct free_tsb {
2006 1999 struct free_tsb *next;
2007 2000 struct tsb_info *tsbinfop;
2008 2001 }; /* free list of TSBs */
2009 2002 struct free_tsb *freelist, *last, *next;
2010 2003
2011 - ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
2012 2004 SFMMU_STAT(sf_swapout);
2013 2005
2014 2006 /*
2015 2007 * There is no way to go from an as to all its translations in sfmmu.
2016 2008 * Here is one of the times when we take the big hit and traverse
2017 2009 * the hash looking for hme_blks to free up. Not only do we free up
2018 2010 * this as's hme_blks but all those that are free. We are obviously
2019 2011 * swapping because we need memory so let's free up as much
2020 2012 * as we can.
2021 2013 *
2022 2014 * Note that we don't flush TLB/TSB here -- it's not necessary
2023 2015 * because:
2024 2016 * 1) we free the ctx we're using and throw away the TSB(s);
[ 3 lines elided ]
2025 2017 * 2) processes aren't runnable while being swapped out.
2026 2018 */
2027 2019 ASSERT(sfmmup != KHATID);
2028 2020 for (i = 0; i <= UHMEHASH_SZ; i++) {
2029 2021 hmebp = &uhme_hash[i];
2030 2022 SFMMU_HASH_LOCK(hmebp);
2031 2023 hmeblkp = hmebp->hmeblkp;
2032 2024 pr_hblk = NULL;
2033 2025 while (hmeblkp) {
2034 2026
2035 - ASSERT(!hmeblkp->hblk_xhat_bit);
2036 -
2037 2027 if ((hmeblkp->hblk_tag.htag_id == sfmmup) &&
2038 2028 !hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) {
2039 2029 ASSERT(!hmeblkp->hblk_shared);
2040 2030 (void) sfmmu_hblk_unload(sfmmup, hmeblkp,
2041 2031 (caddr_t)get_hblk_base(hmeblkp),
2042 2032 get_hblk_endaddr(hmeblkp),
2043 2033 NULL, HAT_UNLOAD);
2044 2034 }
2045 2035 nx_hblk = hmeblkp->hblk_next;
2046 2036 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
2047 2037 ASSERT(!hmeblkp->hblk_lckcnt);
2048 2038 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
2049 2039 &list, 0);
2050 2040 } else {
2051 2041 pr_hblk = hmeblkp;
2052 2042 }
2053 2043 hmeblkp = nx_hblk;
2054 2044 }
2055 2045 SFMMU_HASH_UNLOCK(hmebp);
2056 2046 }
2057 2047
2058 2048 sfmmu_hblks_list_purge(&list, 0);
2059 2049
2060 2050 /*
2061 2051 * Now free up the ctx so that others can reuse it.
2062 2052 */
2063 2053 hatlockp = sfmmu_hat_enter(sfmmup);
2064 2054
2065 2055 sfmmu_invalidate_ctx(sfmmup);
2066 2056
2067 2057 /*
2068 2058 * Free TSBs, but not tsbinfos, and set SWAPPED flag.
2069 2059 * If TSBs were never swapped in, just return.
2070 2060 * This implies that we don't support partial swapping
2071 2061 * of TSBs -- either all are swapped out, or none are.
2072 2062 *
2073 2063 * We must hold the HAT lock here to prevent racing with another
2074 2064 * thread trying to unmap TTEs from the TSB or running the post-
2075 2065 * relocator after relocating the TSB's memory. Unfortunately, we
2076 2066 * can't free memory while holding the HAT lock or we could
2077 2067 * deadlock, so we build a list of TSBs to be freed after marking
2078 2068 * the tsbinfos as swapped out and free them after dropping the
2079 2069 * lock.
2080 2070 */
2081 2071 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
2082 2072 sfmmu_hat_exit(hatlockp);
2083 2073 return;
2084 2074 }
2085 2075
2086 2076 SFMMU_FLAGS_SET(sfmmup, HAT_SWAPPED);
2087 2077 last = freelist = NULL;
2088 2078 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
2089 2079 tsbinfop = tsbinfop->tsb_next) {
2090 2080 ASSERT((tsbinfop->tsb_flags & TSB_SWAPPED) == 0);
2091 2081
2092 2082 /*
2093 2083 * Cast the TSB into a struct free_tsb and put it on the free
2094 2084 * list.
2095 2085 */
2096 2086 if (freelist == NULL) {
2097 2087 last = freelist = (struct free_tsb *)tsbinfop->tsb_va;
2098 2088 } else {
2099 2089 last->next = (struct free_tsb *)tsbinfop->tsb_va;
2100 2090 last = last->next;
2101 2091 }
2102 2092 last->next = NULL;
2103 2093 last->tsbinfop = tsbinfop;
2104 2094 tsbinfop->tsb_flags |= TSB_SWAPPED;
2105 2095 /*
2106 2096 * Zero out the TTE to clear the valid bit.
2107 2097 * Note we can't use a value like 0xbad because we want to
2108 2098 * ensure diagnostic bits are NEVER set on TTEs that might
2109 2099 * be loaded. The intent is to catch any invalid access
2110 2100 * to the swapped TSB, such as a thread running with a valid
2111 2101 * context without first calling sfmmu_tsb_swapin() to
2112 2102 * allocate TSB memory.
2113 2103 */
2114 2104 tsbinfop->tsb_tte.ll = 0;
2115 2105 }
2116 2106
2117 2107 /* Now we can drop the lock and free the TSB memory. */
2118 2108 sfmmu_hat_exit(hatlockp);
2119 2109 for (; freelist != NULL; freelist = next) {
2120 2110 next = freelist->next;
2121 2111 sfmmu_tsb_free(freelist->tsbinfop);
2122 2112 }
2123 2113 }
2124 2114
2125 2115 /*
2126 2116 * Duplicate the translations of an as into another newas
2127 2117 */
[ 81 lines elided ]
2128 2118 /* ARGSUSED */
2129 2119 int
2130 2120 hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len,
2131 2121 uint_t flag)
2132 2122 {
2133 2123 sf_srd_t *srdp;
2134 2124 sf_scd_t *scdp;
2135 2125 int i;
2136 2126 extern uint_t get_color_start(struct as *);
2137 2127
2138 - ASSERT(hat->sfmmu_xhat_provider == NULL);
2139 2128 ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW) ||
2140 2129 (flag == HAT_DUP_SRD));
2141 2130 ASSERT(hat != ksfmmup);
2142 2131 ASSERT(newhat != ksfmmup);
2143 2132 ASSERT(flag != HAT_DUP_ALL || hat->sfmmu_srdp == newhat->sfmmu_srdp);
2144 2133
2145 2134 if (flag == HAT_DUP_COW) {
2146 2135 panic("hat_dup: HAT_DUP_COW not supported");
2147 2136 }
2148 2137
2149 2138 if (flag == HAT_DUP_SRD && ((srdp = hat->sfmmu_srdp) != NULL)) {
2150 2139 ASSERT(srdp->srd_evp != NULL);
2151 2140 VN_HOLD(srdp->srd_evp);
2152 2141 ASSERT(srdp->srd_refcnt > 0);
2153 2142 newhat->sfmmu_srdp = srdp;
2154 2143 atomic_inc_32((volatile uint_t *)&srdp->srd_refcnt);
2155 2144 }
2156 2145
2157 2146 /*
2158 2147 * HAT_DUP_ALL flag is used after as duplication is done.
2159 2148 */
2160 2149 if (flag == HAT_DUP_ALL && ((srdp = newhat->sfmmu_srdp) != NULL)) {
2161 2150 ASSERT(newhat->sfmmu_srdp->srd_refcnt >= 2);
2162 2151 newhat->sfmmu_rtteflags = hat->sfmmu_rtteflags;
2163 2152 if (hat->sfmmu_flags & HAT_4MTEXT_FLAG) {
2164 2153 newhat->sfmmu_flags |= HAT_4MTEXT_FLAG;
2165 2154 }
2166 2155
2167 2156 /* check if need to join scd */
2168 2157 if ((scdp = hat->sfmmu_scdp) != NULL &&
2169 2158 newhat->sfmmu_scdp != scdp) {
2170 2159 int ret;
2171 2160 SF_RGNMAP_IS_SUBSET(&newhat->sfmmu_region_map,
2172 2161 &scdp->scd_region_map, ret);
2173 2162 ASSERT(ret);
2174 2163 sfmmu_join_scd(scdp, newhat);
2175 2164 ASSERT(newhat->sfmmu_scdp == scdp &&
2176 2165 scdp->scd_refcnt >= 2);
2177 2166 for (i = 0; i < max_mmu_page_sizes; i++) {
2178 2167 newhat->sfmmu_ismttecnt[i] =
2179 2168 hat->sfmmu_ismttecnt[i];
2180 2169 newhat->sfmmu_scdismttecnt[i] =
2181 2170 hat->sfmmu_scdismttecnt[i];
2182 2171 }
2183 2172 }
2184 2173
2185 2174 sfmmu_check_page_sizes(newhat, 1);
2186 2175 }
2187 2176
2188 2177 if (flag == HAT_DUP_ALL && consistent_coloring == 0 &&
2189 2178 update_proc_pgcolorbase_after_fork != 0) {
2190 2179 hat->sfmmu_clrbin = get_color_start(hat->sfmmu_as);
2191 2180 }
2192 2181 return (0);
2193 2182 }
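
For orientation, a hedged sketch of the call order implied by the flag handling above (the caller shown is an assumption; hat_dup() ignores addr and len, so they are passed as NULL/0): HAT_DUP_SRD duplicates the SRD attachment before the segments are copied, and HAT_DUP_ALL runs once address-space duplication is done.

/*
 * Illustrative sequence only, in the style of an as_dup()-like caller;
 * old_as and new_as are hypothetical locals.
 */
(void) hat_dup(old_as->a_hat, new_as->a_hat, NULL, 0, HAT_DUP_SRD);
/* ... duplicate the address space segments ... */
(void) hat_dup(old_as->a_hat, new_as->a_hat, NULL, 0, HAT_DUP_ALL);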
2194 2183
2195 2184 void
2196 2185 hat_memload(struct hat *hat, caddr_t addr, struct page *pp,
2197 2186 uint_t attr, uint_t flags)
[ 49 lines elided ]
2198 2187 {
2199 2188 hat_do_memload(hat, addr, pp, attr, flags,
2200 2189 SFMMU_INVALID_SHMERID);
2201 2190 }
2202 2191
2203 2192 void
2204 2193 hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
2205 2194 uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
2206 2195 {
2207 2196 uint_t rid;
2208 - if (rcookie == HAT_INVALID_REGION_COOKIE ||
2209 - hat->sfmmu_xhat_provider != NULL) {
2197 + if (rcookie == HAT_INVALID_REGION_COOKIE) {
2210 2198 hat_do_memload(hat, addr, pp, attr, flags,
2211 2199 SFMMU_INVALID_SHMERID);
2212 2200 return;
2213 2201 }
2214 2202 rid = (uint_t)((uint64_t)rcookie);
2215 2203 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
2216 2204 hat_do_memload(hat, addr, pp, attr, flags, rid);
2217 2205 }
2218 2206
2219 2207 /*
2220 2208 * Set up addr to map to page pp with protection prot.
2221 2209 * As an optimization we also load the TSB with the
2222 2210 * corresponding tte but it is no big deal if the tte gets kicked out.
2223 2211 */
2224 2212 static void
2225 2213 hat_do_memload(struct hat *hat, caddr_t addr, struct page *pp,
2226 2214 uint_t attr, uint_t flags, uint_t rid)
2227 2215 {
2228 2216 tte_t tte;
2229 2217
2230 2218
2231 2219 ASSERT(hat != NULL);
2232 2220 ASSERT(PAGE_LOCKED(pp));
[ 13 lines elided ]
2233 2221 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
2234 2222 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
2235 2223 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2236 2224 SFMMU_VALIDATE_HMERID(hat, rid, addr, MMU_PAGESIZE);
2237 2225
2238 2226 if (PP_ISFREE(pp)) {
2239 2227 panic("hat_memload: loading a mapping to free page %p",
2240 2228 (void *)pp);
2241 2229 }
2242 2230
2243 - if (hat->sfmmu_xhat_provider) {
2244 - /* no regions for xhats */
2245 - ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
2246 - XHAT_MEMLOAD(hat, addr, pp, attr, flags);
2247 - return;
2248 - }
2249 -
2250 2231 ASSERT((hat == ksfmmup) ||
2251 2232 AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock));
2252 2233
2253 2234 if (flags & ~SFMMU_LOAD_ALLFLAG)
2254 2235 cmn_err(CE_NOTE, "hat_memload: unsupported flags %d",
2255 2236 flags & ~SFMMU_LOAD_ALLFLAG);
2256 2237
2257 2238 if (hat->sfmmu_rmstat)
2258 2239 hat_resvstat(MMU_PAGESIZE, hat->sfmmu_as, addr);
2259 2240
2260 2241 #if defined(SF_ERRATA_57)
2261 2242 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2262 2243 (addr < errata57_limit) && (attr & PROT_EXEC) &&
2263 2244 !(flags & HAT_LOAD_SHARE)) {
2264 2245 cmn_err(CE_WARN, "hat_memload: illegal attempt to make user "
2265 2246 " page executable");
2266 2247 attr &= ~PROT_EXEC;
2267 2248 }
2268 2249 #endif
2269 2250
2270 2251 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K);
2271 2252 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, flags, rid);
2272 2253
2273 2254 /*
2274 2255 * Check TSB and TLB page sizes.
2275 2256 */
2276 2257 if ((flags & HAT_LOAD_SHARE) == 0) {
2277 2258 sfmmu_check_page_sizes(hat, 1);
2278 2259 }
2279 2260 }
2280 2261
2281 2262 /*
2282 2263 * hat_devload can be called to map real memory (e.g.
2283 2264 * /dev/kmem) and even though hat_devload will determine pf is
2284 2265 * for memory, it will be unable to get a shared lock on the
2285 2266 * page (because someone else has it exclusively) and will
2286 2267 * pass dp = NULL. If tteload doesn't get a non-NULL
2287 2268 * page pointer it can't cache memory.
2288 2269 */
[ 29 lines elided ]
2289 2270 void
2290 2271 hat_devload(struct hat *hat, caddr_t addr, size_t len, pfn_t pfn,
2291 2272 uint_t attr, int flags)
2292 2273 {
2293 2274 tte_t tte;
2294 2275 struct page *pp = NULL;
2295 2276 int use_lgpg = 0;
2296 2277
2297 2278 ASSERT(hat != NULL);
2298 2279
2299 - if (hat->sfmmu_xhat_provider) {
2300 - XHAT_DEVLOAD(hat, addr, len, pfn, attr, flags);
2301 - return;
2302 - }
2303 -
2304 2280 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
2305 2281 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2306 2282 ASSERT((hat == ksfmmup) ||
2307 2283 AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock));
2308 2284 if (len == 0)
2309 2285 panic("hat_devload: zero len");
2310 2286 if (flags & ~SFMMU_LOAD_ALLFLAG)
2311 2287 cmn_err(CE_NOTE, "hat_devload: unsupported flags %d",
2312 2288 flags & ~SFMMU_LOAD_ALLFLAG);
2313 2289
2314 2290 #if defined(SF_ERRATA_57)
2315 2291 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2316 2292 (addr < errata57_limit) && (attr & PROT_EXEC) &&
2317 2293 !(flags & HAT_LOAD_SHARE)) {
2318 2294 cmn_err(CE_WARN, "hat_devload: illegal attempt to make user "
2319 2295 " page executable");
2320 2296 attr &= ~PROT_EXEC;
2321 2297 }
2322 2298 #endif
2323 2299
2324 2300 /*
2325 2301 * If it's a memory page find its pp
2326 2302 */
2327 2303 if (!(flags & HAT_LOAD_NOCONSIST) && pf_is_memory(pfn)) {
2328 2304 pp = page_numtopp_nolock(pfn);
2329 2305 if (pp == NULL) {
2330 2306 flags |= HAT_LOAD_NOCONSIST;
2331 2307 } else {
2332 2308 if (PP_ISFREE(pp)) {
2333 2309 panic("hat_memload: loading "
2334 2310 "a mapping to free page %p",
2335 2311 (void *)pp);
2336 2312 }
2337 2313 if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) {
2338 2314 panic("hat_memload: loading a mapping "
2339 2315 "to unlocked relocatable page %p",
2340 2316 (void *)pp);
2341 2317 }
2342 2318 ASSERT(len == MMU_PAGESIZE);
2343 2319 }
2344 2320 }
2345 2321
2346 2322 if (hat->sfmmu_rmstat)
2347 2323 hat_resvstat(len, hat->sfmmu_as, addr);
2348 2324
2349 2325 if (flags & HAT_LOAD_NOCONSIST) {
2350 2326 attr |= SFMMU_UNCACHEVTTE;
2351 2327 use_lgpg = 1;
2352 2328 }
2353 2329 if (!pf_is_memory(pfn)) {
2354 2330 attr |= SFMMU_UNCACHEPTTE | HAT_NOSYNC;
2355 2331 use_lgpg = 1;
2356 2332 switch (attr & HAT_ORDER_MASK) {
2357 2333 case HAT_STRICTORDER:
2358 2334 case HAT_UNORDERED_OK:
2359 2335 /*
2360 2336 * we set the side effect bit for all non
2361 2337 * memory mappings unless merging is ok
2362 2338 */
2363 2339 attr |= SFMMU_SIDEFFECT;
2364 2340 break;
2365 2341 case HAT_MERGING_OK:
2366 2342 case HAT_LOADCACHING_OK:
2367 2343 case HAT_STORECACHING_OK:
2368 2344 break;
2369 2345 default:
2370 2346 panic("hat_devload: bad attr");
2371 2347 break;
2372 2348 }
2373 2349 }
2374 2350 while (len) {
2375 2351 if (!use_lgpg) {
2376 2352 sfmmu_memtte(&tte, pfn, attr, TTE8K);
2377 2353 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2378 2354 flags, SFMMU_INVALID_SHMERID);
2379 2355 len -= MMU_PAGESIZE;
2380 2356 addr += MMU_PAGESIZE;
2381 2357 pfn++;
2382 2358 continue;
2383 2359 }
2384 2360 /*
2385 2361 * try to use large pages, check va/pa alignments
2386 2362 * Note that 32M/256M page sizes are not (yet) supported.
2387 2363 */
2388 2364 if ((len >= MMU_PAGESIZE4M) &&
2389 2365 !((uintptr_t)addr & MMU_PAGEOFFSET4M) &&
2390 2366 !(disable_large_pages & (1 << TTE4M)) &&
2391 2367 !(mmu_ptob(pfn) & MMU_PAGEOFFSET4M)) {
2392 2368 sfmmu_memtte(&tte, pfn, attr, TTE4M);
2393 2369 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2394 2370 flags, SFMMU_INVALID_SHMERID);
2395 2371 len -= MMU_PAGESIZE4M;
2396 2372 addr += MMU_PAGESIZE4M;
2397 2373 pfn += MMU_PAGESIZE4M / MMU_PAGESIZE;
2398 2374 } else if ((len >= MMU_PAGESIZE512K) &&
2399 2375 !((uintptr_t)addr & MMU_PAGEOFFSET512K) &&
2400 2376 !(disable_large_pages & (1 << TTE512K)) &&
2401 2377 !(mmu_ptob(pfn) & MMU_PAGEOFFSET512K)) {
2402 2378 sfmmu_memtte(&tte, pfn, attr, TTE512K);
2403 2379 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2404 2380 flags, SFMMU_INVALID_SHMERID);
2405 2381 len -= MMU_PAGESIZE512K;
2406 2382 addr += MMU_PAGESIZE512K;
2407 2383 pfn += MMU_PAGESIZE512K / MMU_PAGESIZE;
2408 2384 } else if ((len >= MMU_PAGESIZE64K) &&
2409 2385 !((uintptr_t)addr & MMU_PAGEOFFSET64K) &&
2410 2386 !(disable_large_pages & (1 << TTE64K)) &&
2411 2387 !(mmu_ptob(pfn) & MMU_PAGEOFFSET64K)) {
2412 2388 sfmmu_memtte(&tte, pfn, attr, TTE64K);
2413 2389 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2414 2390 flags, SFMMU_INVALID_SHMERID);
2415 2391 len -= MMU_PAGESIZE64K;
2416 2392 addr += MMU_PAGESIZE64K;
2417 2393 pfn += MMU_PAGESIZE64K / MMU_PAGESIZE;
2418 2394 } else {
2419 2395 sfmmu_memtte(&tte, pfn, attr, TTE8K);
2420 2396 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2421 2397 flags, SFMMU_INVALID_SHMERID);
2422 2398 len -= MMU_PAGESIZE;
2423 2399 addr += MMU_PAGESIZE;
2424 2400 pfn++;
2425 2401 }
2426 2402 }
2427 2403
2428 2404 /*
2429 2405 * Check TSB and TLB page sizes.
2430 2406 */
2431 2407 if ((flags & HAT_LOAD_SHARE) == 0) {
2432 2408 sfmmu_check_page_sizes(hat, 1);
2433 2409 }
2434 2410 }
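
The loop above repeats the same three-part eligibility check for each candidate page size. A hedged restatement of that check as a standalone predicate (devload_can_use_pgsz() is a hypothetical helper, not part of this file):

/*
 * Hypothetical helper, shown only to restate the test in the loop
 * above: the remaining length must cover the page size, the virtual
 * and physical addresses must both be aligned to it, and the size
 * must not be administratively disabled.
 */
static int
devload_can_use_pgsz(caddr_t addr, pfn_t pfn, size_t len, int ttesz)
{
	size_t pgsz = TTEBYTES(ttesz);

	return (len >= pgsz &&
	    ((uintptr_t)addr & (pgsz - 1)) == 0 &&
	    (mmu_ptob(pfn) & (pgsz - 1)) == 0 &&
	    !(disable_large_pages & (1 << ttesz)));
}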
2435 2411
2436 2412 void
2437 2413 hat_memload_array(struct hat *hat, caddr_t addr, size_t len,
2438 2414 struct page **pps, uint_t attr, uint_t flags)
2439 2415 {
[ 126 lines elided ]
2440 2416 hat_do_memload_array(hat, addr, len, pps, attr, flags,
2441 2417 SFMMU_INVALID_SHMERID);
2442 2418 }
2443 2419
2444 2420 void
2445 2421 hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
2446 2422 struct page **pps, uint_t attr, uint_t flags,
2447 2423 hat_region_cookie_t rcookie)
2448 2424 {
2449 2425 uint_t rid;
2450 - if (rcookie == HAT_INVALID_REGION_COOKIE ||
2451 - hat->sfmmu_xhat_provider != NULL) {
2426 + if (rcookie == HAT_INVALID_REGION_COOKIE) {
2452 2427 hat_do_memload_array(hat, addr, len, pps, attr, flags,
2453 2428 SFMMU_INVALID_SHMERID);
2454 2429 return;
2455 2430 }
2456 2431 rid = (uint_t)((uint64_t)rcookie);
2457 2432 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
2458 2433 hat_do_memload_array(hat, addr, len, pps, attr, flags, rid);
2459 2434 }
2460 2435
2461 2436 /*
2462 2437 * Map the largest extent possible out of the page array. The array may NOT
2463 2438 * be in order. The largest possible mapping a page can have
2464 2439 * is specified in the p_szc field. The p_szc field
2465 2440 * cannot change as long as there are any mappings (large or small)
2466 2441 * to any of the pages that make up the large page. (i.e. any
2467 2442 * promotion/demotion of page size is not up to the hat but up to
2468 2443 * the page free list manager). The array
2469 2444 * should consist of properly aligned contiguous pages that are
2470 2445 * part of a big page for a large mapping to be created.
2471 2446 */
2472 2447 static void
2473 2448 hat_do_memload_array(struct hat *hat, caddr_t addr, size_t len,
2474 2449 struct page **pps, uint_t attr, uint_t flags, uint_t rid)
2475 2450 {
[ 14 lines elided ]
2476 2451 int ttesz;
2477 2452 size_t mapsz;
2478 2453 pgcnt_t numpg, npgs;
2479 2454 tte_t tte;
2480 2455 page_t *pp;
2481 2456 uint_t large_pages_disable;
2482 2457
2483 2458 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
2484 2459 SFMMU_VALIDATE_HMERID(hat, rid, addr, len);
2485 2460
2486 - if (hat->sfmmu_xhat_provider) {
2487 - ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
2488 - XHAT_MEMLOAD_ARRAY(hat, addr, len, pps, attr, flags);
2489 - return;
2490 - }
2491 -
2492 2461 if (hat->sfmmu_rmstat)
2493 2462 hat_resvstat(len, hat->sfmmu_as, addr);
2494 2463
2495 2464 #if defined(SF_ERRATA_57)
2496 2465 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2497 2466 (addr < errata57_limit) && (attr & PROT_EXEC) &&
2498 2467 !(flags & HAT_LOAD_SHARE)) {
2499 2468 cmn_err(CE_WARN, "hat_memload_array: illegal attempt to make "
2500 2469 "user page executable");
2501 2470 attr &= ~PROT_EXEC;
2502 2471 }
2503 2472 #endif
2504 2473
2505 2474 /* Get number of pages */
2506 2475 npgs = len >> MMU_PAGESHIFT;
2507 2476
2508 2477 if (flags & HAT_LOAD_SHARE) {
2509 2478 large_pages_disable = disable_ism_large_pages;
2510 2479 } else {
2511 2480 large_pages_disable = disable_large_pages;
2512 2481 }
2513 2482
2514 2483 if (npgs < NHMENTS || large_pages_disable == LARGE_PAGES_OFF) {
2515 2484 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs,
2516 2485 rid);
2517 2486 return;
2518 2487 }
2519 2488
2520 2489 while (npgs >= NHMENTS) {
2521 2490 pp = *pps;
2522 2491 for (ttesz = pp->p_szc; ttesz != TTE8K; ttesz--) {
2523 2492 /*
2524 2493 * Check if this page size is disabled.
2525 2494 */
2526 2495 if (large_pages_disable & (1 << ttesz))
2527 2496 continue;
2528 2497
2529 2498 numpg = TTEPAGES(ttesz);
2530 2499 mapsz = numpg << MMU_PAGESHIFT;
2531 2500 if ((npgs >= numpg) &&
2532 2501 IS_P2ALIGNED(addr, mapsz) &&
2533 2502 IS_P2ALIGNED(pp->p_pagenum, numpg)) {
2534 2503 /*
2535 2504 * At this point we have enough pages and
2536 2505 * we know the virtual address and the pfn
2537 2506 * are properly aligned. We still need
2538 2507 * to check for physical contiguity but since
2539 2508 * it is very likely that this is the case
2540 2509 * we will assume they are so and undo
2541 2510 * the request if necessary. It would
2542 2511 * be great if we could get a hint flag
2543 2512 * like HAT_CONTIG which would tell us
2544 2513 * the pages are contiguous for sure.
2545 2514 */
2546 2515 sfmmu_memtte(&tte, (*pps)->p_pagenum,
2547 2516 attr, ttesz);
2548 2517 if (!sfmmu_tteload_array(hat, &tte, addr,
2549 2518 pps, flags, rid)) {
2550 2519 break;
2551 2520 }
2552 2521 }
2553 2522 }
2554 2523 if (ttesz == TTE8K) {
2555 2524 /*
2556 2525 * We were not able to map the array using a large page;
2557 2526 * batch an hmeblk, or a fraction of one, at a time.
2558 2527 */
2559 2528 numpg = ((uintptr_t)addr >> MMU_PAGESHIFT)
2560 2529 & (NHMENTS-1);
2561 2530 numpg = NHMENTS - numpg;
2562 2531 ASSERT(numpg <= npgs);
2563 2532 mapsz = numpg * MMU_PAGESIZE;
2564 2533 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags,
2565 2534 numpg, rid);
2566 2535 }
2567 2536 addr += mapsz;
2568 2537 npgs -= numpg;
2569 2538 pps += numpg;
2570 2539 }
2571 2540
2572 2541 if (npgs) {
2573 2542 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs,
2574 2543 rid);
2575 2544 }
2576 2545
2577 2546 /*
2578 2547 * Check TSB and TLB page sizes.
2579 2548 */
2580 2549 if ((flags & HAT_LOAD_SHARE) == 0) {
2581 2550 sfmmu_check_page_sizes(hat, 1);
2582 2551 }
2583 2552 }
2584 2553
2585 2554 /*
2586 2555 * Function tries to batch 8K pages into the same hme blk.
2587 2556 */
2588 2557 static void
2589 2558 sfmmu_memload_batchsmall(struct hat *hat, caddr_t vaddr, page_t **pps,
2590 2559 uint_t attr, uint_t flags, pgcnt_t npgs, uint_t rid)
2591 2560 {
2592 2561 tte_t tte;
2593 2562 page_t *pp;
2594 2563 struct hmehash_bucket *hmebp;
2595 2564 struct hme_blk *hmeblkp;
2596 2565 int index;
2597 2566
2598 2567 while (npgs) {
2599 2568 /*
2600 2569 * Acquire the hash bucket.
2601 2570 */
2602 2571 hmebp = sfmmu_tteload_acquire_hashbucket(hat, vaddr, TTE8K,
2603 2572 rid);
2604 2573 ASSERT(hmebp);
2605 2574
2606 2575 /*
2607 2576 * Find the hment block.
2608 2577 */
2609 2578 hmeblkp = sfmmu_tteload_find_hmeblk(hat, hmebp, vaddr,
2610 2579 TTE8K, flags, rid);
2611 2580 ASSERT(hmeblkp);
2612 2581
2613 2582 do {
2614 2583 /*
2615 2584 * Make the tte.
2616 2585 */
2617 2586 pp = *pps;
2618 2587 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K);
2619 2588
2620 2589 /*
2621 2590 * Add the translation.
2622 2591 */
2623 2592 (void) sfmmu_tteload_addentry(hat, hmeblkp, &tte,
2624 2593 vaddr, pps, flags, rid);
2625 2594
2626 2595 /*
2627 2596 * Goto next page.
2628 2597 */
2629 2598 pps++;
2630 2599 npgs--;
2631 2600
2632 2601 /*
2633 2602 * Goto next address.
2634 2603 */
2635 2604 vaddr += MMU_PAGESIZE;
2636 2605
2637 2606 /*
2638 2607 * Don't cross over into a different hment block.
2639 2608 */
2640 2609 index = (int)(((uintptr_t)vaddr >> MMU_PAGESHIFT) &
2641 2610 (NHMENTS-1));
2642 2611
2643 2612 } while (index != 0 && npgs != 0);
2644 2613
2645 2614 /*
2646 2615 * Release the hash bucket.
2647 2616 */
2648 2617
2649 2618 sfmmu_tteload_release_hashbucket(hmebp);
2650 2619 }
2651 2620 }
2652 2621
2653 2622 /*
2654 2623 * Construct a tte for a page:
2655 2624 *
2656 2625 * tte_valid = 1
2657 2626 * tte_size2 = size & TTE_SZ2_BITS (Panther and Olympus-C only)
2658 2627 * tte_size = size
2659 2628 * tte_nfo = attr & HAT_NOFAULT
2660 2629 * tte_ie = attr & HAT_STRUCTURE_LE
2661 2630 * tte_hmenum = hmenum
2662 2631 * tte_pahi = pp->p_pagenum >> TTE_PASHIFT;
2663 2632 * tte_palo = pp->p_pagenum & TTE_PALOMASK;
2664 2633 * tte_ref = 1 (optimization)
2665 2634 * tte_wr_perm = attr & PROT_WRITE;
2666 2635 * tte_no_sync = attr & HAT_NOSYNC
2667 2636 * tte_lock = attr & SFMMU_LOCKTTE
2668 2637 * tte_cp = !(attr & SFMMU_UNCACHEPTTE)
2669 2638 * tte_cv = !(attr & SFMMU_UNCACHEVTTE)
2670 2639 * tte_e = attr & SFMMU_SIDEFFECT
2671 2640 * tte_priv = !(attr & PROT_USER)
2672 2641 * tte_hwwr = if nosync is set and it is writable we set the mod bit (opt)
2673 2642 * tte_glb = 0
2674 2643 */
2675 2644 void
2676 2645 sfmmu_memtte(tte_t *ttep, pfn_t pfn, uint_t attr, int tte_sz)
2677 2646 {
2678 2647 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2679 2648
2680 2649 ttep->tte_inthi = MAKE_TTE_INTHI(pfn, attr, tte_sz, 0 /* hmenum */);
2681 2650 ttep->tte_intlo = MAKE_TTE_INTLO(pfn, attr, tte_sz, 0 /* hmenum */);
2682 2651
2683 2652 if (TTE_IS_NOSYNC(ttep)) {
2684 2653 TTE_SET_REF(ttep);
2685 2654 if (TTE_IS_WRITABLE(ttep)) {
2686 2655 TTE_SET_MOD(ttep);
2687 2656 }
2688 2657 }
2689 2658 if (TTE_IS_NFO(ttep) && TTE_IS_EXECUTABLE(ttep)) {
2690 2659 panic("sfmmu_memtte: can't set both NFO and EXEC bits");
2691 2660 }
2692 2661 }
2693 2662
2694 2663 /*
2695 2664 * This function will add a translation to the hme_blk and allocate the
2696 2665 * hme_blk if one does not exist.
2697 2666 * If a page structure is specified then it will add the
2698 2667 * corresponding hment to the mapping list.
2699 2668 * It will also update the hmenum field for the tte.
2700 2669 *
2701 2670 * Currently this function is only used for kernel mappings.
2702 2671 * So pass invalid region to sfmmu_tteload_array().
2703 2672 */
2704 2673 void
2705 2674 sfmmu_tteload(struct hat *sfmmup, tte_t *ttep, caddr_t vaddr, page_t *pp,
2706 2675 uint_t flags)
2707 2676 {
2708 2677 ASSERT(sfmmup == ksfmmup);
2709 2678 (void) sfmmu_tteload_array(sfmmup, ttep, vaddr, &pp, flags,
2710 2679 SFMMU_INVALID_SHMERID);
2711 2680 }
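
Putting the comment above sfmmu_memtte() together with the kernel-only wrapper just shown, a hedged usage sketch (vaddr and pp stand in for a caller's target address and locked page; the attribute combination is an assumption):

/*
 * Illustrative kernel mapping in the style of hat_do_memload() earlier
 * in this file: build an 8K TTE for the page, then load it (and the
 * corresponding TSB entry) through sfmmu_tteload().
 */
tte_t tte;

sfmmu_memtte(&tte, pp->p_pagenum, PROT_READ | PROT_WRITE | HAT_NOSYNC, TTE8K);
sfmmu_tteload(ksfmmup, &tte, vaddr, pp, HAT_LOAD);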
2712 2681
2713 2682 /*
2714 2683 * Load (ttep != NULL) or unload (ttep == NULL) one entry in the TSB.
2715 2684 * Assumes that a particular page size may only be resident in one TSB.
2716 2685 */
2717 2686 static void
2718 2687 sfmmu_mod_tsb(sfmmu_t *sfmmup, caddr_t vaddr, tte_t *ttep, int ttesz)
2719 2688 {
2720 2689 struct tsb_info *tsbinfop = NULL;
2721 2690 uint64_t tag;
2722 2691 struct tsbe *tsbe_addr;
2723 2692 uint64_t tsb_base;
2724 2693 uint_t tsb_size;
2725 2694 int vpshift = MMU_PAGESHIFT;
2726 2695 int phys = 0;
2727 2696
2728 2697 if (sfmmup == ksfmmup) { /* No support for 32/256M ksfmmu pages */
2729 2698 phys = ktsb_phys;
2730 2699 if (ttesz >= TTE4M) {
2731 2700 #ifndef sun4v
2732 2701 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M));
2733 2702 #endif
2734 2703 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base;
2735 2704 tsb_size = ktsb4m_szcode;
2736 2705 } else {
2737 2706 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base;
2738 2707 tsb_size = ktsb_szcode;
2739 2708 }
2740 2709 } else {
2741 2710 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz);
2742 2711
2743 2712 /*
2744 2713 * If there isn't a TSB for this page size, or the TSB is
2745 2714 * swapped out, there is nothing to do. Note that the latter
2746 2715 * case seems impossible but can occur if hat_pageunload()
2747 2716 * is called on an ISM mapping while the process is swapped
2748 2717 * out.
2749 2718 */
2750 2719 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED))
2751 2720 return;
2752 2721
2753 2722 /*
2754 2723 * If another thread is in the middle of relocating a TSB
2755 2724 * we can't unload the entry so set a flag so that the
2756 2725 * TSB will be flushed before it can be accessed by the
2757 2726 * process.
2758 2727 */
2759 2728 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) {
2760 2729 if (ttep == NULL)
2761 2730 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED;
2762 2731 return;
2763 2732 }
2764 2733 #if defined(UTSB_PHYS)
2765 2734 phys = 1;
2766 2735 tsb_base = (uint64_t)tsbinfop->tsb_pa;
2767 2736 #else
2768 2737 tsb_base = (uint64_t)tsbinfop->tsb_va;
2769 2738 #endif
2770 2739 tsb_size = tsbinfop->tsb_szc;
2771 2740 }
2772 2741 if (ttesz >= TTE4M)
2773 2742 vpshift = MMU_PAGESHIFT4M;
2774 2743
2775 2744 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size);
2776 2745 tag = sfmmu_make_tsbtag(vaddr);
2777 2746
2778 2747 if (ttep == NULL) {
2779 2748 sfmmu_unload_tsbe(tsbe_addr, tag, phys);
2780 2749 } else {
2781 2750 if (ttesz >= TTE4M) {
2782 2751 SFMMU_STAT(sf_tsb_load4m);
2783 2752 } else {
2784 2753 SFMMU_STAT(sf_tsb_load8k);
2785 2754 }
2786 2755
2787 2756 sfmmu_load_tsbe(tsbe_addr, tag, ttep, phys);
2788 2757 }
2789 2758 }
2790 2759
2791 2760 /*
2792 2761 * Unmap all entries from [start, end) matching the given page size.
2793 2762 *
2794 2763 * This function is used primarily to unmap replicated 64K or 512K entries
2795 2764 * from the TSB that are inserted using the base page size TSB pointer, but
2796 2765 * it may also be called to unmap a range of addresses from the TSB.
2797 2766 */
2798 2767 void
2799 2768 sfmmu_unload_tsb_range(sfmmu_t *sfmmup, caddr_t start, caddr_t end, int ttesz)
2800 2769 {
2801 2770 struct tsb_info *tsbinfop;
2802 2771 uint64_t tag;
2803 2772 struct tsbe *tsbe_addr;
2804 2773 caddr_t vaddr;
2805 2774 uint64_t tsb_base;
2806 2775 int vpshift, vpgsz;
2807 2776 uint_t tsb_size;
2808 2777 int phys = 0;
2809 2778
2810 2779 /*
2811 2780 * Assumptions:
2812 2781 * If ttesz == 8K, 64K or 512K, we walk through the range 8K
2813 2782 * at a time shooting down any valid entries we encounter.
2814 2783 *
2815 2784 * If ttesz >= 4M we walk the range 4M at a time shooting
2816 2785 * down any valid mappings we find.
2817 2786 */
2818 2787 if (sfmmup == ksfmmup) {
2819 2788 phys = ktsb_phys;
2820 2789 if (ttesz >= TTE4M) {
2821 2790 #ifndef sun4v
2822 2791 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M));
2823 2792 #endif
2824 2793 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base;
2825 2794 tsb_size = ktsb4m_szcode;
2826 2795 } else {
2827 2796 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base;
2828 2797 tsb_size = ktsb_szcode;
2829 2798 }
2830 2799 } else {
2831 2800 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz);
2832 2801
2833 2802 /*
2834 2803 * If there isn't a TSB for this page size, or the TSB is
2835 2804 * swapped out, there is nothing to do. Note that the latter
2836 2805 * case seems impossible but can occur if hat_pageunload()
2837 2806 * is called on an ISM mapping while the process is swapped
2838 2807 * out.
2839 2808 */
2840 2809 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED))
2841 2810 return;
2842 2811
2843 2812 /*
2844 2813 * If another thread is in the middle of relocating a TSB
2845 2814 * we can't unload the entry so set a flag so that the
2846 2815 * TSB will be flushed before it can be accessed by the
2847 2816 * process.
2848 2817 */
2849 2818 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) {
2850 2819 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED;
2851 2820 return;
2852 2821 }
2853 2822 #if defined(UTSB_PHYS)
2854 2823 phys = 1;
2855 2824 tsb_base = (uint64_t)tsbinfop->tsb_pa;
2856 2825 #else
2857 2826 tsb_base = (uint64_t)tsbinfop->tsb_va;
2858 2827 #endif
2859 2828 tsb_size = tsbinfop->tsb_szc;
2860 2829 }
2861 2830 if (ttesz >= TTE4M) {
2862 2831 vpshift = MMU_PAGESHIFT4M;
2863 2832 vpgsz = MMU_PAGESIZE4M;
2864 2833 } else {
2865 2834 vpshift = MMU_PAGESHIFT;
2866 2835 vpgsz = MMU_PAGESIZE;
2867 2836 }
2868 2837
2869 2838 for (vaddr = start; vaddr < end; vaddr += vpgsz) {
2870 2839 tag = sfmmu_make_tsbtag(vaddr);
2871 2840 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size);
2872 2841 sfmmu_unload_tsbe(tsbe_addr, tag, phys);
2873 2842 }
2874 2843 }
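
As a hedged example of the primary use described above (the concrete range is an assumption): after unloading a 64K mapping whose entries had been replicated into the 8K-indexed TSB, the replicas spanning that 64K region could be dropped with a call along these lines:

/* Illustrative only: shoot down the 8K-indexed replicas of a 64K region. */
sfmmu_unload_tsb_range(sfmmup, vaddr, vaddr + MMU_PAGESIZE64K, TTE8K);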
2875 2844
2876 2845 /*
2877 2846 * Select the optimum TSB size given the number of mappings
2878 2847 * that need to be cached.
2879 2848 */
2880 2849 static int
2881 2850 sfmmu_select_tsb_szc(pgcnt_t pgcnt)
2882 2851 {
2883 2852 int szc = 0;
2884 2853
2885 2854 #ifdef DEBUG
2886 2855 if (tsb_grow_stress) {
2887 2856 uint32_t randval = (uint32_t)gettick() >> 4;
2888 2857 return (randval % (tsb_max_growsize + 1));
2889 2858 }
2890 2859 #endif /* DEBUG */
2891 2860
2892 2861 while ((szc < tsb_max_growsize) && (pgcnt > SFMMU_RSS_TSBSIZE(szc)))
2893 2862 szc++;
2894 2863 return (szc);
2895 2864 }
2896 2865
2897 2866 /*
2898 2867 * This function will add a translation to the hme_blk and allocate the
2899 2868 * hme_blk if one does not exist.
2900 2869 * If a page structure is specified then it will add the
2901 2870 * corresponding hment to the mapping list.
2902 2871 * It will also update the hmenum field for the tte.
2903 2872 * Furthermore, it attempts to create a large page translation
2904 2873 * for <addr,hat> at page array pps. It assumes addr and first
2905 2874 * pp is correctly aligned. It returns 0 if successful and 1 otherwise.
2906 2875 */
2907 2876 static int
2908 2877 sfmmu_tteload_array(sfmmu_t *sfmmup, tte_t *ttep, caddr_t vaddr,
2909 2878 page_t **pps, uint_t flags, uint_t rid)
2910 2879 {
2911 2880 struct hmehash_bucket *hmebp;
2912 2881 struct hme_blk *hmeblkp;
2913 2882 int ret;
2914 2883 uint_t size;
2915 2884
2916 2885 /*
2917 2886 * Get mapping size.
2918 2887 */
2919 2888 size = TTE_CSZ(ttep);
2920 2889 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size)));
2921 2890
2922 2891 /*
2923 2892 * Acquire the hash bucket.
2924 2893 */
2925 2894 hmebp = sfmmu_tteload_acquire_hashbucket(sfmmup, vaddr, size, rid);
2926 2895 ASSERT(hmebp);
2927 2896
2928 2897 /*
2929 2898 * Find the hment block.
2930 2899 */
2931 2900 hmeblkp = sfmmu_tteload_find_hmeblk(sfmmup, hmebp, vaddr, size, flags,
2932 2901 rid);
2933 2902 ASSERT(hmeblkp);
2934 2903
2935 2904 /*
2936 2905 * Add the translation.
2937 2906 */
2938 2907 ret = sfmmu_tteload_addentry(sfmmup, hmeblkp, ttep, vaddr, pps, flags,
2939 2908 rid);
2940 2909
2941 2910 /*
2942 2911 * Release the hash bucket.
2943 2912 */
2944 2913 sfmmu_tteload_release_hashbucket(hmebp);
2945 2914
2946 2915 return (ret);
2947 2916 }
2948 2917
2949 2918 /*
2950 2919 * Function locks and returns a pointer to the hash bucket for vaddr and size.
2951 2920 */
2952 2921 static struct hmehash_bucket *
2953 2922 sfmmu_tteload_acquire_hashbucket(sfmmu_t *sfmmup, caddr_t vaddr, int size,
2954 2923 uint_t rid)
2955 2924 {
2956 2925 struct hmehash_bucket *hmebp;
2957 2926 int hmeshift;
2958 2927 void *htagid = sfmmutohtagid(sfmmup, rid);
2959 2928
2960 2929 ASSERT(htagid != NULL);
2961 2930
2962 2931 hmeshift = HME_HASH_SHIFT(size);
2963 2932
2964 2933 hmebp = HME_HASH_FUNCTION(htagid, vaddr, hmeshift);
2965 2934
2966 2935 SFMMU_HASH_LOCK(hmebp);
2967 2936
2968 2937 return (hmebp);
2969 2938 }
2970 2939
2971 2940 /*
2972 2941 * Function returns a pointer to an hmeblk in the hash bucket, hmebp. If the
2973 2942 * hmeblk doesn't exist for the [sfmmup, vaddr & size] signature, a hmeblk is
2974 2943 * allocated.
2975 2944 */
2976 2945 static struct hme_blk *
2977 2946 sfmmu_tteload_find_hmeblk(sfmmu_t *sfmmup, struct hmehash_bucket *hmebp,
2978 2947 caddr_t vaddr, uint_t size, uint_t flags, uint_t rid)
2979 2948 {
2980 2949 hmeblk_tag hblktag;
2981 2950 int hmeshift;
2982 2951 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL;
2983 2952
2984 2953 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size));
2985 2954
2986 2955 hblktag.htag_id = sfmmutohtagid(sfmmup, rid);
2987 2956 ASSERT(hblktag.htag_id != NULL);
2988 2957 hmeshift = HME_HASH_SHIFT(size);
2989 2958 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
2990 2959 hblktag.htag_rehash = HME_HASH_REHASH(size);
2991 2960 hblktag.htag_rid = rid;
2992 2961
2993 2962 ttearray_realloc:
2994 2963
2995 2964 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
2996 2965
2997 2966 /*
2998 2967 * We block until hblk_reserve_lock is released; it's held by
2999 2968 * the thread temporarily using hblk_reserve, until hblk_reserve is
3000 2969 * replaced by a hblk from sfmmu8_cache.
3001 2970 */
3002 2971 if (hmeblkp == (struct hme_blk *)hblk_reserve &&
3003 2972 hblk_reserve_thread != curthread) {
3004 2973 SFMMU_HASH_UNLOCK(hmebp);
3005 2974 mutex_enter(&hblk_reserve_lock);
3006 2975 mutex_exit(&hblk_reserve_lock);
3007 2976 SFMMU_STAT(sf_hblk_reserve_hit);
3008 2977 SFMMU_HASH_LOCK(hmebp);
3009 2978 goto ttearray_realloc;
3010 2979 }
3011 2980
3012 2981 if (hmeblkp == NULL) {
3013 2982 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size,
3014 2983 hblktag, flags, rid);
3015 2984 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
3016 2985 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
3017 2986 } else {
3018 2987 /*
3019 2988 * It is possible for 8k and 64k hblks to collide since they
3020 2989 * have the same rehash value. This is because we
3021 2990 * lazily free hblks and 8K/64K blks could be lingering.
3022 2991 		 * If we find a size mismatch we free the block and try again.
3023 2992 */
3024 2993 if (get_hblk_ttesz(hmeblkp) != size) {
3025 2994 ASSERT(!hmeblkp->hblk_vcnt);
3026 2995 ASSERT(!hmeblkp->hblk_hmecnt);
3027 2996 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3028 2997 &list, 0);
3029 2998 goto ttearray_realloc;
3030 2999 }
3031 3000 if (hmeblkp->hblk_shw_bit) {
3032 3001 /*
3033 3002 * if the hblk was previously used as a shadow hblk then
3034 3003 * we will change it to a normal hblk
3035 3004 */
3036 3005 ASSERT(!hmeblkp->hblk_shared);
3037 3006 if (hmeblkp->hblk_shw_mask) {
3038 3007 sfmmu_shadow_hcleanup(sfmmup, hmeblkp, hmebp);
3039 3008 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
3040 3009 goto ttearray_realloc;
3041 3010 } else {
3042 3011 hmeblkp->hblk_shw_bit = 0;
3043 3012 }
3044 3013 }
3045 3014 SFMMU_STAT(sf_hblk_hit);
3046 3015 }
3047 3016
3048 3017 /*
3049 3018 * hat_memload() should never call kmem_cache_free() for kernel hmeblks;
3050 3019 * see block comment showing the stacktrace in sfmmu_hblk_alloc();
3051 3020 * set the flag parameter to 1 so that sfmmu_hblks_list_purge() will
3052 3021 * just add these hmeblks to the per-cpu pending queue.
3053 3022 */
3054 3023 sfmmu_hblks_list_purge(&list, 1);
3055 3024
3056 3025 ASSERT(get_hblk_ttesz(hmeblkp) == size);
3057 3026 ASSERT(!hmeblkp->hblk_shw_bit);
3058 3027 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
3059 3028 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
3060 3029 ASSERT(hmeblkp->hblk_tag.htag_rid == rid);
3061 3030
3062 3031 return (hmeblkp);
3063 3032 }
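
The hblk_reserve handling above waits out the owning thread by taking and immediately dropping hblk_reserve_lock before retrying the hash search. A minimal user-space sketch of that wait-by-mutex-handshake idiom follows; the pthread names and the busy flag are illustrative assumptions, not code from this file.

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>

	static pthread_mutex_t reserve_lock = PTHREAD_MUTEX_INITIALIZER;
	static atomic_bool reserve_busy;	/* owner sets this while holding reserve_lock */

	/* Owner: takes the lock, works with the reserved object, then releases it. */
	void
	owner_use_reserve(void (*work)(void))
	{
		pthread_mutex_lock(&reserve_lock);
		atomic_store(&reserve_busy, true);
		work();
		atomic_store(&reserve_busy, false);
		pthread_mutex_unlock(&reserve_lock);
	}

	/*
	 * Waiter: on seeing the object busy, block by acquiring and immediately
	 * releasing the owner's mutex, then retry the lookup -- the same
	 * mutex_enter()/mutex_exit() handshake used above.
	 */
	void
	waiter_lookup(void (*lookup)(void))
	{
		while (atomic_load(&reserve_busy)) {
			pthread_mutex_lock(&reserve_lock);
			pthread_mutex_unlock(&reserve_lock);
		}
		lookup();
	}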
3064 3033
3065 3034 /*
3066 3035 * Function adds a tte entry into the hmeblk. It returns 0 if successful and 1
3067 3036 * otherwise.
3068 3037 */
3069 3038 static int
3070 3039 sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep,
3071 3040 caddr_t vaddr, page_t **pps, uint_t flags, uint_t rid)
3072 3041 {
3073 3042 page_t *pp = *pps;
3074 3043 int hmenum, size, remap;
3075 3044 tte_t tteold, flush_tte;
3076 3045 #ifdef DEBUG
3077 3046 tte_t orig_old;
3078 3047 #endif /* DEBUG */
3079 3048 struct sf_hment *sfhme;
3080 3049 kmutex_t *pml, *pmtx;
3081 3050 hatlock_t *hatlockp;
3082 3051 int myflt;
3083 3052
3084 3053 /*
3085 3054 * remove this panic when we decide to let user virtual address
3086 3055 * space be >= USERLIMIT.
3087 3056 */
3088 3057 if (!TTE_IS_PRIVILEGED(ttep) && vaddr >= (caddr_t)USERLIMIT)
3089 3058 panic("user addr %p in kernel space", (void *)vaddr);
3090 3059 #if defined(TTE_IS_GLOBAL)
3091 3060 if (TTE_IS_GLOBAL(ttep))
3092 3061 panic("sfmmu_tteload: creating global tte");
3093 3062 #endif
3094 3063
3095 3064 #ifdef DEBUG
3096 3065 if (pf_is_memory(sfmmu_ttetopfn(ttep, vaddr)) &&
3097 3066 !TTE_IS_PCACHEABLE(ttep) && !sfmmu_allow_nc_trans)
3098 3067 panic("sfmmu_tteload: non cacheable memory tte");
3099 3068 #endif /* DEBUG */
3100 3069
3101 3070 /* don't simulate dirty bit for writeable ISM/DISM mappings */
3102 3071 if ((flags & HAT_LOAD_SHARE) && TTE_IS_WRITABLE(ttep)) {
3103 3072 TTE_SET_REF(ttep);
3104 3073 TTE_SET_MOD(ttep);
3105 3074 }
3106 3075
3107 3076 if ((flags & HAT_LOAD_SHARE) || !TTE_IS_REF(ttep) ||
3108 3077 !TTE_IS_MOD(ttep)) {
3109 3078 /*
3110 3079 * Don't load TSB for dummy as in ISM. Also don't preload
3111 3080 * the TSB if the TTE isn't writable since we're likely to
3112 3081 * fault on it again -- preloading can be fairly expensive.
3113 3082 */
3114 3083 flags |= SFMMU_NO_TSBLOAD;
3115 3084 }
3116 3085
3117 3086 size = TTE_CSZ(ttep);
3118 3087 switch (size) {
3119 3088 case TTE8K:
3120 3089 SFMMU_STAT(sf_tteload8k);
3121 3090 break;
3122 3091 case TTE64K:
3123 3092 SFMMU_STAT(sf_tteload64k);
3124 3093 break;
3125 3094 case TTE512K:
3126 3095 SFMMU_STAT(sf_tteload512k);
3127 3096 break;
3128 3097 case TTE4M:
3129 3098 SFMMU_STAT(sf_tteload4m);
3130 3099 break;
3131 3100 case (TTE32M):
3132 3101 SFMMU_STAT(sf_tteload32m);
3133 3102 ASSERT(mmu_page_sizes == max_mmu_page_sizes);
3134 3103 break;
3135 3104 case (TTE256M):
3136 3105 SFMMU_STAT(sf_tteload256m);
3137 3106 ASSERT(mmu_page_sizes == max_mmu_page_sizes);
3138 3107 break;
3139 3108 }
3140 3109
3141 3110 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size)));
3142 3111 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size));
3143 3112 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
3144 3113 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
3145 3114
3146 3115 HBLKTOHME_IDX(sfhme, hmeblkp, vaddr, hmenum);
3147 3116
3148 3117 /*
3149 3118 * Need to grab mlist lock here so that pageunload
3150 3119 * will not change tte behind us.
3151 3120 */
3152 3121 if (pp) {
3153 3122 pml = sfmmu_mlist_enter(pp);
3154 3123 }
3155 3124
3156 3125 sfmmu_copytte(&sfhme->hme_tte, &tteold);
3157 3126 /*
3158 3127 	 * Look for the corresponding hment and, if it is valid, verify
3159 3128 	 * that the pfns are equal.
3160 3129 */
3161 3130 remap = TTE_IS_VALID(&tteold);
3162 3131 if (remap) {
3163 3132 pfn_t new_pfn, old_pfn;
3164 3133
3165 3134 old_pfn = TTE_TO_PFN(vaddr, &tteold);
3166 3135 new_pfn = TTE_TO_PFN(vaddr, ttep);
3167 3136
3168 3137 if (flags & HAT_LOAD_REMAP) {
3169 3138 /* make sure we are remapping same type of pages */
3170 3139 if (pf_is_memory(old_pfn) != pf_is_memory(new_pfn)) {
3171 3140 panic("sfmmu_tteload - tte remap io<->memory");
3172 3141 }
3173 3142 if (old_pfn != new_pfn &&
3174 3143 (pp != NULL || sfhme->hme_page != NULL)) {
3175 3144 panic("sfmmu_tteload - tte remap pp != NULL");
3176 3145 }
3177 3146 } else if (old_pfn != new_pfn) {
3178 3147 panic("sfmmu_tteload - tte remap, hmeblkp 0x%p",
3179 3148 (void *)hmeblkp);
3180 3149 }
3181 3150 ASSERT(TTE_CSZ(&tteold) == TTE_CSZ(ttep));
3182 3151 }
3183 3152
3184 3153 if (pp) {
3185 3154 if (size == TTE8K) {
3186 3155 #ifdef VAC
3187 3156 /*
3188 3157 * Handle VAC consistency
3189 3158 */
3190 3159 if (!remap && (cache & CACHE_VAC) && !PP_ISNC(pp)) {
3191 3160 sfmmu_vac_conflict(sfmmup, vaddr, pp);
3192 3161 }
3193 3162 #endif
3194 3163
3195 3164 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) {
3196 3165 pmtx = sfmmu_page_enter(pp);
3197 3166 PP_CLRRO(pp);
3198 3167 sfmmu_page_exit(pmtx);
3199 3168 } else if (!PP_ISMAPPED(pp) &&
3200 3169 (!TTE_IS_WRITABLE(ttep)) && !(PP_ISMOD(pp))) {
3201 3170 pmtx = sfmmu_page_enter(pp);
3202 3171 if (!(PP_ISMOD(pp))) {
3203 3172 PP_SETRO(pp);
3204 3173 }
3205 3174 sfmmu_page_exit(pmtx);
3206 3175 }
3207 3176
3208 3177 } else if (sfmmu_pagearray_setup(vaddr, pps, ttep, remap)) {
3209 3178 /*
3210 3179 * sfmmu_pagearray_setup failed so return
3211 3180 */
3212 3181 sfmmu_mlist_exit(pml);
3213 3182 return (1);
3214 3183 }
3215 3184 }
3216 3185
3217 3186 /*
3218 3187 * Make sure hment is not on a mapping list.
3219 3188 */
3220 3189 ASSERT(remap || (sfhme->hme_page == NULL));
3221 3190
3222 3191 /* if it is not a remap then hme->next better be NULL */
3223 3192 ASSERT((!remap) ? sfhme->hme_next == NULL : 1);
3224 3193
3225 3194 if (flags & HAT_LOAD_LOCK) {
3226 3195 if ((hmeblkp->hblk_lckcnt + 1) >= MAX_HBLK_LCKCNT) {
3227 3196 panic("too high lckcnt-hmeblk %p",
3228 3197 (void *)hmeblkp);
3229 3198 }
3230 3199 atomic_inc_32(&hmeblkp->hblk_lckcnt);
3231 3200
3232 3201 HBLK_STACK_TRACE(hmeblkp, HBLK_LOCK);
3233 3202 }
3234 3203
3235 3204 #ifdef VAC
3236 3205 if (pp && PP_ISNC(pp)) {
3237 3206 /*
3238 3207 * If the physical page is marked to be uncacheable, like
3239 3208 * by a vac conflict, make sure the new mapping is also
3240 3209 * uncacheable.
3241 3210 */
3242 3211 TTE_CLR_VCACHEABLE(ttep);
3243 3212 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR);
3244 3213 }
3245 3214 #endif
3246 3215 ttep->tte_hmenum = hmenum;
3247 3216
3248 3217 #ifdef DEBUG
3249 3218 orig_old = tteold;
3250 3219 #endif /* DEBUG */
3251 3220
3252 3221 while (sfmmu_modifytte_try(&tteold, ttep, &sfhme->hme_tte) < 0) {
3253 3222 if ((sfmmup == KHATID) &&
3254 3223 (flags & (HAT_LOAD_LOCK | HAT_LOAD_REMAP))) {
3255 3224 sfmmu_copytte(&sfhme->hme_tte, &tteold);
3256 3225 }
3257 3226 #ifdef DEBUG
3258 3227 chk_tte(&orig_old, &tteold, ttep, hmeblkp);
3259 3228 #endif /* DEBUG */
3260 3229 }
3261 3230 ASSERT(TTE_IS_VALID(&sfhme->hme_tte));
3262 3231
3263 3232 if (!TTE_IS_VALID(&tteold)) {
3264 3233
3265 3234 atomic_inc_16(&hmeblkp->hblk_vcnt);
3266 3235 if (rid == SFMMU_INVALID_SHMERID) {
3267 3236 atomic_inc_ulong(&sfmmup->sfmmu_ttecnt[size]);
3268 3237 } else {
3269 3238 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
3270 3239 sf_region_t *rgnp = srdp->srd_hmergnp[rid];
3271 3240 /*
3272 3241 * We already accounted for region ttecnt's in sfmmu
3273 3242 * during hat_join_region() processing. Here we
3274 3243 			 * only update ttecnt's in the region structure.
3275 3244 */
3276 3245 atomic_inc_ulong(&rgnp->rgn_ttecnt[size]);
3277 3246 }
3278 3247 }
3279 3248
3280 3249 myflt = (astosfmmu(curthread->t_procp->p_as) == sfmmup);
3281 3250 if (size > TTE8K && (flags & HAT_LOAD_SHARE) == 0 &&
3282 3251 sfmmup != ksfmmup) {
3283 3252 uchar_t tteflag = 1 << size;
3284 3253 if (rid == SFMMU_INVALID_SHMERID) {
3285 3254 if (!(sfmmup->sfmmu_tteflags & tteflag)) {
3286 3255 hatlockp = sfmmu_hat_enter(sfmmup);
3287 3256 sfmmup->sfmmu_tteflags |= tteflag;
3288 3257 sfmmu_hat_exit(hatlockp);
3289 3258 }
3290 3259 } else if (!(sfmmup->sfmmu_rtteflags & tteflag)) {
3291 3260 hatlockp = sfmmu_hat_enter(sfmmup);
3292 3261 sfmmup->sfmmu_rtteflags |= tteflag;
3293 3262 sfmmu_hat_exit(hatlockp);
3294 3263 }
3295 3264 /*
3296 3265 * Update the current CPU tsbmiss area, so the current thread
3297 3266 * won't need to take the tsbmiss for the new pagesize.
3298 3267 * The other threads in the process will update their tsb
3299 3268 * miss area lazily in sfmmu_tsbmiss_exception() when they
3300 3269 * fail to find the translation for a newly added pagesize.
3301 3270 */
3302 3271 if (size > TTE64K && myflt) {
3303 3272 struct tsbmiss *tsbmp;
3304 3273 kpreempt_disable();
3305 3274 tsbmp = &tsbmiss_area[CPU->cpu_id];
3306 3275 if (rid == SFMMU_INVALID_SHMERID) {
3307 3276 if (!(tsbmp->uhat_tteflags & tteflag)) {
3308 3277 tsbmp->uhat_tteflags |= tteflag;
3309 3278 }
3310 3279 } else {
3311 3280 if (!(tsbmp->uhat_rtteflags & tteflag)) {
3312 3281 tsbmp->uhat_rtteflags |= tteflag;
3313 3282 }
3314 3283 }
3315 3284 kpreempt_enable();
3316 3285 }
3317 3286 }
3318 3287
3319 3288 if (size >= TTE4M && (flags & HAT_LOAD_TEXT) &&
3320 3289 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) {
3321 3290 hatlockp = sfmmu_hat_enter(sfmmup);
3322 3291 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG);
3323 3292 sfmmu_hat_exit(hatlockp);
3324 3293 }
3325 3294
3326 3295 flush_tte.tte_intlo = (tteold.tte_intlo ^ ttep->tte_intlo) &
3327 3296 hw_tte.tte_intlo;
3328 3297 flush_tte.tte_inthi = (tteold.tte_inthi ^ ttep->tte_inthi) &
3329 3298 hw_tte.tte_inthi;
3330 3299
3331 3300 if (remap && (flush_tte.tte_inthi || flush_tte.tte_intlo)) {
3332 3301 /*
3333 3302 * If remap and new tte differs from old tte we need
3334 3303 * to sync the mod bit and flush TLB/TSB. We don't
3335 3304 * need to sync ref bit because we currently always set
3336 3305 * ref bit in tteload.
3337 3306 */
3338 3307 ASSERT(TTE_IS_REF(ttep));
3339 3308 if (TTE_IS_MOD(&tteold)) {
3340 3309 sfmmu_ttesync(sfmmup, vaddr, &tteold, pp);
3341 3310 }
3342 3311 /*
3343 3312 * hwtte bits shouldn't change for SRD hmeblks as long as SRD
3344 3313 * hmes are only used for read only text. Adding this code for
3345 3314 * completeness and future use of shared hmeblks with writable
3346 3315 * mappings of VMODSORT vnodes.
3347 3316 */
3348 3317 if (hmeblkp->hblk_shared) {
3349 3318 cpuset_t cpuset = sfmmu_rgntlb_demap(vaddr,
3350 3319 sfmmup->sfmmu_srdp->srd_hmergnp[rid], hmeblkp, 1);
3351 3320 xt_sync(cpuset);
3352 3321 SFMMU_STAT_ADD(sf_region_remap_demap, 1);
3353 3322 } else {
3354 3323 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 0);
3355 3324 xt_sync(sfmmup->sfmmu_cpusran);
3356 3325 }
3357 3326 }
3358 3327
3359 3328 if ((flags & SFMMU_NO_TSBLOAD) == 0) {
3360 3329 /*
3361 3330 * We only preload 8K and 4M mappings into the TSB, since
3362 3331 * 64K and 512K mappings are replicated and hence don't
3363 3332 * have a single, unique TSB entry. Ditto for 32M/256M.
3364 3333 */
3365 3334 if (size == TTE8K || size == TTE4M) {
3366 3335 sf_scd_t *scdp;
3367 3336 hatlockp = sfmmu_hat_enter(sfmmup);
3368 3337 /*
3369 3338 * Don't preload private TSB if the mapping is used
3370 3339 * by the shctx in the SCD.
3371 3340 */
3372 3341 scdp = sfmmup->sfmmu_scdp;
3373 3342 if (rid == SFMMU_INVALID_SHMERID || scdp == NULL ||
3374 3343 !SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) {
3375 3344 sfmmu_load_tsb(sfmmup, vaddr, &sfhme->hme_tte,
3376 3345 size);
3377 3346 }
3378 3347 sfmmu_hat_exit(hatlockp);
3379 3348 }
3380 3349 }
3381 3350 if (pp) {
3382 3351 if (!remap) {
3383 3352 HME_ADD(sfhme, pp);
3384 3353 atomic_inc_16(&hmeblkp->hblk_hmecnt);
3385 3354 ASSERT(hmeblkp->hblk_hmecnt > 0);
3386 3355
3387 3356 /*
3388 3357 * Cannot ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS)
3389 3358 * see pageunload() for comment.
3390 3359 */
3391 3360 }
3392 3361 sfmmu_mlist_exit(pml);
3393 3362 }
3394 3363
3395 3364 return (0);
3396 3365 }
3397 3366 /*
3398 3367 * Function unlocks hash bucket.
3399 3368 */
3400 3369 static void
3401 3370 sfmmu_tteload_release_hashbucket(struct hmehash_bucket *hmebp)
3402 3371 {
3403 3372 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
3404 3373 SFMMU_HASH_UNLOCK(hmebp);
3405 3374 }
3406 3375
3407 3376 /*
3408 3377  * Function which checks and sets up the page array for a large
3409 3378  * translation. Will set the p_vcolor, p_index and p_ro fields.
3410 3379  * Assumes addr and pfnum of the first page are properly aligned.
3411 3380  * Will check for physical contiguity. If the check fails it
3412 3381  * returns nonzero.
3413 3382 */
3414 3383 static int
3415 3384 sfmmu_pagearray_setup(caddr_t addr, page_t **pps, tte_t *ttep, int remap)
3416 3385 {
3417 3386 int i, index, ttesz;
3418 3387 pfn_t pfnum;
3419 3388 pgcnt_t npgs;
3420 3389 page_t *pp, *pp1;
3421 3390 kmutex_t *pmtx;
3422 3391 #ifdef VAC
3423 3392 int osz;
3424 3393 int cflags = 0;
3425 3394 int vac_err = 0;
3426 3395 #endif
3427 3396 int newidx = 0;
3428 3397
3429 3398 ttesz = TTE_CSZ(ttep);
3430 3399
3431 3400 ASSERT(ttesz > TTE8K);
3432 3401
3433 3402 npgs = TTEPAGES(ttesz);
3434 3403 index = PAGESZ_TO_INDEX(ttesz);
3435 3404
3436 3405 pfnum = (*pps)->p_pagenum;
3437 3406 ASSERT(IS_P2ALIGNED(pfnum, npgs));
3438 3407
3439 3408 /*
3440 3409 * Save the first pp so we can do HAT_TMPNC at the end.
3441 3410 */
3442 3411 pp1 = *pps;
3443 3412 #ifdef VAC
3444 3413 osz = fnd_mapping_sz(pp1);
3445 3414 #endif
3446 3415
3447 3416 for (i = 0; i < npgs; i++, pps++) {
3448 3417 pp = *pps;
3449 3418 ASSERT(PAGE_LOCKED(pp));
3450 3419 ASSERT(pp->p_szc >= ttesz);
3451 3420 ASSERT(pp->p_szc == pp1->p_szc);
3452 3421 ASSERT(sfmmu_mlist_held(pp));
3453 3422
3454 3423 /*
3455 3424 * XXX is it possible to maintain P_RO on the root only?
3456 3425 */
3457 3426 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) {
3458 3427 pmtx = sfmmu_page_enter(pp);
3459 3428 PP_CLRRO(pp);
3460 3429 sfmmu_page_exit(pmtx);
3461 3430 } else if (!PP_ISMAPPED(pp) && !TTE_IS_WRITABLE(ttep) &&
3462 3431 !PP_ISMOD(pp)) {
3463 3432 pmtx = sfmmu_page_enter(pp);
3464 3433 if (!(PP_ISMOD(pp))) {
3465 3434 PP_SETRO(pp);
3466 3435 }
3467 3436 sfmmu_page_exit(pmtx);
3468 3437 }
3469 3438
3470 3439 /*
3471 3440 * If this is a remap we skip vac & contiguity checks.
3472 3441 */
3473 3442 if (remap)
3474 3443 continue;
3475 3444
3476 3445 /*
3477 3446 * set p_vcolor and detect any vac conflicts.
3478 3447 */
3479 3448 #ifdef VAC
3480 3449 if (vac_err == 0) {
3481 3450 vac_err = sfmmu_vacconflict_array(addr, pp, &cflags);
3482 3451
3483 3452 }
3484 3453 #endif
3485 3454
3486 3455 /*
3487 3456 * Save current index in case we need to undo it.
3488 3457 * Note: "PAGESZ_TO_INDEX(sz) (1 << (sz))"
3489 3458 * "SFMMU_INDEX_SHIFT 6"
3490 3459 * "SFMMU_INDEX_MASK ((1 << SFMMU_INDEX_SHIFT) - 1)"
3491 3460 * "PP_MAPINDEX(p_index) (p_index & SFMMU_INDEX_MASK)"
3492 3461 *
3493 3462 * So: index = PAGESZ_TO_INDEX(ttesz);
3494 3463 * if ttesz == 1 then index = 0x2
3495 3464 * 2 then index = 0x4
3496 3465 * 3 then index = 0x8
3497 3466 * 4 then index = 0x10
3498 3467 * 5 then index = 0x20
3499 3468 * The code below checks if it's a new pagesize (ie, newidx)
3500 3469 * in case we need to take it back out of p_index,
3501 3470 * and then or's the new index into the existing index.
3502 3471 */
3503 3472 if ((PP_MAPINDEX(pp) & index) == 0)
3504 3473 newidx = 1;
3505 3474 pp->p_index = (PP_MAPINDEX(pp) | index);
3506 3475
3507 3476 /*
3508 3477 * contiguity check
3509 3478 */
3510 3479 if (pp->p_pagenum != pfnum) {
3511 3480 /*
3512 3481 * If we fail the contiguity test then
3513 3482 * the only thing we need to fix is the p_index field.
3514 3483 * We might get a few extra flushes but since this
3515 3484 * path is rare that is ok. The p_ro field will
3516 3485 * get automatically fixed on the next tteload to
3517 3486 * the page. NO TNC bit is set yet.
3518 3487 */
3519 3488 while (i >= 0) {
3520 3489 pp = *pps;
3521 3490 if (newidx)
3522 3491 pp->p_index = (PP_MAPINDEX(pp) &
3523 3492 ~index);
3524 3493 pps--;
3525 3494 i--;
3526 3495 }
3527 3496 return (1);
3528 3497 }
3529 3498 pfnum++;
3530 3499 addr += MMU_PAGESIZE;
3531 3500 }
3532 3501
3533 3502 #ifdef VAC
3534 3503 if (vac_err) {
3535 3504 if (ttesz > osz) {
3536 3505 /*
3537 3506 			 * There are some smaller mappings that cause vac
3538 3507 * conflicts. Convert all existing small mappings to
3539 3508 * TNC.
3540 3509 */
3541 3510 SFMMU_STAT_ADD(sf_uncache_conflict, npgs);
3542 3511 sfmmu_page_cache_array(pp1, HAT_TMPNC, CACHE_FLUSH,
3543 3512 npgs);
3544 3513 } else {
3545 3514 /* EMPTY */
3546 3515 /*
3547 3516 			 * If there exists a big page mapping,
3548 3517 			 * that means the whole existing big page
3549 3518 			 * has the TNC setting already. No need to convert to
3550 3519 * TNC again.
3551 3520 */
3552 3521 ASSERT(PP_ISTNC(pp1));
3553 3522 }
3554 3523 }
3555 3524 #endif /* VAC */
3556 3525
3557 3526 return (0);
3558 3527 }
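
A standalone illustration of the p_index bit arithmetic that sfmmu_pagearray_setup() relies on; the macro bodies are the ones quoted in the in-line comment above, while the loop bounds and the printf output are only for demonstration.

	#include <stdio.h>

	#define	PAGESZ_TO_INDEX(sz)	(1 << (sz))	/* values quoted in the comment above */
	#define	SFMMU_INDEX_SHIFT	6
	#define	SFMMU_INDEX_MASK	((1 << SFMMU_INDEX_SHIFT) - 1)
	#define	PP_MAPINDEX(p_index)	((p_index) & SFMMU_INDEX_MASK)

	int
	main(void)
	{
		unsigned int p_index = 0;
		int ttesz;

		/* Setting a large-page mapping marks its size bit in p_index. */
		for (ttesz = 1; ttesz <= 5; ttesz++) {
			unsigned int index = PAGESZ_TO_INDEX(ttesz);
			int newidx = (PP_MAPINDEX(p_index) & index) == 0;

			p_index = PP_MAPINDEX(p_index) | index;
			printf("ttesz %d: index 0x%02x new %d p_index 0x%02x\n",
			    ttesz, index, newidx, p_index);
		}

		/* Undoing a failed setup clears only the bit that was newly added. */
		p_index = PP_MAPINDEX(p_index) & ~PAGESZ_TO_INDEX(5);
		printf("after undo: p_index 0x%02x\n", p_index);
		return (0);
	}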
3559 3528
3560 3529 #ifdef VAC
3561 3530 /*
3562 3531  * Routine that checks VAC consistency for a large page. It also
3563 3532  * sets the virtual color for all pp's of this big mapping.
3564 3533 */
3565 3534 static int
3566 3535 sfmmu_vacconflict_array(caddr_t addr, page_t *pp, int *cflags)
3567 3536 {
3568 3537 int vcolor, ocolor;
3569 3538
3570 3539 ASSERT(sfmmu_mlist_held(pp));
3571 3540
3572 3541 if (PP_ISNC(pp)) {
3573 3542 return (HAT_TMPNC);
3574 3543 }
3575 3544
3576 3545 vcolor = addr_to_vcolor(addr);
3577 3546 if (PP_NEWPAGE(pp)) {
3578 3547 PP_SET_VCOLOR(pp, vcolor);
3579 3548 return (0);
3580 3549 }
3581 3550
3582 3551 ocolor = PP_GET_VCOLOR(pp);
3583 3552 if (ocolor == vcolor) {
3584 3553 return (0);
3585 3554 }
3586 3555
3587 3556 if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) {
3588 3557 /*
3589 3558 		 * Previous user of the page had a different color
3590 3559 * but since there are no current users
3591 3560 * we just flush the cache and change the color.
3592 3561 * As an optimization for large pages we flush the
3593 3562 * entire cache of that color and set a flag.
3594 3563 */
3595 3564 SFMMU_STAT(sf_pgcolor_conflict);
3596 3565 if (!CacheColor_IsFlushed(*cflags, ocolor)) {
3597 3566 CacheColor_SetFlushed(*cflags, ocolor);
3598 3567 sfmmu_cache_flushcolor(ocolor, pp->p_pagenum);
3599 3568 }
3600 3569 PP_SET_VCOLOR(pp, vcolor);
3601 3570 return (0);
3602 3571 }
3603 3572
3604 3573 /*
3605 3574 * We got a real conflict with a current mapping.
3606 3575 	 * Set flags to start uncaching all mappings
3607 3576 	 * and return failure so we restart the loop over
3608 3577 	 * the pp array from the beginning.
3609 3578 */
3610 3579 return (HAT_TMPNC);
3611 3580 }
3612 3581 #endif /* VAC */
3613 3582
3614 3583 /*
3615 3584 * creates a large page shadow hmeblk for a tte.
3616 3585 * The purpose of this routine is to allow us to do quick unloads because
3617 3586 * the vm layer can easily pass a very large but sparsely populated range.
3618 3587 */
3619 3588 static struct hme_blk *
3620 3589 sfmmu_shadow_hcreate(sfmmu_t *sfmmup, caddr_t vaddr, int ttesz, uint_t flags)
3621 3590 {
3622 3591 struct hmehash_bucket *hmebp;
3623 3592 hmeblk_tag hblktag;
3624 3593 int hmeshift, size, vshift;
3625 3594 uint_t shw_mask, newshw_mask;
3626 3595 struct hme_blk *hmeblkp;
3627 3596
3628 3597 ASSERT(sfmmup != KHATID);
3629 3598 if (mmu_page_sizes == max_mmu_page_sizes) {
3630 3599 ASSERT(ttesz < TTE256M);
3631 3600 } else {
3632 3601 ASSERT(ttesz < TTE4M);
3633 3602 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
3634 3603 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
3635 3604 }
3636 3605
3637 3606 if (ttesz == TTE8K) {
3638 3607 size = TTE512K;
3639 3608 } else {
3640 3609 size = ++ttesz;
3641 3610 }
3642 3611
3643 3612 hblktag.htag_id = sfmmup;
3644 3613 hmeshift = HME_HASH_SHIFT(size);
3645 3614 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
3646 3615 hblktag.htag_rehash = HME_HASH_REHASH(size);
3647 3616 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3648 3617 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift);
3649 3618
3650 3619 SFMMU_HASH_LOCK(hmebp);
3651 3620
3652 3621 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
3653 3622 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve);
3654 3623 if (hmeblkp == NULL) {
3655 3624 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size,
3656 3625 hblktag, flags, SFMMU_INVALID_SHMERID);
3657 3626 }
3658 3627 ASSERT(hmeblkp);
3659 3628 if (!hmeblkp->hblk_shw_mask) {
3660 3629 /*
3661 3630 		 * if this is an unused hblk it was just allocated or could
3662 3631 * potentially be a previous large page hblk so we need to
3663 3632 * set the shadow bit.
3664 3633 */
3665 3634 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt);
3666 3635 hmeblkp->hblk_shw_bit = 1;
3667 3636 } else if (hmeblkp->hblk_shw_bit == 0) {
3668 3637 panic("sfmmu_shadow_hcreate: shw bit not set in hmeblkp 0x%p",
3669 3638 (void *)hmeblkp);
3670 3639 }
3671 3640 ASSERT(hmeblkp->hblk_shw_bit == 1);
3672 3641 ASSERT(!hmeblkp->hblk_shared);
3673 3642 vshift = vaddr_to_vshift(hblktag, vaddr, size);
3674 3643 ASSERT(vshift < 8);
3675 3644 /*
3676 3645 * Atomically set shw mask bit
3677 3646 */
3678 3647 do {
3679 3648 shw_mask = hmeblkp->hblk_shw_mask;
3680 3649 newshw_mask = shw_mask | (1 << vshift);
3681 3650 newshw_mask = atomic_cas_32(&hmeblkp->hblk_shw_mask, shw_mask,
3682 3651 newshw_mask);
3683 3652 } while (newshw_mask != shw_mask);
3684 3653
3685 3654 SFMMU_HASH_UNLOCK(hmebp);
3686 3655
3687 3656 return (hmeblkp);
3688 3657 }
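
The shw_mask update above is the standard read/modify/compare-and-swap retry loop built on atomic_cas_32(). An equivalent user-space sketch with C11 atomics is shown here; it illustrates the idiom only and is not the kernel interface.

	#include <stdatomic.h>
	#include <stdint.h>

	/*
	 * Atomically set bit 'vshift' in *maskp, retrying if another thread
	 * changed the mask between our read and our compare-and-swap --
	 * the same pattern as the atomic_cas_32() loop above.
	 */
	void
	atomic_set_bit(_Atomic uint32_t *maskp, unsigned int vshift)
	{
		uint32_t old, new;

		do {
			old = atomic_load(maskp);
			new = old | (1u << vshift);
		} while (!atomic_compare_exchange_weak(maskp, &old, new));
	}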
3689 3658
3690 3659 /*
3691 3660  * This routine cleans up a previous shadow hmeblk and changes it to
3692 3661  * a regular hblk. This happens rarely but it is possible
3693 3662  * when a process wants to use large pages and there are hblks still
3694 3663  * lying around from the previous address space that used these hmeblks.
3695 3664  * The alternative was to clean up the shadow hblks at unload time
3696 3665  * but since so few user processes actually use large pages, it is
3697 3666  * better to be lazy and clean up at this time.
3698 3667 */
3699 3668 static void
3700 3669 sfmmu_shadow_hcleanup(sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
3701 3670 struct hmehash_bucket *hmebp)
3702 3671 {
3703 3672 caddr_t addr, endaddr;
3704 3673 int hashno, size;
3705 3674
3706 3675 ASSERT(hmeblkp->hblk_shw_bit);
3707 3676 ASSERT(!hmeblkp->hblk_shared);
3708 3677
3709 3678 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
3710 3679
3711 3680 if (!hmeblkp->hblk_shw_mask) {
3712 3681 hmeblkp->hblk_shw_bit = 0;
3713 3682 return;
3714 3683 }
3715 3684 addr = (caddr_t)get_hblk_base(hmeblkp);
3716 3685 endaddr = get_hblk_endaddr(hmeblkp);
3717 3686 size = get_hblk_ttesz(hmeblkp);
3718 3687 hashno = size - 1;
3719 3688 ASSERT(hashno > 0);
3720 3689 SFMMU_HASH_UNLOCK(hmebp);
3721 3690
3722 3691 sfmmu_free_hblks(sfmmup, addr, endaddr, hashno);
3723 3692
3724 3693 SFMMU_HASH_LOCK(hmebp);
3725 3694 }
3726 3695
3727 3696 static void
3728 3697 sfmmu_free_hblks(sfmmu_t *sfmmup, caddr_t addr, caddr_t endaddr,
3729 3698 int hashno)
3730 3699 {
3731 3700 int hmeshift, shadow = 0;
3732 3701 hmeblk_tag hblktag;
3733 3702 struct hmehash_bucket *hmebp;
3734 3703 struct hme_blk *hmeblkp;
3735 3704 struct hme_blk *nx_hblk, *pr_hblk, *list = NULL;
3736 3705
3737 3706 ASSERT(hashno > 0);
3738 3707 hblktag.htag_id = sfmmup;
3739 3708 hblktag.htag_rehash = hashno;
3740 3709 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3741 3710
3742 3711 hmeshift = HME_HASH_SHIFT(hashno);
3743 3712
3744 3713 while (addr < endaddr) {
3745 3714 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3746 3715 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
3747 3716 SFMMU_HASH_LOCK(hmebp);
3748 3717 /* inline HME_HASH_SEARCH */
3749 3718 hmeblkp = hmebp->hmeblkp;
3750 3719 pr_hblk = NULL;
3751 3720 while (hmeblkp) {
3752 3721 if (HTAGS_EQ(hmeblkp->hblk_tag, hblktag)) {
3753 3722 /* found hme_blk */
3754 3723 ASSERT(!hmeblkp->hblk_shared);
3755 3724 if (hmeblkp->hblk_shw_bit) {
3756 3725 if (hmeblkp->hblk_shw_mask) {
3757 3726 shadow = 1;
3758 3727 sfmmu_shadow_hcleanup(sfmmup,
3759 3728 hmeblkp, hmebp);
3760 3729 break;
3761 3730 } else {
3762 3731 hmeblkp->hblk_shw_bit = 0;
3763 3732 }
3764 3733 }
3765 3734
3766 3735 /*
3767 3736 			 * Hblk_hmecnt and hblk_vcnt could be nonzero
3768 3737 			 * since hblk_unload() does not guarantee that.
3769 3738 *
3770 3739 * XXX - this could cause tteload() to spin
3771 3740 * where sfmmu_shadow_hcleanup() is called.
3772 3741 */
3773 3742 }
3774 3743
3775 3744 nx_hblk = hmeblkp->hblk_next;
3776 3745 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
3777 3746 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3778 3747 &list, 0);
3779 3748 } else {
3780 3749 pr_hblk = hmeblkp;
3781 3750 }
3782 3751 hmeblkp = nx_hblk;
3783 3752 }
3784 3753
3785 3754 SFMMU_HASH_UNLOCK(hmebp);
3786 3755
3787 3756 if (shadow) {
3788 3757 /*
3789 3758 			 * We found another shadow hblk and cleaned up its
3790 3759 			 * children. We need to go back and clean up
3791 3760 * the original hblk so we don't change the
3792 3761 * addr.
3793 3762 */
3794 3763 shadow = 0;
3795 3764 } else {
3796 3765 addr = (caddr_t)roundup((uintptr_t)addr + 1,
3797 3766 (1 << hmeshift));
3798 3767 }
3799 3768 }
3800 3769 sfmmu_hblks_list_purge(&list, 0);
3801 3770 }
3802 3771
3803 3772 /*
3804 3773  * This routine's job is to delete stale invalid shared hmeregion hmeblks that
3805 3774 * may still linger on after pageunload.
3806 3775 */
3807 3776 static void
3808 3777 sfmmu_cleanup_rhblk(sf_srd_t *srdp, caddr_t addr, uint_t rid, int ttesz)
3809 3778 {
3810 3779 int hmeshift;
3811 3780 hmeblk_tag hblktag;
3812 3781 struct hmehash_bucket *hmebp;
3813 3782 struct hme_blk *hmeblkp;
3814 3783 struct hme_blk *pr_hblk;
3815 3784 struct hme_blk *list = NULL;
3816 3785
3817 3786 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
3818 3787 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
3819 3788
3820 3789 hmeshift = HME_HASH_SHIFT(ttesz);
3821 3790 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3822 3791 hblktag.htag_rehash = ttesz;
3823 3792 hblktag.htag_rid = rid;
3824 3793 hblktag.htag_id = srdp;
3825 3794 hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift);
3826 3795
3827 3796 SFMMU_HASH_LOCK(hmebp);
3828 3797 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
3829 3798 if (hmeblkp != NULL) {
3830 3799 ASSERT(hmeblkp->hblk_shared);
3831 3800 ASSERT(!hmeblkp->hblk_shw_bit);
3832 3801 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
3833 3802 panic("sfmmu_cleanup_rhblk: valid hmeblk");
3834 3803 }
3835 3804 ASSERT(!hmeblkp->hblk_lckcnt);
3836 3805 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3837 3806 &list, 0);
3838 3807 }
3839 3808 SFMMU_HASH_UNLOCK(hmebp);
3840 3809 sfmmu_hblks_list_purge(&list, 0);
3841 3810 }
3842 3811
3843 3812 /* ARGSUSED */
3844 3813 static void
3845 3814 sfmmu_rgn_cb_noop(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr,
3846 3815 size_t r_size, void *r_obj, u_offset_t r_objoff)
3847 3816 {
3848 3817 }
3849 3818
3850 3819 /*
3851 3820  * Searches for an hmeblk which maps addr; if the hmeblk is found, it
3852 3821  * unloads this mapping and updates *eaddrp.
3853 3822 */
3854 3823 static void
3855 3824 sfmmu_unload_hmeregion_va(sf_srd_t *srdp, uint_t rid, caddr_t addr,
3856 3825 caddr_t eaddr, int ttesz, caddr_t *eaddrp)
3857 3826 {
3858 3827 int hmeshift;
3859 3828 hmeblk_tag hblktag;
3860 3829 struct hmehash_bucket *hmebp;
3861 3830 struct hme_blk *hmeblkp;
3862 3831 struct hme_blk *pr_hblk;
3863 3832 struct hme_blk *list = NULL;
3864 3833
3865 3834 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
3866 3835 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
3867 3836 ASSERT(ttesz >= HBLK_MIN_TTESZ);
3868 3837
3869 3838 hmeshift = HME_HASH_SHIFT(ttesz);
3870 3839 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3871 3840 hblktag.htag_rehash = ttesz;
3872 3841 hblktag.htag_rid = rid;
3873 3842 hblktag.htag_id = srdp;
3874 3843 hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift);
3875 3844
3876 3845 SFMMU_HASH_LOCK(hmebp);
3877 3846 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
3878 3847 if (hmeblkp != NULL) {
3879 3848 ASSERT(hmeblkp->hblk_shared);
3880 3849 ASSERT(!hmeblkp->hblk_lckcnt);
3881 3850 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
3882 3851 *eaddrp = sfmmu_hblk_unload(NULL, hmeblkp, addr,
3883 3852 eaddr, NULL, HAT_UNLOAD);
3884 3853 ASSERT(*eaddrp > addr);
3885 3854 }
3886 3855 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt);
3887 3856 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3888 3857 &list, 0);
3889 3858 }
3890 3859 SFMMU_HASH_UNLOCK(hmebp);
3891 3860 sfmmu_hblks_list_purge(&list, 0);
3892 3861 }
3893 3862
3894 3863 static void
3895 3864 sfmmu_unload_hmeregion(sf_srd_t *srdp, sf_region_t *rgnp)
3896 3865 {
3897 3866 int ttesz = rgnp->rgn_pgszc;
3898 3867 size_t rsz = rgnp->rgn_size;
3899 3868 caddr_t rsaddr = rgnp->rgn_saddr;
3900 3869 caddr_t readdr = rsaddr + rsz;
3901 3870 caddr_t rhsaddr;
3902 3871 caddr_t va;
3903 3872 uint_t rid = rgnp->rgn_id;
3904 3873 caddr_t cbsaddr;
3905 3874 caddr_t cbeaddr;
3906 3875 hat_rgn_cb_func_t rcbfunc;
3907 3876 ulong_t cnt;
3908 3877
3909 3878 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
3910 3879 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
3911 3880
3912 3881 ASSERT(IS_P2ALIGNED(rsaddr, TTEBYTES(ttesz)));
3913 3882 ASSERT(IS_P2ALIGNED(rsz, TTEBYTES(ttesz)));
3914 3883 if (ttesz < HBLK_MIN_TTESZ) {
3915 3884 ttesz = HBLK_MIN_TTESZ;
3916 3885 rhsaddr = (caddr_t)P2ALIGN((uintptr_t)rsaddr, HBLK_MIN_BYTES);
3917 3886 } else {
3918 3887 rhsaddr = rsaddr;
3919 3888 }
3920 3889
3921 3890 if ((rcbfunc = rgnp->rgn_cb_function) == NULL) {
3922 3891 rcbfunc = sfmmu_rgn_cb_noop;
3923 3892 }
3924 3893
3925 3894 while (ttesz >= HBLK_MIN_TTESZ) {
3926 3895 cbsaddr = rsaddr;
3927 3896 cbeaddr = rsaddr;
3928 3897 if (!(rgnp->rgn_hmeflags & (1 << ttesz))) {
3929 3898 ttesz--;
3930 3899 continue;
3931 3900 }
3932 3901 cnt = 0;
3933 3902 va = rsaddr;
3934 3903 while (va < readdr) {
3935 3904 ASSERT(va >= rhsaddr);
3936 3905 if (va != cbeaddr) {
3937 3906 if (cbeaddr != cbsaddr) {
3938 3907 ASSERT(cbeaddr > cbsaddr);
3939 3908 (*rcbfunc)(cbsaddr, cbeaddr,
3940 3909 rsaddr, rsz, rgnp->rgn_obj,
3941 3910 rgnp->rgn_objoff);
3942 3911 }
3943 3912 cbsaddr = va;
3944 3913 cbeaddr = va;
3945 3914 }
3946 3915 sfmmu_unload_hmeregion_va(srdp, rid, va, readdr,
3947 3916 ttesz, &cbeaddr);
3948 3917 cnt++;
3949 3918 va = rhsaddr + (cnt << TTE_PAGE_SHIFT(ttesz));
3950 3919 }
3951 3920 if (cbeaddr != cbsaddr) {
3952 3921 ASSERT(cbeaddr > cbsaddr);
3953 3922 (*rcbfunc)(cbsaddr, cbeaddr, rsaddr,
3954 3923 rsz, rgnp->rgn_obj,
3955 3924 rgnp->rgn_objoff);
3956 3925 }
3957 3926 ttesz--;
3958 3927 }
3959 3928 }
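
The cbsaddr/cbeaddr pair above batches virtually contiguous unloaded chunks so the region callback fires once per contiguous run rather than once per hmeblk. Below is a simplified, self-contained sketch of that coalescing pattern over an array; the element type, the predicate, and the callback are hypothetical.

	#include <stddef.h>

	/*
	 * Walk items[0..n); whenever a run of consecutive "unloaded" items ends,
	 * invoke cb() once for the whole run -- mirroring how cbsaddr/cbeaddr
	 * accumulate contiguous unloaded VAs before calling the region callback.
	 */
	void
	coalesce_runs(const int *items, size_t n, int (*unloaded)(int),
	    void (*cb)(size_t start, size_t end))
	{
		size_t start = 0, end = 0;	/* current run: [start, end) */
		size_t i;

		for (i = 0; i < n; i++) {
			if (!unloaded(items[i])) {
				if (end != start)
					cb(start, end);	/* flush the finished run */
				start = end = i + 1;
			} else {
				if (end == start)
					start = i;	/* a new run begins here */
				end = i + 1;
			}
		}
		if (end != start)
			cb(start, end);			/* trailing run */
	}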
3960 3929
3961 3930 /*
3962 3931 * Release one hardware address translation lock on the given address range.
3963 3932 */
1462 lines elided
3964 3933 void
3965 3934 hat_unlock(struct hat *sfmmup, caddr_t addr, size_t len)
3966 3935 {
3967 3936 struct hmehash_bucket *hmebp;
3968 3937 hmeblk_tag hblktag;
3969 3938 int hmeshift, hashno = 1;
3970 3939 struct hme_blk *hmeblkp, *list = NULL;
3971 3940 caddr_t endaddr;
3972 3941
3973 3942 ASSERT(sfmmup != NULL);
3974 - ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
3975 3943
3976 3944 ASSERT((sfmmup == ksfmmup) ||
3977 3945 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
3978 3946 ASSERT((len & MMU_PAGEOFFSET) == 0);
3979 3947 endaddr = addr + len;
3980 3948 hblktag.htag_id = sfmmup;
3981 3949 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3982 3950
3983 3951 /*
3984 3952 * Spitfire supports 4 page sizes.
3985 3953 * Most pages are expected to be of the smallest page size (8K) and
3986 3954 * these will not need to be rehashed. 64K pages also don't need to be
3987 3955 * rehashed because an hmeblk spans 64K of address space. 512K pages
3988 3956 	 * might need 1 rehash and 4M pages might need 2 rehashes.
3989 3957 */
3990 3958 while (addr < endaddr) {
3991 3959 hmeshift = HME_HASH_SHIFT(hashno);
3992 3960 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3993 3961 hblktag.htag_rehash = hashno;
3994 3962 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
3995 3963
3996 3964 SFMMU_HASH_LOCK(hmebp);
3997 3965
3998 3966 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
3999 3967 if (hmeblkp != NULL) {
4000 3968 ASSERT(!hmeblkp->hblk_shared);
4001 3969 /*
4002 3970 * If we encounter a shadow hmeblk then
4003 3971 * we know there are no valid hmeblks mapping
4004 3972 * this address at this size or larger.
4005 3973 * Just increment address by the smallest
4006 3974 * page size.
4007 3975 */
4008 3976 if (hmeblkp->hblk_shw_bit) {
4009 3977 addr += MMU_PAGESIZE;
4010 3978 } else {
4011 3979 addr = sfmmu_hblk_unlock(hmeblkp, addr,
4012 3980 endaddr);
4013 3981 }
4014 3982 SFMMU_HASH_UNLOCK(hmebp);
4015 3983 hashno = 1;
4016 3984 continue;
4017 3985 }
4018 3986 SFMMU_HASH_UNLOCK(hmebp);
4019 3987
4020 3988 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
4021 3989 /*
4022 3990 * We have traversed the whole list and rehashed
4023 3991 * if necessary without finding the address to unlock
4024 3992 * which should never happen.
4025 3993 */
4026 3994 panic("sfmmu_unlock: addr not found. "
4027 3995 "addr %p hat %p", (void *)addr, (void *)sfmmup);
4028 3996 } else {
4029 3997 hashno++;
4030 3998 }
4031 3999 }
4032 4000
4033 4001 sfmmu_hblks_list_purge(&list, 0);
4034 4002 }
4035 4003
4036 4004 void
4037 4005 hat_unlock_region(struct hat *sfmmup, caddr_t addr, size_t len,
4038 4006 hat_region_cookie_t rcookie)
4039 4007 {
4040 4008 sf_srd_t *srdp;
4041 4009 sf_region_t *rgnp;
4042 4010 int ttesz;
4043 4011 uint_t rid;
4044 4012 caddr_t eaddr;
4045 4013 caddr_t va;
4046 4014 int hmeshift;
4047 4015 hmeblk_tag hblktag;
4048 4016 struct hmehash_bucket *hmebp;
64 lines elided
4049 4017 struct hme_blk *hmeblkp;
4050 4018 struct hme_blk *pr_hblk;
4051 4019 struct hme_blk *list;
4052 4020
4053 4021 if (rcookie == HAT_INVALID_REGION_COOKIE) {
4054 4022 hat_unlock(sfmmup, addr, len);
4055 4023 return;
4056 4024 }
4057 4025
4058 4026 ASSERT(sfmmup != NULL);
4059 - ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
4060 4027 ASSERT(sfmmup != ksfmmup);
4061 4028
4062 4029 srdp = sfmmup->sfmmu_srdp;
4063 4030 rid = (uint_t)((uint64_t)rcookie);
4064 4031 VERIFY3U(rid, <, SFMMU_MAX_HME_REGIONS);
4065 4032 eaddr = addr + len;
4066 4033 va = addr;
4067 4034 list = NULL;
4068 4035 rgnp = srdp->srd_hmergnp[rid];
4069 4036 SFMMU_VALIDATE_HMERID(sfmmup, rid, addr, len);
4070 4037
4071 4038 ASSERT(IS_P2ALIGNED(addr, TTEBYTES(rgnp->rgn_pgszc)));
4072 4039 ASSERT(IS_P2ALIGNED(len, TTEBYTES(rgnp->rgn_pgszc)));
4073 4040 if (rgnp->rgn_pgszc < HBLK_MIN_TTESZ) {
4074 4041 ttesz = HBLK_MIN_TTESZ;
4075 4042 } else {
4076 4043 ttesz = rgnp->rgn_pgszc;
4077 4044 }
4078 4045 while (va < eaddr) {
4079 4046 while (ttesz < rgnp->rgn_pgszc &&
4080 4047 IS_P2ALIGNED(va, TTEBYTES(ttesz + 1))) {
4081 4048 ttesz++;
4082 4049 }
4083 4050 while (ttesz >= HBLK_MIN_TTESZ) {
4084 4051 if (!(rgnp->rgn_hmeflags & (1 << ttesz))) {
4085 4052 ttesz--;
4086 4053 continue;
4087 4054 }
4088 4055 hmeshift = HME_HASH_SHIFT(ttesz);
4089 4056 hblktag.htag_bspage = HME_HASH_BSPAGE(va, hmeshift);
4090 4057 hblktag.htag_rehash = ttesz;
4091 4058 hblktag.htag_rid = rid;
4092 4059 hblktag.htag_id = srdp;
4093 4060 hmebp = HME_HASH_FUNCTION(srdp, va, hmeshift);
4094 4061 SFMMU_HASH_LOCK(hmebp);
4095 4062 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk,
4096 4063 &list);
4097 4064 if (hmeblkp == NULL) {
4098 4065 SFMMU_HASH_UNLOCK(hmebp);
4099 4066 ttesz--;
4100 4067 continue;
4101 4068 }
4102 4069 ASSERT(hmeblkp->hblk_shared);
4103 4070 va = sfmmu_hblk_unlock(hmeblkp, va, eaddr);
4104 4071 ASSERT(va >= eaddr ||
4105 4072 IS_P2ALIGNED((uintptr_t)va, TTEBYTES(ttesz)));
4106 4073 SFMMU_HASH_UNLOCK(hmebp);
4107 4074 break;
4108 4075 }
4109 4076 if (ttesz < HBLK_MIN_TTESZ) {
4110 4077 panic("hat_unlock_region: addr not found "
4111 4078 "addr %p hat %p", (void *)va, (void *)sfmmup);
4112 4079 }
4113 4080 }
4114 4081 sfmmu_hblks_list_purge(&list, 0);
4115 4082 }
4116 4083
4117 4084 /*
4118 4085 * Function to unlock a range of addresses in an hmeblk. It returns the
4119 4086 * next address that needs to be unlocked.
4120 4087 * Should be called with the hash lock held.
4121 4088 */
4122 4089 static caddr_t
4123 4090 sfmmu_hblk_unlock(struct hme_blk *hmeblkp, caddr_t addr, caddr_t endaddr)
4124 4091 {
4125 4092 struct sf_hment *sfhme;
4126 4093 tte_t tteold, ttemod;
4127 4094 int ttesz, ret;
4128 4095
4129 4096 ASSERT(in_hblk_range(hmeblkp, addr));
4130 4097 ASSERT(hmeblkp->hblk_shw_bit == 0);
4131 4098
4132 4099 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
4133 4100 ttesz = get_hblk_ttesz(hmeblkp);
4134 4101
4135 4102 HBLKTOHME(sfhme, hmeblkp, addr);
4136 4103 while (addr < endaddr) {
4137 4104 readtte:
4138 4105 sfmmu_copytte(&sfhme->hme_tte, &tteold);
4139 4106 if (TTE_IS_VALID(&tteold)) {
4140 4107
4141 4108 ttemod = tteold;
4142 4109
4143 4110 ret = sfmmu_modifytte_try(&tteold, &ttemod,
4144 4111 &sfhme->hme_tte);
4145 4112
4146 4113 if (ret < 0)
4147 4114 goto readtte;
4148 4115
4149 4116 if (hmeblkp->hblk_lckcnt == 0)
4150 4117 panic("zero hblk lckcnt");
4151 4118
4152 4119 if (((uintptr_t)addr + TTEBYTES(ttesz)) >
4153 4120 (uintptr_t)endaddr)
4154 4121 panic("can't unlock large tte");
4155 4122
4156 4123 ASSERT(hmeblkp->hblk_lckcnt > 0);
4157 4124 atomic_dec_32(&hmeblkp->hblk_lckcnt);
4158 4125 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
4159 4126 } else {
4160 4127 panic("sfmmu_hblk_unlock: invalid tte");
4161 4128 }
4162 4129 addr += TTEBYTES(ttesz);
4163 4130 sfhme++;
4164 4131 }
4165 4132 return (addr);
4166 4133 }
4167 4134
4168 4135 /*
4169 4136 * Physical Address Mapping Framework
4170 4137 *
4171 4138 * General rules:
4172 4139 *
4173 4140 * (1) Applies only to seg_kmem memory pages. To make things easier,
4174 4141 * seg_kpm addresses are also accepted by the routines, but nothing
4175 4142 * is done with them since by definition their PA mappings are static.
4176 4143 * (2) hat_add_callback() may only be called while holding the page lock
4177 4144 * SE_SHARED or SE_EXCL of the underlying page (e.g., as_pagelock()),
4178 4145 * or passing HAC_PAGELOCK flag.
4179 4146 * (3) prehandler() and posthandler() may not call hat_add_callback() or
4180 4147 * hat_delete_callback(), nor should they allocate memory. Post quiesce
4181 4148 * callbacks may not sleep or acquire adaptive mutex locks.
4182 4149 * (4) Either prehandler() or posthandler() (but not both) may be specified
4183 4150 * as being NULL. Specifying an errhandler() is optional.
4184 4151 *
4185 4152 * Details of using the framework:
4186 4153 *
4187 4154 * registering a callback (hat_register_callback())
4188 4155 *
4189 4156 * Pass prehandler, posthandler, errhandler addresses
4190 4157 * as described below. If capture_cpus argument is nonzero,
4191 4158 * suspend callback to the prehandler will occur with CPUs
4192 4159 * captured and executing xc_loop() and CPUs will remain
4193 4160 * captured until after the posthandler suspend callback
4194 4161 * occurs.
4195 4162 *
4196 4163 * adding a callback (hat_add_callback())
4197 4164 *
4198 4165 * as_pagelock();
4199 4166 * hat_add_callback();
4200 4167 * save returned pfn in private data structures or program registers;
4201 4168 * as_pageunlock();
4202 4169 *
4203 4170 * prehandler()
4204 4171 *
4205 4172 * Stop all accesses by physical address to this memory page.
4206 4173 * Called twice: the first, PRESUSPEND, is a context safe to acquire
4207 4174 * adaptive locks. The second, SUSPEND, is called at high PIL with
4208 4175 * CPUs captured so adaptive locks may NOT be acquired (and all spin
4209 4176 * locks must be XCALL_PIL or higher locks).
4210 4177 *
4211 4178 * May return the following errors:
4212 4179 * EIO: A fatal error has occurred. This will result in panic.
4213 4180 * EAGAIN: The page cannot be suspended. This will fail the
4214 4181 * relocation.
4215 4182 * 0: Success.
4216 4183 *
4217 4184 * posthandler()
4218 4185 *
4219 4186 * Save new pfn in private data structures or program registers;
4220 4187 * not allowed to fail (non-zero return values will result in panic).
4221 4188 *
4222 4189 * errhandler()
4223 4190 *
4224 4191 * called when an error occurs related to the callback. Currently
4225 4192 * the only such error is HAT_CB_ERR_LEAKED which indicates that
4226 4193 * a page is being freed, but there are still outstanding callback(s)
4227 4194 * registered on the page.
4228 4195 *
4229 4196 * removing a callback (hat_delete_callback(); e.g., prior to freeing memory)
4230 4197 *
4231 4198 * stop using physical address
4232 4199 * hat_delete_callback();
4233 4200 *
4234 4201 */
4235 4202
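
A hedged sketch of the calling sequence described above, using only the interfaces defined later in this file (hat_register_callback(), hat_add_callback(), hat_delete_callback()); the driver name, key value, and handler bodies are hypothetical and exist purely to show the shape of the pattern.

	/* Illustrative only -- not part of hat_sfmmu.c. */
	#define	MYDRV_CB_KEY	0x6d796362	/* hypothetical unique module key */

	static id_t mydrv_cb_id;
	static pfn_t mydrv_pfn;
	static void *mydrv_cookie;

	static int
	mydrv_prehandler(caddr_t va, uint_t len, uint_t flags, void *pvt)
	{
		/* PRESUSPEND, then SUSPEND: stop all PA-based access to the page. */
		return (0);
	}

	static int
	mydrv_posthandler(caddr_t va, uint_t len, uint_t flags, void *pvt, pfn_t newpfn)
	{
		mydrv_pfn = newpfn;		/* record the page's new location */
		return (0);
	}

	void
	mydrv_watch_page(caddr_t va, uint_t len, void *pvt)
	{
		mydrv_cb_id = hat_register_callback(MYDRV_CB_KEY, mydrv_prehandler,
		    mydrv_posthandler, NULL, 0);
		(void) hat_add_callback(mydrv_cb_id, va, len, HAC_PAGELOCK, pvt,
		    &mydrv_pfn, &mydrv_cookie);
	}

	void
	mydrv_unwatch_page(caddr_t va, uint_t len, void *pvt)
	{
		hat_delete_callback(va, len, pvt, HAC_PAGELOCK, mydrv_cookie);
	}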
4236 4203 /*
4237 4204 * Register a callback class. Each subsystem should do this once and
4238 4205 * cache the id_t returned for use in setting up and tearing down callbacks.
4239 4206 *
4240 4207 * There is no facility for removing callback IDs once they are created;
4241 4208 * the "key" should be unique for each module, so in case a module is unloaded
4242 4209 * and subsequently re-loaded, we can recycle the module's previous entry.
4243 4210 */
4244 4211 id_t
4245 4212 hat_register_callback(int key,
4246 4213 int (*prehandler)(caddr_t, uint_t, uint_t, void *),
4247 4214 int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t),
4248 4215 int (*errhandler)(caddr_t, uint_t, uint_t, void *),
4249 4216 int capture_cpus)
4250 4217 {
4251 4218 id_t id;
4252 4219
4253 4220 /*
4254 4221 * Search the table for a pre-existing callback associated with
4255 4222 * the identifier "key". If one exists, we re-use that entry in
4256 4223 * the table for this instance, otherwise we assign the next
4257 4224 * available table slot.
4258 4225 */
4259 4226 for (id = 0; id < sfmmu_max_cb_id; id++) {
4260 4227 if (sfmmu_cb_table[id].key == key)
4261 4228 break;
4262 4229 }
4263 4230
4264 4231 if (id == sfmmu_max_cb_id) {
4265 4232 id = sfmmu_cb_nextid++;
4266 4233 if (id >= sfmmu_max_cb_id)
4267 4234 panic("hat_register_callback: out of callback IDs");
4268 4235 }
4269 4236
4270 4237 ASSERT(prehandler != NULL || posthandler != NULL);
4271 4238
4272 4239 sfmmu_cb_table[id].key = key;
4273 4240 sfmmu_cb_table[id].prehandler = prehandler;
4274 4241 sfmmu_cb_table[id].posthandler = posthandler;
4275 4242 sfmmu_cb_table[id].errhandler = errhandler;
4276 4243 sfmmu_cb_table[id].capture_cpus = capture_cpus;
4277 4244
4278 4245 return (id);
4279 4246 }
4280 4247
4281 4248 #define HAC_COOKIE_NONE (void *)-1
4282 4249
4283 4250 /*
4284 4251 * Add relocation callbacks to the specified addr/len which will be called
4285 4252 * when relocating the associated page. See the description of pre and
4286 4253 * posthandler above for more details.
4287 4254 *
4288 4255 * If HAC_PAGELOCK is included in flags, the underlying memory page is
4289 4256 * locked internally so the caller must be able to deal with the callback
4290 4257 * running even before this function has returned. If HAC_PAGELOCK is not
4291 4258 * set, it is assumed that the underlying memory pages are locked.
4292 4259 *
4293 4260 * Since the caller must track the individual page boundaries anyway,
4294 4261 * we only allow a callback to be added to a single page (large
4295 4262 * or small). Thus [addr, addr + len) MUST be contained within a single
4296 4263 * page.
4297 4264 *
4298 4265 * Registering multiple callbacks on the same [addr, addr+len) is supported,
4299 4266 * _provided_that_ a unique parameter is specified for each callback.
4300 4267 * If multiple callbacks are registered on the same range the callback will
4301 4268 * be invoked with each unique parameter. Registering the same callback with
4302 4269 * the same argument more than once will result in corrupted kernel state.
4303 4270 *
4304 4271 * Returns the pfn of the underlying kernel page in *rpfn
4305 4272 * on success, or PFN_INVALID on failure.
4306 4273 *
4307 4274 * cookiep (if passed) provides storage space for an opaque cookie
4308 4275 * to return later to hat_delete_callback(). This cookie makes the callback
4309 4276 * deletion significantly quicker by avoiding a potentially lengthy hash
4310 4277 * search.
4311 4278 *
4312 4279 * Returns values:
4313 4280 * 0: success
4314 4281 * ENOMEM: memory allocation failure (e.g. flags was passed as HAC_NOSLEEP)
4315 4282 * EINVAL: callback ID is not valid
4316 4283 * ENXIO: ["vaddr", "vaddr" + len) is not mapped in the kernel's address
4317 4284 * space
4318 4285 * ERANGE: ["vaddr", "vaddr" + len) crosses a page boundary
4319 4286 */
4320 4287 int
4321 4288 hat_add_callback(id_t callback_id, caddr_t vaddr, uint_t len, uint_t flags,
4322 4289 void *pvt, pfn_t *rpfn, void **cookiep)
4323 4290 {
4324 4291 struct hmehash_bucket *hmebp;
4325 4292 hmeblk_tag hblktag;
4326 4293 struct hme_blk *hmeblkp;
4327 4294 int hmeshift, hashno;
4328 4295 caddr_t saddr, eaddr, baseaddr;
4329 4296 struct pa_hment *pahmep;
4330 4297 struct sf_hment *sfhmep, *osfhmep;
4331 4298 kmutex_t *pml;
4332 4299 tte_t tte;
4333 4300 page_t *pp;
4334 4301 vnode_t *vp;
4335 4302 u_offset_t off;
4336 4303 pfn_t pfn;
4337 4304 int kmflags = (flags & HAC_SLEEP)? KM_SLEEP : KM_NOSLEEP;
4338 4305 int locked = 0;
4339 4306
4340 4307 /*
4341 4308 * For KPM mappings, just return the physical address since we
4342 4309 * don't need to register any callbacks.
4343 4310 */
4344 4311 if (IS_KPM_ADDR(vaddr)) {
4345 4312 uint64_t paddr;
4346 4313 SFMMU_KPM_VTOP(vaddr, paddr);
4347 4314 *rpfn = btop(paddr);
4348 4315 if (cookiep != NULL)
4349 4316 *cookiep = HAC_COOKIE_NONE;
4350 4317 return (0);
4351 4318 }
4352 4319
4353 4320 if (callback_id < (id_t)0 || callback_id >= sfmmu_cb_nextid) {
4354 4321 *rpfn = PFN_INVALID;
4355 4322 return (EINVAL);
4356 4323 }
4357 4324
4358 4325 if ((pahmep = kmem_cache_alloc(pa_hment_cache, kmflags)) == NULL) {
4359 4326 *rpfn = PFN_INVALID;
4360 4327 return (ENOMEM);
4361 4328 }
4362 4329
4363 4330 sfhmep = &pahmep->sfment;
4364 4331
4365 4332 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK);
4366 4333 eaddr = saddr + len;
4367 4334
4368 4335 rehash:
4369 4336 /* Find the mapping(s) for this page */
4370 4337 for (hashno = TTE64K, hmeblkp = NULL;
4371 4338 hmeblkp == NULL && hashno <= mmu_hashcnt;
4372 4339 hashno++) {
4373 4340 hmeshift = HME_HASH_SHIFT(hashno);
4374 4341 hblktag.htag_id = ksfmmup;
4375 4342 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
4376 4343 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift);
4377 4344 hblktag.htag_rehash = hashno;
4378 4345 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift);
4379 4346
4380 4347 SFMMU_HASH_LOCK(hmebp);
4381 4348
4382 4349 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
4383 4350
4384 4351 if (hmeblkp == NULL)
4385 4352 SFMMU_HASH_UNLOCK(hmebp);
4386 4353 }
4387 4354
4388 4355 if (hmeblkp == NULL) {
4389 4356 kmem_cache_free(pa_hment_cache, pahmep);
4390 4357 *rpfn = PFN_INVALID;
4391 4358 return (ENXIO);
4392 4359 }
4393 4360
4394 4361 ASSERT(!hmeblkp->hblk_shared);
4395 4362
4396 4363 HBLKTOHME(osfhmep, hmeblkp, saddr);
4397 4364 sfmmu_copytte(&osfhmep->hme_tte, &tte);
4398 4365
4399 4366 if (!TTE_IS_VALID(&tte)) {
4400 4367 SFMMU_HASH_UNLOCK(hmebp);
4401 4368 kmem_cache_free(pa_hment_cache, pahmep);
4402 4369 *rpfn = PFN_INVALID;
4403 4370 return (ENXIO);
4404 4371 }
4405 4372
4406 4373 /*
4407 4374 * Make sure the boundaries for the callback fall within this
4408 4375 * single mapping.
4409 4376 */
4410 4377 baseaddr = (caddr_t)get_hblk_base(hmeblkp);
4411 4378 ASSERT(saddr >= baseaddr);
4412 4379 if (eaddr > saddr + TTEBYTES(TTE_CSZ(&tte))) {
4413 4380 SFMMU_HASH_UNLOCK(hmebp);
4414 4381 kmem_cache_free(pa_hment_cache, pahmep);
4415 4382 *rpfn = PFN_INVALID;
4416 4383 return (ERANGE);
4417 4384 }
4418 4385
4419 4386 pfn = sfmmu_ttetopfn(&tte, vaddr);
4420 4387
4421 4388 /*
4422 4389 * The pfn may not have a page_t underneath in which case we
4423 4390 * just return it. This can happen if we are doing I/O to a
4424 4391 * static portion of the kernel's address space, for instance.
4425 4392 */
4426 4393 pp = osfhmep->hme_page;
4427 4394 if (pp == NULL) {
4428 4395 SFMMU_HASH_UNLOCK(hmebp);
4429 4396 kmem_cache_free(pa_hment_cache, pahmep);
4430 4397 *rpfn = pfn;
4431 4398 if (cookiep)
4432 4399 *cookiep = HAC_COOKIE_NONE;
4433 4400 return (0);
4434 4401 }
4435 4402 ASSERT(pp == PP_PAGEROOT(pp));
4436 4403
4437 4404 vp = pp->p_vnode;
4438 4405 off = pp->p_offset;
4439 4406
4440 4407 pml = sfmmu_mlist_enter(pp);
4441 4408
4442 4409 if (flags & HAC_PAGELOCK) {
4443 4410 if (!page_trylock(pp, SE_SHARED)) {
4444 4411 /*
4445 4412 * Somebody is holding SE_EXCL lock. Might
4446 4413 * even be hat_page_relocate(). Drop all
4447 4414 * our locks, lookup the page in &kvp, and
4448 4415 * retry. If it doesn't exist in &kvp and &zvp,
4449 4416 * then we must be dealing with a kernel mapped
4450 4417 * page which doesn't actually belong to
4451 4418 * segkmem so we punt.
4452 4419 */
4453 4420 sfmmu_mlist_exit(pml);
4454 4421 SFMMU_HASH_UNLOCK(hmebp);
4455 4422 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED);
4456 4423
4457 4424 /* check zvp before giving up */
4458 4425 if (pp == NULL)
4459 4426 pp = page_lookup(&zvp, (u_offset_t)saddr,
4460 4427 SE_SHARED);
4461 4428
4462 4429 /* Okay, we didn't find it, give up */
4463 4430 if (pp == NULL) {
4464 4431 kmem_cache_free(pa_hment_cache, pahmep);
4465 4432 *rpfn = pfn;
4466 4433 if (cookiep)
4467 4434 *cookiep = HAC_COOKIE_NONE;
4468 4435 return (0);
4469 4436 }
4470 4437 page_unlock(pp);
4471 4438 goto rehash;
4472 4439 }
4473 4440 locked = 1;
4474 4441 }
4475 4442
4476 4443 if (!PAGE_LOCKED(pp) && !panicstr)
4477 4444 panic("hat_add_callback: page 0x%p not locked", (void *)pp);
4478 4445
4479 4446 if (osfhmep->hme_page != pp || pp->p_vnode != vp ||
4480 4447 pp->p_offset != off) {
4481 4448 /*
4482 4449 * The page moved before we got our hands on it. Drop
4483 4450 * all the locks and try again.
4484 4451 */
4485 4452 ASSERT((flags & HAC_PAGELOCK) != 0);
4486 4453 sfmmu_mlist_exit(pml);
4487 4454 SFMMU_HASH_UNLOCK(hmebp);
4488 4455 page_unlock(pp);
4489 4456 locked = 0;
4490 4457 goto rehash;
4491 4458 }
4492 4459
4493 4460 if (!VN_ISKAS(vp)) {
4494 4461 /*
4495 4462 * This is not a segkmem page but another page which
4496 4463 * has been kernel mapped. It had better have at least
4497 4464 * a share lock on it. Return the pfn.
4498 4465 */
4499 4466 sfmmu_mlist_exit(pml);
4500 4467 SFMMU_HASH_UNLOCK(hmebp);
4501 4468 if (locked)
4502 4469 page_unlock(pp);
4503 4470 kmem_cache_free(pa_hment_cache, pahmep);
4504 4471 ASSERT(PAGE_LOCKED(pp));
4505 4472 *rpfn = pfn;
4506 4473 if (cookiep)
4507 4474 *cookiep = HAC_COOKIE_NONE;
4508 4475 return (0);
4509 4476 }
4510 4477
4511 4478 /*
4512 4479 * Setup this pa_hment and link its embedded dummy sf_hment into
4513 4480 * the mapping list.
4514 4481 */
4515 4482 pp->p_share++;
4516 4483 pahmep->cb_id = callback_id;
4517 4484 pahmep->addr = vaddr;
4518 4485 pahmep->len = len;
4519 4486 pahmep->refcnt = 1;
4520 4487 pahmep->flags = 0;
4521 4488 pahmep->pvt = pvt;
4522 4489
4523 4490 sfhmep->hme_tte.ll = 0;
4524 4491 sfhmep->hme_data = pahmep;
4525 4492 sfhmep->hme_prev = osfhmep;
4526 4493 sfhmep->hme_next = osfhmep->hme_next;
4527 4494
4528 4495 if (osfhmep->hme_next)
4529 4496 osfhmep->hme_next->hme_prev = sfhmep;
4530 4497
4531 4498 osfhmep->hme_next = sfhmep;
4532 4499
4533 4500 sfmmu_mlist_exit(pml);
4534 4501 SFMMU_HASH_UNLOCK(hmebp);
4535 4502
4536 4503 if (locked)
4537 4504 page_unlock(pp);
4538 4505
4539 4506 *rpfn = pfn;
4540 4507 if (cookiep)
4541 4508 *cookiep = (void *)pahmep;
4542 4509
4543 4510 return (0);
4544 4511 }
4545 4512
4546 4513 /*
4547 4514 * Remove the relocation callbacks from the specified addr/len.
4548 4515 */
4549 4516 void
4550 4517 hat_delete_callback(caddr_t vaddr, uint_t len, void *pvt, uint_t flags,
4551 4518 void *cookie)
4552 4519 {
4553 4520 struct hmehash_bucket *hmebp;
4554 4521 hmeblk_tag hblktag;
4555 4522 struct hme_blk *hmeblkp;
4556 4523 int hmeshift, hashno;
4557 4524 caddr_t saddr;
4558 4525 struct pa_hment *pahmep;
4559 4526 struct sf_hment *sfhmep, *osfhmep;
4560 4527 kmutex_t *pml;
4561 4528 tte_t tte;
4562 4529 page_t *pp;
4563 4530 vnode_t *vp;
4564 4531 u_offset_t off;
4565 4532 int locked = 0;
4566 4533
4567 4534 /*
4568 4535 * If the cookie is HAC_COOKIE_NONE then there is no pa_hment to
4569 4536 * remove so just return.
4570 4537 */
4571 4538 if (cookie == HAC_COOKIE_NONE || IS_KPM_ADDR(vaddr))
4572 4539 return;
4573 4540
4574 4541 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK);
4575 4542
4576 4543 rehash:
4577 4544 /* Find the mapping(s) for this page */
4578 4545 for (hashno = TTE64K, hmeblkp = NULL;
4579 4546 hmeblkp == NULL && hashno <= mmu_hashcnt;
4580 4547 hashno++) {
4581 4548 hmeshift = HME_HASH_SHIFT(hashno);
4582 4549 hblktag.htag_id = ksfmmup;
4583 4550 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
4584 4551 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift);
4585 4552 hblktag.htag_rehash = hashno;
4586 4553 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift);
4587 4554
4588 4555 SFMMU_HASH_LOCK(hmebp);
4589 4556
4590 4557 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
4591 4558
4592 4559 if (hmeblkp == NULL)
4593 4560 SFMMU_HASH_UNLOCK(hmebp);
4594 4561 }
4595 4562
4596 4563 if (hmeblkp == NULL)
4597 4564 return;
4598 4565
4599 4566 ASSERT(!hmeblkp->hblk_shared);
4600 4567
4601 4568 HBLKTOHME(osfhmep, hmeblkp, saddr);
4602 4569
4603 4570 sfmmu_copytte(&osfhmep->hme_tte, &tte);
4604 4571 if (!TTE_IS_VALID(&tte)) {
4605 4572 SFMMU_HASH_UNLOCK(hmebp);
4606 4573 return;
4607 4574 }
4608 4575
4609 4576 pp = osfhmep->hme_page;
4610 4577 if (pp == NULL) {
4611 4578 SFMMU_HASH_UNLOCK(hmebp);
4612 4579 ASSERT(cookie == NULL);
4613 4580 return;
4614 4581 }
4615 4582
4616 4583 vp = pp->p_vnode;
4617 4584 off = pp->p_offset;
4618 4585
4619 4586 pml = sfmmu_mlist_enter(pp);
4620 4587
4621 4588 if (flags & HAC_PAGELOCK) {
4622 4589 if (!page_trylock(pp, SE_SHARED)) {
4623 4590 /*
4624 4591 * Somebody is holding SE_EXCL lock. Might
4625 4592 * even be hat_page_relocate(). Drop all
4626 4593 * our locks, lookup the page in &kvp, and
4627 4594 * retry. If it doesn't exist in &kvp and &zvp,
4628 4595 * then we must be dealing with a kernel mapped
4629 4596 * page which doesn't actually belong to
4630 4597 * segkmem so we punt.
4631 4598 */
4632 4599 sfmmu_mlist_exit(pml);
4633 4600 SFMMU_HASH_UNLOCK(hmebp);
4634 4601 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED);
4635 4602 /* check zvp before giving up */
4636 4603 if (pp == NULL)
4637 4604 pp = page_lookup(&zvp, (u_offset_t)saddr,
4638 4605 SE_SHARED);
4639 4606
4640 4607 if (pp == NULL) {
4641 4608 ASSERT(cookie == NULL);
4642 4609 return;
4643 4610 }
4644 4611 page_unlock(pp);
4645 4612 goto rehash;
4646 4613 }
4647 4614 locked = 1;
4648 4615 }
4649 4616
4650 4617 ASSERT(PAGE_LOCKED(pp));
4651 4618
4652 4619 if (osfhmep->hme_page != pp || pp->p_vnode != vp ||
4653 4620 pp->p_offset != off) {
4654 4621 /*
4655 4622 * The page moved before we got our hands on it. Drop
4656 4623 * all the locks and try again.
4657 4624 */
4658 4625 ASSERT((flags & HAC_PAGELOCK) != 0);
4659 4626 sfmmu_mlist_exit(pml);
4660 4627 SFMMU_HASH_UNLOCK(hmebp);
4661 4628 page_unlock(pp);
4662 4629 locked = 0;
4663 4630 goto rehash;
4664 4631 }
4665 4632
4666 4633 if (!VN_ISKAS(vp)) {
4667 4634 /*
4668 4635 * This is not a segkmem page but another page which
4669 4636 * has been kernel mapped.
4670 4637 */
4671 4638 sfmmu_mlist_exit(pml);
4672 4639 SFMMU_HASH_UNLOCK(hmebp);
4673 4640 if (locked)
4674 4641 page_unlock(pp);
4675 4642 ASSERT(cookie == NULL);
4676 4643 return;
4677 4644 }
4678 4645
4679 4646 if (cookie != NULL) {
4680 4647 pahmep = (struct pa_hment *)cookie;
4681 4648 sfhmep = &pahmep->sfment;
4682 4649 } else {
4683 4650 for (sfhmep = pp->p_mapping; sfhmep != NULL;
4684 4651 sfhmep = sfhmep->hme_next) {
4685 4652
4686 4653 /*
4687 4654 * skip va<->pa mappings
4688 4655 */
4689 4656 if (!IS_PAHME(sfhmep))
4690 4657 continue;
4691 4658
4692 4659 pahmep = sfhmep->hme_data;
4693 4660 ASSERT(pahmep != NULL);
4694 4661
4695 4662 /*
4696 4663 * if pa_hment matches, remove it
4697 4664 */
4698 4665 if ((pahmep->pvt == pvt) &&
4699 4666 (pahmep->addr == vaddr) &&
4700 4667 (pahmep->len == len)) {
4701 4668 break;
4702 4669 }
4703 4670 }
4704 4671 }
4705 4672
4706 4673 if (sfhmep == NULL) {
4707 4674 if (!panicstr) {
4708 4675 panic("hat_delete_callback: pa_hment not found, pp %p",
4709 4676 (void *)pp);
4710 4677 }
4711 4678 return;
4712 4679 }
4713 4680
4714 4681 /*
4715 4682 * Note: at this point a valid kernel mapping must still be
4716 4683 * present on this page.
4717 4684 */
4718 4685 pp->p_share--;
4719 4686 if (pp->p_share <= 0)
4720 4687 panic("hat_delete_callback: zero p_share");
4721 4688
4722 4689 if (--pahmep->refcnt == 0) {
4723 4690 if (pahmep->flags != 0)
4724 4691 panic("hat_delete_callback: pa_hment is busy");
4725 4692
4726 4693 /*
4727 4694 * Remove sfhmep from the mapping list for the page.
4728 4695 */
4729 4696 if (sfhmep->hme_prev) {
4730 4697 sfhmep->hme_prev->hme_next = sfhmep->hme_next;
4731 4698 } else {
4732 4699 pp->p_mapping = sfhmep->hme_next;
4733 4700 }
4734 4701
4735 4702 if (sfhmep->hme_next)
4736 4703 sfhmep->hme_next->hme_prev = sfhmep->hme_prev;
4737 4704
4738 4705 sfmmu_mlist_exit(pml);
4739 4706 SFMMU_HASH_UNLOCK(hmebp);
4740 4707
4741 4708 if (locked)
4742 4709 page_unlock(pp);
4743 4710
4744 4711 kmem_cache_free(pa_hment_cache, pahmep);
4745 4712 return;
4746 4713 }
4747 4714
4748 4715 sfmmu_mlist_exit(pml);
4749 4716 SFMMU_HASH_UNLOCK(hmebp);
4750 4717 if (locked)
4751 4718 page_unlock(pp);
4752 4719 }
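
A minimal, hypothetical teardown sketch for a driver that had earlier registered a relocation callback. Only hat_delete_callback(), the HAC_PAGELOCK flag and the HAC_COOKIE_NONE short-circuit come from the function above; the helper name and the origin of 'cookie' are assumed for illustration.

    /*
     * Hypothetical helper: 'cookie' is whatever the earlier registration
     * handed back.  Passing HAC_COOKIE_NONE is harmless - the function
     * above returns immediately in that case.
     */
    static void
    xx_unregister_reloc_cb(caddr_t vaddr, uint_t len, void *pvt, void *cookie)
    {
            hat_delete_callback(vaddr, len, pvt, HAC_PAGELOCK, cookie);
    }
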
4753 4720
4754 4721 /*
4755 4722 * hat_probe returns 1 if the translation for the address 'addr' is
4756 4723 * loaded, zero otherwise.
4757 4724 *
4758 4725  * hat_probe should be used only for advisory purposes because it may
4759 4726 * occasionally return the wrong value. The implementation must guarantee that
4760 4727 * returning the wrong value is a very rare event. hat_probe is used
4761 4728 * to implement optimizations in the segment drivers.
4762 4729 *
4763 4730 */
4764 4731 int
4765 4732 hat_probe(struct hat *sfmmup, caddr_t addr)
4766 4733 {
4767 4734 pfn_t pfn;
4768 4735 tte_t tte;
4769 4736
4770 4737 ASSERT(sfmmup != NULL);
4771 - ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
4772 4738
4773 4739 ASSERT((sfmmup == ksfmmup) ||
4774 4740 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
4775 4741
4776 4742 if (sfmmup == ksfmmup) {
4777 4743 while ((pfn = sfmmu_vatopfn(addr, sfmmup, &tte))
4778 4744 == PFN_SUSPENDED) {
4779 4745 sfmmu_vatopfn_suspended(addr, sfmmup, &tte);
4780 4746 }
4781 4747 } else {
4782 4748 pfn = sfmmu_uvatopfn(addr, sfmmup, NULL);
4783 4749 }
4784 4750
4785 4751 if (pfn != PFN_INVALID)
4786 4752 return (1);
4787 4753 else
4788 4754 return (0);
4789 4755 }
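
A hedged sketch of the advisory use described in the comment above: a hypothetical segment-driver fast path that skips heavier work when a translation is already loaded. The surrounding fault routine and the 'seg' handle are assumptions; only hat_probe()'s contract is taken from the code above.

    /*
     * The result is only a hint, so the slow path must still cope with
     * a stale answer.
     */
    if (hat_probe(seg->s_as->a_hat, addr)) {
            return (0);     /* translation very likely already loaded */
    }
    /* otherwise fall through to the normal (expensive) fault handling */
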
4790 4756
4791 4757 ssize_t
4792 4758 hat_getpagesize(struct hat *sfmmup, caddr_t addr)
4793 4759 {
4794 4760 tte_t tte;
4795 4761
4796 - ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
4797 -
4798 4762 if (sfmmup == ksfmmup) {
4799 4763 if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4800 4764 return (-1);
4801 4765 }
4802 4766 } else {
4803 4767 if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4804 4768 return (-1);
4805 4769 }
4806 4770 }
4807 4771
4808 4772 ASSERT(TTE_IS_VALID(&tte));
4809 4773 return (TTEBYTES(TTE_CSZ(&tte)));
4810 4774 }
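
hat_flush_range() later in this file walks a virtual range exactly this way; here is the bare pattern in isolation, a sketch assuming 'hat', 'start' and 'len' describe a fully mapped range.

    caddr_t va = start;

    while (va < start + len) {
            ssize_t sz = hat_getpagesize(hat, va);

            if (sz < 0)
                    break;          /* no translation loaded here */
            /* ... operate on the mapping of size 'sz' at 'va' ... */
            va += sz;
    }
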
4811 4775
4812 4776 uint_t
4813 4777 hat_getattr(struct hat *sfmmup, caddr_t addr, uint_t *attr)
4814 4778 {
4815 4779 tte_t tte;
4816 4780
4817 - ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
4818 -
4819 4781 if (sfmmup == ksfmmup) {
4820 4782 if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4821 4783 tte.ll = 0;
4822 4784 }
4823 4785 } else {
4824 4786 if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4825 4787 tte.ll = 0;
4826 4788 }
4827 4789 }
4828 4790 if (TTE_IS_VALID(&tte)) {
4829 4791 *attr = sfmmu_ptov_attr(&tte);
4830 4792 return (0);
4831 4793 }
4832 4794 *attr = 0;
4833 4795 return ((uint_t)0xffffffff);
4834 4796 }
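
A minimal, hypothetical caller of hat_getattr(): the return value, not *attr, signals whether a valid translation was found (0 on success, (uint_t)0xffffffff otherwise, per the code above).

    uint_t attr;

    if (hat_getattr(hat, addr, &attr) == 0 && (attr & PROT_WRITE)) {
            /* a valid translation exists and it is currently writable */
    }
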
4835 4797
4836 4798 /*
4837 4799 * Enables more attributes on specified address range (ie. logical OR)
4838 4800 */
4839 4801 void
4840 4802 hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4841 4803 {
4842 - if (hat->sfmmu_xhat_provider) {
4843 - XHAT_SETATTR(hat, addr, len, attr);
4844 - return;
4845 - } else {
4846 - /*
4847 - * This must be a CPU HAT. If the address space has
4848 - * XHATs attached, change attributes for all of them,
4849 - * just in case
4850 - */
4851 - ASSERT(hat->sfmmu_as != NULL);
4852 - if (hat->sfmmu_as->a_xhat != NULL)
4853 - xhat_setattr_all(hat->sfmmu_as, addr, len, attr);
4854 - }
4804 + ASSERT(hat->sfmmu_as != NULL);
4855 4805
4856 4806 sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR);
4857 4807 }
4858 4808
4859 4809 /*
4860 4810 * Assigns attributes to the specified address range. All the attributes
4861 4811 * are specified.
4862 4812 */
4863 4813 void
4864 4814 hat_chgattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4865 4815 {
4866 - if (hat->sfmmu_xhat_provider) {
4867 - XHAT_CHGATTR(hat, addr, len, attr);
4868 - return;
4869 - } else {
4870 - /*
4871 - * This must be a CPU HAT. If the address space has
4872 - * XHATs attached, change attributes for all of them,
4873 - * just in case
4874 - */
4875 - ASSERT(hat->sfmmu_as != NULL);
4876 - if (hat->sfmmu_as->a_xhat != NULL)
4877 - xhat_chgattr_all(hat->sfmmu_as, addr, len, attr);
4878 - }
4816 + ASSERT(hat->sfmmu_as != NULL);
4879 4817
4880 4818 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CHGATTR);
4881 4819 }
4882 4820
4883 4821 /*
4884 4822  * Remove attributes on the specified address range (ie. logical NAND)
4885 4823 */
4886 4824 void
4887 4825 hat_clrattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4888 4826 {
4889 - if (hat->sfmmu_xhat_provider) {
4890 - XHAT_CLRATTR(hat, addr, len, attr);
4891 - return;
4892 - } else {
4893 - /*
4894 - * This must be a CPU HAT. If the address space has
4895 - * XHATs attached, change attributes for all of them,
4896 - * just in case
4897 - */
4898 - ASSERT(hat->sfmmu_as != NULL);
4899 - if (hat->sfmmu_as->a_xhat != NULL)
4900 - xhat_clrattr_all(hat->sfmmu_as, addr, len, attr);
4901 - }
4827 + ASSERT(hat->sfmmu_as != NULL);
4902 4828
4903 4829 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CLRATTR);
4904 4830 }
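
The three wrappers above differ only in the mode handed to sfmmu_chgattr(); this side-by-side is purely illustrative (the hat, address and length are placeholders, not taken from any real caller).

    hat_setattr(hat, addr, len, PROT_WRITE);            /* OR write permission in */
    hat_clrattr(hat, addr, len, PROT_WRITE);            /* NAND it back out */
    hat_chgattr(hat, addr, len, PROT_READ | PROT_EXEC); /* set all attributes outright */
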
4905 4831
4906 4832 /*
4907 4833 * Change attributes on an address range to that specified by attr and mode.
4908 4834 */
4909 4835 static void
4910 4836 sfmmu_chgattr(struct hat *sfmmup, caddr_t addr, size_t len, uint_t attr,
4911 4837 int mode)
4912 4838 {
4913 4839 struct hmehash_bucket *hmebp;
4914 4840 hmeblk_tag hblktag;
4915 4841 int hmeshift, hashno = 1;
4916 4842 struct hme_blk *hmeblkp, *list = NULL;
4917 4843 caddr_t endaddr;
4918 4844 cpuset_t cpuset;
4919 4845 demap_range_t dmr;
4920 4846
4921 4847 CPUSET_ZERO(cpuset);
4922 4848
4923 4849 ASSERT((sfmmup == ksfmmup) ||
4924 4850 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
4925 4851 ASSERT((len & MMU_PAGEOFFSET) == 0);
4926 4852 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0);
4927 4853
4928 4854 if ((attr & PROT_USER) && (mode != SFMMU_CLRATTR) &&
4929 4855 ((addr + len) > (caddr_t)USERLIMIT)) {
4930 4856 panic("user addr %p in kernel space",
4931 4857 (void *)addr);
4932 4858 }
4933 4859
4934 4860 endaddr = addr + len;
4935 4861 hblktag.htag_id = sfmmup;
4936 4862 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
4937 4863 DEMAP_RANGE_INIT(sfmmup, &dmr);
4938 4864
4939 4865 while (addr < endaddr) {
4940 4866 hmeshift = HME_HASH_SHIFT(hashno);
4941 4867 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
4942 4868 hblktag.htag_rehash = hashno;
4943 4869 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
4944 4870
4945 4871 SFMMU_HASH_LOCK(hmebp);
4946 4872
4947 4873 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
4948 4874 if (hmeblkp != NULL) {
4949 4875 ASSERT(!hmeblkp->hblk_shared);
4950 4876 /*
4951 4877 * We've encountered a shadow hmeblk so skip the range
4952 4878 * of the next smaller mapping size.
4953 4879 */
4954 4880 if (hmeblkp->hblk_shw_bit) {
4955 4881 ASSERT(sfmmup != ksfmmup);
4956 4882 ASSERT(hashno > 1);
4957 4883 addr = (caddr_t)P2END((uintptr_t)addr,
4958 4884 TTEBYTES(hashno - 1));
4959 4885 } else {
4960 4886 addr = sfmmu_hblk_chgattr(sfmmup,
4961 4887 hmeblkp, addr, endaddr, &dmr, attr, mode);
4962 4888 }
4963 4889 SFMMU_HASH_UNLOCK(hmebp);
4964 4890 hashno = 1;
4965 4891 continue;
4966 4892 }
4967 4893 SFMMU_HASH_UNLOCK(hmebp);
4968 4894
4969 4895 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
4970 4896 /*
4971 4897 * We have traversed the whole list and rehashed
4972 4898 * if necessary without finding the address to chgattr.
4973 4899 * This is ok, so we increment the address by the
4974 4900 * smallest hmeblk range for kernel mappings or for
4975 4901 * user mappings with no large pages, and the largest
4976 4902 * hmeblk range, to account for shadow hmeblks, for
4977 4903 * user mappings with large pages and continue.
4978 4904 */
4979 4905 if (sfmmup == ksfmmup)
4980 4906 addr = (caddr_t)P2END((uintptr_t)addr,
4981 4907 TTEBYTES(1));
4982 4908 else
4983 4909 addr = (caddr_t)P2END((uintptr_t)addr,
4984 4910 TTEBYTES(hashno));
4985 4911 hashno = 1;
4986 4912 } else {
4987 4913 hashno++;
4988 4914 }
4989 4915 }
4990 4916
4991 4917 sfmmu_hblks_list_purge(&list, 0);
4992 4918 DEMAP_RANGE_FLUSH(&dmr);
4993 4919 cpuset = sfmmup->sfmmu_cpusran;
4994 4920 xt_sync(cpuset);
4995 4921 }
4996 4922
4997 4923 /*
4998 4924 * This function chgattr on a range of addresses in an hmeblk. It returns the
4999 4925  * next address that needs to be chgattr.
5000 4926 * It should be called with the hash lock held.
5001 4927 * XXX It should be possible to optimize chgattr by not flushing every time but
5002 4928 * on the other hand:
5003 4929 * 1. do one flush crosscall.
5004 4930 * 2. only flush if we are increasing permissions (make sure this will work)
5005 4931 */
5006 4932 static caddr_t
5007 4933 sfmmu_hblk_chgattr(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
5008 4934 caddr_t endaddr, demap_range_t *dmrp, uint_t attr, int mode)
5009 4935 {
5010 4936 tte_t tte, tteattr, tteflags, ttemod;
5011 4937 struct sf_hment *sfhmep;
5012 4938 int ttesz;
5013 4939 struct page *pp = NULL;
5014 4940 kmutex_t *pml, *pmtx;
5015 4941 int ret;
5016 4942 int use_demap_range;
5017 4943 #if defined(SF_ERRATA_57)
5018 4944 int check_exec;
5019 4945 #endif
5020 4946
5021 4947 ASSERT(in_hblk_range(hmeblkp, addr));
5022 4948 ASSERT(hmeblkp->hblk_shw_bit == 0);
5023 4949 ASSERT(!hmeblkp->hblk_shared);
5024 4950
5025 4951 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
5026 4952 ttesz = get_hblk_ttesz(hmeblkp);
5027 4953
5028 4954 /*
5029 4955 * Flush the current demap region if addresses have been
5030 4956 * skipped or the page size doesn't match.
5031 4957 */
5032 4958 use_demap_range = (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp));
5033 4959 if (use_demap_range) {
5034 4960 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
5035 4961 } else if (dmrp != NULL) {
5036 4962 DEMAP_RANGE_FLUSH(dmrp);
5037 4963 }
5038 4964
5039 4965 tteattr.ll = sfmmu_vtop_attr(attr, mode, &tteflags);
5040 4966 #if defined(SF_ERRATA_57)
5041 4967 check_exec = (sfmmup != ksfmmup) &&
5042 4968 AS_TYPE_64BIT(sfmmup->sfmmu_as) &&
5043 4969 TTE_IS_EXECUTABLE(&tteattr);
5044 4970 #endif
5045 4971 HBLKTOHME(sfhmep, hmeblkp, addr);
5046 4972 while (addr < endaddr) {
5047 4973 sfmmu_copytte(&sfhmep->hme_tte, &tte);
5048 4974 if (TTE_IS_VALID(&tte)) {
5049 4975 if ((tte.ll & tteflags.ll) == tteattr.ll) {
5050 4976 /*
5051 4977 * if the new attr is the same as old
5052 4978 * continue
5053 4979 */
5054 4980 goto next_addr;
5055 4981 }
5056 4982 if (!TTE_IS_WRITABLE(&tteattr)) {
5057 4983 /*
5058 4984 * make sure we clear hw modify bit if we
5059 4985  * are removing write protections
5060 4986 */
5061 4987 tteflags.tte_intlo |= TTE_HWWR_INT;
5062 4988 }
5063 4989
5064 4990 pml = NULL;
5065 4991 pp = sfhmep->hme_page;
5066 4992 if (pp) {
5067 4993 pml = sfmmu_mlist_enter(pp);
5068 4994 }
5069 4995
5070 4996 if (pp != sfhmep->hme_page) {
5071 4997 /*
5072 4998 * tte must have been unloaded.
5073 4999 */
5074 5000 ASSERT(pml);
5075 5001 sfmmu_mlist_exit(pml);
5076 5002 continue;
5077 5003 }
5078 5004
5079 5005 ASSERT(pp == NULL || sfmmu_mlist_held(pp));
5080 5006
5081 5007 ttemod = tte;
5082 5008 ttemod.ll = (ttemod.ll & ~tteflags.ll) | tteattr.ll;
5083 5009 ASSERT(TTE_TO_TTEPFN(&ttemod) == TTE_TO_TTEPFN(&tte));
5084 5010
5085 5011 #if defined(SF_ERRATA_57)
5086 5012 if (check_exec && addr < errata57_limit)
5087 5013 ttemod.tte_exec_perm = 0;
5088 5014 #endif
5089 5015 ret = sfmmu_modifytte_try(&tte, &ttemod,
5090 5016 &sfhmep->hme_tte);
5091 5017
5092 5018 if (ret < 0) {
5093 5019 /* tte changed underneath us */
5094 5020 if (pml) {
5095 5021 sfmmu_mlist_exit(pml);
5096 5022 }
5097 5023 continue;
5098 5024 }
5099 5025
5100 5026 if (tteflags.tte_intlo & TTE_HWWR_INT) {
5101 5027 /*
5102 5028 * need to sync if we are clearing modify bit.
5103 5029 */
5104 5030 sfmmu_ttesync(sfmmup, addr, &tte, pp);
5105 5031 }
5106 5032
5107 5033 if (pp && PP_ISRO(pp)) {
5108 5034 if (tteattr.tte_intlo & TTE_WRPRM_INT) {
5109 5035 pmtx = sfmmu_page_enter(pp);
5110 5036 PP_CLRRO(pp);
5111 5037 sfmmu_page_exit(pmtx);
5112 5038 }
5113 5039 }
5114 5040
5115 5041 if (ret > 0 && use_demap_range) {
5116 5042 DEMAP_RANGE_MARKPG(dmrp, addr);
5117 5043 } else if (ret > 0) {
5118 5044 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
5119 5045 }
5120 5046
5121 5047 if (pml) {
5122 5048 sfmmu_mlist_exit(pml);
5123 5049 }
5124 5050 }
5125 5051 next_addr:
5126 5052 addr += TTEBYTES(ttesz);
5127 5053 sfhmep++;
5128 5054 DEMAP_RANGE_NEXTPG(dmrp);
5129 5055 }
5130 5056 return (addr);
5131 5057 }
5132 5058
5133 5059 /*
5134 5060 * This routine converts virtual attributes to physical ones. It will
5135 5061 * update the tteflags field with the tte mask corresponding to the attributes
5136 5062 * affected and it returns the new attributes. It will also clear the modify
5137 5063 * bit if we are taking away write permission. This is necessary since the
5138 5064 * modify bit is the hardware permission bit and we need to clear it in order
5139 5065 * to detect write faults.
5140 5066 */
5141 5067 static uint64_t
5142 5068 sfmmu_vtop_attr(uint_t attr, int mode, tte_t *ttemaskp)
5143 5069 {
5144 5070 tte_t ttevalue;
5145 5071
5146 5072 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
5147 5073
5148 5074 switch (mode) {
5149 5075 case SFMMU_CHGATTR:
5150 5076 /* all attributes specified */
5151 5077 ttevalue.tte_inthi = MAKE_TTEATTR_INTHI(attr);
5152 5078 ttevalue.tte_intlo = MAKE_TTEATTR_INTLO(attr);
5153 5079 ttemaskp->tte_inthi = TTEINTHI_ATTR;
5154 5080 ttemaskp->tte_intlo = TTEINTLO_ATTR;
5155 5081 break;
5156 5082 case SFMMU_SETATTR:
5157 5083 ASSERT(!(attr & ~HAT_PROT_MASK));
5158 5084 ttemaskp->ll = 0;
5159 5085 ttevalue.ll = 0;
5160 5086 /*
5161 5087 * a valid tte implies exec and read for sfmmu
5162 5088 * so no need to do anything about them.
5163 5089  * since privileged access implies user access
5164 5090 * PROT_USER doesn't make sense either.
5165 5091 */
5166 5092 if (attr & PROT_WRITE) {
5167 5093 ttemaskp->tte_intlo |= TTE_WRPRM_INT;
5168 5094 ttevalue.tte_intlo |= TTE_WRPRM_INT;
5169 5095 }
5170 5096 break;
5171 5097 case SFMMU_CLRATTR:
5172 5098 /* attributes will be nand with current ones */
5173 5099 if (attr & ~(PROT_WRITE | PROT_USER)) {
5174 5100 panic("sfmmu: attr %x not supported", attr);
5175 5101 }
5176 5102 ttemaskp->ll = 0;
5177 5103 ttevalue.ll = 0;
5178 5104 if (attr & PROT_WRITE) {
5179 5105 /* clear both writable and modify bit */
5180 5106 ttemaskp->tte_intlo |= TTE_WRPRM_INT | TTE_HWWR_INT;
5181 5107 }
5182 5108 if (attr & PROT_USER) {
5183 5109 ttemaskp->tte_intlo |= TTE_PRIV_INT;
5184 5110 ttevalue.tte_intlo |= TTE_PRIV_INT;
5185 5111 }
5186 5112 break;
5187 5113 default:
5188 5114 panic("sfmmu_vtop_attr: bad mode %x", mode);
5189 5115 }
5190 5116 ASSERT(TTE_TO_TTEPFN(&ttevalue) == 0);
5191 5117 return (ttevalue.ll);
5192 5118 }
5193 5119
5194 5120 static uint_t
5195 5121 sfmmu_ptov_attr(tte_t *ttep)
5196 5122 {
5197 5123 uint_t attr;
5198 5124
5199 5125 ASSERT(TTE_IS_VALID(ttep));
5200 5126
5201 5127 attr = PROT_READ;
5202 5128
5203 5129 if (TTE_IS_WRITABLE(ttep)) {
5204 5130 attr |= PROT_WRITE;
5205 5131 }
5206 5132 if (TTE_IS_EXECUTABLE(ttep)) {
5207 5133 attr |= PROT_EXEC;
5208 5134 }
5209 5135 if (!TTE_IS_PRIVILEGED(ttep)) {
5210 5136 attr |= PROT_USER;
5211 5137 }
5212 5138 if (TTE_IS_NFO(ttep)) {
5213 5139 attr |= HAT_NOFAULT;
5214 5140 }
5215 5141 if (TTE_IS_NOSYNC(ttep)) {
5216 5142 attr |= HAT_NOSYNC;
5217 5143 }
5218 5144 if (TTE_IS_SIDEFFECT(ttep)) {
5219 5145 attr |= SFMMU_SIDEFFECT;
5220 5146 }
5221 5147 if (!TTE_IS_VCACHEABLE(ttep)) {
5222 5148 attr |= SFMMU_UNCACHEVTTE;
5223 5149 }
5224 5150 if (!TTE_IS_PCACHEABLE(ttep)) {
5225 5151 attr |= SFMMU_UNCACHEPTTE;
5226 5152 }
5227 5153 return (attr);
5228 5154 }
5229 5155
5230 5156 /*
5231 5157 * hat_chgprot is a deprecated hat call. New segment drivers
5232 5158 * should store all attributes and use hat_*attr calls.
5233 5159 *
5234 5160 * Change the protections in the virtual address range
5235 5161 * given to the specified virtual protection. If vprot is ~PROT_WRITE,
5236 5162 * then remove write permission, leaving the other
5237 5163 * permissions unchanged. If vprot is ~PROT_USER, remove user permissions.
5238 5164 *
5239 5165 */
5240 5166 void
5241 5167 hat_chgprot(struct hat *sfmmup, caddr_t addr, size_t len, uint_t vprot)
5242 5168 {
5243 5169 struct hmehash_bucket *hmebp;
5244 5170 hmeblk_tag hblktag;
5245 5171 int hmeshift, hashno = 1;
5246 5172 struct hme_blk *hmeblkp, *list = NULL;
5247 5173 caddr_t endaddr;
5248 5174 cpuset_t cpuset;
5249 5175 demap_range_t dmr;
5250 5176
5251 5177 ASSERT((len & MMU_PAGEOFFSET) == 0);
5252 5178 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0);
5253 5179
5254 - if (sfmmup->sfmmu_xhat_provider) {
5255 - XHAT_CHGPROT(sfmmup, addr, len, vprot);
5256 - return;
5257 - } else {
5258 - /*
5259 - * This must be a CPU HAT. If the address space has
5260 - * XHATs attached, change attributes for all of them,
5261 - * just in case
5262 - */
5263 - ASSERT(sfmmup->sfmmu_as != NULL);
5264 - if (sfmmup->sfmmu_as->a_xhat != NULL)
5265 - xhat_chgprot_all(sfmmup->sfmmu_as, addr, len, vprot);
5266 - }
5180 + ASSERT(sfmmup->sfmmu_as != NULL);
5267 5181
5268 5182 CPUSET_ZERO(cpuset);
5269 5183
5270 5184 if ((vprot != (uint_t)~PROT_WRITE) && (vprot & PROT_USER) &&
5271 5185 ((addr + len) > (caddr_t)USERLIMIT)) {
5272 5186 panic("user addr %p vprot %x in kernel space",
5273 5187 (void *)addr, vprot);
5274 5188 }
5275 5189 endaddr = addr + len;
5276 5190 hblktag.htag_id = sfmmup;
5277 5191 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
5278 5192 DEMAP_RANGE_INIT(sfmmup, &dmr);
5279 5193
5280 5194 while (addr < endaddr) {
5281 5195 hmeshift = HME_HASH_SHIFT(hashno);
5282 5196 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
5283 5197 hblktag.htag_rehash = hashno;
5284 5198 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
5285 5199
5286 5200 SFMMU_HASH_LOCK(hmebp);
5287 5201
5288 5202 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
5289 5203 if (hmeblkp != NULL) {
5290 5204 ASSERT(!hmeblkp->hblk_shared);
5291 5205 /*
5292 5206 * We've encountered a shadow hmeblk so skip the range
5293 5207 * of the next smaller mapping size.
5294 5208 */
5295 5209 if (hmeblkp->hblk_shw_bit) {
5296 5210 ASSERT(sfmmup != ksfmmup);
5297 5211 ASSERT(hashno > 1);
5298 5212 addr = (caddr_t)P2END((uintptr_t)addr,
5299 5213 TTEBYTES(hashno - 1));
5300 5214 } else {
5301 5215 addr = sfmmu_hblk_chgprot(sfmmup, hmeblkp,
5302 5216 addr, endaddr, &dmr, vprot);
5303 5217 }
5304 5218 SFMMU_HASH_UNLOCK(hmebp);
5305 5219 hashno = 1;
5306 5220 continue;
5307 5221 }
5308 5222 SFMMU_HASH_UNLOCK(hmebp);
5309 5223
5310 5224 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
5311 5225 /*
5312 5226 * We have traversed the whole list and rehashed
5313 5227 * if necessary without finding the address to chgprot.
5314 5228 * This is ok so we increment the address by the
5315 5229 * smallest hmeblk range for kernel mappings and the
5316 5230 * largest hmeblk range, to account for shadow hmeblks,
5317 5231 * for user mappings and continue.
5318 5232 */
5319 5233 if (sfmmup == ksfmmup)
5320 5234 addr = (caddr_t)P2END((uintptr_t)addr,
5321 5235 TTEBYTES(1));
5322 5236 else
5323 5237 addr = (caddr_t)P2END((uintptr_t)addr,
5324 5238 TTEBYTES(hashno));
5325 5239 hashno = 1;
5326 5240 } else {
5327 5241 hashno++;
5328 5242 }
5329 5243 }
5330 5244
5331 5245 sfmmu_hblks_list_purge(&list, 0);
5332 5246 DEMAP_RANGE_FLUSH(&dmr);
5333 5247 cpuset = sfmmup->sfmmu_cpusran;
5334 5248 xt_sync(cpuset);
5335 5249 }
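
A hedged sketch of the two special vprot values documented in the block comment before hat_chgprot(); the choice of hat, addresses and lengths are placeholders rather than claims about any particular caller.

    /* take write permission away from a range, leaving other bits alone */
    hat_chgprot(kas.a_hat, va, len, ~PROT_WRITE);

    /* take user access away instead */
    hat_chgprot(as->a_hat, uva, ulen, ~PROT_USER);
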
5336 5250
5337 5251 /*
5338 5252 * This function chgprots a range of addresses in an hmeblk. It returns the
5339 5253  * next address that needs to be chgprot.
5340 5254  * It should be called with the hash lock held.
5341 5255  * XXX It should be possible to optimize chgprot by not flushing every time but
5342 5256 * on the other hand:
5343 5257 * 1. do one flush crosscall.
5344 5258 * 2. only flush if we are increasing permissions (make sure this will work)
5345 5259 */
5346 5260 static caddr_t
5347 5261 sfmmu_hblk_chgprot(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
5348 5262 caddr_t endaddr, demap_range_t *dmrp, uint_t vprot)
5349 5263 {
5350 5264 uint_t pprot;
5351 5265 tte_t tte, ttemod;
5352 5266 struct sf_hment *sfhmep;
5353 5267 uint_t tteflags;
5354 5268 int ttesz;
5355 5269 struct page *pp = NULL;
5356 5270 kmutex_t *pml, *pmtx;
5357 5271 int ret;
5358 5272 int use_demap_range;
5359 5273 #if defined(SF_ERRATA_57)
5360 5274 int check_exec;
5361 5275 #endif
5362 5276
5363 5277 ASSERT(in_hblk_range(hmeblkp, addr));
5364 5278 ASSERT(hmeblkp->hblk_shw_bit == 0);
5365 5279 ASSERT(!hmeblkp->hblk_shared);
5366 5280
5367 5281 #ifdef DEBUG
5368 5282 if (get_hblk_ttesz(hmeblkp) != TTE8K &&
5369 5283 (endaddr < get_hblk_endaddr(hmeblkp))) {
5370 5284 panic("sfmmu_hblk_chgprot: partial chgprot of large page");
5371 5285 }
5372 5286 #endif /* DEBUG */
5373 5287
5374 5288 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
5375 5289 ttesz = get_hblk_ttesz(hmeblkp);
5376 5290
5377 5291 pprot = sfmmu_vtop_prot(vprot, &tteflags);
5378 5292 #if defined(SF_ERRATA_57)
5379 5293 check_exec = (sfmmup != ksfmmup) &&
5380 5294 AS_TYPE_64BIT(sfmmup->sfmmu_as) &&
5381 5295 ((vprot & PROT_EXEC) == PROT_EXEC);
5382 5296 #endif
5383 5297 HBLKTOHME(sfhmep, hmeblkp, addr);
5384 5298
5385 5299 /*
5386 5300 * Flush the current demap region if addresses have been
5387 5301 * skipped or the page size doesn't match.
5388 5302 */
5389 5303 use_demap_range = (TTEBYTES(ttesz) == MMU_PAGESIZE);
5390 5304 if (use_demap_range) {
5391 5305 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
5392 5306 } else if (dmrp != NULL) {
5393 5307 DEMAP_RANGE_FLUSH(dmrp);
5394 5308 }
5395 5309
5396 5310 while (addr < endaddr) {
5397 5311 sfmmu_copytte(&sfhmep->hme_tte, &tte);
5398 5312 if (TTE_IS_VALID(&tte)) {
5399 5313 if (TTE_GET_LOFLAGS(&tte, tteflags) == pprot) {
5400 5314 /*
5401 5315 * if the new protection is the same as old
5402 5316 * continue
5403 5317 */
5404 5318 goto next_addr;
5405 5319 }
5406 5320 pml = NULL;
5407 5321 pp = sfhmep->hme_page;
5408 5322 if (pp) {
5409 5323 pml = sfmmu_mlist_enter(pp);
5410 5324 }
5411 5325 if (pp != sfhmep->hme_page) {
5412 5326 /*
5413 5327  * tte must have been unloaded
5414 5328 * underneath us. Recheck
5415 5329 */
5416 5330 ASSERT(pml);
5417 5331 sfmmu_mlist_exit(pml);
5418 5332 continue;
5419 5333 }
5420 5334
5421 5335 ASSERT(pp == NULL || sfmmu_mlist_held(pp));
5422 5336
5423 5337 ttemod = tte;
5424 5338 TTE_SET_LOFLAGS(&ttemod, tteflags, pprot);
5425 5339 #if defined(SF_ERRATA_57)
5426 5340 if (check_exec && addr < errata57_limit)
5427 5341 ttemod.tte_exec_perm = 0;
5428 5342 #endif
5429 5343 ret = sfmmu_modifytte_try(&tte, &ttemod,
5430 5344 &sfhmep->hme_tte);
5431 5345
5432 5346 if (ret < 0) {
5433 5347 /* tte changed underneath us */
5434 5348 if (pml) {
5435 5349 sfmmu_mlist_exit(pml);
5436 5350 }
5437 5351 continue;
5438 5352 }
5439 5353
5440 5354 if (tteflags & TTE_HWWR_INT) {
5441 5355 /*
5442 5356 * need to sync if we are clearing modify bit.
5443 5357 */
5444 5358 sfmmu_ttesync(sfmmup, addr, &tte, pp);
5445 5359 }
5446 5360
5447 5361 if (pp && PP_ISRO(pp)) {
5448 5362 if (pprot & TTE_WRPRM_INT) {
5449 5363 pmtx = sfmmu_page_enter(pp);
5450 5364 PP_CLRRO(pp);
5451 5365 sfmmu_page_exit(pmtx);
5452 5366 }
5453 5367 }
5454 5368
5455 5369 if (ret > 0 && use_demap_range) {
5456 5370 DEMAP_RANGE_MARKPG(dmrp, addr);
5457 5371 } else if (ret > 0) {
5458 5372 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
5459 5373 }
5460 5374
5461 5375 if (pml) {
5462 5376 sfmmu_mlist_exit(pml);
5463 5377 }
5464 5378 }
5465 5379 next_addr:
5466 5380 addr += TTEBYTES(ttesz);
5467 5381 sfhmep++;
5468 5382 DEMAP_RANGE_NEXTPG(dmrp);
5469 5383 }
5470 5384 return (addr);
5471 5385 }
5472 5386
5473 5387 /*
5474 5388 * This routine is deprecated and should only be used by hat_chgprot.
5475 5389 * The correct routine is sfmmu_vtop_attr.
5476 5390 * This routine converts virtual page protections to physical ones. It will
5477 5391 * update the tteflags field with the tte mask corresponding to the protections
5478 5392 * affected and it returns the new protections. It will also clear the modify
5479 5393 * bit if we are taking away write permission. This is necessary since the
5480 5394 * modify bit is the hardware permission bit and we need to clear it in order
5481 5395 * to detect write faults.
5482 5396 * It accepts the following special protections:
5483 5397 * ~PROT_WRITE = remove write permissions.
5484 5398 * ~PROT_USER = remove user permissions.
5485 5399 */
5486 5400 static uint_t
5487 5401 sfmmu_vtop_prot(uint_t vprot, uint_t *tteflagsp)
5488 5402 {
5489 5403 if (vprot == (uint_t)~PROT_WRITE) {
5490 5404 *tteflagsp = TTE_WRPRM_INT | TTE_HWWR_INT;
5491 5405 return (0); /* will cause wrprm to be cleared */
5492 5406 }
5493 5407 if (vprot == (uint_t)~PROT_USER) {
5494 5408 *tteflagsp = TTE_PRIV_INT;
5495 5409 return (0); /* will cause privprm to be cleared */
5496 5410 }
5497 5411 if ((vprot == 0) || (vprot == PROT_USER) ||
5498 5412 ((vprot & PROT_ALL) != vprot)) {
5499 5413 panic("sfmmu_vtop_prot -- bad prot %x", vprot);
5500 5414 }
5501 5415
5502 5416 switch (vprot) {
5503 5417 case (PROT_READ):
5504 5418 case (PROT_EXEC):
5505 5419 case (PROT_EXEC | PROT_READ):
5506 5420 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT;
5507 5421 return (TTE_PRIV_INT); /* set prv and clr wrt */
5508 5422 case (PROT_WRITE):
5509 5423 case (PROT_WRITE | PROT_READ):
5510 5424 case (PROT_EXEC | PROT_WRITE):
5511 5425 case (PROT_EXEC | PROT_WRITE | PROT_READ):
5512 5426 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT;
5513 5427 return (TTE_PRIV_INT | TTE_WRPRM_INT); /* set prv and wrt */
5514 5428 case (PROT_USER | PROT_READ):
5515 5429 case (PROT_USER | PROT_EXEC):
5516 5430 case (PROT_USER | PROT_EXEC | PROT_READ):
5517 5431 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT;
5518 5432 return (0); /* clr prv and wrt */
5519 5433 case (PROT_USER | PROT_WRITE):
5520 5434 case (PROT_USER | PROT_WRITE | PROT_READ):
5521 5435 case (PROT_USER | PROT_EXEC | PROT_WRITE):
5522 5436 case (PROT_USER | PROT_EXEC | PROT_WRITE | PROT_READ):
5523 5437 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT;
5524 5438 return (TTE_WRPRM_INT); /* clr prv and set wrt */
5525 5439 default:
5526 5440 panic("sfmmu_vtop_prot -- bad prot %x", vprot);
5527 5441 }
5528 5442 return (0);
5529 5443 }
5530 5444
5531 5445 /*
5532 5446 * Alternate unload for very large virtual ranges. With a true 64 bit VA,
5533 5447 * the normal algorithm would take too long for a very large VA range with
5534 5448 * few real mappings. This routine just walks thru all HMEs in the global
5535 5449 * hash table to find and remove mappings.
5536 5450 */
5537 5451 static void
5538 5452 hat_unload_large_virtual(
5539 5453 struct hat *sfmmup,
5540 5454 caddr_t startaddr,
5541 5455 size_t len,
5542 5456 uint_t flags,
5543 5457 hat_callback_t *callback)
5544 5458 {
5545 5459 struct hmehash_bucket *hmebp;
5546 5460 struct hme_blk *hmeblkp;
5547 5461 struct hme_blk *pr_hblk = NULL;
5548 5462 struct hme_blk *nx_hblk;
5549 5463 struct hme_blk *list = NULL;
5550 5464 int i;
5551 5465 demap_range_t dmr, *dmrp;
5552 5466 cpuset_t cpuset;
5553 5467 caddr_t endaddr = startaddr + len;
5554 5468 caddr_t sa;
5555 5469 caddr_t ea;
5556 5470 caddr_t cb_sa[MAX_CB_ADDR];
5557 5471 caddr_t cb_ea[MAX_CB_ADDR];
5558 5472 int addr_cnt = 0;
5559 5473 int a = 0;
5560 5474
5561 5475 if (sfmmup->sfmmu_free) {
5562 5476 dmrp = NULL;
5563 5477 } else {
5564 5478 dmrp = &dmr;
5565 5479 DEMAP_RANGE_INIT(sfmmup, dmrp);
5566 5480 }
5567 5481
5568 5482 /*
5569 5483 * Loop through all the hash buckets of HME blocks looking for matches.
5570 5484 */
5571 5485 for (i = 0; i <= UHMEHASH_SZ; i++) {
5572 5486 hmebp = &uhme_hash[i];
5573 5487 SFMMU_HASH_LOCK(hmebp);
5574 5488 hmeblkp = hmebp->hmeblkp;
5575 5489 pr_hblk = NULL;
5576 5490 while (hmeblkp) {
5577 5491 nx_hblk = hmeblkp->hblk_next;
5578 5492
5579 5493 /*
5580 5494 * skip if not this context, if a shadow block or
5581 5495 * if the mapping is not in the requested range
5582 5496 */
5583 5497 if (hmeblkp->hblk_tag.htag_id != sfmmup ||
5584 5498 hmeblkp->hblk_shw_bit ||
5585 5499 (sa = (caddr_t)get_hblk_base(hmeblkp)) >= endaddr ||
5586 5500 (ea = get_hblk_endaddr(hmeblkp)) <= startaddr) {
5587 5501 pr_hblk = hmeblkp;
5588 5502 goto next_block;
5589 5503 }
5590 5504
5591 5505 ASSERT(!hmeblkp->hblk_shared);
5592 5506 /*
5593 5507 * unload if there are any current valid mappings
5594 5508 */
5595 5509 if (hmeblkp->hblk_vcnt != 0 ||
5596 5510 hmeblkp->hblk_hmecnt != 0)
5597 5511 (void) sfmmu_hblk_unload(sfmmup, hmeblkp,
5598 5512 sa, ea, dmrp, flags);
5599 5513
5600 5514 /*
5601 5515 * on unmap we also release the HME block itself, once
5602 5516 * all mappings are gone.
5603 5517 */
5604 5518 if ((flags & HAT_UNLOAD_UNMAP) != 0 &&
5605 5519 !hmeblkp->hblk_vcnt &&
5606 5520 !hmeblkp->hblk_hmecnt) {
5607 5521 ASSERT(!hmeblkp->hblk_lckcnt);
5608 5522 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
5609 5523 &list, 0);
5610 5524 } else {
5611 5525 pr_hblk = hmeblkp;
5612 5526 }
5613 5527
5614 5528 if (callback == NULL)
5615 5529 goto next_block;
5616 5530
5617 5531 /*
5618 5532 * HME blocks may span more than one page, but we may be
5619 5533 * unmapping only one page, so check for a smaller range
5620 5534 * for the callback
5621 5535 */
5622 5536 if (sa < startaddr)
5623 5537 sa = startaddr;
5624 5538 if (--ea > endaddr)
5625 5539 ea = endaddr - 1;
5626 5540
5627 5541 cb_sa[addr_cnt] = sa;
5628 5542 cb_ea[addr_cnt] = ea;
5629 5543 if (++addr_cnt == MAX_CB_ADDR) {
5630 5544 if (dmrp != NULL) {
5631 5545 DEMAP_RANGE_FLUSH(dmrp);
5632 5546 cpuset = sfmmup->sfmmu_cpusran;
5633 5547 xt_sync(cpuset);
5634 5548 }
5635 5549
5636 5550 for (a = 0; a < MAX_CB_ADDR; ++a) {
5637 5551 callback->hcb_start_addr = cb_sa[a];
5638 5552 callback->hcb_end_addr = cb_ea[a];
5639 5553 callback->hcb_function(callback);
5640 5554 }
5641 5555 addr_cnt = 0;
5642 5556 }
5643 5557
5644 5558 next_block:
5645 5559 hmeblkp = nx_hblk;
5646 5560 }
5647 5561 SFMMU_HASH_UNLOCK(hmebp);
5648 5562 }
5649 5563
5650 5564 sfmmu_hblks_list_purge(&list, 0);
5651 5565 if (dmrp != NULL) {
5652 5566 DEMAP_RANGE_FLUSH(dmrp);
5653 5567 cpuset = sfmmup->sfmmu_cpusran;
5654 5568 xt_sync(cpuset);
5655 5569 }
5656 5570
5657 5571 for (a = 0; a < addr_cnt; ++a) {
5658 5572 callback->hcb_start_addr = cb_sa[a];
5659 5573 callback->hcb_end_addr = cb_ea[a];
5660 5574 callback->hcb_function(callback);
5661 5575 }
5662 5576
5663 5577 /*
5664 5578 * Check TSB and TLB page sizes if the process isn't exiting.
5665 5579 */
5666 5580 if (!sfmmup->sfmmu_free)
5667 5581 sfmmu_check_page_sizes(sfmmup, 0);
5668 5582 }
5669 5583
5670 5584 /*
5671 5585 * Unload all the mappings in the range [addr..addr+len). addr and len must
5672 5586 * be MMU_PAGESIZE aligned.
5673 5587 */
5674 5588
5675 5589 extern struct seg *segkmap;
5676 5590 #define ISSEGKMAP(sfmmup, addr) (sfmmup == ksfmmup && \
5677 5591 segkmap->s_base <= (addr) && (addr) < (segkmap->s_base + segkmap->s_size))
5678 5592
5679 5593
5680 5594 void
5681 5595 hat_unload_callback(
5682 5596 struct hat *sfmmup,
5683 5597 caddr_t addr,
5684 5598 size_t len,
5685 5599 uint_t flags,
5686 5600 hat_callback_t *callback)
5687 5601 {
5688 5602 struct hmehash_bucket *hmebp;
5689 5603 hmeblk_tag hblktag;
5690 5604 int hmeshift, hashno, iskernel;
5691 5605 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL;
5692 5606 caddr_t endaddr;
5693 5607 cpuset_t cpuset;
5694 5608 int addr_count = 0;
5695 5609 int a;
5696 5610 caddr_t cb_start_addr[MAX_CB_ADDR];
5697 5611 caddr_t cb_end_addr[MAX_CB_ADDR];
5698 5612 int issegkmap = ISSEGKMAP(sfmmup, addr);
5699 5613 demap_range_t dmr, *dmrp;
5700 5614
5701 - if (sfmmup->sfmmu_xhat_provider) {
5702 - XHAT_UNLOAD_CALLBACK(sfmmup, addr, len, flags, callback);
5703 - return;
5704 - } else {
5705 - /*
5706 - * This must be a CPU HAT. If the address space has
5707 - * XHATs attached, unload the mappings for all of them,
5708 - * just in case
5709 - */
5710 - ASSERT(sfmmup->sfmmu_as != NULL);
5711 - if (sfmmup->sfmmu_as->a_xhat != NULL)
5712 - xhat_unload_callback_all(sfmmup->sfmmu_as, addr,
5713 - len, flags, callback);
5714 - }
5615 + ASSERT(sfmmup->sfmmu_as != NULL);
5715 5616
5716 5617 ASSERT((sfmmup == ksfmmup) || (flags & HAT_UNLOAD_OTHER) || \
5717 5618 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
5718 5619
5719 5620 ASSERT(sfmmup != NULL);
5720 5621 ASSERT((len & MMU_PAGEOFFSET) == 0);
5721 5622 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
5722 5623
5723 5624 /*
5724 5625 * Probing through a large VA range (say 63 bits) will be slow, even
5725 5626 * at 4 Meg steps between the probes. So, when the virtual address range
5726 5627 * is very large, search the HME entries for what to unload.
5727 5628 *
5728 5629 * len >> TTE_PAGE_SHIFT(TTE4M) is the # of 4Meg probes we'd need
5729 5630 *
5730 5631 * UHMEHASH_SZ is number of hash buckets to examine
5731 5632 *
5732 5633 */
5733 5634 if (sfmmup != KHATID && (len >> TTE_PAGE_SHIFT(TTE4M)) > UHMEHASH_SZ) {
5734 5635 hat_unload_large_virtual(sfmmup, addr, len, flags, callback);
5735 5636 return;
5736 5637 }
5737 5638
5738 5639 CPUSET_ZERO(cpuset);
5739 5640
5740 5641 /*
5741 5642 * If the process is exiting, we can save a lot of fuss since
5742 5643 * we'll flush the TLB when we free the ctx anyway.
5743 5644 */
5744 5645 if (sfmmup->sfmmu_free) {
5745 5646 dmrp = NULL;
5746 5647 } else {
5747 5648 dmrp = &dmr;
5748 5649 DEMAP_RANGE_INIT(sfmmup, dmrp);
5749 5650 }
5750 5651
5751 5652 endaddr = addr + len;
5752 5653 hblktag.htag_id = sfmmup;
5753 5654 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
5754 5655
5755 5656 /*
5756 5657 * It is likely for the vm to call unload over a wide range of
5757 5658 * addresses that are actually very sparsely populated by
5758 5659 * translations. In order to speed this up the sfmmu hat supports
5759 5660 * the concept of shadow hmeblks. Dummy large page hmeblks that
5760 5661 * correspond to actual small translations are allocated at tteload
5761 5662 * time and are referred to as shadow hmeblks. Now, during unload
5762 5663 * time, we first check if we have a shadow hmeblk for that
5763 5664 * translation. The absence of one means the corresponding address
5764 5665 * range is empty and can be skipped.
5765 5666 *
5766 5667 * The kernel is an exception to above statement and that is why
5767 5668 * we don't use shadow hmeblks and hash starting from the smallest
5768 5669 * page size.
5769 5670 */
5770 5671 if (sfmmup == KHATID) {
5771 5672 iskernel = 1;
5772 5673 hashno = TTE64K;
5773 5674 } else {
5774 5675 iskernel = 0;
5775 5676 if (mmu_page_sizes == max_mmu_page_sizes) {
5776 5677 hashno = TTE256M;
5777 5678 } else {
5778 5679 hashno = TTE4M;
5779 5680 }
5780 5681 }
5781 5682 while (addr < endaddr) {
5782 5683 hmeshift = HME_HASH_SHIFT(hashno);
5783 5684 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
5784 5685 hblktag.htag_rehash = hashno;
5785 5686 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
5786 5687
5787 5688 SFMMU_HASH_LOCK(hmebp);
5788 5689
5789 5690 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
5790 5691 if (hmeblkp == NULL) {
5791 5692 /*
5792 5693  * didn't find an hmeblk. skip the appropriate
5793 5694 * address range.
5794 5695 */
5795 5696 SFMMU_HASH_UNLOCK(hmebp);
5796 5697 if (iskernel) {
5797 5698 if (hashno < mmu_hashcnt) {
5798 5699 hashno++;
5799 5700 continue;
5800 5701 } else {
5801 5702 hashno = TTE64K;
5802 5703 addr = (caddr_t)roundup((uintptr_t)addr
5803 5704 + 1, MMU_PAGESIZE64K);
5804 5705 continue;
5805 5706 }
5806 5707 }
5807 5708 addr = (caddr_t)roundup((uintptr_t)addr + 1,
5808 5709 (1 << hmeshift));
5809 5710 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5810 5711 ASSERT(hashno == TTE64K);
5811 5712 continue;
5812 5713 }
5813 5714 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5814 5715 hashno = TTE512K;
5815 5716 continue;
5816 5717 }
5817 5718 if (mmu_page_sizes == max_mmu_page_sizes) {
5818 5719 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5819 5720 hashno = TTE4M;
5820 5721 continue;
5821 5722 }
5822 5723 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5823 5724 hashno = TTE32M;
5824 5725 continue;
5825 5726 }
5826 5727 hashno = TTE256M;
5827 5728 continue;
5828 5729 } else {
5829 5730 hashno = TTE4M;
5830 5731 continue;
5831 5732 }
5832 5733 }
5833 5734 ASSERT(hmeblkp);
5834 5735 ASSERT(!hmeblkp->hblk_shared);
5835 5736 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
5836 5737 /*
5837 5738 * If the valid count is zero we can skip the range
5838 5739 * mapped by this hmeblk.
5839 5740 * We free hblks in the case of HAT_UNMAP. HAT_UNMAP
5840 5741 * is used by segment drivers as a hint
5841 5742 * that the mapping resource won't be used any longer.
5842 5743 * The best example of this is during exit().
5843 5744 */
5844 5745 addr = (caddr_t)roundup((uintptr_t)addr + 1,
5845 5746 get_hblk_span(hmeblkp));
5846 5747 if ((flags & HAT_UNLOAD_UNMAP) ||
5847 5748 (iskernel && !issegkmap)) {
5848 5749 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
5849 5750 &list, 0);
5850 5751 }
5851 5752 SFMMU_HASH_UNLOCK(hmebp);
5852 5753
5853 5754 if (iskernel) {
5854 5755 hashno = TTE64K;
5855 5756 continue;
5856 5757 }
5857 5758 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5858 5759 ASSERT(hashno == TTE64K);
5859 5760 continue;
5860 5761 }
5861 5762 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5862 5763 hashno = TTE512K;
5863 5764 continue;
5864 5765 }
5865 5766 if (mmu_page_sizes == max_mmu_page_sizes) {
5866 5767 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5867 5768 hashno = TTE4M;
5868 5769 continue;
5869 5770 }
5870 5771 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5871 5772 hashno = TTE32M;
5872 5773 continue;
5873 5774 }
5874 5775 hashno = TTE256M;
5875 5776 continue;
5876 5777 } else {
5877 5778 hashno = TTE4M;
5878 5779 continue;
5879 5780 }
5880 5781 }
5881 5782 if (hmeblkp->hblk_shw_bit) {
5882 5783 /*
5883 5784 * If we encounter a shadow hmeblk we know there is
5884 5785 * smaller sized hmeblks mapping the same address space.
5885 5786 * Decrement the hash size and rehash.
5886 5787 */
5887 5788 ASSERT(sfmmup != KHATID);
5888 5789 hashno--;
5889 5790 SFMMU_HASH_UNLOCK(hmebp);
5890 5791 continue;
5891 5792 }
5892 5793
5893 5794 /*
5894 5795 * track callback address ranges.
5895 5796 * only start a new range when it's not contiguous
5896 5797 */
5897 5798 if (callback != NULL) {
5898 5799 if (addr_count > 0 &&
5899 5800 addr == cb_end_addr[addr_count - 1])
5900 5801 --addr_count;
5901 5802 else
5902 5803 cb_start_addr[addr_count] = addr;
5903 5804 }
5904 5805
5905 5806 addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr,
5906 5807 dmrp, flags);
5907 5808
5908 5809 if (callback != NULL)
5909 5810 cb_end_addr[addr_count++] = addr;
5910 5811
5911 5812 if (((flags & HAT_UNLOAD_UNMAP) || (iskernel && !issegkmap)) &&
5912 5813 !hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
5913 5814 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 0);
5914 5815 }
5915 5816 SFMMU_HASH_UNLOCK(hmebp);
5916 5817
5917 5818 /*
5918 5819 * Notify our caller as to exactly which pages
5919 5820 * have been unloaded. We do these in clumps,
5920 5821 * to minimize the number of xt_sync()s that need to occur.
5921 5822 */
5922 5823 if (callback != NULL && addr_count == MAX_CB_ADDR) {
5923 5824 if (dmrp != NULL) {
5924 5825 DEMAP_RANGE_FLUSH(dmrp);
5925 5826 cpuset = sfmmup->sfmmu_cpusran;
5926 5827 xt_sync(cpuset);
5927 5828 }
5928 5829
5929 5830 for (a = 0; a < MAX_CB_ADDR; ++a) {
5930 5831 callback->hcb_start_addr = cb_start_addr[a];
5931 5832 callback->hcb_end_addr = cb_end_addr[a];
5932 5833 callback->hcb_function(callback);
5933 5834 }
5934 5835 addr_count = 0;
5935 5836 }
5936 5837 if (iskernel) {
5937 5838 hashno = TTE64K;
5938 5839 continue;
5939 5840 }
5940 5841 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5941 5842 ASSERT(hashno == TTE64K);
5942 5843 continue;
5943 5844 }
5944 5845 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5945 5846 hashno = TTE512K;
5946 5847 continue;
5947 5848 }
5948 5849 if (mmu_page_sizes == max_mmu_page_sizes) {
5949 5850 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5950 5851 hashno = TTE4M;
5951 5852 continue;
5952 5853 }
5953 5854 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5954 5855 hashno = TTE32M;
5955 5856 continue;
5956 5857 }
5957 5858 hashno = TTE256M;
5958 5859 } else {
5959 5860 hashno = TTE4M;
5960 5861 }
5961 5862 }
5962 5863
5963 5864 sfmmu_hblks_list_purge(&list, 0);
5964 5865 if (dmrp != NULL) {
5965 5866 DEMAP_RANGE_FLUSH(dmrp);
5966 5867 cpuset = sfmmup->sfmmu_cpusran;
5967 5868 xt_sync(cpuset);
5968 5869 }
5969 5870 if (callback && addr_count != 0) {
5970 5871 for (a = 0; a < addr_count; ++a) {
5971 5872 callback->hcb_start_addr = cb_start_addr[a];
5972 5873 callback->hcb_end_addr = cb_end_addr[a];
5973 5874 callback->hcb_function(callback);
5974 5875 }
5975 5876 }
5976 5877
5977 5878 /*
5978 5879 * Check TSB and TLB page sizes if the process isn't exiting.
5979 5880 */
5980 5881 if (!sfmmup->sfmmu_free)
5981 5882 sfmmu_check_page_sizes(sfmmup, 0);
5982 5883 }
5983 5884
5984 5885 /*
5985 5886 * Unload all the mappings in the range [addr..addr+len). addr and len must
5986 5887 * be MMU_PAGESIZE aligned.
5987 5888 */
5988 5889 void
5989 5890 hat_unload(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags)
5990 5891 {
5991 - if (sfmmup->sfmmu_xhat_provider) {
5992 - XHAT_UNLOAD(sfmmup, addr, len, flags);
5993 - return;
5994 - }
5995 5892 hat_unload_callback(sfmmup, addr, len, flags, NULL);
5996 5893 }
5997 5894
5998 5895
5999 5896 /*
6000 5897 * Find the largest mapping size for this page.
6001 5898 */
6002 5899 int
6003 5900 fnd_mapping_sz(page_t *pp)
6004 5901 {
6005 5902 int sz;
6006 5903 int p_index;
6007 5904
6008 5905 p_index = PP_MAPINDEX(pp);
6009 5906
6010 5907 sz = 0;
6011 5908 p_index >>= 1; /* don't care about 8K bit */
6012 5909 for (; p_index; p_index >>= 1) {
6013 5910 sz++;
6014 5911 }
6015 5912
6016 5913 return (sz);
6017 5914 }
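
A short worked example of the bit-walk above, under the usual assumption that bit N of the PP_MAPINDEX() bitmap stands for a mapping of page-size index N (bit 0 being the 8K size); the arithmetic itself follows directly from the code.

    /*
     * PP_MAPINDEX(pp) == 0x9     binary 1001: an 8K and a 4M mapping
     * discard the 8K bit          100
     * shift until zero            3 iterations
     *
     * so fnd_mapping_sz() returns 3, the 4M size index on a
     * four-pagesize MMU.
     */
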
6018 5915
6019 5916 /*
6020 5917 * This function unloads a range of addresses for an hmeblk.
6021 5918 * It returns the next address to be unloaded.
6022 5919 * It should be called with the hash lock held.
6023 5920 */
6024 5921 static caddr_t
6025 5922 sfmmu_hblk_unload(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
6026 5923 caddr_t endaddr, demap_range_t *dmrp, uint_t flags)
6027 5924 {
6028 5925 tte_t tte, ttemod;
6029 5926 struct sf_hment *sfhmep;
6030 5927 int ttesz;
6031 5928 long ttecnt;
6032 5929 page_t *pp;
6033 5930 kmutex_t *pml;
6034 5931 int ret;
6035 5932 int use_demap_range;
6036 5933
6037 5934 ASSERT(in_hblk_range(hmeblkp, addr));
6038 5935 ASSERT(!hmeblkp->hblk_shw_bit);
6039 5936 ASSERT(sfmmup != NULL || hmeblkp->hblk_shared);
6040 5937 ASSERT(sfmmup == NULL || !hmeblkp->hblk_shared);
6041 5938 ASSERT(dmrp == NULL || !hmeblkp->hblk_shared);
6042 5939
6043 5940 #ifdef DEBUG
6044 5941 if (get_hblk_ttesz(hmeblkp) != TTE8K &&
6045 5942 (endaddr < get_hblk_endaddr(hmeblkp))) {
6046 5943 panic("sfmmu_hblk_unload: partial unload of large page");
6047 5944 }
6048 5945 #endif /* DEBUG */
6049 5946
6050 5947 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
6051 5948 ttesz = get_hblk_ttesz(hmeblkp);
6052 5949
6053 5950 use_demap_range = ((dmrp == NULL) ||
6054 5951 (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp)));
6055 5952
6056 5953 if (use_demap_range) {
6057 5954 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
6058 5955 } else if (dmrp != NULL) {
6059 5956 DEMAP_RANGE_FLUSH(dmrp);
6060 5957 }
6061 5958 ttecnt = 0;
6062 5959 HBLKTOHME(sfhmep, hmeblkp, addr);
6063 5960
6064 5961 while (addr < endaddr) {
6065 5962 pml = NULL;
6066 5963 sfmmu_copytte(&sfhmep->hme_tte, &tte);
6067 5964 if (TTE_IS_VALID(&tte)) {
6068 5965 pp = sfhmep->hme_page;
6069 5966 if (pp != NULL) {
6070 5967 pml = sfmmu_mlist_enter(pp);
6071 5968 }
6072 5969
6073 5970 /*
6074 5971 * Verify if hme still points to 'pp' now that
6075 5972 * we have p_mapping lock.
6076 5973 */
6077 5974 if (sfhmep->hme_page != pp) {
6078 5975 if (pp != NULL && sfhmep->hme_page != NULL) {
6079 5976 ASSERT(pml != NULL);
6080 5977 sfmmu_mlist_exit(pml);
6081 5978 /* Re-start this iteration. */
6082 5979 continue;
6083 5980 }
6084 5981 ASSERT((pp != NULL) &&
6085 5982 (sfhmep->hme_page == NULL));
6086 5983 goto tte_unloaded;
6087 5984 }
6088 5985
6089 5986 /*
6090 5987 * This point on we have both HASH and p_mapping
6091 5988 * lock.
6092 5989 */
6093 5990 ASSERT(pp == sfhmep->hme_page);
6094 5991 ASSERT(pp == NULL || sfmmu_mlist_held(pp));
6095 5992
6096 5993 /*
6097 5994 * We need to loop on modify tte because it is
6098 5995 * possible for pagesync to come along and
6099 5996 * change the software bits beneath us.
6100 5997 *
6101 5998 * Page_unload can also invalidate the tte after
6102 5999 * we read tte outside of p_mapping lock.
6103 6000 */
6104 6001 again:
6105 6002 ttemod = tte;
6106 6003
6107 6004 TTE_SET_INVALID(&ttemod);
6108 6005 ret = sfmmu_modifytte_try(&tte, &ttemod,
6109 6006 &sfhmep->hme_tte);
6110 6007
6111 6008 if (ret <= 0) {
6112 6009 if (TTE_IS_VALID(&tte)) {
6113 6010 ASSERT(ret < 0);
6114 6011 goto again;
6115 6012 }
6116 6013 if (pp != NULL) {
6117 6014 panic("sfmmu_hblk_unload: pp = 0x%p "
6118 6015 "tte became invalid under mlist"
6119 6016 " lock = 0x%p", (void *)pp,
6120 6017 (void *)pml);
6121 6018 }
6122 6019 continue;
6123 6020 }
6124 6021
6125 6022 if (!(flags & HAT_UNLOAD_NOSYNC)) {
6126 6023 sfmmu_ttesync(sfmmup, addr, &tte, pp);
6127 6024 }
6128 6025
6129 6026 /*
6130 6027 * Ok- we invalidated the tte. Do the rest of the job.
6131 6028 */
6132 6029 ttecnt++;
6133 6030
6134 6031 if (flags & HAT_UNLOAD_UNLOCK) {
6135 6032 ASSERT(hmeblkp->hblk_lckcnt > 0);
6136 6033 atomic_dec_32(&hmeblkp->hblk_lckcnt);
6137 6034 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
6138 6035 }
6139 6036
6140 6037 /*
6141 6038 * Normally we would need to flush the page
6142 6039 * from the virtual cache at this point in
6143 6040 * order to prevent a potential cache alias
6144 6041 * inconsistency.
6145 6042 * The particular scenario we need to worry
6146 6043 * about is:
6147 6044 * Given: va1 and va2 are two virtual address
6148 6045 * that alias and map the same physical
6149 6046 * address.
6150 6047 * 1. mapping exists from va1 to pa and data
6151 6048 * has been read into the cache.
6152 6049 * 2. unload va1.
6153 6050 * 3. load va2 and modify data using va2.
6154 6051 * 4 unload va2.
6155 6052 * 5. load va1 and reference data. Unless we
6156 6053 * flush the data cache when we unload we will
6157 6054 * get stale data.
6158 6055 * Fortunately, page coloring eliminates the
6159 6056 * above scenario by remembering the color a
6160 6057 * physical page was last or is currently
6161 6058 * mapped to. Now, we delay the flush until
6162 6059 * the loading of translations. Only when the
6163 6060 * new translation is of a different color
6164 6061 * are we forced to flush.
6165 6062 */
6166 6063 if (use_demap_range) {
6167 6064 /*
6168 6065 * Mark this page as needing a demap.
6169 6066 */
6170 6067 DEMAP_RANGE_MARKPG(dmrp, addr);
6171 6068 } else {
6172 6069 ASSERT(sfmmup != NULL);
6173 6070 ASSERT(!hmeblkp->hblk_shared);
6174 6071 sfmmu_tlb_demap(addr, sfmmup, hmeblkp,
6175 6072 sfmmup->sfmmu_free, 0);
6176 6073 }
6177 6074
6178 6075 if (pp) {
6179 6076 /*
6180 6077 * Remove the hment from the mapping list
6181 6078 */
6182 6079 ASSERT(hmeblkp->hblk_hmecnt > 0);
6183 6080
6184 6081 /*
6185 6082 * Again, we cannot
6186 6083 * ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS);
6187 6084 */
6188 6085 HME_SUB(sfhmep, pp);
6189 6086 membar_stst();
6190 6087 atomic_dec_16(&hmeblkp->hblk_hmecnt);
6191 6088 }
6192 6089
6193 6090 ASSERT(hmeblkp->hblk_vcnt > 0);
6194 6091 atomic_dec_16(&hmeblkp->hblk_vcnt);
6195 6092
6196 6093 ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
6197 6094 !hmeblkp->hblk_lckcnt);
6198 6095
6199 6096 #ifdef VAC
6200 6097 if (pp && (pp->p_nrm & (P_KPMC | P_KPMS | P_TNC))) {
6201 6098 if (PP_ISTNC(pp)) {
6202 6099 /*
6203 6100 * If page was temporary
6204 6101 * uncached, try to recache
6205 6102 * it. Note that HME_SUB() was
6206 6103 * called above so p_index and
6207 6104 * mlist had been updated.
6208 6105 */
6209 6106 conv_tnc(pp, ttesz);
6210 6107 } else if (pp->p_mapping == NULL) {
6211 6108 ASSERT(kpm_enable);
6212 6109 /*
6213 6110 * Page is marked to be in VAC conflict
6214 6111 * to an existing kpm mapping and/or is
6215 6112 * kpm mapped using only the regular
6216 6113 * pagesize.
6217 6114 */
6218 6115 sfmmu_kpm_hme_unload(pp);
6219 6116 }
6220 6117 }
6221 6118 #endif /* VAC */
6222 6119 } else if ((pp = sfhmep->hme_page) != NULL) {
6223 6120 /*
6224 6121 * TTE is invalid but the hme
6225 6122 * still exists. let pageunload
6226 6123 * complete its job.
6227 6124 */
6228 6125 ASSERT(pml == NULL);
6229 6126 pml = sfmmu_mlist_enter(pp);
6230 6127 if (sfhmep->hme_page != NULL) {
6231 6128 sfmmu_mlist_exit(pml);
6232 6129 continue;
6233 6130 }
6234 6131 ASSERT(sfhmep->hme_page == NULL);
6235 6132 } else if (hmeblkp->hblk_hmecnt != 0) {
6236 6133 /*
6237 6134  * pageunload may not have finished decrementing
6238 6135 * hblk_vcnt and hblk_hmecnt. Find page_t if any and
6239 6136 * wait for pageunload to finish. Rely on pageunload
6240 6137 * to decrement hblk_hmecnt after hblk_vcnt.
6241 6138 */
6242 6139 pfn_t pfn = TTE_TO_TTEPFN(&tte);
6243 6140 ASSERT(pml == NULL);
6244 6141 if (pf_is_memory(pfn)) {
6245 6142 pp = page_numtopp_nolock(pfn);
6246 6143 if (pp != NULL) {
6247 6144 pml = sfmmu_mlist_enter(pp);
6248 6145 sfmmu_mlist_exit(pml);
6249 6146 pml = NULL;
6250 6147 }
6251 6148 }
6252 6149 }
6253 6150
6254 6151 tte_unloaded:
6255 6152 /*
6256 6153 * At this point, the tte we are looking at
6257 6154 * should be unloaded, and hme has been unlinked
6258 6155 * from page too. This is important because in
6259 6156 * pageunload, it does ttesync() then HME_SUB.
6260 6157 * We need to make sure HME_SUB has been completed
6261 6158 * so we know ttesync() has been completed. Otherwise,
6262 6159 * at exit time, after return from hat layer, VM will
6263 6160 * release as structure which hat_setstat() (called
6264 6161 * by ttesync()) needs.
6265 6162 */
6266 6163 #ifdef DEBUG
6267 6164 {
6268 6165 tte_t dtte;
6269 6166
6270 6167 ASSERT(sfhmep->hme_page == NULL);
6271 6168
6272 6169 sfmmu_copytte(&sfhmep->hme_tte, &dtte);
6273 6170 ASSERT(!TTE_IS_VALID(&dtte));
6274 6171 }
6275 6172 #endif
6276 6173
6277 6174 if (pml) {
6278 6175 sfmmu_mlist_exit(pml);
6279 6176 }
6280 6177
6281 6178 addr += TTEBYTES(ttesz);
6282 6179 sfhmep++;
6283 6180 DEMAP_RANGE_NEXTPG(dmrp);
6284 6181 }
6285 6182 /*
6286 6183 * For shared hmeblks this routine is only called when region is freed
6287 6184 * and no longer referenced. So no need to decrement ttecnt
6288 6185 * in the region structure here.
6289 6186 */
6290 6187 if (ttecnt > 0 && sfmmup != NULL) {
6291 6188 atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -ttecnt);
6292 6189 }
6293 6190 return (addr);
6294 6191 }
6295 6192
6296 6193 /*
6297 6194 * Invalidate a virtual address range for the local CPU.
6298 6195 * For best performance ensure that the va range is completely
6299 6196 * mapped, otherwise the entire TLB will be flushed.
6300 6197 */
6301 6198 void
6302 6199 hat_flush_range(struct hat *sfmmup, caddr_t va, size_t size)
6303 6200 {
6304 6201 ssize_t sz;
6305 6202 caddr_t endva = va + size;
6306 6203
6307 6204 while (va < endva) {
6308 6205 sz = hat_getpagesize(sfmmup, va);
6309 6206 if (sz < 0) {
6310 6207 vtag_flushall();
6311 6208 break;
6312 6209 }
6313 6210 vtag_flushpage(va, (uint64_t)sfmmup);
6314 6211 va += sz;
6315 6212 }
6316 6213 }
6317 6214
6318 6215 /*
6319 6216 * Synchronize all the mappings in the range [addr..addr+len).
6320 6217 * Can be called with clearflag having two states:
6321 6218 * HAT_SYNC_DONTZERO means just return the rm stats
6322 6219 * HAT_SYNC_ZERORM means zero rm bits in the tte and return the stats
6323 6220 */
6324 6221 void
6325 6222 hat_sync(struct hat *sfmmup, caddr_t addr, size_t len, uint_t clearflag)
6326 6223 {
6327 6224 struct hmehash_bucket *hmebp;
6328 6225 hmeblk_tag hblktag;
6329 6226 int hmeshift, hashno = 1;
6330 6227 struct hme_blk *hmeblkp, *list = NULL;
6331 6228 caddr_t endaddr;
6332 6229 cpuset_t cpuset;
6333 6230
6334 - ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
6335 6231 ASSERT((sfmmup == ksfmmup) ||
6336 6232 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
6337 6233 ASSERT((len & MMU_PAGEOFFSET) == 0);
6338 6234 ASSERT((clearflag == HAT_SYNC_DONTZERO) ||
6339 6235 (clearflag == HAT_SYNC_ZERORM));
6340 6236
6341 6237 CPUSET_ZERO(cpuset);
6342 6238
6343 6239 endaddr = addr + len;
6344 6240 hblktag.htag_id = sfmmup;
6345 6241 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
6346 6242
6347 6243 /*
6348 6244 * Spitfire supports 4 page sizes.
6349 6245 * Most pages are expected to be of the smallest page
6350 6246 * size (8K) and these will not need to be rehashed. 64K
6351 6247  * pages also don't need to be rehashed because an hmeblk
6352 6248  * spans 64K of address space. 512K pages might need 1 rehash
6353 6249  * and 4M pages 2 rehashes.
6354 6250 */
6355 6251 while (addr < endaddr) {
6356 6252 hmeshift = HME_HASH_SHIFT(hashno);
6357 6253 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
6358 6254 hblktag.htag_rehash = hashno;
6359 6255 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
6360 6256
6361 6257 SFMMU_HASH_LOCK(hmebp);
6362 6258
6363 6259 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
6364 6260 if (hmeblkp != NULL) {
6365 6261 ASSERT(!hmeblkp->hblk_shared);
6366 6262 /*
6367 6263 * We've encountered a shadow hmeblk so skip the range
6368 6264 * of the next smaller mapping size.
6369 6265 */
6370 6266 if (hmeblkp->hblk_shw_bit) {
6371 6267 ASSERT(sfmmup != ksfmmup);
6372 6268 ASSERT(hashno > 1);
6373 6269 addr = (caddr_t)P2END((uintptr_t)addr,
6374 6270 TTEBYTES(hashno - 1));
6375 6271 } else {
6376 6272 addr = sfmmu_hblk_sync(sfmmup, hmeblkp,
6377 6273 addr, endaddr, clearflag);
6378 6274 }
6379 6275 SFMMU_HASH_UNLOCK(hmebp);
6380 6276 hashno = 1;
6381 6277 continue;
6382 6278 }
6383 6279 SFMMU_HASH_UNLOCK(hmebp);
6384 6280
6385 6281 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
6386 6282 /*
6387 6283 * We have traversed the whole list and rehashed
6388 6284 * if necessary without finding the address to sync.
6389 6285 * This is ok so we increment the address by the
6390 6286 * smallest hmeblk range for kernel mappings and the
6391 6287 * largest hmeblk range, to account for shadow hmeblks,
6392 6288 * for user mappings and continue.
6393 6289 */
6394 6290 if (sfmmup == ksfmmup)
6395 6291 addr = (caddr_t)P2END((uintptr_t)addr,
6396 6292 TTEBYTES(1));
6397 6293 else
6398 6294 addr = (caddr_t)P2END((uintptr_t)addr,
6399 6295 TTEBYTES(hashno));
6400 6296 hashno = 1;
6401 6297 } else {
6402 6298 hashno++;
6403 6299 }
6404 6300 }
6405 6301 sfmmu_hblks_list_purge(&list, 0);
6406 6302 cpuset = sfmmup->sfmmu_cpusran;
6407 6303 xt_sync(cpuset);
6408 6304 }
6409 6305
6410 6306 static caddr_t
6411 6307 sfmmu_hblk_sync(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
6412 6308 caddr_t endaddr, int clearflag)
6413 6309 {
6414 6310 tte_t tte, ttemod;
6415 6311 struct sf_hment *sfhmep;
6416 6312 int ttesz;
6417 6313 struct page *pp;
6418 6314 kmutex_t *pml;
6419 6315 int ret;
6420 6316
6421 6317 ASSERT(hmeblkp->hblk_shw_bit == 0);
6422 6318 ASSERT(!hmeblkp->hblk_shared);
6423 6319
6424 6320 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
6425 6321
6426 6322 ttesz = get_hblk_ttesz(hmeblkp);
6427 6323 HBLKTOHME(sfhmep, hmeblkp, addr);
6428 6324
6429 6325 while (addr < endaddr) {
6430 6326 sfmmu_copytte(&sfhmep->hme_tte, &tte);
6431 6327 if (TTE_IS_VALID(&tte)) {
6432 6328 pml = NULL;
6433 6329 pp = sfhmep->hme_page;
6434 6330 if (pp) {
6435 6331 pml = sfmmu_mlist_enter(pp);
6436 6332 }
6437 6333 if (pp != sfhmep->hme_page) {
6438 6334 /*
6439 6335 				 * tte must have been unloaded
6440 6336 * underneath us. Recheck
6441 6337 */
6442 6338 ASSERT(pml);
6443 6339 sfmmu_mlist_exit(pml);
6444 6340 continue;
6445 6341 }
6446 6342
6447 6343 ASSERT(pp == NULL || sfmmu_mlist_held(pp));
6448 6344
6449 6345 if (clearflag == HAT_SYNC_ZERORM) {
6450 6346 ttemod = tte;
6451 6347 TTE_CLR_RM(&ttemod);
6452 6348 ret = sfmmu_modifytte_try(&tte, &ttemod,
6453 6349 &sfhmep->hme_tte);
6454 6350 if (ret < 0) {
6455 6351 if (pml) {
6456 6352 sfmmu_mlist_exit(pml);
6457 6353 }
6458 6354 continue;
6459 6355 }
6460 6356
6461 6357 if (ret > 0) {
6462 6358 sfmmu_tlb_demap(addr, sfmmup,
6463 6359 hmeblkp, 0, 0);
6464 6360 }
6465 6361 }
6466 6362 sfmmu_ttesync(sfmmup, addr, &tte, pp);
6467 6363 if (pml) {
6468 6364 sfmmu_mlist_exit(pml);
6469 6365 }
6470 6366 }
6471 6367 addr += TTEBYTES(ttesz);
6472 6368 sfhmep++;
6473 6369 }
6474 6370 return (addr);
6475 6371 }
6476 6372
6477 6373 /*
6478 6374 * This function will sync a tte to the page struct and it will
6479 6375 * update the hat stats. Currently it allows us to pass a NULL pp
6480 6376 * and we will simply update the stats. We may want to change this
6481 6377 * so we only keep stats for pages backed by pp's.
6482 6378 */
6483 6379 static void
6484 6380 sfmmu_ttesync(struct hat *sfmmup, caddr_t addr, tte_t *ttep, page_t *pp)
6485 6381 {
6486 6382 uint_t rm = 0;
6487 6383 int sz;
6488 6384 pgcnt_t npgs;
6489 6385
6490 6386 ASSERT(TTE_IS_VALID(ttep));
6491 6387
6492 6388 if (TTE_IS_NOSYNC(ttep)) {
6493 6389 return;
6494 6390 }
6495 6391
6496 6392 if (TTE_IS_REF(ttep)) {
6497 6393 rm = P_REF;
6498 6394 }
6499 6395 if (TTE_IS_MOD(ttep)) {
6500 6396 rm |= P_MOD;
6501 6397 }
6502 6398
6503 6399 if (rm == 0) {
6504 6400 return;
6505 6401 }
6506 6402
6507 6403 sz = TTE_CSZ(ttep);
6508 6404 if (sfmmup != NULL && sfmmup->sfmmu_rmstat) {
6509 6405 int i;
6510 6406 caddr_t vaddr = addr;
6511 6407
6512 6408 for (i = 0; i < TTEPAGES(sz); i++, vaddr += MMU_PAGESIZE) {
6513 6409 hat_setstat(sfmmup->sfmmu_as, vaddr, MMU_PAGESIZE, rm);
6514 6410 }
6515 6411
6516 6412 }
6517 6413
6518 6414 /*
6519 6415 * XXX I want to use cas to update nrm bits but they
6520 6416 * currently belong in common/vm and not in hat where
6521 6417 * they should be.
6522 6418 * The nrm bits are protected by the same mutex as
6523 6419 * the one that protects the page's mapping list.
6524 6420 */
6525 6421 if (!pp)
6526 6422 return;
6527 6423 ASSERT(sfmmu_mlist_held(pp));
6528 6424 /*
6529 6425 * If the tte is for a large page, we need to sync all the
6530 6426 * pages covered by the tte.
6531 6427 */
6532 6428 if (sz != TTE8K) {
6533 6429 ASSERT(pp->p_szc != 0);
6534 6430 pp = PP_GROUPLEADER(pp, sz);
6535 6431 ASSERT(sfmmu_mlist_held(pp));
6536 6432 }
6537 6433
6538 6434 /* Get number of pages from tte size. */
6539 6435 npgs = TTEPAGES(sz);
6540 6436
6541 6437 do {
6542 6438 ASSERT(pp);
6543 6439 ASSERT(sfmmu_mlist_held(pp));
6544 6440 if (((rm & P_REF) != 0 && !PP_ISREF(pp)) ||
6545 6441 ((rm & P_MOD) != 0 && !PP_ISMOD(pp)))
6546 6442 hat_page_setattr(pp, rm);
6547 6443
6548 6444 /*
6549 6445 * Are we done? If not, we must have a large mapping.
6550 6446 * For large mappings we need to sync the rest of the pages
6551 6447 * covered by this tte; goto the next page.
6552 6448 */
6553 6449 } while (--npgs > 0 && (pp = PP_PAGENEXT(pp)));
6554 6450 }
6555 6451
6556 6452 /*
6557 6453 * Execute pre-callback handler of each pa_hment linked to pp
6558 6454 *
6559 6455 * Inputs:
6560 6456 * flag: either HAT_PRESUSPEND or HAT_SUSPEND.
6561 6457 * capture_cpus: pointer to return value (below)
6562 6458 *
6563 6459 * Returns:
6564 6460 * Propagates the subsystem callback return values back to the caller;
6565 6461 * returns 0 on success. If capture_cpus is non-NULL, the value returned
6566 6462 * is zero if all of the pa_hments are of a type that do not require
6567 6463 * capturing CPUs prior to suspending the mapping, else it is 1.
6568 6464 */
6569 6465 static int
6570 6466 hat_pageprocess_precallbacks(struct page *pp, uint_t flag, int *capture_cpus)
6571 6467 {
6572 6468 struct sf_hment *sfhmep;
6573 6469 struct pa_hment *pahmep;
6574 6470 int (*f)(caddr_t, uint_t, uint_t, void *);
6575 6471 int ret;
6576 6472 id_t id;
6577 6473 int locked = 0;
6578 6474 kmutex_t *pml;
6579 6475
6580 6476 ASSERT(PAGE_EXCL(pp));
6581 6477 if (!sfmmu_mlist_held(pp)) {
6582 6478 pml = sfmmu_mlist_enter(pp);
6583 6479 locked = 1;
6584 6480 }
6585 6481
6586 6482 if (capture_cpus)
6587 6483 *capture_cpus = 0;
6588 6484
6589 6485 top:
6590 6486 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
6591 6487 /*
6592 6488 * skip sf_hments corresponding to VA<->PA mappings;
6593 6489 * for pa_hment's, hme_tte.ll is zero
6594 6490 */
6595 6491 if (!IS_PAHME(sfhmep))
6596 6492 continue;
6597 6493
6598 6494 pahmep = sfhmep->hme_data;
6599 6495 ASSERT(pahmep != NULL);
6600 6496
6601 6497 /*
6602 6498 * skip if pre-handler has been called earlier in this loop
6603 6499 */
6604 6500 if (pahmep->flags & flag)
6605 6501 continue;
6606 6502
6607 6503 id = pahmep->cb_id;
6608 6504 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid);
6609 6505 if (capture_cpus && sfmmu_cb_table[id].capture_cpus != 0)
6610 6506 *capture_cpus = 1;
6611 6507 if ((f = sfmmu_cb_table[id].prehandler) == NULL) {
6612 6508 pahmep->flags |= flag;
6613 6509 continue;
6614 6510 }
6615 6511
6616 6512 /*
6617 6513 * Drop the mapping list lock to avoid locking order issues.
6618 6514 */
6619 6515 if (locked)
6620 6516 sfmmu_mlist_exit(pml);
6621 6517
6622 6518 ret = f(pahmep->addr, pahmep->len, flag, pahmep->pvt);
6623 6519 if (ret != 0)
6624 6520 return (ret); /* caller must do the cleanup */
6625 6521
6626 6522 if (locked) {
6627 6523 pml = sfmmu_mlist_enter(pp);
6628 6524 pahmep->flags |= flag;
6629 6525 goto top;
6630 6526 }
6631 6527
6632 6528 pahmep->flags |= flag;
6633 6529 }
6634 6530
6635 6531 if (locked)
6636 6532 sfmmu_mlist_exit(pml);
6637 6533
6638 6534 return (0);
6639 6535 }
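The drop-and-rescan idiom above — release the mapping list lock, run the handler, re-take the lock and restart from the head, relying on the per-entry flag so already-handled entries are skipped — is a common way to call out of a lock-ordered subsystem. A stand-alone sketch of the same shape, with hypothetical entry and lock names and a trivial handler, looks like this:

#include <pthread.h>
#include <stdio.h>

struct entry {
	struct entry	*next;
	int		done;		/* analogue of pahmep->flags & flag */
	int		id;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Runs with list_lock dropped, like the registered prehandler. */
static void
handler(int id)
{
	(void) printf("callback for entry %d\n", id);
}

static void
process_all(struct entry *head)
{
	struct entry *e;

	(void) pthread_mutex_lock(&list_lock);
top:
	for (e = head; e != NULL; e = e->next) {
		if (e->done)
			continue;
		(void) pthread_mutex_unlock(&list_lock);
		handler(e->id);
		(void) pthread_mutex_lock(&list_lock);
		e->done = 1;
		goto top;		/* the list may have changed: rescan from the head */
	}
	(void) pthread_mutex_unlock(&list_lock);
}

int
main(void)
{
	struct entry c = { NULL, 0, 3 }, b = { &c, 0, 2 }, a = { &b, 0, 1 };

	process_all(&a);
	return (0);
}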
6640 6536
6641 6537 /*
6642 6538 * Execute post-callback handler of each pa_hment linked to pp
6643 6539 *
6644 6540 * Same overall assumptions and restrictions apply as for
6645 6541 * hat_pageprocess_precallbacks().
6646 6542 */
6647 6543 static void
6648 6544 hat_pageprocess_postcallbacks(struct page *pp, uint_t flag)
6649 6545 {
6650 6546 pfn_t pgpfn = pp->p_pagenum;
6651 6547 pfn_t pgmask = btop(page_get_pagesize(pp->p_szc)) - 1;
6652 6548 pfn_t newpfn;
6653 6549 struct sf_hment *sfhmep;
6654 6550 struct pa_hment *pahmep;
6655 6551 int (*f)(caddr_t, uint_t, uint_t, void *, pfn_t);
6656 6552 id_t id;
6657 6553 int locked = 0;
6658 6554 kmutex_t *pml;
6659 6555
6660 6556 ASSERT(PAGE_EXCL(pp));
6661 6557 if (!sfmmu_mlist_held(pp)) {
6662 6558 pml = sfmmu_mlist_enter(pp);
6663 6559 locked = 1;
6664 6560 }
6665 6561
6666 6562 top:
6667 6563 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
6668 6564 /*
6669 6565 * skip sf_hments corresponding to VA<->PA mappings;
6670 6566 * for pa_hment's, hme_tte.ll is zero
6671 6567 */
6672 6568 if (!IS_PAHME(sfhmep))
6673 6569 continue;
6674 6570
6675 6571 pahmep = sfhmep->hme_data;
6676 6572 ASSERT(pahmep != NULL);
6677 6573
6678 6574 if ((pahmep->flags & flag) == 0)
6679 6575 continue;
6680 6576
6681 6577 pahmep->flags &= ~flag;
6682 6578
6683 6579 id = pahmep->cb_id;
6684 6580 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid);
6685 6581 if ((f = sfmmu_cb_table[id].posthandler) == NULL)
6686 6582 continue;
6687 6583
6688 6584 /*
6689 6585 * Convert the base page PFN into the constituent PFN
6690 6586 * which is needed by the callback handler.
6691 6587 */
6692 6588 newpfn = pgpfn | (btop((uintptr_t)pahmep->addr) & pgmask);
6693 6589
6694 6590 /*
6695 6591 * Drop the mapping list lock to avoid locking order issues.
6696 6592 */
6697 6593 if (locked)
6698 6594 sfmmu_mlist_exit(pml);
6699 6595
6700 6596 if (f(pahmep->addr, pahmep->len, flag, pahmep->pvt, newpfn)
6701 6597 != 0)
6702 6598 panic("sfmmu: posthandler failed");
6703 6599
6704 6600 if (locked) {
6705 6601 pml = sfmmu_mlist_enter(pp);
6706 6602 goto top;
6707 6603 }
6708 6604 }
6709 6605
6710 6606 if (locked)
6711 6607 sfmmu_mlist_exit(pml);
6712 6608 }
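The constituent-PFN arithmetic above, newpfn = pgpfn | (btop(addr) & pgmask), is compact but easy to misread. A small stand-alone example with assumed values — 8K base pages and a 64K large page, so pgmask is btop(64K) - 1 == 7 — shows how the low bits of the address's page number select the constituent frame inside the large page:

#include <stdint.h>
#include <stdio.h>

#define PAGESHIFT	13			/* assume 8K base pages */
#define BTOP(x)		((uintptr_t)(x) >> PAGESHIFT)

int
main(void)
{
	uintptr_t pgpfn = 0x4000;		/* base PFN of a 64K page */
	uintptr_t pgmask = BTOP(0x10000) - 1;	/* 64K / 8K - 1 == 7 */
	uintptr_t addr = 0x2000a000;		/* hypothetical pa_hment address */
	uintptr_t newpfn = pgpfn | (BTOP(addr) & pgmask);

	/* BTOP(addr) == 0x10005, so the constituent PFN is 0x4000 | 5 == 0x4005 */
	(void) printf("constituent pfn 0x%lx\n", (unsigned long)newpfn);
	return (0);
}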
6713 6609
6714 6610 /*
6715 6611 * Suspend locked kernel mapping
6716 6612 */
6717 6613 void
6718 6614 hat_pagesuspend(struct page *pp)
6719 6615 {
6720 6616 struct sf_hment *sfhmep;
6721 6617 sfmmu_t *sfmmup;
6722 6618 tte_t tte, ttemod;
6723 6619 struct hme_blk *hmeblkp;
6724 6620 caddr_t addr;
6725 6621 int index, cons;
6726 6622 cpuset_t cpuset;
6727 6623
6728 6624 ASSERT(PAGE_EXCL(pp));
6729 6625 ASSERT(sfmmu_mlist_held(pp));
6730 6626
6731 6627 mutex_enter(&kpr_suspendlock);
6732 6628
6733 6629 /*
6734 6630 * We're about to suspend a kernel mapping so mark this thread as
6735 6631 * non-traceable by DTrace. This prevents us from running into issues
6736 6632 * with probe context trying to touch a suspended page
6737 6633 * in the relocation codepath itself.
6738 6634 */
6739 6635 curthread->t_flag |= T_DONTDTRACE;
6740 6636
6741 6637 index = PP_MAPINDEX(pp);
6742 6638 cons = TTE8K;
6743 6639
6744 6640 retry:
6745 6641 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
6746 6642
6747 6643 if (IS_PAHME(sfhmep))
6748 6644 continue;
6749 6645
6750 6646 if (get_hblk_ttesz(sfmmu_hmetohblk(sfhmep)) != cons)
6751 6647 continue;
6752 6648
6753 6649 /*
6754 6650 * Loop until we successfully set the suspend bit in
6755 6651 * the TTE.
6756 6652 */
6757 6653 again:
6758 6654 sfmmu_copytte(&sfhmep->hme_tte, &tte);
6759 6655 ASSERT(TTE_IS_VALID(&tte));
6760 6656
6761 6657 ttemod = tte;
6762 6658 TTE_SET_SUSPEND(&ttemod);
6763 6659 if (sfmmu_modifytte_try(&tte, &ttemod,
6764 6660 &sfhmep->hme_tte) < 0)
6765 6661 goto again;
6766 6662
6767 6663 /*
6768 6664 * Invalidate TSB entry
6769 6665 */
6770 6666 hmeblkp = sfmmu_hmetohblk(sfhmep);
6771 6667
6772 6668 sfmmup = hblktosfmmu(hmeblkp);
6773 6669 ASSERT(sfmmup == ksfmmup);
6774 6670 ASSERT(!hmeblkp->hblk_shared);
6775 6671
6776 6672 addr = tte_to_vaddr(hmeblkp, tte);
6777 6673
6778 6674 /*
6779 6675 * No need to make sure that the TSB for this sfmmu is
6780 6676 * not being relocated since it is ksfmmup and thus it
6781 6677 * will never be relocated.
6782 6678 */
6783 6679 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
6784 6680
6785 6681 /*
6786 6682 * Update xcall stats
6787 6683 */
6788 6684 cpuset = cpu_ready_set;
6789 6685 CPUSET_DEL(cpuset, CPU->cpu_id);
6790 6686
6791 6687 /* LINTED: constant in conditional context */
6792 6688 SFMMU_XCALL_STATS(ksfmmup);
6793 6689
6794 6690 /*
6795 6691 * Flush TLB entry on remote CPU's
6796 6692 */
6797 6693 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr,
6798 6694 (uint64_t)ksfmmup);
6799 6695 xt_sync(cpuset);
6800 6696
6801 6697 /*
6802 6698 * Flush TLB entry on local CPU
6803 6699 */
6804 6700 vtag_flushpage(addr, (uint64_t)ksfmmup);
6805 6701 }
6806 6702
6807 6703 while (index != 0) {
6808 6704 index = index >> 1;
6809 6705 if (index != 0)
6810 6706 cons++;
6811 6707 if (index & 0x1) {
6812 6708 pp = PP_GROUPLEADER(pp, cons);
6813 6709 goto retry;
6814 6710 }
6815 6711 }
6816 6712 }
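The closing while loop is the same large-mapping walk used by hat_pageunload() and hat_pagesync() later in this file: the mapping index is treated as one bit per large page size, each right shift moves up one size, and a set bit restarts the scan from that size's group leader. A stand-alone trace with an assumed index value makes the order of visits explicit:

#include <stdio.h>

/* Hypothetical stand-in for the sun4u tte size codes TTE8K..TTE4M. */
static const char *ttesz[] = { "8K", "64K", "512K", "4M" };

int
main(void)
{
	/*
	 * Assume PP_MAPINDEX(pp) returned 0x6: the page belongs to both a
	 * 64K and a 512K mapping group (bit n set => mapped at size n).
	 */
	int index = 0x6;
	int cons = 0;			/* start at the 8K size */

	while (index != 0) {
		index >>= 1;
		if (index != 0)
			cons++;
		if (index & 0x1)
			(void) printf("rescan from the %s group leader\n",
			    ttesz[cons]);
	}
	return (0);
}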
6817 6713
6818 6714 #ifdef DEBUG
6819 6715
6820 6716 #define N_PRLE 1024
6821 6717 struct prle {
6822 6718 page_t *targ;
6823 6719 page_t *repl;
6824 6720 int status;
6825 6721 int pausecpus;
6826 6722 hrtime_t whence;
6827 6723 };
6828 6724
6829 6725 static struct prle page_relocate_log[N_PRLE];
6830 6726 static int prl_entry;
6831 6727 static kmutex_t prl_mutex;
6832 6728
6833 6729 #define PAGE_RELOCATE_LOG(t, r, s, p) \
6834 6730 mutex_enter(&prl_mutex); \
6835 6731 page_relocate_log[prl_entry].targ = *(t); \
6836 6732 page_relocate_log[prl_entry].repl = *(r); \
6837 6733 page_relocate_log[prl_entry].status = (s); \
6838 6734 page_relocate_log[prl_entry].pausecpus = (p); \
6839 6735 page_relocate_log[prl_entry].whence = gethrtime(); \
6840 6736 prl_entry = (prl_entry == (N_PRLE - 1))? 0 : prl_entry + 1; \
6841 6737 mutex_exit(&prl_mutex);
6842 6738
6843 6739 #else /* !DEBUG */
6844 6740 #define PAGE_RELOCATE_LOG(t, r, s, p)
6845 6741 #endif
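PAGE_RELOCATE_LOG() above is a DEBUG-only circular log: a single mutex serializes writers, the cursor wraps at N_PRLE, and new entries simply overwrite the oldest ones. The same pattern in a minimal stand-alone form (record layout and sizes are illustrative assumptions) is:

#include <pthread.h>
#include <time.h>

#define N_ENTRIES	8

struct logent {
	time_t	when;
	int	status;
};

static struct logent	logbuf[N_ENTRIES];
static int		log_cursor;
static pthread_mutex_t	log_lock = PTHREAD_MUTEX_INITIALIZER;

static void
log_event(int status)
{
	(void) pthread_mutex_lock(&log_lock);
	logbuf[log_cursor].when = time(NULL);
	logbuf[log_cursor].status = status;
	log_cursor = (log_cursor == N_ENTRIES - 1) ? 0 : log_cursor + 1;
	(void) pthread_mutex_unlock(&log_lock);
}

int
main(void)
{
	int i;

	for (i = 0; i < 20; i++)
		log_event(i);		/* entries 12..19 overwrite the oldest */
	return (0);
}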
6846 6742
6847 6743 /*
6848 6744 * Core Kernel Page Relocation Algorithm
6849 6745 *
6850 6746 * Input:
6851 6747 *
6852 6748 * target : constituent pages are SE_EXCL locked.
6853 6749 * replacement: constituent pages are SE_EXCL locked.
6854 6750 *
6855 6751 * Output:
6856 6752 *
6857 6753 * nrelocp: number of pages relocated
6858 6754 */
6859 6755 int
6860 6756 hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp)
6861 6757 {
6862 6758 page_t *targ, *repl;
6863 6759 page_t *tpp, *rpp;
6864 6760 kmutex_t *low, *high;
6865 6761 spgcnt_t npages, i;
6866 6762 page_t *pl = NULL;
6867 6763 int old_pil;
6868 6764 cpuset_t cpuset;
6869 6765 int cap_cpus;
6870 6766 int ret;
6871 6767 #ifdef VAC
6872 6768 int cflags = 0;
6873 6769 #endif
6874 6770
6875 6771 if (!kcage_on || PP_ISNORELOC(*target)) {
6876 6772 PAGE_RELOCATE_LOG(target, replacement, EAGAIN, -1);
6877 6773 return (EAGAIN);
6878 6774 }
6879 6775
6880 6776 mutex_enter(&kpr_mutex);
6881 6777 kreloc_thread = curthread;
6882 6778
6883 6779 targ = *target;
6884 6780 repl = *replacement;
6885 6781 ASSERT(repl != NULL);
6886 6782 ASSERT(targ->p_szc == repl->p_szc);
6887 6783
6888 6784 npages = page_get_pagecnt(targ->p_szc);
6889 6785
6890 6786 /*
6891 6787 * unload VA<->PA mappings that are not locked
6892 6788 */
6893 6789 tpp = targ;
6894 6790 for (i = 0; i < npages; i++) {
6895 6791 (void) hat_pageunload(tpp, SFMMU_KERNEL_RELOC);
6896 6792 tpp++;
6897 6793 }
6898 6794
6899 6795 /*
6900 6796 * Do "presuspend" callbacks, in a context from which we can still
6901 6797 * block as needed. Note that we don't hold the mapping list lock
6902 6798 * of "targ" at this point due to potential locking order issues;
6903 6799 * we assume that between the hat_pageunload() above and holding
6904 6800 * the SE_EXCL lock that the mapping list *cannot* change at this
6905 6801 * point.
6906 6802 */
6907 6803 ret = hat_pageprocess_precallbacks(targ, HAT_PRESUSPEND, &cap_cpus);
6908 6804 if (ret != 0) {
6909 6805 /*
6910 6806 * EIO translates to fatal error, for all others cleanup
6911 6807 * and return EAGAIN.
6912 6808 */
6913 6809 ASSERT(ret != EIO);
6914 6810 hat_pageprocess_postcallbacks(targ, HAT_POSTUNSUSPEND);
6915 6811 PAGE_RELOCATE_LOG(target, replacement, ret, -1);
6916 6812 kreloc_thread = NULL;
6917 6813 mutex_exit(&kpr_mutex);
6918 6814 return (EAGAIN);
6919 6815 }
6920 6816
6921 6817 /*
6922 6818 * acquire p_mapping list lock for both the target and replacement
6923 6819 * root pages.
6924 6820 *
6925 6821 * low and high refer to the need to grab the mlist locks in a
6926 6822 * specific order in order to prevent race conditions. Thus the
6927 6823 * lower lock must be grabbed before the higher lock.
6928 6824 *
6929 6825 * This will block hat_unload's accessing p_mapping list. Since
6930 6826 * we have SE_EXCL lock, hat_memload and hat_pageunload will be
6931 6827 * blocked. Thus, no one else will be accessing the p_mapping list
6932 6828 * while we suspend and reload the locked mapping below.
6933 6829 */
6934 6830 tpp = targ;
6935 6831 rpp = repl;
6936 6832 sfmmu_mlist_reloc_enter(tpp, rpp, &low, &high);
6937 6833
6938 6834 kpreempt_disable();
6939 6835
6940 6836 /*
6941 6837 * We raise our PIL to 13 so that we don't get captured by
6942 6838 * another CPU or pinned by an interrupt thread. We can't go to
6943 6839 * PIL 14 since the nexus driver(s) may need to interrupt at
6944 6840 * that level in the case of IOMMU pseudo mappings.
6945 6841 */
6946 6842 cpuset = cpu_ready_set;
6947 6843 CPUSET_DEL(cpuset, CPU->cpu_id);
6948 6844 if (!cap_cpus || CPUSET_ISNULL(cpuset)) {
6949 6845 old_pil = splr(XCALL_PIL);
6950 6846 } else {
6951 6847 old_pil = -1;
6952 6848 xc_attention(cpuset);
6953 6849 }
6954 6850 ASSERT(getpil() == XCALL_PIL);
6955 6851
6956 6852 /*
6957 6853 * Now do suspend callbacks. In the case of an IOMMU mapping
6958 6854 * this will suspend all DMA activity to the page while it is
6959 6855 * being relocated. Since we are well above LOCK_LEVEL and CPUs
6960 6856 * may be captured at this point we should have acquired any needed
6961 6857 * locks in the presuspend callback.
6962 6858 */
6963 6859 ret = hat_pageprocess_precallbacks(targ, HAT_SUSPEND, NULL);
6964 6860 if (ret != 0) {
6965 6861 repl = targ;
6966 6862 goto suspend_fail;
6967 6863 }
6968 6864
6969 6865 /*
6970 6866 * Raise the PIL yet again, this time to block all high-level
6971 6867 * interrupts on this CPU. This is necessary to prevent an
6972 6868 * interrupt routine from pinning the thread which holds the
6973 6869 * mapping suspended and then touching the suspended page.
6974 6870 *
6975 6871 * Once the page is suspended we also need to be careful to
6976 6872 * avoid calling any functions which touch any seg_kmem memory
6977 6873 * since that memory may be backed by the very page we are
6978 6874 * relocating in here!
6979 6875 */
6980 6876 hat_pagesuspend(targ);
6981 6877
6982 6878 /*
6983 6879 * Now that we are confident everybody has stopped using this page,
6984 6880 * copy the page contents. Note we use a physical copy to prevent
6985 6881 * locking issues and to avoid fpRAS because we can't handle it in
6986 6882 * this context.
6987 6883 */
6988 6884 for (i = 0; i < npages; i++, tpp++, rpp++) {
6989 6885 #ifdef VAC
6990 6886 /*
6991 6887 * If the replacement has a different vcolor than
6992 6888 		 * the one being replaced, we need to handle VAC
6993 6889 * consistency for it just as we were setting up
6994 6890 * a new mapping to it.
6995 6891 */
6996 6892 if ((PP_GET_VCOLOR(rpp) != NO_VCOLOR) &&
6997 6893 (tpp->p_vcolor != rpp->p_vcolor) &&
6998 6894 !CacheColor_IsFlushed(cflags, PP_GET_VCOLOR(rpp))) {
6999 6895 CacheColor_SetFlushed(cflags, PP_GET_VCOLOR(rpp));
7000 6896 sfmmu_cache_flushcolor(PP_GET_VCOLOR(rpp),
7001 6897 rpp->p_pagenum);
7002 6898 }
7003 6899 #endif
7004 6900 /*
7005 6901 * Copy the contents of the page.
7006 6902 */
7007 6903 ppcopy_kernel(tpp, rpp);
7008 6904 }
7009 6905
7010 6906 tpp = targ;
7011 6907 rpp = repl;
7012 6908 for (i = 0; i < npages; i++, tpp++, rpp++) {
7013 6909 /*
7014 6910 * Copy attributes. VAC consistency was handled above,
7015 6911 * if required.
7016 6912 */
7017 6913 rpp->p_nrm = tpp->p_nrm;
7018 6914 tpp->p_nrm = 0;
7019 6915 rpp->p_index = tpp->p_index;
7020 6916 tpp->p_index = 0;
7021 6917 #ifdef VAC
7022 6918 rpp->p_vcolor = tpp->p_vcolor;
7023 6919 #endif
7024 6920 }
7025 6921
7026 6922 /*
7027 6923 * First, unsuspend the page, if we set the suspend bit, and transfer
7028 6924 * the mapping list from the target page to the replacement page.
7029 6925 * Next process postcallbacks; since pa_hment's are linked only to the
7030 6926 * p_mapping list of root page, we don't iterate over the constituent
7031 6927 * pages.
7032 6928 */
7033 6929 hat_pagereload(targ, repl);
7034 6930
7035 6931 suspend_fail:
7036 6932 hat_pageprocess_postcallbacks(repl, HAT_UNSUSPEND);
7037 6933
7038 6934 /*
7039 6935 * Now lower our PIL and release any captured CPUs since we
7040 6936 * are out of the "danger zone". After this it will again be
7041 6937 * safe to acquire adaptive mutex locks, or to drop them...
7042 6938 */
7043 6939 if (old_pil != -1) {
7044 6940 splx(old_pil);
7045 6941 } else {
7046 6942 xc_dismissed(cpuset);
7047 6943 }
7048 6944
7049 6945 kpreempt_enable();
7050 6946
7051 6947 sfmmu_mlist_reloc_exit(low, high);
7052 6948
7053 6949 /*
7054 6950 * Postsuspend callbacks should drop any locks held across
7055 6951 * the suspend callbacks. As before, we don't hold the mapping
7056 6952 	 * list lock at this point; our assumption is that the mapping
7057 6953 * list still can't change due to our holding SE_EXCL lock and
7058 6954 * there being no unlocked mappings left. Hence the restriction
7059 6955 * on calling context to hat_delete_callback()
7060 6956 */
7061 6957 hat_pageprocess_postcallbacks(repl, HAT_POSTUNSUSPEND);
7062 6958 if (ret != 0) {
7063 6959 /*
7064 6960 * The second presuspend call failed: we got here through
7065 6961 * the suspend_fail label above.
7066 6962 */
7067 6963 ASSERT(ret != EIO);
7068 6964 PAGE_RELOCATE_LOG(target, replacement, ret, cap_cpus);
7069 6965 kreloc_thread = NULL;
7070 6966 mutex_exit(&kpr_mutex);
7071 6967 return (EAGAIN);
7072 6968 }
7073 6969
7074 6970 /*
7075 6971 * Now that we're out of the performance critical section we can
7076 6972 * take care of updating the hash table, since we still
7077 6973 * hold all the pages locked SE_EXCL at this point we
7078 6974 * needn't worry about things changing out from under us.
7079 6975 */
7080 6976 tpp = targ;
7081 6977 rpp = repl;
7082 6978 for (i = 0; i < npages; i++, tpp++, rpp++) {
7083 6979
7084 6980 /*
7085 6981 * replace targ with replacement in page_hash table
7086 6982 */
7087 6983 targ = tpp;
7088 6984 page_relocate_hash(rpp, targ);
7089 6985
7090 6986 /*
7091 6987 * concatenate target; caller of platform_page_relocate()
7092 6988 * expects target to be concatenated after returning.
7093 6989 */
7094 6990 ASSERT(targ->p_next == targ);
7095 6991 ASSERT(targ->p_prev == targ);
7096 6992 page_list_concat(&pl, &targ);
7097 6993 }
7098 6994
7099 6995 ASSERT(*target == pl);
7100 6996 *nrelocp = npages;
7101 6997 PAGE_RELOCATE_LOG(target, replacement, 0, cap_cpus);
7102 6998 kreloc_thread = NULL;
7103 6999 mutex_exit(&kpr_mutex);
7104 7000 return (0);
7105 7001 }
7106 7002
7107 7003 /*
7108 7004 * Called when stray pa_hments are found attached to a page which is
7109 7005 * being freed. Notify the subsystem which attached the pa_hment of
7110 7006 * the error if it registered a suitable handler, else panic.
7111 7007 */
7112 7008 static void
7113 7009 sfmmu_pahment_leaked(struct pa_hment *pahmep)
7114 7010 {
7115 7011 id_t cb_id = pahmep->cb_id;
7116 7012
7117 7013 ASSERT(cb_id >= (id_t)0 && cb_id < sfmmu_cb_nextid);
7118 7014 if (sfmmu_cb_table[cb_id].errhandler != NULL) {
7119 7015 if (sfmmu_cb_table[cb_id].errhandler(pahmep->addr, pahmep->len,
7120 7016 HAT_CB_ERR_LEAKED, pahmep->pvt) == 0)
7121 7017 return; /* non-fatal */
7122 7018 }
7123 7019 panic("pa_hment leaked: 0x%p", (void *)pahmep);
7124 7020 }
7125 7021
7126 7022 /*
7127 7023 * Remove all mappings to page 'pp'.
7128 7024 */
7129 7025 int
7130 7026 hat_pageunload(struct page *pp, uint_t forceflag)
7131 7027 {
7132 7028 struct page *origpp = pp;
7133 7029 struct sf_hment *sfhme, *tmphme;
7134 7030 struct hme_blk *hmeblkp;
7135 7031 kmutex_t *pml;
7136 7032 #ifdef VAC
7137 7033 kmutex_t *pmtx;
7138 7034 #endif
7139 7035 cpuset_t cpuset, tset;
7140 7036 int index, cons;
7141 - int xhme_blks;
7142 7037 int pa_hments;
7143 7038
7144 7039 ASSERT(PAGE_EXCL(pp));
7145 7040
7146 -retry_xhat:
7147 7041 tmphme = NULL;
7148 - xhme_blks = 0;
7149 7042 pa_hments = 0;
7150 7043 CPUSET_ZERO(cpuset);
7151 7044
7152 7045 pml = sfmmu_mlist_enter(pp);
7153 7046
7154 7047 #ifdef VAC
7155 7048 if (pp->p_kpmref)
7156 7049 sfmmu_kpm_pageunload(pp);
7157 7050 ASSERT(!PP_ISMAPPED_KPM(pp));
7158 7051 #endif
7159 7052 /*
7160 7053 * Clear vpm reference. Since the page is exclusively locked
7161 7054 * vpm cannot be referencing it.
7162 7055 */
7163 7056 if (vpm_enable) {
7164 7057 pp->p_vpmref = 0;
7165 7058 }
7166 7059
7167 7060 index = PP_MAPINDEX(pp);
7168 7061 cons = TTE8K;
7169 7062 retry:
7170 7063 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7171 7064 tmphme = sfhme->hme_next;
7172 7065
7173 7066 if (IS_PAHME(sfhme)) {
7174 7067 ASSERT(sfhme->hme_data != NULL);
7175 7068 pa_hments++;
7176 7069 continue;
7177 7070 }
7178 7071
7179 7072 hmeblkp = sfmmu_hmetohblk(sfhme);
7180 - if (hmeblkp->hblk_xhat_bit) {
7181 - struct xhat_hme_blk *xblk =
7182 - (struct xhat_hme_blk *)hmeblkp;
7183 -
7184 - (void) XHAT_PAGEUNLOAD(xblk->xhat_hme_blk_hat,
7185 - pp, forceflag, XBLK2PROVBLK(xblk));
7186 -
7187 - xhme_blks = 1;
7188 - continue;
7189 - }
7190 7073
7191 7074 /*
7192 7075 * If there are kernel mappings don't unload them, they will
7193 7076 * be suspended.
7194 7077 */
7195 7078 if (forceflag == SFMMU_KERNEL_RELOC && hmeblkp->hblk_lckcnt &&
7196 7079 hmeblkp->hblk_tag.htag_id == ksfmmup)
7197 7080 continue;
7198 7081
7199 7082 tset = sfmmu_pageunload(pp, sfhme, cons);
7200 7083 CPUSET_OR(cpuset, tset);
7201 7084 }
7202 7085
7203 7086 while (index != 0) {
7204 7087 index = index >> 1;
7205 7088 if (index != 0)
7206 7089 cons++;
7207 7090 if (index & 0x1) {
7208 7091 /* Go to leading page */
7209 7092 pp = PP_GROUPLEADER(pp, cons);
7210 7093 ASSERT(sfmmu_mlist_held(pp));
7211 7094 goto retry;
7212 7095 }
7213 7096 }
7214 7097
7215 7098 /*
7216 7099 * cpuset may be empty if the page was only mapped by segkpm,
7217 7100 * in which case we won't actually cross-trap.
7218 7101 */
7219 7102 xt_sync(cpuset);
7220 7103
7221 7104 /*
7222 7105 * The page should have no mappings at this point, unless
7223 7106 * we were called from hat_page_relocate() in which case we
7224 7107 * leave the locked mappings which will be suspended later.
7225 7108 */
7226 - ASSERT(!PP_ISMAPPED(origpp) || xhme_blks || pa_hments ||
7109 + ASSERT(!PP_ISMAPPED(origpp) || pa_hments ||
7227 7110 (forceflag == SFMMU_KERNEL_RELOC));
7228 7111
7229 7112 #ifdef VAC
7230 7113 if (PP_ISTNC(pp)) {
7231 7114 if (cons == TTE8K) {
7232 7115 pmtx = sfmmu_page_enter(pp);
7233 7116 PP_CLRTNC(pp);
7234 7117 sfmmu_page_exit(pmtx);
7235 7118 } else {
7236 7119 conv_tnc(pp, cons);
7237 7120 }
7238 7121 }
7239 7122 #endif /* VAC */
7240 7123
7241 7124 if (pa_hments && forceflag != SFMMU_KERNEL_RELOC) {
7242 7125 /*
7243 7126 * Unlink any pa_hments and free them, calling back
7244 7127 * the responsible subsystem to notify it of the error.
7245 7128 * This can occur in situations such as drivers leaking
7246 7129 * DMA handles: naughty, but common enough that we'd like
7247 7130 * to keep the system running rather than bringing it
7248 7131 * down with an obscure error like "pa_hment leaked"
7249 7132 * which doesn't aid the user in debugging their driver.
7250 7133 */
7251 7134 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7252 7135 tmphme = sfhme->hme_next;
7253 7136 if (IS_PAHME(sfhme)) {
7254 7137 struct pa_hment *pahmep = sfhme->hme_data;
7255 7138 sfmmu_pahment_leaked(pahmep);
7256 7139 HME_SUB(sfhme, pp);
7257 7140 kmem_cache_free(pa_hment_cache, pahmep);
7258 7141 }
7259 7142 }
7260 7143
7261 - ASSERT(!PP_ISMAPPED(origpp) || xhme_blks);
7144 + ASSERT(!PP_ISMAPPED(origpp));
7262 7145 }
7263 7146
7264 7147 sfmmu_mlist_exit(pml);
7265 7148
7266 - /*
7267 - * XHAT may not have finished unloading pages
7268 - * because some other thread was waiting for
7269 - * mlist lock and XHAT_PAGEUNLOAD let it do
7270 - * the job.
7271 - */
7272 - if (xhme_blks) {
7273 - pp = origpp;
7274 - goto retry_xhat;
7275 - }
7276 -
7277 7149 return (0);
7278 7150 }
7279 7151
7280 7152 cpuset_t
7281 7153 sfmmu_pageunload(page_t *pp, struct sf_hment *sfhme, int cons)
7282 7154 {
7283 7155 struct hme_blk *hmeblkp;
7284 7156 sfmmu_t *sfmmup;
7285 7157 tte_t tte, ttemod;
7286 7158 #ifdef DEBUG
7287 7159 tte_t orig_old;
7288 7160 #endif /* DEBUG */
7289 7161 caddr_t addr;
7290 7162 int ttesz;
7291 7163 int ret;
7292 7164 cpuset_t cpuset;
7293 7165
7294 7166 ASSERT(pp != NULL);
7295 7167 ASSERT(sfmmu_mlist_held(pp));
7296 7168 ASSERT(!PP_ISKAS(pp));
7297 7169
7298 7170 CPUSET_ZERO(cpuset);
7299 7171
7300 7172 hmeblkp = sfmmu_hmetohblk(sfhme);
7301 7173
7302 7174 readtte:
7303 7175 sfmmu_copytte(&sfhme->hme_tte, &tte);
7304 7176 if (TTE_IS_VALID(&tte)) {
7305 7177 sfmmup = hblktosfmmu(hmeblkp);
7306 7178 ttesz = get_hblk_ttesz(hmeblkp);
7307 7179 /*
7308 7180 * Only unload mappings of 'cons' size.
7309 7181 */
7310 7182 if (ttesz != cons)
7311 7183 return (cpuset);
7312 7184
7313 7185 /*
7314 7186 * Note that we have p_mapping lock, but no hash lock here.
7315 7187 * hblk_unload() has to have both hash lock AND p_mapping
7316 7188 * lock before it tries to modify tte. So, the tte could
7317 7189 * not become invalid in the sfmmu_modifytte_try() below.
7318 7190 */
7319 7191 ttemod = tte;
7320 7192 #ifdef DEBUG
7321 7193 orig_old = tte;
7322 7194 #endif /* DEBUG */
7323 7195
7324 7196 TTE_SET_INVALID(&ttemod);
7325 7197 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
7326 7198 if (ret < 0) {
7327 7199 #ifdef DEBUG
7328 7200 /* only R/M bits can change. */
7329 7201 chk_tte(&orig_old, &tte, &ttemod, hmeblkp);
7330 7202 #endif /* DEBUG */
7331 7203 goto readtte;
7332 7204 }
7333 7205
7334 7206 if (ret == 0) {
7335 7207 panic("pageunload: cas failed?");
7336 7208 }
7337 7209
7338 7210 addr = tte_to_vaddr(hmeblkp, tte);
7339 7211
7340 7212 if (hmeblkp->hblk_shared) {
7341 7213 sf_srd_t *srdp = (sf_srd_t *)sfmmup;
7342 7214 uint_t rid = hmeblkp->hblk_tag.htag_rid;
7343 7215 sf_region_t *rgnp;
7344 7216 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7345 7217 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7346 7218 ASSERT(srdp != NULL);
7347 7219 rgnp = srdp->srd_hmergnp[rid];
7348 7220 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
7349 7221 cpuset = sfmmu_rgntlb_demap(addr, rgnp, hmeblkp, 1);
7350 7222 sfmmu_ttesync(NULL, addr, &tte, pp);
7351 7223 ASSERT(rgnp->rgn_ttecnt[ttesz] > 0);
7352 7224 atomic_dec_ulong(&rgnp->rgn_ttecnt[ttesz]);
7353 7225 } else {
7354 7226 sfmmu_ttesync(sfmmup, addr, &tte, pp);
7355 7227 atomic_dec_ulong(&sfmmup->sfmmu_ttecnt[ttesz]);
7356 7228
7357 7229 /*
7358 7230 * We need to flush the page from the virtual cache
7359 7231 * in order to prevent a virtual cache alias
7360 7232 * inconsistency. The particular scenario we need
7361 7233 * to worry about is:
7362 7234 * Given: va1 and va2 are two virtual address that
7363 7235 * alias and will map the same physical address.
7364 7236 * 1. mapping exists from va1 to pa and data has
7365 7237 * been read into the cache.
7366 7238 * 2. unload va1.
7367 7239 * 3. load va2 and modify data using va2.
7368 7240 			 * 4. unload va2.
7369 7241 * 5. load va1 and reference data. Unless we flush
7370 7242 * the data cache when we unload we will get
7371 7243 * stale data.
7372 7244 * This scenario is taken care of by using virtual
7373 7245 * page coloring.
7374 7246 */
7375 7247 if (sfmmup->sfmmu_ismhat) {
7376 7248 /*
7377 7249 * Flush TSBs, TLBs and caches
7378 7250 * of every process
7379 7251 * sharing this ism segment.
7380 7252 */
7381 7253 sfmmu_hat_lock_all();
7382 7254 mutex_enter(&ism_mlist_lock);
7383 7255 kpreempt_disable();
7384 7256 sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp,
7385 7257 pp->p_pagenum, CACHE_NO_FLUSH);
7386 7258 kpreempt_enable();
7387 7259 mutex_exit(&ism_mlist_lock);
7388 7260 sfmmu_hat_unlock_all();
7389 7261 cpuset = cpu_ready_set;
7390 7262 } else {
7391 7263 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
7392 7264 cpuset = sfmmup->sfmmu_cpusran;
7393 7265 }
7394 7266 }
7395 7267
7396 7268 /*
7397 7269 * Hme_sub has to run after ttesync() and a_rss update.
7398 7270 * See hblk_unload().
7399 7271 */
7400 7272 HME_SUB(sfhme, pp);
7401 7273 membar_stst();
7402 7274
7403 7275 /*
7404 7276 * We can not make ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS)
7405 7277 * since pteload may have done a HME_ADD() right after
7406 7278 * we did the HME_SUB() above. Hmecnt is now maintained
7407 7279 		 * by cas only; no lock guarantees its value. The only
7408 7280 		 * guarantee we have is that the hmecnt should not be less than
7409 7281 * what it should be so the hblk will not be taken away.
7410 7282 * It's also important that we decremented the hmecnt after
7411 7283 * we are done with hmeblkp so that this hmeblk won't be
7412 7284 * stolen.
7413 7285 */
7414 7286 ASSERT(hmeblkp->hblk_hmecnt > 0);
7415 7287 ASSERT(hmeblkp->hblk_vcnt > 0);
7416 7288 atomic_dec_16(&hmeblkp->hblk_vcnt);
7417 7289 atomic_dec_16(&hmeblkp->hblk_hmecnt);
7418 7290 /*
7419 7291 * This is bug 4063182.
7420 7292 * XXX: fixme
7421 7293 * ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
7422 7294 * !hmeblkp->hblk_lckcnt);
7423 7295 */
7424 7296 } else {
7425 7297 panic("invalid tte? pp %p &tte %p",
7426 7298 (void *)pp, (void *)&tte);
7427 7299 }
7428 7300
7429 7301 return (cpuset);
7430 7302 }
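The aliasing scenario described in the comment above is what virtual page coloring prevents: two virtual addresses are only allowed to map the same physical page if they index the same lines of the virtually indexed cache, i.e. share a color. A stand-alone sketch of the color check (the page shift and number of colors are assumptions for illustration, not the real VAC geometry) is:

#include <stdint.h>
#include <stdio.h>

#define PAGESHIFT	13		/* assume 8K pages */
#define NCOLORS		4		/* assumed number of page colors */

static unsigned
vac_color(uintptr_t va)
{
	return ((va >> PAGESHIFT) & (NCOLORS - 1));
}

int
main(void)
{
	uintptr_t va1 = 0x10002000, va2 = 0x2000a000, va3 = 0x30004000;

	(void) printf("va1 color %u, va2 color %u -> %s share a physical page\n",
	    vac_color(va1), vac_color(va2),
	    vac_color(va1) == vac_color(va2) ? "may" : "must not");
	(void) printf("va1 color %u, va3 color %u -> %s share a physical page\n",
	    vac_color(va1), vac_color(va3),
	    vac_color(va1) == vac_color(va3) ? "may" : "must not");
	return (0);
}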
7431 7303
7432 7304 /*
7433 7305 * While relocating a kernel page, this function will move the mappings
7434 7306 * from tpp to dpp and modify any associated data with these mappings.
7435 7307 * It also unsuspends the suspended kernel mapping.
7436 7308 */
7437 7309 static void
7438 7310 hat_pagereload(struct page *tpp, struct page *dpp)
7439 7311 {
7440 7312 struct sf_hment *sfhme;
7441 7313 tte_t tte, ttemod;
7442 7314 int index, cons;
7443 7315
7444 7316 ASSERT(getpil() == PIL_MAX);
7445 7317 ASSERT(sfmmu_mlist_held(tpp));
7446 7318 ASSERT(sfmmu_mlist_held(dpp));
7447 7319
7448 7320 index = PP_MAPINDEX(tpp);
7449 7321 cons = TTE8K;
7450 7322
7451 7323 /* Update real mappings to the page */
7452 7324 retry:
7453 7325 for (sfhme = tpp->p_mapping; sfhme != NULL; sfhme = sfhme->hme_next) {
7454 7326 if (IS_PAHME(sfhme))
7455 7327 continue;
7456 7328 sfmmu_copytte(&sfhme->hme_tte, &tte);
7457 7329 ttemod = tte;
7458 7330
7459 7331 /*
7460 7332 * replace old pfn with new pfn in TTE
7461 7333 */
7462 7334 PFN_TO_TTE(ttemod, dpp->p_pagenum);
7463 7335
7464 7336 /*
7465 7337 * clear suspend bit
7466 7338 */
7467 7339 ASSERT(TTE_IS_SUSPEND(&ttemod));
7468 7340 TTE_CLR_SUSPEND(&ttemod);
7469 7341
7470 7342 if (sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte) < 0)
7471 7343 panic("hat_pagereload(): sfmmu_modifytte_try() failed");
7472 7344
7473 7345 /*
7474 7346 * set hme_page point to new page
7475 7347 */
7476 7348 sfhme->hme_page = dpp;
7477 7349 }
7478 7350
7479 7351 /*
7480 7352 * move p_mapping list from old page to new page
7481 7353 */
7482 7354 dpp->p_mapping = tpp->p_mapping;
7483 7355 tpp->p_mapping = NULL;
7484 7356 dpp->p_share = tpp->p_share;
7485 7357 tpp->p_share = 0;
7486 7358
7487 7359 while (index != 0) {
7488 7360 index = index >> 1;
7489 7361 if (index != 0)
7490 7362 cons++;
7491 7363 if (index & 0x1) {
7492 7364 tpp = PP_GROUPLEADER(tpp, cons);
7493 7365 dpp = PP_GROUPLEADER(dpp, cons);
7494 7366 goto retry;
7495 7367 }
7496 7368 }
7497 7369
7498 7370 curthread->t_flag &= ~T_DONTDTRACE;
7499 7371 mutex_exit(&kpr_suspendlock);
7500 7372 }
7501 7373
7502 7374 uint_t
7503 7375 hat_pagesync(struct page *pp, uint_t clearflag)
7504 7376 {
7505 7377 struct sf_hment *sfhme, *tmphme = NULL;
7506 7378 struct hme_blk *hmeblkp;
7507 7379 kmutex_t *pml;
7508 7380 cpuset_t cpuset, tset;
7509 7381 int index, cons;
7510 7382 extern ulong_t po_share;
7511 7383 page_t *save_pp = pp;
7512 7384 int stop_on_sh = 0;
7513 7385 uint_t shcnt;
7514 7386
7515 7387 CPUSET_ZERO(cpuset);
7516 7388
7517 7389 if (PP_ISRO(pp) && (clearflag & HAT_SYNC_STOPON_MOD)) {
7518 7390 return (PP_GENERIC_ATTR(pp));
7519 7391 }
7520 7392
7521 7393 if ((clearflag & HAT_SYNC_ZERORM) == 0) {
7522 7394 if ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(pp)) {
7523 7395 return (PP_GENERIC_ATTR(pp));
7524 7396 }
7525 7397 if ((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(pp)) {
7526 7398 return (PP_GENERIC_ATTR(pp));
7527 7399 }
7528 7400 if (clearflag & HAT_SYNC_STOPON_SHARED) {
7529 7401 if (pp->p_share > po_share) {
7530 7402 hat_page_setattr(pp, P_REF);
7531 7403 return (PP_GENERIC_ATTR(pp));
7532 7404 }
7533 7405 stop_on_sh = 1;
7534 7406 shcnt = 0;
7535 7407 }
7536 7408 }
7537 7409
7538 7410 clearflag &= ~HAT_SYNC_STOPON_SHARED;
7539 7411 pml = sfmmu_mlist_enter(pp);
7540 7412 index = PP_MAPINDEX(pp);
7541 7413 cons = TTE8K;
7542 7414 retry:
7543 7415 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7544 7416 /*
7545 7417 * We need to save the next hment on the list since
7546 7418 * it is possible for pagesync to remove an invalid hment
7547 7419 * from the list.
7548 7420 */
7549 7421 tmphme = sfhme->hme_next;
7550 7422 if (IS_PAHME(sfhme))
7551 7423 continue;
7552 7424 /*
7553 7425 * If we are looking for large mappings and this hme doesn't
7554 7426 * reach the range we are seeking, just ignore it.
7555 7427 */
7556 7428 hmeblkp = sfmmu_hmetohblk(sfhme);
7557 - if (hmeblkp->hblk_xhat_bit)
7558 - continue;
7559 7429
7560 7430 if (hme_size(sfhme) < cons)
7561 7431 continue;
7562 7432
7563 7433 if (stop_on_sh) {
7564 7434 if (hmeblkp->hblk_shared) {
7565 7435 sf_srd_t *srdp = hblktosrd(hmeblkp);
7566 7436 uint_t rid = hmeblkp->hblk_tag.htag_rid;
7567 7437 sf_region_t *rgnp;
7568 7438 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7569 7439 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7570 7440 ASSERT(srdp != NULL);
7571 7441 rgnp = srdp->srd_hmergnp[rid];
7572 7442 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp,
7573 7443 rgnp, rid);
7574 7444 shcnt += rgnp->rgn_refcnt;
7575 7445 } else {
7576 7446 shcnt++;
7577 7447 }
7578 7448 if (shcnt > po_share) {
7579 7449 /*
7580 7450 * tell the pager to spare the page this time
7581 7451 * around.
7582 7452 */
7583 7453 hat_page_setattr(save_pp, P_REF);
7584 7454 index = 0;
7585 7455 break;
7586 7456 }
7587 7457 }
7588 7458 tset = sfmmu_pagesync(pp, sfhme,
7589 7459 clearflag & ~HAT_SYNC_STOPON_RM);
7590 7460 CPUSET_OR(cpuset, tset);
7591 7461
7592 7462 /*
7593 7463 * If clearflag is HAT_SYNC_DONTZERO, break out as soon
7594 7464 * as the "ref" or "mod" is set or share cnt exceeds po_share.
7595 7465 */
7596 7466 if ((clearflag & ~HAT_SYNC_STOPON_RM) == HAT_SYNC_DONTZERO &&
7597 7467 (((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp)) ||
7598 7468 ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)))) {
7599 7469 index = 0;
7600 7470 break;
7601 7471 }
7602 7472 }
7603 7473
7604 7474 while (index) {
7605 7475 index = index >> 1;
7606 7476 cons++;
7607 7477 if (index & 0x1) {
7608 7478 /* Go to leading page */
7609 7479 pp = PP_GROUPLEADER(pp, cons);
7610 7480 goto retry;
7611 7481 }
7612 7482 }
7613 7483
7614 7484 xt_sync(cpuset);
7615 7485 sfmmu_mlist_exit(pml);
7616 7486 return (PP_GENERIC_ATTR(save_pp));
7617 7487 }
7618 7488
7619 7489 /*
7620 7490 * Get all the hardware dependent attributes for a page struct
7621 7491 */
7622 7492 static cpuset_t
7623 7493 sfmmu_pagesync(struct page *pp, struct sf_hment *sfhme,
7624 7494 uint_t clearflag)
7625 7495 {
7626 7496 caddr_t addr;
7627 7497 tte_t tte, ttemod;
7628 7498 struct hme_blk *hmeblkp;
7629 7499 int ret;
7630 7500 sfmmu_t *sfmmup;
7631 7501 cpuset_t cpuset;
7632 7502
7633 7503 ASSERT(pp != NULL);
7634 7504 ASSERT(sfmmu_mlist_held(pp));
7635 7505 ASSERT((clearflag == HAT_SYNC_DONTZERO) ||
7636 7506 (clearflag == HAT_SYNC_ZERORM));
7637 7507
7638 7508 SFMMU_STAT(sf_pagesync);
7639 7509
7640 7510 CPUSET_ZERO(cpuset);
7641 7511
7642 7512 sfmmu_pagesync_retry:
7643 7513
7644 7514 sfmmu_copytte(&sfhme->hme_tte, &tte);
7645 7515 if (TTE_IS_VALID(&tte)) {
7646 7516 hmeblkp = sfmmu_hmetohblk(sfhme);
7647 7517 sfmmup = hblktosfmmu(hmeblkp);
7648 7518 addr = tte_to_vaddr(hmeblkp, tte);
7649 7519 if (clearflag == HAT_SYNC_ZERORM) {
7650 7520 ttemod = tte;
7651 7521 TTE_CLR_RM(&ttemod);
7652 7522 ret = sfmmu_modifytte_try(&tte, &ttemod,
7653 7523 &sfhme->hme_tte);
7654 7524 if (ret < 0) {
7655 7525 /*
7656 7526 * cas failed and the new value is not what
7657 7527 * we want.
7658 7528 */
7659 7529 goto sfmmu_pagesync_retry;
7660 7530 }
7661 7531
7662 7532 if (ret > 0) {
7663 7533 /* we win the cas */
7664 7534 if (hmeblkp->hblk_shared) {
7665 7535 sf_srd_t *srdp = (sf_srd_t *)sfmmup;
7666 7536 uint_t rid =
7667 7537 hmeblkp->hblk_tag.htag_rid;
7668 7538 sf_region_t *rgnp;
7669 7539 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7670 7540 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7671 7541 ASSERT(srdp != NULL);
7672 7542 rgnp = srdp->srd_hmergnp[rid];
7673 7543 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
7674 7544 srdp, rgnp, rid);
7675 7545 cpuset = sfmmu_rgntlb_demap(addr,
7676 7546 rgnp, hmeblkp, 1);
7677 7547 } else {
7678 7548 sfmmu_tlb_demap(addr, sfmmup, hmeblkp,
7679 7549 0, 0);
7680 7550 cpuset = sfmmup->sfmmu_cpusran;
7681 7551 }
7682 7552 }
7683 7553 }
7684 7554 sfmmu_ttesync(hmeblkp->hblk_shared ? NULL : sfmmup, addr,
7685 7555 &tte, pp);
7686 7556 }
7687 7557 return (cpuset);
7688 7558 }
7689 7559
7690 7560 /*
7691 7561 * Remove write permission from a mappings to a page, so that
7692 7562 * we can detect the next modification of it. This requires modifying
7693 7563 * the TTE then invalidating (demap) any TLB entry using that TTE.
7694 7564 * This code is similar to sfmmu_pagesync().
7695 7565 */
7696 7566 static cpuset_t
7697 7567 sfmmu_pageclrwrt(struct page *pp, struct sf_hment *sfhme)
7698 7568 {
7699 7569 caddr_t addr;
7700 7570 tte_t tte;
7701 7571 tte_t ttemod;
7702 7572 struct hme_blk *hmeblkp;
7703 7573 int ret;
7704 7574 sfmmu_t *sfmmup;
7705 7575 cpuset_t cpuset;
7706 7576
7707 7577 ASSERT(pp != NULL);
7708 7578 ASSERT(sfmmu_mlist_held(pp));
7709 7579
7710 7580 CPUSET_ZERO(cpuset);
7711 7581 SFMMU_STAT(sf_clrwrt);
7712 7582
7713 7583 retry:
7714 7584
7715 7585 sfmmu_copytte(&sfhme->hme_tte, &tte);
7716 7586 if (TTE_IS_VALID(&tte) && TTE_IS_WRITABLE(&tte)) {
7717 7587 hmeblkp = sfmmu_hmetohblk(sfhme);
7718 -
7719 - /*
7720 - * xhat mappings should never be to a VMODSORT page.
7721 - */
7722 - ASSERT(hmeblkp->hblk_xhat_bit == 0);
7723 -
7724 7588 sfmmup = hblktosfmmu(hmeblkp);
7725 7589 addr = tte_to_vaddr(hmeblkp, tte);
7726 7590
7727 7591 ttemod = tte;
7728 7592 TTE_CLR_WRT(&ttemod);
7729 7593 TTE_CLR_MOD(&ttemod);
7730 7594 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
7731 7595
7732 7596 /*
7733 7597 * if cas failed and the new value is not what
7734 7598 * we want retry
7735 7599 */
7736 7600 if (ret < 0)
7737 7601 goto retry;
7738 7602
7739 7603 /* we win the cas */
7740 7604 if (ret > 0) {
7741 7605 if (hmeblkp->hblk_shared) {
7742 7606 sf_srd_t *srdp = (sf_srd_t *)sfmmup;
7743 7607 uint_t rid = hmeblkp->hblk_tag.htag_rid;
7744 7608 sf_region_t *rgnp;
7745 7609 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7746 7610 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7747 7611 ASSERT(srdp != NULL);
7748 7612 rgnp = srdp->srd_hmergnp[rid];
7749 7613 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
7750 7614 srdp, rgnp, rid);
7751 7615 cpuset = sfmmu_rgntlb_demap(addr,
7752 7616 rgnp, hmeblkp, 1);
7753 7617 } else {
7754 7618 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
7755 7619 cpuset = sfmmup->sfmmu_cpusran;
7756 7620 }
7757 7621 }
7758 7622 }
7759 7623
7760 7624 return (cpuset);
7761 7625 }
7762 7626
7763 7627 /*
7764 7628 * Walk all mappings of a page, removing write permission and clearing the
7765 7629 * ref/mod bits. This code is similar to hat_pagesync()
7766 7630 */
7767 7631 static void
7768 7632 hat_page_clrwrt(page_t *pp)
7769 7633 {
7770 7634 struct sf_hment *sfhme;
7771 7635 struct sf_hment *tmphme = NULL;
7772 7636 kmutex_t *pml;
7773 7637 cpuset_t cpuset;
7774 7638 cpuset_t tset;
7775 7639 int index;
7776 7640 int cons;
7777 7641
7778 7642 CPUSET_ZERO(cpuset);
7779 7643
7780 7644 pml = sfmmu_mlist_enter(pp);
7781 7645 index = PP_MAPINDEX(pp);
7782 7646 cons = TTE8K;
7783 7647 retry:
7784 7648 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7785 7649 tmphme = sfhme->hme_next;
7786 7650
7787 7651 /*
7788 7652 * If we are looking for large mappings and this hme doesn't
7789 7653 		 * reach the range we are seeking, just ignore it.
7790 7654 */
7791 7655
7792 7656 if (hme_size(sfhme) < cons)
7793 7657 continue;
7794 7658
7795 7659 tset = sfmmu_pageclrwrt(pp, sfhme);
7796 7660 CPUSET_OR(cpuset, tset);
7797 7661 }
7798 7662
7799 7663 while (index) {
7800 7664 index = index >> 1;
7801 7665 cons++;
7802 7666 if (index & 0x1) {
7803 7667 /* Go to leading page */
7804 7668 pp = PP_GROUPLEADER(pp, cons);
7805 7669 goto retry;
7806 7670 }
7807 7671 }
7808 7672
7809 7673 xt_sync(cpuset);
7810 7674 sfmmu_mlist_exit(pml);
7811 7675 }
7812 7676
7813 7677 /*
7814 7678 * Set the given REF/MOD/RO bits for the given page.
7815 7679 * For a vnode with a sorted v_pages list, we need to change
7816 7680 * the attributes and the v_pages list together under page_vnode_mutex.
7817 7681 */
7818 7682 void
7819 7683 hat_page_setattr(page_t *pp, uint_t flag)
7820 7684 {
7821 7685 vnode_t *vp = pp->p_vnode;
7822 7686 page_t **listp;
7823 7687 kmutex_t *pmtx;
7824 7688 kmutex_t *vphm = NULL;
7825 7689 int noshuffle;
7826 7690
7827 7691 noshuffle = flag & P_NSH;
7828 7692 flag &= ~P_NSH;
7829 7693
7830 7694 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
7831 7695
7832 7696 /*
7833 7697 * nothing to do if attribute already set
7834 7698 */
7835 7699 if ((pp->p_nrm & flag) == flag)
7836 7700 return;
7837 7701
7838 7702 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) &&
7839 7703 !noshuffle) {
7840 7704 vphm = page_vnode_mutex(vp);
7841 7705 mutex_enter(vphm);
7842 7706 }
7843 7707
7844 7708 pmtx = sfmmu_page_enter(pp);
7845 7709 pp->p_nrm |= flag;
7846 7710 sfmmu_page_exit(pmtx);
7847 7711
7848 7712 if (vphm != NULL) {
7849 7713 /*
7850 7714 * Some File Systems examine v_pages for NULL w/o
7851 7715 * grabbing the vphm mutex. Must not let it become NULL when
7852 7716 * pp is the only page on the list.
7853 7717 */
7854 7718 if (pp->p_vpnext != pp) {
7855 7719 page_vpsub(&vp->v_pages, pp);
7856 7720 if (vp->v_pages != NULL)
7857 7721 listp = &vp->v_pages->p_vpprev->p_vpnext;
7858 7722 else
7859 7723 listp = &vp->v_pages;
7860 7724 page_vpadd(listp, pp);
7861 7725 }
7862 7726 mutex_exit(vphm);
7863 7727 }
7864 7728 }
7865 7729
7866 7730 void
7867 7731 hat_page_clrattr(page_t *pp, uint_t flag)
7868 7732 {
7869 7733 vnode_t *vp = pp->p_vnode;
7870 7734 kmutex_t *pmtx;
7871 7735
7872 7736 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
7873 7737
7874 7738 pmtx = sfmmu_page_enter(pp);
7875 7739
7876 7740 /*
7877 7741 * Caller is expected to hold page's io lock for VMODSORT to work
7878 7742 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod
7879 7743 * bit is cleared.
7880 7744 * We don't have assert to avoid tripping some existing third party
7881 7745 * code. The dirty page is moved back to top of the v_page list
7882 7746 * after IO is done in pvn_write_done().
7883 7747 */
7884 7748 pp->p_nrm &= ~flag;
7885 7749 sfmmu_page_exit(pmtx);
7886 7750
7887 7751 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
7888 7752
7889 7753 /*
7890 7754 * VMODSORT works by removing write permissions and getting
7891 7755 * a fault when a page is made dirty. At this point
7892 7756 * we need to remove write permission from all mappings
7893 7757 * to this page.
7894 7758 */
7895 7759 hat_page_clrwrt(pp);
7896 7760 }
7897 7761 }
7898 7762
7899 7763 uint_t
7900 7764 hat_page_getattr(page_t *pp, uint_t flag)
7901 7765 {
7902 7766 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
7903 7767 return ((uint_t)(pp->p_nrm & flag));
7904 7768 }
7905 7769
7906 7770 /*
7907 7771 * DEBUG kernels: verify that a kernel va<->pa translation
7908 7772 * is safe by checking the underlying page_t is in a page
7909 7773 * relocation-safe state.
7910 7774 */
7911 7775 #ifdef DEBUG
7912 7776 void
7913 7777 sfmmu_check_kpfn(pfn_t pfn)
7914 7778 {
7915 7779 page_t *pp;
7916 7780 int index, cons;
7917 7781
7918 7782 if (hat_check_vtop == 0)
7919 7783 return;
7920 7784
7921 7785 if (kvseg.s_base == NULL || panicstr)
7922 7786 return;
7923 7787
7924 7788 pp = page_numtopp_nolock(pfn);
7925 7789 if (!pp)
7926 7790 return;
7927 7791
7928 7792 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp))
7929 7793 return;
7930 7794
7931 7795 /*
7932 7796 * Handed a large kernel page, we dig up the root page since we
7933 7797 * know the root page might have the lock also.
7934 7798 */
7935 7799 if (pp->p_szc != 0) {
7936 7800 index = PP_MAPINDEX(pp);
7937 7801 cons = TTE8K;
7938 7802 again:
7939 7803 while (index != 0) {
7940 7804 index >>= 1;
7941 7805 if (index != 0)
7942 7806 cons++;
7943 7807 if (index & 0x1) {
7944 7808 pp = PP_GROUPLEADER(pp, cons);
7945 7809 goto again;
7946 7810 }
7947 7811 }
7948 7812 }
7949 7813
7950 7814 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp))
7951 7815 return;
7952 7816
7953 7817 /*
7954 7818 * Pages need to be locked or allocated "permanent" (either from
7955 7819 * static_arena arena or explicitly setting PG_NORELOC when calling
7956 7820 * page_create_va()) for VA->PA translations to be valid.
7957 7821 */
7958 7822 if (!PP_ISNORELOC(pp))
7959 7823 panic("Illegal VA->PA translation, pp 0x%p not permanent",
7960 7824 (void *)pp);
7961 7825 else
7962 7826 panic("Illegal VA->PA translation, pp 0x%p not locked",
7963 7827 (void *)pp);
7964 7828 }
7965 7829 #endif /* DEBUG */
7966 7830
7967 7831 /*
7968 7832 * Returns a page frame number for a given virtual address.
7969 7833 * Returns PFN_INVALID to indicate an invalid mapping
7970 7834 */
7971 7835 pfn_t
7972 7836 hat_getpfnum(struct hat *hat, caddr_t addr)
7973 7837 {
7974 7838 pfn_t pfn;
7975 7839 tte_t tte;
7976 7840
7977 7841 /*
7978 7842 * We would like to
7979 7843 * ASSERT(AS_LOCK_HELD(as, &as->a_lock));
7980 7844 * but we can't because the iommu driver will call this
7981 7845 * routine at interrupt time and it can't grab the as lock
7982 7846 * or it will deadlock: A thread could have the as lock
7983 7847 * and be waiting for io. The io can't complete
7984 7848 * because the interrupt thread is blocked trying to grab
7985 7849 * the as lock.
7986 7850 */
7987 7851
7988 - ASSERT(hat->sfmmu_xhat_provider == NULL);
7989 -
7990 7852 if (hat == ksfmmup) {
7991 7853 if (IS_KMEM_VA_LARGEPAGE(addr)) {
7992 7854 ASSERT(segkmem_lpszc > 0);
7993 7855 pfn = sfmmu_kvaszc2pfn(addr, segkmem_lpszc);
7994 7856 if (pfn != PFN_INVALID) {
7995 7857 sfmmu_check_kpfn(pfn);
7996 7858 return (pfn);
7997 7859 }
7998 7860 } else if (segkpm && IS_KPM_ADDR(addr)) {
7999 7861 return (sfmmu_kpm_vatopfn(addr));
8000 7862 }
8001 7863 while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte))
8002 7864 == PFN_SUSPENDED) {
8003 7865 sfmmu_vatopfn_suspended(addr, ksfmmup, &tte);
8004 7866 }
8005 7867 sfmmu_check_kpfn(pfn);
8006 7868 return (pfn);
8007 7869 } else {
8008 7870 return (sfmmu_uvatopfn(addr, hat, NULL));
8009 7871 }
8010 7872 }
8011 7873
8012 7874 /*
8013 7875 * This routine will return both pfn and tte for the vaddr.
8014 7876 */
8015 7877 static pfn_t
8016 7878 sfmmu_uvatopfn(caddr_t vaddr, struct hat *sfmmup, tte_t *ttep)
8017 7879 {
8018 7880 struct hmehash_bucket *hmebp;
8019 7881 hmeblk_tag hblktag;
8020 7882 int hmeshift, hashno = 1;
8021 7883 struct hme_blk *hmeblkp = NULL;
8022 7884 tte_t tte;
8023 7885
8024 7886 struct sf_hment *sfhmep;
8025 7887 pfn_t pfn;
8026 7888
8027 7889 /* support for ISM */
8028 7890 ism_map_t *ism_map;
8029 7891 ism_blk_t *ism_blkp;
8030 7892 int i;
8031 7893 sfmmu_t *ism_hatid = NULL;
8032 7894 sfmmu_t *locked_hatid = NULL;
8033 7895 sfmmu_t *sv_sfmmup = sfmmup;
8034 7896 caddr_t sv_vaddr = vaddr;
8035 7897 sf_srd_t *srdp;
8036 7898
8037 7899 if (ttep == NULL) {
8038 7900 ttep = &tte;
8039 7901 } else {
8040 7902 ttep->ll = 0;
8041 7903 }
8042 7904
8043 7905 ASSERT(sfmmup != ksfmmup);
8044 7906 SFMMU_STAT(sf_user_vtop);
8045 7907 /*
8046 7908 * Set ism_hatid if vaddr falls in a ISM segment.
8047 7909 */
8048 7910 ism_blkp = sfmmup->sfmmu_iblk;
8049 7911 if (ism_blkp != NULL) {
8050 7912 sfmmu_ismhat_enter(sfmmup, 0);
8051 7913 locked_hatid = sfmmup;
8052 7914 }
8053 7915 while (ism_blkp != NULL && ism_hatid == NULL) {
8054 7916 ism_map = ism_blkp->iblk_maps;
8055 7917 for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) {
8056 7918 if (vaddr >= ism_start(ism_map[i]) &&
8057 7919 vaddr < ism_end(ism_map[i])) {
8058 7920 sfmmup = ism_hatid = ism_map[i].imap_ismhat;
8059 7921 vaddr = (caddr_t)(vaddr -
8060 7922 ism_start(ism_map[i]));
8061 7923 break;
8062 7924 }
8063 7925 }
8064 7926 ism_blkp = ism_blkp->iblk_next;
8065 7927 }
8066 7928 if (locked_hatid) {
8067 7929 sfmmu_ismhat_exit(locked_hatid, 0);
8068 7930 }
8069 7931
8070 7932 hblktag.htag_id = sfmmup;
8071 7933 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
8072 7934 do {
8073 7935 hmeshift = HME_HASH_SHIFT(hashno);
8074 7936 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
8075 7937 hblktag.htag_rehash = hashno;
8076 7938 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift);
8077 7939
8078 7940 SFMMU_HASH_LOCK(hmebp);
8079 7941
8080 7942 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
8081 7943 if (hmeblkp != NULL) {
8082 7944 ASSERT(!hmeblkp->hblk_shared);
8083 7945 HBLKTOHME(sfhmep, hmeblkp, vaddr);
8084 7946 sfmmu_copytte(&sfhmep->hme_tte, ttep);
8085 7947 SFMMU_HASH_UNLOCK(hmebp);
8086 7948 if (TTE_IS_VALID(ttep)) {
8087 7949 pfn = TTE_TO_PFN(vaddr, ttep);
8088 7950 return (pfn);
8089 7951 }
8090 7952 break;
8091 7953 }
8092 7954 SFMMU_HASH_UNLOCK(hmebp);
8093 7955 hashno++;
8094 7956 } while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt));
8095 7957
8096 7958 if (SF_HMERGNMAP_ISNULL(sv_sfmmup)) {
8097 7959 return (PFN_INVALID);
8098 7960 }
8099 7961 srdp = sv_sfmmup->sfmmu_srdp;
8100 7962 ASSERT(srdp != NULL);
8101 7963 ASSERT(srdp->srd_refcnt != 0);
8102 7964 hblktag.htag_id = srdp;
8103 7965 hashno = 1;
8104 7966 do {
8105 7967 hmeshift = HME_HASH_SHIFT(hashno);
8106 7968 hblktag.htag_bspage = HME_HASH_BSPAGE(sv_vaddr, hmeshift);
8107 7969 hblktag.htag_rehash = hashno;
8108 7970 hmebp = HME_HASH_FUNCTION(srdp, sv_vaddr, hmeshift);
8109 7971
8110 7972 SFMMU_HASH_LOCK(hmebp);
8111 7973 for (hmeblkp = hmebp->hmeblkp; hmeblkp != NULL;
8112 7974 hmeblkp = hmeblkp->hblk_next) {
8113 7975 uint_t rid;
8114 7976 sf_region_t *rgnp;
8115 7977 caddr_t rsaddr;
8116 7978 caddr_t readdr;
8117 7979
8118 7980 if (!HTAGS_EQ_SHME(hmeblkp->hblk_tag, hblktag,
8119 7981 sv_sfmmup->sfmmu_hmeregion_map)) {
8120 7982 continue;
8121 7983 }
8122 7984 ASSERT(hmeblkp->hblk_shared);
8123 7985 rid = hmeblkp->hblk_tag.htag_rid;
8124 7986 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
8125 7987 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
8126 7988 rgnp = srdp->srd_hmergnp[rid];
8127 7989 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
8128 7990 HBLKTOHME(sfhmep, hmeblkp, sv_vaddr);
8129 7991 sfmmu_copytte(&sfhmep->hme_tte, ttep);
8130 7992 rsaddr = rgnp->rgn_saddr;
8131 7993 readdr = rsaddr + rgnp->rgn_size;
8132 7994 #ifdef DEBUG
8133 7995 if (TTE_IS_VALID(ttep) ||
8134 7996 get_hblk_ttesz(hmeblkp) > TTE8K) {
8135 7997 caddr_t eva = tte_to_evaddr(hmeblkp, ttep);
8136 7998 ASSERT(eva > sv_vaddr);
8137 7999 ASSERT(sv_vaddr >= rsaddr);
8138 8000 ASSERT(sv_vaddr < readdr);
8139 8001 ASSERT(eva <= readdr);
8140 8002 }
8141 8003 #endif /* DEBUG */
8142 8004 /*
8143 8005 * Continue the search if we
8144 8006 * found an invalid 8K tte outside of the area
8145 8007 * covered by this hmeblk's region.
8146 8008 */
8147 8009 if (TTE_IS_VALID(ttep)) {
8148 8010 SFMMU_HASH_UNLOCK(hmebp);
8149 8011 pfn = TTE_TO_PFN(sv_vaddr, ttep);
8150 8012 return (pfn);
8151 8013 } else if (get_hblk_ttesz(hmeblkp) > TTE8K ||
8152 8014 (sv_vaddr >= rsaddr && sv_vaddr < readdr)) {
8153 8015 SFMMU_HASH_UNLOCK(hmebp);
8154 8016 pfn = PFN_INVALID;
8155 8017 return (pfn);
8156 8018 }
8157 8019 }
8158 8020 SFMMU_HASH_UNLOCK(hmebp);
8159 8021 hashno++;
8160 8022 } while (hashno <= mmu_hashcnt);
8161 8023 return (PFN_INVALID);
8162 8024 }
8163 8025
8164 8026
8165 8027 /*
8166 8028  * For compatibility with AT&T and later optimizations
8167 8029 */
8168 8030 /* ARGSUSED */
8169 8031 void
8170 8032 hat_map(struct hat *hat, caddr_t addr, size_t len, uint_t flags)
8171 8033 {
8172 8034 ASSERT(hat != NULL);
8173 - ASSERT(hat->sfmmu_xhat_provider == NULL);
8174 8035 }
8175 8036
8176 8037 /*
8177 8038 * Return the number of mappings to a particular page. This number is an
8178 8039 * approximation of the number of people sharing the page.
8179 8040 *
8180 8041 * shared hmeblks or ism hmeblks are counted as 1 mapping here.
8181 8042  * hat_page_checkshare() can be used to compare a threshold against a share
8182 8043  * count that reflects the number of region sharers, albeit at higher cost.
8183 8044 */
8184 8045 ulong_t
8185 8046 hat_page_getshare(page_t *pp)
8186 8047 {
8187 8048 page_t *spp = pp; /* start page */
8188 8049 kmutex_t *pml;
8189 8050 ulong_t cnt;
8190 8051 int index, sz = TTE64K;
8191 8052
8192 8053 /*
8193 8054 * We need to grab the mlist lock to make sure any outstanding
8194 8055 * load/unloads complete. Otherwise we could return zero
8195 8056 	 * even though the unload(s) haven't finished yet.
8196 8057 */
8197 8058 pml = sfmmu_mlist_enter(spp);
8198 8059 cnt = spp->p_share;
8199 8060
8200 8061 #ifdef VAC
8201 8062 if (kpm_enable)
8202 8063 cnt += spp->p_kpmref;
8203 8064 #endif
8204 8065 if (vpm_enable && pp->p_vpmref) {
8205 8066 cnt += 1;
8206 8067 }
8207 8068
8208 8069 /*
8209 8070 * If we have any large mappings, we count the number of
8210 8071 * mappings that this large page is part of.
8211 8072 */
8212 8073 index = PP_MAPINDEX(spp);
8213 8074 index >>= 1;
8214 8075 while (index) {
8215 8076 pp = PP_GROUPLEADER(spp, sz);
8216 8077 if ((index & 0x1) && pp != spp) {
8217 8078 cnt += pp->p_share;
8218 8079 spp = pp;
8219 8080 }
8220 8081 index >>= 1;
8221 8082 sz++;
8222 8083 }
8223 8084 sfmmu_mlist_exit(pml);
8224 8085 return (cnt);
8225 8086 }
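The mapping-index walk in hat_page_getshare() above can be seen in isolation with a small user-level sketch. The bit encoding, the share_per_sz array and count_shares() below are simplified stand-ins chosen for illustration; they are assumptions, not the kernel's page_t fields or PP_MAPINDEX layout.

	#include <stdio.h>

	/*
	 * Hypothetical model: after the initial shift, bit (sz - 1) of the
	 * index means "this page is part of a mapping of size class sz",
	 * and that size class contributes its group leader's share count.
	 */
	static unsigned long
	count_shares(unsigned int mapindex, const unsigned long *share_per_sz, int nsz)
	{
		unsigned long cnt = share_per_sz[0];	/* base (8K) share count */
		int sz = 1;				/* first large size class */

		mapindex >>= 1;
		while (mapindex != 0 && sz < nsz) {
			if (mapindex & 0x1)
				cnt += share_per_sz[sz];
			mapindex >>= 1;
			sz++;
		}
		return (cnt);
	}

	int
	main(void)
	{
		/* 3 small mappings plus membership in one 64K and one 4M mapping */
		unsigned long shares[4] = { 3, 1, 0, 1 };

		printf("%lu\n", count_shares(0xA, shares, 4));	/* prints 5 */
		return (0);
	}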
8226 8087
8227 8088 /*
8228 8089 * Return 1 if the number of mappings exceeds sh_thresh. Return 0
8229 8090 * otherwise. Count shared hmeblks by region's refcnt.
8230 8091 */
8231 8092 int
8232 8093 hat_page_checkshare(page_t *pp, ulong_t sh_thresh)
8233 8094 {
8234 8095 kmutex_t *pml;
8235 8096 ulong_t cnt = 0;
8236 8097 int index, sz = TTE8K;
8237 8098 struct sf_hment *sfhme, *tmphme = NULL;
8238 8099 struct hme_blk *hmeblkp;
8239 8100
8240 8101 pml = sfmmu_mlist_enter(pp);
8241 8102
8242 8103 #ifdef VAC
8243 8104 if (kpm_enable)
8244 8105 cnt = pp->p_kpmref;
8245 8106 #endif
8246 8107
8247 8108 if (vpm_enable && pp->p_vpmref) {
8248 8109 cnt += 1;
8249 8110 }
8250 8111
8251 8112 if (pp->p_share + cnt > sh_thresh) {
8252 8113 sfmmu_mlist_exit(pml);
8253 8114 return (1);
8254 8115 }
8255 8116
8256 8117 index = PP_MAPINDEX(pp);
8257 8118
8258 8119 again:
8259 8120 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
8260 8121 tmphme = sfhme->hme_next;
8261 8122 if (IS_PAHME(sfhme)) {
8262 8123 continue;
8263 8124 }
8264 8125
8265 8126 hmeblkp = sfmmu_hmetohblk(sfhme);
8266 - if (hmeblkp->hblk_xhat_bit) {
8267 - cnt++;
8268 - if (cnt > sh_thresh) {
8269 - sfmmu_mlist_exit(pml);
8270 - return (1);
8271 - }
8272 - continue;
8273 - }
8274 8127 if (hme_size(sfhme) != sz) {
8275 8128 continue;
8276 8129 }
8277 8130
8278 8131 if (hmeblkp->hblk_shared) {
8279 8132 sf_srd_t *srdp = hblktosrd(hmeblkp);
8280 8133 uint_t rid = hmeblkp->hblk_tag.htag_rid;
8281 8134 sf_region_t *rgnp;
8282 8135 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
8283 8136 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
8284 8137 ASSERT(srdp != NULL);
8285 8138 rgnp = srdp->srd_hmergnp[rid];
8286 8139 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp,
8287 8140 rgnp, rid);
8288 8141 cnt += rgnp->rgn_refcnt;
8289 8142 } else {
8290 8143 cnt++;
8291 8144 }
8292 8145 if (cnt > sh_thresh) {
8293 8146 sfmmu_mlist_exit(pml);
8294 8147 return (1);
8295 8148 }
8296 8149 }
8297 8150
8298 8151 index >>= 1;
8299 8152 sz++;
8300 8153 while (index) {
8301 8154 pp = PP_GROUPLEADER(pp, sz);
8302 8155 ASSERT(sfmmu_mlist_held(pp));
8303 8156 if (index & 0x1) {
8304 8157 goto again;
8305 8158 }
8306 8159 index >>= 1;
8307 8160 sz++;
8308 8161 }
8309 8162 sfmmu_mlist_exit(pml);
8310 8163 return (0);
8311 8164 }
8312 8165
8313 8166 /*
8314 8167 * Unload all large mappings to the pp and reset the p_szc field of every
8315 8168 * constituent page according to the remaining mappings.
8316 8169 *
8317 8170 * pp must be locked SE_EXCL. Even though no other constituent pages are
8318 8171 * locked it's legal to unload the large mappings to the pp because all
8319 8172 * constituent pages of large locked mappings have to be locked SE_SHARED.
8320 8173 * This means if we have SE_EXCL lock on one of constituent pages none of the
8321 8174 * large mappings to pp are locked.
8322 8175 *
8323 8176 * Decrease p_szc field starting from the last constituent page and ending
8324 8177 * with the root page. This method is used because other threads rely on the
8325 8178  * root's p_szc to find the lock to synchronize on. After a root page_t's p_szc
8326 8179  * is demoted then other threads will succeed in sfmmu_mlspl_enter(). This
8327 8180  * ensures that p_szc changes of the constituent pages appear atomic for all
8328 8181  * threads that use sfmmu_mlspl_enter() to examine the p_szc field.
8329 8182 *
8330 8183 * This mechanism is only used for file system pages where it's not always
8331 8184 * possible to get SE_EXCL locks on all constituent pages to demote the size
8332 8185 * code (as is done for anonymous or kernel large pages).
8333 8186 *
8334 8187 * See more comments in front of sfmmu_mlspl_enter().
8335 8188 */
8336 8189 void
8337 8190 hat_page_demote(page_t *pp)
8338 8191 {
8339 8192 int index;
8340 8193 int sz;
8341 8194 cpuset_t cpuset;
8342 8195 int sync = 0;
8343 8196 page_t *rootpp;
8344 8197 struct sf_hment *sfhme;
8345 8198 struct sf_hment *tmphme = NULL;
8346 8199 struct hme_blk *hmeblkp;
8347 8200 uint_t pszc;
8348 8201 page_t *lastpp;
8349 8202 cpuset_t tset;
8350 8203 pgcnt_t npgs;
8351 8204 kmutex_t *pml;
8352 8205 kmutex_t *pmtx = NULL;
8353 8206
8354 8207 ASSERT(PAGE_EXCL(pp));
8355 8208 ASSERT(!PP_ISFREE(pp));
8356 8209 ASSERT(!PP_ISKAS(pp));
8357 8210 ASSERT(page_szc_lock_assert(pp));
8358 8211 pml = sfmmu_mlist_enter(pp);
8359 8212
8360 8213 pszc = pp->p_szc;
8361 8214 if (pszc == 0) {
8362 8215 goto out;
8363 8216 }
8364 8217
8365 8218 index = PP_MAPINDEX(pp) >> 1;
8366 8219
8367 8220 if (index) {
8368 8221 CPUSET_ZERO(cpuset);
8369 8222 sz = TTE64K;
8370 8223 sync = 1;
8371 8224 }
8372 8225
8373 8226 while (index) {
8374 8227 if (!(index & 0x1)) {
8375 8228 index >>= 1;
8376 8229 sz++;
8377 8230 continue;
8378 8231 }
8379 8232 ASSERT(sz <= pszc);
8380 8233 rootpp = PP_GROUPLEADER(pp, sz);
8381 8234 for (sfhme = rootpp->p_mapping; sfhme; sfhme = tmphme) {
8382 8235 tmphme = sfhme->hme_next;
8383 8236 ASSERT(!IS_PAHME(sfhme));
8384 8237 hmeblkp = sfmmu_hmetohblk(sfhme);
8385 8238 if (hme_size(sfhme) != sz) {
8386 8239 continue;
8387 8240 }
8388 - if (hmeblkp->hblk_xhat_bit) {
8389 - cmn_err(CE_PANIC,
8390 - "hat_page_demote: xhat hmeblk");
8391 - }
8392 8241 tset = sfmmu_pageunload(rootpp, sfhme, sz);
8393 8242 CPUSET_OR(cpuset, tset);
8394 8243 }
8395 8244 if (index >>= 1) {
8396 8245 sz++;
8397 8246 }
8398 8247 }
8399 8248
8400 8249 ASSERT(!PP_ISMAPPED_LARGE(pp));
8401 8250
8402 8251 if (sync) {
8403 8252 xt_sync(cpuset);
8404 8253 #ifdef VAC
8405 8254 if (PP_ISTNC(pp)) {
8406 8255 conv_tnc(rootpp, sz);
8407 8256 }
8408 8257 #endif /* VAC */
8409 8258 }
8410 8259
8411 8260 pmtx = sfmmu_page_enter(pp);
8412 8261
8413 8262 ASSERT(pp->p_szc == pszc);
8414 8263 rootpp = PP_PAGEROOT(pp);
8415 8264 ASSERT(rootpp->p_szc == pszc);
8416 8265 lastpp = PP_PAGENEXT_N(rootpp, TTEPAGES(pszc) - 1);
8417 8266
8418 8267 while (lastpp != rootpp) {
8419 8268 sz = PP_MAPINDEX(lastpp) ? fnd_mapping_sz(lastpp) : 0;
8420 8269 ASSERT(sz < pszc);
8421 8270 npgs = (sz == 0) ? 1 : TTEPAGES(sz);
8422 8271 ASSERT(P2PHASE(lastpp->p_pagenum, npgs) == npgs - 1);
8423 8272 while (--npgs > 0) {
8424 8273 lastpp->p_szc = (uchar_t)sz;
8425 8274 lastpp = PP_PAGEPREV(lastpp);
8426 8275 }
8427 8276 if (sz) {
8428 8277 /*
8429 8278 * make sure before current root's pszc
8430 8279 * is updated all updates to constituent pages pszc
8431 8280 * fields are globally visible.
8432 8281 */
8433 8282 membar_producer();
8434 8283 }
8435 8284 lastpp->p_szc = sz;
8436 8285 ASSERT(IS_P2ALIGNED(lastpp->p_pagenum, TTEPAGES(sz)));
8437 8286 if (lastpp != rootpp) {
8438 8287 lastpp = PP_PAGEPREV(lastpp);
8439 8288 }
8440 8289 }
8441 8290 if (sz == 0) {
8442 8291 /* the loop above doesn't cover this case */
8443 8292 rootpp->p_szc = 0;
8444 8293 }
8445 8294 out:
8446 8295 ASSERT(pp->p_szc == 0);
8447 8296 if (pmtx != NULL) {
8448 8297 sfmmu_page_exit(pmtx);
8449 8298 }
8450 8299 sfmmu_mlist_exit(pml);
8451 8300 }
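The ordering rule spelled out in the block comment before hat_page_demote() (write the constituent p_szc fields first, make those stores globally visible, and only then write the field other threads synchronize on) is a publish-after-prepare pattern. Below is a minimal stand-alone sketch of that pattern, using C11 atomics and invented names purely for illustration; it is not the kernel code.

	#include <stdatomic.h>
	#include <stdio.h>

	/* hypothetical stand-in: root_szc plays the role of the root page's p_szc */
	struct group {
		int		member_szc[8];
		_Atomic int	root_szc;
	};

	static void
	demote(struct group *g, int new_szc)
	{
		/* constituents first, walking back toward the root */
		for (int i = 7; i > 0; i--)
			g->member_szc[i] = new_szc;
		/*
		 * Release store: every constituent update above becomes
		 * visible before the root field changes, which is the role
		 * membar_producer() plays before the root p_szc store above.
		 */
		atomic_store_explicit(&g->root_szc, new_szc, memory_order_release);
	}

	int
	main(void)
	{
		struct group g = { { 3, 3, 3, 3, 3, 3, 3, 3 }, 3 };

		demote(&g, 0);
		printf("root szc now %d\n", atomic_load(&g.root_szc));
		return (0);
	}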
8452 8301
8453 8302 /*
8454 8303 * Refresh the HAT ismttecnt[] element for size szc.
8455 8304 * Caller must have set ISM busy flag to prevent mapping
8456 8305 * lists from changing while we're traversing them.
8457 8306 */
8458 8307 pgcnt_t
8459 8308 ism_tsb_entries(sfmmu_t *sfmmup, int szc)
8460 8309 {
8461 8310 ism_blk_t *ism_blkp = sfmmup->sfmmu_iblk;
8462 8311 ism_map_t *ism_map;
8463 8312 pgcnt_t npgs = 0;
8464 8313 pgcnt_t npgs_scd = 0;
8465 8314 int j;
8466 8315 sf_scd_t *scdp;
8467 8316 uchar_t rid;
8468 8317
8469 8318 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
8470 8319 scdp = sfmmup->sfmmu_scdp;
8471 8320
8472 8321 for (; ism_blkp != NULL; ism_blkp = ism_blkp->iblk_next) {
8473 8322 ism_map = ism_blkp->iblk_maps;
8474 8323 for (j = 0; ism_map[j].imap_ismhat && j < ISM_MAP_SLOTS; j++) {
8475 8324 rid = ism_map[j].imap_rid;
8476 8325 ASSERT(rid == SFMMU_INVALID_ISMRID ||
8477 8326 rid < sfmmup->sfmmu_srdp->srd_next_ismrid);
8478 8327
8479 8328 if (scdp != NULL && rid != SFMMU_INVALID_ISMRID &&
8480 8329 SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) {
8481 8330 /* ISM is in sfmmup's SCD */
8482 8331 npgs_scd +=
8483 8332 ism_map[j].imap_ismhat->sfmmu_ttecnt[szc];
8484 8333 } else {
8485 8334 				/* ISM is not in SCD */
8486 8335 npgs +=
8487 8336 ism_map[j].imap_ismhat->sfmmu_ttecnt[szc];
8488 8337 }
8489 8338 }
8490 8339 }
8491 8340 sfmmup->sfmmu_ismttecnt[szc] = npgs;
8492 8341 sfmmup->sfmmu_scdismttecnt[szc] = npgs_scd;
8493 8342 return (npgs);
8494 8343 }
8495 8344
8496 8345 /*
8497 8346 * Yield the memory claim requirement for an address space.
8498 8347 *
8499 8348 * This is currently implemented as the number of bytes that have active
8500 8349 * hardware translations that have page structures. Therefore, it can
8501 8350 * underestimate the traditional resident set size, eg, if the
8502 8351 * physical page is present and the hardware translation is missing;
8503 8352 * and it can overestimate the rss, eg, if there are active
8504 8353 * translations to a frame buffer with page structs.
8505 8354 * Also, it does not take sharing into account.
8506 8355 *
8507 8356 * Note that we don't acquire locks here since this function is most often
8508 8357 * called from the clock thread.
8509 8358 */
8510 8359 size_t
8511 8360 hat_get_mapped_size(struct hat *hat)
8512 8361 {
8513 8362 size_t assize = 0;
8514 8363 int i;
8515 8364
8516 8365 if (hat == NULL)
8517 8366 return (0);
8518 8367
8519 - ASSERT(hat->sfmmu_xhat_provider == NULL);
8520 -
8521 8368 for (i = 0; i < mmu_page_sizes; i++)
8522 8369 assize += ((pgcnt_t)hat->sfmmu_ttecnt[i] +
8523 8370 (pgcnt_t)hat->sfmmu_scdrttecnt[i]) * TTEBYTES(i);
8524 8371
8525 8372 if (hat->sfmmu_iblk == NULL)
8526 8373 return (assize);
8527 8374
8528 8375 for (i = 0; i < mmu_page_sizes; i++)
8529 8376 assize += ((pgcnt_t)hat->sfmmu_ismttecnt[i] +
8530 8377 (pgcnt_t)hat->sfmmu_scdismttecnt[i]) * TTEBYTES(i);
8531 8378
8532 8379 return (assize);
8533 8380 }
8534 8381
8535 8382 int
8536 8383 hat_stats_enable(struct hat *hat)
8537 8384 {
8538 8385 hatlock_t *hatlockp;
8539 8386
8540 - ASSERT(hat->sfmmu_xhat_provider == NULL);
8541 -
8542 8387 hatlockp = sfmmu_hat_enter(hat);
8543 8388 hat->sfmmu_rmstat++;
8544 8389 sfmmu_hat_exit(hatlockp);
8545 8390 return (1);
8546 8391 }
8547 8392
8548 8393 void
8549 8394 hat_stats_disable(struct hat *hat)
8550 8395 {
8551 8396 hatlock_t *hatlockp;
8552 8397
8553 - ASSERT(hat->sfmmu_xhat_provider == NULL);
8554 -
8555 8398 hatlockp = sfmmu_hat_enter(hat);
8556 8399 hat->sfmmu_rmstat--;
8557 8400 sfmmu_hat_exit(hatlockp);
8558 8401 }
8559 8402
8560 8403 /*
8561 8404 * Routines for entering or removing ourselves from the
8562 8405 * ism_hat's mapping list. This is used for both private and
8563 8406 * SCD hats.
8564 8407 */
8565 8408 static void
8566 8409 iment_add(struct ism_ment *iment, struct hat *ism_hat)
8567 8410 {
8568 8411 ASSERT(MUTEX_HELD(&ism_mlist_lock));
8569 8412
8570 8413 iment->iment_prev = NULL;
8571 8414 iment->iment_next = ism_hat->sfmmu_iment;
8572 8415 if (ism_hat->sfmmu_iment) {
8573 8416 ism_hat->sfmmu_iment->iment_prev = iment;
8574 8417 }
8575 8418 ism_hat->sfmmu_iment = iment;
8576 8419 }
8577 8420
8578 8421 static void
8579 8422 iment_sub(struct ism_ment *iment, struct hat *ism_hat)
8580 8423 {
8581 8424 ASSERT(MUTEX_HELD(&ism_mlist_lock));
8582 8425
8583 8426 if (ism_hat->sfmmu_iment == NULL) {
8584 8427 panic("ism map entry remove - no entries");
8585 8428 }
8586 8429
8587 8430 if (iment->iment_prev) {
8588 8431 ASSERT(ism_hat->sfmmu_iment != iment);
8589 8432 iment->iment_prev->iment_next = iment->iment_next;
8590 8433 } else {
8591 8434 ASSERT(ism_hat->sfmmu_iment == iment);
8592 8435 ism_hat->sfmmu_iment = iment->iment_next;
8593 8436 }
8594 8437
8595 8438 if (iment->iment_next) {
8596 8439 iment->iment_next->iment_prev = iment->iment_prev;
8597 8440 }
8598 8441
8599 8442 /*
8600 8443 * zero out the entry
8601 8444 */
8602 8445 iment->iment_next = NULL;
8603 8446 iment->iment_prev = NULL;
8604 8447 iment->iment_hat = NULL;
8605 8448 iment->iment_base_va = 0;
8606 8449 }
8607 8450
8608 8451 /*
8609 8452 * Hat_share()/unshare() return an (non-zero) error
8610 8453 * when saddr and daddr are not properly aligned.
8611 8454 *
8612 8455 * The top level mapping element determines the alignment
8613 8456 * requirement for saddr and daddr, depending on different
8614 8457 * architectures.
8615 8458 *
8616 8459 * When hat_share()/unshare() are not supported,
8617 8460 * HATOP_SHARE()/UNSHARE() return 0
8618 8461 */
8619 8462 int
8620 8463 hat_share(struct hat *sfmmup, caddr_t addr,
8621 8464 struct hat *ism_hatid, caddr_t sptaddr, size_t len, uint_t ismszc)
8622 8465 {
8623 8466 ism_blk_t *ism_blkp;
8624 8467 ism_blk_t *new_iblk;
8625 8468 ism_map_t *ism_map;
8626 8469 ism_ment_t *ism_ment;
8627 8470 int i, added;
8628 8471 hatlock_t *hatlockp;
8629 8472 int reload_mmu = 0;
8630 8473 uint_t ismshift = page_get_shift(ismszc);
8631 8474 size_t ismpgsz = page_get_pagesize(ismszc);
8632 8475 uint_t ismmask = (uint_t)ismpgsz - 1;
8633 8476 size_t sh_size = ISM_SHIFT(ismshift, len);
8634 8477 ushort_t ismhatflag;
8635 8478 hat_region_cookie_t rcookie;
8636 8479 sf_scd_t *old_scdp;
8637 8480
8638 8481 #ifdef DEBUG
8639 8482 caddr_t eaddr = addr + len;
8640 8483 #endif /* DEBUG */
8641 8484
8642 8485 ASSERT(ism_hatid != NULL && sfmmup != NULL);
8643 8486 ASSERT(sptaddr == ISMID_STARTADDR);
8644 8487 /*
8645 8488 * Check the alignment.
8646 8489 */
8647 8490 if (!ISM_ALIGNED(ismshift, addr) || !ISM_ALIGNED(ismshift, sptaddr))
8648 8491 return (EINVAL);
8649 8492
8650 8493 /*
8651 8494 * Check size alignment.
8652 8495 */
8653 8496 if (!ISM_ALIGNED(ismshift, len))
8654 8497 return (EINVAL);
8655 8498
8656 - ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
8657 -
8658 8499 /*
8659 8500 * Allocate ism_ment for the ism_hat's mapping list, and an
8660 8501 * ism map blk in case we need one. We must do our
8661 8502 * allocations before acquiring locks to prevent a deadlock
8662 8503 * in the kmem allocator on the mapping list lock.
8663 8504 */
8664 8505 new_iblk = kmem_cache_alloc(ism_blk_cache, KM_SLEEP);
8665 8506 ism_ment = kmem_cache_alloc(ism_ment_cache, KM_SLEEP);
8666 8507
8667 8508 /*
8668 8509 * Serialize ISM mappings with the ISM busy flag, and also the
8669 8510 * trap handlers.
8670 8511 */
8671 8512 sfmmu_ismhat_enter(sfmmup, 0);
8672 8513
8673 8514 /*
8674 8515 * Allocate an ism map blk if necessary.
8675 8516 */
8676 8517 if (sfmmup->sfmmu_iblk == NULL) {
8677 8518 sfmmup->sfmmu_iblk = new_iblk;
8678 8519 bzero(new_iblk, sizeof (*new_iblk));
8679 8520 new_iblk->iblk_nextpa = (uint64_t)-1;
8680 8521 membar_stst(); /* make sure next ptr visible to all CPUs */
8681 8522 sfmmup->sfmmu_ismblkpa = va_to_pa((caddr_t)new_iblk);
8682 8523 reload_mmu = 1;
8683 8524 new_iblk = NULL;
8684 8525 }
8685 8526
8686 8527 #ifdef DEBUG
8687 8528 /*
8688 8529 * Make sure mapping does not already exist.
8689 8530 */
8690 8531 ism_blkp = sfmmup->sfmmu_iblk;
8691 8532 while (ism_blkp != NULL) {
8692 8533 ism_map = ism_blkp->iblk_maps;
8693 8534 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
8694 8535 if ((addr >= ism_start(ism_map[i]) &&
8695 8536 addr < ism_end(ism_map[i])) ||
8696 8537 eaddr > ism_start(ism_map[i]) &&
8697 8538 eaddr <= ism_end(ism_map[i])) {
8698 8539 panic("sfmmu_share: Already mapped!");
8699 8540 }
8700 8541 }
8701 8542 ism_blkp = ism_blkp->iblk_next;
8702 8543 }
8703 8544 #endif /* DEBUG */
8704 8545
8705 8546 ASSERT(ismszc >= TTE4M);
8706 8547 if (ismszc == TTE4M) {
8707 8548 ismhatflag = HAT_4M_FLAG;
8708 8549 } else if (ismszc == TTE32M) {
8709 8550 ismhatflag = HAT_32M_FLAG;
8710 8551 } else if (ismszc == TTE256M) {
8711 8552 ismhatflag = HAT_256M_FLAG;
8712 8553 }
8713 8554 /*
8714 8555 * Add mapping to first available mapping slot.
8715 8556 */
8716 8557 ism_blkp = sfmmup->sfmmu_iblk;
8717 8558 added = 0;
8718 8559 while (!added) {
8719 8560 ism_map = ism_blkp->iblk_maps;
8720 8561 for (i = 0; i < ISM_MAP_SLOTS; i++) {
8721 8562 if (ism_map[i].imap_ismhat == NULL) {
8722 8563
8723 8564 ism_map[i].imap_ismhat = ism_hatid;
8724 8565 ism_map[i].imap_vb_shift = (uchar_t)ismshift;
8725 8566 ism_map[i].imap_rid = SFMMU_INVALID_ISMRID;
8726 8567 ism_map[i].imap_hatflags = ismhatflag;
8727 8568 ism_map[i].imap_sz_mask = ismmask;
8728 8569 /*
8729 8570 				 * imap_seg is checked in ISM_CHECK to see if it
8730 8571 				 * is non-NULL; if so, the other info is assumed valid.
8731 8572 */
8732 8573 membar_stst();
8733 8574 ism_map[i].imap_seg = (uintptr_t)addr | sh_size;
8734 8575 ism_map[i].imap_ment = ism_ment;
8735 8576
8736 8577 /*
8737 8578 * Now add ourselves to the ism_hat's
8738 8579 * mapping list.
8739 8580 */
8740 8581 ism_ment->iment_hat = sfmmup;
8741 8582 ism_ment->iment_base_va = addr;
8742 8583 ism_hatid->sfmmu_ismhat = 1;
8743 8584 mutex_enter(&ism_mlist_lock);
8744 8585 iment_add(ism_ment, ism_hatid);
8745 8586 mutex_exit(&ism_mlist_lock);
8746 8587 added = 1;
8747 8588 break;
8748 8589 }
8749 8590 }
8750 8591 if (!added && ism_blkp->iblk_next == NULL) {
8751 8592 ism_blkp->iblk_next = new_iblk;
8752 8593 new_iblk = NULL;
8753 8594 bzero(ism_blkp->iblk_next,
8754 8595 sizeof (*ism_blkp->iblk_next));
8755 8596 ism_blkp->iblk_next->iblk_nextpa = (uint64_t)-1;
8756 8597 membar_stst();
8757 8598 ism_blkp->iblk_nextpa =
8758 8599 va_to_pa((caddr_t)ism_blkp->iblk_next);
8759 8600 }
8760 8601 ism_blkp = ism_blkp->iblk_next;
8761 8602 }
8762 8603
8763 8604 /*
8764 8605 * After calling hat_join_region, sfmmup may join a new SCD or
8765 8606 * move from the old scd to a new scd, in which case, we want to
8766 8607 * shrink the sfmmup's private tsb size, i.e., pass shrink to
8767 8608 * sfmmu_check_page_sizes at the end of this routine.
8768 8609 */
8769 8610 old_scdp = sfmmup->sfmmu_scdp;
8770 8611
8771 8612 rcookie = hat_join_region(sfmmup, addr, len, (void *)ism_hatid, 0,
8772 8613 PROT_ALL, ismszc, NULL, HAT_REGION_ISM);
8773 8614 if (rcookie != HAT_INVALID_REGION_COOKIE) {
8774 8615 ism_map[i].imap_rid = (uchar_t)((uint64_t)rcookie);
8775 8616 }
8776 8617 /*
8777 8618 * Update our counters for this sfmmup's ism mappings.
8778 8619 */
8779 8620 for (i = 0; i <= ismszc; i++) {
8780 8621 if (!(disable_ism_large_pages & (1 << i)))
8781 8622 (void) ism_tsb_entries(sfmmup, i);
8782 8623 }
8783 8624
8784 8625 /*
8785 8626 	 * For ISM and DISM we do not support 512K pages, so we only
8786 8627 * search the 4M and 8K/64K hashes for 4 pagesize cpus, and search the
8787 8628 * 256M or 32M, and 4M and 8K/64K hashes for 6 pagesize cpus.
8788 8629 *
8789 8630 * Need to set 32M/256M ISM flags to make sure
8790 8631 * sfmmu_check_page_sizes() enables them on Panther.
8791 8632 */
8792 8633 ASSERT((disable_ism_large_pages & (1 << TTE512K)) != 0);
8793 8634
8794 8635 switch (ismszc) {
8795 8636 case TTE256M:
8796 8637 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_ISM)) {
8797 8638 hatlockp = sfmmu_hat_enter(sfmmup);
8798 8639 SFMMU_FLAGS_SET(sfmmup, HAT_256M_ISM);
8799 8640 sfmmu_hat_exit(hatlockp);
8800 8641 }
8801 8642 break;
8802 8643 case TTE32M:
8803 8644 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_ISM)) {
8804 8645 hatlockp = sfmmu_hat_enter(sfmmup);
8805 8646 SFMMU_FLAGS_SET(sfmmup, HAT_32M_ISM);
8806 8647 sfmmu_hat_exit(hatlockp);
8807 8648 }
8808 8649 break;
8809 8650 default:
8810 8651 break;
8811 8652 }
8812 8653
8813 8654 /*
8814 8655 * If we updated the ismblkpa for this HAT we must make
8815 8656 * sure all CPUs running this process reload their tsbmiss area.
8816 8657 * Otherwise they will fail to load the mappings in the tsbmiss
8817 8658 * handler and will loop calling pagefault().
8818 8659 */
8819 8660 if (reload_mmu) {
8820 8661 hatlockp = sfmmu_hat_enter(sfmmup);
8821 8662 sfmmu_sync_mmustate(sfmmup);
8822 8663 sfmmu_hat_exit(hatlockp);
8823 8664 }
8824 8665
8825 8666 sfmmu_ismhat_exit(sfmmup, 0);
8826 8667
8827 8668 /*
8828 8669 * Free up ismblk if we didn't use it.
8829 8670 */
8830 8671 if (new_iblk != NULL)
8831 8672 kmem_cache_free(ism_blk_cache, new_iblk);
8832 8673
8833 8674 /*
8834 8675 * Check TSB and TLB page sizes.
8835 8676 */
8836 8677 if (sfmmup->sfmmu_scdp != NULL && old_scdp != sfmmup->sfmmu_scdp) {
8837 8678 sfmmu_check_page_sizes(sfmmup, 0);
8838 8679 } else {
8839 8680 sfmmu_check_page_sizes(sfmmup, 1);
8840 8681 }
8841 8682 return (0);
8842 8683 }
8843 8684
8844 8685 /*
8845 8686 * hat_unshare removes exactly one ism_map from
8846 8687 * this process's as. It expects multiple calls
8847 8688 * to hat_unshare for multiple shm segments.
8848 8689 */
8849 8690 void
8850 8691 hat_unshare(struct hat *sfmmup, caddr_t addr, size_t len, uint_t ismszc)
8851 8692 {
8852 8693 ism_map_t *ism_map;
8853 8694 ism_ment_t *free_ment = NULL;
8854 8695 ism_blk_t *ism_blkp;
8855 8696 struct hat *ism_hatid;
8856 8697 int found, i;
8857 8698 hatlock_t *hatlockp;
8858 8699 struct tsb_info *tsbinfo;
8859 8700 uint_t ismshift = page_get_shift(ismszc);
8860 8701 size_t sh_size = ISM_SHIFT(ismshift, len);
8861 8702 uchar_t ism_rid;
8862 8703 sf_scd_t *old_scdp;
8863 8704
8864 8705 ASSERT(ISM_ALIGNED(ismshift, addr));
8865 8706 ASSERT(ISM_ALIGNED(ismshift, len));
8866 8707 ASSERT(sfmmup != NULL);
8867 8708 ASSERT(sfmmup != ksfmmup);
8868 8709
8869 - if (sfmmup->sfmmu_xhat_provider) {
8870 - XHAT_UNSHARE(sfmmup, addr, len);
8871 - return;
8872 - } else {
8873 - /*
8874 - * This must be a CPU HAT. If the address space has
8875 - * XHATs attached, inform all XHATs that ISM segment
8876 - * is going away
8877 - */
8878 - ASSERT(sfmmup->sfmmu_as != NULL);
8879 - if (sfmmup->sfmmu_as->a_xhat != NULL)
8880 - xhat_unshare_all(sfmmup->sfmmu_as, addr, len);
8881 - }
8710 + ASSERT(sfmmup->sfmmu_as != NULL);
8882 8711
8883 8712 /*
8884 8713 * Make sure that during the entire time ISM mappings are removed,
8885 8714 * the trap handlers serialize behind us, and that no one else
8886 8715 * can be mucking with ISM mappings. This also lets us get away
8887 8716 * with not doing expensive cross calls to flush the TLB -- we
8888 8717 * just discard the context, flush the entire TSB, and call it
8889 8718 * a day.
8890 8719 */
8891 8720 sfmmu_ismhat_enter(sfmmup, 0);
8892 8721
8893 8722 /*
8894 8723 * Remove the mapping.
8895 8724 *
8896 8725 * We can't have any holes in the ism map.
8897 8726 * The tsb miss code while searching the ism map will
8898 8727 	 * stop on an empty map slot. So if deleting an entry creates a hole,
8899 8728 	 * we must move every entry past it up by one slot.
8900 8729 *
8901 8730 * Also empty ism map blks are not freed until the
8902 8731 * process exits. This is to prevent a MT race condition
8903 8732 * between sfmmu_unshare() and sfmmu_tsbmiss_exception().
8904 8733 */
8905 8734 found = 0;
8906 8735 ism_blkp = sfmmup->sfmmu_iblk;
8907 8736 while (!found && ism_blkp != NULL) {
8908 8737 ism_map = ism_blkp->iblk_maps;
8909 8738 for (i = 0; i < ISM_MAP_SLOTS; i++) {
8910 8739 if (addr == ism_start(ism_map[i]) &&
8911 8740 sh_size == (size_t)(ism_size(ism_map[i]))) {
8912 8741 found = 1;
8913 8742 break;
8914 8743 }
8915 8744 }
8916 8745 if (!found)
8917 8746 ism_blkp = ism_blkp->iblk_next;
8918 8747 }
8919 8748
8920 8749 if (found) {
8921 8750 ism_hatid = ism_map[i].imap_ismhat;
8922 8751 ism_rid = ism_map[i].imap_rid;
8923 8752 ASSERT(ism_hatid != NULL);
8924 8753 ASSERT(ism_hatid->sfmmu_ismhat == 1);
8925 8754
8926 8755 /*
8927 8756 * After hat_leave_region, the sfmmup may leave SCD,
8928 8757 * in which case, we want to grow the private tsb size when
8929 8758 * calling sfmmu_check_page_sizes at the end of the routine.
8930 8759 */
8931 8760 old_scdp = sfmmup->sfmmu_scdp;
8932 8761 /*
8933 8762 * Then remove ourselves from the region.
8934 8763 */
8935 8764 if (ism_rid != SFMMU_INVALID_ISMRID) {
8936 8765 hat_leave_region(sfmmup, (void *)((uint64_t)ism_rid),
8937 8766 HAT_REGION_ISM);
8938 8767 }
8939 8768
8940 8769 /*
8941 8770 * And now guarantee that any other cpu
8942 8771 * that tries to process an ISM miss
8943 8772 * will go to tl=0.
8944 8773 */
8945 8774 hatlockp = sfmmu_hat_enter(sfmmup);
8946 8775 sfmmu_invalidate_ctx(sfmmup);
8947 8776 sfmmu_hat_exit(hatlockp);
8948 8777
8949 8778 /*
8950 8779 * Remove ourselves from the ism mapping list.
8951 8780 */
8952 8781 mutex_enter(&ism_mlist_lock);
8953 8782 iment_sub(ism_map[i].imap_ment, ism_hatid);
8954 8783 mutex_exit(&ism_mlist_lock);
8955 8784 free_ment = ism_map[i].imap_ment;
8956 8785
8957 8786 /*
8958 8787 * We delete the ism map by copying
8959 8788 * the next map over the current one.
8960 8789 * We will take the next one in the maps
8961 8790 * array or from the next ism_blk.
8962 8791 */
8963 8792 while (ism_blkp != NULL) {
8964 8793 ism_map = ism_blkp->iblk_maps;
8965 8794 while (i < (ISM_MAP_SLOTS - 1)) {
8966 8795 ism_map[i] = ism_map[i + 1];
8967 8796 i++;
8968 8797 }
8969 8798 /* i == (ISM_MAP_SLOTS - 1) */
8970 8799 ism_blkp = ism_blkp->iblk_next;
8971 8800 if (ism_blkp != NULL) {
8972 8801 ism_map[i] = ism_blkp->iblk_maps[0];
8973 8802 i = 0;
8974 8803 } else {
8975 8804 ism_map[i].imap_seg = 0;
8976 8805 ism_map[i].imap_vb_shift = 0;
8977 8806 ism_map[i].imap_rid = SFMMU_INVALID_ISMRID;
8978 8807 ism_map[i].imap_hatflags = 0;
8979 8808 ism_map[i].imap_sz_mask = 0;
8980 8809 ism_map[i].imap_ismhat = NULL;
8981 8810 ism_map[i].imap_ment = NULL;
8982 8811 }
8983 8812 }
8984 8813
8985 8814 /*
8986 8815 * Now flush entire TSB for the process, since
8987 8816 * demapping page by page can be too expensive.
8988 8817 * We don't have to flush the TLB here anymore
8989 8818 * since we switch to a new TLB ctx instead.
8990 8819 * Also, there is no need to flush if the process
8991 8820 * is exiting since the TSB will be freed later.
8992 8821 */
8993 8822 if (!sfmmup->sfmmu_free) {
8994 8823 hatlockp = sfmmu_hat_enter(sfmmup);
8995 8824 for (tsbinfo = sfmmup->sfmmu_tsb; tsbinfo != NULL;
8996 8825 tsbinfo = tsbinfo->tsb_next) {
8997 8826 if (tsbinfo->tsb_flags & TSB_SWAPPED)
8998 8827 continue;
8999 8828 if (tsbinfo->tsb_flags & TSB_RELOC_FLAG) {
9000 8829 tsbinfo->tsb_flags |=
9001 8830 TSB_FLUSH_NEEDED;
9002 8831 continue;
9003 8832 }
9004 8833
9005 8834 sfmmu_inv_tsb(tsbinfo->tsb_va,
9006 8835 TSB_BYTES(tsbinfo->tsb_szc));
9007 8836 }
9008 8837 sfmmu_hat_exit(hatlockp);
9009 8838 }
9010 8839 }
9011 8840
9012 8841 /*
9013 8842 * Update our counters for this sfmmup's ism mappings.
9014 8843 */
9015 8844 for (i = 0; i <= ismszc; i++) {
9016 8845 if (!(disable_ism_large_pages & (1 << i)))
9017 8846 (void) ism_tsb_entries(sfmmup, i);
9018 8847 }
9019 8848
9020 8849 sfmmu_ismhat_exit(sfmmup, 0);
9021 8850
9022 8851 /*
9023 8852 * We must do our freeing here after dropping locks
9024 8853 * to prevent a deadlock in the kmem allocator on the
9025 8854 * mapping list lock.
9026 8855 */
9027 8856 if (free_ment != NULL)
9028 8857 kmem_cache_free(ism_ment_cache, free_ment);
9029 8858
9030 8859 /*
9031 8860 * Check TSB and TLB page sizes if the process isn't exiting.
9032 8861 */
9033 8862 if (!sfmmup->sfmmu_free) {
9034 8863 if (found && old_scdp != NULL && sfmmup->sfmmu_scdp == NULL) {
9035 8864 sfmmu_check_page_sizes(sfmmup, 1);
9036 8865 } else {
9037 8866 sfmmu_check_page_sizes(sfmmup, 0);
9038 8867 }
9039 8868 }
9040 8869 }
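The "no holes" rule described inside hat_unshare() above (the TSB miss code stops at the first empty slot, so a deleted entry is filled by shifting every later entry up by one, spilling across chained blocks) can be illustrated with a small stand-alone sketch. The struct blk layout, the 0 sentinel and compact() are simplified assumptions for the demo, not the kernel's ism_map_t/ism_blk_t.

	#include <stdio.h>

	#define	SLOTS	3

	struct blk {
		int		map[SLOTS];	/* 0 plays the role of an empty imap_seg */
		struct blk	*next;
	};

	/* delete map[i] in blkp by copying every later entry one slot up */
	static void
	compact(struct blk *blkp, int i)
	{
		while (blkp != NULL) {
			while (i < SLOTS - 1) {
				blkp->map[i] = blkp->map[i + 1];
				i++;
			}
			/* last slot takes the next block's first entry, or empties out */
			if (blkp->next != NULL) {
				blkp->map[i] = blkp->next->map[0];
				blkp = blkp->next;
				i = 0;
			} else {
				blkp->map[i] = 0;
				blkp = NULL;
			}
		}
	}

	int
	main(void)
	{
		struct blk b2 = { { 4, 5, 0 }, NULL };
		struct blk b1 = { { 1, 2, 3 }, &b2 };

		compact(&b1, 1);		/* delete entry '2' */
		printf("%d %d %d | %d %d %d\n",
		    b1.map[0], b1.map[1], b1.map[2], b2.map[0], b2.map[1], b2.map[2]);
		/* prints: 1 3 4 | 5 0 0, so no hole is left behind */
		return (0);
	}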
9041 8870
9042 8871 /* ARGSUSED */
9043 8872 static int
9044 8873 sfmmu_idcache_constructor(void *buf, void *cdrarg, int kmflags)
9045 8874 {
9046 8875 /* void *buf is sfmmu_t pointer */
9047 8876 bzero(buf, sizeof (sfmmu_t));
9048 8877
9049 8878 return (0);
9050 8879 }
9051 8880
9052 8881 /* ARGSUSED */
9053 8882 static void
9054 8883 sfmmu_idcache_destructor(void *buf, void *cdrarg)
9055 8884 {
9056 8885 /* void *buf is sfmmu_t pointer */
9057 8886 }
9058 8887
9059 8888 /*
9060 8889 * setup kmem hmeblks by bzeroing all members and initializing the nextpa
9061 8890 * field to be the pa of this hmeblk
9062 8891 */
9063 8892 /* ARGSUSED */
9064 8893 static int
9065 8894 sfmmu_hblkcache_constructor(void *buf, void *cdrarg, int kmflags)
9066 8895 {
9067 8896 struct hme_blk *hmeblkp;
9068 8897
9069 8898 bzero(buf, (size_t)cdrarg);
9070 8899 hmeblkp = (struct hme_blk *)buf;
9071 8900 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp);
9072 8901
9073 8902 #ifdef HBLK_TRACE
9074 8903 mutex_init(&hmeblkp->hblk_audit_lock, NULL, MUTEX_DEFAULT, NULL);
9075 8904 #endif /* HBLK_TRACE */
9076 8905
9077 8906 return (0);
9078 8907 }
9079 8908
9080 8909 /* ARGSUSED */
9081 8910 static void
9082 8911 sfmmu_hblkcache_destructor(void *buf, void *cdrarg)
9083 8912 {
9084 8913
9085 8914 #ifdef HBLK_TRACE
9086 8915
9087 8916 struct hme_blk *hmeblkp;
9088 8917
9089 8918 hmeblkp = (struct hme_blk *)buf;
9090 8919 mutex_destroy(&hmeblkp->hblk_audit_lock);
9091 8920
9092 8921 #endif /* HBLK_TRACE */
9093 8922 }
9094 8923
9095 8924 #define SFMMU_CACHE_RECLAIM_SCAN_RATIO 8
9096 8925 static int sfmmu_cache_reclaim_scan_ratio = SFMMU_CACHE_RECLAIM_SCAN_RATIO;
9097 8926 /*
9098 8927 * The kmem allocator will callback into our reclaim routine when the system
9099 8928 * is running low in memory. We traverse the hash and free up all unused but
9100 8929 * still cached hme_blks. We also traverse the free list and free them up
9101 8930 * as well.
9102 8931 */
9103 8932 /*ARGSUSED*/
9104 8933 static void
9105 8934 sfmmu_hblkcache_reclaim(void *cdrarg)
9106 8935 {
9107 8936 int i;
9108 8937 struct hmehash_bucket *hmebp;
9109 8938 struct hme_blk *hmeblkp, *nx_hblk, *pr_hblk = NULL;
9110 8939 static struct hmehash_bucket *uhmehash_reclaim_hand;
9111 8940 static struct hmehash_bucket *khmehash_reclaim_hand;
9112 8941 struct hme_blk *list = NULL, *last_hmeblkp;
9113 8942 cpuset_t cpuset = cpu_ready_set;
9114 8943 cpu_hme_pend_t *cpuhp;
9115 8944
9116 8945 /* Free up hmeblks on the cpu pending lists */
9117 8946 for (i = 0; i < NCPU; i++) {
9118 8947 cpuhp = &cpu_hme_pend[i];
9119 8948 if (cpuhp->chp_listp != NULL) {
9120 8949 mutex_enter(&cpuhp->chp_mutex);
9121 8950 if (cpuhp->chp_listp == NULL) {
9122 8951 mutex_exit(&cpuhp->chp_mutex);
9123 8952 continue;
9124 8953 }
9125 8954 for (last_hmeblkp = cpuhp->chp_listp;
9126 8955 last_hmeblkp->hblk_next != NULL;
9127 8956 last_hmeblkp = last_hmeblkp->hblk_next)
9128 8957 ;
9129 8958 last_hmeblkp->hblk_next = list;
9130 8959 list = cpuhp->chp_listp;
9131 8960 cpuhp->chp_listp = NULL;
9132 8961 cpuhp->chp_count = 0;
9133 8962 mutex_exit(&cpuhp->chp_mutex);
9134 8963 }
9135 8964
9136 8965 }
9137 8966
9138 8967 if (list != NULL) {
9139 8968 kpreempt_disable();
9140 8969 CPUSET_DEL(cpuset, CPU->cpu_id);
9141 8970 xt_sync(cpuset);
9142 8971 xt_sync(cpuset);
9143 8972 kpreempt_enable();
9144 8973 sfmmu_hblk_free(&list);
9145 8974 list = NULL;
9146 8975 }
9147 8976
9148 8977 hmebp = uhmehash_reclaim_hand;
9149 8978 if (hmebp == NULL || hmebp > &uhme_hash[UHMEHASH_SZ])
9150 8979 uhmehash_reclaim_hand = hmebp = uhme_hash;
9151 8980 uhmehash_reclaim_hand += UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio;
9152 8981
9153 8982 for (i = UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) {
9154 8983 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) {
9155 8984 hmeblkp = hmebp->hmeblkp;
9156 8985 pr_hblk = NULL;
9157 8986 while (hmeblkp) {
9158 8987 nx_hblk = hmeblkp->hblk_next;
9159 8988 if (!hmeblkp->hblk_vcnt &&
9160 8989 !hmeblkp->hblk_hmecnt) {
9161 8990 sfmmu_hblk_hash_rm(hmebp, hmeblkp,
9162 8991 pr_hblk, &list, 0);
9163 8992 } else {
9164 8993 pr_hblk = hmeblkp;
9165 8994 }
9166 8995 hmeblkp = nx_hblk;
9167 8996 }
9168 8997 SFMMU_HASH_UNLOCK(hmebp);
9169 8998 }
9170 8999 if (hmebp++ == &uhme_hash[UHMEHASH_SZ])
9171 9000 hmebp = uhme_hash;
9172 9001 }
9173 9002
9174 9003 hmebp = khmehash_reclaim_hand;
9175 9004 if (hmebp == NULL || hmebp > &khme_hash[KHMEHASH_SZ])
9176 9005 khmehash_reclaim_hand = hmebp = khme_hash;
9177 9006 khmehash_reclaim_hand += KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio;
9178 9007
9179 9008 for (i = KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) {
9180 9009 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) {
9181 9010 hmeblkp = hmebp->hmeblkp;
9182 9011 pr_hblk = NULL;
9183 9012 while (hmeblkp) {
9184 9013 nx_hblk = hmeblkp->hblk_next;
9185 9014 if (!hmeblkp->hblk_vcnt &&
9186 9015 !hmeblkp->hblk_hmecnt) {
9187 9016 sfmmu_hblk_hash_rm(hmebp, hmeblkp,
9188 9017 pr_hblk, &list, 0);
9189 9018 } else {
9190 9019 pr_hblk = hmeblkp;
9191 9020 }
9192 9021 hmeblkp = nx_hblk;
9193 9022 }
9194 9023 SFMMU_HASH_UNLOCK(hmebp);
9195 9024 }
9196 9025 if (hmebp++ == &khme_hash[KHMEHASH_SZ])
9197 9026 hmebp = khme_hash;
9198 9027 }
9199 9028 sfmmu_hblks_list_purge(&list, 0);
9200 9029 }
9201 9030
9202 9031 /*
9203 9032 * sfmmu_get_ppvcolor should become a vm_machdep or hatop interface.
9204 9033 * same goes for sfmmu_get_addrvcolor().
9205 9034 *
9206 9035 * This function will return the virtual color for the specified page. The
9207 9036 * virtual color corresponds to this page current mapping or its last mapping.
9208 9037 * It is used by memory allocators to choose addresses with the correct
9209 9038 * alignment so vac consistency is automatically maintained. If the page
9210 9039 * has no color it returns -1.
9211 9040 */
9212 9041 /*ARGSUSED*/
9213 9042 int
9214 9043 sfmmu_get_ppvcolor(struct page *pp)
9215 9044 {
9216 9045 #ifdef VAC
9217 9046 int color;
9218 9047
9219 9048 if (!(cache & CACHE_VAC) || PP_NEWPAGE(pp)) {
9220 9049 return (-1);
9221 9050 }
9222 9051 color = PP_GET_VCOLOR(pp);
9223 9052 ASSERT(color < mmu_btop(shm_alignment));
9224 9053 return (color);
9225 9054 #else
9226 9055 return (-1);
9227 9056 #endif /* VAC */
9228 9057 }
9229 9058
9230 9059 /*
9231 9060 * This function will return the desired alignment for vac consistency
9232 9061 * (vac color) given a virtual address. If no vac is present it returns -1.
9233 9062 */
9234 9063 /*ARGSUSED*/
9235 9064 int
9236 9065 sfmmu_get_addrvcolor(caddr_t vaddr)
9237 9066 {
9238 9067 #ifdef VAC
9239 9068 if (cache & CACHE_VAC) {
9240 9069 return (addr_to_vcolor(vaddr));
9241 9070 } else {
9242 9071 return (-1);
9243 9072 }
9244 9073 #else
9245 9074 return (-1);
9246 9075 #endif /* VAC */
9247 9076 }
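For illustration, a virtual color is essentially the page's position within the VAC alias range, and two virtual mappings of the same physical page are VAC-consistent only if their colors match. Below is a hedged sketch of that computation; the page shift and color count are assumptions made for the demo, not the values the kernel derives from shm_alignment.

	#include <stdio.h>
	#include <stdint.h>

	#define	PAGESHIFT	13	/* 8K pages */
	#define	VAC_COLORS	4	/* hypothetical: vac size / page size */

	static int
	addr_to_vcolor_demo(uintptr_t vaddr)
	{
		return ((int)((vaddr >> PAGESHIFT) & (VAC_COLORS - 1)));
	}

	int
	main(void)
	{
		/* mappings at these two addresses would have the same color (0) */
		printf("%d %d\n",
		    addr_to_vcolor_demo(0x10000), addr_to_vcolor_demo(0x18000));
		return (0);
	}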
9248 9077
9249 9078 #ifdef VAC
9250 9079 /*
9251 9080 * Check for conflicts.
9252 9081  * A conflict exists if the new and existing mappings do not match in
9253 9082  * their "shm_alignment" fields. If conflicts exist, the existing mappings
9254 9083 * are flushed unless one of them is locked. If one of them is locked, then
9255 9084 * the mappings are flushed and converted to non-cacheable mappings.
9256 9085 */
9257 9086 static void
9258 9087 sfmmu_vac_conflict(struct hat *hat, caddr_t addr, page_t *pp)
9259 9088 {
9260 9089 struct hat *tmphat;
9261 9090 struct sf_hment *sfhmep, *tmphme = NULL;
9262 9091 struct hme_blk *hmeblkp;
9263 9092 int vcolor;
9264 9093 tte_t tte;
9265 9094
9266 9095 ASSERT(sfmmu_mlist_held(pp));
9267 9096 ASSERT(!PP_ISNC(pp)); /* page better be cacheable */
9268 9097
9269 9098 vcolor = addr_to_vcolor(addr);
9270 9099 if (PP_NEWPAGE(pp)) {
9271 9100 PP_SET_VCOLOR(pp, vcolor);
9272 9101 return;
9273 9102 }
9274 9103
9275 9104 if (PP_GET_VCOLOR(pp) == vcolor) {
9276 9105 return;
9277 9106 }
9278 9107
9279 9108 if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) {
9280 9109 /*
9281 9110 * Previous user of page had a different color
9282 9111 * but since there are no current users
9283 9112 * we just flush the cache and change the color.
9284 9113 */
9285 9114 SFMMU_STAT(sf_pgcolor_conflict);
9286 9115 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
9287 9116 PP_SET_VCOLOR(pp, vcolor);
9288 9117 return;
9289 9118 }
9290 9119
9291 9120 /*
9292 9121 * If we get here we have a vac conflict with a current
9293 9122 * mapping. VAC conflict policy is as follows.
9294 9123 * - The default is to unload the other mappings unless:
9295 9124 * - If we have a large mapping we uncache the page.
9296 9125 * We need to uncache the rest of the large page too.
9297 9126 * - If any of the mappings are locked we uncache the page.
9298 9127 * - If the requested mapping is inconsistent
9299 9128 * with another mapping and that mapping
9300 9129 * is in the same address space we have to
9301 9130 * make it non-cached. The default thing
9302 9131 * to do is unload the inconsistent mapping
9303 9132 * but if they are in the same address space
9304 9133 * we run the risk of unmapping the pc or the
9305 9134 * stack which we will use as we return to the user,
9306 9135 * in which case we can then fault on the thing
9307 9136 * we just unloaded and get into an infinite loop.
9308 9137 */
9309 9138 if (PP_ISMAPPED_LARGE(pp)) {
9310 9139 int sz;
9311 9140
9312 9141 /*
9313 9142 * Existing mapping is for big pages. We don't unload
9314 9143 * existing big mappings to satisfy new mappings.
9315 9144 * Always convert all mappings to TNC.
9316 9145 */
9317 9146 sz = fnd_mapping_sz(pp);
9318 9147 pp = PP_GROUPLEADER(pp, sz);
9319 9148 SFMMU_STAT_ADD(sf_uncache_conflict, TTEPAGES(sz));
9320 9149 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH,
9321 9150 TTEPAGES(sz));
9322 9151
9323 9152 return;
9324 9153 }
9325 9154
9326 9155 /*
9327 9156 * check if any mapping is in same as or if it is locked
9328 9157 * since in that case we need to uncache.
9329 9158 */
9330 9159 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
9331 9160 tmphme = sfhmep->hme_next;
9332 9161 if (IS_PAHME(sfhmep))
9333 9162 continue;
9334 9163 hmeblkp = sfmmu_hmetohblk(sfhmep);
9335 - if (hmeblkp->hblk_xhat_bit)
9336 - continue;
9337 9164 tmphat = hblktosfmmu(hmeblkp);
9338 9165 sfmmu_copytte(&sfhmep->hme_tte, &tte);
9339 9166 ASSERT(TTE_IS_VALID(&tte));
9340 9167 if (hmeblkp->hblk_shared || tmphat == hat ||
9341 9168 hmeblkp->hblk_lckcnt) {
9342 9169 /*
9343 9170 * We have an uncache conflict
9344 9171 */
9345 9172 SFMMU_STAT(sf_uncache_conflict);
9346 9173 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1);
9347 9174 return;
9348 9175 }
9349 9176 }
9350 9177
9351 9178 /*
9352 9179 * We have an unload conflict
9353 9180 * We have already checked for LARGE mappings, therefore
9354 9181 * the remaining mapping(s) must be TTE8K.
9355 9182 */
9356 9183 SFMMU_STAT(sf_unload_conflict);
9357 9184
9358 9185 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
9359 9186 tmphme = sfhmep->hme_next;
9360 9187 if (IS_PAHME(sfhmep))
9361 9188 continue;
9362 9189 hmeblkp = sfmmu_hmetohblk(sfhmep);
9363 - if (hmeblkp->hblk_xhat_bit)
9364 - continue;
9365 9190 ASSERT(!hmeblkp->hblk_shared);
9366 9191 (void) sfmmu_pageunload(pp, sfhmep, TTE8K);
9367 9192 }
9368 9193
9369 9194 if (PP_ISMAPPED_KPM(pp))
9370 9195 sfmmu_kpm_vac_unload(pp, addr);
9371 9196
9372 9197 /*
9373 9198 * Unloads only do TLB flushes so we need to flush the
9374 9199 * cache here.
9375 9200 */
9376 9201 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
9377 9202 PP_SET_VCOLOR(pp, vcolor);
9378 9203 }
9379 9204
9380 9205 /*
9381 9206 * Whenever a mapping is unloaded and the page is in TNC state,
9382 9207 * we see if the page can be made cacheable again. 'pp' is
9383 9208 * the page that we just unloaded a mapping from, the size
9384 9209 * of mapping that was unloaded is 'ottesz'.
9385 9210 * Remark:
9386 9211 * The recache policy for mpss pages can leave a performance problem
9387 9212 * under the following circumstances:
9388 9213 * . A large page in uncached mode has just been unmapped.
9389 9214 * . All constituent pages are TNC due to a conflicting small mapping.
9390 9215 * . There are many other, non conflicting, small mappings around for
9391 9216 * a lot of the constituent pages.
9392 9217 * . We're called w/ the "old" groupleader page and the old ottesz,
9393 9218 * but this is irrelevant, since we're no more "PP_ISMAPPED_LARGE", so
9394 9219 * we end up w/ TTE8K or npages == 1.
9395 9220 * . We call tst_tnc w/ the old groupleader only, and if there is no
9396 9221 * conflict, we re-cache only this page.
9397 9222 * . All other small mappings are not checked and will be left in TNC mode.
9398 9223 * The problem is not very serious because:
9399 9224 * . mpss is actually only defined for heap and stack, so the probability
9400 9225 * is not very high that a large page mapping exists in parallel to a small
9401 9226 * one (this is possible, but seems to be bad programming style in the
9402 9227 * appl).
9403 9228 * . The problem gets a little bit more serious, when those TNC pages
9404 9229 * have to be mapped into kernel space, e.g. for networking.
9405 9230 * . When VAC alias conflicts occur in applications, this is regarded
9406 9231 * as an application bug. So if kstat's show them, the appl should
9407 9232 * be changed anyway.
9408 9233 */
9409 9234 void
9410 9235 conv_tnc(page_t *pp, int ottesz)
9411 9236 {
9412 9237 int cursz, dosz;
9413 9238 pgcnt_t curnpgs, dopgs;
9414 9239 pgcnt_t pg64k;
9415 9240 page_t *pp2;
9416 9241
9417 9242 /*
9418 9243 * Determine how big a range we check for TNC and find
9419 9244 * leader page. cursz is the size of the biggest
9420 9245 	 * mapping that still exists on 'pp'.
9421 9246 */
9422 9247 if (PP_ISMAPPED_LARGE(pp)) {
9423 9248 cursz = fnd_mapping_sz(pp);
9424 9249 } else {
9425 9250 cursz = TTE8K;
9426 9251 }
9427 9252
9428 9253 if (ottesz >= cursz) {
9429 9254 dosz = ottesz;
9430 9255 pp2 = pp;
9431 9256 } else {
9432 9257 dosz = cursz;
9433 9258 pp2 = PP_GROUPLEADER(pp, dosz);
9434 9259 }
9435 9260
9436 9261 pg64k = TTEPAGES(TTE64K);
9437 9262 dopgs = TTEPAGES(dosz);
9438 9263
9439 9264 ASSERT(dopgs == 1 || ((dopgs & (pg64k - 1)) == 0));
9440 9265
9441 9266 while (dopgs != 0) {
9442 9267 curnpgs = TTEPAGES(cursz);
9443 9268 if (tst_tnc(pp2, curnpgs)) {
9444 9269 SFMMU_STAT_ADD(sf_recache, curnpgs);
9445 9270 sfmmu_page_cache_array(pp2, HAT_CACHE, CACHE_NO_FLUSH,
9446 9271 curnpgs);
9447 9272 }
9448 9273
9449 9274 ASSERT(dopgs >= curnpgs);
9450 9275 dopgs -= curnpgs;
9451 9276
9452 9277 if (dopgs == 0) {
9453 9278 break;
9454 9279 }
9455 9280
9456 9281 pp2 = PP_PAGENEXT_N(pp2, curnpgs);
9457 9282 if (((dopgs & (pg64k - 1)) == 0) && PP_ISMAPPED_LARGE(pp2)) {
9458 9283 cursz = fnd_mapping_sz(pp2);
9459 9284 } else {
9460 9285 cursz = TTE8K;
9461 9286 }
9462 9287 }
9463 9288 }
9464 9289
9465 9290 /*
9466 9291 * Returns 1 if page(s) can be converted from TNC to cacheable setting,
9467 9292 * returns 0 otherwise. Note that oaddr argument is valid for only
9468 9293 * 8k pages.
9469 9294 */
9470 9295 int
9471 9296 tst_tnc(page_t *pp, pgcnt_t npages)
9472 9297 {
9473 9298 struct sf_hment *sfhme;
9474 9299 struct hme_blk *hmeblkp;
9475 9300 tte_t tte;
9476 9301 caddr_t vaddr;
9477 9302 int clr_valid = 0;
9478 9303 int color, color1, bcolor;
9479 9304 int i, ncolors;
9480 9305
9481 9306 ASSERT(pp != NULL);
9482 9307 ASSERT(!(cache & CACHE_WRITEBACK));
9483 9308
9484 9309 if (npages > 1) {
9485 9310 ncolors = CACHE_NUM_COLOR;
9486 9311 }
9487 9312
9488 9313 for (i = 0; i < npages; i++) {
9489 9314 ASSERT(sfmmu_mlist_held(pp));
9490 9315 ASSERT(PP_ISTNC(pp));
9491 9316 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR);
9492 9317
9493 9318 if (PP_ISPNC(pp)) {
9494 9319 return (0);
9495 9320 }
9496 9321
9497 9322 clr_valid = 0;
9498 9323 if (PP_ISMAPPED_KPM(pp)) {
9499 9324 caddr_t kpmvaddr;
9500 9325
9501 9326 ASSERT(kpm_enable);
9502 9327 kpmvaddr = hat_kpm_page2va(pp, 1);
9503 9328 ASSERT(!(npages > 1 && IS_KPM_ALIAS_RANGE(kpmvaddr)));
9504 9329 color1 = addr_to_vcolor(kpmvaddr);
9505 9330 clr_valid = 1;
9506 9331 }
9507 9332
9508 9333 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) {
9509 9334 if (IS_PAHME(sfhme))
9510 9335 continue;
9511 9336 hmeblkp = sfmmu_hmetohblk(sfhme);
9512 - if (hmeblkp->hblk_xhat_bit)
9513 - continue;
9514 9337
9515 9338 sfmmu_copytte(&sfhme->hme_tte, &tte);
9516 9339 ASSERT(TTE_IS_VALID(&tte));
9517 9340
9518 9341 vaddr = tte_to_vaddr(hmeblkp, tte);
9519 9342 color = addr_to_vcolor(vaddr);
9520 9343
9521 9344 if (npages > 1) {
9522 9345 /*
9523 9346 * If there is a big mapping, make sure
9524 9347 * 8K mapping is consistent with the big
9525 9348 * mapping.
9526 9349 */
9527 9350 bcolor = i % ncolors;
9528 9351 if (color != bcolor) {
9529 9352 return (0);
9530 9353 }
9531 9354 }
9532 9355 if (!clr_valid) {
9533 9356 clr_valid = 1;
9534 9357 color1 = color;
9535 9358 }
9536 9359
9537 9360 if (color1 != color) {
9538 9361 return (0);
9539 9362 }
9540 9363 }
9541 9364
9542 9365 pp = PP_PAGENEXT(pp);
9543 9366 }
9544 9367
9545 9368 return (1);
9546 9369 }
9547 9370
9548 9371 void
9549 9372 sfmmu_page_cache_array(page_t *pp, int flags, int cache_flush_flag,
9550 9373 pgcnt_t npages)
9551 9374 {
9552 9375 kmutex_t *pmtx;
9553 9376 int i, ncolors, bcolor;
9554 9377 kpm_hlk_t *kpmp;
9555 9378 cpuset_t cpuset;
9556 9379
9557 9380 ASSERT(pp != NULL);
9558 9381 ASSERT(!(cache & CACHE_WRITEBACK));
9559 9382
9560 9383 kpmp = sfmmu_kpm_kpmp_enter(pp, npages);
9561 9384 pmtx = sfmmu_page_enter(pp);
9562 9385
9563 9386 /*
9564 9387 * Fast path caching single unmapped page
9565 9388 */
9566 9389 if (npages == 1 && !PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp) &&
9567 9390 flags == HAT_CACHE) {
9568 9391 PP_CLRTNC(pp);
9569 9392 PP_CLRPNC(pp);
9570 9393 sfmmu_page_exit(pmtx);
9571 9394 sfmmu_kpm_kpmp_exit(kpmp);
9572 9395 return;
9573 9396 }
9574 9397
9575 9398 /*
9576 9399 * We need to capture all cpus in order to change cacheability
9577 9400 * because we can't allow one cpu to access the same physical
9578 9401 	 * page using a cacheable and a non-cacheable mapping at the same
9579 9402 	 * time. Since we may end up walking the ism mapping list we
9580 9403 	 * have to grab its lock now, since we can't after all the
9581 9404 	 * cpus have been captured.
9582 9405 */
9583 9406 sfmmu_hat_lock_all();
9584 9407 mutex_enter(&ism_mlist_lock);
9585 9408 kpreempt_disable();
9586 9409 cpuset = cpu_ready_set;
9587 9410 xc_attention(cpuset);
9588 9411
9589 9412 if (npages > 1) {
9590 9413 /*
9591 9414 * Make sure all colors are flushed since the
9592 9415 			 * sfmmu_page_cache() only flushes one color;
9593 9416 			 * it does not know about big pages.
9594 9417 */
9595 9418 ncolors = CACHE_NUM_COLOR;
9596 9419 if (flags & HAT_TMPNC) {
9597 9420 for (i = 0; i < ncolors; i++) {
9598 9421 sfmmu_cache_flushcolor(i, pp->p_pagenum);
9599 9422 }
9600 9423 cache_flush_flag = CACHE_NO_FLUSH;
9601 9424 }
9602 9425 }
9603 9426
9604 9427 for (i = 0; i < npages; i++) {
9605 9428
9606 9429 ASSERT(sfmmu_mlist_held(pp));
9607 9430
9608 9431 if (!(flags == HAT_TMPNC && PP_ISTNC(pp))) {
9609 9432
9610 9433 if (npages > 1) {
9611 9434 bcolor = i % ncolors;
9612 9435 } else {
9613 9436 bcolor = NO_VCOLOR;
9614 9437 }
9615 9438
9616 9439 sfmmu_page_cache(pp, flags, cache_flush_flag,
9617 9440 bcolor);
9618 9441 }
9619 9442
9620 9443 pp = PP_PAGENEXT(pp);
9621 9444 }
9622 9445
9623 9446 xt_sync(cpuset);
9624 9447 xc_dismissed(cpuset);
9625 9448 mutex_exit(&ism_mlist_lock);
9626 9449 sfmmu_hat_unlock_all();
9627 9450 sfmmu_page_exit(pmtx);
9628 9451 sfmmu_kpm_kpmp_exit(kpmp);
9629 9452 kpreempt_enable();
9630 9453 }
9631 9454
9632 9455 /*
9633 9456 * This function changes the virtual cacheability of all mappings to a
9634 9457 * particular page. When changing from uncache to cacheable the mappings will
9635 9458 * only be changed if all of them have the same virtual color.
9636 9459  * We need to flush the cache on all cpus. It is possible that
9637 9460  * a process referenced a page as cacheable but has since exited
9638 9461  * and cleared the mapping list. We still need to flush it, but we
9639 9462  * have no state, so flushing on all cpus is the only alternative.
9640 9463 */
9641 9464 static void
9642 9465 sfmmu_page_cache(page_t *pp, int flags, int cache_flush_flag, int bcolor)
9643 9466 {
9644 9467 struct sf_hment *sfhme;
9645 9468 struct hme_blk *hmeblkp;
9646 9469 sfmmu_t *sfmmup;
9647 9470 tte_t tte, ttemod;
9648 9471 caddr_t vaddr;
9649 9472 int ret, color;
9650 9473 pfn_t pfn;
9651 9474
9652 9475 color = bcolor;
9653 9476 pfn = pp->p_pagenum;
9654 9477
9655 9478 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) {
9656 9479
9657 9480 if (IS_PAHME(sfhme))
9658 9481 continue;
9659 9482 hmeblkp = sfmmu_hmetohblk(sfhme);
9660 9483
9661 - if (hmeblkp->hblk_xhat_bit)
9662 - continue;
9663 -
9664 9484 sfmmu_copytte(&sfhme->hme_tte, &tte);
9665 9485 ASSERT(TTE_IS_VALID(&tte));
9666 9486 vaddr = tte_to_vaddr(hmeblkp, tte);
9667 9487 color = addr_to_vcolor(vaddr);
9668 9488
9669 9489 #ifdef DEBUG
9670 9490 if ((flags & HAT_CACHE) && bcolor != NO_VCOLOR) {
9671 9491 ASSERT(color == bcolor);
9672 9492 }
9673 9493 #endif
9674 9494
9675 9495 ASSERT(flags != HAT_TMPNC || color == PP_GET_VCOLOR(pp));
9676 9496
9677 9497 ttemod = tte;
9678 9498 if (flags & (HAT_UNCACHE | HAT_TMPNC)) {
9679 9499 TTE_CLR_VCACHEABLE(&ttemod);
9680 9500 } else { /* flags & HAT_CACHE */
9681 9501 TTE_SET_VCACHEABLE(&ttemod);
9682 9502 }
9683 9503 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
9684 9504 if (ret < 0) {
9685 9505 /*
9686 9506 * Since all cpus are captured modifytte should not
9687 9507 * fail.
9688 9508 */
9689 9509 panic("sfmmu_page_cache: write to tte failed");
9690 9510 }
9691 9511
9692 9512 sfmmup = hblktosfmmu(hmeblkp);
9693 9513 if (cache_flush_flag == CACHE_FLUSH) {
9694 9514 /*
9695 9515 * Flush TSBs, TLBs and caches
9696 9516 */
9697 9517 if (hmeblkp->hblk_shared) {
9698 9518 sf_srd_t *srdp = (sf_srd_t *)sfmmup;
9699 9519 uint_t rid = hmeblkp->hblk_tag.htag_rid;
9700 9520 sf_region_t *rgnp;
9701 9521 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
9702 9522 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
9703 9523 ASSERT(srdp != NULL);
9704 9524 rgnp = srdp->srd_hmergnp[rid];
9705 9525 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
9706 9526 srdp, rgnp, rid);
9707 9527 (void) sfmmu_rgntlb_demap(vaddr, rgnp,
9708 9528 hmeblkp, 0);
9709 9529 sfmmu_cache_flush(pfn, addr_to_vcolor(vaddr));
9710 9530 } else if (sfmmup->sfmmu_ismhat) {
9711 9531 if (flags & HAT_CACHE) {
9712 9532 SFMMU_STAT(sf_ism_recache);
9713 9533 } else {
9714 9534 SFMMU_STAT(sf_ism_uncache);
9715 9535 }
9716 9536 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp,
9717 9537 pfn, CACHE_FLUSH);
9718 9538 } else {
9719 9539 sfmmu_tlbcache_demap(vaddr, sfmmup, hmeblkp,
9720 9540 pfn, 0, FLUSH_ALL_CPUS, CACHE_FLUSH, 1);
9721 9541 }
9722 9542
9723 9543 /*
9724 9544 * all cache entries belonging to this pfn are
9725 9545 * now flushed.
9726 9546 */
9727 9547 cache_flush_flag = CACHE_NO_FLUSH;
9728 9548 } else {
9729 9549 /*
9730 9550 * Flush only TSBs and TLBs.
9731 9551 */
9732 9552 if (hmeblkp->hblk_shared) {
9733 9553 sf_srd_t *srdp = (sf_srd_t *)sfmmup;
9734 9554 uint_t rid = hmeblkp->hblk_tag.htag_rid;
9735 9555 sf_region_t *rgnp;
9736 9556 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
9737 9557 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
9738 9558 ASSERT(srdp != NULL);
9739 9559 rgnp = srdp->srd_hmergnp[rid];
9740 9560 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
9741 9561 srdp, rgnp, rid);
9742 9562 (void) sfmmu_rgntlb_demap(vaddr, rgnp,
9743 9563 hmeblkp, 0);
9744 9564 } else if (sfmmup->sfmmu_ismhat) {
9745 9565 if (flags & HAT_CACHE) {
9746 9566 SFMMU_STAT(sf_ism_recache);
9747 9567 } else {
9748 9568 SFMMU_STAT(sf_ism_uncache);
9749 9569 }
9750 9570 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp,
9751 9571 pfn, CACHE_NO_FLUSH);
9752 9572 } else {
9753 9573 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 1);
9754 9574 }
9755 9575 }
9756 9576 }
9757 9577
9758 9578 if (PP_ISMAPPED_KPM(pp))
9759 9579 sfmmu_kpm_page_cache(pp, flags, cache_flush_flag);
9760 9580
9761 9581 switch (flags) {
9762 9582
9763 9583 default:
9764 9584 panic("sfmmu_pagecache: unknown flags");
9765 9585 break;
9766 9586
9767 9587 case HAT_CACHE:
9768 9588 PP_CLRTNC(pp);
9769 9589 PP_CLRPNC(pp);
9770 9590 PP_SET_VCOLOR(pp, color);
9771 9591 break;
9772 9592
9773 9593 case HAT_TMPNC:
9774 9594 PP_SETTNC(pp);
9775 9595 PP_SET_VCOLOR(pp, NO_VCOLOR);
9776 9596 break;
9777 9597
9778 9598 case HAT_UNCACHE:
9779 9599 PP_SETPNC(pp);
9780 9600 PP_CLRTNC(pp);
9781 9601 PP_SET_VCOLOR(pp, NO_VCOLOR);
9782 9602 break;
9783 9603 }
9784 9604 }
9785 9605 #endif /* VAC */
9786 9606
9787 9607
9788 9608 /*
9789 9609 * Wrapper routine used to return a context.
9790 9610 *
9791 9611 * It's the responsibility of the caller to guarantee that the
9792 9612 * process serializes on calls here by taking the HAT lock for
9793 9613 * the hat.
9794 9614 *
9795 9615 */
9796 9616 static void
9797 9617 sfmmu_get_ctx(sfmmu_t *sfmmup)
9798 9618 {
9799 9619 mmu_ctx_t *mmu_ctxp;
9800 9620 uint_t pstate_save;
9801 9621 int ret;
9802 9622
9803 9623 ASSERT(sfmmu_hat_lock_held(sfmmup));
9804 9624 ASSERT(sfmmup != ksfmmup);
9805 9625
9806 9626 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID)) {
9807 9627 sfmmu_setup_tsbinfo(sfmmup);
9808 9628 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ALLCTX_INVALID);
9809 9629 }
9810 9630
9811 9631 kpreempt_disable();
9812 9632
9813 9633 mmu_ctxp = CPU_MMU_CTXP(CPU);
9814 9634 ASSERT(mmu_ctxp);
9815 9635 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms);
9816 9636 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]);
9817 9637
9818 9638 /*
9819 9639 * Do a wrap-around if cnum reaches the max # cnum supported by a MMU.
9820 9640 */
9821 9641 if (mmu_ctxp->mmu_cnum == mmu_ctxp->mmu_nctxs)
9822 9642 sfmmu_ctx_wrap_around(mmu_ctxp, B_TRUE);
9823 9643
9824 9644 /*
9825 9645 * Let the MMU set up the page sizes to use for
9826 9646 * this context in the TLB. Don't program 2nd dtlb for ism hat.
9827 9647 */
9828 9648 if ((&mmu_set_ctx_page_sizes) && (sfmmup->sfmmu_ismhat == 0)) {
9829 9649 mmu_set_ctx_page_sizes(sfmmup);
9830 9650 }
9831 9651
9832 9652 /*
9833 9653 * sfmmu_alloc_ctx and sfmmu_load_mmustate will be performed with
9834 9654 	 * interrupts disabled to prevent a race condition with wrap-around
9835 9655 	 * ctx invalidation. In sun4v, ctx invalidation also involves
9836 9656 * a HV call to set the number of TSBs to 0. If interrupts are not
9837 9657 * disabled until after sfmmu_load_mmustate is complete TSBs may
9838 9658 * become assigned to INVALID_CONTEXT. This is not allowed.
9839 9659 */
9840 9660 pstate_save = sfmmu_disable_intrs();
9841 9661
9842 9662 if (sfmmu_alloc_ctx(sfmmup, 1, CPU, SFMMU_PRIVATE) &&
9843 9663 sfmmup->sfmmu_scdp != NULL) {
9844 9664 sf_scd_t *scdp = sfmmup->sfmmu_scdp;
9845 9665 sfmmu_t *scsfmmup = scdp->scd_sfmmup;
9846 9666 ret = sfmmu_alloc_ctx(scsfmmup, 1, CPU, SFMMU_SHARED);
9847 9667 /* debug purpose only */
9848 9668 ASSERT(!ret || scsfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum
9849 9669 != INVALID_CONTEXT);
9850 9670 }
9851 9671 sfmmu_load_mmustate(sfmmup);
9852 9672
9853 9673 sfmmu_enable_intrs(pstate_save);
9854 9674
9855 9675 kpreempt_enable();
9856 9676 }
9857 9677
9858 9678 /*
9859 9679 * When all cnums are used up in a MMU, cnum will wrap around to the
9860 9680 * next generation and start from 2.
9861 9681 */
9862 9682 static void
9863 9683 sfmmu_ctx_wrap_around(mmu_ctx_t *mmu_ctxp, boolean_t reset_cnum)
9864 9684 {
9865 9685
9866 9686 /* caller must have disabled the preemption */
9867 9687 ASSERT(curthread->t_preempt >= 1);
9868 9688 ASSERT(mmu_ctxp != NULL);
9869 9689
9870 9690 /* acquire Per-MMU (PM) spin lock */
9871 9691 mutex_enter(&mmu_ctxp->mmu_lock);
9872 9692
9873 9693 /* re-check to see if wrap-around is needed */
9874 9694 if (mmu_ctxp->mmu_cnum < mmu_ctxp->mmu_nctxs)
9875 9695 goto done;
9876 9696
9877 9697 SFMMU_MMU_STAT(mmu_wrap_around);
9878 9698
9879 9699 /* update gnum */
9880 9700 ASSERT(mmu_ctxp->mmu_gnum != 0);
9881 9701 mmu_ctxp->mmu_gnum++;
9882 9702 if (mmu_ctxp->mmu_gnum == 0 ||
9883 9703 mmu_ctxp->mmu_gnum > MAX_SFMMU_GNUM_VAL) {
9884 9704 cmn_err(CE_PANIC, "mmu_gnum of mmu_ctx 0x%p is out of bound.",
9885 9705 (void *)mmu_ctxp);
9886 9706 }
9887 9707
9888 9708 if (mmu_ctxp->mmu_ncpus > 1) {
9889 9709 cpuset_t cpuset;
9890 9710
9891 9711 membar_enter(); /* make sure updated gnum visible */
9892 9712
9893 9713 SFMMU_XCALL_STATS(NULL);
9894 9714
9895 9715 /* xcall to others on the same MMU to invalidate ctx */
9896 9716 cpuset = mmu_ctxp->mmu_cpuset;
9897 9717 ASSERT(CPU_IN_SET(cpuset, CPU->cpu_id) || !reset_cnum);
9898 9718 CPUSET_DEL(cpuset, CPU->cpu_id);
9899 9719 CPUSET_AND(cpuset, cpu_ready_set);
9900 9720
9901 9721 /*
9902 9722 * Pass in INVALID_CONTEXT as the first parameter to
9903 9723 * sfmmu_raise_tsb_exception, which invalidates the context
9904 9724 * of any process running on the CPUs in the MMU.
9905 9725 */
9906 9726 xt_some(cpuset, sfmmu_raise_tsb_exception,
9907 9727 INVALID_CONTEXT, INVALID_CONTEXT);
9908 9728 xt_sync(cpuset);
9909 9729
9910 9730 SFMMU_MMU_STAT(mmu_tsb_raise_exception);
9911 9731 }
9912 9732
9913 9733 if (sfmmu_getctx_sec() != INVALID_CONTEXT) {
9914 9734 sfmmu_setctx_sec(INVALID_CONTEXT);
9915 9735 sfmmu_clear_utsbinfo();
9916 9736 }
9917 9737
9918 9738 /*
9919 9739 * No xcall is needed here. For sun4u systems all CPUs in context
9920 9740 	 * domain share a single physical MMU, therefore it's enough to flush
9921 9741 * TLB on local CPU. On sun4v systems we use 1 global context
9922 9742 * domain and flush all remote TLBs in sfmmu_raise_tsb_exception
9923 9743 * handler. Note that vtag_flushall_uctxs() is called
9924 9744 	 * for Ultra II machines, where the equivalent flushall functionality
9925 9745 * is implemented in SW, and only user ctx TLB entries are flushed.
9926 9746 */
9927 9747 if (&vtag_flushall_uctxs != NULL) {
9928 9748 vtag_flushall_uctxs();
9929 9749 } else {
9930 9750 vtag_flushall();
9931 9751 }
9932 9752
9933 9753 /* reset mmu cnum, skips cnum 0 and 1 */
9934 9754 if (reset_cnum == B_TRUE)
9935 9755 mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS;
9936 9756
9937 9757 done:
9938 9758 mutex_exit(&mmu_ctxp->mmu_lock);
9939 9759 }
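/*
 * Illustrative sketch only (not part of this change): a minimal model of
 * the generation/cnum scheme that sfmmu_ctx_wrap_around() maintains.  All
 * ex_-prefixed names are hypothetical and the 32-bit split is assumed for
 * illustration; the real code has its own field widths and also performs
 * the cross-call invalidation and TLB flush work shown above, which this
 * model omits.  When cnums run out, the generation is bumped and
 * allocation restarts above the locked cnums (0 and 1), which is what
 * "start from 2" means in the comment above.
 */
#include <stdint.h>

#define	EX_NUM_LOCKED_CTXS	2	/* cnums 0 and 1 are reserved */

struct ex_mmu_ctx {
	uint64_t gnum;			/* current generation */
	uint64_t cnum;			/* next cnum to hand out */
	uint64_t nctxs;			/* cnums supported by this MMU */
};

static uint64_t
ex_alloc_ctx(struct ex_mmu_ctx *mp)
{
	if (mp->cnum == mp->nctxs) {
		/* wrap-around: new generation, skip the locked cnums */
		mp->gnum++;
		mp->cnum = EX_NUM_LOCKED_CTXS;
	}
	return ((mp->gnum << 32) | mp->cnum++);
}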
9940 9760
9941 9761
9942 9762 /*
9943 9763 * For multi-threaded process, set the process context to INVALID_CONTEXT
9944 9764 * so that it faults and reloads the MMU state from TL=0. For single-threaded
9945 9765 * process, we can just load the MMU state directly without having to
9946 9766 * set context invalid. Caller must hold the hat lock since we don't
9947 9767 * acquire it here.
9948 9768 */
9949 9769 static void
9950 9770 sfmmu_sync_mmustate(sfmmu_t *sfmmup)
9951 9771 {
9952 9772 uint_t cnum;
9953 9773 uint_t pstate_save;
9954 9774
9955 9775 ASSERT(sfmmup != ksfmmup);
9956 9776 ASSERT(sfmmu_hat_lock_held(sfmmup));
9957 9777
9958 9778 kpreempt_disable();
9959 9779
9960 9780 /*
9961 9781 	 * We check whether the passed-in sfmmup is the same as the
9962 9782 	 * currently running proc. This is to make sure the current proc
9963 9783 * stays single-threaded if it already is.
9964 9784 */
9965 9785 if ((sfmmup == curthread->t_procp->p_as->a_hat) &&
9966 9786 (curthread->t_procp->p_lwpcnt == 1)) {
9967 9787 /* single-thread */
9968 9788 cnum = sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum;
9969 9789 if (cnum != INVALID_CONTEXT) {
9970 9790 uint_t curcnum;
9971 9791 /*
9972 9792 * Disable interrupts to prevent race condition
9973 9793 * with sfmmu_ctx_wrap_around ctx invalidation.
9974 9794 * In sun4v, ctx invalidation involves setting
9975 9795 * TSB to NULL, hence, interrupts should be disabled
9976 9796 			 * until after sfmmu_load_mmustate is completed.
9977 9797 */
9978 9798 pstate_save = sfmmu_disable_intrs();
9979 9799 curcnum = sfmmu_getctx_sec();
9980 9800 if (curcnum == cnum)
9981 9801 sfmmu_load_mmustate(sfmmup);
9982 9802 sfmmu_enable_intrs(pstate_save);
9983 9803 ASSERT(curcnum == cnum || curcnum == INVALID_CONTEXT);
9984 9804 }
9985 9805 } else {
9986 9806 /*
9987 9807 * multi-thread
9988 9808 * or when sfmmup is not the same as the curproc.
9989 9809 */
9990 9810 sfmmu_invalidate_ctx(sfmmup);
9991 9811 }
9992 9812
9993 9813 kpreempt_enable();
9994 9814 }
9995 9815
9996 9816
9997 9817 /*
9998 9818 * Replace the specified TSB with a new TSB. This function gets called when
9999 9819 * we grow, shrink or swapin a TSB. When swapping in a TSB (TSB_SWAPIN), the
10000 9820 * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB
10001 9821 * (8K).
10002 9822 *
10003 9823 * Caller must hold the HAT lock, but should assume any tsb_info
10004 9824 * pointers it has are no longer valid after calling this function.
10005 9825 *
10006 9826 * Return values:
10007 9827 * TSB_ALLOCFAIL Failed to allocate a TSB, due to memory constraints
10008 9828 * TSB_LOSTRACE HAT is busy, i.e. another thread is already doing
10009 9829 * something to this tsbinfo/TSB
10010 9830 * TSB_SUCCESS Operation succeeded
10011 9831 */
10012 9832 static tsb_replace_rc_t
10013 9833 sfmmu_replace_tsb(sfmmu_t *sfmmup, struct tsb_info *old_tsbinfo, uint_t szc,
10014 9834 hatlock_t *hatlockp, uint_t flags)
10015 9835 {
10016 9836 struct tsb_info *new_tsbinfo = NULL;
10017 9837 struct tsb_info *curtsb, *prevtsb;
10018 9838 uint_t tte_sz_mask;
10019 9839 int i;
10020 9840
10021 9841 ASSERT(sfmmup != ksfmmup);
10022 9842 ASSERT(sfmmup->sfmmu_ismhat == 0);
10023 9843 ASSERT(sfmmu_hat_lock_held(sfmmup));
10024 9844 ASSERT(szc <= tsb_max_growsize);
10025 9845
10026 9846 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_BUSY))
10027 9847 return (TSB_LOSTRACE);
10028 9848
10029 9849 /*
10030 9850 * Find the tsb_info ahead of this one in the list, and
10031 9851 * also make sure that the tsb_info passed in really
10032 9852 * exists!
10033 9853 */
10034 9854 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb;
10035 9855 curtsb != old_tsbinfo && curtsb != NULL;
10036 9856 prevtsb = curtsb, curtsb = curtsb->tsb_next)
10037 9857 ;
10038 9858 ASSERT(curtsb != NULL);
10039 9859
10040 9860 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
10041 9861 /*
10042 9862 * The process is swapped out, so just set the new size
10043 9863 * code. When it swaps back in, we'll allocate a new one
10044 9864 * of the new chosen size.
10045 9865 */
10046 9866 curtsb->tsb_szc = szc;
10047 9867 return (TSB_SUCCESS);
10048 9868 }
10049 9869 SFMMU_FLAGS_SET(sfmmup, HAT_BUSY);
10050 9870
10051 9871 tte_sz_mask = old_tsbinfo->tsb_ttesz_mask;
10052 9872
10053 9873 /*
10054 9874 * All initialization is done inside of sfmmu_tsbinfo_alloc().
10055 9875 * If we fail to allocate a TSB, exit.
10056 9876 *
10057 9877 * If tsb grows with new tsb size > 4M and old tsb size < 4M,
10058 9878 * then try 4M slab after the initial alloc fails.
10059 9879 *
10060 9880 * If tsb swapin with tsb size > 4M, then try 4M after the
10061 9881 * initial alloc fails.
10062 9882 */
10063 9883 sfmmu_hat_exit(hatlockp);
10064 9884 if (sfmmu_tsbinfo_alloc(&new_tsbinfo, szc,
10065 9885 tte_sz_mask, flags, sfmmup) &&
10066 9886 (!(flags & (TSB_GROW | TSB_SWAPIN)) || (szc <= TSB_4M_SZCODE) ||
10067 9887 (!(flags & TSB_SWAPIN) &&
10068 9888 (old_tsbinfo->tsb_szc >= TSB_4M_SZCODE)) ||
10069 9889 sfmmu_tsbinfo_alloc(&new_tsbinfo, TSB_4M_SZCODE,
10070 9890 tte_sz_mask, flags, sfmmup))) {
10071 9891 (void) sfmmu_hat_enter(sfmmup);
10072 9892 if (!(flags & TSB_SWAPIN))
10073 9893 SFMMU_STAT(sf_tsb_resize_failures);
10074 9894 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
10075 9895 return (TSB_ALLOCFAIL);
10076 9896 }
10077 9897 (void) sfmmu_hat_enter(sfmmup);
10078 9898
10079 9899 /*
10080 9900 * Re-check to make sure somebody else didn't muck with us while we
10081 9901 * didn't hold the HAT lock. If the process swapped out, fine, just
10082 9902 * exit; this can happen if we try to shrink the TSB from the context
10083 9903 * of another process (such as on an ISM unmap), though it is rare.
10084 9904 */
10085 9905 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
10086 9906 SFMMU_STAT(sf_tsb_resize_failures);
10087 9907 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
10088 9908 sfmmu_hat_exit(hatlockp);
10089 9909 sfmmu_tsbinfo_free(new_tsbinfo);
10090 9910 (void) sfmmu_hat_enter(sfmmup);
10091 9911 return (TSB_LOSTRACE);
10092 9912 }
10093 9913
10094 9914 #ifdef DEBUG
10095 9915 /* Reverify that the tsb_info still exists.. for debugging only */
10096 9916 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb;
10097 9917 curtsb != old_tsbinfo && curtsb != NULL;
10098 9918 prevtsb = curtsb, curtsb = curtsb->tsb_next)
10099 9919 ;
10100 9920 ASSERT(curtsb != NULL);
10101 9921 #endif /* DEBUG */
10102 9922
10103 9923 /*
10104 9924 * Quiesce any CPUs running this process on their next TLB miss
10105 9925 * so they atomically see the new tsb_info. We temporarily set the
10106 9926 * context to invalid context so new threads that come on processor
10107 9927 * after we do the xcall to cpusran will also serialize behind the
10108 9928 * HAT lock on TLB miss and will see the new TSB. Since this short
10109 9929 * race with a new thread coming on processor is relatively rare,
10110 9930 * this synchronization mechanism should be cheaper than always
10111 9931 * pausing all CPUs for the duration of the setup, which is what
10112 9932 	 * the old implementation did. This is particularly true if we are
10113 9933 * copying a huge chunk of memory around during that window.
10114 9934 *
10115 9935 * The memory barriers are to make sure things stay consistent
10116 9936 * with resume() since it does not hold the HAT lock while
10117 9937 * walking the list of tsb_info structures.
10118 9938 */
10119 9939 if ((flags & TSB_SWAPIN) != TSB_SWAPIN) {
10120 9940 /* The TSB is either growing or shrinking. */
10121 9941 sfmmu_invalidate_ctx(sfmmup);
10122 9942 } else {
10123 9943 /*
10124 9944 * It is illegal to swap in TSBs from a process other
10125 9945 * than a process being swapped in. This in turn
10126 9946 * implies we do not have a valid MMU context here
10127 9947 * since a process needs one to resolve translation
10128 9948 * misses.
10129 9949 */
10130 9950 ASSERT(curthread->t_procp->p_as->a_hat == sfmmup);
10131 9951 }
10132 9952
10133 9953 #ifdef DEBUG
10134 9954 ASSERT(max_mmu_ctxdoms > 0);
10135 9955
10136 9956 /*
10137 9957 * Process should have INVALID_CONTEXT on all MMUs
10138 9958 */
10139 9959 for (i = 0; i < max_mmu_ctxdoms; i++) {
10140 9960
10141 9961 ASSERT(sfmmup->sfmmu_ctxs[i].cnum == INVALID_CONTEXT);
10142 9962 }
10143 9963 #endif
10144 9964
10145 9965 new_tsbinfo->tsb_next = old_tsbinfo->tsb_next;
10146 9966 membar_stst(); /* strict ordering required */
10147 9967 if (prevtsb)
10148 9968 prevtsb->tsb_next = new_tsbinfo;
10149 9969 else
10150 9970 sfmmup->sfmmu_tsb = new_tsbinfo;
10151 9971 membar_enter(); /* make sure new TSB globally visible */
10152 9972
10153 9973 /*
10154 9974 * We need to migrate TSB entries from the old TSB to the new TSB
10155 9975 * if tsb_remap_ttes is set and the TSB is growing.
10156 9976 */
10157 9977 if (tsb_remap_ttes && ((flags & TSB_GROW) == TSB_GROW))
10158 9978 sfmmu_copy_tsb(old_tsbinfo, new_tsbinfo);
10159 9979
10160 9980 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
10161 9981
10162 9982 /*
10163 9983 * Drop the HAT lock to free our old tsb_info.
10164 9984 */
10165 9985 sfmmu_hat_exit(hatlockp);
10166 9986
10167 9987 if ((flags & TSB_GROW) == TSB_GROW) {
10168 9988 SFMMU_STAT(sf_tsb_grow);
10169 9989 } else if ((flags & TSB_SHRINK) == TSB_SHRINK) {
10170 9990 SFMMU_STAT(sf_tsb_shrink);
10171 9991 }
10172 9992
10173 9993 sfmmu_tsbinfo_free(old_tsbinfo);
10174 9994
10175 9995 (void) sfmmu_hat_enter(sfmmup);
10176 9996 return (TSB_SUCCESS);
10177 9997 }
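/*
 * Caller-shape sketch only (hypothetical fragment, not compilable on its
 * own; new_szc is an assumed size code): one way a caller holding the HAT
 * lock might consume the return codes documented above.  The important
 * points are that any cached tsb_info pointer is stale on return whatever
 * the result, and that TSB_LOSTRACE/TSB_ALLOCFAIL are not fatal -- the
 * process keeps running with its current TSB.
 */
	switch (sfmmu_replace_tsb(sfmmup, tsbinfop, new_szc, hatlockp,
	    TSB_GROW)) {
	case TSB_SUCCESS:
		tsbinfop = sfmmup->sfmmu_tsb;	/* re-read; old pointer stale */
		break;
	case TSB_LOSTRACE:
		break;				/* another thread is resizing */
	case TSB_ALLOCFAIL:
		break;				/* no memory; keep current TSB */
	}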
10178 9998
10179 9999 /*
10180 10000 * This function will re-program hat pgsz array, and invalidate the
10181 10001 * process' context, forcing the process to switch to another
10182 10002 * context on the next TLB miss, and therefore start using the
10183 10003 * TLB that is reprogrammed for the new page sizes.
10184 10004 */
10185 10005 void
10186 10006 sfmmu_reprog_pgsz_arr(sfmmu_t *sfmmup, uint8_t *tmp_pgsz)
10187 10007 {
10188 10008 int i;
10189 10009 hatlock_t *hatlockp = NULL;
10190 10010
10191 10011 hatlockp = sfmmu_hat_enter(sfmmup);
10192 10012 /* USIII+-IV+ optimization, requires hat lock */
10193 10013 if (tmp_pgsz) {
10194 10014 for (i = 0; i < mmu_page_sizes; i++)
10195 10015 sfmmup->sfmmu_pgsz[i] = tmp_pgsz[i];
10196 10016 }
10197 10017 SFMMU_STAT(sf_tlb_reprog_pgsz);
10198 10018
10199 10019 sfmmu_invalidate_ctx(sfmmup);
10200 10020
10201 10021 sfmmu_hat_exit(hatlockp);
10202 10022 }
10203 10023
10204 10024 /*
10205 10025 * The scd_rttecnt field in the SCD must be updated to take account of the
10206 10026 * regions which it contains.
10207 10027 */
10208 10028 static void
10209 10029 sfmmu_set_scd_rttecnt(sf_srd_t *srdp, sf_scd_t *scdp)
10210 10030 {
10211 10031 uint_t rid;
10212 10032 uint_t i, j;
10213 10033 ulong_t w;
10214 10034 sf_region_t *rgnp;
10215 10035
10216 10036 ASSERT(srdp != NULL);
10217 10037
10218 10038 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) {
10219 10039 if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
10220 10040 continue;
10221 10041 }
10222 10042
10223 10043 j = 0;
10224 10044 while (w) {
10225 10045 if (!(w & 0x1)) {
10226 10046 j++;
10227 10047 w >>= 1;
10228 10048 continue;
10229 10049 }
10230 10050 rid = (i << BT_ULSHIFT) | j;
10231 10051 j++;
10232 10052 w >>= 1;
10233 10053
10234 10054 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
10235 10055 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
10236 10056 rgnp = srdp->srd_hmergnp[rid];
10237 10057 ASSERT(rgnp->rgn_refcnt > 0);
10238 10058 ASSERT(rgnp->rgn_id == rid);
10239 10059
10240 10060 scdp->scd_rttecnt[rgnp->rgn_pgszc] +=
10241 10061 rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc);
10242 10062
10243 10063 /*
10244 10064 * Maintain the tsb0 inflation cnt for the regions
10245 10065 * in the SCD.
10246 10066 */
10247 10067 if (rgnp->rgn_pgszc >= TTE4M) {
10248 10068 scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt +=
10249 10069 rgnp->rgn_size >>
10250 10070 (TTE_PAGE_SHIFT(TTE8K) + 2);
10251 10071 }
10252 10072 }
10253 10073 }
10254 10074 }
10255 10075
10256 10076 /*
10257 10077 * This function assumes that there are either four or six supported page
10258 10078 * sizes and at most two programmable TLBs, so we need to decide which
10259 10079 * page sizes are most important and then tell the MMU layer so it
10260 10080 * can adjust the TLB page sizes accordingly (if supported).
10261 10081 *
10262 10082 * If these assumptions change, this function will need to be
10263 10083 * updated to support whatever the new limits are.
10264 10084 *
10265 10085 * The growing flag is nonzero if we are growing the address space,
10266 10086 * and zero if it is shrinking. This allows us to decide whether
10267 10087 * to grow or shrink our TSB, depending upon available memory
10268 10088 * conditions.
10269 10089 */
10270 10090 static void
10271 10091 sfmmu_check_page_sizes(sfmmu_t *sfmmup, int growing)
10272 10092 {
10273 10093 uint64_t ttecnt[MMU_PAGE_SIZES];
10274 10094 uint64_t tte8k_cnt, tte4m_cnt;
10275 10095 uint8_t i;
10276 10096 int sectsb_thresh;
10277 10097
10278 10098 /*
10279 10099 * Kernel threads, processes with small address spaces not using
10280 10100 * large pages, and dummy ISM HATs need not apply.
10281 10101 */
10282 10102 if (sfmmup == ksfmmup || sfmmup->sfmmu_ismhat != NULL)
10283 10103 return;
10284 10104
10285 10105 if (!SFMMU_LGPGS_INUSE(sfmmup) &&
10286 10106 sfmmup->sfmmu_ttecnt[TTE8K] <= tsb_rss_factor)
10287 10107 return;
10288 10108
10289 10109 for (i = 0; i < mmu_page_sizes; i++) {
10290 10110 ttecnt[i] = sfmmup->sfmmu_ttecnt[i] +
10291 10111 sfmmup->sfmmu_ismttecnt[i];
10292 10112 }
10293 10113
10294 10114 /* Check pagesizes in use, and possibly reprogram DTLB. */
10295 10115 if (&mmu_check_page_sizes)
10296 10116 mmu_check_page_sizes(sfmmup, ttecnt);
10297 10117
10298 10118 /*
10299 10119 * Calculate the number of 8k ttes to represent the span of these
10300 10120 * pages.
10301 10121 */
10302 10122 tte8k_cnt = ttecnt[TTE8K] +
10303 10123 (ttecnt[TTE64K] << (MMU_PAGESHIFT64K - MMU_PAGESHIFT)) +
10304 10124 (ttecnt[TTE512K] << (MMU_PAGESHIFT512K - MMU_PAGESHIFT));
10305 10125 if (mmu_page_sizes == max_mmu_page_sizes) {
10306 10126 tte4m_cnt = ttecnt[TTE4M] +
10307 10127 (ttecnt[TTE32M] << (MMU_PAGESHIFT32M - MMU_PAGESHIFT4M)) +
10308 10128 (ttecnt[TTE256M] << (MMU_PAGESHIFT256M - MMU_PAGESHIFT4M));
10309 10129 } else {
10310 10130 tte4m_cnt = ttecnt[TTE4M];
10311 10131 }
10312 10132
10313 10133 /*
10314 10134 * Inflate tte8k_cnt to allow for region large page allocation failure.
10315 10135 */
10316 10136 tte8k_cnt += sfmmup->sfmmu_tsb0_4minflcnt;
10317 10137
10318 10138 /*
10319 10139 * Inflate TSB sizes by a factor of 2 if this process
10320 10140 * uses 4M text pages to minimize extra conflict misses
10321 10141 * in the first TSB since without counting text pages
10322 10142 * 8K TSB may become too small.
10323 10143 *
10324 10144 * Also double the size of the second TSB to minimize
10325 10145 * extra conflict misses due to competition between 4M text pages
10326 10146 * and data pages.
10327 10147 *
10328 10148 * We need to adjust the second TSB allocation threshold by the
10329 10149 * inflation factor, since there is no point in creating a second
10330 10150 * TSB when we know all the mappings can fit in the I/D TLBs.
10331 10151 */
10332 10152 sectsb_thresh = tsb_sectsb_threshold;
10333 10153 if (sfmmup->sfmmu_flags & HAT_4MTEXT_FLAG) {
10334 10154 tte8k_cnt <<= 1;
10335 10155 tte4m_cnt <<= 1;
10336 10156 sectsb_thresh <<= 1;
10337 10157 }
10338 10158
10339 10159 /*
10340 10160 * Check to see if our TSB is the right size; we may need to
10341 10161 * grow or shrink it. If the process is small, our work is
10342 10162 * finished at this point.
10343 10163 */
10344 10164 if (tte8k_cnt <= tsb_rss_factor && tte4m_cnt <= sectsb_thresh) {
10345 10165 return;
10346 10166 }
10347 10167 sfmmu_size_tsb(sfmmup, growing, tte8k_cnt, tte4m_cnt, sectsb_thresh);
10348 10168 }
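/*
 * Illustrative arithmetic only (hypothetical ex_ names): the tte8k_cnt
 * computation above expresses 64K and 512K mappings in 8K-tte units by
 * shifting by the difference of the page shifts.  Assuming the usual sun4
 * values MMU_PAGESHIFT = 13 (8K), MMU_PAGESHIFT64K = 16 and
 * MMU_PAGESHIFT512K = 19, each 64K tte counts as 8 and each 512K tte as
 * 64 8K pages.
 */
#include <stdint.h>

static uint64_t
ex_tte8k_equiv(uint64_t n8k, uint64_t n64k, uint64_t n512k)
{
	return (n8k + (n64k << (16 - 13)) + (n512k << (19 - 13)));
}
/* Example: ex_tte8k_equiv(100, 10, 2) == 100 + 80 + 128 == 308. */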
10349 10169
10350 10170 static void
10351 10171 sfmmu_size_tsb(sfmmu_t *sfmmup, int growing, uint64_t tte8k_cnt,
10352 10172 uint64_t tte4m_cnt, int sectsb_thresh)
10353 10173 {
10354 10174 int tsb_bits;
10355 10175 uint_t tsb_szc;
10356 10176 struct tsb_info *tsbinfop;
10357 10177 hatlock_t *hatlockp = NULL;
10358 10178
10359 10179 hatlockp = sfmmu_hat_enter(sfmmup);
10360 10180 ASSERT(hatlockp != NULL);
10361 10181 tsbinfop = sfmmup->sfmmu_tsb;
10362 10182 ASSERT(tsbinfop != NULL);
10363 10183
10364 10184 /*
10365 10185 * If we're growing, select the size based on RSS. If we're
10366 10186 * shrinking, leave some room so we don't have to turn around and
10367 10187 * grow again immediately.
10368 10188 */
10369 10189 if (growing)
10370 10190 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt);
10371 10191 else
10372 10192 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt << 1);
10373 10193
10374 10194 if (!growing && (tsb_szc < tsbinfop->tsb_szc) &&
10375 10195 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) {
10376 10196 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc,
10377 10197 hatlockp, TSB_SHRINK);
10378 10198 } else if (growing && tsb_szc > tsbinfop->tsb_szc && TSB_OK_GROW()) {
10379 10199 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc,
10380 10200 hatlockp, TSB_GROW);
10381 10201 }
10382 10202 tsbinfop = sfmmup->sfmmu_tsb;
10383 10203
10384 10204 /*
10385 10205 * With the TLB and first TSB out of the way, we need to see if
10386 10206 * we need a second TSB for 4M pages. If we managed to reprogram
10387 10207 * the TLB page sizes above, the process will start using this new
10388 10208 * TSB right away; otherwise, it will start using it on the next
10389 10209 * context switch. Either way, it's no big deal so there's no
10390 10210 * synchronization with the trap handlers here unless we grow the
10391 10211 * TSB (in which case it's required to prevent using the old one
10392 10212 * after it's freed). Note: second tsb is required for 32M/256M
10393 10213 * page sizes.
10394 10214 */
10395 10215 if (tte4m_cnt > sectsb_thresh) {
10396 10216 /*
10397 10217 * If we're growing, select the size based on RSS. If we're
10398 10218 * shrinking, leave some room so we don't have to turn
10399 10219 * around and grow again immediately.
10400 10220 */
10401 10221 if (growing)
10402 10222 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt);
10403 10223 else
10404 10224 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt << 1);
10405 10225 if (tsbinfop->tsb_next == NULL) {
10406 10226 struct tsb_info *newtsb;
10407 10227 int allocflags = SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)?
10408 10228 0 : TSB_ALLOC;
10409 10229
10410 10230 sfmmu_hat_exit(hatlockp);
10411 10231
10412 10232 /*
10413 10233 * Try to allocate a TSB for 4[32|256]M pages. If we
10414 10234 * can't get the size we want, retry w/a minimum sized
10415 10235 * TSB. If that still didn't work, give up; we can
10416 10236 * still run without one.
10417 10237 */
10418 10238 tsb_bits = (mmu_page_sizes == max_mmu_page_sizes)?
10419 10239 TSB4M|TSB32M|TSB256M:TSB4M;
10420 10240 if ((sfmmu_tsbinfo_alloc(&newtsb, tsb_szc, tsb_bits,
10421 10241 allocflags, sfmmup)) &&
10422 10242 (tsb_szc <= TSB_4M_SZCODE ||
10423 10243 sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE,
10424 10244 tsb_bits, allocflags, sfmmup)) &&
10425 10245 sfmmu_tsbinfo_alloc(&newtsb, TSB_MIN_SZCODE,
10426 10246 tsb_bits, allocflags, sfmmup)) {
10427 10247 return;
10428 10248 }
10429 10249
10430 10250 hatlockp = sfmmu_hat_enter(sfmmup);
10431 10251
10432 10252 sfmmu_invalidate_ctx(sfmmup);
10433 10253
10434 10254 if (sfmmup->sfmmu_tsb->tsb_next == NULL) {
10435 10255 sfmmup->sfmmu_tsb->tsb_next = newtsb;
10436 10256 SFMMU_STAT(sf_tsb_sectsb_create);
10437 10257 sfmmu_hat_exit(hatlockp);
10438 10258 return;
10439 10259 } else {
10440 10260 /*
10441 10261 * It's annoying, but possible for us
10442 10262 * to get here.. we dropped the HAT lock
10443 10263 * because of locking order in the kmem
10444 10264 * allocator, and while we were off getting
10445 10265 * our memory, some other thread decided to
10446 10266 * do us a favor and won the race to get a
10447 10267 * second TSB for this process. Sigh.
10448 10268 */
10449 10269 sfmmu_hat_exit(hatlockp);
10450 10270 sfmmu_tsbinfo_free(newtsb);
10451 10271 return;
10452 10272 }
10453 10273 }
10454 10274
10455 10275 /*
10456 10276 * We have a second TSB, see if it's big enough.
10457 10277 */
10458 10278 tsbinfop = tsbinfop->tsb_next;
10459 10279
10460 10280 /*
10461 10281 * Check to see if our second TSB is the right size;
10462 10282 * we may need to grow or shrink it.
10463 10283 * To prevent thrashing (e.g. growing the TSB on a
10464 10284 * subsequent map operation), only try to shrink if
10465 10285 * the TSB reach exceeds twice the virtual address
10466 10286 * space size.
10467 10287 */
10468 10288 if (!growing && (tsb_szc < tsbinfop->tsb_szc) &&
10469 10289 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) {
10470 10290 (void) sfmmu_replace_tsb(sfmmup, tsbinfop,
10471 10291 tsb_szc, hatlockp, TSB_SHRINK);
10472 10292 } else if (growing && tsb_szc > tsbinfop->tsb_szc &&
10473 10293 TSB_OK_GROW()) {
10474 10294 (void) sfmmu_replace_tsb(sfmmup, tsbinfop,
10475 10295 tsb_szc, hatlockp, TSB_GROW);
10476 10296 }
10477 10297 }
10478 10298
10479 10299 sfmmu_hat_exit(hatlockp);
10480 10300 }
10481 10301
10482 10302 /*
10483 10303 * Free up a sfmmu
10484 10304 * Since the sfmmu is currently embedded in the hat struct we simply zero
10485 10305 * out our fields and free up the ism map blk list if any.
10486 10306 */
10487 10307 static void
10488 10308 sfmmu_free_sfmmu(sfmmu_t *sfmmup)
10489 10309 {
10490 10310 ism_blk_t *blkp, *nx_blkp;
10491 10311 #ifdef DEBUG
10492 10312 ism_map_t *map;
10493 10313 int i;
10494 10314 #endif
10495 10315
10496 10316 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
10497 10317 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0);
10498 10318 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0);
10499 10319 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
10500 10320 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
10501 10321 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
10502 10322 ASSERT(SF_RGNMAP_ISNULL(sfmmup));
10503 10323
10504 10324 sfmmup->sfmmu_free = 0;
10505 10325 sfmmup->sfmmu_ismhat = 0;
10506 10326
10507 10327 blkp = sfmmup->sfmmu_iblk;
10508 10328 sfmmup->sfmmu_iblk = NULL;
10509 10329
10510 10330 while (blkp) {
10511 10331 #ifdef DEBUG
10512 10332 map = blkp->iblk_maps;
10513 10333 for (i = 0; i < ISM_MAP_SLOTS; i++) {
10514 10334 ASSERT(map[i].imap_seg == 0);
10515 10335 ASSERT(map[i].imap_ismhat == NULL);
10516 10336 ASSERT(map[i].imap_ment == NULL);
10517 10337 }
10518 10338 #endif
10519 10339 nx_blkp = blkp->iblk_next;
10520 10340 blkp->iblk_next = NULL;
10521 10341 blkp->iblk_nextpa = (uint64_t)-1;
10522 10342 kmem_cache_free(ism_blk_cache, blkp);
10523 10343 blkp = nx_blkp;
10524 10344 }
10525 10345 }
10526 10346
10527 10347 /*
10528 10348  * Locking primitives accessed by HATLOCK macros
10529 10349 */
10530 10350
10531 10351 #define SFMMU_SPL_MTX (0x0)
10532 10352 #define SFMMU_ML_MTX (0x1)
10533 10353
10534 10354 #define SFMMU_MLSPL_MTX(type, pg) (((type) == SFMMU_SPL_MTX) ? \
10535 10355 SPL_HASH(pg) : MLIST_HASH(pg))
10536 10356
10537 10357 kmutex_t *
10538 10358 sfmmu_page_enter(struct page *pp)
10539 10359 {
10540 10360 return (sfmmu_mlspl_enter(pp, SFMMU_SPL_MTX));
10541 10361 }
10542 10362
10543 10363 void
10544 10364 sfmmu_page_exit(kmutex_t *spl)
10545 10365 {
10546 10366 mutex_exit(spl);
10547 10367 }
10548 10368
10549 10369 int
10550 10370 sfmmu_page_spl_held(struct page *pp)
10551 10371 {
10552 10372 return (sfmmu_mlspl_held(pp, SFMMU_SPL_MTX));
10553 10373 }
10554 10374
10555 10375 kmutex_t *
10556 10376 sfmmu_mlist_enter(struct page *pp)
10557 10377 {
10558 10378 return (sfmmu_mlspl_enter(pp, SFMMU_ML_MTX));
10559 10379 }
10560 10380
10561 10381 void
10562 10382 sfmmu_mlist_exit(kmutex_t *mml)
10563 10383 {
10564 10384 mutex_exit(mml);
10565 10385 }
10566 10386
10567 10387 int
10568 10388 sfmmu_mlist_held(struct page *pp)
10569 10389 {
10570 10390
10571 10391 return (sfmmu_mlspl_held(pp, SFMMU_ML_MTX));
10572 10392 }
10573 10393
10574 10394 /*
10575 10395 * Common code for sfmmu_mlist_enter() and sfmmu_page_enter(). For
10576 10396 * sfmmu_mlist_enter() case mml_table lock array is used and for
10577 10397 * sfmmu_page_enter() sfmmu_page_lock lock array is used.
10578 10398 *
10579 10399 * The lock is taken on a root page so that it protects an operation on all
10580 10400 * constituent pages of a large page pp belongs to.
10581 10401 *
10582 10402 * The routine takes a lock from the appropriate array. The lock is determined
10583 10403 * by hashing the root page. After taking the lock this routine checks if the
10584 10404 * root page has the same size code that was used to determine the root (i.e
10585 10405  * root page has the same size code that was used to determine the root (i.e.
10586 10406 * have the right lock and it's returned to the caller. If root's p_szc
10587 10407 * decreased we release the lock and retry from the beginning. This case can
10588 10408 * happen due to hat_page_demote() decreasing p_szc between our load of p_szc
10589 10409 * value and taking the lock. The number of retries due to p_szc decrease is
10590 10410 * limited by the maximum p_szc value. If p_szc is 0 we return the lock
10591 10411 * determined by hashing pp itself.
10592 10412 *
10593 10413 * If our caller doesn't hold a SE_SHARED or SE_EXCL lock on pp it's also
10594 10414 * possible that p_szc can increase. To increase p_szc a thread has to lock
10595 10415 * all constituent pages EXCL and do hat_pageunload() on all of them. All the
10596 10416 * callers that don't hold a page locked recheck if hmeblk through which pp
10597 10417 * was found still maps this pp. If it doesn't map it anymore returned lock
10598 10418 * is immediately dropped. Therefore if sfmmu_mlspl_enter() hits the case of
10599 10419 * p_szc increase after taking the lock it returns this lock without further
10600 10420 * retries because in this case the caller doesn't care about which lock was
10601 10421 * taken. The caller will drop it right away.
10602 10422 *
10603 10423 * After the routine returns it's guaranteed that hat_page_demote() can't
10604 10424 * change p_szc field of any of constituent pages of a large page pp belongs
10605 10425 * to as long as pp was either locked at least SHARED prior to this call or
10606 10426 * the caller finds that hment that pointed to this pp still references this
10607 10427 * pp (this also assumes that the caller holds hme hash bucket lock so that
10608 10428 * the same pp can't be remapped into the same hmeblk after it was unmapped by
10609 10429 * hat_pageunload()).
10610 10430 */
10611 10431 static kmutex_t *
10612 10432 sfmmu_mlspl_enter(struct page *pp, int type)
10613 10433 {
10614 10434 kmutex_t *mtx;
10615 10435 uint_t prev_rszc = UINT_MAX;
10616 10436 page_t *rootpp;
10617 10437 uint_t szc;
10618 10438 uint_t rszc;
10619 10439 uint_t pszc = pp->p_szc;
10620 10440
10621 10441 ASSERT(pp != NULL);
10622 10442
10623 10443 again:
10624 10444 if (pszc == 0) {
10625 10445 mtx = SFMMU_MLSPL_MTX(type, pp);
10626 10446 mutex_enter(mtx);
10627 10447 return (mtx);
10628 10448 }
10629 10449
10630 10450 /* The lock lives in the root page */
10631 10451 rootpp = PP_GROUPLEADER(pp, pszc);
10632 10452 mtx = SFMMU_MLSPL_MTX(type, rootpp);
10633 10453 mutex_enter(mtx);
10634 10454
10635 10455 /*
10636 10456 * Return mml in the following 3 cases:
10637 10457 *
10638 10458 * 1) If pp itself is root since if its p_szc decreased before we took
10639 10459 * the lock pp is still the root of smaller szc page. And if its p_szc
10640 10460 * increased it doesn't matter what lock we return (see comment in
10641 10461 * front of this routine).
10642 10462 *
10643 10463 * 2) If pp's not root but rootpp is the root of a rootpp->p_szc size
10644 10464 * large page we have the right lock since any previous potential
10645 10465 * hat_page_demote() is done demoting from greater than current root's
10646 10466 * p_szc because hat_page_demote() changes root's p_szc last. No
10647 10467 * further hat_page_demote() can start or be in progress since it
10648 10468 * would need the same lock we currently hold.
10649 10469 *
10650 10470 * 3) If rootpp's p_szc increased since previous iteration it doesn't
10651 10471 * matter what lock we return (see comment in front of this routine).
10652 10472 */
10653 10473 if (pp == rootpp || (rszc = rootpp->p_szc) == pszc ||
10654 10474 rszc >= prev_rszc) {
10655 10475 return (mtx);
10656 10476 }
10657 10477
10658 10478 /*
10659 10479 * hat_page_demote() could have decreased root's p_szc.
10660 10480 * In this case pp's p_szc must also be smaller than pszc.
10661 10481 * Retry.
10662 10482 */
10663 10483 if (rszc < pszc) {
10664 10484 szc = pp->p_szc;
10665 10485 if (szc < pszc) {
10666 10486 mutex_exit(mtx);
10667 10487 pszc = szc;
10668 10488 goto again;
10669 10489 }
10670 10490 /*
10671 10491 * pp's p_szc increased after it was decreased.
10672 10492 * page cannot be mapped. Return current lock. The caller
10673 10493 * will drop it right away.
10674 10494 */
10675 10495 return (mtx);
10676 10496 }
10677 10497
10678 10498 /*
10679 10499 * root's p_szc is greater than pp's p_szc.
10680 10500 * hat_page_demote() is not done with all pages
10681 10501 * yet. Wait for it to complete.
10682 10502 */
10683 10503 mutex_exit(mtx);
10684 10504 rootpp = PP_GROUPLEADER(rootpp, rszc);
10685 10505 mtx = SFMMU_MLSPL_MTX(type, rootpp);
10686 10506 mutex_enter(mtx);
10687 10507 mutex_exit(mtx);
10688 10508 prev_rszc = rszc;
10689 10509 goto again;
10690 10510 }
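/*
 * Caller-shape sketch only (hypothetical fragment, not compilable on its
 * own): the recheck described in the block comment above for callers that
 * do not hold an SE lock on pp.  After taking the mlist lock, the caller
 * verifies that the hment it used to find pp still maps it; if not, the
 * returned lock is dropped right away, as the comment allows.
 */
	pml = sfmmu_mlist_enter(pp);
	if (sfhmep->hme_page != pp) {
		/* pp was unloaded or remapped while we were unlocked */
		sfmmu_mlist_exit(pml);
		return;
	}
	/* ... operate on pp's mapping list ... */
	sfmmu_mlist_exit(pml);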
10691 10511
10692 10512 static int
10693 10513 sfmmu_mlspl_held(struct page *pp, int type)
10694 10514 {
10695 10515 kmutex_t *mtx;
10696 10516
10697 10517 ASSERT(pp != NULL);
10698 10518 /* The lock lives in the root page */
10699 10519 pp = PP_PAGEROOT(pp);
10700 10520 ASSERT(pp != NULL);
10701 10521
10702 10522 mtx = SFMMU_MLSPL_MTX(type, pp);
10703 10523 return (MUTEX_HELD(mtx));
10704 10524 }
10705 10525
10706 10526 static uint_t
10707 10527 sfmmu_get_free_hblk(struct hme_blk **hmeblkpp, uint_t critical)
10708 10528 {
10709 10529 struct hme_blk *hblkp;
10710 10530
10711 10531
10712 10532 if (freehblkp != NULL) {
10713 10533 mutex_enter(&freehblkp_lock);
10714 10534 if (freehblkp != NULL) {
10715 10535 /*
10716 10536 			 * If the current thread owns hblk_reserve OR this is a
10717 10537 			 * critical request from sfmmu_hblk_steal(), let it
10718 10538 			 * succeed even if freehblkcnt is really low.
10719 10539 */
10720 10540 if (freehblkcnt <= HBLK_RESERVE_MIN && !critical) {
10721 10541 SFMMU_STAT(sf_get_free_throttle);
10722 10542 mutex_exit(&freehblkp_lock);
10723 10543 return (0);
10724 10544 }
10725 10545 freehblkcnt--;
10726 10546 *hmeblkpp = freehblkp;
10727 10547 hblkp = *hmeblkpp;
10728 10548 freehblkp = hblkp->hblk_next;
10729 10549 mutex_exit(&freehblkp_lock);
10730 10550 hblkp->hblk_next = NULL;
10731 10551 SFMMU_STAT(sf_get_free_success);
10732 10552
10733 10553 ASSERT(hblkp->hblk_hmecnt == 0);
10734 10554 ASSERT(hblkp->hblk_vcnt == 0);
10735 10555 ASSERT(hblkp->hblk_nextpa == va_to_pa((caddr_t)hblkp));
10736 10556
10737 10557 return (1);
10738 10558 }
10739 10559 mutex_exit(&freehblkp_lock);
10740 10560 }
10741 10561
10742 10562 /* Check cpu hblk pending queues */
10743 10563 if ((*hmeblkpp = sfmmu_check_pending_hblks(TTE8K)) != NULL) {
10744 10564 hblkp = *hmeblkpp;
10745 10565 hblkp->hblk_next = NULL;
10746 10566 hblkp->hblk_nextpa = va_to_pa((caddr_t)hblkp);
10747 10567
10748 10568 ASSERT(hblkp->hblk_hmecnt == 0);
10749 10569 ASSERT(hblkp->hblk_vcnt == 0);
10750 10570
10751 10571 return (1);
10752 10572 }
10753 10573
10754 10574 SFMMU_STAT(sf_get_free_fail);
10755 10575 return (0);
10756 10576 }
10757 10577
10758 10578 static uint_t
10759 10579 sfmmu_put_free_hblk(struct hme_blk *hmeblkp, uint_t critical)
10760 10580 {
10761 10581 struct hme_blk *hblkp;
10762 10582
10763 10583 ASSERT(hmeblkp->hblk_hmecnt == 0);
10764 10584 ASSERT(hmeblkp->hblk_vcnt == 0);
10765 10585 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp));
10766 10586
10767 10587 /*
10768 10588 * If the current thread is mapping into kernel space,
10769 10589 	 * let it succeed even if freehblkcnt is max
10770 10590 * so that it will avoid freeing it to kmem.
10771 10591 * This will prevent stack overflow due to
10772 10592 * possible recursion since kmem_cache_free()
10773 10593 * might require creation of a slab which
10774 10594 * in turn needs an hmeblk to map that slab;
10775 10595 * let's break this vicious chain at the first
10776 10596 * opportunity.
10777 10597 */
10778 10598 if (freehblkcnt < HBLK_RESERVE_CNT || critical) {
10779 10599 mutex_enter(&freehblkp_lock);
10780 10600 if (freehblkcnt < HBLK_RESERVE_CNT || critical) {
10781 10601 SFMMU_STAT(sf_put_free_success);
10782 10602 freehblkcnt++;
10783 10603 hmeblkp->hblk_next = freehblkp;
10784 10604 freehblkp = hmeblkp;
10785 10605 mutex_exit(&freehblkp_lock);
10786 10606 return (1);
10787 10607 }
10788 10608 mutex_exit(&freehblkp_lock);
10789 10609 }
10790 10610
10791 10611 /*
10792 10612 * Bring down freehblkcnt to HBLK_RESERVE_CNT. We are here
10793 10613 * only if freehblkcnt is at least HBLK_RESERVE_CNT *and*
10794 10614 * we are not in the process of mapping into kernel space.
10795 10615 */
10796 10616 ASSERT(!critical);
10797 10617 while (freehblkcnt > HBLK_RESERVE_CNT) {
10798 10618 mutex_enter(&freehblkp_lock);
10799 10619 if (freehblkcnt > HBLK_RESERVE_CNT) {
10800 10620 freehblkcnt--;
10801 10621 hblkp = freehblkp;
10802 10622 freehblkp = hblkp->hblk_next;
10803 10623 mutex_exit(&freehblkp_lock);
10804 10624 ASSERT(get_hblk_cache(hblkp) == sfmmu8_cache);
10805 10625 kmem_cache_free(sfmmu8_cache, hblkp);
10806 10626 continue;
10807 10627 }
10808 10628 mutex_exit(&freehblkp_lock);
10809 10629 }
10810 10630 SFMMU_STAT(sf_put_free_fail);
10811 10631 return (0);
10812 10632 }
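/*
 * Self-contained model (hypothetical ex_ names, locking omitted) of the
 * bounded reserve list kept by sfmmu_get_free_hblk()/sfmmu_put_free_hblk()
 * above: a put past the normal limit is still accepted when the caller is
 * on a critical path (mapping kernel memory), so freeing an hmeblk never
 * has to recurse into kmem at that point.
 */
#define	EX_RESERVE_CNT	10

struct ex_blk {
	struct ex_blk *next;
};

static struct ex_blk	*ex_freelist;
static int		ex_freecnt;

static int
ex_put_free(struct ex_blk *b, int critical)
{
	if (ex_freecnt < EX_RESERVE_CNT || critical) {
		b->next = ex_freelist;
		ex_freelist = b;
		ex_freecnt++;
		return (1);	/* cached on the reserve list */
	}
	return (0);		/* list full; caller frees to kmem instead */
}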
10813 10633
10814 10634 static void
10815 10635 sfmmu_hblk_swap(struct hme_blk *new)
10816 10636 {
10817 10637 struct hme_blk *old, *hblkp, *prev;
10818 10638 uint64_t newpa;
10819 10639 caddr_t base, vaddr, endaddr;
10820 10640 struct hmehash_bucket *hmebp;
10821 10641 struct sf_hment *osfhme, *nsfhme;
10822 10642 page_t *pp;
10823 10643 kmutex_t *pml;
10824 10644 tte_t tte;
10825 10645 struct hme_blk *list = NULL;
10826 10646
10827 10647 #ifdef DEBUG
10828 10648 hmeblk_tag hblktag;
10829 10649 struct hme_blk *found;
10830 10650 #endif
10831 10651 old = HBLK_RESERVE;
10832 10652 ASSERT(!old->hblk_shared);
10833 10653
10834 10654 /*
10835 10655 * save pa before bcopy clobbers it
10836 10656 */
10837 10657 newpa = new->hblk_nextpa;
10838 10658
10839 10659 base = (caddr_t)get_hblk_base(old);
10840 10660 endaddr = base + get_hblk_span(old);
10841 10661
10842 10662 /*
10843 10663 * acquire hash bucket lock.
10844 10664 */
10845 10665 hmebp = sfmmu_tteload_acquire_hashbucket(ksfmmup, base, TTE8K,
10846 10666 SFMMU_INVALID_SHMERID);
10847 10667
10848 10668 /*
10849 10669 * copy contents from old to new
10850 10670 */
10851 10671 bcopy((void *)old, (void *)new, HME8BLK_SZ);
10852 10672
10853 10673 /*
10854 10674 * add new to hash chain
10855 10675 */
10856 10676 sfmmu_hblk_hash_add(hmebp, new, newpa);
10857 10677
10858 10678 /*
10859 10679 * search hash chain for hblk_reserve; this needs to be performed
10860 10680 * after adding new, otherwise prev won't correspond to the hblk which
10861 10681 * is prior to old in hash chain when we call sfmmu_hblk_hash_rm to
10862 10682 * remove old later.
10863 10683 */
10864 10684 for (prev = NULL,
10865 10685 hblkp = hmebp->hmeblkp; hblkp != NULL && hblkp != old;
10866 10686 prev = hblkp, hblkp = hblkp->hblk_next)
10867 10687 ;
10868 10688
10869 10689 if (hblkp != old)
10870 10690 panic("sfmmu_hblk_swap: hblk_reserve not found");
10871 10691
10872 10692 /*
10873 10693 * p_mapping list is still pointing to hments in hblk_reserve;
10874 10694 * fix up p_mapping list so that they point to hments in new.
10875 10695 *
10876 10696 * Since all these mappings are created by hblk_reserve_thread
10877 10697 * on the way and it's using at least one of the buffers from each of
10878 10698 * the newly minted slabs, there is no danger of any of these
10879 10699 * mappings getting unloaded by another thread.
10880 10700 *
10881 10701 * tsbmiss could only modify ref/mod bits of hments in old/new.
10882 10702 * Since all of these hments hold mappings established by segkmem
10883 10703 * and mappings in segkmem are setup with HAT_NOSYNC, ref/mod bits
10884 10704 * have no meaning for the mappings in hblk_reserve. hments in
10885 10705 * old and new are identical except for ref/mod bits.
10886 10706 */
10887 10707 for (vaddr = base; vaddr < endaddr; vaddr += TTEBYTES(TTE8K)) {
10888 10708
10889 10709 HBLKTOHME(osfhme, old, vaddr);
10890 10710 sfmmu_copytte(&osfhme->hme_tte, &tte);
10891 10711
10892 10712 if (TTE_IS_VALID(&tte)) {
10893 10713 if ((pp = osfhme->hme_page) == NULL)
10894 10714 panic("sfmmu_hblk_swap: page not mapped");
10895 10715
10896 10716 pml = sfmmu_mlist_enter(pp);
10897 10717
10898 10718 if (pp != osfhme->hme_page)
10899 10719 panic("sfmmu_hblk_swap: mapping changed");
10900 10720
10901 10721 HBLKTOHME(nsfhme, new, vaddr);
10902 10722
10903 10723 HME_ADD(nsfhme, pp);
10904 10724 HME_SUB(osfhme, pp);
10905 10725
10906 10726 sfmmu_mlist_exit(pml);
10907 10727 }
10908 10728 }
10909 10729
10910 10730 /*
10911 10731 * remove old from hash chain
10912 10732 */
10913 10733 sfmmu_hblk_hash_rm(hmebp, old, prev, &list, 1);
10914 10734
10915 10735 #ifdef DEBUG
10916 10736
10917 10737 hblktag.htag_id = ksfmmup;
10918 10738 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
10919 10739 hblktag.htag_bspage = HME_HASH_BSPAGE(base, HME_HASH_SHIFT(TTE8K));
10920 10740 hblktag.htag_rehash = HME_HASH_REHASH(TTE8K);
10921 10741 HME_HASH_FAST_SEARCH(hmebp, hblktag, found);
10922 10742
10923 10743 if (found != new)
10924 10744 panic("sfmmu_hblk_swap: new hblk not found");
10925 10745 #endif
10926 10746
10927 10747 SFMMU_HASH_UNLOCK(hmebp);
10928 10748
10929 10749 /*
10930 10750 * Reset hblk_reserve
10931 10751 */
10932 10752 bzero((void *)old, HME8BLK_SZ);
10933 10753 old->hblk_nextpa = va_to_pa((caddr_t)old);
10934 10754 }
10935 10755
10936 10756 /*
10937 10757 * Grab the mlist mutex for both pages passed in.
10938 10758 *
10939 10759 * low and high will be returned as pointers to the mutexes for these pages.
10940 10760 * low refers to the mutex residing in the lower bin of the mlist hash, while
10941 10761 * high refers to the mutex residing in the higher bin of the mlist hash. This
10942 10762 * is due to the locking order restrictions on the same thread grabbing
10943 10763 * multiple mlist mutexes. The low lock must be acquired before the high lock.
10944 10764 *
10945 10765 * If both pages hash to the same mutex, only grab that single mutex, and
10946 10766 * high will be returned as NULL
10947 10767 * If the pages hash to different bins in the hash, grab the lower addressed
10948 10768 * lock first and then the higher addressed lock in order to follow the locking
10949 10769 * rules involved with the same thread grabbing multiple mlist mutexes.
10950 10770 * low and high will both have non-NULL values.
10951 10771 */
10952 10772 static void
10953 10773 sfmmu_mlist_reloc_enter(struct page *targ, struct page *repl,
10954 10774 kmutex_t **low, kmutex_t **high)
10955 10775 {
10956 10776 kmutex_t *mml_targ, *mml_repl;
10957 10777
10958 10778 /*
10959 10779 * no need to do the dance around szc as in sfmmu_mlist_enter()
10960 10780 * because this routine is only called by hat_page_relocate() and all
10961 10781 * targ and repl pages are already locked EXCL so szc can't change.
10962 10782 */
10963 10783
10964 10784 mml_targ = MLIST_HASH(PP_PAGEROOT(targ));
10965 10785 mml_repl = MLIST_HASH(PP_PAGEROOT(repl));
10966 10786
10967 10787 if (mml_targ == mml_repl) {
10968 10788 *low = mml_targ;
10969 10789 *high = NULL;
10970 10790 } else {
10971 10791 if (mml_targ < mml_repl) {
10972 10792 *low = mml_targ;
10973 10793 *high = mml_repl;
10974 10794 } else {
10975 10795 *low = mml_repl;
10976 10796 *high = mml_targ;
10977 10797 }
10978 10798 }
10979 10799
10980 10800 mutex_enter(*low);
10981 10801 if (*high)
10982 10802 mutex_enter(*high);
10983 10803 }
10984 10804
10985 10805 static void
10986 10806 sfmmu_mlist_reloc_exit(kmutex_t *low, kmutex_t *high)
10987 10807 {
10988 10808 if (high)
10989 10809 mutex_exit(high);
10990 10810 mutex_exit(low);
10991 10811 }
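/*
 * Usage sketch only (hypothetical fragment; targ_pp and repl_pp are
 * assumed page pointers): the relocation path passes both pages, works
 * under the returned low/high locks, and releases them through
 * sfmmu_mlist_reloc_exit(), which copes with high == NULL when both pages
 * hashed to the same mutex.
 */
	kmutex_t *low, *high;

	sfmmu_mlist_reloc_enter(targ_pp, repl_pp, &low, &high);
	/* ... move mappings from targ_pp to repl_pp ... */
	sfmmu_mlist_reloc_exit(low, high);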
10992 10812
10993 10813 static hatlock_t *
10994 10814 sfmmu_hat_enter(sfmmu_t *sfmmup)
10995 10815 {
10996 10816 hatlock_t *hatlockp;
10997 10817
10998 10818 if (sfmmup != ksfmmup) {
10999 10819 hatlockp = TSB_HASH(sfmmup);
11000 10820 mutex_enter(HATLOCK_MUTEXP(hatlockp));
11001 10821 return (hatlockp);
11002 10822 }
11003 10823 return (NULL);
11004 10824 }
11005 10825
11006 10826 static hatlock_t *
11007 10827 sfmmu_hat_tryenter(sfmmu_t *sfmmup)
11008 10828 {
11009 10829 hatlock_t *hatlockp;
11010 10830
11011 10831 if (sfmmup != ksfmmup) {
11012 10832 hatlockp = TSB_HASH(sfmmup);
11013 10833 if (mutex_tryenter(HATLOCK_MUTEXP(hatlockp)) == 0)
11014 10834 return (NULL);
11015 10835 return (hatlockp);
11016 10836 }
11017 10837 return (NULL);
11018 10838 }
11019 10839
11020 10840 static void
11021 10841 sfmmu_hat_exit(hatlock_t *hatlockp)
11022 10842 {
11023 10843 if (hatlockp != NULL)
11024 10844 mutex_exit(HATLOCK_MUTEXP(hatlockp));
11025 10845 }
11026 10846
11027 10847 static void
11028 10848 sfmmu_hat_lock_all(void)
11029 10849 {
11030 10850 int i;
11031 10851 for (i = 0; i < SFMMU_NUM_LOCK; i++)
11032 10852 mutex_enter(HATLOCK_MUTEXP(&hat_lock[i]));
11033 10853 }
11034 10854
11035 10855 static void
11036 10856 sfmmu_hat_unlock_all(void)
11037 10857 {
11038 10858 int i;
11039 10859 for (i = SFMMU_NUM_LOCK - 1; i >= 0; i--)
11040 10860 mutex_exit(HATLOCK_MUTEXP(&hat_lock[i]));
11041 10861 }
11042 10862
11043 10863 int
11044 10864 sfmmu_hat_lock_held(sfmmu_t *sfmmup)
11045 10865 {
11046 10866 ASSERT(sfmmup != ksfmmup);
11047 10867 return (MUTEX_HELD(HATLOCK_MUTEXP(TSB_HASH(sfmmup))));
11048 10868 }
11049 10869
11050 10870 /*
11051 10871 * Locking primitives to provide consistency between ISM unmap
11052 10872 * and other operations. Since ISM unmap can take a long time, we
11053 10873 * use HAT_ISMBUSY flag (protected by the hatlock) to avoid creating
11054 10874 * contention on the hatlock buckets while ISM segments are being
11055 10875 * unmapped. The tradeoff is that the flags don't prevent priority
11056 10876 * inversion from occurring, so we must request kernel priority in
11057 10877 * case we have to sleep to keep from getting buried while holding
11058 10878 * the HAT_ISMBUSY flag set, which in turn could block other kernel
11059 10879 * threads from running (for example, in sfmmu_uvatopfn()).
11060 10880 */
11061 10881 static void
11062 10882 sfmmu_ismhat_enter(sfmmu_t *sfmmup, int hatlock_held)
11063 10883 {
11064 10884 hatlock_t *hatlockp;
11065 10885
11066 10886 THREAD_KPRI_REQUEST();
11067 10887 if (!hatlock_held)
11068 10888 hatlockp = sfmmu_hat_enter(sfmmup);
11069 10889 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY))
11070 10890 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp));
11071 10891 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY);
11072 10892 if (!hatlock_held)
11073 10893 sfmmu_hat_exit(hatlockp);
11074 10894 }
11075 10895
11076 10896 static void
11077 10897 sfmmu_ismhat_exit(sfmmu_t *sfmmup, int hatlock_held)
11078 10898 {
11079 10899 hatlock_t *hatlockp;
11080 10900
11081 10901 if (!hatlock_held)
11082 10902 hatlockp = sfmmu_hat_enter(sfmmup);
11083 10903 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
11084 10904 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY);
11085 10905 cv_broadcast(&sfmmup->sfmmu_tsb_cv);
11086 10906 if (!hatlock_held)
11087 10907 sfmmu_hat_exit(hatlockp);
11088 10908 THREAD_KPRI_RELEASE();
11089 10909 }
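/*
 * Usage sketch only (hypothetical fragment): a long-running ISM operation
 * brackets its work with the pair above so that other threads serialize on
 * the HAT_ISMBUSY flag instead of a hatlock bucket being held for the
 * whole operation.
 */
	sfmmu_ismhat_enter(sfmmup, 0);	/* sets HAT_ISMBUSY, may cv_wait */
	/* ... long-running ISM map/unmap work ... */
	sfmmu_ismhat_exit(sfmmup, 0);	/* clears the flag, wakes waiters */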
11090 10910
11091 10911 /*
11092 10912 *
11093 10913 * Algorithm:
11094 10914 *
11095 10915 * (1) if segkmem is not ready, allocate hblk from an array of pre-alloc'ed
11096 10916 * hblks.
11097 10917 *
11098 10918 * (2) if we are allocating an hblk for mapping a slab in sfmmu_cache,
11099 10919 *
11100 10920 * (a) try to return an hblk from reserve pool of free hblks;
11101 10921 * (b) if the reserve pool is empty, acquire hblk_reserve_lock
11102 10922 * and return hblk_reserve.
11103 10923 *
11104 10924 * (3) call kmem_cache_alloc() to allocate hblk;
11105 10925 *
11106 10926 * (a) if hblk_reserve_lock is held by the current thread,
11107 10927 * atomically replace hblk_reserve by the hblk that is
11108 10928 * returned by kmem_cache_alloc; release hblk_reserve_lock
11109 10929 * and call kmem_cache_alloc() again.
11110 10930 * (b) if reserve pool is not full, add the hblk that is
11111 10931 * returned by kmem_cache_alloc to reserve pool and
11112 10932 * call kmem_cache_alloc again.
11113 10933 *
11114 10934 */
11115 10935 static struct hme_blk *
11116 10936 sfmmu_hblk_alloc(sfmmu_t *sfmmup, caddr_t vaddr,
11117 10937 struct hmehash_bucket *hmebp, uint_t size, hmeblk_tag hblktag,
11118 10938 uint_t flags, uint_t rid)
11119 10939 {
11120 10940 struct hme_blk *hmeblkp = NULL;
11121 10941 struct hme_blk *newhblkp;
11122 10942 struct hme_blk *shw_hblkp = NULL;
11123 10943 struct kmem_cache *sfmmu_cache = NULL;
11124 10944 uint64_t hblkpa;
11125 10945 ulong_t index;
11126 10946 uint_t owner; /* set to 1 if using hblk_reserve */
11127 10947 uint_t forcefree;
11128 10948 int sleep;
11129 10949 sf_srd_t *srdp;
11130 10950 sf_region_t *rgnp;
11131 10951
11132 10952 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
11133 10953 ASSERT(hblktag.htag_rid == rid);
11134 10954 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size));
11135 10955 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) ||
11136 10956 IS_P2ALIGNED(vaddr, TTEBYTES(size)));
11137 10957
11138 10958 /*
11139 10959 * If segkmem is not created yet, allocate from static hmeblks
11140 10960 * created at the end of startup_modules(). See the block comment
11141 10961 * in startup_modules() describing how we estimate the number of
11142 10962 * static hmeblks that will be needed during re-map.
11143 10963 */
11144 10964 if (!hblk_alloc_dynamic) {
11145 10965
11146 10966 ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
11147 10967
11148 10968 if (size == TTE8K) {
11149 10969 index = nucleus_hblk8.index;
11150 10970 if (index >= nucleus_hblk8.len) {
11151 10971 /*
11152 10972 * If we panic here, see startup_modules() to
11153 10973 * make sure that we are calculating the
11154 10974 * number of hblk8's that we need correctly.
11155 10975 */
11156 10976 prom_panic("no nucleus hblk8 to allocate");
11157 10977 }
11158 10978 hmeblkp =
11159 10979 (struct hme_blk *)&nucleus_hblk8.list[index];
11160 10980 nucleus_hblk8.index++;
11161 10981 SFMMU_STAT(sf_hblk8_nalloc);
11162 10982 } else {
11163 10983 index = nucleus_hblk1.index;
11164 10984 if (nucleus_hblk1.index >= nucleus_hblk1.len) {
11165 10985 /*
11166 10986 * If we panic here, see startup_modules().
11167 10987 * Most likely you need to update the
11168 10988 * calculation of the number of hblk1 elements
11169 10989 * that the kernel needs to boot.
11170 10990 */
11171 10991 prom_panic("no nucleus hblk1 to allocate");
11172 10992 }
11173 10993 hmeblkp =
11174 10994 (struct hme_blk *)&nucleus_hblk1.list[index];
11175 10995 nucleus_hblk1.index++;
11176 10996 SFMMU_STAT(sf_hblk1_nalloc);
11177 10997 }
11178 10998
11179 10999 goto hblk_init;
11180 11000 }
11181 11001
11182 11002 SFMMU_HASH_UNLOCK(hmebp);
11183 11003
11184 11004 if (sfmmup != KHATID && !SFMMU_IS_SHMERID_VALID(rid)) {
11185 11005 if (mmu_page_sizes == max_mmu_page_sizes) {
11186 11006 if (size < TTE256M)
11187 11007 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr,
11188 11008 size, flags);
11189 11009 } else {
11190 11010 if (size < TTE4M)
11191 11011 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr,
11192 11012 size, flags);
11193 11013 }
11194 11014 } else if (SFMMU_IS_SHMERID_VALID(rid)) {
11195 11015 /*
11196 11016 * Shared hmes use per region bitmaps in rgn_hmeflag
11197 11017 * rather than shadow hmeblks to keep track of the
11198 11018 * mapping sizes which have been allocated for the region.
11199 11019 * Here we cleanup old invalid hmeblks with this rid,
11200 11020 * which may be left around by pageunload().
11201 11021 */
11202 11022 int ttesz;
11203 11023 caddr_t va;
11204 11024 caddr_t eva = vaddr + TTEBYTES(size);
11205 11025
11206 11026 ASSERT(sfmmup != KHATID);
11207 11027
11208 11028 srdp = sfmmup->sfmmu_srdp;
11209 11029 ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
11210 11030 rgnp = srdp->srd_hmergnp[rid];
11211 11031 ASSERT(rgnp != NULL && rgnp->rgn_id == rid);
11212 11032 ASSERT(rgnp->rgn_refcnt != 0);
11213 11033 ASSERT(size <= rgnp->rgn_pgszc);
11214 11034
11215 11035 ttesz = HBLK_MIN_TTESZ;
11216 11036 do {
11217 11037 if (!(rgnp->rgn_hmeflags & (0x1 << ttesz))) {
11218 11038 continue;
11219 11039 }
11220 11040
11221 11041 if (ttesz > size && ttesz != HBLK_MIN_TTESZ) {
11222 11042 sfmmu_cleanup_rhblk(srdp, vaddr, rid, ttesz);
11223 11043 } else if (ttesz < size) {
11224 11044 for (va = vaddr; va < eva;
11225 11045 va += TTEBYTES(ttesz)) {
11226 11046 sfmmu_cleanup_rhblk(srdp, va, rid,
11227 11047 ttesz);
11228 11048 }
11229 11049 }
11230 11050 } while (++ttesz <= rgnp->rgn_pgszc);
11231 11051 }
11232 11052
11233 11053 fill_hblk:
11234 11054 owner = (hblk_reserve_thread == curthread) ? 1 : 0;
11235 11055
11236 11056 if (owner && size == TTE8K) {
11237 11057
11238 11058 ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
11239 11059 /*
11240 11060 * We are really in a tight spot. We already own
11241 11061 * hblk_reserve and we need another hblk. In anticipation
11242 11062 * of this kind of scenario, we specifically set aside
11243 11063 * HBLK_RESERVE_MIN number of hblks to be used exclusively
11244 11064 * by owner of hblk_reserve.
11245 11065 */
11246 11066 SFMMU_STAT(sf_hblk_recurse_cnt);
11247 11067
11248 11068 if (!sfmmu_get_free_hblk(&hmeblkp, 1))
11249 11069 panic("sfmmu_hblk_alloc: reserve list is empty");
11250 11070
11251 11071 goto hblk_verify;
11252 11072 }
11253 11073
11254 11074 ASSERT(!owner);
11255 11075
11256 11076 if ((flags & HAT_NO_KALLOC) == 0) {
11257 11077
11258 11078 sfmmu_cache = ((size == TTE8K) ? sfmmu8_cache : sfmmu1_cache);
11259 11079 sleep = ((sfmmup == KHATID) ? KM_NOSLEEP : KM_SLEEP);
11260 11080
11261 11081 if ((hmeblkp = kmem_cache_alloc(sfmmu_cache, sleep)) == NULL) {
11262 11082 hmeblkp = sfmmu_hblk_steal(size);
11263 11083 } else {
11264 11084 /*
11265 11085 * if we are the owner of hblk_reserve,
11266 11086 * swap hblk_reserve with hmeblkp and
11267 11087 * start a fresh life. Hope things go
11268 11088 * better this time.
11269 11089 */
11270 11090 if (hblk_reserve_thread == curthread) {
11271 11091 ASSERT(sfmmu_cache == sfmmu8_cache);
11272 11092 sfmmu_hblk_swap(hmeblkp);
11273 11093 hblk_reserve_thread = NULL;
11274 11094 mutex_exit(&hblk_reserve_lock);
11275 11095 goto fill_hblk;
11276 11096 }
11277 11097 /*
11278 11098 * let's donate this hblk to our reserve list if
11279 11099 * we are not mapping kernel range
11280 11100 */
11281 11101 if (size == TTE8K && sfmmup != KHATID) {
11282 11102 if (sfmmu_put_free_hblk(hmeblkp, 0))
11283 11103 goto fill_hblk;
11284 11104 }
11285 11105 }
11286 11106 } else {
11287 11107 /*
11288 11108 * We are here to map the slab in sfmmu8_cache; let's
11289 11109 * check if we could tap our reserve list; if successful,
11290 11110 * this will avoid the pain of going thru sfmmu_hblk_swap
11291 11111 */
11292 11112 SFMMU_STAT(sf_hblk_slab_cnt);
11293 11113 if (!sfmmu_get_free_hblk(&hmeblkp, 0)) {
11294 11114 /*
11295 11115 * let's start hblk_reserve dance
11296 11116 */
11297 11117 SFMMU_STAT(sf_hblk_reserve_cnt);
11298 11118 owner = 1;
11299 11119 mutex_enter(&hblk_reserve_lock);
11300 11120 hmeblkp = HBLK_RESERVE;
11301 11121 hblk_reserve_thread = curthread;
11302 11122 }
11303 11123 }
11304 11124
11305 11125 hblk_verify:
11306 11126 ASSERT(hmeblkp != NULL);
11307 11127 set_hblk_sz(hmeblkp, size);
11308 11128 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp));
11309 11129 SFMMU_HASH_LOCK(hmebp);
11310 11130 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp);
11311 11131 if (newhblkp != NULL) {
11312 11132 SFMMU_HASH_UNLOCK(hmebp);
11313 11133 if (hmeblkp != HBLK_RESERVE) {
11314 11134 /*
11315 11135 * This is really tricky!
11316 11136 *
11317 11137 * vmem_alloc(vmem_seg_arena)
11318 11138 * vmem_alloc(vmem_internal_arena)
11319 11139 * segkmem_alloc(heap_arena)
11320 11140 * vmem_alloc(heap_arena)
11321 11141 * page_create()
11322 11142 * hat_memload()
11323 11143 * kmem_cache_free()
11324 11144 * kmem_cache_alloc()
11325 11145 * kmem_slab_create()
11326 11146 * vmem_alloc(kmem_internal_arena)
11327 11147 * segkmem_alloc(heap_arena)
11328 11148 * vmem_alloc(heap_arena)
11329 11149 * page_create()
11330 11150 * hat_memload()
11331 11151 * kmem_cache_free()
11332 11152 * ...
11333 11153 *
11334 11154 * Thus, hat_memload() could call kmem_cache_free
11335 11155 			 * enough times that we could easily
11336 11156 * hit the bottom of the stack or run out of reserve
11337 11157 * list of vmem_seg structs. So, we must donate
11338 11158 * this hblk to reserve list if it's allocated
11339 11159 * from sfmmu8_cache *and* mapping kernel range.
11340 11160 * We don't need to worry about freeing hmeblk1's
11341 11161 * to kmem since they don't map any kmem slabs.
11342 11162 *
11343 11163 * Note: When segkmem supports largepages, we must
11344 11164 * free hmeblk1's to reserve list as well.
11345 11165 */
11346 11166 forcefree = (sfmmup == KHATID) ? 1 : 0;
11347 11167 if (size == TTE8K &&
11348 11168 sfmmu_put_free_hblk(hmeblkp, forcefree)) {
11349 11169 goto re_verify;
11350 11170 }
11351 11171 ASSERT(sfmmup != KHATID);
11352 11172 kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp);
11353 11173 } else {
11354 11174 /*
11355 11175 * Hey! we don't need hblk_reserve any more.
11356 11176 */
11357 11177 ASSERT(owner);
11358 11178 hblk_reserve_thread = NULL;
11359 11179 mutex_exit(&hblk_reserve_lock);
11360 11180 owner = 0;
11361 11181 }
11362 11182 re_verify:
11363 11183 /*
11364 11184 * let's check if the goodies are still present
11365 11185 */
11366 11186 SFMMU_HASH_LOCK(hmebp);
11367 11187 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp);
11368 11188 if (newhblkp != NULL) {
11369 11189 /*
11370 11190 * return newhblkp if it's not hblk_reserve;
11371 11191 * if newhblkp is hblk_reserve, return it
11372 11192 * _only if_ we are the owner of hblk_reserve.
11373 11193 */
11374 11194 if (newhblkp != HBLK_RESERVE || owner) {
11375 11195 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) ||
11376 11196 newhblkp->hblk_shared);
11377 11197 ASSERT(SFMMU_IS_SHMERID_VALID(rid) ||
11378 11198 !newhblkp->hblk_shared);
11379 11199 return (newhblkp);
11380 11200 } else {
11381 11201 /*
11382 11202 * we just hit hblk_reserve in the hash and
11383 11203 * we are not the owner of that;
11384 11204 *
11385 11205 * block until hblk_reserve_thread completes
11386 11206 * swapping hblk_reserve and try the dance
11387 11207 * once again.
11388 11208 */
11389 11209 SFMMU_HASH_UNLOCK(hmebp);
11390 11210 mutex_enter(&hblk_reserve_lock);
11391 11211 mutex_exit(&hblk_reserve_lock);
11392 11212 SFMMU_STAT(sf_hblk_reserve_hit);
11393 11213 goto fill_hblk;
11394 11214 }
11395 11215 } else {
11396 11216 /*
11397 11217 * it's no more! try the dance once again.
11398 11218 */
11399 11219 SFMMU_HASH_UNLOCK(hmebp);
11400 11220 goto fill_hblk;
11401 11221 }
11402 11222 }
11403 11223
11404 11224 hblk_init:
11405 11225 if (SFMMU_IS_SHMERID_VALID(rid)) {
11406 11226 uint16_t tteflag = 0x1 <<
11407 11227 ((size < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ : size);
11408 11228
11409 11229 if (!(rgnp->rgn_hmeflags & tteflag)) {
11410 11230 atomic_or_16(&rgnp->rgn_hmeflags, tteflag);
11411 11231 }
11412 11232 hmeblkp->hblk_shared = 1;
11413 11233 } else {
11414 11234 hmeblkp->hblk_shared = 0;
11415 11235 }
11416 11236 set_hblk_sz(hmeblkp, size);
11417 11237 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
11418 11238 hmeblkp->hblk_next = (struct hme_blk *)NULL;
11419 11239 hmeblkp->hblk_tag = hblktag;
11420 11240 hmeblkp->hblk_shadow = shw_hblkp;
11421 11241 hblkpa = hmeblkp->hblk_nextpa;
11422 11242 hmeblkp->hblk_nextpa = HMEBLK_ENDPA;
11423 11243
11424 11244 ASSERT(get_hblk_ttesz(hmeblkp) == size);
11425 11245 ASSERT(get_hblk_span(hmeblkp) == HMEBLK_SPAN(size));
11426 11246 ASSERT(hmeblkp->hblk_hmecnt == 0);
11427 11247 ASSERT(hmeblkp->hblk_vcnt == 0);
11428 11248 ASSERT(hmeblkp->hblk_lckcnt == 0);
11429 11249 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp));
11430 11250 sfmmu_hblk_hash_add(hmebp, hmeblkp, hblkpa);
11431 11251 return (hmeblkp);
11432 11252 }
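
The allocate-then-revalidate flow in sfmmu_hblk_alloc() -- drop the hash lock before a potentially sleeping allocation, retake it, re-search, and give the new block away if another thread won the race -- is easier to see in isolation. A minimal sketch of the pattern follows; struct node/bucket and the lookup/insert/alloc_node/free_node helpers are hypothetical, not hat_sfmmu interfaces.

	struct node;
	struct bucket { kmutex_t b_lock; /* ... hash chain ... */ };

	extern struct node *lookup(struct bucket *, uint64_t);
	extern void insert(struct bucket *, struct node *);
	extern struct node *alloc_node(uint64_t);	/* may sleep */
	extern void free_node(struct node *);

	struct node *
	get_or_create(struct bucket *bp, uint64_t tag)
	{
		struct node *np, *newp;

		mutex_enter(&bp->b_lock);
		if ((np = lookup(bp, tag)) != NULL) {
			mutex_exit(&bp->b_lock);
			return (np);		/* fast path: already present */
		}
		mutex_exit(&bp->b_lock);

		newp = alloc_node(tag);		/* no locks held while sleeping */

		mutex_enter(&bp->b_lock);
		if ((np = lookup(bp, tag)) != NULL) {	/* re-verify: did we race? */
			mutex_exit(&bp->b_lock);
			free_node(newp);	/* lost the race; discard ours */
			return (np);
		}
		insert(bp, newp);
		mutex_exit(&bp->b_lock);
		return (newp);
	}
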
11433 11253
11434 11254 /*
11435 11255 * This function cleans up the hme_blk and returns it to the free list.
11436 11256 */
11437 11257 /* ARGSUSED */
11438 11258 static void
11439 11259 sfmmu_hblk_free(struct hme_blk **listp)
11440 11260 {
11441 11261 struct hme_blk *hmeblkp, *next_hmeblkp;
11442 11262 int size;
11443 11263 uint_t critical;
11444 11264 uint64_t hblkpa;
11445 11265
11446 11266 ASSERT(*listp != NULL);
11447 11267
11448 11268 hmeblkp = *listp;
11449 11269 while (hmeblkp != NULL) {
11450 11270 next_hmeblkp = hmeblkp->hblk_next;
11451 11271 ASSERT(!hmeblkp->hblk_hmecnt);
11452 11272 ASSERT(!hmeblkp->hblk_vcnt);
11453 11273 ASSERT(!hmeblkp->hblk_lckcnt);
11454 11274 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve);
11455 11275 ASSERT(hmeblkp->hblk_shared == 0);
11456 11276 ASSERT(hmeblkp->hblk_shw_bit == 0);
11457 11277 ASSERT(hmeblkp->hblk_shadow == NULL);
11458 11278
11459 11279 hblkpa = va_to_pa((caddr_t)hmeblkp);
11460 11280 ASSERT(hblkpa != (uint64_t)-1);
11461 11281 critical = (hblktosfmmu(hmeblkp) == KHATID) ? 1 : 0;
11462 11282
11463 11283 size = get_hblk_ttesz(hmeblkp);
11464 11284 hmeblkp->hblk_next = NULL;
11465 11285 hmeblkp->hblk_nextpa = hblkpa;
11466 11286
11467 11287 if (hmeblkp->hblk_nuc_bit == 0) {
11468 11288
11469 11289 if (size != TTE8K ||
11470 11290 !sfmmu_put_free_hblk(hmeblkp, critical))
11471 11291 kmem_cache_free(get_hblk_cache(hmeblkp),
11472 11292 hmeblkp);
11473 11293 }
11474 11294 hmeblkp = next_hmeblkp;
11475 11295 }
11476 11296 }
11477 11297
11478 11298 #define BUCKETS_TO_SEARCH_BEFORE_UNLOAD 30
11479 11299 #define SFMMU_HBLK_STEAL_THRESHOLD 5
11480 11300
11481 11301 static uint_t sfmmu_hblk_steal_twice;
11482 11302 static uint_t sfmmu_hblk_steal_count, sfmmu_hblk_steal_unload_count;
11483 11303
11484 11304 /*
11485 11305 * Steal a hmeblk from user or kernel hme hash lists.
11486 11306  * For an 8K tte, grab one from the reserve pool (freehblkp) before
11487 11307  * proceeding to steal; if we fail to steal after SFMMU_HBLK_STEAL_THRESHOLD
11488 11308  * attempts, tap into the critical reserve of freehblkp.
11489 11309  * Note: we keep looping in this routine until we find one.
11490 11310 */
11491 11311 static struct hme_blk *
11492 11312 sfmmu_hblk_steal(int size)
11493 11313 {
11494 11314 static struct hmehash_bucket *uhmehash_steal_hand = NULL;
11495 11315 struct hmehash_bucket *hmebp;
11496 11316 struct hme_blk *hmeblkp = NULL, *pr_hblk;
11497 11317 uint64_t hblkpa;
11498 11318 int i;
11499 11319 uint_t loop_cnt = 0, critical;
11500 11320
11501 11321 for (;;) {
11502 11322 /* Check cpu hblk pending queues */
11503 11323 if ((hmeblkp = sfmmu_check_pending_hblks(size)) != NULL) {
11504 11324 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp);
11505 11325 ASSERT(hmeblkp->hblk_hmecnt == 0);
11506 11326 ASSERT(hmeblkp->hblk_vcnt == 0);
11507 11327 return (hmeblkp);
11508 11328 }
11509 11329
11510 11330 if (size == TTE8K) {
11511 11331 critical =
11512 11332 (++loop_cnt > SFMMU_HBLK_STEAL_THRESHOLD) ? 1 : 0;
11513 11333 if (sfmmu_get_free_hblk(&hmeblkp, critical))
11514 11334 return (hmeblkp);
11515 11335 }
11516 11336
11517 11337 hmebp = (uhmehash_steal_hand == NULL) ? uhme_hash :
11518 11338 uhmehash_steal_hand;
11519 11339 ASSERT(hmebp >= uhme_hash && hmebp <= &uhme_hash[UHMEHASH_SZ]);
11520 11340
11521 11341 for (i = 0; hmeblkp == NULL && i <= UHMEHASH_SZ +
11522 11342 BUCKETS_TO_SEARCH_BEFORE_UNLOAD; i++) {
11523 11343 SFMMU_HASH_LOCK(hmebp);
11524 11344 hmeblkp = hmebp->hmeblkp;
11525 11345 hblkpa = hmebp->hmeh_nextpa;
11526 11346 pr_hblk = NULL;
11527 11347 while (hmeblkp) {
11528 11348 /*
11529 11349 * check if it is a hmeblk that is not locked
11530 11350 				 * and not shared.  Skip shadow hmeblks with
11531 11351 				 * shadow_mask set, i.e. valid count non-zero.
11532 11352 */
11533 11353 if ((get_hblk_ttesz(hmeblkp) == size) &&
11534 11354 (hmeblkp->hblk_shw_bit == 0 ||
11535 11355 hmeblkp->hblk_vcnt == 0) &&
11536 11356 (hmeblkp->hblk_lckcnt == 0)) {
11537 11357 /*
11538 11358 * there is a high probability that we
11539 11359 * will find a free one. search some
11540 11360 * buckets for a free hmeblk initially
11541 11361 * before unloading a valid hmeblk.
11542 11362 */
11543 11363 if ((hmeblkp->hblk_vcnt == 0 &&
11544 11364 hmeblkp->hblk_hmecnt == 0) || (i >=
11545 11365 BUCKETS_TO_SEARCH_BEFORE_UNLOAD)) {
11546 11366 if (sfmmu_steal_this_hblk(hmebp,
11547 11367 hmeblkp, hblkpa, pr_hblk)) {
11548 11368 /*
11549 11369 * Hblk is unloaded
11550 11370 * successfully
11551 11371 */
11552 11372 break;
11553 11373 }
11554 11374 }
11555 11375 }
11556 11376 pr_hblk = hmeblkp;
11557 11377 hblkpa = hmeblkp->hblk_nextpa;
11558 11378 hmeblkp = hmeblkp->hblk_next;
11559 11379 }
11560 11380
11561 11381 SFMMU_HASH_UNLOCK(hmebp);
11562 11382 if (hmebp++ == &uhme_hash[UHMEHASH_SZ])
11563 11383 hmebp = uhme_hash;
11564 11384 }
11565 11385 uhmehash_steal_hand = hmebp;
11566 11386
11567 11387 if (hmeblkp != NULL)
11568 11388 break;
11569 11389
11570 11390 /*
11571 11391 * in the worst case, look for a free one in the kernel
11572 11392 * hash table.
11573 11393 */
11574 11394 for (i = 0, hmebp = khme_hash; i <= KHMEHASH_SZ; i++) {
11575 11395 SFMMU_HASH_LOCK(hmebp);
11576 11396 hmeblkp = hmebp->hmeblkp;
11577 11397 hblkpa = hmebp->hmeh_nextpa;
11578 11398 pr_hblk = NULL;
11579 11399 while (hmeblkp) {
11580 11400 /*
11581 11401 				 * check if it is a free hmeblk
11582 11402 */
11583 11403 if ((get_hblk_ttesz(hmeblkp) == size) &&
11584 11404 (hmeblkp->hblk_lckcnt == 0) &&
11585 11405 (hmeblkp->hblk_vcnt == 0) &&
11586 11406 (hmeblkp->hblk_hmecnt == 0)) {
11587 11407 if (sfmmu_steal_this_hblk(hmebp,
11588 11408 hmeblkp, hblkpa, pr_hblk)) {
11589 11409 break;
11590 11410 } else {
11591 11411 /*
11592 11412 * Cannot fail since we have
11593 11413 * hash lock.
11594 11414 */
11595 11415 panic("fail to steal?");
11596 11416 }
11597 11417 }
11598 11418
11599 11419 pr_hblk = hmeblkp;
11600 11420 hblkpa = hmeblkp->hblk_nextpa;
11601 11421 hmeblkp = hmeblkp->hblk_next;
11602 11422 }
11603 11423
11604 11424 SFMMU_HASH_UNLOCK(hmebp);
11605 11425 if (hmebp++ == &khme_hash[KHMEHASH_SZ])
11606 11426 hmebp = khme_hash;
11607 11427 }
11608 11428
11609 11429 if (hmeblkp != NULL)
11610 11430 break;
11611 11431 sfmmu_hblk_steal_twice++;
11612 11432 }
11613 11433 return (hmeblkp);
11614 11434 }
11615 11435
11616 11436 /*
11617 11437 * This routine does real work to prepare a hblk to be "stolen" by
11618 11438 * unloading the mappings, updating shadow counts ....
11619 11439  * It returns 1 if the block is ready to be reused (stolen), or 0
11620 11440  * if the block cannot be stolen yet because pageunload is still
11621 11441  * working on this hblk.
11622 11442 */
11623 11443 static int
11624 11444 sfmmu_steal_this_hblk(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
11625 11445 uint64_t hblkpa, struct hme_blk *pr_hblk)
11626 11446 {
11627 11447 int shw_size, vshift;
11628 11448 struct hme_blk *shw_hblkp;
11629 11449 caddr_t vaddr;
11630 11450 uint_t shw_mask, newshw_mask;
11631 11451 struct hme_blk *list = NULL;
11632 11452
11633 11453 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
11634 11454
11635 11455 /*
11636 11456 * check if the hmeblk is free, unload if necessary
11637 11457 */
11638 11458 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
11639 11459 sfmmu_t *sfmmup;
11640 11460 demap_range_t dmr;
11641 11461
11642 11462 sfmmup = hblktosfmmu(hmeblkp);
11643 11463 if (hmeblkp->hblk_shared || sfmmup->sfmmu_ismhat) {
11644 11464 return (0);
11645 11465 }
11646 11466 DEMAP_RANGE_INIT(sfmmup, &dmr);
11647 11467 (void) sfmmu_hblk_unload(sfmmup, hmeblkp,
11648 11468 (caddr_t)get_hblk_base(hmeblkp),
11649 11469 get_hblk_endaddr(hmeblkp), &dmr, HAT_UNLOAD);
11650 11470 DEMAP_RANGE_FLUSH(&dmr);
11651 11471 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
11652 11472 /*
11653 11473 * Pageunload is working on the same hblk.
11654 11474 */
11655 11475 return (0);
11656 11476 }
11657 11477
11658 11478 sfmmu_hblk_steal_unload_count++;
11659 11479 }
11660 11480
11661 11481 ASSERT(hmeblkp->hblk_lckcnt == 0);
11662 11482 ASSERT(hmeblkp->hblk_vcnt == 0 && hmeblkp->hblk_hmecnt == 0);
11663 11483
11664 11484 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 1);
11665 11485 hmeblkp->hblk_nextpa = hblkpa;
11666 11486
11667 11487 shw_hblkp = hmeblkp->hblk_shadow;
11668 11488 if (shw_hblkp) {
11669 11489 ASSERT(!hmeblkp->hblk_shared);
11670 11490 shw_size = get_hblk_ttesz(shw_hblkp);
11671 11491 vaddr = (caddr_t)get_hblk_base(hmeblkp);
11672 11492 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size);
11673 11493 ASSERT(vshift < 8);
11674 11494 /*
11675 11495 * Atomically clear shadow mask bit
11676 11496 */
11677 11497 do {
11678 11498 shw_mask = shw_hblkp->hblk_shw_mask;
11679 11499 ASSERT(shw_mask & (1 << vshift));
11680 11500 newshw_mask = shw_mask & ~(1 << vshift);
11681 11501 newshw_mask = atomic_cas_32(&shw_hblkp->hblk_shw_mask,
11682 11502 shw_mask, newshw_mask);
11683 11503 } while (newshw_mask != shw_mask);
11684 11504 hmeblkp->hblk_shadow = NULL;
11685 11505 }
11686 11506
11687 11507 /*
11688 11508 * remove shadow bit if we are stealing an unused shadow hmeblk.
11689 11509 * sfmmu_hblk_alloc needs it that way, will set shadow bit later if
11690 11510 * we are indeed allocating a shadow hmeblk.
11691 11511 */
11692 11512 hmeblkp->hblk_shw_bit = 0;
11693 11513
11694 11514 if (hmeblkp->hblk_shared) {
11695 11515 sf_srd_t *srdp;
11696 11516 sf_region_t *rgnp;
11697 11517 uint_t rid;
11698 11518
11699 11519 srdp = hblktosrd(hmeblkp);
11700 11520 ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
11701 11521 rid = hmeblkp->hblk_tag.htag_rid;
11702 11522 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
11703 11523 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
11704 11524 rgnp = srdp->srd_hmergnp[rid];
11705 11525 ASSERT(rgnp != NULL);
11706 11526 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
11707 11527 hmeblkp->hblk_shared = 0;
11708 11528 }
11709 11529
11710 11530 sfmmu_hblk_steal_count++;
11711 11531 SFMMU_STAT(sf_steal_count);
11712 11532
11713 11533 return (1);
11714 11534 }
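
The shadow-mask update in sfmmu_steal_this_hblk() is a standard compare-and-swap retry loop. A reduced sketch, assuming only <sys/atomic.h>'s atomic_cas_32(); the function name and parameters are illustrative.

	/* Atomically clear one bit of a mask that other CPUs may update. */
	static void
	atomic_clear_bit32(volatile uint32_t *maskp, uint_t bit)
	{
		uint32_t old, new;

		do {
			old = *maskp;			/* snapshot current value */
			new = old & ~(1U << bit);	/* proposed new value */
			/* atomic_cas_32() returns the value it found in *maskp */
		} while (atomic_cas_32(maskp, old, new) != old);
	}
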
11715 11535
11716 11536 struct hme_blk *
11717 11537 sfmmu_hmetohblk(struct sf_hment *sfhme)
11718 11538 {
11719 11539 struct hme_blk *hmeblkp;
11720 11540 struct sf_hment *sfhme0;
11721 11541 struct hme_blk *hblk_dummy = 0;
11722 11542
11723 11543 /*
11724 11544 * No dummy sf_hments, please.
11725 11545 */
11726 11546 ASSERT(sfhme->hme_tte.ll != 0);
11727 11547
11728 11548 sfhme0 = sfhme - sfhme->hme_tte.tte_hmenum;
11729 11549 hmeblkp = (struct hme_blk *)((uintptr_t)sfhme0 -
11730 11550 (uintptr_t)&hblk_dummy->hblk_hme[0]);
11731 11551
11732 11552 return (hmeblkp);
11733 11553 }
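
sfmmu_hmetohblk() recovers the enclosing hme_blk from an embedded sf_hment by stepping back to element 0 of the hme array and then subtracting that array's offset (computed above with the hblk_dummy null-pointer trick). A small stand-alone sketch of the same arithmetic, using the standard offsetof() macro and hypothetical inner/outer types:

	#include <stddef.h>	/* offsetof */
	#include <stdint.h>	/* uintptr_t */

	struct inner { int dummy; };
	struct outer { int tag; struct inner in[4]; };

	/* Given a pointer to in[idx], recover the containing struct outer. */
	static struct outer *
	inner_to_outer(struct inner *ip, int idx)
	{
		struct inner *ip0 = ip - idx;	/* back to element 0 of the array */

		return ((struct outer *)((uintptr_t)ip0 -
		    offsetof(struct outer, in)));
	}
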
11734 11554
11735 11555 /*
11736 11556 * On swapin, get appropriately sized TSB(s) and clear the HAT_SWAPPED flag.
11737 11557 * If we can't get appropriately sized TSB(s), try for 8K TSB(s) using
11738 11558 * KM_SLEEP allocation.
11739 11559 *
11740 11560  * This routine returns no value; the final TSB_FORCEALLOC fallback cannot fail.
11741 11561 */
11742 11562 static void
11743 11563 sfmmu_tsb_swapin(sfmmu_t *sfmmup, hatlock_t *hatlockp)
11744 11564 {
11745 11565 struct tsb_info *tsbinfop, *next;
11746 11566 tsb_replace_rc_t rc;
11747 11567 boolean_t gotfirst = B_FALSE;
11748 11568
11749 11569 ASSERT(sfmmup != ksfmmup);
11750 11570 ASSERT(sfmmu_hat_lock_held(sfmmup));
11751 11571
11752 11572 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPIN)) {
11753 11573 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp));
11754 11574 }
11755 11575
11756 11576 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
11757 11577 SFMMU_FLAGS_SET(sfmmup, HAT_SWAPIN);
11758 11578 } else {
11759 11579 return;
11760 11580 }
11761 11581
11762 11582 ASSERT(sfmmup->sfmmu_tsb != NULL);
11763 11583
11764 11584 /*
11765 11585 * Loop over all tsbinfo's replacing them with ones that actually have
11766 11586 * a TSB. If any of the replacements ever fail, bail out of the loop.
11767 11587 */
11768 11588 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; tsbinfop = next) {
11769 11589 ASSERT(tsbinfop->tsb_flags & TSB_SWAPPED);
11770 11590 next = tsbinfop->tsb_next;
11771 11591 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, tsbinfop->tsb_szc,
11772 11592 hatlockp, TSB_SWAPIN);
11773 11593 if (rc != TSB_SUCCESS) {
11774 11594 break;
11775 11595 }
11776 11596 gotfirst = B_TRUE;
11777 11597 }
11778 11598
11779 11599 switch (rc) {
11780 11600 case TSB_SUCCESS:
11781 11601 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN);
11782 11602 cv_broadcast(&sfmmup->sfmmu_tsb_cv);
11783 11603 return;
11784 11604 case TSB_LOSTRACE:
11785 11605 break;
11786 11606 case TSB_ALLOCFAIL:
11787 11607 break;
11788 11608 default:
11789 11609 panic("sfmmu_replace_tsb returned unrecognized failure code "
11790 11610 "%d", rc);
11791 11611 }
11792 11612
11793 11613 /*
11794 11614 * In this case, we failed to get one of our TSBs. If we failed to
11795 11615 * get the first TSB, get one of minimum size (8KB). Walk the list
11796 11616 * and throw away the tsbinfos, starting where the allocation failed;
11797 11617 * we can get by with just one TSB as long as we don't leave the
11798 11618 * SWAPPED tsbinfo structures lying around.
11799 11619 */
11800 11620 tsbinfop = sfmmup->sfmmu_tsb;
11801 11621 next = tsbinfop->tsb_next;
11802 11622 tsbinfop->tsb_next = NULL;
11803 11623
11804 11624 sfmmu_hat_exit(hatlockp);
11805 11625 for (tsbinfop = next; tsbinfop != NULL; tsbinfop = next) {
11806 11626 next = tsbinfop->tsb_next;
11807 11627 sfmmu_tsbinfo_free(tsbinfop);
11808 11628 }
11809 11629 hatlockp = sfmmu_hat_enter(sfmmup);
11810 11630
11811 11631 /*
11812 11632 * If we don't have any TSBs, get a single 8K TSB for 8K, 64K and 512K
11813 11633 * pages.
11814 11634 */
11815 11635 if (!gotfirst) {
11816 11636 tsbinfop = sfmmup->sfmmu_tsb;
11817 11637 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, TSB_MIN_SZCODE,
11818 11638 hatlockp, TSB_SWAPIN | TSB_FORCEALLOC);
11819 11639 ASSERT(rc == TSB_SUCCESS);
11820 11640 }
11821 11641
11822 11642 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN);
11823 11643 cv_broadcast(&sfmmup->sfmmu_tsb_cv);
11824 11644 }
11825 11645
11826 11646 static int
11827 11647 sfmmu_is_rgnva(sf_srd_t *srdp, caddr_t addr, ulong_t w, ulong_t bmw)
11828 11648 {
11829 11649 ulong_t bix = 0;
11830 11650 uint_t rid;
11831 11651 sf_region_t *rgnp;
11832 11652
11833 11653 ASSERT(srdp != NULL);
11834 11654 ASSERT(srdp->srd_refcnt != 0);
11835 11655
11836 11656 w <<= BT_ULSHIFT;
11837 11657 while (bmw) {
11838 11658 if (!(bmw & 0x1)) {
11839 11659 bix++;
11840 11660 bmw >>= 1;
11841 11661 continue;
11842 11662 }
11843 11663 rid = w | bix;
11844 11664 rgnp = srdp->srd_hmergnp[rid];
11845 11665 ASSERT(rgnp->rgn_refcnt > 0);
11846 11666 ASSERT(rgnp->rgn_id == rid);
11847 11667 if (addr < rgnp->rgn_saddr ||
11848 11668 addr >= (rgnp->rgn_saddr + rgnp->rgn_size)) {
11849 11669 bix++;
11850 11670 bmw >>= 1;
11851 11671 } else {
11852 11672 return (1);
11853 11673 }
11854 11674 }
11855 11675 return (0);
11856 11676 }
11857 11677
11858 11678 /*
11859 11679 * Handle exceptions for low level tsb_handler.
11860 11680 *
11861 11681 * There are many scenarios that could land us here:
11862 11682 *
11863 11683 * If the context is invalid we land here. The context can be invalid
11864 11684 * for 3 reasons: 1) we couldn't allocate a new context and now need to
11865 11685 * perform a wrap around operation in order to allocate a new context.
11866 11686  * 2) Context was invalidated to change pagesize programming. 3) ISM or
11867 11687  * TSB configuration is changing for this process and we are forced into
11868 11688  * here to do a synchronization operation. If the context is valid we can
11869 11689  * be here from the window trap handler. In this case just call trap to
11870 11690  * handle the fault.
11871 11691 *
11872 11692 * Note that the process will run in INVALID_CONTEXT before
11873 11693 * faulting into here and subsequently loading the MMU registers
11874 11694 * (including the TSB base register) associated with this process.
11875 11695 * For this reason, the trap handlers must all test for
11876 11696 * INVALID_CONTEXT before attempting to access any registers other
11877 11697 * than the context registers.
11878 11698 */
11879 11699 void
11880 11700 sfmmu_tsbmiss_exception(struct regs *rp, uintptr_t tagaccess, uint_t traptype)
11881 11701 {
11882 11702 sfmmu_t *sfmmup, *shsfmmup;
11883 11703 uint_t ctxtype;
11884 11704 klwp_id_t lwp;
11885 11705 char lwp_save_state;
11886 11706 hatlock_t *hatlockp, *shatlockp;
11887 11707 struct tsb_info *tsbinfop;
11888 11708 struct tsbmiss *tsbmp;
11889 11709 sf_scd_t *scdp;
11890 11710
11891 11711 SFMMU_STAT(sf_tsb_exceptions);
11892 11712 SFMMU_MMU_STAT(mmu_tsb_exceptions);
11893 11713 sfmmup = astosfmmu(curthread->t_procp->p_as);
11894 11714 /*
11895 11715 	 * note that in sun4u, the tagaccess register contains ctxnum
11896 11716 * while sun4v passes ctxtype in the tagaccess register.
11897 11717 */
11898 11718 ctxtype = tagaccess & TAGACC_CTX_MASK;
11899 11719
11900 11720 ASSERT(sfmmup != ksfmmup && ctxtype != KCONTEXT);
11901 11721 ASSERT(sfmmup->sfmmu_ismhat == 0);
11902 11722 ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED) ||
11903 11723 ctxtype == INVALID_CONTEXT);
11904 11724
11905 11725 if (ctxtype != INVALID_CONTEXT && traptype != T_DATA_PROT) {
11906 11726 /*
11907 11727 * We may land here because shme bitmap and pagesize
11908 11728 * flags are updated lazily in tsbmiss area on other cpus.
11909 11729 		 * If we detect here that the tsbmiss area is out of sync with
11910 11730 		 * the sfmmu, update it and retry the trapped instruction.
11911 11731 * Otherwise call trap().
11912 11732 */
11913 11733 int ret = 0;
11914 11734 uchar_t tteflag_mask = (1 << TTE64K) | (1 << TTE8K);
11915 11735 caddr_t addr = (caddr_t)(tagaccess & TAGACC_VADDR_MASK);
11916 11736
11917 11737 /*
11918 11738 * Must set lwp state to LWP_SYS before
11919 11739 * trying to acquire any adaptive lock
11920 11740 */
11921 11741 lwp = ttolwp(curthread);
11922 11742 ASSERT(lwp);
11923 11743 lwp_save_state = lwp->lwp_state;
11924 11744 lwp->lwp_state = LWP_SYS;
11925 11745
11926 11746 hatlockp = sfmmu_hat_enter(sfmmup);
11927 11747 kpreempt_disable();
11928 11748 tsbmp = &tsbmiss_area[CPU->cpu_id];
11929 11749 ASSERT(sfmmup == tsbmp->usfmmup);
11930 11750 if (((tsbmp->uhat_tteflags ^ sfmmup->sfmmu_tteflags) &
11931 11751 ~tteflag_mask) ||
11932 11752 ((tsbmp->uhat_rtteflags ^ sfmmup->sfmmu_rtteflags) &
11933 11753 ~tteflag_mask)) {
11934 11754 tsbmp->uhat_tteflags = sfmmup->sfmmu_tteflags;
11935 11755 tsbmp->uhat_rtteflags = sfmmup->sfmmu_rtteflags;
11936 11756 ret = 1;
11937 11757 }
11938 11758 if (sfmmup->sfmmu_srdp != NULL) {
11939 11759 ulong_t *sm = sfmmup->sfmmu_hmeregion_map.bitmap;
11940 11760 ulong_t *tm = tsbmp->shmermap;
11941 11761 ulong_t i;
11942 11762 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) {
11943 11763 ulong_t d = tm[i] ^ sm[i];
11944 11764 if (d) {
11945 11765 if (d & sm[i]) {
11946 11766 if (!ret && sfmmu_is_rgnva(
11947 11767 sfmmup->sfmmu_srdp,
11948 11768 addr, i, d & sm[i])) {
11949 11769 ret = 1;
11950 11770 }
11951 11771 }
11952 11772 tm[i] = sm[i];
11953 11773 }
11954 11774 }
11955 11775 }
11956 11776 kpreempt_enable();
11957 11777 sfmmu_hat_exit(hatlockp);
11958 11778 lwp->lwp_state = lwp_save_state;
11959 11779 if (ret) {
11960 11780 return;
11961 11781 }
11962 11782 } else if (ctxtype == INVALID_CONTEXT) {
11963 11783 /*
11964 11784 * First, make sure we come out of here with a valid ctx,
11965 11785 * since if we don't get one we'll simply loop on the
11966 11786 * faulting instruction.
11967 11787 *
11968 11788 * If the ISM mappings are changing, the TSB is relocated,
11969 11789 * the process is swapped, the process is joining SCD or
11970 11790 * leaving SCD or shared regions we serialize behind the
11971 11791 * controlling thread with hat lock, sfmmu_flags and
11972 11792 * sfmmu_tsb_cv condition variable.
11973 11793 */
11974 11794
11975 11795 /*
11976 11796 * Must set lwp state to LWP_SYS before
11977 11797 * trying to acquire any adaptive lock
11978 11798 */
11979 11799 lwp = ttolwp(curthread);
11980 11800 ASSERT(lwp);
11981 11801 lwp_save_state = lwp->lwp_state;
11982 11802 lwp->lwp_state = LWP_SYS;
11983 11803
11984 11804 hatlockp = sfmmu_hat_enter(sfmmup);
11985 11805 retry:
11986 11806 if ((scdp = sfmmup->sfmmu_scdp) != NULL) {
11987 11807 shsfmmup = scdp->scd_sfmmup;
11988 11808 ASSERT(shsfmmup != NULL);
11989 11809
11990 11810 for (tsbinfop = shsfmmup->sfmmu_tsb; tsbinfop != NULL;
11991 11811 tsbinfop = tsbinfop->tsb_next) {
11992 11812 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) {
11993 11813 /* drop the private hat lock */
11994 11814 sfmmu_hat_exit(hatlockp);
11995 11815 /* acquire the shared hat lock */
11996 11816 shatlockp = sfmmu_hat_enter(shsfmmup);
11997 11817 /*
11998 11818 * recheck to see if anything changed
11999 11819 * after we drop the private hat lock.
12000 11820 */
12001 11821 if (sfmmup->sfmmu_scdp == scdp &&
12002 11822 shsfmmup == scdp->scd_sfmmup) {
12003 11823 sfmmu_tsb_chk_reloc(shsfmmup,
12004 11824 shatlockp);
12005 11825 }
12006 11826 sfmmu_hat_exit(shatlockp);
12007 11827 hatlockp = sfmmu_hat_enter(sfmmup);
12008 11828 goto retry;
12009 11829 }
12010 11830 }
12011 11831 }
12012 11832
12013 11833 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
12014 11834 tsbinfop = tsbinfop->tsb_next) {
12015 11835 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) {
12016 11836 cv_wait(&sfmmup->sfmmu_tsb_cv,
12017 11837 HATLOCK_MUTEXP(hatlockp));
12018 11838 goto retry;
12019 11839 }
12020 11840 }
12021 11841
12022 11842 /*
12023 11843 * Wait for ISM maps to be updated.
12024 11844 */
12025 11845 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) {
12026 11846 cv_wait(&sfmmup->sfmmu_tsb_cv,
12027 11847 HATLOCK_MUTEXP(hatlockp));
12028 11848 goto retry;
12029 11849 }
12030 11850
12031 11851 /* Is this process joining an SCD? */
12032 11852 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
12033 11853 /*
12034 11854 * Flush private TSB and setup shared TSB.
12035 11855 * sfmmu_finish_join_scd() does not drop the
12036 11856 * hat lock.
12037 11857 */
12038 11858 sfmmu_finish_join_scd(sfmmup);
12039 11859 SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD);
12040 11860 }
12041 11861
12042 11862 /*
12043 11863 * If we're swapping in, get TSB(s). Note that we must do
12044 11864 * this before we get a ctx or load the MMU state. Once
12045 11865 * we swap in we have to recheck to make sure the TSB(s) and
12046 11866 * ISM mappings didn't change while we slept.
12047 11867 */
12048 11868 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
12049 11869 sfmmu_tsb_swapin(sfmmup, hatlockp);
12050 11870 goto retry;
12051 11871 }
12052 11872
12053 11873 sfmmu_get_ctx(sfmmup);
12054 11874
12055 11875 sfmmu_hat_exit(hatlockp);
12056 11876 /*
12057 11877 * Must restore lwp_state if not calling
12058 11878 * trap() for further processing. Restore
12059 11879 * it anyway.
12060 11880 */
12061 11881 lwp->lwp_state = lwp_save_state;
12062 11882 return;
12063 11883 }
12064 11884 trap(rp, (caddr_t)tagaccess, traptype, 0);
12065 11885 }
12066 11886
12067 11887 static void
12068 11888 sfmmu_tsb_chk_reloc(sfmmu_t *sfmmup, hatlock_t *hatlockp)
12069 11889 {
12070 11890 struct tsb_info *tp;
12071 11891
12072 11892 ASSERT(sfmmu_hat_lock_held(sfmmup));
12073 11893
12074 11894 for (tp = sfmmup->sfmmu_tsb; tp != NULL; tp = tp->tsb_next) {
12075 11895 if (tp->tsb_flags & TSB_RELOC_FLAG) {
12076 11896 cv_wait(&sfmmup->sfmmu_tsb_cv,
12077 11897 HATLOCK_MUTEXP(hatlockp));
12078 11898 break;
12079 11899 }
12080 11900 }
12081 11901 }
12082 11902
12083 11903 /*
12084 11904 * sfmmu_vatopfn_suspended is called from GET_TTE when TL=0 and
12085 11905  * the TTE_SUSPENDED bit is set in the tte; we block on acquiring a page
12086 11906  * lock rather than spinning to avoid send mondo timeouts with
12087 11907 * interrupts enabled. When the lock is acquired it is immediately
12088 11908 * released and we return back to sfmmu_vatopfn just after
12089 11909 * the GET_TTE call.
12090 11910 */
12091 11911 void
12092 11912 sfmmu_vatopfn_suspended(caddr_t vaddr, sfmmu_t *sfmmu, tte_t *ttep)
12093 11913 {
12094 11914 struct page **pp;
12095 11915
12096 11916 (void) as_pagelock(sfmmu->sfmmu_as, &pp, vaddr, TTE_CSZ(ttep), S_WRITE);
12097 11917 as_pageunlock(sfmmu->sfmmu_as, pp, vaddr, TTE_CSZ(ttep), S_WRITE);
12098 11918 }
12099 11919
12100 11920 /*
12101 11921 * sfmmu_tsbmiss_suspended is called from GET_TTE when TL>0 and
12102 11922  * the TTE_SUSPENDED bit is set in the tte. We do this so that we can handle
12103 11923 * cross traps which cannot be handled while spinning in the
12104 11924 * trap handlers. Simply enter and exit the kpr_suspendlock spin
12105 11925 * mutex, which is held by the holder of the suspend bit, and then
12106 11926 * retry the trapped instruction after unwinding.
12107 11927 */
12108 11928 /*ARGSUSED*/
12109 11929 void
12110 11930 sfmmu_tsbmiss_suspended(struct regs *rp, uintptr_t tagacc, uint_t traptype)
12111 11931 {
12112 11932 ASSERT(curthread != kreloc_thread);
12113 11933 mutex_enter(&kpr_suspendlock);
12114 11934 mutex_exit(&kpr_suspendlock);
12115 11935 }
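
The body above is the enter/exit "barrier" idiom (also used while waiting for hblk_reserve earlier in this file): acquiring and immediately releasing a mutex does no work of its own, but it cannot complete until the current holder has finished. A sketch with an illustrative lock name:

	static kmutex_t op_lock;	/* stands in for kpr_suspendlock */

	static void
	wait_for_current_holder(void)
	{
		mutex_enter(&op_lock);	/* blocks until the holder releases op_lock */
		mutex_exit(&op_lock);	/* nothing to do; the wait itself was the point */
		/* caller now retries the operation that trapped */
	}
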
12116 11936
12117 11937 /*
12118 11938 * This routine could be optimized to reduce the number of xcalls by flushing
12119 11939 * the entire TLBs if region reference count is above some threshold but the
12120 11940 * tradeoff will depend on the size of the TLB. So for now flush the specific
12121 11941 * page a context at a time.
12122 11942 *
12123 11943 * If uselocks is 0 then it's called after all cpus were captured and all the
12124 11944 * hat locks were taken. In this case don't take the region lock by relying on
12125 11945 * the order of list region update operations in hat_join_region(),
12126 11946 * hat_leave_region() and hat_dup_region(). The ordering in those routines
12127 11947 * guarantees that list is always forward walkable and reaches active sfmmus
12128 11948 * regardless of where xc_attention() captures a cpu.
12129 11949 */
12130 11950 cpuset_t
12131 11951 sfmmu_rgntlb_demap(caddr_t addr, sf_region_t *rgnp,
12132 11952 struct hme_blk *hmeblkp, int uselocks)
12133 11953 {
12134 11954 sfmmu_t *sfmmup;
12135 11955 cpuset_t cpuset;
12136 11956 cpuset_t rcpuset;
12137 11957 hatlock_t *hatlockp;
12138 11958 uint_t rid = rgnp->rgn_id;
12139 11959 sf_rgn_link_t *rlink;
12140 11960 sf_scd_t *scdp;
12141 11961
12142 11962 ASSERT(hmeblkp->hblk_shared);
12143 11963 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
12144 11964 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
12145 11965
12146 11966 CPUSET_ZERO(rcpuset);
12147 11967 if (uselocks) {
12148 11968 mutex_enter(&rgnp->rgn_mutex);
12149 11969 }
12150 11970 sfmmup = rgnp->rgn_sfmmu_head;
12151 11971 while (sfmmup != NULL) {
12152 11972 if (uselocks) {
12153 11973 hatlockp = sfmmu_hat_enter(sfmmup);
12154 11974 }
12155 11975
12156 11976 /*
12157 11977 * When an SCD is created the SCD hat is linked on the sfmmu
12158 11978 * region lists for each hme region which is part of the
12159 11979 		 * SCD. If we find an SCD hat when walking these lists,
12160 11980 		 * then we flush the shared TSBs; if we find a private hat
12161 11981 		 * which is part of an SCD, but where the region
12162 11982 		 * is not part of the SCD, then we flush the private TSBs.
12163 11983 */
12164 11984 if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL &&
12165 11985 !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
12166 11986 scdp = sfmmup->sfmmu_scdp;
12167 11987 if (SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) {
12168 11988 if (uselocks) {
12169 11989 sfmmu_hat_exit(hatlockp);
12170 11990 }
12171 11991 goto next;
12172 11992 }
12173 11993 }
12174 11994
12175 11995 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
12176 11996
12177 11997 kpreempt_disable();
12178 11998 cpuset = sfmmup->sfmmu_cpusran;
12179 11999 CPUSET_AND(cpuset, cpu_ready_set);
12180 12000 CPUSET_DEL(cpuset, CPU->cpu_id);
12181 12001 SFMMU_XCALL_STATS(sfmmup);
12182 12002 xt_some(cpuset, vtag_flushpage_tl1,
12183 12003 (uint64_t)addr, (uint64_t)sfmmup);
12184 12004 vtag_flushpage(addr, (uint64_t)sfmmup);
12185 12005 if (uselocks) {
12186 12006 sfmmu_hat_exit(hatlockp);
12187 12007 }
12188 12008 kpreempt_enable();
12189 12009 CPUSET_OR(rcpuset, cpuset);
12190 12010
12191 12011 next:
12192 12012 /* LINTED: constant in conditional context */
12193 12013 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0);
12194 12014 ASSERT(rlink != NULL);
12195 12015 sfmmup = rlink->next;
12196 12016 }
12197 12017 if (uselocks) {
12198 12018 mutex_exit(&rgnp->rgn_mutex);
12199 12019 }
12200 12020 return (rcpuset);
12201 12021 }
12202 12022
12203 12023 /*
12204 12024  * This routine takes an sfmmu pointer and the va for an address in an
12205 12025 * ISM region as input and returns the corresponding region id in ism_rid.
12206 12026 * The return value of 1 indicates that a region has been found and ism_rid
12207 12027 * is valid, otherwise 0 is returned.
12208 12028 */
12209 12029 static int
12210 12030 find_ism_rid(sfmmu_t *sfmmup, sfmmu_t *ism_sfmmup, caddr_t va, uint_t *ism_rid)
12211 12031 {
12212 12032 ism_blk_t *ism_blkp;
12213 12033 int i;
12214 12034 ism_map_t *ism_map;
12215 12035 #ifdef DEBUG
12216 12036 struct hat *ism_hatid;
12217 12037 #endif
12218 12038 ASSERT(sfmmu_hat_lock_held(sfmmup));
12219 12039
12220 12040 ism_blkp = sfmmup->sfmmu_iblk;
12221 12041 while (ism_blkp != NULL) {
12222 12042 ism_map = ism_blkp->iblk_maps;
12223 12043 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
12224 12044 if ((va >= ism_start(ism_map[i])) &&
12225 12045 (va < ism_end(ism_map[i]))) {
12226 12046
12227 12047 *ism_rid = ism_map[i].imap_rid;
12228 12048 #ifdef DEBUG
12229 12049 ism_hatid = ism_map[i].imap_ismhat;
12230 12050 ASSERT(ism_hatid == ism_sfmmup);
12231 12051 ASSERT(ism_hatid->sfmmu_ismhat);
12232 12052 #endif
12233 12053 return (1);
12234 12054 }
12235 12055 }
12236 12056 ism_blkp = ism_blkp->iblk_next;
12237 12057 }
12238 12058 return (0);
12239 12059 }
12240 12060
12241 12061 /*
12242 12062 * Special routine to flush out ism mappings- TSBs, TLBs and D-caches.
12243 12063 * This routine may be called with all cpu's captured. Therefore, the
12244 12064 * caller is responsible for holding all locks and disabling kernel
12245 12065 * preemption.
12246 12066 */
12247 12067 /* ARGSUSED */
12248 12068 static void
12249 12069 sfmmu_ismtlbcache_demap(caddr_t addr, sfmmu_t *ism_sfmmup,
12250 12070 struct hme_blk *hmeblkp, pfn_t pfnum, int cache_flush_flag)
12251 12071 {
12252 12072 cpuset_t cpuset;
12253 12073 caddr_t va;
12254 12074 ism_ment_t *ment;
12255 12075 sfmmu_t *sfmmup;
12256 12076 #ifdef VAC
12257 12077 int vcolor;
12258 12078 #endif
12259 12079
12260 12080 sf_scd_t *scdp;
12261 12081 uint_t ism_rid;
12262 12082
12263 12083 ASSERT(!hmeblkp->hblk_shared);
12264 12084 /*
12265 12085 * Walk the ism_hat's mapping list and flush the page
12266 12086 * from every hat sharing this ism_hat. This routine
12267 12087 * may be called while all cpu's have been captured.
12268 12088 * Therefore we can't attempt to grab any locks. For now
12269 12089 * this means we will protect the ism mapping list under
12270 12090 * a single lock which will be grabbed by the caller.
12271 12091 	 * If hat_share/unshare scalability becomes a performance
12272 12092 * problem then we may need to re-think ism mapping list locking.
12273 12093 */
12274 12094 ASSERT(ism_sfmmup->sfmmu_ismhat);
12275 12095 ASSERT(MUTEX_HELD(&ism_mlist_lock));
12276 12096 addr = addr - ISMID_STARTADDR;
12277 12097
12278 12098 for (ment = ism_sfmmup->sfmmu_iment; ment; ment = ment->iment_next) {
12279 12099
12280 12100 sfmmup = ment->iment_hat;
12281 12101
12282 12102 va = ment->iment_base_va;
12283 12103 va = (caddr_t)((uintptr_t)va + (uintptr_t)addr);
12284 12104
12285 12105 /*
12286 12106 * When an SCD is created the SCD hat is linked on the ism
12287 12107 * mapping lists for each ISM segment which is part of the
12288 12108 		 * SCD. If we find an SCD hat when walking these lists,
12289 12109 		 * then we flush the shared TSBs; if we find a private hat
12290 12110 		 * which is part of an SCD, but where the region
12291 12111 		 * corresponding to this va is not part of the SCD, then we
12292 12112 * flush the private TSBs.
12293 12113 */
12294 12114 if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL &&
12295 12115 !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD) &&
12296 12116 !SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) {
12297 12117 if (!find_ism_rid(sfmmup, ism_sfmmup, va,
12298 12118 &ism_rid)) {
12299 12119 cmn_err(CE_PANIC,
12300 12120 "can't find matching ISM rid!");
12301 12121 }
12302 12122
12303 12123 scdp = sfmmup->sfmmu_scdp;
12304 12124 if (SFMMU_IS_ISMRID_VALID(ism_rid) &&
12305 12125 SF_RGNMAP_TEST(scdp->scd_ismregion_map,
12306 12126 ism_rid)) {
12307 12127 continue;
12308 12128 }
12309 12129 }
12310 12130 SFMMU_UNLOAD_TSB(va, sfmmup, hmeblkp, 1);
12311 12131
12312 12132 cpuset = sfmmup->sfmmu_cpusran;
12313 12133 CPUSET_AND(cpuset, cpu_ready_set);
12314 12134 CPUSET_DEL(cpuset, CPU->cpu_id);
12315 12135 SFMMU_XCALL_STATS(sfmmup);
12316 12136 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)va,
12317 12137 (uint64_t)sfmmup);
12318 12138 vtag_flushpage(va, (uint64_t)sfmmup);
12319 12139
12320 12140 #ifdef VAC
12321 12141 /*
12322 12142 * Flush D$
12323 12143 * When flushing D$ we must flush all
12324 12144 * cpu's. See sfmmu_cache_flush().
12325 12145 */
12326 12146 if (cache_flush_flag == CACHE_FLUSH) {
12327 12147 cpuset = cpu_ready_set;
12328 12148 CPUSET_DEL(cpuset, CPU->cpu_id);
12329 12149
12330 12150 SFMMU_XCALL_STATS(sfmmup);
12331 12151 vcolor = addr_to_vcolor(va);
12332 12152 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
12333 12153 vac_flushpage(pfnum, vcolor);
12334 12154 }
12335 12155 #endif /* VAC */
12336 12156 }
12337 12157 }
12338 12158
12339 12159 /*
12340 12160 * Demaps the TSB, CPU caches, and flushes all TLBs on all CPUs of
12341 12161 * a particular virtual address and ctx. If noflush is set we do not
12342 12162 * flush the TLB/TSB. This function may or may not be called with the
12343 12163 * HAT lock held.
12344 12164 */
12345 12165 static void
12346 12166 sfmmu_tlbcache_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
12347 12167 pfn_t pfnum, int tlb_noflush, int cpu_flag, int cache_flush_flag,
12348 12168 int hat_lock_held)
12349 12169 {
12350 12170 #ifdef VAC
12351 12171 int vcolor;
12352 12172 #endif
12353 12173 cpuset_t cpuset;
12354 12174 hatlock_t *hatlockp;
12355 12175
12356 12176 ASSERT(!hmeblkp->hblk_shared);
12357 12177
12358 12178 #if defined(lint) && !defined(VAC)
12359 12179 pfnum = pfnum;
12360 12180 cpu_flag = cpu_flag;
12361 12181 cache_flush_flag = cache_flush_flag;
12362 12182 #endif
12363 12183
12364 12184 /*
12365 12185 * There is no longer a need to protect against ctx being
12366 12186 * stolen here since we don't store the ctx in the TSB anymore.
12367 12187 */
12368 12188 #ifdef VAC
12369 12189 vcolor = addr_to_vcolor(addr);
12370 12190 #endif
12371 12191
12372 12192 /*
12373 12193 * We must hold the hat lock during the flush of TLB,
12374 12194 * to avoid a race with sfmmu_invalidate_ctx(), where
12375 12195 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT,
12376 12196 * causing TLB demap routine to skip flush on that MMU.
12377 12197 * If the context on a MMU has already been set to
12378 12198 * INVALID_CONTEXT, we just get an extra flush on
12379 12199 * that MMU.
12380 12200 */
12381 12201 if (!hat_lock_held && !tlb_noflush)
12382 12202 hatlockp = sfmmu_hat_enter(sfmmup);
12383 12203
12384 12204 kpreempt_disable();
12385 12205 if (!tlb_noflush) {
12386 12206 /*
12387 12207 * Flush the TSB and TLB.
12388 12208 */
12389 12209 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
12390 12210
12391 12211 cpuset = sfmmup->sfmmu_cpusran;
12392 12212 CPUSET_AND(cpuset, cpu_ready_set);
12393 12213 CPUSET_DEL(cpuset, CPU->cpu_id);
12394 12214
12395 12215 SFMMU_XCALL_STATS(sfmmup);
12396 12216
12397 12217 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr,
12398 12218 (uint64_t)sfmmup);
12399 12219
12400 12220 vtag_flushpage(addr, (uint64_t)sfmmup);
12401 12221 }
12402 12222
12403 12223 if (!hat_lock_held && !tlb_noflush)
12404 12224 sfmmu_hat_exit(hatlockp);
12405 12225
12406 12226 #ifdef VAC
12407 12227 /*
12408 12228 * Flush the D$
12409 12229 *
12410 12230 * Even if the ctx is stolen, we need to flush the
12411 12231 * cache. Our ctx stealer only flushes the TLBs.
12412 12232 */
12413 12233 if (cache_flush_flag == CACHE_FLUSH) {
12414 12234 if (cpu_flag & FLUSH_ALL_CPUS) {
12415 12235 cpuset = cpu_ready_set;
12416 12236 } else {
12417 12237 cpuset = sfmmup->sfmmu_cpusran;
12418 12238 CPUSET_AND(cpuset, cpu_ready_set);
12419 12239 }
12420 12240 CPUSET_DEL(cpuset, CPU->cpu_id);
12421 12241 SFMMU_XCALL_STATS(sfmmup);
12422 12242 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
12423 12243 vac_flushpage(pfnum, vcolor);
12424 12244 }
12425 12245 #endif /* VAC */
12426 12246 kpreempt_enable();
12427 12247 }
12428 12248
12429 12249 /*
12430 12250 * Demaps the TSB and flushes all TLBs on all cpus for a particular virtual
12431 12251 * address and ctx. If noflush is set we do not currently do anything.
12432 12252 * This function may or may not be called with the HAT lock held.
12433 12253 */
12434 12254 static void
12435 12255 sfmmu_tlb_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
12436 12256 int tlb_noflush, int hat_lock_held)
12437 12257 {
12438 12258 cpuset_t cpuset;
12439 12259 hatlock_t *hatlockp;
12440 12260
12441 12261 ASSERT(!hmeblkp->hblk_shared);
12442 12262
12443 12263 /*
12444 12264 * If the process is exiting we have nothing to do.
12445 12265 */
12446 12266 if (tlb_noflush)
12447 12267 return;
12448 12268
12449 12269 /*
12450 12270 * Flush TSB.
12451 12271 */
12452 12272 if (!hat_lock_held)
12453 12273 hatlockp = sfmmu_hat_enter(sfmmup);
12454 12274 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
12455 12275
12456 12276 kpreempt_disable();
12457 12277
12458 12278 cpuset = sfmmup->sfmmu_cpusran;
12459 12279 CPUSET_AND(cpuset, cpu_ready_set);
12460 12280 CPUSET_DEL(cpuset, CPU->cpu_id);
12461 12281
12462 12282 SFMMU_XCALL_STATS(sfmmup);
12463 12283 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, (uint64_t)sfmmup);
12464 12284
12465 12285 vtag_flushpage(addr, (uint64_t)sfmmup);
12466 12286
12467 12287 if (!hat_lock_held)
12468 12288 sfmmu_hat_exit(hatlockp);
12469 12289
12470 12290 kpreempt_enable();
12471 12291
12472 12292 }
12473 12293
12474 12294 /*
12475 12295 * Special case of sfmmu_tlb_demap for MMU_PAGESIZE hblks. Use the xcall
12476 12296 * call handler that can flush a range of pages to save on xcalls.
12477 12297 */
12478 12298 static int sfmmu_xcall_save;
12479 12299
12480 12300 /*
12481 12301  * this routine is never used for demapping addresses backed by SRD hmeblks.
12482 12302 */
12483 12303 static void
12484 12304 sfmmu_tlb_range_demap(demap_range_t *dmrp)
12485 12305 {
12486 12306 sfmmu_t *sfmmup = dmrp->dmr_sfmmup;
12487 12307 hatlock_t *hatlockp;
12488 12308 cpuset_t cpuset;
12489 12309 uint64_t sfmmu_pgcnt;
12490 12310 pgcnt_t pgcnt = 0;
12491 12311 int pgunload = 0;
12492 12312 int dirtypg = 0;
12493 12313 caddr_t addr = dmrp->dmr_addr;
12494 12314 caddr_t eaddr;
12495 12315 uint64_t bitvec = dmrp->dmr_bitvec;
12496 12316
12497 12317 ASSERT(bitvec & 1);
12498 12318
12499 12319 /*
12500 12320 * Flush TSB and calculate number of pages to flush.
12501 12321 */
12502 12322 while (bitvec != 0) {
12503 12323 dirtypg = 0;
12504 12324 /*
12505 12325 * Find the first page to flush and then count how many
12506 12326 * pages there are after it that also need to be flushed.
12507 12327 * This way the number of TSB flushes is minimized.
12508 12328 */
12509 12329 while ((bitvec & 1) == 0) {
12510 12330 pgcnt++;
12511 12331 addr += MMU_PAGESIZE;
12512 12332 bitvec >>= 1;
12513 12333 }
12514 12334 while (bitvec & 1) {
12515 12335 dirtypg++;
12516 12336 bitvec >>= 1;
12517 12337 }
12518 12338 eaddr = addr + ptob(dirtypg);
12519 12339 hatlockp = sfmmu_hat_enter(sfmmup);
12520 12340 sfmmu_unload_tsb_range(sfmmup, addr, eaddr, TTE8K);
12521 12341 sfmmu_hat_exit(hatlockp);
12522 12342 pgunload += dirtypg;
12523 12343 addr = eaddr;
12524 12344 pgcnt += dirtypg;
12525 12345 }
12526 12346
12527 12347 ASSERT((pgcnt<<MMU_PAGESHIFT) <= dmrp->dmr_endaddr - dmrp->dmr_addr);
12528 12348 if (sfmmup->sfmmu_free == 0) {
12529 12349 addr = dmrp->dmr_addr;
12530 12350 bitvec = dmrp->dmr_bitvec;
12531 12351
12532 12352 /*
12533 12353 * make sure it has SFMMU_PGCNT_SHIFT bits only,
12534 12354 * as it will be used to pack argument for xt_some
12535 12355 */
12536 12356 ASSERT((pgcnt > 0) &&
12537 12357 (pgcnt <= (1 << SFMMU_PGCNT_SHIFT)));
12538 12358
12539 12359 /*
12540 12360 		 * Encode pgcnt as (pgcnt - 1), and pass (pgcnt - 1) in
12541 12361 		 * the low 6 bits of sfmmup. This is doable since pgcnt
12542 12362 		 * is always >= 1.
12543 12363 */
12544 12364 ASSERT(!((uint64_t)sfmmup & SFMMU_PGCNT_MASK));
12545 12365 sfmmu_pgcnt = (uint64_t)sfmmup |
12546 12366 ((pgcnt - 1) & SFMMU_PGCNT_MASK);
12547 12367
12548 12368 /*
12549 12369 * We must hold the hat lock during the flush of TLB,
12550 12370 * to avoid a race with sfmmu_invalidate_ctx(), where
12551 12371 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT,
12552 12372 * causing TLB demap routine to skip flush on that MMU.
12553 12373 * If the context on a MMU has already been set to
12554 12374 * INVALID_CONTEXT, we just get an extra flush on
12555 12375 * that MMU.
12556 12376 */
12557 12377 hatlockp = sfmmu_hat_enter(sfmmup);
12558 12378 kpreempt_disable();
12559 12379
12560 12380 cpuset = sfmmup->sfmmu_cpusran;
12561 12381 CPUSET_AND(cpuset, cpu_ready_set);
12562 12382 CPUSET_DEL(cpuset, CPU->cpu_id);
12563 12383
12564 12384 SFMMU_XCALL_STATS(sfmmup);
12565 12385 xt_some(cpuset, vtag_flush_pgcnt_tl1, (uint64_t)addr,
12566 12386 sfmmu_pgcnt);
12567 12387
12568 12388 for (; bitvec != 0; bitvec >>= 1) {
12569 12389 if (bitvec & 1)
12570 12390 vtag_flushpage(addr, (uint64_t)sfmmup);
12571 12391 addr += MMU_PAGESIZE;
12572 12392 }
12573 12393 kpreempt_enable();
12574 12394 sfmmu_hat_exit(hatlockp);
12575 12395
12576 12396 sfmmu_xcall_save += (pgunload-1);
12577 12397 }
12578 12398 dmrp->dmr_bitvec = 0;
12579 12399 }
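
The xcall argument built above packs a small page count into the unused low bits of the aligned sfmmup pointer. A sketch of the encode/decode pair, assuming the same 6-bit field the comment describes; CNT_SHIFT/CNT_MASK and the pack/unpack helpers are illustrative, not the SFMMU_PGCNT_* macros themselves.

	#define	CNT_SHIFT	6
	#define	CNT_MASK	((1ULL << CNT_SHIFT) - 1)

	static uint64_t
	pack_arg(void *hat, uint64_t pgcnt)	/* 1 <= pgcnt <= 64 */
	{
		/* structure alignment guarantees the low bits are free */
		ASSERT(((uint64_t)hat & CNT_MASK) == 0);
		return ((uint64_t)hat | ((pgcnt - 1) & CNT_MASK));
	}

	static void
	unpack_arg(uint64_t arg, void **hatp, uint64_t *pgcntp)
	{
		*hatp = (void *)(arg & ~CNT_MASK);
		*pgcntp = (arg & CNT_MASK) + 1;	/* undo the (pgcnt - 1) encoding */
	}
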
12580 12400
12581 12401 /*
12582 12402 * In cases where we need to synchronize with TLB/TSB miss trap
12583 12403 * handlers, _and_ need to flush the TLB, it's a lot easier to
12584 12404 * throw away the context from the process than to do a
12585 12405 * special song and dance to keep things consistent for the
12586 12406 * handlers.
12587 12407 *
12588 12408 * Since the process suddenly ends up without a context and our caller
12589 12409 * holds the hat lock, threads that fault after this function is called
12590 12410 * will pile up on the lock. We can then do whatever we need to
12591 12411 * atomically from the context of the caller. The first blocked thread
12592 12412 * to resume executing will get the process a new context, and the
12593 12413 * process will resume executing.
12594 12414 *
12595 12415 * One added advantage of this approach is that on MMUs that
12596 12416 * support a "flush all" operation, we will delay the flush until
12597 12417 * cnum wrap-around, and then flush the TLB one time. This
12598 12418 * is rather rare, so it's a lot less expensive than making 8000
12599 12419 * x-calls to flush the TLB 8000 times.
12600 12420 *
12601 12421 * A per-process (PP) lock is used to synchronize ctx allocations in
12602 12422 * resume() and ctx invalidations here.
12603 12423 */
12604 12424 static void
12605 12425 sfmmu_invalidate_ctx(sfmmu_t *sfmmup)
12606 12426 {
12607 12427 cpuset_t cpuset;
12608 12428 int cnum, currcnum;
12609 12429 mmu_ctx_t *mmu_ctxp;
12610 12430 int i;
12611 12431 uint_t pstate_save;
12612 12432
12613 12433 SFMMU_STAT(sf_ctx_inv);
12614 12434
12615 12435 ASSERT(sfmmu_hat_lock_held(sfmmup));
12616 12436 ASSERT(sfmmup != ksfmmup);
12617 12437
12618 12438 kpreempt_disable();
12619 12439
12620 12440 mmu_ctxp = CPU_MMU_CTXP(CPU);
12621 12441 ASSERT(mmu_ctxp);
12622 12442 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms);
12623 12443 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]);
12624 12444
12625 12445 currcnum = sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum;
12626 12446
12627 12447 pstate_save = sfmmu_disable_intrs();
12628 12448
12629 12449 lock_set(&sfmmup->sfmmu_ctx_lock); /* acquire PP lock */
12630 12450 /* set HAT cnum invalid across all context domains. */
12631 12451 for (i = 0; i < max_mmu_ctxdoms; i++) {
12632 12452
12633 12453 cnum = sfmmup->sfmmu_ctxs[i].cnum;
12634 12454 if (cnum == INVALID_CONTEXT) {
12635 12455 continue;
12636 12456 }
12637 12457
12638 12458 sfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT;
12639 12459 }
12640 12460 membar_enter(); /* make sure globally visible to all CPUs */
12641 12461 lock_clear(&sfmmup->sfmmu_ctx_lock); /* release PP lock */
12642 12462
12643 12463 sfmmu_enable_intrs(pstate_save);
12644 12464
12645 12465 cpuset = sfmmup->sfmmu_cpusran;
12646 12466 CPUSET_DEL(cpuset, CPU->cpu_id);
12647 12467 CPUSET_AND(cpuset, cpu_ready_set);
12648 12468 if (!CPUSET_ISNULL(cpuset)) {
12649 12469 SFMMU_XCALL_STATS(sfmmup);
12650 12470 xt_some(cpuset, sfmmu_raise_tsb_exception,
12651 12471 (uint64_t)sfmmup, INVALID_CONTEXT);
12652 12472 xt_sync(cpuset);
12653 12473 SFMMU_STAT(sf_tsb_raise_exception);
12654 12474 SFMMU_MMU_STAT(mmu_tsb_raise_exception);
12655 12475 }
12656 12476
12657 12477 /*
12658 12478 * If the hat to-be-invalidated is the same as the current
12659 12479 * process on local CPU we need to invalidate
12660 12480 * this CPU context as well.
12661 12481 */
12662 12482 if ((sfmmu_getctx_sec() == currcnum) &&
12663 12483 (currcnum != INVALID_CONTEXT)) {
12664 12484 /* sets shared context to INVALID too */
12665 12485 sfmmu_setctx_sec(INVALID_CONTEXT);
12666 12486 sfmmu_clear_utsbinfo();
12667 12487 }
12668 12488
12669 12489 SFMMU_FLAGS_SET(sfmmup, HAT_ALLCTX_INVALID);
12670 12490
12671 12491 kpreempt_enable();
12672 12492
12673 12493 /*
12674 12494 * we hold the hat lock, so nobody should allocate a context
12675 12495 * for us yet
12676 12496 */
12677 12497 ASSERT(sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum == INVALID_CONTEXT);
12678 12498 }
12679 12499
12680 12500 #ifdef VAC
12681 12501 /*
12682 12502  * We need to flush the cache on all cpus. It is possible that
12683 12503  * a process referenced a page as cacheable but has since exited
12684 12504  * and cleared the mapping list. We still need to flush it, but have
12685 12505  * no state, so flushing on all cpus is the only alternative.
12686 12506 */
12687 12507 void
12688 12508 sfmmu_cache_flush(pfn_t pfnum, int vcolor)
12689 12509 {
12690 12510 cpuset_t cpuset;
12691 12511
12692 12512 kpreempt_disable();
12693 12513 cpuset = cpu_ready_set;
12694 12514 CPUSET_DEL(cpuset, CPU->cpu_id);
12695 12515 SFMMU_XCALL_STATS(NULL); /* account to any ctx */
12696 12516 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
12697 12517 xt_sync(cpuset);
12698 12518 vac_flushpage(pfnum, vcolor);
12699 12519 kpreempt_enable();
12700 12520 }
12701 12521
12702 12522 void
12703 12523 sfmmu_cache_flushcolor(int vcolor, pfn_t pfnum)
12704 12524 {
12705 12525 cpuset_t cpuset;
12706 12526
12707 12527 ASSERT(vcolor >= 0);
12708 12528
12709 12529 kpreempt_disable();
12710 12530 cpuset = cpu_ready_set;
12711 12531 CPUSET_DEL(cpuset, CPU->cpu_id);
12712 12532 SFMMU_XCALL_STATS(NULL); /* account to any ctx */
12713 12533 xt_some(cpuset, vac_flushcolor_tl1, vcolor, pfnum);
12714 12534 xt_sync(cpuset);
12715 12535 vac_flushcolor(vcolor, pfnum);
12716 12536 kpreempt_enable();
12717 12537 }
12718 12538 #endif /* VAC */
12719 12539
12720 12540 /*
12721 12541 * We need to prevent processes from accessing the TSB using a cached physical
12722 12542 * address. It's alright if they try to access the TSB via virtual address
12723 12543 * since they will just fault on that virtual address once the mapping has
12724 12544 * been suspended.
12725 12545 */
12726 12546 #pragma weak sendmondo_in_recover
12727 12547
12728 12548 /* ARGSUSED */
12729 12549 static int
12730 12550 sfmmu_tsb_pre_relocator(caddr_t va, uint_t tsbsz, uint_t flags, void *tsbinfo)
12731 12551 {
12732 12552 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo;
12733 12553 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu;
12734 12554 hatlock_t *hatlockp;
12735 12555 sf_scd_t *scdp;
12736 12556
12737 12557 if (flags != HAT_PRESUSPEND)
12738 12558 return (0);
12739 12559
12740 12560 /*
12741 12561 	 * If the tsb is a shared TSB with TSB_SHAREDCTX set, sfmmup must
12742 12562 	 * be a shared hat; set the SCD's tsbinfo flag.
12743 12563 	 * If the tsb is not shared, sfmmup is a private hat; set
12744 12564 	 * its private tsbinfo flag.
12745 12565 */
12746 12566 hatlockp = sfmmu_hat_enter(sfmmup);
12747 12567 tsbinfop->tsb_flags |= TSB_RELOC_FLAG;
12748 12568
12749 12569 if (!(tsbinfop->tsb_flags & TSB_SHAREDCTX)) {
12750 12570 sfmmu_tsb_inv_ctx(sfmmup);
12751 12571 sfmmu_hat_exit(hatlockp);
12752 12572 } else {
12753 12573 /* release lock on the shared hat */
12754 12574 sfmmu_hat_exit(hatlockp);
12755 12575 /* sfmmup is a shared hat */
12756 12576 ASSERT(sfmmup->sfmmu_scdhat);
12757 12577 scdp = sfmmup->sfmmu_scdp;
12758 12578 ASSERT(scdp != NULL);
12759 12579 /* get private hat from the scd list */
12760 12580 mutex_enter(&scdp->scd_mutex);
12761 12581 sfmmup = scdp->scd_sf_list;
12762 12582 while (sfmmup != NULL) {
12763 12583 hatlockp = sfmmu_hat_enter(sfmmup);
12764 12584 /*
12765 12585 * We do not call sfmmu_tsb_inv_ctx here because
12766 12586 * sendmondo_in_recover check is only needed for
12767 12587 * sun4u.
12768 12588 */
12769 12589 sfmmu_invalidate_ctx(sfmmup);
12770 12590 sfmmu_hat_exit(hatlockp);
12771 12591 sfmmup = sfmmup->sfmmu_scd_link.next;
12772 12592
12773 12593 }
12774 12594 mutex_exit(&scdp->scd_mutex);
12775 12595 }
12776 12596 return (0);
12777 12597 }
12778 12598
12779 12599 static void
12780 12600 sfmmu_tsb_inv_ctx(sfmmu_t *sfmmup)
12781 12601 {
12782 12602 extern uint32_t sendmondo_in_recover;
12783 12603
12784 12604 ASSERT(sfmmu_hat_lock_held(sfmmup));
12785 12605
12786 12606 /*
12787 12607 * For Cheetah+ Erratum 25:
12788 12608 * Wait for any active recovery to finish. We can't risk
12789 12609 * relocating the TSB of the thread running mondo_recover_proc()
12790 12610 * since, if we did that, we would deadlock. The scenario we are
12791 12611 * trying to avoid is as follows:
12792 12612 *
12793 12613 * THIS CPU RECOVER CPU
12794 12614 * -------- -----------
12795 12615 * Begins recovery, walking through TSB
12796 12616 * hat_pagesuspend() TSB TTE
12797 12617 * TLB miss on TSB TTE, spins at TL1
12798 12618 * xt_sync()
12799 12619 * send_mondo_timeout()
12800 12620 * mondo_recover_proc()
12801 12621 * ((deadlocked))
12802 12622 *
12803 12623 * The second half of the workaround is that mondo_recover_proc()
12804 12624 * checks to see if the tsb_info has the RELOC flag set, and if it
12805 12625 * does, it skips over that TSB without ever touching tsbinfop->tsb_va
12806 12626 * and hence avoiding the TLB miss that could result in a deadlock.
12807 12627 */
12808 12628 if (&sendmondo_in_recover) {
12809 12629 membar_enter(); /* make sure RELOC flag visible */
12810 12630 while (sendmondo_in_recover) {
12811 12631 drv_usecwait(1);
12812 12632 membar_consumer();
12813 12633 }
12814 12634 }
12815 12635
12816 12636 sfmmu_invalidate_ctx(sfmmup);
12817 12637 }
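
The sendmondo_in_recover check above works because the symbol is declared with #pragma weak earlier in this section: on platforms that never define it, the symbol's address resolves to 0 and the recovery-wait loop is skipped entirely. A sketch of the idiom with a hypothetical flag name:

	#pragma weak platform_flag
	extern uint32_t platform_flag;	/* may or may not be defined anywhere */

	static void
	maybe_wait_on_platform_flag(void)
	{
		if (&platform_flag) {		/* non-NULL only if some module defines it */
			while (platform_flag)	/* safe to dereference once we know it exists */
				drv_usecwait(1);
		}
	}
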
12818 12638
12819 12639 /* ARGSUSED */
12820 12640 static int
12821 12641 sfmmu_tsb_post_relocator(caddr_t va, uint_t tsbsz, uint_t flags,
12822 12642 void *tsbinfo, pfn_t newpfn)
12823 12643 {
12824 12644 hatlock_t *hatlockp;
12825 12645 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo;
12826 12646 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu;
12827 12647
12828 12648 if (flags != HAT_POSTUNSUSPEND)
12829 12649 return (0);
12830 12650
12831 12651 hatlockp = sfmmu_hat_enter(sfmmup);
12832 12652
12833 12653 SFMMU_STAT(sf_tsb_reloc);
12834 12654
12835 12655 /*
12836 12656 * The process may have swapped out while we were relocating one
12837 12657 * of its TSBs. If so, don't bother doing the setup since the
12838 12658 * process can't be using the memory anymore.
12839 12659 */
12840 12660 if ((tsbinfop->tsb_flags & TSB_SWAPPED) == 0) {
12841 12661 ASSERT(va == tsbinfop->tsb_va);
12842 12662 sfmmu_tsbinfo_setup_phys(tsbinfop, newpfn);
12843 12663
12844 12664 if (tsbinfop->tsb_flags & TSB_FLUSH_NEEDED) {
12845 12665 sfmmu_inv_tsb(tsbinfop->tsb_va,
12846 12666 TSB_BYTES(tsbinfop->tsb_szc));
12847 12667 tsbinfop->tsb_flags &= ~TSB_FLUSH_NEEDED;
12848 12668 }
12849 12669 }
12850 12670
12851 12671 membar_exit();
12852 12672 tsbinfop->tsb_flags &= ~TSB_RELOC_FLAG;
12853 12673 cv_broadcast(&sfmmup->sfmmu_tsb_cv);
12854 12674
12855 12675 sfmmu_hat_exit(hatlockp);
12856 12676
12857 12677 return (0);
12858 12678 }
12859 12679
12860 12680 /*
12861 12681 * Allocate and initialize a tsb_info structure. Note that we may or may not
12862 12682 * allocate a TSB here, depending on the flags passed in.
12863 12683 */
12864 12684 static int
12865 12685 sfmmu_tsbinfo_alloc(struct tsb_info **tsbinfopp, int tsb_szc, int tte_sz_mask,
12866 12686 uint_t flags, sfmmu_t *sfmmup)
12867 12687 {
12868 12688 int err;
12869 12689
12870 12690 *tsbinfopp = (struct tsb_info *)kmem_cache_alloc(
12871 12691 sfmmu_tsbinfo_cache, KM_SLEEP);
12872 12692
12873 12693 if ((err = sfmmu_init_tsbinfo(*tsbinfopp, tte_sz_mask,
12874 12694 tsb_szc, flags, sfmmup)) != 0) {
12875 12695 kmem_cache_free(sfmmu_tsbinfo_cache, *tsbinfopp);
12876 12696 SFMMU_STAT(sf_tsb_allocfail);
12877 12697 *tsbinfopp = NULL;
12878 12698 return (err);
12879 12699 }
12880 12700 SFMMU_STAT(sf_tsb_alloc);
12881 12701
12882 12702 /*
12883 12703 * Bump the TSB size counters for this TSB size.
12884 12704 */
12885 12705 (*(((int *)&sfmmu_tsbsize_stat) + tsb_szc))++;
12886 12706 return (0);
12887 12707 }
12888 12708
12889 12709 static void
12890 12710 sfmmu_tsb_free(struct tsb_info *tsbinfo)
12891 12711 {
12892 12712 caddr_t tsbva = tsbinfo->tsb_va;
12893 12713 uint_t tsb_size = TSB_BYTES(tsbinfo->tsb_szc);
12894 12714 struct kmem_cache *kmem_cachep = tsbinfo->tsb_cache;
12895 12715 vmem_t *vmp = tsbinfo->tsb_vmp;
12896 12716
12897 12717 /*
12898 12718 * If we allocated this TSB from relocatable kernel memory, then we
12899 12719 * need to uninstall the callback handler.
12900 12720 */
12901 12721 if (tsbinfo->tsb_cache != sfmmu_tsb8k_cache) {
12902 12722 uintptr_t slab_mask;
12903 12723 caddr_t slab_vaddr;
12904 12724 page_t **ppl;
12905 12725 int ret;
12906 12726
12907 12727 ASSERT(tsb_size <= MMU_PAGESIZE4M || use_bigtsb_arena);
12908 12728 if (tsb_size > MMU_PAGESIZE4M)
12909 12729 slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT;
12910 12730 else
12911 12731 slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT;
12912 12732 slab_vaddr = (caddr_t)((uintptr_t)tsbva & slab_mask);
12913 12733
12914 12734 ret = as_pagelock(&kas, &ppl, slab_vaddr, PAGESIZE, S_WRITE);
12915 12735 ASSERT(ret == 0);
12916 12736 hat_delete_callback(tsbva, (uint_t)tsb_size, (void *)tsbinfo,
12917 12737 0, NULL);
12918 12738 as_pageunlock(&kas, ppl, slab_vaddr, PAGESIZE, S_WRITE);
12919 12739 }
12920 12740
12921 12741 if (kmem_cachep != NULL) {
12922 12742 kmem_cache_free(kmem_cachep, tsbva);
12923 12743 } else {
12924 12744 vmem_xfree(vmp, (void *)tsbva, tsb_size);
12925 12745 }
12926 12746 tsbinfo->tsb_va = (caddr_t)0xbad00bad;
12927 12747 atomic_add_64(&tsb_alloc_bytes, -(int64_t)tsb_size);
12928 12748 }
12929 12749
12930 12750 static void
12931 12751 sfmmu_tsbinfo_free(struct tsb_info *tsbinfo)
12932 12752 {
12933 12753 if ((tsbinfo->tsb_flags & TSB_SWAPPED) == 0) {
12934 12754 sfmmu_tsb_free(tsbinfo);
12935 12755 }
12936 12756 kmem_cache_free(sfmmu_tsbinfo_cache, tsbinfo);
12937 12757
12938 12758 }
12939 12759
12940 12760 /*
12941 12761 * Setup all the references to physical memory for this tsbinfo.
12942 12762 * The underlying page(s) must be locked.
12943 12763 */
12944 12764 static void
12945 12765 sfmmu_tsbinfo_setup_phys(struct tsb_info *tsbinfo, pfn_t pfn)
12946 12766 {
12947 12767 ASSERT(pfn != PFN_INVALID);
12948 12768 ASSERT(pfn == va_to_pfn(tsbinfo->tsb_va));
12949 12769
12950 12770 #ifndef sun4v
12951 12771 if (tsbinfo->tsb_szc == 0) {
12952 12772 sfmmu_memtte(&tsbinfo->tsb_tte, pfn,
12953 12773 PROT_WRITE|PROT_READ, TTE8K);
12954 12774 } else {
12955 12775 /*
12956 12776 * Round down PA and use a large mapping; the handlers will
12957 12777 * compute the TSB pointer at the correct offset into the
12958 12778 * big virtual page. NOTE: this assumes all TSBs larger
12959 12779 * than 8K must come from physically contiguous slabs of
12960 12780 * size tsb_slab_size.
12961 12781 */
12962 12782 sfmmu_memtte(&tsbinfo->tsb_tte, pfn & ~tsb_slab_mask,
12963 12783 PROT_WRITE|PROT_READ, tsb_slab_ttesz);
12964 12784 }
12965 12785 tsbinfo->tsb_pa = ptob(pfn);
12966 12786
12967 12787 TTE_SET_LOCKED(&tsbinfo->tsb_tte); /* lock the tte into dtlb */
12968 12788 TTE_SET_MOD(&tsbinfo->tsb_tte); /* enable writes */
12969 12789
12970 12790 ASSERT(TTE_IS_PRIVILEGED(&tsbinfo->tsb_tte));
12971 12791 ASSERT(TTE_IS_LOCKED(&tsbinfo->tsb_tte));
12972 12792 #else /* sun4v */
12973 12793 tsbinfo->tsb_pa = ptob(pfn);
12974 12794 #endif /* sun4v */
12975 12795 }
12976 12796
12977 12797
12978 12798 /*
12979 12799 * Returns zero on success, ENOMEM if over the high water mark,
12980 12800 * or EAGAIN if the caller needs to retry with a smaller TSB
12981 12801 * size (or specify TSB_FORCEALLOC if the allocation can't fail).
12982 12802 *
12983 12803 * This call cannot fail to allocate a TSB if TSB_FORCEALLOC
12984 12804 * is specified and the TSB requested is PAGESIZE, though it
12985 12805 * may sleep waiting for memory if sufficient memory is not
12986 12806 * available.
12987 12807 */
12988 12808 static int
12989 12809 sfmmu_init_tsbinfo(struct tsb_info *tsbinfo, int tteszmask,
12990 12810 int tsbcode, uint_t flags, sfmmu_t *sfmmup)
12991 12811 {
12992 12812 caddr_t vaddr = NULL;
12993 12813 caddr_t slab_vaddr;
12994 12814 uintptr_t slab_mask;
12995 12815 int tsbbytes = TSB_BYTES(tsbcode);
12996 12816 int lowmem = 0;
12997 12817 struct kmem_cache *kmem_cachep = NULL;
12998 12818 vmem_t *vmp = NULL;
12999 12819 lgrp_id_t lgrpid = LGRP_NONE;
13000 12820 pfn_t pfn;
13001 12821 uint_t cbflags = HAC_SLEEP;
13002 12822 page_t **pplist;
13003 12823 int ret;
13004 12824
13005 12825 ASSERT(tsbbytes <= MMU_PAGESIZE4M || use_bigtsb_arena);
13006 12826 if (tsbbytes > MMU_PAGESIZE4M)
13007 12827 slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT;
13008 12828 else
13009 12829 slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT;
13010 12830
13011 12831 if (flags & (TSB_FORCEALLOC | TSB_SWAPIN | TSB_GROW | TSB_SHRINK))
13012 12832 flags |= TSB_ALLOC;
13013 12833
13014 12834 ASSERT((flags & TSB_FORCEALLOC) == 0 || tsbcode == TSB_MIN_SZCODE);
13015 12835
13016 12836 tsbinfo->tsb_sfmmu = sfmmup;
13017 12837
13018 12838 /*
13019 12839 * If not allocating a TSB, set up the tsbinfo, set TSB_SWAPPED, and
13020 12840 * return.
13021 12841 */
13022 12842 if ((flags & TSB_ALLOC) == 0) {
13023 12843 tsbinfo->tsb_szc = tsbcode;
13024 12844 tsbinfo->tsb_ttesz_mask = tteszmask;
13025 12845 tsbinfo->tsb_va = (caddr_t)0xbadbadbeef;
13026 12846 tsbinfo->tsb_pa = -1;
13027 12847 tsbinfo->tsb_tte.ll = 0;
13028 12848 tsbinfo->tsb_next = NULL;
13029 12849 tsbinfo->tsb_flags = TSB_SWAPPED;
13030 12850 tsbinfo->tsb_cache = NULL;
13031 12851 tsbinfo->tsb_vmp = NULL;
13032 12852 return (0);
13033 12853 }
13034 12854
13035 12855 #ifdef DEBUG
13036 12856 /*
13037 12857 * For debugging:
13038 12858 * Randomly force allocation failures every tsb_alloc_mtbf
13039 12859 * tries if TSB_FORCEALLOC is not specified. This will
13040 12860 * return ENOMEM if tsb_alloc_mtbf is odd, or EAGAIN if
13041 12861 * it is even, to allow testing of both failure paths...
13042 12862 */
13043 12863 if (tsb_alloc_mtbf && ((flags & TSB_FORCEALLOC) == 0) &&
13044 12864 (tsb_alloc_count++ == tsb_alloc_mtbf)) {
13045 12865 tsb_alloc_count = 0;
13046 12866 tsb_alloc_fail_mtbf++;
13047 12867 return ((tsb_alloc_mtbf & 1)? ENOMEM : EAGAIN);
13048 12868 }
13049 12869 #endif /* DEBUG */
13050 12870
13051 12871 /*
13052 12872 * Enforce high water mark if we are not doing a forced allocation
13053 12873 * and are not shrinking a process' TSB.
13054 12874 */
13055 12875 if ((flags & TSB_SHRINK) == 0 &&
13056 12876 (tsbbytes + tsb_alloc_bytes) > tsb_alloc_hiwater) {
13057 12877 if ((flags & TSB_FORCEALLOC) == 0)
13058 12878 return (ENOMEM);
13059 12879 lowmem = 1;
13060 12880 }
13061 12881
13062 12882 /*
13063 12883 * Allocate from the correct location based upon the size of the TSB
13064 12884 * compared to the base page size, and what memory conditions dictate.
13065 12885 * Note we always do nonblocking allocations from the TSB arena since
13066 12886 * we don't want memory fragmentation to cause processes to block
13067 12887 * indefinitely waiting for memory; until the kernel algorithms that
13068 12888 * coalesce large pages are improved this is our best option.
13069 12889 *
13070 12890 * Algorithm:
13071 12891 * If allocating a "large" TSB (>8K), allocate from the
13072 12892 * appropriate kmem_tsb_default_arena vmem arena
13073 12893 * else if low on memory or the TSB_FORCEALLOC flag is set or
13074 12894 * tsb_forceheap is set
13075 12895 * Allocate from kernel heap via sfmmu_tsb8k_cache with
13076 12896 * KM_SLEEP (never fails)
13077 12897 * else
13078 12898 * Allocate from appropriate sfmmu_tsb_cache with
13079 12899 * KM_NOSLEEP
13080 12900 * endif
13081 12901 */
13082 12902 if (tsb_lgrp_affinity)
13083 12903 lgrpid = lgrp_home_id(curthread);
13084 12904 if (lgrpid == LGRP_NONE)
13085 12905 lgrpid = 0; /* use lgrp of boot CPU */
13086 12906
13087 12907 if (tsbbytes > MMU_PAGESIZE) {
13088 12908 if (tsbbytes > MMU_PAGESIZE4M) {
13089 12909 vmp = kmem_bigtsb_default_arena[lgrpid];
13090 12910 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes,
13091 12911 0, 0, NULL, NULL, VM_NOSLEEP);
13092 12912 } else {
13093 12913 vmp = kmem_tsb_default_arena[lgrpid];
13094 12914 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes,
13095 12915 0, 0, NULL, NULL, VM_NOSLEEP);
13096 12916 }
13097 12917 #ifdef DEBUG
13098 12918 } else if (lowmem || (flags & TSB_FORCEALLOC) || tsb_forceheap) {
13099 12919 #else /* !DEBUG */
13100 12920 } else if (lowmem || (flags & TSB_FORCEALLOC)) {
13101 12921 #endif /* DEBUG */
13102 12922 kmem_cachep = sfmmu_tsb8k_cache;
13103 12923 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_SLEEP);
13104 12924 ASSERT(vaddr != NULL);
13105 12925 } else {
13106 12926 kmem_cachep = sfmmu_tsb_cache[lgrpid];
13107 12927 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_NOSLEEP);
13108 12928 }
13109 12929
13110 12930 tsbinfo->tsb_cache = kmem_cachep;
13111 12931 tsbinfo->tsb_vmp = vmp;
13112 12932
13113 12933 if (vaddr == NULL) {
13114 12934 return (EAGAIN);
13115 12935 }
13116 12936
13117 12937 atomic_add_64(&tsb_alloc_bytes, (int64_t)tsbbytes);
13118 12938 kmem_cachep = tsbinfo->tsb_cache;
13119 12939
13120 12940 /*
13121 12941 * If we are allocating from outside the cage, then we need to
13122 12942 * register a relocation callback handler. Note that for now
13123 12943 * since pseudo mappings always hang off of the slab's root page,
13124 12944 * we need only lock the first 8K of the TSB slab. This is a bit
13125 12945 * hacky but it is good for performance.
13126 12946 */
13127 12947 if (kmem_cachep != sfmmu_tsb8k_cache) {
13128 12948 slab_vaddr = (caddr_t)((uintptr_t)vaddr & slab_mask);
13129 12949 ret = as_pagelock(&kas, &pplist, slab_vaddr, PAGESIZE, S_WRITE);
13130 12950 ASSERT(ret == 0);
13131 12951 ret = hat_add_callback(sfmmu_tsb_cb_id, vaddr, (uint_t)tsbbytes,
13132 12952 cbflags, (void *)tsbinfo, &pfn, NULL);
13133 12953
13134 12954 /*
13135 12955 * Need to free up resources if we could not successfully
13136 12956 * add the callback function and return an error condition.
13137 12957 */
13138 12958 if (ret != 0) {
13139 12959 if (kmem_cachep) {
13140 12960 kmem_cache_free(kmem_cachep, vaddr);
13141 12961 } else {
13142 12962 vmem_xfree(vmp, (void *)vaddr, tsbbytes);
13143 12963 }
13144 12964 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE,
13145 12965 S_WRITE);
13146 12966 return (EAGAIN);
13147 12967 }
13148 12968 } else {
13149 12969 /*
13150 12970 * Since allocation of 8K TSBs from heap is rare and occurs
13151 12971 * during memory pressure we allocate them from permanent
13152 12972 * memory rather than using callbacks to get the PFN.
13153 12973 */
13154 12974 pfn = hat_getpfnum(kas.a_hat, vaddr);
13155 12975 }
13156 12976
13157 12977 tsbinfo->tsb_va = vaddr;
13158 12978 tsbinfo->tsb_szc = tsbcode;
13159 12979 tsbinfo->tsb_ttesz_mask = tteszmask;
13160 12980 tsbinfo->tsb_next = NULL;
13161 12981 tsbinfo->tsb_flags = 0;
13162 12982
13163 12983 sfmmu_tsbinfo_setup_phys(tsbinfo, pfn);
13164 12984
13165 12985 sfmmu_inv_tsb(vaddr, tsbbytes);
13166 12986
13167 12987 if (kmem_cachep != sfmmu_tsb8k_cache) {
13168 12988 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, S_WRITE);
13169 12989 }
13170 12990
13171 12991 return (0);
13172 12992 }
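The ENOMEM/EAGAIN/TSB_FORCEALLOC contract documented above sfmmu_init_tsbinfo() is returned unchanged by sfmmu_tsbinfo_alloc(). A minimal sketch of a caller honoring that contract, assuming the caller simply wants the largest TSB that currently fits (tsbinfo_alloc_with_retry is a hypothetical helper for illustration; the real callers in this file also handle the swap-in, grow and shrink paths):

static int
tsbinfo_alloc_with_retry(struct tsb_info **tip, int szc, int ttemask,
    sfmmu_t *sfmmup)
{
	int err;

	/* On EAGAIN retry with successively smaller TSB size codes. */
	for (; szc > TSB_MIN_SZCODE; szc--) {
		err = sfmmu_tsbinfo_alloc(tip, szc, ttemask, TSB_ALLOC,
		    sfmmup);
		if (err != EAGAIN)
			return (err);	/* 0 on success, ENOMEM over hiwater */
	}

	/*
	 * Last resort: TSB_FORCEALLOC with the minimum size code cannot
	 * fail, though it may sleep waiting for memory.
	 */
	return (sfmmu_tsbinfo_alloc(tip, TSB_MIN_SZCODE, ttemask,
	    TSB_FORCEALLOC, sfmmup));
}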
13173 12993
13174 12994 /*
13175 12995 * Initialize per cpu tsb and per cpu tsbmiss_area
13176 12996 */
13177 12997 void
13178 12998 sfmmu_init_tsbs(void)
13179 12999 {
13180 13000 int i;
13181 13001 struct tsbmiss *tsbmissp;
13182 13002 struct kpmtsbm *kpmtsbmp;
13183 13003 #ifndef sun4v
13184 13004 extern int dcache_line_mask;
13185 13005 #endif /* sun4v */
13186 13006 extern uint_t vac_colors;
13187 13007
13188 13008 /*
13189 13009 * Init. tsb miss area.
13190 13010 */
13191 13011 tsbmissp = tsbmiss_area;
13192 13012
13193 13013 for (i = 0; i < NCPU; tsbmissp++, i++) {
13194 13014 /*
13195 13015 * initialize the tsbmiss area.
13196 13016 * Do this for all possible CPUs as some may be added
13197 13017 * while the system is running. There is no cost to this.
13198 13018 */
13199 13019 tsbmissp->ksfmmup = ksfmmup;
13200 13020 #ifndef sun4v
13201 13021 tsbmissp->dcache_line_mask = (uint16_t)dcache_line_mask;
13202 13022 #endif /* sun4v */
13203 13023 tsbmissp->khashstart =
13204 13024 (struct hmehash_bucket *)va_to_pa((caddr_t)khme_hash);
13205 13025 tsbmissp->uhashstart =
13206 13026 (struct hmehash_bucket *)va_to_pa((caddr_t)uhme_hash);
13207 13027 tsbmissp->khashsz = khmehash_num;
13208 13028 tsbmissp->uhashsz = uhmehash_num;
13209 13029 }
13210 13030
13211 13031 sfmmu_tsb_cb_id = hat_register_callback('T'<<16 | 'S' << 8 | 'B',
13212 13032 sfmmu_tsb_pre_relocator, sfmmu_tsb_post_relocator, NULL, 0);
13213 13033
13214 13034 if (kpm_enable == 0)
13215 13035 return;
13216 13036
13217 13037 /* -- Begin KPM specific init -- */
13218 13038
13219 13039 if (kpm_smallpages) {
13220 13040 /*
13221 13041 * If we're using base pagesize pages for seg_kpm
13222 13042 * mappings, we use the kernel TSB since we can't afford
13223 13043 * to allocate a second huge TSB for these mappings.
13224 13044 */
13225 13045 kpm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base;
13226 13046 kpm_tsbsz = ktsb_szcode;
13227 13047 kpmsm_tsbbase = kpm_tsbbase;
13228 13048 kpmsm_tsbsz = kpm_tsbsz;
13229 13049 } else {
13230 13050 /*
13231 13051 * In VAC conflict case, just put the entries in the
13232 13052 * kernel 8K indexed TSB for now so we can find them.
13233 13053 * This could really be changed in the future if we feel
13234 13054 * the need...
13235 13055 */
13236 13056 kpmsm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base;
13237 13057 kpmsm_tsbsz = ktsb_szcode;
13238 13058 kpm_tsbbase = ktsb_phys? ktsb4m_pbase : (uint64_t)ktsb4m_base;
13239 13059 kpm_tsbsz = ktsb4m_szcode;
13240 13060 }
13241 13061
13242 13062 kpmtsbmp = kpmtsbm_area;
13243 13063 for (i = 0; i < NCPU; kpmtsbmp++, i++) {
13244 13064 /*
13245 13065 * Initialize the kpmtsbm area.
13246 13066 * Do this for all possible CPUs as some may be added
13247 13067 * while the system is running. There is no cost to this.
13248 13068 */
13249 13069 kpmtsbmp->vbase = kpm_vbase;
13250 13070 kpmtsbmp->vend = kpm_vbase + kpm_size * vac_colors;
13251 13071 kpmtsbmp->sz_shift = kpm_size_shift;
13252 13072 kpmtsbmp->kpmp_shift = kpmp_shift;
13253 13073 kpmtsbmp->kpmp2pshft = (uchar_t)kpmp2pshft;
13254 13074 if (kpm_smallpages == 0) {
13255 13075 kpmtsbmp->kpmp_table_sz = kpmp_table_sz;
13256 13076 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_table);
13257 13077 } else {
13258 13078 kpmtsbmp->kpmp_table_sz = kpmp_stable_sz;
13259 13079 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_stable);
13260 13080 }
13261 13081 kpmtsbmp->msegphashpa = va_to_pa(memseg_phash);
13262 13082 kpmtsbmp->flags = KPMTSBM_ENABLE_FLAG;
13263 13083 #ifdef DEBUG
13264 13084 kpmtsbmp->flags |= (kpm_tsbmtl) ? KPMTSBM_TLTSBM_FLAG : 0;
13265 13085 #endif /* DEBUG */
13266 13086 if (ktsb_phys)
13267 13087 kpmtsbmp->flags |= KPMTSBM_TSBPHYS_FLAG;
13268 13088 }
13269 13089
13270 13090 /* -- End KPM specific init -- */
13271 13091 }
13272 13092
13273 13093 /* Avoid using sfmmu_tsbinfo_alloc() to avoid kmem_alloc - no real reason */
13274 13094 struct tsb_info ktsb_info[2];
13275 13095
13276 13096 /*
13277 13097 * Called from hat_kern_setup() to setup the tsb_info for ksfmmup.
13278 13098 */
13279 13099 void
13280 13100 sfmmu_init_ktsbinfo()
13281 13101 {
13282 13102 ASSERT(ksfmmup != NULL);
13283 13103 ASSERT(ksfmmup->sfmmu_tsb == NULL);
13284 13104 /*
13285 13105 * Allocate tsbinfos for kernel and copy in data
13286 13106 * to make debug easier and sun4v setup easier.
13287 13107 */
13288 13108 ktsb_info[0].tsb_sfmmu = ksfmmup;
13289 13109 ktsb_info[0].tsb_szc = ktsb_szcode;
13290 13110 ktsb_info[0].tsb_ttesz_mask = TSB8K|TSB64K|TSB512K;
13291 13111 ktsb_info[0].tsb_va = ktsb_base;
13292 13112 ktsb_info[0].tsb_pa = ktsb_pbase;
13293 13113 ktsb_info[0].tsb_flags = 0;
13294 13114 ktsb_info[0].tsb_tte.ll = 0;
13295 13115 ktsb_info[0].tsb_cache = NULL;
13296 13116
13297 13117 ktsb_info[1].tsb_sfmmu = ksfmmup;
13298 13118 ktsb_info[1].tsb_szc = ktsb4m_szcode;
13299 13119 ktsb_info[1].tsb_ttesz_mask = TSB4M;
13300 13120 ktsb_info[1].tsb_va = ktsb4m_base;
13301 13121 ktsb_info[1].tsb_pa = ktsb4m_pbase;
13302 13122 ktsb_info[1].tsb_flags = 0;
13303 13123 ktsb_info[1].tsb_tte.ll = 0;
13304 13124 ktsb_info[1].tsb_cache = NULL;
13305 13125
13306 13126 /* Link them into ksfmmup. */
13307 13127 ktsb_info[0].tsb_next = &ktsb_info[1];
13308 13128 ktsb_info[1].tsb_next = NULL;
13309 13129 ksfmmup->sfmmu_tsb = &ktsb_info[0];
13310 13130
13311 13131 sfmmu_setup_tsbinfo(ksfmmup);
13312 13132 }
13313 13133
13314 13134 /*
13315 13135 * Cache the last value returned from va_to_pa(). If the VA specified
13316 13136  * in the current call to cached_va_to_pa() maps to the same page (as the
13317 13137 * previous call to cached_va_to_pa()), then compute the PA using
13318 13138 * cached info, else call va_to_pa().
13319 13139 *
13320 13140 * Note: this function is neither MT-safe nor consistent in the presence
13321 13141 * of multiple, interleaved threads. This function was created to enable
13322 13142 * an optimization used during boot (at a point when there's only one thread
13323 13143 * executing on the "boot CPU", and before startup_vm() has been called).
13324 13144 */
13325 13145 static uint64_t
13326 13146 cached_va_to_pa(void *vaddr)
13327 13147 {
13328 13148 static uint64_t prev_vaddr_base = 0;
13329 13149 static uint64_t prev_pfn = 0;
13330 13150
13331 13151 if ((((uint64_t)vaddr) & MMU_PAGEMASK) == prev_vaddr_base) {
13332 13152 return (prev_pfn | ((uint64_t)vaddr & MMU_PAGEOFFSET));
13333 13153 } else {
13334 13154 uint64_t pa = va_to_pa(vaddr);
13335 13155
13336 13156 if (pa != ((uint64_t)-1)) {
13337 13157 /*
13338 13158 * Computed physical address is valid. Cache its
13339 13159 * related info for the next cached_va_to_pa() call.
13340 13160 */
13341 13161 prev_pfn = pa & MMU_PAGEMASK;
13342 13162 prev_vaddr_base = ((uint64_t)vaddr) & MMU_PAGEMASK;
13343 13163 }
13344 13164
13345 13165 return (pa);
13346 13166 }
13347 13167 }
13348 13168
13349 13169 /*
13350 13170 * Carve up our nucleus hblk region. We may allocate more hblks than
13351 13171 * asked due to rounding errors but we are guaranteed to have at least
13352 13172 * enough space to allocate the requested number of hblk8's and hblk1's.
13353 13173 */
13354 13174 void
13355 13175 sfmmu_init_nucleus_hblks(caddr_t addr, size_t size, int nhblk8, int nhblk1)
13356 13176 {
13357 13177 struct hme_blk *hmeblkp;
13358 13178 size_t hme8blk_sz, hme1blk_sz;
13359 13179 size_t i;
13360 13180 size_t hblk8_bound;
13361 13181 ulong_t j = 0, k = 0;
13362 13182
13363 13183 ASSERT(addr != NULL && size != 0);
13364 13184
13365 13185 /* Need to use proper structure alignment */
13366 13186 hme8blk_sz = roundup(HME8BLK_SZ, sizeof (int64_t));
13367 13187 hme1blk_sz = roundup(HME1BLK_SZ, sizeof (int64_t));
13368 13188
13369 13189 nucleus_hblk8.list = (void *)addr;
13370 13190 nucleus_hblk8.index = 0;
13371 13191
13372 13192 /*
13373 13193 * Use as much memory as possible for hblk8's since we
13374 13194 * expect all bop_alloc'ed memory to be allocated in 8k chunks.
13375 13195 * We need to hold back enough space for the hblk1's which
13376 13196 * we'll allocate next.
13377 13197 */
13378 13198 hblk8_bound = size - (nhblk1 * hme1blk_sz) - hme8blk_sz;
13379 13199 for (i = 0; i <= hblk8_bound; i += hme8blk_sz, j++) {
13380 13200 hmeblkp = (struct hme_blk *)addr;
13381 13201 addr += hme8blk_sz;
13382 13202 hmeblkp->hblk_nuc_bit = 1;
13383 13203 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp);
13384 13204 }
13385 13205 nucleus_hblk8.len = j;
13386 13206 ASSERT(j >= nhblk8);
13387 13207 SFMMU_STAT_ADD(sf_hblk8_ncreate, j);
13388 13208
13389 13209 nucleus_hblk1.list = (void *)addr;
13390 13210 nucleus_hblk1.index = 0;
13391 13211 for (; i <= (size - hme1blk_sz); i += hme1blk_sz, k++) {
13392 13212 hmeblkp = (struct hme_blk *)addr;
13393 13213 addr += hme1blk_sz;
13394 13214 hmeblkp->hblk_nuc_bit = 1;
13395 13215 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp);
13396 13216 }
13397 13217 ASSERT(k >= nhblk1);
13398 13218 nucleus_hblk1.len = k;
13399 13219 SFMMU_STAT_ADD(sf_hblk1_ncreate, k);
13400 13220 }
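To make the rounding behaviour above concrete (all sizes here are invented for the example and are not the real HME8BLK_SZ/HME1BLK_SZ): with size = 4096, nhblk8 = 28, nhblk1 = 2, hme8blk_sz = 120 and hme1blk_sz = 200, hblk8_bound = 4096 - 2*200 - 120 = 3576, so the first loop carves out 30 hblk8's (i = 0, 120, ..., 3480), two more than requested; the second loop then carves exactly 2 hblk1's at offsets 3600 and 3800 and stops because the next offset, 4000, would exceed size - hme1blk_sz = 3896.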
13401 13221
13402 13222 /*
13403 13223 * This function is currently not supported on this platform. For what
13404 13224 * it's supposed to do, see hat.c and hat_srmmu.c
13405 13225 */
13406 13226 /* ARGSUSED */
13407 13227 faultcode_t
13408 13228 hat_softlock(struct hat *hat, caddr_t addr, size_t *lenp, page_t **ppp,
13409 13229 uint_t flags)
13410 13230 {
13411 - ASSERT(hat->sfmmu_xhat_provider == NULL);
13412 13231 return (FC_NOSUPPORT);
13413 13232 }
13414 13233
13415 13234 /*
13416 13235  * Searches the mapping list of the page for a mapping of the same size. If not
13417 13236 * found the corresponding bit is cleared in the p_index field. When large
13418 13237 * pages are more prevalent in the system, we can maintain the mapping list
13419 13238 * in order and we don't have to traverse the list each time. Just check the
13420 13239 * next and prev entries, and if both are of different size, we clear the bit.
13421 13240 */
13422 13241 static void
13423 13242 sfmmu_rm_large_mappings(page_t *pp, int ttesz)
13424 13243 {
13425 13244 struct sf_hment *sfhmep;
13426 13245 struct hme_blk *hmeblkp;
13427 13246 int index;
13428 13247 pgcnt_t npgs;
13429 13248
13430 13249 ASSERT(ttesz > TTE8K);
13431 13250
13432 13251 ASSERT(sfmmu_mlist_held(pp));
13433 13252
13434 13253 ASSERT(PP_ISMAPPED_LARGE(pp));
13435 13254
13436 13255 /*
13437 13256  * Traverse mapping list looking for another mapping of the same size,
13438 13257 * since we only want to clear index field if all mappings of
13439 13258 * that size are gone.
13440 13259 */
13441 13260
13442 13261 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
13443 13262 if (IS_PAHME(sfhmep))
13444 13263 continue;
13445 13264 hmeblkp = sfmmu_hmetohblk(sfhmep);
13446 - if (hmeblkp->hblk_xhat_bit)
13447 - continue;
13448 13265 if (hme_size(sfhmep) == ttesz) {
13449 13266 /*
13450 13267 * another mapping of the same size. don't clear index.
13451 13268 */
13452 13269 return;
13453 13270 }
13454 13271 }
13455 13272
13456 13273 /*
13457 13274 * Clear the p_index bit for large page.
13458 13275 */
13459 13276 index = PAGESZ_TO_INDEX(ttesz);
13460 13277 npgs = TTEPAGES(ttesz);
13461 13278 while (npgs-- > 0) {
13462 13279 ASSERT(pp->p_index & index);
13463 13280 pp->p_index &= ~index;
13464 13281 pp = PP_PAGENEXT(pp);
13465 13282 }
13466 13283 }
13467 13284
13468 13285 /*
13469 13286 * return supported features
13470 13287 */
13471 13288 /* ARGSUSED */
13472 13289 int
13473 13290 hat_supported(enum hat_features feature, void *arg)
13474 13291 {
13475 13292 switch (feature) {
13476 13293 case HAT_SHARED_PT:
13477 13294 case HAT_DYNAMIC_ISM_UNMAP:
13478 13295 case HAT_VMODSORT:
13479 13296 return (1);
13480 13297 case HAT_SHARED_REGIONS:
13481 13298 if (shctx_on)
13482 13299 return (1);
13483 13300 else
13484 13301 return (0);
13485 13302 default:
13486 13303 return (0);
13487 13304 }
13488 13305 }
13489 13306
13490 13307 void
13491 13308 hat_enter(struct hat *hat)
13492 13309 {
13493 13310 hatlock_t *hatlockp;
13494 13311
13495 13312 if (hat != ksfmmup) {
13496 13313 hatlockp = TSB_HASH(hat);
13497 13314 mutex_enter(HATLOCK_MUTEXP(hatlockp));
13498 13315 }
13499 13316 }
13500 13317
13501 13318 void
13502 13319 hat_exit(struct hat *hat)
13503 13320 {
13504 13321 hatlock_t *hatlockp;
13505 13322
13506 13323 if (hat != ksfmmup) {
13507 13324 hatlockp = TSB_HASH(hat);
13508 13325 mutex_exit(HATLOCK_MUTEXP(hatlockp));
13509 13326 }
13510 13327 }
13511 13328
13512 13329 /*ARGSUSED*/
13513 13330 void
13514 13331 hat_reserve(struct as *as, caddr_t addr, size_t len)
13515 13332 {
13516 13333 }
13517 13334
13518 13335 static void
13519 13336 hat_kstat_init(void)
13520 13337 {
13521 13338 kstat_t *ksp;
13522 13339
13523 13340 ksp = kstat_create("unix", 0, "sfmmu_global_stat", "hat",
13524 13341 KSTAT_TYPE_RAW, sizeof (struct sfmmu_global_stat),
13525 13342 KSTAT_FLAG_VIRTUAL);
13526 13343 if (ksp) {
13527 13344 ksp->ks_data = (void *) &sfmmu_global_stat;
13528 13345 kstat_install(ksp);
13529 13346 }
13530 13347 ksp = kstat_create("unix", 0, "sfmmu_tsbsize_stat", "hat",
13531 13348 KSTAT_TYPE_RAW, sizeof (struct sfmmu_tsbsize_stat),
13532 13349 KSTAT_FLAG_VIRTUAL);
13533 13350 if (ksp) {
13534 13351 ksp->ks_data = (void *) &sfmmu_tsbsize_stat;
13535 13352 kstat_install(ksp);
13536 13353 }
13537 13354 ksp = kstat_create("unix", 0, "sfmmu_percpu_stat", "hat",
13538 13355 KSTAT_TYPE_RAW, sizeof (struct sfmmu_percpu_stat) * NCPU,
13539 13356 KSTAT_FLAG_WRITABLE);
13540 13357 if (ksp) {
13541 13358 ksp->ks_update = sfmmu_kstat_percpu_update;
13542 13359 kstat_install(ksp);
13543 13360 }
13544 13361 }
13545 13362
13546 13363 /* ARGSUSED */
13547 13364 static int
13548 13365 sfmmu_kstat_percpu_update(kstat_t *ksp, int rw)
13549 13366 {
13550 13367 struct sfmmu_percpu_stat *cpu_kstat = ksp->ks_data;
13551 13368 struct tsbmiss *tsbm = tsbmiss_area;
13552 13369 struct kpmtsbm *kpmtsbm = kpmtsbm_area;
13553 13370 int i;
13554 13371
13555 13372 ASSERT(cpu_kstat);
13556 13373 if (rw == KSTAT_READ) {
13557 13374 for (i = 0; i < NCPU; cpu_kstat++, tsbm++, kpmtsbm++, i++) {
13558 13375 cpu_kstat->sf_itlb_misses = 0;
13559 13376 cpu_kstat->sf_dtlb_misses = 0;
13560 13377 cpu_kstat->sf_utsb_misses = tsbm->utsb_misses -
13561 13378 tsbm->uprot_traps;
13562 13379 cpu_kstat->sf_ktsb_misses = tsbm->ktsb_misses +
13563 13380 kpmtsbm->kpm_tsb_misses - tsbm->kprot_traps;
13564 13381 cpu_kstat->sf_tsb_hits = 0;
13565 13382 cpu_kstat->sf_umod_faults = tsbm->uprot_traps;
13566 13383 cpu_kstat->sf_kmod_faults = tsbm->kprot_traps;
13567 13384 }
13568 13385 } else {
13569 13386 /* KSTAT_WRITE is used to clear stats */
13570 13387 for (i = 0; i < NCPU; tsbm++, kpmtsbm++, i++) {
13571 13388 tsbm->utsb_misses = 0;
13572 13389 tsbm->ktsb_misses = 0;
13573 13390 tsbm->uprot_traps = 0;
13574 13391 tsbm->kprot_traps = 0;
13575 13392 kpmtsbm->kpm_dtlb_misses = 0;
13576 13393 kpmtsbm->kpm_tsb_misses = 0;
13577 13394 }
13578 13395 }
13579 13396 return (0);
13580 13397 }
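These are raw kstats published under module "unix", instance 0, class "hat", and since sfmmu_percpu_stat is created KSTAT_FLAG_WRITABLE, a write resets the per-CPU counters via the KSTAT_WRITE branch above. A small userland sketch of reading one of them through libkstat (illustrative only; interpreting ks_data requires the kernel's struct sfmmu_global_stat layout, and the program links with -lkstat):

#include <kstat.h>
#include <stdio.h>

int
main(void)
{
	kstat_ctl_t *kc = kstat_open();
	kstat_t *ksp;

	if (kc == NULL)
		return (1);
	ksp = kstat_lookup(kc, "unix", 0, "sfmmu_global_stat");
	if (ksp != NULL && kstat_read(kc, ksp, NULL) != -1) {
		/* ks_data now holds a raw struct sfmmu_global_stat snapshot */
		(void) printf("sfmmu_global_stat: %lu bytes of raw data\n",
		    (unsigned long)ksp->ks_data_size);
	}
	(void) kstat_close(kc);
	return (0);
}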
13581 13398
13582 13399 #ifdef DEBUG
13583 13400
13584 13401 tte_t *gorig[NCPU], *gcur[NCPU], *gnew[NCPU];
13585 13402
13586 13403 /*
13587 13404 * A tte checker. *orig_old is the value we read before cas.
13588 13405 * *cur is the value returned by cas.
13589 13406 * *new is the desired value when we do the cas.
13590 13407 *
13591 13408 * *hmeblkp is currently unused.
13592 13409 */
13593 13410
13594 13411 /* ARGSUSED */
13595 13412 void
13596 13413 chk_tte(tte_t *orig_old, tte_t *cur, tte_t *new, struct hme_blk *hmeblkp)
13597 13414 {
13598 13415 pfn_t i, j, k;
13599 13416 int cpuid = CPU->cpu_id;
13600 13417
13601 13418 gorig[cpuid] = orig_old;
13602 13419 gcur[cpuid] = cur;
13603 13420 gnew[cpuid] = new;
13604 13421
13605 13422 #ifdef lint
13606 13423 hmeblkp = hmeblkp;
13607 13424 #endif
13608 13425
13609 13426 if (TTE_IS_VALID(orig_old)) {
13610 13427 if (TTE_IS_VALID(cur)) {
13611 13428 i = TTE_TO_TTEPFN(orig_old);
13612 13429 j = TTE_TO_TTEPFN(cur);
13613 13430 k = TTE_TO_TTEPFN(new);
13614 13431 if (i != j) {
13615 13432 /* remap error? */
13616 13433 panic("chk_tte: bad pfn, 0x%lx, 0x%lx", i, j);
13617 13434 }
13618 13435
13619 13436 if (i != k) {
13620 13437 /* remap error? */
13621 13438 panic("chk_tte: bad pfn2, 0x%lx, 0x%lx", i, k);
13622 13439 }
13623 13440 } else {
13624 13441 if (TTE_IS_VALID(new)) {
13625 13442 panic("chk_tte: invalid cur? ");
13626 13443 }
13627 13444
13628 13445 i = TTE_TO_TTEPFN(orig_old);
13629 13446 k = TTE_TO_TTEPFN(new);
13630 13447 if (i != k) {
13631 13448 panic("chk_tte: bad pfn3, 0x%lx, 0x%lx", i, k);
13632 13449 }
13633 13450 }
13634 13451 } else {
13635 13452 if (TTE_IS_VALID(cur)) {
13636 13453 j = TTE_TO_TTEPFN(cur);
13637 13454 if (TTE_IS_VALID(new)) {
13638 13455 k = TTE_TO_TTEPFN(new);
13639 13456 if (j != k) {
13640 13457 panic("chk_tte: bad pfn4, 0x%lx, 0x%lx",
13641 13458 j, k);
13642 13459 }
13643 13460 } else {
13644 13461 panic("chk_tte: why here?");
13645 13462 }
13646 13463 } else {
13647 13464 if (!TTE_IS_VALID(new)) {
13648 13465 panic("chk_tte: why here2 ?");
13649 13466 }
13650 13467 }
13651 13468 }
13652 13469 }
13653 13470
13654 13471 #endif /* DEBUG */
13655 13472
13656 13473 extern void prefetch_tsbe_read(struct tsbe *);
13657 13474 extern void prefetch_tsbe_write(struct tsbe *);
13658 13475
13659 13476
13660 13477 /*
13661 13478 * We want to prefetch 7 cache lines ahead for our read prefetch. This gives
13662 13479 * us optimal performance on Cheetah+. You can only have 8 outstanding
13663 13480 * prefetches at any one time, so we opted for 7 read prefetches and 1 write
13664 13481 * prefetch to make the most utilization of the prefetch capability.
13665 13482 */
13666 13483 #define TSBE_PREFETCH_STRIDE (7)
13667 13484
13668 13485 void
13669 13486 sfmmu_copy_tsb(struct tsb_info *old_tsbinfo, struct tsb_info *new_tsbinfo)
13670 13487 {
13671 13488 int old_bytes = TSB_BYTES(old_tsbinfo->tsb_szc);
13672 13489 int new_bytes = TSB_BYTES(new_tsbinfo->tsb_szc);
13673 13490 int old_entries = TSB_ENTRIES(old_tsbinfo->tsb_szc);
13674 13491 int new_entries = TSB_ENTRIES(new_tsbinfo->tsb_szc);
13675 13492 struct tsbe *old;
13676 13493 struct tsbe *new;
13677 13494 struct tsbe *new_base = (struct tsbe *)new_tsbinfo->tsb_va;
13678 13495 uint64_t va;
13679 13496 int new_offset;
13680 13497 int i;
13681 13498 int vpshift;
13682 13499 int last_prefetch;
13683 13500
13684 13501 if (old_bytes == new_bytes) {
13685 13502 bcopy(old_tsbinfo->tsb_va, new_tsbinfo->tsb_va, new_bytes);
13686 13503 } else {
13687 13504
13688 13505 /*
13689 13506 * A TSBE is 16 bytes which means there are four TSBE's per
13690 13507 * P$ line (64 bytes), thus every 4 TSBE's we prefetch.
13691 13508 */
13692 13509 old = (struct tsbe *)old_tsbinfo->tsb_va;
13693 13510 last_prefetch = old_entries - (4*(TSBE_PREFETCH_STRIDE+1));
13694 13511 for (i = 0; i < old_entries; i++, old++) {
13695 13512 if (((i & (4-1)) == 0) && (i < last_prefetch))
13696 13513 prefetch_tsbe_read(old);
13697 13514 if (!old->tte_tag.tag_invalid) {
13698 13515 /*
13699 13516 * We have a valid TTE to remap. Check the
13700 13517 * size. We won't remap 64K or 512K TTEs
13701 13518 * because they span more than one TSB entry
13702 13519 * and are indexed using an 8K virt. page.
13703 13520 * Ditto for 32M and 256M TTEs.
13704 13521 */
13705 13522 if (TTE_CSZ(&old->tte_data) == TTE64K ||
13706 13523 TTE_CSZ(&old->tte_data) == TTE512K)
13707 13524 continue;
13708 13525 if (mmu_page_sizes == max_mmu_page_sizes) {
13709 13526 if (TTE_CSZ(&old->tte_data) == TTE32M ||
13710 13527 TTE_CSZ(&old->tte_data) == TTE256M)
13711 13528 continue;
13712 13529 }
13713 13530
13714 13531 /* clear the lower 22 bits of the va */
13715 13532 va = *(uint64_t *)old << 22;
13716 13533 /* turn va into a virtual pfn */
13717 13534 va >>= 22 - TSB_START_SIZE;
13718 13535 /*
13719 13536 * or in bits from the offset in the tsb
13720 13537 * to get the real virtual pfn. These
13721 13538 * correspond to bits [21:13] in the va
13722 13539 */
13723 13540 vpshift =
13724 13541 TTE_BSZS_SHIFT(TTE_CSZ(&old->tte_data)) &
13725 13542 0x1ff;
13726 13543 va |= (i << vpshift);
13727 13544 va >>= vpshift;
13728 13545 new_offset = va & (new_entries - 1);
13729 13546 new = new_base + new_offset;
13730 13547 prefetch_tsbe_write(new);
13731 13548 *new = *old;
13732 13549 }
13733 13550 }
13734 13551 }
13735 13552 }
13736 13553
13737 13554 /*
13738 13555 * unused in sfmmu
13739 13556 */
13740 13557 void
13741 13558 hat_dump(void)
13742 13559 {
13743 13560 }
13744 13561
13745 13562 /*
13746 13563 * Called when a thread is exiting and we have switched to the kernel address
13747 13564 * space. Perform the same VM initialization resume() uses when switching
13748 13565 * processes.
13749 13566 *
13750 13567 * Note that sfmmu_load_mmustate() is currently a no-op for kernel threads, but
13751 13568 * we call it anyway in case the semantics change in the future.
13752 13569 */
13753 13570 /*ARGSUSED*/
13754 13571 void
13755 13572 hat_thread_exit(kthread_t *thd)
13756 13573 {
13757 13574 uint_t pgsz_cnum;
13758 13575 uint_t pstate_save;
13759 13576
13760 13577 ASSERT(thd->t_procp->p_as == &kas);
13761 13578
13762 13579 pgsz_cnum = KCONTEXT;
13763 13580 #ifdef sun4u
13764 13581 pgsz_cnum |= (ksfmmup->sfmmu_cext << CTXREG_EXT_SHIFT);
13765 13582 #endif
13766 13583
13767 13584 /*
13768 13585 * Note that sfmmu_load_mmustate() is currently a no-op for
13769 13586 * kernel threads. We need to disable interrupts here,
13770 13587 * simply because otherwise sfmmu_load_mmustate() would panic
13771 13588 * if the caller does not disable interrupts.
13772 13589 */
13773 13590 pstate_save = sfmmu_disable_intrs();
13774 13591
13775 13592 /* Compatibility Note: hw takes care of MMU_SCONTEXT1 */
13776 13593 sfmmu_setctx_sec(pgsz_cnum);
13777 13594 sfmmu_load_mmustate(ksfmmup);
13778 13595 sfmmu_enable_intrs(pstate_save);
13779 13596 }
13780 13597
13781 13598
13782 13599 /*
13783 13600 * SRD support
13784 13601 */
13785 13602 #define SRD_HASH_FUNCTION(vp) (((((uintptr_t)(vp)) >> 4) ^ \
13786 13603 (((uintptr_t)(vp)) >> 11)) & \
13787 13604 srd_hashmask)
13788 13605
13789 13606 /*
13790 13607 * Attach the process to the srd struct associated with the exec vnode
13791 13608 * from which the process is started.
13792 13609 */
13793 13610 void
13794 13611 hat_join_srd(struct hat *sfmmup, vnode_t *evp)
13795 13612 {
13796 13613 uint_t hash = SRD_HASH_FUNCTION(evp);
13797 13614 sf_srd_t *srdp;
13798 13615 sf_srd_t *newsrdp;
13799 13616
13800 13617 ASSERT(sfmmup != ksfmmup);
13801 13618 ASSERT(sfmmup->sfmmu_srdp == NULL);
13802 13619
13803 13620 if (!shctx_on) {
13804 13621 return;
13805 13622 }
13806 13623
13807 13624 VN_HOLD(evp);
13808 13625
13809 13626 if (srd_buckets[hash].srdb_srdp != NULL) {
13810 13627 mutex_enter(&srd_buckets[hash].srdb_lock);
13811 13628 for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL;
13812 13629 srdp = srdp->srd_hash) {
13813 13630 if (srdp->srd_evp == evp) {
13814 13631 ASSERT(srdp->srd_refcnt >= 0);
13815 13632 sfmmup->sfmmu_srdp = srdp;
13816 13633 atomic_inc_32(
13817 13634 (volatile uint_t *)&srdp->srd_refcnt);
13818 13635 mutex_exit(&srd_buckets[hash].srdb_lock);
13819 13636 return;
13820 13637 }
13821 13638 }
13822 13639 mutex_exit(&srd_buckets[hash].srdb_lock);
13823 13640 }
13824 13641 newsrdp = kmem_cache_alloc(srd_cache, KM_SLEEP);
13825 13642 ASSERT(newsrdp->srd_next_ismrid == 0 && newsrdp->srd_next_hmerid == 0);
13826 13643
13827 13644 newsrdp->srd_evp = evp;
13828 13645 newsrdp->srd_refcnt = 1;
13829 13646 newsrdp->srd_hmergnfree = NULL;
13830 13647 newsrdp->srd_ismrgnfree = NULL;
13831 13648
13832 13649 mutex_enter(&srd_buckets[hash].srdb_lock);
13833 13650 for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL;
13834 13651 srdp = srdp->srd_hash) {
13835 13652 if (srdp->srd_evp == evp) {
13836 13653 ASSERT(srdp->srd_refcnt >= 0);
13837 13654 sfmmup->sfmmu_srdp = srdp;
13838 13655 atomic_inc_32((volatile uint_t *)&srdp->srd_refcnt);
13839 13656 mutex_exit(&srd_buckets[hash].srdb_lock);
13840 13657 kmem_cache_free(srd_cache, newsrdp);
13841 13658 return;
13842 13659 }
13843 13660 }
13844 13661 newsrdp->srd_hash = srd_buckets[hash].srdb_srdp;
13845 13662 srd_buckets[hash].srdb_srdp = newsrdp;
13846 13663 sfmmup->sfmmu_srdp = newsrdp;
13847 13664
13848 13665 mutex_exit(&srd_buckets[hash].srdb_lock);
13849 13666
13850 13667 }
13851 13668
13852 13669 static void
13853 13670 sfmmu_leave_srd(sfmmu_t *sfmmup)
13854 13671 {
13855 13672 vnode_t *evp;
13856 13673 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
13857 13674 uint_t hash;
13858 13675 sf_srd_t **prev_srdpp;
13859 13676 sf_region_t *rgnp;
13860 13677 sf_region_t *nrgnp;
13861 13678 #ifdef DEBUG
13862 13679 int rgns = 0;
13863 13680 #endif
13864 13681 int i;
13865 13682
13866 13683 ASSERT(sfmmup != ksfmmup);
13867 13684 ASSERT(srdp != NULL);
13868 13685 ASSERT(srdp->srd_refcnt > 0);
13869 13686 ASSERT(sfmmup->sfmmu_scdp == NULL);
13870 13687 ASSERT(sfmmup->sfmmu_free == 1);
13871 13688
13872 13689 sfmmup->sfmmu_srdp = NULL;
13873 13690 evp = srdp->srd_evp;
13874 13691 ASSERT(evp != NULL);
13875 13692 if (atomic_dec_32_nv((volatile uint_t *)&srdp->srd_refcnt)) {
13876 13693 VN_RELE(evp);
13877 13694 return;
13878 13695 }
13879 13696
13880 13697 hash = SRD_HASH_FUNCTION(evp);
13881 13698 mutex_enter(&srd_buckets[hash].srdb_lock);
13882 13699 for (prev_srdpp = &srd_buckets[hash].srdb_srdp;
13883 13700 (srdp = *prev_srdpp) != NULL; prev_srdpp = &srdp->srd_hash) {
13884 13701 if (srdp->srd_evp == evp) {
13885 13702 break;
13886 13703 }
13887 13704 }
13888 13705 if (srdp == NULL || srdp->srd_refcnt) {
13889 13706 mutex_exit(&srd_buckets[hash].srdb_lock);
13890 13707 VN_RELE(evp);
13891 13708 return;
13892 13709 }
13893 13710 *prev_srdpp = srdp->srd_hash;
13894 13711 mutex_exit(&srd_buckets[hash].srdb_lock);
13895 13712
13896 13713 ASSERT(srdp->srd_refcnt == 0);
13897 13714 VN_RELE(evp);
13898 13715
13899 13716 #ifdef DEBUG
13900 13717 for (i = 0; i < SFMMU_MAX_REGION_BUCKETS; i++) {
13901 13718 ASSERT(srdp->srd_rgnhash[i] == NULL);
13902 13719 }
13903 13720 #endif /* DEBUG */
13904 13721
13905 13722 /* free each hme regions in the srd */
13906 13723 for (rgnp = srdp->srd_hmergnfree; rgnp != NULL; rgnp = nrgnp) {
13907 13724 nrgnp = rgnp->rgn_next;
13908 13725 ASSERT(rgnp->rgn_id < srdp->srd_next_hmerid);
13909 13726 ASSERT(rgnp->rgn_refcnt == 0);
13910 13727 ASSERT(rgnp->rgn_sfmmu_head == NULL);
13911 13728 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE);
13912 13729 ASSERT(rgnp->rgn_hmeflags == 0);
13913 13730 ASSERT(srdp->srd_hmergnp[rgnp->rgn_id] == rgnp);
13914 13731 #ifdef DEBUG
13915 13732 for (i = 0; i < MMU_PAGE_SIZES; i++) {
13916 13733 ASSERT(rgnp->rgn_ttecnt[i] == 0);
13917 13734 }
13918 13735 rgns++;
13919 13736 #endif /* DEBUG */
13920 13737 kmem_cache_free(region_cache, rgnp);
13921 13738 }
13922 13739 ASSERT(rgns == srdp->srd_next_hmerid);
13923 13740
13924 13741 #ifdef DEBUG
13925 13742 rgns = 0;
13926 13743 #endif
13927 13744 /* free each ism rgns in the srd */
13928 13745 for (rgnp = srdp->srd_ismrgnfree; rgnp != NULL; rgnp = nrgnp) {
13929 13746 nrgnp = rgnp->rgn_next;
13930 13747 ASSERT(rgnp->rgn_id < srdp->srd_next_ismrid);
13931 13748 ASSERT(rgnp->rgn_refcnt == 0);
13932 13749 ASSERT(rgnp->rgn_sfmmu_head == NULL);
13933 13750 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE);
13934 13751 ASSERT(srdp->srd_ismrgnp[rgnp->rgn_id] == rgnp);
13935 13752 #ifdef DEBUG
13936 13753 for (i = 0; i < MMU_PAGE_SIZES; i++) {
13937 13754 ASSERT(rgnp->rgn_ttecnt[i] == 0);
13938 13755 }
13939 13756 rgns++;
13940 13757 #endif /* DEBUG */
13941 13758 kmem_cache_free(region_cache, rgnp);
13942 13759 }
13943 13760 ASSERT(rgns == srdp->srd_next_ismrid);
13944 13761 ASSERT(srdp->srd_ismbusyrgns == 0);
13945 13762 ASSERT(srdp->srd_hmebusyrgns == 0);
13946 13763
13947 13764 srdp->srd_next_ismrid = 0;
13948 13765 srdp->srd_next_hmerid = 0;
13949 13766
13950 13767 bzero((void *)srdp->srd_ismrgnp,
13951 13768 sizeof (sf_region_t *) * SFMMU_MAX_ISM_REGIONS);
13952 13769 bzero((void *)srdp->srd_hmergnp,
13953 13770 sizeof (sf_region_t *) * SFMMU_MAX_HME_REGIONS);
13954 13771
13955 13772 ASSERT(srdp->srd_scdp == NULL);
13956 13773 kmem_cache_free(srd_cache, srdp);
13957 13774 }
13958 13775
13959 13776 /* ARGSUSED */
13960 13777 static int
13961 13778 sfmmu_srdcache_constructor(void *buf, void *cdrarg, int kmflags)
13962 13779 {
13963 13780 sf_srd_t *srdp = (sf_srd_t *)buf;
13964 13781 bzero(buf, sizeof (*srdp));
13965 13782
13966 13783 mutex_init(&srdp->srd_mutex, NULL, MUTEX_DEFAULT, NULL);
13967 13784 mutex_init(&srdp->srd_scd_mutex, NULL, MUTEX_DEFAULT, NULL);
13968 13785 return (0);
13969 13786 }
13970 13787
13971 13788 /* ARGSUSED */
13972 13789 static void
13973 13790 sfmmu_srdcache_destructor(void *buf, void *cdrarg)
13974 13791 {
13975 13792 sf_srd_t *srdp = (sf_srd_t *)buf;
13976 13793
13977 13794 mutex_destroy(&srdp->srd_mutex);
13978 13795 mutex_destroy(&srdp->srd_scd_mutex);
13979 13796 }
13980 13797
13981 13798 /*
13982 13799 * The caller makes sure hat_join_region()/hat_leave_region() can't be called
13983 13800 * at the same time for the same process and address range. This is ensured by
13984 13801 * the fact that address space is locked as writer when a process joins the
13985 13802 * regions. Therefore there's no need to hold an srd lock during the entire
13986 13803 * execution of hat_join_region()/hat_leave_region().
13987 13804 */
13988 13805
13989 13806 #define RGN_HASH_FUNCTION(obj) (((((uintptr_t)(obj)) >> 4) ^ \
13990 13807 (((uintptr_t)(obj)) >> 11)) & \
13991 13808 srd_rgn_hashmask)
13992 13809 /*
13993 13810 * This routine implements the shared context functionality required when
13994 13811 * attaching a segment to an address space. It must be called from
13995 13812 * hat_share() for D(ISM) segments and from segvn_create() for segments
13996 13813 * with the MAP_PRIVATE and MAP_TEXT flags set. It returns a region_cookie
13997 13814 * which is saved in the private segment data for hme segments and
13998 13815 * the ism_map structure for ism segments.
13999 13816 */
14000 13817 hat_region_cookie_t
14001 13818 hat_join_region(struct hat *sfmmup,
14002 13819 caddr_t r_saddr,
14003 13820 size_t r_size,
14004 13821 void *r_obj,
14005 13822 u_offset_t r_objoff,
14006 13823 uchar_t r_perm,
14007 13824 uchar_t r_pgszc,
14008 13825 hat_rgn_cb_func_t r_cb_function,
14009 13826 uint_t flags)
14010 13827 {
14011 13828 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14012 13829 uint_t rhash;
14013 13830 uint_t rid;
14014 13831 hatlock_t *hatlockp;
14015 13832 sf_region_t *rgnp;
14016 13833 sf_region_t *new_rgnp = NULL;
14017 13834 int i;
14018 13835 uint16_t *nextidp;
14019 13836 sf_region_t **freelistp;
14020 13837 int maxids;
14021 13838 sf_region_t **rarrp;
14022 13839 uint16_t *busyrgnsp;
14023 13840 ulong_t rttecnt;
14024 13841 uchar_t tteflag;
14025 13842 uchar_t r_type = flags & HAT_REGION_TYPE_MASK;
14026 13843 int text = (r_type == HAT_REGION_TEXT);
14027 13844
14028 13845 if (srdp == NULL || r_size == 0) {
14029 13846 return (HAT_INVALID_REGION_COOKIE);
14030 13847 }
14031 13848
14032 - ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
14033 13849 ASSERT(sfmmup != ksfmmup);
14034 13850 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
14035 13851 ASSERT(srdp->srd_refcnt > 0);
14036 13852 ASSERT(!(flags & ~HAT_REGION_TYPE_MASK));
14037 13853 ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM);
14038 13854 ASSERT(r_pgszc < mmu_page_sizes);
14039 13855 if (!IS_P2ALIGNED(r_saddr, TTEBYTES(r_pgszc)) ||
14040 13856 !IS_P2ALIGNED(r_size, TTEBYTES(r_pgszc))) {
14041 13857 panic("hat_join_region: region addr or size is not aligned\n");
14042 13858 }
14043 13859
14044 13860
14045 13861 r_type = (r_type == HAT_REGION_ISM) ? SFMMU_REGION_ISM :
14046 13862 SFMMU_REGION_HME;
14047 13863 /*
14048 13864 * Currently only support shared hmes for the read only main text
14049 13865 * region.
14050 13866 */
14051 13867 if (r_type == SFMMU_REGION_HME && ((r_obj != srdp->srd_evp) ||
14052 13868 (r_perm & PROT_WRITE))) {
14053 13869 return (HAT_INVALID_REGION_COOKIE);
14054 13870 }
14055 13871
14056 13872 rhash = RGN_HASH_FUNCTION(r_obj);
14057 13873
14058 13874 if (r_type == SFMMU_REGION_ISM) {
14059 13875 nextidp = &srdp->srd_next_ismrid;
14060 13876 freelistp = &srdp->srd_ismrgnfree;
14061 13877 maxids = SFMMU_MAX_ISM_REGIONS;
14062 13878 rarrp = srdp->srd_ismrgnp;
14063 13879 busyrgnsp = &srdp->srd_ismbusyrgns;
14064 13880 } else {
14065 13881 nextidp = &srdp->srd_next_hmerid;
14066 13882 freelistp = &srdp->srd_hmergnfree;
14067 13883 maxids = SFMMU_MAX_HME_REGIONS;
14068 13884 rarrp = srdp->srd_hmergnp;
14069 13885 busyrgnsp = &srdp->srd_hmebusyrgns;
14070 13886 }
14071 13887
14072 13888 mutex_enter(&srdp->srd_mutex);
14073 13889
14074 13890 for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL;
14075 13891 rgnp = rgnp->rgn_hash) {
14076 13892 if (rgnp->rgn_saddr == r_saddr && rgnp->rgn_size == r_size &&
14077 13893 rgnp->rgn_obj == r_obj && rgnp->rgn_objoff == r_objoff &&
14078 13894 rgnp->rgn_perm == r_perm && rgnp->rgn_pgszc == r_pgszc) {
14079 13895 break;
14080 13896 }
14081 13897 }
14082 13898
14083 13899 rfound:
14084 13900 if (rgnp != NULL) {
14085 13901 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
14086 13902 ASSERT(rgnp->rgn_cb_function == r_cb_function);
14087 13903 ASSERT(rgnp->rgn_refcnt >= 0);
14088 13904 rid = rgnp->rgn_id;
14089 13905 ASSERT(rid < maxids);
14090 13906 ASSERT(rarrp[rid] == rgnp);
14091 13907 ASSERT(rid < *nextidp);
14092 13908 atomic_inc_32((volatile uint_t *)&rgnp->rgn_refcnt);
14093 13909 mutex_exit(&srdp->srd_mutex);
14094 13910 if (new_rgnp != NULL) {
14095 13911 kmem_cache_free(region_cache, new_rgnp);
14096 13912 }
14097 13913 if (r_type == SFMMU_REGION_HME) {
14098 13914 int myjoin =
14099 13915 (sfmmup == astosfmmu(curthread->t_procp->p_as));
14100 13916
14101 13917 sfmmu_link_to_hmeregion(sfmmup, rgnp);
14102 13918 /*
14103 13919 * bitmap should be updated after linking sfmmu on
14104 13920 * region list so that pageunload() doesn't skip
14105 13921 * TSB/TLB flush. As soon as bitmap is updated another
14106 13922 * thread in this process can already start accessing
14107 13923 * this region.
14108 13924 */
14109 13925 /*
14110 13926 * Normally ttecnt accounting is done as part of
14111 13927 * pagefault handling. But a process may not take any
14112 13928 * pagefaults on shared hmeblks created by some other
14113 13929 * process. To compensate for this assume that the
14114 13930 * entire region will end up faulted in using
14115 13931 * the region's pagesize.
14116 13932 *
14117 13933 */
14118 13934 if (r_pgszc > TTE8K) {
14119 13935 tteflag = 1 << r_pgszc;
14120 13936 if (disable_large_pages & tteflag) {
14121 13937 tteflag = 0;
14122 13938 }
14123 13939 } else {
14124 13940 tteflag = 0;
14125 13941 }
14126 13942 if (tteflag && !(sfmmup->sfmmu_rtteflags & tteflag)) {
14127 13943 hatlockp = sfmmu_hat_enter(sfmmup);
14128 13944 sfmmup->sfmmu_rtteflags |= tteflag;
14129 13945 sfmmu_hat_exit(hatlockp);
14130 13946 }
14131 13947 hatlockp = sfmmu_hat_enter(sfmmup);
14132 13948
14133 13949 /*
14134 13950 * Preallocate 1/4 of ttecnt's in 8K TSB for >= 4M
14135 13951 * region to allow for large page allocation failure.
14136 13952 */
14137 13953 if (r_pgszc >= TTE4M) {
14138 13954 sfmmup->sfmmu_tsb0_4minflcnt +=
14139 13955 r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2);
14140 13956 }
14141 13957
14142 13958 /* update sfmmu_ttecnt with the shme rgn ttecnt */
14143 13959 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc);
14144 13960 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc],
14145 13961 rttecnt);
14146 13962
14147 13963 if (text && r_pgszc >= TTE4M &&
14148 13964 (tteflag || ((disable_large_pages >> TTE4M) &
14149 13965 ((1 << (r_pgszc - TTE4M + 1)) - 1))) &&
14150 13966 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) {
14151 13967 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG);
14152 13968 }
14153 13969
14154 13970 sfmmu_hat_exit(hatlockp);
14155 13971 /*
14156 13972 * On Panther we need to make sure TLB is programmed
14157 13973 * to accept 32M/256M pages. Call
14158 13974 * sfmmu_check_page_sizes() now to make sure TLB is
14159 13975 * setup before making hmeregions visible to other
14160 13976 * threads.
14161 13977 */
14162 13978 sfmmu_check_page_sizes(sfmmup, 1);
14163 13979 hatlockp = sfmmu_hat_enter(sfmmup);
14164 13980 SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid);
14165 13981
14166 13982 /*
14167 13983 * if context is invalid tsb miss exception code will
14168 13984 * call sfmmu_check_page_sizes() and update tsbmiss
14169 13985 * area later.
14170 13986 */
14171 13987 kpreempt_disable();
14172 13988 if (myjoin &&
14173 13989 (sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum
14174 13990 != INVALID_CONTEXT)) {
14175 13991 struct tsbmiss *tsbmp;
14176 13992
14177 13993 tsbmp = &tsbmiss_area[CPU->cpu_id];
14178 13994 ASSERT(sfmmup == tsbmp->usfmmup);
14179 13995 BT_SET(tsbmp->shmermap, rid);
14180 13996 if (r_pgszc > TTE64K) {
14181 13997 tsbmp->uhat_rtteflags |= tteflag;
14182 13998 }
14183 13999
14184 14000 }
14185 14001 kpreempt_enable();
14186 14002
14187 14003 sfmmu_hat_exit(hatlockp);
14188 14004 ASSERT((hat_region_cookie_t)((uint64_t)rid) !=
14189 14005 HAT_INVALID_REGION_COOKIE);
14190 14006 } else {
14191 14007 hatlockp = sfmmu_hat_enter(sfmmup);
14192 14008 SF_RGNMAP_ADD(sfmmup->sfmmu_ismregion_map, rid);
14193 14009 sfmmu_hat_exit(hatlockp);
14194 14010 }
14195 14011 ASSERT(rid < maxids);
14196 14012
14197 14013 if (r_type == SFMMU_REGION_ISM) {
14198 14014 sfmmu_find_scd(sfmmup);
14199 14015 }
14200 14016 return ((hat_region_cookie_t)((uint64_t)rid));
14201 14017 }
14202 14018
14203 14019 ASSERT(new_rgnp == NULL);
14204 14020
14205 14021 if (*busyrgnsp >= maxids) {
14206 14022 mutex_exit(&srdp->srd_mutex);
14207 14023 return (HAT_INVALID_REGION_COOKIE);
14208 14024 }
14209 14025
14210 14026 ASSERT(MUTEX_HELD(&srdp->srd_mutex));
14211 14027 if (*freelistp != NULL) {
14212 14028 rgnp = *freelistp;
14213 14029 *freelistp = rgnp->rgn_next;
14214 14030 ASSERT(rgnp->rgn_id < *nextidp);
14215 14031 ASSERT(rgnp->rgn_id < maxids);
14216 14032 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE);
14217 14033 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK)
14218 14034 == r_type);
14219 14035 ASSERT(rarrp[rgnp->rgn_id] == rgnp);
14220 14036 ASSERT(rgnp->rgn_hmeflags == 0);
14221 14037 } else {
14222 14038 /*
14223 14039 * release local locks before memory allocation.
14224 14040 */
14225 14041 mutex_exit(&srdp->srd_mutex);
14226 14042
14227 14043 new_rgnp = kmem_cache_alloc(region_cache, KM_SLEEP);
14228 14044
14229 14045 mutex_enter(&srdp->srd_mutex);
14230 14046 for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL;
14231 14047 rgnp = rgnp->rgn_hash) {
14232 14048 if (rgnp->rgn_saddr == r_saddr &&
14233 14049 rgnp->rgn_size == r_size &&
14234 14050 rgnp->rgn_obj == r_obj &&
14235 14051 rgnp->rgn_objoff == r_objoff &&
14236 14052 rgnp->rgn_perm == r_perm &&
14237 14053 rgnp->rgn_pgszc == r_pgszc) {
14238 14054 break;
14239 14055 }
14240 14056 }
14241 14057 if (rgnp != NULL) {
14242 14058 goto rfound;
14243 14059 }
14244 14060
14245 14061 if (*nextidp >= maxids) {
14246 14062 mutex_exit(&srdp->srd_mutex);
14247 14063 goto fail;
14248 14064 }
14249 14065 rgnp = new_rgnp;
14250 14066 new_rgnp = NULL;
14251 14067 rgnp->rgn_id = (*nextidp)++;
14252 14068 ASSERT(rgnp->rgn_id < maxids);
14253 14069 ASSERT(rarrp[rgnp->rgn_id] == NULL);
14254 14070 rarrp[rgnp->rgn_id] = rgnp;
14255 14071 }
14256 14072
14257 14073 ASSERT(rgnp->rgn_sfmmu_head == NULL);
14258 14074 ASSERT(rgnp->rgn_hmeflags == 0);
14259 14075 #ifdef DEBUG
14260 14076 for (i = 0; i < MMU_PAGE_SIZES; i++) {
14261 14077 ASSERT(rgnp->rgn_ttecnt[i] == 0);
14262 14078 }
14263 14079 #endif
14264 14080 rgnp->rgn_saddr = r_saddr;
14265 14081 rgnp->rgn_size = r_size;
14266 14082 rgnp->rgn_obj = r_obj;
14267 14083 rgnp->rgn_objoff = r_objoff;
14268 14084 rgnp->rgn_perm = r_perm;
14269 14085 rgnp->rgn_pgszc = r_pgszc;
14270 14086 rgnp->rgn_flags = r_type;
14271 14087 rgnp->rgn_refcnt = 0;
14272 14088 rgnp->rgn_cb_function = r_cb_function;
14273 14089 rgnp->rgn_hash = srdp->srd_rgnhash[rhash];
14274 14090 srdp->srd_rgnhash[rhash] = rgnp;
14275 14091 (*busyrgnsp)++;
14276 14092 ASSERT(*busyrgnsp <= maxids);
14277 14093 goto rfound;
14278 14094
14279 14095 fail:
14280 14096 ASSERT(new_rgnp != NULL);
14281 14097 kmem_cache_free(region_cache, new_rgnp);
14282 14098 return (HAT_INVALID_REGION_COOKIE);
14283 14099 }
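A hedged sketch of the join/leave pairing this interface expects from its callers (the real callers are segvn and the ISM code; the helper below and its NULL callback are purely illustrative):

static void
text_region_example(struct hat *hat, vnode_t *evp, caddr_t addr, size_t len,
    uchar_t pgszc)
{
	hat_region_cookie_t cookie;

	/* addr and len must be aligned to TTEBYTES(pgszc). */
	cookie = hat_join_region(hat, addr, len, evp, 0,
	    PROT_READ | PROT_EXEC, pgszc, NULL, HAT_REGION_TEXT);
	if (cookie == HAT_INVALID_REGION_COOKIE) {
		/* No shared region available; fall back to private mappings. */
		return;
	}

	/* ... mappings in [addr, addr + len) now share region hmeblks ... */

	hat_leave_region(hat, cookie, HAT_REGION_TEXT);
}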
14284 14100
14285 14101 /*
14286 14102 * This function implements the shared context functionality required
14287 14103 * when detaching a segment from an address space. It must be called
14288 14104 * from hat_unshare() for all D(ISM) segments and from segvn_unmap(),
14289 14105 * for segments with a valid region_cookie.
14290 14106 * It will also be called from all seg_vn routines which change a
14291 14107 * segment's attributes such as segvn_setprot(), segvn_setpagesize(),
14292 14108 * segvn_clrszc() & segvn_advise(), as well as in the case of COW fault
14293 14109 * from segvn_fault().
14294 14110 */
14295 14111 void
14296 14112 hat_leave_region(struct hat *sfmmup, hat_region_cookie_t rcookie, uint_t flags)
14297 14113 {
14298 14114 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14299 14115 sf_scd_t *scdp;
14300 14116 uint_t rhash;
14301 14117 uint_t rid = (uint_t)((uint64_t)rcookie);
14302 14118 hatlock_t *hatlockp = NULL;
14303 14119 sf_region_t *rgnp;
14304 14120 sf_region_t **prev_rgnpp;
14305 14121 sf_region_t *cur_rgnp;
14306 14122 void *r_obj;
14307 14123 int i;
14308 14124 caddr_t r_saddr;
14309 14125 caddr_t r_eaddr;
14310 14126 size_t r_size;
14311 14127 uchar_t r_pgszc;
14312 14128 uchar_t r_type = flags & HAT_REGION_TYPE_MASK;
14313 14129
14314 14130 ASSERT(sfmmup != ksfmmup);
14315 14131 ASSERT(srdp != NULL);
14316 14132 ASSERT(srdp->srd_refcnt > 0);
14317 14133 ASSERT(!(flags & ~HAT_REGION_TYPE_MASK));
14318 14134 ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM);
14319 14135 ASSERT(!sfmmup->sfmmu_free || sfmmup->sfmmu_scdp == NULL);
14320 14136
14321 14137 r_type = (r_type == HAT_REGION_ISM) ? SFMMU_REGION_ISM :
14322 14138 SFMMU_REGION_HME;
14323 14139
14324 14140 if (r_type == SFMMU_REGION_ISM) {
14325 14141 ASSERT(SFMMU_IS_ISMRID_VALID(rid));
14326 14142 ASSERT(rid < SFMMU_MAX_ISM_REGIONS);
14327 14143 rgnp = srdp->srd_ismrgnp[rid];
14328 14144 } else {
14329 14145 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14330 14146 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
14331 14147 rgnp = srdp->srd_hmergnp[rid];
14332 14148 }
14333 14149 ASSERT(rgnp != NULL);
14334 14150 ASSERT(rgnp->rgn_id == rid);
14335 14151 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
14336 14152 ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE));
14337 14153 ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
14338 -
14339 - ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
14340 - if (r_type == SFMMU_REGION_HME && sfmmup->sfmmu_as->a_xhat != NULL) {
14341 - xhat_unload_callback_all(sfmmup->sfmmu_as, rgnp->rgn_saddr,
14342 - rgnp->rgn_size, 0, NULL);
14343 - }
14344 14154
14345 14155 if (sfmmup->sfmmu_free) {
14346 14156 ulong_t rttecnt;
14347 14157 r_pgszc = rgnp->rgn_pgszc;
14348 14158 r_size = rgnp->rgn_size;
14349 14159
14350 14160 ASSERT(sfmmup->sfmmu_scdp == NULL);
14351 14161 if (r_type == SFMMU_REGION_ISM) {
14352 14162 SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid);
14353 14163 } else {
14354 14164 /* update shme rgns ttecnt in sfmmu_ttecnt */
14355 14165 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc);
14356 14166 ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt);
14357 14167
14358 14168 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc],
14359 14169 -rttecnt);
14360 14170
14361 14171 SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid);
14362 14172 }
14363 14173 } else if (r_type == SFMMU_REGION_ISM) {
14364 14174 hatlockp = sfmmu_hat_enter(sfmmup);
14365 14175 ASSERT(rid < srdp->srd_next_ismrid);
14366 14176 SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid);
14367 14177 scdp = sfmmup->sfmmu_scdp;
14368 14178 if (scdp != NULL &&
14369 14179 SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) {
14370 14180 sfmmu_leave_scd(sfmmup, r_type);
14371 14181 ASSERT(sfmmu_hat_lock_held(sfmmup));
14372 14182 }
14373 14183 sfmmu_hat_exit(hatlockp);
14374 14184 } else {
14375 14185 ulong_t rttecnt;
14376 14186 r_pgszc = rgnp->rgn_pgszc;
14377 14187 r_saddr = rgnp->rgn_saddr;
14378 14188 r_size = rgnp->rgn_size;
14379 14189 r_eaddr = r_saddr + r_size;
14380 14190
14381 14191 ASSERT(r_type == SFMMU_REGION_HME);
14382 14192 hatlockp = sfmmu_hat_enter(sfmmup);
14383 14193 ASSERT(rid < srdp->srd_next_hmerid);
14384 14194 SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid);
14385 14195
14386 14196 /*
14387 14197 * If region is part of an SCD call sfmmu_leave_scd().
14388 14198 * Otherwise if process is not exiting and has valid context
14389 14199 * just drop the context on the floor to lose stale TLB
14390 14200 * entries and force the update of tsb miss area to reflect
14391 14201 * the new region map. After that clean our TSB entries.
14392 14202 */
14393 14203 scdp = sfmmup->sfmmu_scdp;
14394 14204 if (scdp != NULL &&
14395 14205 SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) {
14396 14206 sfmmu_leave_scd(sfmmup, r_type);
14397 14207 ASSERT(sfmmu_hat_lock_held(sfmmup));
14398 14208 }
14399 14209 sfmmu_invalidate_ctx(sfmmup);
14400 14210
14401 14211 i = TTE8K;
14402 14212 while (i < mmu_page_sizes) {
14403 14213 if (rgnp->rgn_ttecnt[i] != 0) {
14404 14214 sfmmu_unload_tsb_range(sfmmup, r_saddr,
14405 14215 r_eaddr, i);
14406 14216 if (i < TTE4M) {
14407 14217 i = TTE4M;
14408 14218 continue;
14409 14219 } else {
14410 14220 break;
14411 14221 }
14412 14222 }
14413 14223 i++;
14414 14224 }
14415 14225 /* Remove the preallocated 1/4 8k ttecnt for 4M regions. */
14416 14226 if (r_pgszc >= TTE4M) {
14417 14227 rttecnt = r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2);
14418 14228 ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >=
14419 14229 rttecnt);
14420 14230 sfmmup->sfmmu_tsb0_4minflcnt -= rttecnt;
14421 14231 }
14422 14232
14423 14233 /* update shme rgns ttecnt in sfmmu_ttecnt */
14424 14234 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc);
14425 14235 ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt);
14426 14236 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc], -rttecnt);
14427 14237
14428 14238 sfmmu_hat_exit(hatlockp);
14429 14239 if (scdp != NULL && sfmmup->sfmmu_scdp == NULL) {
14430 14240 /* sfmmup left the scd, grow private tsb */
14431 14241 sfmmu_check_page_sizes(sfmmup, 1);
14432 14242 } else {
14433 14243 sfmmu_check_page_sizes(sfmmup, 0);
14434 14244 }
14435 14245 }
14436 14246
14437 14247 if (r_type == SFMMU_REGION_HME) {
14438 14248 sfmmu_unlink_from_hmeregion(sfmmup, rgnp);
14439 14249 }
14440 14250
14441 14251 r_obj = rgnp->rgn_obj;
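	/*
	 * Drop our reference on the region. atomic_dec_32_nv() returns the
	 * new count, so only the caller that brings it to zero continues on
	 * to free the region below.
	 */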
14442 14252 if (atomic_dec_32_nv((volatile uint_t *)&rgnp->rgn_refcnt)) {
14443 14253 return;
14444 14254 }
14445 14255
14446 14256 /*
14447 14257 * looks like nobody uses this region anymore. Free it.
14448 14258 */
14449 14259 rhash = RGN_HASH_FUNCTION(r_obj);
14450 14260 mutex_enter(&srdp->srd_mutex);
14451 14261 for (prev_rgnpp = &srdp->srd_rgnhash[rhash];
14452 14262 (cur_rgnp = *prev_rgnpp) != NULL;
14453 14263 prev_rgnpp = &cur_rgnp->rgn_hash) {
14454 14264 if (cur_rgnp == rgnp && cur_rgnp->rgn_refcnt == 0) {
14455 14265 break;
14456 14266 }
14457 14267 }
14458 14268
14459 14269 if (cur_rgnp == NULL) {
14460 14270 mutex_exit(&srdp->srd_mutex);
14461 14271 return;
14462 14272 }
14463 14273
14464 14274 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
14465 14275 *prev_rgnpp = rgnp->rgn_hash;
14466 14276 if (r_type == SFMMU_REGION_ISM) {
14467 14277 rgnp->rgn_flags |= SFMMU_REGION_FREE;
14468 14278 ASSERT(rid < srdp->srd_next_ismrid);
14469 14279 rgnp->rgn_next = srdp->srd_ismrgnfree;
14470 14280 srdp->srd_ismrgnfree = rgnp;
14471 14281 ASSERT(srdp->srd_ismbusyrgns > 0);
14472 14282 srdp->srd_ismbusyrgns--;
14473 14283 mutex_exit(&srdp->srd_mutex);
14474 14284 return;
14475 14285 }
14476 14286 mutex_exit(&srdp->srd_mutex);
14477 14287
14478 14288 /*
14479 14289 * Destroy region's hmeblks.
14480 14290 */
14481 14291 sfmmu_unload_hmeregion(srdp, rgnp);
14482 14292
14483 14293 rgnp->rgn_hmeflags = 0;
14484 14294
14485 14295 ASSERT(rgnp->rgn_sfmmu_head == NULL);
14486 14296 ASSERT(rgnp->rgn_id == rid);
14487 14297 for (i = 0; i < MMU_PAGE_SIZES; i++) {
14488 14298 rgnp->rgn_ttecnt[i] = 0;
14489 14299 }
14490 14300 rgnp->rgn_flags |= SFMMU_REGION_FREE;
14491 14301 mutex_enter(&srdp->srd_mutex);
14492 14302 ASSERT(rid < srdp->srd_next_hmerid);
14493 14303 rgnp->rgn_next = srdp->srd_hmergnfree;
14494 14304 srdp->srd_hmergnfree = rgnp;
14495 14305 ASSERT(srdp->srd_hmebusyrgns > 0);
14496 14306 srdp->srd_hmebusyrgns--;
14497 14307 mutex_exit(&srdp->srd_mutex);
14498 14308 }
14499 14309
14500 14310 /*
14501 14311 * For now only called for hmeblk regions and not for ISM regions.
14502 14312 */
14503 14313 void
14504 14314 hat_dup_region(struct hat *sfmmup, hat_region_cookie_t rcookie)
14505 14315 {
14506 14316 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14507 14317 uint_t rid = (uint_t)((uint64_t)rcookie);
14508 14318 sf_region_t *rgnp;
14509 14319 sf_rgn_link_t *rlink;
14510 14320 sf_rgn_link_t *hrlink;
14511 14321 ulong_t rttecnt;
14512 14322
14513 14323 ASSERT(sfmmup != ksfmmup);
14514 14324 ASSERT(srdp != NULL);
14515 14325 ASSERT(srdp->srd_refcnt > 0);
14516 14326
14517 14327 ASSERT(rid < srdp->srd_next_hmerid);
14518 14328 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14519 14329 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
14520 14330
14521 14331 rgnp = srdp->srd_hmergnp[rid];
14522 14332 ASSERT(rgnp->rgn_refcnt > 0);
14523 14333 ASSERT(rgnp->rgn_id == rid);
14524 14334 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == SFMMU_REGION_HME);
14525 14335 ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE));
14526 14336
14527 14337 atomic_inc_32((volatile uint_t *)&rgnp->rgn_refcnt);
14528 14338
14529 14339 /* LINTED: constant in conditional context */
14530 14340 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 0);
14531 14341 ASSERT(rlink != NULL);
14532 14342 mutex_enter(&rgnp->rgn_mutex);
14533 14343 ASSERT(rgnp->rgn_sfmmu_head != NULL);
14534 14344 /* LINTED: constant in conditional context */
14535 14345 SFMMU_HMERID2RLINKP(rgnp->rgn_sfmmu_head, rid, hrlink, 0, 0);
14536 14346 ASSERT(hrlink != NULL);
14537 14347 ASSERT(hrlink->prev == NULL);
14538 14348 rlink->next = rgnp->rgn_sfmmu_head;
14539 14349 rlink->prev = NULL;
14540 14350 hrlink->prev = sfmmup;
14541 14351 /*
14542 14352 * make sure rlink's next field is correct
14543 14353 * before making this link visible.
14544 14354 */
14545 14355 membar_stst();
14546 14356 rgnp->rgn_sfmmu_head = sfmmup;
14547 14357 mutex_exit(&rgnp->rgn_mutex);
14548 14358
14549 14359 /* update sfmmu_ttecnt with the shme rgn ttecnt */
14550 14360 rttecnt = rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc);
14551 14361 atomic_add_long(&sfmmup->sfmmu_ttecnt[rgnp->rgn_pgszc], rttecnt);
14552 14362 /* update tsb0 inflation count */
14553 14363 if (rgnp->rgn_pgszc >= TTE4M) {
14554 14364 sfmmup->sfmmu_tsb0_4minflcnt +=
14555 14365 rgnp->rgn_size >> (TTE_PAGE_SHIFT(TTE8K) + 2);
14556 14366 }
14557 14367 /*
14558 14368 * Update regionid bitmask without hat lock since no other thread
14559 14369 * can update this region bitmask right now.
14560 14370 */
14561 14371 SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid);
14562 14372 }
14563 14373
14564 14374 /* ARGSUSED */
14565 14375 static int
14566 14376 sfmmu_rgncache_constructor(void *buf, void *cdrarg, int kmflags)
14567 14377 {
14568 14378 sf_region_t *rgnp = (sf_region_t *)buf;
14569 14379 bzero(buf, sizeof (*rgnp));
14570 14380
14571 14381 mutex_init(&rgnp->rgn_mutex, NULL, MUTEX_DEFAULT, NULL);
14572 14382
14573 14383 return (0);
14574 14384 }
14575 14385
14576 14386 /* ARGSUSED */
14577 14387 static void
14578 14388 sfmmu_rgncache_destructor(void *buf, void *cdrarg)
14579 14389 {
14580 14390 sf_region_t *rgnp = (sf_region_t *)buf;
14581 14391 mutex_destroy(&rgnp->rgn_mutex);
14582 14392 }
14583 14393
14584 14394 static int
14585 14395 sfrgnmap_isnull(sf_region_map_t *map)
14586 14396 {
14587 14397 int i;
14588 14398
14589 14399 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
14590 14400 if (map->bitmap[i] != 0) {
14591 14401 return (0);
14592 14402 }
14593 14403 }
14594 14404 return (1);
14595 14405 }
14596 14406
14597 14407 static int
14598 14408 sfhmergnmap_isnull(sf_hmeregion_map_t *map)
14599 14409 {
14600 14410 int i;
14601 14411
14602 14412 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) {
14603 14413 if (map->bitmap[i] != 0) {
14604 14414 return (0);
14605 14415 }
14606 14416 }
14607 14417 return (1);
14608 14418 }
14609 14419
14610 14420 #ifdef DEBUG
14611 14421 static void
14612 14422 check_scd_sfmmu_list(sfmmu_t **headp, sfmmu_t *sfmmup, int onlist)
14613 14423 {
14614 14424 sfmmu_t *sp;
14615 14425 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14616 14426
14617 14427 for (sp = *headp; sp != NULL; sp = sp->sfmmu_scd_link.next) {
14618 14428 ASSERT(srdp == sp->sfmmu_srdp);
14619 14429 if (sp == sfmmup) {
14620 14430 if (onlist) {
14621 14431 return;
14622 14432 } else {
14623 14433 panic("shctx: sfmmu 0x%p found on scd"
14624 14434 "list 0x%p", (void *)sfmmup,
14625 14435 (void *)*headp);
14626 14436 }
14627 14437 }
14628 14438 }
14629 14439 if (onlist) {
14630 14440 panic("shctx: sfmmu 0x%p not found on scd list 0x%p",
14631 14441 (void *)sfmmup, (void *)*headp);
14632 14442 } else {
14633 14443 return;
14634 14444 }
14635 14445 }
14636 14446 #else /* DEBUG */
14637 14447 #define check_scd_sfmmu_list(headp, sfmmup, onlist)
14638 14448 #endif /* DEBUG */
14639 14449
14640 14450 /*
14641 14451 * Removes an sfmmu from the SCD sfmmu list.
14642 14452 */
14643 14453 static void
14644 14454 sfmmu_from_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup)
14645 14455 {
14646 14456 ASSERT(sfmmup->sfmmu_srdp != NULL);
14647 14457 check_scd_sfmmu_list(headp, sfmmup, 1);
14648 14458 if (sfmmup->sfmmu_scd_link.prev != NULL) {
14649 14459 ASSERT(*headp != sfmmup);
14650 14460 sfmmup->sfmmu_scd_link.prev->sfmmu_scd_link.next =
14651 14461 sfmmup->sfmmu_scd_link.next;
14652 14462 } else {
14653 14463 ASSERT(*headp == sfmmup);
14654 14464 *headp = sfmmup->sfmmu_scd_link.next;
14655 14465 }
14656 14466 if (sfmmup->sfmmu_scd_link.next != NULL) {
14657 14467 sfmmup->sfmmu_scd_link.next->sfmmu_scd_link.prev =
14658 14468 sfmmup->sfmmu_scd_link.prev;
14659 14469 }
14660 14470 }
14661 14471
14662 14472
14663 14473 /*
14664 14474 * Adds an sfmmu to the start of the queue.
14665 14475 */
14666 14476 static void
14667 14477 sfmmu_to_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup)
14668 14478 {
14669 14479 check_scd_sfmmu_list(headp, sfmmup, 0);
14670 14480 sfmmup->sfmmu_scd_link.prev = NULL;
14671 14481 sfmmup->sfmmu_scd_link.next = *headp;
14672 14482 if (*headp != NULL)
14673 14483 (*headp)->sfmmu_scd_link.prev = sfmmup;
14674 14484 *headp = sfmmup;
14675 14485 }
14676 14486
14677 14487 /*
14678 14488  * Remove an scd from the queue.
14679 14489 */
14680 14490 static void
14681 14491 sfmmu_remove_scd(sf_scd_t **headp, sf_scd_t *scdp)
14682 14492 {
14683 14493 if (scdp->scd_prev != NULL) {
14684 14494 ASSERT(*headp != scdp);
14685 14495 scdp->scd_prev->scd_next = scdp->scd_next;
14686 14496 } else {
14687 14497 ASSERT(*headp == scdp);
14688 14498 *headp = scdp->scd_next;
14689 14499 }
14690 14500
14691 14501 if (scdp->scd_next != NULL) {
14692 14502 scdp->scd_next->scd_prev = scdp->scd_prev;
14693 14503 }
14694 14504 }
14695 14505
14696 14506 /*
14697 14507 * Add an scd to the start of the queue.
14698 14508 */
14699 14509 static void
14700 14510 sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *scdp)
14701 14511 {
14702 14512 scdp->scd_prev = NULL;
14703 14513 scdp->scd_next = *headp;
14704 14514 if (*headp != NULL) {
14705 14515 (*headp)->scd_prev = scdp;
14706 14516 }
14707 14517 *headp = scdp;
14708 14518 }
14709 14519
14710 14520 static int
14711 14521 sfmmu_alloc_scd_tsbs(sf_srd_t *srdp, sf_scd_t *scdp)
14712 14522 {
14713 14523 uint_t rid;
14714 14524 uint_t i;
14715 14525 uint_t j;
14716 14526 ulong_t w;
14717 14527 sf_region_t *rgnp;
14718 14528 ulong_t tte8k_cnt = 0;
14719 14529 ulong_t tte4m_cnt = 0;
14720 14530 uint_t tsb_szc;
14721 14531 sfmmu_t *scsfmmup = scdp->scd_sfmmup;
14722 14532 sfmmu_t *ism_hatid;
14723 14533 struct tsb_info *newtsb;
14724 14534 int szc;
14725 14535
14726 14536 ASSERT(srdp != NULL);
14727 14537
14728 14538 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
14729 14539 if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
14730 14540 continue;
14731 14541 }
14732 14542 j = 0;
14733 14543 while (w) {
14734 14544 if (!(w & 0x1)) {
14735 14545 j++;
14736 14546 w >>= 1;
14737 14547 continue;
14738 14548 }
14739 14549 rid = (i << BT_ULSHIFT) | j;
14740 14550 j++;
14741 14551 w >>= 1;
14742 14552
14743 14553 if (rid < SFMMU_MAX_HME_REGIONS) {
14744 14554 rgnp = srdp->srd_hmergnp[rid];
14745 14555 ASSERT(rgnp->rgn_id == rid);
14746 14556 ASSERT(rgnp->rgn_refcnt > 0);
14747 14557
14748 14558 if (rgnp->rgn_pgszc < TTE4M) {
14749 14559 tte8k_cnt += rgnp->rgn_size >>
14750 14560 TTE_PAGE_SHIFT(TTE8K);
14751 14561 } else {
14752 14562 ASSERT(rgnp->rgn_pgszc >= TTE4M);
14753 14563 tte4m_cnt += rgnp->rgn_size >>
14754 14564 TTE_PAGE_SHIFT(TTE4M);
14755 14565 /*
14756 14566 * Inflate SCD tsb0 by preallocating
14757 14567 * 1/4 8k ttecnt for 4M regions to
14758 14568 * allow for lgpg alloc failure.
14759 14569 */
14760 14570 tte8k_cnt += rgnp->rgn_size >>
14761 14571 (TTE_PAGE_SHIFT(TTE8K) + 2);
14762 14572 }
14763 14573 } else {
14764 14574 rid -= SFMMU_MAX_HME_REGIONS;
14765 14575 rgnp = srdp->srd_ismrgnp[rid];
14766 14576 ASSERT(rgnp->rgn_id == rid);
14767 14577 ASSERT(rgnp->rgn_refcnt > 0);
14768 14578
14769 14579 ism_hatid = (sfmmu_t *)rgnp->rgn_obj;
14770 14580 ASSERT(ism_hatid->sfmmu_ismhat);
14771 14581
14772 14582 for (szc = 0; szc < TTE4M; szc++) {
14773 14583 tte8k_cnt +=
14774 14584 ism_hatid->sfmmu_ttecnt[szc] <<
14775 14585 TTE_BSZS_SHIFT(szc);
14776 14586 }
14777 14587
14778 14588 ASSERT(rgnp->rgn_pgszc >= TTE4M);
14779 14589 if (rgnp->rgn_pgszc >= TTE4M) {
14780 14590 tte4m_cnt += rgnp->rgn_size >>
14781 14591 TTE_PAGE_SHIFT(TTE4M);
14782 14592 }
14783 14593 }
14784 14594 }
14785 14595 }
14786 14596
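	/*
	 * tte8k_cnt sizes the 8K-indexed SCD TSB allocated below; tte4m_cnt,
	 * accumulated above for large-page regions, sizes the optional
	 * second TSB.
	 */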
14787 14597 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt);
14788 14598
14789 14599 /* Allocate both the SCD TSBs here. */
14790 14600 if (sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb,
14791 14601 tsb_szc, TSB8K|TSB64K|TSB512K, TSB_ALLOC, scsfmmup) &&
14792 14602 (tsb_szc <= TSB_4M_SZCODE ||
14793 14603 sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb,
14794 14604 TSB_4M_SZCODE, TSB8K|TSB64K|TSB512K,
14795 14605 TSB_ALLOC, scsfmmup))) {
14796 14606
14797 14607 SFMMU_STAT(sf_scd_1sttsb_allocfail);
14798 14608 return (TSB_ALLOCFAIL);
14799 14609 } else {
14800 14610 scsfmmup->sfmmu_tsb->tsb_flags |= TSB_SHAREDCTX;
14801 14611
14802 14612 if (tte4m_cnt) {
14803 14613 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt);
14804 14614 if (sfmmu_tsbinfo_alloc(&newtsb, tsb_szc,
14805 14615 TSB4M|TSB32M|TSB256M, TSB_ALLOC, scsfmmup) &&
14806 14616 (tsb_szc <= TSB_4M_SZCODE ||
14807 14617 sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE,
14808 14618 TSB4M|TSB32M|TSB256M,
14809 14619 TSB_ALLOC, scsfmmup))) {
14810 14620 /*
14811 14621 * If we fail to allocate the 2nd shared tsb,
14812 14622 * just free the 1st tsb, return failure.
14813 14623 */
14814 14624 sfmmu_tsbinfo_free(scsfmmup->sfmmu_tsb);
14815 14625 SFMMU_STAT(sf_scd_2ndtsb_allocfail);
14816 14626 return (TSB_ALLOCFAIL);
14817 14627 } else {
14818 14628 ASSERT(scsfmmup->sfmmu_tsb->tsb_next == NULL);
14819 14629 newtsb->tsb_flags |= TSB_SHAREDCTX;
14820 14630 scsfmmup->sfmmu_tsb->tsb_next = newtsb;
14821 14631 SFMMU_STAT(sf_scd_2ndtsb_alloc);
14822 14632 }
14823 14633 }
14824 14634 SFMMU_STAT(sf_scd_1sttsb_alloc);
14825 14635 }
14826 14636 return (TSB_SUCCESS);
14827 14637 }
14828 14638
14829 14639 static void
14830 14640 sfmmu_free_scd_tsbs(sfmmu_t *scd_sfmmu)
14831 14641 {
14832 14642 while (scd_sfmmu->sfmmu_tsb != NULL) {
14833 14643 struct tsb_info *next = scd_sfmmu->sfmmu_tsb->tsb_next;
14834 14644 sfmmu_tsbinfo_free(scd_sfmmu->sfmmu_tsb);
14835 14645 scd_sfmmu->sfmmu_tsb = next;
14836 14646 }
14837 14647 }
14838 14648
14839 14649 /*
14840 14650 * Link the sfmmu onto the hme region list.
14841 14651 */
14842 14652 void
14843 14653 sfmmu_link_to_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp)
14844 14654 {
14845 14655 uint_t rid;
14846 14656 sf_rgn_link_t *rlink;
14847 14657 sfmmu_t *head;
14848 14658 sf_rgn_link_t *hrlink;
14849 14659
14850 14660 rid = rgnp->rgn_id;
14851 14661 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14852 14662
14853 14663 /* LINTED: constant in conditional context */
14854 14664 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 1);
14855 14665 ASSERT(rlink != NULL);
14856 14666 mutex_enter(&rgnp->rgn_mutex);
14857 14667 if ((head = rgnp->rgn_sfmmu_head) == NULL) {
14858 14668 rlink->next = NULL;
14859 14669 rlink->prev = NULL;
14860 14670 /*
14861 14671 * make sure rlink's next field is NULL
14862 14672 * before making this link visible.
14863 14673 */
14864 14674 membar_stst();
14865 14675 rgnp->rgn_sfmmu_head = sfmmup;
14866 14676 } else {
14867 14677 /* LINTED: constant in conditional context */
14868 14678 SFMMU_HMERID2RLINKP(head, rid, hrlink, 0, 0);
14869 14679 ASSERT(hrlink != NULL);
14870 14680 ASSERT(hrlink->prev == NULL);
14871 14681 rlink->next = head;
14872 14682 rlink->prev = NULL;
14873 14683 hrlink->prev = sfmmup;
14874 14684 /*
14875 14685 * make sure rlink's next field is correct
14876 14686 * before making this link visible.
14877 14687 */
14878 14688 membar_stst();
14879 14689 rgnp->rgn_sfmmu_head = sfmmup;
14880 14690 }
14881 14691 mutex_exit(&rgnp->rgn_mutex);
14882 14692 }
14883 14693
14884 14694 /*
14885 14695 * Unlink the sfmmu from the hme region list.
14886 14696 */
14887 14697 void
14888 14698 sfmmu_unlink_from_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp)
14889 14699 {
14890 14700 uint_t rid;
14891 14701 sf_rgn_link_t *rlink;
14892 14702
14893 14703 rid = rgnp->rgn_id;
14894 14704 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14895 14705
14896 14706 /* LINTED: constant in conditional context */
14897 14707 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0);
14898 14708 ASSERT(rlink != NULL);
14899 14709 mutex_enter(&rgnp->rgn_mutex);
14900 14710 if (rgnp->rgn_sfmmu_head == sfmmup) {
14901 14711 sfmmu_t *next = rlink->next;
14902 14712 rgnp->rgn_sfmmu_head = next;
14903 14713 /*
14904 14714 * if we are stopped by xc_attention() after this
14905 14715 * point the forward link walking in
14906 14716 * sfmmu_rgntlb_demap() will work correctly since the
14907 14717 * head correctly points to the next element.
14908 14718 */
14909 14719 membar_stst();
14910 14720 rlink->next = NULL;
14911 14721 ASSERT(rlink->prev == NULL);
14912 14722 if (next != NULL) {
14913 14723 sf_rgn_link_t *nrlink;
14914 14724 /* LINTED: constant in conditional context */
14915 14725 SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0);
14916 14726 ASSERT(nrlink != NULL);
14917 14727 ASSERT(nrlink->prev == sfmmup);
14918 14728 nrlink->prev = NULL;
14919 14729 }
14920 14730 } else {
14921 14731 sfmmu_t *next = rlink->next;
14922 14732 sfmmu_t *prev = rlink->prev;
14923 14733 sf_rgn_link_t *prlink;
14924 14734
14925 14735 ASSERT(prev != NULL);
14926 14736 /* LINTED: constant in conditional context */
14927 14737 SFMMU_HMERID2RLINKP(prev, rid, prlink, 0, 0);
14928 14738 ASSERT(prlink != NULL);
14929 14739 ASSERT(prlink->next == sfmmup);
14930 14740 prlink->next = next;
14931 14741 /*
14932 14742 * if we are stopped by xc_attention()
14933 14743 * after this point the forward link walking
14934 14744 * will work correctly since the prev element
14935 14745 * correctly points to the next element.
14936 14746 */
14937 14747 membar_stst();
14938 14748 rlink->next = NULL;
14939 14749 rlink->prev = NULL;
14940 14750 if (next != NULL) {
14941 14751 sf_rgn_link_t *nrlink;
14942 14752 /* LINTED: constant in conditional context */
14943 14753 SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0);
14944 14754 ASSERT(nrlink != NULL);
14945 14755 ASSERT(nrlink->prev == sfmmup);
14946 14756 nrlink->prev = prev;
14947 14757 }
14948 14758 }
14949 14759 mutex_exit(&rgnp->rgn_mutex);
14950 14760 }
14951 14761
14952 14762 /*
14953 14763 * Link scd sfmmu onto ism or hme region list for each region in the
14954 14764 * scd region map.
14955 14765 */
14956 14766 void
14957 14767 sfmmu_link_scd_to_regions(sf_srd_t *srdp, sf_scd_t *scdp)
14958 14768 {
14959 14769 uint_t rid;
14960 14770 uint_t i;
14961 14771 uint_t j;
14962 14772 ulong_t w;
14963 14773 sf_region_t *rgnp;
14964 14774 sfmmu_t *scsfmmup;
14965 14775
14966 14776 scsfmmup = scdp->scd_sfmmup;
14967 14777 ASSERT(scsfmmup->sfmmu_scdhat);
14968 14778 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
14969 14779 if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
14970 14780 continue;
14971 14781 }
14972 14782 j = 0;
14973 14783 while (w) {
14974 14784 if (!(w & 0x1)) {
14975 14785 j++;
14976 14786 w >>= 1;
14977 14787 continue;
14978 14788 }
14979 14789 rid = (i << BT_ULSHIFT) | j;
14980 14790 j++;
14981 14791 w >>= 1;
14982 14792
14983 14793 if (rid < SFMMU_MAX_HME_REGIONS) {
14984 14794 rgnp = srdp->srd_hmergnp[rid];
14985 14795 ASSERT(rgnp->rgn_id == rid);
14986 14796 ASSERT(rgnp->rgn_refcnt > 0);
14987 14797 sfmmu_link_to_hmeregion(scsfmmup, rgnp);
14988 14798 } else {
14989 14799 sfmmu_t *ism_hatid = NULL;
14990 14800 ism_ment_t *ism_ment;
14991 14801 rid -= SFMMU_MAX_HME_REGIONS;
14992 14802 rgnp = srdp->srd_ismrgnp[rid];
14993 14803 ASSERT(rgnp->rgn_id == rid);
14994 14804 ASSERT(rgnp->rgn_refcnt > 0);
14995 14805
14996 14806 ism_hatid = (sfmmu_t *)rgnp->rgn_obj;
14997 14807 ASSERT(ism_hatid->sfmmu_ismhat);
14998 14808 ism_ment = &scdp->scd_ism_links[rid];
14999 14809 ism_ment->iment_hat = scsfmmup;
15000 14810 ism_ment->iment_base_va = rgnp->rgn_saddr;
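				/*
				 * Add the SCD hat to the ISM hat's mapping
				 * entry list so that operations on the ISM
				 * segment (e.g. TLB demaps) also find the
				 * shared-context mappings.
				 */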
15001 14811 mutex_enter(&ism_mlist_lock);
15002 14812 iment_add(ism_ment, ism_hatid);
15003 14813 mutex_exit(&ism_mlist_lock);
15004 14814
15005 14815 }
15006 14816 }
15007 14817 }
15008 14818 }
15009 14819 /*
15010 14820 * Unlink scd sfmmu from ism or hme region list for each region in the
15011 14821 * scd region map.
15012 14822 */
15013 14823 void
15014 14824 sfmmu_unlink_scd_from_regions(sf_srd_t *srdp, sf_scd_t *scdp)
15015 14825 {
15016 14826 uint_t rid;
15017 14827 uint_t i;
15018 14828 uint_t j;
15019 14829 ulong_t w;
15020 14830 sf_region_t *rgnp;
15021 14831 sfmmu_t *scsfmmup;
15022 14832
15023 14833 scsfmmup = scdp->scd_sfmmup;
15024 14834 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
15025 14835 if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
15026 14836 continue;
15027 14837 }
15028 14838 j = 0;
15029 14839 while (w) {
15030 14840 if (!(w & 0x1)) {
15031 14841 j++;
15032 14842 w >>= 1;
15033 14843 continue;
15034 14844 }
15035 14845 rid = (i << BT_ULSHIFT) | j;
15036 14846 j++;
15037 14847 w >>= 1;
15038 14848
15039 14849 if (rid < SFMMU_MAX_HME_REGIONS) {
15040 14850 rgnp = srdp->srd_hmergnp[rid];
15041 14851 ASSERT(rgnp->rgn_id == rid);
15042 14852 ASSERT(rgnp->rgn_refcnt > 0);
15043 14853 sfmmu_unlink_from_hmeregion(scsfmmup,
15044 14854 rgnp);
15045 14855
15046 14856 } else {
15047 14857 sfmmu_t *ism_hatid = NULL;
15048 14858 ism_ment_t *ism_ment;
15049 14859 rid -= SFMMU_MAX_HME_REGIONS;
15050 14860 rgnp = srdp->srd_ismrgnp[rid];
15051 14861 ASSERT(rgnp->rgn_id == rid);
15052 14862 ASSERT(rgnp->rgn_refcnt > 0);
15053 14863
15054 14864 ism_hatid = (sfmmu_t *)rgnp->rgn_obj;
15055 14865 ASSERT(ism_hatid->sfmmu_ismhat);
15056 14866 ism_ment = &scdp->scd_ism_links[rid];
15057 14867 ASSERT(ism_ment->iment_hat == scdp->scd_sfmmup);
15058 14868 ASSERT(ism_ment->iment_base_va ==
15059 14869 rgnp->rgn_saddr);
15060 14870 mutex_enter(&ism_mlist_lock);
15061 14871 iment_sub(ism_ment, ism_hatid);
15062 14872 mutex_exit(&ism_mlist_lock);
15063 14873
15064 14874 }
15065 14875 }
15066 14876 }
15067 14877 }
15068 14878 /*
15069 14879  * Allocates and initialises a new SCD structure. This is called with
15070 14880 * the srd_scd_mutex held and returns with the reference count
15071 14881 * initialised to 1.
15072 14882 */
15073 14883 static sf_scd_t *
15074 14884 sfmmu_alloc_scd(sf_srd_t *srdp, sf_region_map_t *new_map)
15075 14885 {
15076 14886 sf_scd_t *new_scdp;
15077 14887 sfmmu_t *scsfmmup;
15078 14888 int i;
15079 14889
15080 14890 ASSERT(MUTEX_HELD(&srdp->srd_scd_mutex));
15081 14891 new_scdp = kmem_cache_alloc(scd_cache, KM_SLEEP);
15082 14892
15083 14893 scsfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP);
15084 14894 new_scdp->scd_sfmmup = scsfmmup;
15085 14895 scsfmmup->sfmmu_srdp = srdp;
15086 14896 scsfmmup->sfmmu_scdp = new_scdp;
15087 14897 scsfmmup->sfmmu_tsb0_4minflcnt = 0;
15088 14898 scsfmmup->sfmmu_scdhat = 1;
15089 14899 CPUSET_ALL(scsfmmup->sfmmu_cpusran);
15090 14900 bzero(scsfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE);
15091 14901
15092 14902 ASSERT(max_mmu_ctxdoms > 0);
15093 14903 for (i = 0; i < max_mmu_ctxdoms; i++) {
15094 14904 scsfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT;
15095 14905 scsfmmup->sfmmu_ctxs[i].gnum = 0;
15096 14906 }
15097 14907
15098 14908 for (i = 0; i < MMU_PAGE_SIZES; i++) {
15099 14909 new_scdp->scd_rttecnt[i] = 0;
15100 14910 }
15101 14911
15102 14912 new_scdp->scd_region_map = *new_map;
15103 14913 new_scdp->scd_refcnt = 1;
15104 14914 if (sfmmu_alloc_scd_tsbs(srdp, new_scdp) != TSB_SUCCESS) {
15105 14915 kmem_cache_free(scd_cache, new_scdp);
15106 14916 kmem_cache_free(sfmmuid_cache, scsfmmup);
15107 14917 return (NULL);
15108 14918 }
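	/*
	 * The address test below is the usual check for an optional (weak)
	 * platform hook; the call is skipped on platforms that do not
	 * provide mmu_init_scd().
	 */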
15109 14919 if (&mmu_init_scd) {
15110 14920 mmu_init_scd(new_scdp);
15111 14921 }
15112 14922 return (new_scdp);
15113 14923 }
15114 14924
15115 14925 /*
15116 14926 * The first phase of a process joining an SCD. The hat structure is
15117 14927 * linked to the SCD queue and then the HAT_JOIN_SCD sfmmu flag is set
15118 14928 * and a cross-call with context invalidation is used to cause the
15119 14929 * remaining work to be carried out in the sfmmu_tsbmiss_exception()
15120 14930 * routine.
15121 14931 */
15122 14932 static void
15123 14933 sfmmu_join_scd(sf_scd_t *scdp, sfmmu_t *sfmmup)
15124 14934 {
15125 14935 hatlock_t *hatlockp;
15126 14936 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
15127 14937 int i;
15128 14938 sf_scd_t *old_scdp;
15129 14939
15130 14940 ASSERT(srdp != NULL);
15131 14941 ASSERT(scdp != NULL);
15132 14942 ASSERT(scdp->scd_refcnt > 0);
15133 14943 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
15134 14944
15135 14945 if ((old_scdp = sfmmup->sfmmu_scdp) != NULL) {
15136 14946 ASSERT(old_scdp != scdp);
15137 14947
15138 14948 mutex_enter(&old_scdp->scd_mutex);
15139 14949 sfmmu_from_scd_list(&old_scdp->scd_sf_list, sfmmup);
15140 14950 mutex_exit(&old_scdp->scd_mutex);
15141 14951 /*
15142 14952 * sfmmup leaves the old scd. Update sfmmu_ttecnt to
15143 14953 * include the shme rgn ttecnt for rgns that
15144 14954 * were in the old SCD
15145 14955 */
15146 14956 for (i = 0; i < mmu_page_sizes; i++) {
15147 14957 ASSERT(sfmmup->sfmmu_scdrttecnt[i] ==
15148 14958 old_scdp->scd_rttecnt[i]);
15149 14959 atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
15150 14960 sfmmup->sfmmu_scdrttecnt[i]);
15151 14961 }
15152 14962 }
15153 14963
15154 14964 /*
15155 14965 * Move sfmmu to the scd lists.
15156 14966 */
15157 14967 mutex_enter(&scdp->scd_mutex);
15158 14968 sfmmu_to_scd_list(&scdp->scd_sf_list, sfmmup);
15159 14969 mutex_exit(&scdp->scd_mutex);
15160 14970 SF_SCD_INCR_REF(scdp);
15161 14971
15162 14972 hatlockp = sfmmu_hat_enter(sfmmup);
15163 14973 /*
15164 14974 * For a multi-thread process, we must stop
15165 14975 * all the other threads before joining the scd.
15166 14976 */
15167 14977
15168 14978 SFMMU_FLAGS_SET(sfmmup, HAT_JOIN_SCD);
15169 14979
15170 14980 sfmmu_invalidate_ctx(sfmmup);
15171 14981 sfmmup->sfmmu_scdp = scdp;
15172 14982
15173 14983 /*
15174 14984 * Copy scd_rttecnt into sfmmup's sfmmu_scdrttecnt, and update
15175 14985 * sfmmu_ttecnt to not include the rgn ttecnt just joined in SCD.
15176 14986 */
15177 14987 for (i = 0; i < mmu_page_sizes; i++) {
15178 14988 sfmmup->sfmmu_scdrttecnt[i] = scdp->scd_rttecnt[i];
15179 14989 ASSERT(sfmmup->sfmmu_ttecnt[i] >= scdp->scd_rttecnt[i]);
15180 14990 atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
15181 14991 -sfmmup->sfmmu_scdrttecnt[i]);
15182 14992 }
15183 14993 /* update tsb0 inflation count */
15184 14994 if (old_scdp != NULL) {
15185 14995 sfmmup->sfmmu_tsb0_4minflcnt +=
15186 14996 old_scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt;
15187 14997 }
15188 14998 ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >=
15189 14999 scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt);
15190 15000 sfmmup->sfmmu_tsb0_4minflcnt -= scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt;
15191 15001
15192 15002 sfmmu_hat_exit(hatlockp);
15193 15003
15194 15004 if (old_scdp != NULL) {
15195 15005 SF_SCD_DECR_REF(srdp, old_scdp);
15196 15006 }
15197 15007
15198 15008 }
15199 15009
15200 15010 /*
15201 15011 * This routine is called by a process to become part of an SCD. It is called
15202 15012 * from sfmmu_tsbmiss_exception() once most of the initial work has been
15203 15013 * done by sfmmu_join_scd(). This routine must not drop the hat lock.
15204 15014 */
15205 15015 static void
15206 15016 sfmmu_finish_join_scd(sfmmu_t *sfmmup)
15207 15017 {
15208 15018 struct tsb_info *tsbinfop;
15209 15019
15210 15020 ASSERT(sfmmu_hat_lock_held(sfmmup));
15211 15021 ASSERT(sfmmup->sfmmu_scdp != NULL);
15212 15022 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD));
15213 15023 ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
15214 15024 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID));
15215 15025
15216 15026 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
15217 15027 tsbinfop = tsbinfop->tsb_next) {
15218 15028 if (tsbinfop->tsb_flags & TSB_SWAPPED) {
15219 15029 continue;
15220 15030 }
15221 15031 ASSERT(!(tsbinfop->tsb_flags & TSB_RELOC_FLAG));
15222 15032
15223 15033 sfmmu_inv_tsb(tsbinfop->tsb_va,
15224 15034 TSB_BYTES(tsbinfop->tsb_szc));
15225 15035 }
15226 15036
15227 15037 /* Set HAT_CTX1_FLAG for all SCD ISMs */
15228 15038 sfmmu_ism_hatflags(sfmmup, 1);
15229 15039
15230 15040 SFMMU_STAT(sf_join_scd);
15231 15041 }
15232 15042
15233 15043 /*
15234 15044 * This routine is called in order to check if there is an SCD which matches
15235 15045  * the process's region map; if not, a new SCD may be created.
15236 15046 */
15237 15047 static void
15238 15048 sfmmu_find_scd(sfmmu_t *sfmmup)
15239 15049 {
15240 15050 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
15241 15051 sf_scd_t *scdp, *new_scdp;
15242 15052 int ret;
15243 15053
15244 15054 ASSERT(srdp != NULL);
15245 15055 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
15246 15056
15247 15057 mutex_enter(&srdp->srd_scd_mutex);
15248 15058 for (scdp = srdp->srd_scdp; scdp != NULL;
15249 15059 scdp = scdp->scd_next) {
15250 15060 SF_RGNMAP_EQUAL(&scdp->scd_region_map,
15251 15061 &sfmmup->sfmmu_region_map, ret);
15252 15062 if (ret == 1) {
15253 15063 SF_SCD_INCR_REF(scdp);
15254 15064 mutex_exit(&srdp->srd_scd_mutex);
15255 15065 sfmmu_join_scd(scdp, sfmmup);
15256 15066 ASSERT(scdp->scd_refcnt >= 2);
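			/*
			 * Drop the reference taken above under
			 * srd_scd_mutex; it only served to keep the scd
			 * alive across sfmmu_join_scd(), which takes its
			 * own reference.
			 */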
15257 15067 atomic_dec_32((volatile uint32_t *)&scdp->scd_refcnt);
15258 15068 return;
15259 15069 } else {
15260 15070 /*
15261 15071 * If the sfmmu region map is a subset of the scd
15262 15072 * region map, then the assumption is that this process
15263 15073 * will continue attaching to ISM segments until the
15264 15074 * region maps are equal.
15265 15075 */
15266 15076 SF_RGNMAP_IS_SUBSET(&scdp->scd_region_map,
15267 15077 &sfmmup->sfmmu_region_map, ret);
15268 15078 if (ret == 1) {
15269 15079 mutex_exit(&srdp->srd_scd_mutex);
15270 15080 return;
15271 15081 }
15272 15082 }
15273 15083 }
15274 15084
15275 15085 ASSERT(scdp == NULL);
15276 15086 /*
15277 15087 * No matching SCD has been found, create a new one.
15278 15088 */
15279 15089 if ((new_scdp = sfmmu_alloc_scd(srdp, &sfmmup->sfmmu_region_map)) ==
15280 15090 NULL) {
15281 15091 mutex_exit(&srdp->srd_scd_mutex);
15282 15092 return;
15283 15093 }
15284 15094
15285 15095 /*
15286 15096 * sfmmu_alloc_scd() returns with a ref count of 1 on the scd.
15287 15097 */
15288 15098
15289 15099 /* Set scd_rttecnt for shme rgns in SCD */
15290 15100 sfmmu_set_scd_rttecnt(srdp, new_scdp);
15291 15101
15292 15102 /*
15293 15103 * Link scd onto srd_scdp list and scd sfmmu onto region/iment lists.
15294 15104 */
15295 15105 sfmmu_link_scd_to_regions(srdp, new_scdp);
15296 15106 sfmmu_add_scd(&srdp->srd_scdp, new_scdp);
15297 15107 SFMMU_STAT_ADD(sf_create_scd, 1);
15298 15108
15299 15109 mutex_exit(&srdp->srd_scd_mutex);
15300 15110 sfmmu_join_scd(new_scdp, sfmmup);
15301 15111 ASSERT(new_scdp->scd_refcnt >= 2);
15302 15112 atomic_dec_32((volatile uint32_t *)&new_scdp->scd_refcnt);
15303 15113 }
15304 15114
15305 15115 /*
15306 15116 * This routine is called by a process to remove itself from an SCD. It is
15307 15117  * either called when the process has detached from a segment or from
15308 15118 * hat_free_start() as a result of calling exit.
15309 15119 */
15310 15120 static void
15311 15121 sfmmu_leave_scd(sfmmu_t *sfmmup, uchar_t r_type)
15312 15122 {
15313 15123 sf_scd_t *scdp = sfmmup->sfmmu_scdp;
15314 15124 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
15315 15125 hatlock_t *hatlockp = TSB_HASH(sfmmup);
15316 15126 int i;
15317 15127
15318 15128 ASSERT(scdp != NULL);
15319 15129 ASSERT(srdp != NULL);
15320 15130
15321 15131 if (sfmmup->sfmmu_free) {
15322 15132 /*
15323 15133 * If the process is part of an SCD the sfmmu is unlinked
15324 15134 * from scd_sf_list.
15325 15135 */
15326 15136 mutex_enter(&scdp->scd_mutex);
15327 15137 sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup);
15328 15138 mutex_exit(&scdp->scd_mutex);
15329 15139 /*
15330 15140 * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that
15331 15141 * are about to leave the SCD
15332 15142 */
15333 15143 for (i = 0; i < mmu_page_sizes; i++) {
15334 15144 ASSERT(sfmmup->sfmmu_scdrttecnt[i] ==
15335 15145 scdp->scd_rttecnt[i]);
15336 15146 atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
15337 15147 sfmmup->sfmmu_scdrttecnt[i]);
15338 15148 sfmmup->sfmmu_scdrttecnt[i] = 0;
15339 15149 }
15340 15150 sfmmup->sfmmu_scdp = NULL;
15341 15151
15342 15152 SF_SCD_DECR_REF(srdp, scdp);
15343 15153 return;
15344 15154 }
15345 15155
15346 15156 ASSERT(r_type != SFMMU_REGION_ISM ||
15347 15157 SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
15348 15158 ASSERT(scdp->scd_refcnt);
15349 15159 ASSERT(!sfmmup->sfmmu_free);
15350 15160 ASSERT(sfmmu_hat_lock_held(sfmmup));
15351 15161 ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
15352 15162
15353 15163 /*
15354 15164 * Wait for ISM maps to be updated.
15355 15165 */
15356 15166 if (r_type != SFMMU_REGION_ISM) {
15357 15167 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY) &&
15358 15168 sfmmup->sfmmu_scdp != NULL) {
15359 15169 cv_wait(&sfmmup->sfmmu_tsb_cv,
15360 15170 HATLOCK_MUTEXP(hatlockp));
15361 15171 }
15362 15172
15363 15173 if (sfmmup->sfmmu_scdp == NULL) {
15364 15174 sfmmu_hat_exit(hatlockp);
15365 15175 return;
15366 15176 }
15367 15177 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY);
15368 15178 }
15369 15179
15370 15180 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
15371 15181 SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD);
15372 15182 /*
15373 15183 * Since HAT_JOIN_SCD was set our context
15374 15184 * is still invalid.
15375 15185 */
15376 15186 } else {
15377 15187 /*
15378 15188 * For a multi-thread process, we must stop
15379 15189 * all the other threads before leaving the scd.
15380 15190 */
15381 15191
15382 15192 sfmmu_invalidate_ctx(sfmmup);
15383 15193 }
15384 15194
15385 15195 /* Clear all the rid's for ISM, delete flags, etc */
15386 15196 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
15387 15197 sfmmu_ism_hatflags(sfmmup, 0);
15388 15198
15389 15199 /*
15390 15200 * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that
15391 15201 * are in SCD before this sfmmup leaves the SCD.
15392 15202 */
15393 15203 for (i = 0; i < mmu_page_sizes; i++) {
15394 15204 ASSERT(sfmmup->sfmmu_scdrttecnt[i] ==
15395 15205 scdp->scd_rttecnt[i]);
15396 15206 atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
15397 15207 sfmmup->sfmmu_scdrttecnt[i]);
15398 15208 sfmmup->sfmmu_scdrttecnt[i] = 0;
15399 15209 /* update ismttecnt to include SCD ism before hat leaves SCD */
15400 15210 sfmmup->sfmmu_ismttecnt[i] += sfmmup->sfmmu_scdismttecnt[i];
15401 15211 sfmmup->sfmmu_scdismttecnt[i] = 0;
15402 15212 }
15403 15213 /* update tsb0 inflation count */
15404 15214 sfmmup->sfmmu_tsb0_4minflcnt += scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt;
15405 15215
15406 15216 if (r_type != SFMMU_REGION_ISM) {
15407 15217 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY);
15408 15218 }
15409 15219 sfmmup->sfmmu_scdp = NULL;
15410 15220
15411 15221 sfmmu_hat_exit(hatlockp);
15412 15222
15413 15223 /*
15414 15224  * Unlink sfmmu from scd_sf_list; this can be done without holding
15415 15225  * the hat lock, as we hold the sfmmu_as lock which prevents
15416 15226 * hat_join_region from adding this thread to the scd again. Other
15417 15227 * threads check if sfmmu_scdp is NULL under hat lock and if it's NULL
15418 15228 * they won't get here, since sfmmu_leave_scd() clears sfmmu_scdp
15419 15229 * while holding the hat lock.
15420 15230 */
15421 15231 mutex_enter(&scdp->scd_mutex);
15422 15232 sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup);
15423 15233 mutex_exit(&scdp->scd_mutex);
15424 15234 SFMMU_STAT(sf_leave_scd);
15425 15235
15426 15236 SF_SCD_DECR_REF(srdp, scdp);
15427 15237 hatlockp = sfmmu_hat_enter(sfmmup);
15428 15238
15429 15239 }
15430 15240
15431 15241 /*
15432 15242 * Unlink and free up an SCD structure with a reference count of 0.
15433 15243 */
15434 15244 static void
15435 15245 sfmmu_destroy_scd(sf_srd_t *srdp, sf_scd_t *scdp, sf_region_map_t *scd_rmap)
15436 15246 {
15437 15247 sfmmu_t *scsfmmup;
15438 15248 sf_scd_t *sp;
15439 15249 hatlock_t *shatlockp;
15440 15250 int i, ret;
15441 15251
15442 15252 mutex_enter(&srdp->srd_scd_mutex);
15443 15253 for (sp = srdp->srd_scdp; sp != NULL; sp = sp->scd_next) {
15444 15254 if (sp == scdp)
15445 15255 break;
15446 15256 }
15447 15257 if (sp == NULL || sp->scd_refcnt) {
15448 15258 mutex_exit(&srdp->srd_scd_mutex);
15449 15259 return;
15450 15260 }
15451 15261
15452 15262 /*
15453 15263 * It is possible that the scd has been freed and reallocated with a
15454 15264 * different region map while we've been waiting for the srd_scd_mutex.
15455 15265 */
15456 15266 SF_RGNMAP_EQUAL(scd_rmap, &sp->scd_region_map, ret);
15457 15267 if (ret != 1) {
15458 15268 mutex_exit(&srdp->srd_scd_mutex);
15459 15269 return;
15460 15270 }
15461 15271
15462 15272 ASSERT(scdp->scd_sf_list == NULL);
15463 15273 /*
15464 15274 * Unlink scd from srd_scdp list.
15465 15275 */
15466 15276 sfmmu_remove_scd(&srdp->srd_scdp, scdp);
15467 15277 mutex_exit(&srdp->srd_scd_mutex);
15468 15278
15469 15279 sfmmu_unlink_scd_from_regions(srdp, scdp);
15470 15280
15471 15281 /* Clear shared context tsb and release ctx */
15472 15282 scsfmmup = scdp->scd_sfmmup;
15473 15283
15474 15284 /*
15475 15285 	 * Create a barrier so that the scd will not be destroyed
15476 15286 	 * if another thread still holds the same shared hat lock.
15477 15287 * E.g., sfmmu_tsbmiss_exception() needs to acquire the
15478 15288 * shared hat lock before checking the shared tsb reloc flag.
15479 15289 */
15480 15290 shatlockp = sfmmu_hat_enter(scsfmmup);
15481 15291 sfmmu_hat_exit(shatlockp);
15482 15292
15483 15293 sfmmu_free_scd_tsbs(scsfmmup);
15484 15294
15485 15295 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
15486 15296 if (scsfmmup->sfmmu_hmeregion_links[i] != NULL) {
15487 15297 kmem_free(scsfmmup->sfmmu_hmeregion_links[i],
15488 15298 SFMMU_L2_HMERLINKS_SIZE);
15489 15299 scsfmmup->sfmmu_hmeregion_links[i] = NULL;
15490 15300 }
15491 15301 }
15492 15302 kmem_cache_free(sfmmuid_cache, scsfmmup);
15493 15303 kmem_cache_free(scd_cache, scdp);
15494 15304 SFMMU_STAT(sf_destroy_scd);
15495 15305 }
15496 15306
15497 15307 /*
15498 15308 * Modifies the HAT_CTX1_FLAG for each of the ISM segments which correspond to
15499 15309 * bits which are set in the ism_region_map parameter. This flag indicates to
15500 15310 * the tsbmiss handler that mapping for these segments should be loaded using
15501 15311 * the shared context.
15502 15312 */
15503 15313 static void
15504 15314 sfmmu_ism_hatflags(sfmmu_t *sfmmup, int addflag)
15505 15315 {
15506 15316 sf_scd_t *scdp = sfmmup->sfmmu_scdp;
15507 15317 ism_blk_t *ism_blkp;
15508 15318 ism_map_t *ism_map;
15509 15319 int i, rid;
15510 15320
15511 15321 ASSERT(sfmmup->sfmmu_iblk != NULL);
15512 15322 ASSERT(scdp != NULL);
15513 15323 /*
15514 15324 * Note that the caller either set HAT_ISMBUSY flag or checked
15515 15325 * under hat lock that HAT_ISMBUSY was not set by another thread.
15516 15326 */
15517 15327 ASSERT(sfmmu_hat_lock_held(sfmmup));
15518 15328
15519 15329 ism_blkp = sfmmup->sfmmu_iblk;
15520 15330 while (ism_blkp != NULL) {
15521 15331 ism_map = ism_blkp->iblk_maps;
15522 15332 for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) {
15523 15333 rid = ism_map[i].imap_rid;
15524 15334 if (rid == SFMMU_INVALID_ISMRID) {
15525 15335 continue;
15526 15336 }
15527 15337 ASSERT(rid >= 0 && rid < SFMMU_MAX_ISM_REGIONS);
15528 15338 if (SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid) &&
15529 15339 addflag) {
15530 15340 ism_map[i].imap_hatflags |=
15531 15341 HAT_CTX1_FLAG;
15532 15342 } else {
15533 15343 ism_map[i].imap_hatflags &=
15534 15344 ~HAT_CTX1_FLAG;
15535 15345 }
15536 15346 }
15537 15347 ism_blkp = ism_blkp->iblk_next;
15538 15348 }
15539 15349 }
15540 15350
15541 15351 static int
15542 15352 sfmmu_srd_lock_held(sf_srd_t *srdp)
15543 15353 {
15544 15354 return (MUTEX_HELD(&srdp->srd_mutex));
15545 15355 }
15546 15356
15547 15357 /* ARGSUSED */
15548 15358 static int
15549 15359 sfmmu_scdcache_constructor(void *buf, void *cdrarg, int kmflags)
15550 15360 {
15551 15361 sf_scd_t *scdp = (sf_scd_t *)buf;
15552 15362
15553 15363 bzero(buf, sizeof (sf_scd_t));
15554 15364 mutex_init(&scdp->scd_mutex, NULL, MUTEX_DEFAULT, NULL);
15555 15365 return (0);
15556 15366 }
15557 15367
15558 15368 /* ARGSUSED */
15559 15369 static void
15560 15370 sfmmu_scdcache_destructor(void *buf, void *cdrarg)
15561 15371 {
15562 15372 sf_scd_t *scdp = (sf_scd_t *)buf;
15563 15373
15564 15374 mutex_destroy(&scdp->scd_mutex);
15565 15375 }
15566 15376
15567 15377 /*
15568 15378 * The listp parameter is a pointer to a list of hmeblks which are partially
15569 15379  * freed as a result of calling sfmmu_hblk_hash_rm(). The last phase of the
15570 15380 * freeing process is to cross-call all cpus to ensure that there are no
15571 15381 * remaining cached references.
15572 15382 *
15573 15383 * If the local generation number is less than the global then we can free
15574 15384 * hmeblks which are already on the pending queue as another cpu has completed
15575 15385 * the cross-call.
15576 15386 *
15577 15387 * We cross-call to make sure that there are no threads on other cpus accessing
15578 15388 * these hmblks and then complete the process of freeing them under the
15579 15389 * following conditions:
15580 15390 * The total number of pending hmeblks is greater than the threshold
15581 15391 * The reserve list has fewer than HBLK_RESERVE_CNT hmeblks
15582 15392 * It is at least 1 second since the last time we cross-called
15583 15393 *
15584 15394 * Otherwise, we add the hmeblks to the per-cpu pending queue.
15585 15395 */
15586 15396 static void
15587 15397 sfmmu_hblks_list_purge(struct hme_blk **listp, int dontfree)
15588 15398 {
15589 15399 struct hme_blk *hblkp, *pr_hblkp = NULL;
15590 15400 int count = 0;
15591 15401 cpuset_t cpuset = cpu_ready_set;
15592 15402 cpu_hme_pend_t *cpuhp;
15593 15403 timestruc_t now;
15594 15404 int one_second_expired = 0;
15595 15405
15596 15406 gethrestime_lasttick(&now);
15597 15407
15598 15408 for (hblkp = *listp; hblkp != NULL; hblkp = hblkp->hblk_next) {
15599 15409 ASSERT(hblkp->hblk_shw_bit == 0);
15600 15410 ASSERT(hblkp->hblk_shared == 0);
15601 15411 count++;
15602 15412 pr_hblkp = hblkp;
15603 15413 }
15604 15414
15605 15415 cpuhp = &cpu_hme_pend[CPU->cpu_seqid];
15606 15416 mutex_enter(&cpuhp->chp_mutex);
15607 15417
15608 15418 if ((cpuhp->chp_count + count) == 0) {
15609 15419 mutex_exit(&cpuhp->chp_mutex);
15610 15420 return;
15611 15421 }
15612 15422
15613 15423 if ((now.tv_sec - cpuhp->chp_timestamp) > 1) {
15614 15424 one_second_expired = 1;
15615 15425 }
15616 15426
15617 15427 if (!dontfree && (freehblkcnt < HBLK_RESERVE_CNT ||
15618 15428 (cpuhp->chp_count + count) > cpu_hme_pend_thresh ||
15619 15429 one_second_expired)) {
15620 15430 /* Append global list to local */
15621 15431 if (pr_hblkp == NULL) {
15622 15432 *listp = cpuhp->chp_listp;
15623 15433 } else {
15624 15434 pr_hblkp->hblk_next = cpuhp->chp_listp;
15625 15435 }
15626 15436 cpuhp->chp_listp = NULL;
15627 15437 cpuhp->chp_count = 0;
15628 15438 cpuhp->chp_timestamp = now.tv_sec;
15629 15439 mutex_exit(&cpuhp->chp_mutex);
15630 15440
15631 15441 kpreempt_disable();
15632 15442 CPUSET_DEL(cpuset, CPU->cpu_id);
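		/*
		 * xt_sync() is issued twice; presumably the second round
		 * ensures that a handler already mid-walk when the first
		 * sync was posted has drained before the hmeblks are freed.
		 */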
15633 15443 xt_sync(cpuset);
15634 15444 xt_sync(cpuset);
15635 15445 kpreempt_enable();
15636 15446
15637 15447 /*
15638 15448 * At this stage we know that no trap handlers on other
15639 15449 * cpus can have references to hmeblks on the list.
15640 15450 */
15641 15451 sfmmu_hblk_free(listp);
15642 15452 } else if (*listp != NULL) {
15643 15453 pr_hblkp->hblk_next = cpuhp->chp_listp;
15644 15454 cpuhp->chp_listp = *listp;
15645 15455 cpuhp->chp_count += count;
15646 15456 *listp = NULL;
15647 15457 mutex_exit(&cpuhp->chp_mutex);
15648 15458 } else {
15649 15459 mutex_exit(&cpuhp->chp_mutex);
15650 15460 }
15651 15461 }
15652 15462
15653 15463 /*
15654 15464  * Add an hmeblk to the hash list.
15655 15465 */
15656 15466 void
15657 15467 sfmmu_hblk_hash_add(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
15658 15468 uint64_t hblkpa)
15659 15469 {
15660 15470 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
15661 15471 #ifdef DEBUG
15662 15472 if (hmebp->hmeblkp == NULL) {
15663 15473 ASSERT(hmebp->hmeh_nextpa == HMEBLK_ENDPA);
15664 15474 }
15665 15475 #endif /* DEBUG */
15666 15476
15667 15477 hmeblkp->hblk_nextpa = hmebp->hmeh_nextpa;
15668 15478 /*
15669 15479 * Since the TSB miss handler now does not lock the hash chain before
15670 15480 	 * walking it, make sure that the hmeblk's nextpa is globally visible
15671 15481 * before we make the hmeblk globally visible by updating the chain root
15672 15482 * pointer in the hash bucket.
15673 15483 */
15674 15484 membar_producer();
15675 15485 hmebp->hmeh_nextpa = hblkpa;
15676 15486 hmeblkp->hblk_next = hmebp->hmeblkp;
15677 15487 hmebp->hmeblkp = hmeblkp;
15678 15488
15679 15489 }
15680 15490
15681 15491 /*
15682 15492  * This function is the first part of a two-part process to remove an hmeblk
15683 15493 * from the hash chain. In this phase we unlink the hmeblk from the hash chain
15684 15494 * but leave the next physical pointer unchanged. The hmeblk is then linked onto
15685 15495 * a per-cpu pending list using the virtual address pointer.
15686 15496 *
15687 15497 * TSB miss trap handlers that start after this phase will no longer see
15688 15498 * this hmeblk. TSB miss handlers that still cache this hmeblk in a register
15689 15499  * can still use it for further chain traversal because we haven't yet modified
15690 15500 * the next physical pointer or freed it.
15691 15501 *
15692 15502 * In the second phase of hmeblk removal we'll issue a barrier xcall before
15693 15503 * we reuse or free this hmeblk. This will make sure all lingering references to
15694 15504  * the hmeblk after the first phase disappear before we finally reclaim it.
15695 15505 * This scheme eliminates the need for TSB miss handlers to lock hmeblk chains
15696 15506 * during their traversal.
15697 15507 *
15698 15508 * The hmehash_mutex must be held when calling this function.
15699 15509 *
15700 15510 * Input:
15701 15511 * hmebp - hme hash bucket pointer
15702 15512 * hmeblkp - address of hmeblk to be removed
15703 15513 * pr_hblk - virtual address of previous hmeblkp
15704 15514 * listp - pointer to list of hmeblks linked by virtual address
15705 15515 * free_now flag - indicates that a complete removal from the hash chains
15706 15516 * is necessary.
15707 15517 *
15708 15518 * It is inefficient to use the free_now flag as a cross-call is required to
15709 15519  * remove a single hmeblk from the hash chain, but it is necessary when
15710 15520  * hmeblks are in short supply.
15711 15521 */
15712 15522 void
15713 15523 sfmmu_hblk_hash_rm(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
15714 15524 struct hme_blk *pr_hblk, struct hme_blk **listp,
15715 15525 int free_now)
15716 15526 {
15717 15527 int shw_size, vshift;
15718 15528 struct hme_blk *shw_hblkp;
15719 15529 uint_t shw_mask, newshw_mask;
15720 15530 caddr_t vaddr;
15721 15531 int size;
15722 15532 cpuset_t cpuset = cpu_ready_set;
15723 15533
15724 15534 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
15725 15535
15726 15536 if (hmebp->hmeblkp == hmeblkp) {
15727 15537 hmebp->hmeh_nextpa = hmeblkp->hblk_nextpa;
15728 15538 hmebp->hmeblkp = hmeblkp->hblk_next;
15729 15539 } else {
15730 15540 pr_hblk->hblk_nextpa = hmeblkp->hblk_nextpa;
15731 15541 pr_hblk->hblk_next = hmeblkp->hblk_next;
15732 15542 }
15733 15543
15734 15544 size = get_hblk_ttesz(hmeblkp);
15735 15545 shw_hblkp = hmeblkp->hblk_shadow;
15736 15546 if (shw_hblkp) {
15737 15547 ASSERT(hblktosfmmu(hmeblkp) != KHATID);
15738 15548 ASSERT(!hmeblkp->hblk_shared);
15739 15549 #ifdef DEBUG
15740 15550 if (mmu_page_sizes == max_mmu_page_sizes) {
15741 15551 ASSERT(size < TTE256M);
15742 15552 } else {
15743 15553 ASSERT(size < TTE4M);
15744 15554 }
15745 15555 #endif /* DEBUG */
15746 15556
15747 15557 shw_size = get_hblk_ttesz(shw_hblkp);
15748 15558 vaddr = (caddr_t)get_hblk_base(hmeblkp);
15749 15559 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size);
15750 15560 ASSERT(vshift < 8);
15751 15561 /*
15752 15562 * Atomically clear shadow mask bit
15753 15563 */
15754 15564 do {
15755 15565 shw_mask = shw_hblkp->hblk_shw_mask;
15756 15566 ASSERT(shw_mask & (1 << vshift));
15757 15567 newshw_mask = shw_mask & ~(1 << vshift);
15758 15568 newshw_mask = atomic_cas_32(&shw_hblkp->hblk_shw_mask,
15759 15569 shw_mask, newshw_mask);
15760 15570 } while (newshw_mask != shw_mask);
15761 15571 hmeblkp->hblk_shadow = NULL;
15762 15572 }
15763 15573 hmeblkp->hblk_shw_bit = 0;
15764 15574
15765 15575 if (hmeblkp->hblk_shared) {
15766 15576 #ifdef DEBUG
15767 15577 sf_srd_t *srdp;
15768 15578 sf_region_t *rgnp;
15769 15579 uint_t rid;
15770 15580
15771 15581 srdp = hblktosrd(hmeblkp);
15772 15582 ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
15773 15583 rid = hmeblkp->hblk_tag.htag_rid;
15774 15584 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
15775 15585 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
15776 15586 rgnp = srdp->srd_hmergnp[rid];
15777 15587 ASSERT(rgnp != NULL);
15778 15588 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
15779 15589 #endif /* DEBUG */
15780 15590 hmeblkp->hblk_shared = 0;
15781 15591 }
15782 15592 if (free_now) {
15783 15593 kpreempt_disable();
15784 15594 CPUSET_DEL(cpuset, CPU->cpu_id);
15785 15595 xt_sync(cpuset);
15786 15596 xt_sync(cpuset);
15787 15597 kpreempt_enable();
15788 15598
15789 15599 hmeblkp->hblk_nextpa = HMEBLK_ENDPA;
15790 15600 hmeblkp->hblk_next = NULL;
15791 15601 } else {
15792 15602 /* Append hmeblkp to listp for processing later. */
15793 15603 hmeblkp->hblk_next = *listp;
15794 15604 *listp = hmeblkp;
15795 15605 }
15796 15606 }
15797 15607
15798 15608 /*
15799 15609 * This routine is called when memory is in short supply and returns a free
15800 15610 * hmeblk of the requested size from the cpu pending lists.
15801 15611 */
15802 15612 static struct hme_blk *
15803 15613 sfmmu_check_pending_hblks(int size)
15804 15614 {
15805 15615 int i;
15806 15616 struct hme_blk *hmeblkp = NULL, *last_hmeblkp;
15807 15617 int found_hmeblk;
15808 15618 cpuset_t cpuset = cpu_ready_set;
15809 15619 cpu_hme_pend_t *cpuhp;
15810 15620
15811 15621 /* Flush cpu hblk pending queues */
15812 15622 for (i = 0; i < NCPU; i++) {
15813 15623 cpuhp = &cpu_hme_pend[i];
15814 15624 if (cpuhp->chp_listp != NULL) {
15815 15625 mutex_enter(&cpuhp->chp_mutex);
15816 15626 if (cpuhp->chp_listp == NULL) {
15817 15627 mutex_exit(&cpuhp->chp_mutex);
15818 15628 continue;
15819 15629 }
15820 15630 found_hmeblk = 0;
15821 15631 last_hmeblkp = NULL;
15822 15632 for (hmeblkp = cpuhp->chp_listp; hmeblkp != NULL;
15823 15633 hmeblkp = hmeblkp->hblk_next) {
15824 15634 if (get_hblk_ttesz(hmeblkp) == size) {
15825 15635 if (last_hmeblkp == NULL) {
15826 15636 cpuhp->chp_listp =
15827 15637 hmeblkp->hblk_next;
15828 15638 } else {
15829 15639 last_hmeblkp->hblk_next =
15830 15640 hmeblkp->hblk_next;
15831 15641 }
15832 15642 ASSERT(cpuhp->chp_count > 0);
15833 15643 cpuhp->chp_count--;
15834 15644 found_hmeblk = 1;
15835 15645 break;
15836 15646 } else {
15837 15647 last_hmeblkp = hmeblkp;
15838 15648 }
15839 15649 }
15840 15650 mutex_exit(&cpuhp->chp_mutex);
15841 15651
15842 15652 if (found_hmeblk) {
15843 15653 kpreempt_disable();
15844 15654 CPUSET_DEL(cpuset, CPU->cpu_id);
15845 15655 xt_sync(cpuset);
15846 15656 xt_sync(cpuset);
15847 15657 kpreempt_enable();
15848 15658 return (hmeblkp);
15849 15659 }
15850 15660 }
15851 15661 }
15852 15662 return (NULL);
15853 15663 }
↓ open down ↓ |
1500 lines elided |
↑ open up ↑ |