5045 use atomic_{inc,dec}_* instead of atomic_add_*
--- old/usr/src/uts/sfmmu/vm/hat_sfmmu.h
+++ new/usr/src/uts/sfmmu/vm/hat_sfmmu.h
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1987, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 /*
26 26 * VM - Hardware Address Translation management.
27 27 *
28 28 * This file describes the contents of the sun-reference-mmu(sfmmu)-
29 29 * specific hat data structures and the sfmmu-specific hat procedures.
30 30 * The machine-independent interface is described in <vm/hat.h>.
31 31 */
32 32
33 33 #ifndef _VM_HAT_SFMMU_H
34 34 #define _VM_HAT_SFMMU_H
35 35
36 36 #ifdef __cplusplus
37 37 extern "C" {
38 38 #endif
39 39
40 40 #ifndef _ASM
41 41
42 42 #include <sys/types.h>
43 43
44 44 #endif /* _ASM */
45 45
46 46 #ifdef _KERNEL
47 47
48 48 #include <sys/pte.h>
49 49 #include <vm/mach_sfmmu.h>
50 50 #include <sys/mmu.h>
51 51
52 52 /*
53 53 * Don't alter these without considering changes to ism_map_t.
54 54 */
55 55 #define DEFAULT_ISM_PAGESIZE MMU_PAGESIZE4M
56 56 #define DEFAULT_ISM_PAGESZC TTE4M
57 57 #define ISM_PG_SIZE(ism_vbshift) (1 << ism_vbshift)
58 58 #define ISM_SZ_MASK(ism_vbshift) (ISM_PG_SIZE(ism_vbshift) - 1)
59 59 #define ISM_MAP_SLOTS 8 /* Change this carefully. */
60 60
61 61 #ifndef _ASM
62 62
63 63 #include <sys/t_lock.h>
64 64 #include <vm/hat.h>
65 65 #include <vm/seg.h>
66 66 #include <sys/machparam.h>
67 67 #include <sys/systm.h>
68 68 #include <sys/x_call.h>
69 69 #include <vm/page.h>
70 70 #include <sys/ksynch.h>
71 71
72 72 typedef struct hat sfmmu_t;
73 73 typedef struct sf_scd sf_scd_t;
74 74
75 75 /*
76 76 * SFMMU attributes for hat_memload/hat_devload
77 77 */
78 78 #define SFMMU_UNCACHEPTTE 0x01000000 /* uncache in physical $ */
79 79 #define SFMMU_UNCACHEVTTE 0x02000000 /* uncache in virtual $ */
80 80 #define SFMMU_SIDEFFECT 0x04000000 /* set side effect bit */
81 81 #define SFMMU_LOAD_ALLATTR (HAT_PROT_MASK | HAT_ORDER_MASK | \
82 82 HAT_ENDIAN_MASK | HAT_NOFAULT | HAT_NOSYNC | \
83 83 SFMMU_UNCACHEPTTE | SFMMU_UNCACHEVTTE | SFMMU_SIDEFFECT)
84 84
85 85
86 86 /*
87 87 * sfmmu flags for hat_memload/hat_devload
88 88 */
89 89 #define SFMMU_NO_TSBLOAD 0x08000000 /* do not preload tsb */
90 90 #define SFMMU_LOAD_ALLFLAG (HAT_LOAD | HAT_LOAD_LOCK | \
91 91 HAT_LOAD_ADV | HAT_LOAD_CONTIG | HAT_LOAD_NOCONSIST | \
92 92 HAT_LOAD_SHARE | HAT_LOAD_REMAP | SFMMU_NO_TSBLOAD | \
93 93 HAT_RELOAD_SHARE | HAT_NO_KALLOC | HAT_LOAD_TEXT)
94 94
95 95 /*
96 96 * sfmmu internal flag to hat_pageunload that spares locked mappings
97 97 */
98 98 #define SFMMU_KERNEL_RELOC 0x8000
99 99
100 100 /*
101 101 * mode for sfmmu_chgattr
102 102 */
103 103 #define SFMMU_SETATTR 0x0
104 104 #define SFMMU_CLRATTR 0x1
105 105 #define SFMMU_CHGATTR 0x2
106 106
107 107 /*
108 108 * sfmmu specific flags for page_t
109 109 */
110 110 #define P_PNC 0x8 /* non-caching is permanent bit */
111 111 #define P_TNC 0x10 /* non-caching is temporary bit */
112 112 #define P_KPMS 0x20 /* kpm mapped small (vac alias prevention) */
113 113 #define P_KPMC 0x40 /* kpm conflict page (vac alias prevention) */
114 114
115 115 #define PP_GENERIC_ATTR(pp) ((pp)->p_nrm & (P_MOD | P_REF | P_RO))
116 116 #define PP_ISMOD(pp) ((pp)->p_nrm & P_MOD)
117 117 #define PP_ISREF(pp) ((pp)->p_nrm & P_REF)
118 118 #define PP_ISRO(pp) ((pp)->p_nrm & P_RO)
119 119 #define PP_ISNC(pp) ((pp)->p_nrm & (P_PNC|P_TNC))
120 120 #define PP_ISPNC(pp) ((pp)->p_nrm & P_PNC)
121 121 #ifdef VAC
122 122 #define PP_ISTNC(pp) ((pp)->p_nrm & P_TNC)
123 123 #endif
124 124 #define PP_ISKPMS(pp) ((pp)->p_nrm & P_KPMS)
125 125 #define PP_ISKPMC(pp) ((pp)->p_nrm & P_KPMC)
126 126
127 127 #define PP_SETMOD(pp) ((pp)->p_nrm |= P_MOD)
128 128 #define PP_SETREF(pp) ((pp)->p_nrm |= P_REF)
129 129 #define PP_SETREFMOD(pp) ((pp)->p_nrm |= (P_REF|P_MOD))
130 130 #define PP_SETRO(pp) ((pp)->p_nrm |= P_RO)
131 131 #define PP_SETREFRO(pp) ((pp)->p_nrm |= (P_REF|P_RO))
132 132 #define PP_SETPNC(pp) ((pp)->p_nrm |= P_PNC)
133 133 #ifdef VAC
134 134 #define PP_SETTNC(pp) ((pp)->p_nrm |= P_TNC)
135 135 #endif
136 136 #define PP_SETKPMS(pp) ((pp)->p_nrm |= P_KPMS)
137 137 #define PP_SETKPMC(pp) ((pp)->p_nrm |= P_KPMC)
138 138
139 139 #define PP_CLRMOD(pp) ((pp)->p_nrm &= ~P_MOD)
140 140 #define PP_CLRREF(pp) ((pp)->p_nrm &= ~P_REF)
141 141 #define PP_CLRREFMOD(pp) ((pp)->p_nrm &= ~(P_REF|P_MOD))
142 142 #define PP_CLRRO(pp) ((pp)->p_nrm &= ~P_RO)
143 143 #define PP_CLRPNC(pp) ((pp)->p_nrm &= ~P_PNC)
144 144 #ifdef VAC
145 145 #define PP_CLRTNC(pp) ((pp)->p_nrm &= ~P_TNC)
146 146 #endif
147 147 #define PP_CLRKPMS(pp) ((pp)->p_nrm &= ~P_KPMS)
148 148 #define PP_CLRKPMC(pp) ((pp)->p_nrm &= ~P_KPMC)
149 149
150 150 /*
151 151 * All shared memory segments attached with the SHM_SHARE_MMU flag (ISM)
152 152 * will be constrained to a 4M, 32M or 256M alignment. Also, since every
153 153 * newly-created ISM segment is created out of a new address space at a base
154 154 * va of 0, we don't need to store it.
155 155 */
156 156 #define ISM_ALIGN(shift) (1 << shift) /* base va aligned to <n>M */
157 157 #define ISM_ALIGNED(shift, va) (((uintptr_t)va & (ISM_ALIGN(shift) - 1)) == 0)
158 158 #define ISM_SHIFT(shift, x) ((uintptr_t)x >> (shift))
159 159
160 160 /*
161 161 * Pad locks out to cache sub-block boundaries to prevent
162 162 * false sharing, so several processes don't contend for
163 163 * the same line if they aren't using the same lock. Since
164 164 * this is a typedef we also have a bit of freedom in
165 165 * changing lock implementations later if we decide it
166 166 * is necessary.
167 167 */
168 168 typedef struct hat_lock {
169 169 kmutex_t hl_mutex;
170 170 uchar_t hl_pad[64 - sizeof (kmutex_t)];
171 171 } hatlock_t;
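
The padding above only works if the structure really fills one 64-byte sub-block. A hedged compile-time check of the same trick (demo_lock_t is illustrative and uses a plain 8-byte word where the kernel has kmutex_t):

    typedef struct demo_lock {
            uint64_t dl_lock;                        /* stand-in for kmutex_t */
            uchar_t  dl_pad[64 - sizeof (uint64_t)];
    } demo_lock_t;

    /* fails to compile if demo_lock_t ever stops being exactly 64 bytes */
    typedef char demo_lock_size_check[sizeof (demo_lock_t) == 64 ? 1 : -1];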
172 172
173 173 #define HATLOCK_MUTEXP(hatlockp) (&((hatlockp)->hl_mutex))
174 174
175 175 /*
176 176 * All segments mapped with ISM are guaranteed to be 4M, 32M or 256M aligned.
177 177 * Also size is guaranteed to be in 4M, 32M or 256M chunks.
178 178 * ism_seg consists of the following members:
179 179 * [XX..22] base address of ism segment. XX is 63 or 31 depending on whether
180 180 * caddr_t is 64 bits or 32 bits.
181 181 * [21..0] size of segment.
182 182 *
183 183 * NOTE: Don't alter this structure without changing defines above and
184 184 * the tsb_miss and protection handlers.
185 185 */
186 186 typedef struct ism_map {
187 187 uintptr_t imap_seg; /* base va + sz of ISM segment */
188 188 uchar_t imap_vb_shift; /* mmu_pageshift for ism page size */
189 189 uchar_t imap_rid; /* region id for ism */
190 190 ushort_t imap_hatflags; /* primary ism page size */
191 191 uint_t imap_sz_mask; /* mmu_pagemask for ism page size */
192 192 sfmmu_t *imap_ismhat; /* hat id of dummy ISM as */
193 193 struct ism_ment *imap_ment; /* pointer to mapping list entry */
194 194 } ism_map_t;
195 195
196 196 #define ism_start(map) ((caddr_t)((map).imap_seg & \
197 197 ~ISM_SZ_MASK((map).imap_vb_shift)))
198 198 #define ism_size(map) ((map).imap_seg & ISM_SZ_MASK((map).imap_vb_shift))
199 199 #define ism_end(map) ((caddr_t)(ism_start(map) + (ism_size(map) * \
200 200 ISM_PG_SIZE((map).imap_vb_shift))))
201 201 /*
202 202 * ISM mapping entry. Used to link all hat's sharing a ism_hat.
203 203 * Same function as the p_mapping list for a page.
204 204 */
205 205 typedef struct ism_ment {
206 206 sfmmu_t *iment_hat; /* back pointer to hat_share() hat */
207 207 caddr_t iment_base_va; /* hat's va base for this ism seg */
208 208 struct ism_ment *iment_next; /* next ism map entry */
209 209 struct ism_ment *iment_prev; /* prev ism map entry */
210 210 } ism_ment_t;
211 211
212 212 /*
213 213 * ISM segment block. One will be hung off the sfmmu structure if a
214 214 * process uses ISM. More will be linked using ismblk_next if more
215 215 * than ISM_MAP_SLOTS segments are attached to this proc.
216 216 *
217 217 * All modifications to fields in this structure will be protected
218 218 * by the hat mutex. In order to avoid grabbing this lock in low level
219 219 * routines (tsb miss/protection handlers and vatopfn) while not
220 220 * introducing any race conditions with hat_unshare, we will set
221 221 * CTX_ISM_BUSY bit in the ctx struct. Any mmu traps that occur
222 222 * for this ctx while this bit is set will be handled in sfmmu_tsbmiss_exception
223 223 * where it will synchronize behind the hat mutex.
224 224 */
225 225 typedef struct ism_blk {
226 226 ism_map_t iblk_maps[ISM_MAP_SLOTS];
227 227 struct ism_blk *iblk_next;
228 228 uint64_t iblk_nextpa;
229 229 } ism_blk_t;
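
To see how the pieces above fit together, here is a hedged sketch of walking a chained ism_blk list to ask whether a va falls inside any attached ISM segment; demo_va_in_ism() is invented (the real lookups happen in the TSB miss path and vatopfn), and an imap_ismhat of NULL is taken to mean an unused slot:

    static int
    demo_va_in_ism(ism_blk_t *iblkp, caddr_t va)
    {
            int i;

            while (iblkp != NULL) {
                    for (i = 0; i < ISM_MAP_SLOTS; i++) {
                            ism_map_t map = iblkp->iblk_maps[i];

                            if (map.imap_ismhat == NULL)    /* unused slot */
                                    continue;
                            if (va >= ism_start(map) && va < ism_end(map))
                                    return (1);
                    }
                    iblkp = iblkp->iblk_next;       /* next block in chain */
            }
            return (0);
    }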
230 230
231 231 /*
232 232 * TSB access information. All fields are protected by the process's
233 233 * hat lock.
234 234 */
235 235
236 236 struct tsb_info {
237 237 caddr_t tsb_va; /* tsb base virtual address */
238 238 uint64_t tsb_pa; /* tsb base physical address */
239 239 struct tsb_info *tsb_next; /* next tsb used by this process */
240 240 uint16_t tsb_szc; /* tsb size code */
241 241 uint16_t tsb_flags; /* flags for this tsb; see below */
242 242 uint_t tsb_ttesz_mask; /* page size masks; see below */
243 243
244 244 tte_t tsb_tte; /* tte to lock into DTLB */
245 245 sfmmu_t *tsb_sfmmu; /* sfmmu */
246 246 kmem_cache_t *tsb_cache; /* cache from which mem allocated */
247 247 vmem_t *tsb_vmp; /* vmem arena from which mem alloc'd */
248 248 };
249 249
250 250 /*
251 251 * Values for "tsb_ttesz_mask" bitmask.
252 252 */
253 253 #define TSB8K (1 << TTE8K)
254 254 #define TSB64K (1 << TTE64K)
255 255 #define TSB512K (1 << TTE512K)
256 256 #define TSB4M (1 << TTE4M)
257 257 #define TSB32M (1 << TTE32M)
258 258 #define TSB256M (1 << TTE256M)
259 259
260 260 /*
261 261 * Values for "tsb_flags" field.
262 262 */
263 263 #define TSB_RELOC_FLAG 0x1
264 264 #define TSB_FLUSH_NEEDED 0x2
265 265 #define TSB_SWAPPED 0x4
266 266 #define TSB_SHAREDCTX 0x8
267 267
268 268 #endif /* !_ASM */
269 269
270 270 /*
271 271 * Data structures for shared hmeblk support.
272 272 */
273 273
274 274 /*
275 275 * Do not increase the maximum number of ism/hme regions without first checking
276 276 * the impact on ism_map_t, TSB miss area, hblk tag and region id type in
277 277 * sf_region structure.
278 278 * Initially, shared hmes will only be used for the main text segment;
279 279 * therefore this value is set to 64. It will be increased when shared
280 280 * libraries are included.
281 281 */
282 282
283 283 #define SFMMU_MAX_HME_REGIONS (64)
284 284 #define SFMMU_HMERGNMAP_WORDS BT_BITOUL(SFMMU_MAX_HME_REGIONS)
285 285
286 286 #define SFMMU_PRIVATE 0
287 287 #define SFMMU_SHARED 1
288 288
289 289 #define HMEBLK_ENDPA 1
290 290
291 291 #ifndef _ASM
292 292
293 293 #define SFMMU_MAX_ISM_REGIONS (64)
294 294 #define SFMMU_ISMRGNMAP_WORDS BT_BITOUL(SFMMU_MAX_ISM_REGIONS)
295 295
296 296 #define SFMMU_RGNMAP_WORDS (SFMMU_HMERGNMAP_WORDS + SFMMU_ISMRGNMAP_WORDS)
297 297
298 298 #define SFMMU_MAX_REGION_BUCKETS (128)
299 299 #define SFMMU_MAX_SRD_BUCKETS (2048)
300 300
301 301 typedef struct sf_hmeregion_map {
302 302 ulong_t bitmap[SFMMU_HMERGNMAP_WORDS];
303 303 } sf_hmeregion_map_t;
304 304
305 305 typedef struct sf_ismregion_map {
306 306 ulong_t bitmap[SFMMU_ISMRGNMAP_WORDS];
307 307 } sf_ismregion_map_t;
308 308
309 309 typedef union sf_region_map_u {
310 310 struct _h_rmap_s {
311 311 sf_hmeregion_map_t hmeregion_map;
312 312 sf_ismregion_map_t ismregion_map;
313 313 } h_rmap_s;
314 314 ulong_t bitmap[SFMMU_RGNMAP_WORDS];
315 315 } sf_region_map_t;
316 316
317 317 #define SF_RGNMAP_ZERO(map) { \
318 318 int _i; \
319 319 for (_i = 0; _i < SFMMU_RGNMAP_WORDS; _i++) { \
320 320 (map).bitmap[_i] = 0; \
321 321 } \
322 322 }
323 323
324 324 /*
325 325 * Returns 1 if map1 and map2 are equal.
326 326 */
327 327 #define SF_RGNMAP_EQUAL(map1, map2, rval) { \
328 328 int _i; \
329 329 for (_i = 0; _i < SFMMU_RGNMAP_WORDS; _i++) { \
330 330 if ((map1)->bitmap[_i] != (map2)->bitmap[_i]) \
331 331 break; \
332 332 } \
333 333 if (_i < SFMMU_RGNMAP_WORDS) \
334 334 rval = 0; \
335 335 else \
336 336 rval = 1; \
337 337 }
338 338
339 339 #define SF_RGNMAP_ADD(map, r) BT_SET((map).bitmap, r)
340 340 #define SF_RGNMAP_DEL(map, r) BT_CLEAR((map).bitmap, r)
341 341 #define SF_RGNMAP_TEST(map, r) BT_TEST((map).bitmap, r)
342 342
343 343 /*
344 344 * Tests whether map2 is a subset of map1; returns 1 if
345 345 * this assertion is true.
346 346 */
347 347 #define SF_RGNMAP_IS_SUBSET(map1, map2, rval) { \
348 348 int _i; \
349 349 for (_i = 0; _i < SFMMU_RGNMAP_WORDS; _i++) { \
350 350 if (((map1)->bitmap[_i] & (map2)->bitmap[_i]) \
351 351 != (map2)->bitmap[_i]) { \
352 352 break; \
353 353 } \
354 354 } \
355 355 if (_i < SFMMU_RGNMAP_WORDS) \
356 356 rval = 0; \
357 357 else \
358 358 rval = 1; \
359 359 }
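
Since SF_RGNMAP_EQUAL and SF_RGNMAP_IS_SUBSET are statement macros that return their result through the rval out-parameter, a caller looks like this (demo_proc_fits_scd() is invented for illustration):

    static int
    demo_proc_fits_scd(sf_region_map_t *scdmap, sf_region_map_t *procmap)
    {
            int subset;

            /* is every region the process uses also in the SCD? */
            SF_RGNMAP_IS_SUBSET(scdmap, procmap, subset);
            return (subset);
    }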
360 360
361 361 #define SF_SCD_INCR_REF(scdp) { \
362 - atomic_add_32((volatile uint32_t *)&(scdp)->scd_refcnt, 1); \
362 + atomic_inc_32((volatile uint32_t *)&(scdp)->scd_refcnt); \
363 363 }
364 364
365 365 #define SF_SCD_DECR_REF(srdp, scdp) { \
366 366 sf_region_map_t _scd_rmap = (scdp)->scd_region_map; \
367 - if (!atomic_add_32_nv( \
368 - (volatile uint32_t *)&(scdp)->scd_refcnt, -1)) { \
367 + if (!atomic_dec_32_nv((volatile uint32_t *)&(scdp)->scd_refcnt)) {\
369 368 sfmmu_destroy_scd((srdp), (scdp), &_scd_rmap); \
370 369 } \
371 370 }
372 371
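The SF_SCD_INCR_REF/SF_SCD_DECR_REF rewrite above is the whole point of this change: a +1/-1 passed to atomic_add_32()/atomic_add_32_nv() becomes the more direct atomic_inc_32()/atomic_dec_32_nv(). A minimal userland sketch of the same refcount pattern, using the atomic_ops(3C) interfaces (demo_scd_t, demo_scd_hold() and demo_scd_rele() are invented for illustration):

    #include <sys/types.h>
    #include <atomic.h>
    #include <stdlib.h>

    typedef struct demo_scd {
            volatile uint32_t scd_refcnt;
            /* ... payload ... */
    } demo_scd_t;

    static void
    demo_scd_hold(demo_scd_t *scdp)
    {
            atomic_inc_32(&scdp->scd_refcnt);  /* was atomic_add_32(..., 1) */
    }

    static void
    demo_scd_rele(demo_scd_t *scdp)
    {
            /* the _nv variant returns the new value; free on the 1 -> 0 drop */
            if (atomic_dec_32_nv(&scdp->scd_refcnt) == 0)
                    free(scdp);
    }
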
373 372 /*
374 373 * A sfmmup link in the link list of sfmmups that share the same region.
375 374 */
376 375 typedef struct sf_rgn_link {
377 376 sfmmu_t *next;
378 377 sfmmu_t *prev;
379 378 } sf_rgn_link_t;
380 379
381 380 /*
382 381 * rgn_flags values.
383 382 */
384 383 #define SFMMU_REGION_HME 0x1
385 384 #define SFMMU_REGION_ISM 0x2
386 385 #define SFMMU_REGION_FREE 0x8
387 386
388 387 #define SFMMU_REGION_TYPE_MASK (0x3)
389 388
390 389 /*
391 390 * sf_region defines a text or (D)ISM segment which maps
392 391 * the same underlying physical object.
393 392 */
394 393 typedef struct sf_region {
395 394 caddr_t rgn_saddr; /* base addr of attached seg */
396 395 size_t rgn_size; /* size of attached seg */
397 396 void *rgn_obj; /* the underlying object id */
398 397 u_offset_t rgn_objoff; /* offset in the object mapped */
399 398 uchar_t rgn_perm; /* PROT_READ/WRITE/EXEC */
400 399 uchar_t rgn_pgszc; /* page size of the region */
401 400 uchar_t rgn_flags; /* region type, free flag */
402 401 uchar_t rgn_id;
403 402 int rgn_refcnt; /* # of hats sharing the region */
404 403 /* callback function for hat_unload_callback */
405 404 hat_rgn_cb_func_t rgn_cb_function;
406 405 struct sf_region *rgn_hash; /* hash chain linking the rgns */
407 406 kmutex_t rgn_mutex; /* protect region sfmmu list */
409 408 /* A linked list of processes attached to this region */
409 408 sfmmu_t *rgn_sfmmu_head;
410 409 ulong_t rgn_ttecnt[MMU_PAGE_SIZES];
411 410 uint16_t rgn_hmeflags; /* rgn tte size flags */
412 411 } sf_region_t;
413 412
414 413 #define rgn_next rgn_hash
415 414
416 415 /* srd */
417 416 typedef struct sf_shared_region_domain {
418 417 vnode_t *srd_evp; /* executable vnode */
419 418 /* hme region table */
420 419 sf_region_t *srd_hmergnp[SFMMU_MAX_HME_REGIONS];
421 420 /* ism region table */
422 421 sf_region_t *srd_ismrgnp[SFMMU_MAX_ISM_REGIONS];
423 422 /* hash chain linking srds */
424 423 struct sf_shared_region_domain *srd_hash;
425 424 /* pointer to the next free hme region */
426 425 sf_region_t *srd_hmergnfree;
427 426 /* pointer to the next free ism region */
428 427 sf_region_t *srd_ismrgnfree;
429 428 /* id of next ism region created */
430 429 uint16_t srd_next_ismrid;
431 430 /* id of next hme region created */
432 431 uint16_t srd_next_hmerid;
433 432 uint16_t srd_ismbusyrgns; /* # of ism rgns in use */
434 433 uint16_t srd_hmebusyrgns; /* # of hme rgns in use */
435 434 int srd_refcnt; /* # of procs in the srd */
436 435 kmutex_t srd_mutex; /* sync add/remove rgns */
437 436 kmutex_t srd_scd_mutex;
438 437 sf_scd_t *srd_scdp; /* list of scds in srd */
439 438 /* hash of regions associated with the same executable */
440 439 sf_region_t *srd_rgnhash[SFMMU_MAX_REGION_BUCKETS];
441 440 } sf_srd_t;
442 441
443 442 typedef struct sf_srd_bucket {
444 443 kmutex_t srdb_lock;
445 444 sf_srd_t *srdb_srdp;
446 445 } sf_srd_bucket_t;
447 446
448 447 /*
449 448 * The value of SFMMU_L1_HMERLINKS and SFMMU_L2_HMERLINKS will be increased
450 449 * to 16 when the use of shared hmes for shared libraries is enabled.
451 450 */
452 451
453 452 #define SFMMU_L1_HMERLINKS (8)
454 453 #define SFMMU_L2_HMERLINKS (8)
455 454 #define SFMMU_L1_HMERLINKS_SHIFT (3)
456 455 #define SFMMU_L1_HMERLINKS_MASK (SFMMU_L1_HMERLINKS - 1)
457 456 #define SFMMU_L2_HMERLINKS_MASK (SFMMU_L2_HMERLINKS - 1)
458 457 #define SFMMU_L1_HMERLINKS_SIZE \
459 458 (SFMMU_L1_HMERLINKS * sizeof (sf_rgn_link_t *))
460 459 #define SFMMU_L2_HMERLINKS_SIZE \
461 460 (SFMMU_L2_HMERLINKS * sizeof (sf_rgn_link_t))
462 461
463 462 #if (SFMMU_L1_HMERLINKS * SFMMU_L2_HMERLINKS < SFMMU_MAX_HME_REGIONS)
464 463 #error Not Enough HMERLINKS
465 464 #endif
466 465
467 466 /*
468 467 * This macro grabs the hat lock and allocates the level 2 hat chain
469 468 * associated with a shme rgn. In the majority of cases, the macro
470 469 * is called with alloc = 0 and lock = 0.
471 470 * A pointer to the level 2 sf_rgn_link_t structure is returned in the lnkp
472 471 * parameter.
473 472 */
474 473 #define SFMMU_HMERID2RLINKP(sfmmup, rid, lnkp, alloc, lock) \
475 474 { \
476 475 int _l1ix = ((rid) >> SFMMU_L1_HMERLINKS_SHIFT) & \
477 476 SFMMU_L1_HMERLINKS_MASK; \
478 477 int _l2ix = ((rid) & SFMMU_L2_HMERLINKS_MASK); \
479 478 hatlock_t *_hatlockp; \
480 479 lnkp = (sfmmup)->sfmmu_hmeregion_links[_l1ix]; \
481 480 if (lnkp != NULL) { \
482 481 lnkp = &lnkp[_l2ix]; \
483 482 } else if (alloc && lock) { \
484 483 lnkp = kmem_zalloc(SFMMU_L2_HMERLINKS_SIZE, KM_SLEEP); \
485 484 _hatlockp = sfmmu_hat_enter(sfmmup); \
486 485 if ((sfmmup)->sfmmu_hmeregion_links[_l1ix] != NULL) { \
487 486 sfmmu_hat_exit(_hatlockp); \
488 487 kmem_free(lnkp, SFMMU_L2_HMERLINKS_SIZE); \
489 488 lnkp = (sfmmup)->sfmmu_hmeregion_links[_l1ix]; \
490 489 ASSERT(lnkp != NULL); \
491 490 } else { \
492 491 (sfmmup)->sfmmu_hmeregion_links[_l1ix] = lnkp; \
493 492 sfmmu_hat_exit(_hatlockp); \
494 493 } \
495 494 lnkp = &lnkp[_l2ix]; \
496 495 } else if (alloc) { \
497 496 lnkp = kmem_zalloc(SFMMU_L2_HMERLINKS_SIZE, KM_SLEEP); \
498 497 ASSERT((sfmmup)->sfmmu_hmeregion_links[_l1ix] == NULL); \
499 498 (sfmmup)->sfmmu_hmeregion_links[_l1ix] = lnkp; \
500 499 lnkp = &lnkp[_l2ix]; \
501 500 } \
502 501 }
503 502
504 503 /*
505 504 * Per cpu pending freelist of hmeblks.
506 505 */
507 506 typedef struct cpu_hme_pend {
508 507 struct hme_blk *chp_listp;
509 508 kmutex_t chp_mutex;
510 509 time_t chp_timestamp;
511 510 uint_t chp_count;
512 511 uint8_t chp_pad[36]; /* pad to 64 bytes */
513 512 } cpu_hme_pend_t;
514 513
515 514 /*
516 515 * The default value of the threshold for the per cpu pending queues of hmeblks.
517 516 * The queues are flushed if either the number of hmeblks on the queue is above
518 517 * the threshold, or one second has elapsed since the last flush.
519 518 */
520 519 #define CPU_HME_PEND_THRESH 1000
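
Expressed as code, the flush policy just described might look like the following hypothetical predicate (demo_chp_should_flush() is invented; the real check lives in the hmeblk free path):

    static int
    demo_chp_should_flush(cpu_hme_pend_t *chp, time_t now)
    {
            /* flush when the queue is long or one second stale */
            return (chp->chp_count > CPU_HME_PEND_THRESH ||
                now - chp->chp_timestamp >= 1);
    }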
521 520
522 521 /*
523 522 * Per-MMU context domain kstats.
524 523 *
525 524 * TSB Miss Exceptions
526 525 * Number of times a TSB miss exception is handled in an MMU. See
527 526 * sfmmu_tsbmiss_exception() for more details.
528 527 * TSB Raise Exception
529 528 * Number of times the CPUs within an MMU are cross-called
530 529 * to invalidate either a specific process context (when the process
531 530 * switches MMU contexts) or the context of any process that is
532 531 * running on those CPUs (as part of the MMU context wrap-around).
533 532 * Wrap Around
534 533 * The number of times a wrap-around of MMU context happens.
535 534 */
536 535 typedef enum mmu_ctx_stat_types {
537 536 MMU_CTX_TSB_EXCEPTIONS, /* TSB miss exceptions handled */
538 537 MMU_CTX_TSB_RAISE_EXCEPTION, /* ctx invalidation cross calls */
539 538 MMU_CTX_WRAP_AROUND, /* wraparounds */
540 539 MMU_CTX_NUM_STATS
541 540 } mmu_ctx_stat_t;
542 541
543 542 /*
544 543 * Per-MMU context domain structure. This is instantiated the first time a CPU
545 544 * belonging to the MMU context domain is configured into the system, at boot
546 545 * time or at DR time.
547 546 *
548 547 * mmu_gnum
549 548 * The current generation number for the context IDs on this MMU context
550 549 * domain. It is protected by mmu_lock.
551 550 * mmu_cnum
552 551 * The current cnum to be allocated on this MMU context domain. It
553 552 * is protected via CAS.
554 553 * mmu_nctxs
555 554 * The max number of context IDs supported on every CPU in this
556 555 * MMU context domain. This is needed here in case the system supports
557 556 * mixed type of processors/MMUs. It also helps to make ctx switch code
558 557 * access fewer cache lines i.e. no need to retrieve it from some global
559 558 * nctxs.
560 559 * mmu_lock
561 560 * The mutex spin lock used to serialize context ID wrap around
562 561 * mmu_idx
563 562 * The index for this MMU context domain structure in the global array
564 563 * mmu_ctxdoms.
565 564 * mmu_ncpus
566 565 * The actual number of CPUs that have been configured in this
567 566 * MMU context domain. This also acts as a reference count for the
568 567 * structure. When the last CPU in an MMU context domain is unconfigured,
569 568 * the structure is freed. It is protected by mmu_lock.
570 569 * mmu_cpuset
571 570 * The CPU set of configured CPUs for this MMU context domain. Used
572 571 * to cross-call all the CPUs in the MMU context domain to invalidate
573 572 * context IDs during a wraparound operation. It is protected by mmu_lock.
574 573 */
575 574
576 575 typedef struct mmu_ctx {
577 576 uint64_t mmu_gnum;
578 577 uint_t mmu_cnum;
579 578 uint_t mmu_nctxs;
580 579 kmutex_t mmu_lock;
581 580 uint_t mmu_idx;
582 581 uint_t mmu_ncpus;
583 582 cpuset_t mmu_cpuset;
584 583 kstat_t *mmu_kstat;
585 584 kstat_named_t mmu_kstat_data[MMU_CTX_NUM_STATS];
586 585 } mmu_ctx_t;
587 586
588 587 #define mmu_tsb_exceptions \
589 588 mmu_kstat_data[MMU_CTX_TSB_EXCEPTIONS].value.ui64
590 589 #define mmu_tsb_raise_exception \
591 590 mmu_kstat_data[MMU_CTX_TSB_RAISE_EXCEPTION].value.ui64
592 591 #define mmu_wrap_around \
593 592 mmu_kstat_data[MMU_CTX_WRAP_AROUND].value.ui64
594 593
595 594 extern uint_t max_mmu_ctxdoms;
596 595 extern mmu_ctx_t **mmu_ctxs_tbl;
597 596
598 597 extern void sfmmu_cpu_init(cpu_t *);
599 598 extern void sfmmu_cpu_cleanup(cpu_t *);
600 599
601 600 extern uint_t sfmmu_ctxdom_nctxs(int);
602 601
603 602 #ifdef sun4v
604 603 extern void sfmmu_ctxdoms_remove(void);
605 604 extern void sfmmu_ctxdoms_lock(void);
606 605 extern void sfmmu_ctxdoms_unlock(void);
607 606 extern void sfmmu_ctxdoms_update(void);
608 607 #endif
609 608
610 609 /*
611 610 * The following structure is used to get MMU context domain information for
612 611 * a CPU from the platform.
613 612 *
614 613 * mmu_idx
615 614 * The MMU context domain index within the global array mmu_ctxs
616 615 * mmu_nctxs
617 616 * The number of context IDs supported in the MMU context domain
618 617 */
619 618 typedef struct mmu_ctx_info {
620 619 uint_t mmu_idx;
621 620 uint_t mmu_nctxs;
622 621 } mmu_ctx_info_t;
623 622
624 623 #pragma weak plat_cpuid_to_mmu_ctx_info
625 624
626 625 extern void plat_cpuid_to_mmu_ctx_info(processorid_t, mmu_ctx_info_t *);
627 626
628 627 /*
629 628 * Each address space has an array of sfmmu_ctx_t structures, one structure
630 629 * per MMU context domain.
631 630 *
632 631 * cnum
633 632 * The context ID allocated for an address space on an MMU context domain
634 633 * gnum
635 634 * The generation number for the context ID in the MMU context domain.
636 635 *
637 636 * This structure needs to be a power-of-two in size.
638 637 */
639 638 typedef struct sfmmu_ctx {
640 639 uint64_t gnum:48;
641 640 uint64_t cnum:16;
642 641 } sfmmu_ctx_t;
643 642
644 643
645 644 /*
646 645 * The platform dependent hat structure.
647 646 * tte counts should be protected by cas.
648 647 * cpuset is protected by cas.
649 648 *
650 649 * ttecnt accounting for mappings which do not use shared hme is carried out
651 650 * during pagefault handling. In the shared hme case, only the first process
652 651 * to access a mapping generates a pagefault; subsequent processes simply
653 652 * find the shared hme entry during trap handling and therefore there is no
654 653 * corresponding event to initiate ttecnt accounting. Currently, as shared
655 654 * hmes are only used for text segments, when joining a region we assume the
656 655 * worst case and add the number of ttes required to map the entire region
657 656 * to the ttecnt corresponding to the region pagesize. However, if the region
658 657 * has a 4M pagesize and memory is low, the allocation of 4M pages may fail;
659 658 * 8K pages will then be allocated instead and the first TSB which stores 8K
660 659 * mappings will potentially be undersized. To compensate for the potential
661 660 * underaccounting in this case we always add 1/4 of the region size to the 8K
662 661 * ttecnt.
663 662 *
664 663 * Note that sfmmu_xhat_provider MUST be the first element.
665 664 */
666 665
667 666 struct hat {
668 667 void *sfmmu_xhat_provider; /* NULL for CPU hat */
669 668 cpuset_t sfmmu_cpusran; /* cpu bit mask for efficient xcalls */
670 669 struct as *sfmmu_as; /* as this hat provides mapping for */
671 670 /* per pgsz private ttecnt + shme rgns ttecnt for rgns not in SCD */
672 671 ulong_t sfmmu_ttecnt[MMU_PAGE_SIZES];
673 672 /* shme rgns ttecnt for rgns in SCD */
674 673 ulong_t sfmmu_scdrttecnt[MMU_PAGE_SIZES];
675 674 /* est. ism ttes that are NOT in a SCD */
676 675 ulong_t sfmmu_ismttecnt[MMU_PAGE_SIZES];
677 676 /* ttecnt for isms that are in a SCD */
678 677 ulong_t sfmmu_scdismttecnt[MMU_PAGE_SIZES];
679 678 /* inflate tsb0 to allow for large page alloc failure in region */
680 679 ulong_t sfmmu_tsb0_4minflcnt;
681 680 union _h_un {
682 681 ism_blk_t *sfmmu_iblkp; /* maps to ismhat(s) */
683 682 ism_ment_t *sfmmu_imentp; /* ism hat's mapping list */
684 683 } h_un;
685 684 uint_t sfmmu_free:1; /* hat to be freed - set on as_free */
686 685 uint_t sfmmu_ismhat:1; /* hat is dummy ism hatid */
687 686 uint_t sfmmu_scdhat:1; /* hat is dummy scd hatid */
688 687 uchar_t sfmmu_rmstat; /* refmod stats refcnt */
689 688 ushort_t sfmmu_clrstart; /* start color bin for page coloring */
690 689 ushort_t sfmmu_clrbin; /* per as phys page coloring bin */
691 690 ushort_t sfmmu_flags; /* flags */
692 691 uchar_t sfmmu_tteflags; /* pgsz flags */
693 692 uchar_t sfmmu_rtteflags; /* pgsz flags for SRD hmes */
694 693 struct tsb_info *sfmmu_tsb; /* list of per as tsbs */
695 694 uint64_t sfmmu_ismblkpa; /* pa of sfmmu_iblkp, or -1 */
696 695 lock_t sfmmu_ctx_lock; /* sync ctx alloc and invalidation */
697 696 kcondvar_t sfmmu_tsb_cv; /* signals TSB swapin or relocation */
698 697 uchar_t sfmmu_cext; /* context page size encoding */
699 698 uint8_t sfmmu_pgsz[MMU_PAGE_SIZES]; /* ranking for MMU */
700 699 sf_srd_t *sfmmu_srdp;
701 700 sf_scd_t *sfmmu_scdp; /* scd this address space belongs to */
702 701 sf_region_map_t sfmmu_region_map;
703 702 sf_rgn_link_t *sfmmu_hmeregion_links[SFMMU_L1_HMERLINKS];
704 703 sf_rgn_link_t sfmmu_scd_link; /* link to scd or pending queue */
705 704 #ifdef sun4v
706 705 struct hv_tsb_block sfmmu_hvblock;
707 706 #endif
708 707 /*
709 708 * sfmmu_ctxs is a variable length array of max_mmu_ctxdoms # of
710 709 * elements. max_mmu_ctxdoms is determined at run-time.
712 711 * sfmmu_ctxs[1] is just the first element of an array; it always
712 711 * has to be the last field to ensure that the memory allocated
713 712 * for sfmmu_ctxs is consecutive with the memory of the rest of
714 713 * the hat data structure.
715 714 */
716 715 sfmmu_ctx_t sfmmu_ctxs[1];
717 716
718 717 };
719 718
720 719 #define sfmmu_iblk h_un.sfmmu_iblkp
721 720 #define sfmmu_iment h_un.sfmmu_imentp
722 721
723 722 #define sfmmu_hmeregion_map sfmmu_region_map.h_rmap_s.hmeregion_map
724 723 #define sfmmu_ismregion_map sfmmu_region_map.h_rmap_s.ismregion_map
725 724
726 725 #define SF_RGNMAP_ISNULL(sfmmup) \
727 726 (sfrgnmap_isnull(&(sfmmup)->sfmmu_region_map))
728 727 #define SF_HMERGNMAP_ISNULL(sfmmup) \
729 728 (sfhmergnmap_isnull(&(sfmmup)->sfmmu_hmeregion_map))
730 729
731 730 struct sf_scd {
732 731 sfmmu_t *scd_sfmmup; /* shared context hat */
733 732 /* per pgsz ttecnt for shme rgns in SCD */
734 733 ulong_t scd_rttecnt[MMU_PAGE_SIZES];
735 734 uint_t scd_refcnt; /* address spaces attached to scd */
736 735 sf_region_map_t scd_region_map; /* bit mask of attached segments */
737 736 sf_scd_t *scd_next; /* link pointers for srd_scd list */
738 737 sf_scd_t *scd_prev;
739 738 sfmmu_t *scd_sf_list; /* list of doubly linked hat structs */
740 739 kmutex_t scd_mutex;
741 740 /*
742 741 * Link used to add an scd to the sfmmu_iment list.
743 742 */
744 743 ism_ment_t scd_ism_links[SFMMU_MAX_ISM_REGIONS];
745 744 };
746 745
747 746 #define scd_hmeregion_map scd_region_map.h_rmap_s.hmeregion_map
748 747 #define scd_ismregion_map scd_region_map.h_rmap_s.ismregion_map
749 748
750 749 extern int disable_shctx;
751 750 extern int shctx_on;
752 751
753 752 /*
754 753 * bit mask for managing vac conflicts on large pages.
755 754 * bit 1 is for uncache flag.
756 755 * bits 2 through min(num of cache colors + 1,31) are
757 756 * for cache colors that have already been flushed.
758 757 */
759 758 #ifdef VAC
760 759 #define CACHE_NUM_COLOR (shm_alignment >> MMU_PAGESHIFT)
761 760 #else
762 761 #define CACHE_NUM_COLOR 1
763 762 #endif
764 763
765 764 #define CACHE_VCOLOR_MASK(vcolor) (2 << (vcolor & (CACHE_NUM_COLOR - 1)))
766 765
767 766 #define CacheColor_IsFlushed(flag, vcolor) \
768 767 ((flag) & CACHE_VCOLOR_MASK(vcolor))
769 768
770 769 #define CacheColor_SetFlushed(flag, vcolor) \
771 770 ((flag) |= CACHE_VCOLOR_MASK(vcolor))
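
A hedged sketch of the bookkeeping these macros support: while visiting the mappings of a large page, each VAC color gets flushed at most once. Both helper functions are hypothetical:

    extern int  demo_next_mapping_color(void);      /* hypothetical */
    extern void demo_flush_color(int);              /* hypothetical */

    static void
    demo_flush_each_color_once(int nmappings)
    {
            uint32_t flushflag = 0; /* bit 1: uncache; bits 2..: colors done */
            int i, vcolor;

            for (i = 0; i < nmappings; i++) {
                    vcolor = demo_next_mapping_color();
                    if (CacheColor_IsFlushed(flushflag, vcolor))
                            continue;               /* already flushed */
                    demo_flush_color(vcolor);
                    CacheColor_SetFlushed(flushflag, vcolor);
            }
    }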
772 771 /*
773 772 * Flags passed to sfmmu_page_cache to flush page from vac or not.
774 773 */
775 774 #define CACHE_FLUSH 0
776 775 #define CACHE_NO_FLUSH 1
777 776
778 777 /*
779 778 * Flags passed to sfmmu_tlbcache_demap
780 779 */
781 780 #define FLUSH_NECESSARY_CPUS 0
782 781 #define FLUSH_ALL_CPUS 1
783 782
784 783 #ifdef DEBUG
785 784 /*
786 785 * For debugging purposes only. May be removed later.
787 786 */
788 787 struct ctx_trace {
789 788 sfmmu_t *sc_sfmmu_stolen;
790 789 sfmmu_t *sc_sfmmu_stealing;
791 790 clock_t sc_time;
792 791 ushort_t sc_type;
793 792 ushort_t sc_cnum;
794 793 };
795 794 #define CTX_TRC_STEAL 0x1
796 795 #define CTX_TRC_FREE 0x0
797 796 #define TRSIZE 0x400
798 797 #define NEXT_CTXTR(ptr) (((ptr) >= ctx_trace_last) ? \
799 798 ctx_trace_first : ((ptr) + 1))
800 799 #define TRACE_CTXS(mutex, ptr, cnum, stolen_sfmmu, stealing_sfmmu, type) \
801 800 mutex_enter(mutex); \
802 801 (ptr)->sc_sfmmu_stolen = (stolen_sfmmu); \
803 802 (ptr)->sc_sfmmu_stealing = (stealing_sfmmu); \
804 803 (ptr)->sc_cnum = (cnum); \
805 804 (ptr)->sc_type = (type); \
806 805 (ptr)->sc_time = ddi_get_lbolt(); \
807 806 (ptr) = NEXT_CTXTR(ptr); \
808 807 num_ctx_stolen += (type); \
809 808 mutex_exit(mutex);
810 809 #else
811 810
812 811 #define TRACE_CTXS(mutex, ptr, cnum, stolen_sfmmu, stealing_sfmmu, type)
813 812
814 813 #endif /* DEBUG */
815 814
816 815 #endif /* !_ASM */
817 816
818 817 /*
819 818 * Macros for sfmmup->sfmmu_flags access. The macros that change the flags
820 819 * ASSERT() that we're holding the HAT lock before changing the flags;
821 820 * however callers that read the flags may do so without acquiring the lock
822 821 * in a fast path, and then recheck the flag after acquiring the lock in
823 822 * a slow path.
824 823 */
825 824 #define SFMMU_FLAGS_ISSET(sfmmup, flags) \
826 825 (((sfmmup)->sfmmu_flags & (flags)) == (flags))
827 826
828 827 #define SFMMU_FLAGS_CLEAR(sfmmup, flags) \
829 828 (ASSERT(sfmmu_hat_lock_held((sfmmup))), \
830 829 (sfmmup)->sfmmu_flags &= ~(flags))
831 830
832 831 #define SFMMU_FLAGS_SET(sfmmup, flags) \
833 832 (ASSERT(sfmmu_hat_lock_held((sfmmup))), \
834 833 (sfmmup)->sfmmu_flags |= (flags))
835 834
836 835 #define SFMMU_TTEFLAGS_ISSET(sfmmup, flags) \
837 836 ((((sfmmup)->sfmmu_tteflags | (sfmmup)->sfmmu_rtteflags) & (flags)) == \
838 837 (flags))
839 838
840 839
841 840 /*
842 841 * sfmmu tte HAT flags, must fit in 8 bits
843 842 */
844 843 #define HAT_CHKCTX1_FLAG 0x1
845 844 #define HAT_64K_FLAG (0x1 << TTE64K)
846 845 #define HAT_512K_FLAG (0x1 << TTE512K)
847 846 #define HAT_4M_FLAG (0x1 << TTE4M)
848 847 #define HAT_32M_FLAG (0x1 << TTE32M)
849 848 #define HAT_256M_FLAG (0x1 << TTE256M)
850 849
851 850 /*
852 851 * sfmmu HAT flags, 16 bits at the moment.
853 852 */
854 853 #define HAT_4MTEXT_FLAG 0x01
855 854 #define HAT_32M_ISM 0x02
856 855 #define HAT_256M_ISM 0x04
857 856 #define HAT_SWAPPED 0x08 /* swapped out */
858 857 #define HAT_SWAPIN 0x10 /* swapping in */
859 858 #define HAT_BUSY 0x20 /* replacing TSB(s) */
860 859 #define HAT_ISMBUSY 0x40 /* adding/removing/traversing ISM maps */
861 860
862 861 #define HAT_CTX1_FLAG 0x100 /* ISM imap hatflag for ctx1 */
863 862 #define HAT_JOIN_SCD 0x200 /* region is joining scd */
864 863 #define HAT_ALLCTX_INVALID 0x400 /* all per-MMU ctxs are invalidated */
865 864
866 865 #define SFMMU_LGPGS_INUSE(sfmmup) \
867 866 (((sfmmup)->sfmmu_tteflags | (sfmmup)->sfmmu_rtteflags) || \
868 867 ((sfmmup)->sfmmu_iblk != NULL))
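
In use, the unlocked-test-then-locked-recheck discipline described above looks roughly like this; demo_wait_while_busy() is invented, but sfmmu_hat_enter()/sfmmu_hat_exit() (declared later in this header) and sfmmu_tsb_cv are the real primitives:

    static void
    demo_wait_while_busy(sfmmu_t *sfmmup)
    {
            hatlock_t *hatlockp;

            if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_BUSY))
                    return;                         /* unlocked fast path */

            hatlockp = sfmmu_hat_enter(sfmmup);
            /* slow path: recheck with the HAT lock held */
            while (SFMMU_FLAGS_ISSET(sfmmup, HAT_BUSY))
                    cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp));
            sfmmu_hat_exit(hatlockp);
    }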
869 868
870 869 /*
871 870 * Starting with context 0, the first NUM_LOCKED_CTXS contexts
872 871 * are locked so that sfmmu_getctx can't steal any of these
873 872 * contexts. At the time this software was being developed, the
874 873 * only contexts that need to be locked are context 0 (the kernel
875 874 * context) and context 1 (reserved for stolen context). So this constant
876 875 * was originally defined to be 2.
877 876 *
878 877 * For sun4v only, USER_CONTEXT_TYPE represents any user context. Many
879 878 * routines only care whether the context is kernel, invalid or user.
880 879 */
881 880
882 881 #define NUM_LOCKED_CTXS 2
883 882 #define INVALID_CONTEXT 1
884 883
885 884 #ifdef sun4v
886 885 #define USER_CONTEXT_TYPE NUM_LOCKED_CTXS
887 886 #endif
888 887 #if defined(sun4v) || defined(UTSB_PHYS)
889 888 /*
890 889 * Get the location in the 4MB base TSB of the tsbe for this fault.
891 890 * Assumes that the second TSB only contains 4M mappings.
892 891 *
893 892 * In:
894 893 * tagacc = tag access register (not clobbered)
895 894 * tsbe = 2nd TSB base register
896 895 * tmp1, tmp2 = scratch registers
897 896 * Out:
898 897 * tsbe = pointer to the tsbe in the 2nd TSB
899 898 */
900 899
901 900 #define GET_4MBASE_TSBE_PTR(tagacc, tsbe, tmp1, tmp2) \
902 901 and tsbe, TSB_SOFTSZ_MASK, tmp2; /* tmp2=szc */ \
903 902 andn tsbe, TSB_SOFTSZ_MASK, tsbe; /* tsbbase */ \
904 903 mov TSB_ENTRIES(0), tmp1; /* nentries in TSB size 0 */ \
905 904 sllx tmp1, tmp2, tmp1; /* tmp1 = nentries in TSB */ \
906 905 sub tmp1, 1, tmp1; /* mask = nentries - 1 */ \
907 906 srlx tagacc, MMU_PAGESHIFT4M, tmp2; \
908 907 and tmp2, tmp1, tmp1; /* tsbent = virtpage & mask */ \
909 908 sllx tmp1, TSB_ENTRY_SHIFT, tmp1; /* entry num --> ptr */ \
910 909 add tsbe, tmp1, tsbe /* add entry offset to TSB base */
911 910
912 911 #define GET_2ND_TSBE_PTR(tagacc, tsbe, tmp1, tmp2) \
913 912 GET_4MBASE_TSBE_PTR(tagacc, tsbe, tmp1, tmp2)
914 913
915 914 /*
916 915 * Get the location in the 3rd TSB of the tsbe for this fault.
917 916 * The 3rd TSB corresponds to the shared context, and is used
918 917 * for 8K - 512k pages.
919 918 *
920 919 * In:
921 920 * tagacc = tag access register (not clobbered)
922 921 * tsbe, tmp1, tmp2 = scratch registers
923 922 * Out:
924 923 * tsbe = pointer to the tsbe in the 3rd TSB
925 924 */
926 925
927 926 #define GET_3RD_TSBE_PTR(tagacc, tsbe, tmp1, tmp2) \
928 927 and tsbe, TSB_SOFTSZ_MASK, tmp2; /* tmp2=szc */ \
929 928 andn tsbe, TSB_SOFTSZ_MASK, tsbe; /* tsbbase */ \
930 929 mov TSB_ENTRIES(0), tmp1; /* nentries in TSB size 0 */ \
931 930 sllx tmp1, tmp2, tmp1; /* tmp1 = nentries in TSB */ \
932 931 sub tmp1, 1, tmp1; /* mask = nentries - 1 */ \
933 932 srlx tagacc, MMU_PAGESHIFT, tmp2; \
934 933 and tmp2, tmp1, tmp1; /* tsbent = virtpage & mask */ \
935 934 sllx tmp1, TSB_ENTRY_SHIFT, tmp1; /* entry num --> ptr */ \
936 935 add tsbe, tmp1, tsbe /* add entry offset to TSB base */
937 936
938 937 #define GET_4TH_TSBE_PTR(tagacc, tsbe, tmp1, tmp2) \
939 938 GET_4MBASE_TSBE_PTR(tagacc, tsbe, tmp1, tmp2)
940 939 /*
941 940 * Copy the sfmmu_region_map or scd_region_map to the tsbmiss
942 941 * shmermap or scd_shmermap, from sfmmu_load_mmustate.
943 942 */
944 943 #define SET_REGION_MAP(rgn_map, tsbmiss_map, cnt, tmp, label) \
945 944 /* BEGIN CSTYLED */ \
946 945 label: ;\
947 946 ldx [rgn_map], tmp ;\
948 947 dec cnt ;\
949 948 add rgn_map, CLONGSIZE, rgn_map ;\
950 949 stx tmp, [tsbmiss_map] ;\
951 950 brnz,pt cnt, label ;\
952 951 add tsbmiss_map, CLONGSIZE, tsbmiss_map \
953 952 /* END CSTYLED */
954 953
955 954 /*
956 955 * If there is no scd, then zero the tsbmiss scd_shmermap,
957 956 * from sfmmu_load_mmustate.
958 957 */
959 958 #define ZERO_REGION_MAP(tsbmiss_map, cnt, label) \
960 959 /* BEGIN CSTYLED */ \
961 960 label: ;\
962 961 dec cnt ;\
963 962 stx %g0, [tsbmiss_map] ;\
964 963 brnz,pt cnt, label ;\
965 964 add tsbmiss_map, CLONGSIZE, tsbmiss_map
966 965 /* END CSTYLED */
967 966
968 967 /*
969 968 * Set hmemisc to 1 if the shared hme is also part of an scd.
970 969 * In:
971 970 * tsbarea = tsbmiss area (not clobbered)
972 971 * hmeblkpa = hmeblkpa + hmentoff + SFHME_TTE (not clobbered)
973 972 * hmentoff = hmentoff + SFHME_TTE = tte offset(clobbered)
974 973 * Out:
975 974 * use_shctx = 1 if shme is in scd and 0 otherwise
976 975 */
977 976 #define GET_SCDSHMERMAP(tsbarea, hmeblkpa, hmentoff, use_shctx) \
978 977 /* BEGIN CSTYLED */ \
979 978 sub hmeblkpa, hmentoff, hmentoff /* hmentofff = hmeblkpa */ ;\
980 979 add hmentoff, HMEBLK_TAG, hmentoff ;\
981 980 ldxa [hmentoff]ASI_MEM, hmentoff /* read 1st part of tag */ ;\
982 981 and hmentoff, HTAG_RID_MASK, hmentoff /* mask off rid */ ;\
983 982 and hmentoff, BT_ULMASK, use_shctx /* mask bit index */ ;\
984 983 srlx hmentoff, BT_ULSHIFT, hmentoff /* extract word */ ;\
985 984 sllx hmentoff, CLONGSHIFT, hmentoff /* index */ ;\
986 985 add tsbarea, hmentoff, hmentoff /* add to tsbarea */ ;\
987 986 ldx [hmentoff + TSBMISS_SCDSHMERMAP], hmentoff /* scdrgn */ ;\
988 987 srlx hmentoff, use_shctx, use_shctx ;\
989 988 and use_shctx, 0x1, use_shctx \
990 989 /* END CSTYLED */
991 990
992 991 /*
993 992 * Synthesize a TSB base register contents for a process.
994 993 *
995 994 * In:
996 995 * tsbinfo = TSB info pointer (ro)
997 996 * tsbreg, tmp1 = scratch registers
998 997 * Out:
999 998 * tsbreg = value to program into TSB base register
1000 999 */
1001 1000
1002 1001 #define MAKE_UTSBREG(tsbinfo, tsbreg, tmp1) \
1003 1002 ldx [tsbinfo + TSBINFO_PADDR], tsbreg; \
1004 1003 lduh [tsbinfo + TSBINFO_SZCODE], tmp1; \
1005 1004 and tmp1, TSB_SOFTSZ_MASK, tmp1; \
1006 1005 or tsbreg, tmp1, tsbreg;
1007 1006
1008 1007
1009 1008 /*
1010 1009 * Load TSB base register to TSBMISS area for private contexts.
1011 1010 * This register contains utsb_pabase in bits 63:13, and TSB size
1012 1011 * code in bits 2:0.
1013 1012 *
1014 1013 * For private context
1015 1014 * In:
1016 1015 * tsbreg = value to load (ro)
1017 1016 * regnum = constant or register
1018 1017 * tmp1 = scratch register
1019 1018 * Out:
1020 1019 * Specified scratchpad register updated
1021 1020 *
1022 1021 */
1023 1022 #define SET_UTSBREG(regnum, tsbreg, tmp1) \
1024 1023 mov regnum, tmp1; \
1025 1024 stxa tsbreg, [tmp1]ASI_SCRATCHPAD /* save tsbreg */
1026 1025 /*
1027 1026 * Get TSB base register from the scratchpad for private contexts
1028 1027 *
1029 1028 * In:
1030 1029 * regnum = constant or register
1031 1030 * tsbreg = scratch
1032 1031 * Out:
1033 1032 * tsbreg = tsbreg from the specified scratchpad register
1034 1033 */
1035 1034 #define GET_UTSBREG(regnum, tsbreg) \
1036 1035 mov regnum, tsbreg; \
1037 1036 ldxa [tsbreg]ASI_SCRATCHPAD, tsbreg
1038 1037
1039 1038 /*
1040 1039 * Load TSB base register to TSBMISS area for shared contexts.
1041 1040 * This register contains utsb_pabase in bits 63:13, and TSB size
1042 1041 * code in bits 2:0.
1043 1042 *
1044 1043 * In:
1045 1044 * tsbmiss = pointer to tsbmiss area
1046 1045 * tsbmissoffset = offset to right tsb pointer
1047 1046 * tsbreg = value to load (ro)
1048 1047 * Out:
1049 1048 * Specified tsbmiss area updated
1050 1049 *
1051 1050 */
1052 1051 #define SET_UTSBREG_SHCTX(tsbmiss, tsbmissoffset, tsbreg) \
1053 1052 stx tsbreg, [tsbmiss + tsbmissoffset] /* save tsbreg */
1054 1053
1055 1054 /*
1056 1055 * Get TSB base register from the scratchpad for
1057 1056 * shared contexts
1058 1057 *
1059 1058 * In:
1060 1059 * tsbmiss = pointer to tsbmiss area
1061 1060 * tsbmissoffset = offset to right tsb pointer
1062 1061 * tsbreg = scratch
1063 1062 * Out:
1064 1063 * tsbreg = tsbreg from the specified scratchpad register
1065 1064 */
1066 1065 #define GET_UTSBREG_SHCTX(tsbmiss, tsbmissoffset, tsbreg) \
1067 1066 ldx [tsbmiss + tsbmissoffset], tsbreg
1068 1067
1069 1068 #endif /* defined(sun4v) || defined(UTSB_PHYS) */
1070 1069
1071 1070 #ifndef _ASM
1072 1071
1073 1072 /*
1074 1073 * Kernel page relocation stuff.
1075 1074 */
1076 1075 struct sfmmu_callback {
1077 1076 int key;
1078 1077 int (*prehandler)(caddr_t, uint_t, uint_t, void *);
1079 1078 int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t);
1080 1079 int (*errhandler)(caddr_t, uint_t, uint_t, void *);
1081 1080 int capture_cpus;
1082 1081 };
1083 1082
1084 1083 extern int sfmmu_max_cb_id;
1085 1084 extern struct sfmmu_callback *sfmmu_cb_table;
1086 1085
1087 1086 struct pa_hment;
1088 1087
1089 1088 /*
1090 1089 * RFE: With multihat gone we gain back an int. We could use this to
1091 1090 * keep ref bits on a per cpu basis to eliminate xcalls.
1092 1091 */
1093 1092 struct sf_hment {
1094 1093 tte_t hme_tte; /* tte for this hment */
1095 1094
1096 1095 union {
1097 1096 struct page *page; /* what page this maps */
1098 1097 struct pa_hment *data; /* pa_hment */
1099 1098 } sf_hment_un;
1100 1099
1101 1100 struct sf_hment *hme_next; /* next hment */
1102 1101 struct sf_hment *hme_prev; /* prev hment */
1103 1102 };
1104 1103
1105 1104 struct pa_hment {
1106 1105 caddr_t addr; /* va */
1107 1106 uint_t len; /* bytes */
1108 1107 ushort_t flags; /* internal flags */
1109 1108 ushort_t refcnt; /* reference count */
1110 1109 id_t cb_id; /* callback id, table index */
1111 1110 void *pvt; /* handler's private data */
1112 1111 struct sf_hment sfment; /* corresponding dummy sf_hment */
1113 1112 };
1114 1113
1115 1114 #define hme_page sf_hment_un.page
1116 1115 #define hme_data sf_hment_un.data
1117 1116 #define hme_size(sfhmep) ((int)(TTE_CSZ(&(sfhmep)->hme_tte)))
1118 1117 #define PAHME_SZ (sizeof (struct pa_hment))
1119 1118 #define SFHME_SZ (sizeof (struct sf_hment))
1120 1119
1121 1120 #define IS_PAHME(hme) ((hme)->hme_tte.ll == 0)
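
A pa_hment rides a page's mapping list through its embedded dummy sf_hment; the zero TTE tested by IS_PAHME() marks it, and hme_data leads back to the pa_hment. A hedged sketch of a mapping-list walk (demo_visit_mappings() is invented):

    static void
    demo_visit_mappings(struct sf_hment *sfhmep)
    {
            for (; sfhmep != NULL; sfhmep = sfhmep->hme_next) {
                    if (IS_PAHME(sfhmep)) {
                            struct pa_hment *pahmep = sfhmep->hme_data;
                            /* e.g. look up sfmmu_cb_table[pahmep->cb_id] */
                            continue;
                    }
                    /* otherwise a real translation: hme_tte is valid */
            }
    }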
1122 1121
1123 1122 /*
1124 1123 * hmeblk_tag structure
1125 1124 * structure used to obtain a match on a hme_blk. Currently consists of
1126 1125 * the address of the sfmmu struct (or hatid), the base page address of the
1127 1126 * hme_blk, and the rehash count. The rehash count is actually only 2 bits
1128 1127 * and has the following meaning:
1129 1128 * 1 = 8k or 64k hash sequence.
1130 1129 * 2 = 512k hash sequence.
1131 1130 * 3 = 4M hash sequence.
1132 1131 * We require this count because we don't want to get a false hit on a 512K or
1133 1132 * 4M rehash with a base address corresponding to an 8k or 64k hmeblk.
1134 1133 * Note: The ordering and size of the hmeblk_tag members are implicitly known
1135 1134 * by the tsb miss handlers written in assembly. Do not change this structure
1136 1135 * without checking those routines. See HTAG_SFMMUPSZ define.
1137 1136 */
1138 1137
1139 1138 /*
1140 1139 * In private hmeblks hblk_rid field must be SFMMU_INVALID_RID.
1141 1140 */
1142 1141 typedef union {
1143 1142 struct {
1144 1143 uint64_t hblk_basepg: 51, /* hme_blk base pg # */
1145 1144 hblk_rehash: 3, /* rehash number */
1146 1145 hblk_rid: 10; /* hme_blk region id */
1147 1146 void *hblk_id;
1148 1147 } hblk_tag_un;
1149 1148 uint64_t htag_tag[2];
1150 1149 } hmeblk_tag;
1151 1150
1152 1151 #define htag_id hblk_tag_un.hblk_id
1153 1152 #define htag_bspage hblk_tag_un.hblk_basepg
1154 1153 #define htag_rehash hblk_tag_un.hblk_rehash
1155 1154 #define htag_rid hblk_tag_un.hblk_rid
1156 1155
1157 1156 #endif /* !_ASM */
1158 1157
1159 1158 #define HTAG_REHASH_SHIFT 10
1160 1159 #define HTAG_MAX_RID (((0x1 << HTAG_REHASH_SHIFT) - 1))
1161 1160 #define HTAG_RID_MASK HTAG_MAX_RID
1162 1161
1163 1162 /* used for tagging all per sfmmu (i.e. non SRD) private hmeblks */
1164 1163 #define SFMMU_INVALID_SHMERID HTAG_MAX_RID
1165 1164
1166 1165 #if SFMMU_INVALID_SHMERID < SFMMU_MAX_HME_REGIONS
1167 1166 #error SFMMU_INVALID_SHMERID < SFMMU_MAX_HME_REGIONS
1168 1167 #endif
1169 1168
1170 1169 #define SFMMU_IS_SHMERID_VALID(rid) ((rid) != SFMMU_INVALID_SHMERID)
1171 1170
1172 1171 /* ISM regions */
1173 1172 #define SFMMU_INVALID_ISMRID 0xff
1174 1173
1175 1174 #if SFMMU_INVALID_ISMRID < SFMMU_MAX_ISM_REGIONS
1176 1175 #error SFMMU_INVALID_ISMRID < SFMMU_MAX_ISM_REGIONS
1177 1176 #endif
1178 1177
1179 1178 #define SFMMU_IS_ISMRID_VALID(rid) ((rid) != SFMMU_INVALID_ISMRID)
1180 1179
1181 1180
1182 1181 #define HTAGS_EQ(tag1, tag2) (((tag1.htag_tag[0] ^ tag2.htag_tag[0]) | \
1183 1182 (tag1.htag_tag[1] ^ tag2.htag_tag[1])) == 0)
1184 1183
1185 1184 /*
1186 1185 * this macro must only be used for comparing tags in shared hmeblks.
1187 1186 */
1188 1187 #define HTAGS_EQ_SHME(hmetag, tag, hrmap) \
1189 1188 (((hmetag).htag_rid != SFMMU_INVALID_SHMERID) && \
1190 1189 (((((hmetag).htag_tag[0] ^ (tag).htag_tag[0]) & \
1191 1190 ~HTAG_RID_MASK) | \
1192 1191 ((hmetag).htag_tag[1] ^ (tag).htag_tag[1])) == 0) && \
1193 1192 SF_RGNMAP_TEST(hrmap, hmetag.htag_rid))
1194 1193
1195 1194 #define HME_REHASH(sfmmup) \
1196 1195 ((sfmmup)->sfmmu_ttecnt[TTE512K] != 0 || \
1197 1196 (sfmmup)->sfmmu_ttecnt[TTE4M] != 0 || \
1198 1197 (sfmmup)->sfmmu_ttecnt[TTE32M] != 0 || \
1199 1198 (sfmmup)->sfmmu_ttecnt[TTE256M] != 0)
1200 1199
1201 1200 #define NHMENTS 8 /* # of hments in an 8k hme_blk */
1202 1201 /* needs to be a multiple of 2 */
1203 1202
1204 1203 #ifndef _ASM
1205 1204
1206 1205 #ifdef HBLK_TRACE
1207 1206
1208 1207 #define HBLK_LOCK 1
1209 1208 #define HBLK_UNLOCK 0
1210 1209 #define HBLK_STACK_DEPTH 6
1211 1210 #define HBLK_AUDIT_CACHE_SIZE 16
1212 1211 #define HBLK_LOCK_PATTERN 0xaaaaaaaa
1213 1212 #define HBLK_UNLOCK_PATTERN 0xbbbbbbbb
1214 1213
1215 1214 struct hblk_lockcnt_audit {
1216 1215 int flag; /* lock or unlock */
1217 1216 kthread_id_t thread;
1218 1217 int depth;
1219 1218 pc_t stack[HBLK_STACK_DEPTH];
1220 1219 };
1221 1220
1222 1221 #endif /* HBLK_TRACE */
1223 1222
1224 1223
1225 1224 /*
1226 1225 * Hment block structure.
1227 1226 * The hme_blk is the node data structure which the hash structure
1228 1227 * mantains. An hme_blk can have 2 different sizes depending on the
1229 1228 * number of hments it implicitly contains. When dealing with 64K, 512K,
1230 1229 * or 4M hments there is one hment per hme_blk. When dealing with
1231 1230 * 8k hments we allocate an hme_blk plus an additional 7 hments to
1232 1231 * give us a total of 8 (NHMENTS) hments that can be referenced through a
1233 1232 * hme_blk.
1234 1233 *
1235 1234 * The hmeblk structure contains 2 tte reference counters used to determine if
1236 1235 * it is ok to free up the hmeblk. Both counters have to be zero in order
1237 1236 * to be able to free up hmeblk. They are protected by cas.
1238 1237 * hblk_hmecnt is the number of hments present on pp mapping lists.
1239 1238 * hblk_vcnt reflects number of valid ttes in hmeblk.
1240 1239 *
1241 1240 * The hmeblk now also has per tte lock cnts. This is required because
1242 1241 * the counts can be high and there are not enough bits in the tte. When
1243 1242 * physio is fixed to not lock the translations we should be able to move
1244 1243 * the lock cnt back to the tte. See bug id 1198554.
1245 1244 *
1246 1245 * Note that xhat_hme_blk's layout follows this structure: hme_blk_misc
1247 1246 * and sf_hment are at the same offsets in both structures. Whenever
1248 1247 * hme_blk is changed, xhat_hme_blk may need to be updated as well.
1249 1248 */
1250 1249
1251 1250 struct hme_blk_misc {
1252 1251 uint_t notused:25;
1253 1252 uint_t shared_bit:1; /* set for SRD shared hmeblk */
1254 1253 uint_t xhat_bit:1; /* set for an xhat hme_blk */
1255 1254 uint_t shadow_bit:1; /* set for a shadow hme_blk */
1256 1255 uint_t nucleus_bit:1; /* set for a nucleus hme_blk */
1257 1256 uint_t ttesize:3; /* contains ttesz of hmeblk */
1258 1257 };
1259 1258
1260 1259 struct hme_blk {
1261 1260 volatile uint64_t hblk_nextpa; /* physical address for hash list */
1262 1261
1263 1262 hmeblk_tag hblk_tag; /* tag used to obtain an hmeblk match */
1264 1263
1265 1264 struct hme_blk *hblk_next; /* on free list or on hash list */
1266 1265 /* protected by hash lock */
1267 1266
1268 1267 struct hme_blk *hblk_shadow; /* pts to shadow hblk */
1269 1268 /* protected by hash lock */
1270 1269 uint_t hblk_span; /* span of memory hmeblk maps */
1271 1270
1272 1271 struct hme_blk_misc hblk_misc;
1273 1272
1274 1273 union {
1275 1274 struct {
1276 1275 ushort_t hblk_hmecount; /* hment on mlists counter */
1277 1276 ushort_t hblk_validcnt; /* valid tte reference count */
1278 1277 } hblk_counts;
1279 1278 uint_t hblk_shadow_mask;
1280 1279 } hblk_un;
1281 1280
1282 1281 uint_t hblk_lckcnt;
1283 1282
1284 1283 #ifdef HBLK_TRACE
1285 1284 kmutex_t hblk_audit_lock; /* lock to protect index */
1286 1285 uint_t hblk_audit_index; /* index into audit_cache */
1287 1286 struct hblk_lockcnt_audit hblk_audit_cache[HBLK_AUDIT_CACHE_SIZE];
1288 1287 #endif /* HBLK_TRACE */
1289 1288
1290 1289 struct sf_hment hblk_hme[1]; /* hment array */
1291 1290 };
1292 1291
1293 1292 #define hblk_shared hblk_misc.shared_bit
1294 1293 #define hblk_xhat_bit hblk_misc.xhat_bit
1295 1294 #define hblk_shw_bit hblk_misc.shadow_bit
1296 1295 #define hblk_nuc_bit hblk_misc.nucleus_bit
1297 1296 #define hblk_ttesz hblk_misc.ttesize
1298 1297 #define hblk_hmecnt hblk_un.hblk_counts.hblk_hmecount
1299 1298 #define hblk_vcnt hblk_un.hblk_counts.hblk_validcnt
1300 1299 #define hblk_shw_mask hblk_un.hblk_shadow_mask
1301 1300
1302 1301 #define MAX_HBLK_LCKCNT 0xFFFFFFFF
1303 1302 #define HMEBLK_ALIGN 0x8 /* hmeblk has to be double aligned */
1304 1303
1305 1304 #ifdef HBLK_TRACE
1306 1305
1307 1306 #define HBLK_STACK_TRACE(hmeblkp, lock) \
1308 1307 { \
1309 1308 int flag = lock; /* to pacify lint */ \
1310 1309 int audit_index; \
1311 1310 \
1312 1311 mutex_enter(&hmeblkp->hblk_audit_lock); \
1313 1312 audit_index = hmeblkp->hblk_audit_index; \
1314 1313 hmeblkp->hblk_audit_index = ((hmeblkp->hblk_audit_index + 1) & \
1315 1314 (HBLK_AUDIT_CACHE_SIZE - 1)); \
1316 1315 mutex_exit(&hmeblkp->hblk_audit_lock); \
1317 1316 \
1318 1317 if (flag) \
1319 1318 hmeblkp->hblk_audit_cache[audit_index].flag = \
1320 1319 HBLK_LOCK_PATTERN; \
1321 1320 else \
1322 1321 hmeblkp->hblk_audit_cache[audit_index].flag = \
1323 1322 HBLK_UNLOCK_PATTERN; \
1324 1323 \
1325 1324 hmeblkp->hblk_audit_cache[audit_index].thread = curthread; \
1326 1325 hmeblkp->hblk_audit_cache[audit_index].depth = \
1327 1326 getpcstack(hmeblkp->hblk_audit_cache[audit_index].stack, \
1328 1327 HBLK_STACK_DEPTH); \
1329 1328 }
1330 1329
1331 1330 #else
1332 1331
1333 1332 #define HBLK_STACK_TRACE(hmeblkp, lock)
1334 1333
1335 1334 #endif /* HBLK_TRACE */
1336 1335
1337 1336 #define HMEHASH_FACTOR 16 /* used to calc # of buckets in hme hash */
1338 1337
1339 1338 /*
1340 1339 * A maximum number of user hmeblks is defined in order to place an upper
1341 1340 * limit on how much nucleus memory is required and to avoid overflowing the
1342 1341 * tsbmiss uhashsz and khashsz data areas. The number below corresponds to
1343 1342 * the number of buckets required, for an average hash chain length of 4 on
1344 1343 * a 16TB machine.
1345 1344 */
1346 1345
1347 1346 #define MAX_UHME_BUCKETS (0x1 << 30)
1348 1347 #define MAX_KHME_BUCKETS (0x1 << 30)
1349 1348
1350 1349 /*
1351 1350 * The minimum number of kernel hash buckets.
1352 1351 */
1353 1352 #define MIN_KHME_BUCKETS 0x800
1354 1353
1355 1354 /*
1356 1355 * The number of hash buckets must be a power of 2. If the initially calculated
1357 1356 * value is less than USER_BUCKETS_THRESHOLD we round up to the next greater
1358 1357 * power of 2; otherwise we round down to avoid huge over-allocations.
1359 1358 */
1360 1359 #define USER_BUCKETS_THRESHOLD (1<<22)
1361 1360
1362 1361 #define MAX_NUCUHME_BUCKETS 0x4000
1363 1362 #define MAX_NUCKHME_BUCKETS 0x2000
1364 1363
1365 1364 /*
1366 1365 * There are 2 locks in the hmehash bucket. The hmehash_mutex is
1367 1366 * a regular mutex used to make sure operations on a hash link are only
1368 1367 * done by one thread. Any operation which comes into the hat with
1369 1368 * a <vaddr, as> will grab the hmehash_mutex. Normally one would expect
1370 1369 * the tsb miss handlers to grab the hash lock to make sure the hash list
1371 1370 * is consistent while we traverse it. Unfortunately this can lead to
1372 1371 * deadlocks or recursive mutex enters since it is possible for
1373 1372 * someone holding the lock to take a tlb/tsb miss.
1374 1373 * To solve this problem we have added the hmehash_listlock. This lock
1375 1374 * is only grabbed by the tsb miss handlers, vatopfn, and while
1376 1375 * adding/removing a hmeblk from the hash list. The code is written to
1377 1376 * guarantee we won't take a tlb miss while holding this lock.
1378 1377 */
1379 1378 struct hmehash_bucket {
1380 1379 kmutex_t hmehash_mutex;
1381 1380 volatile uint64_t hmeh_nextpa; /* physical address for hash list */
1382 1381 struct hme_blk *hmeblkp;
1383 1382 uint_t hmeh_listlock;
1384 1383 };
1385 1384
1386 1385 #endif /* !_ASM */
1387 1386
1388 1387 #define SFMMU_PGCNT_MASK 0x3f
1389 1388 #define SFMMU_PGCNT_SHIFT 6
1390 1389 #define INVALID_MMU_ID -1
1391 1390 #define SFMMU_MMU_GNUM_RSHIFT 16
1392 1391 #define SFMMU_MMU_CNUM_LSHIFT (64 - SFMMU_MMU_GNUM_RSHIFT)
1393 1392 #define MAX_SFMMU_CTX_VAL ((1 << 16) - 1) /* for sanity check */
1394 1393 #define MAX_SFMMU_GNUM_VAL ((0x1UL << 48) - 1)
1395 1394
1396 1395 /*
1397 1396 * The tsb miss handlers written in assembly know that sfmmup
1398 1397 * is a 64 bit ptr.
1399 1398 *
1400 1399 * The bspage and re-hash part is 64 bits, with the sfmmup being another 64
1401 1400 * bits.
1402 1401 */
1403 1402 #define HTAG_SFMMUPSZ 0 /* Not really used for LP64 */
1404 1403 #define HTAG_BSPAGE_SHIFT 13
1405 1404
1406 1405 /*
1407 1406 * Assembly routines need to be able to get to ttesz
1408 1407 */
1409 1408 #define HBLK_SZMASK 0x7
1410 1409
1411 1410 #ifndef _ASM
1412 1411
1413 1412 /*
1414 1413 * Returns the number of bytes that an hmeblk spans given its tte size
1415 1414 */
1416 1415 #define get_hblk_span(hmeblkp) ((hmeblkp)->hblk_span)
1417 1416 #define get_hblk_ttesz(hmeblkp) ((hmeblkp)->hblk_ttesz)
1418 1417 #define get_hblk_cache(hmeblkp) (((hmeblkp)->hblk_ttesz == TTE8K) ? \
1419 1418 sfmmu8_cache : sfmmu1_cache)
1420 1419 #define HMEBLK_SPAN(ttesz) \
1421 1420 ((ttesz == TTE8K)? (TTEBYTES(ttesz) * NHMENTS) : TTEBYTES(ttesz))
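
Worked out, HMEBLK_SPAN() gives NHMENTS * 8K = 64K for an 8K hmeblk and exactly one page for the single-hment sizes; the following illustrative checks hold by construction:

    static void
    demo_span_check(void)
    {
            /* an 8K hmeblk holds NHMENTS hments: 8 * 8K = 64K of va */
            ASSERT(HMEBLK_SPAN(TTE8K) == NHMENTS * TTEBYTES(TTE8K));
            /* larger sizes hold one hment, so span is one page */
            ASSERT(HMEBLK_SPAN(TTE4M) == TTEBYTES(TTE4M));
    }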
1422 1421
1423 1422 #define set_hblk_sz(hmeblkp, ttesz) \
1424 1423 (hmeblkp)->hblk_ttesz = (ttesz); \
1425 1424 (hmeblkp)->hblk_span = HMEBLK_SPAN(ttesz)
1426 1425
1427 1426 #define get_hblk_base(hmeblkp) \
1428 1427 ((uintptr_t)(hmeblkp)->hblk_tag.htag_bspage << MMU_PAGESHIFT)
1429 1428
1430 1429 #define get_hblk_endaddr(hmeblkp) \
1431 1430 ((caddr_t)(get_hblk_base(hmeblkp) + get_hblk_span(hmeblkp)))
1432 1431
1433 1432 #define in_hblk_range(hmeblkp, vaddr) \
1434 1433 (((uintptr_t)(vaddr) >= get_hblk_base(hmeblkp)) && \
1435 1434 ((uintptr_t)(vaddr) < (get_hblk_base(hmeblkp) + \
1436 1435 get_hblk_span(hmeblkp))))
1437 1436
1438 1437 #define tte_to_vaddr(hmeblkp, tte) ((caddr_t)(get_hblk_base(hmeblkp) \
1439 1438 + (TTEBYTES(TTE_CSZ(&tte)) * (tte).tte_hmenum)))
1440 1439
1441 1440 #define tte_to_evaddr(hmeblkp, ttep) ((caddr_t)(get_hblk_base(hmeblkp) \
1442 1441 + (TTEBYTES(TTE_CSZ(ttep)) * ((ttep)->tte_hmenum + 1))))
1443 1442
1444 1443 #define vaddr_to_vshift(hblktag, vaddr, shwsz) \
1445 1444 ((((uintptr_t)(vaddr) >> MMU_PAGESHIFT) - (hblktag.htag_bspage)) >>\
1446 1445 TTE_BSZS_SHIFT((shwsz) - 1))
1447 1446
1448 1447 #define HME8BLK_SZ (sizeof (struct hme_blk) + \
1449 1448 (NHMENTS - 1) * sizeof (struct sf_hment))
1450 1449 #define HME1BLK_SZ (sizeof (struct hme_blk))
1451 1450 #define H1MIN (2 + MAX_BIGKTSB_TTES) /* nucleus text+data, ktsb */
1452 1451
1453 1452 /*
1454 1453 * Hme_blk hash structure
1455 1454 * Active mappings are kept in a hash structure of hme_blks. The hash
1456 1455 * function is based on (ctx, vaddr). The size of the hash table is a
1457 1456 * power of 2 such that the average hash chain length is HMENT_HASHAVELEN.
1458 1457 * The hash actually consists of 2 separate hashes. One hash is for the user
1459 1458 * address space and the other hash is for the kernel address space.
1460 1459 * The number of buckets are calculated at boot time and stored in the global
1461 1460 * variables "uhmehash_num" and "khmehash_num". By making the hash table size
1462 1461 * a power of 2 we can use a simple & (mask) operation to derive an index
1463 1462 * instead of a divide.
1464 1463 *
1465 1464 * HME_HASH_FUNCTION(hatid, vaddr, shift) returns a pointer to a hme_hash
1466 1465 * bucket.
1467 1466 * An hme hash bucket contains a pointer to an hme_blk and the mutex that
1468 1467 * protects the link list.
1469 1468 * Spitfire supports 4 page sizes. 8k and 64K pages only need one hash.
1470 1469 * 512K pages need 2 hashes and 4M pages need 3 hashes.
1471 1470 * The 'shift' parameter controls how many bits the vaddr will be shifted in
1472 1471 * the hash function. It is calculated in the HME_HASH_SHIFT(ttesz) function
1473 1472 * and it varies depending on the page size as follows:
1474 1473 * 8k pages: HBLK_RANGE_SHIFT
1475 1474 * 64k pages: MMU_PAGESHIFT64K
1476 1475 * 512K pages: MMU_PAGESHIFT512K
1477 1476 * 4M pages: MMU_PAGESHIFT4M
1478 1477 * An assembly version of the hash function exists in sfmmu_ktsb_miss(). All
1479 1478 * changes should be reflected in both versions. This function and the TSB
1480 1479 * miss handlers are the only places which know about the two hashes.
1481 1480 *
1482 1481 * HBLK_RANGE_SHIFT controls range of virtual addresses that will fall
1483 1482 * into the same bucket for a particular process. It is currently set to
1484 1483 * be equivalent to a 64K range, or one hme_blk.
1485 1484 *
1486 1485 * The hme_blks in the hash are protected by a per hash bucket mutex
1487 1486 * known as SFMMU_HASH_LOCK.
1488 1487 * You need to acquire this lock before traversing the hash bucket link
1489 1488 * list, while adding/removing a hme_blk to the list, and while
1490 1489 * modifying an hme_blk. A possible optimization is to replace these
1491 1490 * mutexes with a readers/writer lock, but right now it is not clear
1492 1491 * whether this is a win or not.
1493 1492 *
1494 1493 * The HME_HASH_TABLE_SEARCH will search the hash table for the
1495 1494 * hme_blk that contains the hment that corresponds to the passed
1496 1495 * ctx and vaddr. It assumes the SFMMU_HASH_LOCK is held; a lookup sketch follows the lock macros below.
1497 1496 */
1498 1497
1499 1498 #endif /* ! _ASM */
1500 1499
1501 1500 #define KHATID ksfmmup
1502 1501 #define UHMEHASH_SZ uhmehash_num
1503 1502 #define KHMEHASH_SZ khmehash_num
1504 1503 #define HMENT_HASHAVELEN 4
1505 1504 #define HBLK_RANGE_SHIFT MMU_PAGESHIFT64K /* shift for HBLK_BS_MASK */
1506 1505 #define HBLK_MIN_TTESZ 1
1507 1506 #define HBLK_MIN_BYTES MMU_PAGESIZE64K
1508 1507 #define HBLK_MIN_SHIFT MMU_PAGESHIFT64K
1509 1508 #define MAX_HASHCNT 5
1510 1509 #define DEFAULT_MAX_HASHCNT 3
1511 1510
1512 1511 #ifndef _ASM
1513 1512
1514 1513 #define HASHADDR_MASK(hashno) TTE_PAGEMASK(hashno)
1515 1514
1516 1515 #define HME_HASH_SHIFT(ttesz) \
1517 1516 ((ttesz == TTE8K)? HBLK_RANGE_SHIFT : TTE_PAGE_SHIFT(ttesz))
1518 1517
1519 1518 #define HME_HASH_ADDR(vaddr, hmeshift) \
1520 1519 ((caddr_t)(((uintptr_t)(vaddr) >> (hmeshift)) << (hmeshift)))
1521 1520
1522 1521 #define HME_HASH_BSPAGE(vaddr, hmeshift) \
1523 1522 (((uintptr_t)(vaddr) >> (hmeshift)) << ((hmeshift) - MMU_PAGESHIFT))
1524 1523
1525 1524 #define HME_HASH_REHASH(ttesz) \
1526 1525 (((ttesz) < TTE512K)? 1 : (ttesz))
1527 1526
1528 1527 #define HME_HASH_FUNCTION(hatid, vaddr, shift) \
1529 1528 ((((void *)hatid) != ((void *)KHATID)) ? \
1530 1529 (&uhme_hash[ (((uintptr_t)(hatid) ^ ((uintptr_t)vaddr >> (shift))) & \
1531 1530 UHMEHASH_SZ) ]): \
1532 1531 (&khme_hash[ (((uintptr_t)(hatid) ^ ((uintptr_t)vaddr >> (shift))) & \
1533 1532 KHMEHASH_SZ) ]))
1534 1533
1535 1534 /*
1536 1535 * This macro will traverse a hmeblk hash link list looking for an hme_blk
1537 1536 * that owns the specified vaddr and hatid. If it doesn't find one, hmeblkp
1538 1537 * will be set to NULL; otherwise it will point to the correct hme_blk.
1539 1538 * This macro also cleans empty hblks.
1540 1539 */
1541 1540 #define HME_HASH_SEARCH_PREV(hmebp, hblktag, hblkp, pr_hblk, listp) \
1542 1541 { \
1543 1542 struct hme_blk *nx_hblk; \
1544 1543 \
1545 1544 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); \
1546 1545 hblkp = hmebp->hmeblkp; \
1547 1546 pr_hblk = NULL; \
1548 1547 while (hblkp) { \
1549 1548 if (HTAGS_EQ(hblkp->hblk_tag, hblktag)) { \
1550 1549 /* found hme_blk */ \
1551 1550 break; \
1552 1551 } \
1553 1552 nx_hblk = hblkp->hblk_next; \
1554 1553 if (!hblkp->hblk_vcnt && !hblkp->hblk_hmecnt) { \
1555 1554 sfmmu_hblk_hash_rm(hmebp, hblkp, pr_hblk, \
1556 1555 listp, 0); \
1557 1556 } else { \
1558 1557 pr_hblk = hblkp; \
1559 1558 } \
1560 1559 hblkp = nx_hblk; \
1561 1560 } \
1562 1561 }
1563 1562
1564 1563 #define HME_HASH_SEARCH(hmebp, hblktag, hblkp, listp) \
1565 1564 { \
1566 1565 struct hme_blk *pr_hblk; \
1567 1566 \
1568 1567 HME_HASH_SEARCH_PREV(hmebp, hblktag, hblkp, pr_hblk, listp); \
1569 1568 }
1570 1569
1571 1570 /*
1572 1571 * This macro will traverse a hmeblk hash link list looking for an hme_blk
1573 1572 * that owns the specified vaddr and hatid. If it doesn't find one, hmeblkp
1574 1573 * will be set to NULL; otherwise it will point to the correct hme_blk.
1575 1574 * It doesn't remove empty hblks.
1576 1575 */
1577 1576 #define HME_HASH_FAST_SEARCH(hmebp, hblktag, hblkp) \
1578 1577 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); \
1579 1578 for (hblkp = hmebp->hmeblkp; hblkp; \
1580 1579 hblkp = hblkp->hblk_next) { \
1581 1580 if (HTAGS_EQ(hblkp->hblk_tag, hblktag)) { \
1582 1581 /* found hme_blk */ \
1583 1582 break; \
1584 1583 } \
1585 1584 }
1586 1585
1587 1586 #define SFMMU_HASH_LOCK(hmebp) \
1588 1587 (mutex_enter(&hmebp->hmehash_mutex))
1589 1588
1590 1589 #define SFMMU_HASH_UNLOCK(hmebp) \
1591 1590 (mutex_exit(&hmebp->hmehash_mutex))
1592 1591
1593 1592 #define SFMMU_HASH_LOCK_TRYENTER(hmebp) \
1594 1593 (mutex_tryenter(&hmebp->hmehash_mutex))
1595 1594
1596 1595 #define SFMMU_HASH_LOCK_ISHELD(hmebp) \
1597 1596 (mutex_owned(&hmebp->hmehash_mutex))
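/*
 * Illustrative sketch, not part of the original header: a typical
 * single-probe lookup built from the pieces above.  The hblktag field
 * names follow the hmeblk tag definition earlier in this file; the
 * function itself is hypothetical, and a real caller would retry with
 * larger rehash values and keep the bucket locked while it uses the
 * hme_blk.
 */
static struct hme_blk *
example_hblk_lookup(sfmmu_t *sfmmup, caddr_t vaddr, int ttesz)
{
	struct hmehash_bucket *hmebp;
	struct hme_blk *hmeblkp;
	hmeblk_tag hblktag;
	int hmeshift;

	/* build the tag that identifies the hme_blk we want */
	hblktag.htag_id = sfmmutohtagid(sfmmup, SFMMU_INVALID_SHMERID);
	hblktag.htag_rid = SFMMU_INVALID_SHMERID;
	hmeshift = HME_HASH_SHIFT(ttesz);
	hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
	hblktag.htag_rehash = HME_HASH_REHASH(ttesz);
	hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift);

	SFMMU_HASH_LOCK(hmebp);
	HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
	SFMMU_HASH_UNLOCK(hmebp);

	return (hmeblkp);		/* NULL if no match was found */
}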
1598 1597
1599 1598 #define SFMMU_XCALL_STATS(sfmmup) \
1600 1599 { \
1601 1600 if (sfmmup == ksfmmup) { \
1602 1601 SFMMU_STAT(sf_kernel_xcalls); \
1603 1602 } else { \
1604 1603 SFMMU_STAT(sf_user_xcalls); \
1605 1604 } \
1606 1605 }
1607 1606
1608 1607 #define astosfmmu(as) ((as)->a_hat)
1609 1608 #define hblktosfmmu(hmeblkp) ((sfmmu_t *)(hmeblkp)->hblk_tag.htag_id)
1610 1609 #define hblktosrd(hmeblkp) ((sf_srd_t *)(hmeblkp)->hblk_tag.htag_id)
1611 1610 #define sfmmutoas(sfmmup) ((sfmmup)->sfmmu_as)
1612 1611
1613 1612 #define sfmmutohtagid(sfmmup, rid) \
1614 1613 (((rid) == SFMMU_INVALID_SHMERID) ? (void *)(sfmmup) : \
1615 1614 (void *)((sfmmup)->sfmmu_srdp))
1616 1615
1617 1616 /*
1618 1617 * We use the sfmmu data structure to keep the per as page coloring info.
1619 1618 */
1620 1619 #define as_color_bin(as) (astosfmmu(as)->sfmmu_clrbin)
1621 1620 #define as_color_start(as) (astosfmmu(as)->sfmmu_clrstart)
1622 1621
1623 1622 typedef struct {
1624 1623 char h8[HME8BLK_SZ];
1625 1624 } hblk8_t;
1626 1625
1627 1626 typedef struct {
1628 1627 char h1[HME1BLK_SZ];
1629 1628 } hblk1_t;
1630 1629
1631 1630 typedef struct {
1632 1631 ulong_t index;
1633 1632 ulong_t len;
1634 1633 hblk8_t *list;
1635 1634 } nucleus_hblk8_info_t;
1636 1635
1637 1636 typedef struct {
1638 1637 ulong_t index;
1639 1638 ulong_t len;
1640 1639 hblk1_t *list;
1641 1640 } nucleus_hblk1_info_t;
1642 1641
1643 1642 /*
1644 1643 * This struct is used for accumulating information about a range
1645 1644 * of pages that are unloading so that a single xcall can flush
1646 1645 * the entire range from remote tlbs. A function that must demap
1647 1646 * a range of virtual addresses declares one of these structures
1648 1647 * and initializes it using DEMAP_RANGE_INIT(). It then passes a pointer to this
1649 1648 * struct to the appropriate sfmmu_hblk_* level function which does
1650 1649 * all the bookkeeping using the other macros. When the function has
1651 1650 * finished the virtual address range, it needs to call the DEMAP_RANGE_FLUSH()
1652 1651 * macro to take care of any remaining unflushed mappings; a sketch follows below.
1653 1652 *
1654 1653 * The maximum range this struct can represent is the number of bits
1655 1654 * in the dmr_bitvec field times the pagesize in dmr_pgsz. Currently, only
1656 1655 * MMU_PAGESIZE pages are supported.
1657 1656 *
1658 1657 * Since there are now cases where it's no longer necessary to do
1659 1658 * flushes (e.g. when the process isn't runnable because it's swapping
1660 1659 * out or exiting) we allow these macros to take a NULL dmr input and do
1661 1660 * nothing in that case.
1662 1661 */
1663 1662 typedef struct {
1664 1663 sfmmu_t *dmr_sfmmup; /* relevant hat */
1665 1664 caddr_t dmr_addr; /* beginning address */
1666 1665 caddr_t dmr_endaddr; /* ending address */
1667 1666 ulong_t dmr_bitvec; /* valid pages found */
1668 1667 ulong_t dmr_bit; /* next page to examine */
1669 1668 ulong_t dmr_maxbit; /* highest page in range */
1670 1669 ulong_t dmr_pgsz; /* page size in range */
1671 1670 } demap_range_t;
1672 1671
1673 1672 #define DMR_MAXBIT ((ulong_t)1<<63) /* dmr_bit high bit */
1674 1673
1675 1674 #define DEMAP_RANGE_INIT(sfmmup, dmrp) \
1676 1675 (dmrp)->dmr_sfmmup = (sfmmup); \
1677 1676 (dmrp)->dmr_bitvec = 0; \
1678 1677 (dmrp)->dmr_maxbit = sfmmu_dmr_maxbit; \
1679 1678 (dmrp)->dmr_pgsz = MMU_PAGESIZE;
1680 1679
1681 1680 #define DEMAP_RANGE_PGSZ(dmrp) ((dmrp)? (dmrp)->dmr_pgsz : MMU_PAGESIZE)
1682 1681
1683 1682 #define DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr) \
1684 1683 if ((dmrp) != NULL) { \
1685 1684 if ((dmrp)->dmr_bitvec != 0 && (dmrp)->dmr_endaddr != (addr)) \
1686 1685 sfmmu_tlb_range_demap(dmrp); \
1687 1686 (dmrp)->dmr_endaddr = (endaddr); \
1688 1687 }
1689 1688
1690 1689 #define DEMAP_RANGE_FLUSH(dmrp) \
1691 1690 if ((dmrp)->dmr_bitvec != 0) \
1692 1691 sfmmu_tlb_range_demap(dmrp);
1693 1692
1694 1693
1695 1694 #define DEMAP_RANGE_MARKPG(dmrp, addr) \
1696 1695 if ((dmrp) != NULL) { \
1697 1696 if ((dmrp)->dmr_bitvec == 0) { \
1698 1697 (dmrp)->dmr_addr = (addr); \
1699 1698 (dmrp)->dmr_bit = 1; \
1700 1699 } \
1701 1700 (dmrp)->dmr_bitvec |= (dmrp)->dmr_bit; \
1702 1701 }
1703 1702
1704 1703 #define DEMAP_RANGE_NEXTPG(dmrp) \
1705 1704 if ((dmrp) != NULL && (dmrp)->dmr_bitvec != 0) { \
1706 1705 if ((dmrp)->dmr_bit & (dmrp)->dmr_maxbit) { \
1707 1706 sfmmu_tlb_range_demap(dmrp); \
1708 1707 } else { \
1709 1708 (dmrp)->dmr_bit <<= 1; \
1710 1709 } \
1711 1710 }
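/*
 * Illustrative sketch, not part of the original header: the demap
 * protocol described above, in a hypothetical caller that unloads a
 * range of 8K pages.  The DEMAP_RANGE_* macros batch the marked pages
 * into at most one cross-call per 64 pages (the width of dmr_bitvec).
 */
static void
example_demap_range(sfmmu_t *sfmmup, caddr_t addr, caddr_t endaddr)
{
	demap_range_t dmr;
	caddr_t va;

	DEMAP_RANGE_INIT(sfmmup, &dmr);
	DEMAP_RANGE_CONTINUE(&dmr, addr, endaddr);
	for (va = addr; va < endaddr; va += MMU_PAGESIZE) {
		/* ... unload the tte that maps va here ... */
		DEMAP_RANGE_MARKPG(&dmr, va);
		DEMAP_RANGE_NEXTPG(&dmr);
	}
	DEMAP_RANGE_FLUSH(&dmr);	/* flush any pages still marked */
}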
1712 1711
1713 1712 /*
1714 1713 * TSB related structures
1715 1714 *
1716 1715 * The TSB is made up of tte entries. Both the tag and data are present
1717 1716 * in the TSB. The TSB locking is managed as follows:
1718 1717 * A software bit in the tsb tag is used to indicate that the entry is locked.
1719 1718 * If a cpu servicing a tsb miss reads a locked entry the tag compare will
1720 1719 * fail forcing the cpu to go to the hat hash for the translation.
1721 1720 * The cpu that holds the lock can then modify the data side and the tag side.
1722 1721 * The last write should be to the word containing the lock bit which will
1723 1722 * clear the lock and allow the tsb entry to be read. It is assumed that all
1724 1723 * cpus reading the tsb will do so with atomic 128-bit loads. An atomic 128
1725 1724 * bit load is required to prevent the following from happening:
1726 1725 *
1727 1726 * cpu 0 cpu 1 comments
1728 1727 *
1729 1728 * ldx tag tag unlocked
1730 1729 * ldstub lock set lock
1731 1730 * stx data
1732 1731 * stx tag unlock
1733 1732 * ldx tag incorrect tte!!!
1734 1733 *
1735 1734 * The software also maintains a bit in the tag to indicate an invalid
1736 1735 * tsb entry. The purpose of this bit is to allow the tsb invalidate code
1737 1736 * to invalidate a tsb entry with a single cas. See code for details.
1738 1737 */
1739 1738
1740 1739 union tsb_tag {
1741 1740 struct {
1742 1741 uint32_t tag_res0:16; /* reserved - context area */
1743 1742 uint32_t tag_inv:1; /* sw - invalid tsb entry */
1744 1743 uint32_t tag_lock:1; /* sw - locked tsb entry */
1745 1744 uint32_t tag_res1:4; /* reserved */
1746 1745 uint32_t tag_va_hi:10; /* va[63:54] */
1747 1746 uint32_t tag_va_lo; /* va[53:22] */
1748 1747 } tagbits;
1749 1748 struct tsb_tagints {
1750 1749 uint32_t inthi;
1751 1750 uint32_t intlo;
1752 1751 } tagints;
1753 1752 };
1754 1753 #define tag_invalid tagbits.tag_inv
1755 1754 #define tag_locked tagbits.tag_lock
1756 1755 #define tag_vahi tagbits.tag_va_hi
1757 1756 #define tag_valo tagbits.tag_va_lo
1758 1757 #define tag_inthi tagints.inthi
1759 1758 #define tag_intlo tagints.intlo
1760 1759
1761 1760 struct tsbe {
1762 1761 union tsb_tag tte_tag;
1763 1762 tte_t tte_data;
1764 1763 };
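/*
 * Illustrative sketch, not part of the original header: invalidating a
 * tsb entry with a cas on the tag's upper word, as described above.
 * Assumes atomic_cas_32() from <sys/atomic.h>; TSBTAG_INVALID is
 * defined later in this file, and the real invalidate code lives in
 * assembly.
 */
static void
example_tsbe_invalidate(struct tsbe *tsbep)
{
	uint32_t inthi;

	do {
		/* re-read and retry if the tag changed underneath us */
		inthi = tsbep->tte_tag.tag_inthi;
	} while (atomic_cas_32(&tsbep->tte_tag.tag_inthi, inthi,
	    inthi | TSBTAG_INVALID) != inthi);
}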
1765 1764
1766 1765 /*
1767 1766 * A per cpu struct is kept that duplicates some info
1768 1767 * used by the tl>0 tsb miss handlers plus it provides
1769 1768 * a scratch area. Its purpose is to minimize cache misses
1770 1769 * in the tsb miss handler and is 128 bytes (2 e$ lines).
1771 1770 *
1772 1771 * There should be one allocated per cpu in nucleus memory
1773 1772 * and should be aligned on an ecache line boundary.
1774 1773 */
1775 1774 struct tsbmiss {
1776 1775 sfmmu_t *ksfmmup; /* kernel hat id */
1777 1776 sfmmu_t *usfmmup; /* user hat id */
1778 1777 sf_srd_t *usrdp; /* user's SRD hat id */
1779 1778 struct tsbe *tsbptr; /* hardware computed ptr */
1780 1779 struct tsbe *tsbptr4m; /* hardware computed ptr */
1781 1780 struct tsbe *tsbscdptr; /* hardware computed ptr */
1782 1781 struct tsbe *tsbscdptr4m; /* hardware computed ptr */
1783 1782 uint64_t ismblkpa;
1784 1783 struct hmehash_bucket *khashstart;
1785 1784 struct hmehash_bucket *uhashstart;
1786 1785 uint_t khashsz;
1787 1786 uint_t uhashsz;
1788 1787 uint16_t dcache_line_mask; /* used to flush dcache */
1789 1788 uchar_t uhat_tteflags; /* private page sizes */
1790 1789 uchar_t uhat_rtteflags; /* SHME pagesizes */
1791 1790 uint32_t utsb_misses;
1792 1791 uint32_t ktsb_misses;
1793 1792 uint16_t uprot_traps;
1794 1793 uint16_t kprot_traps;
1795 1794 /*
1796 1795 * scratch[0] -> TSB_TAGACC
1797 1796 * scratch[1] -> TSBMISS_HMEBP
1798 1797 * scratch[2] -> TSBMISS_HATID
1799 1798 */
1800 1799 uintptr_t scratch[3];
1801 1800 ulong_t shmermap[SFMMU_HMERGNMAP_WORDS]; /* 8 bytes */
1802 1801 ulong_t scd_shmermap[SFMMU_HMERGNMAP_WORDS]; /* 8 bytes */
1803 1802 uint8_t pad[48]; /* pad to 64 bytes */
1804 1803 };
1805 1804
1806 1805 /*
1807 1806 * A per cpu struct is kept for the use within the tl>0 kpm tsb
1808 1807 * miss handler. Some members are duplicates of common data or
1809 1808 * the physical addresses of common data. A few members are also
1810 1809 * written by the tl>0 kpm tsb miss handler. Its purpose is to
1811 1810 * minimize cache misses in the kpm tsb miss handler and occupies
1812 1811 * one ecache line. There should be one allocated per cpu in
1813 1812 * nucleus memory and it should be aligned on an ecache line
1814 1813 * boundary. It is not merged w/ struct tsbmiss since there is
1815 1814 * not much to share and the tsbmiss paths are different, so
1816 1815 * a kpm tlbmiss/tsbmiss only touches one cacheline, except for
1817 1816 * (DEBUG || SFMMU_STAT_GATHER) where the dtlb_misses counter
1818 1817 * of struct tsbmiss is used on every dtlb miss.
1819 1818 */
1820 1819 struct kpmtsbm {
1821 1820 caddr_t vbase; /* start of address kpm range */
1822 1821 caddr_t vend; /* end of address kpm range */
1823 1822 uchar_t flags; /* flags needed in TL tsbmiss handler */
1824 1823 uchar_t sz_shift; /* for single kpm window */
1825 1824 uchar_t kpmp_shift; /* hash lock shift */
1826 1825 uchar_t kpmp2pshft; /* kpm page to page shift */
1827 1826 uint_t kpmp_table_sz; /* size of kpmp_table or kpmp_stable */
1828 1827 uint64_t kpmp_tablepa; /* paddr of kpmp_table or kpmp_stable */
1829 1828 uint64_t msegphashpa; /* paddr of memseg_phash */
1830 1829 struct tsbe *tsbptr; /* saved ktsb pointer */
1831 1830 uint_t kpm_dtlb_misses; /* kpm tlbmiss counter */
1832 1831 uint_t kpm_tsb_misses; /* kpm tsbmiss counter */
1833 1832 uintptr_t pad[1];
1834 1833 };
1835 1834
1836 1835 extern size_t tsb_slab_size;
1837 1836 extern uint_t tsb_slab_shift;
1838 1837 extern size_t tsb_slab_mask;
1839 1838
1840 1839 #endif /* !_ASM */
1841 1840
1842 1841 /*
1843 1842 * Flags for TL kpm tsbmiss handler
1844 1843 */
1845 1844 #define KPMTSBM_ENABLE_FLAG 0x01 /* bit copy of kpm_enable */
1846 1845 #define KPMTSBM_TLTSBM_FLAG 0x02 /* use TL tsbmiss handler */
1847 1846 #define KPMTSBM_TSBPHYS_FLAG 0x04 /* use ASI_MEM for TSB update */
1848 1847
1849 1848 /*
1850 1849 * The TSB
1851 1850 * All TSB sizes supported by the hardware are now supported (8K - 1M).
1852 1851 * For kernel TSBs we may go beyond the hardware supported sizes and support
1853 1852 * larger TSBs via software.
1854 1853 * All TTE sizes are supported in the TSB; the manner in which this is
1855 1854 * done is cpu dependent.
1856 1855 */
1857 1856 #define TSB_MIN_SZCODE TSB_8K_SZCODE /* min. supported TSB size */
1858 1857 #define TSB_MIN_OFFSET_MASK (TSB_OFFSET_MASK(TSB_MIN_SZCODE))
1859 1858
1860 1859 #ifdef sun4v
1861 1860 #define UTSB_MAX_SZCODE TSB_256M_SZCODE /* max. supported TSB size */
1862 1861 #else /* sun4u */
1863 1862 #define UTSB_MAX_SZCODE TSB_1M_SZCODE /* max. supported TSB size */
1864 1863 #endif /* sun4v */
1865 1864
1866 1865 #define UTSB_MAX_OFFSET_MASK (TSB_OFFSET_MASK(UTSB_MAX_SZCODE))
1867 1866
1868 1867 #define TSB_FREEMEM_MIN 0x1000 /* 32 mb */
1869 1868 #define TSB_FREEMEM_LARGE 0x10000 /* 512 mb */
1870 1869 #define TSB_8K_SZCODE 0 /* 512 entries */
1871 1870 #define TSB_16K_SZCODE 1 /* 1k entries */
1872 1871 #define TSB_32K_SZCODE 2 /* 2k entries */
1873 1872 #define TSB_64K_SZCODE 3 /* 4k entries */
1874 1873 #define TSB_128K_SZCODE 4 /* 8k entries */
1875 1874 #define TSB_256K_SZCODE 5 /* 16k entries */
1876 1875 #define TSB_512K_SZCODE 6 /* 32k entries */
1877 1876 #define TSB_1M_SZCODE 7 /* 64k entries */
1878 1877 #define TSB_2M_SZCODE 8 /* 128k entries */
1879 1878 #define TSB_4M_SZCODE 9 /* 256k entries */
1880 1879 #define TSB_8M_SZCODE 10 /* 512k entries */
1881 1880 #define TSB_16M_SZCODE 11 /* 1M entries */
1882 1881 #define TSB_32M_SZCODE 12 /* 2M entries */
1883 1882 #define TSB_64M_SZCODE 13 /* 4M entries */
1884 1883 #define TSB_128M_SZCODE 14 /* 8M entries */
1885 1884 #define TSB_256M_SZCODE 15 /* 16M entries */
1886 1885 #define TSB_ENTRY_SHIFT 4 /* each entry = 128 bits = 16 bytes */
1887 1886 #define TSB_ENTRY_SIZE (1 << 4)
1888 1887 #define TSB_START_SIZE 9
1889 1888 #define TSB_ENTRIES(tsbsz) (1 << (TSB_START_SIZE + tsbsz))
1890 1889 #define TSB_BYTES(tsbsz) (TSB_ENTRIES(tsbsz) << TSB_ENTRY_SHIFT)
1891 1890 #define TSB_OFFSET_MASK(tsbsz) (TSB_ENTRIES(tsbsz) - 1)
1892 1891 #define TSB_BASEADDR_MASK ((1 << 12) - 1)
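/*
 * Worked example, illustrative only: TSB_64K_SZCODE is 3, so
 * TSB_ENTRIES(3) = 1 << (9 + 3) = 4096 entries,
 * TSB_BYTES(3) = 4096 << 4 = 64K (hence the size code's name), and
 * TSB_OFFSET_MASK(3) = 0xfff masks an entry index into that TSB.
 */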
1893 1892
1894 1893 /*
1895 1894 * sun4u platforms
1896 1895 * ---------------
1897 1896 * We now support two user TSBs with one TSB base register.
1898 1897 * Hence the TSB base register is split up as follows:
1899 1898 *
1900 1899 * When only one TSB present:
1901 1900 * [63 62..42 41..13 12..4 3..0]
1902 1901 * ^ ^ ^ ^ ^
1903 1902 * | | | | |
1904 1903 * | | | | |_ TSB size code
1905 1904 * | | | |
1906 1905 * | | | |_ Reserved 0
1907 1906 * | | |
1908 1907 * | | |_ TSB VA[41..13]
1909 1908 * | |
1910 1909 * | |_ VA hole (Spitfire), zeros (Cheetah and beyond)
1911 1910 * |
1912 1911 * |_ 0
1913 1912 *
1914 1913 * When second TSB present:
1915 1914 * [63 62..42 41..33 32..29 28..22 21..13 12..4 3..0]
1916 1915 * ^ ^ ^ ^ ^ ^ ^ ^
1917 1916 * | | | | | | | |
1918 1917 * | | | | | | | |_ First TSB size code
1919 1918 * | | | | | | |
1920 1919 * | | | | | | |_ Reserved 0
1921 1920 * | | | | | |
1922 1921 * | | | | | |_ First TSB's VA[21..13]
1923 1922 * | | | | |
1924 1923 * | | | | |_ Reserved for future use
1925 1924 * | | | |
1926 1925 * | | | |_ Second TSB's size code
1927 1926 * | | |
1928 1927 * | | |_ Second TSB's VA[21..13]
1929 1928 * | |
1930 1929 * | |_ VA hole (Spitfire) / ones (Cheetah and beyond)
1931 1930 * |
1932 1931 * |_ 1
1933 1932 *
1934 1933 * Note that since we store 21..13 of each TSB's VA, TSBs and their slabs
1935 1934 * may be up to 4M in size. For now, only hardware supported TSB sizes
1936 1935 * are supported, though the slabs are usually 4M in size.
1937 1936 *
1938 1937 * sun4u platforms that define UTSB_PHYS use physical addressing to access
1939 1938 * the user TSBs at TL>0. The first user TSB base is in the MMU I/D TSB Base
1940 1939 * registers. The second TSB base uses a dedicated scratchpad register which
1941 1940 * requires a definition of SCRATCHPAD_UTSBREG2 in mach_sfmmu.h. The layout for
1942 1941 * both registers is equivalent to sun4v below, except the TSB PA range is
1943 1942 * [46..13] for sun4u.
1944 1943 *
1945 1944 * sun4v platforms
1946 1945 * ---------------
1947 1946 * On sun4v platforms, we use two dedicated scratchpad registers as pseudo
1948 1947 * hardware TSB base registers to hold up to two different user TSBs.
1949 1948 *
1950 1949 * Each register contains TSB's physical base and size code information
1951 1950 * as follows:
1952 1951 *
1953 1952 * [63..56 55..13 12..4 3..0]
1954 1953 * ^ ^ ^ ^
1955 1954 * | | | |
1956 1955 * | | | |_ TSB size code
1957 1956 * | | |
1958 1957 * | | |_ Reserved 0
1959 1958 * | |
1960 1959 * | |_ TSB PA[55..13]
1961 1960 * |
1962 1961 * |
1963 1962 * |
1964 1963 * |_ 0 for valid TSB
1965 1964 *
1966 1965 * Absence of a user TSB (primarily the second user TSB) is indicated by
1967 1966 * storing a negative value in the TSB base register. This allows us to
1968 1967 * check for presence of a user TSB by simply checking bit# 63.
1969 1968 */
1970 1969 #define TSBREG_MSB_SHIFT 32 /* set upper bits */
1971 1970 #define TSBREG_MSB_CONST 0xfffff800 /* set bits 63..43 */
1972 1971 #define TSBREG_FIRTSB_SHIFT 42 /* to clear bits 63:22 */
1973 1972 #define TSBREG_SECTSB_MKSHIFT 20 /* 21:13 --> 41:33 */
1974 1973 #define TSBREG_SECTSB_LSHIFT 22 /* to clear bits 63:42 */
1975 1974 #define TSBREG_SECTSB_RSHIFT (TSBREG_SECTSB_MKSHIFT + TSBREG_SECTSB_LSHIFT)
1976 1975 /* sectsb va -> bits 21:13 */
1977 1976 /* after clearing upper bits */
1978 1977 #define TSBREG_SECSZ_SHIFT 29 /* to get sectsb szc to 3:0 */
1979 1978 #define TSBREG_VAMASK_SHIFT 13 /* set up VA mask */
1980 1979
1981 1980 #define BIGKTSB_SZ_MASK 0xf
1982 1981 #define TSB_SOFTSZ_MASK BIGKTSB_SZ_MASK
1983 1982 #define MIN_BIGKTSB_SZCODE 9 /* 256k entries */
1984 1983 #define MAX_BIGKTSB_SZCODE 11 /* 1024k entries */
1985 1984 #define MAX_BIGKTSB_TTES (TSB_BYTES(MAX_BIGKTSB_SZCODE) / MMU_PAGESIZE4M)
1986 1985
1987 1986 #define TAG_VALO_SHIFT 22 /* tag's va are bits 63-22 */
1988 1987 /*
1989 1988 * sw bits used on tsb_tag - bit masks used only in assembly
1990 1989 * use only a sethi for these fields.
1991 1990 */
1992 1991 #define TSBTAG_INVALID 0x00008000 /* tsb_tag.tag_invalid */
1993 1992 #define TSBTAG_LOCKED 0x00004000 /* tsb_tag.tag_locked */
1994 1993
1995 1994 #ifdef _ASM
1996 1995
1997 1996 /*
1998 1997 * Marker to indicate that this instruction will be hot patched at runtime
1999 1998 * to some other value.
2000 1999 * This value must be zero since it fills in the imm bits of the target
2001 2000 * instructions to be patched.
2002 2001 */
2003 2002 #define RUNTIME_PATCH (0)
2004 2003
2005 2004 /*
2006 2005 * V9 defines the nop instruction as the following, which we use
2007 2006 * at runtime to nullify some instructions we don't want to
2008 2007 * execute in the trap handlers on certain platforms.
2009 2008 */
2010 2009 #define MAKE_NOP_INSTR(reg) \
2011 2010 sethi %hi(0x1000000), reg
2012 2011
2013 2012 /*
2014 2013 * This macro constructs a SPARC V9 "jmpl <source reg>, %g0"
2015 2014 * instruction, with the source register specified by the jump_reg_number.
2016 2015 * The jmp opcode [24:19] = 11 1000 and source register is bits [18:14].
2017 2016 * The instruction is returned in reg. The macro is used to patch in a jmpl
2018 2017 * instruction at runtime.
2019 2018 */
2020 2019 #define MAKE_JMP_INSTR(jump_reg_number, reg, tmp) \
2021 2020 sethi %hi(0x81c00000), reg; \
2022 2021 mov jump_reg_number, tmp; \
2023 2022 sll tmp, 14, tmp; \
2024 2023 or reg, tmp, reg
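/*
 * Worked example, illustrative only: MAKE_JMP_INSTR(1, reg, tmp)
 * leaves 0x81c00000 | (1 << 14) = 0x81c04000 in reg, which decodes as
 * "jmpl %g1, %g0", i.e. jmp %g1.
 */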
2025 2024
2026 2025 /*
2027 2026 * Macro to get hat per-MMU cnum on this CPU.
2028 2027 * sfmmu - In, pass in "sfmmup" from the caller.
2029 2028 * cnum - Out, return 'cnum' to the caller
2030 2029 * scr - scratch
2031 2030 */
2032 2031 #define SFMMU_CPU_CNUM(sfmmu, cnum, scr) \
2033 2032 CPU_ADDR(scr, cnum); /* scr = load CPU struct addr */ \
2034 2033 ld [scr + CPU_MMU_IDX], cnum; /* cnum = mmuid */ \
2035 2034 add sfmmu, SFMMU_CTXS, scr; /* scr = sfmmup->sfmmu_ctxs[] */ \
2036 2035 sllx cnum, SFMMU_MMU_CTX_SHIFT, cnum; \
2037 2036 add scr, cnum, scr; /* scr = sfmmup->sfmmu_ctxs[id] */ \
2038 2037 ldx [scr + SFMMU_MMU_GC_NUM], scr; /* sfmmu_ctxs[id].gcnum */ \
2039 2038 sllx scr, SFMMU_MMU_CNUM_LSHIFT, scr; \
2040 2039 srlx scr, SFMMU_MMU_CNUM_LSHIFT, cnum; /* cnum = sfmmu cnum */
2041 2040
2042 2041 /*
2043 2042 * Macro to get hat gnum & cnum associated with sfmmu_ctx[mmuid] entry
2044 2043 * entry - In, pass in (&sfmmu_ctxs[mmuid] - SFMMU_CTXS) from the caller.
2045 2044 * gnum - Out, return sfmmu gnum
2046 2045 * cnum - Out, return sfmmu cnum
2047 2046 * reg - scratch
2048 2047 */
2049 2048 #define SFMMU_MMUID_GNUM_CNUM(entry, gnum, cnum, reg) \
2050 2049 ldx [entry + SFMMU_CTXS], reg; /* reg = sfmmu (gnum | cnum) */ \
2051 2050 srlx reg, SFMMU_MMU_GNUM_RSHIFT, gnum; /* gnum = sfmmu gnum */ \
2052 2051 sllx reg, SFMMU_MMU_CNUM_LSHIFT, cnum; \
2053 2052 srlx cnum, SFMMU_MMU_CNUM_LSHIFT, cnum; /* cnum = sfmmu cnum */
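/*
 * Illustrative C equivalent of the unpacking above: the 64-bit
 * sfmmu_ctxs[] word is laid out as
 * (gnum << SFMMU_MMU_GNUM_RSHIFT) | cnum, so gnum = word >> 16
 * (the upper 48 bits) and cnum = word & MAX_SFMMU_CTX_VAL (the low
 * 16 bits).
 */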
2054 2053
2055 2054 /*
2056 2055 * Macro to get this CPU's tsbmiss area.
2057 2056 */
2058 2057 #define CPU_TSBMISS_AREA(tsbmiss, tmp1) \
2059 2058 CPU_INDEX(tmp1, tsbmiss); /* tmp1 = cpu idx */ \
2060 2059 sethi %hi(tsbmiss_area), tsbmiss; /* tsbmiss base ptr */ \
2061 2060 mulx tmp1, TSBMISS_SIZE, tmp1; /* byte offset */ \
2062 2061 or tsbmiss, %lo(tsbmiss_area), tsbmiss; \
2063 2062 add tsbmiss, tmp1, tsbmiss /* tsbmiss area of CPU */
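/*
 * Illustrative C equivalent of the macro above:
 * tsbmiss = &tsbmiss_area[cpu_index], where tsbmiss_area is the
 * per-cpu array declared later in this file.
 */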
2064 2063
2065 2064
2066 2065 /*
2067 2066 * Macro to set kernel context + page size codes in DMMU primary context
2068 2067 * register. It is only necessary for sun4u because sun4v does not need
2069 2068 * page size codes.
2070 2069 */
2071 2070 #ifdef sun4v
2072 2071
2073 2072 #define SET_KCONTEXTREG(reg0, reg1, reg2, reg3, reg4, label1, label2, label3)
2074 2073
2075 2074 #else
2076 2075
2077 2076 #define SET_KCONTEXTREG(reg0, reg1, reg2, reg3, reg4, label1, label2, label3) \
2078 2077 sethi %hi(kcontextreg), reg0; \
2079 2078 ldx [reg0 + %lo(kcontextreg)], reg0; \
2080 2079 mov MMU_PCONTEXT, reg1; \
2081 2080 ldxa [reg1]ASI_MMU_CTX, reg2; \
2082 2081 xor reg0, reg2, reg2; \
2083 2082 brz reg2, label3; \
2084 2083 srlx reg2, CTXREG_NEXT_SHIFT, reg2; \
2085 2084 rdpr %pstate, reg3; /* disable interrupts */ \
2086 2085 btst PSTATE_IE, reg3; \
2087 2086 /*CSTYLED*/ \
2088 2087 bnz,a,pt %icc, label1; \
2089 2088 wrpr reg3, PSTATE_IE, %pstate; \
2090 2089 /*CSTYLED*/ \
2091 2090 label1:; \
2092 2091 brz reg2, label2; /* need demap if N_pgsz0/1 change */ \
2093 2092 sethi %hi(FLUSH_ADDR), reg4; \
2094 2093 mov DEMAP_ALL_TYPE, reg2; \
2095 2094 stxa %g0, [reg2]ASI_DTLB_DEMAP; \
2096 2095 stxa %g0, [reg2]ASI_ITLB_DEMAP; \
2097 2096 /*CSTYLED*/ \
2098 2097 label2:; \
2099 2098 stxa reg0, [reg1]ASI_MMU_CTX; \
2100 2099 flush reg4; \
2101 2100 btst PSTATE_IE, reg3; \
2102 2101 /*CSTYLED*/ \
2103 2102 bnz,a,pt %icc, label3; \
2104 2103 wrpr %g0, reg3, %pstate; /* restore interrupt state */ \
2105 2104 label3:;
2106 2105
2107 2106 #endif
2108 2107
2109 2108 /*
2110 2109 * Macro to setup arguments with kernel sfmmup context + page size before
2111 2110 * calling sfmmu_setctx_sec()
2112 2111 */
2113 2112 #ifdef sun4v
2114 2113 #define SET_KAS_CTXSEC_ARGS(sfmmup, arg0, arg1) \
2115 2114 set KCONTEXT, arg0; \
2116 2115 set 0, arg1;
2117 2116 #else
2118 2117 #define SET_KAS_CTXSEC_ARGS(sfmmup, arg0, arg1) \
2119 2118 ldub [sfmmup + SFMMU_CEXT], arg1; \
2120 2119 set KCONTEXT, arg0; \
2121 2120 sll arg1, CTXREG_EXT_SHIFT, arg1;
2122 2121 #endif
2123 2122
2124 2123 #define PANIC_IF_INTR_DISABLED_PSTR(pstatereg, label, scr) \
2125 2124 andcc pstatereg, PSTATE_IE, %g0; /* panic if intrs */ \
2126 2125 /*CSTYLED*/ \
2127 2126 bnz,pt %icc, label; /* already disabled */ \
2128 2127 nop; \
2129 2128 \
2130 2129 sethi %hi(panicstr), scr; \
2131 2130 ldx [scr + %lo(panicstr)], scr; \
2132 2131 tst scr; \
2133 2132 /*CSTYLED*/ \
2134 2133 bnz,pt %xcc, label; \
2135 2134 nop; \
2136 2135 \
2137 2136 save %sp, -SA(MINFRAME), %sp; \
2138 2137 sethi %hi(sfmmu_panic1), %o0; \
2139 2138 call panic; \
2140 2139 or %o0, %lo(sfmmu_panic1), %o0; \
2141 2140 /*CSTYLED*/ \
2142 2141 label:
2143 2142
2144 2143 #define PANIC_IF_INTR_ENABLED_PSTR(label, scr) \
2145 2144 /* \
2146 2145 * The caller must have disabled interrupts. \
2147 2146 * If interrupts are not disabled, panic \
2148 2147 */ \
2149 2148 rdpr %pstate, scr; \
2150 2149 andcc scr, PSTATE_IE, %g0; \
2151 2150 /*CSTYLED*/ \
2152 2151 bz,pt %icc, label; \
2153 2152 nop; \
2154 2153 \
2155 2154 sethi %hi(panicstr), scr; \
2156 2155 ldx [scr + %lo(panicstr)], scr; \
2157 2156 tst scr; \
2158 2157 /*CSTYLED*/ \
2159 2158 bnz,pt %xcc, label; \
2160 2159 nop; \
2161 2160 \
2162 2161 sethi %hi(sfmmu_panic6), %o0; \
2163 2162 call panic; \
2164 2163 or %o0, %lo(sfmmu_panic6), %o0; \
2165 2164 /*CSTYLED*/ \
2166 2165 label:
2167 2166
2168 2167 #endif /* _ASM */
2169 2168
2170 2169 #ifndef _ASM
2171 2170
2172 2171 #ifdef VAC
2173 2172 /*
2174 2173 * Page coloring
2175 2174 * The p_vcolor field of the page struct (1 byte) is used to store the
2176 2175 * virtual page color. This provides for 255 colors. The value zero is
2177 2176 * used to mean the page has no color - never been mapped or somehow
2178 2177 * purified.
2179 2178 */
2180 2179
2181 2180 #define PP_GET_VCOLOR(pp) (((pp)->p_vcolor) - 1)
2182 2181 #define PP_NEWPAGE(pp) (!(pp)->p_vcolor)
2183 2182 #define PP_SET_VCOLOR(pp, color) \
2184 2183 ((pp)->p_vcolor = ((color) + 1))
2185 2184
2186 2185 /*
2187 2186 * As mentioned p_vcolor == 0 means there is no color for this page.
2188 2187 * But PP_SET_VCOLOR(pp, color) stores 'color' plus one, so the argument
2189 2188 * that marks a page colorless is -1; hence we define this constant.
2190 2189 */
2191 2190 #define NO_VCOLOR (-1)
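/*
 * Illustrative example: PP_SET_VCOLOR(pp, NO_VCOLOR) stores 0, after
 * which PP_NEWPAGE(pp) is true and PP_GET_VCOLOR(pp) returns
 * NO_VCOLOR; PP_SET_VCOLOR(pp, 2) stores 3, and PP_GET_VCOLOR(pp)
 * then returns 2.
 */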
2192 2191
2193 2192 #define addr_to_vcolor(addr) \
2194 2193 (((uint_t)(uintptr_t)(addr) >> MMU_PAGESHIFT) & vac_colors_mask)
2195 2194 #else /* VAC */
2196 2195 #define addr_to_vcolor(addr) (0)
2197 2196 #endif /* VAC */
2198 2197
2199 2198 /*
2200 2199 * The field p_index in the psm page structure is for large pages support.
2201 2200 * P_index is a bit-vector of the different mapping sizes that a given page
2202 2201 * is part of. An hme structure for a large mapping is only added in the
2203 2202 * group leader page (first page). All pages covered by a given large mapping
2204 2203 * have the corresponding mapping bit set in their p_index field. This allows
2205 2204 * us to only store an explicit hme structure in the leading page which
2206 2205 * simplifies the mapping link list management. Furthermore, it provides us
2207 2206 * a fast mechanism for determining the largest mapping a page is part of. For
2208 2207 * example, a page with 64K and 4M mappings has a p_index value of 0x0A.
2209 2208 *
2210 2209 * Implementation note: even though the first bit in p_index is reserved
2211 2210 * for 8K mappings, it is NOT USED by the code and SHOULD NOT be set.
2212 2211 * In addition, the upper four bits of the p_index field are used by the
2213 2212 * code as temporaries
2214 2213 */
2215 2214
2216 2215 /*
2217 2216 * Defines for psm page struct fields and large page support
2218 2217 */
2219 2218 #define SFMMU_INDEX_SHIFT 6
2220 2219 #define SFMMU_INDEX_MASK ((1 << SFMMU_INDEX_SHIFT) - 1)
2221 2220
2222 2221 /* Return the mapping index */
2223 2222 #define PP_MAPINDEX(pp) ((pp)->p_index & SFMMU_INDEX_MASK)
2224 2223
2225 2224 /*
2226 2225 * These macros rely on the following property:
2227 2226 * All pages constituting a large page are covered by a virtually
2228 2227 * contiguous set of page_t's.
2229 2228 */
2230 2229
2231 2230 /* Return the leader for this mapping size */
2232 2231 #define PP_GROUPLEADER(pp, sz) \
2233 2232 (&(pp)[-(int)(pp->p_pagenum & (TTEPAGES(sz)-1))])
2234 2233
2235 2234 /* Return the root page for this page based on p_szc */
2236 2235 #define PP_PAGEROOT(pp) ((pp)->p_szc == 0 ? (pp) : \
2237 2236 PP_GROUPLEADER((pp), (pp)->p_szc))
2238 2237
2239 2238 #define PP_PAGENEXT_N(pp, n) ((pp) + (n))
2240 2239 #define PP_PAGENEXT(pp) PP_PAGENEXT_N((pp), 1)
2241 2240
2242 2241 #define PP_PAGEPREV_N(pp, n) ((pp) - (n))
2243 2242 #define PP_PAGEPREV(pp) PP_PAGEPREV_N((pp), 1)
2244 2243
2245 2244 #define PP_ISMAPPED_LARGE(pp) (PP_MAPINDEX(pp) != 0)
2246 2245
2247 2246 /* Need function to test the page mapping which takes p_index into account */
2248 2247 #define PP_ISMAPPED(pp) ((pp)->p_mapping || PP_ISMAPPED_LARGE(pp))
2249 2248
2250 2249 /*
2251 2250 * Don't call this macro with sz equal to zero. 8K mappings SHOULD NOT
2252 2251 * set the p_index field.
2253 2252 */
2254 2253 #define PAGESZ_TO_INDEX(sz) (1 << (sz))
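/*
 * Illustrative example: PAGESZ_TO_INDEX(TTE64K) == 0x2 and
 * PAGESZ_TO_INDEX(TTE4M) == 0x8, so the page with 64K and 4M mappings
 * mentioned above has PP_MAPINDEX(pp) == 0x0A.
 */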
2255 2254
2256 2255
2257 2256 /*
2258 2257 * prototypes for hat assembly routines. Some of these are
2259 2258 * known to machine dependent VM code.
2260 2259 */
2261 2260 extern uint64_t sfmmu_make_tsbtag(caddr_t);
2262 2261 extern struct tsbe *
2263 2262 sfmmu_get_tsbe(uint64_t, caddr_t, int, int);
2264 2263 extern void sfmmu_load_tsbe(struct tsbe *, uint64_t, tte_t *, int);
2265 2264 extern void sfmmu_unload_tsbe(struct tsbe *, uint64_t, int);
2266 2265 extern void sfmmu_load_mmustate(sfmmu_t *);
2267 2266 extern void sfmmu_raise_tsb_exception(uint64_t, uint64_t);
2268 2267 #ifndef sun4v
2269 2268 extern void sfmmu_itlb_ld_kva(caddr_t, tte_t *);
2270 2269 extern void sfmmu_dtlb_ld_kva(caddr_t, tte_t *);
2271 2270 #endif /* sun4v */
2272 2271 extern void sfmmu_copytte(tte_t *, tte_t *);
2273 2272 extern int sfmmu_modifytte(tte_t *, tte_t *, tte_t *);
2274 2273 extern int sfmmu_modifytte_try(tte_t *, tte_t *, tte_t *);
2275 2274 extern pfn_t sfmmu_ttetopfn(tte_t *, caddr_t);
2276 2275 extern uint_t sfmmu_disable_intrs(void);
2277 2276 extern void sfmmu_enable_intrs(uint_t);
2278 2277 /*
2279 2278 * functions exported to machine dependent VM code
2280 2279 */
2281 2280 extern void sfmmu_patch_ktsb(void);
2282 2281 #ifndef UTSB_PHYS
2283 2282 extern void sfmmu_patch_utsb(void);
2284 2283 #endif /* UTSB_PHYS */
2285 2284 extern pfn_t sfmmu_vatopfn(caddr_t, sfmmu_t *, tte_t *);
2286 2285 extern void sfmmu_vatopfn_suspended(caddr_t, sfmmu_t *, tte_t *);
2287 2286 extern pfn_t sfmmu_kvaszc2pfn(caddr_t, int);
2288 2287 #ifdef DEBUG
2289 2288 extern void sfmmu_check_kpfn(pfn_t);
2290 2289 #else
2291 2290 #define sfmmu_check_kpfn(pfn) /* disabled */
2292 2291 #endif /* DEBUG */
2293 2292 extern void sfmmu_memtte(tte_t *, pfn_t, uint_t, int);
2294 2293 extern void sfmmu_tteload(struct hat *, tte_t *, caddr_t, page_t *, uint_t);
2295 2294 extern void sfmmu_tsbmiss_exception(struct regs *, uintptr_t, uint_t);
2296 2295 extern void sfmmu_init_tsbs(void);
2297 2296 extern caddr_t sfmmu_ktsb_alloc(caddr_t);
2298 2297 extern int sfmmu_getctx_pri(void);
2299 2298 extern int sfmmu_getctx_sec(void);
2300 2299 extern void sfmmu_setctx_sec(uint_t);
2301 2300 extern void sfmmu_inv_tsb(caddr_t, uint_t);
2302 2301 extern void sfmmu_init_ktsbinfo(void);
2303 2302 extern int sfmmu_setup_4lp(void);
2304 2303 extern void sfmmu_patch_mmu_asi(int);
2305 2304 extern void sfmmu_init_nucleus_hblks(caddr_t, size_t, int, int);
2306 2305 extern void sfmmu_cache_flushall(void);
2307 2306 extern pgcnt_t sfmmu_tte_cnt(sfmmu_t *, uint_t);
2308 2307 extern void *sfmmu_tsb_segkmem_alloc(vmem_t *, size_t, int);
2309 2308 extern void sfmmu_tsb_segkmem_free(vmem_t *, void *, size_t);
2310 2309 extern void sfmmu_reprog_pgsz_arr(sfmmu_t *, uint8_t *);
2311 2310
2312 2311 extern void hat_kern_setup(void);
2313 2312 extern int hat_page_relocate(page_t **, page_t **, spgcnt_t *);
2314 2313 extern int sfmmu_get_ppvcolor(struct page *);
2315 2314 extern int sfmmu_get_addrvcolor(caddr_t);
2316 2315 extern int sfmmu_hat_lock_held(sfmmu_t *);
2317 2316 extern int sfmmu_alloc_ctx(sfmmu_t *, int, struct cpu *, int);
2318 2317
2319 2318 /*
2320 2319 * Functions exported to xhat_sfmmu.c
2321 2320 */
2322 2321 extern kmutex_t *sfmmu_mlist_enter(page_t *);
2323 2322 extern void sfmmu_mlist_exit(kmutex_t *);
2324 2323 extern int sfmmu_mlist_held(struct page *);
2325 2324 extern struct hme_blk *sfmmu_hmetohblk(struct sf_hment *);
2326 2325
2327 2326 /*
2328 2327 * MMU-specific functions optionally imported from the CPU module
2329 2328 */
2330 2329 #pragma weak mmu_init_scd
2331 2330 #pragma weak mmu_large_pages_disabled
2332 2331 #pragma weak mmu_set_ctx_page_sizes
2333 2332 #pragma weak mmu_check_page_sizes
2334 2333
2335 2334 extern void mmu_init_scd(sf_scd_t *);
2336 2335 extern uint_t mmu_large_pages_disabled(uint_t);
2337 2336 extern void mmu_set_ctx_page_sizes(sfmmu_t *);
2338 2337 extern void mmu_check_page_sizes(sfmmu_t *, uint64_t *);
2339 2338
2340 2339 extern sfmmu_t *ksfmmup;
2341 2340 extern caddr_t ktsb_base;
2342 2341 extern uint64_t ktsb_pbase;
2343 2342 extern int ktsb_sz;
2344 2343 extern int ktsb_szcode;
2345 2344 extern caddr_t ktsb4m_base;
2346 2345 extern uint64_t ktsb4m_pbase;
2347 2346 extern int ktsb4m_sz;
2348 2347 extern int ktsb4m_szcode;
2349 2348 extern uint64_t kpm_tsbbase;
2350 2349 extern int kpm_tsbsz;
2351 2350 extern int ktsb_phys;
2352 2351 extern int enable_bigktsb;
2353 2352 #ifndef sun4v
2354 2353 extern int utsb_dtlb_ttenum;
2355 2354 extern int utsb4m_dtlb_ttenum;
2356 2355 #endif /* sun4v */
2357 2356 extern int uhmehash_num;
2358 2357 extern int khmehash_num;
2359 2358 extern struct hmehash_bucket *uhme_hash;
2360 2359 extern struct hmehash_bucket *khme_hash;
2361 2360 extern uint_t hblk_alloc_dynamic;
2362 2361 extern struct tsbmiss tsbmiss_area[NCPU];
2363 2362 extern struct kpmtsbm kpmtsbm_area[NCPU];
2364 2363
2365 2364 #ifndef sun4v
2366 2365 extern int dtlb_resv_ttenum;
2367 2366 extern caddr_t utsb_vabase;
2368 2367 extern caddr_t utsb4m_vabase;
2369 2368 #endif /* sun4v */
2370 2369 extern vmem_t *kmem_tsb_default_arena[];
2371 2370 extern int tsb_lgrp_affinity;
2372 2371
2373 2372 extern uint_t disable_large_pages;
2374 2373 extern uint_t disable_ism_large_pages;
2375 2374 extern uint_t disable_auto_data_large_pages;
2376 2375 extern uint_t disable_auto_text_large_pages;
2377 2376
2378 2377 /* kpm externals */
2379 2378 extern pfn_t sfmmu_kpm_vatopfn(caddr_t);
2380 2379 extern void sfmmu_kpm_patch_tlbm(void);
2381 2380 extern void sfmmu_kpm_patch_tsbm(void);
2382 2381 extern void sfmmu_patch_shctx(void);
2383 2382 extern void sfmmu_kpm_load_tsb(caddr_t, tte_t *, int);
2384 2383 extern void sfmmu_kpm_unload_tsb(caddr_t, int);
2385 2384 extern void sfmmu_kpm_tsbmtl(short *, uint_t *, int);
2386 2385 extern int sfmmu_kpm_stsbmtl(uchar_t *, uint_t *, int);
2387 2386 extern caddr_t kpm_vbase;
2388 2387 extern size_t kpm_size;
2389 2388 extern struct memseg *memseg_hash[];
2390 2389 extern uint64_t memseg_phash[];
2391 2390 extern kpm_hlk_t *kpmp_table;
2392 2391 extern kpm_shlk_t *kpmp_stable;
2393 2392 extern uint_t kpmp_table_sz;
2394 2393 extern uint_t kpmp_stable_sz;
2395 2394 extern uchar_t kpmp_shift;
2396 2395
2397 2396 #define PP_ISMAPPED_KPM(pp) ((pp)->p_kpmref > 0)
2398 2397
2399 2398 #define IS_KPM_ALIAS_RANGE(vaddr) \
2400 2399 (((vaddr) - kpm_vbase) >> (uintptr_t)kpm_size_shift > 0)
2401 2400
2402 2401 #endif /* !_ASM */
2403 2402
2404 2403 /* sfmmu_kpm_tsbmtl flags */
2405 2404 #define KPMTSBM_STOP 0
2406 2405 #define KPMTSBM_START 1
2407 2406
2408 2407 /*
2409 2408 * For kpm_smallpages, the state of how a kpm page is mapped and whether
2410 2409 * it is ready to go is indicated by the two 4-bit fields defined in the
2411 2410 * kpm_spage structure as follows:
2412 2411 * kp_mapped_flag bit[0:3] - the page is mapped cacheable or not
2413 2412 * kp_mapped_flag bit[4:7] - the mapping is ready to go or not
2414 2413 * If the bit KPM_MAPPED_GO is on, it indicates that the assembly tsb miss
2415 2414 * handler can drop the mapping in regardless of the caching state of the
2416 2415 * mapping. Otherwise, we will have C handler resolve the VAC conflict no
2417 2416 * matter the page is currently mapped cacheable or non-cacheable.
2418 2417 */
2419 2418 #define KPM_MAPPEDS 0x1 /* small mapping valid, no conflict */
2420 2419 #define KPM_MAPPEDSC 0x2 /* small mapping valid, conflict */
2421 2420 #define KPM_MAPPED_GO 0x10 /* the mapping is ready to go */
2422 2421 #define KPM_MAPPED_MASK 0xf
2423 2422
2424 2423 /* Physical memseg address NULL marker */
2425 2424 #define MSEG_NULLPTR_PA -1
2426 2425
2427 2426 /*
2428 2427 * Memseg hash defines for kpm trap level tsbmiss handler.
2429 2428 * Must be in sync w/ page.h .
2430 2429 */
2431 2430 #define SFMMU_MEM_HASH_SHIFT 0x9
2432 2431 #define SFMMU_N_MEM_SLOTS 0x200
2433 2432 #define SFMMU_MEM_HASH_ENTRY_SHIFT 3
2434 2433
2435 2434 #ifndef _ASM
2436 2435 #if (SFMMU_MEM_HASH_SHIFT != MEM_HASH_SHIFT)
2437 2436 #error SFMMU_MEM_HASH_SHIFT != MEM_HASH_SHIFT
2438 2437 #endif
2439 2438 #if (SFMMU_N_MEM_SLOTS != N_MEM_SLOTS)
2440 2439 #error SFMMU_N_MEM_SLOTS != N_MEM_SLOTS
2441 2440 #endif
2442 2441
2443 2442 /* Physical memseg address NULL marker */
2444 2443 #define SFMMU_MEMSEG_NULLPTR_PA -1
2445 2444
2446 2445 /*
2447 2446 * Check KCONTEXT to be zero, asm parts depend on that assumption.
2448 2447 */
2449 2448 #if (KCONTEXT != 0)
2450 2449 #error KCONTEXT != 0
2451 2450 #endif
2452 2451 #endif /* !_ASM */
2453 2452
2454 2453
2455 2454 #endif /* _KERNEL */
2456 2455
2457 2456 #ifndef _ASM
2458 2457 /*
2459 2458 * ctx, hmeblk, mlistlock and other stats for sfmmu
2460 2459 */
2461 2460 struct sfmmu_global_stat {
2462 2461 int sf_tsb_exceptions; /* # of tsb exceptions */
2463 2462 int sf_tsb_raise_exception; /* # tsb exc. w/o TLB flush */
2464 2463
2465 2464 int sf_pagefaults; /* # of pagefaults */
2466 2465
2467 2466 int sf_uhash_searches; /* # of user hash searches */
2468 2467 int sf_uhash_links; /* # of user hash links */
2469 2468 int sf_khash_searches; /* # of kernel hash searches */
2470 2469 int sf_khash_links; /* # of kernel hash links */
2471 2470
2472 2471 int sf_swapout; /* # times hat swapped out */
2473 2472
2474 2473 int sf_tsb_alloc; /* # TSB allocations */
2475 2474 int sf_tsb_allocfail; /* # times TSB alloc fail */
2476 2475 int sf_tsb_sectsb_create; /* # times second TSB added */
2477 2476
2478 2477 int sf_scd_1sttsb_alloc; /* # SCD 1st TSB allocations */
2479 2478 int sf_scd_2ndtsb_alloc; /* # SCD 2nd TSB allocations */
2480 2479 int sf_scd_1sttsb_allocfail; /* # SCD 1st TSB alloc fail */
2481 2480 int sf_scd_2ndtsb_allocfail; /* # SCD 2nd TSB alloc fail */
2482 2481
2483 2482
2484 2483 int sf_tteload8k; /* calls to sfmmu_tteload */
2485 2484 int sf_tteload64k; /* calls to sfmmu_tteload */
2486 2485 int sf_tteload512k; /* calls to sfmmu_tteload */
2487 2486 int sf_tteload4m; /* calls to sfmmu_tteload */
2488 2487 int sf_tteload32m; /* calls to sfmmu_tteload */
2489 2488 int sf_tteload256m; /* calls to sfmmu_tteload */
2490 2489
2491 2490 int sf_tsb_load8k; /* # times loaded 8K tsbent */
2492 2491 int sf_tsb_load4m; /* # times loaded 4M tsbent */
2493 2492
2494 2493 int sf_hblk_hit; /* found hblk during tteload */
2495 2494 int sf_hblk8_ncreate; /* static hblk8's created */
2496 2495 int sf_hblk8_nalloc; /* static hblk8's allocated */
2497 2496 int sf_hblk1_ncreate; /* static hblk1's created */
2498 2497 int sf_hblk1_nalloc; /* static hblk1's allocated */
2499 2498 int sf_hblk_slab_cnt; /* sfmmu8_cache slab creates */
2500 2499 int sf_hblk_reserve_cnt; /* hblk_reserve usage */
2501 2500 int sf_hblk_recurse_cnt; /* hblk_reserve owner reqs */
2502 2501 int sf_hblk_reserve_hit; /* hblk_reserve hash hits */
2503 2502 int sf_get_free_success; /* reserve list allocs */
2504 2503 int sf_get_free_throttle; /* fails due to throttling */
2505 2504 int sf_get_free_fail; /* fails due to empty list */
2506 2505 int sf_put_free_success; /* reserve list frees */
2507 2506 int sf_put_free_fail; /* fails due to full list */
2508 2507
2509 2508 int sf_pgcolor_conflict; /* VAC conflict resolution */
2510 2509 int sf_uncache_conflict; /* VAC conflict resolution */
2511 2510 int sf_unload_conflict; /* VAC unload resolution */
2512 2511 int sf_ism_uncache; /* VAC conflict resolution */
2513 2512 int sf_ism_recache; /* VAC conflict resolution */
2514 2513 int sf_recache; /* VAC conflict resolution */
2515 2514
2516 2515 int sf_steal_count; /* # of hblks stolen */
2517 2516
2518 2517 int sf_pagesync; /* # of pagesyncs */
2519 2518 int sf_clrwrt; /* # of clear write perms */
2520 2519 int sf_pagesync_invalid; /* pagesync with inv tte */
2521 2520
2522 2521 int sf_kernel_xcalls; /* # of kernel cross calls */
2523 2522 int sf_user_xcalls; /* # of user cross calls */
2524 2523
2525 2524 int sf_tsb_grow; /* # of user tsb grows */
2526 2525 int sf_tsb_shrink; /* # of user tsb shrinks */
2527 2526 int sf_tsb_resize_failures; /* # of user tsb resize */
2528 2527 int sf_tsb_reloc; /* # of user tsb relocations */
2529 2528
2530 2529 int sf_user_vtop; /* # of user vatopfn calls */
2531 2530
2532 2531 int sf_ctx_inv; /* #times invalidate MMU ctx */
2533 2532
2534 2533 int sf_tlb_reprog_pgsz; /* # times switch TLB pgsz */
2535 2534
2536 2535 int sf_region_remap_demap; /* # times shme remap demap */
2537 2536
2538 2537 int sf_create_scd; /* # times SCD is created */
2539 2538 int sf_join_scd; /* # process joined scd */
2540 2539 int sf_leave_scd; /* # process left scd */
2541 2540 int sf_destroy_scd; /* # times SCD is destroyed */
2542 2541 };
2543 2542
2544 2543 struct sfmmu_tsbsize_stat {
2545 2544 int sf_tsbsz_8k;
2546 2545 int sf_tsbsz_16k;
2547 2546 int sf_tsbsz_32k;
2548 2547 int sf_tsbsz_64k;
2549 2548 int sf_tsbsz_128k;
2550 2549 int sf_tsbsz_256k;
2551 2550 int sf_tsbsz_512k;
2552 2551 int sf_tsbsz_1m;
2553 2552 int sf_tsbsz_2m;
2554 2553 int sf_tsbsz_4m;
2555 2554 int sf_tsbsz_8m;
2556 2555 int sf_tsbsz_16m;
2557 2556 int sf_tsbsz_32m;
2558 2557 int sf_tsbsz_64m;
2559 2558 int sf_tsbsz_128m;
2560 2559 int sf_tsbsz_256m;
2561 2560 };
2562 2561
2563 2562 struct sfmmu_percpu_stat {
2564 2563 int sf_itlb_misses; /* # of itlb misses */
2565 2564 int sf_dtlb_misses; /* # of dtlb misses */
2566 2565 int sf_utsb_misses; /* # of user tsb misses */
2567 2566 int sf_ktsb_misses; /* # of kernel tsb misses */
2568 2567 int sf_tsb_hits; /* # of tsb hits */
2569 2568 int sf_umod_faults; /* # of mod (prot viol) flts */
2570 2569 int sf_kmod_faults; /* # of mod (prot viol) flts */
2571 2570 };
2572 2571
2573 2572 #define SFMMU_STAT(stat) sfmmu_global_stat.stat++
2574 2573 #define SFMMU_STAT_ADD(stat, amount) sfmmu_global_stat.stat += (amount)
2575 2574 #define SFMMU_STAT_SET(stat, count) sfmmu_global_stat.stat = (count)
2576 2575
2577 2576 #define SFMMU_MMU_STAT(stat) { \
2578 2577 mmu_ctx_t *ctx = CPU->cpu_m.cpu_mmu_ctxp; \
2579 2578 if (ctx) \
2580 2579 ctx->stat++; \
2581 2580 }
2582 2581
2583 2582 #endif /* !_ASM */
2584 2583
2585 2584 #ifdef __cplusplus
2586 2585 }
2587 2586 #endif
2588 2587
2589 2588 #endif /* _VM_HAT_SFMMU_H */