remove xhat
The xhat infrastructure was added to support hardware such as the Zulu
graphics card - hardware with its own on-board MMU. The VM used the xhat code
to keep the CPU's and Zulu's page tables in sync. Since the only xhat user
was Zulu (which is gone), we can safely remove it, simplifying the whole VM
subsystem.
Assorted notes:
- AS_BUSY flag was used solely by xhat
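As a reviewer's aid, a minimal sketch of the kind of call-site simplification
this removal enables; the guard below is illustrative, not a call site from
this changeset:

    /*
     * Before: code touching a hat had to skip foreign (xhat) hats via
     * the sfmmu_xhat_provider field that this change deletes.
     */
    if (sfmmup->sfmmu_xhat_provider != NULL)
            return;         /* not a CPU hat; defer to the xhat driver */
    /* After: every hat is a CPU hat, so guards like this simply go away. */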
--- old/usr/src/uts/sfmmu/vm/hat_sfmmu.h
+++ new/usr/src/uts/sfmmu/vm/hat_sfmmu.h
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1987, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 /*
26 26 * VM - Hardware Address Translation management.
27 27 *
28 28 * This file describes the contents of the sun-reference-mmu(sfmmu)-
29 29 * specific hat data structures and the sfmmu-specific hat procedures.
30 30 * The machine-independent interface is described in <vm/hat.h>.
31 31 */
32 32
33 33 #ifndef _VM_HAT_SFMMU_H
34 34 #define _VM_HAT_SFMMU_H
35 35
36 36 #ifdef __cplusplus
37 37 extern "C" {
38 38 #endif
39 39
40 40 #ifndef _ASM
41 41
42 42 #include <sys/types.h>
43 43
44 44 #endif /* _ASM */
45 45
46 46 #ifdef _KERNEL
47 47
48 48 #include <sys/pte.h>
49 49 #include <vm/mach_sfmmu.h>
50 50 #include <sys/mmu.h>
51 51
52 52 /*
53 53 * Don't alter these without considering changes to ism_map_t.
54 54 */
55 55 #define DEFAULT_ISM_PAGESIZE MMU_PAGESIZE4M
56 56 #define DEFAULT_ISM_PAGESZC TTE4M
57 57 #define ISM_PG_SIZE(ism_vbshift) (1 << ism_vbshift)
58 58 #define ISM_SZ_MASK(ism_vbshift) (ISM_PG_SIZE(ism_vbshift) - 1)
59 59 #define ISM_MAP_SLOTS 8 /* Change this carefully. */
60 60
61 61 #ifndef _ASM
62 62
63 63 #include <sys/t_lock.h>
64 64 #include <vm/hat.h>
65 65 #include <vm/seg.h>
66 66 #include <sys/machparam.h>
67 67 #include <sys/systm.h>
68 68 #include <sys/x_call.h>
69 69 #include <vm/page.h>
70 70 #include <sys/ksynch.h>
71 71
72 72 typedef struct hat sfmmu_t;
73 73 typedef struct sf_scd sf_scd_t;
74 74
75 75 /*
76 76 * SFMMU attributes for hat_memload/hat_devload
77 77 */
78 78 #define SFMMU_UNCACHEPTTE 0x01000000 /* uncache in physical $ */
79 79 #define SFMMU_UNCACHEVTTE 0x02000000 /* uncache in virtual $ */
80 80 #define SFMMU_SIDEFFECT 0x04000000 /* set side effect bit */
81 81 #define SFMMU_LOAD_ALLATTR (HAT_PROT_MASK | HAT_ORDER_MASK | \
82 82 HAT_ENDIAN_MASK | HAT_NOFAULT | HAT_NOSYNC | \
83 83 SFMMU_UNCACHEPTTE | SFMMU_UNCACHEVTTE | SFMMU_SIDEFFECT)
84 84
85 85
86 86 /*
87 87 * sfmmu flags for hat_memload/hat_devload
88 88 */
89 89 #define SFMMU_NO_TSBLOAD 0x08000000 /* do not preload tsb */
90 90 #define SFMMU_LOAD_ALLFLAG (HAT_LOAD | HAT_LOAD_LOCK | \
91 91 HAT_LOAD_ADV | HAT_LOAD_CONTIG | HAT_LOAD_NOCONSIST | \
92 92 HAT_LOAD_SHARE | HAT_LOAD_REMAP | SFMMU_NO_TSBLOAD | \
93 93 HAT_RELOAD_SHARE | HAT_NO_KALLOC | HAT_LOAD_TEXT)
94 94
95 95 /*
96 96 * sfmmu internal flag to hat_pageunload that spares locked mappings
97 97 */
98 98 #define SFMMU_KERNEL_RELOC 0x8000
99 99
100 100 /*
101 101 * mode for sfmmu_chgattr
102 102 */
103 103 #define SFMMU_SETATTR 0x0
104 104 #define SFMMU_CLRATTR 0x1
105 105 #define SFMMU_CHGATTR 0x2
106 106
107 107 /*
108 108 * sfmmu specific flags for page_t
109 109 */
110 110 #define P_PNC 0x8 /* non-caching is permanent bit */
111 111 #define P_TNC 0x10 /* non-caching is temporary bit */
112 112 #define P_KPMS 0x20 /* kpm mapped small (vac alias prevention) */
113 113 #define P_KPMC 0x40 /* kpm conflict page (vac alias prevention) */
114 114
115 115 #define PP_GENERIC_ATTR(pp) ((pp)->p_nrm & (P_MOD | P_REF | P_RO))
116 116 #define PP_ISMOD(pp) ((pp)->p_nrm & P_MOD)
117 117 #define PP_ISREF(pp) ((pp)->p_nrm & P_REF)
118 118 #define PP_ISRO(pp) ((pp)->p_nrm & P_RO)
119 119 #define PP_ISNC(pp) ((pp)->p_nrm & (P_PNC|P_TNC))
120 120 #define PP_ISPNC(pp) ((pp)->p_nrm & P_PNC)
121 121 #ifdef VAC
122 122 #define PP_ISTNC(pp) ((pp)->p_nrm & P_TNC)
123 123 #endif
124 124 #define PP_ISKPMS(pp) ((pp)->p_nrm & P_KPMS)
125 125 #define PP_ISKPMC(pp) ((pp)->p_nrm & P_KPMC)
126 126
127 127 #define PP_SETMOD(pp) ((pp)->p_nrm |= P_MOD)
128 128 #define PP_SETREF(pp) ((pp)->p_nrm |= P_REF)
129 129 #define PP_SETREFMOD(pp) ((pp)->p_nrm |= (P_REF|P_MOD))
130 130 #define PP_SETRO(pp) ((pp)->p_nrm |= P_RO)
131 131 #define PP_SETREFRO(pp) ((pp)->p_nrm |= (P_REF|P_RO))
132 132 #define PP_SETPNC(pp) ((pp)->p_nrm |= P_PNC)
133 133 #ifdef VAC
134 134 #define PP_SETTNC(pp) ((pp)->p_nrm |= P_TNC)
135 135 #endif
136 136 #define PP_SETKPMS(pp) ((pp)->p_nrm |= P_KPMS)
137 137 #define PP_SETKPMC(pp) ((pp)->p_nrm |= P_KPMC)
138 138
139 139 #define PP_CLRMOD(pp) ((pp)->p_nrm &= ~P_MOD)
140 140 #define PP_CLRREF(pp) ((pp)->p_nrm &= ~P_REF)
141 141 #define PP_CLRREFMOD(pp) ((pp)->p_nrm &= ~(P_REF|P_MOD))
142 142 #define PP_CLRRO(pp) ((pp)->p_nrm &= ~P_RO)
143 143 #define PP_CLRPNC(pp) ((pp)->p_nrm &= ~P_PNC)
144 144 #ifdef VAC
145 145 #define PP_CLRTNC(pp) ((pp)->p_nrm &= ~P_TNC)
146 146 #endif
147 147 #define PP_CLRKPMS(pp) ((pp)->p_nrm &= ~P_KPMS)
148 148 #define PP_CLRKPMC(pp) ((pp)->p_nrm &= ~P_KPMC)
149 149
150 150 /*
151 151 * All shared memory segments attached with the SHM_SHARE_MMU flag (ISM)
152 152 * will be constrained to a 4M, 32M or 256M alignment. Also, since every newly-
153 153 * created ISM segment is created out of a new address space at base va
154 154 * of 0, we don't need to store it.
155 155 */
156 156 #define ISM_ALIGN(shift) (1 << shift) /* base va aligned to <n>M */
157 157 #define ISM_ALIGNED(shift, va) (((uintptr_t)va & (ISM_ALIGN(shift) - 1)) == 0)
158 158 #define ISM_SHIFT(shift, x) ((uintptr_t)x >> (shift))
159 159
160 160 /*
161 161 * Pad locks out to cache sub-block boundaries to prevent
162 162 * false sharing, so several processes don't contend for
163 163 * the same line if they aren't using the same lock. Since
164 164 * this is a typedef we also have a bit of freedom in
165 165 * changing lock implementations later if we decide it
166 166 * is necessary.
167 167 */
168 168 typedef struct hat_lock {
169 169 kmutex_t hl_mutex;
170 170 uchar_t hl_pad[64 - sizeof (kmutex_t)];
171 171 } hatlock_t;
172 172
173 173 #define HATLOCK_MUTEXP(hatlockp) (&((hatlockp)->hl_mutex))
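A hedged aside on the padding above: the 64-byte contract can be pinned down
at build time with CTASSERT (the illumos compile-time assertion); this check
is illustrative and not part of the file:

    /* The false-sharing pad must keep each lock in its own 64-byte unit. */
    CTASSERT(sizeof (hatlock_t) == 64);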
174 174
175 175 /*
176 176 * All segments mapped with ISM are guaranteed to be 4M, 32M or 256M aligned.
177 177 * Also size is guaranteed to be in 4M, 32M or 256M chunks.
178 178 * ism_seg consists of the following members:
179 179 * [XX..22] base address of ism segment. XX is 63 or 31 depending whether
180 180 * caddr_t is 64 bits or 32 bits.
181 181 * [21..0] size of segment.
182 182 *
183 183 * NOTE: Don't alter this structure without changing defines above and
184 184 * the tsb_miss and protection handlers.
185 185 */
186 186 typedef struct ism_map {
187 187 uintptr_t imap_seg; /* base va + sz of ISM segment */
188 188 uchar_t imap_vb_shift; /* mmu_pageshift for ism page size */
189 189 uchar_t imap_rid; /* region id for ism */
190 190 ushort_t imap_hatflags; /* primary ism page size */
191 191 uint_t imap_sz_mask; /* mmu_pagemask for ism page size */
192 192 sfmmu_t *imap_ismhat; /* hat id of dummy ISM as */
193 193 struct ism_ment *imap_ment; /* pointer to mapping list entry */
194 194 } ism_map_t;
195 195
196 196 #define ism_start(map) ((caddr_t)((map).imap_seg & \
197 197 ~ISM_SZ_MASK((map).imap_vb_shift)))
198 198 #define ism_size(map) ((map).imap_seg & ISM_SZ_MASK((map).imap_vb_shift))
199 199 #define ism_end(map) ((caddr_t)(ism_start(map) + (ism_size(map) * \
200 200 ISM_PG_SIZE((map).imap_vb_shift))))
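To make the packing concrete, here is a minimal sketch (with illustrative
values, not code from this file) of how a 4M-page ISM segment would be
encoded into an ism_map_t and decoded with the macros above:

    ism_map_t map;
    caddr_t base = (caddr_t)0x100000000UL;  /* must be 4M aligned */
    size_t npgs = 16;                       /* 16 x 4M = a 64M segment */

    map.imap_vb_shift = MMU_PAGESHIFT4M;    /* 22: low 22 bits hold size */
    ASSERT(ISM_ALIGNED(map.imap_vb_shift, base));
    map.imap_seg = (uintptr_t)base | npgs;  /* base va + size, packed */

    ASSERT(ism_start(map) == base);
    ASSERT(ism_size(map) == npgs);
    ASSERT(ism_end(map) == base + npgs * ISM_PG_SIZE(map.imap_vb_shift));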
201 201 /*
202 202 * ISM mapping entry. Used to link all hat's sharing a ism_hat.
203 203 * Same function as the p_mapping list for a page.
204 204 */
205 205 typedef struct ism_ment {
206 206 sfmmu_t *iment_hat; /* back pointer to hat_share() hat */
207 207 caddr_t iment_base_va; /* hat's va base for this ism seg */
208 208 struct ism_ment *iment_next; /* next ism map entry */
209 209 struct ism_ment *iment_prev; /* prev ism map entry */
210 210 } ism_ment_t;
211 211
212 212 /*
213 213 * ISM segment block. One will be hung off the sfmmu structure if a
214 214 * process uses ISM. More will be linked using ismblk_next if more
215 215 * than ISM_MAP_SLOTS segments are attached to this proc.
216 216 *
217 217 * All modifications to fields in this structure will be protected
218 218 * by the hat mutex. In order to avoid grabbing this lock in low level
219 219 * routines (tsb miss/protection handlers and vatopfn) while not
220 220 * introducing any race conditions with hat_unshare, we will set
221 221 * CTX_ISM_BUSY bit in the ctx struct. Any mmu traps that occur
222 222 * for this ctx while this bit is set will be handled in sfmmu_tsb_exception
223 223 * where it will synchronize behind the hat mutex.
224 224 */
225 225 typedef struct ism_blk {
226 226 ism_map_t iblk_maps[ISM_MAP_SLOTS];
227 227 struct ism_blk *iblk_next;
228 228 uint64_t iblk_nextpa;
229 229 } ism_blk_t;
230 230
231 231 /*
232 232 * TSB access information. All fields are protected by the process's
233 233 * hat lock.
234 234 */
235 235
236 236 struct tsb_info {
237 237 caddr_t tsb_va; /* tsb base virtual address */
238 238 uint64_t tsb_pa; /* tsb base physical address */
239 239 struct tsb_info *tsb_next; /* next tsb used by this process */
240 240 uint16_t tsb_szc; /* tsb size code */
241 241 uint16_t tsb_flags; /* flags for this tsb; see below */
242 242 uint_t tsb_ttesz_mask; /* page size masks; see below */
243 243
244 244 tte_t tsb_tte; /* tte to lock into DTLB */
245 245 sfmmu_t *tsb_sfmmu; /* sfmmu */
246 246 kmem_cache_t *tsb_cache; /* cache from which mem allocated */
247 247 vmem_t *tsb_vmp; /* vmem arena from which mem alloc'd */
248 248 };
249 249
250 250 /*
251 251 * Values for "tsb_ttesz_mask" bitmask.
252 252 */
253 253 #define TSB8K (1 << TTE8K)
254 254 #define TSB64K (1 << TTE64K)
255 255 #define TSB512K (1 << TTE512K)
256 256 #define TSB4M (1 << TTE4M)
257 257 #define TSB32M (1 << TTE32M)
258 258 #define TSB256M (1 << TTE256M)
259 259
260 260 /*
261 261 * Values for "tsb_flags" field.
262 262 */
263 263 #define TSB_RELOC_FLAG 0x1
264 264 #define TSB_FLUSH_NEEDED 0x2
265 265 #define TSB_SWAPPED 0x4
266 266 #define TSB_SHAREDCTX 0x8
267 267
268 268 #endif /* !_ASM */
269 269
270 270 /*
271 271 * Data structures for shared hmeblk support.
272 272 */
273 273
274 274 /*
275 275 * Do not increase the maximum number of ism/hme regions without checking first
276 276 * the impact on ism_map_t, TSB miss area, hblk tag and region id type in
277 277 * sf_region structure.
278 278 * Initially, shared hmes will only be used for the main text segment,
279 279 * so this value is set to 64; it will be increased when shared
280 280 * libraries are included.
281 281 */
282 282
283 283 #define SFMMU_MAX_HME_REGIONS (64)
284 284 #define SFMMU_HMERGNMAP_WORDS BT_BITOUL(SFMMU_MAX_HME_REGIONS)
285 285
286 286 #define SFMMU_PRIVATE 0
287 287 #define SFMMU_SHARED 1
288 288
289 289 #define HMEBLK_ENDPA 1
290 290
291 291 #ifndef _ASM
292 292
293 293 #define SFMMU_MAX_ISM_REGIONS (64)
294 294 #define SFMMU_ISMRGNMAP_WORDS BT_BITOUL(SFMMU_MAX_ISM_REGIONS)
295 295
296 296 #define SFMMU_RGNMAP_WORDS (SFMMU_HMERGNMAP_WORDS + SFMMU_ISMRGNMAP_WORDS)
297 297
298 298 #define SFMMU_MAX_REGION_BUCKETS (128)
299 299 #define SFMMU_MAX_SRD_BUCKETS (2048)
300 300
301 301 typedef struct sf_hmeregion_map {
302 302 ulong_t bitmap[SFMMU_HMERGNMAP_WORDS];
303 303 } sf_hmeregion_map_t;
304 304
305 305 typedef struct sf_ismregion_map {
306 306 ulong_t bitmap[SFMMU_ISMRGNMAP_WORDS];
307 307 } sf_ismregion_map_t;
308 308
309 309 typedef union sf_region_map_u {
310 310 struct _h_rmap_s {
311 311 sf_hmeregion_map_t hmeregion_map;
312 312 sf_ismregion_map_t ismregion_map;
313 313 } h_rmap_s;
314 314 ulong_t bitmap[SFMMU_RGNMAP_WORDS];
315 315 } sf_region_map_t;
316 316
317 317 #define SF_RGNMAP_ZERO(map) { \
318 318 int _i; \
319 319 for (_i = 0; _i < SFMMU_RGNMAP_WORDS; _i++) { \
320 320 (map).bitmap[_i] = 0; \
321 321 } \
322 322 }
323 323
324 324 /*
325 325 * Returns 1 if map1 and map2 are equal.
326 326 */
327 327 #define SF_RGNMAP_EQUAL(map1, map2, rval) { \
328 328 int _i; \
329 329 for (_i = 0; _i < SFMMU_RGNMAP_WORDS; _i++) { \
330 330 if ((map1)->bitmap[_i] != (map2)->bitmap[_i]) \
331 331 break; \
332 332 } \
333 333 if (_i < SFMMU_RGNMAP_WORDS) \
334 334 rval = 0; \
335 335 else \
336 336 rval = 1; \
337 337 }
338 338
339 339 #define SF_RGNMAP_ADD(map, r) BT_SET((map).bitmap, r)
340 340 #define SF_RGNMAP_DEL(map, r) BT_CLEAR((map).bitmap, r)
341 341 #define SF_RGNMAP_TEST(map, r) BT_TEST((map).bitmap, r)
342 342
343 343 /*
344 344 * Tests whether map2 is a subset of map1, returns 1 if
345 345 * this assertion is true.
346 346 */
347 347 #define SF_RGNMAP_IS_SUBSET(map1, map2, rval) { \
348 348 int _i; \
349 349 for (_i = 0; _i < SFMMU_RGNMAP_WORDS; _i++) { \
350 350 if (((map1)->bitmap[_i] & (map2)->bitmap[_i]) \
351 351 != (map2)->bitmap[_i]) { \
352 352 break; \
353 353 } \
354 354 } \
355 355 if (_i < SFMMU_RGNMAP_WORDS) \
356 356 rval = 0; \
357 357 else \
358 358 rval = 1; \
359 359 }
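A minimal usage sketch of the region-map macros above (illustrative only;
the rids and maps are hypothetical):

    sf_region_map_t a, b;
    int eq, sub;

    SF_RGNMAP_ZERO(a);
    SF_RGNMAP_ZERO(b);
    SF_RGNMAP_ADD(a, 5);                    /* a = {5} */
    SF_RGNMAP_ADD(a, 9);                    /* a = {5, 9} */
    SF_RGNMAP_ADD(b, 5);                    /* b = {5} */
    SF_RGNMAP_IS_SUBSET(&a, &b, sub);       /* sub == 1: b subset of a */
    SF_RGNMAP_EQUAL(&a, &b, eq);            /* eq == 0: maps differ */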
360 360
361 361 #define SF_SCD_INCR_REF(scdp) { \
362 362 atomic_inc_32((volatile uint32_t *)&(scdp)->scd_refcnt); \
363 363 }
364 364
365 365 #define SF_SCD_DECR_REF(srdp, scdp) { \
366 366 sf_region_map_t _scd_rmap = (scdp)->scd_region_map; \
367 367 if (!atomic_dec_32_nv((volatile uint32_t *)&(scdp)->scd_refcnt)) {\
368 368 sfmmu_destroy_scd((srdp), (scdp), &_scd_rmap); \
369 369 } \
370 370 }
371 371
372 372 /*
373 373 * A sfmmup link in the link list of sfmmups that share the same region.
374 374 */
375 375 typedef struct sf_rgn_link {
376 376 sfmmu_t *next;
377 377 sfmmu_t *prev;
378 378 } sf_rgn_link_t;
379 379
380 380 /*
381 381 * rgn_flags values.
382 382 */
383 383 #define SFMMU_REGION_HME 0x1
384 384 #define SFMMU_REGION_ISM 0x2
385 385 #define SFMMU_REGION_FREE 0x8
386 386
387 387 #define SFMMU_REGION_TYPE_MASK (0x3)
388 388
389 389 /*
390 390 * sf_region defines a text or (D)ISM segment which maps
391 391 * the same underlying physical object.
392 392 */
393 393 typedef struct sf_region {
394 394 caddr_t rgn_saddr; /* base addr of attached seg */
395 395 size_t rgn_size; /* size of attached seg */
396 396 void *rgn_obj; /* the underlying object id */
397 397 u_offset_t rgn_objoff; /* offset in the object mapped */
398 398 uchar_t rgn_perm; /* PROT_READ/WRITE/EXEC */
399 399 uchar_t rgn_pgszc; /* page size of the region */
400 400 uchar_t rgn_flags; /* region type, free flag */
401 401 uchar_t rgn_id;
402 402 int rgn_refcnt; /* # of hats sharing the region */
403 403 /* callback function for hat_unload_callback */
404 404 hat_rgn_cb_func_t rgn_cb_function;
405 405 struct sf_region *rgn_hash; /* hash chain linking the rgns */
406 406 kmutex_t rgn_mutex; /* protect region sfmmu list */
407 407 /* A link list of processes attached to this region */
408 408 sfmmu_t *rgn_sfmmu_head;
409 409 ulong_t rgn_ttecnt[MMU_PAGE_SIZES];
410 410 uint16_t rgn_hmeflags; /* rgn tte size flags */
411 411 } sf_region_t;
412 412
413 413 #define rgn_next rgn_hash
414 414
415 415 /* srd */
416 416 typedef struct sf_shared_region_domain {
417 417 vnode_t *srd_evp; /* executable vnode */
418 418 /* hme region table */
419 419 sf_region_t *srd_hmergnp[SFMMU_MAX_HME_REGIONS];
420 420 /* ism region table */
421 421 sf_region_t *srd_ismrgnp[SFMMU_MAX_ISM_REGIONS];
422 422 /* hash chain linking srds */
423 423 struct sf_shared_region_domain *srd_hash;
424 424 /* pointer to the next free hme region */
425 425 sf_region_t *srd_hmergnfree;
426 426 /* pointer to the next free ism region */
427 427 sf_region_t *srd_ismrgnfree;
428 428 /* id of next ism region created */
429 429 uint16_t srd_next_ismrid;
430 430 /* id of next hme region created */
431 431 uint16_t srd_next_hmerid;
432 432 uint16_t srd_ismbusyrgns; /* # of ism rgns in use */
433 433 uint16_t srd_hmebusyrgns; /* # of hme rgns in use */
434 434 int srd_refcnt; /* # of procs in the srd */
435 435 kmutex_t srd_mutex; /* sync add/remove rgns */
436 436 kmutex_t srd_scd_mutex;
437 437 sf_scd_t *srd_scdp; /* list of scds in srd */
438 438 /* hash of regions associated with the same executable */
439 439 sf_region_t *srd_rgnhash[SFMMU_MAX_REGION_BUCKETS];
440 440 } sf_srd_t;
441 441
442 442 typedef struct sf_srd_bucket {
443 443 kmutex_t srdb_lock;
444 444 sf_srd_t *srdb_srdp;
445 445 } sf_srd_bucket_t;
446 446
447 447 /*
448 448 * The value of SFMMU_L1_HMERLINKS and SFMMU_L2_HMERLINKS will be increased
449 449 * to 16 when the use of shared hmes for shared libraries is enabled.
450 450 */
451 451
452 452 #define SFMMU_L1_HMERLINKS (8)
453 453 #define SFMMU_L2_HMERLINKS (8)
454 454 #define SFMMU_L1_HMERLINKS_SHIFT (3)
455 455 #define SFMMU_L1_HMERLINKS_MASK (SFMMU_L1_HMERLINKS - 1)
456 456 #define SFMMU_L2_HMERLINKS_MASK (SFMMU_L2_HMERLINKS - 1)
457 457 #define SFMMU_L1_HMERLINKS_SIZE \
458 458 (SFMMU_L1_HMERLINKS * sizeof (sf_rgn_link_t *))
459 459 #define SFMMU_L2_HMERLINKS_SIZE \
460 460 (SFMMU_L2_HMERLINKS * sizeof (sf_rgn_link_t))
461 461
462 462 #if (SFMMU_L1_HMERLINKS * SFMMU_L2_HMERLINKS < SFMMU_MAX_HME_REGIONS)
463 463 #error Not Enough HMERLINKS
464 464 #endif
465 465
466 466 /*
467 467 * This macro grabs the hat lock and allocates the level 2 hat chain
468 468 * associated with a shme rgn. In the majority of cases, the macro
469 469 * is called with alloc = 0, and lock = 0.
470 470 * A pointer to the level 2 sf_rgn_link_t structure is returned in the lnkp
471 471 * parameter.
472 472 */
473 473 #define SFMMU_HMERID2RLINKP(sfmmup, rid, lnkp, alloc, lock) \
474 474 { \
475 475 int _l1ix = ((rid) >> SFMMU_L1_HMERLINKS_SHIFT) & \
476 476 SFMMU_L1_HMERLINKS_MASK; \
477 477 int _l2ix = ((rid) & SFMMU_L2_HMERLINKS_MASK); \
478 478 hatlock_t *_hatlockp; \
479 479 lnkp = (sfmmup)->sfmmu_hmeregion_links[_l1ix]; \
480 480 if (lnkp != NULL) { \
481 481 lnkp = &lnkp[_l2ix]; \
482 482 } else if (alloc && lock) { \
483 483 lnkp = kmem_zalloc(SFMMU_L2_HMERLINKS_SIZE, KM_SLEEP); \
484 484 _hatlockp = sfmmu_hat_enter(sfmmup); \
485 485 if ((sfmmup)->sfmmu_hmeregion_links[_l1ix] != NULL) { \
486 486 sfmmu_hat_exit(_hatlockp); \
487 487 kmem_free(lnkp, SFMMU_L2_HMERLINKS_SIZE); \
488 488 lnkp = (sfmmup)->sfmmu_hmeregion_links[_l1ix]; \
489 489 ASSERT(lnkp != NULL); \
490 490 } else { \
491 491 (sfmmup)->sfmmu_hmeregion_links[_l1ix] = lnkp; \
492 492 sfmmu_hat_exit(_hatlockp); \
493 493 } \
494 494 lnkp = &lnkp[_l2ix]; \
495 495 } else if (alloc) { \
496 496 lnkp = kmem_zalloc(SFMMU_L2_HMERLINKS_SIZE, KM_SLEEP); \
497 497 ASSERT((sfmmup)->sfmmu_hmeregion_links[_l1ix] == NULL); \
498 498 (sfmmup)->sfmmu_hmeregion_links[_l1ix] = lnkp; \
499 499 lnkp = &lnkp[_l2ix]; \
500 500 } \
501 501 }
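A sketch of the common lookup-only case (alloc = 0, lock = 0) noted above;
sfmmup and rid are assumed to be in scope:

    sf_rgn_link_t *lnkp;

    SFMMU_HMERID2RLINKP(sfmmup, rid, lnkp, 0, 0);
    if (lnkp == NULL) {
            /* no level 2 chain has been allocated for this rid yet */
    }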
502 502
503 503 /*
504 504 * Per cpu pending freelist of hmeblks.
505 505 */
506 506 typedef struct cpu_hme_pend {
507 507 struct hme_blk *chp_listp;
508 508 kmutex_t chp_mutex;
509 509 time_t chp_timestamp;
510 510 uint_t chp_count;
511 511 uint8_t chp_pad[36]; /* pad to 64 bytes */
512 512 } cpu_hme_pend_t;
513 513
514 514 /*
515 515 * The default value of the threshold for the per cpu pending queues of hmeblks.
516 516 * The queues are flushed if either the number of hmeblks on the queue is above
517 517 * the threshold, or one second has elapsed since the last flush.
518 518 */
519 519 #define CPU_HME_PEND_THRESH 1000
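A hedged sketch of the flush policy just described; the helper name is
hypothetical (the real check lives in the hmeblk free path):

    static int
    chp_flush_due(cpu_hme_pend_t *chp, time_t now)
    {
            /* flush if over threshold or a second since the last flush */
            return (chp->chp_count > CPU_HME_PEND_THRESH ||
                now - chp->chp_timestamp >= 1);
    }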
520 520
521 521 /*
522 522 * Per-MMU context domain kstats.
523 523 *
524 524 * TSB Miss Exceptions
525 525 * Number of times a TSB miss exception is handled in an MMU. See
526 526 * sfmmu_tsbmiss_exception() for more details.
527 527 * TSB Raise Exception
528 528 * Number of times the CPUs within an MMU are cross-called
529 529 * to invalidate either a specific process context (when the process
530 530 * switches MMU contexts) or the context of any process that is
531 531 * running on those CPUs (as part of the MMU context wrap-around).
532 532 * Wrap Around
533 533 * The number of times a wrap-around of MMU context happens.
534 534 */
535 535 typedef enum mmu_ctx_stat_types {
536 536 MMU_CTX_TSB_EXCEPTIONS, /* TSB miss exceptions handled */
537 537 MMU_CTX_TSB_RAISE_EXCEPTION, /* ctx invalidation cross calls */
538 538 MMU_CTX_WRAP_AROUND, /* wraparounds */
539 539 MMU_CTX_NUM_STATS
540 540 } mmu_ctx_stat_t;
541 541
542 542 /*
543 543 * Per-MMU context domain structure. This is instantiated the first time a CPU
544 544 * belonging to the MMU context domain is configured into the system, at boot
545 545 * time or at DR time.
546 546 *
547 547 * mmu_gnum
548 548 * The current generation number for the context IDs on this MMU context
549 549 * domain. It is protected by mmu_lock.
550 550 * mmu_cnum
551 551 * The current cnum to be allocated on this MMU context domain. It
552 552 * is protected via CAS.
553 553 * mmu_nctxs
554 554 * The max number of context IDs supported on every CPU in this
555 555 * MMU context domain. This is needed here in case the system supports
556 556 * mixed type of processors/MMUs. It also helps to make ctx switch code
557 557 * access fewer cache lines i.e. no need to retrieve it from some global
558 558 * nctxs.
559 559 * mmu_lock
560 560 * The mutex spin lock used to serialize context ID wrap around
561 561 * mmu_idx
562 562 * The index for this MMU context domain structure in the global array
563 563 * mmu_ctxdoms.
564 564 * mmu_ncpus
565 565 * The actual number of CPUs that have been configured in this
566 566 * MMU context domain. This also acts as a reference count for the
567 567 * structure. When the last CPU in an MMU context domain is unconfigured,
568 568 * the structure is freed. It is protected by mmu_lock.
569 569 * mmu_cpuset
570 570 * The CPU set of configured CPUs for this MMU context domain. Used
571 571 * to cross-call all the CPUs in the MMU context domain to invalidate
572 572 * context IDs during a wraparound operation. It is protected by mmu_lock.
573 573 */
574 574
575 575 typedef struct mmu_ctx {
576 576 uint64_t mmu_gnum;
577 577 uint_t mmu_cnum;
578 578 uint_t mmu_nctxs;
579 579 kmutex_t mmu_lock;
580 580 uint_t mmu_idx;
581 581 uint_t mmu_ncpus;
582 582 cpuset_t mmu_cpuset;
583 583 kstat_t *mmu_kstat;
584 584 kstat_named_t mmu_kstat_data[MMU_CTX_NUM_STATS];
585 585 } mmu_ctx_t;
586 586
587 587 #define mmu_tsb_exceptions \
588 588 mmu_kstat_data[MMU_CTX_TSB_EXCEPTIONS].value.ui64
589 589 #define mmu_tsb_raise_exception \
590 590 mmu_kstat_data[MMU_CTX_TSB_RAISE_EXCEPTION].value.ui64
591 591 #define mmu_wrap_around \
592 592 mmu_kstat_data[MMU_CTX_WRAP_AROUND].value.ui64
593 593
594 594 extern uint_t max_mmu_ctxdoms;
595 595 extern mmu_ctx_t **mmu_ctxs_tbl;
596 596
597 597 extern void sfmmu_cpu_init(cpu_t *);
598 598 extern void sfmmu_cpu_cleanup(cpu_t *);
599 599
600 600 extern uint_t sfmmu_ctxdom_nctxs(int);
601 601
602 602 #ifdef sun4v
603 603 extern void sfmmu_ctxdoms_remove(void);
604 604 extern void sfmmu_ctxdoms_lock(void);
605 605 extern void sfmmu_ctxdoms_unlock(void);
606 606 extern void sfmmu_ctxdoms_update(void);
607 607 #endif
608 608
609 609 /*
610 610 * The following structure is used to get MMU context domain information for
611 611 * a CPU from the platform.
612 612 *
613 613 * mmu_idx
614 614 * The MMU context domain index within the global array mmu_ctxs
615 615 * mmu_nctxs
616 616 * The number of context IDs supported in the MMU context domain
617 617 */
618 618 typedef struct mmu_ctx_info {
619 619 uint_t mmu_idx;
620 620 uint_t mmu_nctxs;
621 621 } mmu_ctx_info_t;
622 622
623 623 #pragma weak plat_cpuid_to_mmu_ctx_info
624 624
625 625 extern void plat_cpuid_to_mmu_ctx_info(processorid_t, mmu_ctx_info_t *);
626 626
627 627 /*
628 628 * Each address space has an array of sfmmu_ctx_t structures, one structure
629 629 * per MMU context domain.
630 630 *
631 631 * cnum
632 632 * The context ID allocated for an address space on an MMU context domain
633 633 * gnum
634 634 * The generation number for the context ID in the MMU context domain.
635 635 *
636 636 * This structure needs to be a power-of-two in size.
637 637 */
638 638 typedef struct sfmmu_ctx {
639 639 uint64_t gnum:48;
640 640 uint64_t cnum:16;
641 641 } sfmmu_ctx_t;
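The power-of-two size requirement could be asserted at build time; a sketch
using ISP2 from <sys/sysmacros.h>, shown for illustration only:

    CTASSERT(ISP2(sizeof (sfmmu_ctx_t)));   /* currently 8 bytes */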
642 642
643 643
644 644 /*
645 645 * The platform dependent hat structure.
646 646 * tte counts should be protected by cas.
647 647 * cpuset is protected by cas.
648 648 *
649 649 * ttecnt accounting for mappings which do not use shared hme is carried out
650 650 * during pagefault handling. In the shared hme case, only the first process
651 651 * to access a mapping generates a pagefault; subsequent processes simply
652 652 * find the shared hme entry during trap handling and therefore there is no
653 653 * corresponding event to initiate ttecnt accounting. Currently, as shared
654 654 * hmes are only used for text segments, when joining a region we assume the
655 655 * worst case and add the number of ttes required to map the entire region
656 656 * to the ttecnt corresponding to the region pagesize. However, if the region
657 657 * has a 4M pagesize and memory is low, the allocation of 4M pages may fail;
658 658 * 8K pages will then be allocated instead and the first TSB which stores 8K
659 659 * mappings will potentially be undersized. To compensate for the potential
660 660 * underaccounting in this case we always add 1/4 of the region size to the 8K
661 661 * ttecnt.
662 - *
663 - * Note that sfmmu_xhat_provider MUST be the first element.
664 662 */
665 663
666 664 struct hat {
667 - void *sfmmu_xhat_provider; /* NULL for CPU hat */
668 665 cpuset_t sfmmu_cpusran; /* cpu bit mask for efficient xcalls */
669 666 struct as *sfmmu_as; /* as this hat provides mapping for */
670 667 /* per pgsz private ttecnt + shme rgns ttecnt for rgns not in SCD */
671 668 ulong_t sfmmu_ttecnt[MMU_PAGE_SIZES];
672 669 /* shme rgns ttecnt for rgns in SCD */
673 670 ulong_t sfmmu_scdrttecnt[MMU_PAGE_SIZES];
674 671 /* est. ism ttes that are NOT in a SCD */
675 672 ulong_t sfmmu_ismttecnt[MMU_PAGE_SIZES];
676 673 /* ttecnt for isms that are in a SCD */
677 674 ulong_t sfmmu_scdismttecnt[MMU_PAGE_SIZES];
678 675 /* inflate tsb0 to allow for large page alloc failure in region */
679 676 ulong_t sfmmu_tsb0_4minflcnt;
680 677 union _h_un {
681 678 ism_blk_t *sfmmu_iblkp; /* maps to ismhat(s) */
682 679 ism_ment_t *sfmmu_imentp; /* ism hat's mapping list */
683 680 } h_un;
684 681 uint_t sfmmu_free:1; /* hat to be freed - set on as_free */
685 682 uint_t sfmmu_ismhat:1; /* hat is dummy ism hatid */
686 683 uint_t sfmmu_scdhat:1; /* hat is dummy scd hatid */
687 684 uchar_t sfmmu_rmstat; /* refmod stats refcnt */
688 685 ushort_t sfmmu_clrstart; /* start color bin for page coloring */
689 686 ushort_t sfmmu_clrbin; /* per as phys page coloring bin */
690 687 ushort_t sfmmu_flags; /* flags */
691 688 uchar_t sfmmu_tteflags; /* pgsz flags */
692 689 uchar_t sfmmu_rtteflags; /* pgsz flags for SRD hmes */
693 690 struct tsb_info *sfmmu_tsb; /* list of per as tsbs */
694 691 uint64_t sfmmu_ismblkpa; /* pa of sfmmu_iblkp, or -1 */
695 692 lock_t sfmmu_ctx_lock; /* sync ctx alloc and invalidation */
696 693 kcondvar_t sfmmu_tsb_cv; /* signals TSB swapin or relocation */
697 694 uchar_t sfmmu_cext; /* context page size encoding */
698 695 uint8_t sfmmu_pgsz[MMU_PAGE_SIZES]; /* ranking for MMU */
699 696 sf_srd_t *sfmmu_srdp;
700 697 sf_scd_t *sfmmu_scdp; /* scd this address space belongs to */
701 698 sf_region_map_t sfmmu_region_map;
702 699 sf_rgn_link_t *sfmmu_hmeregion_links[SFMMU_L1_HMERLINKS];
703 700 sf_rgn_link_t sfmmu_scd_link; /* link to scd or pending queue */
704 701 #ifdef sun4v
705 702 struct hv_tsb_block sfmmu_hvblock;
706 703 #endif
707 704 /*
708 705 * sfmmu_ctxs is a variable length array of max_mmu_ctxdoms # of
709 706 * elements. max_mmu_ctxdoms is determined at run-time.
710 707 * sfmmu_ctxs[1] is just the first element of an array; it always
711 708 * has to be the last field to ensure that the memory allocated
712 709 * for sfmmu_ctxs is consecutive with the memory of the rest of
713 710 * the hat data structure.
714 711 */
715 712 sfmmu_ctx_t sfmmu_ctxs[1];
716 713
717 714 };
718 715
719 716 #define sfmmu_iblk h_un.sfmmu_iblkp
720 717 #define sfmmu_iment h_un.sfmmu_imentp
721 718
722 719 #define sfmmu_hmeregion_map sfmmu_region_map.h_rmap_s.hmeregion_map
723 720 #define sfmmu_ismregion_map sfmmu_region_map.h_rmap_s.ismregion_map
724 721
725 722 #define SF_RGNMAP_ISNULL(sfmmup) \
726 723 (sfrgnmap_isnull(&(sfmmup)->sfmmu_region_map))
727 724 #define SF_HMERGNMAP_ISNULL(sfmmup) \
728 725 (sfhmergnmap_isnull(&(sfmmup)->sfmmu_hmeregion_map))
729 726
730 727 struct sf_scd {
731 728 sfmmu_t *scd_sfmmup; /* shared context hat */
732 729 /* per pgsz ttecnt for shme rgns in SCD */
733 730 ulong_t scd_rttecnt[MMU_PAGE_SIZES];
734 731 uint_t scd_refcnt; /* address spaces attached to scd */
735 732 sf_region_map_t scd_region_map; /* bit mask of attached segments */
736 733 sf_scd_t *scd_next; /* link pointers for srd_scd list */
737 734 sf_scd_t *scd_prev;
738 735 sfmmu_t *scd_sf_list; /* list of doubly linked hat structs */
739 736 kmutex_t scd_mutex;
740 737 /*
741 738 * Link used to add an scd to the sfmmu_iment list.
742 739 */
743 740 ism_ment_t scd_ism_links[SFMMU_MAX_ISM_REGIONS];
744 741 };
745 742
746 743 #define scd_hmeregion_map scd_region_map.h_rmap_s.hmeregion_map
747 744 #define scd_ismregion_map scd_region_map.h_rmap_s.ismregion_map
748 745
749 746 extern int disable_shctx;
750 747 extern int shctx_on;
751 748
752 749 /*
753 750 * bit mask for managing vac conflicts on large pages.
754 751 * bit 1 is for uncache flag.
755 752 * bits 2 through min(num of cache colors + 1,31) are
756 753 * for cache colors that have already been flushed.
757 754 */
758 755 #ifdef VAC
759 756 #define CACHE_NUM_COLOR (shm_alignment >> MMU_PAGESHIFT)
760 757 #else
761 758 #define CACHE_NUM_COLOR 1
762 759 #endif
763 760
764 761 #define CACHE_VCOLOR_MASK(vcolor) (2 << (vcolor & (CACHE_NUM_COLOR - 1)))
765 762
766 763 #define CacheColor_IsFlushed(flag, vcolor) \
767 764 ((flag) & CACHE_VCOLOR_MASK(vcolor))
768 765
769 766 #define CacheColor_SetFlushed(flag, vcolor) \
770 767 ((flag) |= CACHE_VCOLOR_MASK(vcolor))
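An illustrative fragment tying the pieces above together: flush each large
page color at most once, recording progress in the flag word (the variables
are hypothetical):

    uint_t flags = 0;
    int vcolor = 3;                         /* some virtual color */

    if (!CacheColor_IsFlushed(flags, vcolor)) {
            /* ... flush this color from the VAC ... */
            CacheColor_SetFlushed(flags, vcolor);
    }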
771 768 /*
772 769 * Flags passed to sfmmu_page_cache to flush page from vac or not.
773 770 */
774 771 #define CACHE_FLUSH 0
775 772 #define CACHE_NO_FLUSH 1
776 773
777 774 /*
778 775 * Flags passed to sfmmu_tlbcache_demap
779 776 */
780 777 #define FLUSH_NECESSARY_CPUS 0
781 778 #define FLUSH_ALL_CPUS 1
782 779
783 780 #ifdef DEBUG
784 781 /*
785 782 * For debugging purpose only. Maybe removed later.
786 783 */
787 784 struct ctx_trace {
788 785 sfmmu_t *sc_sfmmu_stolen;
789 786 sfmmu_t *sc_sfmmu_stealing;
790 787 clock_t sc_time;
791 788 ushort_t sc_type;
792 789 ushort_t sc_cnum;
793 790 };
794 791 #define CTX_TRC_STEAL 0x1
795 792 #define CTX_TRC_FREE 0x0
796 793 #define TRSIZE 0x400
797 794 #define NEXT_CTXTR(ptr) (((ptr) >= ctx_trace_last) ? \
798 795 ctx_trace_first : ((ptr) + 1))
799 796 #define TRACE_CTXS(mutex, ptr, cnum, stolen_sfmmu, stealing_sfmmu, type) \
800 797 mutex_enter(mutex); \
801 798 (ptr)->sc_sfmmu_stolen = (stolen_sfmmu); \
802 799 (ptr)->sc_sfmmu_stealing = (stealing_sfmmu); \
803 800 (ptr)->sc_cnum = (cnum); \
804 801 (ptr)->sc_type = (type); \
805 802 (ptr)->sc_time = ddi_get_lbolt(); \
806 803 (ptr) = NEXT_CTXTR(ptr); \
807 804 num_ctx_stolen += (type); \
808 805 mutex_exit(mutex);
809 806 #else
810 807
811 808 #define TRACE_CTXS(mutex, ptr, cnum, stolen_sfmmu, stealing_sfmmu, type)
812 809
813 810 #endif /* DEBUG */
814 811
815 812 #endif /* !_ASM */
816 813
817 814 /*
818 815 * Macros for sfmmup->sfmmu_flags access. The macros that change the flags
819 816 * ASSERT() that we're holding the HAT lock before changing the flags;
820 817 * however callers that read the flags may do so without acquiring the lock
821 818 * in a fast path, and then recheck the flag after acquiring the lock in
822 819 * a slow path.
823 820 */
824 821 #define SFMMU_FLAGS_ISSET(sfmmup, flags) \
825 822 (((sfmmup)->sfmmu_flags & (flags)) == (flags))
826 823
827 824 #define SFMMU_FLAGS_CLEAR(sfmmup, flags) \
828 825 (ASSERT(sfmmu_hat_lock_held((sfmmup))), \
829 826 (sfmmup)->sfmmu_flags &= ~(flags))
830 827
831 828 #define SFMMU_FLAGS_SET(sfmmup, flags) \
832 829 (ASSERT(sfmmu_hat_lock_held((sfmmup))), \
833 830 (sfmmup)->sfmmu_flags |= (flags))
834 831
835 832 #define SFMMU_TTEFLAGS_ISSET(sfmmup, flags) \
836 833 ((((sfmmup)->sfmmu_tteflags | (sfmmup)->sfmmu_rtteflags) & (flags)) == \
837 834 (flags))
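A sketch of the fast-path pattern the comment describes: peek at a flag
without the lock, then recheck under the HAT lock before acting (HAT_SWAPPED,
defined below, is just an example flag):

    if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {           /* unlocked peek */
            hatlock_t *hatlockp = sfmmu_hat_enter(sfmmup);
            if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
                    /* slow path: flag confirmed under the lock */
            }
            sfmmu_hat_exit(hatlockp);
    }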
838 835
839 836
840 837 /*
841 838 * sfmmu tte HAT flags, must fit in 8 bits
842 839 */
843 840 #define HAT_CHKCTX1_FLAG 0x1
844 841 #define HAT_64K_FLAG (0x1 << TTE64K)
845 842 #define HAT_512K_FLAG (0x1 << TTE512K)
846 843 #define HAT_4M_FLAG (0x1 << TTE4M)
847 844 #define HAT_32M_FLAG (0x1 << TTE32M)
848 845 #define HAT_256M_FLAG (0x1 << TTE256M)
849 846
850 847 /*
851 848 * sfmmu HAT flags, 16 bits at the moment.
852 849 */
853 850 #define HAT_4MTEXT_FLAG 0x01
854 851 #define HAT_32M_ISM 0x02
855 852 #define HAT_256M_ISM 0x04
856 853 #define HAT_SWAPPED 0x08 /* swapped out */
857 854 #define HAT_SWAPIN 0x10 /* swapping in */
858 855 #define HAT_BUSY 0x20 /* replacing TSB(s) */
859 856 #define HAT_ISMBUSY 0x40 /* adding/removing/traversing ISM maps */
860 857
861 858 #define HAT_CTX1_FLAG 0x100 /* ISM imap hatflag for ctx1 */
862 859 #define HAT_JOIN_SCD 0x200 /* region is joining scd */
863 860 #define HAT_ALLCTX_INVALID 0x400 /* all per-MMU ctxs are invalidated */
864 861
865 862 #define SFMMU_LGPGS_INUSE(sfmmup) \
866 863 (((sfmmup)->sfmmu_tteflags | (sfmmup)->sfmmu_rtteflags) || \
867 864 ((sfmmup)->sfmmu_iblk != NULL))
868 865
869 866 /*
870 867 * Starting with context 0, the first NUM_LOCKED_CTXS contexts
871 868 * are locked so that sfmmu_getctx can't steal any of these
872 869 * contexts. At the time this software was being developed, the
873 870 * only context that needs to be locked is context 0 (the kernel
874 871 * context), and context 1 (reserved for stolen context). So this constant
875 872 * was originally defined to be 2.
876 873 *
877 874 * For sun4v only, USER_CONTEXT_TYPE represents any user context. Many
878 875 * routines only care whether the context is kernel, invalid or user.
879 876 */
880 877
881 878 #define NUM_LOCKED_CTXS 2
882 879 #define INVALID_CONTEXT 1
883 880
884 881 #ifdef sun4v
885 882 #define USER_CONTEXT_TYPE NUM_LOCKED_CTXS
886 883 #endif
887 884 #if defined(sun4v) || defined(UTSB_PHYS)
888 885 /*
889 886 * Get the location in the 4MB base TSB of the tsbe for this fault.
890 887 * Assumes that the second TSB only contains 4M mappings.
891 888 *
892 889 * In:
893 890 * tagacc = tag access register (not clobbered)
894 891 * tsbe = 2nd TSB base register
895 892 * tmp1, tmp2 = scratch registers
896 893 * Out:
897 894 * tsbe = pointer to the tsbe in the 2nd TSB
898 895 */
899 896
900 897 #define GET_4MBASE_TSBE_PTR(tagacc, tsbe, tmp1, tmp2) \
901 898 and tsbe, TSB_SOFTSZ_MASK, tmp2; /* tmp2=szc */ \
902 899 andn tsbe, TSB_SOFTSZ_MASK, tsbe; /* tsbbase */ \
903 900 mov TSB_ENTRIES(0), tmp1; /* nentries in TSB size 0 */ \
904 901 sllx tmp1, tmp2, tmp1; /* tmp1 = nentries in TSB */ \
905 902 sub tmp1, 1, tmp1; /* mask = nentries - 1 */ \
906 903 srlx tagacc, MMU_PAGESHIFT4M, tmp2; \
907 904 and tmp2, tmp1, tmp1; /* tsbent = virtpage & mask */ \
908 905 sllx tmp1, TSB_ENTRY_SHIFT, tmp1; /* entry num --> ptr */ \
909 906 add tsbe, tmp1, tsbe /* add entry offset to TSB base */
910 907
911 908 #define GET_2ND_TSBE_PTR(tagacc, tsbe, tmp1, tmp2) \
912 909 GET_4MBASE_TSBE_PTR(tagacc, tsbe, tmp1, tmp2)
913 910
914 911 /*
915 912 * Get the location in the 3rd TSB of the tsbe for this fault.
916 913 * The 3rd TSB corresponds to the shared context, and is used
917 914 * for 8K - 512k pages.
918 915 *
919 916 * In:
920 917 * tagacc = tag access register (not clobbered)
921 918 * tsbe, tmp1, tmp2 = scratch registers
922 919 * Out:
923 920 * tsbe = pointer to the tsbe in the 3rd TSB
924 921 */
925 922
926 923 #define GET_3RD_TSBE_PTR(tagacc, tsbe, tmp1, tmp2) \
927 924 and tsbe, TSB_SOFTSZ_MASK, tmp2; /* tmp2=szc */ \
928 925 andn tsbe, TSB_SOFTSZ_MASK, tsbe; /* tsbbase */ \
929 926 mov TSB_ENTRIES(0), tmp1; /* nentries in TSB size 0 */ \
930 927 sllx tmp1, tmp2, tmp1; /* tmp1 = nentries in TSB */ \
931 928 sub tmp1, 1, tmp1; /* mask = nentries - 1 */ \
932 929 srlx tagacc, MMU_PAGESHIFT, tmp2; \
933 930 and tmp2, tmp1, tmp1; /* tsbent = virtpage & mask */ \
934 931 sllx tmp1, TSB_ENTRY_SHIFT, tmp1; /* entry num --> ptr */ \
935 932 add tsbe, tmp1, tsbe /* add entry offset to TSB base */
936 933
937 934 #define GET_4TH_TSBE_PTR(tagacc, tsbe, tmp1, tmp2) \
938 935 GET_4MBASE_TSBE_PTR(tagacc, tsbe, tmp1, tmp2)
939 936 /*
940 937 * Copy the sfmmu_region_map or scd_region_map to the tsbmiss
941 938 * shmermap or scd_shmermap, from sfmmu_load_mmustate.
942 939 */
943 940 #define SET_REGION_MAP(rgn_map, tsbmiss_map, cnt, tmp, label) \
944 941 /* BEGIN CSTYLED */ \
945 942 label: ;\
946 943 ldx [rgn_map], tmp ;\
947 944 dec cnt ;\
948 945 add rgn_map, CLONGSIZE, rgn_map ;\
949 946 stx tmp, [tsbmiss_map] ;\
950 947 brnz,pt cnt, label ;\
951 948 add tsbmiss_map, CLONGSIZE, tsbmiss_map \
952 949 /* END CSTYLED */
953 950
954 951 /*
955 952 * If there is no scd, then zero the tsbmiss scd_shmermap,
956 953 * from sfmmu_load_mmustate.
957 954 */
958 955 #define ZERO_REGION_MAP(tsbmiss_map, cnt, label) \
959 956 /* BEGIN CSTYLED */ \
960 957 label: ;\
961 958 dec cnt ;\
962 959 stx %g0, [tsbmiss_map] ;\
963 960 brnz,pt cnt, label ;\
964 961 add tsbmiss_map, CLONGSIZE, tsbmiss_map
965 962 /* END CSTYLED */
966 963
967 964 /*
968 965 * Set hmemisc to 1 if the shared hme is also part of an scd.
969 966 * In:
970 967 * tsbarea = tsbmiss area (not clobbered)
971 968 * hmeblkpa = hmeblkpa + hmentoff + SFHME_TTE (not clobbered)
972 969 * hmentoff = hmentoff + SFHME_TTE = tte offset(clobbered)
973 970 * Out:
974 971 * use_shctx = 1 if shme is in scd and 0 otherwise
975 972 */
976 973 #define GET_SCDSHMERMAP(tsbarea, hmeblkpa, hmentoff, use_shctx) \
977 974 /* BEGIN CSTYLED */ \
978 975 sub hmeblkpa, hmentoff, hmentoff /* hmentoff = hmeblkpa */ ;\
979 976 add hmentoff, HMEBLK_TAG, hmentoff ;\
980 977 ldxa [hmentoff]ASI_MEM, hmentoff /* read 1st part of tag */ ;\
981 978 and hmentoff, HTAG_RID_MASK, hmentoff /* mask off rid */ ;\
982 979 and hmentoff, BT_ULMASK, use_shctx /* mask bit index */ ;\
983 980 srlx hmentoff, BT_ULSHIFT, hmentoff /* extract word */ ;\
984 981 sllx hmentoff, CLONGSHIFT, hmentoff /* index */ ;\
985 982 add tsbarea, hmentoff, hmentoff /* add to tsbarea */ ;\
986 983 ldx [hmentoff + TSBMISS_SCDSHMERMAP], hmentoff /* scdrgn */ ;\
987 984 srlx hmentoff, use_shctx, use_shctx ;\
988 985 and use_shctx, 0x1, use_shctx \
989 986 /* END CSTYLED */
990 987
991 988 /*
992 989 * Synthesize a TSB base register contents for a process.
993 990 *
994 991 * In:
995 992 * tsbinfo = TSB info pointer (ro)
996 993 * tsbreg, tmp1 = scratch registers
997 994 * Out:
998 995 * tsbreg = value to program into TSB base register
999 996 */
1000 997
1001 998 #define MAKE_UTSBREG(tsbinfo, tsbreg, tmp1) \
1002 999 ldx [tsbinfo + TSBINFO_PADDR], tsbreg; \
1003 1000 lduh [tsbinfo + TSBINFO_SZCODE], tmp1; \
1004 1001 and tmp1, TSB_SOFTSZ_MASK, tmp1; \
1005 1002 or tsbreg, tmp1, tsbreg;
1006 1003
1007 1004
1008 1005 /*
1009 1006 * Load TSB base register to TSBMISS area for private contexts.
1010 1007 * This register contains utsb_pabase in bits 63:13, and TSB size
1011 1008 * code in bits 2:0.
1012 1009 *
1013 1010 * For private context
1014 1011 * In:
1015 1012 * tsbreg = value to load (ro)
1016 1013 * regnum = constant or register
1017 1014 * tmp1 = scratch register
1018 1015 * Out:
1019 1016 * Specified scratchpad register updated
1020 1017 *
1021 1018 */
1022 1019 #define SET_UTSBREG(regnum, tsbreg, tmp1) \
1023 1020 mov regnum, tmp1; \
1024 1021 stxa tsbreg, [tmp1]ASI_SCRATCHPAD /* save tsbreg */
1025 1022 /*
1026 1023 * Get TSB base register from the scratchpad for private contexts
1027 1024 *
1028 1025 * In:
1029 1026 * regnum = constant or register
1030 1027 * tsbreg = scratch
1031 1028 * Out:
1032 1029 * tsbreg = tsbreg from the specified scratchpad register
1033 1030 */
1034 1031 #define GET_UTSBREG(regnum, tsbreg) \
1035 1032 mov regnum, tsbreg; \
1036 1033 ldxa [tsbreg]ASI_SCRATCHPAD, tsbreg
1037 1034
1038 1035 /*
1039 1036 * Load TSB base register to TSBMISS area for shared contexts.
1040 1037 * This register contains utsb_pabase in bits 63:13, and TSB size
1041 1038 * code in bits 2:0.
1042 1039 *
1043 1040 * In:
1044 1041 * tsbmiss = pointer to tsbmiss area
1045 1042 * tsbmissoffset = offset to right tsb pointer
1046 1043 * tsbreg = value to load (ro)
1047 1044 * Out:
1048 1045 * Specified tsbmiss area updated
1049 1046 *
1050 1047 */
1051 1048 #define SET_UTSBREG_SHCTX(tsbmiss, tsbmissoffset, tsbreg) \
1052 1049 stx tsbreg, [tsbmiss + tsbmissoffset] /* save tsbreg */
1053 1050
1054 1051 /*
1055 1052 * Get TSB base register from the scratchpad for
1056 1053 * shared contexts
1057 1054 *
1058 1055 * In:
1059 1056 * tsbmiss = pointer to tsbmiss area
1060 1057 * tsbmissoffset = offset to right tsb pointer
1061 1058 * tsbreg = scratch
1062 1059 * Out:
1063 1060 * tsbreg = tsbreg from the specified scratchpad register
1064 1061 */
1065 1062 #define GET_UTSBREG_SHCTX(tsbmiss, tsbmissoffset, tsbreg) \
1066 1063 ldx [tsbmiss + tsbmissoffset], tsbreg
1067 1064
1068 1065 #endif /* defined(sun4v) || defined(UTSB_PHYS) */
1069 1066
1070 1067 #ifndef _ASM
1071 1068
1072 1069 /*
1073 1070 * Kernel page relocation stuff.
1074 1071 */
1075 1072 struct sfmmu_callback {
1076 1073 int key;
1077 1074 int (*prehandler)(caddr_t, uint_t, uint_t, void *);
1078 1075 int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t);
1079 1076 int (*errhandler)(caddr_t, uint_t, uint_t, void *);
1080 1077 int capture_cpus;
1081 1078 };
1082 1079
1083 1080 extern int sfmmu_max_cb_id;
1084 1081 extern struct sfmmu_callback *sfmmu_cb_table;
1085 1082
1086 1083 struct pa_hment;
1087 1084
1088 1085 /*
1089 1086 * RFE: With multihat gone we gain back an int. We could use this to
1090 1087 * keep ref bits on a per cpu basis to eliminate xcalls.
1091 1088 */
1092 1089 struct sf_hment {
1093 1090 tte_t hme_tte; /* tte for this hment */
1094 1091
1095 1092 union {
1096 1093 struct page *page; /* what page this maps */
1097 1094 struct pa_hment *data; /* pa_hment */
1098 1095 } sf_hment_un;
1099 1096
1100 1097 struct sf_hment *hme_next; /* next hment */
1101 1098 struct sf_hment *hme_prev; /* prev hment */
1102 1099 };
1103 1100
1104 1101 struct pa_hment {
1105 1102 caddr_t addr; /* va */
1106 1103 uint_t len; /* bytes */
1107 1104 ushort_t flags; /* internal flags */
1108 1105 ushort_t refcnt; /* reference count */
1109 1106 id_t cb_id; /* callback id, table index */
1110 1107 void *pvt; /* handler's private data */
1111 1108 struct sf_hment sfment; /* corresponding dummy sf_hment */
1112 1109 };
1113 1110
1114 1111 #define hme_page sf_hment_un.page
1115 1112 #define hme_data sf_hment_un.data
1116 1113 #define hme_size(sfhmep) ((int)(TTE_CSZ(&(sfhmep)->hme_tte)))
1117 1114 #define PAHME_SZ (sizeof (struct pa_hment))
1118 1115 #define SFHME_SZ (sizeof (struct sf_hment))
1119 1116
1120 1117 #define IS_PAHME(hme) ((hme)->hme_tte.ll == 0)
1121 1118
1122 1119 /*
1123 1120 * hmeblk_tag structure
1124 1121 * structure used to obtain a match on a hme_blk. Currently consists of
1125 1122 * the address of the sfmmu struct (or hatid), the base page address of the
1126 1123 * hme_blk, and the rehash count. The rehash count is actually only 2 bits
1127 1124 * and has the following meaning:
1128 1125 * 1 = 8k or 64k hash sequence.
1129 1126 * 2 = 512k hash sequence.
1130 1127 * 3 = 4M hash sequence.
1131 1128 * We require this count because we don't want to get a false hit on a 512K or
1132 1129 * 4M rehash with a base address corresponding to an 8k or 64k hmeblk.
1133 1130 * Note: The ordering and size of the hmeblk_tag members are implicitly known
1134 1131 * by the tsb miss handlers written in assembly. Do not change this structure
1135 1132 * without checking those routines. See HTAG_SFMMUPSZ define.
1136 1133 */
1137 1134
1138 1135 /*
1139 1136 * In private hmeblks hblk_rid field must be SFMMU_INVALID_RID.
1140 1137 */
1141 1138 typedef union {
1142 1139 struct {
1143 1140 uint64_t hblk_basepg: 51, /* hme_blk base pg # */
1144 1141 hblk_rehash: 3, /* rehash number */
1145 1142 hblk_rid: 10; /* hme_blk region id */
1146 1143 void *hblk_id;
1147 1144 } hblk_tag_un;
1148 1145 uint64_t htag_tag[2];
1149 1146 } hmeblk_tag;
1150 1147
1151 1148 #define htag_id hblk_tag_un.hblk_id
1152 1149 #define htag_bspage hblk_tag_un.hblk_basepg
1153 1150 #define htag_rehash hblk_tag_un.hblk_rehash
1154 1151 #define htag_rid hblk_tag_un.hblk_rid
1155 1152
1156 1153 #endif /* !_ASM */
1157 1154
1158 1155 #define HTAG_REHASH_SHIFT 10
1159 1156 #define HTAG_MAX_RID (((0x1 << HTAG_REHASH_SHIFT) - 1))
1160 1157 #define HTAG_RID_MASK HTAG_MAX_RID
1161 1158
1162 1159 /* used for tagging all per sfmmu (i.e. non SRD) private hmeblks */
1163 1160 #define SFMMU_INVALID_SHMERID HTAG_MAX_RID
1164 1161
1165 1162 #if SFMMU_INVALID_SHMERID < SFMMU_MAX_HME_REGIONS
1166 1163 #error SFMMU_INVALID_SHMERID < SFMMU_MAX_HME_REGIONS
1167 1164 #endif
1168 1165
1169 1166 #define SFMMU_IS_SHMERID_VALID(rid) ((rid) != SFMMU_INVALID_SHMERID)
1170 1167
1171 1168 /* ISM regions */
1172 1169 #define SFMMU_INVALID_ISMRID 0xff
1173 1170
1174 1171 #if SFMMU_INVALID_ISMRID < SFMMU_MAX_ISM_REGIONS
1175 1172 #error SFMMU_INVALID_ISMRID < SFMMU_MAX_ISM_REGIONS
1176 1173 #endif
1177 1174
1178 1175 #define SFMMU_IS_ISMRID_VALID(rid) ((rid) != SFMMU_INVALID_ISMRID)
1179 1176
1180 1177
1181 1178 #define HTAGS_EQ(tag1, tag2) (((tag1.htag_tag[0] ^ tag2.htag_tag[0]) | \
1182 1179 (tag1.htag_tag[1] ^ tag2.htag_tag[1])) == 0)
1183 1180
1184 1181 /*
1185 1182 * this macro must only be used for comparing tags in shared hmeblks.
1186 1183 */
1187 1184 #define HTAGS_EQ_SHME(hmetag, tag, hrmap) \
1188 1185 (((hmetag).htag_rid != SFMMU_INVALID_SHMERID) && \
1189 1186 (((((hmetag).htag_tag[0] ^ (tag).htag_tag[0]) & \
1190 1187 ~HTAG_RID_MASK) | \
1191 1188 ((hmetag).htag_tag[1] ^ (tag).htag_tag[1])) == 0) && \
1192 1189 SF_RGNMAP_TEST(hrmap, hmetag.htag_rid))
1193 1190
1194 1191 #define HME_REHASH(sfmmup) \
1195 1192 ((sfmmup)->sfmmu_ttecnt[TTE512K] != 0 || \
1196 1193 (sfmmup)->sfmmu_ttecnt[TTE4M] != 0 || \
1197 1194 (sfmmup)->sfmmu_ttecnt[TTE32M] != 0 || \
1198 1195 (sfmmup)->sfmmu_ttecnt[TTE256M] != 0)
1199 1196
1200 1197 #define NHMENTS 8 /* # of hments in an 8k hme_blk */
1201 1198 /* needs to be a multiple of 2 */
1202 1199
1203 1200 #ifndef _ASM
1204 1201
1205 1202 #ifdef HBLK_TRACE
1206 1203
1207 1204 #define HBLK_LOCK 1
1208 1205 #define HBLK_UNLOCK 0
1209 1206 #define HBLK_STACK_DEPTH 6
1210 1207 #define HBLK_AUDIT_CACHE_SIZE 16
1211 1208 #define HBLK_LOCK_PATTERN 0xaaaaaaaa
1212 1209 #define HBLK_UNLOCK_PATTERN 0xbbbbbbbb
1213 1210
1214 1211 struct hblk_lockcnt_audit {
1215 1212 int flag; /* lock or unlock */
1216 1213 kthread_id_t thread;
1217 1214 int depth;
1218 1215 pc_t stack[HBLK_STACK_DEPTH];
1219 1216 };
1220 1217
1221 1218 #endif /* HBLK_TRACE */
1222 1219
1223 1220
1224 1221 /*
1225 1222 * Hment block structure.
1226 1223 * The hme_blk is the node data structure which the hash structure
1227 1224 * maintains. An hme_blk can have 2 different sizes depending on the
1228 1225 * number of hments it implicitly contains. When dealing with 64K, 512K,
1229 1226 * or 4M hments there is one hment per hme_blk. When dealing with
1230 1227 * 8k hments we allocate an hme_blk plus an additional 7 hments to
1231 1228 * give us a total of 8 (NHMENTS) hments that can be referenced through a
1232 1229 * hme_blk.
1233 1230 *
1234 1231 * The hmeblk structure contains 2 tte reference counters used to determine if
1235 1232 * it is ok to free up the hmeblk. Both counters have to be zero in order
1236 1233 * to be able to free up hmeblk. They are protected by cas.
1237 1234 * hblk_hmecnt is the number of hments present on pp mapping lists.
1238 1235 * hblk_vcnt reflects number of valid ttes in hmeblk.
1239 1236 *
1240 1237 * The hmeblk now also has per tte lock cnts. This is required because
1241 1238 * the counts can be high and there are not enough bits in the tte. When
1242 1239 * physio is fixed to not lock the translations we should be able to move
1243 1240 * the lock cnt back to the tte. See bug id 1198554.
1244 - *
1245 - * Note that xhat_hme_blk's layout follows this structure: hme_blk_misc
1246 - * and sf_hment are at the same offsets in both structures. Whenever
1247 - * hme_blk is changed, xhat_hme_blk may need to be updated as well.
1248 1241 */
1249 1242
1250 1243 struct hme_blk_misc {
1251 - uint_t notused:25;
1244 + uint_t notused:26;
1252 1245 uint_t shared_bit:1; /* set for SRD shared hmeblk */
1253 - uint_t xhat_bit:1; /* set for an xhat hme_blk */
1254 1246 uint_t shadow_bit:1; /* set for a shadow hme_blk */
1255 1247 uint_t nucleus_bit:1; /* set for a nucleus hme_blk */
1256 1248 uint_t ttesize:3; /* contains ttesz of hmeblk */
1257 1249 };
1258 1250
1259 1251 struct hme_blk {
1260 1252 volatile uint64_t hblk_nextpa; /* physical address for hash list */
1261 1253
1262 1254 hmeblk_tag hblk_tag; /* tag used to obtain an hmeblk match */
1263 1255
1264 1256 struct hme_blk *hblk_next; /* on free list or on hash list */
1265 1257 /* protected by hash lock */
1266 1258
1267 1259 struct hme_blk *hblk_shadow; /* pts to shadow hblk */
1268 1260 /* protected by hash lock */
1269 1261 uint_t hblk_span; /* span of memory hmeblk maps */
1270 1262
1271 1263 struct hme_blk_misc hblk_misc;
1272 1264
1273 1265 union {
1274 1266 struct {
1275 1267 ushort_t hblk_hmecount; /* hment on mlists counter */
1276 1268 ushort_t hblk_validcnt; /* valid tte reference count */
1277 1269 } hblk_counts;
1278 1270 uint_t hblk_shadow_mask;
1279 1271 } hblk_un;
1280 1272
1281 1273 uint_t hblk_lckcnt;
1282 1274
1283 1275 #ifdef HBLK_TRACE
1284 1276 kmutex_t hblk_audit_lock; /* lock to protect index */
1285 1277 uint_t hblk_audit_index; /* index into audit_cache */
1286 1278 struct hblk_lockcnt_audit hblk_audit_cache[HBLK_AUDIT_CACHE_SIZE];
1287 1279 #endif /* HBLK_TRACE */
1288 1280
1289 1281 struct sf_hment hblk_hme[1]; /* hment array */
1290 1282 };
1291 1283
1292 1284 #define hblk_shared hblk_misc.shared_bit
1293 -#define hblk_xhat_bit hblk_misc.xhat_bit
1294 1285 #define hblk_shw_bit hblk_misc.shadow_bit
1295 1286 #define hblk_nuc_bit hblk_misc.nucleus_bit
1296 1287 #define hblk_ttesz hblk_misc.ttesize
1297 1288 #define hblk_hmecnt hblk_un.hblk_counts.hblk_hmecount
1298 1289 #define hblk_vcnt hblk_un.hblk_counts.hblk_validcnt
1299 1290 #define hblk_shw_mask hblk_un.hblk_shadow_mask
1300 1291
1301 1292 #define MAX_HBLK_LCKCNT 0xFFFFFFFF
1302 1293 #define HMEBLK_ALIGN 0x8 /* hmeblk has to be double aligned */
1303 1294
1304 1295 #ifdef HBLK_TRACE
1305 1296
1306 1297 #define HBLK_STACK_TRACE(hmeblkp, lock) \
1307 1298 { \
1308 1299 int flag = lock; /* to pacify lint */ \
1309 1300 int audit_index; \
1310 1301 \
1311 1302 mutex_enter(&hmeblkp->hblk_audit_lock); \
1312 1303 audit_index = hmeblkp->hblk_audit_index; \
1313 1304 hmeblkp->hblk_audit_index = ((hmeblkp->hblk_audit_index + 1) & \
1314 1305 (HBLK_AUDIT_CACHE_SIZE - 1)); \
1315 1306 mutex_exit(&hmeblkp->hblk_audit_lock); \
1316 1307 \
1317 1308 if (flag) \
1318 1309 hmeblkp->hblk_audit_cache[audit_index].flag = \
1319 1310 HBLK_LOCK_PATTERN; \
1320 1311 else \
1321 1312 hmeblkp->hblk_audit_cache[audit_index].flag = \
1322 1313 HBLK_UNLOCK_PATTERN; \
1323 1314 \
1324 1315 hmeblkp->hblk_audit_cache[audit_index].thread = curthread; \
1325 1316 hmeblkp->hblk_audit_cache[audit_index].depth = \
1326 1317 getpcstack(hmeblkp->hblk_audit_cache[audit_index].stack, \
1327 1318 HBLK_STACK_DEPTH); \
1328 1319 }
1329 1320
1330 1321 #else
1331 1322
1332 1323 #define HBLK_STACK_TRACE(hmeblkp, lock)
1333 1324
1334 1325 #endif /* HBLK_TRACE */
1335 1326
1336 1327 #define HMEHASH_FACTOR 16 /* used to calc # of buckets in hme hash */
1337 1328
1338 1329 /*
1339 1330 * A maximum number of user hmeblks is defined in order to place an upper
1340 1331 * limit on how much nucleus memory is required and to avoid overflowing the
1341 1332 * tsbmiss uhashsz and khashsz data areas. The number below corresponds to
1342 1333 * the number of buckets required, for an average hash chain length of 4 on
1343 1334 * a 16TB machine.
1344 1335 */
1345 1336
1346 1337 #define MAX_UHME_BUCKETS (0x1 << 30)
1347 1338 #define MAX_KHME_BUCKETS (0x1 << 30)
1348 1339
1349 1340 /*
1350 1341 * The minimum number of kernel hash buckets.
1351 1342 */
1352 1343 #define MIN_KHME_BUCKETS 0x800
1353 1344
1354 1345 /*
1355 1346 * The number of hash buckets must be a power of 2. If the initial calculated
1356 1347 * value is less than USER_BUCKETS_THRESHOLD we round up to the next greater
1357 1348 * power of 2, otherwise we round down to avoid huge over-allocations.
1358 1349 */
1359 1350 #define USER_BUCKETS_THRESHOLD (1<<22)
1360 1351
1361 1352 #define MAX_NUCUHME_BUCKETS 0x4000
1362 1353 #define MAX_NUCKHME_BUCKETS 0x2000
1363 1354
1364 1355 /*
1365 1356 * There are 2 locks in the hmehash bucket. The hmehash_mutex is
1366 1357 * a regular mutex used to make sure operations on a hash link are only
1367 1358 * done by one thread. Any operation which comes into the hat with
1368 1359 * a <vaddr, as> will grab the hmehash_mutex. Normally one would expect
1369 1360 * the tsb miss handlers to grab the hash lock to make sure the hash list
1370 1361 * is consistent while we traverse it. Unfortunately this can lead to
1371 1362 * deadlocks or recursive mutex enters since it is possible for
1372 1363 * someone holding the lock to take a tlb/tsb miss.
1373 1364 * To solve this problem we have added the hmehash_listlock. This lock
1374 1365 * is only grabbed by the tsb miss handlers, vatopfn, and while
1375 1366 * adding/removing a hmeblk from the hash list. The code is written to
1376 1367 * guarantee we won't take a tlb miss while holding this lock.
1377 1368 */
1378 1369 struct hmehash_bucket {
1379 1370 kmutex_t hmehash_mutex;
1380 1371 volatile uint64_t hmeh_nextpa; /* physical address for hash list */
1381 1372 struct hme_blk *hmeblkp;
1382 1373 uint_t hmeh_listlock;
1383 1374 };
1384 1375
1385 1376 #endif /* !_ASM */
1386 1377
1387 1378 #define SFMMU_PGCNT_MASK 0x3f
1388 1379 #define SFMMU_PGCNT_SHIFT 6
1389 1380 #define INVALID_MMU_ID -1
1390 1381 #define SFMMU_MMU_GNUM_RSHIFT 16
1391 1382 #define SFMMU_MMU_CNUM_LSHIFT (64 - SFMMU_MMU_GNUM_RSHIFT)
1392 1383 #define MAX_SFMMU_CTX_VAL ((1 << 16) - 1) /* for sanity check */
1393 1384 #define MAX_SFMMU_GNUM_VAL ((0x1UL << 48) - 1)
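A hedged reading of the shift constants above: a (gnum, cnum) pair packs into
one 64-bit word with cnum in the low 16 bits, matching the sfmmu_ctx_t
bitfield layout; the values below are illustrative:

    uint64_t gnum = 7, cnum = 42;
    uint64_t ctx = (gnum << SFMMU_MMU_GNUM_RSHIFT) | cnum;

    ASSERT(gnum <= MAX_SFMMU_GNUM_VAL && cnum <= MAX_SFMMU_CTX_VAL);
    ASSERT((ctx >> SFMMU_MMU_GNUM_RSHIFT) == gnum);         /* recover gnum */
    ASSERT((ctx & MAX_SFMMU_CTX_VAL) == cnum);              /* recover cnum */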
1394 1385
1395 1386 /*
1396 1387 * The tsb miss handlers written in assembly know that sfmmup
1397 1388 * is a 64 bit ptr.
1398 1389 *
1399 1390 * The bspage and re-hash part is 64 bits, with the sfmmup being another 64
1400 1391 * bits.
1401 1392 */
1402 1393 #define HTAG_SFMMUPSZ 0 /* Not really used for LP64 */
1403 1394 #define HTAG_BSPAGE_SHIFT 13
1404 1395
1405 1396 /*
1406 1397 * Assembly routines need to be able to get to ttesz
1407 1398 */
1408 1399 #define HBLK_SZMASK 0x7
1409 1400
1410 1401 #ifndef _ASM
1411 1402
1412 1403 /*
1413 1404 * Returns the number of bytes that an hmeblk spans given its tte size
1414 1405 */
1415 1406 #define get_hblk_span(hmeblkp) ((hmeblkp)->hblk_span)
1416 1407 #define get_hblk_ttesz(hmeblkp) ((hmeblkp)->hblk_ttesz)
1417 1408 #define get_hblk_cache(hmeblkp) (((hmeblkp)->hblk_ttesz == TTE8K) ? \
1418 1409 sfmmu8_cache : sfmmu1_cache)
1419 1410 #define HMEBLK_SPAN(ttesz) \
1420 1411 ((ttesz == TTE8K)? (TTEBYTES(ttesz) * NHMENTS) : TTEBYTES(ttesz))
1421 1412
1422 1413 #define set_hblk_sz(hmeblkp, ttesz) \
1423 1414 (hmeblkp)->hblk_ttesz = (ttesz); \
1424 1415 (hmeblkp)->hblk_span = HMEBLK_SPAN(ttesz)
1425 1416
1426 1417 #define get_hblk_base(hmeblkp) \
1427 1418 ((uintptr_t)(hmeblkp)->hblk_tag.htag_bspage << MMU_PAGESHIFT)
1428 1419
1429 1420 #define get_hblk_endaddr(hmeblkp) \
1430 1421 ((caddr_t)(get_hblk_base(hmeblkp) + get_hblk_span(hmeblkp)))
1431 1422
1432 1423 #define in_hblk_range(hmeblkp, vaddr) \
1433 1424 (((uintptr_t)(vaddr) >= get_hblk_base(hmeblkp)) && \
1434 1425 ((uintptr_t)(vaddr) < (get_hblk_base(hmeblkp) + \
1435 1426 get_hblk_span(hmeblkp))))
1436 1427
1437 1428 #define tte_to_vaddr(hmeblkp, tte) ((caddr_t)(get_hblk_base(hmeblkp) \
1438 1429 + (TTEBYTES(TTE_CSZ(&tte)) * (tte).tte_hmenum)))
1439 1430
1440 1431 #define tte_to_evaddr(hmeblkp, ttep) ((caddr_t)(get_hblk_base(hmeblkp) \
1441 1432 + (TTEBYTES(TTE_CSZ(ttep)) * ((ttep)->tte_hmenum + 1))))
1442 1433
1443 1434 #define vaddr_to_vshift(hblktag, vaddr, shwsz) \
1444 1435 ((((uintptr_t)(vaddr) >> MMU_PAGESHIFT) - (hblktag.htag_bspage)) >>\
1445 1436 TTE_BSZS_SHIFT((shwsz) - 1))
1446 1437
1447 1438 #define HME8BLK_SZ (sizeof (struct hme_blk) + \
1448 1439 (NHMENTS - 1) * sizeof (struct sf_hment))
1449 1440 #define HME1BLK_SZ (sizeof (struct hme_blk))
1450 1441 #define H1MIN (2 + MAX_BIGKTSB_TTES) /* nucleus text+data, ktsb */
1451 1442
1452 1443 /*
1453 1444 * Hme_blk hash structure
1454 1445 * Active mappings are kept in a hash structure of hme_blks. The hash
1455 1446 * function is based on (ctx, vaddr). The size of the hash table is a
1456 1447 * power of 2 such that the average hash chain length is HMENT_HASHAVELEN.
1457 1448 * The hash actually consists of 2 separate hashes. One hash is for the user
1458 1449 * address space and the other hash is for the kernel address space.
1459 1450 * The number of buckets is calculated at boot time and stored in the global
1460 1451 * variables "uhmehash_num" and "khmehash_num". By making the hash table size
1461 1452 * a power of 2 we can use a simple & operation to derive an index instead of
1462 1453 * a divide.
1463 1454 *
1464 1455 * HME_HASH_FUNCTION(hatid, vaddr, shift) returns a pointer to a hme_hash
1465 1456 * bucket.
1466 1457 * An hme hash bucket contains a pointer to an hme_blk and the mutex that
1467 1458 * protects the link list.
1468 1459 * Spitfire supports 4 page sizes. 8k and 64K pages only need one hash.
1469 1460 * 512K pages need 2 hashes and 4M pages need 3 hashes.
1470 1461 * The 'shift' parameter controls how many bits the vaddr will be shifted in
1471 1462 * the hash function. It is calculated in the HME_HASH_SHIFT(ttesz) function
1472 1463 * and it varies depending on the page size as follows:
1473 1464 * 8k pages: HBLK_RANGE_SHIFT
1474 1465 * 64k pages: MMU_PAGESHIFT64K
1475 1466 * 512K pages: MMU_PAGESHIFT512K
1476 1467 * 4M pages: MMU_PAGESHIFT4M
1477 1468 * An assembly version of the hash function exists in sfmmu_ktsb_miss(). All
1478 1469 * changes should be reflected in both versions. This function and the TSB
1479 1470 * miss handlers are the only places which know about the two hashes.
1480 1471 *
1481 1472 * HBLK_RANGE_SHIFT controls the range of virtual addresses that will fall
1482 1473 * into the same bucket for a particular process. It is currently set to
1483 1474 * be equivalent to 64K range or one hme_blk.
1484 1475 *
1485 1476 * The hme_blks in the hash are protected by a per hash bucket mutex
1486 1477 * known as SFMMU_HASH_LOCK.
1487 1478 * You need to acquire this lock before traversing the hash bucket link
1488 1479 * list, while adding/removing a hme_blk to the list, and while
1489 1480 * modifying an hme_blk. A possible optimization is to replace these
1490 1481 * mutexes with a readers/writer lock, but right now it is not clear
1491 1482 * whether this is a win or not.
1492 1483 *
1493 1484 * The HME_HASH_TABLE_SEARCH will search the hash table for the
1494 1485 * hme_blk that contains the hment that corresponds to the passed
1495 1486 * ctx and vaddr. It assumes the SFMMU_HASH_LOCK is held.
1496 1487 */
1497 1488
1498 1489 #endif /* ! _ASM */
1499 1490
1500 1491 #define KHATID ksfmmup
1501 1492 #define UHMEHASH_SZ uhmehash_num
1502 1493 #define KHMEHASH_SZ khmehash_num
1503 1494 #define HMENT_HASHAVELEN 4
1504 1495 #define HBLK_RANGE_SHIFT MMU_PAGESHIFT64K /* shift for HBLK_BS_MASK */
1505 1496 #define HBLK_MIN_TTESZ 1
1506 1497 #define HBLK_MIN_BYTES MMU_PAGESIZE64K
1507 1498 #define HBLK_MIN_SHIFT MMU_PAGESHIFT64K
1508 1499 #define MAX_HASHCNT 5
1509 1500 #define DEFAULT_MAX_HASHCNT 3
1510 1501
1511 1502 #ifndef _ASM
1512 1503
1513 1504 #define HASHADDR_MASK(hashno) TTE_PAGEMASK(hashno)
1514 1505
1515 1506 #define HME_HASH_SHIFT(ttesz) \
1516 1507 ((ttesz == TTE8K)? HBLK_RANGE_SHIFT : TTE_PAGE_SHIFT(ttesz))
1517 1508
1518 1509 #define HME_HASH_ADDR(vaddr, hmeshift) \
1519 1510 ((caddr_t)(((uintptr_t)(vaddr) >> (hmeshift)) << (hmeshift)))
1520 1511
1521 1512 #define HME_HASH_BSPAGE(vaddr, hmeshift) \
1522 1513 (((uintptr_t)(vaddr) >> (hmeshift)) << ((hmeshift) - MMU_PAGESHIFT))
1523 1514
1524 1515 #define HME_HASH_REHASH(ttesz) \
1525 1516 (((ttesz) < TTE512K)? 1 : (ttesz))
1526 1517
1527 1518 #define HME_HASH_FUNCTION(hatid, vaddr, shift) \
1528 1519 ((((void *)hatid) != ((void *)KHATID)) ? \
1529 1520 (&uhme_hash[ (((uintptr_t)(hatid) ^ ((uintptr_t)vaddr >> (shift))) & \
1530 1521 UHMEHASH_SZ) ]): \
1531 1522 (&khme_hash[ (((uintptr_t)(hatid) ^ ((uintptr_t)vaddr >> (shift))) & \
1532 1523 KHMEHASH_SZ) ]))
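
Because the bucket counts are powers of 2 and UHMEHASH_SZ/KHMEHASH_SZ are applied directly as & masks here, uhmehash_num and khmehash_num must hold the bucket count minus one. A minimal illustrative sketch of the resulting bucket derivation for an 8K user mapping ("demo_user_bucket" is a hypothetical helper, not part of the header):

	/*
	 * Illustrative only: derive the user hash bucket for an 8K
	 * mapping with a shift, an xor and a mask instead of a divide.
	 */
	static struct hmehash_bucket *
	demo_user_bucket(sfmmu_t *sfmmup, caddr_t vaddr)
	{
		return (&uhme_hash[((uintptr_t)sfmmup ^
		    ((uintptr_t)vaddr >> HBLK_RANGE_SHIFT)) & UHMEHASH_SZ]);
	}
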
1533 1524
1534 1525 /*
1535 1526 * This macro will traverse a hmeblk hash link list looking for an hme_blk
1536 1527 * that owns the specified vaddr and hatid. If it doesn't find one, hmeblkp
1537 1528 * will be set to NULL; otherwise it will point to the correct hme_blk.
1538 1529 * This macro also cleans empty hblks.
1539 1530 */
1540 1531 #define HME_HASH_SEARCH_PREV(hmebp, hblktag, hblkp, pr_hblk, listp) \
1541 1532 { \
1542 1533 struct hme_blk *nx_hblk; \
1543 1534 \
1544 1535 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); \
1545 1536 hblkp = hmebp->hmeblkp; \
1546 1537 pr_hblk = NULL; \
1547 1538 while (hblkp) { \
1548 1539 if (HTAGS_EQ(hblkp->hblk_tag, hblktag)) { \
1549 1540 /* found hme_blk */ \
1550 1541 break; \
1551 1542 } \
1552 1543 nx_hblk = hblkp->hblk_next; \
1553 1544 if (!hblkp->hblk_vcnt && !hblkp->hblk_hmecnt) { \
1554 1545 sfmmu_hblk_hash_rm(hmebp, hblkp, pr_hblk, \
1555 1546 listp, 0); \
1556 1547 } else { \
1557 1548 pr_hblk = hblkp; \
1558 1549 } \
1559 1550 hblkp = nx_hblk; \
1560 1551 } \
1561 1552 }
1562 1553
1563 1554 #define HME_HASH_SEARCH(hmebp, hblktag, hblkp, listp) \
1564 1555 { \
1565 1556 struct hme_blk *pr_hblk; \
1566 1557 \
1567 1558 HME_HASH_SEARCH_PREV(hmebp, hblktag, hblkp, pr_hblk, listp); \
1568 1559 }
1569 1560
1570 1561 /*
1571 1562 * This macro will traverse a hmeblk hash link list looking for an hme_blk
1572 1563 * that owns the specified vaddr and hatid. If it doesn't find one, hmeblkp
1573 1564 * will be set to NULL; otherwise it will point to the correct hme_blk.
1574 1565 * It doesn't remove empty hblks.
1575 1566 */
1576 1567 #define HME_HASH_FAST_SEARCH(hmebp, hblktag, hblkp) \
1577 1568 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); \
1578 1569 for (hblkp = hmebp->hmeblkp; hblkp; \
1579 1570 hblkp = hblkp->hblk_next) { \
1580 1571 if (HTAGS_EQ(hblkp->hblk_tag, hblktag)) { \
1581 1572 /* found hme_blk */ \
1582 1573 break; \
1583 1574 } \
1584 1575 }
1585 1576
1586 1577 #define SFMMU_HASH_LOCK(hmebp) \
1587 1578 (mutex_enter(&hmebp->hmehash_mutex))
1588 1579
1589 1580 #define SFMMU_HASH_UNLOCK(hmebp) \
1590 1581 (mutex_exit(&hmebp->hmehash_mutex))
1591 1582
1592 1583 #define SFMMU_HASH_LOCK_TRYENTER(hmebp) \
1593 1584 (mutex_tryenter(&hmebp->hmehash_mutex))
1594 1585
1595 1586 #define SFMMU_HASH_LOCK_ISHELD(hmebp) \
1596 1587 (mutex_owned(&hmebp->hmehash_mutex))
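
Putting the pieces together, a sketch of the lock discipline the comments above call for; "demo_find_hblk" is hypothetical and the tag setup is simplified (shared-region ids omitted), so treat this as an illustration rather than the real lookup path:

	static struct hme_blk *
	demo_find_hblk(sfmmu_t *sfmmup, caddr_t vaddr, int ttesz)
	{
		int hmeshift = HME_HASH_SHIFT(ttesz);
		struct hmehash_bucket *hmebp;
		struct hme_blk *hmeblkp;
		hmeblk_tag hblktag;

		hblktag.htag_id = (void *)sfmmup; /* private mapping: id is the hat */
		hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
		hblktag.htag_rehash = HME_HASH_REHASH(ttesz);
		hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift);

		SFMMU_HASH_LOCK(hmebp);		/* both search macros require it */
		HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
		SFMMU_HASH_UNLOCK(hmebp);

		return (hmeblkp);		/* NULL if nothing matched */
	}
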
1597 1588
1598 1589 #define SFMMU_XCALL_STATS(sfmmup) \
1599 1590 { \
1600 1591 if (sfmmup == ksfmmup) { \
1601 1592 SFMMU_STAT(sf_kernel_xcalls); \
1602 1593 } else { \
1603 1594 SFMMU_STAT(sf_user_xcalls); \
1604 1595 } \
1605 1596 }
1606 1597
1607 1598 #define astosfmmu(as) ((as)->a_hat)
1608 1599 #define hblktosfmmu(hmeblkp) ((sfmmu_t *)(hmeblkp)->hblk_tag.htag_id)
1609 1600 #define hblktosrd(hmeblkp) ((sf_srd_t *)(hmeblkp)->hblk_tag.htag_id)
1610 1601 #define sfmmutoas(sfmmup) ((sfmmup)->sfmmu_as)
1611 1602
1612 1603 #define sfmmutohtagid(sfmmup, rid) \
1613 1604 (((rid) == SFMMU_INVALID_SHMERID) ? (void *)(sfmmup) : \
1614 1605 (void *)((sfmmup)->sfmmu_srdp))
1615 1606
1616 1607 /*
1617 1608 * We use the sfmmu data structure to keep the per as page coloring info.
1618 1609 */
1619 1610 #define as_color_bin(as) (astosfmmu(as)->sfmmu_clrbin)
1620 1611 #define as_color_start(as) (astosfmmu(as)->sfmmu_clrstart)
1621 1612
1622 1613 typedef struct {
1623 1614 char h8[HME8BLK_SZ];
1624 1615 } hblk8_t;
1625 1616
1626 1617 typedef struct {
1627 1618 char h1[HME1BLK_SZ];
1628 1619 } hblk1_t;
1629 1620
1630 1621 typedef struct {
1631 1622 ulong_t index;
1632 1623 ulong_t len;
1633 1624 hblk8_t *list;
1634 1625 } nucleus_hblk8_info_t;
1635 1626
1636 1627 typedef struct {
1637 1628 ulong_t index;
1638 1629 ulong_t len;
1639 1630 hblk1_t *list;
1640 1631 } nucleus_hblk1_info_t;
1641 1632
1642 1633 /*
1643 1634 * This struct is used for accumulating information about a range
1644 1635 * of pages that are unloading so that a single xcall can flush
1645 1636 * the entire range from remote tlbs. A function that must demap
1646 1637 * a range of virtual addresses declares one of these structures
1647 1638 * and initializes it using DEMAP_RANGE_INIT(). It then passes a pointer to
1648 1639 * this struct to the appropriate sfmmu_hblk_* level function which does
1649 1640 * all the bookkeeping using the other macros. When the function has
1650 1641 * finished the virtual address range, it calls the DEMAP_RANGE_FLUSH()
1651 1642 * macro to take care of any remaining unflushed mappings (see sketch below).
1652 1643 *
1653 1644 * The maximum range this struct can represent is the number of bits
1654 1645 * in the dmr_bitvec field times the pagesize in dmr_pgsz. Currently, only
1655 1646 * MMU_PAGESIZE pages are supported.
1656 1647 *
1657 1648 * Since there are now cases where it's no longer necessary to do
1658 1649 * flushes (e.g. when the process isn't runnable because it's swapping
1659 1650 * out or exiting) we allow these macros to take a NULL dmr input and do
1660 1651 * nothing in that case.
1661 1652 */
1662 1653 typedef struct {
1663 1654 sfmmu_t *dmr_sfmmup; /* relevant hat */
1664 1655 caddr_t dmr_addr; /* beginning address */
1665 1656 caddr_t dmr_endaddr; /* ending address */
1666 1657 ulong_t dmr_bitvec; /* valid pages found */
1667 1658 ulong_t dmr_bit; /* next page to examine */
1668 1659 ulong_t dmr_maxbit; /* highest page in range */
1669 1660 ulong_t dmr_pgsz; /* page size in range */
1670 1661 } demap_range_t;
1671 1662
1672 1663 #define DMR_MAXBIT ((ulong_t)1<<63) /* dmr_bit high bit */
1673 1664
1674 1665 #define DEMAP_RANGE_INIT(sfmmup, dmrp) \
1675 1666 (dmrp)->dmr_sfmmup = (sfmmup); \
1676 1667 (dmrp)->dmr_bitvec = 0; \
1677 1668 (dmrp)->dmr_maxbit = sfmmu_dmr_maxbit; \
1678 1669 (dmrp)->dmr_pgsz = MMU_PAGESIZE;
1679 1670
1680 1671 #define DEMAP_RANGE_PGSZ(dmrp) ((dmrp)? (dmrp)->dmr_pgsz : MMU_PAGESIZE)
1681 1672
1682 1673 #define DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr) \
1683 1674 if ((dmrp) != NULL) { \
1684 1675 if ((dmrp)->dmr_bitvec != 0 && (dmrp)->dmr_endaddr != (addr)) \
1685 1676 sfmmu_tlb_range_demap(dmrp); \
1686 1677 (dmrp)->dmr_endaddr = (endaddr); \
1687 1678 }
1688 1679
1689 1680 #define DEMAP_RANGE_FLUSH(dmrp) \
1690 1681 if ((dmrp)->dmr_bitvec != 0) \
1691 1682 sfmmu_tlb_range_demap(dmrp);
1692 1683
1693 1684
1694 1685 #define DEMAP_RANGE_MARKPG(dmrp, addr) \
1695 1686 if ((dmrp) != NULL) { \
1696 1687 if ((dmrp)->dmr_bitvec == 0) { \
1697 1688 (dmrp)->dmr_addr = (addr); \
1698 1689 (dmrp)->dmr_bit = 1; \
1699 1690 } \
1700 1691 (dmrp)->dmr_bitvec |= (dmrp)->dmr_bit; \
1701 1692 }
1702 1693
1703 1694 #define DEMAP_RANGE_NEXTPG(dmrp) \
1704 1695 if ((dmrp) != NULL && (dmrp)->dmr_bitvec != 0) { \
1705 1696 if ((dmrp)->dmr_bit & (dmrp)->dmr_maxbit) { \
1706 1697 sfmmu_tlb_range_demap(dmrp); \
1707 1698 } else { \
1708 1699 (dmrp)->dmr_bit <<= 1; \
1709 1700 } \
1710 1701 }
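
As promised above, a condensed sketch of the demap-range pattern; "demo_unload_range" is hypothetical and stands in for the sfmmu_hblk_* level functions that do this bookkeeping in the real code:

	static void
	demo_unload_range(sfmmu_t *sfmmup, caddr_t addr, caddr_t endaddr)
	{
		demap_range_t dmr;

		DEMAP_RANGE_INIT(sfmmup, &dmr);
		DEMAP_RANGE_CONTINUE(&dmr, addr, endaddr); /* one contiguous chunk */
		while (addr < endaddr) {
			/* ... invalidate the tte mapping addr here ... */
			DEMAP_RANGE_MARKPG(&dmr, addr);	/* record page in dmr_bitvec */
			DEMAP_RANGE_NEXTPG(&dmr);	/* advance; flushes when full */
			addr += MMU_PAGESIZE;
		}
		DEMAP_RANGE_FLUSH(&dmr);		/* flush whatever remains */
	}
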
1711 1702
1712 1703 /*
1713 1704 * TSB related structures
1714 1705 *
1715 1706 * The TSB is made up of tte entries. Both the tag and data are present
1716 1707 * in the TSB. The TSB locking is managed as follows:
1717 1708 * A software bit in the tsb tag is used to indicate that the entry is locked.
1718 1709 * If a cpu servicing a tsb miss reads a locked entry, the tag compare will
1719 1710 * fail, forcing the cpu to go to the hat hash for the translation.
1720 1711 * The cpu that holds the lock can then modify the data side and the tag side.
1721 1712 * The last write should be to the word containing the lock bit, which will
1722 1713 * clear the lock and allow the tsb entry to be read. It is assumed that all
1723 1714 * cpus reading the tsb will do so with atomic 128-bit loads. An atomic 128
1724 1715 * bit load is required to prevent the following from happening:
1725 1716 *
1726 1717 * cpu 0 cpu 1 comments
1727 1718 *
1728 1719 * ldx tag tag unlocked
1729 1720 * ldstub lock set lock
1730 1721 * stx data
1731 1722 * stx tag unlock
1732 1723 * ldx tag incorrect tte!!!
1733 1724 *
1734 1725 * The software also maintains a bit in the tag to indicate an invalid
1735 1726 * tsb entry. The purpose of this bit is to allow the tsb invalidate code
1736 1727 * to invalidate a tsb entry with a single cas. See code for details.
1737 1728 */
1738 1729
1739 1730 union tsb_tag {
1740 1731 struct {
1741 1732 uint32_t tag_res0:16; /* reserved - context area */
1742 1733 uint32_t tag_inv:1; /* sw - invalid tsb entry */
1743 1734 uint32_t tag_lock:1; /* sw - locked tsb entry */
1744 1735 uint32_t tag_res1:4; /* reserved */
1745 1736 uint32_t tag_va_hi:10; /* va[63:54] */
1746 1737 uint32_t tag_va_lo; /* va[53:22] */
1747 1738 } tagbits;
1748 1739 struct tsb_tagints {
1749 1740 uint32_t inthi;
1750 1741 uint32_t intlo;
1751 1742 } tagints;
1752 1743 };
1753 1744 #define tag_invalid tagbits.tag_inv
1754 1745 #define tag_locked tagbits.tag_lock
1755 1746 #define tag_vahi tagbits.tag_va_hi
1756 1747 #define tag_valo tagbits.tag_va_lo
1757 1748 #define tag_inthi tagints.inthi
1758 1749 #define tag_intlo tagints.intlo
1759 1750
1760 1751 struct tsbe {
1761 1752 union tsb_tag tte_tag;
1762 1753 tte_t tte_data;
1763 1754 };
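
A C rendition of the update side of the protocol above, for illustration only; the real updaters are assembly, acquire the lock atomically (e.g. ldstub, as in the comment), and may use physical ASIs:

	static void
	demo_tsbe_update(struct tsbe *tsbep, union tsb_tag newtag, tte_t newdata)
	{
		tsbep->tte_tag.tag_locked = 1;	/* readers now fail tag compare */
		membar_producer();

		tsbep->tte_data = newdata;	/* data side may change while locked */
		membar_producer();

		newtag.tag_locked = 0;		/* last store: new tag, lock clear */
		tsbep->tte_tag = newtag;
	}
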
1764 1755
1765 1756 /*
1766 1757 * A per cpu struct is kept that duplicates some info
1767 1758 * used by the tl>0 tsb miss handlers and provides
1768 1759 * a scratch area. Its purpose is to minimize cache misses
1769 1760 * in the tsb miss handler; it is 128 bytes (2 e$ lines).
1770 1761 *
1771 1762 * There should be one allocated per cpu in nucleus memory
1772 1763 * and should be aligned on an ecache line boundary.
1773 1764 */
1774 1765 struct tsbmiss {
1775 1766 sfmmu_t *ksfmmup; /* kernel hat id */
1776 1767 sfmmu_t *usfmmup; /* user hat id */
1777 1768 sf_srd_t *usrdp; /* user's SRD hat id */
1778 1769 struct tsbe *tsbptr; /* hardware computed ptr */
1779 1770 struct tsbe *tsbptr4m; /* hardware computed ptr */
1780 1771 struct tsbe *tsbscdptr; /* hardware computed ptr */
1781 1772 struct tsbe *tsbscdptr4m; /* hardware computed ptr */
1782 1773 uint64_t ismblkpa;
1783 1774 struct hmehash_bucket *khashstart;
1784 1775 struct hmehash_bucket *uhashstart;
1785 1776 uint_t khashsz;
1786 1777 uint_t uhashsz;
1787 1778 uint16_t dcache_line_mask; /* used to flush dcache */
1788 1779 uchar_t uhat_tteflags; /* private page sizes */
1789 1780 uchar_t uhat_rtteflags; /* SHME pagesizes */
1790 1781 uint32_t utsb_misses;
1791 1782 uint32_t ktsb_misses;
1792 1783 uint16_t uprot_traps;
1793 1784 uint16_t kprot_traps;
1794 1785 /*
1795 1786 * scratch[0] -> TSB_TAGACC
1796 1787 * scratch[1] -> TSBMISS_HMEBP
1797 1788 * scratch[2] -> TSBMISS_HATID
1798 1789 */
1799 1790 uintptr_t scratch[3];
1800 1791 ulong_t shmermap[SFMMU_HMERGNMAP_WORDS]; /* 8 bytes */
1801 1792 ulong_t scd_shmermap[SFMMU_HMERGNMAP_WORDS]; /* 8 bytes */
1802 1793 uint8_t pad[48]; /* pad to 64 bytes */
1803 1794 };
1804 1795
1805 1796 /*
1806 1797 * A per cpu struct is kept for use within the tl>0 kpm tsb
1807 1798 * miss handler. Some members are duplicates of common data or
1808 1799 * the physical addresses of common data. A few members are also
1809 1800 * written by the tl>0 kpm tsb miss handler. Its purpose is to
1810 1801 * minimize cache misses in the kpm tsb miss handler and occupies
1811 1802 * one ecache line. There should be one allocated per cpu in
1812 1803 * nucleus memory and it should be aligned on an ecache line
1813 1804 * boundary. It is not merged w/ struct tsbmiss since there is
1814 1805 * not much to share and the tsbmiss paths are different, so
1815 1806 * a kpm tlbmiss/tsbmiss only touches one cacheline, except for
1816 1807 * (DEBUG || SFMMU_STAT_GATHER) where the dtlb_misses counter
1817 1808 * of struct tsbmiss is used on every dtlb miss.
1818 1809 */
1819 1810 struct kpmtsbm {
1820 1811 caddr_t vbase; /* start of address kpm range */
1821 1812 caddr_t vend; /* end of address kpm range */
1822 1813 uchar_t flags; /* flags needed in TL tsbmiss handler */
1823 1814 uchar_t sz_shift; /* for single kpm window */
1824 1815 uchar_t kpmp_shift; /* hash lock shift */
1825 1816 uchar_t kpmp2pshft; /* kpm page to page shift */
1826 1817 uint_t kpmp_table_sz; /* size of kpmp_table or kpmp_stable */
1827 1818 uint64_t kpmp_tablepa; /* paddr of kpmp_table or kpmp_stable */
1828 1819 uint64_t msegphashpa; /* paddr of memseg_phash */
1829 1820 struct tsbe *tsbptr; /* saved ktsb pointer */
1830 1821 uint_t kpm_dtlb_misses; /* kpm tlbmiss counter */
1831 1822 uint_t kpm_tsb_misses; /* kpm tsbmiss counter */
1832 1823 uintptr_t pad[1];
1833 1824 };
1834 1825
1835 1826 extern size_t tsb_slab_size;
1836 1827 extern uint_t tsb_slab_shift;
1837 1828 extern size_t tsb_slab_mask;
1838 1829
1839 1830 #endif /* !_ASM */
1840 1831
1841 1832 /*
1842 1833 * Flags for TL kpm tsbmiss handler
1843 1834 */
1844 1835 #define KPMTSBM_ENABLE_FLAG 0x01 /* bit copy of kpm_enable */
1845 1836 #define KPMTSBM_TLTSBM_FLAG 0x02 /* use TL tsbmiss handler */
1846 1837 #define KPMTSBM_TSBPHYS_FLAG 0x04 /* use ASI_MEM for TSB update */
1847 1838
1848 1839 /*
1849 1840 * The TSB
1850 1841 * All TSB sizes supported by the hardware are now supported (8K - 1M).
1851 1842 * For kernel TSBs we may go beyond the hardware supported sizes and support
1852 1843 * larger TSBs via software.
1853 1844 * All TTE sizes are supported in the TSB; the manner in which this is
1854 1845 * done is cpu dependent.
1855 1846 */
1856 1847 #define TSB_MIN_SZCODE TSB_8K_SZCODE /* min. supported TSB size */
1857 1848 #define TSB_MIN_OFFSET_MASK (TSB_OFFSET_MASK(TSB_MIN_SZCODE))
1858 1849
1859 1850 #ifdef sun4v
1860 1851 #define UTSB_MAX_SZCODE TSB_256M_SZCODE /* max. supported TSB size */
1861 1852 #else /* sun4u */
1862 1853 #define UTSB_MAX_SZCODE TSB_1M_SZCODE /* max. supported TSB size */
1863 1854 #endif /* sun4v */
1864 1855
1865 1856 #define UTSB_MAX_OFFSET_MASK (TSB_OFFSET_MASK(UTSB_MAX_SZCODE))
1866 1857
1867 1858 #define TSB_FREEMEM_MIN 0x1000 /* 32 mb */
1868 1859 #define TSB_FREEMEM_LARGE 0x10000 /* 512 mb */
1869 1860 #define TSB_8K_SZCODE 0 /* 512 entries */
1870 1861 #define TSB_16K_SZCODE 1 /* 1k entries */
1871 1862 #define TSB_32K_SZCODE 2 /* 2k entries */
1872 1863 #define TSB_64K_SZCODE 3 /* 4k entries */
1873 1864 #define TSB_128K_SZCODE 4 /* 8k entries */
1874 1865 #define TSB_256K_SZCODE 5 /* 16k entries */
1875 1866 #define TSB_512K_SZCODE 6 /* 32k entries */
1876 1867 #define TSB_1M_SZCODE 7 /* 64k entries */
1877 1868 #define TSB_2M_SZCODE 8 /* 128k entries */
1878 1869 #define TSB_4M_SZCODE 9 /* 256k entries */
1879 1870 #define TSB_8M_SZCODE 10 /* 512k entries */
1880 1871 #define TSB_16M_SZCODE 11 /* 1M entries */
1881 1872 #define TSB_32M_SZCODE 12 /* 2M entries */
1882 1873 #define TSB_64M_SZCODE 13 /* 4M entries */
1883 1874 #define TSB_128M_SZCODE 14 /* 8M entries */
1884 1875 #define TSB_256M_SZCODE 15 /* 16M entries */
1885 1876 #define TSB_ENTRY_SHIFT 4 /* each entry = 128 bits = 16 bytes */
1886 1877 #define TSB_ENTRY_SIZE (1 << 4)
1887 1878 #define TSB_START_SIZE 9
1888 1879 #define TSB_ENTRIES(tsbsz) (1 << (TSB_START_SIZE + tsbsz))
1889 1880 #define TSB_BYTES(tsbsz) (TSB_ENTRIES(tsbsz) << TSB_ENTRY_SHIFT)
1890 1881 #define TSB_OFFSET_MASK(tsbsz) (TSB_ENTRIES(tsbsz) - 1)
1891 1882 #define TSB_BASEADDR_MASK ((1 << 12) - 1)
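
For reference, the size-code arithmetic above worked out for two entries of the table (the values follow directly from TSB_START_SIZE and TSB_ENTRY_SHIFT):

	/*
	 *	TSB_ENTRIES(TSB_8K_SZCODE)   == 1 << (9 + 0) == 512 entries
	 *	TSB_BYTES(TSB_8K_SZCODE)     == 512 << 4     == 8K
	 *	TSB_ENTRIES(TSB_512K_SZCODE) == 1 << (9 + 6) == 32k entries
	 *	TSB_BYTES(TSB_512K_SZCODE)   == 32768 << 4   == 512K
	 */
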
1892 1883
1893 1884 /*
1894 1885 * sun4u platforms
1895 1886 * ---------------
1896 1887 * We now support two user TSBs with one TSB base register.
1897 1888 * Hence the TSB base register is split up as follows:
1898 1889 *
1899 1890 * When only one TSB present:
1900 1891 * [63 62..42 41..13 12..4 3..0]
1901 1892 * ^ ^ ^ ^ ^
1902 1893 * | | | | |
1903 1894 * | | | | |_ TSB size code
1904 1895 * | | | |
1905 1896 * | | | |_ Reserved 0
1906 1897 * | | |
1907 1898 * | | |_ TSB VA[41..13]
1908 1899 * | |
1909 1900 * | |_ VA hole (Spitfire), zeros (Cheetah and beyond)
1910 1901 * |
1911 1902 * |_ 0
1912 1903 *
1913 1904 * When second TSB present:
1914 1905 * [63 62..42 41..33 32..29 28..22 21..13 12..4 3..0]
1915 1906 * ^ ^ ^ ^ ^ ^ ^ ^
1916 1907 * | | | | | | | |
1917 1908 * | | | | | | | |_ First TSB size code
1918 1909 * | | | | | | |
1919 1910 * | | | | | | |_ Reserved 0
1920 1911 * | | | | | |
1921 1912 * | | | | | |_ First TSB's VA[21..13]
1922 1913 * | | | | |
1923 1914 * | | | | |_ Reserved for future use
1924 1915 * | | | |
1925 1916 * | | | |_ Second TSB's size code
1926 1917 * | | |
1927 1918 * | | |_ Second TSB's VA[21..13]
1928 1919 * | |
1929 1920 * | |_ VA hole (Spitfire) / ones (Cheetah and beyond)
1930 1921 * |
1931 1922 * |_ 1
1932 1923 *
1933 1924 * Note that since we store 21..13 of each TSB's VA, TSBs and their slabs
1934 1925 * may be up to 4M in size. For now, only hardware supported TSB sizes
1935 1926 * are supported, though the slabs are usually 4M in size.
1936 1927 *
1937 1928 * sun4u platforms that define UTSB_PHYS use physical addressing to access
1938 1929 * the user TSBs at TL>0. The first user TSB base is in the MMU I/D TSB Base
1939 1930 * registers. The second TSB base uses a dedicated scratchpad register which
1940 1931 * requires a definition of SCRATCHPAD_UTSBREG2 in mach_sfmmu.h. The layout for
1941 1932 * both registers is equivalent to sun4v below, except the TSB PA range is
1942 1933 * [46..13] for sun4u.
1943 1934 *
1944 1935 * sun4v platforms
1945 1936 * ---------------
1946 1937 * On sun4v platforms, we use two dedicated scratchpad registers as pseudo
1947 1938 * hardware TSB base registers to hold up to two different user TSBs.
1948 1939 *
1949 1940 * Each register contains TSB's physical base and size code information
1950 1941 * as follows:
1951 1942 *
1952 1943 * [63..56 55..13 12..4 3..0]
1953 1944 * ^ ^ ^ ^
1954 1945 * | | | |
1955 1946 * | | | |_ TSB size code
1956 1947 * | | |
1957 1948 * | | |_ Reserved 0
1958 1949 * | |
1959 1950 * | |_ TSB PA[55..13]
1960 1951 * |
1961 1952 * |
1962 1953 * |
1963 1954 * |_ 0 for valid TSB
1964 1955 *
1965 1956 * Absence of a user TSB (primarily the second user TSB) is indicated by
1966 1957 * storing a negative value in the TSB base register. This allows us to
1967 1958 * check for presence of a user TSB by simply checking bit# 63.
1968 1959 */
1969 1960 #define TSBREG_MSB_SHIFT 32 /* set upper bits */
1970 1961 #define TSBREG_MSB_CONST 0xfffff800 /* set bits 63..43 */
1971 1962 #define TSBREG_FIRTSB_SHIFT 42 /* to clear bits 63:22 */
1972 1963 #define TSBREG_SECTSB_MKSHIFT 20 /* 21:13 --> 41:33 */
1973 1964 #define TSBREG_SECTSB_LSHIFT 22 /* to clear bits 63:42 */
1974 1965 #define TSBREG_SECTSB_RSHIFT (TSBREG_SECTSB_MKSHIFT + TSBREG_SECTSB_LSHIFT)
1975 1966 /* sectsb va -> bits 21:13 */
1976 1967 /* after clearing upper bits */
1977 1968 #define TSBREG_SECSZ_SHIFT 29 /* to get sectsb szc to 3:0 */
1978 1969 #define TSBREG_VAMASK_SHIFT 13 /* set up VA mask */
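
A minimal sketch of composing a sun4v pseudo TSB base register value per the layout above; "demo_make_utsbreg" is hypothetical, and tsb_pa is assumed 8K-aligned so bits 12..0 are already zero:

	static uint64_t
	demo_make_utsbreg(uint64_t tsb_pa, int szc)
	{
		return (tsb_pa | (uint64_t)szc);	/* PA[55..13] | size code */
	}
	/* absence of a TSB is flagged with a negative value, i.e. bit 63 set */
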
1979 1970
1980 1971 #define BIGKTSB_SZ_MASK 0xf
1981 1972 #define TSB_SOFTSZ_MASK BIGKTSB_SZ_MASK
1982 1973 #define MIN_BIGKTSB_SZCODE 9 /* 256k entries */
1983 1974 #define MAX_BIGKTSB_SZCODE 11 /* 1024k entries */
1984 1975 #define MAX_BIGKTSB_TTES (TSB_BYTES(MAX_BIGKTSB_SZCODE) / MMU_PAGESIZE4M)
1985 1976
1986 1977 #define TAG_VALO_SHIFT 22 /* tag's va are bits 63-22 */
1987 1978 /*
1988 1979 * sw bits used on tsb_tag - bit masks used only in assembly
1989 1980 * use only a sethi for these fields.
1990 1981 */
1991 1982 #define TSBTAG_INVALID 0x00008000 /* tsb_tag.tag_invalid */
1992 1983 #define TSBTAG_LOCKED 0x00004000 /* tsb_tag.tag_locked */
1993 1984
1994 1985 #ifdef _ASM
1995 1986
1996 1987 /*
1997 1988 * Marker to indicate that this instruction will be hot patched at runtime
1998 1989 * to some other value.
1999 1990 * This value must be zero since it fills in the imm bits of the target
2000 1991 * instructions to be patched.
2001 1992 */
2002 1993 #define RUNTIME_PATCH (0)
2003 1994
2004 1995 /*
2005 1996 * V9 defines the nop instruction as the following, which we use
2006 1997 * at runtime to nullify some instructions we don't want to
2007 1998 * execute in the trap handlers on certain platforms.
2008 1999 */
2009 2000 #define MAKE_NOP_INSTR(reg) \
2010 2001 sethi %hi(0x1000000), reg
2011 2002
2012 2003 /*
2013 2004 * This macro constructs a SPARC V9 "jmpl <source reg>, %g0"
2014 2005 * instruction, with the source register specified by the jump_reg_number.
2015 2006 * The jmp opcode [24:19] = 11 1000 and source register is bits [18:14].
2016 2007 * The instruction is returned in reg. The macro is used to patch in a jmpl
2017 2008 * instruction at runtime.
2018 2009 */
2019 2010 #define MAKE_JMP_INSTR(jump_reg_number, reg, tmp) \
2020 2011 sethi %hi(0x81c00000), reg; \
2021 2012 mov jump_reg_number, tmp; \
2022 2013 sll tmp, 14, tmp; \
2023 2014 or reg, tmp, reg
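
Worked instance of the encoding above: for jump_reg_number 1 (%g1) the patched word is

	/*
	 *	0x81c00000 | (1 << 14) == 0x81c04000 == "jmpl %g1, %g0"
	 *
	 * i.e. op=10, rd=%g0, op3=111000 (jmpl), rs1=%g1, i=0, rs2=%g0.
	 */
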
2024 2015
2025 2016 /*
2026 2017 * Macro to get hat per-MMU cnum on this CPU.
2027 2018 * sfmmu - In, pass in "sfmmup" from the caller.
2028 2019 * cnum - Out, return 'cnum' to the caller
2029 2020 * scr - scratch
2030 2021 */
2031 2022 #define SFMMU_CPU_CNUM(sfmmu, cnum, scr) \
2032 2023 CPU_ADDR(scr, cnum); /* scr = load CPU struct addr */ \
2033 2024 ld [scr + CPU_MMU_IDX], cnum; /* cnum = mmuid */ \
2034 2025 add sfmmu, SFMMU_CTXS, scr; /* scr = sfmmup->sfmmu_ctxs[] */ \
2035 2026 sllx cnum, SFMMU_MMU_CTX_SHIFT, cnum; \
2036 2027 add scr, cnum, scr; /* scr = sfmmup->sfmmu_ctxs[id] */ \
2037 2028 ldx [scr + SFMMU_MMU_GC_NUM], scr; /* sfmmu_ctxs[id].gcnum */ \
2038 2029 sllx scr, SFMMU_MMU_CNUM_LSHIFT, scr; \
2039 2030 srlx scr, SFMMU_MMU_CNUM_LSHIFT, cnum; /* cnum = sfmmu cnum */
2040 2031
2041 2032 /*
2042 2033 * Macro to get hat gnum & cnum associated with the sfmmu_ctx[mmuid] entry
2043 2034 * entry - In, pass in (&sfmmu_ctxs[mmuid] - SFMMU_CTXS) from the caller.
2044 2035 * gnum - Out, return sfmmu gnum
2045 2036 * cnum - Out, return sfmmu cnum
2046 2037 * reg - scratch
2047 2038 */
2048 2039 #define SFMMU_MMUID_GNUM_CNUM(entry, gnum, cnum, reg) \
2049 2040 ldx [entry + SFMMU_CTXS], reg; /* reg = sfmmu (gnum | cnum) */ \
2050 2041 srlx reg, SFMMU_MMU_GNUM_RSHIFT, gnum; /* gnum = sfmmu gnum */ \
2051 2042 sllx reg, SFMMU_MMU_CNUM_LSHIFT, cnum; \
2052 2043 srlx cnum, SFMMU_MMU_CNUM_LSHIFT, cnum; /* cnum = sfmmu cnum */
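
The same decode expressed as a C fragment for clarity (a sketch; gnum and cnum are assumed to fit within MAX_SFMMU_GNUM_VAL and MAX_SFMMU_CTX_VAL): gnum occupies bits 63..16 and cnum bits 15..0 of the packed word.

	uint64_t packed = (gnum << SFMMU_MMU_GNUM_RSHIFT) | cnum;
	uint64_t g = packed >> SFMMU_MMU_GNUM_RSHIFT;		/* bits 63..16 */
	uint64_t c = (packed << SFMMU_MMU_CNUM_LSHIFT) >>
	    SFMMU_MMU_CNUM_LSHIFT;				/* bits 15..0 */
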
2053 2044
2054 2045 /*
2055 2046 * Macro to get this CPU's tsbmiss area.
2056 2047 */
2057 2048 #define CPU_TSBMISS_AREA(tsbmiss, tmp1) \
2058 2049 CPU_INDEX(tmp1, tsbmiss); /* tmp1 = cpu idx */ \
2059 2050 sethi %hi(tsbmiss_area), tsbmiss; /* tsbmiss base ptr */ \
2060 2051 mulx tmp1, TSBMISS_SIZE, tmp1; /* byte offset */ \
2061 2052 or tsbmiss, %lo(tsbmiss_area), tsbmiss; \
2062 2053 add tsbmiss, tmp1, tsbmiss /* tsbmiss area of CPU */
2063 2054
2064 2055
2065 2056 /*
2066 2057 * Macro to set kernel context + page size codes in DMMU primary context
2067 2058 * register. It is only necessary for sun4u because sun4v does not need
2068 2059 * page size codes.
2069 2060 */
2070 2061 #ifdef sun4v
2071 2062
2072 2063 #define SET_KCONTEXTREG(reg0, reg1, reg2, reg3, reg4, label1, label2, label3)
2073 2064
2074 2065 #else
2075 2066
2076 2067 #define SET_KCONTEXTREG(reg0, reg1, reg2, reg3, reg4, label1, label2, label3) \
2077 2068 sethi %hi(kcontextreg), reg0; \
2078 2069 ldx [reg0 + %lo(kcontextreg)], reg0; \
2079 2070 mov MMU_PCONTEXT, reg1; \
2080 2071 ldxa [reg1]ASI_MMU_CTX, reg2; \
2081 2072 xor reg0, reg2, reg2; \
2082 2073 brz reg2, label3; \
2083 2074 srlx reg2, CTXREG_NEXT_SHIFT, reg2; \
2084 2075 rdpr %pstate, reg3; /* disable interrupts */ \
2085 2076 btst PSTATE_IE, reg3; \
2086 2077 /*CSTYLED*/ \
2087 2078 bnz,a,pt %icc, label1; \
2088 2079 wrpr reg3, PSTATE_IE, %pstate; \
2089 2080 /*CSTYLED*/ \
2090 2081 label1:; \
2091 2082 brz reg2, label2; /* need demap if N_pgsz0/1 change */ \
2092 2083 sethi %hi(FLUSH_ADDR), reg4; \
2093 2084 mov DEMAP_ALL_TYPE, reg2; \
2094 2085 stxa %g0, [reg2]ASI_DTLB_DEMAP; \
2095 2086 stxa %g0, [reg2]ASI_ITLB_DEMAP; \
2096 2087 /*CSTYLED*/ \
2097 2088 label2:; \
2098 2089 stxa reg0, [reg1]ASI_MMU_CTX; \
2099 2090 flush reg4; \
2100 2091 btst PSTATE_IE, reg3; \
2101 2092 /*CSTYLED*/ \
2102 2093 bnz,a,pt %icc, label3; \
2103 2094 wrpr %g0, reg3, %pstate; /* restore interrupt state */ \
2104 2095 label3:;
2105 2096
2106 2097 #endif
2107 2098
2108 2099 /*
2109 2100 * Macro to set up arguments with kernel sfmmup context + page size before
2110 2101 * calling sfmmu_setctx_sec()
2111 2102 */
2112 2103 #ifdef sun4v
2113 2104 #define SET_KAS_CTXSEC_ARGS(sfmmup, arg0, arg1) \
2114 2105 set KCONTEXT, arg0; \
2115 2106 set 0, arg1;
2116 2107 #else
2117 2108 #define SET_KAS_CTXSEC_ARGS(sfmmup, arg0, arg1) \
2118 2109 ldub [sfmmup + SFMMU_CEXT], arg1; \
2119 2110 set KCONTEXT, arg0; \
2120 2111 sll arg1, CTXREG_EXT_SHIFT, arg1;
2121 2112 #endif
2122 2113
2123 2114 #define PANIC_IF_INTR_DISABLED_PSTR(pstatereg, label, scr) \
2124 2115 andcc pstatereg, PSTATE_IE, %g0; /* panic if intrs */ \
2125 2116 /*CSTYLED*/ \
2126 2117 bnz,pt %icc, label; /* already disabled */ \
2127 2118 nop; \
2128 2119 \
2129 2120 sethi %hi(panicstr), scr; \
2130 2121 ldx [scr + %lo(panicstr)], scr; \
2131 2122 tst scr; \
2132 2123 /*CSTYLED*/ \
2133 2124 bnz,pt %xcc, label; \
2134 2125 nop; \
2135 2126 \
2136 2127 save %sp, -SA(MINFRAME), %sp; \
2137 2128 sethi %hi(sfmmu_panic1), %o0; \
2138 2129 call panic; \
2139 2130 or %o0, %lo(sfmmu_panic1), %o0; \
2140 2131 /*CSTYLED*/ \
2141 2132 label:
2142 2133
2143 2134 #define PANIC_IF_INTR_ENABLED_PSTR(label, scr) \
2144 2135 /* \
2145 2136 * The caller must have disabled interrupts. \
2146 2137 * If interrupts are not disabled, panic \
2147 2138 */ \
2148 2139 rdpr %pstate, scr; \
2149 2140 andcc scr, PSTATE_IE, %g0; \
2150 2141 /*CSTYLED*/ \
2151 2142 bz,pt %icc, label; \
2152 2143 nop; \
2153 2144 \
2154 2145 sethi %hi(panicstr), scr; \
2155 2146 ldx [scr + %lo(panicstr)], scr; \
2156 2147 tst scr; \
2157 2148 /*CSTYLED*/ \
2158 2149 bnz,pt %xcc, label; \
2159 2150 nop; \
2160 2151 \
2161 2152 sethi %hi(sfmmu_panic6), %o0; \
2162 2153 call panic; \
2163 2154 or %o0, %lo(sfmmu_panic6), %o0; \
2164 2155 /*CSTYLED*/ \
2165 2156 label:
2166 2157
2167 2158 #endif /* _ASM */
2168 2159
2169 2160 #ifndef _ASM
2170 2161
2171 2162 #ifdef VAC
2172 2163 /*
2173 2164 * Page coloring
2174 2165 * The p_vcolor field of the page struct (1 byte) is used to store the
2175 2166 * virtual page color. This provides for 255 colors. The value zero is
2176 2167 * used to mean the page has no color: it has never been mapped or has
2177 2168 * somehow been purified.
2178 2169 */
2179 2170
2180 2171 #define PP_GET_VCOLOR(pp) (((pp)->p_vcolor) - 1)
2181 2172 #define PP_NEWPAGE(pp) (!(pp)->p_vcolor)
2182 2173 #define PP_SET_VCOLOR(pp, color) \
2183 2174 ((pp)->p_vcolor = ((color) + 1))
2184 2175
2185 2176 /*
2186 2177 * As mentioned, p_vcolor == 0 means there is no color for this page.
2187 2178 * But PP_SET_VCOLOR(pp, color) expects 'color' to be the real color minus
2188 2179 * one, so we define this constant.
2189 2180 */
2190 2181 #define NO_VCOLOR (-1)
2191 2182
2192 2183 #define addr_to_vcolor(addr) \
2193 2184 (((uint_t)(uintptr_t)(addr) >> MMU_PAGESHIFT) & vac_colors_mask)
2194 2185 #else /* VAC */
2195 2186 #define addr_to_vcolor(addr) (0)
2196 2187 #endif /* VAC */
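
A sketch of how the +1 bias on p_vcolor gets used when VAC is configured; "demo_note_color" is a hypothetical helper:

	static void
	demo_note_color(page_t *pp, caddr_t vaddr)
	{
		if (PP_NEWPAGE(pp)) {
			/* p_vcolor == 0: never mapped; adopt this va's color */
			PP_SET_VCOLOR(pp, addr_to_vcolor(vaddr));
		} else if (PP_GET_VCOLOR(pp) != addr_to_vcolor(vaddr)) {
			/* colors differ: a VAC conflict must be resolved */
		}
	}
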
2197 2188
2198 2189 /*
2199 2190 * The field p_index in the psm page structure is for large pages support.
2200 2191 * P_index is a bit-vector of the different mapping sizes that a given page
2201 2192 * is part of. An hme structure for a large mapping is only added in the
2202 2193 * group leader page (first page). All pages covered by a given large mapping
2203 2194 * have the corresponding mapping bit set in their p_index field. This allows
2204 2195 * us to only store an explicit hme structure in the leading page which
2205 2196 * simplifies the mapping link list management. Furthermore, it provides us
2206 2197 * a fast mechanism for determining the largest mapping a page is part of. For
2207 2198 * example, a page with a 64K and a 4M mapping has a p_index value of 0x0A.
2208 2199 *
2209 2200 * Implementation note: even though the first bit in p_index is reserved
2210 2201 * for 8K mappings, it is NOT USED by the code and SHOULD NOT be set.
2211 2202 * In addition, the upper four bits of the p_index field are used by the
2212 2203 * code as temporaries.
2213 2204 */
2214 2205
2215 2206 /*
2216 2207 * Defines for psm page struct fields and large page support
2217 2208 */
2218 2209 #define SFMMU_INDEX_SHIFT 6
2219 2210 #define SFMMU_INDEX_MASK ((1 << SFMMU_INDEX_SHIFT) - 1)
2220 2211
2221 2212 /* Return the mapping index */
2222 2213 #define PP_MAPINDEX(pp) ((pp)->p_index & SFMMU_INDEX_MASK)
2223 2214
2224 2215 /*
2225 2216 * These macros rely on the following property:
2226 2217 * All pages constituting a large page are covered by a virtually
2227 2218 * contiguous set of page_t's.
2228 2219 */
2229 2220
2230 2221 /* Return the leader for this mapping size */
2231 2222 #define PP_GROUPLEADER(pp, sz) \
2232 2223 (&(pp)[-(int)(pp->p_pagenum & (TTEPAGES(sz)-1))])
2233 2224
2234 2225 /* Return the root page for this page based on p_szc */
2235 2226 #define PP_PAGEROOT(pp) ((pp)->p_szc == 0 ? (pp) : \
2236 2227 PP_GROUPLEADER((pp), (pp)->p_szc))
2237 2228
2238 2229 #define PP_PAGENEXT_N(pp, n) ((pp) + (n))
2239 2230 #define PP_PAGENEXT(pp) PP_PAGENEXT_N((pp), 1)
2240 2231
2241 2232 #define PP_PAGEPREV_N(pp, n) ((pp) - (n))
2242 2233 #define PP_PAGEPREV(pp) PP_PAGEPREV_N((pp), 1)
2243 2234
2244 2235 #define PP_ISMAPPED_LARGE(pp) (PP_MAPINDEX(pp) != 0)
2245 2236
2246 2237 /* Need a function to test the page mapping which takes p_index into account */
2247 2238 #define PP_ISMAPPED(pp) ((pp)->p_mapping || PP_ISMAPPED_LARGE(pp))
2248 2239
2249 2240 /*
2250 2241 * Don't call this macro with sz equal to zero. 8K mappings SHOULD NOT
2251 2242 * set p_index field.
2252 2243 */
2253 2244 #define PAGESZ_TO_INDEX(sz) (1 << (sz))
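
Worked instance of the 0x0A example above, assuming the usual sfmmu tte size codes (TTE8K=0, TTE64K=1, TTE512K=2, TTE4M=3):

	/*
	 *	PAGESZ_TO_INDEX(TTE64K) == 1 << 1 == 0x02
	 *	PAGESZ_TO_INDEX(TTE4M)  == 1 << 3 == 0x08
	 *
	 * so a page under both a 64K and a 4M mapping has p_index 0x0A.
	 */
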
2254 2245
2255 2246
2256 2247 /*
2257 2248 * prototypes for hat assembly routines. Some of these are
2258 2249 * known to machine dependent VM code.
2259 2250 */
2260 2251 extern uint64_t sfmmu_make_tsbtag(caddr_t);
2261 2252 extern struct tsbe *
2262 2253 sfmmu_get_tsbe(uint64_t, caddr_t, int, int);
2263 2254 extern void sfmmu_load_tsbe(struct tsbe *, uint64_t, tte_t *, int);
2264 2255 extern void sfmmu_unload_tsbe(struct tsbe *, uint64_t, int);
2265 2256 extern void sfmmu_load_mmustate(sfmmu_t *);
2266 2257 extern void sfmmu_raise_tsb_exception(uint64_t, uint64_t);
2267 2258 #ifndef sun4v
2268 2259 extern void sfmmu_itlb_ld_kva(caddr_t, tte_t *);
2269 2260 extern void sfmmu_dtlb_ld_kva(caddr_t, tte_t *);
2270 2261 #endif /* sun4v */
2271 2262 extern void sfmmu_copytte(tte_t *, tte_t *);
2272 2263 extern int sfmmu_modifytte(tte_t *, tte_t *, tte_t *);
2273 2264 extern int sfmmu_modifytte_try(tte_t *, tte_t *, tte_t *);
2274 2265 extern pfn_t sfmmu_ttetopfn(tte_t *, caddr_t);
2275 2266 extern uint_t sfmmu_disable_intrs(void);
2276 2267 extern void sfmmu_enable_intrs(uint_t);
2277 2268 /*
2278 2269 * functions exported to machine dependent VM code
2279 2270 */
2280 2271 extern void sfmmu_patch_ktsb(void);
2281 2272 #ifndef UTSB_PHYS
2282 2273 extern void sfmmu_patch_utsb(void);
2283 2274 #endif /* UTSB_PHYS */
2284 2275 extern pfn_t sfmmu_vatopfn(caddr_t, sfmmu_t *, tte_t *);
2285 2276 extern void sfmmu_vatopfn_suspended(caddr_t, sfmmu_t *, tte_t *);
2286 2277 extern pfn_t sfmmu_kvaszc2pfn(caddr_t, int);
2287 2278 #ifdef DEBUG
2288 2279 extern void sfmmu_check_kpfn(pfn_t);
2289 2280 #else
2290 2281 #define sfmmu_check_kpfn(pfn) /* disabled */
2291 2282 #endif /* DEBUG */
2292 2283 extern void sfmmu_memtte(tte_t *, pfn_t, uint_t, int);
2293 2284 extern void sfmmu_tteload(struct hat *, tte_t *, caddr_t, page_t *, uint_t);
2294 2285 extern void sfmmu_tsbmiss_exception(struct regs *, uintptr_t, uint_t);
2295 2286 extern void sfmmu_init_tsbs(void);
2296 2287 extern caddr_t sfmmu_ktsb_alloc(caddr_t);
2297 2288 extern int sfmmu_getctx_pri(void);
2298 2289 extern int sfmmu_getctx_sec(void);
2299 2290 extern void sfmmu_setctx_sec(uint_t);
2300 2291 extern void sfmmu_inv_tsb(caddr_t, uint_t);
2301 2292 extern void sfmmu_init_ktsbinfo(void);
2302 2293 extern int sfmmu_setup_4lp(void);
2303 2294 extern void sfmmu_patch_mmu_asi(int);
2304 2295 extern void sfmmu_init_nucleus_hblks(caddr_t, size_t, int, int);
2305 2296 extern void sfmmu_cache_flushall(void);
2306 2297 extern pgcnt_t sfmmu_tte_cnt(sfmmu_t *, uint_t);
2307 2298 extern void *sfmmu_tsb_segkmem_alloc(vmem_t *, size_t, int);
2308 2299 extern void sfmmu_tsb_segkmem_free(vmem_t *, void *, size_t);
2309 2300 extern void sfmmu_reprog_pgsz_arr(sfmmu_t *, uint8_t *);
2310 2301
2311 2302 extern void hat_kern_setup(void);
2312 2303 extern int hat_page_relocate(page_t **, page_t **, spgcnt_t *);
2313 2304 extern int sfmmu_get_ppvcolor(struct page *);
2314 2305 extern int sfmmu_get_addrvcolor(caddr_t);
2315 2306 extern int sfmmu_hat_lock_held(sfmmu_t *);
2316 2307 extern int sfmmu_alloc_ctx(sfmmu_t *, int, struct cpu *, int);
2317 2308
2318 -/*
2319 - * Functions exported to xhat_sfmmu.c
2320 - */
2321 2309 extern kmutex_t *sfmmu_mlist_enter(page_t *);
2322 2310 extern void sfmmu_mlist_exit(kmutex_t *);
2323 2311 extern int sfmmu_mlist_held(struct page *);
2324 2312 extern struct hme_blk *sfmmu_hmetohblk(struct sf_hment *);
2325 2313
2326 2314 /*
2327 2315 * MMU-specific functions optionally imported from the CPU module
2328 2316 */
2329 2317 #pragma weak mmu_init_scd
2330 2318 #pragma weak mmu_large_pages_disabled
2331 2319 #pragma weak mmu_set_ctx_page_sizes
2332 2320 #pragma weak mmu_check_page_sizes
2333 2321
2334 2322 extern void mmu_init_scd(sf_scd_t *);
2335 2323 extern uint_t mmu_large_pages_disabled(uint_t);
2336 2324 extern void mmu_set_ctx_page_sizes(sfmmu_t *);
2337 2325 extern void mmu_check_page_sizes(sfmmu_t *, uint64_t *);
2338 2326
2339 2327 extern sfmmu_t *ksfmmup;
2340 2328 extern caddr_t ktsb_base;
2341 2329 extern uint64_t ktsb_pbase;
2342 2330 extern int ktsb_sz;
2343 2331 extern int ktsb_szcode;
2344 2332 extern caddr_t ktsb4m_base;
2345 2333 extern uint64_t ktsb4m_pbase;
2346 2334 extern int ktsb4m_sz;
2347 2335 extern int ktsb4m_szcode;
2348 2336 extern uint64_t kpm_tsbbase;
2349 2337 extern int kpm_tsbsz;
2350 2338 extern int ktsb_phys;
2351 2339 extern int enable_bigktsb;
2352 2340 #ifndef sun4v
2353 2341 extern int utsb_dtlb_ttenum;
2354 2342 extern int utsb4m_dtlb_ttenum;
2355 2343 #endif /* sun4v */
2356 2344 extern int uhmehash_num;
2357 2345 extern int khmehash_num;
2358 2346 extern struct hmehash_bucket *uhme_hash;
2359 2347 extern struct hmehash_bucket *khme_hash;
2360 2348 extern uint_t hblk_alloc_dynamic;
2361 2349 extern struct tsbmiss tsbmiss_area[NCPU];
2362 2350 extern struct kpmtsbm kpmtsbm_area[NCPU];
2363 2351
2364 2352 #ifndef sun4v
2365 2353 extern int dtlb_resv_ttenum;
2366 2354 extern caddr_t utsb_vabase;
2367 2355 extern caddr_t utsb4m_vabase;
2368 2356 #endif /* sun4v */
2369 2357 extern vmem_t *kmem_tsb_default_arena[];
2370 2358 extern int tsb_lgrp_affinity;
2371 2359
2372 2360 extern uint_t disable_large_pages;
2373 2361 extern uint_t disable_ism_large_pages;
2374 2362 extern uint_t disable_auto_data_large_pages;
2375 2363 extern uint_t disable_auto_text_large_pages;
2376 2364
2377 2365 /* kpm externals */
2378 2366 extern pfn_t sfmmu_kpm_vatopfn(caddr_t);
2379 2367 extern void sfmmu_kpm_patch_tlbm(void);
2380 2368 extern void sfmmu_kpm_patch_tsbm(void);
2381 2369 extern void sfmmu_patch_shctx(void);
2382 2370 extern void sfmmu_kpm_load_tsb(caddr_t, tte_t *, int);
2383 2371 extern void sfmmu_kpm_unload_tsb(caddr_t, int);
2384 2372 extern void sfmmu_kpm_tsbmtl(short *, uint_t *, int);
2385 2373 extern int sfmmu_kpm_stsbmtl(uchar_t *, uint_t *, int);
2386 2374 extern caddr_t kpm_vbase;
2387 2375 extern size_t kpm_size;
2388 2376 extern struct memseg *memseg_hash[];
2389 2377 extern uint64_t memseg_phash[];
2390 2378 extern kpm_hlk_t *kpmp_table;
2391 2379 extern kpm_shlk_t *kpmp_stable;
2392 2380 extern uint_t kpmp_table_sz;
2393 2381 extern uint_t kpmp_stable_sz;
2394 2382 extern uchar_t kpmp_shift;
2395 2383
2396 2384 #define PP_ISMAPPED_KPM(pp) ((pp)->p_kpmref > 0)
2397 2385
2398 2386 #define IS_KPM_ALIAS_RANGE(vaddr) \
2399 2387 (((vaddr) - kpm_vbase) >> (uintptr_t)kpm_size_shift > 0)
2400 2388
2401 2389 #endif /* !_ASM */
2402 2390
2403 2391 /* sfmmu_kpm_tsbmtl flags */
2404 2392 #define KPMTSBM_STOP 0
2405 2393 #define KPMTSBM_START 1
2406 2394
2407 2395 /*
2408 2396 * For kpm_smallpages, the state of how a kpm page is mapped and whether
2409 2397 * it is ready to go is indicated by the two 4-bit fields defined in the
2410 2398 * kpm_spage structure as follows:
2411 2399 * kp_mapped_flag bit[0:3] - the page is mapped cacheable or not
2412 2400 * kp_mapped_flag bit[4:7] - the mapping is ready to go or not
2413 2401 * If the bit KPM_MAPPED_GO is on, it indicates that the assembly tsb miss
2414 2402 * handler can drop the mapping in regardless of the caching state of the
2415 2403 * mapping. Otherwise, we will have the C handler resolve the VAC conflict,
2416 2404 * whether the page is currently mapped cacheable or non-cacheable.
2417 2405 */
2418 2406 #define KPM_MAPPEDS 0x1 /* small mapping valid, no conflict */
2419 2407 #define KPM_MAPPEDSC 0x2 /* small mapping valid, conflict */
2420 2408 #define KPM_MAPPED_GO 0x10 /* the mapping is ready to go */
2421 2409 #define KPM_MAPPED_MASK 0xf
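
A sketch of decoding kp_mapped_flag per the description above; the kpm_spage pointer type and helper name are assumptions for illustration:

	static int
	demo_kpm_can_go(struct kpm_spage *ksp)
	{
		int mapped = ksp->kp_mapped_flag & KPM_MAPPED_MASK; /* bits 0..3 */

		/* ready to go, mapped small and conflict free? */
		return ((ksp->kp_mapped_flag & KPM_MAPPED_GO) != 0 &&
		    mapped == KPM_MAPPEDS);
	}
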
2422 2410
2423 2411 /* Physical memseg address NULL marker */
2424 2412 #define MSEG_NULLPTR_PA -1
2425 2413
2426 2414 /*
2427 2415 * Memseg hash defines for kpm trap level tsbmiss handler.
2428 2416 * Must be in sync w/ page.h.
2429 2417 */
2430 2418 #define SFMMU_MEM_HASH_SHIFT 0x9
2431 2419 #define SFMMU_N_MEM_SLOTS 0x200
2432 2420 #define SFMMU_MEM_HASH_ENTRY_SHIFT 3
2433 2421
2434 2422 #ifndef _ASM
2435 2423 #if (SFMMU_MEM_HASH_SHIFT != MEM_HASH_SHIFT)
2436 2424 #error SFMMU_MEM_HASH_SHIFT != MEM_HASH_SHIFT
2437 2425 #endif
2438 2426 #if (SFMMU_N_MEM_SLOTS != N_MEM_SLOTS)
2439 2427 #error SFMMU_N_MEM_SLOTS != N_MEM_SLOTS
2440 2428 #endif
2441 2429
2442 2430 /* Physical memseg address NULL marker */
2443 2431 #define SFMMU_MEMSEG_NULLPTR_PA -1
2444 2432
2445 2433 /*
2446 2434 * Check that KCONTEXT is zero; asm parts depend on that assumption.
2447 2435 */
2448 2436 #if (KCONTEXT != 0)
2449 2437 #error KCONTEXT != 0
2450 2438 #endif
2451 2439 #endif /* !_ASM */
2452 2440
2453 2441
2454 2442 #endif /* _KERNEL */
2455 2443
2456 2444 #ifndef _ASM
2457 2445 /*
2458 2446 * ctx, hmeblk, mlistlock and other stats for sfmmu
2459 2447 */
2460 2448 struct sfmmu_global_stat {
2461 2449 int sf_tsb_exceptions; /* # of tsb exceptions */
2462 2450 int sf_tsb_raise_exception; /* # tsb exc. w/o TLB flush */
2463 2451
2464 2452 int sf_pagefaults; /* # of pagefaults */
2465 2453
2466 2454 int sf_uhash_searches; /* # of user hash searches */
2467 2455 int sf_uhash_links; /* # of user hash links */
2468 2456 int sf_khash_searches; /* # of kernel hash searches */
2469 2457 int sf_khash_links; /* # of kernel hash links */
2470 2458
2471 2459 int sf_swapout; /* # times hat swapped out */
2472 2460
2473 2461 int sf_tsb_alloc; /* # TSB allocations */
2474 2462 int sf_tsb_allocfail; /* # times TSB alloc fail */
2475 2463 int sf_tsb_sectsb_create; /* # times second TSB added */
2476 2464
2477 2465 int sf_scd_1sttsb_alloc; /* # SCD 1st TSB allocations */
2478 2466 int sf_scd_2ndtsb_alloc; /* # SCD 2nd TSB allocations */
2479 2467 int sf_scd_1sttsb_allocfail; /* # SCD 1st TSB alloc fail */
2480 2468 int sf_scd_2ndtsb_allocfail; /* # SCD 2nd TSB alloc fail */
2481 2469
2482 2470
2483 2471 int sf_tteload8k; /* calls to sfmmu_tteload */
2484 2472 int sf_tteload64k; /* calls to sfmmu_tteload */
2485 2473 int sf_tteload512k; /* calls to sfmmu_tteload */
2486 2474 int sf_tteload4m; /* calls to sfmmu_tteload */
2487 2475 int sf_tteload32m; /* calls to sfmmu_tteload */
2488 2476 int sf_tteload256m; /* calls to sfmmu_tteload */
2489 2477
2490 2478 int sf_tsb_load8k; /* # times loaded 8K tsbent */
2491 2479 int sf_tsb_load4m; /* # times loaded 4M tsbent */
2492 2480
2493 2481 int sf_hblk_hit; /* found hblk during tteload */
2494 2482 int sf_hblk8_ncreate; /* static hblk8's created */
2495 2483 int sf_hblk8_nalloc; /* static hblk8's allocated */
2496 2484 int sf_hblk1_ncreate; /* static hblk1's created */
2497 2485 int sf_hblk1_nalloc; /* static hblk1's allocated */
2498 2486 int sf_hblk_slab_cnt; /* sfmmu8_cache slab creates */
2499 2487 int sf_hblk_reserve_cnt; /* hblk_reserve usage */
2500 2488 int sf_hblk_recurse_cnt; /* hblk_reserve owner reqs */
2501 2489 int sf_hblk_reserve_hit; /* hblk_reserve hash hits */
2502 2490 int sf_get_free_success; /* reserve list allocs */
2503 2491 int sf_get_free_throttle; /* fails due to throttling */
2504 2492 int sf_get_free_fail; /* fails due to empty list */
2505 2493 int sf_put_free_success; /* reserve list frees */
2506 2494 int sf_put_free_fail; /* fails due to full list */
2507 2495
2508 2496 int sf_pgcolor_conflict; /* VAC conflict resolution */
2509 2497 int sf_uncache_conflict; /* VAC conflict resolution */
2510 2498 int sf_unload_conflict; /* VAC unload resolution */
2511 2499 int sf_ism_uncache; /* VAC conflict resolution */
2512 2500 int sf_ism_recache; /* VAC conflict resolution */
2513 2501 int sf_recache; /* VAC conflict resolution */
2514 2502
2515 2503 int sf_steal_count; /* # of hblks stolen */
2516 2504
2517 2505 int sf_pagesync; /* # of pagesyncs */
2518 2506 int sf_clrwrt; /* # of clear write perms */
2519 2507 int sf_pagesync_invalid; /* pagesync with inv tte */
2520 2508
2521 2509 int sf_kernel_xcalls; /* # of kernel cross calls */
2522 2510 int sf_user_xcalls; /* # of user cross calls */
2523 2511
2524 2512 int sf_tsb_grow; /* # of user tsb grows */
2525 2513 int sf_tsb_shrink; /* # of user tsb shrinks */
2526 2514 int sf_tsb_resize_failures; /* # of user tsb resize */
2527 2515 int sf_tsb_reloc; /* # of user tsb relocations */
2528 2516
2529 2517 int sf_user_vtop; /* # of user vatopfn calls */
2530 2518
2531 2519 int sf_ctx_inv; /* #times invalidate MMU ctx */
2532 2520
2533 2521 int sf_tlb_reprog_pgsz; /* # times switch TLB pgsz */
2534 2522
2535 2523 int sf_region_remap_demap; /* # times shme remap demap */
2536 2524
2537 2525 int sf_create_scd; /* # times SCD is created */
2538 2526 int sf_join_scd; /* # process joined scd */
2539 2527 int sf_leave_scd; /* # process left scd */
2540 2528 int sf_destroy_scd; /* # times SCD is destroyed */
2541 2529 };
2542 2530
2543 2531 struct sfmmu_tsbsize_stat {
2544 2532 int sf_tsbsz_8k;
2545 2533 int sf_tsbsz_16k;
2546 2534 int sf_tsbsz_32k;
2547 2535 int sf_tsbsz_64k;
2548 2536 int sf_tsbsz_128k;
2549 2537 int sf_tsbsz_256k;
2550 2538 int sf_tsbsz_512k;
2551 2539 int sf_tsbsz_1m;
2552 2540 int sf_tsbsz_2m;
2553 2541 int sf_tsbsz_4m;
2554 2542 int sf_tsbsz_8m;
2555 2543 int sf_tsbsz_16m;
2556 2544 int sf_tsbsz_32m;
2557 2545 int sf_tsbsz_64m;
2558 2546 int sf_tsbsz_128m;
2559 2547 int sf_tsbsz_256m;
2560 2548 };
2561 2549
2562 2550 struct sfmmu_percpu_stat {
2563 2551 int sf_itlb_misses; /* # of itlb misses */
2564 2552 int sf_dtlb_misses; /* # of dtlb misses */
2565 2553 int sf_utsb_misses; /* # of user tsb misses */
2566 2554 int sf_ktsb_misses; /* # of kernel tsb misses */
2567 2555 int sf_tsb_hits; /* # of tsb hits */
2568 2556 int sf_umod_faults; /* # of mod (prot viol) flts */
2569 2557 int sf_kmod_faults; /* # of mod (prot viol) flts */
2570 2558 };
2571 2559
2572 2560 #define SFMMU_STAT(stat) sfmmu_global_stat.stat++
2573 2561 #define SFMMU_STAT_ADD(stat, amount) sfmmu_global_stat.stat += (amount)
2574 2562 #define SFMMU_STAT_SET(stat, count) sfmmu_global_stat.stat = (count)
2575 2563
2576 2564 #define SFMMU_MMU_STAT(stat) { \
2577 2565 mmu_ctx_t *ctx = CPU->cpu_m.cpu_mmu_ctxp; \
2578 2566 if (ctx) \
2579 2567 ctx->stat++; \
2580 2568 }
2581 2569
2582 2570 #endif /* !_ASM */
2583 2571
2584 2572 #ifdef __cplusplus
2585 2573 }
2586 2574 #endif
2587 2575
2588 2576 #endif /* _VM_HAT_SFMMU_H */