Print this page
patch vm-cleanup
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/sun4u/vm/zulu_hat.c
+++ new/usr/src/uts/sun4u/vm/zulu_hat.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License, Version 1.0 only
6 6 * (the "License"). You may not use this file except in compliance
7 7 * with the License.
8 8 *
9 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 10 * or http://www.opensolaris.org/os/licensing.
11 11 * See the License for the specific language governing permissions
12 12 * and limitations under the License.
13 13 *
14 14 * When distributing Covered Code, include this CDDL HEADER in each
15 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 16 * If applicable, add the following below this CDDL HEADER, with the
17 17 * fields enclosed by brackets "[]" replaced with your own identifying
18 18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 19 *
20 20 * CDDL HEADER END
21 21 */
22 22 /*
23 23 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 #include <sys/types.h>
28 28 #include <sys/cmn_err.h>
29 29 #include <sys/mman.h>
30 30 #include <sys/sunddi.h>
31 31 #include <sys/tnf_probe.h>
32 32 #include <vm/hat_sfmmu.h>
33 33 #include <vm/as.h>
34 34 #include <vm/xhat.h>
35 35 #include <vm/xhat_sfmmu.h>
36 36 #include <sys/zulu_hat.h>
37 37 #include <sys/zulumod.h>
38 38
39 39 /*
40 40 * This file contains the implementation of zulu_hat: an XHAT provider
41 41 * to support the MMU for the XVR-4000 graphics accelerator (code name zulu).
42 42 *
43 43 * The zulu hat is linked into the kernel misc module zuluvm.
44 44 * zuluvm provides services that the zulu device driver module requires
45 45 * that are not part of the standard ddi. See PSARC 2002/231.
46 46 *
47 47 * The zulu driver is delivered by the graphics consolidation.
48 48 * zuluvm is in ON workspace.
49 49 *
50 50 * There are two types of interfaces provided by zulu_hat
51 51 * 1. The set of functions and data structures used by zuluvm to obtain
52 52 * tte entries for the zulu MMU and to manage the association between
53 53 * user process's address spaces and zulu graphics contexts.
54 54 *
55 55 * 2. The entry points required for an XHAT provider: zulu_hat_ops
56 56 */
57 57
58 58 /*
59 59 * zulu_ctx_tab contains an array of pointers to the zulu_hats.
60 60 *
61 61 * During zulu graphics context switch, the zulu MMU's current context register
62 62 * is set to the index of the process's zulu hat's location in the array
63 63 * zulu_ctx_tab.
64 64 *
65 65 * This allows the TL=1 TLB miss handler to quickly find the zulu hat and
66 66 * lookup a tte in the zulu hat's TSB.
67 67 *
68 68 * To synchronize with the trap handler we use bit zero of
69 69 * the pointer as a lock bit. See the function zulu_ctx_tsb_lock_enter().
70 70 *
71 71 * If the trap handler finds the ctx locked it doesn't wait, it
72 72 * posts a soft interrupt which is handled at TL=0.
73 73 */
74 74
/* maximum number of simultaneously active zulu contexts */
#define	ZULU_HAT_MAX_CTX 32
/* indexed by zulu MMU context number; entry i is the hat using ctx i */
struct zulu_hat *zulu_ctx_tab[ZULU_HAT_MAX_CTX];

/*
 * To avoid searching through the whole zulu_ctx_tab for a free slot,
 * we maintain the value of zulu_ctx_search_start.
 *
 * This value is a guess as to where a free slot in the context table might be.
 * All slots < zulu_ctx_search_start are definitely occupied.
 */
static int zulu_ctx_search_start = 0;


/*
 * this mutex protects the zulu_ctx_tab and zulu_ctx_search_start
 */
static kmutex_t zulu_ctx_lock;


uint64_t zulu_tsb_hit = 0;		/* assembly code increments this */
static uint64_t zulu_tsb_miss = 0;	/* TSB misses resolved at TL=0 */
static uint64_t zulu_as_fault = 0;	/* misses that required as_fault() */

/*
 * The zulu device has two zulu data mmus.
 * We use the base pagesize for one of them and 4M for the other.
 */
extern int zuluvm_base_pgsize;
103 103
104 104
105 105
106 106 /*
107 107 * call zuluvm to remove translations for a page
108 108 */
109 109 static void
110 110 zulu_hat_demap_page(struct zulu_hat *zhat, caddr_t vaddr, int size)
111 111 {
112 112 if (zhat->zulu_ctx < 0) {
113 113 /* context has been stolen, so page is already demapped */
114 114 return;
115 115 }
116 116 zuluvm_demap_page(zhat->zdev, NULL, zhat->zulu_ctx, vaddr, size);
117 117 }
118 118
/*
 * Invalidate all zulu MMU translations for a context.
 * A negative context was stolen and is already demapped.
 */
static void
zulu_hat_demap_ctx(void *zdev, int zulu_ctx)
{
	if (zulu_ctx >= 0) {
		zuluvm_demap_ctx(zdev, zulu_ctx);
	}
}
128 128
129 129
130 130 /*
131 131 * steal the least recently used context slot.
132 132 */
133 133 static int
134 134 zulu_hat_steal_ctx()
135 135 {
136 136 int ctx;
137 137 hrtime_t delta = INT64_MAX;
138 138 struct zulu_hat *zhat_oldest = NULL;
139 139
140 140 ASSERT(mutex_owned(&zulu_ctx_lock));
141 141
142 142 for (ctx = 0; ctx < ZULU_HAT_MAX_CTX; ctx++) {
143 143 struct zulu_hat *zhat = ZULU_CTX_GET_HAT(ctx);
144 144
145 145 /*
146 146 * we shouldn't be here unless all slots are occupied
147 147 */
148 148 ASSERT(zhat != NULL);
149 149
150 150 TNF_PROBE_3(steal_ctx_loop, "zulu_hat", /* CSTYLED */,
151 151 tnf_int, ctx, ctx,
152 152 tnf_long, last_used, zhat->last_used,
153 153 tnf_long, oldest, delta);
154 154
155 155 if (zhat->last_used < delta) {
156 156 zhat_oldest = zhat;
157 157 delta = zhat->last_used;
158 158 }
159 159 }
160 160
161 161 ASSERT(zhat_oldest != NULL);
162 162
163 163 mutex_enter(&zhat_oldest->lock);
164 164
165 165 /* Nobody should have the tsb lock bit set here */
166 166 ASSERT(((uint64_t)zulu_ctx_tab[zhat_oldest->zulu_ctx] & ZULU_CTX_LOCK)
167 167 == 0);
168 168
169 169 ctx = zhat_oldest->zulu_ctx;
170 170 zhat_oldest->zulu_ctx = -1;
171 171
172 172 ZULU_CTX_SET_HAT(ctx, NULL);
173 173
174 174 zulu_hat_demap_ctx(zhat_oldest->zdev, ctx);
175 175
176 176 mutex_exit(&zhat_oldest->lock);
177 177
178 178 TNF_PROBE_1(zulu_hat_steal_ctx, "zulu_hat", /* CSTYLED */,
179 179 tnf_int, ctx, ctx);
180 180
181 181 return (ctx);
182 182 }
183 183
184 184 /*
185 185 * find a slot in the context table for a zulu_hat
186 186 */
187 187 static void
188 188 zulu_hat_ctx_alloc(struct zulu_hat *zhat)
189 189 {
190 190 int ctx;
191 191
192 192 mutex_enter(&zulu_ctx_lock);
193 193
194 194 for (ctx = zulu_ctx_search_start; ctx < ZULU_HAT_MAX_CTX; ctx++) {
195 195 if (ZULU_CTX_IS_FREE(ctx)) {
196 196 zulu_ctx_search_start = ctx + 1;
197 197 break;
198 198 }
199 199 }
200 200
201 201 if (ctx == ZULU_HAT_MAX_CTX) {
202 202 /* table is full need to steal an entry */
203 203 zulu_ctx_search_start = ZULU_HAT_MAX_CTX;
204 204 ctx = zulu_hat_steal_ctx();
205 205 }
206 206
207 207 mutex_enter(&zhat->lock);
208 208
209 209 ZULU_CTX_SET_HAT(ctx, zhat);
210 210 zhat->zulu_ctx = ctx;
211 211
212 212 mutex_exit(&zhat->lock);
213 213
214 214 mutex_exit(&zulu_ctx_lock);
215 215
216 216 TNF_PROBE_2(zulu_hat_ctx_alloc, "zulu_hat", /* CSTYLED */,
217 217 tnf_opaque, zhat, zhat, tnf_int, ctx, ctx);
218 218 }
219 219
220 220 /*
221 221 * zulu_hat_validate_ctx: Called before the graphics context associated
222 222 * with a given zulu hat becomes the current zulu graphics context.
223 223 * Make sure that the hat has a slot in zulu_ctx_tab.
224 224 */
225 225 void
226 226 zulu_hat_validate_ctx(struct zulu_hat *zhat)
227 227 {
228 228 if (zhat->zulu_ctx < 0) {
229 229 zulu_hat_ctx_alloc(zhat);
230 230 }
231 231 zhat->last_used = gethrtime();
232 232 }
233 233
234 234
235 235 static void
236 236 zulu_hat_ctx_free(struct zulu_hat *zhat)
237 237 {
238 238 TNF_PROBE_1(zulu_hat_ctx_free, "zulu_hat", /* CSTYLED */,
239 239 tnf_int, ctx, zhat->zulu_ctx);
240 240
241 241 mutex_enter(&zulu_ctx_lock);
242 242
243 243 mutex_enter(&zhat->lock);
244 244 if (zhat->zulu_ctx >= 0) {
245 245 ZULU_CTX_SET_HAT(zhat->zulu_ctx, NULL);
246 246
247 247 if (zulu_ctx_search_start > zhat->zulu_ctx) {
248 248 zulu_ctx_search_start = zhat->zulu_ctx;
249 249 }
250 250 }
251 251 mutex_exit(&zhat->lock);
252 252 mutex_exit(&zulu_ctx_lock);
253 253 }
254 254
/*
 * Lock the zulu tsb for a given zulu_hat.
 *
 * We're just protecting against the TLB trap handler here. Other operations
 * on the zulu_hat require entering the zhat's lock.
 *
 * The lock is bit zero of the hat's entry in zulu_ctx_tab; it is set
 * with a compare-and-swap spin so the TL=1 handler can detect it.
 * The trap handler never waits on this lock -- it posts a soft
 * interrupt instead (see the block comment at the top of this file).
 */
static void
zulu_ctx_tsb_lock_enter(struct zulu_hat *zhat)
{
	uint64_t lck;
	uint64_t *plck;

	ASSERT(mutex_owned(&zhat->lock));

	if (zhat->zulu_ctx < 0) {
		/* no ctx slot: the trap handler can't see this hat */
		return;
	}
	plck = (uint64_t *)&zulu_ctx_tab[zhat->zulu_ctx];

	/* spin until we atomically transition the lock bit 0 -> 1 */
	for (; ; ) {
		lck = *plck;
		if (!(lck & ZULU_CTX_LOCK)) {
			uint64_t old_lck, new_lck;

			new_lck = lck | ZULU_CTX_LOCK;

			old_lck = cas64(plck, lck, new_lck);

			if (old_lck == lck) {
				/*
				 * success
				 */
				break;
			}
		}
	}
}
292 292
293 293 static void
294 294 zulu_ctx_tsb_lock_exit(struct zulu_hat *zhat)
295 295 {
296 296 uint64_t lck;
297 297 int zulu_ctx = zhat->zulu_ctx;
298 298
299 299 if (zulu_ctx < 0) {
300 300 return;
301 301 }
302 302 lck = (uint64_t)zulu_ctx_tab[zulu_ctx];
303 303 ASSERT(lck & ZULU_CTX_LOCK);
304 304 lck &= ~ZULU_CTX_LOCK;
305 305 zulu_ctx_tab[zulu_ctx] = (struct zulu_hat *)lck;
306 306 }
307 307
308 308 /*
309 309 * Each zulu hat has a "shadow tree" which is a table of 4MB address regions
310 310 * for which the zhat has mappings.
311 311 *
312 312 * This table is maintained in an avl tree.
313 313 * Nodes in the tree are called shadow blocks (or sblks)
314 314 *
315 315 * This data structure allows unload operations by (address, range) to be
316 316	 * much more efficient.
317 317 *
318 318 * We get called a lot for address ranges that have never been supplied
319 319 * to zulu.
320 320 */
321 321
322 322 /*
323 323 * compare the base address of two nodes in the shadow tree
324 324 */
325 325 static int
326 326 zulu_shadow_tree_compare(const void *a, const void *b)
327 327 {
328 328 struct zulu_shadow_blk *zba = (struct zulu_shadow_blk *)a;
329 329 struct zulu_shadow_blk *zbb = (struct zulu_shadow_blk *)b;
330 330 uint64_t addr_a = zba->ivaddr;
331 331 uint64_t addr_b = zbb->ivaddr;
332 332
333 333 TNF_PROBE_2(zulu_shadow_tree_compare, "zulu_shadow_tree", /* CSTYLED */,
334 334 tnf_opaque, addr_a, addr_a, tnf_opaque, addr_b, addr_b);
335 335
336 336 if (addr_a < addr_b) {
337 337 return (-1);
338 338 } else if (addr_a > addr_b) {
339 339 return (1);
340 340 } else {
341 341 return (0);
342 342 }
343 343 }
344 344
345 345 /*
346 346 * lookup the entry in the shadow tree for a given virtual address
347 347 */
348 348 static struct zulu_shadow_blk *
349 349 zulu_shadow_tree_lookup(struct zulu_hat *zhat, uint64_t ivaddr,
350 350 avl_index_t *where)
351 351 {
352 352 struct zulu_shadow_blk proto;
353 353 struct zulu_shadow_blk *sblk;
354 354
355 355 proto.ivaddr = ivaddr & ZULU_SHADOW_BLK_MASK;
356 356
357 357 /*
358 358 * pages typically fault in in order so we cache the last shadow
359 359 * block that was referenced so we usually get to reduce calls to
360 360 * avl_find.
361 361 */
362 362 if ((zhat->sblk_last != NULL) &&
363 363 (proto.ivaddr == zhat->sblk_last->ivaddr)) {
364 364 sblk = zhat->sblk_last;
365 365 } else {
366 366 sblk = (struct zulu_shadow_blk *)avl_find(&zhat->shadow_tree,
367 367 &proto, where);
368 368 zhat->sblk_last = sblk;
369 369 }
370 370
371 371 TNF_PROBE_2(zulu_shadow_tree_lookup, "zulu_shadow_tree", /* CSTYLED */,
372 372 tnf_opaque, ivaddr, proto.ivaddr,
373 373 tnf_opaque, where, where ? *where : ~0);
374 374
375 375 return (sblk);
376 376 }
377 377
378 378 /*
379 379 * insert a sblk into the shadow tree for a given zblk.
380 380 * If a sblk already exists, just increment it's refcount.
381 381 */
382 382 static void
383 383 zulu_shadow_tree_insert(struct zulu_hat *zhat, struct zulu_hat_blk *zblk)
384 384 {
385 385 avl_index_t where;
386 386 struct zulu_shadow_blk *sblk = NULL;
387 387 uint64_t ivaddr;
388 388 uint64_t end;
389 389
390 390 ivaddr = zblk->zulu_hat_blk_vaddr & ZULU_SHADOW_BLK_MASK;
391 391
392 392 end = zblk->zulu_hat_blk_vaddr + ZULU_HAT_PGSZ(zblk->zulu_hat_blk_size);
393 393
394 394 sblk = zulu_shadow_tree_lookup(zhat, ivaddr, &where);
395 395 if (sblk != NULL) {
396 396 sblk->ref_count++;
397 397
398 398 end = zblk->zulu_hat_blk_vaddr +
399 399 ZULU_HAT_PGSZ(zblk->zulu_hat_blk_size);
400 400 if (zblk->zulu_hat_blk_vaddr < sblk->min_addr) {
401 401 sblk->min_addr = zblk->zulu_hat_blk_vaddr;
402 402 }
403 403 /*
404 404 * a blk can set both the minimum and maximum when it
405 405 * is the first zblk added to a previously emptied sblk
406 406 */
407 407 if (end > sblk->max_addr) {
408 408 sblk->max_addr = end;
409 409 }
410 410 } else {
411 411 sblk = kmem_zalloc(sizeof (*sblk), KM_SLEEP);
412 412 sblk->ref_count = 1;
413 413 sblk->ivaddr = ivaddr;
414 414 sblk->min_addr = zblk->zulu_hat_blk_vaddr;
415 415 sblk->max_addr = end;
416 416 zhat->sblk_last = sblk;
417 417
418 418 avl_insert(&zhat->shadow_tree, sblk, where);
419 419 }
420 420 zblk->zulu_shadow_blk = sblk;
421 421 TNF_PROBE_2(zulu_shadow_tree_insert, "zulu_shadow_tree", /* CSTYLED */,
422 422 tnf_opaque, vaddr, ivaddr,
423 423 tnf_opaque, ref_count, sblk->ref_count);
424 424 }
425 425
/*
 * Decrement the ref_count for the sblk that corresponds to a given zblk.
 *
 * NOTE(review): despite what an older comment here claimed, an sblk
 * whose ref_count reaches zero is NOT removed from the tree or freed
 * here.  Its watermarks are reset to an empty range and the node is
 * left in place for reuse (see the "previously emptied sblk" case in
 * zulu_shadow_tree_insert()).  Freeing happens only in
 * zulu_shadow_tree_destroy().
 */

static void
zulu_shadow_tree_delete(struct zulu_hat *zhat, struct zulu_hat_blk *zblk)
{
	struct zulu_shadow_blk *sblk;

	ASSERT(zblk->zulu_shadow_blk != NULL);

	sblk = zblk->zulu_shadow_blk;

	TNF_PROBE_2(zulu_shadow_tree_delete, "zulu_shadow_tree", /* CSTYLED */,
	    tnf_opaque, vaddr, sblk->ivaddr,
	    tnf_opaque, ref_count, sblk->ref_count-1);

	if (--sblk->ref_count == 0) {
		/* invalidate the lookup cache and empty the watermarks */
		if (zhat->sblk_last == sblk) {
			zhat->sblk_last = NULL;
		}
		/* min > max denotes an empty range */
		sblk->min_addr = sblk->ivaddr + ZULU_SHADOW_BLK_RANGE;
		sblk->max_addr = sblk->ivaddr;
	} else {
		/*
		 * Update the high and low water marks for this sblk.
		 * These are estimates, because we don't know if the previous
		 * or next region are actually occupied, but we can tell
		 * whether the previous values have become invalid.
		 *
		 * In the most often applied case a segment is being
		 * unloaded, and the min_addr will be kept up to date as
		 * the zblks are deleted in order.
		 */
		uint64_t end = zblk->zulu_hat_blk_vaddr +
		    ZULU_HAT_PGSZ(zblk->zulu_hat_blk_size);

		if (zblk->zulu_hat_blk_vaddr == sblk->min_addr) {
			sblk->min_addr = end;
		}
		if (end == sblk->max_addr) {
			sblk->max_addr = zblk->zulu_hat_blk_vaddr;
		}
	}

	zblk->zulu_shadow_blk = NULL;
}
474 474
475 475 static void
476 476 zulu_shadow_tree_destroy(struct zulu_hat *zhat)
477 477 {
478 478 struct zulu_shadow_blk *sblk;
479 479 void *cookie = NULL;
480 480
481 481 while ((sblk = (struct zulu_shadow_blk *)avl_destroy_nodes(
482 482 &zhat->shadow_tree, &cookie)) != NULL) {
483 483 TNF_PROBE_2(shadow_tree_destroy, "zulu_hat", /* CSTYLED */,
484 484 tnf_opaque, vaddr, sblk->ivaddr,
485 485 tnf_opaque, ref_count, sblk->ref_count);
486 486 kmem_free(sblk, sizeof (*sblk));
487 487 }
488 488 avl_destroy(&zhat->shadow_tree);
489 489 }
490 490
491 491 /*
492 492 * zulu_hat_insert_map:
493 493 *
494 494 * Add a zulu_hat_blk to the a zhat's mappings list.
495 495 *
496 496 * Several data stuctures are used
497 497 * tsb: for simple fast lookups by the trap handler
498 498 * hash table: for efficent lookups by address, range
499 499 * An shadow tree of 4MB ranges with mappings for unloading big regions.
500 500 */
501 501 static void
502 502 zulu_hat_insert_map(struct zulu_hat *zhat, struct zulu_hat_blk *zblk)
503 503 {
504 504 int tsb_hash;
505 505
506 506 tsb_hash = ZULU_TSB_HASH(zblk->zulu_hat_blk_vaddr,
507 507 zblk->zulu_hat_blk_size, zhat->zulu_tsb_size);
508 508
509 509 TNF_PROBE_3(zulu_hat_insert_map, "zulu_hat", /* CSTYLED */,
510 510 tnf_opaque, zblkp, zblk,
511 511 tnf_opaque, vaddr, zblk->zulu_hat_blk_vaddr,
512 512 tnf_opaque, hash, tsb_hash);
513 513
514 514 ASSERT(tsb_hash < zhat->zulu_tsb_size);
515 515
516 516 zulu_shadow_tree_insert(zhat, zblk);
517 517
518 518 /*
519 519 * The hash table is an array of buckets. Each bucket is the
520 520 * head of a linked list of mappings who's address hashess to the bucket
521 521 * New entries go to the head of the list.
522 522 */
523 523 zblk->zulu_hash_prev = NULL;
524 524 zblk->zulu_hash_next = ZULU_MAP_HASH_HEAD(zhat,
525 525 zblk->zulu_hat_blk_vaddr, zblk->zulu_hat_blk_size);
526 526 if (zblk->zulu_hash_next) {
527 527 zblk->zulu_hash_next->zulu_hash_prev = zblk;
528 528 }
529 529 ZULU_MAP_HASH_HEAD(zhat, zblk->zulu_hat_blk_vaddr,
530 530 zblk->zulu_hat_blk_size) = zblk;
531 531
532 532 zulu_ctx_tsb_lock_enter(zhat);
533 533 zhat->zulu_tsb[tsb_hash] = zblk->zulu_hat_blk_tte;
534 534 zulu_ctx_tsb_lock_exit(zhat);
535 535 }
536 536
537 537 /*
538 538 * remove a block from a zhat
539 539 */
540 540 static void
541 541 zulu_hat_remove_map(struct zulu_hat *zhat, struct zulu_hat_blk *zblk)
542 542 {
543 543 int tsb_hash = ZULU_TSB_HASH(zblk->zulu_hat_blk_vaddr,
544 544 zblk->zulu_hat_blk_size, zhat->zulu_tsb_size);
545 545
546 546 TNF_PROBE_2(zulu_hat_remove_map, "zulu_hat", /* CSTYLED */,
547 547 tnf_opaque, vaddr, zblk->zulu_hat_blk_vaddr,
548 548 tnf_opaque, hash, tsb_hash);
549 549
550 550 ASSERT(tsb_hash < zhat->zulu_tsb_size);
551 551 ASSERT(mutex_owned(&zhat->lock));
552 552
553 553 zulu_shadow_tree_delete(zhat, zblk);
554 554
555 555 /*
556 556 * first remove zblk from hash table
557 557 */
558 558 if (zblk->zulu_hash_prev) {
559 559 zblk->zulu_hash_prev->zulu_hash_next = zblk->zulu_hash_next;
560 560 } else {
561 561 ZULU_MAP_HASH_HEAD(zhat, zblk->zulu_hat_blk_vaddr,
562 562 zblk->zulu_hat_blk_size) = NULL;
563 563 }
564 564 if (zblk->zulu_hash_next) {
565 565 zblk->zulu_hash_next->zulu_hash_prev = zblk->zulu_hash_prev;
566 566 }
567 567 zblk->zulu_hash_next = NULL;
568 568 zblk->zulu_hash_prev = NULL;
569 569
570 570 /*
571 571 * then remove the tsb entry
572 572 */
573 573 zulu_ctx_tsb_lock_enter(zhat);
574 574 if (zhat->zulu_tsb[tsb_hash].un.zulu_tte_addr ==
575 575 zblk->zulu_hat_blk_vaddr) {
576 576 zhat->zulu_tsb[tsb_hash].zulu_tte_valid = 0;
577 577 }
578 578 zulu_ctx_tsb_lock_exit(zhat);
579 579 }
580 580
581 581 /*
582 582 * look for a mapping to a given vaddr and page size
583 583 */
584 584 static struct zulu_hat_blk *
585 585 zulu_lookup_map_bysize(struct zulu_hat *zhat, caddr_t vaddr, int page_sz)
586 586 {
587 587 struct zulu_hat_blk *zblkp;
588 588 uint64_t ivaddr = (uint64_t)vaddr;
589 589 int blks_checked = 0;
590 590
591 591 ASSERT(mutex_owned(&zhat->lock));
592 592
593 593 for (zblkp = ZULU_MAP_HASH_HEAD(zhat, ivaddr, page_sz); zblkp != NULL;
594 594 zblkp = zblkp->zulu_hash_next) {
595 595 uint64_t size;
596 596 uint64_t iaddr;
597 597
598 598 blks_checked++;
599 599
600 600 size = ZULU_HAT_PGSZ(zblkp->zulu_hat_blk_size);
601 601 iaddr = ZULU_VADDR((uint64_t)zblkp->zulu_hat_blk_vaddr);
602 602
603 603 if (iaddr <= ivaddr && (iaddr + size) > ivaddr) {
604 604 int tsb_hash;
605 605
606 606 tsb_hash = ZULU_TSB_HASH(zblkp->zulu_hat_blk_vaddr,
607 607 zblkp->zulu_hat_blk_size,
608 608 zhat->zulu_tsb_size);
609 609 ASSERT(tsb_hash < zhat->zulu_tsb_size);
610 610
611 611 zulu_ctx_tsb_lock_enter(zhat);
612 612 zhat->zulu_tsb[tsb_hash] = zblkp->zulu_hat_blk_tte;
613 613 zulu_ctx_tsb_lock_exit(zhat);
614 614 break;
615 615 }
616 616
617 617 }
618 618
619 619 TNF_PROBE_3(zulu_hat_lookup_map_bysz, "zulu_hat", /* CSTYLED */,
620 620 tnf_opaque, zblkp, zblkp,
621 621 tnf_int, blks_checked, blks_checked,
622 622 tnf_int, page_sz, page_sz);
623 623
624 624 return (zblkp);
625 625 }
626 626
627 627 /*
628 628 * Lookup a zblk for a given virtual address.
629 629 */
630 630 static struct zulu_hat_blk *
631 631 zulu_lookup_map(struct zulu_hat *zhat, caddr_t vaddr)
632 632 {
633 633 struct zulu_hat_blk *zblkp = NULL;
634 634
635 635 /*
636 636 * if the hat is using 4M pages, look first for a 4M page
637 637 */
638 638 if (zhat->map4m) {
639 639 zblkp = zulu_lookup_map_bysize(zhat, vaddr, ZULU_TTE4M);
640 640 if (zblkp != NULL) {
641 641 return (zblkp);
642 642 }
643 643 }
644 644 /*
645 645 * Otherwise look for a 8k page
646 646 * Note: if base pagesize gets increased to 64K remove this test
647 647 */
648 648 if (zhat->map8k) {
649 649 zblkp = zulu_lookup_map_bysize(zhat, vaddr, ZULU_TTE8K);
650 650 if (zblkp != NULL) {
651 651 return (zblkp);
652 652 }
653 653 }
654 654 /*
655 655 * only if the page isn't found in the sizes that match the zulu mmus
656 656 * look for the inefficient 64K or 512K page sizes
657 657 */
658 658 if (zhat->map64k) {
659 659 zblkp = zulu_lookup_map_bysize(zhat, vaddr, ZULU_TTE64K);
660 660 if (zblkp != NULL) {
661 661 return (zblkp);
662 662 }
663 663 }
664 664 if (zhat->map512k) {
665 665 zblkp = zulu_lookup_map_bysize(zhat, vaddr, ZULU_TTE512K);
666 666 }
667 667
668 668 return (zblkp);
669 669 }
670 670
/*
 * zulu_hat_load: Load translation for given vaddr
 *
 * Resolution order: first the TSB, then the hash table (refreshing the
 * TSB on a hit), and finally as_fault() to have the VM system create a
 * mapping, which arrives via the zulu_hat_memload callbacks.
 *
 * Returns 0 on success, the as_fault() faultcode on fault failure, or
 * -1 if the address space was torn down (zhat->freed) while faulting.
 * If ppg_size is non-NULL, the page size of the loaded translation is
 * returned through it (-1 if none was found after the fault).
 */
int
zulu_hat_load(struct zulu_hat *zhat, caddr_t vaddr,
	enum seg_rw rw, int *ppg_size)
{
	faultcode_t as_err;
	struct zulu_hat_blk *zblkp;
	int rval;
	uint64_t flags_pfn;
	struct zulu_tte tte;

	TNF_PROBE_2(zulu_hat_load, "zulu_hat", /* CSTYLED */,
	    tnf_int, zulu_ctx, zhat->zulu_ctx,
	    tnf_opaque, vaddr, vaddr);

	mutex_enter(&zhat->lock);
	ASSERT(zhat->zulu_ctx >= 0);
	/*
	 * lookup in our tsb first
	 */
	zulu_ctx_tsb_lock_enter(zhat);
	flags_pfn = zulu_hat_tsb_lookup_tl0(zhat, vaddr);
	zulu_ctx_tsb_lock_exit(zhat);

	if (flags_pfn) {
		/*
		 * TSB hit: flags_pfn is the flags word of the tte;
		 * overlay it on the second word of a local tte so the
		 * perm/size bitfields can be extracted.
		 */
		uint64_t *p = (uint64_t *)&tte;

		p++;			/* ignore the tag */
		*p = flags_pfn;		/* load the flags */

		zuluvm_load_tte(zhat, vaddr, flags_pfn, tte.zulu_tte_perm,
		    tte.zulu_tte_size);
		if (ppg_size != NULL) {
			*ppg_size = tte.zulu_tte_size;
		}

		zulu_tsb_hit++;
		mutex_exit(&zhat->lock);
		return (0);
	}

	zulu_tsb_miss++;

	/* TSB miss: consult the per-hat hash table */
	zblkp = zulu_lookup_map(zhat, vaddr);
	if (zblkp) {
		tte = zblkp->zulu_hat_blk_tte;
		/* adjust the pfn for vaddr's offset within a large page */
		tte.zulu_tte_pfn = ZULU_HAT_ADJ_PFN((&tte), vaddr);
		zuluvm_load_tte(zhat, vaddr, tte.zulu_tte_pfn,
		    tte.zulu_tte_perm, tte.zulu_tte_size);
		if (ppg_size != NULL) {
			*ppg_size = tte.zulu_tte_size;
		}
		mutex_exit(&zhat->lock);
		return (0);
	}

	/*
	 * Set a flag indicating that we're processing a fault.
	 * See comments in zulu_hat_unload_region.
	 */
	zhat->in_fault = 1;
	/* the zhat lock cannot be held across as_fault() */
	mutex_exit(&zhat->lock);

	zulu_as_fault++;
	TNF_PROBE_0(calling_as_fault, "zulu_hat", /* CSTYLED */);

	as_err = as_fault((struct hat *)zhat, zhat->zulu_xhat.xhat_as,
	    (caddr_t)(ZULU_VADDR((uint64_t)vaddr) & PAGEMASK),
	    PAGESIZE, F_INVAL, rw);

	mutex_enter(&zhat->lock);
	zhat->in_fault = 0;
	if (ppg_size != NULL) {
		/*
		 * caller wants to know the page size (used by preload)
		 */
		zblkp = zulu_lookup_map(zhat, vaddr);
		if (zblkp != NULL) {
			*ppg_size = zblkp->zulu_hat_blk_size;
		} else {
			*ppg_size = -1;
		}
	}
	mutex_exit(&zhat->lock);

	TNF_PROBE_1(as_fault_returned, "zulu_hat", /* CSTYLED */,
	    tnf_int, as_err, as_err);

	if (as_err != 0) {
		printf("as_fault returned %d\n", as_err);
		rval = as_err;
	} else if (zhat->freed) {
		/* the address space went away while we were faulting */
		rval = -1;
	} else {
		rval = 0;
	}

	return (rval);
}
772 772
773 773 static struct xhat *
774 774 zulu_hat_alloc(void *arg)
775 775 {
776 776 struct zulu_hat *zhat = kmem_zalloc(sizeof (struct zulu_hat), KM_SLEEP);
777 777
778 778 (void) arg;
779 779
780 780 zulu_hat_ctx_alloc(zhat);
781 781
782 782 mutex_init(&zhat->lock, NULL, MUTEX_DEFAULT, NULL);
783 783
784 784 zhat->zulu_tsb = kmem_zalloc(ZULU_TSB_SZ, KM_SLEEP);
785 785 zhat->zulu_tsb_size = ZULU_TSB_NUM;
786 786 zhat->hash_tbl = kmem_zalloc(ZULU_HASH_TBL_SZ, KM_SLEEP);
787 787 avl_create(&zhat->shadow_tree, zulu_shadow_tree_compare,
788 788 sizeof (zhat->shadow_tree), ZULU_SHADOW_BLK_LINK_OFFSET);
789 789 /*
790 790 * The zulu hat has a few opaque data structs embedded in it.
791 791 * This tag makes finding the our data easier with a debugger.
792 792 */
793 793 zhat->magic = 0x42;
794 794
795 795 zhat->freed = 0;
796 796 TNF_PROBE_1(zulu_hat_alloc, "zulu_hat", /* CSTYLED */,
797 797 tnf_int, zulu_ctx, zhat->zulu_ctx);
798 798 return ((struct xhat *)zhat);
799 799 }
800 800
801 801 static void
802 802 zulu_hat_free(struct xhat *xhat)
803 803 {
804 804 struct zulu_hat *zhat = (struct zulu_hat *)xhat;
805 805
806 806 TNF_PROBE_1(zulu_hat_free, "zulu_hat", /* CSTYLED */,
807 807 tnf_int, zulu_ctx, zhat->zulu_ctx);
808 808
809 809 zulu_shadow_tree_destroy(zhat);
810 810 kmem_free(zhat->hash_tbl, ZULU_HASH_TBL_SZ);
811 811 kmem_free(zhat->zulu_tsb, ZULU_TSB_SZ);
812 812 mutex_destroy(&zhat->lock);
813 813 kmem_free(xhat, sizeof (struct zulu_hat));
814 814 }
815 815
816 816 static void
817 817 zulu_hat_free_start(struct xhat *xhat)
818 818 {
819 819 struct zulu_hat *zhat = (struct zulu_hat *)xhat;
820 820
821 821 TNF_PROBE_1(zulu_hat_free_start, "zulu_hat", /* CSTYLED */,
822 822 tnf_int, zulu_ctx, zhat->zulu_ctx);
823 823 (void) xhat;
824 824 }
825 825
/*
 * zulu_hat_memload: This is the callback where the vm system gives us our
 * translations.
 *
 * Builds a zulu_hat_blk for the page, records which page size classes
 * this hat uses, inserts the mapping into the TSB/hash/shadow-tree,
 * and (unless the address space is being torn down) pushes the tte to
 * the zulu MMU.  When use_pszc is zero the mapping is entered at the
 * base page size regardless of page->p_szc.
 */
static void
zulu_do_hat_memload(struct xhat *xhat, caddr_t vaddr, struct page *page,
	uint_t attr, uint_t flags, int use_pszc)
{
	void *blk;
	struct zulu_hat *zhat = (struct zulu_hat *)xhat;
	struct zulu_hat_blk *zblk;
	pfn_t pfn;

	TNF_PROBE_4(zulu_hat_memload, "zulu_hat", /* CSTYLED */,
	    tnf_int, zulu_ctx, zhat->zulu_ctx,
	    tnf_opaque, vaddr, vaddr, tnf_opaque, attr, attr,
	    tnf_opaque, flags, flags);

	/*
	 * keep track of the highest address that this zhat has had
	 * a mapping for.
	 * We use this in unload to avoid searching for regions that
	 * we've never seen.
	 *
	 * This is particularly useful for avoiding repeated searches
	 * for the process's mappings to the zulu hardware. These mappings
	 * are explicitly unloaded at each graphics context switch.
	 *
	 * This takes advantage of the fact that the device addresses
	 * are always above the heap where most DMA data is stored.
	 */
	if (vaddr > zhat->vaddr_max) {
		zhat->vaddr_max = vaddr;
	}

	/* get a zulu_hat_blk from the xhat framework for this page */
	pfn = xhat_insert_xhatblk(page, xhat, &blk);
	zblk = (struct zulu_hat_blk *)blk;
	zblk->zulu_hat_blk_vaddr = (uintptr_t)vaddr;
	zblk->zulu_hat_blk_pfn = (uint_t)pfn;
	/*
	 * The perm bit is actually in the tte which gets copied to the TSB
	 */
	zblk->zulu_hat_blk_perm = (attr & PROT_WRITE) ? 1 : 0;
	zblk->zulu_hat_blk_size = use_pszc ? page->p_szc : 0;
	zblk->zulu_hat_blk_valid = 1;

	/* remember which page sizes this hat maps, for zulu_lookup_map */
	switch (zblk->zulu_hat_blk_size) {
	case ZULU_TTE8K:
		zhat->map8k = 1;
		break;
	case ZULU_TTE64K:
		zhat->map64k = 1;
		break;
	case ZULU_TTE512K:
		zhat->map512k = 1;
		break;
	case ZULU_TTE4M:
		zhat->map4m = 1;
		break;
	default:
		panic("zulu_hat illegal page size\n");
	}

	mutex_enter(&zhat->lock);

	zulu_hat_insert_map(zhat, zblk);
	if (!zhat->freed) {
		zuluvm_load_tte(zhat, vaddr, zblk->zulu_hat_blk_pfn,
		    zblk->zulu_hat_blk_perm, zblk->zulu_hat_blk_size);
	}
	/* remember the last fault address; see zulu_hat_unload_region */
	zhat->fault_ivaddr_last =
	    ZULU_VADDR((uint64_t)zblk->zulu_hat_blk_vaddr);

	mutex_exit(&zhat->lock);
}
901 901
/*
 * XHAT memload entry point for a single page.
 * use_pszc is 0: the mapping is entered at the base page size rather
 * than page->p_szc.
 */
static void
zulu_hat_memload(struct xhat *xhat, caddr_t vaddr, struct page *page,
	uint_t attr, uint_t flags)
{
	zulu_do_hat_memload(xhat, vaddr, page, attr, flags, 0);
}
908 908
909 909 static void
910 910 zulu_hat_devload(struct xhat *xhat, caddr_t vaddr, size_t size, pfn_t pfn,
911 911 uint_t attr, int flags)
912 912 {
913 913 struct page *pp = page_numtopp_nolock(pfn);
914 914 (void) size;
915 915 zulu_do_hat_memload(xhat, vaddr, pp, attr, (uint_t)flags, 1);
916 916 }
917 917
/*
 * XHAT memload_array entry point: load translations for a run of
 * (possibly large) pages, advancing by each page's own size class.
 */
static void
zulu_hat_memload_array(struct xhat *xhat, caddr_t addr, size_t len,
	struct page **gen_pps, uint_t attr, uint_t flags)
{
	struct zulu_hat *zhat = (struct zulu_hat *)xhat;

	TNF_PROBE_3(zulu_hat_memload_array, "zulu_hat", /* CSTYLED */,
	    tnf_int, zulu_ctx, zhat->zulu_ctx,
	    tnf_opaque, addr, addr,
	    tnf_opaque, len, len);

	/*
	 * The comma expression in the for-update evaluates left to
	 * right, so len is decremented using the current page's size
	 * before gen_pps advances to the next page.
	 * NOTE(review): assumes len is an exact multiple of the page
	 * sizes encountered -- len is size_t, so an overshoot would
	 * wrap rather than terminate the loop.  TODO confirm callers.
	 */
	for (; len > 0; len -= ZULU_HAT_PGSZ((*gen_pps)->p_szc),
	    gen_pps += ZULU_HAT_NUM_PGS((*gen_pps)->p_szc)) {
		zulu_do_hat_memload(xhat, addr, *gen_pps, attr, flags, 1);

		addr += ZULU_HAT_PGSZ((*gen_pps)->p_szc);
	}
}
936 936
937 937 static void
938 938 free_zblks(struct zulu_hat_blk *free_list)
939 939 {
940 940 struct zulu_hat_blk *zblkp;
941 941 struct zulu_hat_blk *next;
942 942
943 943 for (zblkp = free_list; zblkp != NULL; zblkp = next) {
944 944 next = zblkp->zulu_hash_next;
945 945 (void) xhat_delete_xhatblk((struct xhat_hme_blk *)zblkp, 0);
946 946 }
947 947 }
948 948
/*
 * Push zblk onto the head of a free list threaded through the
 * zulu_hash_next pointers (the block is already off its hash bucket).
 */
static void
add_to_free_list(struct zulu_hat_blk **pfree_list, struct zulu_hat_blk *zblk)
{
	zblk->zulu_hash_next = *pfree_list;
	*pfree_list = zblk;
}
955 955
/*
 * Unload all mappings in [ivaddr, ivaddr + size) that fall within one
 * shadow block.  Removed blocks are demapped from the zulu MMU (unless
 * the address space is already being freed) and collected on
 * *pfree_list for the caller to return to the xhat framework.
 */
static void
zulu_hat_unload_region(struct zulu_hat *zhat, uint64_t ivaddr, size_t size,
	struct zulu_shadow_blk *sblk, struct zulu_hat_blk **pfree_list)
{
	uint64_t end = ivaddr + size;
	int found = 0;

	TNF_PROBE_2(zulu_hat_unload_region, "zulu_hat", /* CSTYLED */,
	    tnf_opaque, vaddr, ivaddr, tnf_opaque, size, size);

	/*
	 * check address against the low and highwater marks for mappings
	 * in this sblk
	 */
	if (ivaddr < sblk->min_addr) {
		ivaddr = sblk->min_addr;
		TNF_PROBE_1(zulu_hat_unload_skip, "zulu_hat", /* CSTYLED */,
		    tnf_opaque, ivaddr, ivaddr);
	}
	if (end > sblk->max_addr) {
		end = sblk->max_addr;
		TNF_PROBE_1(zulu_hat_unload_reg_skip, "zulu_hat", /* CSTYLED */,
		    tnf_opaque, end, end);
	}
	/*
	 * REMIND: It's not safe to touch the sblk after we enter this loop
	 * because it may get deleted.
	 */

	while (ivaddr < end) {
		uint64_t iaddr;
		size_t pg_sz;
		struct zulu_hat_blk *zblkp;

		zblkp = zulu_lookup_map(zhat, (caddr_t)ivaddr);
		if (zblkp == NULL) {
			/* no mapping here: probe the next base page */
			ivaddr += PAGESIZE;
			continue;
		}

		iaddr = ZULU_VADDR((uint64_t)zblkp->zulu_hat_blk_vaddr);
		pg_sz = ZULU_HAT_PGSZ(zblkp->zulu_hat_blk_size);

		found++;

		zulu_hat_remove_map(zhat, zblkp);
		/*
		 * skip demap page if as_free has already been entered
		 * zuluvm demapped the context already
		 */
		if (!zhat->freed) {
			if ((zhat->in_fault) &&
			    (iaddr == zhat->fault_ivaddr_last)) {
				/*
				 * We're being called from within as_fault to
				 * unload the last translation we loaded.
				 *
				 * This is probably due to watchpoint handling.
				 * Delay the demap for a millisecond
				 * to allow zulu to make some progress.
				 */
				drv_usecwait(1000);
				zhat->fault_ivaddr_last = 0;
			}
			zulu_hat_demap_page(zhat, (caddr_t)iaddr,
			    zblkp->zulu_hat_blk_size);
		}

		add_to_free_list(pfree_list, zblkp);

		/* a large page may extend to or past end: we're done */
		if ((iaddr + pg_sz) >= end) {
			break;
		}

		ivaddr += pg_sz;
	}
	TNF_PROBE_1(zulu_hat_unload_region_done, "zulu_hat", /* CSTYLED */,
	    tnf_opaque, found, found);
}
1035 1035
/*
 * Unload all zulu mappings in [vaddr, vaddr + size).
 *
 * The range is walked one shadow block (ZULU_SHADOW_BLK_RANGE) at a
 * time; zulu_hat_unload_region does the per-page work.  Unloaded blocks
 * accumulate on free_list and are handed to xhat_delete_xhatblk only
 * after zhat->lock is dropped (free_zblks), avoiding lock-order issues
 * with the xhat framework.
 */
static void
zulu_hat_unload(struct xhat *xhat, caddr_t vaddr, size_t size, uint_t flags)
{
	struct zulu_hat *zhat = (struct zulu_hat *)xhat;
	uint64_t ivaddr;
	uint64_t end;
	int found = 0;		/* number of shadow blocks touched */
	struct zulu_hat_blk *free_list = NULL;

	/* flags is part of the xhat interface but unused here */
	(void) flags;

	TNF_PROBE_4(zulu_hat_unload, "zulu_hat", /* CSTYLED */,
	    tnf_int, zulu_ctx, zhat->zulu_ctx,
	    tnf_opaque, vaddr, vaddr,
	    tnf_opaque, vaddr_max, zhat->vaddr_max,
	    tnf_opaque, size, size);

	mutex_enter(&zhat->lock);

	/*
	 * The following test prevents us from searching for the user's
	 * mappings to the zulu device registers. Those mappings get unloaded
	 * every time a graphics context switch away from a given context
	 * occurs.
	 *
	 * Since the heap is located at smaller virtual addresses than the
	 * registers, this simple test avoids quite a bit of useless work.
	 */
	if (vaddr > zhat->vaddr_max) {
		/*
		 * all existing mappings have lower addresses than vaddr
		 * no need to search further.
		 */
		mutex_exit(&zhat->lock);
		return;
	}

	ivaddr = (uint64_t)vaddr;
	end = ivaddr + size;

	do {
		struct zulu_shadow_blk *sblk;

		sblk = zulu_shadow_tree_lookup(zhat, ivaddr, NULL);
		if (sblk != NULL) {
			uint64_t sblk_end;
			size_t region_size;

			found++;

			/* end of the shadow block containing ivaddr */
			sblk_end = (ivaddr + ZULU_SHADOW_BLK_RANGE) &
			    ZULU_SHADOW_BLK_MASK;

			/* clip the region to the end of the requested range */
			if (sblk_end < end) {
				region_size = sblk_end - ivaddr;
			} else {
				region_size = end - ivaddr;
			}
			zulu_hat_unload_region(zhat, ivaddr, region_size, sblk,
			    &free_list);

		}
		ivaddr += ZULU_SHADOW_BLK_RANGE;
	} while (ivaddr < end);

	mutex_exit(&zhat->lock);

	/* now that the lock is dropped, the deferred deletes are safe */
	free_zblks(free_list);

	TNF_PROBE_1(zulu_hat_unload_done, "zulu_hat", /* CSTYLED */,
	    tnf_int, found, found);
}
1108 1108
1109 1109 static void
1110 1110 zulu_hat_unload_callback(struct xhat *xhat, caddr_t vaddr, size_t size,
1111 1111 uint_t flags, hat_callback_t *pcb)
1112 1112 {
1113 1113 (void) size;
1114 1114 (void) pcb;
1115 1115 zulu_hat_unload(xhat, vaddr, size, flags);
1116 1116 }
1117 1117
1118 1118
1119 1119 /*
1120 1120 * unload one page
1121 1121 */
1122 1122 static int
1123 1123 zulu_hat_pageunload(struct xhat *xhat, struct page *pp, uint_t flags,
1124 1124 void *xblk)
1125 1125 {
1126 1126 struct zulu_hat_blk *zblk = (struct zulu_hat_blk *)xblk;
1127 1127 struct zulu_hat *zhat = (struct zulu_hat *)xhat;
1128 1128 int do_delete;
1129 1129
1130 1130 (void) pp;
1131 1131 (void) flags;
1132 1132
1133 1133 TNF_PROBE_3(zulu_hat_pageunload, "zulu_hat", /* CSTYLED */,
1134 1134 tnf_int, zulu_ctx, zhat->zulu_ctx,
1135 1135 tnf_opaque, vaddr, zblk->zulu_hat_blk_vaddr,
1136 1136 tnf_int, pg_size, zblk->zulu_hat_blk_size);
1137 1137
1138 1138 mutex_enter(&zhat->lock);
1139 1139 if (zblk->zulu_shadow_blk != NULL) {
1140 1140
1141 1141 do_delete = 1;
1142 1142
1143 1143 zulu_hat_remove_map(zhat, zblk);
1144 1144
1145 1145 /*
1146 1146 * now that the entry is removed from the TSB, remove the
1147 1147 * translation from the zulu hardware.
1148 1148 *
1149 1149 * Skip the demap if this as is in the process of being freed.
1150 1150 * The zuluvm as callback has demapped the whole context.
1151 1151 */
1152 1152 if (!zhat->freed) {
1153 1153 zulu_hat_demap_page(zhat,
1154 1154 (caddr_t)(uintptr_t)(zblk->zulu_hat_blk_page <<
1155 1155 ZULU_HAT_BP_SHIFT),
1156 1156 zblk->zulu_hat_blk_size);
1157 1157 }
1158 1158 } else {
1159 1159 /*
1160 1160 * This block has already been removed from the zulu_hat,
1161 1161 * it's on a free list waiting for our thread to release
1162 1162 * a mutex so it can be freed
1163 1163 */
1164 1164 do_delete = 0;
1165 1165
1166 1166 TNF_PROBE_0(zulu_hat_pageunload_skip, "zulu_hat",
1167 1167 /* CSTYLED */);
1168 1168 }
↓ open down ↓ |
1168 lines elided |
↑ open up ↑ |
1169 1169 mutex_exit(&zhat->lock);
1170 1170
1171 1171 if (do_delete) {
1172 1172 (void) xhat_delete_xhatblk(xblk, 1);
1173 1173 }
1174 1174
1175 1175 return (0);
1176 1176 }
1177 1177
1178 1178 static void
1179 -zulu_hat_swapout(struct xhat *xhat)
1180 -{
1181 - struct zulu_hat *zhat = (struct zulu_hat *)xhat;
1182 - struct zulu_hat_blk *zblk;
1183 - struct zulu_hat_blk *free_list = NULL;
1184 - int i;
1185 - int nblks = 0;
1186 -
1187 - TNF_PROBE_1(zulu_hat_swapout, "zulu_hat", /* CSTYLED */,
1188 - tnf_int, zulu_ctx, zhat->zulu_ctx);
1189 -
1190 - mutex_enter(&zhat->lock);
1191 -
1192 - /*
1193 - * real swapout calls are rare so we don't do anything in
1194 - * particular to optimize them.
1195 - *
1196 - * Just loop over all buckets in the hash table and free each
1197 - * zblk.
1198 - */
1199 - for (i = 0; i < ZULU_HASH_TBL_NUM; i++) {
1200 - struct zulu_hat_blk *next;
1201 - for (zblk = zhat->hash_tbl[i]; zblk != NULL; zblk = next) {
1202 - next = zblk->zulu_hash_next;
1203 - zulu_hat_remove_map(zhat, zblk);
1204 - add_to_free_list(&free_list, zblk);
1205 - nblks++;
1206 - }
1207 - }
1208 -
1209 - /*
1210 - * remove all mappings for this context from zulu hardware.
1211 - */
1212 - zulu_hat_demap_ctx(zhat->zdev, zhat->zulu_ctx);
1213 -
1214 - mutex_exit(&zhat->lock);
1215 -
1216 - free_zblks(free_list);
1217 -
1218 - TNF_PROBE_1(zulu_hat_swapout_done, "zulu_hat", /* CSTYLED */,
1219 - tnf_int, nblks, nblks);
1220 -}
1221 -
1222 -
1223 -static void
1224 1179 zulu_hat_unshare(struct xhat *xhat, caddr_t vaddr, size_t size)
1225 1180 {
1226 1181 TNF_PROBE_0(zulu_hat_unshare, "zulu_hat", /* CSTYLED */);
1227 1182
1228 1183 zulu_hat_unload(xhat, vaddr, size, 0);
1229 1184 }
1230 1185
/*
 * Functions to manage changes in protections for mappings.
 *
 * These are rarely called in normal operation so for now just unload
 * the region.
 * If the mapping is still needed, it will fault in later with the new
 * attributes.
 */
typedef enum {
	ZULU_HAT_CHGATTR,	/* change attributes (chgattr/chgprot) */
	ZULU_HAT_SETATTR,	/* set attribute bits (setattr) */
	ZULU_HAT_CLRATTR	/* clear attribute bits (clrattr) */
} zulu_hat_prot_op;
1244 1199
1245 1200 static void
1246 1201 zulu_hat_update_attr(struct xhat *xhat, caddr_t vaddr, size_t size,
1247 1202 uint_t flags, zulu_hat_prot_op op)
1248 1203 {
1249 1204 struct zulu_hat *zhat = (struct zulu_hat *)xhat;
1250 1205
1251 1206 TNF_PROBE_5(zulu_hat_changeprot, "zulu_hat", /* CSTYLED */,
1252 1207 tnf_int, ctx, zhat->zulu_ctx,
1253 1208 tnf_opaque, vaddr, vaddr, tnf_opaque, size, size,
1254 1209 tnf_uint, flags, flags, tnf_int, op, op);
1255 1210
1256 1211 zulu_hat_unload(xhat, vaddr, size, 0);
1257 1212 }
1258 1213
1259 1214 static void
1260 1215 zulu_hat_chgprot(struct xhat *xhat, caddr_t vaddr, size_t size, uint_t flags)
1261 1216 {
1262 1217 struct zulu_hat *zhat = (struct zulu_hat *)xhat;
1263 1218 #ifdef DEBUG
1264 1219 printf("zulu_hat_chgprot: ctx: %d addr: %lx, size: %lx flags: %x\n",
1265 1220 zhat->zulu_ctx, (uint64_t)vaddr, size, flags);
1266 1221 #endif
1267 1222 zulu_hat_update_attr(xhat, vaddr, size, flags, ZULU_HAT_CHGATTR);
1268 1223 }
1269 1224
1270 1225
1271 1226 static void
1272 1227 zulu_hat_setattr(struct xhat *xhat, caddr_t vaddr, size_t size, uint_t flags)
1273 1228 {
1274 1229 struct zulu_hat *zhat = (struct zulu_hat *)xhat;
1275 1230 #ifdef DEBUG
1276 1231 printf("zulu_hat_setattr: ctx: %d addr: %lx, size: %lx flags: %x\n",
1277 1232 zhat->zulu_ctx, (uint64_t)vaddr, size, flags);
1278 1233 #endif
1279 1234 zulu_hat_update_attr(xhat, vaddr, size, flags, ZULU_HAT_SETATTR);
1280 1235 }
1281 1236
1282 1237 static void
1283 1238 zulu_hat_clrattr(struct xhat *xhat, caddr_t vaddr, size_t size, uint_t flags)
1284 1239 {
1285 1240 struct zulu_hat *zhat = (struct zulu_hat *)xhat;
1286 1241 #ifdef DEBUG
1287 1242 printf("zulu_hat_clrattr: ctx: %d addr: %lx, size: %lx flags: %x\n",
1288 1243 zhat->zulu_ctx, (uint64_t)vaddr, size, flags);
1289 1244 #endif
1290 1245 zulu_hat_update_attr(xhat, vaddr, size, flags, ZULU_HAT_CLRATTR);
1291 1246 }
1292 1247
1293 1248 static void
1294 1249 zulu_hat_chgattr(struct xhat *xhat, caddr_t vaddr, size_t size, uint_t flags)
1295 1250 {
1296 1251 struct zulu_hat *zhat = (struct zulu_hat *)xhat;
1297 1252 TNF_PROBE_3(zulu_hat_chgattr, "zulu_hat", /* CSTYLED */,
1298 1253 tnf_int, ctx, zhat->zulu_ctx,
1299 1254 tnf_opaque, vaddr, vaddr,
1300 1255 tnf_opaque, flags, flags);
1301 1256 #ifdef DEBUG
1302 1257 printf("zulu_hat_chgattr: ctx: %d addr: %lx, size: %lx flags: %x\n",
1303 1258 zhat->zulu_ctx, (uint64_t)vaddr, size, flags);
1304 1259 #endif
↓ open down ↓ |
71 lines elided |
↑ open up ↑ |
1305 1260 zulu_hat_update_attr(xhat, vaddr, size, flags, ZULU_HAT_CHGATTR);
1306 1261 }
1307 1262
1308 1263
/*
 * Dispatch vector handed to the xhat framework.  Operations the zulu
 * hat does not implement are NULL.
 */
struct xhat_ops zulu_hat_ops = {
	zulu_hat_alloc,			/* xhat_alloc */
	zulu_hat_free,			/* xhat_free */
	zulu_hat_free_start,		/* xhat_free_start */
	NULL,				/* xhat_free_end */
	NULL,				/* xhat_dup */
	zulu_hat_memload,		/* xhat_memload */
	zulu_hat_memload_array,		/* xhat_memload_array */
	zulu_hat_devload,		/* xhat_devload */
	zulu_hat_unload,		/* xhat_unload */
	zulu_hat_unload_callback,	/* xhat_unload_callback */
	zulu_hat_setattr,		/* xhat_setattr */
	zulu_hat_clrattr,		/* xhat_clrattr */
	zulu_hat_chgattr,		/* xhat_chgattr */
	zulu_hat_unshare,		/* xhat_unshare */
	zulu_hat_chgprot,		/* xhat_chgprot */
	zulu_hat_pageunload,		/* xhat_pageunload */
};
1329 1282
/*
 * xhat block cache descriptor.  Only the reclaim callback is supplied
 * here; the NULL fields are presumably filled in by the xhat framework
 * at registration time — NOTE(review): confirm against xhat_sfmmu.h.
 */
xblk_cache_t zulu_xblk_cache = {
	NULL,
	NULL,
	NULL,
	xhat_xblkcache_reclaim		/* reclaim callback */
};
1336 1289
/*
 * Provider descriptor registered with the xhat framework in
 * zulu_hat_init().  The final field sizes the per-mapping block so the
 * framework can allocate a zulu_hat_blk and its embedded xhat_hme_blk
 * together.  NOTE(review): the two NULL fields appear to be
 * framework-managed list links — confirm against xhat.h.
 */
xhat_provider_t zulu_hat_provider = {
	XHAT_PROVIDER_VERSION,		/* interface version */
	0,
	NULL,
	NULL,
	"zulu_hat_provider",		/* provider name */
	&zulu_xblk_cache,		/* block cache descriptor */
	&zulu_hat_ops,			/* operations vector */
	sizeof (struct zulu_hat_blk) + sizeof (struct xhat_hme_blk)
};
1347 1300
1348 1301 /*
1349 1302 * The following functions are the entry points that zuluvm uses.
1350 1303 */
1351 1304
1352 1305 /*
1353 1306 * initialize this module. Called from zuluvm's _init function
1354 1307 */
1355 1308 int
1356 1309 zulu_hat_init()
1357 1310 {
1358 1311 int c;
1359 1312 int rval;
1360 1313 mutex_init(&zulu_ctx_lock, NULL, MUTEX_DEFAULT, NULL);
1361 1314
1362 1315 for (c = 0; c < ZULU_HAT_MAX_CTX; c++) {
1363 1316 ZULU_CTX_LOCK_INIT(c);
1364 1317 }
1365 1318 zulu_ctx_search_start = 0;
1366 1319 rval = xhat_provider_register(&zulu_hat_provider);
1367 1320 if (rval != 0) {
1368 1321 mutex_destroy(&zulu_ctx_lock);
1369 1322 }
1370 1323 return (rval);
1371 1324 }
1372 1325
1373 1326 /*
1374 1327 * un-initialize this module. Called from zuluvm's _fini function
1375 1328 */
1376 1329 int
1377 1330 zulu_hat_destroy()
1378 1331 {
1379 1332 if (xhat_provider_unregister(&zulu_hat_provider) != 0) {
1380 1333 return (-1);
1381 1334 }
1382 1335 mutex_destroy(&zulu_ctx_lock);
1383 1336 return (0);
1384 1337 }
1385 1338
/*
 * Per-device attach hook for zuluvm.  Nothing to do; always succeeds.
 */
int
zulu_hat_attach(void *arg)
{
	(void) arg;
	return (0);
}
1392 1345
/*
 * Per-device detach hook for zuluvm.  Nothing to do; always succeeds.
 */
int
zulu_hat_detach(void *arg)
{
	(void) arg;
	return (0);
}
1399 1352
1400 1353 /*
1401 1354 * create a zulu hat for this address space.
1402 1355 */
1403 1356 struct zulu_hat *
1404 1357 zulu_hat_proc_attach(struct as *as, void *zdev)
1405 1358 {
1406 1359 struct zulu_hat *zhat;
1407 1360 int xhat_rval;
1408 1361
1409 1362 xhat_rval = xhat_attach_xhat(&zulu_hat_provider, as,
1410 1363 (struct xhat **)&zhat, NULL);
1411 1364 if ((xhat_rval == 0) && (zhat != NULL)) {
1412 1365 mutex_enter(&zhat->lock);
1413 1366 ZULU_HAT2AS(zhat) = as;
1414 1367 zhat->zdev = zdev;
1415 1368 mutex_exit(&zhat->lock);
1416 1369 }
1417 1370
1418 1371 TNF_PROBE_3(zulu_hat_proc_attach, "zulu_hat", /* CSTYLED */,
1419 1372 tnf_int, xhat_rval, xhat_rval, tnf_opaque, as, as,
1420 1373 tnf_opaque, zhat, zhat);
1421 1374
1422 1375 return (zhat);
1423 1376 }
1424 1377
1425 1378 void
1426 1379 zulu_hat_proc_detach(struct zulu_hat *zhat)
1427 1380 {
1428 1381 struct as *as = ZULU_HAT2AS(zhat);
1429 1382
1430 1383 zulu_hat_ctx_free(zhat);
1431 1384
1432 1385 (void) xhat_detach_xhat(&zulu_hat_provider, ZULU_HAT2AS(zhat));
1433 1386
1434 1387 TNF_PROBE_1(zulu_hat_proc_detach, "zulu_hat", /* CSTYLED */,
1435 1388 tnf_opaque, as, as);
1436 1389 }
1437 1390
/*
 * zulu_hat_terminate
 *
 * Disables any further TLB miss processing for this hat
 * Called by zuluvm's as_free callback. The primary purpose of this
 * function is to cause any pending zulu DMA to abort quickly.
 */
void
zulu_hat_terminate(struct zulu_hat *zhat)
{
	int ctx = zhat->zulu_ctx;

	TNF_PROBE_1(zulu_hat_terminate, "zulu_hat", /* CSTYLED */,
	    tnf_int, ctx, ctx);

	mutex_enter(&zhat->lock);

	/*
	 * Mark the hat as freed first: unload/pageunload paths test this
	 * flag and skip per-page hardware demaps once it is set.
	 */
	zhat->freed = 1;

	zulu_ctx_tsb_lock_enter(zhat);
	/*
	 * zap the tsb
	 */
	bzero(zhat->zulu_tsb, ZULU_TSB_SZ);
	zulu_ctx_tsb_lock_exit(zhat);

	/* drop every hardware translation for this context in one shot */
	zulu_hat_demap_ctx(zhat->zdev, zhat->zulu_ctx);

	mutex_exit(&zhat->lock);

	TNF_PROBE_0(zulu_hat_terminate_done, "zulu_hat", /* CSTYLED */);
}
↓ open down ↓ |
143 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX