Print this page
remove xhat
The xhat infrastructure was added to support hardware such as the Zulu
graphics card - hardware which had on-board MMUs. The VM used the xhat code
to keep the CPU's and Zulu's page tables in sync. Since the only xhat user
was Zulu (which is gone), we can safely remove it, simplifying the whole VM
subsystem.
Assorted notes:
- The AS_BUSY flag (AS_SETBUSY/AS_CLRBUSY) was used solely by the xhat code, so it is removed as well
@@ -57,11 +57,10 @@
#include <sys/debug.h>
#include <sys/tnf_probe.h>
#include <sys/vtrace.h>
#include <vm/hat.h>
-#include <vm/xhat.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_vn.h>
#include <vm/seg_dev.h>
#include <vm/seg_kmem.h>
@@ -669,12 +668,10 @@
AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
as->a_hat = hat_alloc(as); /* create hat for default system mmu */
AS_LOCK_EXIT(as, &as->a_lock);
- as->a_xhat = NULL;
-
return (as);
}
/*
* Free an address space data structure.
@@ -685,11 +682,11 @@
void
as_free(struct as *as)
{
struct hat *hat = as->a_hat;
struct seg *seg, *next;
- int called = 0;
+ boolean_t free_started = B_FALSE;
top:
/*
* Invoke ALL callbacks. as_do_callbacks will do one callback
* per call, and not return (-1) until the callback has completed.
@@ -697,21 +694,16 @@
*/
mutex_enter(&as->a_contents);
while (as->a_callbacks && as_do_callbacks(as, AS_ALL_EVENT, 0, 0))
;
- /* This will prevent new XHATs from attaching to as */
- if (!called)
- AS_SETBUSY(as);
mutex_exit(&as->a_contents);
AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
- if (!called) {
- called = 1;
+ if (!free_started) {
+ free_started = B_TRUE;
hat_free_start(hat);
- if (as->a_xhat != NULL)
- xhat_free_start_all(as);
}
for (seg = AS_SEGFIRST(as); seg != NULL; seg = next) {
int err;
next = AS_SEGNEXT(as, seg);
@@ -757,12 +749,10 @@
*/
ASSERT(err == 0);
}
}
hat_free_end(hat);
- if (as->a_xhat != NULL)
- xhat_free_end_all(as);
AS_LOCK_EXIT(as, &as->a_lock);
/* /proc stuff */
ASSERT(avl_numnodes(&as->a_wpage) == 0);
if (as->a_objectdir) {
@@ -792,18 +782,10 @@
newas->a_userlimit = as->a_userlimit;
newas->a_proc = forkedproc;
AS_LOCK_ENTER(newas, &newas->a_lock, RW_WRITER);
- /* This will prevent new XHATs from attaching */
- mutex_enter(&as->a_contents);
- AS_SETBUSY(as);
- mutex_exit(&as->a_contents);
- mutex_enter(&newas->a_contents);
- AS_SETBUSY(newas);
- mutex_exit(&newas->a_contents);
-
(void) hat_dup(as->a_hat, newas->a_hat, NULL, 0, HAT_DUP_SRD);
for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) {
if (seg->s_flags & S_PURGE) {
@@ -813,13 +795,10 @@
newseg = seg_alloc(newas, seg->s_base, seg->s_size);
if (newseg == NULL) {
AS_LOCK_EXIT(newas, &newas->a_lock);
as_setwatch(as);
- mutex_enter(&as->a_contents);
- AS_CLRBUSY(as);
- mutex_exit(&as->a_contents);
AS_LOCK_EXIT(as, &as->a_lock);
as_free(newas);
return (-1);
}
if ((error = SEGOP_DUP(seg, newseg)) != 0) {
@@ -827,13 +806,10 @@
* We call seg_free() on the new seg
* because the segment is not set up
* completely; i.e. it has no ops.
*/
as_setwatch(as);
- mutex_enter(&as->a_contents);
- AS_CLRBUSY(as);
- mutex_exit(&as->a_contents);
AS_LOCK_EXIT(as, &as->a_lock);
seg_free(newseg);
AS_LOCK_EXIT(newas, &newas->a_lock);
as_free(newas);
return (error);
@@ -841,22 +817,14 @@
newas->a_size += seg->s_size;
}
newas->a_resvsize = as->a_resvsize - purgesize;
error = hat_dup(as->a_hat, newas->a_hat, NULL, 0, HAT_DUP_ALL);
- if (as->a_xhat != NULL)
- error |= xhat_dup_all(as, newas, NULL, 0, HAT_DUP_ALL);
- mutex_enter(&newas->a_contents);
- AS_CLRBUSY(newas);
- mutex_exit(&newas->a_contents);
AS_LOCK_EXIT(newas, &newas->a_lock);
as_setwatch(as);
- mutex_enter(&as->a_contents);
- AS_CLRBUSY(as);
- mutex_exit(&as->a_contents);
AS_LOCK_EXIT(as, &as->a_lock);
if (error != 0) {
as_free(newas);
return (error);
}
@@ -878,39 +846,28 @@
faultcode_t res = 0;
caddr_t addrsav;
struct seg *segsav;
int as_lock_held;
klwp_t *lwp = ttolwp(curthread);
- int is_xhat = 0;
int holding_wpage = 0;
- extern struct seg_ops segdev_ops;
-
- if (as->a_hat != hat) {
- /* This must be an XHAT then */
- is_xhat = 1;
-
- if ((type != F_INVAL) || (as == &kas))
- return (FC_NOSUPPORT);
- }
retry:
- if (!is_xhat) {
/*
- * Indicate that the lwp is not to be stopped while waiting
- * for a pagefault. This is to avoid deadlock while debugging
- * a process via /proc over NFS (in particular).
+ * Indicate that the lwp is not to be stopped while waiting for a
+ * pagefault. This is to avoid deadlock while debugging a process
+ * via /proc over NFS (in particular).
*/
if (lwp != NULL)
lwp->lwp_nostop++;
/*
- * same length must be used when we softlock and softunlock.
- * We don't support softunlocking lengths less than
- * the original length when there is largepage support.
- * See seg_dev.c for more comments.
+ * same length must be used when we softlock and softunlock. We
+ * don't support softunlocking lengths less than the original length
+ * when there is largepage support. See seg_dev.c for more
+ * comments.
*/
switch (type) {
case F_SOFTLOCK:
CPU_STATS_ADD_K(vm, softlock, 1);
@@ -929,11 +886,10 @@
if (as == &kas)
CPU_STATS_ADDQ(CPU, vm, kernel_asflt, 1);
CPU_STATS_EXIT_K();
break;
}
- }
/* Kernel probe */
TNF_PROBE_3(address_fault, "vm pagefault", /* CSTYLED */,
tnf_opaque, address, addr,
tnf_fault_type, fault_type, type,
@@ -950,39 +906,19 @@
* filesystem, and then no-one will be able to exec new commands,
* as exec'ing requires the write lock on the as.
*/
if (as == &kas && segkmap && segkmap->s_base <= raddr &&
raddr + size < segkmap->s_base + segkmap->s_size) {
- /*
- * if (as==&kas), this can't be XHAT: we've already returned
- * FC_NOSUPPORT.
- */
seg = segkmap;
as_lock_held = 0;
} else {
AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
- if (is_xhat && avl_numnodes(&as->a_wpage) != 0) {
- /*
- * Grab and hold the writers' lock on the as
- * if the fault is to a watched page.
- * This will keep CPUs from "peeking" at the
- * address range while we're temporarily boosting
- * the permissions for the XHAT device to
- * resolve the fault in the segment layer.
- *
- * We could check whether faulted address
- * is within a watched page and only then grab
- * the writer lock, but this is simpler.
- */
- AS_LOCK_EXIT(as, &as->a_lock);
- AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
- }
seg = as_segat(as, raddr);
if (seg == NULL) {
AS_LOCK_EXIT(as, &as->a_lock);
- if ((lwp != NULL) && (!is_xhat))
+ if (lwp != NULL)
lwp->lwp_nostop--;
return (FC_NOMAP);
}
as_lock_held = 1;
@@ -1002,39 +938,20 @@
if (raddr + rsize > seg->s_base + seg->s_size)
ssize = seg->s_base + seg->s_size - raddr;
else
ssize = rsize;
- if (!is_xhat || (seg->s_ops != &segdev_ops)) {
-
- if (is_xhat && avl_numnodes(&as->a_wpage) != 0 &&
- pr_is_watchpage_as(raddr, rw, as)) {
- /*
- * Handle watch pages. If we're faulting on a
- * watched page from an X-hat, we have to
- * restore the original permissions while we
- * handle the fault.
- */
- as_clearwatch(as);
- holding_wpage = 1;
- }
-
res = SEGOP_FAULT(hat, seg, raddr, ssize, type, rw);
/* Restore watchpoints */
if (holding_wpage) {
as_setwatch(as);
holding_wpage = 0;
}
if (res != 0)
break;
- } else {
- /* XHAT does not support seg_dev */
- res = FC_NOSUPPORT;
- break;
- }
}
/*
* If we were SOFTLOCKing and encountered a failure,
* we must SOFTUNLOCK the range we already did. (Maybe we
@@ -1059,11 +976,11 @@
F_SOFTUNLOCK, S_OTHER);
}
}
if (as_lock_held)
AS_LOCK_EXIT(as, &as->a_lock);
- if ((lwp != NULL) && (!is_xhat))
+ if (lwp != NULL)
lwp->lwp_nostop--;
/*
* If the lower levels returned EDEADLK for a fault,
* It means that we should retry the fault. Let's wait
@@ -2164,29 +2081,17 @@
if (as == NULL)
return (0);
AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
- /* Prevent XHATs from attaching */
- mutex_enter(&as->a_contents);
- AS_SETBUSY(as);
- mutex_exit(&as->a_contents);
-
-
/*
* Free all mapping resources associated with the address
* space. The segment-level swapout routines capitalize
* on this unmapping by scavanging pages that have become
* unmapped here.
*/
hat_swapout(as->a_hat);
- if (as->a_xhat != NULL)
- xhat_swapout_all(as);
-
- mutex_enter(&as->a_contents);
- AS_CLRBUSY(as);
- mutex_exit(&as->a_contents);
/*
* Call the swapout routines of all segments in the address
* space to do the actual work, accumulating the amount of
* space reclaimed.