const-ify segment ops structures
There is no reason to keep the segment ops structures writable.
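For illustration, the change amounts to adding the const qualifier so each
driver's ops vector lands in read-only data. A minimal sketch with a
hypothetical driver (the real vectors carry many more entries):

	/* Hypothetical segment driver: only the const qualifier is new. */
	static int mydrv_dup(struct seg *, struct seg *);

	const struct seg_ops mydrv_segops = {
		.dup = mydrv_dup,
		/* ...remaining ops as before... */
	};

Any code that tries to patch an ops vector at runtime now fails to compile.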
use NULL getmemid segop as a shorthand for ENODEV
Instead of forcing every segment driver to implement a dummy function that
returns (hopefully) ENODEV, treat a NULL getmemid segop function pointer as
shorthand for "return ENODEV".
seg_inherit_notsup is redundant since segop_inherit checks for NULL properly
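Both notes describe the same pattern: the segop_*() dispatch functions check
the op pointer themselves, so drivers no longer need dummy implementations.
A minimal sketch of what the two wrappers are described as doing (signatures
approximated from the call sites in the diffs below):

	int
	segop_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
	{
		/* No getmemid op means the driver has no memid to hand out. */
		if (seg->s_ops->getmemid == NULL)
			return (ENODEV);

		return (seg->s_ops->getmemid(seg, addr, memidp));
	}

	int
	segop_inherit(struct seg *seg, caddr_t addr, size_t size, uint_t op)
	{
		/* Drivers without inherit support simply leave this NULL. */
		if (seg->s_ops->inherit == NULL)
			return (ENOTSUP);

		return (seg->s_ops->inherit(seg, addr, size, op));
	}

The as_getmemid() and MC_INHERIT_ZERO hunks below show the caller-side checks
this makes redundant.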
lower-case segops
Replace the upper-case SEGOP_*() dispatch macros with segop_*() functions.
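Sketched from memory (the historical macro shape may have differed), the
conversion at each op looks like:

	/* Before: type-unsafe dispatch through a macro. */
	#define	SEGOP_UNMAP(s, a, l)	((s)->s_ops->unmap((s), (a), (l)))

	/*
	 * After: an ordinary function, which is type-checked, shows up in
	 * stack traces, and gives a single place to hang common logic such
	 * as the NULL-op shorthands described above.
	 */
	int
	segop_unmap(struct seg *seg, caddr_t addr, size_t len)
	{
		return (seg->s_ops->unmap(seg, addr, len));
	}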
remove whole-process swapping
Long before Unix supported paging, it used whole-process swapping to reclaim
memory.  The code is still there and, in theory, it runs when we get
*extremely* low on memory.  In practice, it never runs, since the definition
of low-on-memory is antiquated. (XXX: define what antiquated means)
You can check the number of swapout/swapin events with kstats:
$ kstat -p ::vm:swapin ::vm:swapout
remove xhat
The xhat infrastructure was added to support hardware such as the Zulu
graphics card - hardware which had on-board MMUs.  The VM used the xhat code
to keep the CPU's and Zulu's page tables in sync.  Since the only xhat user
was Zulu (which is gone), we can safely remove it, simplifying the whole VM
subsystem.

Assorted notes:
- the AS_BUSY flag was used solely by xhat

*** 57,67 ****
  #include <sys/debug.h>
  #include <sys/tnf_probe.h>
  #include <sys/vtrace.h>
  #include <vm/hat.h>
- #include <vm/xhat.h>
  #include <vm/as.h>
  #include <vm/seg.h>
  #include <vm/seg_vn.h>
  #include <vm/seg_dev.h>
  #include <vm/seg_kmem.h>
--- 57,66 ----
*** 472,482 ****
  	 * the insertion point is immediately before seg.
  	 */
  	if (base + seg->s_size > addr) {
  		if (addr >= base || eaddr > base) {
  #ifdef __sparc
! 			extern struct seg_ops segnf_ops;
  
  			/*
  			 * no-fault segs must disappear if overlaid.
  			 * XXX need new segment type so
  			 * we don't have to check s_ops
--- 471,481 ----
  	 * the insertion point is immediately before seg.
  	 */
  	if (base + seg->s_size > addr) {
  		if (addr >= base || eaddr > base) {
  #ifdef __sparc
! 			extern const struct seg_ops segnf_ops;
  
  			/*
  			 * no-fault segs must disappear if overlaid.
  			 * XXX need new segment type so
  			 * we don't have to check s_ops
*** 669,680 ****
  	AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
  	as->a_hat = hat_alloc(as);	/* create hat for default system mmu */
  	AS_LOCK_EXIT(as, &as->a_lock);
  
- 	as->a_xhat = NULL;
- 
  	return (as);
  }
  
  /*
   * Free an address space data structure.
--- 668,677 ----
*** 685,695 ****
  void
  as_free(struct as *as)
  {
  	struct hat *hat = as->a_hat;
  	struct seg *seg, *next;
! 	int called = 0;
  
  top:
  	/*
  	 * Invoke ALL callbacks. as_do_callbacks will do one callback
  	 * per call, and not return (-1) until the callback has completed.
--- 682,692 ----
  void
  as_free(struct as *as)
  {
  	struct hat *hat = as->a_hat;
  	struct seg *seg, *next;
! 	boolean_t free_started = B_FALSE;
  
  top:
  	/*
  	 * Invoke ALL callbacks. as_do_callbacks will do one callback
  	 * per call, and not return (-1) until the callback has completed.
*** 697,724 ****
  	 */
  	mutex_enter(&as->a_contents);
  	while (as->a_callbacks && as_do_callbacks(as, AS_ALL_EVENT, 0, 0))
  		;
  
- 	/* This will prevent new XHATs from attaching to as */
- 	if (!called)
- 		AS_SETBUSY(as);
  	mutex_exit(&as->a_contents);
  	AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
  
! 	if (!called) {
! 		called = 1;
  		hat_free_start(hat);
- 		if (as->a_xhat != NULL)
- 			xhat_free_start_all(as);
  	}
  	for (seg = AS_SEGFIRST(as); seg != NULL; seg = next) {
  		int err;
  
  		next = AS_SEGNEXT(as, seg);
  retry:
! 		err = SEGOP_UNMAP(seg, seg->s_base, seg->s_size);
  		if (err == EAGAIN) {
  			mutex_enter(&as->a_contents);
  			if (as->a_callbacks) {
  				AS_LOCK_EXIT(as, &as->a_lock);
  			} else if (!AS_ISNOUNMAPWAIT(as)) {
--- 694,716 ----
  	 */
  	mutex_enter(&as->a_contents);
  	while (as->a_callbacks && as_do_callbacks(as, AS_ALL_EVENT, 0, 0))
  		;
  
  	mutex_exit(&as->a_contents);
  	AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
  
! 	if (!free_started) {
! 		free_started = B_TRUE;
  		hat_free_start(hat);
  	}
  	for (seg = AS_SEGFIRST(as); seg != NULL; seg = next) {
  		int err;
  
  		next = AS_SEGNEXT(as, seg);
  retry:
! 		err = segop_unmap(seg, seg->s_base, seg->s_size);
  		if (err == EAGAIN) {
  			mutex_enter(&as->a_contents);
  			if (as->a_callbacks) {
  				AS_LOCK_EXIT(as, &as->a_lock);
  			} else if (!AS_ISNOUNMAPWAIT(as)) {
*** 757,768 ****
  			 */
  			ASSERT(err == 0);
  		}
  	}
  	hat_free_end(hat);
- 	if (as->a_xhat != NULL)
- 		xhat_free_end_all(as);
  	AS_LOCK_EXIT(as, &as->a_lock);
  
  	/* /proc stuff */
  	ASSERT(avl_numnodes(&as->a_wpage) == 0);
  	if (as->a_objectdir) {
--- 749,758 ----
*** 792,809 ****
  	newas->a_userlimit = as->a_userlimit;
  	newas->a_proc = forkedproc;
  
  	AS_LOCK_ENTER(newas, &newas->a_lock, RW_WRITER);
  
- 	/* This will prevent new XHATs from attaching */
- 	mutex_enter(&as->a_contents);
- 	AS_SETBUSY(as);
- 	mutex_exit(&as->a_contents);
- 	mutex_enter(&newas->a_contents);
- 	AS_SETBUSY(newas);
- 	mutex_exit(&newas->a_contents);
- 
  	(void) hat_dup(as->a_hat, newas->a_hat, NULL, 0, HAT_DUP_SRD);
  
  	for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) {
  
  		if (seg->s_flags & S_PURGE) {
--- 782,791 ----
*** 813,839 ****
  		newseg = seg_alloc(newas, seg->s_base, seg->s_size);
  		if (newseg == NULL) {
  			AS_LOCK_EXIT(newas, &newas->a_lock);
  			as_setwatch(as);
- 			mutex_enter(&as->a_contents);
- 			AS_CLRBUSY(as);
- 			mutex_exit(&as->a_contents);
  			AS_LOCK_EXIT(as, &as->a_lock);
  			as_free(newas);
  			return (-1);
  		}
! 		if ((error = SEGOP_DUP(seg, newseg)) != 0) {
  			/*
  			 * We call seg_free() on the new seg
  			 * because the segment is not set up
  			 * completely; i.e. it has no ops.
  			 */
  			as_setwatch(as);
- 			mutex_enter(&as->a_contents);
- 			AS_CLRBUSY(as);
- 			mutex_exit(&as->a_contents);
  			AS_LOCK_EXIT(as, &as->a_lock);
  			seg_free(newseg);
  			AS_LOCK_EXIT(newas, &newas->a_lock);
  			as_free(newas);
  			return (error);
--- 795,815 ----
  		newseg = seg_alloc(newas, seg->s_base, seg->s_size);
  		if (newseg == NULL) {
  			AS_LOCK_EXIT(newas, &newas->a_lock);
  			as_setwatch(as);
  			AS_LOCK_EXIT(as, &as->a_lock);
  			as_free(newas);
  			return (-1);
  		}
! 		if ((error = segop_dup(seg, newseg)) != 0) {
  			/*
  			 * We call seg_free() on the new seg
  			 * because the segment is not set up
  			 * completely; i.e. it has no ops.
  			 */
  			as_setwatch(as);
  			AS_LOCK_EXIT(as, &as->a_lock);
  			seg_free(newseg);
  			AS_LOCK_EXIT(newas, &newas->a_lock);
  			as_free(newas);
  			return (error);
*** 841,862 ****
  		newas->a_size += seg->s_size;
  	}
  	newas->a_resvsize = as->a_resvsize - purgesize;
  
  	error = hat_dup(as->a_hat, newas->a_hat, NULL, 0, HAT_DUP_ALL);
- 	if (as->a_xhat != NULL)
- 		error |= xhat_dup_all(as, newas, NULL, 0, HAT_DUP_ALL);
  
- 	mutex_enter(&newas->a_contents);
- 	AS_CLRBUSY(newas);
- 	mutex_exit(&newas->a_contents);
  	AS_LOCK_EXIT(newas, &newas->a_lock);
  
  	as_setwatch(as);
- 	mutex_enter(&as->a_contents);
- 	AS_CLRBUSY(as);
- 	mutex_exit(&as->a_contents);
  	AS_LOCK_EXIT(as, &as->a_lock);
  	if (error != 0) {
  		as_free(newas);
  		return (error);
  	}
--- 817,830 ----
*** 878,916 ****
  	faultcode_t res = 0;
  	caddr_t addrsav;
  	struct seg *segsav;
  	int as_lock_held;
  	klwp_t *lwp = ttolwp(curthread);
- 	int is_xhat = 0;
  	int holding_wpage = 0;
- 	extern struct seg_ops segdev_ops;
  
- 	if (as->a_hat != hat) {
- 		/* This must be an XHAT then */
- 		is_xhat = 1;
- 
- 		if ((type != F_INVAL) || (as == &kas))
- 			return (FC_NOSUPPORT);
- 	}
- 
  retry:
- 	if (!is_xhat) {
  	/*
! 	 * Indicate that the lwp is not to be stopped while waiting
! 	 * for a pagefault.  This is to avoid deadlock while debugging
! 	 * a process via /proc over NFS (in particular).
  	 */
  	if (lwp != NULL)
  		lwp->lwp_nostop++;
  
  	/*
! 	 * same length must be used when we softlock and softunlock.
! 	 * We don't support softunlocking lengths less than
! 	 * the original length when there is largepage support.
! 	 * See seg_dev.c for more comments.
  	 */
  	switch (type) {
  	case F_SOFTLOCK:
  		CPU_STATS_ADD_K(vm, softlock, 1);
--- 846,873 ----
  	faultcode_t res = 0;
  	caddr_t addrsav;
  	struct seg *segsav;
  	int as_lock_held;
  	klwp_t *lwp = ttolwp(curthread);
  	int holding_wpage = 0;
  
  retry:
  	/*
! 	 * Indicate that the lwp is not to be stopped while waiting for a
! 	 * pagefault.  This is to avoid deadlock while debugging a process
! 	 * via /proc over NFS (in particular).
  	 */
  	if (lwp != NULL)
  		lwp->lwp_nostop++;
  
  	/*
! 	 * same length must be used when we softlock and softunlock.  We
! 	 * don't support softunlocking lengths less than the original length
! 	 * when there is largepage support.  See seg_dev.c for more
! 	 * comments.
  	 */
  	switch (type) {
  	case F_SOFTLOCK:
  		CPU_STATS_ADD_K(vm, softlock, 1);
*** 929,939 ****
  		if (as == &kas)
  			CPU_STATS_ADDQ(CPU, vm, kernel_asflt, 1);
  		CPU_STATS_EXIT_K();
  		break;
  	}
- 	}
  
  	/* Kernel probe */
  	TNF_PROBE_3(address_fault, "vm pagefault", /* CSTYLED */,
  	    tnf_opaque, address, addr,
  	    tnf_fault_type, fault_type, type,
--- 886,895 ----
*** 950,988 ****
  	 * filesystem, and then no-one will be able to exec new commands,
  	 * as exec'ing requires the write lock on the as.
  	 */
  	if (as == &kas && segkmap && segkmap->s_base <= raddr &&
  	    raddr + size < segkmap->s_base + segkmap->s_size) {
- 		/*
- 		 * if (as==&kas), this can't be XHAT: we've already returned
- 		 * FC_NOSUPPORT.
- 		 */
  		seg = segkmap;
  		as_lock_held = 0;
  	} else {
  		AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
- 		if (is_xhat && avl_numnodes(&as->a_wpage) != 0) {
- 			/*
- 			 * Grab and hold the writers' lock on the as
- 			 * if the fault is to a watched page.
- 			 * This will keep CPUs from "peeking" at the
- 			 * address range while we're temporarily boosting
- 			 * the permissions for the XHAT device to
- 			 * resolve the fault in the segment layer.
- 			 *
- 			 * We could check whether faulted address
- 			 * is within a watched page and only then grab
- 			 * the writer lock, but this is simpler.
- 			 */
- 			AS_LOCK_EXIT(as, &as->a_lock);
- 			AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
- 		}
  		seg = as_segat(as, raddr);
  		if (seg == NULL) {
  			AS_LOCK_EXIT(as, &as->a_lock);
! 			if ((lwp != NULL) && (!is_xhat))
  				lwp->lwp_nostop--;
  			return (FC_NOMAP);
  		}
  		as_lock_held = 1;
--- 906,924 ----
  	 * filesystem, and then no-one will be able to exec new commands,
  	 * as exec'ing requires the write lock on the as.
  	 */
  	if (as == &kas && segkmap && segkmap->s_base <= raddr &&
  	    raddr + size < segkmap->s_base + segkmap->s_size) {
  		seg = segkmap;
  		as_lock_held = 0;
  	} else {
  		AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
  		seg = as_segat(as, raddr);
  		if (seg == NULL) {
  			AS_LOCK_EXIT(as, &as->a_lock);
! 			if (lwp != NULL)
  				lwp->lwp_nostop--;
  			return (FC_NOMAP);
  		}
  		as_lock_held = 1;
*** 1002,1040 ****
  		if (raddr + rsize > seg->s_base + seg->s_size)
  			ssize = seg->s_base + seg->s_size - raddr;
  		else
  			ssize = rsize;
  
! 		if (!is_xhat || (seg->s_ops != &segdev_ops)) {
! 
! 			if (is_xhat && avl_numnodes(&as->a_wpage) != 0 &&
! 			    pr_is_watchpage_as(raddr, rw, as)) {
! 				/*
! 				 * Handle watch pages.  If we're faulting on a
! 				 * watched page from an X-hat, we have to
! 				 * restore the original permissions while we
! 				 * handle the fault.
! 				 */
! 				as_clearwatch(as);
! 				holding_wpage = 1;
! 			}
! 
! 			res = SEGOP_FAULT(hat, seg, raddr, ssize, type, rw);
  
  			/* Restore watchpoints */
  			if (holding_wpage) {
  				as_setwatch(as);
  				holding_wpage = 0;
  			}
  
  			if (res != 0)
  				break;
- 		} else {
- 			/* XHAT does not support seg_dev */
- 			res = FC_NOSUPPORT;
- 			break;
- 		}
  	}
  
  	/*
  	 * If we were SOFTLOCKing and encountered a failure,
  	 * we must SOFTUNLOCK the range we already did.  (Maybe we
--- 938,957 ----
  		if (raddr + rsize > seg->s_base + seg->s_size)
  			ssize = seg->s_base + seg->s_size - raddr;
  		else
  			ssize = rsize;
  
! 		res = segop_fault(hat, seg, raddr, ssize, type, rw);
  
  		/* Restore watchpoints */
  		if (holding_wpage) {
  			as_setwatch(as);
  			holding_wpage = 0;
  		}
  
  		if (res != 0)
  			break;
  	}
  
  	/*
  	 * If we were SOFTLOCKing and encountered a failure,
  	 * we must SOFTUNLOCK the range we already did.  (Maybe we
*** 1053,1069 ****
  			 */
  			if (raddr > seg->s_base + seg->s_size)
  				ssize = seg->s_base + seg->s_size - addrsav;
  			else
  				ssize = raddr - addrsav;
! 			(void) SEGOP_FAULT(hat, seg, addrsav, ssize,
  			    F_SOFTUNLOCK, S_OTHER);
  		}
  	}
  	if (as_lock_held)
  		AS_LOCK_EXIT(as, &as->a_lock);
! 	if ((lwp != NULL) && (!is_xhat))
  		lwp->lwp_nostop--;
  
  	/*
  	 * If the lower levels returned EDEADLK for a fault,
  	 * It means that we should retry the fault.  Let's wait
--- 970,986 ----
  			 */
  			if (raddr > seg->s_base + seg->s_size)
  				ssize = seg->s_base + seg->s_size - addrsav;
  			else
  				ssize = raddr - addrsav;
! 			(void) segop_fault(hat, seg, addrsav, ssize,
  			    F_SOFTUNLOCK, S_OTHER);
  		}
  	}
  	if (as_lock_held)
  		AS_LOCK_EXIT(as, &as->a_lock);
! 	if (lwp != NULL)
  		lwp->lwp_nostop--;
  
  	/*
  	 * If the lower levels returned EDEADLK for a fault,
  	 * It means that we should retry the fault.  Let's wait
*** 1123,1133 ****
  			if (seg == NULL || raddr != seg->s_base) {
  				res = FC_NOMAP;
  				break;
  			}
  		}
! 		res = SEGOP_FAULTA(seg, raddr);
  		if (res != 0)
  			break;
  	}
  	AS_LOCK_EXIT(as, &as->a_lock);
  	if (lwp != NULL)
--- 1040,1050 ----
  			if (seg == NULL || raddr != seg->s_base) {
  				res = FC_NOMAP;
  				break;
  			}
  		}
! 		res = segop_faulta(seg, raddr);
  		if (res != 0)
  			break;
  	}
  	AS_LOCK_EXIT(as, &as->a_lock);
  	if (lwp != NULL)
*** 1213,1223 ****
  		if ((raddr + rsize) > (seg->s_base + seg->s_size))
  			ssize = seg->s_base + seg->s_size - raddr;
  		else
  			ssize = rsize;
  
  retry:
! 		error = SEGOP_SETPROT(seg, raddr, ssize, prot);
  		if (error == IE_NOMEM) {
  			error = EAGAIN;
  			break;
  		}
--- 1130,1140 ----
  		if ((raddr + rsize) > (seg->s_base + seg->s_size))
  			ssize = seg->s_base + seg->s_size - raddr;
  		else
  			ssize = rsize;
  
  retry:
! 		error = segop_setprot(seg, raddr, ssize, prot);
  		if (error == IE_NOMEM) {
  			error = EAGAIN;
  			break;
  		}
*** 1364,1374 ****
  		if ((raddr + rsize) > (seg->s_base + seg->s_size))
  			ssize = seg->s_base + seg->s_size - raddr;
  		else
  			ssize = rsize;
  
! 		error = SEGOP_CHECKPROT(seg, raddr, ssize, prot);
  		if (error != 0)
  			break;
  	}
  	as_setwatch(as);
  	AS_LOCK_EXIT(as, &as->a_lock);
--- 1281,1291 ----
  		if ((raddr + rsize) > (seg->s_base + seg->s_size))
  			ssize = seg->s_base + seg->s_size - raddr;
  		else
  			ssize = rsize;
  
! 		error = segop_checkprot(seg, raddr, ssize, prot);
  		if (error != 0)
  			break;
  	}
  	as_setwatch(as);
  	AS_LOCK_EXIT(as, &as->a_lock);
*** 1430,1440 ****
  		if (!SEG_IS_DEVNULL_MAPPING(seg) &&
  		    !SEG_IS_PARTIAL_RESV(seg))
  			rsize = ssize;
  
  retry:
! 		err = SEGOP_UNMAP(seg, raddr, ssize);
  		if (err == EAGAIN) {
  			/*
  			 * Memory is currently locked.  It must be unlocked
  			 * before this operation can succeed through a retry.
  			 * The possible reasons for locked memory and
--- 1347,1357 ----
  		if (!SEG_IS_DEVNULL_MAPPING(seg) &&
  		    !SEG_IS_PARTIAL_RESV(seg))
  			rsize = ssize;
  
  retry:
! 		err = segop_unmap(seg, raddr, ssize);
  		if (err == EAGAIN) {
  			/*
  			 * Memory is currently locked.  It must be unlocked
  			 * before this operation can succeed through a retry.
  			 * The possible reasons for locked memory and
*** 1868,1878 ****
  	next_seg = NULL;
  	seg = AS_SEGFIRST(as);
  	while (seg != NULL) {
  		next_seg = AS_SEGNEXT(as, seg);
  		if (seg->s_flags & S_PURGE)
! 			SEGOP_UNMAP(seg, seg->s_base, seg->s_size);
  		seg = next_seg;
  	}
  	AS_LOCK_EXIT(as, &as->a_lock);
  
  	mutex_enter(&as->a_contents);
--- 1785,1795 ----
  	next_seg = NULL;
  	seg = AS_SEGFIRST(as);
  	while (seg != NULL) {
  		next_seg = AS_SEGNEXT(as, seg);
  		if (seg->s_flags & S_PURGE)
! 			segop_unmap(seg, seg->s_base, seg->s_size);
  		seg = next_seg;
  	}
  	AS_LOCK_EXIT(as, &as->a_lock);
  
  	mutex_enter(&as->a_contents);
*** 2086,2096 ****
   * We're lazy and only return one segment at a time.
   */
  int
  as_memory(struct as *as, caddr_t *basep, size_t *lenp)
  {
! 	extern struct seg_ops segspt_shmops;	/* needs a header file */
  	struct seg *seg;
  	caddr_t addr, eaddr;
  	caddr_t segend;
  
  	AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
--- 2003,2013 ----
   * We're lazy and only return one segment at a time.
   */
  int
  as_memory(struct as *as, caddr_t *basep, size_t *lenp)
  {
! 	extern const struct seg_ops segspt_shmops;	/* needs a header file */
  	struct seg *seg;
  	caddr_t addr, eaddr;
  	caddr_t segend;
  
  	AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
*** 2139,2215 ****
  	AS_LOCK_EXIT(as, &as->a_lock);
  	return (0);
  }
  
  /*
- * Swap the pages associated with the address space as out to
- * secondary storage, returning the number of bytes actually
- * swapped.
- *
- * The value returned is intended to correlate well with the process's
- * memory requirements.  Its usefulness for this purpose depends on
- * how well the segment-level routines do at returning accurate
- * information.
- */
- size_t
- as_swapout(struct as *as)
- {
- 	struct seg *seg;
- 	size_t swpcnt = 0;
- 
- 	/*
- 	 * Kernel-only processes have given up their address
- 	 * spaces.  Of course, we shouldn't be attempting to
- 	 * swap out such processes in the first place...
- 	 */
- 	if (as == NULL)
- 		return (0);
- 
- 	AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
- 
- 	/* Prevent XHATs from attaching */
- 	mutex_enter(&as->a_contents);
- 	AS_SETBUSY(as);
- 	mutex_exit(&as->a_contents);
- 
- 
- 	/*
- 	 * Free all mapping resources associated with the address
- 	 * space.  The segment-level swapout routines capitalize
- 	 * on this unmapping by scavanging pages that have become
- 	 * unmapped here.
- 	 */
- 	hat_swapout(as->a_hat);
- 	if (as->a_xhat != NULL)
- 		xhat_swapout_all(as);
- 
- 	mutex_enter(&as->a_contents);
- 	AS_CLRBUSY(as);
- 	mutex_exit(&as->a_contents);
- 
- 	/*
- 	 * Call the swapout routines of all segments in the address
- 	 * space to do the actual work, accumulating the amount of
- 	 * space reclaimed.
- 	 */
- 	for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) {
- 		struct seg_ops *ov = seg->s_ops;
- 
- 		/*
- 		 * We have to check to see if the seg has
- 		 * an ops vector because the seg may have
- 		 * been in the middle of being set up when
- 		 * the process was picked for swapout.
- 		 */
- 		if ((ov != NULL) && (ov->swapout != NULL))
- 			swpcnt += SEGOP_SWAPOUT(seg);
- 	}
- 	AS_LOCK_EXIT(as, &as->a_lock);
- 	return (swpcnt);
- }
- 
- /*
   * Determine whether data from the mappings in interval [addr, addr + size)
   * are in the primary memory (core) cache.
   */
  int
  as_incore(struct as *as, caddr_t addr,
--- 2056,2065 ----
*** 2247,2257 ****
  		}
  		if ((raddr + rsize) > (seg->s_base + seg->s_size))
  			ssize = seg->s_base + seg->s_size - raddr;
  		else
  			ssize = rsize;
! 		*sizep += isize = SEGOP_INCORE(seg, raddr, ssize, vec);
  		if (isize != ssize) {
  			error = -1;
  			break;
  		}
  		vec += btopr(ssize);
--- 2097,2107 ----
  		}
  		if ((raddr + rsize) > (seg->s_base + seg->s_size))
  			ssize = seg->s_base + seg->s_size - raddr;
  		else
  			ssize = rsize;
! 		*sizep += isize = segop_incore(seg, raddr, ssize, vec);
  		if (isize != ssize) {
  			error = -1;
  			break;
  		}
  		vec += btopr(ssize);
*** 2273,2283 ****
  			while (bt_range(bitmap, &pos1, &pos2, end_pos)) {
  				size = ptob((pos2 - pos1));
  				range_start = (caddr_t)((uintptr_t)addr +
  				    ptob(pos1 - position));
! 				(void) SEGOP_LOCKOP(seg, range_start, size,
  				    attr, MC_UNLOCK, (ulong_t *)NULL,
  				    (size_t)NULL);
  				pos1 = pos2;
  			}
  		}
--- 2123,2133 ----
  			while (bt_range(bitmap, &pos1, &pos2, end_pos)) {
  				size = ptob((pos2 - pos1));
  				range_start = (caddr_t)((uintptr_t)addr +
  				    ptob(pos1 - position));
! 				(void) segop_lockop(seg, range_start, size,
  				    attr, MC_UNLOCK, (ulong_t *)NULL,
  				    (size_t)NULL);
  				pos1 = pos2;
  			}
  		}
*** 2369,2379 ****
  		AS_LOCK_EXIT(as, &as->a_lock);
  		return (EAGAIN);
  	}
  
  	for (seg = AS_SEGFIRST(as); seg; seg = AS_SEGNEXT(as, seg)) {
! 		error = SEGOP_LOCKOP(seg, seg->s_base,
  		    seg->s_size, attr, MC_LOCK, mlock_map, pos);
  		if (error != 0)
  			break;
  		pos += seg_pages(seg);
  	}
--- 2219,2229 ----
  		AS_LOCK_EXIT(as, &as->a_lock);
  		return (EAGAIN);
  	}
  
  	for (seg = AS_SEGFIRST(as); seg; seg = AS_SEGNEXT(as, seg)) {
! 		error = segop_lockop(seg, seg->s_base,
  		    seg->s_size, attr, MC_LOCK, mlock_map, pos);
  		if (error != 0)
  			break;
  		pos += seg_pages(seg);
  	}
*** 2398,2408 ****
  		mutex_enter(&as->a_contents);
  		AS_CLRPGLCK(as);
  		mutex_exit(&as->a_contents);
  
  		for (seg = AS_SEGFIRST(as); seg; seg = AS_SEGNEXT(as, seg)) {
! 			error = SEGOP_LOCKOP(seg, seg->s_base,
  			    seg->s_size, attr, MC_UNLOCK, NULL, 0);
  			if (error != 0)
  				break;
  		}
--- 2248,2258 ----
  		mutex_enter(&as->a_contents);
  		AS_CLRPGLCK(as);
  		mutex_exit(&as->a_contents);
  
  		for (seg = AS_SEGFIRST(as); seg; seg = AS_SEGNEXT(as, seg)) {
! 			error = segop_lockop(seg, seg->s_base,
  			    seg->s_size, attr, MC_UNLOCK, NULL, 0);
  			if (error != 0)
  				break;
  		}
*** 2476,2497 ****
  	/*
  	 * Synchronize cached data from mappings with backing
  	 * objects.
  	 */
  	case MC_SYNC:
! 		if (error = SEGOP_SYNC(seg, raddr, ssize,
  		    attr, (uint_t)arg)) {
  			AS_LOCK_EXIT(as, &as->a_lock);
  			return (error);
  		}
  		break;
  
  	/*
  	 * Lock pages in memory.
  	 */
  	case MC_LOCK:
! 		if (error = SEGOP_LOCKOP(seg, raddr, ssize,
  		    attr, func, mlock_map, pos)) {
  			as_unlockerr(as, attr, mlock_map, initraddr,
  			    initrsize - rsize + ssize);
  			kmem_free(mlock_map, mlock_size *
  			    sizeof (ulong_t));
--- 2326,2347 ----
  	/*
  	 * Synchronize cached data from mappings with backing
  	 * objects.
  	 */
  	case MC_SYNC:
! 		if (error = segop_sync(seg, raddr, ssize,
  		    attr, (uint_t)arg)) {
  			AS_LOCK_EXIT(as, &as->a_lock);
  			return (error);
  		}
  		break;
  
  	/*
  	 * Lock pages in memory.
  	 */
  	case MC_LOCK:
! 		if (error = segop_lockop(seg, raddr, ssize,
  		    attr, func, mlock_map, pos)) {
  			as_unlockerr(as, attr, mlock_map, initraddr,
  			    initrsize - rsize + ssize);
  			kmem_free(mlock_map, mlock_size *
  			    sizeof (ulong_t));
*** 2502,2520 ****
  	/*
  	 * Unlock mapped pages.
  	 */
  	case MC_UNLOCK:
! 		(void) SEGOP_LOCKOP(seg, raddr, ssize, attr, func,
  		    (ulong_t *)NULL, (size_t)NULL);
  		break;
  
  	/*
  	 * Store VM advise for mapped pages in segment layer.
  	 */
  	case MC_ADVISE:
! 		error = SEGOP_ADVISE(seg, raddr, ssize, (uint_t)arg);
  
  		/*
  		 * Check for regular errors and special retry error
  		 */
  		if (error) {
--- 2352,2370 ----
  	/*
  	 * Unlock mapped pages.
  	 */
  	case MC_UNLOCK:
! 		(void) segop_lockop(seg, raddr, ssize, attr, func,
  		    (ulong_t *)NULL, (size_t)NULL);
  		break;
  
  	/*
  	 * Store VM advise for mapped pages in segment layer.
  	 */
  	case MC_ADVISE:
! 		error = segop_advise(seg, raddr, ssize, (uint_t)arg);
  
  		/*
  		 * Check for regular errors and special retry error
  		 */
  		if (error) {
*** 2546,2561 ****
  			}
  		}
  		break;
  
  	case MC_INHERIT_ZERO:
! 		if (seg->s_ops->inherit == NULL) {
! 			error = ENOTSUP;
! 		} else {
! 			error = SEGOP_INHERIT(seg, raddr, ssize,
! 			    SEGP_INH_ZERO);
! 		}
  		if (error != 0) {
  			AS_LOCK_EXIT(as, &as->a_lock);
  			return (error);
  		}
  		break;
--- 2396,2406 ----
  			}
  		}
  		break;
  
  	case MC_INHERIT_ZERO:
! 		error = segop_inherit(seg, raddr, ssize, SEGP_INH_ZERO);
  		if (error != 0) {
  			AS_LOCK_EXIT(as, &as->a_lock);
  			return (error);
  		}
  		break;
*** 2635,2645 ****
  	page_t **pl;
  	int error;
  	caddr_t eaddr;
  	faultcode_t fault_err = 0;
  	pgcnt_t pl_off;
! 	extern struct seg_ops segspt_shmops;
  
  	ASSERT(AS_LOCK_HELD(as, &as->a_lock));
  	ASSERT(seg != NULL);
  	ASSERT(addr >= seg->s_base && addr < seg->s_base + seg->s_size);
  	ASSERT(addr + size > seg->s_base + seg->s_size);
--- 2480,2490 ----
  	page_t **pl;
  	int error;
  	caddr_t eaddr;
  	faultcode_t fault_err = 0;
  	pgcnt_t pl_off;
! 	extern const struct seg_ops segspt_shmops;
  
  	ASSERT(AS_LOCK_HELD(as, &as->a_lock));
  	ASSERT(seg != NULL);
  	ASSERT(addr >= seg->s_base && addr < seg->s_base + seg->s_size);
  	ASSERT(addr + size > seg->s_base + seg->s_size);
*** 2664,2674 ****
  	 * will most likely support pagelock.
  	 */
  	if (seg->s_ops == &segvn_ops) {
  		vnode_t *vp;
  
! 		if (SEGOP_GETVP(seg, addr, &vp) != 0 || vp != NULL) {
  			AS_LOCK_EXIT(as, &as->a_lock);
  			goto slow;
  		}
  	} else if (seg->s_ops != &segspt_shmops) {
--- 2509,2519 ----
  	 * will most likely support pagelock.
  	 */
  	if (seg->s_ops == &segvn_ops) {
  		vnode_t *vp;
  
! 		if (segop_getvp(seg, addr, &vp) != 0 || vp != NULL) {
  			AS_LOCK_EXIT(as, &as->a_lock);
  			goto slow;
  		}
  	} else if (seg->s_ops != &segspt_shmops) {
*** 2702,2712 ****
  			ssize = seg->s_base + seg->s_size - addr;
  		} else {
  			ssize = size;
  		}
  		pl = &plist[npages + cnt];
! 		error = SEGOP_PAGELOCK(seg, addr, ssize, (page_t ***)pl,
  		    L_PAGELOCK, rw);
  		if (error) {
  			break;
  		}
  		ASSERT(plist[npages + cnt] != NULL);
--- 2547,2557 ----
  			ssize = seg->s_base + seg->s_size - addr;
  		} else {
  			ssize = size;
  		}
  		pl = &plist[npages + cnt];
! 		error = segop_pagelock(seg, addr, ssize, (page_t ***)pl,
  		    L_PAGELOCK, rw);
  		if (error) {
  			break;
  		}
  		ASSERT(plist[npages + cnt] != NULL);
*** 2745,2755 ****
  		} else {
  			ssize = eaddr - addr;
  		}
  		pl = &plist[npages + cnt];
  		ASSERT(*pl != NULL);
! 		(void) SEGOP_PAGELOCK(seg, addr, ssize, (page_t ***)pl,
  		    L_PAGEUNLOCK, rw);
  	}
  
  	AS_LOCK_EXIT(as, &as->a_lock);
--- 2590,2600 ----
  		} else {
  			ssize = eaddr - addr;
  		}
  		pl = &plist[npages + cnt];
  		ASSERT(*pl != NULL);
! 		(void) segop_pagelock(seg, addr, ssize, (page_t ***)pl,
  		    L_PAGEUNLOCK, rw);
  	}
  
  	AS_LOCK_EXIT(as, &as->a_lock);
*** 2820,2830 ****
  	    "seg_lock_1_start: raddr %p rsize %ld", raddr, rsize);
  
  	/*
  	 * try to lock pages and pass back shadow list
  	 */
! 	err = SEGOP_PAGELOCK(seg, raddr, rsize, ppp, L_PAGELOCK, rw);
  
  	TRACE_0(TR_FAC_PHYSIO, TR_PHYSIO_SEG_LOCK_END, "seg_lock_1_end");
  
  	AS_LOCK_EXIT(as, &as->a_lock);
--- 2665,2675 ----
  	    "seg_lock_1_start: raddr %p rsize %ld", raddr, rsize);
  
  	/*
  	 * try to lock pages and pass back shadow list
  	 */
! 	err = segop_pagelock(seg, raddr, rsize, ppp, L_PAGELOCK, rw);
  
  	TRACE_0(TR_FAC_PHYSIO, TR_PHYSIO_SEG_LOCK_END, "seg_lock_1_end");
  
  	AS_LOCK_EXIT(as, &as->a_lock);
*** 2883,2893 ****
  		} else {
  			ssize = eaddr - addr;
  		}
  		pl = &plist[npages + cnt];
  		ASSERT(*pl != NULL);
! 		(void) SEGOP_PAGELOCK(seg, addr, ssize, (page_t ***)pl,
  		    L_PAGEUNLOCK, rw);
  	}
  
  	ASSERT(cnt > 0);
  	AS_LOCK_EXIT(as, &as->a_lock);
--- 2728,2738 ----
  		} else {
  			ssize = eaddr - addr;
  		}
  		pl = &plist[npages + cnt];
  		ASSERT(*pl != NULL);
! 		(void) segop_pagelock(seg, addr, ssize, (page_t ***)pl,
  		    L_PAGEUNLOCK, rw);
  	}
  
  	ASSERT(cnt > 0);
  	AS_LOCK_EXIT(as, &as->a_lock);
*** 2929,2939 ****
  	TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEG_UNLOCK_START,
  	    "seg_unlock_start: raddr %p rsize %ld", raddr, rsize);
  
  	ASSERT(raddr >= seg->s_base && raddr < seg->s_base + seg->s_size);
  	if (raddr + rsize <= seg->s_base + seg->s_size) {
! 		SEGOP_PAGELOCK(seg, raddr, rsize, &pp, L_PAGEUNLOCK, rw);
  	} else {
  		as_pageunlock_segs(as, seg, raddr, rsize, pp, rw);
  		return;
  	}
  	AS_LOCK_EXIT(as, &as->a_lock);
--- 2774,2784 ----
  	TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEG_UNLOCK_START,
  	    "seg_unlock_start: raddr %p rsize %ld", raddr, rsize);
  
  	ASSERT(raddr >= seg->s_base && raddr < seg->s_base + seg->s_size);
  	if (raddr + rsize <= seg->s_base + seg->s_size) {
! 		segop_pagelock(seg, raddr, rsize, &pp, L_PAGEUNLOCK, rw);
  	} else {
  		as_pageunlock_segs(as, seg, raddr, rsize, pp, rw);
  		return;
  	}
  	AS_LOCK_EXIT(as, &as->a_lock);
*** 2984,2994 ****
  		} else {
  			ssize = rsize;
  		}
  
  retry:
! 		error = SEGOP_SETPAGESIZE(seg, raddr, ssize, szc);
  
  		if (error == IE_NOMEM) {
  			error = EAGAIN;
  			break;
  		}
--- 2829,2839 ----
  		} else {
  			ssize = rsize;
  		}
  
  retry:
! 		error = segop_setpagesize(seg, raddr, ssize, szc);
  
  		if (error == IE_NOMEM) {
  			error = EAGAIN;
  			break;
  		}
*** 3063,3073 ****
  	AS_LOCK_EXIT(as, &as->a_lock);
  	return (error);
  }
  
  /*
!  * as_iset3_default_lpsize() just calls SEGOP_SETPAGESIZE() on all segments
   * in its chunk where s_szc is less than the szc we want to set.
   */
  static int
  as_iset3_default_lpsize(struct as *as, caddr_t raddr, size_t rsize, uint_t szc,
      int *retry)
--- 2908,2918 ----
  	AS_LOCK_EXIT(as, &as->a_lock);
  	return (error);
  }
  
  /*
!  * as_iset3_default_lpsize() just calls segop_setpagesize() on all segments
   * in its chunk where s_szc is less than the szc we want to set.
   */
  static int
  as_iset3_default_lpsize(struct as *as, caddr_t raddr, size_t rsize, uint_t szc,
      int *retry)
*** 3095,3110 ****
  		} else {
  			ssize = rsize;
  		}
  
  		if (szc > seg->s_szc) {
! 			error = SEGOP_SETPAGESIZE(seg, raddr, ssize, szc);
  			/* Only retry on EINVAL segments that have no vnode. */
  			if (error == EINVAL) {
  				vnode_t *vp = NULL;
  
! 				if ((SEGOP_GETTYPE(seg, raddr) & MAP_SHARED) &&
! 				    (SEGOP_GETVP(seg, raddr, &vp) != 0 || vp == NULL)) {
  					*retry = 1;
  				} else {
  					*retry = 0;
  				}
--- 2940,2955 ----
  		} else {
  			ssize = rsize;
  		}
  
  		if (szc > seg->s_szc) {
! 			error = segop_setpagesize(seg, raddr, ssize, szc);
  			/* Only retry on EINVAL segments that have no vnode. */
  			if (error == EINVAL) {
  				vnode_t *vp = NULL;
  
! 				if ((segop_gettype(seg, raddr) & MAP_SHARED) &&
! 				    (segop_getvp(seg, raddr, &vp) != 0 || vp == NULL)) {
  					*retry = 1;
  				} else {
  					*retry = 0;
  				}
*** 3343,3353 ****
  		as_setwatch(as);
  		AS_LOCK_EXIT(as, &as->a_lock);
  		return (ENOMEM);
  	}
  	if (seg->s_ops == &segvn_ops) {
! 		rtype = SEGOP_GETTYPE(seg, addr);
  		rflags = rtype & (MAP_TEXT | MAP_INITDATA);
  		rtype = rtype & (MAP_SHARED | MAP_PRIVATE);
  		segvn = 1;
  	} else {
  		segvn = 0;
--- 3188,3198 ----
  		as_setwatch(as);
  		AS_LOCK_EXIT(as, &as->a_lock);
  		return (ENOMEM);
  	}
  	if (seg->s_ops == &segvn_ops) {
! 		rtype = segop_gettype(seg, addr);
  		rflags = rtype & (MAP_TEXT | MAP_INITDATA);
  		rtype = rtype & (MAP_SHARED | MAP_PRIVATE);
  		segvn = 1;
  	} else {
  		segvn = 0;
*** 3361,3371 ****
  		if (seg == NULL || raddr != seg->s_base) {
  			error = ENOMEM;
  			break;
  		}
  		if (seg->s_ops == &segvn_ops) {
! 			stype = SEGOP_GETTYPE(seg, raddr);
  			sflags = stype & (MAP_TEXT | MAP_INITDATA);
  			stype &= (MAP_SHARED | MAP_PRIVATE);
  			if (segvn && (rflags != sflags ||
  			    rtype != stype)) {
  				/*
--- 3206,3216 ----
  		if (seg == NULL || raddr != seg->s_base) {
  			error = ENOMEM;
  			break;
  		}
  		if (seg->s_ops == &segvn_ops) {
! 			stype = segop_gettype(seg, raddr);
  			sflags = stype & (MAP_TEXT | MAP_INITDATA);
  			stype &= (MAP_SHARED | MAP_PRIVATE);
  			if (segvn && (rflags != sflags ||
  			    rtype != stype)) {
  				/*
*** 3475,3496 ****
  		retrycnt = 0;
  retry:
  		vaddr = pwp->wp_vaddr;
  		if (pwp->wp_oprot != 0 ||	/* already set up */
  		    (seg = as_segat(as, vaddr)) == NULL ||
! 		    SEGOP_GETPROT(seg, vaddr, 0, &prot) != 0)
  			continue;
  
  		pwp->wp_oprot = prot;
  		if (pwp->wp_read)
  			prot &= ~(PROT_READ|PROT_WRITE|PROT_EXEC);
  		if (pwp->wp_write)
  			prot &= ~PROT_WRITE;
  		if (pwp->wp_exec)
  			prot &= ~(PROT_READ|PROT_WRITE|PROT_EXEC);
  		if (!(pwp->wp_flags & WP_NOWATCH) && prot != pwp->wp_oprot) {
! 			err = SEGOP_SETPROT(seg, vaddr, PAGESIZE, prot);
  			if (err == IE_RETRY) {
  				pwp->wp_oprot = 0;
  				ASSERT(retrycnt == 0);
  				retrycnt++;
  				goto retry;
--- 3320,3341 ----
  		retrycnt = 0;
  retry:
  		vaddr = pwp->wp_vaddr;
  		if (pwp->wp_oprot != 0 ||	/* already set up */
  		    (seg = as_segat(as, vaddr)) == NULL ||
! 		    segop_getprot(seg, vaddr, 0, &prot) != 0)
  			continue;
  
  		pwp->wp_oprot = prot;
  		if (pwp->wp_read)
  			prot &= ~(PROT_READ|PROT_WRITE|PROT_EXEC);
  		if (pwp->wp_write)
  			prot &= ~PROT_WRITE;
  		if (pwp->wp_exec)
  			prot &= ~(PROT_READ|PROT_WRITE|PROT_EXEC);
  		if (!(pwp->wp_flags & WP_NOWATCH) && prot != pwp->wp_oprot) {
! 			err = segop_setprot(seg, vaddr, PAGESIZE, prot);
  			if (err == IE_RETRY) {
  				pwp->wp_oprot = 0;
  				ASSERT(retrycnt == 0);
  				retrycnt++;
  				goto retry;
*** 3525,3535 ****
  		if (pwp->wp_oprot == 0 ||	/* not set up */
  		    (seg = as_segat(as, vaddr)) == NULL)
  			continue;
  
  		if ((prot = pwp->wp_oprot) != pwp->wp_prot) {
! 			err = SEGOP_SETPROT(seg, vaddr, PAGESIZE, prot);
  			if (err == IE_RETRY) {
  				ASSERT(retrycnt == 0);
  				retrycnt++;
  				goto retry;
  			}
--- 3370,3380 ----
  		if (pwp->wp_oprot == 0 ||	/* not set up */
  		    (seg = as_segat(as, vaddr)) == NULL)
  			continue;
  
  		if ((prot = pwp->wp_oprot) != pwp->wp_prot) {
! 			err = segop_setprot(seg, vaddr, PAGESIZE, prot);
  			if (err == IE_RETRY) {
  				ASSERT(retrycnt == 0);
  				retrycnt++;
  				goto retry;
  			}
*** 3579,3589 ****
  		seg = as_segat(as, vaddr);
  		if (seg == NULL) {
  			panic("as_setwatchprot: no seg");
  			/*NOTREACHED*/
  		}
! 		err = SEGOP_SETPROT(seg, vaddr, PAGESIZE, wprot);
  		if (err == IE_RETRY) {
  			ASSERT(retrycnt == 0);
  			retrycnt++;
  			goto retry;
  		}
--- 3424,3434 ----
  		seg = as_segat(as, vaddr);
  		if (seg == NULL) {
  			panic("as_setwatchprot: no seg");
  			/*NOTREACHED*/
  		}
! 		err = segop_setprot(seg, vaddr, PAGESIZE, wprot);
  		if (err == IE_RETRY) {
  			ASSERT(retrycnt == 0);
  			retrycnt++;
  			goto retry;
  		}
*** 3626,3636 ****
  		if (prot != pwp->wp_prot) {
  retry:
  			seg = as_segat(as, pwp->wp_vaddr);
  			if (seg == NULL)
  				continue;
! 			err = SEGOP_SETPROT(seg, pwp->wp_vaddr,
  			    PAGESIZE, prot);
  			if (err == IE_RETRY) {
  				ASSERT(retrycnt == 0);
  				retrycnt++;
  				goto retry;
--- 3471,3481 ----
  		if (prot != pwp->wp_prot) {
  retry:
  			seg = as_segat(as, pwp->wp_vaddr);
  			if (seg == NULL)
  				continue;
! 			err = segop_setprot(seg, pwp->wp_vaddr,
  			    PAGESIZE, prot);
  			if (err == IE_RETRY) {
  				ASSERT(retrycnt == 0);
  				retrycnt++;
  				goto retry;
*** 3675,3692 ****
  	seg = as_segat(as, addr);
  	if (seg == NULL) {
  		AS_LOCK_EXIT(as, &as->a_lock);
  		return (EFAULT);
  	}
- 	/*
- 	 * catch old drivers which may not support getmemid
- 	 */
- 	if (seg->s_ops->getmemid == NULL) {
- 		AS_LOCK_EXIT(as, &as->a_lock);
- 		return (ENODEV);
- 	}
! 	sts = SEGOP_GETMEMID(seg, addr, memidp);
  
  	AS_LOCK_EXIT(as, &as->a_lock);
  	return (sts);
  }
--- 3520,3530 ----
  	seg = as_segat(as, addr);
  	if (seg == NULL) {
  		AS_LOCK_EXIT(as, &as->a_lock);
  		return (EFAULT);
  	}
! 	sts = segop_getmemid(seg, addr, memidp);
  
  	AS_LOCK_EXIT(as, &as->a_lock);
  	return (sts);
  }