Print this page
remove whole-process swapping
Long before Unix supported paging, it used whole-process swapping to reclaim
memory.  The code is still there and in theory it runs when we get *extremely*
low on memory.  In practice, it never runs: the low-on-memory threshold that
triggers it was tuned for machines with a tiny fraction of the RAM found in
modern systems, so the trigger condition is effectively never met.
You can check the number of swapout/swapin events with kstats:
$ kstat -p ::vm:swapin ::vm:swapout


2041                 }
2042 
2043                 seg = AS_SEGNEXT(as, seg);
2044 
2045                 if (seg != NULL)
2046                         addr = seg->s_base;
2047         }
2048 
2049         *basep = addr;
2050 
2051         if (segend > eaddr)
2052                 *lenp = eaddr - addr;
2053         else
2054                 *lenp = segend - addr;
2055 
2056         AS_LOCK_EXIT(as, &as->a_lock);
2057         return (0);
2058 }
2059 
2060 /*
2061  * Swap the pages associated with the address space as out to
2062  * secondary storage, returning the number of bytes actually
2063  * swapped.
2064  *
2065  * The value returned is intended to correlate well with the process's
2066  * memory requirements.  Its usefulness for this purpose depends on
2067  * how well the segment-level routines do at returning accurate
2068  * information.
2069  */
2070 size_t
2071 as_swapout(struct as *as)
2072 {
2073         struct seg *seg;
2074         size_t swpcnt = 0;
2075 
2076         /*
2077          * Kernel-only processes have given up their address
2078          * spaces.  Of course, we shouldn't be attempting to
2079          * swap out such processes in the first place...
2080          */
2081         if (as == NULL)
2082                 return (0);
2083 
2084         AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
2085 
2086         /*
2087          * Free all mapping resources associated with the address
2088          * space.  The segment-level swapout routines capitalize
2089          * on this unmapping by scavanging pages that have become
2090          * unmapped here.
2091          */
2092         hat_swapout(as->a_hat);
2093 
2094         /*
2095          * Call the swapout routines of all segments in the address
2096          * space to do the actual work, accumulating the amount of
2097          * space reclaimed.
2098          */
2099         for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) {
2100                 struct seg_ops *ov = seg->s_ops;
2101 
2102                 /*
2103                  * We have to check to see if the seg has
2104                  * an ops vector because the seg may have
2105                  * been in the middle of being set up when
2106                  * the process was picked for swapout.
2107                  */
2108                 if ((ov != NULL) && (ov->swapout != NULL))
2109                         swpcnt += SEGOP_SWAPOUT(seg);
2110         }
2111         AS_LOCK_EXIT(as, &as->a_lock);
2112         return (swpcnt);
2113 }
2114 
2115 /*
2116  * Determine whether data from the mappings in interval [addr, addr + size)
2117  * are in the primary memory (core) cache.
2118  */
2119 int
2120 as_incore(struct as *as, caddr_t addr,
2121     size_t size, char *vec, size_t *sizep)
2122 {
2123         struct seg *seg;
2124         size_t ssize;
2125         caddr_t raddr;          /* rounded down addr */
2126         size_t rsize;           /* rounded up size */
2127         size_t isize;                   /* iteration size */
2128         int error = 0;          /* result, assume success */
2129 
2130         *sizep = 0;
2131         raddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2132         rsize = ((((size_t)addr + size) + PAGEOFFSET) & PAGEMASK) -
2133             (size_t)raddr;
2134 
2135         if (raddr + rsize < raddr)           /* check for wraparound */




2041                 }
2042 
2043                 seg = AS_SEGNEXT(as, seg);
2044 
2045                 if (seg != NULL)
2046                         addr = seg->s_base;
2047         }
2048 
2049         *basep = addr;
2050 
2051         if (segend > eaddr)
2052                 *lenp = eaddr - addr;
2053         else
2054                 *lenp = segend - addr;
2055 
2056         AS_LOCK_EXIT(as, &as->a_lock);
2057         return (0);
2058 }
2059 
2060 /*























































2061  * Determine whether data from the mappings in interval [addr, addr + size)
2062  * are in the primary memory (core) cache.
2063  */
2064 int
2065 as_incore(struct as *as, caddr_t addr,
2066     size_t size, char *vec, size_t *sizep)
2067 {
2068         struct seg *seg;
2069         size_t ssize;
2070         caddr_t raddr;          /* rounded down addr */
2071         size_t rsize;           /* rounded up size */
2072         size_t isize;                   /* iteration size */
2073         int error = 0;          /* result, assume success */
2074 
2075         *sizep = 0;
2076         raddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2077         rsize = ((((size_t)addr + size) + PAGEOFFSET) & PAGEMASK) -
2078             (size_t)raddr;
2079 
2080         if (raddr + rsize < raddr)           /* check for wraparound */