Print this page
patch vm-cleanup

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/sfmmu/vm/hat_sfmmu.c
          +++ new/usr/src/uts/sfmmu/vm/hat_sfmmu.c
↓ open down ↓ 1968 lines elided ↑ open up ↑
1969 1969  #ifdef DEBUG
1970 1970          for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
1971 1971                  ASSERT(sfmmup->sfmmu_hmeregion_links[i] == NULL);
1972 1972          }
1973 1973  #endif
1974 1974  
1975 1975          kmem_cache_free(sfmmuid_cache, sfmmup);
1976 1976  }
1977 1977  
1978 1978  /*
1979      - * Set up any translation structures, for the specified address space,
1980      - * that are needed or preferred when the process is being swapped in.
1981      - */
1982      -/* ARGSUSED */
1983      -void
1984      -hat_swapin(struct hat *hat)
1985      -{
1986      -        ASSERT(hat->sfmmu_xhat_provider == NULL);
1987      -}
1988      -
1989      -/*
1990      - * Free all of the translation resources, for the specified address space,
1991      - * that can be freed while the process is swapped out. Called from as_swapout.
1992      - * Also, free up the ctx that this process was using.
1993      - */
1994      -void
1995      -hat_swapout(struct hat *sfmmup)
1996      -{
1997      -        struct hmehash_bucket *hmebp;
1998      -        struct hme_blk *hmeblkp;
1999      -        struct hme_blk *pr_hblk = NULL;
2000      -        struct hme_blk *nx_hblk;
2001      -        int i;
2002      -        struct hme_blk *list = NULL;
2003      -        hatlock_t *hatlockp;
2004      -        struct tsb_info *tsbinfop;
2005      -        struct free_tsb {
2006      -                struct free_tsb *next;
2007      -                struct tsb_info *tsbinfop;
2008      -        };                      /* free list of TSBs */
2009      -        struct free_tsb *freelist, *last, *next;
2010      -
2011      -        ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
2012      -        SFMMU_STAT(sf_swapout);
2013      -
2014      -        /*
2015      -         * There is no way to go from an as to all its translations in sfmmu.
2016      -         * Here is one of the times when we take the big hit and traverse
2017      -         * the hash looking for hme_blks to free up.  Not only do we free up
2018      -         * this as hme_blks but all those that are free.  We are obviously
2019      -         * swapping because we need memory so let's free up as much
2020      -         * as we can.
2021      -         *
2022      -         * Note that we don't flush TLB/TSB here -- it's not necessary
2023      -         * because:
2024      -         *  1) we free the ctx we're using and throw away the TSB(s);
2025      -         *  2) processes aren't runnable while being swapped out.
2026      -         */
2027      -        ASSERT(sfmmup != KHATID);
2028      -        for (i = 0; i <= UHMEHASH_SZ; i++) {
2029      -                hmebp = &uhme_hash[i];
2030      -                SFMMU_HASH_LOCK(hmebp);
2031      -                hmeblkp = hmebp->hmeblkp;
2032      -                pr_hblk = NULL;
2033      -                while (hmeblkp) {
2034      -
2035      -                        ASSERT(!hmeblkp->hblk_xhat_bit);
2036      -
2037      -                        if ((hmeblkp->hblk_tag.htag_id == sfmmup) &&
2038      -                            !hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) {
2039      -                                ASSERT(!hmeblkp->hblk_shared);
2040      -                                (void) sfmmu_hblk_unload(sfmmup, hmeblkp,
2041      -                                    (caddr_t)get_hblk_base(hmeblkp),
2042      -                                    get_hblk_endaddr(hmeblkp),
2043      -                                    NULL, HAT_UNLOAD);
2044      -                        }
2045      -                        nx_hblk = hmeblkp->hblk_next;
2046      -                        if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
2047      -                                ASSERT(!hmeblkp->hblk_lckcnt);
2048      -                                sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
2049      -                                    &list, 0);
2050      -                        } else {
2051      -                                pr_hblk = hmeblkp;
2052      -                        }
2053      -                        hmeblkp = nx_hblk;
2054      -                }
2055      -                SFMMU_HASH_UNLOCK(hmebp);
2056      -        }
2057      -
2058      -        sfmmu_hblks_list_purge(&list, 0);
2059      -
2060      -        /*
2061      -         * Now free up the ctx so that others can reuse it.
2062      -         */
2063      -        hatlockp = sfmmu_hat_enter(sfmmup);
2064      -
2065      -        sfmmu_invalidate_ctx(sfmmup);
2066      -
2067      -        /*
2068      -         * Free TSBs, but not tsbinfos, and set SWAPPED flag.
2069      -         * If TSBs were never swapped in, just return.
2070      -         * This implies that we don't support partial swapping
2071      -         * of TSBs -- either all are swapped out, or none are.
2072      -         *
2073      -         * We must hold the HAT lock here to prevent racing with another
2074      -         * thread trying to unmap TTEs from the TSB or running the post-
2075      -         * relocator after relocating the TSB's memory.  Unfortunately, we
2076      -         * can't free memory while holding the HAT lock or we could
2077      -         * deadlock, so we build a list of TSBs to be freed after marking
2078      -         * the tsbinfos as swapped out and free them after dropping the
2079      -         * lock.
2080      -         */
2081      -        if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
2082      -                sfmmu_hat_exit(hatlockp);
2083      -                return;
2084      -        }
2085      -
2086      -        SFMMU_FLAGS_SET(sfmmup, HAT_SWAPPED);
2087      -        last = freelist = NULL;
2088      -        for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
2089      -            tsbinfop = tsbinfop->tsb_next) {
2090      -                ASSERT((tsbinfop->tsb_flags & TSB_SWAPPED) == 0);
2091      -
2092      -                /*
2093      -                 * Cast the TSB into a struct free_tsb and put it on the free
2094      -                 * list.
2095      -                 */
2096      -                if (freelist == NULL) {
2097      -                        last = freelist = (struct free_tsb *)tsbinfop->tsb_va;
2098      -                } else {
2099      -                        last->next = (struct free_tsb *)tsbinfop->tsb_va;
2100      -                        last = last->next;
2101      -                }
2102      -                last->next = NULL;
2103      -                last->tsbinfop = tsbinfop;
2104      -                tsbinfop->tsb_flags |= TSB_SWAPPED;
2105      -                /*
2106      -                 * Zero out the TTE to clear the valid bit.
2107      -                 * Note we can't use a value like 0xbad because we want to
2108      -                 * ensure diagnostic bits are NEVER set on TTEs that might
2109      -                 * be loaded.  The intent is to catch any invalid access
2110      -                 * to the swapped TSB, such as a thread running with a valid
2111      -                 * context without first calling sfmmu_tsb_swapin() to
2112      -                 * allocate TSB memory.
2113      -                 */
2114      -                tsbinfop->tsb_tte.ll = 0;
2115      -        }
2116      -
2117      -        /* Now we can drop the lock and free the TSB memory. */
2118      -        sfmmu_hat_exit(hatlockp);
2119      -        for (; freelist != NULL; freelist = next) {
2120      -                next = freelist->next;
2121      -                sfmmu_tsb_free(freelist->tsbinfop);
2122      -        }
2123      -}
2124      -
2125      -/*
2126 1979   * Duplicate the translations of an as into another newas
2127 1980   */
2128 1981  /* ARGSUSED */
2129 1982  int
2130 1983  hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len,
2131 1984          uint_t flag)
2132 1985  {
2133 1986          sf_srd_t *srdp;
2134 1987          sf_scd_t *scdp;
2135 1988          int i;
↓ open down ↓ 7853 lines elided ↑ open up ↑
9989 9842                   */
9990 9843                  sfmmu_invalidate_ctx(sfmmup);
9991 9844          }
9992 9845  
9993 9846          kpreempt_enable();
9994 9847  }
9995 9848  
9996 9849  
9997 9850  /*
9998 9851   * Replace the specified TSB with a new TSB.  This function gets called when
9999      - * we grow, shrink or swapin a TSB.  When swapping in a TSB (TSB_SWAPIN), the
     9852 + * we grow or shrink a TSB.  When swapping in a TSB (TSB_SWAPIN), the
10000 9853   * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB
10001 9854   * (8K).
10002 9855   *
10003 9856   * Caller must hold the HAT lock, but should assume any tsb_info
10004 9857   * pointers it has are no longer valid after calling this function.
10005 9858   *
10006 9859   * Return values:
10007 9860   *      TSB_ALLOCFAIL   Failed to allocate a TSB, due to memory constraints
10008 9861   *      TSB_LOSTRACE    HAT is busy, i.e. another thread is already doing
10009 9862   *                      something to this tsbinfo/TSB
↓ open down ↓ 5846 lines elided ↑ open up ↑
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX