/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*      Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T     */
/*        All Rights Reserved   */

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

#ifndef _VM_PAGE_H
#define _VM_PAGE_H

#include <vm/seg.h>

#ifdef  __cplusplus
extern "C" {
#endif

#if defined(_KERNEL) || defined(_KMEMUSER)

/*
 * Shared/Exclusive lock.
 */

/*
 * Types of page locking supported by page_lock & friends.
 */
typedef enum {
        SE_SHARED,
        SE_EXCL                 /* exclusive lock (value == -1) */
} se_t;

/*
 * For requesting that page_lock reclaim the page from the free list.
 */
typedef enum {
        P_RECLAIM,              /* reclaim page from free list */
        P_NO_RECLAIM            /* DON'T reclaim the page       */
} reclaim_t;

/*
 * Callers of page_try_reclaim_lock and page_lock_es can use this flag
 * to get SE_EXCL access before reader/writers are given access.
 */
#define SE_EXCL_WANTED  0x02

/*
 * All page_*lock() requests will be denied unless this flag is set in
 * the 'es' parameter.
 */
#define SE_RETIRED      0x04
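
/*
 * Illustrative sketch (not part of the interface): a caller that must
 * exclusively lock a possibly retired page, ahead of new readers, could
 * pass both flags in 'es':
 *
 *      if (page_try_reclaim_lock(pp, SE_EXCL, SE_EXCL_WANTED | SE_RETIRED))
 *              ... the page is now SE_EXCL locked ...
 */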

#endif  /* _KERNEL | _KMEMUSER */

typedef int     selock_t;

/*
 * Define VM_STATS to turn on all sorts of statistic gathering about
 * the VM layer.  By default, it is only turned on when DEBUG is
 * also defined.
 */
#ifdef DEBUG
#define VM_STATS
#endif  /* DEBUG */

#ifdef VM_STATS
#define VM_STAT_ADD(stat)                       (stat)++
#define VM_STAT_COND_ADD(cond, stat)            ((void) (!(cond) || (stat)++))
#else
#define VM_STAT_ADD(stat)                       do { } while (0)
#define VM_STAT_COND_ADD(cond, stat)            do { } while (0)
#endif  /* VM_STATS */
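
/*
 * Example (sketch; 'foo_stats' is a hypothetical counter struct):
 *
 *      static struct foo_stats { uint_t hits; uint_t misses; } foo_stats;
 *
 *      VM_STAT_ADD(foo_stats.hits);
 *      VM_STAT_COND_ADD(pp == NULL, foo_stats.misses);
 *
 * Both expand to no-ops unless VM_STATS is defined.
 */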

#ifdef _KERNEL

/*
 * PAGE_LLOCK_SIZE is 2 * NCPU, but no smaller than 128.
 * PAGE_LLOCK_SHIFT is log2(PAGE_LLOCK_SIZE).
 *
 * We use ? : instead of #if because <vm/page.h> is included everywhere;
 * NCPU_P2 is only a constant in the "unix" module.
 */
#define PAGE_LLOCK_SHIFT \
            ((unsigned)(((2*NCPU_P2) > 128) ? NCPU_LOG2 + 1 : 7))

#define PAGE_LLOCK_SIZE (1ul << PAGE_LLOCK_SHIFT)

/*
 * The number of low order 0 (or less variable) bits in the page_t address.
 */
#if defined(__sparc)
#define PP_SHIFT                7
#else
#define PP_SHIFT                6
#endif

/*
 * pp may be the root of a large page, and many low order bits will be 0.
 * Shift and XOR multiple times to capture the good bits across the range of
 * possible page sizes.
 */
#define PAGE_LLOCK_HASH(pp)     \
        (((((uintptr_t)(pp) >> PP_SHIFT) ^ \
        ((uintptr_t)(pp) >> (PAGE_LLOCK_SHIFT + PP_SHIFT))) ^ \
        ((uintptr_t)(pp) >> ((PAGE_LLOCK_SHIFT * 2) + PP_SHIFT)) ^ \
        ((uintptr_t)(pp) >> ((PAGE_LLOCK_SHIFT * 3) + PP_SHIFT))) & \
        (PAGE_LLOCK_SIZE - 1))

#define page_struct_lock(pp)    \
        mutex_enter(&page_llocks[PAGE_LLOCK_HASH(PP_PAGEROOT(pp))].pad_mutex)
#define page_struct_unlock(pp)  \
        mutex_exit(&page_llocks[PAGE_LLOCK_HASH(PP_PAGEROOT(pp))].pad_mutex)
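
/*
 * Sketch of intended use: fields covered by page_llocks[] (p_lckcnt,
 * p_cowcnt) are updated under the hashed mutex, e.g.
 *
 *      page_struct_lock(pp);
 *      if (pp->p_lckcnt < PAGE_LOCK_MAXIMUM)
 *              pp->p_lckcnt++;
 *      page_struct_unlock(pp);
 */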

#endif  /* _KERNEL */

#include <sys/t_lock.h>

struct as;

/*
 * Each physical page has a page structure, which is used to maintain
 * these pages as a cache.  A page can be found via a hashed lookup
 * based on the [vp, offset].  If a page has an [vp, offset] identity,
 * then it is entered on a doubly linked circular list off the
 * vnode using the vpnext/vpprev pointers.  If the p_free bit
 * is on, then the page is also on a doubly linked circular free
 * list using next/prev pointers.  If the "p_selock" and "p_iolock"
 * are held, then the page is currently being read in (exclusive p_selock)
 * or written back (shared p_selock).  In this case, the next/prev pointers
 * are used to link the pages together for a consecutive i/o request.  If
 * the page is being brought in from its backing store, then other processes
 * will wait for the i/o to complete before attaching to the page since it
 * will have an "exclusive" lock.
 *
 * Each page structure has the locks described below along with
 * the fields they protect:
 *
 *      p_selock        This is a per-page shared/exclusive lock that is
 *                      used to implement the logical shared/exclusive
 *                      lock for each page.  The "shared" lock is normally
 *                      used in most cases while the "exclusive" lock is
 *                      required to destroy or retain exclusive access to
 *                      a page (e.g., while reading in pages).  The appropriate
 *                      lock is always held whenever there is any reference
 *                      to a page structure (e.g., during i/o).
 *                      (Note that with the addition of the "writer-lock-wanted"
 *                      semantics (via SE_EWANTED), threads must not acquire
 *                      multiple reader locks or else a deadly embrace will
 *                      occur in the following situation: thread 1 obtains a
 *                      reader lock; next thread 2 fails to get a writer lock
 *                      but specified SE_EWANTED so it will wait by either
 *                      blocking (when using page_lock_es) or spinning while
 *                      retrying (when using page_try_reclaim_lock) until the
 *                      reader lock is released; then thread 1 attempts to
 *                      get another reader lock but is denied due to
 *                      SE_EWANTED being set, and now both threads are in a
 *                      deadly embrace.)
 *
 *                              p_hash
 *                              p_vnode
 *                              p_offset
 *
 *                              p_free
 *                              p_age
 *
 *      p_iolock        This is a binary semaphore lock that provides
 *                      exclusive access to the i/o list links in each
 *                      page structure.  It is always held while the page
 *                      is on an i/o list (i.e., involved in i/o).  That is,
 *                      even though a page may be only `shared' locked
 *                      while it is doing a write, the following fields may
 *                      change anyway.  Normally, the page must be
 *                      `exclusively' locked to change anything in it.
 *
 *                              p_next
 *                              p_prev
 *
 * The following fields are protected by the global page_llocks[]:
 *
 *                              p_lckcnt
 *                              p_cowcnt
 *
 * The following lists are protected by the global page_freelock:
 *
 *                              page_cachelist
 *                              page_freelist
 *
 * The following, for our purposes, are protected by
 * the global freemem_lock:
 *
 *                              freemem
 *                              freemem_wait
 *                              freemem_cv
 *
 * The following fields are protected by hat layer lock(s).  When a page
 * structure is not mapped and is not associated with a vnode (after a call
 * to page_hashout() for example) the p_nrm field may be modified without
 * holding the hat layer lock:
 *
 *                              p_nrm
 *                              p_mapping
 *                              p_share
 *
 * The following field is file system dependent.  How it is used and
 * the locking strategies applied are up to the individual file system
 * implementation.
 *
 *                              p_fsdata
 *
 * The page structure is used to represent and control the system's
 * physical pages.  There is one instance of the structure for each
 * page that is not permanently allocated.  For example, the pages that
 * hold the page structures are permanently held by the kernel
 * and hence do not need page structures to track them.  The array
 * of page structures is allocated early on in the kernel's life and
 * is based on the amount of available physical memory.
 *
 * Each page structure may simultaneously appear on several linked lists.
 * The lists are:  hash list, free or in i/o list, and a vnode's page list.
 * Each type of list is protected by a different group of mutexes as described
 * below:
 *
 * The hash list is used to quickly find a page when the page's vnode and
 * offset within the vnode are known.  Each page that is hashed is
 * connected via the `p_hash' field.  The anchor for each hash is in the
 * array `page_hash'.  An array of mutexes, `ph_mutex', protects the
 * lists anchored by page_hash[].  To either search or modify a given hash
 * list, the appropriate mutex in the ph_mutex array must be held.
 *
 * The free list contains pages that are `free to be given away'.  For
 * efficiency reasons, pages on this list are placed in two categories:
 * pages that are still associated with a vnode, and pages that are not
 * associated with a vnode.  Free pages always have their `p_free' bit set,
 * free pages that are still associated with a vnode also have their
 * `p_age' bit set.  Pages on the free list are connected via their
 * `p_next' and `p_prev' fields.  When a page is involved in some sort
 * of i/o, it is not free and these fields may be used to link associated
 * pages together.  At the moment, the free list is protected by a
 * single mutex `page_freelock'.  The list of free pages still associated
 * with a vnode is anchored by `page_cachelist' while other free pages
 * are anchored in architecture dependent ways (to handle page coloring etc.).
 *
 * Pages associated with a given vnode appear on a list anchored in the
 * vnode by the `v_pages' field.  They are linked together with
 * `p_vpnext' and `p_vpprev'.  The field `p_offset' contains a page's
 * offset within the vnode.  The pages on this list are not kept in
 * offset order.  These lists, in a manner similar to the hash lists,
 * are protected by an array of mutexes called `vph_mutex'.  Before
 * searching or modifying this chain the appropriate mutex in the
 * vph_mutex[] array must be held.
 *
 * Again, each of the lists that a page can appear on is protected by a
 * mutex.  Before reading or writing any of the fields comprising the
 * list, the appropriate lock must be held.  These list locks should only
 * be held for very short intervals.
 *
 * In addition to the list locks, each page structure contains a
 * shared/exclusive lock that protects various fields within it.
 * To modify one of these fields, the `p_selock' must be exclusively held.
 * To read a field with a degree of certainty, the lock must be at least
 * held shared.
 *
 * Removing a page structure from one of the lists requires holding
 * the appropriate list lock and the page's p_selock.  A page may be
 * prevented from changing identity, being freed, or otherwise modified
 * by acquiring p_selock shared.
 *
 * To avoid deadlocks, a strict locking protocol must be followed.  Basically
 * there are two cases:  In the first case, the page structure in question
 * is known ahead of time (e.g., when the page is to be added or removed
 * from a list).  In the second case, the page structure is not known and
 * must be found by searching one of the lists.
 *
 * When adding or removing a known page to or from one of the lists, first
 * the page must be exclusively locked (since at least one of its fields
 * will be modified), second the lock protecting the list must be acquired,
 * third the page inserted or deleted, and finally the list lock dropped.
 *
 * The more interesting case occurs when the particular page structure
 * is not known ahead of time.  For example, when a call is made to
 * page_lookup(), it is not known if a page with the desired (vnode and
 * offset pair) identity exists.  So the appropriate mutex in ph_mutex is
 * acquired, the hash list searched, and if the desired page is found
 * an attempt is made to lock it.  The attempt to acquire p_selock must
 * not block while the hash list lock is held.  A deadlock could occur
 * if some other process was trying to remove the page from the list.
 * The removing process (following the above protocol) would have exclusively
 * locked the page, and be spinning waiting to acquire the lock protecting
 * the hash list.  Since the searching process holds the hash list lock
 * and is waiting to acquire the page lock, a deadlock occurs.
 *
 * The proper scheme to follow is: first, lock the appropriate list,
 * search the list, and if the desired page is found either use
 * page_trylock() (which will not block) or pass the address of the
 * list lock to page_lock().  If page_lock() can not acquire the page's
 * lock, it will drop the list lock before going to sleep.  page_lock()
 * returns a value to indicate if the list lock was dropped, allowing the
 * calling program to react appropriately (i.e., retry the operation).
 *
 * If the list lock was dropped before the attempt at locking the page
 * was made, checks would have to be made to ensure that the page had
 * not changed identity before its lock was obtained.  This is because
 * the interval between dropping the list lock and acquiring the page
 * lock is indeterminate.
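 *
 * As an illustration only (the real code is page_lookup() and friends),
 * the search-then-lock protocol described above looks roughly like:
 *
 *      index = PAGE_HASH_FUNC(vp, off);
 *      phm = PAGE_HASH_MUTEX(index);
 * top:
 *      mutex_enter(phm);
 *      for (pp = page_hash[index]; pp != NULL; pp = pp->p_hash)
 *              if (pp->p_vnode == vp && pp->p_offset == off)
 *                      break;
 *      if (pp != NULL && !page_lock(pp, SE_SHARED, phm, P_RECLAIM))
 *              goto top;       (page_lock() dropped phm while sleeping,
 *                              so the search must start over)
 *      mutex_exit(phm);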
 *
 * In addition, when both a hash list lock (ph_mutex[]) and a vnode list
 * lock (vph_mutex[]) are needed, the hash list lock must be acquired first.
 * The routine page_hashin() is a good example of this sequence.
 * This sequence is ASSERTed by checking that the vph_mutex[] is not held
 * just before each acquisition of one of the mutexes in ph_mutex[].
 *
 * So, as a quick summary:
 *
 *      pse_mutex[]'s protect the p_selock and p_cv fields.
 *
 *      p_selock protects the p_free, p_age, p_vnode, p_offset and p_hash
 *      fields.
 *
 *      ph_mutex[]'s protect the page_hash[] array and its chains.
 *
 *      vph_mutex[]'s protect the v_pages field and the vp page chains.
 *
 *      First lock the page, then the hash chain, then the vnode chain.  When
 *      this is not possible `trylocks' must be used.  Sleeping while holding
 *      any of these mutexes (p_selock is not a mutex) is not allowed.
 *
 *
 *      field           reading         writing             ordering
 *      =====================================================================
 *      p_vnode         p_selock(E,S)   p_selock(E)
 *      p_offset
 *      p_free
 *      p_age
 *      =====================================================================
 *      p_hash          p_selock(E,S)   p_selock(E) &&      p_selock, ph_mutex
 *                                      ph_mutex[]
 *      =====================================================================
 *      p_vpnext        p_selock(E,S)   p_selock(E) &&      p_selock, vph_mutex
 *      p_vpprev                        vph_mutex[]
 *      =====================================================================
 *      When the p_free bit is set:
 *
 *      p_next          p_selock(E,S)   p_selock(E) &&      p_selock,
 *      p_prev                          page_freelock       page_freelock
 *
 *      When the p_free bit is not set:
 *
 *      p_next          p_selock(E,S)   p_selock(E) &&      p_selock, p_iolock
 *      p_prev                          p_iolock
 *      =====================================================================
 *      p_selock        pse_mutex[]     pse_mutex[]         can't acquire any
 *      p_cv                                                other mutexes or
 *                                                          sleep while holding
 *                                                          this lock.
 *      =====================================================================
 *      p_lckcnt        p_selock(E,S)   p_selock(E)
 *                                          OR
 *                                      p_selock(S) &&
 *                                      page_llocks[]
 *      p_cowcnt
 *      =====================================================================
 *      p_nrm           hat layer lock  hat layer lock
 *      p_mapping
 *      p_pagenum
 *      =====================================================================
 *
 *      where:
 *              E----> exclusive version of p_selock.
 *              S----> shared version of p_selock.
 *
 *
 *      Global data structures and variables:
 *
 *      field           reading         writing             ordering
 *      =====================================================================
 *      page_hash[]     ph_mutex[]      ph_mutex[]          can hold this lock
 *                                                          before acquiring
 *                                                          a vph_mutex or
 *                                                          pse_mutex.
 *      =====================================================================
 *      vp->v_pages     vph_mutex[]     vph_mutex[]         can only acquire
 *                                                          a pse_mutex while
 *                                                          holding this lock.
 *      =====================================================================
 *      page_cachelist  page_freelock   page_freelock       can't acquire any
 *      page_freelist   page_freelock   page_freelock       other mutexes while
 *                                                          holding this mutex.
 *      =====================================================================
 *      freemem         freemem_lock    freemem_lock        can't acquire any
 *      freemem_wait                                        other mutexes while
 *      freemem_cv                                          holding this mutex.
 *      =====================================================================
 *
 * Page relocation, PG_NORELOC and P_NORELOC.
 *
 * Pages may be relocated using the page_relocate() interface. Relocation
 * involves moving the contents and identity of a page to another, free page.
 * To relocate a page, the SE_EXCL lock must be obtained. The way to prevent
 * a page from being relocated is to hold the SE_SHARED lock (the SE_EXCL
 * lock must not be held indefinitely). If the page is going to be held
 * SE_SHARED indefinitely, then the PG_NORELOC hint should be passed
 * to page_create_va so that pages that are prevented from being relocated
 * can be managed differently by the platform specific layer.
 *
 * Pages locked in memory using page_pp_lock (p_lckcnt/p_cowcnt != 0)
 * are guaranteed to be held in memory, but can still be relocated
 * providing the SE_EXCL lock can be obtained.
 *
 * The P_NORELOC bit in the page_t.p_state field is provided for use by
 * the platform specific code in managing pages when the PG_NORELOC
 * hint is used.
 *
 * Memory delete and page locking.
 *
 * The set of all usable pages is managed using the global page list as
 * implemented by the memseg structure defined below. When memory is added
 * or deleted this list changes. Additions to this list guarantee that the
 * list is never corrupt.  In order to avoid the necessity of an additional
 * lock to protect against failed accesses to the memseg being deleted and,
 * more importantly, the page_ts, the memseg structure is never freed and the
 * page_t virtual address space is remapped to a page (or pages) of
 * zeros.  If a page_t is manipulated while it is p_selock'd, or if it is
 * locked indirectly via a hash or freelist lock, it is not possible for
 * memory delete to collect the page and so that part of the page list is
 * prevented from being deleted. If the page is referenced outside of one
 * of these locks, it is possible for the page_t being referenced to be
 * deleted.  Examples of this are page_t pointers returned by
 * page_numtopp_nolock, page_first and page_next.  Providing the page_t
 * is re-checked after taking the p_selock (for p_vnode != NULL), the
 * remapping to the zero pages will be detected.
 *
 *
 * Page size (p_szc field) and page locking.
 *
 * The p_szc field of free pages is changed by the free list manager under
 * freelist locks and is of no concern to the rest of the VM subsystem.
 *
 * p_szc changes of allocated anonymous (swapfs) pages can only be done
 * after exclusively locking all constituent pages and calling
 * hat_pageunload() on each of them. To prevent p_szc changes of non-free
 * anonymous (swapfs) large pages it's enough to either lock SHARED any of
 * the constituent pages or prevent hat_pageunload() by holding the hat
 * level lock that protects mapping lists (this method is for hat code only).
 *
 * To increase (promote) p_szc of allocated non anonymous file system pages
 * one has to first lock exclusively all involved constituent pages and call
 * hat_pageunload() on each of them. To prevent a p_szc promotion it's enough
 * to either lock SHARED any of the constituent pages that will be needed to
 * make a large page or prevent hat_pageunload() by holding the hat level
 * lock that protects mapping lists (this method is for hat code only).
 *
 * To decrease (demote) p_szc of an allocated non anonymous file system large
 * page one can either use the same method as used for changing p_szc of
 * anonymous large pages or, if it's not possible to lock all constituent
 * pages exclusively, a different method can be used. In the second method
 * one only has to exclusively lock one of the constituent pages but then
 * one has to acquire further locks by calling page_szc_lock() and
 * hat_page_demote(). hat_page_demote() acquires hat level locks and then
 * demotes the page. This mechanism relies on the fact that any code that
 * needs to prevent p_szc of a file system large page from changing either
 * locks all constituent large pages at least SHARED or locks some pages at
 * least SHARED and calls page_szc_lock() or uses hat level page locks.
 * Demotion using this method is implemented by page_demote_vp_pages().
 * Please see comments in front of page_demote_vp_pages(), hat_page_demote()
 * and page_szc_lock() for more details.
 *
 * Lock order: p_selock, page_szc_lock, ph_mutex/vph_mutex/freelist,
 * hat level locks.
 */

typedef struct page {
        u_offset_t      p_offset;       /* offset into vnode for this page */
        struct vnode    *p_vnode;       /* vnode that this page is named by */
        selock_t        p_selock;       /* shared/exclusive lock on the page */
#if defined(_LP64)
        uint_t          p_vpmref;       /* vpm ref - index of the vpmap_t */
#endif
        struct page     *p_hash;        /* hash by [vnode, offset] */
        struct page     *p_vpnext;      /* next page in vnode list */
        struct page     *p_vpprev;      /* prev page in vnode list */
        struct page     *p_next;        /* next page in free/intrans lists */
        struct page     *p_prev;        /* prev page in free/intrans lists */
        ushort_t        p_lckcnt;       /* number of locks on page data */
        ushort_t        p_cowcnt;       /* number of copy-on-write locks */
        kcondvar_t      p_cv;           /* page struct's condition var */
        kcondvar_t      p_io_cv;        /* for iolock */
        uchar_t         p_iolock_state; /* replaces p_iolock */
        volatile uchar_t p_szc;         /* page size code */
        uchar_t         p_fsdata;       /* file system dependent byte */
        uchar_t         p_state;        /* p_free, p_noreloc */
        uchar_t         p_nrm;          /* non-cache, ref, mod readonly bits */
#if defined(__sparc)
        uchar_t         p_vcolor;       /* virtual color */
#else
        uchar_t         p_embed;        /* x86 - changes p_mapping & p_index */
#endif
        uchar_t         p_index;        /* MPSS mapping info. Not used on x86 */
        uchar_t         p_toxic;        /* page has an unrecoverable error */
        void            *p_mapping;     /* hat specific translation info */
        pfn_t           p_pagenum;      /* physical page number */

        uint_t          p_share;        /* number of translations */
#if defined(_LP64)
        uint_t          p_sharepad;     /* pad for growing p_share */
#endif
        uint_t          p_slckcnt;      /* number of softlocks */
#if defined(__sparc)
        uint_t          p_kpmref;       /* number of kpm mapping sharers */
        struct kpme     *p_kpmelist;    /* kpm specific mapping info */
#else
        /* index of entry in p_map when p_embed is set */
        uint_t          p_mlentry;
#endif
#if defined(_LP64)
        kmutex_t        p_ilock;        /* protects p_vpmref */
#else
        uint64_t        p_msresv_2;     /* page allocation debugging */
#endif
} page_t;


typedef page_t  devpage_t;
#define devpage page

#define PAGE_LOCK_MAXIMUM \
        ((1 << (sizeof (((page_t *)0)->p_lckcnt) * NBBY)) - 1)

#define PAGE_SLOCK_MAXIMUM UINT_MAX

/*
 * Page hash table is a power-of-two in size, externally chained
 * through the hash field.  PAGE_HASHAVELEN is the average length
 * desired for this chain, from which the size of the page_hash
 * table is derived at boot time and stored in the kernel variable
 * page_hashsz.  In the hash function it is given by PAGE_HASHSZ.
 *
 * PAGE_HASH_FUNC returns an index into the page_hash[] array.  This
 * index is also used to derive the mutex that protects the chain.
 *
 * In constructing the hash function, first we dispose of unimportant bits
 * (page offset from "off" and the low 3 bits of "vp" which are zero for
 * struct alignment).  Then shift and sum the remaining bits a couple times
 * in order to get as many source bits from the two source values into the
 * resulting hashed value.  Note that this will perform quickly, since the
 * shifting/summing are fast register to register operations with no
 * additional memory references.
 *
 * PH_SHIFT_SIZE is the amount to use for the successive shifts in the hash
 * function below.  The actual value is LOG2(PH_TABLE_SIZE), so that as many
 * bits as possible will filter through PAGE_HASH_FUNC() and
 * PAGE_HASH_MUTEX().
 *
 * We use ? : instead of #if because <vm/page.h> is included everywhere;
 * NCPU maps to a global variable outside of the "unix" module.
 */
#if defined(_LP64)
#define PH_SHIFT_SIZE   ((NCPU < 4) ? 7 : (NCPU_LOG2 + 1))
#else   /* 32 bits */
#define PH_SHIFT_SIZE   ((NCPU < 4) ? 4 : 7)
#endif  /* _LP64 */

#define PH_TABLE_SIZE   (1ul << PH_SHIFT_SIZE)

/*
 * We take care to get as much randomness as possible from both the vp and
 * the offset.  Workloads can have few vnodes with many offsets, many vnodes
 * with few offsets or a moderate mix of both.  This hash should perform
 * equally well for each of these possibilities and for all types of memory
 * allocations.
 *
 * vnodes representing files are created over a long period of time and
 * have good variation in the upper vp bits, and the right shifts below
 * capture these bits.  However, swap vnodes are created quickly in a
 * narrow vp* range.  Refer to comments at swap_alloc: vnum has exactly
 * AN_VPSHIFT bits, so the kmem_alloc'd vnode addresses have approximately
 * AN_VPSHIFT bits of variation above their VNODE_ALIGN low order 0 bits.
 * Spread swap vnodes widely in the hash table by XOR'ing a term with the
 * vp bits of variation left shifted to the top of the range.
 */

#define PAGE_HASHSZ     page_hashsz
#define PAGE_HASHAVELEN         4
#define PAGE_HASH_FUNC(vp, off) \
        (((((uintptr_t)(off) >> PAGESHIFT) ^ \
            ((uintptr_t)(off) >> (PAGESHIFT + PH_SHIFT_SIZE))) ^ \
            (((uintptr_t)(vp) >> 3) ^ \
            ((uintptr_t)(vp) >> (3 + PH_SHIFT_SIZE)) ^ \
            ((uintptr_t)(vp) >> (3 + 2 * PH_SHIFT_SIZE)) ^ \
            ((uintptr_t)(vp) << \
            (page_hashsz_shift - AN_VPSHIFT - VNODE_ALIGN_LOG2)))) & \
            (PAGE_HASHSZ - 1))

#ifdef _KERNEL

/*
 * The page hash value is re-hashed to an index for the ph_mutex array.
 *
 * For 64 bit kernels, the mutex array is padded out to prevent false
 * sharing of cache sub-blocks (64 bytes) of adjacent mutexes.
 *
 * For 32 bit kernels, we don't want to waste kernel address space with
 * padding, so instead we rely on the hash function to introduce skew of
 * adjacent vnode/offset indexes (the left shift part of the hash function).
 * Since sizeof (kmutex_t) is 8, we shift an additional 3 to skew to a
 * different 64 byte sub-block.
 */
extern pad_mutex_t ph_mutex[];

#define PAGE_HASH_MUTEX(x) \
        &(ph_mutex[((x) ^ ((x) >> PH_SHIFT_SIZE) + ((x) << 3)) & \
                (PH_TABLE_SIZE - 1)].pad_mutex)

/*
 * Flags used while creating pages.
 */
#define PG_EXCL         0x0001
#define PG_WAIT         0x0002          /* Blocking memory allocations */
#define PG_PHYSCONTIG   0x0004          /* NOT SUPPORTED */
#define PG_MATCH_COLOR  0x0008          /* SUPPORTED by free list routines */
#define PG_NORELOC      0x0010          /* Non-relocatable alloc hint. */
                                        /* Page must be PP_ISNORELOC */
#define PG_PANIC        0x0020          /* system will panic if alloc fails */
#define PG_PUSHPAGE     0x0040          /* alloc may use reserve */
#define PG_LOCAL        0x0080          /* alloc from given lgrp only */
#define PG_NORMALPRI    0x0100          /* PG_WAIT like priority, but */
                                        /* non-blocking */
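
/*
 * For example (sketch), a file system read path might allocate a page with:
 *
 *      pp = page_create_va(vp, off, PAGESIZE, PG_WAIT | PG_EXCL, seg, vaddr);
 *
 * PG_WAIT lets the allocation block until memory is available; the new
 * page is returned exclusively locked.
 */
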
/*
 * When p_selock has the SE_EWANTED bit set, threads waiting for SE_EXCL
 * access are given priority over all other waiting threads.
 */
#define SE_EWANTED      0x40000000
#define PAGE_LOCKED(pp)         (((pp)->p_selock & ~SE_EWANTED) != 0)
#define PAGE_SHARED(pp)         (((pp)->p_selock & ~SE_EWANTED) > 0)
#define PAGE_EXCL(pp)           ((pp)->p_selock < 0)
#define PAGE_LOCKED_SE(pp, se)  \
        ((se) == SE_EXCL ? PAGE_EXCL(pp) : PAGE_SHARED(pp))
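
/*
 * These predicates are typically used in assertions, e.g. (sketch):
 *
 *      ASSERT(PAGE_EXCL(pp) && !PP_ISFREE(pp));
 *      ASSERT(PAGE_LOCKED_SE(pp, se));
 */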

extern  long page_hashsz;
extern  unsigned int page_hashsz_shift;
extern  page_t **page_hash;

extern  pad_mutex_t page_llocks[];      /* page logical lock mutex */
extern  kmutex_t freemem_lock;          /* freemem lock */

extern  pgcnt_t total_pages;            /* total pages in the system */

/*
 * Variables controlling locking of physical memory.
 */
extern  pgcnt_t pages_pp_maximum;       /* tuning: lock + claim <= max */
extern  void init_pages_pp_maximum(void);

struct lgrp;

/* page_list_{add,sub} flags */

/* which list */
#define PG_FREE_LIST    0x0001
#define PG_CACHE_LIST   0x0002

/* where on list */
#define PG_LIST_TAIL    0x0010
#define PG_LIST_HEAD    0x0020

/* called from */
#define PG_LIST_ISINIT  0x1000

/*
 * Page frame operations.
 */
page_t  *page_lookup(struct vnode *, u_offset_t, se_t);
page_t  *page_lookup_create(struct vnode *, u_offset_t, se_t, page_t *,
        spgcnt_t *, int);
page_t  *page_lookup_nowait(struct vnode *, u_offset_t, se_t);
page_t  *page_find(struct vnode *, u_offset_t);
page_t  *page_exists(struct vnode *, u_offset_t);
int     page_exists_physcontig(vnode_t *, u_offset_t, uint_t, page_t *[]);
int     page_exists_forreal(struct vnode *, u_offset_t, uint_t *);
void    page_needfree(spgcnt_t);
page_t  *page_create(struct vnode *, u_offset_t, size_t, uint_t);
int     page_alloc_pages(struct vnode *, struct seg *, caddr_t, page_t **,
        page_t **, uint_t, int, int);
page_t  *page_create_va_large(vnode_t *vp, u_offset_t off, size_t bytes,
        uint_t flags, struct seg *seg, caddr_t vaddr, void *arg);
page_t  *page_create_va(struct vnode *, u_offset_t, size_t, uint_t,
        struct seg *, caddr_t);
int     page_create_wait(pgcnt_t npages, uint_t flags);
void    page_create_putback(spgcnt_t npages);
void    page_free(page_t *, int);
void    page_free_at_startup(page_t *);
void    page_free_pages(page_t *);
void    free_vp_pages(struct vnode *, u_offset_t, size_t);
int     page_reclaim(page_t *, kmutex_t *);
int     page_reclaim_pages(page_t *, kmutex_t *, uint_t);
void    page_destroy(page_t *, int);
void    page_destroy_pages(page_t *);
void    page_destroy_free(page_t *);
void    page_rename(page_t *, struct vnode *, u_offset_t);
int     page_hashin(page_t *, struct vnode *, u_offset_t, kmutex_t *);
void    page_hashout(page_t *, kmutex_t *);
int     page_num_hashin(pfn_t, struct vnode *, u_offset_t);
void    page_add(page_t **, page_t *);
void    page_add_common(page_t **, page_t *);
void    page_sub(page_t **, page_t *);
void    page_sub_common(page_t **, page_t *);
page_t  *page_get_freelist(struct vnode *, u_offset_t, struct seg *,
                caddr_t, size_t, uint_t, struct lgrp *);

page_t  *page_get_cachelist(struct vnode *, u_offset_t, struct seg *,
                caddr_t, uint_t, struct lgrp *);
#if defined(__i386) || defined(__amd64)
int     page_chk_freelist(uint_t);
#endif
void    page_list_add(page_t *, int);
void    page_boot_demote(page_t *);
void    page_promote_size(page_t *, uint_t);
void    page_list_add_pages(page_t *, int);
void    page_list_sub(page_t *, int);
void    page_list_sub_pages(page_t *, uint_t);
void    page_list_xfer(page_t *, int, int);
void    page_list_break(page_t **, page_t **, size_t);
void    page_list_concat(page_t **, page_t **);
void    page_vpadd(page_t **, page_t *);
void    page_vpsub(page_t **, page_t *);
int     page_lock(page_t *, se_t, kmutex_t *, reclaim_t);
int     page_lock_es(page_t *, se_t, kmutex_t *, reclaim_t, int);
void    page_lock_clr_exclwanted(page_t *);
int     page_trylock(page_t *, se_t);
int     page_try_reclaim_lock(page_t *, se_t, int);
int     page_tryupgrade(page_t *);
void    page_downgrade(page_t *);
void    page_unlock(page_t *);
void    page_unlock_nocapture(page_t *);
void    page_lock_delete(page_t *);
int     page_deleted(page_t *);
int     page_pp_lock(page_t *, int, int);
void    page_pp_unlock(page_t *, int, int);
int     page_resv(pgcnt_t, uint_t);
void    page_unresv(pgcnt_t);
void    page_pp_useclaim(page_t *, page_t *, uint_t);
int     page_addclaim(page_t *);
int     page_subclaim(page_t *);
int     page_addclaim_pages(page_t **);
int     page_subclaim_pages(page_t **);
pfn_t   page_pptonum(page_t *);
page_t  *page_numtopp(pfn_t, se_t);
page_t  *page_numtopp_noreclaim(pfn_t, se_t);
page_t  *page_numtopp_nolock(pfn_t);
page_t  *page_numtopp_nowait(pfn_t, se_t);
page_t  *page_first();
page_t  *page_next(page_t *);
page_t  *page_list_next(page_t *);
page_t  *page_nextn(page_t *, ulong_t);
page_t  *page_next_scan_init(void **);
page_t  *page_next_scan_large(page_t *, ulong_t *, void **);
void    prefetch_page_r(void *);
int     ppcopy(page_t *, page_t *);
void    page_relocate_hash(page_t *, page_t *);
void    pagezero(page_t *, uint_t, uint_t);
void    pagescrub(page_t *, uint_t, uint_t);
void    page_io_lock(page_t *);
void    page_io_unlock(page_t *);
int     page_io_trylock(page_t *);
int     page_iolock_assert(page_t *);
void    page_iolock_init(page_t *);
void    page_io_wait(page_t *);
int     page_io_locked(page_t *);
pgcnt_t page_busy(int);
void    page_lock_init(void);
ulong_t page_share_cnt(page_t *);
int     page_isshared(page_t *);
int     page_isfree(page_t *);
int     page_isref(page_t *);
int     page_ismod(page_t *);
int     page_release(page_t *, int);
void    page_retire_init(void);
int     page_retire(uint64_t, uchar_t);
int     page_retire_check(uint64_t, uint64_t *);
int     page_unretire(uint64_t);
int     page_unretire_pp(page_t *, int);
void    page_tryretire(page_t *);
void    page_retire_mdboot();
uint64_t        page_retire_pend_count(void);
uint64_t        page_retire_pend_kas_count(void);
void    page_retire_incr_pend_count(void *);
void    page_retire_decr_pend_count(void *);
void    page_clrtoxic(page_t *, uchar_t);
void    page_settoxic(page_t *, uchar_t);

int     page_reclaim_mem(pgcnt_t, pgcnt_t, int);

void    page_set_props(page_t *, uint_t);
void    page_clr_all_props(page_t *);
int     page_clear_lck_cow(page_t *, int);

kmutex_t        *page_vnode_mutex(struct vnode *);
kmutex_t        *page_se_mutex(struct page *);
kmutex_t        *page_szc_lock(struct page *);
int             page_szc_lock_assert(struct page *pp);

/*
 * Page relocation interfaces. page_relocate() is generic.
 * page_get_replacement_page() is provided by the PSM.
 * page_free_replacement_page() is generic.
 */
int     group_page_trylock(page_t *, se_t);
void    group_page_unlock(page_t *);
int     page_relocate(page_t **, page_t **, int, int, spgcnt_t *,
        struct lgrp *);
int     do_page_relocate(page_t **, page_t **, int, spgcnt_t *,
        struct lgrp *);
page_t  *page_get_replacement_page(page_t *, struct lgrp *, uint_t);
void    page_free_replacement_page(page_t *);
int     page_relocate_cage(page_t **, page_t **);

int     page_try_demote_pages(page_t *);
int     page_try_demote_free_pages(page_t *);
void    page_demote_free_pages(page_t *);

struct anon_map;

void    page_mark_migrate(struct seg *, caddr_t, size_t, struct anon_map *,
    ulong_t, vnode_t *, u_offset_t, int);
void    page_migrate(struct seg *, caddr_t, page_t **, pgcnt_t);

/*
 * Tell the PIM we are adding physical memory
 */
void    add_physmem(page_t *, size_t, pfn_t);
void    add_physmem_cb(page_t *, pfn_t);        /* callback for page_t part */

/*
 * hw_page_array[] is configured with hardware supported page sizes by
 * platform specific code.
 */
typedef struct {
        size_t  hp_size;
        uint_t  hp_shift;
        uint_t  hp_colors;
        pgcnt_t hp_pgcnt;       /* base pagesize cnt */
} hw_pagesize_t;

extern hw_pagesize_t    hw_page_array[];
extern uint_t           page_coloring_shift;
extern uint_t           page_colors_mask;
extern int              cpu_page_colors;
extern uint_t           colorequiv;
extern uchar_t          colorequivszc[];

uint_t  page_num_pagesizes(void);
uint_t  page_num_user_pagesizes(int);
size_t  page_get_pagesize(uint_t);
size_t  page_get_user_pagesize(uint_t n);
pgcnt_t page_get_pagecnt(uint_t);
uint_t  page_get_shift(uint_t);
int     page_szc(size_t);
int     page_szc_user_filtered(size_t);
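
/*
 * Sketch: enumerating the sizes configured in hw_page_array[]:
 *
 *      uint_t i;
 *
 *      for (i = 0; i < page_num_pagesizes(); i++)
 *              cmn_err(CE_CONT, "szc %u: %lu bytes, %lu constituent pages\n",
 *                  i, page_get_pagesize(i), page_get_pagecnt(i));
 */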

/* page_get_replacement page flags */
#define PGR_SAMESZC     0x1     /* only look for page size same as orig */
#define PGR_NORELOC     0x2     /* allocate a P_NORELOC page */

/*
 * Macros for "masked arithmetic".
 * The purpose is to step through all combinations of a set of bits while
 * keeping some other bits fixed.  Fixed bits need not be contiguous.  The
 * variable bits need not be contiguous either, or even right aligned.  The
 * trick is to set all fixed bits to 1, then increment, then restore the
 * fixed bits.  If incrementing causes a carry from a low bit position, the
 * carry propagates through the fixed bits, because they are temporarily set
 * to 1.
 *      v is the value
 *      i is the increment
 *      eq_mask defines the fixed bits
 *      mask limits the size of the result
 */
#define ADD_MASKED(v, i, eq_mask, mask) \
        (((((v) | (eq_mask)) + (i)) & (mask) & ~(eq_mask)) | ((v) & (eq_mask)))

/*
 * Convenience macro which increments by 1.
 */
#define INC_MASKED(v, eq_mask, mask) ADD_MASKED(v, 1, eq_mask, mask)
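
/*
 * Worked example (sketch): with eq_mask = 0x5 and mask = 0xf, bits 0 and 2
 * stay fixed at the value's own bits while bits 1 and 3 step through all
 * combinations, carries skipping over the fixed bits:
 *
 *      0x5 -> 0x7 -> 0xd -> 0xf -> 0x5 -> ...  (one INC_MASKED per step)
 */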

#endif  /* _KERNEL */

/*
 * Constants used for the p_iolock_state
 */
#define PAGE_IO_INUSE   0x1
#define PAGE_IO_WANTED  0x2

/*
 * Constants used for page_release status
 */
#define PGREL_NOTREL    0x1
#define PGREL_CLEAN     0x2
#define PGREL_MOD       0x3

/*
 * The p_state field holds what used to be the p_age and p_free
 * bits.  These fields are protected by p_selock (see above).
 */
#define P_FREE          0x80            /* Page on free list */
#define P_NORELOC       0x40            /* Page is non-relocatable */
#define P_MIGRATE       0x20            /* Migrate page on next touch */
#define P_SWAP          0x10            /* belongs to vnode that is V_ISSWAP */
#define P_BOOTPAGES     0x08            /* member of bootpages list */
#define P_RAF           0x04            /* page retired at free */

#define PP_ISFREE(pp)           ((pp)->p_state & P_FREE)
#define PP_ISAGED(pp)           (((pp)->p_state & P_FREE) && \
                                        ((pp)->p_vnode == NULL))
#define PP_ISNORELOC(pp)        ((pp)->p_state & P_NORELOC)
#define PP_ISKAS(pp)            (VN_ISKAS((pp)->p_vnode))
#define PP_ISNORELOCKERNEL(pp)  (PP_ISNORELOC(pp) && PP_ISKAS(pp))
#define PP_ISMIGRATE(pp)        ((pp)->p_state & P_MIGRATE)
#define PP_ISSWAP(pp)           ((pp)->p_state & P_SWAP)
#define PP_ISBOOTPAGES(pp)      ((pp)->p_state & P_BOOTPAGES)
#define PP_ISRAF(pp)            ((pp)->p_state & P_RAF)

#define PP_SETFREE(pp)          ((pp)->p_state = ((pp)->p_state & ~P_MIGRATE) \
                                | P_FREE)
#define PP_SETAGED(pp)          ASSERT(PP_ISAGED(pp))
#define PP_SETNORELOC(pp)       ((pp)->p_state |= P_NORELOC)
#define PP_SETMIGRATE(pp)       ((pp)->p_state |= P_MIGRATE)
#define PP_SETSWAP(pp)          ((pp)->p_state |= P_SWAP)
#define PP_SETBOOTPAGES(pp)     ((pp)->p_state |= P_BOOTPAGES)
#define PP_SETRAF(pp)           ((pp)->p_state |= P_RAF)

#define PP_CLRFREE(pp)          ((pp)->p_state &= ~P_FREE)
#define PP_CLRAGED(pp)          ASSERT(!PP_ISAGED(pp))
#define PP_CLRNORELOC(pp)       ((pp)->p_state &= ~P_NORELOC)
#define PP_CLRMIGRATE(pp)       ((pp)->p_state &= ~P_MIGRATE)
#define PP_CLRSWAP(pp)          ((pp)->p_state &= ~P_SWAP)
#define PP_CLRBOOTPAGES(pp)     ((pp)->p_state &= ~P_BOOTPAGES)
#define PP_CLRRAF(pp)           ((pp)->p_state &= ~P_RAF)

/*
 * Flags for page_t p_toxic, for tracking memory hardware errors.
 *
 * These flags are OR'ed into p_toxic with page_settoxic() to track which
 * error(s) have occurred on a given page. The flags are cleared with
 * page_clrtoxic(). Both page_settoxic() and page_clrtoxic() use atomic
 * primitives to manipulate the p_toxic field so no other locking is needed.
 *
 * When an error occurs on a page, p_toxic is set to record the error. The
 * error could be a memory error or something else (e.g., a datapath error).
 * The Page Retire mechanism does not try to determine the exact cause of the
 * error; Page Retire rightly leaves that sort of determination to FMA's
 * Diagnostic Engine (DE).
 *
 * Note that, while p_toxic bits can be set without holding any locks, they
 * should only be cleared while holding the page exclusively locked.
 * There is one exception to this, the PR_CAPTURE bit is protected by a mutex
 * within the page capture logic and thus to set or clear the bit, that mutex
 * needs to be held.  The page does not need to be locked but the page_clrtoxic
 * function must be used as we need an atomic operation.
 * Also note that there is what amounts to a hack to prevent recursion with
 * large pages such that if we are unlocking a page and the PR_CAPTURE bit is
 * set, we will only try to capture the page if the current thread's
 * T_CAPTURING flag is not set.  If the flag is set, the unlock will not try
 * to capture the page even though the PR_CAPTURE bit is set.
 *
 * Pages with PR_UE or PR_FMA flags are retired unconditionally, while pages
 * with PR_MCE are retired if the system has not retired too many of them.
 *
 * A page must be exclusively locked to be retired. Pages can be retired if
 * they are mapped, modified, or both, as long as they are not marked PR_UE,
 * since pages with uncorrectable errors cannot be relocated in memory.
 * Once a page has been successfully retired it is zeroed, attached to the
 * retired_pages vnode and, finally, PR_RETIRED is set in p_toxic. The other
 * p_toxic bits are NOT cleared. Pages are not left locked after retiring them
 * to avoid special case code throughout the kernel; rather, page_*lock() will
 * fail to lock the page, unless SE_RETIRED is passed as an argument.
 *
 * While we have your attention, go take a look at the comments at the
 * beginning of page_retire.c too.
 */
#define PR_OK           0x00    /* no problem */
#define PR_MCE          0x01    /* page has seen two or more CEs */
#define PR_UE           0x02    /* page has an unhandled UE */
#define PR_UE_SCRUBBED  0x04    /* page has seen a UE but was cleaned */
#define PR_FMA          0x08    /* A DE wants this page retired */
#define PR_CAPTURE      0x10    /* page is hashed on page_capture_hash[] */
#define PR_RESV         0x20    /* Reserved for future use */
#define PR_MSG          0x40    /* message(s) already printed for this page */
#define PR_RETIRED      0x80    /* This page has been retired */

#define PR_REASONS      (PR_UE | PR_MCE | PR_FMA)
#define PR_TOXIC        (PR_UE)
#define PR_ERRMASK      (PR_UE | PR_UE_SCRUBBED | PR_MCE | PR_FMA)
#define PR_TOXICFLAGS   (0xCF)

#define PP_RETIRED(pp)  ((pp)->p_toxic & PR_RETIRED)
#define PP_TOXIC(pp)    ((pp)->p_toxic & PR_TOXIC)
#define PP_PR_REQ(pp)   (((pp)->p_toxic & PR_REASONS) && !PP_RETIRED(pp))
#define PP_PR_NOSHARE(pp)                                               \
        ((((pp)->p_toxic & (PR_RETIRED | PR_FMA | PR_UE)) == PR_FMA) &&  \
        !PP_ISKAS(pp))

/*
 * Flags for page_unretire_pp
 */
#define PR_UNR_FREE     0x1
#define PR_UNR_CLEAN    0x2
#define PR_UNR_TEMP     0x4

/*
 * kpm large page description.
 * The virtual address range of segkpm is divided into chunks of
 * kpm_pgsz.  Each chunk is controlled by a kpm_page_t.  The ushort
 * is sufficient for 2^15 * PAGESIZE, so e.g. the maximum kpm_pgsz
 * for 8K pages is 256M and 2G for 64K pages.  It is kept as small as
 * possible to save physical memory space.
 *
 * There are 2 segkpm mapping windows within the virtual address
 * space when we have to prevent VAC alias conflicts.  The so-called
 * Alias window (mappings are always by PAGESIZE) is controlled by
 * kp_refcnta.  The regular window is controlled by kp_refcnt for the
 * normal operation, which is to use the largest available pagesize.
 * When VAC alias conflicts are present within a chunk in the regular
 * window the large page mapping is broken up into smaller PAGESIZE
 * mappings.  kp_refcntc is used to control the pages that are involved
 * in the conflict and kp_refcnts holds the active mappings done
 * with the small page size.  In non-VAC-conflict mode kp_refcntc is
 * also used as a "go" indication (-1) for the trap level tsbmiss
 * handler.
 */
typedef struct kpm_page {
        short kp_refcnt;        /* pages mapped large */
        short kp_refcnta;       /* pages mapped in Alias window */
        short kp_refcntc;       /* TL-tsbmiss flag; #vac alias conflict pages */
        short kp_refcnts;       /* vac alias: pages mapped small */
} kpm_page_t;

/*
 * Note: khl_lock offset changes must be reflected in sfmmu_asm.s
 */
typedef struct kpm_hlk {
        kmutex_t khl_mutex;     /* kpm_page mutex */
        uint_t   khl_lock;      /* trap level tsbmiss handling */
} kpm_hlk_t;

/*
 * kpm small page description.
 * When kpm_pgsz is equal to PAGESIZE a smaller representation is used
 * to save memory space.  Alias range mappings and regular segkpm
 * mappings are done in units of PAGESIZE and can share the mapping
 * information and the mappings are always distinguishable by their
 * virtual address.  Other information needed for VAC conflict prevention
 * is already available on a per page basis.
 *
 * The state about how a kpm page is mapped and whether it is ready to go
 * is indicated by the following 1 byte kpm_spage structure.  This byte is
 * split into two 4-bit parts - kp_mapped and kp_mapped_go.
 *      - kp_mapped == 1        the page is mapped cacheable
 *      - kp_mapped == 2        the page is mapped non-cacheable
 *      - kp_mapped_go == 1     the mapping is ready to be dropped in
 *      - kp_mapped_go == 0     the mapping is not ready to be dropped in.
 * When kp_mapped_go == 0, we will have the C handler resolve the VAC
 * conflict.  Otherwise, the assembly tsb miss handler can simply drop in
 * the mapping when a tsb miss occurs.
 */
typedef union kpm_spage {
        struct {
#ifdef  _BIG_ENDIAN
                uchar_t mapped_go: 4;   /* go or nogo flag */
                uchar_t mapped: 4;      /* page mapped small */
#else
                uchar_t mapped: 4;      /* page mapped small */
                uchar_t mapped_go: 4;   /* go or nogo flag */
#endif
        } kpm_spage_un;
        uchar_t kp_mapped_flag;
} kpm_spage_t;

#define kp_mapped       kpm_spage_un.mapped
#define kp_mapped_go    kpm_spage_un.mapped_go

/*
 * Note: kshl_lock offset changes must be reflected in sfmmu_asm.s
 */
typedef struct kpm_shlk {
        uint_t   kshl_lock;     /* trap level tsbmiss handling */
} kpm_shlk_t;

/*
 * Each segment of physical memory is described by a memseg struct.
 * Within a segment, memory is considered contiguous.  The members
 * can be categorized as follows:
 * . Platform independent:
 *         pages, epages, pages_base, pages_end, next, lnext.
 * . 64bit only but platform independent:
 *         kpm_pbase, kpm_nkpmpgs, kpm_pages, kpm_spages.
 * . Really platform or mmu specific:
 *         pagespa, epagespa, nextpa, kpm_pagespa.
 * . Mixed:
 *         msegflags.
 */
struct memseg {
        page_t *pages, *epages;         /* [from, to] in page array */
        pfn_t pages_base, pages_end;    /* [from, to] in page numbers */
        struct memseg *next;            /* next segment in list */
        struct memseg *lnext;           /* next segment in deleted list */
#if defined(__sparc)
        uint64_t pagespa, epagespa;     /* [from, to] page array physical */
        uint64_t nextpa;                /* physical next pointer */
        pfn_t   kpm_pbase;              /* start of kpm range */
        pgcnt_t kpm_nkpmpgs;            /* # of kpm_pgsz pages */
        union _mseg_un {
                kpm_page_t  *kpm_lpgs;  /* ptr to kpm_page array */
                kpm_spage_t *kpm_spgs;  /* ptr to kpm_spage array */
        } mseg_un;
        uint64_t kpm_pagespa;           /* physical ptr to kpm (s)pages array */
#endif /* __sparc */
        uint_t msegflags;               /* memseg flags */
};

/* memseg union aliases */
#define kpm_pages       mseg_un.kpm_lpgs
#define kpm_spages      mseg_un.kpm_spgs

/* msegflags */
#define MEMSEG_DYNAMIC          0x1     /* DR: memory was added dynamically */
#define MEMSEG_META_INCL        0x2     /* DR: memseg includes its metadata */
#define MEMSEG_META_ALLOC       0x4     /* DR: memseg allocated its metadata */

/* memseg support macros */
#define MSEG_NPAGES(SEG)        ((SEG)->pages_end - (SEG)->pages_base)

/* memseg hash */
#define MEM_HASH_SHIFT          0x9
#define N_MEM_SLOTS             0x200           /* must be a power of 2 */
#define MEMSEG_PFN_HASH(pfn)    (((pfn)/mhash_per_slot) & (N_MEM_SLOTS - 1))

/* memseg externals */
extern struct memseg *memsegs;          /* list of memory segments */
extern ulong_t mhash_per_slot;
extern uint64_t memsegspa;              /* memsegs as physical address */

void build_pfn_hash();
extern struct memseg *page_numtomemseg_nolock(pfn_t pfnum);
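
/*
 * Sketch of a pfn-to-memseg lookup via the hash (memseg_hash[] is assumed
 * to be the table built by build_pfn_hash(); it is not declared here):
 *
 *      struct memseg *seg;
 *
 *      for (seg = memseg_hash[MEMSEG_PFN_HASH(pfn)]; seg != NULL;
 *          seg = seg->next)
 *              if (pfn >= seg->pages_base && pfn < seg->pages_end)
 *                      break;
 */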

/*
 * page capture related info:
 * The page capture routines allow us to asynchronously capture given pages
 * for the explicit use of the requestor.  New requestors can be added by
 * explicitly adding themselves to the PC_* flags below and incrementing
 * PC_NUM_CALLBACKS as necessary.
 *
 * Subsystems using page capture must register a callback before attempting
 * to capture a page.  A duration of -1 will indicate that we will never give
 * up while trying to capture a page and will only stop trying to capture the
 * given page once we have successfully captured it.  Thus the user needs to be
 * aware of the behavior of all callers who have a duration of -1.
 *
 * For now, only /dev/physmem and page retire use the page capture interface
 * and only a single request can be outstanding for a given page.  Thus, if
 * /dev/physmem wants a page and page retire also wants the same page, only
 * the page retire request will be honored until the point in time that the
 * page is actually retired, at which point in time, subsequent requests by
 * /dev/physmem will succeed if the CAPTURE_GET_RETIRED flag was set.
 */

#define PC_RETIRE               (0)
#define PC_PHYSMEM              (1)
#define PC_NUM_CALLBACKS        (2)
#define PC_MASK                 ((1 << PC_NUM_CALLBACKS) - 1)

#define CAPTURE_RETIRE          (1 << PC_RETIRE)
#define CAPTURE_PHYSMEM         (1 << PC_PHYSMEM)

#define CAPTURE_ASYNC           (0x0200)

#define CAPTURE_GET_RETIRED     (0x1000)
#define CAPTURE_GET_CAGE        (0x2000)

struct page_capture_callback {
        int cb_active;          /* 1 means active, 0 means inactive */
        clock_t duration;       /* the length in time that we'll attempt to */
                                /* capture this page asynchronously. (in HZ) */
        krwlock_t cb_rwlock;
        int (*cb_func)(page_t *, void *, uint_t); /* callback function */
};

extern kcondvar_t pc_cv;

void page_capture_register_callback(uint_t index, clock_t duration,
    int (*cb_func)(page_t *, void *, uint_t));
void page_capture_unregister_callback(uint_t index);
int page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap);
void page_unlock_capture(page_t *pp);
int page_capture_unretire_pp(page_t *);
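
/*
 * Sketch (hypothetical callback): a subsystem registers its callback once,
 * then asks for pages; a duration of -1 means "retry until captured":
 *
 *      static int
 *      my_capture_cb(page_t *pp, void *datap, uint_t flags)
 *      {
 *              ... consume the captured page ...
 *              return (0);
 *      }
 *
 *      page_capture_register_callback(PC_PHYSMEM, -1, my_capture_cb);
 *      (void) page_trycapture(pp, 0, CAPTURE_PHYSMEM, datap);
 */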

extern int memsegs_trylock(int);
extern void memsegs_lock(int);
extern void memsegs_unlock(int);
extern int memsegs_lock_held(void);
extern void memlist_read_lock(void);
extern void memlist_read_unlock(void);
extern void memlist_write_lock(void);
extern void memlist_write_unlock(void);

#ifdef  __cplusplus
}
#endif

#endif  /* _VM_PAGE_H */