Print this page
6065 page hash: use a static inline instead of a macro

*** 18,27 ****
--- 18,28 ----
   *
   * CDDL HEADER END
   */
  
  /*
   * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
+  * Copyright (c) 2015, Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
   */
  
  /*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T	*/
  /*	All Rights Reserved	*/
*** 265,307 ****
  uint_t	page_create_cnt[10];
  uint_t	alloc_pages[9];
  uint_t	page_exphcontg[19];
  uint_t	page_create_large_cnt[10];
  
! /*
!  * Collects statistics.
!  */
! #define	PAGE_HASH_SEARCH(index, pp, vp, off) { \
! 	uint_t mylen = 0; \
! 			\
! 	for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash, mylen++) { \
! 		if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \
! 			break; \
! 	} \
! 	if ((pp) != NULL) \
! 		pagecnt.pc_find_hit++; \
! 	else \
! 		pagecnt.pc_find_miss++; \
! 	if (mylen > PC_HASH_CNT) \
! 		mylen = PC_HASH_CNT; \
! 	pagecnt.pc_find_hashlen[mylen]++; \
! }
! 
! #else	/* VM_STATS */
! 
! /*
!  * Don't collect statistics
!  */
! #define	PAGE_HASH_SEARCH(index, pp, vp, off) { \
! 	for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash) { \
! 		if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \
! 			break; \
! 	} \
! }
! #endif	/* VM_STATS */
  
  #ifdef DEBUG
  #define	MEMSEG_SEARCH_STATS
  #endif
--- 266,298 ----
  uint_t	page_create_cnt[10];
  uint_t	alloc_pages[9];
  uint_t	page_exphcontg[19];
  uint_t	page_create_large_cnt[10];
! #endif
! 
! static inline page_t *
! page_hash_search(ulong_t index, vnode_t *vnode, u_offset_t off)
! {
! 	uint_t mylen = 0;
! 	page_t *page;
! 
! 	for (page = page_hash[index]; page; page = page->p_hash, mylen++)
! 		if (page->p_vnode == vnode && page->p_offset == off)
! 			break;
  
+ #ifdef	VM_STATS
+ 	if (page != NULL)
+ 		pagecnt.pc_find_hit++;
+ 	else
+ 		pagecnt.pc_find_miss++;
+ 
+ 	pagecnt.pc_find_hashlen[MIN(mylen, PC_HASH_CNT)]++;
+ #endif
+ 
+ 	return (page);
+ }
  
  #ifdef DEBUG
  #define	MEMSEG_SEARCH_STATS
  #endif
*** 748,758 ****
  	 */
  	hash_locked = 0;
  	index = PAGE_HASH_FUNC(vp, off);
  	phm = NULL;
  top:
! 	PAGE_HASH_SEARCH(index, pp, vp, off);
  	if (pp != NULL) {
  		VM_STAT_ADD(page_lookup_cnt[1]);
  		es = (newpp != NULL) ? 1 : 0;
  		es |= flags;
  		if (!hash_locked) {
--- 739,749 ----
  	 */
  	hash_locked = 0;
  	index = PAGE_HASH_FUNC(vp, off);
  	phm = NULL;
  top:
! 	pp = page_hash_search(index, vp, off);
  	if (pp != NULL) {
  		VM_STAT_ADD(page_lookup_cnt[1]);
  		es = (newpp != NULL) ? 1 : 0;
  		es |= flags;
  		if (!hash_locked) {
*** 782,793 ****
  		/*
  		 * Since `pp' is locked it can not change identity now.
  		 * Reconfirm we locked the correct page.
  		 *
  		 * Both the p_vnode and p_offset *must* be cast volatile
! 		 * to force a reload of their values: The PAGE_HASH_SEARCH
! 		 * macro will have stuffed p_vnode and p_offset into
  		 * registers before calling page_trylock(); another thread,
  		 * actually holding the hash lock, could have changed the
  		 * page's identity in memory, but our registers would not
  		 * be changed, fooling the reconfirmation.  If the hash
  		 * lock was held during the search, the casting would
--- 773,784 ----
  		/*
  		 * Since `pp' is locked it can not change identity now.
  		 * Reconfirm we locked the correct page.
  		 *
  		 * Both the p_vnode and p_offset *must* be cast volatile
! 		 * to force a reload of their values: The page_hash_search
! 		 * function will have stuffed p_vnode and p_offset into
  		 * registers before calling page_trylock(); another thread,
  		 * actually holding the hash lock, could have changed the
  		 * page's identity in memory, but our registers would not
  		 * be changed, fooling the reconfirmation.  If the hash
  		 * lock was held during the search, the casting would
*** 946,964 ****
  	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
  	VM_STAT_ADD(page_lookup_nowait_cnt[0]);
  
  	index = PAGE_HASH_FUNC(vp, off);
! 	PAGE_HASH_SEARCH(index, pp, vp, off);
  	locked = 0;
  	if (pp == NULL) {
  top:
  		VM_STAT_ADD(page_lookup_nowait_cnt[1]);
  		locked = 1;
  		phm = PAGE_HASH_MUTEX(index);
  		mutex_enter(phm);
! 		PAGE_HASH_SEARCH(index, pp, vp, off);
  	}
  
  	if (pp == NULL || PP_ISFREE(pp)) {
  		VM_STAT_ADD(page_lookup_nowait_cnt[2]);
  		pp = NULL;
--- 937,955 ----
  	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
  	VM_STAT_ADD(page_lookup_nowait_cnt[0]);
  
  	index = PAGE_HASH_FUNC(vp, off);
! 	pp = page_hash_search(index, vp, off);
  	locked = 0;
  	if (pp == NULL) {
  top:
  		VM_STAT_ADD(page_lookup_nowait_cnt[1]);
  		locked = 1;
  		phm = PAGE_HASH_MUTEX(index);
  		mutex_enter(phm);
! 		pp = page_hash_search(index, vp, off);
  	}
  
  	if (pp == NULL || PP_ISFREE(pp)) {
  		VM_STAT_ADD(page_lookup_nowait_cnt[2]);
  		pp = NULL;
*** 1016,1026 ****
  
  	index = PAGE_HASH_FUNC(vp, off);
  	phm = PAGE_HASH_MUTEX(index);
  	mutex_enter(phm);
! 	PAGE_HASH_SEARCH(index, pp, vp, off);
  	mutex_exit(phm);
  
  	ASSERT(pp == NULL || PAGE_LOCKED(pp) || panicstr);
  	return (pp);
  }
--- 1007,1017 ----
  
  	index = PAGE_HASH_FUNC(vp, off);
  	phm = PAGE_HASH_MUTEX(index);
  	mutex_enter(phm);
! 	pp = page_hash_search(index, vp, off);
  	mutex_exit(phm);
  
  	ASSERT(pp == NULL || PAGE_LOCKED(pp) || panicstr);
  	return (pp);
  }
*** 1034,1053 ****
   * even bother to lock the list.
   */
  page_t *
  page_exists(vnode_t *vp, u_offset_t off)
  {
- 	page_t	*pp;
  	ulong_t		index;
  
  	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
  	VM_STAT_ADD(page_exists_cnt);
  
  	index = PAGE_HASH_FUNC(vp, off);
- 	PAGE_HASH_SEARCH(index, pp, vp, off);
  
! 	return (pp);
  }
  
  /*
   * Determine if physically contiguous pages exist for [vp, off] - [vp, off +
   * page_size(szc)) range. if they exist and ppa is not NULL fill ppa array
--- 1025,1042 ----
   * even bother to lock the list.
   */
  page_t *
  page_exists(vnode_t *vp, u_offset_t off)
  {
  	ulong_t		index;
  
  	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
  	VM_STAT_ADD(page_exists_cnt);
  
  	index = PAGE_HASH_FUNC(vp, off);
  
! 	return (page_hash_search(index, vp, off));
  }
  
  /*
   * Determine if physically contiguous pages exist for [vp, off] - [vp, off +
   * page_size(szc)) range. if they exist and ppa is not NULL fill ppa array
*** 1090,1100 ****
  
  	index = PAGE_HASH_FUNC(vp, off);
  	phm = PAGE_HASH_MUTEX(index);
  	mutex_enter(phm);
! 	PAGE_HASH_SEARCH(index, pp, vp, off);
  	mutex_exit(phm);
  
  	VM_STAT_ADD(page_exphcontg[1]);
  
  	if (pp == NULL) {
--- 1079,1089 ----
  
  	index = PAGE_HASH_FUNC(vp, off);
  	phm = PAGE_HASH_MUTEX(index);
  	mutex_enter(phm);
! 	pp = page_hash_search(index, vp, off);
  	mutex_exit(phm);
  
  	VM_STAT_ADD(page_exphcontg[1]);
  
  	if (pp == NULL) {
*** 1317,1327 ****
  
  	index = PAGE_HASH_FUNC(vp, off);
  	phm = PAGE_HASH_MUTEX(index);
  	mutex_enter(phm);
! 	PAGE_HASH_SEARCH(index, pp, vp, off);
  	if (pp != NULL) {
  		*szc = pp->p_szc;
  		rc = 1;
  	}
  	mutex_exit(phm);
--- 1306,1316 ----
  
  	index = PAGE_HASH_FUNC(vp, off);
  	phm = PAGE_HASH_MUTEX(index);
  	mutex_enter(phm);
! 	pp = page_hash_search(index, vp, off);
  	if (pp != NULL) {
  		*szc = pp->p_szc;
  		rc = 1;
  	}
  	mutex_exit(phm);
*** 2445,2455 ****
  		 * Get the mutex and check to see if it really does
  		 * not exist.
  		 */
  		phm = PAGE_HASH_MUTEX(index);
  		mutex_enter(phm);
! 		PAGE_HASH_SEARCH(index, pp, vp, off);
  		if (pp == NULL) {
  			VM_STAT_ADD(page_create_new);
  			pp = npp;
  			npp = NULL;
  			if (!page_hashin(pp, vp, off, phm)) {
--- 2434,2444 ----
  		 * Get the mutex and check to see if it really does
  		 * not exist.
  		 */
  		phm = PAGE_HASH_MUTEX(index);
  		mutex_enter(phm);
! 		pp = page_hash_search(index, vp, off);
  		if (pp == NULL) {
  			VM_STAT_ADD(page_create_new);
  			pp = npp;
  			npp = NULL;
  			if (!page_hashin(pp, vp, off, phm)) {
*** 3275,3285 **** * identity. In the case when the phm lock is dropped to undo any * hat layer mappings, the existing page is held with an "exclusive" * lock, again preventing another page from being created with * this identity. */ ! PAGE_HASH_SEARCH(index, pp, vp, off); if (pp != NULL) { VM_STAT_ADD(page_rename_exists); /* * As it turns out, this is one of only two places where --- 3264,3274 ---- * identity. In the case when the phm lock is dropped to undo any * hat layer mappings, the existing page is held with an "exclusive" * lock, again preventing another page from being created with * this identity. */ ! pp = page_hash_search(index, vp, off); if (pp != NULL) { VM_STAT_ADD(page_rename_exists); /* * As it turns out, this is one of only two places where