Print this page
6065 page hash: use a static inline instead of a macro
@@ -18,10 +18,11 @@
*
* CDDL HEADER END
*/
/*
* Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
*/
/* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */
@@ -265,43 +266,33 @@
uint_t page_create_cnt[10];
uint_t alloc_pages[9];
uint_t page_exphcontg[19];
uint_t page_create_large_cnt[10];
-/*
- * Collects statistics.
- */
-#define PAGE_HASH_SEARCH(index, pp, vp, off) { \
- uint_t mylen = 0; \
- \
- for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash, mylen++) { \
- if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \
- break; \
- } \
- if ((pp) != NULL) \
- pagecnt.pc_find_hit++; \
- else \
- pagecnt.pc_find_miss++; \
- if (mylen > PC_HASH_CNT) \
- mylen = PC_HASH_CNT; \
- pagecnt.pc_find_hashlen[mylen]++; \
-}
-
-#else /* VM_STATS */
-
-/*
- * Don't collect statistics
- */
-#define PAGE_HASH_SEARCH(index, pp, vp, off) { \
- for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash) { \
- if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \
- break; \
- } \
-}
-#endif /* VM_STATS */
+#endif
+static inline page_t *
+page_hash_search(ulong_t index, vnode_t *vnode, u_offset_t off)
+{
+ uint_t mylen = 0;
+ page_t *page;
+
+ for (page = page_hash[index]; page; page = page->p_hash, mylen++)
+ if (page->p_vnode == vnode && page->p_offset == off)
+ break;
+#ifdef VM_STATS
+ if (page != NULL)
+ pagecnt.pc_find_hit++;
+ else
+ pagecnt.pc_find_miss++;
+
+ pagecnt.pc_find_hashlen[MIN(mylen, PC_HASH_CNT)]++;
+#endif
+
+ return (page);
+}
#ifdef DEBUG
#define MEMSEG_SEARCH_STATS
#endif
@@ -748,11 +739,11 @@
*/
hash_locked = 0;
index = PAGE_HASH_FUNC(vp, off);
phm = NULL;
top:
- PAGE_HASH_SEARCH(index, pp, vp, off);
+ pp = page_hash_search(index, vp, off);
if (pp != NULL) {
VM_STAT_ADD(page_lookup_cnt[1]);
es = (newpp != NULL) ? 1 : 0;
es |= flags;
if (!hash_locked) {
@@ -782,12 +773,12 @@
/*
* Since `pp' is locked it can not change identity now.
* Reconfirm we locked the correct page.
*
* Both the p_vnode and p_offset *must* be cast volatile
- * to force a reload of their values: The PAGE_HASH_SEARCH
- * macro will have stuffed p_vnode and p_offset into
+ * to force a reload of their values: The page_hash_search
+ * function will have stuffed p_vnode and p_offset into
* registers before calling page_trylock(); another thread,
* actually holding the hash lock, could have changed the
* page's identity in memory, but our registers would not
* be changed, fooling the reconfirmation. If the hash
* lock was held during the search, the casting would
@@ -946,19 +937,19 @@
ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
VM_STAT_ADD(page_lookup_nowait_cnt[0]);
index = PAGE_HASH_FUNC(vp, off);
- PAGE_HASH_SEARCH(index, pp, vp, off);
+ pp = page_hash_search(index, vp, off);
locked = 0;
if (pp == NULL) {
top:
VM_STAT_ADD(page_lookup_nowait_cnt[1]);
locked = 1;
phm = PAGE_HASH_MUTEX(index);
mutex_enter(phm);
- PAGE_HASH_SEARCH(index, pp, vp, off);
+ pp = page_hash_search(index, vp, off);
}
if (pp == NULL || PP_ISFREE(pp)) {
VM_STAT_ADD(page_lookup_nowait_cnt[2]);
pp = NULL;
@@ -1016,11 +1007,11 @@
index = PAGE_HASH_FUNC(vp, off);
phm = PAGE_HASH_MUTEX(index);
mutex_enter(phm);
- PAGE_HASH_SEARCH(index, pp, vp, off);
+ pp = page_hash_search(index, vp, off);
mutex_exit(phm);
ASSERT(pp == NULL || PAGE_LOCKED(pp) || panicstr);
return (pp);
}
@@ -1034,20 +1025,18 @@
* even bother to lock the list.
*/
page_t *
page_exists(vnode_t *vp, u_offset_t off)
{
- page_t *pp;
ulong_t index;
ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
VM_STAT_ADD(page_exists_cnt);
index = PAGE_HASH_FUNC(vp, off);
- PAGE_HASH_SEARCH(index, pp, vp, off);
- return (pp);
+ return (page_hash_search(index, vp, off));
}
/*
* Determine if physically contiguous pages exist for [vp, off] - [vp, off +
* page_size(szc)) range. if they exist and ppa is not NULL fill ppa array
@@ -1090,11 +1079,11 @@
index = PAGE_HASH_FUNC(vp, off);
phm = PAGE_HASH_MUTEX(index);
mutex_enter(phm);
- PAGE_HASH_SEARCH(index, pp, vp, off);
+ pp = page_hash_search(index, vp, off);
mutex_exit(phm);
VM_STAT_ADD(page_exphcontg[1]);
if (pp == NULL) {
@@ -1317,11 +1306,11 @@
index = PAGE_HASH_FUNC(vp, off);
phm = PAGE_HASH_MUTEX(index);
mutex_enter(phm);
- PAGE_HASH_SEARCH(index, pp, vp, off);
+ pp = page_hash_search(index, vp, off);
if (pp != NULL) {
*szc = pp->p_szc;
rc = 1;
}
mutex_exit(phm);
@@ -2445,11 +2434,11 @@
* Get the mutex and check to see if it really does
* not exist.
*/
phm = PAGE_HASH_MUTEX(index);
mutex_enter(phm);
- PAGE_HASH_SEARCH(index, pp, vp, off);
+ pp = page_hash_search(index, vp, off);
if (pp == NULL) {
VM_STAT_ADD(page_create_new);
pp = npp;
npp = NULL;
if (!page_hashin(pp, vp, off, phm)) {
@@ -3275,11 +3264,11 @@
* identity. In the case when the phm lock is dropped to undo any
* hat layer mappings, the existing page is held with an "exclusive"
* lock, again preventing another page from being created with
* this identity.
*/
- PAGE_HASH_SEARCH(index, pp, vp, off);
+ pp = page_hash_search(index, vp, off);
if (pp != NULL) {
VM_STAT_ADD(page_rename_exists);
/*
* As it turns out, this is one of only two places where