5045 use atomic_{inc,dec}_* instead of atomic_add_*
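The change is mechanical: wherever atomic_add_32() or atomic_add_32_nv() is called with a constant delta of 1 or -1, the dedicated increment/decrement interfaces from atomic_ops(3C)/atomic_ops(9F) are used instead; the stored value is identical, the call just states the intent directly. A minimal userland sketch of the equivalence (not part of this webrev; the counter name is made up):

    #include <atomic.h>     /* atomic_inc_32(), atomic_dec_32(), ... */
    #include <inttypes.h>
    #include <stdio.h>

    int
    main(void)
    {
            volatile uint32_t cnt = 0;

            /* Old style: generic add with a constant +/-1 delta. */
            atomic_add_32(&cnt, 1);
            atomic_add_32(&cnt, -1);

            /* New style: intent-revealing increment/decrement. */
            atomic_inc_32(&cnt);
            atomic_dec_32(&cnt);

            /* The two pairs cancel out, so this prints 0. */
            (void) printf("cnt = %u\n", cnt);
            return (0);
    }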

*** 272,283 ****
          uri_hash_t *_nhp;                                       \
                                                                  \
          mutex_enter(&(hp)->lock);                               \
          while ((_nuri = (hp)->list) != NULL) {                  \
                  (hp)->list = _nuri->hash;                       \
!                 atomic_add_32(&uri_hash_cnt[(from)], -1);       \
!                 atomic_add_32(&uri_hash_cnt[(to)], 1);          \
                  _nhix = _nuri->hvalue;                          \
                  URI_HASH_IX(_nhix, to);                         \
                  _nhp = &uri_hash_ab[(to)][_nhix];               \
                  mutex_enter(&_nhp->lock);                       \
                  _nuri->hash = _nhp->list;                       \
--- 272,283 ----
          uri_hash_t *_nhp;                                       \
                                                                  \
          mutex_enter(&(hp)->lock);                               \
          while ((_nuri = (hp)->list) != NULL) {                  \
                  (hp)->list = _nuri->hash;                       \
!                 atomic_dec_32(&uri_hash_cnt[(from)]);           \
!                 atomic_inc_32(&uri_hash_cnt[(to)]);             \
                  _nhix = _nuri->hvalue;                          \
                  URI_HASH_IX(_nhix, to);                         \
                  _nhp = &uri_hash_ab[(to)][_nhix];               \
                  mutex_enter(&_nhp->lock);                       \
                  _nuri->hash = _nhp->list;                       \
*** 292,302 ****
          if ((puri) != NULL) {                                   \
                  (puri)->hash = (uri)->hash;                     \
          } else {                                                \
                  (hp)->list = (uri)->hash;                       \
          }                                                       \
!         if (atomic_add_32_nv(&uri_hash_cnt[(cur)], -1) == 0 && \
              uri_hash_ab[(new)] != NULL) {                       \
                  kmem_free(uri_hash_ab[cur],                     \
                      sizeof (uri_hash_t) * uri_hash_sz[cur]);    \
                  uri_hash_ab[(cur)] = NULL;                      \
                  uri_hash_lru[(cur)] = NULL;                     \
--- 292,302 ----
          if ((puri) != NULL) {                                   \
                  (puri)->hash = (uri)->hash;                     \
          } else {                                                \
                  (hp)->list = (uri)->hash;                       \
          }                                                       \
!         if (atomic_dec_32_nv(&uri_hash_cnt[(cur)]) == 0 &&     \
              uri_hash_ab[(new)] != NULL) {                       \
                  kmem_free(uri_hash_ab[cur],                     \
                      sizeof (uri_hash_t) * uri_hash_sz[cur]);    \
                  uri_hash_ab[(cur)] = NULL;                      \
                  uri_hash_lru[(cur)] = NULL;                     \
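Where the counter's post-update value is tested, the *_nv ("new value") variants are substituted, so the caller still observes the result of the decrement, here used to free the old hash table exactly once when its count drops to zero. A hedged sketch of that decrement-and-test pattern (obj_t, obj_rele(), and obj_destroy() are hypothetical names, not from nl7c):

    #include <atomic.h>

    typedef struct obj {
            volatile uint32_t o_refcnt;
            /* ... payload ... */
    } obj_t;

    void obj_destroy(obj_t *);          /* hypothetical destructor */

    /*
     * Drop one reference; the thread that sees the count reach zero
     * is the only one that frees the object.
     */
    void
    obj_rele(obj_t *op)
    {
            /* Before the change: atomic_add_32_nv(&op->o_refcnt, -1) == 0 */
            if (atomic_dec_32_nv(&op->o_refcnt) == 0)
                    obj_destroy(op);
    }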
*** 595,605 ****
           * atomictally, i.e. multiple threads can be in this code
           * as RW_READER and update the cnt[], this isn't a problem
           * as the check is only advisory.
           */
  fast:
!         atomic_add_32(&uri_hash_cnt[cur], 1);
          hp = &uri_hash_ab[cur][hix];
          mutex_enter(&hp->lock);
          uri->hash = hp->list;
          hp->list = uri;
          mutex_exit(&hp->lock);
--- 595,605 ----
           * atomictally, i.e. multiple threads can be in this code
           * as RW_READER and update the cnt[], this isn't a problem
           * as the check is only advisory.
           */
  fast:
!         atomic_inc_32(&uri_hash_cnt[cur]);
          hp = &uri_hash_ab[cur][hix];
          mutex_enter(&hp->lock);
          uri->hash = hp->list;
          hp->list = uri;
          mutex_exit(&hp->lock);
*** 687,697 ****
          /*
           * If new hash has overflowed before current hash has been
           * completely migrated then walk all current hash chains and
           * migrate list members now.
           */
!         if (atomic_add_32_nv(&uri_hash_cnt[new], 1) >= uri_hash_overflow[new]) {
                  for (hix = 0; hix < uri_hash_sz[cur]; hix++) {
                          hp = &uri_hash_ab[cur][hix];
                          if (hp->list != NULL) {
                                  URI_HASH_MIGRATE(cur, hp, new);
                          }
--- 687,697 ----
          /*
           * If new hash has overflowed before current hash has been
           * completely migrated then walk all current hash chains and
           * migrate list members now.
           */
!         if (atomic_inc_32_nv(&uri_hash_cnt[new]) >= uri_hash_overflow[new]) {
                  for (hix = 0; hix < uri_hash_sz[cur]; hix++) {
                          hp = &uri_hash_ab[cur][hix];
                          if (hp->list != NULL) {
                                  URI_HASH_MIGRATE(cur, hp, new);
                          }
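The increment side of the same pattern: atomic_inc_32_nv() returns the incremented value, so a single call both bumps the element count and detects when it crosses the overflow threshold that triggers migration to the new table. A hedged sketch (table_cnt, table_overflow, and table_grow() are illustrative names, not the nl7c ones):

    #include <atomic.h>

    static volatile uint32_t table_cnt;
    static uint32_t table_overflow = 1024;  /* illustrative threshold */

    void table_grow(void);                  /* hypothetical rehash routine */

    void
    table_insert_one(void)
    {
            /* Before the change: atomic_add_32_nv(&table_cnt, 1) >= table_overflow */
            if (atomic_inc_32_nv(&table_cnt) >= table_overflow)
                    table_grow();
            /* ... link the new element into its hash chain ... */
    }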
*** 835,845 ****
                  puri->hash = uri->hash;
          } else {
                  hp->list = uri->hash;
          }
          mutex_exit(&hp->lock);
!         atomic_add_32(&uri_hash_cnt[cur], -1);
          rw_exit(&uri_hash_access);
          if (ruri->nocache)
                  nl7c_uri_purge++;
          else
                  nl7c_uri_expire++;
--- 835,845 ----
                  puri->hash = uri->hash;
          } else {
                  hp->list = uri->hash;
          }
          mutex_exit(&hp->lock);
!         atomic_dec_32(&uri_hash_cnt[cur]);
          rw_exit(&uri_hash_access);
          if (ruri->nocache)
                  nl7c_uri_purge++;
          else
                  nl7c_uri_expire++;