5045 use atomic_{inc,dec}_* instead of atomic_add_*
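The substitution is mechanical throughout: every atomic_add_*(p, 1) becomes the dedicated atomic_inc_*(p), every atomic_add_*(p, -1) becomes atomic_dec_*(p), and the value-returning *_nv forms map the same way. A minimal sketch of the pattern, using made-up counters rather than the hat_sfmmu.c fields (semantics are unchanged; only the call names differ):

    #include <sys/types.h>
    #include <sys/atomic.h>

    /* Hypothetical counters, for illustration only. */
    static volatile uint32_t refcnt;
    static volatile ulong_t ttecnt;

    static void
    example(void)
    {
            atomic_add_32(&refcnt, 1);              /* old */
            atomic_inc_32(&refcnt);                 /* new */

            atomic_add_32(&refcnt, -1);             /* old */
            atomic_dec_32(&refcnt);                 /* new */

            atomic_add_long(&ttecnt, 1);            /* old */
            atomic_inc_ulong(&ttecnt);              /* new */
    }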

*** 2149,2159 ****
          if (flag == HAT_DUP_SRD && ((srdp = hat->sfmmu_srdp) != NULL)) {
                  ASSERT(srdp->srd_evp != NULL);
                  VN_HOLD(srdp->srd_evp);
                  ASSERT(srdp->srd_refcnt > 0);
                  newhat->sfmmu_srdp = srdp;
!                 atomic_add_32((volatile uint_t *)&srdp->srd_refcnt, 1);
          }

          /*
           * HAT_DUP_ALL flag is used after as duplication is done.
           */
--- 2149,2159 ----
          if (flag == HAT_DUP_SRD && ((srdp = hat->sfmmu_srdp) != NULL)) {
                  ASSERT(srdp->srd_evp != NULL);
                  VN_HOLD(srdp->srd_evp);
                  ASSERT(srdp->srd_refcnt > 0);
                  newhat->sfmmu_srdp = srdp;
!                 atomic_inc_32((volatile uint_t *)&srdp->srd_refcnt);
          }

          /*
           * HAT_DUP_ALL flag is used after as duplication is done.
           */
*** 3225,3235 ****
          if (flags & HAT_LOAD_LOCK) {
                  if ((hmeblkp->hblk_lckcnt + 1) >= MAX_HBLK_LCKCNT) {
                          panic("too high lckcnt-hmeblk %p",
                              (void *)hmeblkp);
                  }
!                 atomic_add_32(&hmeblkp->hblk_lckcnt, 1);

                  HBLK_STACK_TRACE(hmeblkp, HBLK_LOCK);
          }

  #ifdef VAC
--- 3225,3235 ----
          if (flags & HAT_LOAD_LOCK) {
                  if ((hmeblkp->hblk_lckcnt + 1) >= MAX_HBLK_LCKCNT) {
                          panic("too high lckcnt-hmeblk %p",
                              (void *)hmeblkp);
                  }
!                 atomic_inc_32(&hmeblkp->hblk_lckcnt);

                  HBLK_STACK_TRACE(hmeblkp, HBLK_LOCK);
          }

  #ifdef VAC
*** 3260,3281 ****
          }
          ASSERT(TTE_IS_VALID(&sfhme->hme_tte));

          if (!TTE_IS_VALID(&tteold)) {
!                 atomic_add_16(&hmeblkp->hblk_vcnt, 1);

                  if (rid == SFMMU_INVALID_SHMERID) {
!                         atomic_add_long(&sfmmup->sfmmu_ttecnt[size], 1);
                  } else {
                          sf_srd_t *srdp = sfmmup->sfmmu_srdp;
                          sf_region_t *rgnp = srdp->srd_hmergnp[rid];
                          /*
                           * We already accounted for region ttecnt's in sfmmu
                           * during hat_join_region() processing. Here we
                           * only update ttecnt's in region struture.
                           */
!                         atomic_add_long(&rgnp->rgn_ttecnt[size], 1);
                  }
          }

          myflt = (astosfmmu(curthread->t_procp->p_as) == sfmmup);
          if (size > TTE8K && (flags & HAT_LOAD_SHARE) == 0 &&
--- 3260,3281 ----
          }
          ASSERT(TTE_IS_VALID(&sfhme->hme_tte));

          if (!TTE_IS_VALID(&tteold)) {
!                 atomic_inc_16(&hmeblkp->hblk_vcnt);

                  if (rid == SFMMU_INVALID_SHMERID) {
!                         atomic_inc_ulong(&sfmmup->sfmmu_ttecnt[size]);
                  } else {
                          sf_srd_t *srdp = sfmmup->sfmmu_srdp;
                          sf_region_t *rgnp = srdp->srd_hmergnp[rid];
                          /*
                           * We already accounted for region ttecnt's in sfmmu
                           * during hat_join_region() processing. Here we
                           * only update ttecnt's in region struture.
                           */
!                         atomic_inc_ulong(&rgnp->rgn_ttecnt[size]);
                  }
          }

          myflt = (astosfmmu(curthread->t_procp->p_as) == sfmmup);
          if (size > TTE8K && (flags & HAT_LOAD_SHARE) == 0 &&
*** 3379,3389 ****
                  }
          }
          if (pp) {
                  if (!remap) {
                          HME_ADD(sfhme, pp);
!                         atomic_add_16(&hmeblkp->hblk_hmecnt, 1);
                          ASSERT(hmeblkp->hblk_hmecnt > 0);

                          /*
                           * Cannot ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS)
                           * see pageunload() for comment.
--- 3379,3389 ----
                  }
          }
          if (pp) {
                  if (!remap) {
                          HME_ADD(sfhme, pp);
!                         atomic_inc_16(&hmeblkp->hblk_hmecnt);
                          ASSERT(hmeblkp->hblk_hmecnt > 0);

                          /*
                           * Cannot ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS)
                           * see pageunload() for comment.
*** 4152,4162 ****
                          if (((uintptr_t)addr + TTEBYTES(ttesz)) >
                              (uintptr_t)endaddr)
                                  panic("can't unlock large tte");

                          ASSERT(hmeblkp->hblk_lckcnt > 0);
!                         atomic_add_32(&hmeblkp->hblk_lckcnt, -1);
                          HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
                  } else {
                          panic("sfmmu_hblk_unlock: invalid tte");
                  }
                  addr += TTEBYTES(ttesz);
--- 4152,4162 ----
                          if (((uintptr_t)addr + TTEBYTES(ttesz)) >
                              (uintptr_t)endaddr)
                                  panic("can't unlock large tte");

                          ASSERT(hmeblkp->hblk_lckcnt > 0);
!                         atomic_dec_32(&hmeblkp->hblk_lckcnt);
                          HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
                  } else {
                          panic("sfmmu_hblk_unlock: invalid tte");
                  }
                  addr += TTEBYTES(ttesz);
*** 6131,6141 ****
                           */
                          ttecnt++;

                          if (flags & HAT_UNLOAD_UNLOCK) {
                                  ASSERT(hmeblkp->hblk_lckcnt > 0);
!                                 atomic_add_32(&hmeblkp->hblk_lckcnt, -1);
                                  HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
                          }

                          /*
                           * Normally we would need to flush the page
--- 6131,6141 ----
                           */
                          ttecnt++;

                          if (flags & HAT_UNLOAD_UNLOCK) {
                                  ASSERT(hmeblkp->hblk_lckcnt > 0);
!                                 atomic_dec_32(&hmeblkp->hblk_lckcnt);
                                  HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
                          }

                          /*
                           * Normally we would need to flush the page
*** 6185,6199 ****
                                   * Again, we cannot
                                   * ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS);
                                   */
                                  HME_SUB(sfhmep, pp);
                                  membar_stst();
!                                 atomic_add_16(&hmeblkp->hblk_hmecnt, -1);
                          }

                          ASSERT(hmeblkp->hblk_vcnt > 0);
!                         atomic_add_16(&hmeblkp->hblk_vcnt, -1);

                          ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
                              !hmeblkp->hblk_lckcnt);

  #ifdef VAC
--- 6185,6199 ----
                                   * Again, we cannot
                                   * ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS);
                                   */
                                  HME_SUB(sfhmep, pp);
                                  membar_stst();
!                                 atomic_dec_16(&hmeblkp->hblk_hmecnt);
                          }

                          ASSERT(hmeblkp->hblk_vcnt > 0);
!                         atomic_dec_16(&hmeblkp->hblk_vcnt);

                          ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
                              !hmeblkp->hblk_lckcnt);

  #ifdef VAC
*** 7347,7360 ****
                          rgnp = srdp->srd_hmergnp[rid];
                          SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
                          cpuset = sfmmu_rgntlb_demap(addr, rgnp, hmeblkp, 1);
                          sfmmu_ttesync(NULL, addr, &tte, pp);
                          ASSERT(rgnp->rgn_ttecnt[ttesz] > 0);
!                         atomic_add_long(&rgnp->rgn_ttecnt[ttesz], -1);
                  } else {
                          sfmmu_ttesync(sfmmup, addr, &tte, pp);
!                         atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -1);

                          /*
                           * We need to flush the page from the virtual cache
                           * in order to prevent a virtual cache alias
                           * inconsistency. The particular scenario we need
--- 7347,7360 ----
                          rgnp = srdp->srd_hmergnp[rid];
                          SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
                          cpuset = sfmmu_rgntlb_demap(addr, rgnp, hmeblkp, 1);
                          sfmmu_ttesync(NULL, addr, &tte, pp);
                          ASSERT(rgnp->rgn_ttecnt[ttesz] > 0);
!                         atomic_dec_ulong(&rgnp->rgn_ttecnt[ttesz]);
                  } else {
                          sfmmu_ttesync(sfmmup, addr, &tte, pp);
!                         atomic_dec_ulong(&sfmmup->sfmmu_ttecnt[ttesz]);

                          /*
                           * We need to flush the page from the virtual cache
                           * in order to prevent a virtual cache alias
                           * inconsistency. The particular scenario we need
*** 7411,7422 ****
           * we are done with hmeblkp so that this hmeblk won't be
           * stolen.
           */
          ASSERT(hmeblkp->hblk_hmecnt > 0);
          ASSERT(hmeblkp->hblk_vcnt > 0);
!         atomic_add_16(&hmeblkp->hblk_vcnt, -1);
!         atomic_add_16(&hmeblkp->hblk_hmecnt, -1);
          /*
           * This is bug 4063182.
           * XXX: fixme
           * ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
           *      !hmeblkp->hblk_lckcnt);
--- 7411,7422 ----
           * we are done with hmeblkp so that this hmeblk won't be
           * stolen.
           */
          ASSERT(hmeblkp->hblk_hmecnt > 0);
          ASSERT(hmeblkp->hblk_vcnt > 0);
!         atomic_dec_16(&hmeblkp->hblk_vcnt);
!         atomic_dec_16(&hmeblkp->hblk_hmecnt);
          /*
           * This is bug 4063182.
           * XXX: fixme
           * ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
           *      !hmeblkp->hblk_lckcnt);
*** 13811,13822 ****
          for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL;
              srdp = srdp->srd_hash) {
                  if (srdp->srd_evp == evp) {
                          ASSERT(srdp->srd_refcnt >= 0);
                          sfmmup->sfmmu_srdp = srdp;
!                         atomic_add_32(
!                             (volatile uint_t *)&srdp->srd_refcnt, 1);
                          mutex_exit(&srd_buckets[hash].srdb_lock);
                          return;
                  }
          }
          mutex_exit(&srd_buckets[hash].srdb_lock);
--- 13811,13822 ----
          for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL;
              srdp = srdp->srd_hash) {
                  if (srdp->srd_evp == evp) {
                          ASSERT(srdp->srd_refcnt >= 0);
                          sfmmup->sfmmu_srdp = srdp;
!                         atomic_inc_32(
!                             (volatile uint_t *)&srdp->srd_refcnt);
                          mutex_exit(&srd_buckets[hash].srdb_lock);
                          return;
                  }
          }
          mutex_exit(&srd_buckets[hash].srdb_lock);
*** 13833,13843 ****
          for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL;
              srdp = srdp->srd_hash) {
                  if (srdp->srd_evp == evp) {
                          ASSERT(srdp->srd_refcnt >= 0);
                          sfmmup->sfmmu_srdp = srdp;
!                         atomic_add_32((volatile uint_t *)&srdp->srd_refcnt, 1);
                          mutex_exit(&srd_buckets[hash].srdb_lock);
                          kmem_cache_free(srd_cache, newsrdp);
                          return;
                  }
          }
--- 13833,13843 ----
          for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL;
              srdp = srdp->srd_hash) {
                  if (srdp->srd_evp == evp) {
                          ASSERT(srdp->srd_refcnt >= 0);
                          sfmmup->sfmmu_srdp = srdp;
!                         atomic_inc_32((volatile uint_t *)&srdp->srd_refcnt);
                          mutex_exit(&srd_buckets[hash].srdb_lock);
                          kmem_cache_free(srd_cache, newsrdp);
                          return;
                  }
          }
*** 13870,13881 ****
          ASSERT(sfmmup->sfmmu_free == 1);

          sfmmup->sfmmu_srdp = NULL;
          evp = srdp->srd_evp;
          ASSERT(evp != NULL);
!         if (atomic_add_32_nv(
!             (volatile uint_t *)&srdp->srd_refcnt, -1)) {
                  VN_RELE(evp);
                  return;
          }

          hash = SRD_HASH_FUNCTION(evp);
--- 13870,13880 ----
          ASSERT(sfmmup->sfmmu_free == 1);

          sfmmup->sfmmu_srdp = NULL;
          evp = srdp->srd_evp;
          ASSERT(evp != NULL);
!         if (atomic_dec_32_nv((volatile uint_t *)&srdp->srd_refcnt)) {
                  VN_RELE(evp);
                  return;
          }

          hash = SRD_HASH_FUNCTION(evp);
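The *_nv ("new value") variants, as in the hunk above, return the post-operation value, which the SRD and region teardown paths use to decide whether the last reference was just dropped. A hedged sketch of that release idiom with a hypothetical object type (obj_t, obj_rele, and obj_destroy are illustrative names, not code from this change):

    #include <sys/types.h>
    #include <sys/atomic.h>

    /* Hypothetical reference-counted object, for illustration only. */
    typedef struct obj {
            volatile uint32_t obj_refcnt;
    } obj_t;

    extern void obj_destroy(obj_t *);

    static void
    obj_rele(obj_t *op)
    {
            /*
             * atomic_dec_32_nv() returns the decremented value; a nonzero
             * result means other holders remain, so only the caller that
             * sees the count reach zero tears the object down.
             */
            if (atomic_dec_32_nv(&op->obj_refcnt) != 0)
                    return;
            obj_destroy(op);
    }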
*** 14088,14098 ****
                          ASSERT(rgnp->rgn_refcnt >= 0);
                          rid = rgnp->rgn_id;
                          ASSERT(rid < maxids);
                          ASSERT(rarrp[rid] == rgnp);
                          ASSERT(rid < *nextidp);
!                         atomic_add_32((volatile uint_t *)&rgnp->rgn_refcnt, 1);
                          mutex_exit(&srdp->srd_mutex);
                          if (new_rgnp != NULL) {
                                  kmem_cache_free(region_cache, new_rgnp);
                          }
                          if (r_type == SFMMU_REGION_HME) {
--- 14087,14097 ----
                          ASSERT(rgnp->rgn_refcnt >= 0);
                          rid = rgnp->rgn_id;
                          ASSERT(rid < maxids);
                          ASSERT(rarrp[rid] == rgnp);
                          ASSERT(rid < *nextidp);
!                         atomic_inc_32((volatile uint_t *)&rgnp->rgn_refcnt);
                          mutex_exit(&srdp->srd_mutex);
                          if (new_rgnp != NULL) {
                                  kmem_cache_free(region_cache, new_rgnp);
                          }
                          if (r_type == SFMMU_REGION_HME) {
*** 14438,14448 ****
          if (r_type == SFMMU_REGION_HME) {
                  sfmmu_unlink_from_hmeregion(sfmmup, rgnp);
          }

          r_obj = rgnp->rgn_obj;
!         if (atomic_add_32_nv((volatile uint_t *)&rgnp->rgn_refcnt, -1)) {
                  return;
          }

          /*
           * looks like nobody uses this region anymore. Free it.
--- 14437,14447 ----
          if (r_type == SFMMU_REGION_HME) {
                  sfmmu_unlink_from_hmeregion(sfmmup, rgnp);
          }

          r_obj = rgnp->rgn_obj;
!         if (atomic_dec_32_nv((volatile uint_t *)&rgnp->rgn_refcnt)) {
                  return;
          }

          /*
           * looks like nobody uses this region anymore. Free it.
*** 14523,14533 ****
          ASSERT(rgnp->rgn_refcnt > 0);
          ASSERT(rgnp->rgn_id == rid);
          ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == SFMMU_REGION_HME);
          ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE));

!         atomic_add_32((volatile uint_t *)&rgnp->rgn_refcnt, 1);

          /* LINTED: constant in conditional context */
          SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 0);
          ASSERT(rlink != NULL);
          mutex_enter(&rgnp->rgn_mutex);
--- 14522,14532 ----
          ASSERT(rgnp->rgn_refcnt > 0);
          ASSERT(rgnp->rgn_id == rid);
          ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == SFMMU_REGION_HME);
          ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE));

!         atomic_inc_32((volatile uint_t *)&rgnp->rgn_refcnt);

          /* LINTED: constant in conditional context */
          SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 0);
          ASSERT(rlink != NULL);
          mutex_enter(&rgnp->rgn_mutex);
*** 15253,15264 ****
                  if (ret == 1) {
                          SF_SCD_INCR_REF(scdp);
                          mutex_exit(&srdp->srd_scd_mutex);
                          sfmmu_join_scd(scdp, sfmmup);
                          ASSERT(scdp->scd_refcnt >= 2);
!                         atomic_add_32((volatile uint32_t *)
!                             &scdp->scd_refcnt, -1);
                          return;
                  } else {
                          /*
                           * If the sfmmu region map is a subset of the scd
                           * region map, then the assumption is that this process
--- 15252,15262 ----
                  if (ret == 1) {
                          SF_SCD_INCR_REF(scdp);
                          mutex_exit(&srdp->srd_scd_mutex);
                          sfmmu_join_scd(scdp, sfmmup);
                          ASSERT(scdp->scd_refcnt >= 2);
!                         atomic_dec_32((volatile uint32_t *)&scdp->scd_refcnt);
                          return;
                  } else {
                          /*
                           * If the sfmmu region map is a subset of the scd
                           * region map, then the assumption is that this process
*** 15299,15309 ****
          SFMMU_STAT_ADD(sf_create_scd, 1);

          mutex_exit(&srdp->srd_scd_mutex);
          sfmmu_join_scd(new_scdp, sfmmup);
          ASSERT(new_scdp->scd_refcnt >= 2);
!         atomic_add_32((volatile uint32_t *)&new_scdp->scd_refcnt, -1);
  }

  /*
   * This routine is called by a process to remove itself from an SCD. It is
   * either called when the processes has detached from a segment or from
--- 15297,15307 ----
          SFMMU_STAT_ADD(sf_create_scd, 1);

          mutex_exit(&srdp->srd_scd_mutex);
          sfmmu_join_scd(new_scdp, sfmmup);
          ASSERT(new_scdp->scd_refcnt >= 2);
!         atomic_dec_32((volatile uint32_t *)&new_scdp->scd_refcnt);
  }

  /*
   * This routine is called by a process to remove itself from an SCD. It is
   * either called when the processes has detached from a segment or from