5045 use atomic_{inc,dec}_* instead of atomic_add_*

          --- old/usr/src/uts/common/fs/nfs/nfs4_vnops.c
          +++ new/usr/src/uts/common/fs/nfs/nfs4_vnops.c
------ 10482 lines elided ------
10483 10483           * and we can prevent the deadlock that would have occurred
10484 10484           * when nfs4_addmap() would have acquired it out of order.
10485 10485           *
10486 10486           * Since we are not protecting r_inmap by any lock, we do not
10487 10487           * hold any lock when we decrement it. We atomically decrement
10488 10488           * r_inmap after we release r_lkserlock.
10489 10489           */
10490 10490  
10491 10491          if (nfs_rw_enter_sig(&rp->r_rwlock, RW_WRITER, INTR4(vp)))
10492 10492                  return (EINTR);
10493      -        atomic_add_int(&rp->r_inmap, 1);
     10493 +        atomic_inc_uint(&rp->r_inmap);
10494 10494          nfs_rw_exit(&rp->r_rwlock);
10495 10495  
10496 10496          if (nfs_rw_enter_sig(&rp->r_lkserlock, RW_READER, INTR4(vp))) {
10497      -                atomic_add_int(&rp->r_inmap, -1);
     10497 +                atomic_dec_uint(&rp->r_inmap);
10498 10498                  return (EINTR);
10499 10499          }
10500 10500  
10501 10501  
10502 10502          if (vp->v_flag & VNOCACHE) {
10503 10503                  error = EAGAIN;
10504 10504                  goto done;
10505 10505          }
10506 10506  
10507 10507          /*
------ 87 lines elided ------
10595 10595          vn_a.cred = cr;
10596 10596          vn_a.amp = NULL;
10597 10597          vn_a.szc = 0;
10598 10598          vn_a.lgrp_mem_policy_flags = 0;
10599 10599  
10600 10600          error = as_map(as, *addrp, len, segvn_create, &vn_a);
10601 10601          as_rangeunlock(as);
10602 10602  
10603 10603  done:
10604 10604          nfs_rw_exit(&rp->r_lkserlock);
10605      -        atomic_add_int(&rp->r_inmap, -1);
     10605 +        atomic_dec_uint(&rp->r_inmap);
10606 10606          return (error);
10607 10607  }
10608 10608  
10609 10609  /*
10610 10610   * We're most likely dealing with a kernel module that likes to READ
10611 10611   * and mmap without OPENing the file (ie: lookup/read/mmap), so lets
10612 10612   * officially OPEN the file to create the necessary client state
10613 10613   * for bookkeeping of os_mmap_read/write counts.
10614 10614   *
10615 10615   * Since VOP_MAP only passes in a pointer to the vnode rather than
------ 5388 lines elided ------
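
The change above is mechanical, but for readers unfamiliar with the illumos atomic_ops(3C) family, the short userland sketch below (not part of the webrev; the counter name "inmap" is made up for illustration) shows the old and new spellings side by side: where a counter only ever moves by one, atomic_inc_uint()/atomic_dec_uint() state the intent directly instead of passing an explicit +1/-1 delta to atomic_add_int(), which is what this change does for r_inmap.

/*
 * Minimal userland sketch of the atomic_add_* -> atomic_{inc,dec}_*
 * pattern.  Not taken from the webrev; "inmap" is a stand-in counter.
 */
#include <sys/types.h>
#include <atomic.h>
#include <stdio.h>

static volatile uint_t inmap;   /* hypothetical mapping count */

int
main(void)
{
        /* Old style: add an explicit delta of +1 / -1. */
        atomic_add_int(&inmap, 1);
        atomic_add_int(&inmap, -1);

        /* New style: the single-step intent is explicit. */
        atomic_inc_uint(&inmap);
        (void) printf("inmap after inc: %u\n", inmap);
        atomic_dec_uint(&inmap);
        (void) printf("inmap after dec: %u\n", inmap);

        return (0);
}

Both forms are atomic with respect to other atomic_ops(3C) callers; the inc/dec variants simply remove the hand-written delta, which is the readability point of this change.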