5045 use atomic_{inc,dec}_* instead of atomic_add_*
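
At a glance, the change swaps calls like atomic_add_32(&x, -1) for the clearer atomic_dec_32(&x). A minimal sketch of the substitution, assuming an illumos/Solaris build where <atomic.h> declares these routines (the counter name is illustrative, not from the driver):

    #include <atomic.h>
    #include <stdio.h>
    #include <inttypes.h>

    static volatile uint32_t counter = 1;

    int
    main(void)
    {
            /* Old style: a decrement spelled as an add of -1. */
            atomic_add_32(&counter, -1);

            /* New style: say what is meant. */
            atomic_inc_32(&counter);
            atomic_dec_32(&counter);

            (void) printf("counter = %" PRIu32 "\n", counter);
            return (0);
    }

Both forms are atomic; the new spelling simply states the intent directly, which is the point of this cleanup.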

          --- old/usr/src/uts/common/io/ib/clients/rdsv3/ib_recv.c
          +++ new/usr/src/uts/common/io/ib/clients/rdsv3/ib_recv.c
(121 lines elided)
 122  122  
 123  123          if (!recv->r_ibinc) {
 124  124                  if (!atomic_add_unless(&rdsv3_ib_allocation, 1,
 125  125                      ic->i_max_recv_alloc)) {
 126  126                          rdsv3_ib_stats_inc(s_ib_rx_alloc_limit);
 127  127                          goto out;
 128  128                  }
 129  129                  recv->r_ibinc = kmem_cache_alloc(rdsv3_ib_incoming_slab,
 130  130                      KM_NOSLEEP);
 131  131                  if (recv->r_ibinc == NULL) {
 132      -                        atomic_add_32(&rdsv3_ib_allocation, -1);
      132 +                        atomic_dec_32(&rdsv3_ib_allocation);
 133  133                          goto out;
 134  134                  }
 135  135                  rdsv3_inc_init(&recv->r_ibinc->ii_inc, conn, conn->c_faddr);
 136  136                  recv->r_ibinc->ii_ibdev = ic->rds_ibdev;
 137  137                  recv->r_ibinc->ii_pool = ic->rds_ibdev->inc_pool;
 138  138          }
 139  139  
 140  140          if (!recv->r_frag) {
 141  141                  recv->r_frag = kmem_cache_alloc(ic->rds_ibdev->ib_frag_slab,
 142  142                      KM_NOSLEEP);
(4 lines elided)
 147  147          /* Data sge, structure copy */
 148  148          recv->r_sge[1] = recv->r_frag->f_sge;
 149  149  
 150  150          RDSV3_DPRINTF5("rdsv3_ib_recv_refill_one", "Return: conn: %p, recv: %p",
 151  151              conn, recv);
 152  152  
 153  153          return (0);
 154  154  out:
 155  155          if (recv->r_ibinc) {
 156  156                  kmem_cache_free(rdsv3_ib_incoming_slab, recv->r_ibinc);
 157      -                atomic_add_32(&rdsv3_ib_allocation, -1);
      157 +                atomic_dec_32(&rdsv3_ib_allocation);
 158  158                  recv->r_ibinc = NULL;
 159  159          }
 160  160          return (-ENOMEM);
 161  161  }
 162  162  
 163  163  /*
 164  164   * This tries to allocate and post unused work requests after making sure that
 165  165   * they have all the allocations they need to queue received fragments into
 166  166   * sockets.  The i_recv_mutex is held here so that ring_alloc and _unalloc
 167  167   * pairs don't go unmatched.
(730 lines elided)
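
For context on the hunk above: the refill path only bumps rdsv3_ib_allocation while it is still below ic->i_max_recv_alloc, which is what the atomic_add_unless() call expresses. A sketch of that bounded-increment semantics, built on atomic_cas_32() from <atomic.h>; the helper name is hypothetical and not part of the driver:

    #include <atomic.h>
    #include <sys/types.h>

    /* Bump *cp only while it is below limit; report whether we did. */
    static int
    try_inc_under_limit(volatile uint32_t *cp, uint32_t limit)
    {
            uint32_t old;

            do {
                    old = *cp;
                    if (old >= limit)
                            return (0);     /* at the ceiling; back off */
            } while (atomic_cas_32(cp, old, old + 1) != old);

            return (1);                     /* counter was incremented */
    }

    int
    main(void)
    {
            static volatile uint32_t allocs = 0;

            /* First bump succeeds; the second is refused at a limit of 1. */
            (void) try_inc_under_limit(&allocs, 1);
            return (try_inc_under_limit(&allocs, 1) == 0 ? 0 : 1);
    }

On the failure paths in the hunk, the matching atomic_dec_32() releases the slot reserved by the successful increment, keeping the counter balanced.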