Print this page
5045 use atomic_{inc,dec}_* instead of atomic_add_*


 159 
 160         RDSV3_DPRINTF4("rdsv3_send_xmit", "Enter(conn: %p)", conn);
 161 
 162         list_create(&to_be_dropped, sizeof (struct rdsv3_message),
 163             offsetof(struct rdsv3_message, m_conn_item));
 164 
 165         /*
 166          * sendmsg calls here after having queued its message on the send
 167          * queue.  We only have one task feeding the connection at a time.  If
 168          * another thread is already feeding the queue then we back off.  This
 169          * avoids blocking the caller and trading per-connection data between
 170          * caches per message.
 171          */
 172         if (!mutex_tryenter(&conn->c_send_lock)) {
 173                 RDSV3_DPRINTF4("rdsv3_send_xmit",
 174                     "Another thread running(conn: %p)", conn);
 175                 rdsv3_stats_inc(s_send_sem_contention);
 176                 ret = -ENOMEM;
 177                 goto out;
 178         }
 179         atomic_add_32(&conn->c_senders, 1);
 180 
 181         if (conn->c_trans->xmit_prepare)
 182                 conn->c_trans->xmit_prepare(conn);
 183 
 184         /*
 185          * spin trying to push headers and data down the connection until
 186          * the connection doesn't make forward progress.
 187          */
 188         while (--send_quota) {
 189                 /*
 190                  * See if we need to send a congestion map update if we're
 191                  * between sending messages.  The send_sem protects our sole
 192                  * use of c_map_offset and _bytes.
 193                  * Note this is used only by transports that define a special
 194                  * xmit_cong_map function. For all others, we allocate
 195                  * a cong_map message and treat it just like any other send.
 196                  */
 197                 if (conn->c_map_bytes) {
 198                         ret = conn->c_trans->xmit_cong_map(conn, conn->c_lcong,
 199                             conn->c_map_offset);


 550         /* No need to wake the app - caller does this */
 551 }
 552 
 553 /*
 554  * This is called from the IB send completion when we detect
 555  * a RDMA operation that failed with remote access error.
 556  * So speed is not an issue here.
 557  */
 558 struct rdsv3_message *
 559 rdsv3_send_get_message(struct rdsv3_connection *conn,
 560     struct rdsv3_rdma_op *op)
 561 {
 562         struct rdsv3_message *rm, *tmp, *found = NULL;
 563 
 564         RDSV3_DPRINTF4("rdsv3_send_get_message", "Enter(conn: %p)", conn);
 565 
 566         mutex_enter(&conn->c_lock);
 567 
 568         RDSV3_FOR_EACH_LIST_NODE_SAFE(rm, tmp, &conn->c_retrans, m_conn_item) {
 569                 if (rm->m_rdma_op == op) {
 570                         atomic_add_32(&rm->m_refcount, 1);
 571                         found = rm;
 572                         goto out;
 573                 }
 574         }
 575 
 576         RDSV3_FOR_EACH_LIST_NODE_SAFE(rm, tmp, &conn->c_send_queue,
 577             m_conn_item) {
 578                 if (rm->m_rdma_op == op) {
 579                         atomic_add_32(&rm->m_refcount, 1);
 580                         found = rm;
 581                         break;
 582                 }
 583         }
 584 
 585 out:
 586         mutex_exit(&conn->c_lock);
 587 
 588         return (found);
 589 }
 590 
 591 /*
 592  * This removes messages from the socket's list if they're on it.  The list
 593  * argument must be private to the caller, we must be able to modify it
 594  * without locks.  The messages must have a reference held for their
 595  * position on the list.  This function will drop that reference after
 596  * removing the messages from the 'messages' list regardless of if it found
 597  * the messages on the socket list or not.
 598  */
 599 void




 159 
 160         RDSV3_DPRINTF4("rdsv3_send_xmit", "Enter(conn: %p)", conn);
 161 
 162         list_create(&to_be_dropped, sizeof (struct rdsv3_message),
 163             offsetof(struct rdsv3_message, m_conn_item));
 164 
 165         /*
 166          * sendmsg calls here after having queued its message on the send
 167          * queue.  We only have one task feeding the connection at a time.  If
 168          * another thread is already feeding the queue then we back off.  This
 169          * avoids blocking the caller and trading per-connection data between
 170          * caches per message.
 171          */
 172         if (!mutex_tryenter(&conn->c_send_lock)) {
 173                 RDSV3_DPRINTF4("rdsv3_send_xmit",
 174                     "Another thread running(conn: %p)", conn);
 175                 rdsv3_stats_inc(s_send_sem_contention);
 176                 ret = -ENOMEM;
 177                 goto out;
 178         }
 179         atomic_inc_32(&conn->c_senders);
 180 
 181         if (conn->c_trans->xmit_prepare)
 182                 conn->c_trans->xmit_prepare(conn);
 183 
 184         /*
 185          * spin trying to push headers and data down the connection until
 186          * the connection doesn't make forward progress.
 187          */
 188         while (--send_quota) {
 189                 /*
 190                  * See if we need to send a congestion map update if we're
 191                  * between sending messages.  The send_sem protects our sole
 192                  * use of c_map_offset and _bytes.
 193                  * Note this is used only by transports that define a special
 194                  * xmit_cong_map function. For all others, we allocate
 195                  * a cong_map message and treat it just like any other send.
 196                  */
 197                 if (conn->c_map_bytes) {
 198                         ret = conn->c_trans->xmit_cong_map(conn, conn->c_lcong,
 199                             conn->c_map_offset);


 550         /* No need to wake the app - caller does this */
 551 }
 552 
 553 /*
 554  * This is called from the IB send completion when we detect
 555  * a RDMA operation that failed with remote access error.
 556  * So speed is not an issue here.
 557  */
 558 struct rdsv3_message *
 559 rdsv3_send_get_message(struct rdsv3_connection *conn,
 560     struct rdsv3_rdma_op *op)
 561 {
 562         struct rdsv3_message *rm, *tmp, *found = NULL;
 563 
 564         RDSV3_DPRINTF4("rdsv3_send_get_message", "Enter(conn: %p)", conn);
 565 
 566         mutex_enter(&conn->c_lock);
 567 
 568         RDSV3_FOR_EACH_LIST_NODE_SAFE(rm, tmp, &conn->c_retrans, m_conn_item) {
 569                 if (rm->m_rdma_op == op) {
 570                         atomic_inc_32(&rm->m_refcount);
 571                         found = rm;
 572                         goto out;
 573                 }
 574         }
 575 
 576         RDSV3_FOR_EACH_LIST_NODE_SAFE(rm, tmp, &conn->c_send_queue,
 577             m_conn_item) {
 578                 if (rm->m_rdma_op == op) {
 579                         atomic_inc_32(&rm->m_refcount);
 580                         found = rm;
 581                         break;
 582                 }
 583         }
 584 
 585 out:
 586         mutex_exit(&conn->c_lock);
 587 
 588         return (found);
 589 }
 590 
 591 /*
 592  * This removes messages from the socket's list if they're on it.  The list
 593  * argument must be private to the caller, we must be able to modify it
 594  * without locks.  The messages must have a reference held for their
 595  * position on the list.  This function will drop that reference after
 596  * removing the messages from the 'messages' list regardless of if it found
 597  * the messages on the socket list or not.
 598  */
 599 void