5045 use atomic_{inc,dec}_* instead of atomic_add_*
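The change is a mechanical substitution: every atomic_add_32() call whose delta is a constant +1 or -1 becomes the dedicated atomic_inc_32() or atomic_dec_32() primitive from atomic(3C). A minimal sketch of the pattern, using a standalone counter in place of hba->io_active (the helper names here are hypothetical, not part of the driver):

#include <sys/atomic.h>

/* Hypothetical stand-in for hba->io_active. */
static volatile uint32_t io_active;

static void
sample_issue(void)
{
	/* Old form: atomic_add_32(&io_active, 1); */
	atomic_inc_32(&io_active);
}

static void
sample_complete(void)
{
	/* Old form: atomic_add_32(&io_active, -1); */
	atomic_dec_32(&io_active);
}

Both forms are equivalent for a 32-bit counter; the inc/dec variants simply make the intent explicit and drop the unused delta argument. The affected hunks appear below, first in their original form and then with the updated calls.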

Before (atomic_add_*):

1974                         wq->release_depth = WQE_RELEASE_DEPTH;
1975                         wqe->WQEC = 1;
1976                 }
1977 
1978 
1979                 HBASTATS.IocbIssued[channelno]++;
1980 
1981                 /* Check for ULP pkt request */
1982                 if (sbp) {
1983                         mutex_enter(&sbp->mtx);
1984 
1985                         if (sbp->node == NULL) {
1986                                 /* Set node to base node by default */
1987                                 iocbq->node = (void *)&port->node_base;
1988                                 sbp->node = (void *)&port->node_base;
1989                         }
1990 
1991                         sbp->pkt_flags |= PACKET_IN_CHIPQ;
1992                         mutex_exit(&sbp->mtx);
1993 
1994                         atomic_add_32(&hba->io_active, 1);
1995                         sbp->xrip->flag |= EMLXS_XRI_PENDING_IO;
1996                 }
1997 
1998 
1999                 /* Free the local iocb if there is no sbp tracking it */
2000                 if (sbp) {
2001 #ifdef SFCT_SUPPORT
2002 #ifdef FCT_IO_TRACE
2003                         if (sbp->fct_cmd) {
2004                                 emlxs_fct_io_trace(port, sbp->fct_cmd,
2005                                     EMLXS_FCT_IOCB_ISSUED);
2006                                 emlxs_fct_io_trace(port, sbp->fct_cmd,
2007                                     icmd->ULPCOMMAND);
2008                         }
2009 #endif /* FCT_IO_TRACE */
2010 #endif /* SFCT_SUPPORT */
2011                         cp->hbaSendCmd_sbp++;
2012                         iocbq->channel = cp;
2013                 } else {
2014                         cp->hbaSendCmd++;


3852                 sbp->iotag = 0;
3853                 mutex_exit(&EMLXS_FCTAB_LOCK);
3854 
3855                 cp = sbp->channel;
3856                 bzero(&cqe, sizeof (CQE_CmplWQ_t));
3857                 cqe.RequestTag = i;
3858                 cqe.Status = IOSTAT_LOCAL_REJECT;
3859                 cqe.Parameter = IOERR_SEQUENCE_TIMEOUT;
3860 
3861                 cp->hbaCmplCmd_sbp++;
3862 
3863 #ifdef SFCT_SUPPORT
3864 #ifdef FCT_IO_TRACE
3865                 if (sbp->fct_cmd) {
3866                         emlxs_fct_io_trace(port, sbp->fct_cmd,
3867                             EMLXS_FCT_IOCB_COMPLETE);
3868                 }
3869 #endif /* FCT_IO_TRACE */
3870 #endif /* SFCT_SUPPORT */
3871 
3872                 atomic_add_32(&hba->io_active, -1);
3873 
3874                 /* Copy entry to sbp's iocbq */
3875                 iocbq = &sbp->iocbq;
3876                 emlxs_CQE_to_IOCB(hba, &cqe, sbp);
3877 
3878                 iocbq->next = NULL;
3879 
3880                 /* Exchange is no longer busy on-chip, free it */
3881                 emlxs_sli4_free_xri(hba, sbp, sbp->xrip, 1);
3882 
3883                 if (!(sbp->pkt_flags &
3884                     (PACKET_POLLED | PACKET_ALLOCATED))) {
3885                         /* Add the IOCB to the channel list */
3886                         mutex_enter(&cp->rsp_lock);
3887                         if (cp->rsp_head == NULL) {
3888                                 cp->rsp_head = iocbq;
3889                                 cp->rsp_tail = iocbq;
3890                         } else {
3891                                 cp->rsp_tail->next = iocbq;
3892                                 cp->rsp_tail = iocbq;


3946 static void
3947 emlxs_sli4_process_wqe_cmpl(emlxs_hba_t *hba, CQ_DESC_t *cq, CQE_CmplWQ_t *cqe)
3948 {
3949         emlxs_port_t *port = &PPORT;
3950         CHANNEL *cp;
3951         emlxs_buf_t *sbp;
3952         IOCBQ *iocbq;
3953         uint16_t request_tag;
3954 #ifdef SFCT_SUPPORT
3955         fct_cmd_t *fct_cmd;
3956         emlxs_buf_t *cmd_sbp;
3957 #endif /* SFCT_SUPPORT */
3958 
3959         request_tag = cqe->RequestTag;
3960 
3961         /* 1 to 1 mapping between CQ and channel */
3962         cp = cq->channelp;
3963 
3964         mutex_enter(&EMLXS_FCTAB_LOCK);
3965         sbp = hba->fc_table[request_tag];
3966         atomic_add_32(&hba->io_active, -1);
3967 
3968         if (sbp == STALE_PACKET) {
3969                 cp->hbaCmplCmd_sbp++;
3970                 mutex_exit(&EMLXS_FCTAB_LOCK);
3971                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3972                     "CQ ENTRY: Stale sbp. tag=%x. Dropping...", request_tag);
3973                 return;
3974         }
3975 
3976         if (!sbp || !(sbp->xrip)) {
3977                 cp->hbaCmplCmd++;
3978                 mutex_exit(&EMLXS_FCTAB_LOCK);
3979                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3980                     "CQ ENTRY: NULL sbp %p. tag=%x. Dropping...",
3981                     sbp, request_tag);
3982                 return;
3983         }
3984 
3985 #ifdef SLI4_FASTPATH_DEBUG
3986         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,


After (atomic_{inc,dec}_*):

1974                         wq->release_depth = WQE_RELEASE_DEPTH;
1975                         wqe->WQEC = 1;
1976                 }
1977 
1978 
1979                 HBASTATS.IocbIssued[channelno]++;
1980 
1981                 /* Check for ULP pkt request */
1982                 if (sbp) {
1983                         mutex_enter(&sbp->mtx);
1984 
1985                         if (sbp->node == NULL) {
1986                                 /* Set node to base node by default */
1987                                 iocbq->node = (void *)&port->node_base;
1988                                 sbp->node = (void *)&port->node_base;
1989                         }
1990 
1991                         sbp->pkt_flags |= PACKET_IN_CHIPQ;
1992                         mutex_exit(&sbp->mtx);
1993 
1994                         atomic_inc_32(&hba->io_active);
1995                         sbp->xrip->flag |= EMLXS_XRI_PENDING_IO;
1996                 }
1997 
1998 
1999                 /* Free the local iocb if there is no sbp tracking it */
2000                 if (sbp) {
2001 #ifdef SFCT_SUPPORT
2002 #ifdef FCT_IO_TRACE
2003                         if (sbp->fct_cmd) {
2004                                 emlxs_fct_io_trace(port, sbp->fct_cmd,
2005                                     EMLXS_FCT_IOCB_ISSUED);
2006                                 emlxs_fct_io_trace(port, sbp->fct_cmd,
2007                                     icmd->ULPCOMMAND);
2008                         }
2009 #endif /* FCT_IO_TRACE */
2010 #endif /* SFCT_SUPPORT */
2011                         cp->hbaSendCmd_sbp++;
2012                         iocbq->channel = cp;
2013                 } else {
2014                         cp->hbaSendCmd++;


3852                 sbp->iotag = 0;
3853                 mutex_exit(&EMLXS_FCTAB_LOCK);
3854 
3855                 cp = sbp->channel;
3856                 bzero(&cqe, sizeof (CQE_CmplWQ_t));
3857                 cqe.RequestTag = i;
3858                 cqe.Status = IOSTAT_LOCAL_REJECT;
3859                 cqe.Parameter = IOERR_SEQUENCE_TIMEOUT;
3860 
3861                 cp->hbaCmplCmd_sbp++;
3862 
3863 #ifdef SFCT_SUPPORT
3864 #ifdef FCT_IO_TRACE
3865                 if (sbp->fct_cmd) {
3866                         emlxs_fct_io_trace(port, sbp->fct_cmd,
3867                             EMLXS_FCT_IOCB_COMPLETE);
3868                 }
3869 #endif /* FCT_IO_TRACE */
3870 #endif /* SFCT_SUPPORT */
3871 
3872                 atomic_dec_32(&hba->io_active);
3873 
3874                 /* Copy entry to sbp's iocbq */
3875                 iocbq = &sbp->iocbq;
3876                 emlxs_CQE_to_IOCB(hba, &cqe, sbp);
3877 
3878                 iocbq->next = NULL;
3879 
3880                 /* Exchange is no longer busy on-chip, free it */
3881                 emlxs_sli4_free_xri(hba, sbp, sbp->xrip, 1);
3882 
3883                 if (!(sbp->pkt_flags &
3884                     (PACKET_POLLED | PACKET_ALLOCATED))) {
3885                         /* Add the IOCB to the channel list */
3886                         mutex_enter(&cp->rsp_lock);
3887                         if (cp->rsp_head == NULL) {
3888                                 cp->rsp_head = iocbq;
3889                                 cp->rsp_tail = iocbq;
3890                         } else {
3891                                 cp->rsp_tail->next = iocbq;
3892                                 cp->rsp_tail = iocbq;


3946 static void
3947 emlxs_sli4_process_wqe_cmpl(emlxs_hba_t *hba, CQ_DESC_t *cq, CQE_CmplWQ_t *cqe)
3948 {
3949         emlxs_port_t *port = &PPORT;
3950         CHANNEL *cp;
3951         emlxs_buf_t *sbp;
3952         IOCBQ *iocbq;
3953         uint16_t request_tag;
3954 #ifdef SFCT_SUPPORT
3955         fct_cmd_t *fct_cmd;
3956         emlxs_buf_t *cmd_sbp;
3957 #endif /* SFCT_SUPPORT */
3958 
3959         request_tag = cqe->RequestTag;
3960 
3961         /* 1 to 1 mapping between CQ and channel */
3962         cp = cq->channelp;
3963 
3964         mutex_enter(&EMLXS_FCTAB_LOCK);
3965         sbp = hba->fc_table[request_tag];
3966         atomic_dec_32(&hba->io_active);
3967 
3968         if (sbp == STALE_PACKET) {
3969                 cp->hbaCmplCmd_sbp++;
3970                 mutex_exit(&EMLXS_FCTAB_LOCK);
3971                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3972                     "CQ ENTRY: Stale sbp. tag=%x. Dropping...", request_tag);
3973                 return;
3974         }
3975 
3976         if (!sbp || !(sbp->xrip)) {
3977                 cp->hbaCmplCmd++;
3978                 mutex_exit(&EMLXS_FCTAB_LOCK);
3979                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3980                     "CQ ENTRY: NULL sbp %p. tag=%x. Dropping...",
3981                     sbp, request_tag);
3982                 return;
3983         }
3984 
3985 #ifdef SLI4_FASTPATH_DEBUG
3986         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
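
Taken together, the hunks keep hba->io_active balanced: the issue path increments it once the WQE is queued to the chip, and both the flush/timeout path and the WQE completion handler decrement it. The driver only maintains the count, so the void atomic_inc_32()/atomic_dec_32() forms are sufficient; if a caller needed the updated value (for example, to detect that the last outstanding I/O just drained), atomic(3C) also provides *_nv variants that return it. A small sketch of that option, with hypothetical names:

#include <sys/types.h>
#include <sys/atomic.h>

static volatile uint32_t io_active;	/* stands in for hba->io_active */

/*
 * Hypothetical helper: atomic_dec_32_nv() atomically decrements the
 * counter and returns the new value, so "did we just go idle?" can be
 * answered without a separate, racy read of the counter.
 */
static boolean_t
sample_complete_and_check_idle(void)
{
	return (atomic_dec_32_nv(&io_active) == 0 ? B_TRUE : B_FALSE);
}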