5045 use atomic_{inc,dec}_* instead of atomic_add_*
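
The fix replaces atomic_add_32() calls that pass a literal 1 or -1 with
the dedicated atomic_inc_32()/atomic_dec_32() entry points from
atomic_ops(3C). A minimal sketch of the idiom, using an illustrative
counter rather than one from the driver:

	#include <sys/atomic.h>

	volatile uint32_t outstanding;	/* illustrative counter */

	/* Before: add or subtract a literal one. */
	atomic_add_32(&outstanding, 1);
	atomic_add_32(&outstanding, -1);

	/* After: same semantics, intent stated directly. */
	atomic_inc_32(&outstanding);
	atomic_dec_32(&outstanding);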

------- old -------

1825  * pPOSTCARD_SEND:      Pointer to ARC send postcard
1826  *
1827  * This routine posts an ARC send postcard to the request post FIFO of a
1828  * specific ARC adapter.
1829  */
1830 static int
1831 arcmsr_post_ccb(struct ACB *acb, struct CCB *ccb)
1832 {
1833         uint32_t cdb_phyaddr_pattern = ccb->cdb_phyaddr_pattern;
1834         struct scsi_pkt *pkt = ccb->pkt;
1835         struct ARCMSR_CDB *arcmsr_cdb;
1836         uint_t pkt_flags = pkt->pkt_flags;
1837 
1838         arcmsr_cdb = &ccb->arcmsr_cdb;
1839 
1840         /* TODO: Use correct offset and size for syncing? */
1841         if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0, DDI_DMA_SYNC_FORDEV) ==
1842             DDI_FAILURE)
1843                 return (DDI_FAILURE);
1844 
1845         atomic_add_32(&acb->ccboutstandingcount, 1);
1846         ccb->ccb_time = (time_t)(ddi_get_time() + pkt->pkt_time);
1847 
1848         ccb->ccb_state = ARCMSR_CCB_START;
1849         switch (acb->adapter_type) {
1850         case ACB_ADAPTER_TYPE_A:
1851         {
1852                 struct HBA_msgUnit *phbamu;
1853 
1854                 phbamu = (struct HBA_msgUnit *)acb->pmu;
1855                 if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
1856                         CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1857                             &phbamu->inbound_queueport,
1858                             cdb_phyaddr_pattern |
1859                             ARCMSR_CCBPOST_FLAG_SGL_BSIZE);
1860                 } else {
1861                         CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1862                             &phbamu->inbound_queueport, cdb_phyaddr_pattern);
1863                 }
1864                 if (pkt_flags & FLAG_NOINTR)
1865                         arcmsr_polling_hba_ccbdone(acb, ccb);


1938             STATE_SENT_CMD | STATE_GOT_STATUS);
1939 
1940         if ((ccb->ccb_flags & CCB_FLAG_DMACONSISTENT) &&
1941             (pkt->pkt_state & STATE_XFERRED_DATA)) {
1942                 (void) ddi_dma_sync(ccb->pkt_dma_handle, 0, 0,
1943                     DDI_DMA_SYNC_FORCPU);
1944         }
1945         /*
1946          * TODO: This represents a potential race condition, and is
1947          * ultimately a poor design decision.  Revisit this code
1948          * and solve the mutex ownership issue correctly.
1949          */
1950         if (mutex_owned(&acb->isr_mutex)) {
1951                 mutex_exit(&acb->isr_mutex);
1952                 scsi_hba_pkt_comp(pkt);
1953                 mutex_enter(&acb->isr_mutex);
1954         } else {
1955                 scsi_hba_pkt_comp(pkt);
1956         }
1957         if (flag == 1) {
1958                 atomic_add_32(&acb->ccboutstandingcount, -1);
1959         }
1960 }
1961 
1962 static void
1963 arcmsr_report_ccb_state(struct ACB *acb, struct CCB *ccb, boolean_t error)
1964 {
1965         int id, lun;
1966 
1967         ccb->ccb_state |= ARCMSR_CCB_DONE;
1968         id = ccb->pkt->pkt_address.a_target;
1969         lun = ccb->pkt->pkt_address.a_lun;
1970 
1971         if (!error) {
1972                 if (acb->devstate[id][lun] == ARECA_RAID_GONE) {
1973                         acb->devstate[id][lun] = ARECA_RAID_GOOD;
1974                 }
1975                 ccb->pkt->pkt_reason = CMD_CMPLT;
1976                 ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
1977                 arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
1978                     &ccb->complete_queue_pointer, &acb->ccb_complete_list);

------- new -------

1825  * pPOSTCARD_SEND:      Pointer to ARC send postcard
1826  *
1827  * This routine posts an ARC send postcard to the request post FIFO of a
1828  * specific ARC adapter.
1829  */
1830 static int
1831 arcmsr_post_ccb(struct ACB *acb, struct CCB *ccb)
1832 {
1833         uint32_t cdb_phyaddr_pattern = ccb->cdb_phyaddr_pattern;
1834         struct scsi_pkt *pkt = ccb->pkt;
1835         struct ARCMSR_CDB *arcmsr_cdb;
1836         uint_t pkt_flags = pkt->pkt_flags;
1837 
1838         arcmsr_cdb = &ccb->arcmsr_cdb;
1839 
1840         /* TODO: Use correct offset and size for syncing? */
1841         if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0, DDI_DMA_SYNC_FORDEV) ==
1842             DDI_FAILURE)
1843                 return (DDI_FAILURE);
1844 
1845         atomic_inc_32(&acb->ccboutstandingcount);
1846         ccb->ccb_time = (time_t)(ddi_get_time() + pkt->pkt_time);
1847 
1848         ccb->ccb_state = ARCMSR_CCB_START;
1849         switch (acb->adapter_type) {
1850         case ACB_ADAPTER_TYPE_A:
1851         {
1852                 struct HBA_msgUnit *phbamu;
1853 
1854                 phbamu = (struct HBA_msgUnit *)acb->pmu;
1855                 if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
1856                         CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1857                             &phbamu->inbound_queueport,
1858                             cdb_phyaddr_pattern |
1859                             ARCMSR_CCBPOST_FLAG_SGL_BSIZE);
1860                 } else {
1861                         CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1862                             &phbamu->inbound_queueport, cdb_phyaddr_pattern);
1863                 }
1864                 if (pkt_flags & FLAG_NOINTR)
1865                         arcmsr_polling_hba_ccbdone(acb, ccb);


1938             STATE_SENT_CMD | STATE_GOT_STATUS);
1939 
1940         if ((ccb->ccb_flags & CCB_FLAG_DMACONSISTENT) &&
1941             (pkt->pkt_state & STATE_XFERRED_DATA)) {
1942                 (void) ddi_dma_sync(ccb->pkt_dma_handle, 0, 0,
1943                     DDI_DMA_SYNC_FORCPU);
1944         }
1945         /*
1946          * TODO: This represents a potential race condition, and is
1947          * ultimately a poor design decision.  Revisit this code
1948          * and solve the mutex ownership issue correctly.
1949          */
1950         if (mutex_owned(&acb->isr_mutex)) {
1951                 mutex_exit(&acb->isr_mutex);
1952                 scsi_hba_pkt_comp(pkt);
1953                 mutex_enter(&acb->isr_mutex);
1954         } else {
1955                 scsi_hba_pkt_comp(pkt);
1956         }
1957         if (flag == 1) {
1958                 atomic_dec_32(&acb->ccboutstandingcount);
1959         }
1960 }
1961 
1962 static void
1963 arcmsr_report_ccb_state(struct ACB *acb, struct CCB *ccb, boolean_t error)
1964 {
1965         int id, lun;
1966 
1967         ccb->ccb_state |= ARCMSR_CCB_DONE;
1968         id = ccb->pkt->pkt_address.a_target;
1969         lun = ccb->pkt->pkt_address.a_lun;
1970 
1971         if (!error) {
1972                 if (acb->devstate[id][lun] == ARECA_RAID_GONE) {
1973                         acb->devstate[id][lun] = ARECA_RAID_GOOD;
1974                 }
1975                 ccb->pkt->pkt_reason = CMD_CMPLT;
1976                 ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
1977                 arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
1978                     &ccb->complete_queue_pointer, &acb->ccb_complete_list);
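
The TODO at line 1840 asks for a correct offset and size: as written,
ddi_dma_sync() is called with offset 0 and length 0, which synchronizes
the entire CCB pool for every command posted. A hedged sketch of
syncing only the CCB being posted; acb->ccb_pool_base and the offset
arithmetic are assumptions about the pool layout, not fields taken
from this driver:

	/*
	 * Hypothetical: sync just this CCB's slice of the pool.
	 * ccb_pool_base is an assumed field naming the pool's
	 * kernel virtual base address.
	 */
	off_t offset = (off_t)((caddr_t)ccb - (caddr_t)acb->ccb_pool_base);

	if (ddi_dma_sync(acb->ccbs_pool_handle, offset,
	    sizeof (struct CCB), DDI_DMA_SYNC_FORDEV) == DDI_FAILURE)
		return (DDI_FAILURE);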