313 return (arcmsr_do_ddi_attach(dev_info,
314 ddi_get_instance(dev_info)));
315 case DDI_RESUME:
316 /*
317 * There is no hardware state to restart and no
318 * timeouts to restart since we didn't DDI_SUSPEND with
319 	 * active cmds or active timeouts. We just need to
320 	 * unblock waiting threads and restart I/O.
321 */
322 hba_trans = ddi_get_driver_private(dev_info);
323 if (hba_trans == NULL) {
324 return (DDI_FAILURE);
325 }
326 acb = hba_trans->tran_hba_private;
327 mutex_enter(&acb->acb_mutex);
328 arcmsr_iop_init(acb);
329
330 /* restart ccbs "timeout" watchdog */
331 acb->timeout_count = 0;
332 acb->timeout_id = timeout(arcmsr_ccbs_timeout, (caddr_t)acb,
333 (ARCMSR_TIMEOUT_WATCH * drv_usectohz(1000000)));
334 acb->timeout_sc_id = timeout(arcmsr_devMap_monitor,
335 (caddr_t)acb,
336 (ARCMSR_DEV_MAP_WATCH * drv_usectohz(1000000)));
337 mutex_exit(&acb->acb_mutex);
338 return (DDI_SUCCESS);
339
340 default:
341 return (DDI_FAILURE);
342 }
343 }
344
345 /*
346 * Function: arcmsr_detach(9E)
347 * Description: Remove all device allocation and system resources, disable
348 * device interrupt.
349 * Input: dev_info_t *dev_info
350 * ddi_detach_cmd_t cmd
351 * Output: Return DDI_SUCCESS if done,
352  * 		else return DDI_FAILURE
353 */
354 static int
355 arcmsr_detach(dev_info_t *dev_info, ddi_detach_cmd_t cmd) {
356
2937 /*
2938 * The driver should attach this instance of the device, and
2939 * perform error cleanup if necessary
2940 */
2941 if (scsi_hba_attach_setup(dev_info, &arcmsr_dma_attr,
2942 hba_trans, SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
2943 arcmsr_warn(acb, "scsi_hba_attach_setup failed");
2944 goto error_level_5;
2945 }
2946
2947 /* Create a taskq for dealing with dr events */
2948 if ((acb->taskq = ddi_taskq_create(dev_info, "arcmsr_dr_taskq", 1,
2949 TASKQ_DEFAULTPRI, 0)) == NULL) {
2950 arcmsr_warn(acb, "ddi_taskq_create failed");
2951 goto error_level_8;
2952 }
2953
2954 acb->timeout_count = 0;
2955 /* active ccbs "timeout" watchdog */
2956 acb->timeout_id = timeout(arcmsr_ccbs_timeout, (caddr_t)acb,
2957 (ARCMSR_TIMEOUT_WATCH * drv_usectohz(1000000)));
2958 acb->timeout_sc_id = timeout(arcmsr_devMap_monitor, (caddr_t)acb,
2959 (ARCMSR_DEV_MAP_WATCH * drv_usectohz(1000000)));
2960
2961 /* report device info */
2962 ddi_report_dev(dev_info);
2963
2964 return (DDI_SUCCESS);
2965
2966 error_level_8:
2967
2968 error_level_7:
2969 error_level_6:
2970 (void) scsi_hba_detach(dev_info);
2971
2972 error_level_5:
2973 arcmsr_remove_intr(acb);
2974
2975 error_level_3:
2976 error_level_4:
2977 if (acb->scsi_hba_transport)
2978 scsi_hba_tran_free(acb->scsi_hba_transport);
2979
3145 static void
3146 arcmsr_ccbs_timeout(void* arg)
3147 {
3148 struct ACB *acb = (struct ACB *)arg;
3149 struct CCB *ccb;
3150 int i, instance, timeout_count = 0;
3151 uint32_t intmask_org;
3152 time_t current_time = ddi_get_time();
3153
3154 intmask_org = arcmsr_disable_allintr(acb);
3155 mutex_enter(&acb->isr_mutex);
3156 if (acb->ccboutstandingcount != 0) {
3157 /* check each ccb */
3158 i = ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
3159 DDI_DMA_SYNC_FORKERNEL);
3160 if (i != DDI_SUCCESS) {
3161 if ((acb->timeout_id != 0) &&
3162 ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
3163 /* do pkt timeout check each 60 secs */
3164 acb->timeout_id = timeout(arcmsr_ccbs_timeout,
3165 (void*)acb, (ARCMSR_TIMEOUT_WATCH *
3166 drv_usectohz(1000000)));
3167 }
3168 mutex_exit(&acb->isr_mutex);
3169 arcmsr_enable_allintr(acb, intmask_org);
3170 return;
3171 }
3172 instance = ddi_get_instance(acb->dev_info);
3173 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3174 ccb = acb->pccb_pool[i];
3175 if (ccb->acb != acb) {
3176 break;
3177 }
3178 if (ccb->ccb_state == ARCMSR_CCB_FREE) {
3179 continue;
3180 }
3181 if (ccb->pkt == NULL) {
3182 continue;
3183 }
3184 if (ccb->pkt->pkt_time == 0) {
3185 continue;
3186 }
3196 arcmsr_warn(acb,
3197 "scsi target %d lun %d cmd=0x%x "
3198 "command timeout, ccb=0x%p",
3199 instance, id, lun, *cdb, (void *)ccb);
3200 ccb->ccb_state = ARCMSR_CCB_TIMEOUT;
3201 ccb->pkt->pkt_reason = CMD_TIMEOUT;
3202 ccb->pkt->pkt_statistics = STAT_TIMEOUT;
3203 /* acb->devstate[id][lun] = ARECA_RAID_GONE; */
3204 arcmsr_ccb_complete(ccb, 1);
3205 continue;
3206 } else if ((ccb->ccb_state & ARCMSR_CCB_CAN_BE_FREE) ==
3207 ARCMSR_CCB_CAN_BE_FREE) {
3208 arcmsr_free_ccb(ccb);
3209 }
3210 }
3211 }
3212 if ((acb->timeout_id != 0) &&
3213 ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
3214 /* do pkt timeout check each 60 secs */
3215 acb->timeout_id = timeout(arcmsr_ccbs_timeout,
3216 (void*)acb, (ARCMSR_TIMEOUT_WATCH * drv_usectohz(1000000)));
3217 }
3218 mutex_exit(&acb->isr_mutex);
3219 arcmsr_enable_allintr(acb, intmask_org);
3220 }
3221
3222 static void
3223 arcmsr_abort_dr_ccbs(struct ACB *acb, uint16_t target, uint8_t lun)
3224 {
3225 struct CCB *ccb;
3226 uint32_t intmask_org;
3227 int i;
3228
3229 /* disable all outbound interrupts */
3230 intmask_org = arcmsr_disable_allintr(acb);
3231 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3232 ccb = acb->pccb_pool[i];
3233 if (ccb->ccb_state == ARCMSR_CCB_START) {
3234 if ((target == ccb->pkt->pkt_address.a_target) &&
3235 (lun == ccb->pkt->pkt_address.a_lun)) {
3236 ccb->ccb_state = ARCMSR_CCB_ABORTED;
3414 case ACB_ADAPTER_TYPE_C:
3415 {
3416 struct HBC_msgUnit *phbcmu;
3417
3418 phbcmu = (struct HBC_msgUnit *)acb->pmu;
3419 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3420 &phbcmu->inbound_msgaddr0,
3421 ARCMSR_INBOUND_MESG0_GET_CONFIG);
3422 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3423 &phbcmu->inbound_doorbell,
3424 ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3425 break;
3426 }
3427
3428 }
3429
3430 if ((acb->timeout_id != 0) &&
3431 ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
3432 /* do pkt timeout check each 5 secs */
3433 acb->timeout_id = timeout(arcmsr_devMap_monitor, (void*)acb,
3434 (ARCMSR_DEV_MAP_WATCH * drv_usectohz(1000000)));
3435 }
3436 }
3437
3438
3439 static uint32_t
3440 arcmsr_disable_allintr(struct ACB *acb) {
3441
3442 uint32_t intmask_org;
3443
3444 switch (acb->adapter_type) {
3445 case ACB_ADAPTER_TYPE_A:
3446 {
3447 struct HBA_msgUnit *phbamu;
3448
3449 phbamu = (struct HBA_msgUnit *)acb->pmu;
3450 /* disable all outbound interrupt */
3451 intmask_org = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3452 &phbamu->outbound_intmask);
3453 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3454 &phbamu->outbound_intmask,
|
313 return (arcmsr_do_ddi_attach(dev_info,
314 ddi_get_instance(dev_info)));
315 case DDI_RESUME:
316 /*
317 * There is no hardware state to restart and no
318 * timeouts to restart since we didn't DDI_SUSPEND with
319 	 * active cmds or active timeouts. We just need to
320 	 * unblock waiting threads and restart I/O.
321 */
322 hba_trans = ddi_get_driver_private(dev_info);
323 if (hba_trans == NULL) {
324 return (DDI_FAILURE);
325 }
326 acb = hba_trans->tran_hba_private;
327 mutex_enter(&acb->acb_mutex);
328 arcmsr_iop_init(acb);
329
330 /* restart ccbs "timeout" watchdog */
331 acb->timeout_count = 0;
332 acb->timeout_id = timeout(arcmsr_ccbs_timeout, (caddr_t)acb,
333 drv_sectohz(ARCMSR_TIMEOUT_WATCH));
334 acb->timeout_sc_id = timeout(arcmsr_devMap_monitor,
335 (caddr_t)acb,
336 drv_sectohz(ARCMSR_DEV_MAP_WATCH));
337 mutex_exit(&acb->acb_mutex);
338 return (DDI_SUCCESS);
339
340 default:
341 return (DDI_FAILURE);
342 }
343 }
344
345 /*
346 * Function: arcmsr_detach(9E)
347 * Description: Remove all device allocation and system resources, disable
348 * device interrupt.
349 * Input: dev_info_t *dev_info
350 * ddi_detach_cmd_t cmd
351 * Output: Return DDI_SUCCESS if done,
352  * 		else return DDI_FAILURE
353 */
354 static int
355 arcmsr_detach(dev_info_t *dev_info, ddi_detach_cmd_t cmd) {
356
2937 /*
2938 * The driver should attach this instance of the device, and
2939 * perform error cleanup if necessary
2940 */
2941 if (scsi_hba_attach_setup(dev_info, &arcmsr_dma_attr,
2942 hba_trans, SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
2943 arcmsr_warn(acb, "scsi_hba_attach_setup failed");
2944 goto error_level_5;
2945 }
2946
2947 /* Create a taskq for dealing with dr events */
2948 if ((acb->taskq = ddi_taskq_create(dev_info, "arcmsr_dr_taskq", 1,
2949 TASKQ_DEFAULTPRI, 0)) == NULL) {
2950 arcmsr_warn(acb, "ddi_taskq_create failed");
2951 goto error_level_8;
2952 }
2953
2954 acb->timeout_count = 0;
2955 /* active ccbs "timeout" watchdog */
2956 acb->timeout_id = timeout(arcmsr_ccbs_timeout, (caddr_t)acb,
2957 drv_sectohz(ARCMSR_TIMEOUT_WATCH));
2958 acb->timeout_sc_id = timeout(arcmsr_devMap_monitor, (caddr_t)acb,
2959 drv_sectohz(ARCMSR_DEV_MAP_WATCH));
2960
2961 /* report device info */
2962 ddi_report_dev(dev_info);
2963
2964 return (DDI_SUCCESS);
2965
2966 error_level_8:
2967
2968 error_level_7:
2969 error_level_6:
2970 (void) scsi_hba_detach(dev_info);
2971
2972 error_level_5:
2973 arcmsr_remove_intr(acb);
2974
2975 error_level_3:
2976 error_level_4:
2977 if (acb->scsi_hba_transport)
2978 scsi_hba_tran_free(acb->scsi_hba_transport);
2979
3145 static void
3146 arcmsr_ccbs_timeout(void* arg)
3147 {
3148 struct ACB *acb = (struct ACB *)arg;
3149 struct CCB *ccb;
3150 int i, instance, timeout_count = 0;
3151 uint32_t intmask_org;
3152 time_t current_time = ddi_get_time();
3153
3154 intmask_org = arcmsr_disable_allintr(acb);
3155 mutex_enter(&acb->isr_mutex);
3156 if (acb->ccboutstandingcount != 0) {
3157 /* check each ccb */
3158 i = ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
3159 DDI_DMA_SYNC_FORKERNEL);
3160 if (i != DDI_SUCCESS) {
3161 if ((acb->timeout_id != 0) &&
3162 ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
3163 /* do pkt timeout check each 60 secs */
3164 acb->timeout_id = timeout(arcmsr_ccbs_timeout,
3165 (void*)acb, drv_sectohz(ARCMSR_TIMEOUT_WATCH));
3166 }
3167 mutex_exit(&acb->isr_mutex);
3168 arcmsr_enable_allintr(acb, intmask_org);
3169 return;
3170 }
3171 instance = ddi_get_instance(acb->dev_info);
3172 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3173 ccb = acb->pccb_pool[i];
3174 if (ccb->acb != acb) {
3175 break;
3176 }
3177 if (ccb->ccb_state == ARCMSR_CCB_FREE) {
3178 continue;
3179 }
3180 if (ccb->pkt == NULL) {
3181 continue;
3182 }
3183 if (ccb->pkt->pkt_time == 0) {
3184 continue;
3185 }
3195 arcmsr_warn(acb,
3196 "scsi target %d lun %d cmd=0x%x "
3197 "command timeout, ccb=0x%p",
3198 instance, id, lun, *cdb, (void *)ccb);
3199 ccb->ccb_state = ARCMSR_CCB_TIMEOUT;
3200 ccb->pkt->pkt_reason = CMD_TIMEOUT;
3201 ccb->pkt->pkt_statistics = STAT_TIMEOUT;
3202 /* acb->devstate[id][lun] = ARECA_RAID_GONE; */
3203 arcmsr_ccb_complete(ccb, 1);
3204 continue;
3205 } else if ((ccb->ccb_state & ARCMSR_CCB_CAN_BE_FREE) ==
3206 ARCMSR_CCB_CAN_BE_FREE) {
3207 arcmsr_free_ccb(ccb);
3208 }
3209 }
3210 }
3211 if ((acb->timeout_id != 0) &&
3212 ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
3213 /* do pkt timeout check each 60 secs */
3214 acb->timeout_id = timeout(arcmsr_ccbs_timeout,
3215 (void*)acb, drv_sectohz(ARCMSR_TIMEOUT_WATCH));
3216 }
3217 mutex_exit(&acb->isr_mutex);
3218 arcmsr_enable_allintr(acb, intmask_org);
3219 }
3220
3221 static void
3222 arcmsr_abort_dr_ccbs(struct ACB *acb, uint16_t target, uint8_t lun)
3223 {
3224 struct CCB *ccb;
3225 uint32_t intmask_org;
3226 int i;
3227
3228 /* disable all outbound interrupts */
3229 intmask_org = arcmsr_disable_allintr(acb);
3230 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3231 ccb = acb->pccb_pool[i];
3232 if (ccb->ccb_state == ARCMSR_CCB_START) {
3233 if ((target == ccb->pkt->pkt_address.a_target) &&
3234 (lun == ccb->pkt->pkt_address.a_lun)) {
3235 ccb->ccb_state = ARCMSR_CCB_ABORTED;
3413 case ACB_ADAPTER_TYPE_C:
3414 {
3415 struct HBC_msgUnit *phbcmu;
3416
3417 phbcmu = (struct HBC_msgUnit *)acb->pmu;
3418 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3419 &phbcmu->inbound_msgaddr0,
3420 ARCMSR_INBOUND_MESG0_GET_CONFIG);
3421 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3422 &phbcmu->inbound_doorbell,
3423 ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3424 break;
3425 }
3426
3427 }
3428
3429 if ((acb->timeout_id != 0) &&
3430 ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
3431 /* do pkt timeout check each 5 secs */
3432 acb->timeout_id = timeout(arcmsr_devMap_monitor, (void*)acb,
3433 drv_sectohz(ARCMSR_DEV_MAP_WATCH));
3434 }
3435 }
3436
3437
3438 static uint32_t
3439 arcmsr_disable_allintr(struct ACB *acb) {
3440
3441 uint32_t intmask_org;
3442
3443 switch (acb->adapter_type) {
3444 case ACB_ADAPTER_TYPE_A:
3445 {
3446 struct HBA_msgUnit *phbamu;
3447
3448 phbamu = (struct HBA_msgUnit *)acb->pmu;
3449 /* disable all outbound interrupt */
3450 intmask_org = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3451 &phbamu->outbound_intmask);
3452 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3453 &phbamu->outbound_intmask,
|