1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
  23  * Use is subject to license terms.
  24  * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
  25  */
  26 
  27 
  28 /*
  29  * ISSUES
  30  *
  31  * - more consistent error messages
  32  * - report name of device on errors?
  33  * - if wide target renegotiates sync, back to narrow?
  34  * - last_msgout is not accurate ????
  35  * - resolve XXXX
  36  * - improve msg reject code (use special msg reject handler)
  37  * - better use of IDE message
  38  * - keep track of whether ATN remains asserted while the target does
  39  *   not go into a msg-out phase
  40  * - improve comments
  41  * - no slave accesses when the start address is odd and dma hasn't
  42  *   started; this affects asserting ATN
  43  */
  44 
  45 /*
  46  * fas - QLogic fas366 wide/fast SCSI Processor HBA driver with
  47  *      tagged and non-tagged queueing support
  48  */
  49 #if defined(lint) && !defined(DEBUG)
  50 #define DEBUG   1
  51 #define FASDEBUG
  52 #endif
  53 
  54 #define DMA_REG_TRACING         /* enable dma register access tracing */
  55 
  56 
  57 /*
  58  * standard header files
  59  */
  60 #include <sys/note.h>
  61 #include <sys/scsi/scsi.h>
  62 #include <sys/file.h>
  63 #include <sys/vtrace.h>
  64 
  65 /*
  66  * private header files
  67  */
  68 #include <sys/scsi/adapters/fasdma.h>
  69 #include <sys/scsi/adapters/fasreg.h>
  70 #include <sys/scsi/adapters/fasvar.h>
  71 #include <sys/scsi/adapters/fascmd.h>
  72 #include <sys/scsi/impl/scsi_reset_notify.h>
  73 
  74 /*
  75  * tunables
  76  */
  77 static int              fas_selection_timeout = 250; /* 250 milliseconds */
  78 static uchar_t          fas_default_offset = DEFAULT_OFFSET;
  79 
  80 /*
  81  * needed for presto support, do not remove
  82  */
  83 static int              fas_enable_sbus64 = 1;
  84 
  85 #ifdef  FASDEBUG
  86 int                     fasdebug = 0;
  87 int                     fasdebug_instance = -1; /* debug all instances */
  88 static int              fas_burstsizes_limit = -1;
  89 static int              fas_no_sync_wide_backoff = 0;
  90 #endif  /* FASDEBUG */
  91 
  92 /*
  93  * Local static data protected by global mutex
  94  */
  95 static kmutex_t         fas_global_mutex; /* to allow concurrent attach */
  96 
  97 static int              fas_scsi_watchdog_tick; /* in seconds, for all  */
  98                                         /* instances                    */
  99 static clock_t          fas_tick;       /* fas_watch() interval in Hz   */
 100 static timeout_id_t     fas_reset_watch; /* timeout id for reset watch  */
 101 static timeout_id_t     fas_timeout_id = 0;
 102 static int              fas_timeout_initted = 0;
 103 
 104 static krwlock_t        fas_global_rwlock;
 105 
 106 static void             *fas_state;     /* soft state ptr               */
 107 static struct fas       *fas_head;      /* link all softstate structures */
 108 static struct fas       *fas_tail;      /* for fas_watch()              */
 109 
 110 static kmutex_t         fas_log_mutex;
 111 static char             fas_log_buf[256];
 112 _NOTE(MUTEX_PROTECTS_DATA(fas_global_mutex, fas_reset_watch))
 113 _NOTE(DATA_READABLE_WITHOUT_LOCK(fas_state fas_head fas_tail \
 114         fas_scsi_watchdog_tick fas_tick))
 115 _NOTE(SCHEME_PROTECTS_DATA("safe sharing", fas::f_quiesce_timeid))
 116 
 117 /*
 118  * dma attribute structure for scsi engine
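      * the initializer below follows the ddi_dma_attr_t field order:
      * version, addr_lo, addr_hi, count_max, align, burstsizes, minxfer,
      * maxxfer, seg, sgllen, granular, flags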
 119  */
 120 static ddi_dma_attr_t dma_fasattr       = {
 121         DMA_ATTR_V0, (unsigned long long)0,
 122         (unsigned long long)0xffffffff, (unsigned long long)((1<<24)-1),
 123         1, DEFAULT_BURSTSIZE, 1,
 124         (unsigned long long)0xffffffff, (unsigned long long)0xffffffff,
 125         1, 512, 0
 126 };
 127 
 128 /*
 129  * optional torture test stuff
 130  */
 131 #ifdef  FASDEBUG
 132 #define FAS_TEST
 133 static int fas_ptest_emsgin;
 134 static int fas_ptest_msgin;
 135 static int fas_ptest_msg = -1;
 136 static int fas_ptest_status;
 137 static int fas_ptest_data_in;
 138 static int fas_atest;
 139 static int fas_atest_disc;
 140 static int fas_atest_reconn;
 141 static void fas_test_abort(struct fas *fas, int slot);
 142 static int fas_rtest;
 143 static int fas_rtest_type;
 144 static void fas_test_reset(struct fas *fas, int slot);
 145 static int fas_force_timeout;
 146 static int fas_btest;
 147 static int fas_test_stop;
 148 static int fas_transport_busy;
 149 static int fas_transport_busy_rqs;
 150 static int fas_transport_reject;
 151 static int fas_arqs_failure;
 152 static int fas_tran_err;
 153 static int fas_test_untagged;
 154 static int fas_enable_untagged;
 155 #endif
 156 
 157 /*
 158  * warlock directives
 159  */
 160 _NOTE(DATA_READABLE_WITHOUT_LOCK(dma fasdebug))
 161 _NOTE(SCHEME_PROTECTS_DATA("just test variables", fas_transport_busy))
 162 _NOTE(SCHEME_PROTECTS_DATA("just test variables", fas_transport_busy_rqs))
 163 _NOTE(SCHEME_PROTECTS_DATA("just test variables", fas_transport_reject))
 164 _NOTE(SCHEME_PROTECTS_DATA("just test variables", fas_arqs_failure))
 165 _NOTE(SCHEME_PROTECTS_DATA("just test variables", fas_tran_err))
 166 _NOTE(MUTEX_PROTECTS_DATA(fas_log_mutex, fas_log_buf))
 167 _NOTE(MUTEX_PROTECTS_DATA(fas_global_mutex, fas_reset_watch))
 168 _NOTE(DATA_READABLE_WITHOUT_LOCK(fas_state fas_head fas_tail \
 169         fas_scsi_watchdog_tick fas_tick))
 170 
 171 /*
 172  * function prototypes
 173  *
 174  * scsa functions are exported by means of the transport table:
 175  */
 176 static int fas_scsi_tgt_probe(struct scsi_device *sd,
 177     int (*waitfunc)(void));
 178 static int fas_scsi_tgt_init(dev_info_t *, dev_info_t *,
 179     scsi_hba_tran_t *, struct scsi_device *);
 180 static int fas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
 181 static int fas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
 182 static int fas_scsi_reset(struct scsi_address *ap, int level);
 183 static int fas_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
 184 static int fas_scsi_setcap(struct scsi_address *ap, char *cap, int value,
 185     int whom);
 186 static struct scsi_pkt *fas_scsi_init_pkt(struct scsi_address *ap,
 187     struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
 188     int tgtlen, int flags, int (*callback)(), caddr_t arg);
 189 static void fas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
 190 static void fas_scsi_dmafree(struct scsi_address *ap,
 191     struct scsi_pkt *pkt);
 192 static void fas_scsi_sync_pkt(struct scsi_address *ap,
 193     struct scsi_pkt *pkt);
 194 
 195 /*
 196  * internal functions:
 197  */
 198 static int fas_prepare_pkt(struct fas *fas, struct fas_cmd *sp);
 199 static int fas_alloc_tag(struct fas *fas, struct fas_cmd *sp);
 200 static int fas_accept_pkt(struct fas *fas, struct fas_cmd *sp, int flag);
 201 static void fas_empty_waitQ(struct fas *fas);
 202 static void fas_move_waitQ_to_readyQ(struct fas *fas);
 203 static void fas_check_waitQ_and_mutex_exit(struct fas *fas);
 204 static int fas_istart(struct fas *fas);
 205 static int fas_ustart(struct fas *fas);
 206 static int fas_startcmd(struct fas *fas, struct fas_cmd *sp);
 207 
 208 static int fas_pkt_alloc_extern(struct fas *fas, struct fas_cmd *sp,
 209     int cmdlen, int tgtlen, int statuslen, int kf);
 210 static void fas_pkt_destroy_extern(struct fas *fas, struct fas_cmd *sp);
 211 static int fas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags);
 212 static void fas_kmem_cache_destructor(void *buf, void *cdrarg);
 213 
 214 static int fas_finish(struct fas *fas);
 215 static void fas_handle_qfull(struct fas *fas, struct fas_cmd *sp);
 216 static void fas_restart_cmd(void *);
 217 static int fas_dopoll(struct fas *fas, int timeout);
 218 static void fas_runpoll(struct fas *fas, short slot, struct fas_cmd *sp);
 219 static uint_t fas_intr(caddr_t arg);
 220 static int fas_intr_svc(struct  fas *fas);
 221 static int fas_phasemanage(struct fas *fas);
 222 static int fas_handle_unknown(struct fas *fas);
 223 static int fas_handle_cmd_start(struct fas *fas);
 224 static int fas_handle_cmd_done(struct fas *fas);
 225 static int fas_handle_msg_out_start(struct fas *fas);
 226 static int fas_handle_msg_out_done(struct fas *fas);
 227 static int fas_handle_clearing(struct fas *fas);
 228 static int fas_handle_data_start(struct fas *fas);
 229 static int fas_handle_data_done(struct fas *fas);
 230 static int fas_handle_c_cmplt(struct fas *fas);
 231 static int fas_handle_msg_in_start(struct fas *fas);
 232 static int fas_handle_more_msgin(struct fas *fas);
 233 static int fas_handle_msg_in_done(struct fas *fas);
 234 static int fas_onebyte_msg(struct fas *fas);
 235 static int fas_twobyte_msg(struct fas *fas);
 236 static int fas_multibyte_msg(struct fas *fas);
 237 static void fas_revert_to_async(struct fas *fas, int tgt);
 238 static int fas_finish_select(struct fas *fas);
 239 static int fas_reselect_preempt(struct fas *fas);
 240 static int fas_reconnect(struct fas *fas);
 241 static int fas_handle_selection(struct fas *fas);
 242 static void fas_head_of_readyQ(struct fas *fas, struct fas_cmd *sp);
 243 static int fas_handle_gross_err(struct fas *fas);
 244 static int fas_illegal_cmd_or_bus_reset(struct fas *fas);
 245 static int fas_check_dma_error(struct fas *fas);
 246 
 247 static void fas_make_sdtr(struct fas *fas, int msgout_offset, int target);
 248 static void fas_make_wdtr(struct fas *fas, int msgout_offset, int target,
 249     int width);
 250 static void fas_update_props(struct fas *fas, int tgt);
 251 static void fas_update_this_prop(struct fas *fas, char *property, int value);
 252 
 253 static int fas_commoncap(struct scsi_address *ap, char *cap, int val,
 254     int tgtonly, int doset);
 255 
 256 static void fas_watch(void *arg);
 257 static void fas_watchsubr(struct fas *fas);
 258 static void fas_cmd_timeout(struct fas *fas, int slot);
 259 static void fas_sync_wide_backoff(struct fas *fas, struct fas_cmd *sp,
 260     int slot);
 261 static void fas_reset_sync_wide(struct fas *fas);
 262 static void fas_set_wide_conf3(struct fas *fas, int target, int width);
 263 static void fas_force_renegotiation(struct fas *fas, int target);
 264 
 265 static int fas_set_new_window(struct fas *fas, struct fas_cmd *sp);
 266 static int fas_restore_pointers(struct fas *fas, struct fas_cmd *sp);
 267 static int fas_next_window(struct fas *fas, struct fas_cmd *sp, uint64_t end);
 268 
 269 /*PRINTFLIKE3*/
 270 static void fas_log(struct fas *fas, int level, const char *fmt, ...);
 271 /*PRINTFLIKE2*/
 272 static void fas_printf(struct fas *fas, const char *fmt, ...);
 273 static void fas_printstate(struct fas *fas, char *msg);
 274 static void fas_dump_cmd(struct fas *fas, struct fas_cmd *sp);
 275 static void fas_short_dump_cmd(struct fas *fas, struct fas_cmd *sp);
 276 static char *fas_state_name(ushort_t state);
 277 
 278 static void fas_makeproxy_cmd(struct fas_cmd *sp,
 279     struct scsi_address *ap, struct scsi_pkt *pkt, int nmsg, ...);
 280 static int fas_do_proxy_cmd(struct fas *fas, struct fas_cmd *sp,
 281     struct scsi_address *ap, char *what);
 282 
 283 static void fas_internal_reset(struct fas *fas, int reset_action);
 284 static int fas_alloc_active_slots(struct fas *fas, int slot, int flag);
 285 
 286 static int fas_abort_curcmd(struct fas *fas);
 287 static int fas_abort_cmd(struct fas *fas, struct fas_cmd *sp, int slot);
 288 static int fas_do_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
 289 static int fas_do_scsi_reset(struct scsi_address *ap, int level);
 290 static int fas_remove_from_readyQ(struct fas *fas, struct fas_cmd *sp,
 291     int slot);
 292 static void fas_flush_readyQ(struct fas *fas, int slot);
 293 static void fas_flush_tagQ(struct fas *fas, int slot);
 294 static void fas_flush_cmd(struct fas *fas, struct fas_cmd *sp,
 295     uchar_t reason, uint_t stat);
 296 static int fas_abort_connected_cmd(struct fas *fas, struct fas_cmd *sp,
 297     uchar_t msg);
 298 static int fas_abort_disconnected_cmd(struct fas *fas, struct scsi_address *ap,
 299     struct fas_cmd *sp, uchar_t msg, int slot);
 300 static void fas_mark_packets(struct fas *fas, int slot, uchar_t reason,
 301     uint_t stat);
 302 static void fas_set_pkt_reason(struct fas *fas, struct fas_cmd *sp,
 303     uchar_t reason, uint_t stat);
 304 
 305 static int fas_reset_bus(struct fas *fas);
 306 static int fas_reset_recovery(struct fas *fas);
 307 static int fas_reset_connected_cmd(struct fas *fas, struct scsi_address *ap);
 308 static int fas_reset_disconnected_cmd(struct fas *fas, struct scsi_address *ap);
 309 static void fas_start_watch_reset_delay(struct fas *);
 310 static void fas_setup_reset_delay(struct fas *fas);
 311 static void fas_watch_reset_delay(void *arg);
 312 static int fas_watch_reset_delay_subr(struct fas *fas);
 313 static void fas_reset_cleanup(struct fas *fas, int slot);
 314 static int fas_scsi_reset_notify(struct scsi_address *ap, int flag,
 315     void (*callback)(caddr_t), caddr_t arg);
 316 static int fas_scsi_quiesce(dev_info_t *hba_dip);
 317 static int fas_scsi_unquiesce(dev_info_t *hba_dip);
 318 
 319 static void fas_set_throttles(struct fas *fas, int slot,
 320     int n, int what);
 321 static void fas_set_all_lun_throttles(struct fas *fas, int slot, int what);
 322 static void fas_full_throttle(struct fas *fas, int slot);
 323 static void fas_remove_cmd(struct fas *fas, struct fas_cmd *sp, int timeout);
 324 static void fas_decrement_ncmds(struct fas *fas, struct fas_cmd *sp);
 325 
 326 static int fas_quiesce_bus(struct fas *fas);
 327 static int fas_unquiesce_bus(struct fas *fas);
 328 static void fas_ncmds_checkdrain(void *arg);
 329 static int fas_check_outstanding(struct fas *fas);
 330 
 331 static int fas_create_arq_pkt(struct fas *fas, struct scsi_address *ap);
 332 static int fas_delete_arq_pkt(struct fas *fas, struct scsi_address *ap);
 333 static int fas_handle_sts_chk(struct fas *fas, struct fas_cmd *sp);
 334 void fas_complete_arq_pkt(struct scsi_pkt *pkt);
 335 
 336 void fas_call_pkt_comp(struct fas *fas, struct fas_cmd *sp);
 337 void fas_empty_callbackQ(struct fas *fas);
 338 int fas_init_callbacks(struct fas *fas);
 339 void fas_destroy_callbacks(struct fas *fas);
 340 
 341 static int fas_check_dma_error(struct fas *fas);
 342 static int fas_init_chip(struct fas *fas, uchar_t id);
 343 
 344 static void fas_read_fifo(struct fas *fas);
 345 static void fas_write_fifo(struct fas *fas, uchar_t *buf, int length, int pad);
 346 
 347 #ifdef FASDEBUG
 348 static void fas_reg_cmd_write(struct fas *fas, uint8_t cmd);
 349 static void fas_reg_write(struct fas *fas, volatile uint8_t *p, uint8_t what);
 350 static uint8_t fas_reg_read(struct fas *fas, volatile uint8_t *p);
 351 
 352 static void fas_dma_reg_write(struct fas *fas, volatile uint32_t *p,
 353     uint32_t what);
 354 static uint32_t fas_dma_reg_read(struct fas *fas, volatile uint32_t *p);
 355 #else
 356 #define fas_reg_cmd_write(fas, cmd) \
 357         fas->f_reg->fas_cmd = (cmd), fas->f_last_cmd = (cmd)
 358 #define fas_reg_write(fas, p, what)  *(p) = (what)
 359 #define fas_reg_read(fas, p) *(p)
 360 #define fas_dma_reg_write(fas, p, what)  *(p) = (what)
 361 #define fas_dma_reg_read(fas, p) *(p)
 362 #endif
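     /*
      * in non-FASDEBUG builds these register accessors compile down to
      * direct loads and stores with no tracing
      */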
 363 
 364 /*
 365  * autoconfiguration data and routines.
 366  */
 367 static int fas_attach(dev_info_t *dev, ddi_attach_cmd_t cmd);
 368 static int fas_detach(dev_info_t *dev, ddi_detach_cmd_t cmd);
 369 static int fas_dr_detach(dev_info_t *dev);
 370 
 371 static struct dev_ops fas_ops = {
 372         DEVO_REV,               /* devo_rev, */
 373         0,                      /* refcnt  */
 374         ddi_no_info,            /* info */
 375         nulldev,                /* identify */
 376         nulldev,                /* probe */
 377         fas_attach,             /* attach */
 378         fas_detach,             /* detach */
 379         nodev,                  /* reset */
 380         NULL,                   /* driver operations */
 381         NULL,                   /* bus operations */
 382         NULL,                   /* power */
 383         ddi_quiesce_not_supported,      /* devo_quiesce */
 384 };
 385 
 386 static struct modldrv modldrv = {
 387         &mod_driverops, /* Type of module. This one is a driver */
 388         "FAS SCSI HBA Driver", /* Name of the module. */
 389         &fas_ops,   /* driver ops */
 390 };
 391 
 392 static struct modlinkage modlinkage = {
 393         MODREV_1, (void *)&modldrv, NULL
 394 };
 395 
 396 int
 397 _init(void)
 398 {
 399         int rval;
 400         /* CONSTCOND */
 401         ASSERT(NO_COMPETING_THREADS);
 402 
 403         rval = ddi_soft_state_init(&fas_state, sizeof (struct fas),
 404             FAS_INITIAL_SOFT_SPACE);
 405         if (rval != 0) {
 406                 return (rval);
 407         }
 408 
 409         if ((rval = scsi_hba_init(&modlinkage)) != 0) {
 410                 ddi_soft_state_fini(&fas_state);
 411                 return (rval);
 412         }
 413 
 414         mutex_init(&fas_global_mutex, NULL, MUTEX_DRIVER, NULL);
 415         rw_init(&fas_global_rwlock, NULL, RW_DRIVER, NULL);
 416 
 417         mutex_init(&fas_log_mutex, NULL, MUTEX_DRIVER, NULL);
 418 
 419         if ((rval = mod_install(&modlinkage)) != 0) {
 420                 mutex_destroy(&fas_log_mutex);
 421                 rw_destroy(&fas_global_rwlock);
 422                 mutex_destroy(&fas_global_mutex);
 423                 ddi_soft_state_fini(&fas_state);
 424                 scsi_hba_fini(&modlinkage);
 425                 return (rval);
 426         }
 427 
 428         return (rval);
 429 }
 430 
 431 int
 432 _fini(void)
 433 {
 434         int     rval;
 435         /* CONSTCOND */
 436         ASSERT(NO_COMPETING_THREADS);
 437 
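             /*
              * tear down the driver-global locks, soft state and HBA
              * framework registration only if the module can actually
              * be removed
              */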
 438         if ((rval = mod_remove(&modlinkage)) == 0) {
 439                 ddi_soft_state_fini(&fas_state);
 440                 scsi_hba_fini(&modlinkage);
 441                 mutex_destroy(&fas_log_mutex);
 442                 rw_destroy(&fas_global_rwlock);
 443                 mutex_destroy(&fas_global_mutex);
 444         }
 445         return (rval);
 446 }
 447 
 448 int
 449 _info(struct modinfo *modinfop)
 450 {
 451         /* CONSTCOND */
 452         ASSERT(NO_COMPETING_THREADS);
 453 
 454         return (mod_info(&modlinkage, modinfop));
 455 }
 456 
 457 static int
 458 fas_scsi_tgt_probe(struct scsi_device *sd,
 459     int (*waitfunc)(void))
 460 {
 461         dev_info_t *dip = ddi_get_parent(sd->sd_dev);
 462         int rval = SCSIPROBE_FAILURE;
 463         scsi_hba_tran_t *tran;
 464         struct fas *fas;
 465         int tgt = sd->sd_address.a_target;
 466 
 467         tran = ddi_get_driver_private(dip);
 468         ASSERT(tran != NULL);
 469         fas = TRAN2FAS(tran);
 470 
 471         /*
 472          * force renegotiation since inquiry cmds do not cause
 473          * check conditions
 474          */
 475         mutex_enter(FAS_MUTEX(fas));
 476         fas_force_renegotiation(fas, tgt);
 477         mutex_exit(FAS_MUTEX(fas));
 478         rval = scsi_hba_probe(sd, waitfunc);
 479 
 480         /*
 481          * the scsi-options precedence is:
 482          *      target-scsi-options             highest
 483          *      device-type-scsi-options
 484          *      per bus scsi-options
 485          *      global scsi-options             lowest
 486          */
 487         mutex_enter(FAS_MUTEX(fas));
 488         if ((rval == SCSIPROBE_EXISTS) &&
 489             ((fas->f_target_scsi_options_defined & (1 << tgt)) == 0)) {
 490                 int options;
 491 
 492                 options = scsi_get_device_type_scsi_options(dip, sd, -1);
 493                 if (options != -1) {
 494                         fas->f_target_scsi_options[tgt] = options;
 495                         fas_log(fas, CE_NOTE,
 496                             "?target%x-scsi-options = 0x%x\n", tgt,
 497                             fas->f_target_scsi_options[tgt]);
 498                         fas_force_renegotiation(fas, tgt);
 499                 }
 500         }
 501         mutex_exit(FAS_MUTEX(fas));
 502 
 503         IPRINTF2("target%x-scsi-options= 0x%x\n",
 504             tgt, fas->f_target_scsi_options[tgt]);
 505 
 506         return (rval);
 507 }
 508 
 509 
 510 /*ARGSUSED*/
 511 static int
 512 fas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
 513     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
 514 {
 515         return (((sd->sd_address.a_target < NTARGETS_WIDE) &&
 516             (sd->sd_address.a_lun < NLUNS_PER_TARGET)) ?
 517             DDI_SUCCESS : DDI_FAILURE);
 518 }
 519 
 520 /*ARGSUSED*/
 521 static int
 522 fas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
 523 {
 524         struct fas      *fas = NULL;
 525         volatile struct dma     *dmar = NULL;
 526         volatile struct fasreg  *fasreg;
 527         ddi_dma_attr_t          *fas_dma_attr;
 528         ddi_device_acc_attr_t   dev_attr;
 529 
 530         int                     instance, id, slot, i, hm_rev;
 531         size_t                  rlen;
 532         uint_t                  count;
 533         char                    buf[64];
 534         scsi_hba_tran_t         *tran = NULL;
 535         char                    intr_added = 0;
 536         char                    mutex_init_done = 0;
 537         char                    hba_attached = 0;
 538         char                    bound_handle = 0;
 539         char                    *prop_template = "target%d-scsi-options";
 540         char                    prop_str[32];
 541 
 542         /* CONSTCOND */
 543         ASSERT(NO_COMPETING_THREADS);
 544 
 545         switch (cmd) {
 546         case DDI_ATTACH:
 547                 break;
 548 
 549         case DDI_RESUME:
 550                 if ((tran = ddi_get_driver_private(dip)) == NULL)
 551                         return (DDI_FAILURE);
 552 
 553                 fas = TRAN2FAS(tran);
 554                 if (!fas) {
 555                         return (DDI_FAILURE);
 556                 }
 557                 /*
 558                  * Reset hardware and softc to "no outstanding commands".
 559                  * Note that a check condition can result on first command
 560                  * to a target.
 561                  */
 562                 mutex_enter(FAS_MUTEX(fas));
 563                 fas_internal_reset(fas,
 564                     FAS_RESET_SOFTC|FAS_RESET_FAS|FAS_RESET_DMA);
 565 
 566                 (void) fas_reset_bus(fas);
 567 
 568                 fas->f_suspended = 0;
 569 
 570                 /* make sure that things get started */
 571                 (void) fas_istart(fas);
 572                 fas_check_waitQ_and_mutex_exit(fas);
 573 
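                     /*
                      * restart the global watchdog if it was cancelled when
                      * the last instance was suspended
                      */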
 574                 mutex_enter(&fas_global_mutex);
 575                 if (fas_timeout_id == 0) {
 576                         fas_timeout_id = timeout(fas_watch, NULL, fas_tick);
 577                         fas_timeout_initted = 1;
 578                 }
 579                 mutex_exit(&fas_global_mutex);
 580 
 581                 return (DDI_SUCCESS);
 582 
 583         default:
 584                 return (DDI_FAILURE);
 585         }
 586 
 587         instance = ddi_get_instance(dip);
 588 
 589         /*
 590          * Since we know that some instantiations of this device can
 591          * be plugged into slave-only SBus slots, check to see whether
 592          * this is one such.
 593          */
 594         if (ddi_slaveonly(dip) == DDI_SUCCESS) {
 595                 cmn_err(CE_WARN,
 596                     "fas%d: device in slave-only slot", instance);
 597                 return (DDI_FAILURE);
 598         }
 599 
 600         if (ddi_intr_hilevel(dip, 0)) {
 601                 /*
 602                  * Interrupt number '0' is a high-level interrupt.
 603                  * We could add a special interrupt handler that
 604                  * triggers a soft interrupt at a lower level, but it
 605                  * is simpler and more appropriate here to just fail
 606                  * the attach.
 607                  */
 608                 cmn_err(CE_WARN,
 609                     "fas%d: Device is using a hilevel intr", instance);
 610                 return (DDI_FAILURE);
 611         }
 612 
 613         /*
 614          * Allocate softc information.
 615          */
 616         if (ddi_soft_state_zalloc(fas_state, instance) != DDI_SUCCESS) {
 617                 cmn_err(CE_WARN,
 618                     "fas%d: cannot allocate soft state", instance);
 619                 goto fail;
 620         }
 621 
 622         fas = (struct fas *)ddi_get_soft_state(fas_state, instance);
 623 
 624         if (fas == NULL) {
 625                 goto fail;
 626         }
 627 
 628         /*
 629          * map in device registers
 630          */
 631         dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
 632         dev_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
 633         dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
 634 
 635         if (ddi_regs_map_setup(dip, (uint_t)0, (caddr_t *)&dmar,
 636             (off_t)0, (off_t)sizeof (struct dma),
 637             &dev_attr, &fas->f_dmar_acc_handle) != DDI_SUCCESS) {
 638                 cmn_err(CE_WARN, "fas%d: cannot map dma", instance);
 639                 goto fail;
 640         }
 641 
 642         if (ddi_regs_map_setup(dip, (uint_t)1, (caddr_t *)&fasreg,
 643             (off_t)0, (off_t)sizeof (struct fasreg),
 644             &dev_attr, &fas->f_regs_acc_handle) != DDI_SUCCESS) {
 645                 cmn_err(CE_WARN,
 646                     "fas%d: unable to map fas366 registers", instance);
 647                 goto fail;
 648         }
 649 
 650         fas_dma_attr = &dma_fasattr;
 651         if (ddi_dma_alloc_handle(dip, fas_dma_attr,
 652             DDI_DMA_SLEEP, NULL, &fas->f_dmahandle) != DDI_SUCCESS) {
 653                 cmn_err(CE_WARN,
 654                     "fas%d: cannot alloc dma handle", instance);
 655                 goto fail;
 656         }
 657 
 658         /*
 659          * allocate cmdarea and its dma handle
 660          */
 661         if (ddi_dma_mem_alloc(fas->f_dmahandle,
 662             (uint_t)2*FIFOSIZE,
 663             &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
 664             NULL, (caddr_t *)&fas->f_cmdarea, &rlen,
 665             &fas->f_cmdarea_acc_handle) != DDI_SUCCESS) {
 666                 cmn_err(CE_WARN,
 667                     "fas%d: cannot alloc cmd area", instance);
 668                 goto fail;
 669         }
 670 
 671         fas->f_reg = fasreg;
 672         fas->f_dma = dmar;
 673         fas->f_instance  = instance;
 674 
 675         if (ddi_dma_addr_bind_handle(fas->f_dmahandle,
 676             NULL, (caddr_t)fas->f_cmdarea,
 677             rlen, DDI_DMA_RDWR|DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 678             &fas->f_dmacookie, &count) != DDI_DMA_MAPPED) {
 679                 cmn_err(CE_WARN,
 680                     "fas%d: cannot bind cmdarea", instance);
 681                 goto fail;
 682         }
 683         bound_handle++;
 684 
 685         ASSERT(count == 1);
 686 
 687         /*
 688          * Allocate a transport structure
 689          */
 690         tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
 691 
 692         /* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
 693         scsi_size_clean(dip);           /* SCSI_SIZE_CLEAN_VERIFY ok */
 694 
 695         /*
 696          * initialize transport structure
 697          */
 698         fas->f_tran                  = tran;
 699         fas->f_dev                   = dip;
 700         tran->tran_hba_private               = fas;
 701         tran->tran_tgt_private               = NULL;
 702         tran->tran_tgt_init          = fas_scsi_tgt_init;
 703         tran->tran_tgt_probe         = fas_scsi_tgt_probe;
 704         tran->tran_tgt_free          = NULL;
 705         tran->tran_start             = fas_scsi_start;
 706         tran->tran_abort             = fas_scsi_abort;
 707         tran->tran_reset             = fas_scsi_reset;
 708         tran->tran_getcap            = fas_scsi_getcap;
 709         tran->tran_setcap            = fas_scsi_setcap;
 710         tran->tran_init_pkt          = fas_scsi_init_pkt;
 711         tran->tran_destroy_pkt               = fas_scsi_destroy_pkt;
 712         tran->tran_dmafree           = fas_scsi_dmafree;
 713         tran->tran_sync_pkt          = fas_scsi_sync_pkt;
 714         tran->tran_reset_notify      = fas_scsi_reset_notify;
 715         tran->tran_get_bus_addr              = NULL;
 716         tran->tran_get_name          = NULL;
 717         tran->tran_quiesce           = fas_scsi_quiesce;
 718         tran->tran_unquiesce         = fas_scsi_unquiesce;
 719         tran->tran_bus_reset         = NULL;
 720         tran->tran_add_eventcall     = NULL;
 721         tran->tran_get_eventcookie   = NULL;
 722         tran->tran_post_event                = NULL;
 723         tran->tran_remove_eventcall  = NULL;
 724 
 725         fas->f_force_async = 0;
 726 
 727         /*
 728          * disable tagged queuing and wide for all targets
 729          * (will be enabled by the target driver if required);
 730          * sync is enabled by default
 731          */
 732         fas->f_nowide = fas->f_notag = ALL_TARGETS;
 733         fas->f_force_narrow = ALL_TARGETS;
 734 
 735         /*
 736          * By default we assume embedded devices and save time
 737          * checking for timeouts in fas_watch() by skipping
 738          * the rest of the luns.
 739          * If we're talking to any non-embedded devices,
 740          * we can no longer cheat and skip over non-zero luns
 741          * in fas_watch() and fas_ustart().
 742          */
 743         fas->f_dslot = NLUNS_PER_TARGET;
 744 
 745         /*
 746          * f_active is used for saving disconnected cmds.
 747          * For tagged targets we need to increase the size later.
 748          * Only allocate for lun == 0; if we probe a lun > 0 then
 749          * we allocate an active structure for it.
 750          * If TQ gets enabled then we need to increase the size
 751          * to hold 256 cmds.
 752          */
 753         for (slot = 0; slot < N_SLOTS; slot += NLUNS_PER_TARGET) {
 754                 (void) fas_alloc_active_slots(fas, slot, KM_SLEEP);
 755         }
 756 
 757         /*
 758          * initialize the qfull retry counts
 759          */
 760         for (i = 0; i < NTARGETS_WIDE; i++) {
 761                 fas->f_qfull_retries[i] = QFULL_RETRIES;
 762                 fas->f_qfull_retry_interval[i] =
 763                     drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
 764 
 765         }
 766 
 767         /*
 768          * Initialize throttles.
 769          */
 770         fas_set_throttles(fas, 0, N_SLOTS, MAX_THROTTLE);
 771 
 772         /*
 773          * Initialize mask of deferred property updates
 774          */
 775         fas->f_props_update = 0;
 776 
 777         /*
 778          * set host ID
 779          */
 780         fas->f_fasconf = DEFAULT_HOSTID;
 781         id = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "initiator-id", -1);
 782         if (id == -1) {
 783                 id = ddi_prop_get_int(DDI_DEV_T_ANY,    dip, 0,
 784                     "scsi-initiator-id", -1);
 785         }
 786         if (id != DEFAULT_HOSTID && id >= 0 && id < NTARGETS_WIDE) {
 787                 fas_log(fas, CE_NOTE, "?initiator SCSI ID now %d\n", id);
 788                 fas->f_fasconf = (uchar_t)id;
 789         }
 790 
 791         /*
 792          * find the burstsize and reduce ours if necessary
 793          */
 794         fas->f_dma_attr = fas_dma_attr;
 795         fas->f_dma_attr->dma_attr_burstsizes &=
 796             ddi_dma_burstsizes(fas->f_dmahandle);
 797 
 798 #ifdef FASDEBUG
 799         fas->f_dma_attr->dma_attr_burstsizes &= fas_burstsizes_limit;
 800         IPRINTF1("dma burstsize=%x\n", fas->f_dma_attr->dma_attr_burstsizes);
 801 #endif
 802         /*
 803          * Attach this instance of the hba
 804          */
 805         if (scsi_hba_attach_setup(dip, fas->f_dma_attr, tran, 0) !=
 806             DDI_SUCCESS) {
 807                 fas_log(fas, CE_WARN, "scsi_hba_attach_setup failed");
 808                 goto fail;
 809         }
 810         hba_attached++;
 811 
 812         /*
 813          * if scsi-options property exists, use it
 814          */
 815         fas->f_scsi_options = ddi_prop_get_int(DDI_DEV_T_ANY,
 816             dip, 0, "scsi-options", DEFAULT_SCSI_OPTIONS);
 817 
 818         /*
 819          * if scsi-selection-timeout property exists, use it
 820          */
 821         fas_selection_timeout = ddi_prop_get_int(DDI_DEV_T_ANY,
 822             dip, 0, "scsi-selection-timeout", SCSI_DEFAULT_SELECTION_TIMEOUT);
 823 
 824         /*
 825          * if hm-rev property doesn't exist, use old scheme for rev
 826          */
 827         hm_rev = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
 828             "hm-rev", -1);
 829 
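             /*
              * map the reported value to the actual FEPS revision: a missing
              * or 0xa0 value means an old rev 1.0/2.0 part (distinguished by
              * the DMA revision) and a reported 0x20 really means rev 2.1
              */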
 830         if (hm_rev == 0xa0 || hm_rev == -1) {
 831                 if (DMAREV(dmar) != 0) {
 832                         fas->f_hm_rev = 0x20;
 833                         fas_log(fas, CE_WARN,
 834                             "obsolete rev 2.0 FEPS chip, "
 835                             "possible data corruption");
 836                 } else {
 837                         fas->f_hm_rev = 0x10;
 838                         fas_log(fas, CE_WARN,
 839                             "obsolete and unsupported rev 1.0 FEPS chip");
 840                         goto fail;
 841                 }
 842         } else if (hm_rev == 0x20) {
 843                 fas->f_hm_rev = 0x21;
 844                 fas_log(fas, CE_WARN, "obsolete rev 2.1 FEPS chip");
 845         } else {
 846                 fas->f_hm_rev = (uchar_t)hm_rev;
 847                 fas_log(fas, CE_NOTE, "?rev %x.%x FEPS chip\n",
 848                     (hm_rev >> 4) & 0xf, hm_rev & 0xf);
 849         }
 850 
 851         if ((fas->f_scsi_options & SCSI_OPTIONS_SYNC) == 0) {
 852                 fas->f_nosync = ALL_TARGETS;
 853         }
 854 
 855         if ((fas->f_scsi_options & SCSI_OPTIONS_WIDE) == 0) {
 856                 fas->f_nowide = ALL_TARGETS;
 857         }
 858 
 859         /*
 860          * if target<n>-scsi-options property exists, use it;
 861          * otherwise use the f_scsi_options
 862          */
 863         for (i = 0; i < NTARGETS_WIDE; i++) {
 864                 (void) sprintf(prop_str, prop_template, i);
 865                 fas->f_target_scsi_options[i] = ddi_prop_get_int(
 866                     DDI_DEV_T_ANY, dip, 0, prop_str, -1);
 867 
 868                 if (fas->f_target_scsi_options[i] != -1) {
 869                         fas_log(fas, CE_NOTE, "?target%x-scsi-options=0x%x\n",
 870                             i, fas->f_target_scsi_options[i]);
 871                         fas->f_target_scsi_options_defined |= 1 << i;
 872                 } else {
 873                         fas->f_target_scsi_options[i] = fas->f_scsi_options;
 874                 }
 875                 if (((fas->f_target_scsi_options[i] &
 876                     SCSI_OPTIONS_DR) == 0) &&
 877                     (fas->f_target_scsi_options[i] & SCSI_OPTIONS_TAG)) {
 878                         fas->f_target_scsi_options[i] &= ~SCSI_OPTIONS_TAG;
 879                         fas_log(fas, CE_WARN,
 880                             "Disabled TQ since disconnects are disabled");
 881                 }
 882         }
 883 
 884         fas->f_scsi_tag_age_limit =
 885             ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "scsi-tag-age-limit",
 886             DEFAULT_TAG_AGE_LIMIT);
 887 
 888         fas->f_scsi_reset_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
 889             dip, 0, "scsi-reset-delay", SCSI_DEFAULT_RESET_DELAY);
 890         if (fas->f_scsi_reset_delay == 0) {
 891                 fas_log(fas, CE_NOTE,
 892                     "scsi_reset_delay of 0 is not recommended,"
 893                     " resetting to SCSI_DEFAULT_RESET_DELAY\n");
 894                 fas->f_scsi_reset_delay = SCSI_DEFAULT_RESET_DELAY;
 895         }
 896 
 897         /*
 898          * get iblock cookie and initialize mutexes
 899          */
 900         if (ddi_get_iblock_cookie(dip, (uint_t)0, &fas->f_iblock)
 901             != DDI_SUCCESS) {
 902                 cmn_err(CE_WARN, "fas_attach: cannot get iblock cookie");
 903                 goto fail;
 904         }
 905 
 906         mutex_init(&fas->f_mutex, NULL, MUTEX_DRIVER, fas->f_iblock);
 907         cv_init(&fas->f_cv, NULL, CV_DRIVER, NULL);
 908 
 909         /*
 910          * initialize mutex for waitQ
 911          */
 912         mutex_init(&fas->f_waitQ_mutex, NULL, MUTEX_DRIVER, fas->f_iblock);
 913         mutex_init_done++;
 914 
 915         /*
 916          * initialize callback mechanism (immediate callback)
 917          */
 918         mutex_enter(&fas_global_mutex);
 919         if (fas_init_callbacks(fas)) {
 920                 mutex_exit(&fas_global_mutex);
 921                 goto fail;
 922         }
 923         mutex_exit(&fas_global_mutex);
 924 
 925         /*
 926          * kstat_intr support
 927          */
 928         (void) sprintf(buf, "fas%d", instance);
 929         fas->f_intr_kstat = kstat_create("fas", instance, buf, "controller", \
 930             KSTAT_TYPE_INTR, 1, KSTAT_FLAG_PERSISTENT);
 931         if (fas->f_intr_kstat)
 932                 kstat_install(fas->f_intr_kstat);
 933 
 934         /*
 935          * install interrupt handler
 936          */
 937         mutex_enter(FAS_MUTEX(fas));
 938         if (ddi_add_intr(dip, (uint_t)0, &fas->f_iblock, NULL,
 939             fas_intr, (caddr_t)fas)) {
 940                 cmn_err(CE_WARN, "fas: cannot add intr");
 941                 mutex_exit(FAS_MUTEX(fas));
 942                 goto fail;
 943         }
 944         intr_added++;
 945 
 946         /*
 947          * initialize fas chip
 948          */
 949         if (fas_init_chip(fas, id))     {
 950                 cmn_err(CE_WARN, "fas: cannot initialize");
 951                 mutex_exit(FAS_MUTEX(fas));
 952                 goto fail;
 953         }
 954         mutex_exit(FAS_MUTEX(fas));
 955 
 956         /*
 957          * create kmem cache for packets
 958          */
 959         (void) sprintf(buf, "fas%d_cache", instance);
 960         fas->f_kmem_cache = kmem_cache_create(buf,
 961             EXTCMD_SIZE, 8,
 962             fas_kmem_cache_constructor, fas_kmem_cache_destructor,
 963             NULL, (void *)fas, NULL, 0);
 964         if (fas->f_kmem_cache == NULL) {
 965                 cmn_err(CE_WARN, "fas: cannot create kmem_cache");
 966                 goto fail;
 967         }
 968 
 969         /*
 970          * at this point, we are not going to fail the attach
 971          * so there is no need to undo the rest:
 972          *
 973          * add this fas to the list; this makes debugging easier
 974          * and fas_watch() needs it to walk through all instances
 975          */
 976         rw_enter(&fas_global_rwlock, RW_WRITER);
 977         if (fas_head == NULL) {
 978                 fas_head = fas;
 979         } else {
 980                 fas_tail->f_next = fas;
 981         }
 982         fas_tail = fas;         /* point to last fas in list */
 983         rw_exit(&fas_global_rwlock);
 984 
 985         /*
 986          * there is one watchdog handler for all driver instances.
 987          * start the watchdog if it hasn't been done yet
 988          */
 989         mutex_enter(&fas_global_mutex);
 990         if (fas_scsi_watchdog_tick == 0) {
 991                 fas_scsi_watchdog_tick = ddi_prop_get_int(DDI_DEV_T_ANY,
 992                     dip, 0, "scsi-watchdog-tick", DEFAULT_WD_TICK);
 993                 if (fas_scsi_watchdog_tick != DEFAULT_WD_TICK) {
 994                         fas_log(fas, CE_NOTE, "?scsi-watchdog-tick=%d\n",
 995                             fas_scsi_watchdog_tick);
 996                 }
 997                 fas_tick = drv_sectohz((clock_t)fas_scsi_watchdog_tick);
 998                 IPRINTF2("fas scsi watchdog tick=%x, fas_tick=%lx\n",
 999                     fas_scsi_watchdog_tick, fas_tick);
1000                 if (fas_timeout_id == 0) {
1001                         fas_timeout_id = timeout(fas_watch, NULL, fas_tick);
1002                         fas_timeout_initted = 1;
1003                 }
1004         }
1005         mutex_exit(&fas_global_mutex);
1006 
1007         ddi_report_dev(dip);
1008 
1009         return (DDI_SUCCESS);
1010 
1011 fail:
1012         cmn_err(CE_WARN, "fas%d: cannot attach", instance);
1013         if (fas) {
1014                 for (slot = 0; slot < N_SLOTS; slot++) {
1015                         struct f_slots *active = fas->f_active[slot];
1016                         if (active) {
1017                                 kmem_free(active, active->f_size);
1018                                 fas->f_active[slot] = NULL;
1019                         }
1020                 }
1021                 if (mutex_init_done) {
1022                         mutex_destroy(&fas->f_mutex);
1023                         mutex_destroy(&fas->f_waitQ_mutex);
1024                         cv_destroy(&fas->f_cv);
1025                 }
1026                 if (intr_added) {
1027                         ddi_remove_intr(dip, (uint_t)0, fas->f_iblock);
1028                 }
1029                 /*
1030                  * kstat_intr support
1031                  */
1032                 if (fas->f_intr_kstat) {
1033                         kstat_delete(fas->f_intr_kstat);
1034                 }
1035                 if (hba_attached) {
1036                         (void) scsi_hba_detach(dip);
1037                 }
1038                 if (tran) {
1039                         scsi_hba_tran_free(tran);
1040                 }
1041                 if (fas->f_kmem_cache) {
1042                         kmem_cache_destroy(fas->f_kmem_cache);
1043                 }
1044                 if (fas->f_cmdarea) {
1045                         if (bound_handle) {
1046                                 (void) ddi_dma_unbind_handle(fas->f_dmahandle);
1047                         }
1048                         ddi_dma_mem_free(&fas->f_cmdarea_acc_handle);
1049                 }
1050                 if (fas->f_dmahandle) {
1051                         ddi_dma_free_handle(&fas->f_dmahandle);
1052                 }
1053                 fas_destroy_callbacks(fas);
1054                 if (fas->f_regs_acc_handle) {
1055                         ddi_regs_map_free(&fas->f_regs_acc_handle);
1056                 }
1057                 if (fas->f_dmar_acc_handle) {
1058                         ddi_regs_map_free(&fas->f_dmar_acc_handle);
1059                 }
1060                 ddi_soft_state_free(fas_state, instance);
1061 
1062                 ddi_remove_minor_node(dip, NULL);
1063         }
1064         return (DDI_FAILURE);
1065 }
1066 
1067 /*ARGSUSED*/
1068 static int
1069 fas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1070 {
1071         struct fas      *fas, *nfas;
1072         scsi_hba_tran_t         *tran;
1073 
1074         /* CONSTCOND */
1075         ASSERT(NO_COMPETING_THREADS);
1076 
1077         switch (cmd) {
1078         case DDI_DETACH:
1079                 return (fas_dr_detach(dip));
1080 
1081         case DDI_SUSPEND:
1082                 if ((tran = ddi_get_driver_private(dip)) == NULL)
1083                         return (DDI_FAILURE);
1084 
1085                 fas = TRAN2FAS(tran);
1086                 if (!fas) {
1087                         return (DDI_FAILURE);
1088                 }
1089 
1090                 mutex_enter(FAS_MUTEX(fas));
1091 
1092                 fas->f_suspended = 1;
1093 
1094                 if (fas->f_ncmds) {
1095                         (void) fas_reset_bus(fas);
1096                         (void) fas_dopoll(fas, SHORT_POLL_TIMEOUT);
1097                 }
1098                 /*
1099                  * disable dma and fas interrupt
1100                  */
1101                 fas->f_dma_csr &= ~DMA_INTEN;
1102                 fas->f_dma_csr &= ~DMA_ENDVMA;
1103                 fas_dma_reg_write(fas, &fas->f_dma->dma_csr, fas->f_dma_csr);
1104 
1105                 mutex_exit(FAS_MUTEX(fas));
1106 
1107                 if (fas->f_quiesce_timeid) {
1108                         (void) untimeout(fas->f_quiesce_timeid);
1109                         fas->f_quiesce_timeid = 0;
1110                 }
1111 
1112                 if (fas->f_restart_cmd_timeid) {
1113                         (void) untimeout(fas->f_restart_cmd_timeid);
1114                         fas->f_restart_cmd_timeid = 0;
1115                 }
1116 
1117                 /* Last fas? */
1118                 rw_enter(&fas_global_rwlock, RW_WRITER);
1119                 for (nfas = fas_head; nfas; nfas = nfas->f_next) {
1120                         if (!nfas->f_suspended) {
1121                                 rw_exit(&fas_global_rwlock);
1122                                 return (DDI_SUCCESS);
1123                         }
1124                 }
1125                 rw_exit(&fas_global_rwlock);
1126 
1127                 mutex_enter(&fas_global_mutex);
1128                 if (fas_timeout_id != 0) {
1129                         timeout_id_t tid = fas_timeout_id;
1130                         fas_timeout_id = 0;
1131                         fas_timeout_initted = 0;
1132                         mutex_exit(&fas_global_mutex);
1133                         (void) untimeout(tid);
1134                 } else {
1135                         mutex_exit(&fas_global_mutex);
1136                 }
1137 
1138                 mutex_enter(&fas_global_mutex);
1139                 if (fas_reset_watch) {
1140                         timeout_id_t tid = fas_reset_watch;
1141                         fas_reset_watch = 0;
1142                         mutex_exit(&fas_global_mutex);
1143                         (void) untimeout(tid);
1144                 } else {
1145                         mutex_exit(&fas_global_mutex);
1146                 }
1147 
1148                 return (DDI_SUCCESS);
1149 
1150         default:
1151                 return (DDI_FAILURE);
1152         }
1153         _NOTE(NOT_REACHED)
1154         /* NOTREACHED */
1155 }
1156 
1157 static int
1158 fas_dr_detach(dev_info_t *dip)
1159 {
1160         struct fas      *fas, *f;
1161         scsi_hba_tran_t         *tran;
1162         short           slot;
1163         int                     i, j;
1164 
1165         if ((tran = ddi_get_driver_private(dip)) == NULL)
1166                 return (DDI_FAILURE);
1167 
1168         fas = TRAN2FAS(tran);
1169         if (!fas) {
1170                 return (DDI_FAILURE);
1171         }
1172 
1173         /*
1174          * disable interrupts
1175          */
1176         fas->f_dma_csr &= ~DMA_INTEN;
1177         fas->f_dma->dma_csr = fas->f_dma_csr;
1178         ddi_remove_intr(dip, (uint_t)0, fas->f_iblock);
1179 
1180         /*
1181          * Remove device instance from the global linked list
1182          */
1183         rw_enter(&fas_global_rwlock, RW_WRITER);
1184 
1185         if (fas_head == fas) {
1186                 f = fas_head = fas->f_next;
1187         } else {
1188                 for (f = fas_head; f != (struct fas *)NULL; f = f->f_next) {
1189                         if (f->f_next == fas) {
1190                                 f->f_next = fas->f_next;
1191                                 break;
1192                         }
1193                 }
1194 
1195                 /*
1196                  * The instance was not found in the softc
1197                  * list.  In that case don't re-enable
1198                  * interrupts; the instance is effectively
1199                  * unusable.
1200                  */
1201                 if (f == (struct fas *)NULL) {
1202                         cmn_err(CE_WARN, "fas_dr_detach: fas instance not"
1203                             " in softc list!");
1204                         rw_exit(&fas_global_rwlock);
1205                         return (DDI_FAILURE);
1206                 }
1207 
1208 
1209         }
1210 
1211         if (fas_tail == fas)
1212                 fas_tail = f;
1213 
1214         rw_exit(&fas_global_rwlock);
1215 
1216         if (fas->f_intr_kstat)
1217                 kstat_delete(fas->f_intr_kstat);
1218 
1219         fas_destroy_callbacks(fas);
1220 
1221         scsi_hba_reset_notify_tear_down(fas->f_reset_notify_listf);
1222 
1223         mutex_enter(&fas_global_mutex);
1224         /*
1225          * destroy any outstanding tagged command info
1226          */
1227         for (slot = 0; slot < N_SLOTS; slot++) {
1228                 struct f_slots *active = fas->f_active[slot];
1229                 if (active) {
1230                         ushort_t        tag;
1231                         for (tag = 0; tag < active->f_n_slots; tag++) {
1232                                 struct fas_cmd  *sp = active->f_slot[tag];
1233                                 if (sp) {
1234                                         struct scsi_pkt *pkt = sp->cmd_pkt;
1235                                         if (pkt) {
1236                                                 (void) fas_scsi_destroy_pkt(
1237                                                     &pkt->pkt_address, pkt);
1238                                         }
1239                                         /* sp freed in fas_scsi_destroy_pkt */
1240                                         active->f_slot[tag] = NULL;
1241                                 }
1242                         }
1243                         kmem_free(active, active->f_size);
1244                         fas->f_active[slot] = NULL;
1245                 }
1246                 ASSERT(fas->f_tcmds[slot] == 0);
1247         }
1248 
1249         /*
1250          * disallow timeout thread rescheduling
1251          */
1252         fas->f_flags |= FAS_FLG_NOTIMEOUTS;
1253         mutex_exit(&fas_global_mutex);
1254 
1255         if (fas->f_quiesce_timeid) {
1256                 (void) untimeout(fas->f_quiesce_timeid);
1257         }
1258 
1259         /*
1260          * Last fas?  If the watch threads are active, cancel them.
1261          */
1262         mutex_enter(&fas_global_mutex);
1263         if (fas_head == (struct fas *)NULL) {
1264                 if (fas_timeout_initted) {
1265                         timeout_id_t tid = fas_timeout_id;
1266                         fas_timeout_initted = 0;
1267                         fas_timeout_id = 0;             /* don't resched */
1268                         mutex_exit(&fas_global_mutex);
1269                         (void) untimeout(tid);
1270                         mutex_enter(&fas_global_mutex);
1271                 }
1272 
1273                 if (fas_reset_watch) {
1274                         mutex_exit(&fas_global_mutex);
1275                         (void) untimeout(fas_reset_watch);
1276                         mutex_enter(&fas_global_mutex);
1277                         fas_reset_watch = 0;
1278                 }
1279         }
1280         mutex_exit(&fas_global_mutex);
1281 
1282         if (fas->f_restart_cmd_timeid) {
1283                 (void) untimeout(fas->f_restart_cmd_timeid);
1284                 fas->f_restart_cmd_timeid = 0;
1285         }
1286 
1287         /*
1288          * destroy outstanding ARQ pkts
1289          */
1290         for (i = 0; i < NTARGETS_WIDE; i++) {
1291                 for (j = 0; j < NLUNS_PER_TARGET; j++) {
1292                         int slot = (i * NLUNS_PER_TARGET) | j;
1293                         if (fas->f_arq_pkt[slot]) {
1294                                 struct scsi_address     sa;
1295                                 sa.a_hba_tran = NULL;           /* not used */
1296                                 sa.a_target = (ushort_t)i;
1297                                 sa.a_lun = (uchar_t)j;
1298                                 (void) fas_delete_arq_pkt(fas, &sa);
1299                         }
1300                 }
1301         }
1302 
1303         /*
1304          * Remove device MT locks and CV
1305          */
1306         mutex_destroy(&fas->f_waitQ_mutex);
1307         mutex_destroy(&fas->f_mutex);
1308         cv_destroy(&fas->f_cv);
1309 
1310         /*
1311          * Release miscellaneous device resources
1312          */
1313 
1314         if (fas->f_kmem_cache) {
1315                 kmem_cache_destroy(fas->f_kmem_cache);
1316         }
1317 
1318         if (fas->f_cmdarea != (uchar_t *)NULL) {
1319                 (void) ddi_dma_unbind_handle(fas->f_dmahandle);
1320                 ddi_dma_mem_free(&fas->f_cmdarea_acc_handle);
1321         }
1322 
1323         if (fas->f_dmahandle != (ddi_dma_handle_t)NULL) {
1324                 ddi_dma_free_handle(&fas->f_dmahandle);
1325         }
1326 
1327         if (fas->f_regs_acc_handle) {
1328                 ddi_regs_map_free(&fas->f_regs_acc_handle);
1329         }
1330         if (fas->f_dmar_acc_handle) {
1331                 ddi_regs_map_free(&fas->f_dmar_acc_handle);
1332         }
1333 
1334         /*
1335          * Remove properties created during attach()
1336          */
1337         ddi_prop_remove_all(dip);
1338 
1339         /*
1340          * Delete the DMA limits, transport vectors and remove the device
1341          * links to the scsi_transport layer.
1342          *      -- ddi_set_driver_private(dip, NULL)
1343          */
1344         (void) scsi_hba_detach(dip);
1345 
1346         /*
1347          * Free the scsi_transport structure for this device.
1348          */
1349         scsi_hba_tran_free(tran);
1350 
1351         ddi_soft_state_free(fas_state, ddi_get_instance(dip));
1352 
1353         return (DDI_SUCCESS);
1354 }
1355 
1356 static int
1357 fas_quiesce_bus(struct fas *fas)
1358 {
1359         mutex_enter(FAS_MUTEX(fas));
1360         IPRINTF("fas_quiesce: QUIESCEing\n");
1361         IPRINTF3("fas_quiesce: ncmds (%d) ndisc (%d) state (%d)\n",
1362             fas->f_ncmds, fas->f_ndisc, fas->f_softstate);
1363         fas_set_throttles(fas, 0, N_SLOTS, HOLD_THROTTLE);
1364         if (fas_check_outstanding(fas)) {
1365                 fas->f_softstate |= FAS_SS_DRAINING;
1366                 fas->f_quiesce_timeid = timeout(fas_ncmds_checkdrain,
1367                     fas, drv_sectohz(FAS_QUIESCE_TIMEOUT));
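                     /*
                      * wait for fas_ncmds_checkdrain() to signal that the bus
                      * has drained; cv_wait_sig() returns 0 if the wait was
                      * interrupted by a signal
                      */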
1368                 if (cv_wait_sig(FAS_CV(fas), FAS_MUTEX(fas)) == 0) {
1369                         /*
1370                          * quiesce has been interrupted.
1371                          */
1372                         IPRINTF("fas_quiesce: abort QUIESCE\n");
1373                         fas->f_softstate &= ~FAS_SS_DRAINING;
1374                         fas_set_throttles(fas, 0, N_SLOTS, MAX_THROTTLE);
1375                         (void) fas_istart(fas);
1376                         if (fas->f_quiesce_timeid != 0) {
1377                                 mutex_exit(FAS_MUTEX(fas));
1378 #ifndef __lock_lint     /* warlock complains but there is a NOTE on this */
1379                                 (void) untimeout(fas->f_quiesce_timeid);
1380                                 fas->f_quiesce_timeid = 0;
1381 #endif
1382                                 return (-1);
1383                         }
1384                         mutex_exit(FAS_MUTEX(fas));
1385                         return (-1);
1386                 } else {
1387                         IPRINTF("fas_quiesce: bus is QUIESCED\n");
1388                         ASSERT(fas->f_quiesce_timeid == 0);
1389                         fas->f_softstate &= ~FAS_SS_DRAINING;
1390                         fas->f_softstate |= FAS_SS_QUIESCED;
1391                         mutex_exit(FAS_MUTEX(fas));
1392                         return (0);
1393                 }
1394         }
1395         IPRINTF("fas_quiesce: bus was not busy, QUIESCED\n");
1396         mutex_exit(FAS_MUTEX(fas));
1397         return (0);
1398 }
1399 
1400 static int
1401 fas_unquiesce_bus(struct fas *fas)
1402 {
1403         mutex_enter(FAS_MUTEX(fas));
1404         fas->f_softstate &= ~FAS_SS_QUIESCED;
1405         fas_set_throttles(fas, 0, N_SLOTS, MAX_THROTTLE);
1406         (void) fas_istart(fas);
1407         IPRINTF("fas_unquiesce: bus has been UNQUIESCED\n");
1408         mutex_exit(FAS_MUTEX(fas));
1409 
1410         return (0);
1411 }
1412 
1413 /*
1414  * invoked from timeout() to check the number of outstanding commands
1415  */
1416 static void
1417 fas_ncmds_checkdrain(void *arg)
1418 {
1419         struct fas *fas = arg;
1420 
1421         mutex_enter(FAS_MUTEX(fas));
1422         IPRINTF3("fas_checkdrain: ncmds (%d) ndisc (%d) state (%d)\n",
1423             fas->f_ncmds, fas->f_ndisc, fas->f_softstate);
1424         if (fas->f_softstate & FAS_SS_DRAINING) {
1425                 fas->f_quiesce_timeid = 0;
1426                 if (fas_check_outstanding(fas) == 0) {
1427                         IPRINTF("fas_drain: bus has drained\n");
1428                         cv_signal(FAS_CV(fas));
1429                 } else {
1430                         /*
1431                          * throttle may have been reset by a bus reset
1432                          * or fas_runpoll()
1433                          * XXX shouldn't be necessary
1434                          */
1435                         fas_set_throttles(fas, 0, N_SLOTS, HOLD_THROTTLE);
1436                         IPRINTF("fas_drain: rescheduling timeout\n");
1437                         fas->f_quiesce_timeid = timeout(fas_ncmds_checkdrain,
1438                             fas, drv_sectohz(FAS_QUIESCE_TIMEOUT));
1439                 }
1440         }
1441         mutex_exit(FAS_MUTEX(fas));
1442 }
1443 
1444 static int
1445 fas_check_outstanding(struct fas *fas)
1446 {
1447         uint_t slot;
1448         uint_t d = ((fas->f_dslot == 0)? 1 : fas->f_dslot);
1449         int ncmds = 0;
1450 
1451         ASSERT(mutex_owned(FAS_MUTEX(fas)));
1452 
1453         for (slot = 0; slot < N_SLOTS; slot += d)
1454                 ncmds += fas->f_tcmds[slot];
1455 
1456         return (ncmds);
1457 }
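/*
 * Illustrative sketch (not part of the driver build): the quiesce path above
 * is a standard "drain" handshake -- hold off new work, then wait on a CV
 * that the periodic timeout callback signals once the outstanding count
 * reaches zero.  Roughly:
 *
 *	mutex_enter(FAS_MUTEX(fas));
 *	fas_set_throttles(fas, 0, N_SLOTS, HOLD_THROTTLE);	// stop new cmds
 *	while (fas_check_outstanding(fas) != 0) {
 *		// fas_ncmds_checkdrain() re-arms itself via timeout() and
 *		// does cv_signal(FAS_CV(fas)) once the bus has drained
 *		if (cv_wait_sig(FAS_CV(fas), FAS_MUTEX(fas)) == 0)
 *			break;			// interrupted by a signal
 *	}
 *	mutex_exit(FAS_MUTEX(fas));
 *
 * fas_quiesce_bus() above additionally cancels the pending timeout and
 * restores MAX_THROTTLE on the interrupted path.
 */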
1458 
1459 
1460 #ifdef  FASDEBUG
1461 /*
1462  * fas register read/write functions with tracing
1463  */
1464 static void
1465 fas_reg_tracing(struct fas *fas, int type, int regno, uint32_t what)
1466 {
1467         fas->f_reg_trace[fas->f_reg_trace_index++] = type;
1468         fas->f_reg_trace[fas->f_reg_trace_index++] = regno;
1469         fas->f_reg_trace[fas->f_reg_trace_index++] = what;
1470         fas->f_reg_trace[fas->f_reg_trace_index++] = gethrtime();
1471         fas->f_reg_trace[fas->f_reg_trace_index] = 0xff;
1472         if (fas->f_reg_trace_index >= REG_TRACE_BUF_SIZE) {
1473                 fas->f_reg_trace_index = 0;
1474         }
1475 }
1476 
1477 static void
1478 fas_reg_cmd_write(struct fas *fas, uint8_t cmd)
1479 {
1480         volatile struct fasreg *fasreg = fas->f_reg;
1481         int regno = (uintptr_t)&fasreg->fas_cmd - (uintptr_t)fasreg;
1482 
1483         fasreg->fas_cmd = cmd;
1484         fas->f_last_cmd = cmd;
1485 
1486         EPRINTF1("issuing cmd %x\n", (uchar_t)cmd);
1487         fas_reg_tracing(fas, 0, regno, cmd);
1488 
1489         fas->f_reg_cmds++;
1490 }
1491 
1492 static void
1493 fas_reg_write(struct fas *fas, volatile uint8_t *p, uint8_t what)
1494 {
1495         int regno = (uintptr_t)p - (uintptr_t)fas->f_reg;
1496 
1497         *p = what;
1498 
1499         EPRINTF2("writing reg%x = %x\n", regno, what);
1500         fas_reg_tracing(fas, 1, regno, what);
1501 
1502         fas->f_reg_writes++;
1503 }
1504 
1505 static uint8_t
1506 fas_reg_read(struct fas *fas, volatile uint8_t *p)
1507 {
1508         uint8_t what;
1509         int regno = (uintptr_t)p - (uintptr_t)fas->f_reg;
1510 
1511         what = *p;
1512 
1513         EPRINTF2("reading reg%x => %x\n", regno, what);
1514         fas_reg_tracing(fas, 2, regno, what);
1515 
1516         fas->f_reg_reads++;
1517 
1518         return (what);
1519 }
1520 
1521 /*
1522  * dma register access routines
1523  */
1524 static void
1525 fas_dma_reg_write(struct fas *fas, volatile uint32_t *p, uint32_t what)
1526 {
1527         *p = what;
1528         fas->f_reg_dma_writes++;
1529 
1530 #ifdef DMA_REG_TRACING
1531 {
1532         int regno = (uintptr_t)p - (uintptr_t)fas->f_dma;
1533         EPRINTF2("writing dma reg%x = %x\n", regno, what);
1534         fas_reg_tracing(fas, 3, regno, what);
1535 }
1536 #endif
1537 }
1538 
1539 static uint32_t
1540 fas_dma_reg_read(struct fas *fas, volatile uint32_t *p)
1541 {
1542         uint32_t what = *p;
1543         fas->f_reg_dma_reads++;
1544 
1545 #ifdef DMA_REG_TRACING
1546 {
1547         int regno = (uintptr_t)p - (uintptr_t)fas->f_dma;
1548         EPRINTF2("reading dma reg%x => %x\n", regno, what);
1549         fas_reg_tracing(fas, 4, regno, what);
1550 }
1551 #endif
1552         return (what);
1553 }
1554 #endif
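/*
 * Illustrative note (FASDEBUG only): fas_reg_tracing() above keeps a small
 * ring buffer -- each register access appends four entries (type, register
 * offset, value, gethrtime() stamp) and marks the next free slot with 0xff,
 * wrapping at REG_TRACE_BUF_SIZE.  The type codes used by the callers above
 * are:
 *
 *	0	fas command register write	(fas_reg_cmd_write)
 *	1	fas register write		(fas_reg_write)
 *	2	fas register read		(fas_reg_read)
 *	3	dma register write		(fas_dma_reg_write)
 *	4	dma register read		(fas_dma_reg_read)
 *
 * so a hung chip can be post-mortemed by walking f_reg_trace in groups of
 * four, starting just past f_reg_trace_index, until the 0xff marker.
 */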
1555 
1556 #define FIFO_EMPTY(fas)  (fas_reg_read(fas, &fas->f_reg->fas_stat2) & \
1557                 FAS_STAT2_EMPTY)
1558 #define FIFO_CNT(fas) \
1559         (fas_reg_read(fas, &fas->f_reg->fas_fifo_flag) & FIFO_CNT_MASK)
1560 
1561 #ifdef FASDEBUG
1562 static void
1563 fas_assert_atn(struct fas *fas)
1564 {
1565         fas_reg_cmd_write(fas, CMD_SET_ATN);
1566 #ifdef FAS_TEST
1567         if (fas_test_stop > 1)
1568                 debug_enter("asserted atn");
1569 #endif
1570 }
1571 #else
1572 #define fas_assert_atn(fas)  fas_reg_cmd_write(fas, CMD_SET_ATN)
1573 #endif
1574 
1575 /*
1576  * DMA macros; we use a shadow copy of the dma_csr to save unnecessary
1577  * reads
1578  */
1579 #define FAS_DMA_WRITE(fas, count, base, cmd) { \
1580         volatile struct fasreg *fasreg = fas->f_reg; \
1581         volatile struct dma *dmar = fas->f_dma; \
1582         ASSERT((fas_dma_reg_read(fas, &dmar->dma_csr) & DMA_ENDVMA) == 0); \
1583         SET_FAS_COUNT(fasreg, count); \
1584         fas_reg_cmd_write(fas, cmd); \
1585         fas_dma_reg_write(fas, &dmar->dma_count, count); \
1586         fas->f_dma_csr |= \
1587             DMA_WRITE | DMA_ENDVMA | DMA_DSBL_DRAIN; \
1588         fas_dma_reg_write(fas, &dmar->dma_addr, (fas->f_lastdma = base)); \
1589         fas_dma_reg_write(fas, &dmar->dma_csr, fas->f_dma_csr); \
1590 }
1591 
1592 #define FAS_DMA_WRITE_SETUP(fas, count, base) { \
1593         volatile struct fasreg *fasreg = fas->f_reg; \
1594         volatile struct dma *dmar = fas->f_dma; \
1595         ASSERT((fas_dma_reg_read(fas, &dmar->dma_csr) & DMA_ENDVMA) == 0); \
1596         SET_FAS_COUNT(fasreg, count); \
1597         fas_dma_reg_write(fas, &dmar->dma_count, count); \
1598         fas->f_dma_csr |= \
1599             DMA_WRITE | DMA_ENDVMA | DMA_DSBL_DRAIN; \
1600         fas_dma_reg_write(fas, &dmar->dma_addr, (fas->f_lastdma = base)); \
1601 }
1602 
1603 
1604 #define FAS_DMA_READ(fas, count, base, dmacount, cmd) { \
1605         volatile struct fasreg *fasreg = fas->f_reg; \
1606         volatile struct dma *dmar = fas->f_dma; \
1607         ASSERT((fas_dma_reg_read(fas, &dmar->dma_csr) & DMA_ENDVMA) == 0); \
1608         SET_FAS_COUNT(fasreg, count); \
1609         fas_reg_cmd_write(fas, cmd); \
1610         fas->f_dma_csr = \
1611             (fas->f_dma_csr & ~DMA_WRITE) | DMA_ENDVMA | DMA_DSBL_DRAIN; \
1612         fas_dma_reg_write(fas, &dmar->dma_count, dmacount); \
1613         fas_dma_reg_write(fas, &dmar->dma_addr, (fas->f_lastdma = base)); \
1614         fas_dma_reg_write(fas, &dmar->dma_csr, fas->f_dma_csr); \
1615 }
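/*
 * Note on the shadow copy used by the macros above: f_dma_csr mirrors the
 * last value written to the DMA csr, so the next value can be composed with
 * plain bit operations instead of a register read.  A minimal sketch of the
 * idiom (assuming the shadow is updated on every csr write):
 *
 *	fas->f_dma_csr |= DMA_WRITE | DMA_ENDVMA | DMA_DSBL_DRAIN;
 *	fas_dma_reg_write(fas, &fas->f_dma->dma_csr, fas->f_dma_csr);
 *
 * i.e. update the soft copy first, then push it to hardware; csr reads are
 * then only needed for status bits such as DMA_ENDVMA or DMA_REQPEND.
 */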
1616 
1617 static void
1618 FAS_FLUSH_DMA(struct fas *fas)
1619 {
1620         fas_dma_reg_write(fas, &fas->f_dma->dma_csr, DMA_RESET);
1621         fas->f_dma_csr |= (DMA_INTEN|DMA_TWO_CYCLE|DMA_DSBL_PARITY|
1622             DMA_DSBL_DRAIN);
1623         fas->f_dma_csr &= ~(DMA_ENDVMA | DMA_WRITE);
1624         fas_dma_reg_write(fas, &fas->f_dma->dma_csr, 0);
1625         fas_dma_reg_write(fas, &fas->f_dma->dma_csr, fas->f_dma_csr);
1626         fas_dma_reg_write(fas, &fas->f_dma->dma_addr, 0);
1627 }
1628 
1629 /*
1630  * FAS_FLUSH_DMA_HARD checks on REQPEND before taking away the reset
1631  */
1632 static void
1633 FAS_FLUSH_DMA_HARD(struct fas *fas)
1634 {
1635         fas_dma_reg_write(fas, &fas->f_dma->dma_csr, DMA_RESET);
1636         fas->f_dma_csr |= (DMA_INTEN|DMA_TWO_CYCLE|DMA_DSBL_PARITY|
1637             DMA_DSBL_DRAIN);
1638         fas->f_dma_csr &= ~(DMA_ENDVMA | DMA_WRITE);
1639         while (fas_dma_reg_read(fas, &fas->f_dma->dma_csr) & DMA_REQPEND)
1640                 ;
1641         fas_dma_reg_write(fas, &fas->f_dma->dma_csr, 0);
1642         fas_dma_reg_write(fas, &fas->f_dma->dma_csr, fas->f_dma_csr);
1643         fas_dma_reg_write(fas, &fas->f_dma->dma_addr, 0);
1644 }
1645 
1646 /*
1647  * update period, conf3, offset reg, if necessary
1648  */
1649 #define FAS_SET_PERIOD_OFFSET_CONF3_REGS(fas, target) \
1650 { \
1651         uchar_t period, offset, conf3; \
1652         period = fas->f_sync_period[target] & SYNC_PERIOD_MASK; \
1653         offset = fas->f_offset[target]; \
1654         conf3  = fas->f_fasconf3[target]; \
1655         if ((period != fas->f_period_reg_last) || \
1656             (offset != fas->f_offset_reg_last) || \
1657             (conf3 != fas->f_fasconf3_reg_last)) { \
1658                 fas->f_period_reg_last = period; \
1659                 fas->f_offset_reg_last = offset; \
1660                 fas->f_fasconf3_reg_last = conf3; \
1661                 fas_reg_write(fas, &fasreg->fas_sync_period, period); \
1662                 fas_reg_write(fas, &fasreg->fas_sync_offset, offset); \
1663                 fas_reg_write(fas, &fasreg->fas_conf3, conf3); \
1664         } \
1665 }
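/*
 * The macro above applies the same "write only on change" idea to three
 * per-target registers: the last values written are cached in
 * f_period_reg_last/f_offset_reg_last/f_fasconf3_reg_last, and the chip is
 * touched only when one of them differs.  Sketch of a use (the actual call
 * sites are elsewhere in the driver; note the macro expects a local fasreg):
 *
 *	volatile struct fasreg *fasreg = fas->f_reg;
 *	FAS_SET_PERIOD_OFFSET_CONF3_REGS(fas, Tgt(sp));
 *
 * Resetting the *_reg_last fields to 0xff (see FAS_RESET_SOFTC below)
 * guarantees that the first use after a chip reset reloads all three.
 */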
1666 
1667 /*
1668  * fifo read/write routines
1669  * always read the fifo bytes before reading the interrupt register
1670  */
1671 
1672 static void
1673 fas_read_fifo(struct fas *fas)
1674 {
1675         int stat = fas->f_stat;
1676         volatile struct fasreg   *fasreg = fas->f_reg;
1677         int              i;
1678 
1679         i = fas_reg_read(fas, &fasreg->fas_fifo_flag) & FIFO_CNT_MASK;
1680         EPRINTF2("fas_read_fifo: fifo cnt=%x, stat=%x\n", i, stat);
1681         ASSERT(i <= FIFOSIZE);
1682 
1683         fas->f_fifolen = 0;
1684         while (i-- > 0) {
1685                 fas->f_fifo[fas->f_fifolen++] = fas_reg_read(fas,
1686                     &fasreg->fas_fifo_data);
1687                 fas->f_fifo[fas->f_fifolen++] = fas_reg_read(fas,
1688                     &fasreg->fas_fifo_data);
1689         }
1690         if (fas->f_stat2 & FAS_STAT2_ISHUTTLE)   {
1691 
1692                 /* write pad byte */
1693                 fas_reg_write(fas, &fasreg->fas_fifo_data, 0);
1694                 fas->f_fifo[fas->f_fifolen++] = fas_reg_read(fas,
1695                     &fasreg->fas_fifo_data);
1696                 /* flush pad byte */
1697                 fas_reg_cmd_write(fas, CMD_FLUSH);
1698         }
1699         EPRINTF2("fas_read_fifo: fifo len=%x, stat=%x\n",
1700             fas->f_fifolen, stat);
1701 } /* fas_read_fifo */
1702 
1703 static void
1704 fas_write_fifo(struct fas *fas, uchar_t *buf, int length, int pad)
1705 {
1706         int i;
1707         volatile struct fasreg   *fasreg = fas->f_reg;
1708 
1709         EPRINTF1("writing fifo %x bytes\n", length);
1710         ASSERT(length <= 15);
1711         fas_reg_cmd_write(fas, CMD_FLUSH);
1712         for (i = 0; i < length; i++) {
1713                 fas_reg_write(fas, &fasreg->fas_fifo_data, buf[i]);
1714                 if (pad) {
1715                         fas_reg_write(fas, &fasreg->fas_fifo_data, 0);
1716                 }
1717         }
1718 }
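/*
 * Note on the FIFO routines above: the FAS366 FIFO is 16 bits wide, so
 * fas_read_fifo() drains two bytes per fifo-flag count and forces out an
 * odd byte stuck in the input shuttle (FAS_STAT2_ISHUTTLE) by writing and
 * then flushing a pad byte.  Conversely, fas_write_fifo() can pad each data
 * byte with a zero, presumably so every byte occupies a full 16-bit FIFO
 * entry.  A sketched single message byte write (the real message-out path
 * builds f_cur_msgout[] instead):
 *
 *	uchar_t msg = MSG_NOP;		// generic message define from scsi.h
 *	fas_write_fifo(fas, &msg, 1, 1);	// one byte, padded
 */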
1719 
1720 /*
1721  * Hardware and Software internal reset routines
1722  */
1723 static int
1724 fas_init_chip(struct fas *fas, uchar_t initiator_id)
1725 {
1726         int             i;
1727         uchar_t         clock_conv;
1728         uchar_t         initial_conf3;
1729         uint_t          ticks;
1730         static char     *prop_cfreq = "clock-frequency";
1731 
1732         /*
1733          * Determine clock frequency of attached FAS chip.
1734          */
1735         i = ddi_prop_get_int(DDI_DEV_T_ANY,
1736             fas->f_dev, DDI_PROP_DONTPASS, prop_cfreq, -1);
1737         clock_conv = (i + FIVE_MEG - 1) / FIVE_MEG;
1738         if (clock_conv != CLOCK_40MHZ) {
1739                 fas_log(fas, CE_WARN, "Bad clock frequency");
1740                 return (-1);
1741         }
1742 
1743         fas->f_clock_conv = clock_conv;
1744         fas->f_clock_cycle = CLOCK_PERIOD(i);
1745         ticks = FAS_CLOCK_TICK(fas);
1746         fas->f_stval = FAS_CLOCK_TIMEOUT(ticks, fas_selection_timeout);
1747 
1748         DPRINTF5("%d mhz, clock_conv %d, clock_cycle %d, ticks %d, stval %d\n",
1749             i, fas->f_clock_conv, fas->f_clock_cycle,
1750             ticks, fas->f_stval);
1751         /*
1752          * set up conf registers
1753          */
1754         fas->f_fasconf |= FAS_CONF_PAREN;
1755         fas->f_fasconf2 = (uchar_t)(FAS_CONF2_FENABLE | FAS_CONF2_XL32);
1756 
1757         if (initiator_id < NTARGETS) {
1758                 initial_conf3 = FAS_CONF3_FASTCLK | FAS_CONF3_ODDBYTE_AUTO;
1759         } else {
1760                 initial_conf3 = FAS_CONF3_FASTCLK | FAS_CONF3_ODDBYTE_AUTO |
1761                     FAS_CONF3_IDBIT3;
1762         }
1763 
1764         for (i = 0; i < NTARGETS_WIDE; i++) {
1765                 fas->f_fasconf3[i] = initial_conf3;
1766         }
1767 
1768         /*
1769          * Avoid resetting the scsi bus since this causes a few seconds
1770          * delay per fas in boot and also causes busy conditions in some
1771          * tape devices.
1772          */
1773         fas_internal_reset(fas, FAS_RESET_SOFTC|FAS_RESET_FAS|FAS_RESET_DMA);
1774 
1775         /*
1776          * initialize period and offset for each target
1777          */
1778         for (i = 0; i < NTARGETS_WIDE; i++) {
1779                 if (fas->f_target_scsi_options[i] & SCSI_OPTIONS_SYNC) {
1780                         fas->f_offset[i] = fas_default_offset |
1781                             fas->f_req_ack_delay;
1782                 } else {
1783                         fas->f_offset[i] = 0;
1784                 }
1785                 if (fas->f_target_scsi_options[i] & SCSI_OPTIONS_FAST) {
1786                         fas->f_neg_period[i] =
1787                             (uchar_t)MIN_SYNC_PERIOD(fas);
1788                 } else {
1789                         fas->f_neg_period[i] =
1790                             (uchar_t)CONVERT_PERIOD(DEFAULT_SYNC_PERIOD);
1791                 }
1792         }
1793         return (0);
1794 }
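/*
 * Worked example for the clock setup above, assuming FIVE_MEG is 5,000,000
 * and the "clock-frequency" property is in Hz: for a 40 MHz part,
 *
 *	clock_conv = (40000000 + 5000000 - 1) / 5000000 = 8
 *
 * which must equal CLOCK_40MHZ for the attach to succeed.  The selection
 * timeout register value f_stval is then derived from the chip clock tick
 * and the fas_selection_timeout tunable via FAS_CLOCK_TIMEOUT().
 */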
1795 
1796 /*
1797  * reset bus, chip, dma, or soft state
1798  */
1799 static void
1800 fas_internal_reset(struct fas *fas, int reset_action)
1801 {
1802         volatile struct fasreg *fasreg = fas->f_reg;
1803         volatile struct dma *dmar = fas->f_dma;
1804 
1805         if (reset_action & FAS_RESET_SCSIBUS)       {
1806                 fas_reg_cmd_write(fas, CMD_RESET_SCSI);
1807                 fas_setup_reset_delay(fas);
1808         }
1809 
1810         FAS_FLUSH_DMA_HARD(fas); /* resets and reinits the dma */
1811 
1812         /*
1813          * NOTE: if dma is aborted while active, indefinite hangs
1814          * may occur; it is preferable to stop the target first before
1815          * flushing the dma
1816          */
1817         if (reset_action & FAS_RESET_DMA) {
1818                 int burstsizes = fas->f_dma_attr->dma_attr_burstsizes;
1819                 if (burstsizes & BURST64) {
1820                         IPRINTF("64 byte burstsize\n");
1821                         fas->f_dma_csr |= DMA_BURST64;
1822                 } else if       (burstsizes & BURST32) {
1823                         IPRINTF("32 byte burstsize\n");
1824                         fas->f_dma_csr |= DMA_BURST32;
1825                 } else {
1826                         IPRINTF("16 byte burstsize\n");
1827                 }
1828                 if ((fas->f_hm_rev > 0x20) && (fas_enable_sbus64) &&
1829                     (ddi_dma_set_sbus64(fas->f_dmahandle, burstsizes) ==
1830                     DDI_SUCCESS)) {
1831                         IPRINTF("enabled 64 bit sbus\n");
1832                         fas->f_dma_csr |= DMA_WIDE_EN;
1833                 }
1834         }
1835 
1836         if (reset_action & FAS_RESET_FAS) {
1837                 /*
1838                  * 2 NOPs with DMA are required here
1839                  * (id_code is unreliable if we don't do this)
1840                  */
1841                 uchar_t idcode, fcode;
1842                 int dmarev;
1843 
1844                 fas_reg_cmd_write(fas, CMD_RESET_FAS);
1845                 fas_reg_cmd_write(fas, CMD_NOP | CMD_DMA);
1846                 fas_reg_cmd_write(fas, CMD_NOP | CMD_DMA);
1847 
1848                 /*
1849                  * Re-load chip configurations
1850                  * Only load registers which are not loaded in fas_startcmd()
1851                  */
1852                 fas_reg_write(fas, &fasreg->fas_clock_conv,
1853                     (fas->f_clock_conv & CLOCK_MASK));
1854 
1855                 fas_reg_write(fas, &fasreg->fas_timeout, fas->f_stval);
1856 
1857                 /*
1858                  * enable default configurations
1859                  */
1860                 fas->f_idcode = idcode =
1861                     fas_reg_read(fas, &fasreg->fas_id_code);
1862                 fcode = (uchar_t)(idcode & FAS_FCODE_MASK) >> (uchar_t)3;
1863                 fas->f_type = FAS366;
1864                 IPRINTF2("Family code %d, revision %d\n",
1865                     fcode, (idcode & FAS_REV_MASK));
1866                 dmarev = fas_dma_reg_read(fas, &dmar->dma_csr);
1867                 dmarev = (dmarev >> 11) & 0xf;
1868                 IPRINTF1("DMA channel revision %d\n", dmarev);
1869 
1870                 fas_reg_write(fas, &fasreg->fas_conf, fas->f_fasconf);
1871                 fas_reg_write(fas, &fasreg->fas_conf2, fas->f_fasconf2);
1872 
1873                 fas->f_req_ack_delay = DEFAULT_REQ_ACK_DELAY;
1874 
1875                 /*
1876                  * Just in case... clear interrupt
1877                  */
1878                 (void) fas_reg_read(fas, &fasreg->fas_intr);
1879         }
1880 
1881         if (reset_action & FAS_RESET_SOFTC) {
1882                 fas->f_wdtr_sent = fas->f_sdtr_sent = 0;
1883                 fas->f_wide_known = fas->f_sync_known = 0;
1884                 fas->f_wide_enabled = fas->f_sync_enabled = 0;
1885                 fas->f_omsglen = 0;
1886                 fas->f_cur_msgout[0] = fas->f_last_msgout =
1887                     fas->f_last_msgin = INVALID_MSG;
1888                 fas->f_abort_msg_sent = fas->f_reset_msg_sent = 0;
1889                 fas->f_next_slot = 0;
1890                 fas->f_current_sp = NULL;
1891                 fas->f_fifolen = 0;
1892                 fas->f_fasconf3_reg_last = fas->f_offset_reg_last =
1893                     fas->f_period_reg_last = 0xff;
1894 
1895                 New_state(fas, STATE_FREE);
1896         }
1897 }
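/*
 * The reset_action argument above is a bit mask, so callers compose exactly
 * the teardown they need.  For example, fas_init_chip() above performs a
 * full soft reset without disturbing the SCSI bus:
 *
 *	fas_internal_reset(fas,
 *	    FAS_RESET_SOFTC | FAS_RESET_FAS | FAS_RESET_DMA);
 *
 * while a bus-reset path would also pass FAS_RESET_SCSIBUS, which in turn
 * schedules the post-reset delay via fas_setup_reset_delay().
 */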
1898 
1899 
1900 #ifdef FASDEBUG
1901 /*
1902  * check if ncmds still reflects the truth
1903  * count all cmds for this driver instance and compare with ncmds
1904  */
1905 static void
1906 fas_check_ncmds(struct fas *fas)
1907 {
1908         int slot = 0;
1909         ushort_t tag, t;
1910         int n, total = 0;
1911 
1912         do {
1913                 if (fas->f_active[slot]) {
1914                         struct fas_cmd *sp = fas->f_readyf[slot];
1915                         t = fas->f_active[slot]->f_n_slots;
1916                         while (sp != 0) {
1917                                 sp = sp->cmd_forw;
1918                                 total++;
1919                         }
1920                         for (n = tag = 0; tag < t; tag++) {
1921                                 if (fas->f_active[slot]->f_slot[tag] != 0) {
1922                                         n++;
1923                                         total++;
1924                                 }
1925                         }
1926                         ASSERT(n == fas->f_tcmds[slot]);
1927                 }
1928                 slot = NEXTSLOT(slot, fas->f_dslot);
1929         } while (slot != 0);
1930 
1931         if (total != fas->f_ncmds) {
1932                 IPRINTF2("fas_check_ncmds: total=%x, ncmds=%x\n",
1933                     total, fas->f_ncmds);
1934         }
1935         ASSERT(fas->f_ncmds >= fas->f_ndisc);
1936 }
1937 #else
1938 #define fas_check_ncmds(fas)
1939 #endif
1940 
1941 /*
1942  * SCSA Interface functions
1943  *
1944  * Visible to the external world via the transport structure.
1945  *
1946  * fas_scsi_abort: abort a current cmd or all cmds for a target
1947  */
1948 /*ARGSUSED*/
1949 static int
1950 fas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
1951 {
1952         struct fas *fas = ADDR2FAS(ap);
1953         int rval;
1954 
1955         IPRINTF2("fas_scsi_abort: target %d.%d\n", ap->a_target, ap->a_lun);
1956 
1957         mutex_enter(FAS_MUTEX(fas));
1958         rval =  fas_do_scsi_abort(ap, pkt);
1959         fas_check_waitQ_and_mutex_exit(fas);
1960         return (rval);
1961 }
1962 
1963 /*
1964  * reset handling: reset bus or target
1965  */
1966 /*ARGSUSED*/
1967 static int
1968 fas_scsi_reset(struct scsi_address *ap, int level)
1969 {
1970         struct fas *fas = ADDR2FAS(ap);
1971         int rval;
1972 
1973         IPRINTF3("fas_scsi_reset: target %d.%d, level %d\n",
1974             ap->a_target, ap->a_lun, level);
1975 
1976         mutex_enter(FAS_MUTEX(fas));
1977         rval = fas_do_scsi_reset(ap, level);
1978         fas_check_waitQ_and_mutex_exit(fas);
1979         return (rval);
1980 }
1981 
1982 /*
1983  * entry point for reset notification setup, to register or to cancel.
1984  */
1985 static int
1986 fas_scsi_reset_notify(struct scsi_address *ap, int flag,
1987     void (*callback)(caddr_t), caddr_t arg)
1988 {
1989         struct fas      *fas = ADDR2FAS(ap);
1990 
1991         return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
1992             &fas->f_mutex, &fas->f_reset_notify_listf));
1993 }
1994 
1995 /*
1996  * capability interface
1997  */
1998 /*ARGSUSED*/
1999 static int
2000 fas_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
2001 {
2002         struct fas *fas = ADDR2FAS(ap);
2003         DPRINTF3("fas_scsi_getcap: tgt=%x, cap=%s, whom=%x\n",
2004             ap->a_target, cap, whom);
2005         return (fas_commoncap(ap, cap, 0, whom, 0));
2006 }
2007 
2008 /*ARGSUSED*/
2009 static int
2010 fas_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
2011 {
2012         struct fas *fas = ADDR2FAS(ap);
2013         IPRINTF4("fas_scsi_setcap: tgt=%x, cap=%s, value=%x, whom=%x\n",
2014             ap->a_target, cap, value, whom);
2015         return (fas_commoncap(ap, cap, value, whom, 1));
2016 }
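/*
 * Illustrative sketch (target-driver side, not part of this file): the
 * getcap/setcap entry points above are reached through the SCSA
 * scsi_ifgetcap(9F)/scsi_ifsetcap(9F) interfaces, e.g.
 *
 *	if (scsi_ifgetcap(&devp->sd_address, "tagged-qing", 1) == 1)
 *		(void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 1, 1);
 *
 * where devp is the target's scsi_device.  fas_commoncap() (not shown in
 * this section) does the real per-target work for both the get and set
 * cases.
 */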
2017 
2018 /*
2019  * pkt and dma allocation and deallocation
2020  */
2021 /*ARGSUSED*/
2022 static void
2023 fas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
2024 {
2025         struct fas_cmd *cmd = PKT2CMD(pkt);
2026 
2027         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAFREE_START,
2028             "fas_scsi_dmafree_start");
2029 
2030         if (cmd->cmd_flags & CFLAG_DMAVALID) {
2031                 /*
2032                  * Free the mapping.
2033                  */
2034                 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
2035                 cmd->cmd_flags ^= CFLAG_DMAVALID;
2036         }
2037         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAFREE_END,
2038             "fas_scsi_dmafree_end");
2039 }
2040 
2041 /*ARGSUSED*/
2042 static void
2043 fas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2044 {
2045         struct fas_cmd *sp = PKT2CMD(pkt);
2046 
2047         if (sp->cmd_flags & CFLAG_DMAVALID) {
2048                 if (ddi_dma_sync(sp->cmd_dmahandle, 0, 0,
2049                     (sp->cmd_flags & CFLAG_DMASEND) ?
2050                     DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU) !=
2051                     DDI_SUCCESS) {
2052                         fas_log(ADDR2FAS(ap), CE_WARN,
2053                             "sync of pkt (%p) failed", (void *)pkt);
2054                 }
2055         }
2056 }
2057 
2058 /*
2059  * initialize pkt and allocate DVMA resources
2060  */
2061 static struct scsi_pkt *
2062 fas_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
2063         struct buf *bp, int cmdlen, int statuslen, int tgtlen,
2064         int flags, int (*callback)(), caddr_t arg)
2065 {
2066         int kf;
2067         int failure = 1;
2068         struct fas_cmd *cmd;
2069         struct fas *fas = ADDR2FAS(ap);
2070         struct fas_cmd *new_cmd;
2071         int rval;
2072 
2073 /* #define      FAS_TEST_EXTRN_ALLOC */
2074 #ifdef FAS_TEST_EXTRN_ALLOC
2075         cmdlen *= 4; statuslen *= 4; tgtlen *= 4;
2076 #endif
2077         /*
2078          * if no pkt was passed then allocate a pkt first
2079          */
2080         if (pkt == NULL) {
2081                 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_PKTALLOC_START,
2082                     "fas_scsi_impl_pktalloc_start");
2083 
2084                 kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;
2085 
2086                 /*
2087                  * only one size of pkt (with arq).
2088                  */
2089                 cmd = kmem_cache_alloc(fas->f_kmem_cache, kf);
2090 
2091                 if (cmd) {
2092 
2093                         ddi_dma_handle_t        save_dma_handle;
2094 
2095                         save_dma_handle = cmd->cmd_dmahandle;
2096                         bzero(cmd, EXTCMD_SIZE);
2097                         cmd->cmd_dmahandle = save_dma_handle;
2098 
2099                         pkt = (struct scsi_pkt *)((uchar_t *)cmd +
2100                             sizeof (struct fas_cmd));
2101                         cmd->cmd_pkt         = pkt;
2102                         pkt->pkt_ha_private  = (opaque_t)cmd;
2103                         pkt->pkt_scbp        = (opaque_t)&cmd->cmd_scb;
2104                         pkt->pkt_cdbp        = (opaque_t)&cmd->cmd_cdb;
2105                         pkt->pkt_address     = *ap;
2106 
2107                         pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
2108                         pkt->pkt_private = cmd->cmd_pkt_private;
2109 
2110                         cmd->cmd_cdblen      = cmdlen;
2111                         cmd->cmd_scblen      = statuslen;
2112                         cmd->cmd_privlen     = tgtlen;
2113                         cmd->cmd_slot                =
2114                             (Tgt(cmd) * NLUNS_PER_TARGET) | Lun(cmd);
2115                         failure = 0;
2116                 }
2117                 if (failure || (cmdlen > sizeof (cmd->cmd_cdb)) ||
2118                     (tgtlen > PKT_PRIV_LEN) ||
2119                     (statuslen > EXTCMDS_STATUS_SIZE)) {
2120                         if (failure == 0) {
2121                                 /*
2122                                  * if extern alloc fails, all will be
2123                                  * deallocated, including cmd
2124                                  */
2125                                 failure = fas_pkt_alloc_extern(fas, cmd,
2126                                     cmdlen, tgtlen, statuslen, kf);
2127                         }
2128                         if (failure) {
2129                                 /*
2130                                  * nothing to deallocate so just return
2131                                  */
2132                                 TRACE_0(TR_FAC_SCSI_FAS,
2133                                     TR_FAS_SCSI_IMPL_PKTALLOC_END,
2134                                     "fas_scsi_impl_pktalloc_end");
2135                                 return (NULL);
2136                         }
2137                 }
2138 
2139                 new_cmd = cmd;
2140 
2141                 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_PKTALLOC_END,
2142                     "fas_scsi_impl_pktalloc_end");
2143         } else {
2144                 cmd = PKT2CMD(pkt);
2145                 new_cmd = NULL;
2146         }
2147 
2148         /*
2149          * Second step of fas_scsi_init_pkt:
2150          * bind the buf to the handle
2151          */
2152         if (bp && bp->b_bcount != 0 &&
2153             (cmd->cmd_flags & CFLAG_DMAVALID) == 0) {
2154 
2155                 int cmd_flags, dma_flags;
2156                 uint_t dmacookie_count;
2157 
2158                 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAGET_START,
2159                     "fas_scsi_impl_dmaget_start");
2160 
2161                 cmd_flags = cmd->cmd_flags;
2162 
2163                 if (bp->b_flags & B_READ) {
2164                         cmd_flags &= ~CFLAG_DMASEND;
2165                         dma_flags = DDI_DMA_READ | DDI_DMA_PARTIAL;
2166                 } else {
2167                         cmd_flags |= CFLAG_DMASEND;
2168                         dma_flags = DDI_DMA_WRITE | DDI_DMA_PARTIAL;
2169                 }
2170                 if (flags & PKT_CONSISTENT) {
2171                         cmd_flags |= CFLAG_CMDIOPB;
2172                         dma_flags |= DDI_DMA_CONSISTENT;
2173                 }
2174 
2175                 /*
2176                  * bind the handle to the buf
2177                  */
2178                 ASSERT(cmd->cmd_dmahandle != NULL);
2179                 rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
2180                     dma_flags, callback, arg, &cmd->cmd_dmacookie,
2181                     &dmacookie_count);
2182 
2183                 if (rval && rval != DDI_DMA_PARTIAL_MAP) {
2184                         switch (rval) {
2185                         case DDI_DMA_NORESOURCES:
2186                                 bioerror(bp, 0);
2187                                 break;
2188                         case DDI_DMA_BADATTR:
2189                         case DDI_DMA_NOMAPPING:
2190                                 bioerror(bp, EFAULT);
2191                                 break;
2192                         case DDI_DMA_TOOBIG:
2193                         default:
2194                                 bioerror(bp, EINVAL);
2195                                 break;
2196                         }
2197                         cmd->cmd_flags = cmd_flags & ~CFLAG_DMAVALID;
2198                         if (new_cmd) {
2199                                 fas_scsi_destroy_pkt(ap, pkt);
2200                         }
2201                         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAGET_END,
2202                             "fas_scsi_impl_dmaget_end");
2203                         return ((struct scsi_pkt *)NULL);
2204                 }
2205                 ASSERT(dmacookie_count == 1);
2206                 cmd->cmd_dmacount = bp->b_bcount;
2207                 cmd->cmd_flags = cmd_flags | CFLAG_DMAVALID;
2208 
2209                 ASSERT(cmd->cmd_dmahandle != NULL);
2210                 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAGET_END,
2211                     "fas_scsi_impl_dmaget_end");
2212         }
2213 
2214         return (pkt);
2215 }
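/*
 * Illustrative sketch (target-driver side): the two phases of
 * fas_scsi_init_pkt() above -- packet allocation and DMA binding -- map
 * onto a single scsi_init_pkt(9F) call, e.g. for I/O described by a buf:
 *
 *	struct scsi_pkt *pkt;
 *
 *	pkt = scsi_init_pkt(&devp->sd_address, NULL, bp,
 *	    CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL);
 *
 * Passing an existing pkt back together with a bp re-runs only the binding
 * phase; small cdb/status/private sizes stay within cmd_cdb/
 * EXTCMDS_STATUS_SIZE/PKT_PRIV_LEN, so no external allocation is needed.
 */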
2216 
2217 /*
2218  * unbind dma resources and deallocate the pkt
2219  */
2220 static void
2221 fas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2222 {
2223         struct fas_cmd *sp = PKT2CMD(pkt);
2224         struct fas *fas = ADDR2FAS(ap);
2225 
2226         /*
2227          * fas_scsi_impl_dmafree inline to speed things up
2228          */
2229         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAFREE_START,
2230             "fas_scsi_impl_dmafree_start");
2231 
2232         if (sp->cmd_flags & CFLAG_DMAVALID) {
2233                 /*
2234                  * Free the mapping.
2235                  */
2236                 (void) ddi_dma_unbind_handle(sp->cmd_dmahandle);
2237                 sp->cmd_flags ^= CFLAG_DMAVALID;
2238         }
2239 
2240         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAFREE_END,
2241             "fas_scsi_impl_dmafree_end");
2242 
2243         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_PKTFREE_START,
2244             "fas_scsi_impl_pktfree_start");
2245 
2246         if ((sp->cmd_flags &
2247             (CFLAG_FREE | CFLAG_CDBEXTERN | CFLAG_PRIVEXTERN |
2248             CFLAG_SCBEXTERN)) == 0) {
2249                 sp->cmd_flags = CFLAG_FREE;
2250                 kmem_cache_free(fas->f_kmem_cache, (void *)sp);
2251         } else {
2252                 fas_pkt_destroy_extern(fas, sp);
2253         }
2254 
2255         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_PKTFREE_END,
2256             "fas_scsi_impl_pktfree_end");
2257 }
2258 
2259 /*
2260  * allocate and deallocate external pkt space (i.e. not part of fas_cmd) for
2261  * non-standard length cdb, pkt_private, and status areas;
2262  * if allocation fails, then deallocate all external space and the pkt
2263  */
2264 /* ARGSUSED */
2265 static int
2266 fas_pkt_alloc_extern(struct fas *fas, struct fas_cmd *sp,
2267     int cmdlen, int tgtlen, int statuslen, int kf)
2268 {
2269         caddr_t cdbp, scbp, tgt;
2270         int failure = 0;
2271 
2272         tgt = cdbp = scbp = NULL;
2273         if (cmdlen > sizeof (sp->cmd_cdb)) {
2274                 if ((cdbp = kmem_zalloc((size_t)cmdlen, kf)) == NULL) {
2275                         failure++;
2276                 } else {
2277                         sp->cmd_pkt->pkt_cdbp = (opaque_t)cdbp;
2278                         sp->cmd_flags |= CFLAG_CDBEXTERN;
2279                 }
2280         }
2281         if (tgtlen > PKT_PRIV_LEN) {
2282                 if ((tgt = kmem_zalloc(tgtlen, kf)) == NULL) {
2283                         failure++;
2284                 } else {
2285                         sp->cmd_flags |= CFLAG_PRIVEXTERN;
2286                         sp->cmd_pkt->pkt_private = tgt;
2287                 }
2288         }
2289         if (statuslen > EXTCMDS_STATUS_SIZE) {
2290                 if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
2291                         failure++;
2292                 } else {
2293                         sp->cmd_flags |= CFLAG_SCBEXTERN;
2294                         sp->cmd_pkt->pkt_scbp = (opaque_t)scbp;
2295                 }
2296         }
2297         if (failure) {
2298                 fas_pkt_destroy_extern(fas, sp);
2299         }
2300         return (failure);
2301 }
2302 
2303 /*
2304  * deallocate external pkt space and deallocate the pkt
2305  */
2306 static void
2307 fas_pkt_destroy_extern(struct fas *fas, struct fas_cmd *sp)
2308 {
2309         if (sp->cmd_flags & CFLAG_FREE) {
2310                 panic("fas_pkt_destroy_extern: freeing free packet");
2311                 _NOTE(NOT_REACHED)
2312                 /* NOTREACHED */
2313         }
2314         if (sp->cmd_flags & CFLAG_CDBEXTERN) {
2315                 kmem_free((caddr_t)sp->cmd_pkt->pkt_cdbp,
2316                     (size_t)sp->cmd_cdblen);
2317         }
2318         if (sp->cmd_flags & CFLAG_SCBEXTERN) {
2319                 kmem_free((caddr_t)sp->cmd_pkt->pkt_scbp,
2320                     (size_t)sp->cmd_scblen);
2321         }
2322         if (sp->cmd_flags & CFLAG_PRIVEXTERN) {
2323                 kmem_free((caddr_t)sp->cmd_pkt->pkt_private,
2324                     (size_t)sp->cmd_privlen);
2325         }
2326         sp->cmd_flags = CFLAG_FREE;
2327         kmem_cache_free(fas->f_kmem_cache, (void *)sp);
2328 }
2329 
2330 /*
2331  * kmem cache constructor and destructor:
2332  * When constructing, we bzero the cmd and allocate the dma handle
2333  * When destructing, just free the dma handle
2334  */
2335 static int
2336 fas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags)
2337 {
2338         struct fas_cmd *cmd = buf;
2339         struct fas *fas = cdrarg;
2340         int  (*callback)(caddr_t) = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP:
2341             DDI_DMA_DONTWAIT;
2342 
2343         bzero(buf, EXTCMD_SIZE);
2344 
2345         /*
2346          * allocate a dma handle
2347          */
2348         if ((ddi_dma_alloc_handle(fas->f_dev, fas->f_dma_attr, callback,
2349             NULL, &cmd->cmd_dmahandle)) != DDI_SUCCESS) {
2350                 return (-1);
2351         }
2352         return (0);
2353 }
2354 
2355 /*ARGSUSED*/
2356 static void
2357 fas_kmem_cache_destructor(void *buf, void *cdrarg)
2358 {
2359         struct fas_cmd *cmd = buf;
2360         if (cmd->cmd_dmahandle) {
2361                 ddi_dma_free_handle(&cmd->cmd_dmahandle);
2362         }
2363 }
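/*
 * Illustrative sketch: the cache served by these routines is created in
 * attach() (not shown in this section) with something like
 *
 *	fas->f_kmem_cache = kmem_cache_create("fas_cache",
 *	    EXTCMD_SIZE, 8,
 *	    fas_kmem_cache_constructor, fas_kmem_cache_destructor,
 *	    NULL, (void *)fas, NULL, 0);
 *
 * (the cache name and alignment here are placeholders).  The constructor
 * cost -- ddi_dma_alloc_handle() -- is thus paid once per cached buffer
 * rather than once per packet, which keeps the common
 * fas_scsi_init_pkt()/fas_scsi_destroy_pkt() path cheap.
 */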
2364 
2365 /*
2366  * fas_scsi_start - Accept commands for transport
2367  */
2368 static int
2369 fas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
2370 {
2371         struct fas_cmd *sp = PKT2CMD(pkt);
2372         struct fas *fas = ADDR2FAS(ap);
2373         int rval;
2374         int intr = 0;
2375 
2376         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_START_START, "fas_scsi_start_start");
2377 
2378 #ifdef FAS_TEST
2379         if (fas_transport_busy > 0) {
2380                 fas_transport_busy--;
2381                 return (TRAN_BUSY);
2382         }
2383         if ((fas_transport_busy_rqs > 0) &&
2384             (*(sp->cmd_pkt->pkt_cdbp) == SCMD_REQUEST_SENSE)) {
2385                 fas_transport_busy_rqs--;
2386                 return (TRAN_BUSY);
2387         }
2388         if (fas_transport_reject > 0) {
2389                 fas_transport_reject--;
2390                 return (TRAN_BADPKT);
2391         }
2392 #endif
2393         /*
2394          * prepare packet before taking the mutex
2395          */
2396         rval = fas_prepare_pkt(fas, sp);
2397         if (rval != TRAN_ACCEPT) {
2398                 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_START_PREPARE_PKT_END,
2399                     "fas_scsi_start_end (prepare_pkt)");
2400                 return (rval);
2401         }
2402 
2403         /*
2404          * the fas mutex can be held for a long time; therefore, if the mutex
2405          * is held, we queue the packet in a waitQ. we should then check
2406          * the waitQ on every mutex_exit(FAS_MUTEX(fas)), but we really only
2407          * need to do this when the bus is free.
2408          * don't put NOINTR cmds (including proxy cmds) in the waitQ! These
2409          * cmds are handled by fas_runpoll().
2410          * if the waitQ is non-empty, queue the pkt anyway to preserve
2411          * ordering.
2412          * the goal is to queue in the waitQ as much as possible so that at
2413          * interrupt time we can move the packets to the readyQ or start
2414          * a packet immediately. It helps to do this at interrupt
2415          * time because we can then field more interrupts.
2416          */
2417         if ((sp->cmd_pkt_flags & FLAG_NOINTR) == 0) {
2418 
2419                 /*
2420                  * if the bus is not free, we will get an interrupt shortly,
2421                  * so we don't want to take the fas mutex; instead, queue up
2422                  * the packet in the waitQ.
2423                  * also, if the waitQ is non-empty or there is an interrupt
2424                  * pending, queue up the packet in the waitQ and let the
2425                  * interrupt handler empty the waitQ
2426                  */
2427                 mutex_enter(&fas->f_waitQ_mutex);
2428 
2429                 if ((fas->f_state != STATE_FREE) ||
2430                     fas->f_waitf || (intr = INTPENDING(fas))) {
2431                         goto queue_in_waitQ;
2432                 }
2433 
2434                 /*
2435                  * we didn't queue up in the waitQ, so now try to accept
2436                  * the packet. if we fail to get the fas mutex, go back to
2437                  * the waitQ again.
2438                  * do not release the waitQ mutex yet, because that
2439                  * leaves a window where the interrupt handler has
2440                  * emptied the waitQ but not yet released the fas mutex
2441                  *
2442                  * the interrupt handler gets the locks in the opposite order,
2443                  * but because we do a tryenter, there is no deadlock
2444                  *
2445                  * if another thread has the fas mutex, then either this
2446                  * thread or the other may find the bus free and
2447                  * empty the waitQ
2448                  */
2449                 if (mutex_tryenter(FAS_MUTEX(fas))) {
2450                         mutex_exit(&fas->f_waitQ_mutex);
2451                         rval = fas_accept_pkt(fas, sp, TRAN_BUSY_OK);
2452                 } else {
2453                         /*
2454                          * we didn't get the fas mutex so
2455                          * the packet has to go in the waitQ now
2456                          */
2457                         goto queue_in_waitQ;
2458                 }
2459         } else {
2460                 /*
2461                  * for polled cmds, we have to take the mutex and
2462                  * start the packet using fas_runpoll()
2463                  */
2464                 mutex_enter(FAS_MUTEX(fas));
2465                 rval = fas_accept_pkt(fas, sp, TRAN_BUSY_OK);
2466         }
2467 
2468         /*
2469          * if the bus is free then empty waitQ and release the mutex
2470          * (it is unlikely that the bus is still free after
2471          * accepting the packet; it may be the relatively unusual case
2472          * that we are throttling)
2473          */
2474         if (fas->f_state == STATE_FREE) {
2475                 FAS_CHECK_WAITQ_AND_FAS_MUTEX_EXIT(fas);
2476         } else {
2477                 mutex_exit(FAS_MUTEX(fas));
2478         }
2479 
2480 done:
2481         TRACE_1(TR_FAC_SCSI_FAS, TR_FAS_START_END,
2482             "fas_scsi_start_end: fas 0x%p", fas);
2483         return (rval);
2484 
2485 queue_in_waitQ:
2486         if (fas->f_waitf == NULL) {
2487                 fas->f_waitb = fas->f_waitf = sp;
2488                 sp->cmd_forw = NULL;
2489         } else {
2490                 struct fas_cmd *dp = fas->f_waitb;
2491                 dp->cmd_forw = fas->f_waitb = sp;
2492                 sp->cmd_forw = NULL;
2493         }
2494 
2495         /*
2496          * check the fas mutex again;
2497          * if there was an interrupt, the interrupt
2498          * handler will eventually empty the waitQ
2499          */
2500         if ((intr == 0) && (fas->f_state == STATE_FREE) &&
2501             mutex_tryenter(FAS_MUTEX(fas))) {
2502                 /*
2503                  * double check if the bus is still free
2504                  * (this actually reduced mutex contention a bit)
2505                  */
2506                 if (fas->f_state == STATE_FREE) {
2507                         fas_empty_waitQ(fas);
2508                 }
2509                 mutex_exit(FAS_MUTEX(fas));
2510         }
2511         mutex_exit(&fas->f_waitQ_mutex);
2512 
2513         TRACE_1(TR_FAC_SCSI_FAS, TR_FAS_START_END,
2514             "fas_scsi_start_end: fas 0x%p", fas);
2515         return (rval);
2516 }
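/*
 * Note on the locking in fas_scsi_start() above: this thread takes
 * f_waitQ_mutex first and then *tries* FAS_MUTEX, while the interrupt
 * handler takes them in the opposite order.  Using mutex_tryenter() on the
 * second lock is the classic way to mix opposite lock orders without risk
 * of deadlock -- a minimal sketch of the idiom:
 *
 *	mutex_enter(&fas->f_waitQ_mutex);
 *	if (mutex_tryenter(FAS_MUTEX(fas))) {
 *		mutex_exit(&fas->f_waitQ_mutex);
 *		...				// fast path, owns FAS_MUTEX
 *		mutex_exit(FAS_MUTEX(fas));
 *	} else {
 *		...				// queue on the waitQ instead
 *		mutex_exit(&fas->f_waitQ_mutex);
 *	}
 *
 * If the tryenter fails, the current FAS_MUTEX holder (fas_intr(),
 * fas_watch(), or another fas_scsi_start()) eventually empties the waitQ.
 */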
2517 
2518 /*
2519  * prepare the pkt:
2520  * the pkt may have been resubmitted or just reused so
2521  * initialize some fields, reset the dma window, and do some checks
2522  */
2523 static int
2524 fas_prepare_pkt(struct fas *fas, struct fas_cmd *sp)
2525 {
2526         struct scsi_pkt *pkt = CMD2PKT(sp);
2527 
2528         /*
2529          * Reinitialize some fields that need it; the packet may
2530          * have been resubmitted
2531          */
2532         pkt->pkt_reason = CMD_CMPLT;
2533         pkt->pkt_state       = 0;
2534         pkt->pkt_statistics = 0;
2535         pkt->pkt_resid       = 0;
2536         sp->cmd_age  = 0;
2537         sp->cmd_pkt_flags = pkt->pkt_flags;
2538 
2539         /*
2540          * Copy the cdb pointer to the pkt wrapper area as we
2541          * might modify this pointer. Zero status byte
2542          */
2543         sp->cmd_cdbp = pkt->pkt_cdbp;
2544         *(pkt->pkt_scbp) = 0;
2545 
2546         if (sp->cmd_flags & CFLAG_DMAVALID) {
2547                 pkt->pkt_resid       = sp->cmd_dmacount;
2548 
2549                 /*
2550                  * if the pkt was resubmitted, then the
2551                  * DMA window may not be the first one anymore
2552                  */
2553                 if (sp->cmd_cur_win) {
2554                         sp->cmd_cur_win = 0;
2555                         if (fas_set_new_window(fas, sp)) {
2556                                 IPRINTF("cannot reset window\n");
2557                                 return (TRAN_BADPKT);
2558                         }
2559                 }
2560                 sp->cmd_saved_cur_addr =
2561                     sp->cmd_cur_addr = sp->cmd_dmacookie.dmac_address;
2562 
2563                 /*
2564                  * the common case is just one window; we worry
2565                  * about multiple windows when we run out of the
2566                  * current window
2567                  */
2568                 sp->cmd_nwin = sp->cmd_saved_win = 0;
2569                 sp->cmd_data_count = sp->cmd_saved_data_count = 0;
2570 
2571                 /*
2572                  * consistent packets need to be sync'ed first
2573                  * (only for data going out)
2574                  */
2575                 if ((sp->cmd_flags & (CFLAG_CMDIOPB | CFLAG_DMASEND)) ==
2576                     (CFLAG_CMDIOPB | CFLAG_DMASEND)) {
2577                         (void) ddi_dma_sync(sp->cmd_dmahandle,       0, (uint_t)0,
2578                             DDI_DMA_SYNC_FORDEV);
2579                 }
2580         }
2581 
2582         sp->cmd_actual_cdblen = sp->cmd_cdblen;
2583 
2584 #ifdef FAS_TEST
2585 #ifndef __lock_lint
2586         if (fas_test_untagged > 0) {
2587                 if (TAGGED(Tgt(sp))) {
2588                         int slot = sp->cmd_slot;
2589                         sp->cmd_pkt_flags &= ~FLAG_TAGMASK;
2590                         sp->cmd_pkt_flags &= ~FLAG_NODISCON;
2591                         sp->cmd_pkt_flags |= 0x80000000;
2592                         fas_log(fas, CE_NOTE,
2593                             "starting untagged cmd, target=%d,"
2594                             " tcmds=%d, sp=0x%p, throttle=%d\n",
2595                             Tgt(sp), fas->f_tcmds[slot], (void *)sp,
2596                             fas->f_throttle[slot]);
2597                         fas_test_untagged = -10;
2598                 }
2599         }
2600 #endif
2601 #endif
2602 
2603 #ifdef FASDEBUG
2604         if (NOTAG(Tgt(sp)) && (pkt->pkt_flags & FLAG_TAGMASK)) {
2605                 IPRINTF2("tagged packet for non-tagged target %d.%d\n",
2606                     Tgt(sp), Lun(sp));
2607                 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_PREPARE_PKT_TRAN_BADPKT_END,
2608                     "fas_prepare_pkt_end (tran_badpkt)");
2609                 return (TRAN_BADPKT);
2610         }
2611 
2612         /*
2613          * the SCSA spec states that it is an error to have no
2614          * completion function when FLAG_NOINTR is not set
2615          */
2616         if ((pkt->pkt_comp == NULL) &&
2617             ((pkt->pkt_flags & FLAG_NOINTR) == 0)) {
2618                 IPRINTF("intr packet with pkt_comp == 0\n");
2619                 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_PREPARE_PKT_TRAN_BADPKT_END,
2620                     "fas_prepare_pkt_end (tran_badpkt)");
2621                 return (TRAN_BADPKT);
2622         }
2623 #endif /* FASDEBUG */
2624 
2625         if ((fas->f_target_scsi_options[Tgt(sp)] & SCSI_OPTIONS_DR) == 0) {
2626                 /*
2627                  * no need to reset tag bits since tag queueing will
2628                  * not be enabled if disconnects are disabled
2629                  */
2630                 sp->cmd_pkt_flags |= FLAG_NODISCON;
2631         }
2632 
2633         sp->cmd_flags = (sp->cmd_flags & ~CFLAG_TRANFLAG) |
2634             CFLAG_PREPARED | CFLAG_IN_TRANSPORT;
2635 
2636         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_PREPARE_PKT_TRAN_ACCEPT_END,
2637             "fas_prepare_pkt_end (tran_accept)");
2638         return (TRAN_ACCEPT);
2639 }
2640 
2641 /*
2642  * emptying the waitQ just before releasing FAS_MUTEX is a bit
2643  * tricky; if we release the waitQ mutex and then the FAS_MUTEX,
2644  * another thread could queue a cmd in the waitQ, just before
2645  * the FAS_MUTEX is released. This cmd is then stuck in the waitQ unless
2646  * another cmd comes in or fas_intr() or fas_watch() checks the waitQ.
2647  * Therefore, by releasing the FAS_MUTEX before releasing the waitQ mutex,
2648  * we prevent fas_scsi_start() from filling the waitQ
2649  *
2650  * By setting NO_TRAN_BUSY, we force fas_accept_pkt() to queue up
2651  * the waitQ pkts in the readyQ.
2652  * If a QFull condition occurs, the target driver may set its throttle
2653  * too high because of the requests queued up in the readyQ but this
2654  * is not a big problem. The throttle should be periodically reset anyway.
2655  */
2656 static void
2657 fas_empty_waitQ(struct fas *fas)
2658 {
2659         struct fas_cmd *sp;
2660         int rval;
2661         struct fas_cmd *waitf, *waitb;
2662 
2663         ASSERT(mutex_owned(&fas->f_waitQ_mutex));
2664         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_EMPTY_WAITQ_START,
2665             "fas_empty_waitQ_start");
2666 
2667         while (fas->f_waitf) {
2668 
2669                 /* copy waitQ, zero the waitQ and release the mutex */
2670                 waitf = fas->f_waitf;
2671                 waitb = fas->f_waitb;
2672                 fas->f_waitf = fas->f_waitb = NULL;
2673                 mutex_exit(&fas->f_waitQ_mutex);
2674 
2675                 do {
2676                         sp = waitf;
2677                         waitf = sp->cmd_forw;
2678                         if (waitb == sp)        {
2679                                 waitb = NULL;
2680                         }
2681 
2682                         rval = fas_accept_pkt(fas, sp, NO_TRAN_BUSY);
2683 
2684                         /*
2685                          * If the packet was rejected for other reasons, then
2686                          * complete it here
2687                          */
2688                         if (rval != TRAN_ACCEPT) {
2689                                 ASSERT(rval != TRAN_BUSY);
2690                                 fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
2691                                 if (sp->cmd_pkt->pkt_comp) {
2692                                         sp->cmd_flags |= CFLAG_FINISHED;
2693                                         fas_call_pkt_comp(fas, sp);
2694                                 }
2695                         }
2696 
2697                         if (INTPENDING(fas)) {
2698                                 /*
2699                                  * stop processing the waitQ and put back
2700                                  * the remaining packets on the waitQ
2701                                  */
2702                                 mutex_enter(&fas->f_waitQ_mutex);
2703                                 if (waitf) {
2704                                         ASSERT(waitb != NULL);
2705                                         waitb->cmd_forw = fas->f_waitf;
2706                                         fas->f_waitf = waitf;
2707                                         if (fas->f_waitb == NULL) {
2708                                                 fas->f_waitb = waitb;
2709                                         }
2710                                 }
2711                                 return;
2712                         }
2713                 } while (waitf);
2714 
2715                 mutex_enter(&fas->f_waitQ_mutex);
2716         }
2717         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_EMPTY_WAITQ_END,
2718             "fas_empty_waitQ_end");
2719 }
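/*
 * The loop above is a "snapshot and drain" pattern: the waitQ head/tail are
 * copied and cleared while f_waitQ_mutex is held, the mutex is dropped so
 * fas_scsi_start() can keep queueing, and the private copy is then walked
 * with only FAS_MUTEX held.  A stripped-down sketch of the shape:
 *
 *	mutex_enter(&fas->f_waitQ_mutex);
 *	while ((waitf = fas->f_waitf) != NULL) {
 *		fas->f_waitf = fas->f_waitb = NULL;
 *		mutex_exit(&fas->f_waitQ_mutex);
 *		while (waitf) {
 *			sp = waitf;
 *			waitf = sp->cmd_forw;	// save next before accept
 *			(void) fas_accept_pkt(fas, sp, NO_TRAN_BUSY);
 *		}
 *		mutex_enter(&fas->f_waitQ_mutex);
 *	}
 *	mutex_exit(&fas->f_waitQ_mutex);
 *
 * The real routine also bails out early when INTPENDING() and splices the
 * unprocessed tail back onto the front of the waitQ to preserve ordering.
 */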
2720 
2721 static void
2722 fas_move_waitQ_to_readyQ(struct fas *fas)
2723 {
2724         /*
2725          * this may actually start cmds, but if the waitQ is not
2726          * empty, it is most likely that the bus is not free
2727          */
2728         ASSERT(mutex_owned(FAS_MUTEX(fas)));
2729         mutex_enter(&fas->f_waitQ_mutex);
2730         fas_empty_waitQ(fas);
2731         mutex_exit(&fas->f_waitQ_mutex);
2732 }
2733 
2734 
2735 /*
2736  * function wrapper for two frequently used macros; for the non-critical
2737  * path we use the function rather than the macros
2738  */
2739 static void
2740 fas_check_waitQ_and_mutex_exit(struct fas *fas)
2741 {
2742         _NOTE(LOCK_RELEASED_AS_SIDE_EFFECT(fas->f_mutex))
2743         FAS_CHECK_WAITQ_AND_FAS_MUTEX_EXIT(fas);
2744         FAS_EMPTY_CALLBACKQ(fas);
2745 }
2746 
2747 /*
2748  * fas_accept_pkt():
2749  * the flag argument is to force fas_accept_pkt to accept the pkt;
2750  * the caller cannot take the pkt back and it has to be queued up in
2751  * the readyQ
2752  */
2753 static int
2754 fas_accept_pkt(struct fas *fas, struct fas_cmd *sp, int flag)
2755 {
2756         short slot = sp->cmd_slot;
2757         int rval = TRAN_ACCEPT;
2758 
2759         TRACE_0(TR_FAC_SCSI_FAS, TR__FAS_START_START, "fas_accept_pkt_start");
2760         ASSERT(mutex_owned(FAS_MUTEX(fas)));
2761         ASSERT(fas->f_ncmds >= 0 && fas->f_ndisc >= 0);
2762         ASSERT(fas->f_ncmds >= fas->f_ndisc);
2763         ASSERT(fas->f_tcmds[slot] >= 0);
2764 
2765         /*
2766          * prepare packet for transport if this hasn't been done yet and
2767          * do some checks
2768          */
2769         if ((sp->cmd_flags & CFLAG_PREPARED) == 0) {
2770                 rval = fas_prepare_pkt(fas, sp);
2771                 if (rval != TRAN_ACCEPT) {
2772                         IPRINTF1("prepare pkt failed, slot=%x\n", slot);
2773                         sp->cmd_flags &= ~CFLAG_TRANFLAG;
2774                         goto done;
2775                 }
2776         }
2777 
2778         if (Lun(sp)) {
2779                 EPRINTF("fas_accept_pkt: switching target and lun slot scan\n");
2780                 fas->f_dslot = 1;
2781 
2782                 if ((fas->f_active[slot] == NULL) ||
2783                     ((fas->f_active[slot]->f_n_slots != NTAGS) &&
2784                     TAGGED(Tgt(sp)))) {
2785                         (void) fas_alloc_active_slots(fas, slot, KM_NOSLEEP);
2786                 }
2787                 if ((fas->f_active[slot] == NULL) ||
2788                     (NOTAG(Tgt(sp)) && (sp->cmd_pkt_flags & FLAG_TAGMASK))) {
2789                         IPRINTF("fatal error on non-zero lun pkt\n");
2790                         return (TRAN_FATAL_ERROR);
2791                 }
2792         }
2793 
2794         /*
2795          * we accepted the command; increment the count
2796          * (we may still reject later if TRAN_BUSY_OK)
2797          */
2798         fas_check_ncmds(fas);
2799         fas->f_ncmds++;
2800 
2801         /*
2802          * if it is a nointr packet, start it now
2803          * (NO_INTR pkts are not queued in the waitQ)
2804          */
2805         if (sp->cmd_pkt_flags & FLAG_NOINTR) {
2806                 EPRINTF("starting a nointr cmd\n");
2807                 fas_runpoll(fas, slot, sp);
2808                 sp->cmd_flags &= ~CFLAG_TRANFLAG;
2809                 goto done;
2810         }
2811 
2812         /*
2813          * reset the throttle if we were draining
2814          */
2815         if ((fas->f_tcmds[slot] == 0) &&
2816             (fas->f_throttle[slot] == DRAIN_THROTTLE)) {
2817                 DPRINTF("reset throttle\n");
2818                 ASSERT(fas->f_reset_delay[Tgt(sp)] == 0);
2819                 fas_full_throttle(fas, slot);
2820         }
2821 
2822         /*
2823          * accept the command:
2824          * if the readyQ is empty, the bus is free, and the throttle
2825          * permits it, run the cmd immediately.
2826          */
2827 #ifdef FASDEBUG
2828         fas->f_total_cmds++;
2829 #endif
2830 
2831         if ((fas->f_readyf[slot] == NULL) && (fas->f_state == STATE_FREE) &&
2832             (fas->f_throttle[slot] > fas->f_tcmds[slot])) {
2833                 ASSERT(fas->f_current_sp == 0);
2834                 (void) fas_startcmd(fas, sp);
2835                 goto exit;
2836         } else {
2837                 /*
2838                  * If FLAG_HEAD is set, put the cmd at the head of the
2839                  * readyQ. If the first cmd in the readyQ is a request
2840                  * sense, insert after that command; there shouldn't be
2841                  * more than one request sense outstanding.
2842                  */
2843                 if (sp->cmd_pkt_flags & FLAG_HEAD) {
2844                         struct fas_cmd *ssp = fas->f_readyf[slot];
2845                         EPRINTF("que head\n");
2846                         if (ssp &&
2847                             *(ssp->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
2848                                 fas_head_of_readyQ(fas, sp);
2849                         } else if (ssp) {
2850                                 struct fas_cmd *dp = ssp->cmd_forw;
2851                                 ssp->cmd_forw = sp;
2852                                 sp->cmd_forw = dp;
2853                                 if (fas->f_readyb[slot] == ssp) {
2854                                         fas->f_readyb[slot] = sp;
2855                                 }
2856                         } else {
2857                                 fas->f_readyf[slot] = fas->f_readyb[slot] = sp;
2858                                 sp->cmd_forw = NULL;
2859                         }
2860 
2861                 /*
2862                  * for tagged targets, check for qfull condition and
2863                  * return TRAN_BUSY (if permitted) if the throttle has been
2864                  * exceeded
2865                  */
2866                 } else if (TAGGED(Tgt(sp)) &&
2867                     (fas->f_tcmds[slot] >= fas->f_throttle[slot]) &&
2868                     (fas->f_throttle[slot] > HOLD_THROTTLE) &&
2869                     (flag == TRAN_BUSY_OK)) {
2870                         IPRINTF2(
2871                             "transport busy, slot=%x, ncmds=%x\n",
2872                             slot, fas->f_ncmds);
2873                         rval = TRAN_BUSY;
2874                         fas->f_ncmds--;
2875                         sp->cmd_flags &=
2876                             ~(CFLAG_PREPARED | CFLAG_IN_TRANSPORT);
2877                         goto done;
2878                 /*
2879                  * append to readyQ or start a new readyQ
2880                  */
2881                 } else if (fas->f_readyf[slot]) {
2882                         struct fas_cmd *dp = fas->f_readyb[slot];
2883                         ASSERT(dp != 0);
2884                         fas->f_readyb[slot] = sp;
2885                         sp->cmd_forw = NULL;
2886                         dp->cmd_forw = sp;
2887                 } else {
2888                         fas->f_readyf[slot] = fas->f_readyb[slot] = sp;
2889                         sp->cmd_forw = NULL;
2890                 }
2891 
2892         }
2893 
2894 done:
2895         /*
2896          * just in case that the bus is free and we haven't
2897          * been able to restart for some reason
2898          */
2899         if (fas->f_state == STATE_FREE) {
2900                 (void) fas_istart(fas);
2901         }
2902 
2903 exit:
2904         fas_check_ncmds(fas);
2905         ASSERT(mutex_owned(FAS_MUTEX(fas)));
2906         TRACE_0(TR_FAC_SCSI_FAS, TR__FAS_START_END,     "fas_accept_pkt_end");
2907         return (rval);
2908 }
2909 
2910 /*
2911  * allocate a tag byte and check for tag aging
2912  */
2913 static char fas_tag_lookup[] =
2914         {0, MSG_HEAD_QTAG, MSG_ORDERED_QTAG, 0, MSG_SIMPLE_QTAG};
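     /*
      * The lookup index is (pkt_flags & FLAG_TAGMASK) >> 12; with the
      * generic scsi_pkt tag flags (FLAG_HTAG 0x1000, FLAG_OTAG 0x2000,
      * FLAG_STAG 0x4000) that yields index 1, 2 or 4, which is why the
      * entries at index 0 and 3 are unused.
      */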
2915 
2916 static int
2917 fas_alloc_tag(struct fas *fas, struct fas_cmd *sp)
2918 {
2919         struct f_slots *tag_slots;
2920         int tag;
2921         short slot = sp->cmd_slot;
2922 
2923         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_ALLOC_TAG_START, "fas_alloc_tag_start");
2924         ASSERT(mutex_owned(FAS_MUTEX(fas)));
2925 
2926         tag_slots = fas->f_active[slot];
2927         ASSERT(tag_slots->f_n_slots == NTAGS);
2928 
2929 alloc_tag:
2930         tag = (fas->f_active[slot]->f_tags)++;
2931         if (fas->f_active[slot]->f_tags >= NTAGS) {
2932                 /*
2933                  * we reserve tag 0 for non-tagged cmds
2934                  */
2935                 fas->f_active[slot]->f_tags = 1;
2936         }
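             /*
              * f_tags rotates through 1..NTAGS-1 instead of reusing the
              * lowest free tag; in-use tags are therefore revisited as
              * the counter wraps, which is what drives the tag-age check
              * below.
              */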
2937         EPRINTF1("tagged cmd, tag = %d\n", tag);
2938 
2939         /* Validate tag, should never fail. */
2940         if (tag_slots->f_slot[tag] == 0) {
2941                 /*
2942                  * Store assigned tag and tag queue type.
2943                  * Note, in case of multiple choice, default to simple queue.
2944                  */
2945                 ASSERT(tag < NTAGS);
2946                 sp->cmd_tag[1] = (uchar_t)tag;
2947                 sp->cmd_tag[0] = fas_tag_lookup[((sp->cmd_pkt_flags &
2948                     FLAG_TAGMASK) >> 12)];
2949                 EPRINTF1("tag= %d\n", tag);
2950                 tag_slots->f_slot[tag] = sp;
2951                 (fas->f_tcmds[slot])++;
2952                 ASSERT(mutex_owned(FAS_MUTEX(fas)));
2953                 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_ALLOC_TAG_END,
2954                     "fas_alloc_tag_end");
2955                 return (0);
2956 
2957         } else {
2958                 int age, i;
2959 
2960                 /*
2961                  * Check tag age.  If timeouts enabled and
2962                  * tag age greater than 1, print warning msg.
2963                  * If timeouts enabled and tag age greater than
2964                  * age limit, begin draining tag queue to check for
2965                  * lost tag cmd.
2966                  */
2967                 age = tag_slots->f_slot[tag]->cmd_age++;
2968                 if (age >= fas->f_scsi_tag_age_limit &&
2969                     tag_slots->f_slot[tag]->cmd_pkt->pkt_time) {
2970                         IPRINTF2("tag %d in use, age= %d\n", tag, age);
2971                         DPRINTF("draining tag queue\n");
2972                         if (fas->f_reset_delay[Tgt(sp)] == 0) {
2973                                 fas->f_throttle[slot] = DRAIN_THROTTLE;
2974                         }
2975                 }
2976 
2977                 /* If tag in use, scan until a free one is found. */
2978                 for (i = 1; i < NTAGS; i++) {
2979                         tag = fas->f_active[slot]->f_tags;
2980                         if (!tag_slots->f_slot[tag]) {
2981                                 EPRINTF1("found free tag %d\n", tag);
2982                                 break;
2983                         }
2984                         if (++(fas->f_active[slot]->f_tags) >= NTAGS) {
2985                                 /*
2986                                  * we reserve tag 0 for non-tagged cmds
2987                                  */
2988                                 fas->f_active[slot]->f_tags = 1;
2989                         }
2990                         EPRINTF1("found in use tag %d\n", tag);
2991                 }
2992 
2993                 /*
2994                  * If no free tags, we're in serious trouble;
2995                  * the target driver has submitted more than 255
2996                  * requests.
2997                  */
2998                 if (tag_slots->f_slot[tag]) {
2999                         IPRINTF1("slot %x: All tags in use!!!\n", slot);
3000                         goto fail;
3001                 }
3002                 goto alloc_tag;
3003         }
3004 
3005 fail:
3006         fas_head_of_readyQ(fas, sp);
3007 
3008         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_ALLOC_TAG_END,
3009             "fas_alloc_tag_end");
3010         return (-1);
3011 }
3012 
3013 /*
3014  * Internal Search Routine.
3015  *
3016  * Search for a command to start.
3017  */
3018 static int
3019 fas_istart(struct fas *fas)
3020 {
3021         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_ISTART_START,
3022             "fas_istart_start");
3023         EPRINTF("fas_istart:\n");
3024 
3025         if (fas->f_state == STATE_FREE && fas->f_ncmds > fas->f_ndisc) {
3026                 (void) fas_ustart(fas);
3027         }
3028         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_ISTART_END,
3029             "fas_istart_end");
3030         return (ACTION_RETURN);
3031 }
3032 
3033 static int
3034 fas_ustart(struct fas *fas)
3035 {
3036         struct fas_cmd *sp;
3037         short slot = fas->f_next_slot;
3038         short start_slot = slot;
3039         short dslot = fas->f_dslot;
3040 
3041         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_USTART_START, "fas_ustart_start");
3042         EPRINTF1("fas_ustart: start_slot=%x\n", fas->f_next_slot);
3043         ASSERT(fas->f_current_sp == NULL);
3044         ASSERT(dslot != 0);
3045         if (dslot == NLUNS_PER_TARGET) {
3046                 ASSERT((slot % NLUNS_PER_TARGET) == 0);
3047         }
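             /*
              * Round-robin scan: start at f_next_slot, step by dslot and
              * wrap until we are back at start_slot, so every slot gets a
              * chance before any slot gets a second one.
              */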
3048 
3049         /*
3050          * if readyQ not empty and we are not draining, then we
3051          * can start another cmd
3052          */
3053         do {
3054                 /*
3055                  * If all cmds drained from tag Q, back to full throttle and
3056                  * start queueing up new cmds again.
3057                  */
3058                 if (fas->f_throttle[slot] == DRAIN_THROTTLE &&
3059                     fas->f_tcmds[slot] == 0) {
3060                         fas_full_throttle(fas, slot);
3061                 }
3062 
3063                 if (fas->f_readyf[slot] &&
3064                     (fas->f_throttle[slot] > fas->f_tcmds[slot])) {
3065                         sp = fas->f_readyf[slot];
3066                         fas->f_readyf[slot] = sp->cmd_forw;
3067                         if (sp->cmd_forw == NULL) {
3068                                 fas->f_readyb[slot] = NULL;
3069                         }
3070                         fas->f_next_slot = NEXTSLOT(slot, dslot);
3071                         ASSERT((sp->cmd_pkt_flags & FLAG_NOINTR) == 0);
3072                         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_USTART_END,
3073                             "fas_ustart_end");
3074                         return (fas_startcmd(fas, sp));
3075                 } else {
3076                         slot = NEXTSLOT(slot, dslot);
3077                 }
3078         } while (slot != start_slot);
3079 
3080         EPRINTF("fas_ustart: no cmds to start\n");
3081         fas->f_next_slot = NEXTSLOT(slot, dslot);
3082         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_USTART_NOT_FOUND_END,
3083             "fas_ustart_end (not_found)");
3084         return (FALSE);
3085 }
3086 
3087 /*
3088  * Start a command off
3089  */
3090 static int
3091 fas_startcmd(struct fas *fas, struct fas_cmd *sp)
3092 {
3093         volatile struct fasreg *fasreg = fas->f_reg;
3094         ushort_t  nstate;
3095         uchar_t cmd, target, lun;
3096         ushort_t tshift;
3097         volatile uchar_t *tp = fas->f_cmdarea;
3098         struct scsi_pkt *pkt = CMD2PKT(sp);
3099         int slot = sp->cmd_slot;
3100         struct f_slots *slots = fas->f_active[slot];
3101         int i, cdb_len;
3102 
3103 #define LOAD_CMDP       *(tp++)
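     /*
      * LOAD_CMDP appends one byte to the DMA command area (f_cmdarea);
      * the identify message, optional tag bytes and the cdb accumulate
      * there and are fed to the chip in a single DMA transfer at
      * selection time (see FAS_DMA_READ below).
      */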
3104 
3105         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_STARTCMD_START, "fas_startcmd_start");
3106 
3107         EPRINTF2("fas_startcmd: sp=0x%p flags=%x\n",
3108             (void *)sp, sp->cmd_pkt_flags);
3109         ASSERT((sp->cmd_flags & CFLAG_FREE) == 0);
3110         ASSERT((sp->cmd_flags & CFLAG_COMPLETED) == 0);
3111         ASSERT(fas->f_current_sp == NULL && fas->f_state == STATE_FREE);
3112         if ((sp->cmd_pkt_flags & FLAG_NOINTR) == 0) {
3113                 ASSERT(fas->f_throttle[slot] > 0);
3114                 ASSERT(fas->f_reset_delay[Tgt(sp)] == 0);
3115         }
3116 
3117         target          = Tgt(sp);
3118         lun             = Lun(sp);
3119 
3120         /*
3121          * if a non-tagged cmd is submitted to an active tagged target
3122          * then drain before submitting this cmd; SCSI-2 allows RQSENSE
3123          * to be untagged
3124          */
3125         if (((sp->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
3126             TAGGED(target) && fas->f_tcmds[slot] &&
3127             ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) &&
3128             (*(sp->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
3129                 if ((sp->cmd_pkt_flags & FLAG_NOINTR) == 0) {
3130                         struct fas_cmd *dp;
3131 
3132                         IPRINTF("untagged cmd, start draining\n");
3133 
3134                         if (fas->f_reset_delay[Tgt(sp)] == 0) {
3135                                 fas->f_throttle[slot] = DRAIN_THROTTLE;
3136                         }
3137                         dp = fas->f_readyf[slot];
3138                         fas->f_readyf[slot] = sp;
3139                         sp->cmd_forw = dp;
3140                         if (fas->f_readyb[slot] == NULL) {
3141                                 fas->f_readyb[slot] = sp;
3142                         }
3143                 }
3144                 return (FALSE);
3145         }
3146 
3147         /*
3148          * allocate a tag; if no tag available then put request back
3149          * on the ready queue and return; eventually a cmd returns and we
3150          * get going again or we timeout
3151          */
3152         if (TAGGED(target) && (sp->cmd_pkt_flags & FLAG_TAGMASK)) {
3153                 if (fas_alloc_tag(fas, sp)) {
3154                         return (FALSE);
3155                 }
3156         } else {
3157                 /*
3158                  * tag slot 0 is reserved for non-tagged cmds
3159                  * and should be empty because we have drained
3160                  */
3161                 if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
3162                         ASSERT(fas->f_active[slot]->f_slot[0] == NULL);
3163                         fas->f_active[slot]->f_slot[0] = sp;
3164                         sp->cmd_tag[1] = 0;
3165                         if (*(sp->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
3166                                 ASSERT(fas->f_tcmds[slot] == 0);
3167                                 /*
3168                                  * don't start any other cmd until this
3169                                  * one is finished. The throttle is reset
3170                                  * later in fas_watch()
3171                                  */
3172                                 fas->f_throttle[slot] = 1;
3173                         }
3174                         (fas->f_tcmds[slot])++;
3175 
3176                 }
3177         }
3178 
3179         fas->f_current_sp = sp;
3180         fas->f_omsglen       = 0;
3181         tshift          = 1<<target;
3182         fas->f_sdtr_sent = fas->f_wdtr_sent =     0;
3183         cdb_len         = sp->cmd_actual_cdblen;
3184 
3185         if (sp->cmd_pkt_flags & FLAG_RENEGOTIATE_WIDE_SYNC) {
3186                 fas_force_renegotiation(fas, Tgt(sp));
3187         }
3188 
3189         /*
3190          * first send identify message, with or without disconnect priv.
3191          */
3192         if (sp->cmd_pkt_flags & FLAG_NODISCON) {
3193                 LOAD_CMDP = fas->f_last_msgout = MSG_IDENTIFY | lun;
3194                 ASSERT((sp->cmd_pkt_flags & FLAG_TAGMASK) == 0);
3195         } else {
3196                 LOAD_CMDP = fas->f_last_msgout = MSG_DR_IDENTIFY | lun;
3197         }
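         /*
          * The identify byte is 0x80 | lun; MSG_DR_IDENTIFY also sets
          * bit 6 (0xc0 | lun), granting the target disconnect privilege.
          */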
3198 
3199         /*
3200          * normal case, tagQ and we have negotiated wide and sync
3201          * or we don't need to renegotiate because wide and sync
3202          * have been disabled
3203          * (proxy msg's don't have tag flag set)
3204          */
3205         if ((sp->cmd_pkt_flags & FLAG_TAGMASK) &&
3206             ((fas->f_wide_known | fas->f_nowide) &
3207             (fas->f_sync_known | fas->f_nosync) & tshift)) {
3208 
3209                 EPRINTF("tag cmd\n");
3210                 ASSERT((sp->cmd_pkt_flags & FLAG_NODISCON) == 0);
3211 
3212                 fas->f_last_msgout = LOAD_CMDP = sp->cmd_tag[0];
3213                 LOAD_CMDP = sp->cmd_tag[1];
3214 
3215                 nstate = STATE_SELECT_NORMAL;
3216                 cmd = CMD_SEL_ATN3 | CMD_DMA;
3217 
3218         /*
3219          * is this a proxy message
3220          */
3221         } else if (sp->cmd_flags & CFLAG_CMDPROXY) {
3222 
3223                 IPRINTF2("proxy cmd, len=%x, msg=%x\n",
3224                     sp->cmd_cdb[FAS_PROXY_DATA],
3225                     sp->cmd_cdb[FAS_PROXY_DATA+1]);
3226                 /*
3227                  * This is a proxy command. It will have
3228                  * a message to send as part of post-selection
3229                  * (e.g, MSG_ABORT or MSG_DEVICE_RESET)
3230                  */
3231                 fas->f_omsglen = sp->cmd_cdb[FAS_PROXY_DATA];
3232                 for (i = 0; i < (uint_t)fas->f_omsglen; i++) {
3233                         fas->f_cur_msgout[i] =
3234                             sp->cmd_cdb[FAS_PROXY_DATA+1+i];
3235                 }
3236                 sp->cmd_cdb[FAS_PROXY_RESULT] = FALSE;
3237                 cdb_len = 0;
3238                 cmd = CMD_SEL_STOP | CMD_DMA;
3239                 nstate = STATE_SELECT_N_SENDMSG;
3240 
3241         /*
3242          * always negotiate wide first and sync after wide
3243          */
3244         } else if (((fas->f_wide_known | fas->f_nowide) & tshift) == 0) {
3245                 int i = 0;
3246 
3247                 /* First the tag message bytes */
3248                 if (sp->cmd_pkt_flags & FLAG_TAGMASK) {
3249                         fas->f_cur_msgout[i++] = sp->cmd_tag[0];
3250                         fas->f_cur_msgout[i++] = sp->cmd_tag[1];
3251                 }
3252 
3253                 /*
3254                  * Set up to send wide negotiating message.  This is getting
3255                  * a bit tricky as we dma out the identify message and
3256                  * send the other messages via the fifo buffer.
3257                  */
3258                 EPRINTF1("cmd with wdtr msg, tag=%x\n", sp->cmd_tag[1]);
3259 
3260                 fas_make_wdtr(fas, i, target, FAS_XFER_WIDTH);
3261 
3262                 cdb_len = 0;
3263                 nstate = STATE_SELECT_N_SENDMSG;
3264                 cmd = CMD_SEL_STOP | CMD_DMA;
3265 
3266         /*
3267          * negotiate sync xfer rate
3268          */
3269         } else if (((fas->f_sync_known | fas->f_nosync) & tshift) == 0) {
3270                 int i = 0;
3271                 /*
3272                  * Set up to send sync negotiating message.  This is getting
3273                  * a bit tricky as we dma out the identify message and
3274                  * send the other messages via the fifo buffer.
3275                  */
3276                 if (sp->cmd_pkt_flags & FLAG_TAGMASK) {
3277                         fas->f_cur_msgout[i++] = sp->cmd_tag[0];
3278                         fas->f_cur_msgout[i++] = sp->cmd_tag[1];
3279                 }
3280 
3281                 fas_make_sdtr(fas, i, target);
3282 
3283                 cdb_len = 0;
3284                 cmd = CMD_SEL_STOP | CMD_DMA;
3285                 nstate = STATE_SELECT_N_SENDMSG;
3286 
3287         /*
3288          * normal cmds, no negotiations and not a proxy and no TQ
3289          */
3290         } else {
3291 
3292                 ASSERT((sp->cmd_pkt_flags & FLAG_TAGMASK) == 0);
3293                 EPRINTF("std. cmd\n");
3294 
3295                 nstate = STATE_SELECT_NORMAL;
3296                 cmd = CMD_SEL_ATN | CMD_DMA;
3297         }
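         /*
          * At this point cmd and nstate encode how selection proceeds:
          * CMD_SEL_ATN3 sends identify, both tag bytes and the cdb in one
          * shot; CMD_SEL_STOP stops after the identify so the negotiation
          * or proxy message staged in f_cur_msgout can go out first; and
          * CMD_SEL_ATN is the plain untagged case.
          */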
3298 
3299         /*
3300          * Now load cdb (if any)
3301          */
3302         for (i = 0; i < cdb_len; i++) {
3303                 LOAD_CMDP = sp->cmd_cdbp[i];
3304         }
3305 
3306         /*
3307          * calculate total dma amount:
3308          */
3309         fas->f_lastcount = (uintptr_t)tp - (uintptr_t)fas->f_cmdarea;
3310 
3311         /*
3312          * load target id and enable bus id encoding and 32 bit counter
3313          */
3314         fas_reg_write(fas, (uchar_t *)&fasreg->fas_busid,
3315             (target & 0xf) | FAS_BUSID_ENCODID | FAS_BUSID_32BIT_COUNTER);
3316 
3317         FAS_SET_PERIOD_OFFSET_CONF3_REGS(fas, target);
3318 
3319         fas_reg_cmd_write(fas, CMD_FLUSH);
3320 
3321         FAS_DMA_READ(fas, fas->f_lastcount,
3322             fas->f_dmacookie.dmac_address, 16, cmd);
3323 
3324         New_state(fas, (int)nstate);
3325 
3326 #ifdef FASDEBUG
3327         if (DDEBUGGING) {
3328                 fas_dump_cmd(fas, sp);
3329         }
3330 #endif /* FASDEBUG */
3331 
3332         /*
3333          * if timeout == 0, then it has no effect on the timeout
3334          * handling; we deal with this when an actual timeout occurs.
3335          */
3336         if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
3337                 ASSERT(fas->f_tcmds[slot] >= 1);
3338         }
3339         i = pkt->pkt_time - slots->f_timebase;
3340 
3341         if (i == 0) {
3342                 EPRINTF("dup timeout\n");
3343                 (slots->f_dups)++;
3344                 slots->f_timeout = slots->f_timebase;
3345         } else if (i > 0) {
3346                 EPRINTF("new timeout\n");
3347                 slots->f_timeout = slots->f_timebase = pkt->pkt_time;
3348                 slots->f_dups = 1;
3349         }
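         /*
          * A pkt_time shorter than the current timebase needs no update
          * here; the per-slot timeout only shrinks when cmds complete and
          * fas_remove_cmd() rescans the slot for the largest remaining
          * pkt_time.
          */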
3350 
3351         fas_check_ncmds(fas);
3352 
3353         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_STARTCMD_END, "fas_startcmd_end");
3354 
3355         return (TRUE);
3356 }
3357 
3358 /*
3359  * Interrupt Entry Point.
3360  * Poll interrupts until they go away
3361  */
3362 static uint_t
3363 fas_intr(caddr_t arg)
3364 {
3365         struct fas *fas = (struct fas *)arg;
3366         int rval = DDI_INTR_UNCLAIMED;
3367         int kstat_updated = 0;
3368 
3369         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_POLL_START, "fas_intr_start");
3370 
3371         do {
3372                 mutex_enter(FAS_MUTEX(fas));
3373 
3374                 do {
3375                         if (fas_intr_svc(fas)) {
3376                                 /*
3377                                  * do not return immediately here because
3378                                  * we have to guarantee to always empty
3379                                  * the waitQ and callbackQ in the interrupt
3380                                  * handler
3381                                  */
3382                                 if (fas->f_polled_intr) {
3383                                         rval = DDI_INTR_CLAIMED;
3384                                         fas->f_polled_intr = 0;
3385                                 }
3386                         } else {
3387                                 rval = DDI_INTR_CLAIMED;
3388                         }
3389                 } while (INTPENDING(fas));
3390 
3391                 if (!kstat_updated && fas->f_intr_kstat &&
3392                     rval == DDI_INTR_CLAIMED) {
3393                         FAS_KSTAT_INTR(fas);
3394                         kstat_updated++;
3395                 }
3396 
3397                 /*
3398                  * check and empty the waitQ and the callbackQ
3399                  */
3400                 FAS_CHECK_WAITQ_AND_FAS_MUTEX_EXIT(fas);
3401                 FAS_EMPTY_CALLBACKQ(fas);
3402 
3403         } while (INTPENDING(fas));
3404 
3405         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_POLL_END, "fas_intr_end");
3406 
3407         return (rval);
3408 }
3409 
3410 /*
3411  * General interrupt service routine.
3412  */
3413 static char *dma_bits   = DMA_BITS;
3414 
3415 static int
3416 fas_intr_svc(struct fas *fas)
3417 {
3418         static int (*evec[])(struct fas *fas) = {
3419                 fas_finish_select,
3420                 fas_reconnect,
3421                 fas_phasemanage,
3422                 fas_finish,
3423                 fas_reset_recovery,
3424                 fas_istart,
3425                 fas_abort_curcmd,
3426                 fas_reset_bus,
3427                 fas_reset_bus,
3428                 fas_handle_selection
3429         };
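         /*
          * evec is indexed by the ACTION_* code a handler returns; the
          * dispatch loop at start_action below keeps chaining handlers
          * until one returns ACTION_RETURN.
          */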
3430         int action;
3431         uchar_t intr, stat;
3432         volatile struct fasreg *fasreg = fas->f_reg;
3433         int i = 0;
3434 
3435         TRACE_0(TR_FAC_SCSI_FAS, TR_FASSVC_START, "fas_intr_svc_start");
3436 
3437         /*
3438          * A read of FAS interrupt register clears interrupt,
3439          * so any other volatile information needs to be latched
3440          * up prior to reading the interrupt register.
3441          */
3442         fas->f_stat = fas_reg_read(fas, &fasreg->fas_stat);
3443 
3444         EPRINTF2("fas_intr_svc: state=%x stat=%x\n", fas->f_state,
3445             fas->f_stat);
3446 
3447         /*
3448          * this wasn't our interrupt?
3449          */
3450         if ((fas->f_stat & FAS_STAT_IPEND) == 0) {
3451                 if (fas_check_dma_error(fas)) {
3452                         action = ACTION_RESET;
3453                         goto start_action;
3454                 }
3455                 return (-1);
3456         }
3457 
3458         /*
3459          * if we are reset state, handle this first
3460          */
3461         if (fas->f_state == ACTS_RESET) {
3462                 action = ACTION_FINRST;
3463                 goto start_action;
3464         }
3465 
3466         /*
3467          * check for gross error.  fas366 hardware seems to register
3468          * the gross error bit when a parity error is found.  Make sure
3469          * to ignore the gross error bit when a parity error is detected.
3470          */
3471         if ((fas->f_stat & FAS_STAT_GERR) &&
3472             (fas->f_stat & FAS_STAT_PERR) == 0) {
3473                 action = fas_handle_gross_err(fas);
3474                 goto start_action;
3475         }
3476 
3477         /*
3478          * now it is finally safe to read the interrupt register
3479          * if we haven't done so yet
3480          * Note: we don't read step register here but only in
3481          * fas_finish_select(). It is not entirely safe but saves
3482          * redundant PIOs or extra code in this critical path
3483          */
3484         fas->f_intr =
3485             intr = fas_reg_read(fas, (uchar_t *)&fasreg->fas_intr);
3486 
3487         /*
3488          * read the fifo if there is something there or still in the
3489          * input shuttle
3490          */
3491         stat = fas->f_stat & FAS_PHASE_MASK;
3492 
3493         if ((intr & FAS_INT_RESEL) ||
3494             ((stat != FAS_PHASE_DATA_IN) && (stat != FAS_PHASE_DATA_OUT) &&
3495             ((fas->f_state & STATE_SELECTING) == 0) &&
3496             (fas->f_state != ACTS_DATA_DONE) &&
3497             (fas->f_state != ACTS_C_CMPLT))) {
3498 
3499                 fas->f_stat2 = fas_reg_read(fas, &fasreg->fas_stat2);
3500 
3501                 if (((fas->f_stat2 & FAS_STAT2_EMPTY) == 0) ||
3502                     (fas->f_stat2 & FAS_STAT2_ISHUTTLE)) {
3503                         fas_read_fifo(fas);
3504                 }
3505         }
3506 
3507         EPRINTF2("fas_intr_svc: intr=%x, stat=%x\n", fas->f_intr, fas->f_stat);
3508         EPRINTF2("dmacsr=%b\n", fas->f_dma->dma_csr, dma_bits);
3509 
3510         /*
3511          * Based upon the current state of the host adapter driver
3512          * we should be able to figure out what to do with an interrupt.
3513          *
3514          * The FAS asserts an interrupt with one or more of 8 possible
3515          * bits set in its interrupt register. These conditions are
3516          * SCSI bus reset detected, an illegal command fed to the FAS,
3517          * one of DISCONNECT, BUS SERVICE, FUNCTION COMPLETE conditions
3518          * for the FAS, a Reselection interrupt, or one of Selection
3519          * or Selection with Attention.
3520          *
3521          * Of these possible interrupts, we can deal with some right
3522          * here and now, irrespective of the current state of the driver.
3523          *
3524          * take care of the most likely interrupts first and call the action
3525          * immediately
3526          */
3527         if ((intr & (FAS_INT_RESET|FAS_INT_ILLEGAL|FAS_INT_SEL|FAS_INT_SELATN|
3528             FAS_INT_RESEL)) == 0) {
3529                 /*
3530                  * The rest of the reasons for an interrupt can
3531                  * be handled based purely on the state that the driver
3532                  * is currently in now.
3533                  */
3534                 if (fas->f_state & STATE_SELECTING) {
3535                         action = fas_finish_select(fas);
3536 
3537                 } else if (fas->f_state & STATE_ITPHASES) {
3538                         action = fas_phasemanage(fas);
3539 
3540                 } else {
3541                         fas_log(fas, CE_WARN, "spurious interrupt");
3542                         action = ACTION_RETURN;
3543                 }
3544 
3545         } else if ((intr & FAS_INT_RESEL) && ((intr &
3546             (FAS_INT_RESET|FAS_INT_ILLEGAL|FAS_INT_SEL|FAS_INT_SELATN)) == 0)) {
3547 
3548                 if ((fas->f_state & STATE_SELECTING) == 0) {
3549                         ASSERT(fas->f_state == STATE_FREE);
3550                         action = fas_reconnect(fas);
3551                 } else {
3552                         action = fas_reselect_preempt(fas);
3553                 }
3554 
3555         } else if (intr & (FAS_INT_RESET | FAS_INT_ILLEGAL)) {
3556                 action = fas_illegal_cmd_or_bus_reset(fas);
3557 
3558         } else if (intr & (FAS_INT_SEL|FAS_INT_SELATN)) {
3559                 action = ACTION_SELECT;
3560         }
3561 
3562 start_action:
3563         while (action != ACTION_RETURN) {
3564                 ASSERT((action >= 0) && (action <= ACTION_SELECT));
3565                 TRACE_3(TR_FAC_SCSI_FAS, TR_FASSVC_ACTION_CALL,
3566                     "fas_intr_svc call: fas 0x%p, action %d (%d)",
3567                     fas, action, i);
3568                 i++;
3569                 action = (*evec[action])(fas);
3570         }
3571 exit:
3572         TRACE_0(TR_FAC_SCSI_FAS, TR_FASSVC_END, "fas_intr_svc_end");
3573 
3574         return (0);
3575 }
3576 
3577 /*
3578  * Manage phase transitions.
3579  */
3580 static int
3581 fas_phasemanage(struct fas *fas)
3582 {
3583         ushort_t state;
3584         int action;
3585         static int (*pvecs[])(struct fas *fas) = {
3586                 fas_handle_cmd_start,
3587                 fas_handle_cmd_done,
3588                 fas_handle_msg_out_start,
3589                 fas_handle_msg_out_done,
3590                 fas_handle_msg_in_start,
3591                 fas_handle_more_msgin,
3592                 fas_handle_msg_in_done,
3593                 fas_handle_clearing,
3594                 fas_handle_data_start,
3595                 fas_handle_data_done,
3596                 fas_handle_c_cmplt,
3597                 fas_reconnect,
3598                 fas_handle_unknown,
3599                 fas_reset_recovery
3600         };
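         /*
          * pvecs is indexed by (f_state - 1) for the ACTS_* phase states;
          * STATE_FREE and out-of-range states fall into the "lost state"
          * recovery below.
          */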
3601         int i = 0;
3602 
3603         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_PHASEMANAGE_START,
3604             "fas_phasemanage_start");
3605 
3606         do {
3607                 EPRINTF1("fas_phasemanage: %s\n",
3608                     fas_state_name(fas->f_state & STATE_ITPHASES));
3609 
3610                 TRACE_2(TR_FAC_SCSI_FAS, TR_FAS_PHASEMANAGE_CALL,
3611                     "fas_phasemanage_call: fas 0x%p (%d)", fas, i++);
3612 
3613                 state = fas->f_state;
3614 
3615                 if (!(state == STATE_FREE || state > ACTS_ENDVEC)) {
3616                         ASSERT(pvecs[state-1] != NULL);
3617                         action = (*pvecs[state-1]) (fas);
3618                 } else {
3619                         fas_log(fas, CE_WARN, "lost state in phasemanage");
3620                         action = ACTION_ABORT_ALLCMDS;
3621                 }
3622 
3623         } while (action == ACTION_PHASEMANAGE);
3624 
3625         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_PHASEMANAGE_END,
3626             "fas_phasemanage_end");
3627         return (action);
3628 }
3629 
3630 /*
3631  * remove a cmd from active list and if timeout flag is set, then
3632  * adjust timeouts; if the same cmd will be resubmitted soon, don't
3633  * bother to adjust timeouts (i.e. don't set this flag)
3634  */
3635 static void
3636 fas_remove_cmd(struct fas *fas, struct fas_cmd *sp, int new_timeout_flag)
3637 {
3638         int tag = sp->cmd_tag[1];
3639         int slot = sp->cmd_slot;
3640         struct f_slots *tag_slots = fas->f_active[slot];
3641 
3642         ASSERT(sp != NULL);
3643         EPRINTF4("remove tag %d slot %d for target %d.%d\n",
3644             tag, slot, Tgt(sp), Lun(sp));
3645 
3646         if (sp == tag_slots->f_slot[tag]) {
3647                 tag_slots->f_slot[tag] = NULL;
3648                 fas->f_tcmds[slot]--;
3649         }
3650         if (fas->f_current_sp == sp) {
3651                 fas->f_current_sp = NULL;
3652         }
3653 
3654         ASSERT(sp != fas->f_active[sp->cmd_slot]->f_slot[sp->cmd_tag[1]]);
3655 
3656         if (new_timeout_flag != NEW_TIMEOUT) {
3657                 return;
3658         }
3659 
3660         /*
3661          * Figure out what to set tag Q timeout for...
3662          *
3663          * Optimize: If we have duplicate's of same timeout
3664          * we're using, then we'll use it again until we run
3665          * out of duplicates.  This should be the normal case
3666          * for block and raw I/O.
3667          * If no duplicates, we have to scan through tag que and
3668          * find the longest timeout value and use it.  This is
3669          * going to take a while...
3670          */
3671         if (sp->cmd_pkt->pkt_time == tag_slots->f_timebase) {
3672                 if (--(tag_slots->f_dups) <= 0) {
3673                         if (fas->f_tcmds[slot]) {
3674                                 struct fas_cmd *ssp;
3675                                 uint_t n = 0;
3676                                 ushort_t t = tag_slots->f_n_slots;
3677                                 ushort_t i;
3678                                 /*
3679                                  * This crude check assumes we don't do
3680                                  * this too often which seems reasonable
3681                                  * for block and raw I/O.
3682                                  */
3683                                 for (i = 0; i < t; i++) {
3684                                         ssp = tag_slots->f_slot[i];
3685                                         if (ssp &&
3686                                             (ssp->cmd_pkt->pkt_time > n)) {
3687                                                 n = ssp->cmd_pkt->pkt_time;
3688                                                 tag_slots->f_dups = 1;
3689                                         } else if (ssp &&
3690                                             (ssp->cmd_pkt->pkt_time == n)) {
3691                                                 tag_slots->f_dups++;
3692                                         }
3693                                 }
3694                                 tag_slots->f_timebase = n;
3695                                 EPRINTF1("searching, new_timeout= %d\n", n);
3696                         } else {
3697                                 tag_slots->f_dups = 0;
3698                                 tag_slots->f_timebase = 0;
3699                         }
3700                 }
3701         }
3702         tag_slots->f_timeout = tag_slots->f_timebase;
3703 
3704         ASSERT(fas->f_ncmds >= fas->f_ndisc);
3705 }
3706 
3707 /*
3708  * decrement f_ncmds and f_ndisc for this cmd before completing
3709  */
3710 static void
3711 fas_decrement_ncmds(struct fas *fas, struct fas_cmd *sp)
3712 {
3713         ASSERT((sp->cmd_flags & CFLAG_FREE) == 0);
3714         if ((sp->cmd_flags & CFLAG_FINISHED) == 0) {
3715                 fas->f_ncmds--;
3716                 if (sp->cmd_flags & CFLAG_CMDDISC) {
3717                         fas->f_ndisc--;
3718                 }
3719                 sp->cmd_flags |= CFLAG_FINISHED;
3720                 sp->cmd_flags &= ~CFLAG_CMDDISC;
3721         }
3722         ASSERT((fas->f_ncmds >= 0) && (fas->f_ndisc >= 0));
3723         ASSERT(fas->f_ncmds >= fas->f_ndisc);
3724 }
3725 
3726 /*
3727  * Most commonly called phase handlers:
3728  *
3729  * Finish routines
3730  */
3731 static int
3732 fas_finish(struct fas *fas)
3733 {
3734         struct fas_cmd *sp = fas->f_current_sp;
3735         struct scsi_pkt *pkt = CMD2PKT(sp);
3736         int action = ACTION_SEARCH;
3737         struct scsi_status *status =
3738             (struct  scsi_status *)sp->cmd_pkt->pkt_scbp;
3739 
3740         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_START,
3741             "fas_finish_start");
3742         EPRINTF("fas_finish\n");
3743 
3744 #ifdef FAS_TEST
3745         if (fas_test_stop && (sp->cmd_pkt_flags & 0x80000000)) {
3746                 debug_enter("untagged cmd completed");
3747         }
3748 #endif
3749 
3750         /*
3751          * immediately enable reselects
3752          */
3753         fas_reg_cmd_write(fas, CMD_EN_RESEL);
3754         if (status->sts_chk) {
3755                 /*
3756                  * In the case that we are getting a check condition
3757                  * clear our knowledge of synchronous capabilities.
3758                  * This will unambiguously force a renegotiation
3759                  * prior to any possible data transfer (we hope),
3760                  * including the data transfer for a UNIT ATTENTION
3761                  * condition generated by somebody powering on and
3762                  * off a target.
3763                  */
3764                 fas_force_renegotiation(fas, Tgt(sp));
3765         }
3766 
3767         /*
3768          * backoff sync/wide if there were parity errors
3769          */
3770         if (sp->cmd_pkt->pkt_statistics & STAT_PERR) {
3771                 fas_sync_wide_backoff(fas, sp, sp->cmd_slot);
3772 #ifdef FAS_TEST
3773                 if (fas_test_stop) {
3774                         debug_enter("parity error");
3775                 }
3776 #endif
3777         }
3778 
3779         /*
3780          * Free from active list and update counts
3781          * We need to clean up this cmd now, just in case fas_ustart()
3782          * hits a reset or other fatal transport error
3783          */
3784         fas_check_ncmds(fas);
3785         fas_remove_cmd(fas, sp, NEW_TIMEOUT);
3786         fas_decrement_ncmds(fas, sp);
3787         fas_check_ncmds(fas);
3788 
3789         /*
3790          * go to state free and try to start a new cmd now
3791          */
3792         New_state(fas, STATE_FREE);
3793 
3794         if ((fas->f_ncmds > fas->f_ndisc) && (*((char *)status) == 0) &&
3795             (INTPENDING(fas) == 0)) {
3796                 if (fas_ustart(fas)) {
3797                         action = ACTION_RETURN;
3798                 }
3799         }
3800 
3801         /*
3802          * if there was a data xfer then calculate residue and
3803          * sync data for consistent memory xfers
3804          */
3805         if (pkt->pkt_state & STATE_XFERRED_DATA) {
3806                 pkt->pkt_resid = sp->cmd_dmacount - sp->cmd_data_count;
3807                 if (sp->cmd_flags & CFLAG_CMDIOPB) {
3808                         (void) ddi_dma_sync(sp->cmd_dmahandle, 0, (uint_t)0,
3809                             DDI_DMA_SYNC_FORCPU);
3810                 }
3811                 if (pkt->pkt_resid) {
3812                         IPRINTF3("%d.%d finishes with %ld resid\n",
3813                             Tgt(sp), Lun(sp), pkt->pkt_resid);
3814                 }
3815         }
3816 
3817         if (sp->cmd_pkt_flags & FLAG_NOINTR) {
3818                 fas_call_pkt_comp(fas, sp);
3819                 action = ACTION_RETURN;
3820         } else {
3821                 /*
3822                  * Start an auto request sense if there was a check
3823                  * condition. If arq has not been enabled,
3824                  * fas_handle_sts_chk will do the callback.
3825                  */
3826                 if (status->sts_chk) {
3827                         if (fas_handle_sts_chk(fas, sp)) {
3828                                 /*
3829                                  * we can't start an arq because one is
3830                                  * already in progress. the target is
3831                                  * probably confused
3832                                  */
3833                                 action = ACTION_ABORT_CURCMD;
3834                         }
3835                 } else if ((*((char *)status) & STATUS_MASK) ==
3836                     STATUS_QFULL) {
3837                         fas_handle_qfull(fas, sp);
3838                 } else {
3839 #ifdef FAS_TEST
3840                         if (fas_arqs_failure && (status->sts_chk == 0)) {
3841                                 struct scsi_arq_status *arqstat;
3842                                 status->sts_chk = 1;
3843                                 arqstat = (struct scsi_arq_status *)
3844                                     (sp->cmd_pkt->pkt_scbp);
3845                                 arqstat->sts_rqpkt_reason = CMD_TRAN_ERR;
3846                                 sp->cmd_pkt->pkt_state |= STATE_ARQ_DONE;
3847                                 fas_arqs_failure = 0;
3848                         }
3849                         if (fas_tran_err) {
3850                                 sp->cmd_pkt->pkt_reason = CMD_TRAN_ERR;
3851                                 fas_tran_err = 0;
3852                         }
3853 #endif
3854                         fas_call_pkt_comp(fas, sp);
3855                 }
3856         }
3857 
3858         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_END, "fas_finish_end");
3859         return (action);
3860 }
3861 
3862 /*
3863  * Complete the process of selecting a target
3864  */
3865 static int
3866 fas_finish_select(struct fas *fas)
3867 {
3868         volatile struct dma *dmar = fas->f_dma;
3869         struct fas_cmd *sp = fas->f_current_sp;
3870         uchar_t intr = fas->f_intr;
3871         uchar_t step;
3872 
3873         step = fas_reg_read(fas, &fas->f_reg->fas_step) & FAS_STEP_MASK;
3874 
3875         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_SELECT_START,
3876             "fas_finish_select_start");
3877         EPRINTF("fas_finish_select:\n");
3878         ASSERT(sp != 0);
3879 
3880         /*
3881          * Check for DMA gate array errors
3882          */
3883         if ((fas->f_dma_csr = fas_dma_reg_read(fas, &dmar->dma_csr))
3884             & DMA_ERRPEND) {
3885                 /*
3886                  * It would be desirable to set the ATN* line and attempt to
3887                  * do the whole schmear of INITIATOR DETECTED ERROR here,
3888                  * but that is too hard to do at present.
3889                  */
3890                 fas_log(fas, CE_WARN,
3891                     "Unrecoverable DMA error during selection");
3892                 fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
3893 
3894                 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_SELECT_RESET1_END,
3895                     "fas_finish_select_end (ACTION_RESET1)");
3896                 return (ACTION_RESET);
3897         }
3898 
3899         /*
3900          * Shut off DMA gate array
3901          */
3902         FAS_FLUSH_DMA(fas);
3903 
3904         /*
3905          * Did something respond to selection?
3906          */
3907         if (intr == (FAS_INT_BUS|FAS_INT_FCMP)) {
3908                 /*
3909                  * We successfully selected a target (we think).
3910                  * Now we figure out how botched things are
3911                  * based upon the kind of selection we were
3912                  * doing and the state of the step register.
3913                  */
3914                 switch (step) {
3915                 case FAS_STEP_ARBSEL:
3916                         /*
3917                          * In this case, we selected the target, but went
3918                          * neither into MESSAGE OUT nor COMMAND phase.
3919                          * However, this isn't a fatal error, so we just
3920                          * drive on.
3921                          *
3922                  * This might be a good point to note that we have
3923                  * a target that appears not to accommodate
3924                  * disconnecting, but it really isn't worth the
3925                  * effort to distinguish such targets especially
3926                  * from others.
3927                          */
3928                         /* FALLTHROUGH */
3929 
3930                 case FAS_STEP_SENTID:
3931                         /*
3932                          * In this case, we selected the target and sent
3933                          * message byte and have stopped with ATN* still on.
3934                          * This case should only occur if we use the SELECT
3935                          * AND STOP command.
3936                          */
3937                         /* FALLTHROUGH */
3938 
3939                 case FAS_STEP_NOTCMD:
3940                         /*
3941                          * In this case, we either didn't transition to command
3942                          * phase, or,
3943                          * if we were using the SELECT WITH ATN3 command,
3944                          * we possibly didn't send all message bytes.
3945                          */
3946                         break;
3947 
3948                 case FAS_STEP_PCMD:
3949                         /*
3950                          * In this case, not all command bytes transferred.
3951                          */
3952                         /* FALLTHROUGH */
3953 
3954                 case FAS_STEP_DONE:
3955                         /*
3956                          * This is the usual 'good' completion point.
3957                          * If we sent message byte(s), we subtract
3958                          * off the number of message bytes that were
3959                          * ahead of the command.
3960                          */
3961                         sp->cmd_pkt->pkt_state |= STATE_SENT_CMD;
3962                         break;
3963 
3964                 default:
3965                         fas_log(fas, CE_WARN,
3966                             "bad sequence step (0x%x) in selection", step);
3967                         TRACE_0(TR_FAC_SCSI_FAS,
3968                             TR_FAS_FINISH_SELECT_RESET3_END,
3969                             "fas_finish_select_end (ACTION_RESET3)");
3970                         return (ACTION_RESET);
3971                 }
3972 
3973                 /*
3974                  * OR in common state...
3975                  */
3976                 sp->cmd_pkt->pkt_state |= (STATE_GOT_BUS|STATE_GOT_TARGET);
3977 
3978                 /*
3979                  * data pointer initialization has already been done
3980                  */
3981                 New_state(fas, ACTS_UNKNOWN);
3982                 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_SELECT_ACTION3_END,
3983                     "fas_finish_select_end (action3)");
3984                 return (fas_handle_unknown(fas));
3985 
3986         } else if (intr == FAS_INT_DISCON) {
3987                 /*
3988                  * make sure we negotiate when this target comes
3989                  * on line later on
3990                  */
3991                 fas_force_renegotiation(fas, Tgt(sp));
3992 
3993                 fas->f_sdtr_sent = fas->f_wdtr_sent = 0;
3994                 sp->cmd_pkt->pkt_state |= STATE_GOT_BUS;
3995 
3996                 /*
3997                  * Set the throttle to DRAIN_THROTTLE to make
3998                  * sure any disconnected commands will get timed out
3999                  * in case the drive dies
4000                  */
4001 
4002                 if (fas->f_reset_delay[Tgt(sp)] == 0) {
4003                         fas->f_throttle[sp->cmd_slot] = DRAIN_THROTTLE;
4004                 }
4005 
4006                 fas_set_pkt_reason(fas, sp, CMD_INCOMPLETE, 0);
4007 
4008                 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_SELECT_FINISH_END,
4009                     "fas_finish_select_end (ACTION_FINISH)");
4010                 return (ACTION_FINISH);
4011         } else  {
4012                 fas_printstate(fas, "undetermined selection failure");
4013                 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_SELECT_RESET2_END,
4014                     "fas_finish_select_end (ACTION_RESET2)");
4015                 return (ACTION_RESET);
4016         }
4017         _NOTE(NOT_REACHED)
4018         /* NOTREACHED */
4019 }
4020 
4021 /*
4022  * a selection got preempted by a reselection; shut down dma
4023  * and put back cmd in the ready queue unless NOINTR
4024  */
4025 static int
4026 fas_reselect_preempt(struct fas *fas)
4027 {
4028         int rval;
4029 
4030         /*
4031          * A reselection attempt preempted our selection attempt;
4032          * we put the request back in the ready queue.
4033          */
4034         struct fas_cmd *sp = fas->f_current_sp;
4035 
4036         /*
4037          * Shut off DMA gate array
4038          */
4039         FAS_FLUSH_DMA(fas);
4040 
4041         /*
4042          * service the reconnect now and clean up later
4043          */
4044         New_state(fas, STATE_FREE);
4045         rval = fas_reconnect(fas);
4046 
4047         /*
4048          * If selection for a non-tagged command is preempted, the
4049          * command could be stuck because throttle was set to DRAIN,
4050          * and a disconnected command timeout follows.
4051          */
4052         if ((sp->cmd_pkt_flags & FLAG_TAGMASK) == 0)
4053                 fas->f_throttle[sp->cmd_slot] = 1;
4054 
4055         if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
4056                 fas_remove_cmd(fas, sp, NEW_TIMEOUT);
4057         }
4058 
4059         /*
4060          * if we attempted to renegotiate on this cmd, undo this now
4061          */
4062         if (fas->f_wdtr_sent) {
4063                 fas->f_wide_known &= ~(1<<Tgt(sp));
4064                 fas->f_wdtr_sent = 0;
4065         }
4066         if (fas->f_sdtr_sent) {
4067                 fas->f_sync_known &= ~(1<<Tgt(sp));
4068                 fas->f_sdtr_sent = 0;
4069         }
4070 
4071         fas_head_of_readyQ(fas, sp);
4072 
4073         return (rval);
4074 }
4075 
4076 /*
4077  * Handle the reconnection of a target
4078  */
4079 static int
4080 fas_reconnect(struct fas *fas)
4081 {
4082         volatile struct fasreg *fasreg = fas->f_reg;
4083         struct fas_cmd *sp = NULL;
4084         uchar_t target, lun;
4085         uchar_t tmp;
4086         uchar_t slot;
4087         char *bad_reselect = NULL;
4088 
4089         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_RECONNECT_START,
4090             "fas_reconnect_start");
4091         EPRINTF("fas_reconnect:\n");
4092 
4093         fas_check_ncmds(fas);
4094 
4095         switch (fas->f_state) {
4096         default:
4097                 /*
4098                  * Pick up target id from fifo
4099                  *
4100                  * There should only be the reselecting target's id
4101                  * and an identify message in the fifo.
4102                  */
4103                 target = fas->f_fifo[0];
4104 
4105                 /*
4106                  * we know the target so update period, conf3,
4107                  * offset reg, if necessary, and accept the msg
4108                  */
4109                 FAS_SET_PERIOD_OFFSET_CONF3_REGS(fas, target);
4110 
4111                 /*
4112                  * now we can accept the message. an untagged
4113                  * target will go immediately into data phase so
4114                  * the period/offset/conf3 registers need to be
4115                  * updated before accepting the message
4116                  */
4117                 fas_reg_cmd_write(fas, CMD_MSG_ACPT);
4118 
4119                 if (fas->f_fifolen != 2) {
4120                         bad_reselect = "bad reselect bytes";
4121                         break;
4122                 }
4123 
4124                 /*
4125                  * normal initial reconnect; we get another interrupt later
4126                  * for the tag
4127                  */
4128                 New_state(fas, ACTS_RESEL);
4129 
4130                 if (fas->f_stat & FAS_STAT_PERR) {
4131                         break;
4132                 }
4133 
4134                 /*
4135                  * Check sanity of message.
4136                  */
4137                 tmp = fas->f_fifo[1];
4138                 fas->f_last_msgin = tmp;
4139 
4140                 if (!(IS_IDENTIFY_MSG(tmp)) || (tmp & INI_CAN_DISCON)) {
4141                         bad_reselect = "bad identify msg";
4142                         break;
4143                 }
4144 
4145                 lun = tmp & (NLUNS_PER_TARGET-1);
4146 
4147                 EPRINTF2("fas_reconnect: target=%x, idmsg=%x\n",
4148                     target, tmp);
4149 
4150                 fas->f_resel_slot = slot = (target * NLUNS_PER_TARGET) | lun;
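                 /*
                  * The slot index uses the same (target, lun) packing as
                  * cmd_slot: target * NLUNS_PER_TARGET | lun, so lun 0 of
                  * target 1 lands in slot NLUNS_PER_TARGET, and so on.
                  */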
4151 
4152                 fas_reg_write(fas, (uchar_t *)&fasreg->fas_busid,
4153                     (target & 0xf) | FAS_BUSID_ENCODID |
4154                     FAS_BUSID_32BIT_COUNTER);
4155 
4156                 /*
4157                  * If tag queueing in use, DMA in tag.
4158                  * Otherwise, we're ready to go.
4159                  * if tag 0 slot is non-empty, a non-tagged cmd is
4160                  * reconnecting
4161                  */
4162                 if (TAGGED(target) && fas->f_tcmds[slot] &&
4163                     (fas->f_active[slot]->f_slot[0] == NULL)) {
4164                         volatile uchar_t *c =
4165                             (uchar_t *)fas->f_cmdarea;
4166 
4167                         /*
4168                          * If we've been doing tagged queueing and this
4169                          * request doesn't do it, maybe it was disabled
4170                          * for this one.  This is rather
4171                          * dangerous as it blows all pending tagged cmds away.
4172                          * But if target is confused, then we'll blow up
4173                          * shortly.
4174                          */
4175                         *c++ = INVALID_MSG;
4176                         *c   = INVALID_MSG;
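                         /*
                          * Both bytes are pre-set to INVALID_MSG so the
                          * ACTS_RESEL code can later tell whether the DMA
                          * of the tag message actually landed (it waits
                          * for the tag byte to flip).
                          */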
4177 
4178                         FAS_DMA_WRITE_SETUP(fas, 2,
4179                             fas->f_dmacookie.dmac_address);
4180 
4181                         /*
4182                          * For tagged queuing, we should still be in msgin
4183                          * phase.
4184                          * If not, then either we aren't running tagged
4185                          * queueing like we thought or the target died.
4186                          */
4187                         if (INTPENDING(fas) == 0) {
4188                                 EPRINTF1("slow reconnect, slot=%x\n", slot);
4189                                 TRACE_0(TR_FAC_SCSI_FAS,
4190                                     TR_FAS_RECONNECT_RETURN1_END,
4191                                     "fas_reconnect_end (_RETURN1)");
4192                                 return (ACTION_RETURN);
4193                         }
4194 
4195                         fas->f_stat = fas_reg_read(fas, &fasreg->fas_stat);
4196                         fas->f_intr = fas_reg_read(fas, &fasreg->fas_intr);
4197                         if (fas->f_intr & (FAS_INT_ILLEGAL | FAS_INT_RESET)) {
4198                                 return (fas_illegal_cmd_or_bus_reset(fas));
4199                         }
4200 
4201                         if ((fas->f_stat & FAS_PHASE_MASK) !=
4202                             FAS_PHASE_MSG_IN) {
4203                                 bad_reselect = "not in msgin phase";
4204                                 break;
4205                         }
4206 
4207                         if (fas->f_intr & FAS_INT_DISCON) {
4208                                 bad_reselect = "unexpected bus free";
4209                                 break;
4210                         }
4211                 } else {
4212                         fas->f_current_sp = sp = fas->f_active[slot]->f_slot[0];
4213                         break;
4214                 }
4215                 /*FALLTHROUGH*/
4216 
4217         case ACTS_RESEL:
4218                 {
4219                         volatile uchar_t *c =
4220                             (uchar_t *)fas->f_cmdarea;
4221                         struct f_slots *tag_slots;
4222                         int id, tag;
4223                         uint_t i;
4224 
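                                  /*
                                   * Recover the target from the reselection
                                   * slot computed above; the slot encodes
                                   * target and lun as
                                   * (target * NLUNS_PER_TARGET) | lun.
                                   */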
4225                         slot = fas->f_resel_slot;
4226                         target = slot/NLUNS_PER_TARGET;
4227 
4228                         if ((fas->f_stat & FAS_PHASE_MASK) !=
4229                             FAS_PHASE_MSG_IN) {
4230                                 IPRINTF1("no tag for slot %x\n", slot);
4231                                 if (fas->f_intr & ~(FAS_INT_BUS |
4232                                     FAS_INT_FCMP)) {
4233                                         New_state(fas, ACTS_UNKNOWN);
4234                                         TRACE_0(TR_FAC_SCSI_FAS,
4235                                             TR_FAS_RECONNECT_PHASEMANAGE_END,
4236                                             "fas_reconnect_end (_PHASEMANAGE)");
4237                                         return (ACTION_PHASEMANAGE);
4238                                 } else {
4239                                         bad_reselect = "not in msgin phase";
4240                                         break;
4241                                 }
4242                         }
4243                         fas_reg_cmd_write(fas, CMD_TRAN_INFO|CMD_DMA);
4244                         fas_dma_reg_write(fas, &fas->f_dma->dma_csr,
4245                             fas->f_dma_csr);
4246 
4247                         fas_reg_cmd_write(fas, CMD_MSG_ACPT);
4248 
4249                         for (i = 0; i < (uint_t)RECONNECT_TAG_RCV_TIMEOUT;
4250                             i++) {
4251                                 /*
4252                                  * timeout is not very accurate but this
4253                                  * should take no time at all
4254                                  */
4255                                 if (INTPENDING(fas)) {
4256                                         fas->f_stat = fas_reg_read(fas,
4257                                             (uchar_t *)&fas->f_reg->fas_stat);
4258                                         fas->f_intr = fas_reg_read(fas,
4259                                             (uchar_t *)&fas->f_reg->fas_intr);
4260                                         if (fas->f_intr & (FAS_INT_RESET |
4261                                             FAS_INT_ILLEGAL)) {
4262                                                 return (
4263                                                     fas_illegal_cmd_or_bus_reset
4264                                                     (fas));
4265                                         }
4266                                         if (fas->f_intr & FAS_INT_FCMP) {
4267                                                 break;
4268                                         }
4269                                 }
4270                         }
4271 
4272                         if (i == (uint_t)RECONNECT_TAG_RCV_TIMEOUT) {
4273                                 bad_reselect = "timeout on receiving tag msg";
4274                                 break;
4275                         }
4276 
4277                         FAS_FLUSH_DMA(fas);
4278 
4279                         /*
4280                          * We should really do a DMA sync here, but that
4281                          * hurts performance too much; we'll just hang
4282                          * around until the tag byte flips.
4283                          * This is necessary on any system with an
4284                          * XBox.
4285                          */
4286                         if (*c == INVALID_MSG) {
4287                                 EPRINTF(
4288                                     "fas_reconnect: invalid msg, polling\n");
4289                                 for (i = 0; i < 1000000; i++) {
4290                                         if (*c != INVALID_MSG)
4291                                                 break;
4292                                 }
4293                         }
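
                                  /*
                                   * If the sentinel never flips, the stale
                                   * value fails the queue tag range check
                                   * below and is treated as a botched tag.
                                   */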
4294 
4295                         if (fas->f_stat & FAS_STAT_PERR) {
4296                                 break;
4297                         }
4298 
4299                         if ((fas->f_stat & FAS_STAT_XZERO) == 0 ||
4300                             (id = *c++) < MSG_SIMPLE_QTAG ||
4301                             id > MSG_ORDERED_QTAG) {
4302                                 /*
4303                                  * Target agreed to do tagged queueing
4304                                  * and lied!
4305                                  * This problem implies the drive firmware is
4306                                  * broken.
4307                                  */
4308                                 bad_reselect = "botched tag";
4309                                 break;
4310                         }
4311                         tag = *c;
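
                                  /*
                                   * The first byte (checked by the range test
                                   * above) was the queue tag message type;
                                   * 'tag' is the tag value itself and indexes
                                   * f_slot[] below.
                                   */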
4312 
4313                         /* Set ptr to reconnecting scsi pkt */
4314                         tag_slots = fas->f_active[slot];
4315                         if (tag_slots != NULL) {
4316                                 sp = tag_slots->f_slot[tag];
4317                         } else {
4318                                 bad_reselect = "Invalid tag";
4319                                 break;
4320                         }
4321 
4322                         fas->f_current_sp = sp;
4323                 }
4324         }
4325 
4326         if (fas->f_stat & FAS_STAT_PERR) {
4327                 sp = NULL;
4328                 bad_reselect = "Parity error in reconnect msg's";
4329         }
4330 
4331         if ((sp == NULL ||
4332 #ifdef FAS_TEST
4333             (fas_atest_reconn & (1<<Tgt(sp))) ||
4334 #endif
4335             (sp->cmd_flags & (CFLAG_CMDDISC|CFLAG_CMDPROXY)) == 0)) {
4336                 /*
4337                  * this shouldn't really happen, so it is better
4338                  * to reset the bus; some disks accept the abort
4339                  * and then still reconnect
4340                  */
4341                 if (bad_reselect == NULL) {
4342                         bad_reselect = "no command";
4343                 }
4344 #ifdef FAS_TEST
4345                 if (sp && !(fas_atest_reconn & (1<<Tgt(sp))) &&
4346                     fas_test_stop) {
4347                         debug_enter("bad reconnect");
4348                 } else {
4349                         fas_atest_reconn = 0;
4350                 }
4351 #endif
4352                 goto bad;
4353 
4354         /*
4355          *  XXX remove this case or make it an ASSERT
4356          */
4357         } else if (sp->cmd_flags & CFLAG_CMDPROXY) {
4358                 /*
4359                  * If we got here, we were already attempting to
4360                  * run a polled proxy command for this target.
4361                  * Set ATN, copy in the message, and drive
4362                  * on (ignoring any parity error on the identify).
4363                  */
4364                 IPRINTF1("fas_reconnect: fielding proxy cmd for %d\n",
4365                     target);
4366                 fas_assert_atn(fas);
4367                 fas->f_omsglen = sp->cmd_cdb[FAS_PROXY_DATA];
4368                 tmp = 0;
4369                 while (tmp < fas->f_omsglen) {
4370                         fas->f_cur_msgout[tmp] =
4371                             sp->cmd_cdb[FAS_PROXY_DATA+1+tmp];
4372                         tmp++;
4373                 }
4374                 sp->cmd_cdb[FAS_PROXY_RESULT] = FALSE;
4375 
4376                 /*
4377                  * pretend that the disconnected cmd is still disconnected
4378                  * (this prevents ndisc from going negative)
4379                  */
4380                 fas->f_ndisc++;
4381                 ASSERT((fas->f_ncmds >= 0) && (fas->f_ndisc >= 0));
4382                 ASSERT(fas->f_ncmds >= fas->f_ndisc);
4383         }
4384 
4385         ASSERT(fas->f_resel_slot == slot);
4386         ASSERT(fas->f_ndisc > 0);
4387         fas->f_ndisc--;
4388         sp->cmd_flags &= ~CFLAG_CMDDISC;
4389         New_state(fas, ACTS_UNKNOWN);
4390 
4391         /*
4392          * A reconnect may imply a restore pointers operation
4393          * Note that some older disks (Micropolis in Pbox) do not
4394          * send a save data ptr on disconnect if all data has been
4395          * xferred. So, we cannot restore ptrs yet here.
4396          */
4397         if ((sp->cmd_flags & CFLAG_DMAVALID) &&
4398             (sp->cmd_data_count != sp->cmd_saved_data_count)) {
4399                 sp->cmd_flags |= CFLAG_RESTORE_PTRS;
4400         }
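              /*
               * The restore itself is deferred: fas_handle_data_start()
               * checks CFLAG_RESTORE_PTRS and calls fas_restore_pointers()
               * before setting up the next data transfer.
               */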
4401 
4402         /*
4403          * Return to await the FUNCTION COMPLETE interrupt we
4404          * should get out of accepting the IDENTIFY message.
4405          */
4406         EPRINTF2("Reconnecting %d.%d\n", target, slot % NLUNS_PER_TARGET);
4407         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_RECONNECT_RETURN2_END,
4408             "fas_reconnect_end (_RETURN2)");
4409         return (ACTION_RETURN);
4410 
4411 bad:
4412         if (sp && (fas->f_stat & FAS_STAT_PERR)) {
4413                 sp->cmd_pkt->pkt_statistics |= STAT_PERR;
4414         }
4415         fas_log(fas, CE_WARN, "target %x: failed reselection (%s)",
4416             target, bad_reselect);
4417 
4418 #ifdef FASDEBUG
4419         fas_printstate(fas, "failed reselection");
4420 #endif
4421         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_RECONNECT_RESET5_END,
4422             "fas_reconnect_end (_RESET5)");
4423         return (ACTION_RESET);
4424 }
4425 
4426 /*
4427  * handle unknown bus phase
4428  * we don't know what to expect so check status register for current
4429  * phase
4430  */
4431 int
4432 fas_handle_unknown(struct fas *fas)
4433 {
4434         TRACE_1(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_UNKNOWN_START,
4435             "fas_handle_unknown_start: fas 0x%p", fas);
4436         EPRINTF("fas_handle_unknown:\n");
4437 
4438         if ((fas->f_intr & FAS_INT_DISCON) == 0) {
4439                 /*
4440                  * we call actions here rather than returning to phasemanage
4441                  * (this is the most frequently called action)
4442                  */
4443                 switch (fas->f_stat & FAS_PHASE_MASK) {
4444                 case FAS_PHASE_DATA_IN:
4445                 case FAS_PHASE_DATA_OUT:
4446                         New_state(fas, ACTS_DATA);
4447                         TRACE_0(TR_FAC_SCSI_FAS,
4448                             TR_FAS_HANDLE_UNKNOWN_PHASE_DATA_END,
4449                             "fas_handle_unknown_end (phase_data)");
4450                         return (fas_handle_data_start(fas));
4451 
4452                 case FAS_PHASE_MSG_OUT:
4453                         New_state(fas, ACTS_MSG_OUT);
4454                         TRACE_0(TR_FAC_SCSI_FAS,
4455                             TR_FAS_HANDLE_UNKNOWN_PHASE_MSG_OUT_END,
4456                             "fas_handle_unknown_end (phase_msg_out)");
4457                         return (fas_handle_msg_out_start(fas));
4458 
4459                 case FAS_PHASE_MSG_IN:
4460                         New_state(fas, ACTS_MSG_IN);
4461                         TRACE_0(TR_FAC_SCSI_FAS,
4462                             TR_FAS_HANDLE_UNKNOWN_PHASE_MSG_IN_END,
4463                             "fas_handle_unknown_end (phase_msg_in)");
4464                         return (fas_handle_msg_in_start(fas));
4465 
4466                 case FAS_PHASE_STATUS:
4467                         fas_reg_cmd_write(fas, CMD_FLUSH);
4468 #ifdef  FAS_TEST
4469                         if (fas_ptest_status & (1<<Tgt(fas->f_current_sp))) {
4470                                 fas_assert_atn(fas);
4471                         }
4472 #endif  /* FAS_TEST */
4473 
4474                         fas_reg_cmd_write(fas, CMD_COMP_SEQ);
4475                         New_state(fas, ACTS_C_CMPLT);
4476 
4477                         TRACE_0(TR_FAC_SCSI_FAS,
4478                             TR_FAS_HANDLE_UNKNOWN_PHASE_STATUS_END,
4479                             "fas_handle_unknown_end (phase_status)");
4480                         return (fas_handle_c_cmplt(fas));
4481 
4482                 case FAS_PHASE_COMMAND:
4483                         New_state(fas, ACTS_CMD_START);
4484                         TRACE_0(TR_FAC_SCSI_FAS,
4485                             TR_FAS_HANDLE_UNKNOWN_PHASE_CMD_END,
4486                             "fas_handle_unknown_end (phase_cmd)");
4487                         return (fas_handle_cmd_start(fas));
4488                 }
4489 
4490                 fas_printstate(fas, "Unknown bus phase");
4491                 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_UNKNOWN_RESET_END,
4492                     "fas_handle_unknown_end (reset)");
4493                 return (ACTION_RESET);
4494 
4495         } else {
4496                 /*
4497                  * Okay. What to do now? Let's try (for the time being)
4498                  * assuming that the target went south and dropped busy,
4499                  * as a disconnect implies that either we received
4500                  * a completion or a disconnect message, or that we
4501                  * had sent an ABORT OPERATION or BUS DEVICE RESET
4502                  * message. In either case, we expected the disconnect
4503                  * and should have fielded it elsewhere.
4504                  *
4505                  * If we see a chip disconnect here, this is an unexpected
4506                  * loss of BSY*. Clean up the state of the chip and return.
4507                  *
4508                  */
4509                 int msgout = fas->f_cur_msgout[0];
4510                 struct fas_cmd *sp = fas->f_current_sp;
4511                 int target = Tgt(sp);
4512 
4513                 if (msgout == MSG_HEAD_QTAG || msgout == MSG_SIMPLE_QTAG) {
4514                         msgout = fas->f_cur_msgout[2];
4515                 }
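                      /*
                       * When the first outgoing byte was a two-byte queue tag
                       * message, the message of interest (e.g. ABORT TAG)
                       * follows the tag and sits at index 2.
                       */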
4516                 EPRINTF4("msgout: %x %x %x, last_msgout=%x\n",
4517                     fas->f_cur_msgout[0], fas->f_cur_msgout[1],
4518                     fas->f_cur_msgout[2], fas->f_last_msgout);
4519 
4520                 if (msgout == MSG_ABORT || msgout == MSG_ABORT_TAG ||
4521                     msgout == MSG_DEVICE_RESET) {
4522                         IPRINTF2("Successful %s message to target %d\n",
4523                             scsi_mname(msgout), Tgt(sp));
4524                         if (sp->cmd_flags & CFLAG_CMDPROXY) {
4525                                 sp->cmd_cdb[FAS_PROXY_RESULT] = TRUE;
4526                         }
4527                         if (msgout == MSG_ABORT || msgout == MSG_ABORT_TAG) {
4528                                 fas->f_abort_msg_sent++;
4529                                 if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
4530                                         fas_set_pkt_reason(fas, sp,
4531                                             CMD_ABORTED, STAT_ABORTED);
4532                                 }
4533                         } else if (msgout == MSG_DEVICE_RESET) {
4534                                 fas->f_reset_msg_sent++;
4535                                 if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
4536                                         fas_set_pkt_reason(fas, sp,
4537                                             CMD_RESET, STAT_DEV_RESET);
4538                                 }
4539                                 fas_force_renegotiation(fas, target);
4540                         }
4541                 } else {
4542                         if ((fas->f_last_msgout == MSG_EXTENDED) &&
4543                             (fas->f_last_msgin == MSG_REJECT)) {
4544                                 /*
4545                                  * the target rejected the negotiations,
4546                                  * so resubmit again (no_sync/no_wide
4547                                  * is now set)
4548                                  */
4549                                 New_state(fas, STATE_FREE);
4550                                 fas_reg_cmd_write(fas, CMD_EN_RESEL);
4551                                 fas_remove_cmd(fas, sp, NEW_TIMEOUT);
4552                                 fas_decrement_ncmds(fas, sp);
4553                                 fas_check_ncmds(fas);
4554                                 sp->cmd_flags &= ~CFLAG_TRANFLAG;
4555                                 (void) fas_accept_pkt(fas, sp, NO_TRAN_BUSY);
4556                                 fas_check_ncmds(fas);
4557                                 TRACE_0(TR_FAC_SCSI_FAS,
4558                                     TR_FAS_HANDLE_UNKNOWN_INT_DISCON_END,
4559                                     "fas_handle_unknown_end (int_discon)");
4560                                 return (ACTION_SEARCH);
4561 
4562                         } else if (fas->f_last_msgout == MSG_EXTENDED) {
4563                                 /*
4564                                  * target dropped off the bus during
4565                                  * negotiations
4566                                  */
4567                                 fas_reset_sync_wide(fas);
4568                                 fas->f_sdtr_sent = fas->f_wdtr_sent = 0;
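                                      /*
                                       * Presumably clearing the negotiated
                                       * sync/wide settings and the
                                       * sdtr/wdtr-sent flags forces a fresh
                                       * negotiation on the next selection of
                                       * this target.
                                       */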
4569                         }
4570 
4571                         fas_set_pkt_reason(fas, sp, CMD_UNX_BUS_FREE, 0);
4572 #ifdef FASDEBUG
4573                         fas_printstate(fas, "unexpected bus free");
4574 #endif
4575                 }
4576                 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_UNKNOWN_INT_DISCON_END,
4577                     "fas_handle_unknown_end (int_discon)");
4578                 return (ACTION_FINISH);
4579         }
4580         _NOTE(NOT_REACHED)
4581         /* NOTREACHED */
4582 }
4583 
4584 /*
4585  * handle target disconnecting
4586  */
4587 static int
4588 fas_handle_clearing(struct fas *fas)
4589 {
4590         struct fas_cmd *sp = fas->f_current_sp;
4591 
4592         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CLEARING_START,
4593             "fas_handle_clearing_start");
4594         EPRINTF("fas_handle_clearing:\n");
4595 
4596         if (fas->f_laststate == ACTS_C_CMPLT ||
4597             fas->f_laststate == ACTS_MSG_IN_DONE) {
4598                 if (INTPENDING(fas)) {
4599                         volatile struct fasreg *fasreg = fas->f_reg;
4600 
4601                         fas->f_stat = fas_reg_read(fas,
4602                             (uchar_t *)&fasreg->fas_stat);
4603                         fas->f_intr = fas_reg_read(fas,
4604                             (uchar_t *)&fasreg->fas_intr);
4605                         if (fas->f_intr & (FAS_INT_RESET | FAS_INT_ILLEGAL)) {
4606                                 return (fas_illegal_cmd_or_bus_reset(fas));
4607                         }
4608                 } else {
4609                         /*
4610                          * change f_laststate for the next time around
4611                          */
4612                         fas->f_laststate = ACTS_CLEARING;
4613                         TRACE_0(TR_FAC_SCSI_FAS,
4614                             TR_FAS_HANDLE_CLEARING_RETURN1_END,
4615                             "fas_handle_clearing_end (ACTION_RETURN1)");
4616                         return (ACTION_RETURN);
4617                 }
4618         }
4619 
4620         if (fas->f_intr == FAS_INT_DISCON) {
4621                 /*
4622                  * At this point the FAS chip has disconnected. The bus should
4623                  * be either quiet or someone may be attempting a reselection
4624                  * of us (or somebody else). Call the routine that sets the
4625                  * chip back to a correct and known state.
4626                  * If the last message in was a disconnect, search
4627                  * for new work to do, else return to call fas_finish()
4628                  */
4629                 fas->f_last_msgout = 0xff;
4630                 fas->f_omsglen = 0;
4631                 if (fas->f_last_msgin == MSG_DISCONNECT) {
4632 
4633                         fas_reg_cmd_write(fas, CMD_EN_RESEL);
4634 
4635                         New_state(fas, STATE_FREE);
4636 
4637                         ASSERT(fas->f_current_sp != NULL);
4638                         EPRINTF2("disconnecting %d.%d\n", Tgt(sp), Lun(sp));
4639 
4640                         sp->cmd_pkt->pkt_statistics |= STAT_DISCON;
4641                         sp->cmd_flags |= CFLAG_CMDDISC;
4642                         if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
4643                                 fas->f_ndisc++;
4644                         }
4645                         ASSERT((fas->f_ncmds >= 0) && (fas->f_ndisc >= 0));
4646                         ASSERT(fas->f_ncmds >= fas->f_ndisc);
4647 
4648                         fas->f_current_sp = NULL;
4649 
4650                         /*
4651                          * start a cmd here to save time
4652                          */
4653                         if ((fas->f_ncmds > fas->f_ndisc) && fas_ustart(fas)) {
4654                                 TRACE_0(TR_FAC_SCSI_FAS,
4655                                     TR_FAS_HANDLE_CLEARING_RETURN2_END,
4656                                     "fas_handle_clearing_end (ACTION_RETURN2)");
4657                                 return (ACTION_RETURN);
4658                         }
4659 
4660 
4661                         TRACE_0(TR_FAC_SCSI_FAS,
4662                             TR_FAS_HANDLE_CLEARING_RETURN3_END,
4663                             "fas_handle_clearing_end (ACTION_RETURN3)");
4664                         return (ACTION_RETURN);
4665                 } else {
4666                         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CLEARING_END,
4667                             "fas_handle_clearing_end");
4668                         return (fas_finish(fas));
4669                 }
4670         } else {
4671                 /*
4672                  * If the target didn't disconnect from the
4673                  * bus, that is a gross fatal error.
4674                  * XXX this can be caused by asserting ATN
4675                  * XXX check bus phase and if msgout, send a message
4676                  */
4677                 fas_log(fas, CE_WARN,
4678                     "Target %d didn't disconnect after sending %s",
4679                     Tgt(sp), scsi_mname(fas->f_last_msgin));
4680 
4681                 fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
4682 
4683 #ifdef FASDEBUG
4684                 IPRINTF4("msgout: %x %x %x, last_msgout=%x\n",
4685                     fas->f_cur_msgout[0], fas->f_cur_msgout[1],
4686                     fas->f_cur_msgout[2], fas->f_last_msgout);
4687                 IPRINTF1("last msgin=%x\n", fas->f_last_msgin);
4688 #endif
4689                 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CLEARING_ABORT_END,
4690                     "fas_handle_clearing_end (ACTION_ABORT_ALLCMDS)");
4691                 return (ACTION_ABORT_ALLCMDS);
4692         }
4693 }
4694 
4695 /*
4696  * handle data phase start
4697  */
4698 static int
4699 fas_handle_data_start(struct fas *fas)
4700 {
4701         uint64_t end;
4702         uint32_t amt;
4703         struct fas_cmd *sp = fas->f_current_sp;
4704         int sending, phase;
4705 
4706         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_START,
4707             "fas_handle_data_start");
4708         EPRINTF("fas_handle_data_start:\n");
4709 
4710         if ((sp->cmd_flags & CFLAG_DMAVALID) == 0) {
4711                 fas_printstate(fas, "unexpected data phase");
4712 bad:
4713                 fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
4714 
4715                 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_ABORT1_END,
4716                     "fas_handle_data_end (ACTION_ABORT_CURCMD1)");
4717                 return (ACTION_ABORT_CURCMD);
4718         } else {
4719                 sending = (sp->cmd_flags & CFLAG_DMASEND)? 1 : 0;
4720         }
4721 
4722         if (sp->cmd_flags & CFLAG_RESTORE_PTRS) {
4723                 if (fas_restore_pointers(fas, sp)) {
4724                         return (ACTION_ABORT_CURCMD);
4725                 }
4726                 sp->cmd_flags &= ~CFLAG_RESTORE_PTRS;
4727         }
4728 
4729         /*
4730          * And make sure our DMA pointers are in good shape.
4731          *
4732          * Because SCSI is SCSI, the current DMA pointer has got to be
4733          * greater than or equal to our DMA base address. All other cases
4734          * that might have affected this always set curaddr to be
4735          * >= the DMA base address.
4736          */
4737         ASSERT(sp->cmd_cur_addr >= sp->cmd_dmacookie.dmac_address);
4738         end = (uint64_t)sp->cmd_dmacookie.dmac_address +
4739             (uint64_t)sp->cmd_dmacookie.dmac_size;
4740 
4741         DPRINTF5(
4742             "cmd_data_count=%x, dmacount=%x, curaddr=%x, end=%"
4743             PRIx64 ", nwin=%x\n",
4744             sp->cmd_data_count, sp->cmd_dmacount, sp->cmd_cur_addr, end,
4745             sp->cmd_nwin);
4746         DPRINTF2("dmac_address = %x, dmac_size=%lx\n",
4747             sp->cmd_dmacookie.dmac_address, sp->cmd_dmacookie.dmac_size);
4748 
4749         if (sp->cmd_cur_addr >= end) {
4750                 if (fas_next_window(fas, sp, end)) {
4751                         goto bad;
4752                 }
4753                 end = (uint64_t)sp->cmd_dmacookie.dmac_address +
4754                     (uint64_t)sp->cmd_dmacookie.dmac_size;
4755                 DPRINTF2("dmac_address=%x, dmac_size=%lx\n",
4756                     sp->cmd_dmacookie.dmac_address,
4757                     sp->cmd_dmacookie.dmac_size);
4758         }
4759 
4760         amt = end - sp->cmd_cur_addr;
4761         if (fas->f_dma_attr->dma_attr_count_max < amt) {
4762                 amt = fas->f_dma_attr->dma_attr_count_max;
4763         }
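              /*
               * amt is capped at the DMA engine's per-request maximum; any
               * remainder is picked up on a later pass through the data
               * phase, since cmd_cur_addr only advances by what was actually
               * transferred (see fas_handle_data_done()).
               */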
4764         DPRINTF3("amt=%x, end=%lx, cur_addr=%x\n", amt, end, sp->cmd_cur_addr);
4765 
4766 #ifdef FASDEBUG
4767         /*
4768          * Make sure that we don't cross a boundary we can't handle
4769          */
4770         end = (uint64_t)sp->cmd_cur_addr + (uint64_t)amt - 1;
4771         if ((end & ~fas->f_dma_attr->dma_attr_seg) !=
4772             (sp->cmd_cur_addr & ~fas->f_dma_attr->dma_attr_seg)) {
4773                 EPRINTF3("curaddr %x curaddr+amt %" PRIx64
4774                     " cntr_max %" PRIx64 "\n",
4775                     sp->cmd_cur_addr, end, fas->f_dma_attr->dma_attr_seg);
4776                 amt = (end & ~fas->f_dma_attr->dma_attr_seg) - sp->cmd_cur_addr;
4777                 if (amt == 0 || amt > fas->f_dma_attr->dma_attr_count_max) {
4778                         fas_log(fas, CE_WARN, "illegal dma boundary? %x", amt);
4779                         goto bad;
4780                 }
4781         }
4782 #endif
4783 
4784         end = (uint64_t)sp->cmd_dmacookie.dmac_address +
4785             (uint64_t)sp->cmd_dmacookie.dmac_size -
4786             (uint64_t)sp->cmd_cur_addr;
4787         if (amt > end) {
4788                 EPRINTF4("ovflow amt %x s.b. %" PRIx64 " curaddr %x count %x\n",
4789                     amt, end, sp->cmd_cur_addr, sp->cmd_dmacount);
4790                 amt = (uint32_t)end;
4791         }
4792 
4793         fas->f_lastcount = amt;
4794 
4795         EPRINTF4("%d.%d cmd 0x%x to xfer %x\n", Tgt(sp), Lun(sp),
4796             sp->cmd_pkt->pkt_cdbp[0], amt);
4797 
4798         phase = fas->f_stat & FAS_PHASE_MASK;
4799 
4800         if ((phase == FAS_PHASE_DATA_IN) && !sending) {
4801                 FAS_DMA_WRITE(fas, amt, sp->cmd_cur_addr,
4802                     CMD_TRAN_INFO|CMD_DMA);
4803         } else if ((phase == FAS_PHASE_DATA_OUT) && sending) {
4804                 FAS_DMA_READ(fas, amt, sp->cmd_cur_addr, amt,
4805                     CMD_TRAN_INFO|CMD_DMA);
4806         } else {
4807                 fas_log(fas, CE_WARN,
4808                     "unwanted data xfer direction for Target %d", Tgt(sp));
4809                 fas_set_pkt_reason(fas, sp, CMD_DMA_DERR, 0);
4810                 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_ABORT2_END,
4811                     "fas_handle_data_end (ACTION_ABORT_CURCMD2)");
4812                 return (ACTION_ABORT_CURCMD);
4813         }
4814 
4815 #ifdef  FAS_TEST
4816         if (!sending && (fas_ptest_data_in & (1<<Tgt(sp)))) {
4817                 fas_assert_atn(fas);
4818         }
4819 #endif  /* FAS_TEST */
4820 
4821         New_state(fas, ACTS_DATA_DONE);
4822 
4823         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_END,
4824             "fas_handle_data_end (ACTION_RETURN)");
4825         return (ACTION_RETURN);
4826 }
4827 
4828 static int
4829 fas_handle_data_done(struct fas *fas)
4830 {
4831         volatile struct fasreg *fasreg = fas->f_reg;
4832         volatile struct dma *dmar = fas->f_dma;
4833         struct fas_cmd *sp = fas->f_current_sp;
4834         uint32_t xfer_amt;
4835         char was_sending;
4836         uchar_t stat, fifoamt, tgt;
4837 
4838         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_DONE_START,
4839             "fas_handle_data_done_start");
4840         EPRINTF("fas_handle_data_done\n");
4841 
4842         tgt = Tgt(sp);
4843         stat = fas->f_stat;
4844         was_sending = (sp->cmd_flags & CFLAG_DMASEND) ? 1 : 0;
4845 
4846         /*
4847          * Check for DMA errors (parity or memory fault)
4848          */
4849         if ((fas->f_dma_csr = fas_dma_reg_read(fas, &dmar->dma_csr)) &
4850             DMA_ERRPEND) {
4851                 /*
4852                  * It would be desirable to set the ATN* line and attempt to
4853                  * do the whole schmear of INITIATOR DETECTED ERROR here,
4854                  * but that is too hard to do at present.
4855                  */
4856                 fas_log(fas, CE_WARN, "Unrecoverable DMA error on dma %s",
4857                     (was_sending) ? "send" : "receive");
4858                 fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
4859                 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_DONE_RESET_END,
4860                     "fas_handle_data_done_end (ACTION_RESET)");
4861                 return (ACTION_RESET);
4862         }
4863 
4864         /*
4865          * Data Receive conditions:
4866          *
4867          * Check for parity errors. If we have a parity error upon
4868          * receive, the FAS chip has asserted ATN* for us already.
4869          */
4870         if (!was_sending) {
4871 #ifdef  FAS_TEST
4872                 if (fas_ptest_data_in & (1<<tgt)) {
4873                         fas_ptest_data_in = 0;
4874                         stat |= FAS_STAT_PERR;
4875                         if (fas_test_stop > 1) {
4876                                 debug_enter("ptest_data_in");
4877                         }
4878                 }
4879 #endif  /* FAS_TEST */
4880                 if (stat & FAS_STAT_PERR) {
4881                         fas_log(fas, CE_WARN,
4882                             "SCSI bus DATA IN phase parity error");
4883                         fas->f_cur_msgout[0] = MSG_INITIATOR_ERROR;
4884                         fas->f_omsglen = 1;
4885                         sp->cmd_pkt->pkt_statistics |= STAT_PERR;
4886                         sp->cmd_pkt->pkt_reason = CMD_TRAN_ERR;
4887                 }
4888         }
4889 
4890         FAS_FLUSH_DMA(fas);
4891 
4892         /*
4893          * Check to make sure we're still connected to the target.
4894          * If the target dropped the bus, that is a fatal error.
4895          * We don't even attempt to count what we were transferring
4896          * here. Let fas_handle_unknown clean up for us.
4897          */
4898         if (fas->f_intr != FAS_INT_BUS) {
4899                 New_state(fas, ACTS_UNKNOWN);
4900                 TRACE_0(TR_FAC_SCSI_FAS,
4901                     TR_FAS_HANDLE_DATA_DONE_PHASEMANAGE_END,
4902                     "fas_handle_data_done_end (ACTION_PHASEMANAGE)");
4903                 return (ACTION_PHASEMANAGE);
4904         }
4905 
4906         /*
4907          * Figure out how far we got.
4908          * Latch up fifo amount first and double if wide has been enabled
4909          */
4910         fifoamt = FIFO_CNT(fas);
4911         if (fas->f_wide_enabled & (1<<tgt)) {
4912                 fifoamt = fifoamt << 1;
4913         }
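              /*
               * Presumably each FIFO entry holds two bytes when wide
               * transfers are enabled, hence the doubling above to get a
               * byte count.
               */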
4914 
4915         if (stat & FAS_STAT_XZERO) {
4916                 xfer_amt = fas->f_lastcount;
4917         } else {
4918                 GET_FAS_COUNT(fasreg, xfer_amt);
4919                 xfer_amt = fas->f_lastcount - xfer_amt;
4920         }
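              /*
               * xfer_amt is the byte count the chip moved: the full
               * programmed count when the transfer-counter-zero flag is set,
               * otherwise the last programmed count minus the residual read
               * back from the counter register.
               */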
4921         DPRINTF4("fifoamt=%x, xfer_amt=%x, lastcount=%x, stat=%x\n",
4922             fifoamt, xfer_amt, fas->f_lastcount, stat);
4923 
4924 
4925         /*
4926          * Unconditionally knock off by the amount left
4927          * in the fifo if we were sending out the SCSI bus.
4928          *
4929          * If we were receiving from the SCSI bus, believe
4930          * what the chip told us (either XZERO or by the
4931          * value calculated from the counter register).
4932          * The reason we don't look at the fifo for
4933          * incoming data is that in synchronous mode
4934          * the fifo may have further data bytes, and
4935          * for async mode we assume that all data in
4936          * the fifo will have been transferred before
4937          * the fas asserts an interrupt.
4938          */
4939         if (was_sending) {
4940                 xfer_amt -= fifoamt;
4941         }
4942 
4943 #ifdef FASDEBUG
4944         {
4945         int phase = stat & FAS_PHASE_MASK;
4946         fas->f_stat2 = fas_reg_read(fas,
4947             (uchar_t *)&fasreg->fas_stat2);
4948 
4949         if (((fas->f_stat & FAS_STAT_XZERO) == 0) &&
4950             (phase != FAS_PHASE_DATA_IN) &&
4951             (phase != FAS_PHASE_DATA_OUT) &&
4952             (fas->f_stat2 & FAS_STAT2_ISHUTTLE)) {
4953                 fas_log(fas, CE_WARN,
4954                     "input shuttle not empty at end of data phase");
4955                 fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
4956                 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_DONE_RESET_END,
4957                     "fas_handle_data_done_end (ACTION_RESET)");
4958                 return (ACTION_RESET);
4959         }
4960         }
4961 #endif /* FASDEBUG */
4962 
4963         /*
4964          * If this was a synchronous transfer, flag it.
4965          * Also check for the errata condition of long
4966          * last REQ/ pulse for some synchronous targets
4967          */
4968         if (fas->f_offset[tgt]) {
4969                 /*
4970                  * flag that a synchronous data xfer took place
4971                  */
4972                 sp->cmd_pkt->pkt_statistics |= STAT_SYNC;
4973 
4974                 if (was_sending)
4975                         fas_reg_cmd_write(fas, CMD_FLUSH);
4976         } else {
4977                 /*
4978                  * If we aren't doing Synchronous Data Transfers,
4979                  * definitely offload the fifo.
4980                  */
4981                 fas_reg_cmd_write(fas, CMD_FLUSH);
4982         }
4983 
4984         /*
4985          * adjust pointers...
4986          */
4987         DPRINTF3("before:cmd_data_count=%x, cmd_cur_addr=%x, xfer_amt=%x\n",
4988             sp->cmd_data_count, sp->cmd_cur_addr, xfer_amt);
4989         sp->cmd_data_count += xfer_amt;
4990         sp->cmd_cur_addr += xfer_amt;
4991         sp->cmd_pkt->pkt_state |= STATE_XFERRED_DATA;
4992         New_state(fas, ACTS_UNKNOWN);
4993         DPRINTF3("after:cmd_data_count=%x, cmd_cur_addr=%x, xfer_amt=%x\n",
4994             sp->cmd_data_count, sp->cmd_cur_addr, xfer_amt);
4995 
4996         stat &= FAS_PHASE_MASK;
4997         if (stat == FAS_PHASE_DATA_IN || stat == FAS_PHASE_DATA_OUT) {
4998                 fas->f_state = ACTS_DATA;
4999                 TRACE_0(TR_FAC_SCSI_FAS,
5000                     TR_FAS_HANDLE_DATA_DONE_ACTION1_END,
5001                     "fas_handle_data_done_end (action1)");
5002                 return (fas_handle_data_start(fas));
5003         }
5004 
5005         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_DONE_ACTION2_END,
5006             "fas_handle_data_done_end (action2)");
5007         return (fas_handle_unknown(fas));
5008 }
5009 
5010 static char msginperr[] = "SCSI bus MESSAGE IN phase parity error";
5011 
5012 static int
5013 fas_handle_c_cmplt(struct fas *fas)
5014 {
5015         struct fas_cmd *sp = fas->f_current_sp;
5016         volatile struct fasreg *fasreg = fas->f_reg;
5017         uchar_t sts, msg, intr, perr;
5018 
5019         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_C_CMPLT_START,
5020             "fas_handle_c_cmplt_start");
5021         EPRINTF("fas_handle_c_cmplt:\n");
5022 
5023 
5024         /*
5025          * if target is fast, we can get cmd completion by the time we get
5026          * here. Otherwise, we'll have to take an interrupt.
5027          */
5028         if (fas->f_laststate == ACTS_UNKNOWN) {
5029                 if (INTPENDING(fas)) {
5030                         fas->f_stat = fas_reg_read(fas,
5031                             (uchar_t *)&fasreg->fas_stat);
5032                         intr = fas_reg_read(fas, (uchar_t *)&fasreg->fas_intr);
5033                         fas->f_intr = intr;
5034                         if (fas->f_intr & (FAS_INT_RESET | FAS_INT_ILLEGAL)) {
5035                                 return (fas_illegal_cmd_or_bus_reset(fas));
5036                         }
5037                 } else {
5038                         /*
5039                          * change f_laststate for the next time around
5040                          */
5041                         fas->f_laststate = ACTS_C_CMPLT;
5042                         TRACE_0(TR_FAC_SCSI_FAS,
5043                             TR_FAS_HANDLE_C_CMPLT_RETURN1_END,
5044                             "fas_handle_c_cmplt_end (ACTION_RETURN1)");
5045                         return (ACTION_RETURN);
5046                 }
5047         } else {
5048                 intr = fas->f_intr;
5049         }
5050 
5051 #ifdef  FAS_TEST
5052         if (fas_ptest_status & (1<<Tgt(sp))) {
5053                 fas_ptest_status = 0;
5054                 fas->f_stat |= FAS_STAT_PERR;
5055                 if (fas_test_stop > 1) {
5056                         debug_enter("ptest_status");
5057                 }
5058         } else if ((fas_ptest_msgin & (1<<Tgt(sp))) && fas_ptest_msg == 0) {
5059                 fas_ptest_msgin = 0;
5060                 fas_ptest_msg = -1;
5061                 fas->f_stat |= FAS_STAT_PERR;
5062                 if (fas_test_stop > 1) {
5063                         debug_enter("ptest_completion");
5064                 }
5065         }
5066 #endif  /* FAS_TEST */
5067 
5068         if (intr == FAS_INT_DISCON) {
5069                 New_state(fas, ACTS_UNKNOWN);
5070                 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_C_CMPLT_ACTION1_END,
5071                     "fas_handle_c_cmplt_end (action1)");
5072                 return (fas_handle_unknown(fas));
5073         }
5074 
5075         if ((perr = (fas->f_stat & FAS_STAT_PERR)) != 0) {
5076                 fas_assert_atn(fas);
5077                 sp->cmd_pkt->pkt_statistics |= STAT_PERR;
5078         }
5079 
5080         /*
5081          * do a msg accept now and read the fifo data
5082          */
5083         if (intr & FAS_INT_FCMP) {
5084                 /*
5085                  * The FAS manuals state that this sequence completes
5086                  * with a BUS SERVICE interrupt if just the status
5087                  * byte was received, else a FUNCTION COMPLETE interrupt
5088                  * if both status and a message was received.
5089                  *
5090                  * If we issue the MSG_ACPT before reading the msg byte,
5091                  * we get the status byte again, and if the status is zero
5092                  * we won't detect a failure.
5093                  */
5094                 *(sp->cmd_pkt->pkt_scbp) =
5095                     sts = fas_reg_read(fas, (uchar_t *)&fasreg->fas_fifo_data);
5096                 fas->f_last_msgin = fas->f_imsgarea[0] =
5097                     msg = fas_reg_read(fas, (uchar_t *)&fasreg->fas_fifo_data);
5098 
5099                 fas_reg_cmd_write(fas, CMD_MSG_ACPT);
5100                 sp->cmd_pkt->pkt_state |= STATE_GOT_STATUS;
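
                      /*
                       * The status byte has gone straight into the packet's
                       * SCSI status block (pkt_scbp); the completion message
                       * byte is kept in f_imsgarea[0] so that
                       * fas_handle_msg_in_done() can process it below if it
                       * isn't COMMAND COMPLETE.
                       */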
5101 
5102                 /*
5103                  * The manuals also state that ATN* is asserted if
5104                  * bad parity is detected.
5105                  *
5106                  * The one case that we cannot handle is where we detect
5107                  * bad parity for the status byte, but the target refuses
5108                  * to go to MESSAGE OUT phase right away. This means that
5109                  * if that happens, we will misconstrue the parity error
5110                  * to be for the completion message, not the status byte.
5111                  */
5112                 if (perr) {
5113                         fas_log(fas, CE_WARN, msginperr);
5114                         sp->cmd_pkt->pkt_statistics |= STAT_PERR;
5115 
5116                         fas->f_cur_msgout[0] = MSG_MSG_PARITY;
5117                         fas->f_omsglen = 1;
5118                         New_state(fas, ACTS_UNKNOWN);
5119                         TRACE_0(TR_FAC_SCSI_FAS,
5120                             TR_FAS_HANDLE_C_CMPLT_ACTION5_END,
5121                             "fas_handle_c_cmplt_end (action5)");
5122                         return (ACTION_RETURN);
5123                 }
5124 
5125         } else if (intr == FAS_INT_BUS) {
5126                 /*
5127                  * We only got the status byte.
5128                  */
5129                 sts = fas_reg_read(fas, (uchar_t *)&fasreg->fas_fifo_data);
5130                 sp->cmd_pkt->pkt_state |= STATE_GOT_STATUS;
5131                 *(sp->cmd_pkt->pkt_scbp) = sts;
5132                 msg = INVALID_MSG;
5133 
5134                 IPRINTF1("fas_handle_cmd_cmplt: sts=%x, no msg byte\n", sts);
5135 
5136                 if (perr) {
5137                         /*
5138                          * If we get a parity error on a status byte
5139                          * assume that it was a CHECK CONDITION
5140                          */
5141                         sts = STATUS_CHECK;
5142                         fas_log(fas, CE_WARN,
5143                             "SCSI bus STATUS phase parity error");
5144                         fas->f_cur_msgout[0] = MSG_INITIATOR_ERROR;
5145                         fas->f_omsglen = 1;
5146                         New_state(fas, ACTS_UNKNOWN);
5147                         TRACE_0(TR_FAC_SCSI_FAS,
5148                             TR_FAS_HANDLE_C_CMPLT_ACTION5_END,
5149                             "fas_handle_c_cmplt_end (action5)");
5150                         return (fas_handle_unknown(fas));
5151                 }
5152 
5153         } else {
5154                 msg = sts = INVALID_MSG;
5155                 IPRINTF("fas_handle_cmd_cmplt: unexpected intr\n");
5156                 New_state(fas, ACTS_UNKNOWN);
5157                 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_C_CMPLT_ACTION2_END,
5158                     "fas_handle_c_cmplt_end (action2)");
5159                 return (fas_handle_unknown(fas));
5160         }
5161 
5162         EPRINTF2("fas_handle_c_cmplt: status=%x, msg=%x\n", sts, msg);
5163 
5164         EPRINTF1("Completion Message=%s\n", scsi_mname(msg));
5165         if (msg == MSG_COMMAND_COMPLETE) {
5166                 /*
5167                  * Actually, if the message was a 'linked command
5168                  * complete' message, the target isn't going to be
5169                  * clearing the bus.
5170                  */
5171                 New_state(fas, ACTS_CLEARING);
5172                 TRACE_0(TR_FAC_SCSI_FAS,
5173                     TR_FAS_HANDLE_C_CMPLT_ACTION4_END,
5174                     "fas_handle_c_cmplt_end (action4)");
5175                 return (fas_handle_clearing(fas));
5176         } else {
5177                 fas->f_imsglen = 1;
5178                 fas->f_imsgindex = 1;
5179                 New_state(fas, ACTS_MSG_IN_DONE);
5180                 TRACE_0(TR_FAC_SCSI_FAS,
5181                     TR_FAS_HANDLE_C_CMPLT_ACTION3_END,
5182                     "fas_handle_c_cmplt_end (action3)");
5183                 return (fas_handle_msg_in_done(fas));
5184         }
5185 }
5186 
5187 /*
5188  * prepare for accepting a message byte from the fifo
5189  */
5190 static int
5191 fas_handle_msg_in_start(struct fas *fas)
5192 {
5193         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_START,
5194             "fas_handle_msg_in_start");
5195         EPRINTF("fas_handle_msg_in_start\n");
5196 
5197         /*
5198          * Pick up a message byte.
5199          * Clear the FIFO so we
5200          * don't get confused.
5201          */
5202         if (!FIFO_EMPTY(fas)) {
5203                 fas_reg_cmd_write(fas, CMD_FLUSH);
5204         }
5205         fas_reg_cmd_write(fas, CMD_TRAN_INFO);
5206         fas->f_imsglen = 1;
5207         fas->f_imsgindex = 0;
5208         New_state(fas, ACTS_MSG_IN_DONE);
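
              /*
               * f_imsglen starts at one; if the incoming byte turns out to
               * begin an extended message, fas_handle_msg_in_done() extends
               * f_imsglen once the length byte arrives.
               */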
5209 
5210         /*
5211          * give a little extra time by returning to phasemanage
5212          */
5213         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_END,
5214             "fas_handle_msg_in_end (ACTION_PHASEMANAGE)");
5215         return (ACTION_PHASEMANAGE);
5216 }
5217 
5218 /*
5219  * We come here after issuing a MSG_ACCEPT
5220  * command and are expecting more message bytes.
5221  * The FAS should be asserting a BUS SERVICE
5222  * interrupt status, but may have asserted
5223  * a different interrupt in the case that
5224  * the target disconnected and dropped BSY*.
5225  *
5226  * In the case that we are eating up message
5227  * bytes (and throwing them away unread) because
5228  * we have ATN* asserted (we are trying to send
5229  * a message), we do not consider it an error
5230  * if the phase has changed out of MESSAGE IN.
5231  */
5232 static int
5233 fas_handle_more_msgin(struct fas *fas)
5234 {
5235         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MORE_MSGIN_START,
5236             "fas_handle_more_msgin_start");
5237         EPRINTF("fas_handle_more_msgin\n");
5238 
5239         if (fas->f_intr & FAS_INT_BUS) {
5240                 if ((fas->f_stat & FAS_PHASE_MASK) == FAS_PHASE_MSG_IN) {
5241                         /*
5242                          * Fetch another byte of a message in.
5243                          */
5244                         fas_reg_cmd_write(fas, CMD_TRAN_INFO);
5245                         New_state(fas, ACTS_MSG_IN_DONE);
5246                         TRACE_0(TR_FAC_SCSI_FAS,
5247                             TR_FAS_HANDLE_MORE_MSGIN_RETURN1_END,
5248                             "fas_handle_more_msgin_end (ACTION_RETURN)");
5249                         return (ACTION_RETURN);
5250                 }
5251 
5252                 /*
5253                  * If we were gobbling up a message and we have
5254                  * changed phases, handle this silently, else
5255                  * complain. In either case, we return to let
5256                  * fas_phasemanage() handle things.
5257                  *
5258                  * If it wasn't a BUS SERVICE interrupt,
5259                  * let fas_phasemanage() find out if the
5260                  * chip disconnected.
5261                  */
5262                 if (fas->f_imsglen != 0) {
5263                         fas_log(fas, CE_WARN,
5264                             "Premature end of extended message");
5265                 }
5266         }
5267         New_state(fas, ACTS_UNKNOWN);
5268         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MORE_MSGIN_RETURN2_END,
5269             "fas_handle_more_msgin_end (action)");
5270         return (fas_handle_unknown(fas));
5271 }
5272 
5273 static int
5274 fas_handle_msg_in_done(struct fas *fas)
5275 {
5276         struct fas_cmd *sp = fas->f_current_sp;
5277         volatile struct fasreg *fasreg = fas->f_reg;
5278         int sndmsg = 0;
5279         uchar_t msgin;
5280 
5281         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_DONE_START,
5282             "fas_handle_msg_in_done_start");
5283         EPRINTF("fas_handle_msg_in_done:\n");
5284         if (fas->f_laststate == ACTS_MSG_IN) {
5285                 if (INTPENDING(fas)) {
5286                         fas->f_stat = fas_reg_read(fas,
5287                             (uchar_t *)&fasreg->fas_stat);
5288                         fas->f_stat2 = fas_reg_read(fas,
5289                             (uchar_t *)&fasreg->fas_stat2);
5290 
5291                         fas_read_fifo(fas);
5292 
5293                         fas->f_intr = fas_reg_read(fas,
5294                             (uchar_t *)&fasreg->fas_intr);
5295                         if (fas->f_intr & (FAS_INT_RESET | FAS_INT_ILLEGAL)) {
5296                                 return (fas_illegal_cmd_or_bus_reset(fas));
5297                         }
5298                 } else {
5299                         /*
5300                          * change f_laststate for the next time around
5301                          */
5302                         fas->f_laststate = ACTS_MSG_IN_DONE;
5303                         TRACE_0(TR_FAC_SCSI_FAS,
5304                             TR_FAS_HANDLE_MSG_IN_DONE_RETURN1_END,
5305                             "fas_handle_msg_in_done_end (ACTION_RETURN1)");
5306                         return (ACTION_RETURN);
5307                 }
5308         }
5309 
5310         /*
5311          * the most common case is a disconnect message. we do
5312          * a fast path for this condition and if it fails then
5313          * we go for the detailed error handling
5314          */
5315 #ifndef  FAS_TEST
5316         if (((fas->f_laststate == ACTS_MSG_IN) ||
5317             (fas->f_laststate == ACTS_MSG_IN_DONE)) &&
5318             ((fas->f_intr & FAS_INT_DISCON) == 0) &&
5319             ((fas->f_stat & FAS_STAT_PERR) == 0) &&
5320             ((sp->cmd_pkt_flags & FLAG_NODISCON) == 0)) {
5321 
5322                 if ((fas->f_fifolen == 1) &&
5323                     (fas->f_imsglen == 1) &&
5324                     (fas->f_fifo[0] == MSG_DISCONNECT)) {
5325 
5326                         fas_reg_cmd_write(fas, CMD_MSG_ACPT);
5327                         fas->f_imsgarea[fas->f_imsgindex++] = fas->f_fifo[0];
5328                         fas->f_last_msgin = MSG_DISCONNECT;
5329                         New_state(fas, ACTS_CLEARING);
5330 
5331                         TRACE_0(TR_FAC_SCSI_FAS,
5332                             TR_FAS_HANDLE_MSG_IN_DONE_ACTION_END,
5333                             "fas_handle_msg_in_done_end (action)");
5334 
5335                         return (fas_handle_clearing(fas));
5336                 }
5337         }
5338 #endif  /* not FAS_TEST */
5339 
5340         /*
5341          * We can be called here for both the case where
5342          * we had requested the FAS chip to fetch a message
5343          * byte from the target (at the target's request).
5344          * We can also be called in the case where we had
5345          * been using the CMD_COMP_SEQ command to pick up
5346          * both a status byte and a completion message from
5347          * a target, but where the message wasn't one of
5348          * COMMAND COMPLETE, LINKED COMMAND COMPLETE, or
5349          * LINKED COMMAND COMPLETE (with flag). This is a
5350          * legal (albeit extremely unusual) SCSI bus
5351          * transition, so we have to handle it.
5352          */
5353         if (fas->f_laststate != ACTS_C_CMPLT) {
5354 #ifdef  FAS_TEST
5355 reloop:
5356 #endif  /* FAS_TEST */
5357 
5358                 if (fas->f_intr & FAS_INT_DISCON) {
5359                         fas_log(fas, CE_WARN,
5360                             "premature end of input message");
5361                         New_state(fas, ACTS_UNKNOWN);
5362                         TRACE_0(TR_FAC_SCSI_FAS,
5363                             TR_FAS_HANDLE_MSG_IN_DONE_PHASEMANAGE_END,
5364                             "fas_handle_msg_in_done_end (ACTION_PHASEMANAGE)");
5365                         return (ACTION_PHASEMANAGE);
5366                 }
5367 
5368                 /*
5369                  * Note that if f_imsglen is zero, then we are skipping
5370                  * input message bytes, so there is no reason to look for
5371                  * parity errors.
5372                  */
5373                 if (fas->f_imsglen != 0 && (fas->f_stat & FAS_STAT_PERR)) {
5374                         fas_log(fas, CE_WARN, msginperr);
5375                         sndmsg = MSG_MSG_PARITY;
5376                         sp->cmd_pkt->pkt_statistics |= STAT_PERR;
5377                         fas_reg_cmd_write(fas, CMD_FLUSH);
5378 
5379                 } else if ((msgin = fas->f_fifolen) != 1) {
5380 
5381                         /*
5382                          * If we have anything other than exactly one byte
5383                          * in the fifo, that is a gross screwup, and we
5384                          * should let the target know we completely fouled up.
5385                          */
5386                         fas_printf(fas, "fifocount=%x", msgin);
5387                         fas_printstate(fas, "input message botch");
5388                         sndmsg = MSG_INITIATOR_ERROR;
5389                         fas_reg_cmd_write(fas, CMD_FLUSH);
5390                         fas_log(fas, CE_WARN, "input message botch");
5391 
5392                 } else if (fas->f_imsglen == 0) {
5393                         /*
5394                          * If we are in the middle of gobbling up and throwing
5395                          * away a message (due to a previous message input
5396                          * error), drive on.
5397                          */
5398                         msgin = fas_reg_read(fas,
5399                             (uchar_t *)&fasreg->fas_fifo_data);
5400                         New_state(fas, ACTS_MSG_IN_MORE);
5401 
5402                 } else {
5403                         msgin = fas->f_fifo[0];
5404                         fas->f_imsgarea[fas->f_imsgindex++] = msgin;
5405                 }
5406 
5407         } else {
5408                 /*
5409                  * In this case, we have been called (from
5410                  * fas_handle_c_cmplt()) with the message
5411                  * already stored in the message array.
5412                  */
5413                 msgin = fas->f_imsgarea[0];
5414         }
5415 
5416         /*
5417          * Process this message byte (but not if we are
5418          * going to be trying to send back some error
5419          * anyway)
5420          */
5421         if (sndmsg == 0 && fas->f_imsglen != 0) {
5422 
5423                 if (fas->f_imsgindex < fas->f_imsglen) {
5424 
5425                         EPRINTF2("message byte %d: 0x%x\n",
5426                             fas->f_imsgindex-1,
5427                             fas->f_imsgarea[fas->f_imsgindex-1]);
5428 
5429                         New_state(fas, ACTS_MSG_IN_MORE);
5430 
5431                 } else if (fas->f_imsglen == 1) {
5432 
5433 #ifdef  FAS_TEST
5434                         if ((fas_ptest_msgin & (1<<Tgt(sp))) &&
5435                             fas_ptest_msg == msgin) {
5436                                 fas_ptest_msgin = 0;
5437                                 fas_ptest_msg = -1;
5438                                 fas_assert_atn(fas);
5439                                 fas->f_stat |= FAS_STAT_PERR;
5440                                 fas->f_imsgindex -= 1;
5441                                 if (fas_test_stop > 1) {
5442                                         debug_enter("ptest msgin");
5443                                 }
5444                                 goto reloop;
5445                         }
5446 #endif  /* FAS_TEST */
5447 
5448                         sndmsg = fas_onebyte_msg(fas);
5449 
5450                 } else if (fas->f_imsglen == 2) {
5451 #ifdef  FAS_TEST
5452                         if (fas_ptest_emsgin & (1<<Tgt(sp))) {
5453                                 fas_ptest_emsgin = 0;
5454                                 fas_assert_atn(fas);
5455                                 fas->f_stat |= FAS_STAT_PERR;
5456                                 fas->f_imsgindex -= 1;
5457                                 if (fas_test_stop > 1) {
5458                                         debug_enter("ptest emsgin");
5459                                 }
5460                                 goto reloop;
5461                         }
5462 #endif  /* FAS_TEST */
5463 
5464                         if (fas->f_imsgarea[0] == MSG_EXTENDED) {
5465                                 static char *tool =
5466                                     "Extended message 0x%x is too long";
5467 
5468                                 /*
5469                                  * Is the incoming message too long
5470                                  * to be stored in our local array?
5471                                  */
5472                                 if ((int)(msgin+2) > IMSGSIZE) {
5473                                         fas_log(fas, CE_WARN,
5474                                             tool, fas->f_imsgarea[0]);
5475                                         sndmsg = MSG_REJECT;
5476                                 } else {
5477                                         fas->f_imsglen = msgin + 2;
5478                                         New_state(fas, ACTS_MSG_IN_MORE);
5479                                 }
5480                         } else {
5481                                 sndmsg = fas_twobyte_msg(fas);
5482                         }
5483 
5484                 } else {
5485                         sndmsg = fas_multibyte_msg(fas);
5486                 }
5487         }
5488 
5489         if (sndmsg < 0) {
5490                 /*
5491                  * If sndmsg is less than zero, one of the subsidiary
5492                  * routines needs to return some other state than
5493                  * ACTION_RETURN.
5494                  */
5495                 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_DONE_SNDMSG_END,
5496                     "fas_handle_msg_in_done_end (-sndmsg)");
5497                 return (-sndmsg);
5498 
5499         } else if (sndmsg > 0) {
5500                 if (IS_1BYTE_MSG(sndmsg)) {
5501                         fas->f_omsglen = 1;
5502                 }
5503                 fas->f_cur_msgout[0] = (uchar_t)sndmsg;
5504 
5505                 /*
5506                  * The target is not guaranteed to go to message out
5507                  * phase, period. Moreover, until the entire incoming
5508                  * message is transferred, the target may (and likely
5509                  * will) continue to transfer message bytes (which
5510                  * we will have to ignore).
5511                  *
5512                  * In order to do this, we'll go to 'infinite'
5513                  * message in handling by setting the current input
5514                  * message length to a sentinel of zero.
5515                  *
5516                  * This works regardless of the message we are trying
5517                  * to send out. At the point at which we want
5518                  * to send a message in response to an incoming message,
5519                  * we no longer care about the incoming message.
5520                  *
5521                  * If we are sending a message in response to detecting
5522                  * a parity error on input, the FAS chip has already
5523                  * set ATN* for us, but it doesn't hurt to set it here
5524                  * again anyhow.
5525                  */
5526                 fas_assert_atn(fas);
5527                 New_state(fas, ACTS_MSG_IN_MORE);
5528                 fas->f_imsglen = 0;
5529         }
5530 
5531         fas_reg_cmd_write(fas, CMD_FLUSH);
5532 
5533         fas_reg_cmd_write(fas, CMD_MSG_ACPT);
5534 
5535         if ((fas->f_laststate == ACTS_MSG_IN_DONE) &&
5536             (fas->f_state == ACTS_CLEARING)) {
5537                 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_DONE_ACTION_END,
5538                     "fas_handle_msg_in_done_end (action)");
5539                 return (fas_handle_clearing(fas));
5540         }
5541         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_DONE_RETURN2_END,
5542             "fas_handle_msg_in_done_end (ACTION_RETURN2)");
5543         return (ACTION_RETURN);
5544 }
5545 
5546 static int
5547 fas_onebyte_msg(struct fas *fas)
5548 {
5549         struct fas_cmd *sp = fas->f_current_sp;
5550         int msgout = 0;
5551         uchar_t msgin = fas->f_last_msgin = fas->f_imsgarea[0];
5552         int tgt = Tgt(sp);
5553 
5554         EPRINTF("fas_onebyte_msg\n");
5555 
5556         if (msgin & MSG_IDENTIFY) {
5557                 /*
5558                  * How did we get here? We should only see identify
5559                  * messages on a reconnection, but we'll handle it
5560                  * here anyway (just in case we get it), as long as
5561                  * we believe that this is a valid identify message.
5562                  *
5563                  * For this to be a valid incoming message,
5564                  * bits 6-4 must be zero. Also, the
5565                  * bit that says that I'm an initiator and
5566                  * can support disconnection cannot possibly
5567                  * be set here.
5568                  */
5569 
5570                 char garbled = ((msgin & (BAD_IDENTIFY|INI_CAN_DISCON)) != 0);
5571 
5572                 fas_log(fas, CE_WARN, "%s message 0x%x from Target %d",
5573                     garbled ? "Garbled" : "Identify", msgin, tgt);
5574 
5575                 if (garbled) {
5576                         /*
5577                          * If it's a garbled message,
5578                          * try and tell the target...
5579                          */
5580                         msgout = MSG_INITIATOR_ERROR;
5581                 } else {
5582                         New_state(fas, ACTS_UNKNOWN);
5583                 }
5584                 return (msgout);
5585 
5586         } else if (IS_2BYTE_MSG(msgin) || IS_EXTENDED_MSG(msgin)) {
5587                 fas->f_imsglen = 2;
5588                 New_state(fas, ACTS_MSG_IN_MORE);
5589                 return (0);
5590         }
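
        /*
         * For reference, a sketch of the IDENTIFY byte layout that the
         * garbled-message check above relies on (see the SCSI-2 spec for
         * the authoritative definition; BAD_IDENTIFY and INI_CAN_DISCON
         * are driver-private masks from the fas headers):
         *
         *      bit 7           always set (marks the byte as IDENTIFY)
         *      bits 6-3        initiator-only options (e.g. disconnect
         *                      privilege) and reserved bits, expected to
         *                      be clear in a message we receive
         *      bits 2-0        logical unit number
         *
         * i.e. on a reconnect we only expect to see (MSG_IDENTIFY | lun).
         */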
5591 
5592         New_state(fas, ACTS_UNKNOWN);
5593 
5594         switch (msgin) {
5595         case MSG_DISCONNECT:
5596                 /*
5597                  * If we 'cannot' disconnect, reject this message.
5598                  * Note that we only key off of the pkt_flags here;
5599                  * FLAG_NODISCON was set in fas_accept_pkt() if
5600                  * disconnects were not enabled in scsi_options.
5601                  */
5602                 if (sp->cmd_pkt_flags & FLAG_NODISCON) {
5603                         msgout = MSG_REJECT;
5604                         break;
5605                 }
5606                 /* FALLTHROUGH */
5607         case MSG_COMMAND_COMPLETE:
5608                 fas->f_state = ACTS_CLEARING;
5609                 break;
5610 
5611         case MSG_NOP:
5612                 break;
5613 
5614         /* XXX Make it a MSG_REJECT handler */
5615         case MSG_REJECT:
5616         {
5617                 uchar_t reason = 0;
5618                 uchar_t lastmsg = fas->f_last_msgout;
5619                 /*
5620                  * The target is rejecting the last message we sent.
5621                  *
5622                  * If the last message we attempted to send out was an
5623                  * extended message, we were trying to negotiate sync
5624                  * xfers, and we're okay.
5625                  *
5626                  * Otherwise, a target has rejected a message that
5627                  * it should have handled. We will abort the operation
5628                  * in progress and set the pkt_reason value here to
5629                  * show why we have completed. The process of aborting
5630                  * may be via a message or may be via a bus reset (as
5631                  * a last resort).
5632                  */
5633                 msgout = (TAGGED(tgt)? MSG_ABORT_TAG : MSG_ABORT);
5634 
5635                 switch (lastmsg) {
5636                 case MSG_EXTENDED:
5637                         if (fas->f_wdtr_sent) {
5638                                 /*
5639                                  * Disable wide; the target rejected
5640                                  * our WDTR message
5641                                  */
5642                                 fas_set_wide_conf3(fas, tgt, 0);
5643                                 fas->f_nowide |= (1<<tgt);
5644                                 fas->f_wdtr_sent = 0;
5645                                 /*
5646                                  * we still want to negotiate sync
5647                                  */
5648                                 if ((fas->f_nosync & (1<<tgt)) == 0) {
5649                                         fas_assert_atn(fas);
5650                                         fas_make_sdtr(fas, 0, tgt);
5651                                 }
5652                         } else if (fas->f_sdtr_sent) {
5653                                 fas_reg_cmd_write(fas, CMD_CLR_ATN);
5654                                 fas_revert_to_async(fas, tgt);
5655                                 fas->f_nosync |= (1<<tgt);
5656                                 fas->f_sdtr_sent = 0;
5657                         }
5658                         msgout = 0;
5659                         break;
5660                 case MSG_NOP:
5661                         reason = CMD_NOP_FAIL;
5662                         break;
5663                 case MSG_INITIATOR_ERROR:
5664                         reason = CMD_IDE_FAIL;
5665                         break;
5666                 case MSG_MSG_PARITY:
5667                         reason = CMD_PER_FAIL;
5668                         break;
5669                 case MSG_REJECT:
5670                         reason = CMD_REJECT_FAIL;
5671                         break;
5672                 /* XXX - abort not good, queue full handling or drain (?) */
5673                 case MSG_SIMPLE_QTAG:
5674                 case MSG_ORDERED_QTAG:
5675                 case MSG_HEAD_QTAG:
5676                         msgout = MSG_ABORT;
5677                         reason = CMD_TAG_REJECT;
5678                         break;
5679                 case MSG_DEVICE_RESET:
5680                         reason = CMD_BDR_FAIL;
5681                         msgout = -ACTION_ABORT_CURCMD;
5682                         break;
5683                 case MSG_ABORT:
5684                 case MSG_ABORT_TAG:
5685                         /*
5686                          * If a RESET/ABORT OPERATION message is rejected
5687                          * it is time to yank the chain on the bus...
5688                          */
5689                         reason = CMD_ABORT_FAIL;
5690                         msgout = -ACTION_ABORT_CURCMD;
5691                         break;
5692                 default:
5693                         if (IS_IDENTIFY_MSG(lastmsg)) {
5694                                 if (TAGGED(tgt)) {
5695                                         /*
5696                                          * this often happens when the
5697                                          * target rejected our tag
5698                                          */
5699                                         reason = CMD_TAG_REJECT;
5700                                 } else {
5701                                         reason = CMD_ID_FAIL;
5702                                 }
5703                         } else {
5704                                 reason = CMD_TRAN_ERR;
5705                                 msgout = -ACTION_ABORT_CURCMD;
5706                         }
5707 
5708                         break;
5709                 }
5710 
5711                 if (msgout) {
5712                         fas_log(fas, CE_WARN,
5713                             "Target %d rejects our message '%s'",
5714                             tgt, scsi_mname(lastmsg));
5715                         fas_set_pkt_reason(fas, sp, reason, 0);
5716                 }
5717 
5718                 break;
5719         }
5720         case MSG_RESTORE_PTRS:
5721                 sp->cmd_cdbp = sp->cmd_pkt->pkt_cdbp;
5722                 if (sp->cmd_data_count != sp->cmd_saved_data_count) {
5723                         if (fas_restore_pointers(fas, sp)) {
5724                                 msgout = -ACTION_ABORT_CURCMD;
5725                         } else if ((sp->cmd_pkt->pkt_reason & CMD_TRAN_ERR) &&
5726                             (sp->cmd_pkt->pkt_statistics & STAT_PERR) &&
5727                             (sp->cmd_cur_win == 0) &&
5728                             (sp->cmd_data_count == 0)) {
5729                                 sp->cmd_pkt->pkt_reason &= ~CMD_TRAN_ERR;
5730                         }
5731                 }
5732                 break;
5733 
5734         case MSG_SAVE_DATA_PTR:
5735                 sp->cmd_saved_data_count = sp->cmd_data_count;
5736                 sp->cmd_saved_win = sp->cmd_cur_win;
5737                 sp->cmd_saved_cur_addr = sp->cmd_cur_addr;
5738                 break;
5739 
5740         /* These don't make sense for us, and   */
5741         /* will be rejected                     */
5742         /*      case MSG_INITIATOR_ERROR        */
5743         /*      case MSG_ABORT                  */
5744         /*      case MSG_MSG_PARITY             */
5745         /*      case MSG_DEVICE_RESET           */
5746         default:
5747                 msgout = MSG_REJECT;
5748                 fas_log(fas, CE_WARN,
5749                     "Rejecting message '%s' from Target %d",
5750                     scsi_mname(msgin), tgt);
5751                 break;
5752         }
5753 
5754         EPRINTF1("Message in: %s\n", scsi_mname(msgin));
5755 
5756         return (msgout);
5757 }
5758 
5759 /*
5760  * phase handlers that are rarely used
5761  */
5762 static int
5763 fas_handle_cmd_start(struct fas *fas)
5764 {
5765         struct fas_cmd *sp = fas->f_current_sp;
5766         volatile uchar_t *tp = fas->f_cmdarea;
5767         int i;
5768         int amt = sp->cmd_cdblen;
5769 
5770         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CMD_START_START,
5771             "fas_handle_cmd_start_start");
5772         EPRINTF("fas_handle_cmd: send cmd\n");
5773 
5774         for (i = 0; i < amt; i++) {
5775                 *tp++ = sp->cmd_cdbp[i];
5776         }
5777         fas_reg_cmd_write(fas, CMD_FLUSH);
5778 
5779         FAS_DMA_READ(fas, amt, fas->f_dmacookie.dmac_address, amt,
5780             CMD_TRAN_INFO|CMD_DMA);
5781         fas->f_lastcount = amt;
5782 
5783         New_state(fas, ACTS_CMD_DONE);
5784 
5785         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CMD_START_END,
5786             "fas_handle_cmd_start_end");
5787         return (ACTION_RETURN);
5788 }
5789 
5790 static int
5791 fas_handle_cmd_done(struct fas *fas)
5792 {
5793         struct fas_cmd *sp = fas->f_current_sp;
5794         uchar_t intr = fas->f_intr;
5795         volatile struct dma *dmar = fas->f_dma;
5796 
5797         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CMD_DONE_START,
5798             "fas_handle_cmd_done_start");
5799         EPRINTF("fas_handle_cmd_done\n");
5800 
5801         /*
5802          * We should have gotten a BUS SERVICE interrupt.
5803          * If it isn't that, and it isn't a DISCONNECT
5804          * interrupt, we have a "cannot happen" situation.
5805          */
5806         if ((intr & FAS_INT_BUS) == 0) {
5807                 if ((intr & FAS_INT_DISCON) == 0) {
5808                         fas_printstate(fas, "cmd transmission error");
5809                         TRACE_0(TR_FAC_SCSI_FAS,
5810                             TR_FAS_HANDLE_CMD_DONE_ABORT1_END,
5811                             "fas_handle_cmd_done_end (abort1)");
5812                         return (ACTION_ABORT_CURCMD);
5813                 }
5814         } else {
5815                 sp->cmd_pkt->pkt_state |= STATE_SENT_CMD;
5816         }
5817 
5818         fas->f_dma_csr = fas_dma_reg_read(fas, &dmar->dma_csr);
5819         FAS_FLUSH_DMA(fas);
5820 
5821         New_state(fas, ACTS_UNKNOWN);
5822         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CMD_DONE_END,
5823             "fas_handle_cmd_done_end");
5824         return (fas_handle_unknown(fas));
5825 }
5826 
5827 /*
5828  * Begin to send a message out
5829  */
5830 static int
5831 fas_handle_msg_out_start(struct fas *fas)
5832 {
5833         struct fas_cmd *sp = fas->f_current_sp;
5834         uchar_t *msgout = fas->f_cur_msgout;
5835         uchar_t amt = fas->f_omsglen;
5836 
5837         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_OUT_START,
5838             "fas_handle_msg_out_start");
5839         EPRINTF("fas_handle_msg_out_start\n");
5840 
5841         /*
5842          * Check to make *sure* that we are really
5843          * in MESSAGE OUT phase. If the last state
5844          * was ACTS_MSG_OUT_DONE, then we are trying
5845          * to resend a message that the target stated
5846          * had a parity error in it.
5847          *
5848                  * If this is the case, mark the completion reason as CMD_NOMSGOUT.
5849          * XXX: Right now, we just *drive* on. Should we abort the command?
5850          */
5851         if ((fas->f_stat & FAS_PHASE_MASK) != FAS_PHASE_MSG_OUT &&
5852             fas->f_laststate == ACTS_MSG_OUT_DONE) {
5853                 fas_log(fas, CE_WARN,
5854                     "Target %d refused message resend", Tgt(sp));
5855                 fas_set_pkt_reason(fas, sp, CMD_NOMSGOUT, 0);
5856                 New_state(fas, ACTS_UNKNOWN);
5857                 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_OUT_PHASEMANAGE_END,
5858                     "fas_handle_msg_out_end (ACTION_PHASEMANAGE)");
5859                 return (ACTION_PHASEMANAGE);
5860         }
5861 
5862         /*
5863          * Clean the fifo.
5864          */
5865         fas_reg_cmd_write(fas, CMD_FLUSH);
5866 
5867         if (amt == 0) {
5868                 /*
5869                  * no msg to send
5870                  */
5871                 *msgout = MSG_NOP;
5872                 amt = fas->f_omsglen = 1;
5873         }
5874 
5875         /*
5876          * If the msg is only 1 byte, just dump it in the fifo and go.
5877          * (Multi-byte msgs could be dma'd to save time, but see the
5878          * fas366 workaround note below.)  If we have no msg to send
5879          * and we're in msg out phase, send a NOP.
5879          */
5880         fas->f_last_msgout = *msgout;
5881 
5882         /*
5883                  * There is a bug in the fas366 that occasionally
5884                  * deasserts the ATN signal prematurely when we send
5885                  * the sync/wide negotiation bytes out using DMA. The
5886                  * workaround here is to send the negotiation bytes out
5887                  * using PIO.
5888          */
5889         fas_write_fifo(fas, msgout, fas->f_omsglen, 1);
5890         fas_reg_cmd_write(fas, CMD_TRAN_INFO);
5891 
5892         EPRINTF2("amt=%x, last_msgout=%x\n", amt, fas->f_last_msgout);
5893 
5894         New_state(fas, ACTS_MSG_OUT_DONE);
5895         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_OUT_END,
5896             "fas_handle_msg_out_end");
5897         return (ACTION_RETURN);
5898 }
5899 
5900 static int
5901 fas_handle_msg_out_done(struct fas *fas)
5902 {
5903         struct fas_cmd *sp = fas->f_current_sp;
5904         uchar_t msgout, phase;
5905         int target = Tgt(sp);
5906         int     amt = fas->f_omsglen;
5907         int action;
5908 
5909         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_OUT_DONE_START,
5910             "fas_handle_msg_out_done_start");
5911         msgout = fas->f_cur_msgout[0];
5912         if ((msgout == MSG_HEAD_QTAG) || (msgout == MSG_SIMPLE_QTAG)) {
5913                 msgout = fas->f_cur_msgout[2];
5914         }
5915         EPRINTF4("msgout: %x %x %x, last_msgout=%x\n",
5916             fas->f_cur_msgout[0], fas->f_cur_msgout[1],
5917             fas->f_cur_msgout[2], fas->f_last_msgout);
5918 
5919         EPRINTF1("fas_handle_msgout_done: msgout=%x\n", msgout);
5920 
5921         /*
5922          * flush fifo, just in case some bytes were not sent
5923          */
5924         fas_reg_cmd_write(fas, CMD_FLUSH);
5925 
5926         /*
5927          * If the FAS disconnected, then the message we sent caused
5928          * the target to decide to drop BSY* and clear the bus.
5929          */
5930         if (fas->f_intr == FAS_INT_DISCON) {
5931                 if (msgout == MSG_DEVICE_RESET || msgout == MSG_ABORT ||
5932                     msgout == MSG_ABORT_TAG) {
5933                         /*
5934                          * If we sent a device reset msg, then we need to do
5935                          * a synch negotiate again unless we have already
5936                          * inhibited synch.
5937                          */
5938                         if (msgout == MSG_ABORT || msgout == MSG_ABORT_TAG) {
5939                                 fas->f_abort_msg_sent++;
5940                                 if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
5941                                         fas_set_pkt_reason(fas, sp,
5942                                             CMD_ABORTED, STAT_ABORTED);
5943                                 }
5944                         } else if (msgout == MSG_DEVICE_RESET) {
5945                                 fas->f_reset_msg_sent++;
5946                                 if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
5947                                         fas_set_pkt_reason(fas, sp,
5948                                             CMD_RESET, STAT_DEV_RESET);
5949                                 }
5950                                 fas_force_renegotiation(fas, Tgt(sp));
5951                         }
5952                         EPRINTF2("Successful %s message to target %d\n",
5953                             scsi_mname(msgout), target);
5954 
5955                         if (sp->cmd_flags & CFLAG_CMDPROXY) {
5956                                 sp->cmd_cdb[FAS_PROXY_RESULT] = TRUE;
5957                         }
5958                         TRACE_0(TR_FAC_SCSI_FAS,
5959                             TR_FAS_HANDLE_MSG_OUT_DONE_FINISH_END,
5960                             "fas_handle_msg_out_done_end (ACTION_FINISH)");
5961                         return (ACTION_FINISH);
5962                 }
5963                 /*
5964                  * If the target dropped busy on any other message, it
5965                  * wasn't expected. We will let the code in fas_phasemanage()
5966                  * handle this unexpected bus free event.
5967                  */
5968                 goto out;
5969         }
5970 
5971         /*
5972          * What phase have we transitioned to?
5973          */
5974         phase = fas->f_stat & FAS_PHASE_MASK;
5975 
5976         /*
5977          * If we finish sending a message out, and we are
5978          * still in message out phase, then the target has
5979          * detected one or more parity errors in the message
5980          * we just sent and it is asking us to resend the
5981          * previous message.
5982          */
5983         if ((fas->f_intr & FAS_INT_BUS) && phase == FAS_PHASE_MSG_OUT) {
5984                 /*
5985                  * As per SCSI-2 specification, if the message to
5986                  * be re-sent is greater than one byte, then we
5987                  * have to set ATN*.
5988                  */
5989                 if (amt > 1) {
5990                         fas_assert_atn(fas);
5991                 }
5992                 fas_log(fas, CE_WARN,
5993                     "SCSI bus MESSAGE OUT phase parity error");
5994                 sp->cmd_pkt->pkt_statistics |= STAT_PERR;
5995                 New_state(fas, ACTS_MSG_OUT);
5996                 TRACE_0(TR_FAC_SCSI_FAS,
5997                     TR_FAS_HANDLE_MSG_OUT_DONE_PHASEMANAGE_END,
5998                     "fas_handle_msg_out_done_end (ACTION_PHASEMANAGE)");
5999                 return (ACTION_PHASEMANAGE);
6000         }
6001 
6002 
6003 out:
6004         fas->f_last_msgout = msgout;
6005         fas->f_omsglen = 0;
6006         New_state(fas, ACTS_UNKNOWN);
6007         action = fas_handle_unknown(fas);
6008         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_OUT_DONE_END,
6009             "fas_handle_msg_out_done_end");
6010         return (action);
6011 }
6012 
6013 static int
6014 fas_twobyte_msg(struct fas *fas)
6015 {
6016         struct fas_cmd *sp = fas->f_current_sp;
6017 
6018         if ((fas->f_imsgarea[0] == MSG_IGNORE_WIDE_RESID) &&
6019             (fas->f_imsgarea[1] == 1)) {
6020                 int xfer_amt;
6021 
6022                 /*
6023                  * Knock off one byte if there was a last transfer
6024                  * and it moved an even number of bytes
6025                  */
6026                 xfer_amt = sp->cmd_data_count - sp->cmd_saved_data_count;
6027                 if (xfer_amt && (!(xfer_amt & 1))) {
6028                         ASSERT(sp->cmd_data_count > 0);
6029                         sp->cmd_data_count--;
6030                         sp->cmd_cur_addr--;
6031                 }
6032                 IPRINTF1("ignore wide resid %d\n", fas->f_imsgarea[1]);
6033                 New_state(fas, ACTS_UNKNOWN);
6034                 return (0);
6035         }
6036 
6037         fas_log(fas, CE_WARN,
6038             "Two byte message '%s' 0x%x rejected",
6039             scsi_mname(fas->f_imsgarea[0]), fas->f_imsgarea[1]);
6040         return (MSG_REJECT);
6041 }
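
/*
 * A worked example of the IGNORE WIDE RESIDUE handling above, using
 * hypothetical numbers: suppose 7 bytes were requested from a wide
 * target.  The 16-bit data phase moves data a word at a time, so the
 * transfer counters advance by 8, an even amount, and the target then
 * sends IGNORE WIDE RESIDUE, 1 to say the final byte is not valid.
 * fas_twobyte_msg() backs cmd_data_count and cmd_cur_addr off by one so
 * the bookkeeping reflects only the 7 bytes that actually arrived.
 */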
6042 
6043 /*
6044  * handle receiving extended messages
6045  */
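/*
 * For reference, the extended message layout consumed below, as the
 * bytes land in f_imsgarea[] (a sketch based on the SCSI-2 message
 * formats, not a normative definition):
 *
 *      f_imsgarea[0]   MSG_EXTENDED (0x01)
 *      f_imsgarea[1]   extended message length
 *      f_imsgarea[2]   extended message code (emsg below), e.g.
 *                      MSG_SYNCHRONOUS or MSG_WIDE_DATA_XFER
 *      f_imsgarea[3]   SDTR: transfer period factor, in units of 4 ns
 *                      WDTR: transfer width exponent (0 = 8 bit, 1 = 16 bit)
 *      f_imsgarea[4]   SDTR only: REQ/ACK offset
 */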
6046 static int
6047 fas_multibyte_msg(struct fas *fas)
6048 {
6049 #ifdef FASDEBUG
6050         static char *mbs =
6051             "Target %d now Synchronous at %d.%d MB/s max transmit rate\n";
6052         static char *mbs1 =
6053             "Target %d now Synchronous at %d.0%d MB/s max transmit rate\n";
6054         static char *mbs2 =
6055             "Target %d now Synchronous at %d.00%d MB/s max transmit rate\n";
6056 #endif
6057         struct fas_cmd *sp = fas->f_current_sp;
6058         volatile struct fasreg *fasreg = fas->f_reg;
6059         uchar_t emsg = fas->f_imsgarea[2];
6060         int tgt = Tgt(sp);
6061         int msgout = 0;
6062 
6063         EPRINTF("fas_multibyte_msg:\n");
6064 
6065         if (emsg == MSG_SYNCHRONOUS) {
6066                 uint_t period, offset, regval;
6067                 uint_t minsync, maxsync, clockval;
6068                 uint_t xfer_freq, xfer_div, xfer_mod, xfer_rate;
6069 
6070                 period = fas->f_imsgarea[3] & 0xff;
6071                 offset = fas->f_imsgarea[4] & 0xff;
6072                 minsync = MIN_SYNC_PERIOD(fas);
6073                 maxsync = MAX_SYNC_PERIOD(fas);
6074                 DPRINTF5("sync msg received: %x %x %x %x %x\n",
6075                     fas->f_imsgarea[0], fas->f_imsgarea[1],
6076                     fas->f_imsgarea[2], fas->f_imsgarea[3],
6077                     fas->f_imsgarea[4]);
6078                 DPRINTF3("received period %d offset %d from tgt %d\n",
6079                     period, offset, tgt);
6080                 DPRINTF3("calculated minsync %d, maxsync %d for tgt %d\n",
6081                     minsync, maxsync, tgt);
6082                 DPRINTF2("sync period %d, neg period %d\n",
6083                     fas->f_sync_period[tgt], fas->f_neg_period[tgt]);
6084 
6085                 if ((++(fas->f_sdtr_sent)) & 1) {
6086                         /*
6087                          * In cases where the target negotiates synchronous
6088                          * mode before we do, and we either have sync mode
6089                          * disabled, or this target is known to be a weak
6090                          * signal target, we send back a message indicating
6091                          * a desire to stay in asynchronous mode (the SCSI-2
6092                          * spec states that if we have synchronous capability
6093                          * then we cannot reject a SYNCHRONOUS DATA TRANSFER
6094                          * REQUEST message).
6095                          */
6096                         IPRINTF1("SYNC negotiation initiated by target %d\n",
6097                             tgt);
6098 
6099                         msgout = MSG_EXTENDED;
6100 
6101                         period =
6102                             period ? max(period, MIN_SYNC_PERIOD(fas)) : 0;
6103 
6104                         if (fas->f_backoff & (1<<tgt)) {
6105                                 period = period ?
6106                                     max(period, fas->f_neg_period[tgt]) : 0;
6107                         }
6108                         offset = min(offset, fas_default_offset);
6109                 }
6110                 xfer_freq = regval = 0;
6111 
6112                 /*
6113                  * If the target's offset is bigger than ours,
6114                  * the target has violated the scsi protocol.
6115                  */
6116                 if (offset > fas_default_offset) {
6117                         period = offset = 0;
6118                         msgout = MSG_REJECT;
6119                 }
6120 
6121                 if (offset && (period > maxsync)) {
6122                         /*
6123                          * We cannot transmit data in synchronous
6124                          * mode this slow, so convert to asynchronous
6125                          * mode.
6126                          */
6127                         msgout = MSG_EXTENDED;
6128                         period = offset = 0;
6129 
6130                 } else if (offset && (period < minsync)) {
6131                         /*
6132                          * If the target's period is less than ours,
6133                          * the target has violated the scsi protocol.
6134                          */
6135                         period = offset = 0;
6136                         msgout = MSG_REJECT;
6137 
6138                 } else if (offset) {
6139                         /*
6140                          * Conversion method for received PERIOD value
6141                          * to the number of input clock ticks to the FAS.
6142                          *
6143                          * We adjust the input period value such that
6144                          * we always will transmit data *not* faster
6145                          * than the period value received.
6146                          */
6147 
6148                         clockval = fas->f_clock_cycle / 1000;
6149                         regval = (((period << 2) + clockval - 1) / clockval);
6150 
6151                         /*
6152                          * correction if xfer rate <= 5MB/sec
6153                          * XXX do we need this?
6154                          */
6155                         if (regval && (period >= FASTSCSI_THRESHOLD)) {
6156                                 regval--;
6157                         }
6158                 }
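
                /*
                 * A worked example of the conversion above (a sketch; the
                 * actual input clock rate is board dependent).  Assuming a
                 * 40 MHz clock, f_clock_cycle / 1000 == 25, i.e. 25 ns per
                 * tick.  A target requesting period == 25 (25 * 4 = 100 ns,
                 * fast SCSI) then gives
                 *
                 *      regval = ((25 << 2) + 25 - 1) / 25 = 124 / 25 = 4
                 *
                 * which is the ceiling of 100 ns / 25 ns, so the chip is
                 * programmed for 4 clocks per byte and we never transmit
                 * faster than the target asked for.  The FASTSCSI_THRESHOLD
                 * correction just above backs regval off by one only for
                 * slow (<= 5 MB/s) requests.
                 */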
6159 
6160                 fas->f_offset[tgt] = offset;
6161                 fas->f_neg_period[tgt] = period;
6162 
6163                 /*
6164                  * It is now safe to produce a response to a target-
6165                  * initiated sdtr; period and offset have been checked.
6166                  */
6167                 if (msgout == MSG_EXTENDED) {
6168                         fas_make_sdtr(fas, 0, tgt);
6169                         period = fas->f_neg_period[tgt];
6170                         offset = (fas->f_offset[tgt] & 0xf);
6171                 }
6172 
6173                 if (offset) {
6174                         fas->f_sync_period[tgt] = regval & SYNC_PERIOD_MASK;
6175                         fas_reg_write(fas, (uchar_t *)&fasreg->fas_sync_period,
6176                             fas->f_sync_period[tgt]);
6177 
6178                         fas->f_offset[tgt] = offset | fas->f_req_ack_delay;
6179                         fas_reg_write(fas, (uchar_t *)&fasreg->fas_sync_offset,
6180                             fas->f_offset[tgt]);
6181 
6182                         /*
6183                          * if transferring > 5 MB/sec then enable
6184                          * fastscsi in conf3
6185                          */
6186                         if (period < FASTSCSI_THRESHOLD) {
6187                                 fas->f_fasconf3[tgt] |= FAS_CONF3_FASTSCSI;
6188                         } else {
6189                                 fas->f_fasconf3[tgt] &= ~FAS_CONF3_FASTSCSI;
6190                         }
6191 
6192                         fas_reg_write(fas, (uchar_t *)&fasreg->fas_conf3,
6193                             fas->f_fasconf3[tgt]);
6194 
6195                         DPRINTF4("period %d (%d), offset %d to tgt %d\n",
6196                             period,
6197                             fas->f_sync_period[tgt] & SYNC_PERIOD_MASK,
6198                             fas->f_offset[tgt] & 0xf, tgt);
6199                         DPRINTF1("req/ack delay = %x\n", fas->f_req_ack_delay);
6200                         DPRINTF1("conf3 = %x\n", fas->f_fasconf3[tgt]);
6201 #ifdef FASDEBUG
6202                         /*
6203                          * Convert input clock cycle per
6204                          * byte to nanoseconds per byte.
6205                          * (ns/b), and convert that to
6206                          * k-bytes/second.
6207                          */
6208                         xfer_freq = FAS_SYNC_KBPS((regval *
6209                             fas->f_clock_cycle) / 1000);
6210                         xfer_rate = ((fas->f_nowide & (1<<tgt))? 1 : 2) *
6211                             xfer_freq;
6212                         xfer_div = xfer_rate / 1000;
6213                         xfer_mod = xfer_rate % 1000;
6214 
6215 
6216                         if (xfer_mod > 99) {
6217                                 IPRINTF3(mbs, tgt, xfer_div, xfer_mod);
6218                         } else if (xfer_mod > 9) {
6219                                 IPRINTF3(mbs1, tgt, xfer_div, xfer_mod);
6220                         } else {
6221                                 IPRINTF3(mbs2, tgt, xfer_div, xfer_mod);
6222                         }
6223 #endif
6224                         fas->f_sync_enabled |= (1<<tgt);
6225 
6226                 } else {
6227                         /*
6228                          * We are converting back to async mode.
6229                          */
6230                         fas_revert_to_async(fas, tgt);
6231                 }
6232 
6233                 /*
6234                  * If this target violated the scsi spec, reject the
6235                  * sdtr msg and don't negotiate sdtr again.
6236                  */
6237                 if (msgout == MSG_REJECT) {
6238                         fas->f_nosync |= (1<<tgt);
6239                 }
6240 
6241                 fas->f_props_update |= (1<<tgt);
6242 
6243         } else if (emsg == MSG_WIDE_DATA_XFER) {
6244                 uchar_t width = fas->f_imsgarea[3] & 0xff;
6245 
6246                 DPRINTF4("wide msg received: %x %x %x %x\n",
6247                     fas->f_imsgarea[0], fas->f_imsgarea[1],
6248                     fas->f_imsgarea[2], fas->f_imsgarea[3]);
6249 
6250                 /* always renegotiate sync after wide */
6251                 msgout = MSG_EXTENDED;
6252 
6253                 if ((++(fas->f_wdtr_sent)) & 1) {
6254                         IPRINTF1("Wide negotiation initiated by target %d\n",
6255                             tgt);
6256                         /*
6257                          * allow wide neg even if the target driver hasn't
6258                          * enabled wide yet.
6259                          */
6260                         fas->f_nowide &= ~(1<<tgt);
6261                         fas_make_wdtr(fas, 0, tgt, width);
6262                         IPRINTF1("sending wide sync %d back\n", width);
6263                         /*
6264                          * Let us go back to async mode(SCSI spec)
6265                          * and depend on target to do sync
6266                          * after wide negotiations.
6267                          * If target does not do a sync neg and enters
6268                          * async mode we will negotiate sync on next command
6269                          */
6270                         fas_revert_to_async(fas, tgt);
6271                         fas->f_sync_known &= ~(1<<tgt);
6272                 } else {
6273                         /*
6274                          * renegotiate sync after wide
6275                          */
6276                         fas_set_wide_conf3(fas, tgt, width);
6277                         ASSERT(width <= 1);
6278                         fas->f_wdtr_sent = 0;
6279                         if ((fas->f_nosync & (1<<tgt)) == 0) {
6280                                 fas_make_sdtr(fas, 0, tgt);
6281                         } else {
6282                                 msgout = 0;
6283                         }
6284                 }
6285 
6286                 fas->f_props_update |= (1<<tgt);
6287 
6288         } else if (emsg == MSG_MODIFY_DATA_PTR) {
6289                 msgout = MSG_REJECT;
6290         } else {
6291                 fas_log(fas, CE_WARN,
6292                     "Rejecting message %s 0x%x from Target %d",
6293                     scsi_mname(MSG_EXTENDED), emsg, tgt);
6294                 msgout = MSG_REJECT;
6295         }
6296 out:
6297         New_state(fas, ACTS_UNKNOWN);
6298         return (msgout);
6299 }
6300 
6301 /*
6302  * Back off sync negotiation
6303  * and go to async mode
6304  */
6305 static void
6306 fas_revert_to_async(struct fas *fas, int tgt)
6307 {
6308         volatile struct fasreg *fasreg = fas->f_reg;
6309 
6310         fas->f_sync_period[tgt] = 0;
6311         fas_reg_write(fas, (uchar_t *)&fasreg->fas_sync_period, 0);
6312         fas->f_offset[tgt] = 0;
6313         fas_reg_write(fas, (uchar_t *)&fasreg->fas_sync_offset, 0);
6314         fas->f_fasconf3[tgt] &= ~FAS_CONF3_FASTSCSI;
6315         fas_reg_write(fas, &fasreg->fas_conf3, fas->f_fasconf3[tgt]);
6316         fas->f_sync_enabled &= ~(1<<tgt);
6317 }
6318 
6319 /*
6320  * handle an unexpected selection attempt
6321  * XXX look for better way: msg reject, drop off the bus
6322  */
6323 static int
6324 fas_handle_selection(struct fas *fas)
6325 {
6326         fas_reg_cmd_write(fas, CMD_DISCONNECT);
6327         fas_reg_cmd_write(fas, CMD_FLUSH);
6328         fas_reg_cmd_write(fas, CMD_EN_RESEL);
6329         return (ACTION_RETURN);
6330 }
6331 
6332 /*
6333  * dma window handling
6334  */
6335 static int
6336 fas_restore_pointers(struct fas *fas, struct fas_cmd *sp)
6337 {
6338         if (sp->cmd_data_count != sp->cmd_saved_data_count) {
6339                 sp->cmd_data_count = sp->cmd_saved_data_count;
6340                 sp->cmd_cur_addr = sp->cmd_saved_cur_addr;
6341 
6342                 if (sp->cmd_cur_win != sp->cmd_saved_win) {
6343                         sp->cmd_cur_win = sp->cmd_saved_win;
6344                         if (fas_set_new_window(fas, sp)) {
6345                                 return (-1);
6346                         }
6347                 }
6348                 DPRINTF1("curaddr=%x\n", sp->cmd_cur_addr);
6349         }
6350         return (0);
6351 }
6352 
6353 static int
6354 fas_set_new_window(struct fas *fas, struct fas_cmd *sp)
6355 {
6356         off_t offset;
6357         size_t len;
6358         uint_t count;
6359 
6360         if (ddi_dma_getwin(sp->cmd_dmahandle, sp->cmd_cur_win,
6361             &offset, &len, &sp->cmd_dmacookie, &count) != DDI_SUCCESS) {
6362                 return (-1);
6363         }
6364 
6365         DPRINTF4("new window %x: off=%lx, len=%lx, count=%x\n",
6366             sp->cmd_cur_win, offset, len, count);
6367 
6368         ASSERT(count == 1);
6369         return (0);
6370 }
6371 
6372 static int
6373 fas_next_window(struct fas *fas, struct fas_cmd *sp, uint64_t end)
6374 {
6375 
6376         /* are there more windows? */
6377         if (sp->cmd_nwin == 0) {
6378                 uint_t nwin = 0;
6379                 (void) ddi_dma_numwin(sp->cmd_dmahandle, &nwin);
6380                 sp->cmd_nwin = (uchar_t)nwin;
6381         }
6382 
6383         DPRINTF5(
6384             "cmd_data_count=%x, dmacount=%x, curaddr=%x, end=%lx, nwin=%x\n",
6385             sp->cmd_data_count, sp->cmd_dmacount, sp->cmd_cur_addr, end,
6386             sp->cmd_nwin);
6387 
6388         if (sp->cmd_cur_win < sp->cmd_nwin) {
6389                 sp->cmd_cur_win++;
6390                 if (fas_set_new_window(fas, sp)) {
6391                         fas_printstate(fas, "cannot set new window");
6392                         sp->cmd_cur_win--;
6393                         return (-1);
6394                 }
6395         /*
6396          * if there are no more windows, we have a data overrun condition
6397          */
6398         } else {
6399                 int slot = sp->cmd_slot;
6400 
6401                 fas_printstate(fas, "data transfer overrun");
6402                 fas_set_pkt_reason(fas, sp, CMD_DATA_OVR, 0);
6403 
6404                 /*
6405                  * if we get data transfer overruns, assume we have
6406                  * a weak scsi bus. Note that this won't catch consistent
6407                  * underruns or other noise related syndromes.
6408                  */
6409                 fas_sync_wide_backoff(fas, sp, slot);
6410                 return (-1);
6411         }
6412         sp->cmd_cur_addr = sp->cmd_dmacookie.dmac_address;
6413         DPRINTF1("cur_addr=%x\n", sp->cmd_cur_addr);
6414         return (0);
6415 }
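
/*
 * For context, a sketch of where the windows walked above come from (the
 * bind itself happens elsewhere in the driver; the variable names in this
 * fragment are illustrative only).  A transfer too large for the DMA
 * engine's limits is bound with the DDI_DMA_PARTIAL flag; when the bind
 * returns DDI_DMA_PARTIAL_MAP the request has been split into windows,
 * which are counted with ddi_dma_numwin() and activated one at a time
 * with ddi_dma_getwin(), as fas_set_new_window() does above:
 *
 *      uint_t nwin = 0;
 *      (void) ddi_dma_numwin(handle, &nwin);
 *      for (win = 0; win < nwin; win++)
 *              (void) ddi_dma_getwin(handle, win, &off, &len,
 *                  &cookie, &ccount);
 */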
6416 
6417 /*
6418  * dma error handler
6419  */
6420 static int
6421 fas_check_dma_error(struct fas *fas)
6422 {
6423         /*
6424          * was there a dma error that caused fas_intr_svc() to be called?
6425          */
6426         if (fas->f_dma->dma_csr & DMA_ERRPEND) {
6427                 /*
6428                  * It would be desirable to set the ATN* line and attempt to
6429                  * do the whole schmear of INITIATOR DETECTED ERROR here,
6430                  * but that is too hard to do at present.
6431                  */
6432                 fas_log(fas, CE_WARN, "Unrecoverable DMA error");
6433                 fas_printstate(fas, "dma error");
6434                 fas_set_pkt_reason(fas, fas->f_current_sp, CMD_TRAN_ERR, 0);
6435                 return (-1);
6436         }
6437         return (0);
6438 }
6439 
6440 /*
6441  * check for gross error or spurious interrupt
6442  */
6443 static int
6444 fas_handle_gross_err(struct fas *fas)
6445 {
6446         volatile struct fasreg *fasreg = fas->f_reg;
6447 
6448         fas_log(fas, CE_WARN,
6449             "gross error in fas status (%x)", fas->f_stat);
6450 
6451         IPRINTF5("fas_cmd=%x, stat=%x, intr=%x, step=%x, fifoflag=%x\n",
6452             fasreg->fas_cmd, fas->f_stat, fas->f_intr, fasreg->fas_step,
6453             fasreg->fas_fifo_flag);
6454 
6455         fas_set_pkt_reason(fas, fas->f_current_sp, CMD_TRAN_ERR, 0);
6456 
6457         fas_internal_reset(fas, FAS_RESET_FAS);
6458         return (ACTION_RESET);
6459 }
6460 
6461 
6462 /*
6463  * handle illegal cmd interrupt or (external) bus reset cleanup
6464  */
6465 static int
6466 fas_illegal_cmd_or_bus_reset(struct fas *fas)
6467 {
6468         /*
6469          * If we detect a SCSI reset, we blow away the current
6470          * command (if there is one) and all disconnected commands
6471          * because we now don't know the state of them at all.
6472          */
6473         ASSERT(fas->f_intr & (FAS_INT_ILLEGAL | FAS_INT_RESET));
6474 
6475         if (fas->f_intr & FAS_INT_RESET) {
6476                 return (ACTION_FINRST);
6477         }
6478 
6479         /*
6480          * Illegal cmd to fas:
6481          * This should not happen. The one situation where
6482          * we can get an ILLEGAL COMMAND interrupt is due to
6483          * a bug in the FAS366 during reselection which we
6484          * should be handling in fas_reconnect().
6485          */
6486         if (fas->f_intr & FAS_INT_ILLEGAL) {
6487                 IPRINTF1("lastcmd=%x\n", fas->f_reg->fas_cmd);
6488                 fas_printstate(fas, "ILLEGAL bit set");
6489                 return (ACTION_RESET);
6490         }
6491         /*NOTREACHED*/
6492         return (ACTION_RETURN);
6493 }
6494 
6495 /*
6496  * set throttles for all luns of this target
6497  */
6498 static void
6499 fas_set_throttles(struct fas *fas, int slot, int n, int what)
6500 {
6501         int i;
6502 
6503         /*
6504          * if the bus is draining/quiesced, no changes to the throttles
6505          * are allowed. Not allowing changes to the throttles during
6506          * draining limits error recovery but reduces draining time.
6507          *
6508          * all throttles should have been set to HOLD_THROTTLE
6509          */
6510         if (fas->f_softstate & (FAS_SS_QUIESCED | FAS_SS_DRAINING)) {
6511                 return;
6512         }
6513 
6514         ASSERT((n == 1) || (n == N_SLOTS) || (n == NLUNS_PER_TARGET));
6515         ASSERT((slot + n) <= N_SLOTS);
6516         if (n == NLUNS_PER_TARGET) {
6517                 slot &= ~(NLUNS_PER_TARGET - 1);
6518         }
6519 
6520         for (i = slot; i < (slot + n); i++) {
6521                 if (what == HOLD_THROTTLE) {
6522                         fas->f_throttle[i] = HOLD_THROTTLE;
6523                 } else if ((fas->f_reset_delay[i/NLUNS_PER_TARGET]) == 0) {
6524                         if (what == MAX_THROTTLE) {
6525                                 int tshift = 1 << (i/NLUNS_PER_TARGET);
6526                                 fas->f_throttle[i] = (short)
6527                                     ((fas->f_notag & tshift)? 1 : what);
6528                         } else {
6529                                 fas->f_throttle[i] = what;
6530                         }
6531                 }
6532         }
6533 }
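
/*
 * A small worked example of the slot arithmetic above, assuming the
 * usual NLUNS_PER_TARGET of 8 (the real value comes from the driver
 * headers): slots are laid out as target * NLUNS_PER_TARGET + lun, so
 * for slot 19 (target 2, LUN 3) a per-target request (n ==
 * NLUNS_PER_TARGET) first rounds slot down to 16 via
 * slot &= ~(NLUNS_PER_TARGET - 1) and then walks slots 16..23, i.e.
 * every LUN of target 2.  The mask 1 << (i / NLUNS_PER_TARGET) likewise
 * selects that target's bit (bit 2 here) in per-target bit fields such
 * as f_notag.
 */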
6534 
6535 static void
6536 fas_set_all_lun_throttles(struct fas *fas, int slot, int what)
6537 {
6538         /*
6539          * fas_set_throttles() will adjust slot to start at LUN 0
6540          */
6541         fas_set_throttles(fas, slot, NLUNS_PER_TARGET, what);
6542 }
6543 
6544 static void
6545 fas_full_throttle(struct fas *fas, int slot)
6546 {
6547         fas_set_throttles(fas, slot, 1, MAX_THROTTLE);
6548 }
6549 
6550 /*
6551  * run a polled cmd
6552  */
6553 static void
6554 fas_runpoll(struct fas *fas, short slot, struct fas_cmd *sp)
6555 {
6556         int limit, i, n;
6557         int timeout = 0;
6558 
6559         DPRINTF4("runpoll: slot=%x, cmd=%x, current_sp=0x%p, tcmds=%x\n",
6560             slot, *((uchar_t *)sp->cmd_pkt->pkt_cdbp),
6561             (void *)fas->f_current_sp, fas->f_tcmds[slot]);
6562 
6563         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_RUNPOLL_START, "fas_runpoll_start");
6564 
6565         /*
6566          * wait for cmd to complete
6567          * don't start new cmds so set throttles to HOLD_THROTTLE
6568          */
6569         while ((sp->cmd_flags & CFLAG_COMPLETED) == 0) {
6570                 if (!(sp->cmd_flags & CFLAG_CMDPROXY)) {
6571                         fas_set_all_lun_throttles(fas, slot, HOLD_THROTTLE);
6572                 }
6573                 if ((fas->f_state != STATE_FREE) || INTPENDING(fas)) {
6574                         if (fas_dopoll(fas, POLL_TIMEOUT) <= 0) {
6575                                 IPRINTF("runpoll: timeout on draining\n");
6576                                 goto bad;
6577                         }
6578                 }
6579 
6580                 ASSERT(fas->f_state == STATE_FREE);
6581                 ASSERT(fas->f_current_sp == NULL);
6582 
6583                 /*
6584                  * if this is not a proxy cmd, don't start the cmd
6585                  * without draining the active cmd(s);
6586                  * for proxy cmds, we zap the active cmd and assume
6587                  * that the caller will take care of this.
6588                  * For tagged cmds, wait to submit a non-tagged
6589                  * cmd until the queue has been drained.
6590                  * If the cmd is a request sense, then draining won't
6591                  * help since we are in a contingent allegiance condition
6592                  */
6593                 if (!(sp->cmd_flags & CFLAG_CMDPROXY)) {
6594                         uchar_t *cmdp = (uchar_t *)sp->cmd_pkt->pkt_cdbp;
6595 
6596                         if ((fas->f_tcmds[slot]) &&
6597                             (NOTAG(Tgt(sp)) ||
6598                             (((sp->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
6599                             (*cmdp != SCMD_REQUEST_SENSE)))) {
6600                                 if (timeout < POLL_TIMEOUT) {
6601                                         timeout += 100;
6602                                         drv_usecwait(100);
6603                                         continue;
6604                                 } else {
6605                                         fas_log(fas, CE_WARN,
6606                                             "polled cmd failed (target busy)");
6607                                         goto cleanup;
6608                                 }
6609                         }
6610                 }
6611 
6612                 /*
6613                  * If the draining of active commands killed
6614                  * the current polled command, we're done.
6615                  */
6616                 if (sp->cmd_flags & CFLAG_COMPLETED) {
6617                         break;
6618                 }
6619 
6620                 /*
6621                  * ensure we are not accessing a target too quickly
6622                  * after a reset. the throttles get set back later
6623                  * by the reset delay watch; hopefully, we don't go
6624                  * thru this loop more than once
6625                  */
6626                 if (fas->f_reset_delay[slot/NLUNS_PER_TARGET]) {
6627                         IPRINTF1("reset delay set for slot %x\n", slot);
6628                         drv_usecwait(fas->f_scsi_reset_delay * 1000);
6629                         for (i = 0; i < NTARGETS_WIDE; i++) {
6630                                 if (fas->f_reset_delay[i]) {
6631                                         int s = i * NLUNS_PER_TARGET;
6632                                         int e = s + NLUNS_PER_TARGET;
6633                                         fas->f_reset_delay[i] = 0;
6634                                         for (; s < e; s++) {
6635                                                 fas_full_throttle(fas, s);
6636                                         }
6637                                 }
6638                         }
6639                 }
6640 
6641                 /*
6642                  * fas_startcmd() will return false if preempted
6643                  * or draining
6644                  */
6645                 if (fas_startcmd(fas, sp) != TRUE) {
6646                         IPRINTF("runpoll: cannot start new cmds\n");
6647                         ASSERT(fas->f_current_sp != sp);
6648                         continue;
6649                 }
6650 
6651                 /*
6652                  * We're now 'running' this command.
6653                  *
6654                  * fas_dopoll will always return when
6655                  * fas->f_state is STATE_FREE, or when the limit expires.
6656                  */
6657                 limit = sp->cmd_pkt->pkt_time * 1000000;
6658                 if (limit == 0) {
6659                         limit = POLL_TIMEOUT;
6660                 }
6661 
6662                 /*
6663                  * if the cmd disconnected, the first call to fas_dopoll
6664                  * will return with bus free; we go thru the loop one more
6665                  * time and wait limit usec for the target to reconnect
6666                  */
6667                 for (i = 0; i <= POLL_TIMEOUT; i += 100) {
6668 
6669                         if ((n = fas_dopoll(fas, limit)) <= 0) {
6670                                 IPRINTF("runpoll: timeout on polling\n");
6671                                 goto bad;
6672                         }
6673 
6674                         /*
6675                          * If a preemption occurred that caused this
6676                          * command to actually not start, go around
6677                          * the loop again. If CFLAG_COMPLETED is set, the
6678                          * command completed
6679                          */
6680                         if ((sp->cmd_flags & CFLAG_COMPLETED) ||
6681                             (sp->cmd_pkt->pkt_state == 0)) {
6682                                 break;
6683                         }
6684 
6685                         /*
6686                          * the bus may have gone free because the target
6687                          * disconnected; go thru the loop again
6688                          */
6689                         ASSERT(fas->f_state == STATE_FREE);
6690                         if (n == 0) {
6691                                 /*
6692                                  * bump i, we have waited limit usecs in
6693                                  * fas_dopoll
6694                                  */
6695                                 i += limit - 100;
6696                         }
6697                 }
6698 
6699                 if ((sp->cmd_flags & CFLAG_COMPLETED) == 0) {
6700 
6701                         if (i > POLL_TIMEOUT) {
6702                                 IPRINTF("polled timeout on disc. cmd\n");
6703                                 goto bad;
6704                         }
6705 
6706                         if (sp->cmd_pkt->pkt_state) {
6707                                 /*
6708                                  * don't go thru the loop again; the cmd
6709                                  * was already started
6710                                  */
6711                                 IPRINTF("fas_runpoll: cmd started??\n");
6712                                 goto bad;
6713                         }
6714                 }
6715         }
6716 
6717         /*
6718          * blindly restore the throttles; that is preferable to
6719          * leaving a throttle hanging at 0 with no one to clear it
6720          */
6721         if (!(sp->cmd_flags & CFLAG_CMDPROXY)) {
6722                 fas_set_all_lun_throttles(fas, slot, MAX_THROTTLE);
6723         }
6724 
6725         /*
6726          * ensure that the cmd is completely removed
6727          */
6728         fas_remove_cmd(fas, sp, 0);
6729 
6730         /*
6731          * If we stored up commands to do, start them off now.
6732          */
6733         if ((fas->f_state == STATE_FREE) &&
6734             (!(sp->cmd_flags & CFLAG_CMDPROXY))) {
6735                 (void) fas_ustart(fas);
6736         }
6737 exit:
6738         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_RUNPOLL_END, "fas_runpoll_end");
6739         return;
6740 
6741 bad:
6742         fas_log(fas, CE_WARN, "Polled cmd failed");
6743 #ifdef FASDEBUG
6744         fas_printstate(fas, "fas_runpoll: polled cmd failed");
6745 #endif /* FASDEBUG */
6746 
6747 cleanup:
6748         fas_set_all_lun_throttles(fas, slot, MAX_THROTTLE);
6749 
6750         /*
6751          * clean up all traces of this sp because fas_runpoll will return
6752          * before fas_reset_recovery() cleans up
6753          */
6754         fas_remove_cmd(fas, sp, NEW_TIMEOUT);
6755         fas_decrement_ncmds(fas, sp);
6756         fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
6757 
6758         if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
6759                 (void) fas_reset_bus(fas);
6760         }
6761         goto exit;
6762 }
6763 
6764 /*
6765  * Poll for command completion (i.e., no interrupts)
6766  * limit is in usec (and will not be very accurate)
6767  *
6768  * the assumption is that we only run polled cmds in interrupt context
6769  * as scsi_transport will filter out FLAG_NOINTR
6770  */
6771 static int
6772 fas_dopoll(struct fas *fas, int limit)
6773 {
6774         int i, n;
6775 
6776         /*
6777          * the timeout is not very accurate since we don't know how
6778          * long the poll takes;
6779          * also, if the packet gets started fairly late, we may
6780          * time out prematurely.
6781          * fas_dopoll always returns if f_state transitions to STATE_FREE
6782          */
6783         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_DOPOLL_START, "fas_dopoll_start");
6784 
6785         if (limit == 0) {
6786                 limit = POLL_TIMEOUT;
6787         }
6788 
6789         for (n = i = 0; i < limit; i += 100) {
6790                 if (INTPENDING(fas)) {
6791                         fas->f_polled_intr = 1;
6792                         n++;
6793                         (void) fas_intr_svc(fas);
6794                         if (fas->f_state == STATE_FREE)
6795                                 break;
6796                 }
6797                 drv_usecwait(100);
6798         }
6799 
6800         if (i >= limit && fas->f_state != STATE_FREE) {
6801                 fas_printstate(fas, "polled command timeout");
6802                 n = -1;
6803         }
6804         TRACE_1(TR_FAC_SCSI_FAS, TR_FAS_DOPOLL_END,
6805             "fas_dopoll_end: rval %x", n);
6806         return (n);
6807 }
6808 
6809 /*
6810  * prepare a sync negotiation message
6811  */
6812 static void
6813 fas_make_sdtr(struct fas *fas, int msgout_offset, int target)
6814 {
6815         uchar_t *p = fas->f_cur_msgout + msgout_offset;
6816         ushort_t tshift = 1<<target;
6817         uchar_t period = MIN_SYNC_PERIOD(fas);
6818         uchar_t offset = fas_default_offset;
6819 
6820         /*
6821          * If this target experienced a sync backoff, use the
6822          * target's sync speed that was adjusted in
6823          * fas_sync_wide_backoff.  For a second sync backoff,
6824          * the offset will be adjusted below in the sanity checks.
6825          */
6826         if (fas->f_backoff & tshift) {
6827                 period = fas->f_neg_period[target];
6828         }
6829 
6830         /*
6831          * If this is a response to a target-initiated sdtr,
6832          * use the agreed-upon values.
6833          */
6834         if (fas->f_sdtr_sent & 1) {
6835                 period = fas->f_neg_period[target];
6836                 offset = fas->f_offset[target];
6837         }
6838 
6839         /*
6840          * If the target driver disabled
6841          * sync then make offset = 0
6842          */
6843         if (fas->f_force_async & tshift) {
6844                 offset = 0;
6845         }
6846 
6847         /*
6848          * sanity check of period and offset
6849          */
6850         if (fas->f_target_scsi_options[target] & SCSI_OPTIONS_FAST) {
6851                 if (period < (uchar_t)(DEFAULT_FASTSYNC_PERIOD/4)) {
6852                         period = (uchar_t)(DEFAULT_FASTSYNC_PERIOD/4);
6853                 }
6854         } else if (fas->f_target_scsi_options[target] & SCSI_OPTIONS_SYNC) {
6855                 if (period < (uchar_t)(DEFAULT_SYNC_PERIOD/4)) {
6856                         period = (uchar_t)(DEFAULT_SYNC_PERIOD/4);
6857                 }
6858         } else {
6859                 fas->f_nosync |= tshift;
6860         }
6861 
6862         if (fas->f_nosync & tshift) {
6863                 offset = 0;
6864         }
6865 
6866         if ((uchar_t)(offset & 0xf) > fas_default_offset) {
6867                 offset = fas_default_offset | fas->f_req_ack_delay;
6868         }
6869 
6870         fas->f_neg_period[target] = (uchar_t)period;
6871         fas->f_offset[target] = (uchar_t)offset;
6872 
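             /*
              * build the 5-byte extended SDTR message: extended msg,
              * length 3, SDTR code, transfer period factor and
              * REQ/ACK offset (low nibble only)
              */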
6873         *p++ = (uchar_t)MSG_EXTENDED;
6874         *p++ = (uchar_t)3;
6875         *p++ = (uchar_t)MSG_SYNCHRONOUS;
6876         *p++ = period;
6877         *p++ = offset & 0xf;
6878         fas->f_omsglen = 5 + msgout_offset;
6879 
6880         IPRINTF2("fas_make_sdtr: period = %x, offset = %x\n",
6881             period, offset);
6882         /*
6883          * increment sdtr flag, odd value indicates that we initiated
6884          * the negotiation
6885          */
6886         fas->f_sdtr_sent++;
6887 
6888         /*
6889          * the target may reject the optional sync message, so
6890          * to avoid negotiating on every cmd, set sync known here;
6891          * we should not negotiate wide after sync again
6892          */
6893         fas->f_sync_known |= 1<<target;
6894         fas->f_wide_known |= 1<<target;
6895 }
6896 
6897 /*
6898  * prepare a wide negotiation message
6899  */
6900 static void
6901 fas_make_wdtr(struct fas *fas, int msgout_offset, int target, int width)
6902 {
6903         uchar_t *p = fas->f_cur_msgout + msgout_offset;
6904 
6905         if (((fas->f_target_scsi_options[target] & SCSI_OPTIONS_WIDE) == 0) ||
6906             (fas->f_nowide & (1<<target))) {
6907                 fas->f_nowide |= 1<<target;
6908                 width = 0;
6909         }
6910         if (fas->f_force_narrow & (1<<target)) {
6911                 width = 0;
6912         }
6913         width = min(FAS_XFER_WIDTH, width);
6914 
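             /*
              * build the 4-byte extended WDTR message: extended msg,
              * length 2, WDTR code and transfer width exponent
              * (0 = 8-bit, 1 = 16-bit)
              */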
6915         *p++ = (uchar_t)MSG_EXTENDED;
6916         *p++ = (uchar_t)2;
6917         *p++ = (uchar_t)MSG_WIDE_DATA_XFER;
6918         *p++ = (uchar_t)width;
6919         fas->f_omsglen = 4 + msgout_offset;
6920         IPRINTF1("fas_make_wdtr: width=%x\n", width);
6921 
6922         /*
6923          * increment wdtr flag, odd value indicates that we initiated
6924          * the negotiation
6925          */
6926         fas->f_wdtr_sent++;
6927 
6928         /*
6929          * the target may reject the optional wide message, so
6930          * to avoid negotiating on every cmd, set wide known here
6931          */
6932         fas->f_wide_known |= 1<<target;
6933 
6934         fas_set_wide_conf3(fas, target, width);
6935 }
6936 
6937 /*
6938  * auto request sense support
6939  * create or destroy an auto request sense packet
6940  */
6941 static int
6942 fas_create_arq_pkt(struct fas *fas, struct scsi_address *ap)
6943 {
6944         /*
6945          * Allocate a request sense packet using
6946          * scsi_alloc_consistent_buf()/scsi_init_pkt()
6946          */
6947         struct fas_cmd *rqpktp;
6948         uchar_t slot = ap->a_target * NLUNS_PER_TARGET | ap->a_lun;
6949         struct buf *bp;
6950         struct arq_private_data *arq_data;
6951 
6952         /*
6953          * if one exists, don't create another
6954          */
6955         if (fas->f_arq_pkt[slot] != 0) {
6956                 return (0);
6957         }
6958 
6959         /*
6960          * it would be nicer if we could allow the target driver
6961          * to specify the size, but using SENSE_LENGTH is easier
6962          * and OK for most drivers
6963          * Allocate a request sense packet.
6964          */
6965         bp = scsi_alloc_consistent_buf(ap, (struct buf *)NULL,
6966             SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL);
6967         rqpktp = PKT2CMD(scsi_init_pkt(ap,
6968             NULL, bp, CDB_GROUP0, 1, PKT_PRIV_LEN,
6969             PKT_CONSISTENT, SLEEP_FUNC, NULL));
6970         arq_data =
6971             (struct arq_private_data *)(rqpktp->cmd_pkt->pkt_private);
6972         arq_data->arq_save_bp = bp;
6973 
6974         RQ_MAKECOM_G0((CMD2PKT(rqpktp)),
6975             FLAG_SENSING | FLAG_HEAD | FLAG_NODISCON,
6976             (char)SCMD_REQUEST_SENSE, 0, (char)SENSE_LENGTH);
6977         rqpktp->cmd_flags |= CFLAG_CMDARQ;
6978         rqpktp->cmd_slot = slot;
6979         rqpktp->cmd_pkt->pkt_ha_private = rqpktp;
6980         fas->f_arq_pkt[slot] = rqpktp;
6981 
6982         /*
6983          * we need a function ptr here so abort/reset can
6984          * defer callbacks; fas_call_pkt_comp() calls
6985          * fas_complete_arq_pkt() directly without releasing the lock.
6986          * However, since we are not calling back directly thru
6987          * pkt_comp, don't check this with warlock
6988          */
6989 #ifndef __lock_lint
6990         rqpktp->cmd_pkt->pkt_comp =
6991             (void (*)(struct scsi_pkt *))fas_complete_arq_pkt;
6992 #endif
6993         return (0);
6994 }
6995 
6996 static int
6997 fas_delete_arq_pkt(struct fas *fas, struct scsi_address *ap)
6998 {
6999         struct fas_cmd *rqpktp;
7000         int slot = ap->a_target * NLUNS_PER_TARGET | ap->a_lun;
7001 
7002         /*
7003          * if there is still a pkt saved we cannot deallocate;
7004          * if there is no rqpkt there is nothing to do
7005          */
7006         if ((rqpktp = fas->f_arq_pkt[slot]) != NULL) {
7007                 struct arq_private_data *arq_data =
7008                     (struct arq_private_data *)(rqpktp->cmd_pkt->pkt_private);
7009                 struct buf *bp = arq_data->arq_save_bp;
7010                 /*
7011                  * is arq pkt in use?
7012                  */
7013                 if (arq_data->arq_save_sp) {
7014                         return (-1);
7015                 }
7016 
7017                 scsi_destroy_pkt(CMD2PKT(rqpktp));
7018                 scsi_free_consistent_buf(bp);
7019                 fas->f_arq_pkt[slot] = 0;
7020         }
7021         return (0);
7022 }
7023 
7024 /*
7025  * complete an arq packet by copying over transport info and the actual
7026  * request sense data; called with mutex held from fas_call_pkt_comp()
7027  */
7028 void
7029 fas_complete_arq_pkt(struct scsi_pkt *pkt)
7030 {
7031         struct fas *fas = ADDR2FAS(&pkt->pkt_address);
7032         struct fas_cmd *sp = pkt->pkt_ha_private;
7033         struct scsi_arq_status *arqstat;
7034         struct arq_private_data *arq_data =
7035             (struct arq_private_data *)sp->cmd_pkt->pkt_private;
7036         struct fas_cmd *ssp = arq_data->arq_save_sp;
7037         struct buf *bp = arq_data->arq_save_bp;
7038         int     slot = sp->cmd_slot;
7039 
7040         DPRINTF1("completing arq pkt sp=0x%p\n", (void *)sp);
7041         ASSERT(sp == fas->f_arq_pkt[slot]);
7042         ASSERT(arq_data->arq_save_sp != NULL);
7043         ASSERT(ssp != fas->f_active[sp->cmd_slot]->f_slot[sp->cmd_tag[1]]);
7044 
7045         arqstat = (struct scsi_arq_status *)(ssp->cmd_pkt->pkt_scbp);
7046         arqstat->sts_rqpkt_status = *((struct scsi_status *)
7047             (sp->cmd_pkt->pkt_scbp));
7048         arqstat->sts_rqpkt_reason = sp->cmd_pkt->pkt_reason;
7049         arqstat->sts_rqpkt_state  = sp->cmd_pkt->pkt_state;
7050         arqstat->sts_rqpkt_statistics = sp->cmd_pkt->pkt_statistics;
7051         arqstat->sts_rqpkt_resid  = sp->cmd_pkt->pkt_resid;
7052         arqstat->sts_sensedata =
7053             *((struct scsi_extended_sense *)bp->b_un.b_addr);
7054         ssp->cmd_pkt->pkt_state |= STATE_ARQ_DONE;
7055         arq_data->arq_save_sp = NULL;
7056 
7057         /*
7058          * ASC=0x47 is parity error
7059          */
7060         if (arqstat->sts_sensedata.es_key == KEY_ABORTED_COMMAND &&
7061             arqstat->sts_sensedata.es_add_code == 0x47) {
7062                 fas_sync_wide_backoff(fas, sp, slot);
7063         }
7064 
7065         fas_call_pkt_comp(fas, ssp);
7066 }
7067 
7068 /*
7069  * handle check condition and start an arq packet
7070  */
7071 static int
7072 fas_handle_sts_chk(struct fas *fas, struct fas_cmd *sp)
7073 {
7074         struct fas_cmd *arqsp = fas->f_arq_pkt[sp->cmd_slot];
7075         struct arq_private_data *arq_data;
7076         struct buf *bp;
7077 
7078         if ((arqsp == NULL) || (arqsp == sp) ||
7079             (sp->cmd_scblen < sizeof (struct scsi_arq_status))) {
7080                 IPRINTF("no arq packet or cannot arq on arq pkt\n");
7081                 fas_call_pkt_comp(fas, sp);
7082                 return (0);
7083         }
7084 
7085         arq_data = (struct arq_private_data *)arqsp->cmd_pkt->pkt_private;
7086         bp = arq_data->arq_save_bp;
7087 
7088         ASSERT(sp->cmd_flags & CFLAG_FINISHED);
7089         ASSERT(sp != fas->f_active[sp->cmd_slot]->f_slot[sp->cmd_tag[1]]);
7090         DPRINTF3("start arq for slot=%x, arqsp=0x%p, rqpkt=0x%p\n",
7091             sp->cmd_slot, (void *)arqsp, (void *)fas->f_arq_pkt[sp->cmd_slot]);
7092         if (arq_data->arq_save_sp != NULL) {
7093                 IPRINTF("auto request sense already in progress\n");
7094                 goto fail;
7095         }
7096 
7097         arq_data->arq_save_sp = sp;
7098 
7099         bzero(bp->b_un.b_addr, sizeof (struct scsi_extended_sense));
7100 
7101         /*
7102          * copy the timeout from the original packet for lack of a better
7103          * value;
7104          * we could take the residue of the timeout, but that might cause
7105          * premature timeouts
7106          */
7107         arqsp->cmd_pkt->pkt_time = sp->cmd_pkt->pkt_time;
7108         arqsp->cmd_flags &= ~CFLAG_TRANFLAG;
7109         ASSERT(arqsp->cmd_pkt->pkt_comp != NULL);
7110 
7111         /*
7112          * make sure that auto request sense always goes out
7113          * after queue full and after throttle was set to draining
7114          */
7115         fas_full_throttle(fas, sp->cmd_slot);
7116         (void) fas_accept_pkt(fas, arqsp, NO_TRAN_BUSY);
7117         return (0);
7118 
7119 fail:
7120         fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
7121         fas_log(fas, CE_WARN, "auto request sense failed\n");
7122         fas_dump_cmd(fas, sp);
7123         fas_call_pkt_comp(fas, sp);
7124         return (-1);
7125 }
7126 
7127 
7128 /*
7129  * handle qfull condition
7130  */
7131 static void
7132 fas_handle_qfull(struct fas *fas, struct fas_cmd *sp)
7133 {
7134         int slot = sp->cmd_slot;
7135 
7136         if ((++sp->cmd_qfull_retries > fas->f_qfull_retries[Tgt(sp)]) ||
7137             (fas->f_qfull_retries[Tgt(sp)] == 0)) {
7138                 /*
7139                  * We have exhausted the retries on QFULL, or
7140                  * the target driver has indicated that it
7141                  * wants to handle QFULL itself by setting the
7142                  * qfull-retries capability to 0. In either case
7143                  * we want the target driver's QFULL handling
7144                  * to kick in. We do this by having pkt_reason
7145                  * as CMD_CMPLT and pkt_scbp as STATUS_QFULL.
7146                  */
7147                 IPRINTF2("%d.%d: status queue full, retries over\n",
7148                     Tgt(sp), Lun(sp));
7149                 fas_set_all_lun_throttles(fas, slot, DRAIN_THROTTLE);
7150                 fas_call_pkt_comp(fas, sp);
7151         } else {
7152                 if (fas->f_reset_delay[Tgt(sp)] == 0) {
7153                         fas->f_throttle[slot] =
7154                             max((fas->f_tcmds[slot] - 2), 0);
7155                 }
7156                 IPRINTF3("%d.%d: status queue full, new throttle = %d, "
7157                     "retrying\n", Tgt(sp), Lun(sp), fas->f_throttle[slot]);
7158                 sp->cmd_pkt->pkt_flags |= FLAG_HEAD;
7159                 sp->cmd_flags &= ~CFLAG_TRANFLAG;
7160                 (void) fas_accept_pkt(fas, sp, NO_TRAN_BUSY);
7161 
7162                 /*
7163                  * when the target gives queue full status with no commands
7164                  * outstanding (f_tcmds[] == 0), throttle is set to 0
7165                  * (HOLD_THROTTLE), and the queue full handling starts
7166                  * (see psarc/1994/313); if there are commands outstanding,
7167                  * the throttle is set to (f_tcmds[] - 2)
7168                  */
7169                 if (fas->f_throttle[slot] == HOLD_THROTTLE) {
7170                         /*
7171                          * By setting throttle to QFULL_THROTTLE, we
7172                          * avoid submitting new commands and in
7173                          * fas_restart_cmd find out slots which need
7174                          * their throttles to be cleared.
7175                          */
7176                         fas_set_all_lun_throttles(fas, slot, QFULL_THROTTLE);
7177                         if (fas->f_restart_cmd_timeid == 0) {
7178                                 fas->f_restart_cmd_timeid =
7179                                     timeout(fas_restart_cmd, fas,
7180                                     fas->f_qfull_retry_interval[Tgt(sp)]);
7181                         }
7182                 }
7183         }
7184 }
7185 
7186 /*
7187  * invoked from timeout() to restart qfull cmds with throttle == 0
7188  */
7189 static void
7190 fas_restart_cmd(void *fas_arg)
7191 {
7192         struct fas *fas = fas_arg;
7193         int i;
7194 
7195         IPRINTF("fas_restart_cmd:\n");
7196 
7197         mutex_enter(FAS_MUTEX(fas));
7198         fas->f_restart_cmd_timeid = 0;
7199 
7200         for (i = 0; i < N_SLOTS; i += NLUNS_PER_TARGET) {
7201                 if (fas->f_reset_delay[i/NLUNS_PER_TARGET] == 0) {
7202                         if (fas->f_throttle[i] == QFULL_THROTTLE) {
7203                                 fas_set_all_lun_throttles(fas,
7204                                     i, MAX_THROTTLE);
7205                         }
7206                 }
7207         }
7208 
7209         (void) fas_ustart(fas);
7210         mutex_exit(FAS_MUTEX(fas));
7211 }
7212 
7213 /*
7214  * Timeout handling:
7215  * Command watchdog routines
7216  */
7217 
7218 /*ARGSUSED*/
7219 static void
7220 fas_watch(void *arg)
7221 {
7222         struct fas *fas;
7223         ushort_t        props_update = 0;
7224 
7225         rw_enter(&fas_global_rwlock, RW_READER);
7226 
7227         for (fas = fas_head; fas != (struct fas *)NULL; fas = fas->f_next) {
7228 
7229                 mutex_enter(FAS_MUTEX(fas));
7230                 IPRINTF2("ncmds=%x, ndisc=%x\n", fas->f_ncmds, fas->f_ndisc);
7231 
7232 #ifdef FAS_PIO_COUNTS
7233         if (fas->f_total_cmds) {
7234                 int n = fas->f_total_cmds;
7235 
7236                 fas_log(fas, CE_NOTE,
7237         "total=%d, cmds=%d fas-rd=%d, fas-wrt=%d, dma-rd=%d, dma-wrt=%d\n",
7238                     fas->f_total_cmds,
7239                     fas->f_reg_cmds/n,
7240                     fas->f_reg_reads/n, fas->f_reg_writes/n,
7241                     fas->f_reg_dma_reads/n, fas->f_reg_dma_writes/n);
7242 
7243                 fas->f_reg_reads = fas->f_reg_writes =
7244                     fas->f_reg_dma_reads = fas->f_reg_dma_writes =
7245                     fas->f_reg_cmds = fas->f_total_cmds = 0;
7246         }
7247 #endif
7248                 if (fas->f_ncmds) {
7249                         int i;
7250                         fas_watchsubr(fas);
7251 
7252                         /*
7253                          * reset the throttle; it may have been left
7254                          * too low if a queue full condition was caused by
7255                          * another initiator.
7256                          * Only reset the throttle if no cmd is active in
7257                          * slot 0 (untagged cmd)
7258                          */
7259 #ifdef FAS_TEST
7260                         if (fas_enable_untagged) {
7261                                 fas_test_untagged++;
7262                         }
7263 #endif
7264                         for (i = 0; i < N_SLOTS; i++) {
7265                                 if ((fas->f_throttle[i] > HOLD_THROTTLE) &&
7266                                     (fas->f_active[i] &&
7267                                     (fas->f_active[i]->f_slot[0] == NULL))) {
7268                                         fas_full_throttle(fas, i);
7269                                 }
7270                         }
7271                 }
7272 
7273                 if (fas->f_props_update) {
7274                         int i;
7275                         /*
7276                          * f_mutex will be released and reentered in
7277                          * fas_update_props().
7278                          * Hence we save fas->f_props_update now and
7279                          * set it to 0, indicating that the properties have
7280                          * been updated. This avoids a race condition with
7281                          * any thread running in interrupt context that
7282                          * attempts to set f_props_update to a non-zero value
7283                          */
7284                         props_update = fas->f_props_update;
7285                         fas->f_props_update = 0;
7286                         for (i = 0; i < NTARGETS_WIDE; i++) {
7287                                 if (props_update & (1<<i)) {
7288                                         fas_update_props(fas, i);
7289                                 }
7290                         }
7291                 }
7292                 fas_check_waitQ_and_mutex_exit(fas);
7293 
7294         }
7295         rw_exit(&fas_global_rwlock);
7296 
7297 again:
7298         mutex_enter(&fas_global_mutex);
7299         if (fas_timeout_initted && fas_timeout_id) {
7300                 fas_timeout_id = timeout(fas_watch, NULL, fas_tick);
7301         }
7302         mutex_exit(&fas_global_mutex);
7303         TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_WATCH_END, "fas_watch_end");
7304 }
7305 
7306 static void
7307 fas_watchsubr(struct fas *fas)
7308 {
7309         short slot;
7310         int d = ((fas->f_dslot == 0)? 1 : fas->f_dslot);
7311         struct f_slots *tag_slots;
7312 
7313         for (slot = 0; slot < N_SLOTS; slot += d)  {
7314 
7315 #ifdef FAS_TEST
7316                 if (fas_btest) {
7317                         fas_btest = 0;
7318                         (void) fas_reset_bus(fas);
7319                         return;
7320                 }
7321                 if (fas_force_timeout && fas->f_tcmds[slot]) {
7322                         fas_cmd_timeout(fas, slot);
7323                         fas_force_timeout = 0;
7324                         return;
7325                 }
7326                 fas_test_reset(fas, slot);
7327                 fas_test_abort(fas, slot);
7328 #endif /* FAS_TEST */
7329 
7330                 /*
7331                  * check tagged cmds first
7332                  */
7333                 tag_slots = fas->f_active[slot];
7334                 DPRINTF3(
7335                 "fas_watchsubr: slot %x: tcmds=%x, timeout=%x\n",
7336                     slot, fas->f_tcmds[slot], tag_slots->f_timeout);
7337 
7338                 if ((fas->f_tcmds[slot] > 0) && (tag_slots->f_timebase)) {
7339 
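                             /*
                              * a timebase at or below one watchdog tick only
                              * gets bumped here, deferring the f_timeout
                              * countdown for this slot to a later interval
                              */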
7340                         if (tag_slots->f_timebase <=
7341                             fas_scsi_watchdog_tick) {
7342                                 tag_slots->f_timebase +=
7343                                     fas_scsi_watchdog_tick;
7344                                 continue;
7345                         }
7346 
7347                         tag_slots->f_timeout -= fas_scsi_watchdog_tick;
7348 
7349                         if (tag_slots->f_timeout < 0) {
7350                                 fas_cmd_timeout(fas, slot);
7351                                 return;
7352                         }
7353                         if ((tag_slots->f_timeout) <=
7354                             fas_scsi_watchdog_tick) {
7355                                 IPRINTF1("pending timeout on slot=%x\n",
7356                                     slot);
7357                                 IPRINTF("draining all queues\n");
7358                                 fas_set_throttles(fas, 0, N_SLOTS,
7359                                     DRAIN_THROTTLE);
7360                         }
7361                 }
7362         }
7363 }
7364 
7365 /*
7366  * timeout recovery
7367  */
7368 static void
7369 fas_cmd_timeout(struct fas *fas, int slot)
7370 {
7371         int d = ((fas->f_dslot == 0)? 1 : fas->f_dslot);
7372         int target, lun, i, n, tag, ncmds;
7373         struct fas_cmd *sp = NULL;
7374         struct fas_cmd *ssp;
7375 
7376         ASSERT(fas->f_tcmds[slot]);
7377 
7378 #ifdef FAS_TEST
7379         if (fas_test_stop) {
7380                 debug_enter("timeout");
7381         }
7382 #endif
7383 
7384         /*
7385          * set throttle back; no more draining necessary
7386          */
7387         for (i = 0; i < N_SLOTS; i += d) {
7388                 if (fas->f_throttle[i] == DRAIN_THROTTLE) {
7389                         fas_full_throttle(fas, i);
7390                 }
7391         }
7392 
7393         if (NOTAG(slot/NLUNS_PER_TARGET)) {
7394                 sp = fas->f_active[slot]->f_slot[0];
7395         }
7396 
7397         /*
7398          * if no interrupt pending for next second then the current
7399          * cmd must be stuck; switch slot and sp to current slot and cmd
7400          */
7401         if (fas->f_current_sp && fas->f_state != STATE_FREE) {
7402                 for (i = 0; (i < 10000) && (INTPENDING(fas) == 0); i++) {
7403                         drv_usecwait(100);
7404                 }
7405                 if (INTPENDING(fas) == 0) {
7406                         slot = fas->f_current_sp->cmd_slot;
7407                         sp = fas->f_current_sp;
7408                 }
7409         }
7410 
7411         target = slot / NLUNS_PER_TARGET;
7412         lun = slot % NLUNS_PER_TARGET;
7413 
7414         /*
7415          * update all outstanding pkts for this slot
7416          */
7417         n = fas->f_active[slot]->f_n_slots;
7418         for (ncmds = tag = 0; tag < n; tag++) {
7419                 ssp = fas->f_active[slot]->f_slot[tag];
7420                 if (ssp && ssp->cmd_pkt->pkt_time) {
7421                         fas_set_pkt_reason(fas, ssp, CMD_TIMEOUT,
7422                             STAT_TIMEOUT | STAT_ABORTED);
7423                         fas_short_dump_cmd(fas, ssp);
7424                         ncmds++;
7425                 }
7426         }
7427 
7428         /*
7429          * no timed-out cmds here?
7430          */
7431         if (ncmds == 0) {
7432                 return;
7433         }
7434 
7435         /*
7436          * dump all we know about this timeout
7437          */
7438         if (sp) {
7439                 if (sp->cmd_flags & CFLAG_CMDDISC) {
7440                         fas_log(fas, CE_WARN,
7441                             "Disconnected command timeout for Target %d.%d",
7442                             target, lun);
7443                 } else {
7444                         ASSERT(sp == fas->f_current_sp);
7445                         fas_log(fas, CE_WARN,
7446                             "Connected command timeout for Target %d.%d",
7447                             target, lun);
7448                         /*
7449                          * A current command timeout often appears to be
7450                          * related to a noisy SCSI bus in synchronous mode.
7451                          */
7452                         if (fas->f_state == ACTS_DATA_DONE) {
7453                                 fas_sync_wide_backoff(fas, sp, slot);
7454                         }
7455                 }
7456 #ifdef FASDEBUG
7457                 fas_printstate(fas, "timeout");
7458 #endif
7459         } else {
7460                 fas_log(fas, CE_WARN,
7461                     "Disconnected tagged cmd(s) (%d) timeout for Target %d.%d",
7462                     fas->f_tcmds[slot], target, lun);
7463         }
7464 
7465         if (fas_abort_cmd(fas, sp, slot) == ACTION_SEARCH) {
7466                 (void) fas_istart(fas);
7467         }
7468 }
7469 
7470 /*
7471  * fas_sync_wide_backoff() increases the sync period and enables slow
7472  * cable mode.
7473  * the second time, we revert back to narrow/async;
7474  * we count on a bus reset to disable wide in the target and will
7475  * never renegotiate wide again
7476  */
7477 static void
7478 fas_sync_wide_backoff(struct fas *fas, struct fas_cmd *sp,
7479     int slot)
7480 {
7481         char phase;
7482         ushort_t state = fas->f_state;
7483         uchar_t tgt = slot / NLUNS_PER_TARGET;
7484         uint_t tshift = 1 << tgt;
7485 
7486         phase = fas_reg_read(fas, &fas->f_reg->fas_stat);
7487         phase &=  FAS_PHASE_MASK;
7488 
7489         IPRINTF4(
7490         "fas_sync_wide_backoff: target %d: state=%x, phase=%x, sp=0x%p\n",
7491             tgt, state, phase, (void *)sp);
7492 
7493 #ifdef FASDEBUG
7494         if (fas_no_sync_wide_backoff) {
7495                 return;
7496         }
7497 #endif
7498 
7499         /*
7500          * if this is not the first time, or sync is disabled
7501          * thru scsi_options, then disable wide
7502          */
7503         if ((fas->f_backoff & tshift) ||
7504             (fas->f_nosync & tshift)) {
7505                 /*
7506                  * disable wide for just this target
7507                  */
7508                 if ((fas->f_nowide & tshift) == 0) {
7509                         fas_log(fas, CE_WARN,
7510                             "Target %d disabled wide SCSI mode", tgt);
7511                 }
7512                 /*
7513                  * do not reset the bit in f_nowide because that
7514                  * would not force a renegotiation of wide,
7515                  * and do not change any register value yet because
7516                  * we may have reconnects before the renegotiation
7517                  */
7518                 fas->f_target_scsi_options[tgt] &= ~SCSI_OPTIONS_WIDE;
7519         }
7520 
7521         /*
7522          * reduce the xfer rate. the first time, double the sync period
7523          * (halving the rate). the second time, disable sync and wide.
7524          */
7525         if (fas->f_offset[tgt] != 0) {
7526                 /*
7527                  * do not reset the bit in f_nosync because that
7528                  * would not force a renegotiation of sync
7529                  */
7530                 if (fas->f_backoff & tshift) {
7531                         if ((fas->f_nosync & tshift) == 0) {
7532                                 fas_log(fas, CE_WARN,
7533                                     "Target %d reverting to async. mode",
7534                                     tgt);
7535                         }
7536                         fas->f_target_scsi_options[tgt] &=
7537                             ~(SCSI_OPTIONS_SYNC | SCSI_OPTIONS_FAST);
7538                 } else {
7539                         /* increase period by 100% */
7540                         fas->f_neg_period[tgt] *= 2;
7541 
7542                         fas_log(fas, CE_WARN,
7543                             "Target %d reducing sync. transfer rate", tgt);
7544                 }
7545         }
7546         fas->f_backoff |= tshift;
7547 
7548         /*
7549          * always enable slow cable mode, if not already enabled
7550          */
7551         if ((fas->f_fasconf & FAS_CONF_SLOWMODE) == 0) {
7552                 fas->f_fasconf |= FAS_CONF_SLOWMODE;
7553                 fas_reg_write(fas, &fas->f_reg->fas_conf, fas->f_fasconf);
7554                 IPRINTF("Reverting to slow SCSI cable mode\n");
7555         }
7556 
7557         /*
7558          * Force sync renegotiation and update properties
7559          */
7560         fas_force_renegotiation(fas, tgt);
7561         fas->f_props_update |= (1<<tgt);
7562 }
7563 
7564 /*
7565  * handle failed negotiations (either reject or bus free condition)
7566  */
7567 static void
7568 fas_reset_sync_wide(struct fas *fas)
7569 {
7570         struct fas_cmd *sp = fas->f_current_sp;
7571         int tgt = Tgt(sp);
7572 
7573         if (fas->f_wdtr_sent) {
7574                 IPRINTF("wide neg message rejected or bus free\n");
7575                 fas->f_nowide |= (1<<tgt);
7576                 fas->f_fasconf3[tgt] &= ~FAS_CONF3_WIDE;
7577                 fas_reg_write(fas, &fas->f_reg->fas_conf3,
7578                     fas->f_fasconf3[tgt]);
7579                 /*
7580                  * clear offset just in case it goes to
7581                  * data phase
7582                  */
7583                 fas_reg_write(fas,
7584                     (uchar_t *)&fas->f_reg->fas_sync_offset, 0);
7585         } else if (fas->f_sdtr_sent) {
7586                 volatile struct fasreg *fasreg =
7587                     fas->f_reg;
7588                 IPRINTF("sync neg message rejected or bus free\n");
7589                 fas->f_nosync |= (1<<tgt);
7590                 fas->f_offset[tgt] = 0;
7591                 fas->f_sync_period[tgt] = 0;
7592                 fas_reg_write(fas,
7593                     (uchar_t *)&fasreg->fas_sync_period, 0);
7594                 fas_reg_write(fas,
7595                     (uchar_t *)&fasreg->fas_sync_offset, 0);
7596                 fas->f_offset[tgt] = 0;
7597                 fas->f_fasconf3[tgt] &= ~FAS_CONF3_FASTSCSI;
7598                 fas_reg_write(fas, &fasreg->fas_conf3,
7599                     fas->f_fasconf3[tgt]);
7600         }
7601 
7602         fas_force_renegotiation(fas, tgt);
7603 }
7604 
7605 /*
7606  * force wide and sync renegotiation
7607  */
7608 static void
7609 fas_force_renegotiation(struct fas *fas, int target)
7610 {
7611         ushort_t tshift = 1<<target;
7612         fas->f_sync_known &= ~tshift;
7613         fas->f_sync_enabled &= ~tshift;
7614         fas->f_wide_known &= ~tshift;
7615         fas->f_wide_enabled &= ~tshift;
7616 }
7617 
7618 /*
7619  * update conf3 register for wide negotiation
7620  */
7621 static void
7622 fas_set_wide_conf3(struct fas *fas, int target, int width)
7623 {
7624         ASSERT(width <= 1);
7625         switch (width) {
7626         case 0:
7627                 fas->f_fasconf3[target] &= ~FAS_CONF3_WIDE;
7628                 break;
7629         case 1:
7630                 fas->f_fasconf3[target] |= FAS_CONF3_WIDE;
7631                 fas->f_wide_enabled |= (1<<target);
7632                 break;
7633         }
7634 
7635         fas_reg_write(fas, &fas->f_reg->fas_conf3, fas->f_fasconf3[target]);
7636         fas->f_fasconf3_reg_last = fas->f_fasconf3[target];
7637 }
7638 
7639 /*
7640  * Abort command handling
7641  *
7642  * abort current cmd, either by device reset or immediately with bus reset
7643  * (usually an abort msg doesn't completely solve the problem, therefore
7644  * a device or bus reset is recommended)
7645  */
7646 static int
7647 fas_abort_curcmd(struct fas *fas)
7648 {
7649         if (fas->f_current_sp) {
7650                 return (fas_abort_cmd(fas, fas->f_current_sp,
7651                     fas->f_current_sp->cmd_slot));
7652         } else {
7653                 return (fas_reset_bus(fas));
7654         }
7655 }
7656 
7657 static int
7658 fas_abort_cmd(struct fas *fas, struct fas_cmd *sp, int slot)
7659 {
7660         struct scsi_address ap;
7661 
7662         ap.a_hba_tran = fas->f_tran;
7663         ap.a_target = slot / NLUNS_PER_TARGET;
7664         ap.a_lun    = slot % NLUNS_PER_TARGET;
7665 
7666         IPRINTF1("abort cmd 0x%p\n", (void *)sp);
7667 
7668         /*
7669          * attempting to abort a connected cmd is usually fruitless, so
7670          * only try disconnected cmds;
7671          * a reset is preferable to an abort (see 1161701)
7672          */
7673         if ((fas->f_current_sp && (fas->f_current_sp->cmd_slot != slot)) ||
7674             (fas->f_state == STATE_FREE)) {
7675                 IPRINTF2("attempting to reset target %d.%d\n",
7676                     ap.a_target, ap.a_lun);
7677                 if (fas_do_scsi_reset(&ap, RESET_TARGET)) {
7678                         return (ACTION_SEARCH);
7679                 }
7680         }
7681 
7682         /*
7683          * if the target won't listen, then a retry is useless;
7684          * there is also the possibility that the cmd still completed while
7685          * we were trying to reset, and the target driver may have done a
7686          * device reset which has blown away this sp.
7687          * well, we've tried; now pull the chain
7688          */
7689         IPRINTF("aborting all cmds by bus reset\n");
7690         return (fas_reset_bus(fas));
7691 }
7692 
7693 /*
7694  * fas_do_scsi_abort() assumes that we already have the mutex.
7695  * during the abort, we hold the mutex and prevent callbacks by setting the
7696  * completion pointer to NULL. this also prevents a target driver from
7697  * attempting a scsi_abort/reset while we are aborting.
7698  * because the completion pointer is NULL, we can still update the
7699  * packet after completion
7700  * the throttle for this slot is cleared either by fas_abort_connected_cmd
7701  * or fas_runpoll, which prevents new cmds from starting while aborting
7702  */
7703 static int
7704 fas_do_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
7705 {
7706         struct fas *fas = ADDR2FAS(ap);
7707         struct fas_cmd *sp;
7708         int rval = FALSE;
7709         short slot;
7710         struct fas_cmd *cur_sp = fas->f_current_sp;
7711         void    (*cur_savec)(), (*sp_savec)();
7712         int     sp_tagged_flag, abort_msg;
7713 
7714         if (pkt) {
7715                 sp = PKT2CMD(pkt);
7716                 slot = sp->cmd_slot;
7717                 ASSERT(slot == ((ap->a_target * NLUNS_PER_TARGET) | ap->a_lun));
7718         } else {
7719                 sp = NULL;
7720                 slot = (ap->a_target * NLUNS_PER_TARGET) | ap->a_lun;
7721         }
7722 
7723         fas_move_waitQ_to_readyQ(fas);
7724 
7725         /*
7726          *   If no specific command was passed, all cmds here will be aborted.
7727          *   If a specific command was passed as an argument (to be aborted),
7728          *   only the specified command will be aborted.
7729          */
7730         ASSERT(mutex_owned(FAS_MUTEX(fas)));
7731         IPRINTF4("fas_scsi_abort for slot %x, "
7732             "sp=0x%p, pkt_flags=%x, cur_sp=0x%p\n",
7733             slot, (void *)sp, (sp? sp->cmd_pkt_flags : 0), (void *)cur_sp);
7734 
7735         /*
7736          * first check if the cmd is in the ready queue or
7737          * in the active queue
7738          */
7739         if (sp) {
7740                 IPRINTF3("aborting one command 0x%p for %d.%d\n",
7741                     (void *)sp, ap->a_target, ap->a_lun);
7742                 rval = fas_remove_from_readyQ(fas, sp, slot);
7743                 if (rval) {
7744                         IPRINTF("aborted one ready cmd\n");
7745                         fas_set_pkt_reason(fas, sp, CMD_ABORTED, STAT_ABORTED);
7746                         fas_decrement_ncmds(fas, sp);
7747                         fas_call_pkt_comp(fas, sp);
7748                         goto exit;
7749 
7750                 } else if ((sp !=
7751                     fas->f_active[slot]->f_slot[sp->cmd_tag[1]])) {
7752                         IPRINTF("cmd doesn't exist here\n");
7753                         rval = TRUE;
7754                         goto exit;
7755                 }
7756         }
7757 
7758         /*
7759          * hold off any new commands while attempting to abort
7760          * an active cmd
7761          */
7762         fas_set_throttles(fas, slot, 1, HOLD_THROTTLE);
7763 
7764         if (cur_sp) {
7765                 /*
7766                  * prevent completion on current cmd
7767                  */
7768                 cur_savec = cur_sp->cmd_pkt->pkt_comp;
7769                 cur_sp->cmd_pkt->pkt_comp = NULL;
7770         }
7771 
7772         if (sp) {
7773                 /*
7774                  * the cmd exists here. is it connected or disconnected?
7775                  * if connected but still selecting then can't abort now.
7776                  * prevent completion on this cmd
7777                  */
7778                 sp_tagged_flag = (sp->cmd_pkt_flags & FLAG_TAGMASK);
7779                 abort_msg = (sp_tagged_flag? MSG_ABORT_TAG : MSG_ABORT);
7780                 sp_savec = sp->cmd_pkt->pkt_comp;
7781                 sp->cmd_pkt->pkt_comp = NULL;
7782 
7783                 /* connected but not selecting? */
7784                 if ((sp == cur_sp) && (fas->f_state != STATE_FREE) &&
7785                     (sp->cmd_pkt->pkt_state)) {
7786                         rval = fas_abort_connected_cmd(fas, sp, abort_msg);
7787                 }
7788 
7789                 /* if abort connected cmd failed, try abort disconnected */
7790                 if ((rval == 0) &&
7791                     (sp->cmd_flags & CFLAG_CMDDISC) &&
7792                     ((sp->cmd_flags &  CFLAG_COMPLETED) == 0)) {
7793                         rval = fas_abort_disconnected_cmd(fas, ap, sp,
7794                             abort_msg, slot);
7795                 }
7796 
7797                 if (rval) {
7798                         sp->cmd_flags |= CFLAG_COMPLETED;
7799                         fas_set_pkt_reason(fas, sp, CMD_ABORTED, STAT_ABORTED);
7800                 }
7801 
7802                 sp->cmd_pkt->pkt_comp = sp_savec;
7803 
7804         } else {
7805                 IPRINTF2("aborting all commands for %d.%d\n",
7806                     ap->a_target, ap->a_lun);
7807                 abort_msg = MSG_ABORT;
7808 
7809                 /* active and not selecting ? */
7810                 if (cur_sp && (fas->f_state != STATE_FREE) &&
7811                     (cur_sp->cmd_slot == slot) &&
7812                     cur_sp->cmd_pkt->pkt_state) {
7813                         rval = fas_abort_connected_cmd(fas, cur_sp,
7814                             abort_msg);
7815                 }
7816                 if (rval == 0) {
7817                         rval = fas_abort_disconnected_cmd(fas, ap,
7818                             NULL, abort_msg, slot);
7819                 }
7820         }
7821 
7822 done:
7823         /* complete the current sp */
7824         if (cur_sp) {
7825                 cur_sp->cmd_pkt->pkt_comp = cur_savec;
7826                 if (cur_sp->cmd_flags & CFLAG_COMPLETED) {
7827                         fas_remove_cmd(fas, cur_sp, NEW_TIMEOUT);
7828                         cur_sp->cmd_flags &= ~CFLAG_COMPLETED;
7829                         fas_decrement_ncmds(fas, cur_sp);
7830                         fas_call_pkt_comp(fas, cur_sp);
7831                 }
7832         }
7833 
7834         /* complete the sp passed as 2nd arg */
7835         if (sp && (sp != cur_sp) && (sp->cmd_flags & CFLAG_COMPLETED)) {
7836                 sp->cmd_flags &= ~CFLAG_COMPLETED;
7837                 fas_remove_cmd(fas, sp, NEW_TIMEOUT);
7838                 fas_decrement_ncmds(fas, sp);
7839                 fas_call_pkt_comp(fas, sp);
7840         }
7841 
7842         /* clean up all cmds for this slot */
7843         if (rval && (abort_msg == MSG_ABORT)) {
7844                 /*
7845                  * mark all commands here as aborted;
7846                  * the abort msg has been accepted, now clean up the queues
7847                  */
7848                 fas_mark_packets(fas, slot, CMD_ABORTED, STAT_ABORTED);
7849                 fas_flush_tagQ(fas, slot);
7850                 fas_flush_readyQ(fas, slot);
7851         }
7852         fas_set_throttles(fas, slot, 1, MAX_THROTTLE);
7853 
7854 exit:
7855         if (fas->f_state == STATE_FREE) {
7856                 (void) fas_ustart(fas);
7857         }
7858 
7859         ASSERT(mutex_owned(FAS_MUTEX(fas)));
7860 
7861 #ifdef FASDEBUG
7862         if (rval && fas_test_stop) {
7863                 debug_enter("abort succeeded");
7864         }
7865 #endif
7866         return (rval);
7867 }
7868 
7869 /*
7870  * mark all packets with new reason and update statistics
7871  */
7872 static void
7873 fas_mark_packets(struct fas *fas, int slot, uchar_t reason, uint_t stat)
7874 {
7875         struct fas_cmd *sp = fas->f_readyf[slot];
7876 
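             /*
              * cmds still sitting on the ready queue were never started,
              * so mark them aborted regardless of the stat passed in
              */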
7877         while (sp != 0) {
7878                 fas_set_pkt_reason(fas, sp, reason, STAT_ABORTED);
7879                 sp = sp->cmd_forw;
7880         }
7881         if (fas->f_tcmds[slot]) {
7882                 int n = 0;
7883                 ushort_t tag;
7884 
7885                 for (tag = 0; tag < fas->f_active[slot]->f_n_slots; tag++) {
7886                         if ((sp = fas->f_active[slot]->f_slot[tag]) != 0) {
7887                                 fas_set_pkt_reason(fas, sp, reason, stat);
7888                                 n++;
7889                         }
7890                 }
7891                 ASSERT(fas->f_tcmds[slot] == n);
7892         }
7893 }
7894 
7895 /*
7896  * set pkt_reason and OR in pkt_statistics flag
7897  */
7898 static void
7899 fas_set_pkt_reason(struct fas *fas, struct fas_cmd *sp, uchar_t reason,
7900     uint_t stat)
7901 {
7902         if (sp) {
7903                 if (sp->cmd_pkt->pkt_reason == CMD_CMPLT) {
7904                         sp->cmd_pkt->pkt_reason = reason;
7905                 }
7906                 sp->cmd_pkt->pkt_statistics |= stat;
7907                 IPRINTF3("sp=0x%p, pkt_reason=%x, pkt_stat=%x\n",
7908                     (void *)sp, reason, sp->cmd_pkt->pkt_statistics);
7909         }
7910 }
7911 
7912 /*
7913  * delete specified cmd from the ready queue
7914  */
7915 static int
7916 fas_remove_from_readyQ(struct fas *fas, struct fas_cmd *sp, int slot)
7917 {
7918         struct fas_cmd *ssp, *psp;
7919 
7920         /*
7921          * command has not been started yet and is still in the ready queue
7922          */
7923         if (sp) {
7924                 ASSERT(fas->f_ncmds > 0);
7925                 /*
7926                  * find packet on the ready queue and remove it
7927                  */
7928                 for (psp = NULL, ssp = fas->f_readyf[slot]; ssp != NULL;
7929                     psp = ssp, ssp = ssp->cmd_forw) {
7930                         if (ssp == sp) {
7931                                 if (fas->f_readyf[slot] == sp) {
7932                                         fas->f_readyf[slot] = sp->cmd_forw;
7933                                 } else {
7934                                         psp->cmd_forw = sp->cmd_forw;
7935                                 }
7936                                 if (fas->f_readyb[slot] == sp) {
7937                                         fas->f_readyb[slot] = psp;
7938                                 }
7939                                 return (TRUE);
7940                         }
7941                 }
7942         }
7943         return (FALSE);
7944 }
7945 
7946 /*
7947  * add cmd to the head of the readyQ
7948  * due to tag allocation failure or preemption we have to return
7949  * this cmd to the readyQ
7950  */
7951 static void
7952 fas_head_of_readyQ(struct fas *fas, struct fas_cmd *sp)
7953 {
7954         /*
7955          * never return a NOINTR pkt to the readyQ
7956          * (fas_runpoll will resubmit)
7957          */
7958         if ((sp->cmd_pkt_flags & FLAG_NOINTR) == 0) {
7959                 struct fas_cmd *dp;
7960                 int slot = sp->cmd_slot;
7961 
7962                 dp = fas->f_readyf[slot];
7963                 fas->f_readyf[slot] = sp;
7964                 sp->cmd_forw = dp;
7965                 if (fas->f_readyb[slot] == NULL) {
7966                         fas->f_readyb[slot] = sp;
7967                 }
7968         }
7969 }
7970 
7971 /*
7972  * flush cmds in ready queue
7973  */
7974 static void
7975 fas_flush_readyQ(struct fas *fas, int slot)
7976 {
7977         if (fas->f_readyf[slot]) {
7978                 struct fas_cmd *sp, *nsp;
7979 
7980                 IPRINTF1("flushing ready queue, slot=%x\n", slot);
7981                 ASSERT(fas->f_ncmds > 0);
7982 
7983                 sp = fas->f_readyf[slot];
7984                 fas->f_readyf[slot] = fas->f_readyb[slot] = NULL;
7985 
7986                 while (sp != 0) {
7987                         /*
7988                          * save the forward pointer before calling
7989                          * the completion routine
7990                          */
7991                         nsp = sp->cmd_forw;
7992                         ASSERT((sp->cmd_flags & CFLAG_FREE) == 0);
7993                         ASSERT(Tgt(sp) == slot/NLUNS_PER_TARGET);
7994                         fas_decrement_ncmds(fas, sp);
7995                         fas_call_pkt_comp(fas, sp);
7996                         sp = nsp;
7997                 }
7998                 fas_check_ncmds(fas);
7999         }
8000 }
8001 
8002 /*
8003  * clean up the tag queue;
8004  * preserve some order by starting with the oldest tag
8005  */
8006 static void
8007 fas_flush_tagQ(struct fas *fas, int slot)
8008 {
8009         ushort_t tag, starttag;
8010         struct fas_cmd *sp;
8011         struct f_slots *tagque = fas->f_active[slot];
8012 
8013         if (tagque == NULL) {
8014                 return;
8015         }
8016 
8017         DPRINTF2("flushing entire tag queue, slot=%x, tcmds=%x\n",
8018             slot, fas->f_tcmds[slot]);
8019 
8020 #ifdef FASDEBUG
8021         {
8022                 int n = 0;
8023                 for (tag = 0; tag < fas->f_active[slot]->f_n_slots; tag++) {
8024                         if ((sp = tagque->f_slot[tag]) != 0) {
8025                                 n++;
8026                                 ASSERT((sp->cmd_flags & CFLAG_FREE) == 0);
8027                                 if (sp->cmd_pkt->pkt_reason == CMD_CMPLT) {
8028                                         if ((sp->cmd_flags & CFLAG_FINISHED) ==
8029                                             0) {
8030                                                 debug_enter("fas_flush_tagQ");
8031                                         }
8032                                 }
8033                         }
8034                 }
8035                 ASSERT(fas->f_tcmds[slot] == n);
8036         }
8037 #endif
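             /*
              * walk the tag queue starting at f_tags (the round-robin
              * allocation point) and wrap around, so the oldest tags
              * tend to be flushed first
              */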
8038         tag = starttag = fas->f_active[slot]->f_tags;
8039 
8040         do {
8041                 if ((sp = tagque->f_slot[tag]) != 0) {
8042                         fas_flush_cmd(fas, sp, 0, 0);
8043                 }
8044                 tag = ((ushort_t)(tag + 1)) %
8045                     (ushort_t)fas->f_active[slot]->f_n_slots;
8046         } while (tag != starttag);
8047 
8048         ASSERT(fas->f_tcmds[slot] == 0);
8049         EPRINTF2("ncmds = %x, ndisc=%x\n", fas->f_ncmds, fas->f_ndisc);
8050         fas_check_ncmds(fas);
8051 }
8052 
8053 /*
8054  * clean up one active command
8055  */
8056 static void
8057 fas_flush_cmd(struct fas *fas, struct fas_cmd *sp, uchar_t reason,
8058     uint_t stat)
8059 {
8060         short slot = sp->cmd_slot;
8061 
8062         ASSERT(fas->f_ncmds > 0);
8063         ASSERT((sp->cmd_flags & CFLAG_FREE) == 0);
8064         ASSERT(sp == fas->f_active[slot]->f_slot[sp->cmd_tag[1]]);
8065 
8066         fas_remove_cmd(fas, sp, NEW_TIMEOUT);
8067         fas_decrement_ncmds(fas, sp);
8068         fas_set_pkt_reason(fas, sp, reason, stat);
8069         fas_call_pkt_comp(fas, sp);
8070 
8071         EPRINTF2("ncmds = %x, ndisc=%x\n", fas->f_ncmds, fas->f_ndisc);
8072         fas_check_ncmds(fas);
8073 }
8074 
8075 /*
8076  * prepare a proxy cmd (a cmd sent on behalf of the target driver,
8077  * usually for error recovery or abort/reset)
8078  */
8079 static void
8080 fas_makeproxy_cmd(struct fas_cmd *sp, struct scsi_address *ap,
8081     struct scsi_pkt *pkt, int nmsgs, ...)
8082 {
8083         va_list vap;
8084         int i;
8085 
8086         ASSERT(nmsgs <= (CDB_GROUP5 - CDB_GROUP0 - 3));
8087 
8088         bzero(sp, sizeof (*sp));
8089         bzero(pkt, scsi_pkt_size());
8090 
8091         pkt->pkt_address     = *ap;
8092         pkt->pkt_cdbp                = (opaque_t)&sp->cmd_cdb[0];
8093         pkt->pkt_scbp                = (opaque_t)&sp->cmd_scb;
8094         pkt->pkt_ha_private  = (opaque_t)sp;
8095         sp->cmd_pkt          = pkt;
8096         sp->cmd_scblen               = 1;
8097         sp->cmd_pkt_flags    = pkt->pkt_flags = FLAG_NOINTR;
8098         sp->cmd_flags                = CFLAG_CMDPROXY;
8099         sp->cmd_cdb[FAS_PROXY_TYPE] = FAS_PROXY_SNDMSG;
8100         sp->cmd_cdb[FAS_PROXY_RESULT] = FALSE;
8101         sp->cmd_cdb[FAS_PROXY_DATA] = (char)nmsgs;
8102 
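             /*
              * the message bytes to send follow the count byte in the
              * proxy cdb
              */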
8103         va_start(vap, nmsgs);
8104         for (i = 0; i < nmsgs; i++) {
8105                 sp->cmd_cdb[FAS_PROXY_DATA + 1 + i] = (uchar_t)va_arg(vap, int);
8106         }
8107         va_end(vap);
8108 }
8109 
8110 /*
8111  * send a proxy cmd and check the result
8112  */
8113 static int
8114 fas_do_proxy_cmd(struct fas *fas, struct fas_cmd *sp,
8115     struct scsi_address *ap, char *what)
8116 {
8117         int rval;
8118 
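             /*
              * the proxy cmd was built with FLAG_NOINTR, so it runs polled;
              * success requires transport acceptance, normal completion and
              * the result byte in the proxy cdb set to TRUE by the msg-out
              * handling
              */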
8119         IPRINTF3("Sending proxy %s message to %d.%d\n", what,
8120             ap->a_target, ap->a_lun);
8121         if (fas_accept_pkt(fas, sp, TRAN_BUSY_OK) == TRAN_ACCEPT &&
8122             sp->cmd_pkt->pkt_reason == CMD_CMPLT &&
8123             sp->cmd_cdb[FAS_PROXY_RESULT] == TRUE) {
8124                 IPRINTF3("Proxy %s succeeded for %d.%d\n", what,
8125                     ap->a_target, ap->a_lun);
8126                 ASSERT(fas->f_current_sp != sp);
8127                 rval = TRUE;
8128         } else {
8129                 IPRINTF5(
8130                 "Proxy %s failed for %d.%d, result=%x, reason=%x\n", what,
8131                     ap->a_target, ap->a_lun, sp->cmd_cdb[FAS_PROXY_RESULT],
8132                     sp->cmd_pkt->pkt_reason);
8133                 ASSERT(fas->f_current_sp != sp);
8134                 rval = FALSE;
8135         }
8136         return (rval);
8137 }
8138 
8139 /*
8140  * abort a connected command by sending an abort msg; hold off on
8141  * starting new cmds by setting throttles to HOLD_THROTTLE
8142  */
8143 static int
8144 fas_abort_connected_cmd(struct fas *fas, struct fas_cmd *sp, uchar_t msg)
8145 {
8146         int rval = FALSE;
8147         int flags = sp->cmd_pkt_flags;
8148 
8149         /*
8150          * if a reset delay is active, we cannot access the target.
8151          */
8152         if (fas->f_reset_delay[Tgt(sp)]) {
8153                 return (rval);
8154         }
8155 
8156         /*
8157          * only abort while in data phase; otherwise we mess up msg phase
8158          */
8159         if (!((fas->f_state == ACTS_DATA) ||
8160             (fas->f_state == ACTS_DATA_DONE))) {
8161                 return (rval);
8162         }
8163 
8164 
8165         IPRINTF3("Sending abort message %s to connected %d.%d\n",
8166             scsi_mname(msg), Tgt(sp), Lun(sp));
8167 
8168 
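             /*
              * set up a one-byte abort msg-out, assert ATN and poll so
              * the message goes out without relying on the interrupt path
              */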
8169         fas->f_abort_msg_sent = 0;
8170         fas->f_omsglen = 1;
8171         fas->f_cur_msgout[0] = msg;
8172         sp->cmd_pkt_flags |= FLAG_NOINTR;
8173         fas_assert_atn(fas);
8174 
8175         (void) fas_dopoll(fas, SHORT_POLL_TIMEOUT);
8176 
8177         /*
8178          * now check if the msg was taken;
8179          * f_abort_msg_sent is set in fas_handle_msg_out_done when the abort
8180          * msg has actually gone out (i.e., msg out phase occurred)
8181          */
8182         if (fas->f_abort_msg_sent && (sp->cmd_flags & CFLAG_COMPLETED)) {
8183                 IPRINTF2("target %d.%d aborted\n",
8184                     Tgt(sp), Lun(sp));
8185                 rval = TRUE;
8186         } else {
8187                 IPRINTF2("target %d.%d did not abort\n",
8188                     Tgt(sp), Lun(sp));
8189         }
8190         sp->cmd_pkt_flags = flags;
8191         fas->f_omsglen = 0;
8192         return (rval);
8193 }
8194 
8195 /*
8196  * abort a disconnected command; if it is a tagged command, we need
8197  * to include the tag
8198  */
8199 static int
8200 fas_abort_disconnected_cmd(struct fas *fas, struct scsi_address *ap,
8201     struct fas_cmd *sp, uchar_t msg, int slot)
8202 {
8203         auto struct fas_cmd     local;
8204         struct fas_cmd          *proxy_cmdp = &local;
8205         struct scsi_pkt         *pkt;
8206         int                     rval;
8207         int                     target = ap->a_target;
8208 
8209         /*
8210          * if reset delay is active, we cannot start a selection
8211          * and there shouldn't be a cmd outstanding
8212          */
8213         if (fas->f_reset_delay[target] != 0) {
8214                 return (FALSE);
8215         }
8216 
8217         if (sp)
8218                 ASSERT(sp->cmd_slot == slot);
8219 
8220         IPRINTF1("aborting disconnected tagged cmd(s) with %s\n",
8221             scsi_mname(msg));
8222         pkt = kmem_alloc(scsi_pkt_size(), KM_SLEEP);
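             /*
              * to abort one specific tagged cmd, the abort tag msg must be
              * preceded by a simple queue tag msg carrying that cmd's tag,
              * so the target can identify the I_T_L_Q nexus to abort
              */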
8223         if (sp && (TAGGED(target) && (msg == MSG_ABORT_TAG))) {
8224                 int tag = sp->cmd_tag[1];
8225                 ASSERT(sp == fas->f_active[slot]->f_slot[tag]);
8226                 fas_makeproxy_cmd(proxy_cmdp, ap, pkt, 3,
8227                     MSG_SIMPLE_QTAG, tag, msg);
8228         } else {
8229                 fas_makeproxy_cmd(proxy_cmdp, ap, pkt, 1, msg);
8230         }
8231 
8232         rval = fas_do_proxy_cmd(fas, proxy_cmdp, ap, scsi_mname(msg));
8233         kmem_free(pkt, scsi_pkt_size());
8234         return (rval);
8235 }
8236 
8237 /*
8238  * reset handling:
8239  * fas_do_scsi_reset assumes that we have already entered the mutex
8240  */
8241 static int
8242 fas_do_scsi_reset(struct scsi_address *ap, int level)
8243 {
8244         int rval = FALSE;
8245         struct fas *fas = ADDR2FAS(ap);
8246         short slot = (ap->a_target * NLUNS_PER_TARGET) | ap->a_lun;
8247 
8248         ASSERT(mutex_owned(FAS_MUTEX(fas)));
8249         IPRINTF3("fas_scsi_reset for slot %x, level=%x, tcmds=%x\n",
8250             slot, level, fas->f_tcmds[slot]);
8251 
8252         fas_move_waitQ_to_readyQ(fas);
8253 
8254         if (level == RESET_ALL) {
8255                 /*
8256                  * We know that fas_reset_bus() returns ACTION_RETURN.
8257                  */
8258                 (void) fas_reset_bus(fas);
8259 
8260                 /*
8261                  * Now call fas_dopoll() to field the reset interrupt,
8262                  * which will then call fas_reset_recovery, which in turn
8263                  * calls the completion function for all commands.
8264                  */
8265                 if (fas_dopoll(fas, SHORT_POLL_TIMEOUT) <= 0) {
8266                         /*
8267                          * reset fas
8268                          */
8269                         fas_internal_reset(fas, FAS_RESET_FAS);
8270                         (void) fas_reset_bus(fas);
8271                         if (fas_dopoll(fas, SHORT_POLL_TIMEOUT) <= 0) {
8272                                 fas_log(fas,
8273                                     CE_WARN, "reset scsi bus failed");
8274                                 New_state(fas, STATE_FREE);
8275                         } else {
8276                                 rval = TRUE;
8277                         }
8278                 } else {
8279                         rval = TRUE;
8280                 }
8281 
8282         } else {
8283                 struct fas_cmd *cur_sp = fas->f_current_sp;
8284                 void (*savec)() = NULL;
8285 
8286                 /*
8287                  * prevent new commands from starting
8288                  */
8289                 fas_set_all_lun_throttles(fas, slot, HOLD_THROTTLE);
8290 
8291                 /*
8292                  * zero pkt_comp so it won't complete during the reset and
8293                  * we can still update the packet after the reset.
8294                  */
8295                 if (cur_sp) {
8296                         savec = cur_sp->cmd_pkt->pkt_comp;
8297                         cur_sp->cmd_pkt->pkt_comp = NULL;
8298                 }
8299 
8300                 /*
8301                  * is this a connected cmd but not selecting?
8302                  */
8303                 if (cur_sp && (fas->f_state != STATE_FREE) &&
8304                     (cur_sp->cmd_pkt->pkt_state != 0) &&
8305                     (ap->a_target == (Tgt(cur_sp)))) {
8306                         rval = fas_reset_connected_cmd(fas, ap);
8307                 }
8308 
8309                 /*
8310                  * if not connected or fas_reset_connected_cmd() failed,
8311                  * attempt a reset_disconnected_cmd
8312                  */
8313                 if (rval == FALSE) {
8314                         rval = fas_reset_disconnected_cmd(fas, ap);
8315                 }
8316 
8317                 /*
8318                  * clean up if the reset was successful;
8319                  * complete the current sp first.
8320                  */
8321                 if (cur_sp) {
8322                         cur_sp->cmd_pkt->pkt_comp = savec;
8323                         if (cur_sp->cmd_flags & CFLAG_COMPLETED) {
8324                                 if (ap->a_target == (Tgt(cur_sp))) {
8325                                         fas_set_pkt_reason(fas, cur_sp,
8326                                             CMD_RESET, STAT_DEV_RESET);
8327                                 }
8328                                 fas_remove_cmd(fas, cur_sp, NEW_TIMEOUT);
8329                                 cur_sp->cmd_flags &= ~CFLAG_COMPLETED;
8330                                 fas_decrement_ncmds(fas, cur_sp);
8331                                 fas_call_pkt_comp(fas, cur_sp);
8332                         }
8333                 }
8334 
8335                 if (rval == TRUE) {
8336                         fas_reset_cleanup(fas, slot);
8337                 } else {
8338                         IPRINTF1("fas_scsi_reset failed for slot %x\n", slot);
8339 
8340                         /*
8341                          * restore throttles to max throttle, regardless
8342                          * of what they were (fas_set_throttles() will
8343                          * deal with an active reset delay);
8344                          * restoring the old throttle values is not
8345                          * such a good idea
8346                          */
8347                         fas_set_all_lun_throttles(fas, slot, MAX_THROTTLE);
8348 
8349                 }
8350 
8351                 if (fas->f_state == STATE_FREE) {
8352                         (void) fas_ustart(fas);
8353                 }
8354         }
8355 exit:
8356         ASSERT(mutex_owned(FAS_MUTEX(fas)));
8357         ASSERT(fas->f_ncmds >= fas->f_ndisc);
8358 
8359 #ifdef FASDEBUG
8360         if (rval && fas_test_stop) {
8361                 debug_enter("reset succeeded");
8362         }
8363 #endif
8364         return (rval);
8365 }
8366 
8367 /*
8368  * reset delay is handled by a separate watchdog; this ensures that the
8369  * reset delay countdown does not depend on fas_scsi_watchdog_tick
8370  */
8371 static void
8372 fas_start_watch_reset_delay(struct fas *fas)
8373 {
8374         mutex_enter(&fas_global_mutex);
8375         if ((fas_reset_watch == 0) && FAS_CAN_SCHED) {
8376                 fas_reset_watch = timeout(fas_watch_reset_delay, NULL,
8377                     drv_usectohz((clock_t)FAS_WATCH_RESET_DELAY_TICK * 1000));
8378         }
8379         ASSERT((fas_reset_watch != 0) || (fas->f_flags & FAS_FLG_NOTIMEOUTS));
8380         mutex_exit(&fas_global_mutex);
8381 }
8382 
8383 /*
8384  * set throttles to HOLD and set reset_delay for all target/luns
8385  */
8386 static void
8387 fas_setup_reset_delay(struct fas *fas)
8388 {
8389         if (!ddi_in_panic()) {
8390                 int i;
8391 
8392                 fas_set_throttles(fas, 0, N_SLOTS, HOLD_THROTTLE);
8393                 for (i = 0; i < NTARGETS_WIDE; i++) {
8394                         fas->f_reset_delay[i] = fas->f_scsi_reset_delay;
8395                 }
8396                 fas_start_watch_reset_delay(fas);
8397         } else {
8398                 drv_usecwait(fas->f_scsi_reset_delay * 1000);
8399         }
8400 }
8401 
8402 /*
8403  * fas_watch_reset_delay(_subr) is invoked by timeout() and checks every
8404  * fas instance for active reset delays
8405  */
8406 /*ARGSUSED*/
8407 static void
8408 fas_watch_reset_delay(void *arg)
8409 {
8410         struct fas *fas;
8411         struct fas *lfas;       /* last not_done fas */
8412         int not_done = 0;
8413 
8414         mutex_enter(&fas_global_mutex);
8415         fas_reset_watch = 0;
8416         mutex_exit(&fas_global_mutex);
8417 
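             /*
              * walk all fas instances under the global rwlock; each
              * instance's reset delays are updated under its own mutex
              */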
8418         rw_enter(&fas_global_rwlock, RW_READER);
8419         for (fas = fas_head; fas != (struct fas *)NULL; fas = fas->f_next) {
8420                 if (fas->f_tran == 0) {
8421                         continue;
8422                 }
8423                 mutex_enter(FAS_MUTEX(fas));
8424                 not_done += fas_watch_reset_delay_subr(fas);
8425                 lfas = fas;
8426                 fas_check_waitQ_and_mutex_exit(fas);
8427         }
8428         rw_exit(&fas_global_rwlock);
8429         if (not_done) {
8430                 ASSERT(lfas != NULL);
8431                 fas_start_watch_reset_delay(lfas);
8432         }
8433 }
8434 
8435 static int
8436 fas_watch_reset_delay_subr(struct fas *fas)
8437 {
8438         short slot, s;
8439         int start_slot = -1;
8440         int done = 0;
8441 
8442         for (slot = 0; slot < N_SLOTS; slot += NLUNS_PER_TARGET)  {
8443 
8444                 /*
8445                  * check if a reset delay is active; once it expires, go back
8446                  * to full throttle, which will release the cmds in the ready Q
8447                  */
8448                 s = slot/NLUNS_PER_TARGET;
8449                 if (fas->f_reset_delay[s] != 0) {
8450                         EPRINTF2("target%d: reset delay=%d\n", s,
8451                             fas->f_reset_delay[s]);
8452                         fas->f_reset_delay[s] -= FAS_WATCH_RESET_DELAY_TICK;
8453                         if (fas->f_reset_delay[s] <= 0) {
8454                                 /*
8455                                  * restore full throttle for all luns on this target
8456                                  */
8457                                 fas->f_reset_delay[s] = 0;
8458                                 fas_set_all_lun_throttles(fas,
8459                                     slot, MAX_THROTTLE);
8460                                 IPRINTF1("reset delay completed, slot=%x\n",
8461                                     slot);
8462                                 if (start_slot == -1) {
8463                                         start_slot = slot;
8464                                 }
8465                         } else {
8466                                 done = -1;
8467                         }
8468                 }
8469         }
8470 
8471         /*
8472          * start a cmd if a reset delay expired
8473          */
8474         if (start_slot != -1 && fas->f_state == STATE_FREE) {
8475                 (void) fas_ustart(fas);
8476         }
8477         return (done);
8478 }
8479 
8480 /*
8481  * cleanup after a device reset; this affects all of the target's luns
8482  */
8483 static void
8484 fas_reset_cleanup(struct fas *fas, int slot)
8485 {
8486         /*
8487          * the reset msg has been accepted; now clean up the queues
8488          * for all luns of this target
8489          */
8490         int i, start, end;
8491         int target  = slot/NLUNS_PER_TARGET;
8492 
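             /*
              * compute the range of slots covering every lun on this target
              */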
8493         start = slot & ~(NLUNS_PER_TARGET-1);
8494         end = start + NLUNS_PER_TARGET;
8495         IPRINTF4("fas_reset_cleanup: slot %x, start=%x, end=%x, tcmds=%x\n",
8496             slot, start, end, fas->f_tcmds[slot]);
8497 
8498         ASSERT(!(fas->f_current_sp &&
8499             (fas->f_current_sp->cmd_slot == slot) &&
8500             (fas->f_state & STATE_SELECTING)));
8501 
8502         /*
8503          * if we are not in panic, set up a reset delay for this target;
8504          * a zero throttle forces all new requests into the ready Q
8505          */
8506         if (!ddi_in_panic()) {
8507                 fas_set_all_lun_throttles(fas, start, HOLD_THROTTLE);
8508                 fas->f_reset_delay[target] = fas->f_scsi_reset_delay;
8509                 fas_start_watch_reset_delay(fas);
8510         } else {
8511                 drv_usecwait(fas->f_scsi_reset_delay * 1000);
8512         }
8513 
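             /*
              * flush the tag and ready queues for every lun on this target
              */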
8514         for (i = start; i < end; i++) {
8515                 fas_mark_packets(fas, i, CMD_RESET, STAT_DEV_RESET);
8516                 fas_flush_tagQ(fas, i);
8517                 fas_flush_readyQ(fas, i);
8518                 if (fas->f_arq_pkt[i]) {
8519                         struct fas_cmd *sp = fas->f_arq_pkt[i];
8520                         struct arq_private_data *arq_data =
8521                             (struct arq_private_data *)
8522                             (sp->cmd_pkt->pkt_private);
8523                         if (sp->cmd_pkt->pkt_comp) {
8524                                 ASSERT(arq_data->arq_save_sp == NULL);
8525                         }
8526                 }
8527                 ASSERT(fas->f_tcmds[i] == 0);
8528         }
8529         ASSERT(fas->f_ncmds >= fas->f_ndisc);
8530 
8531         fas_force_renegotiation(fas, target);
8532 }
8533 
8534 /*
8535  * reset a currently disconnected target
8536  */
8537 static int
8538 fas_reset_disconnected_cmd(struct fas *fas, struct scsi_address *ap)
8539 {
8540         auto struct fas_cmd     local;
8541         struct fas_cmd          *sp = &local;
8542         struct scsi_pkt         *pkt;
8543         int                     rval;
8544 
8545         pkt = kmem_alloc(scsi_pkt_size(), KM_SLEEP);
8546         fas_makeproxy_cmd(sp, ap, pkt, 1, MSG_DEVICE_RESET);
8547         rval = fas_do_proxy_cmd(fas, sp, ap, scsi_mname(MSG_DEVICE_RESET));
8548         kmem_free(pkt, scsi_pkt_size());
8549         return (rval);
8550 }
8551 
8552 /*
8553  * reset a target with a currently connected command;
8554  * assert ATN and send MSG_DEVICE_RESET; throttles are zeroed temporarily
8555  * to prevent new cmds from starting, regardless of the outcome
8556  */
8557 static int
8558 fas_reset_connected_cmd(struct fas *fas, struct scsi_address *ap)
8559 {
8560         int rval = FALSE;
8561         struct fas_cmd *sp = fas->f_current_sp;
8562         int flags = sp->cmd_pkt_flags;
8563 
8564         /*
8565          * only attempt to reset in data phase; during other phases
8566          * asserting ATN may just cause confusion
8567          */
8568         if (!((fas->f_state == ACTS_DATA) ||
8569             (fas->f_state == ACTS_DATA_DONE))) {
8570                 return (rval);
8571         }
8572 
8573         IPRINTF2("Sending reset message to connected %d.%d\n",
8574             ap->a_target, ap->a_lun);
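
             /*
              * queue MSG_DEVICE_RESET for the next msg out phase; FLAG_NOINTR
              * forces polled completion via fas_dopoll()
              */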
8575         fas->f_reset_msg_sent = 0;
8576         fas->f_omsglen = 1;
8577         fas->f_cur_msgout[0] = MSG_DEVICE_RESET;
8578         sp->cmd_pkt_flags |= FLAG_NOINTR;
8579 
8580         fas_assert_atn(fas);
8581 
8582         /*
8583          * poll for interrupts until bus free
8584          */
8585         (void) fas_dopoll(fas, SHORT_POLL_TIMEOUT);
8586 
8587         /*
8588          * now check if the msg was taken;
8589          * f_reset_msg_sent is set in fas_handle_msg_out_done when the
8590          * msg has actually gone out (i.e. a msg out phase occurred)
8591          */
8592         if (fas->f_reset_msg_sent && (sp->cmd_flags & CFLAG_COMPLETED)) {
8593                 IPRINTF2("target %d.%d reset\n", ap->a_target, ap->a_lun);
8594                 rval = TRUE;
8595         } else {
8596                 IPRINTF2("target %d.%d did not reset\n",
8597                     ap->a_target, ap->a_lun);
8598         }
8599         sp->cmd_pkt_flags = flags;
8600         fas->f_omsglen = 0;
8601 
8602         return (rval);
8603 }
8604 
8605 /*
8606  * reset the scsi bus to blow all commands away
8607  */
8608 static int
8609 fas_reset_bus(struct fas *fas)
8610 {
8611         IPRINTF("fas_reset_bus:\n");
8612         New_state(fas, ACTS_RESET);
8613 
8614         fas_internal_reset(fas, FAS_RESET_SCSIBUS);
8615 
8616         /*
8617          * Now that we've reset the SCSI bus, we'll take a SCSI RESET
8618          * interrupt and use that to clean up the state of things.
8619          */
8620         return (ACTION_RETURN);
8621 }
8622 
8623 /*
8624  * fas_reset_recovery is called on the reset interrupt and cleans
8625  * up all cmds (active or waiting)
8626  */
8627 static int
8628 fas_reset_recovery(struct fas *fas)
8629 {
8630         short slot, start_slot;
8631         int i;
8632         int rval = ACTION_SEARCH;
8633         int max_loop = 0;
8634 
8635         IPRINTF("fas_reset_recovery:\n");
8636         fas_check_ncmds(fas);
8637 
8638         /*
8639          * renegotiate wide and sync for all targets
8640          */
8641         fas->f_sync_known = fas->f_wide_known = 0;
8642 
8643         /*
8644          * reset dma engine
8645          */
8646         FAS_FLUSH_DMA_HARD(fas);
8647 
8648         /*
8649          * set throttles and reset delay
8650          */
8651         fas_setup_reset_delay(fas);
8652 
8653         /*
8654          * clear interrupts until they go away
8655          */
8656         while (INTPENDING(fas) && (max_loop < FAS_RESET_SPIN_MAX_LOOP)) {
8657                 volatile struct fasreg *fasreg = fas->f_reg;
8658                 fas->f_stat = fas_reg_read(fas, &fasreg->fas_stat);
8659                 fas->f_stat2 = fas_reg_read(fas, &fasreg->fas_stat2);
8660                 fas->f_step = fas_reg_read(fas, &fasreg->fas_step);
8661                 fas->f_intr = fas_reg_read(fas, &fasreg->fas_intr);
8662                 drv_usecwait(FAS_RESET_SPIN_DELAY_USEC);
8663                 max_loop++;
8664         }
8665 
8666         if (max_loop >= FAS_RESET_SPIN_MAX_LOOP) {
8667                 fas_log(fas, CE_WARN, "Resetting SCSI bus failed");
8668         }
8669 
8670         fas_reg_cmd_write(fas, CMD_FLUSH);
8671 
8672         /*
8673          * reset the chip; this shouldn't be necessary, but sometimes
8674          * we get a hang in the next data-in phase
8675          */
8676         fas_internal_reset(fas, FAS_RESET_FAS);
8677 
8678         /*
8679          * was the reset expected? if not, it must be an external bus reset
8680          */
8681         if (fas->f_state != ACTS_RESET) {
8682                 if (fas->f_ncmds) {
8683                         fas_log(fas, CE_WARN, "external SCSI bus reset");
8684                 }
8685         }
8686 
8687         if (fas->f_ncmds == 0) {
8688                 rval = ACTION_RETURN;
8689                 goto done;
8690         }
8691 
8692         /*
8693          * completely reset the state of the softc data.
8694          */
8695         fas_internal_reset(fas, FAS_RESET_SOFTC);
8696 
8697         /*
8698          * Hold the state of the host adapter open
8699          */
8700         New_state(fas, ACTS_FROZEN);
8701 
8702         /*
8703          * for right now just claim that all
8704          * commands have been destroyed by a SCSI reset
8705          * and let already set reason fields or callers
8706          * decide otherwise for specific commands.
8707          */
8708         start_slot = fas->f_next_slot;
8709         slot = start_slot;
8710         do {
8711                 fas_check_ncmds(fas);
8712                 fas_mark_packets(fas, slot, CMD_RESET, STAT_BUS_RESET);
8713                 fas_flush_tagQ(fas, slot);
8714                 fas_flush_readyQ(fas, slot);
8715                 if (fas->f_arq_pkt[slot]) {
8716                         struct fas_cmd *sp = fas->f_arq_pkt[slot];
8717                         struct arq_private_data *arq_data =
8718                             (struct arq_private_data *)
8719                             (sp->cmd_pkt->pkt_private);
8720                         if (sp->cmd_pkt->pkt_comp) {
8721                                 ASSERT(arq_data->arq_save_sp == NULL);
8722                         }
8723                 }
8724                 slot = NEXTSLOT(slot, fas->f_dslot);
8725         } while (slot != start_slot);
8726 
8727         fas_check_ncmds(fas);
8728 
8729         /*
8730          * reset timeouts
8731          */
8732         for (i = 0; i < N_SLOTS; i++) {
8733                 if (fas->f_active[i]) {
8734                         fas->f_active[i]->f_timebase = 0;
8735                         fas->f_active[i]->f_timeout = 0;
8736                         fas->f_active[i]->f_dups = 0;
8737                 }
8738         }
8739 
8740 done:
8741         /*
8742          * Move the state back to free...
8743          */
8744         New_state(fas, STATE_FREE);
8745         ASSERT(fas->f_ncmds >= fas->f_ndisc);
8746 
8747         /*
8748          * perform the reset notification callbacks that are registered.
8749          */
8750         (void) scsi_hba_reset_notify_callback(&fas->f_mutex,
8751             &fas->f_reset_notify_listf);
8752 
8753         /*
8754          * if a reset delay is still active, a search is meaningless,
8755          * but do it anyway
8756          */
8757         return (rval);
8758 }
8759 
8760 /*
8761  * hba_tran ops for quiesce and unquiesce
8762  */
8763 static int
8764 fas_scsi_quiesce(dev_info_t *dip)
8765 {
8766         struct fas *fas;
8767         scsi_hba_tran_t *tran;
8768 
8769         tran = ddi_get_driver_private(dip);
8770         if ((tran == NULL) || ((fas = TRAN2FAS(tran)) == NULL)) {
8771                 return (-1);
8772         }
8773 
8774         return (fas_quiesce_bus(fas));
8775 }
8776 
8777 static int
8778 fas_scsi_unquiesce(dev_info_t *dip)
8779 {
8780         struct fas *fas;
8781         scsi_hba_tran_t *tran;
8782 
8783         tran = ddi_get_driver_private(dip);
8784         if ((tran == NULL) || ((fas = TRAN2FAS(tran)) == NULL)) {
8785                 return (-1);
8786         }
8787 
8788         return (fas_unquiesce_bus(fas));
8789 }
8790 
8791 #ifdef FAS_TEST
8792 /*
8793  * torture test functions
8794  */
8795 static void
8796 fas_test_reset(struct fas *fas, int slot)
8797 {
8798         struct scsi_address ap;
8799         char target = slot/NLUNS_PER_TARGET;
8800 
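             /*
              * fas_rtest is a bit mask of targets to reset; fas_rtest_type
              * selects a target (1) or bus (2) reset in the data-done phase,
              * otherwise a plain target reset is issued
              */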
8801         if (fas_rtest & (1 << target)) {
8802                 ap.a_hba_tran = fas->f_tran;
8803                 ap.a_target = target;
8804                 ap.a_lun = 0;
8805                 if ((fas_rtest_type == 1) &&
8806                     (fas->f_state == ACTS_DATA_DONE)) {
8807                         if (fas_do_scsi_reset(&ap, RESET_TARGET)) {
8808                                 fas_rtest = 0;
8809                         }
8810                 } else if ((fas_rtest_type == 2) &&
8811                     (fas->f_state == ACTS_DATA_DONE)) {
8812                         if (fas_do_scsi_reset(&ap, RESET_ALL)) {
8813                                 fas_rtest = 0;
8814                         }
8815                 } else {
8816                         if (fas_do_scsi_reset(&ap, RESET_TARGET)) {
8817                                 fas_rtest = 0;
8818                         }
8819                 }
8820         }
8821 }
8822 
8823 static void
8824 fas_test_abort(struct fas *fas, int slot)
8825 {
8826         struct fas_cmd *sp = fas->f_current_sp;
8827         struct scsi_address ap;
8828         char target = slot/NLUNS_PER_TARGET;
8829         struct scsi_pkt *pkt = NULL;
8830 
8831         if (fas_atest & (1 << target)) {
8832                 ap.a_hba_tran = fas->f_tran;
8833                 ap.a_target = target;
8834                 ap.a_lun = 0;
8835 
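                     /*
                      * fas_atest_disc selects the victim: 0 = connected cmd,
                      * 1 = a disconnected cmd, 2 = abort all (NULL pkt),
                      * 3-5 = cmds on the ready Q, 6 = connected cmd in
                      * data-done phase, 7 = abort all, then reset the target
                      */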
8836                 if ((fas_atest_disc == 0) && sp &&
8837                     (sp->cmd_slot == slot) &&
8838                     ((sp->cmd_flags & CFLAG_CMDDISC) == 0)) {
8839                         pkt = sp->cmd_pkt;
8840                 } else if ((fas_atest_disc == 1) && NOTAG(target)) {
8841                         sp = fas->f_active[slot]->f_slot[0];
8842                         if (sp && (sp->cmd_flags & CFLAG_CMDDISC)) {
8843                                 pkt = sp->cmd_pkt;
8844                         }
8845                 } else if ((fas_atest_disc == 1) && (sp == 0) &&
8846                     TAGGED(target) &&
8847                     (fas->f_tcmds[slot] != 0)) {
8848                         int tag;
8849                         /*
8850                          * find the oldest tag
8851                          */
8852                         for (tag = NTAGS-1; tag >= 0; tag--) {
8853                                 if ((sp = fas->f_active[slot]->f_slot[tag])
8854                                     != 0)
8855                                         break;
8856                         }
8857                         if (sp) {
8858                                 pkt = sp->cmd_pkt;
8859                                 ASSERT(sp->cmd_slot == slot);
8860                         } else {
8861                                 return;
8862                         }
8863                 } else if (fas_atest_disc == 2 && (sp == 0) &&
8864                     (fas->f_tcmds[slot] != 0)) {
8865                         pkt = NULL;
8866                 } else if (fas_atest_disc == 2 && NOTAG(target)) {
8867                         pkt = NULL;
8868                 } else if (fas_atest_disc == 3 && fas->f_readyf[slot]) {
8869                         pkt = fas->f_readyf[slot]->cmd_pkt;
8870                 } else if (fas_atest_disc == 4 &&
8871                     fas->f_readyf[slot] && fas->f_readyf[slot]->cmd_forw) {
8872                         pkt = fas->f_readyf[slot]->cmd_forw->cmd_pkt;
8873                 } else if (fas_atest_disc == 5 && fas->f_readyb[slot]) {
8874                         pkt = fas->f_readyb[slot]->cmd_pkt;
8875                 } else if ((fas_atest_disc == 6) && sp &&
8876                     (sp->cmd_slot == slot) &&
8877                     (fas->f_state == ACTS_DATA_DONE)) {
8878                         pkt = sp->cmd_pkt;
8879                 } else if (fas_atest_disc == 7) {
8880                         if (fas_do_scsi_abort(&ap, NULL)) {
8881                                 if (fas_do_scsi_abort(&ap, NULL)) {
8882                                         if (fas_do_scsi_reset(&ap,
8883                                             RESET_TARGET)) {
8884                                                 fas_atest = 0;
8885                                         }
8886                                 }
8887                         }
8888                         return;
8889                 } else {
8890                         return;
8891                 }
8892 
8893                 fas_log(fas, CE_NOTE, "aborting pkt=0x%p state=%x\n",
8894                     (void *)pkt, (pkt != NULL? pkt->pkt_state : 0));
8895                 if (fas_do_scsi_abort(&ap, pkt)) {
8896                         fas_atest = 0;
8897                 }
8898         }
8899 }
8900 #endif /* FAS_TEST */
8901 
8902 /*
8903  * capability interface
8904  */
8905 static int
8906 fas_commoncap(struct scsi_address *ap, char *cap, int val,
8907     int tgtonly, int doset)
8908 {
8909         struct fas *fas = ADDR2FAS(ap);
8910         int cidx;
8911         int target = ap->a_target;
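             /* per-target bit mask and its complement for flag words */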
8912         ushort_t tshift = (1<<target);
8913         ushort_t ntshift = ~tshift;
8914         int rval = FALSE;
8915 
8916         mutex_enter(FAS_MUTEX(fas));
8917 
8918         if (cap == (char *)0) {
8919                 goto exit;
8920         }
8921 
8922         cidx = scsi_hba_lookup_capstr(cap);
8923         if (cidx == -1) {
8924                 rval = UNDEFINED;
8925         } else if (doset) {
8926                 /*
8927                  * capabilities may only be set on a per-target basis;
8928                  * whole-bus (non-tgtonly) settings are not allowed
8929                  */
8930                 if (!tgtonly) {
8931                         goto exit;
8932                 }
8933                 switch (cidx) {
8934                 case SCSI_CAP_DMA_MAX:
8935                 case SCSI_CAP_MSG_OUT:
8936                 case SCSI_CAP_PARITY:
8937                 case SCSI_CAP_INITIATOR_ID:
8938                 case SCSI_CAP_LINKED_CMDS:
8939                 case SCSI_CAP_UNTAGGED_QING:
8940                 case SCSI_CAP_RESET_NOTIFICATION:
8941                         /*
8942                          * None of these are settable via
8943                          * the capability interface.
8944                          */
8945                         break;
8946 
8947                 case SCSI_CAP_DISCONNECT:
8948                         if (val)
8949                                 fas->f_target_scsi_options[ap->a_target] |=
8950                                     SCSI_OPTIONS_DR;
8951                         else
8952                                 fas->f_target_scsi_options[ap->a_target] &=
8953                                     ~SCSI_OPTIONS_DR;
8954 
8955                         break;
8956 
8957                 case SCSI_CAP_SYNCHRONOUS:
8958                         if (val) {
8959                                 fas->f_force_async &= ~tshift;
8960                         } else {
8961                                 fas->f_force_async |= tshift;
8962                         }
8963                         fas_force_renegotiation(fas, target);
8964                         rval = TRUE;
8965                         break;
8966 
8967                 case SCSI_CAP_TAGGED_QING:
8968                 {
8969                         int slot = target * NLUNS_PER_TARGET | ap->a_lun;
8970                         ushort_t old_notag = fas->f_notag;
8971 
8972                         /* do not allow while this tgt has cmds outstanding */
8973                         if (fas->f_tcmds[slot]) {
8974                                 break;
8975                         }
8976 
8979                         if (val) {
8980                                 if (fas->f_target_scsi_options[target] &
8981                                     SCSI_OPTIONS_TAG) {
8982                                         IPRINTF1("target %d: TQ enabled\n",
8983                                             target);
8984                                         fas->f_notag &= ntshift;
8985                                 } else {
8986                                         break;
8987                                 }
8988                         } else {
8989                                 IPRINTF1("target %d: TQ disabled\n",
8990                                     target);
8991                                 fas->f_notag |= tshift;
8992                         }
8993 
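                             /*
                              * resize the active slots array for tagged queueing;
                              * restore the old notag mask if the allocation fails
                              */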
8994                         if (val && fas_alloc_active_slots(fas, slot,
8995                             KM_NOSLEEP)) {
8996                                 fas->f_notag = old_notag;
8997                                 break;
8998                         }
8999 
9000                         fas_set_all_lun_throttles(fas, slot, MAX_THROTTLE);
9001 
9002                         fas_update_props(fas, target);
9003                         rval = TRUE;
9004                         break;
9005                 }
9006 
9007                 case SCSI_CAP_WIDE_XFER:
9008                         if (val) {
9009                                 if (fas->f_target_scsi_options[target] &
9010                                     SCSI_OPTIONS_WIDE) {
9011                                         fas->f_nowide &= ntshift;
9012                                         fas->f_force_narrow &= ~tshift;
9013                                 } else {
9014                                         break;
9015                                 }
9016                         } else {
9017                                 fas->f_force_narrow |= tshift;
9018                         }
9019                         fas_force_renegotiation(fas, target);
9020                         rval = TRUE;
9021                         break;
9022 
9023                 case SCSI_CAP_ARQ:
9024                         if (val) {
9025                                 if (fas_create_arq_pkt(fas, ap)) {
9026                                         break;
9027                                 }
9028                         } else {
9029                                 if (fas_delete_arq_pkt(fas, ap)) {
9030                                         break;
9031                                 }
9032                         }
9033                         rval = TRUE;
9034                         break;
9035 
9036                 case SCSI_CAP_QFULL_RETRIES:
9037                         fas->f_qfull_retries[target] = (uchar_t)val;
9038                         rval = TRUE;
9039                         break;
9040 
9041                 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9042                         fas->f_qfull_retry_interval[target] =
9043                             drv_usectohz(val * 1000);
9044                         rval = TRUE;
9045                         break;
9046 
9047                 default:
9048                         rval = UNDEFINED;
9049                         break;
9050                 }
9051 
9052         } else if (doset == 0) {
9053                 int slot = target * NLUNS_PER_TARGET | ap->a_lun;
9054 
9055                 switch (cidx) {
9056                 case SCSI_CAP_DMA_MAX:
9057                         /* very high limit because of multiple dma windows */
9058                         rval = 1<<30;
9059                         break;
9060                 case SCSI_CAP_MSG_OUT:
9061                         rval = TRUE;
9062                         break;
9063                 case SCSI_CAP_DISCONNECT:
9064                         if (tgtonly &&
9065                             (fas->f_target_scsi_options[target] &
9066                             SCSI_OPTIONS_DR)) {
9067                                 rval = TRUE;
9068                         }
9069                         break;
9070                 case SCSI_CAP_SYNCHRONOUS:
9071                         if (tgtonly && fas->f_offset[target]) {
9072                                 rval = TRUE;
9073                         }
9074                         break;
9075                 case SCSI_CAP_PARITY:
9076                         rval = TRUE;
9077                         break;
9078                 case SCSI_CAP_INITIATOR_ID:
9079                         rval = MY_ID(fas);
9080                         break;
9081                 case SCSI_CAP_TAGGED_QING:
9082                         if (tgtonly && ((fas->f_notag & tshift) == 0)) {
9083                                 rval = TRUE;
9084                         }
9085                         break;
9086                 case SCSI_CAP_WIDE_XFER:
9087                         if ((tgtonly && (fas->f_nowide & tshift) == 0)) {
9088                                 rval = TRUE;
9089                         }
9090                         break;
9091                 case SCSI_CAP_UNTAGGED_QING:
9092                         rval = TRUE;
9093                         break;
9094                 case SCSI_CAP_ARQ:
9095                         if (tgtonly && fas->f_arq_pkt[slot]) {
9096                                 rval = TRUE;
9097                         }
9098                         break;
9099                 case SCSI_CAP_LINKED_CMDS:
9100                         break;
9101                 case SCSI_CAP_RESET_NOTIFICATION:
9102                         rval = TRUE;
9103                         break;
9104                 case SCSI_CAP_QFULL_RETRIES:
9105                         rval = fas->f_qfull_retries[target];
9106                         break;
9107                 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9108                         rval = drv_hztousec(
9109                             fas->f_qfull_retry_interval[target]) /
9110                             1000;
9111                         break;
9112 
9113                 default:
9114                         rval = UNDEFINED;
9115                         break;
9116                 }
9117         }
9118 exit:
9119         if (val && tgtonly) {
9120                 fas_update_props(fas, target);
9121         }
9122         fas_check_waitQ_and_mutex_exit(fas);
9123 
9124         if (doset) {
9125                 IPRINTF6(
9126             "fas_commoncap:tgt=%x,cap=%s,tgtonly=%x,doset=%x,val=%x,rval=%x\n",
9127                     target, cap, tgtonly, doset, val, rval);
9128         }
9129         return (rval);
9130 }
9131 
9132 /*
9133  * property management
9134  * fas_update_props:
9135  * create/update the sync/wide/TQ properties for this target
9136  */
9137 static void
9138 fas_update_props(struct fas *fas, int tgt)
9139 {
9140         char    property[32];
9141         uint_t  xfer_speed = 0;
9142         uint_t  xfer_rate = 0;
9143         int     wide_enabled, tq_enabled;
9144         uint_t  regval = fas->f_sync_period[tgt];
9145         int     offset = fas->f_offset[tgt];
9146 
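             /*
              * derive the transfer rate from the negotiated sync period and
              * offset; a wide target moves twice as many bytes per transfer
              */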
9147         wide_enabled = ((fas->f_nowide & (1<<tgt)) == 0);
9148         if (offset && regval) {
9149                 xfer_speed =
9150                     FAS_SYNC_KBPS((regval * fas->f_clock_cycle) / 1000);
9151                 xfer_rate = ((wide_enabled)? 2 : 1) * xfer_speed;
9152         }
9153         (void) sprintf(property, "target%x-sync-speed", tgt);
9154         fas_update_this_prop(fas, property, xfer_rate);
9155 
9156         (void) sprintf(property, "target%x-wide", tgt);
9157         fas_update_this_prop(fas, property, wide_enabled);
9158 
9159         (void) sprintf(property, "target%x-TQ", tgt);
9160         tq_enabled = ((fas->f_notag & (1<<tgt))? 0 : 1);
9161         fas_update_this_prop(fas, property, tq_enabled);
9162 
9163 }
9164 
9165 static void
9166 fas_update_this_prop(struct fas *fas, char *property, int value)
9167 {
9168         dev_info_t *dip = fas->f_dev;
9169 
9170         IPRINTF2("update prop: %s value=%x\n", property, value);
9171         ASSERT(mutex_owned(FAS_MUTEX(fas)));
9172         /*
9173          * We cannot hold any mutex at this point because the call to
9174          * ddi_prop_update_int() may block.
9175          */
9176         mutex_exit(FAS_MUTEX(fas));
9177         if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
9178             property, value) != DDI_PROP_SUCCESS)       {
9179                 IPRINTF1("cannot modify/create %s property\n",  property);
9180         }
9181         mutex_enter(FAS_MUTEX(fas));
9182 }
9183 
9184 /*
9185  * allocate the active slots array; size depends on whether tagQ is enabled
9186  */
9187 static int
9188 fas_alloc_active_slots(struct fas *fas, int slot, int flag)
9189 {
9190         int target = slot / NLUNS_PER_TARGET;
9191         struct f_slots *old_active = fas->f_active[slot];
9192         struct f_slots *new_active;
9193         ushort_t size;
9194         int rval = -1;
9195 
9196         if (fas->f_tcmds[slot]) {
9197                 IPRINTF("cannot change size of active slots array\n");
9198                 return (rval);
9199         }
9200 
9201         size = ((NOTAG(target)) ? FAS_F_SLOT_SIZE : FAS_F_SLOTS_SIZE_TQ);
9202         EPRINTF4(
9203         "fas_alloc_active_slots: target=%x size=%x, old=0x%p, oldsize=%x\n",
9204             target, size, (void *)old_active,
9205             ((old_active == NULL) ? -1 : old_active->f_size));
9206 
9207         new_active = kmem_zalloc(size, flag);
9208         if (new_active == NULL) {
9209                 IPRINTF("new active alloc failed\n");
9210         } else {
9211                 fas->f_active[slot] = new_active;
9212                 fas->f_active[slot]->f_n_slots = (NOTAG(target) ? 1 : NTAGS);
9213                 fas->f_active[slot]->f_size = size;
9214                 /*
9215                  * reserve tag 0 for non-tagged cmds to tagged targets
9216                  */
9217                 if (TAGGED(target)) {
9218                         fas->f_active[slot]->f_tags = 1;
9219                 }
9220                 if (old_active) {
9221                         kmem_free((caddr_t)old_active, old_active->f_size);
9222                 }
9223                 rval = 0;
9224         }
9225         return (rval);
9226 }
9227 
9228 /*
9229  * Error logging, printing, and debug print routines
9230  */
9231 static char *fas_label = "fas";
9232 
9233 /*PRINTFLIKE3*/
9234 static void
9235 fas_log(struct fas *fas, int level, const char *fmt, ...)
9236 {
9237         dev_info_t *dev;
9238         va_list ap;
9239 
9240         if (fas) {
9241                 dev = fas->f_dev;
9242         } else {
9243                 dev = 0;
9244         }
9245 
9246         mutex_enter(&fas_log_mutex);
9247 
9248         va_start(ap, fmt);
9249         (void) vsprintf(fas_log_buf, fmt, ap);
9250         va_end(ap);
9251 
9252         if (level == CE_CONT) {
9253                 scsi_log(dev, fas_label, level, "%s\n", fas_log_buf);
9254         } else {
9255                 scsi_log(dev, fas_label, level, "%s", fas_log_buf);
9256         }
9257 
9258         mutex_exit(&fas_log_mutex);
9259 }
9260 
9261 /*PRINTFLIKE2*/
9262 static void
9263 fas_printf(struct fas *fas, const char *fmt, ...)
9264 {
9265         dev_info_t *dev = 0;
9266         va_list ap;
9267         int level = CE_CONT;
9268 
9269         mutex_enter(&fas_log_mutex);
9270 
9271         va_start(ap, fmt);
9272         (void) vsprintf(fas_log_buf, fmt, ap);
9273         va_end(ap);
9274 
9275         if (fas) {
9276                 dev = fas->f_dev;
9277                 level = CE_NOTE;
9278                 scsi_log(dev, fas_label, level, "%s", fas_log_buf);
9279         } else {
9280                 scsi_log(dev, fas_label, level, "%s\n", fas_log_buf);
9281         }
9282 
9283         mutex_exit(&fas_log_mutex);
9284 }
9285 
9286 #ifdef FASDEBUG
9287 /*PRINTFLIKE2*/
9288 void
9289 fas_dprintf(struct fas *fas, const char *fmt, ...)
9290 {
9291         dev_info_t *dev = 0;
9292         va_list ap;
9293 
9294         if (fas) {
9295                 dev = fas->f_dev;
9296         }
9297 
9298         mutex_enter(&fas_log_mutex);
9299 
9300         va_start(ap, fmt);
9301         (void) vsprintf(fas_log_buf, fmt, ap);
9302         va_end(ap);
9303 
9304         scsi_log(dev, fas_label, SCSI_DEBUG, "%s", fas_log_buf);
9305 
9306         mutex_exit(&fas_log_mutex);
9307 }
9308 #endif
9309 
9310 
9311 static void
9312 fas_printstate(struct fas *fas, char *msg)
9313 {
9314         volatile struct fasreg *fasreg = fas->f_reg;
9315         volatile struct dma *dmar = fas->f_dma;
9316         uint_t csr = fas_dma_reg_read(fas, &dmar->dma_csr);
9317         uint_t count = fas_dma_reg_read(fas, &dmar->dma_count);
9318         uint_t addr = fas_dma_reg_read(fas, &dmar->dma_addr);
9319         uint_t test = fas_dma_reg_read(fas, &dmar->dma_test);
9320         uint_t fas_cnt;
9321 
9322         fas_log(fas, CE_WARN, "%s: current fas state:", msg);
9323         fas_printf(NULL, "Latched stat=0x%b intr=0x%b",
9324             fas->f_stat, FAS_STAT_BITS, fas->f_intr, FAS_INT_BITS);
9325         fas_printf(NULL, "last msgout: %s, last msgin: %s",
9326             scsi_mname(fas->f_last_msgout), scsi_mname(fas->f_last_msgin));
9327         fas_printf(NULL, "DMA csr=0x%b", csr, dma_bits);
9328         fas_printf(NULL,
9329             "addr=%x dmacnt=%x test=%x last=%x last_cnt=%x",
9330             addr, count, test, fas->f_lastdma, fas->f_lastcount);
9331 
9332         GET_FAS_COUNT(fasreg, fas_cnt);
9333         fas_printf(NULL, "fas state:");
9334         fas_printf(NULL, "\tcount(32)=%x cmd=%x stat=%x stat2=%x intr=%x",
9335             fas_cnt, fasreg->fas_cmd, fasreg->fas_stat, fasreg->fas_stat2,
9336             fasreg->fas_intr);
9337         fas_printf(NULL,
9338         "\tstep=%x fifoflag=%x conf=%x test=%x conf2=%x conf3=%x",
9339             fasreg->fas_step, fasreg->fas_fifo_flag, fasreg->fas_conf,
9340             fasreg->fas_test, fasreg->fas_conf2, fasreg->fas_conf3);
9341 
9342         if (fas->f_current_sp) {
9343                 fas_dump_cmd(fas, fas->f_current_sp);
9344         }
9345 }
9346 
9347 /*
9348  * dump all we know about a cmd
9349  */
9350 static void
9351 fas_dump_cmd(struct fas *fas, struct fas_cmd *sp)
9352 {
9353         int i;
9354         uchar_t *cp = (uchar_t *)sp->cmd_pkt->pkt_cdbp;
9355         auto char buf[128];
9356 
9357         buf[0] = '\0';
9358         fas_printf(NULL, "Cmd dump for Target %d Lun %d:",
9359             Tgt(sp), Lun(sp));
9360         (void) sprintf(&buf[0], " cdb=[");
9361         for (i = 0; i < (int)sp->cmd_actual_cdblen; i++) {
9362                 (void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
9363         }
9364         (void) sprintf(&buf[strlen(buf)], " ]");
9365         fas_printf(NULL, "%s", buf);
9366         fas_printf(NULL, "State=%s Last State=%s",
9367             fas_state_name(fas->f_state), fas_state_name(fas->f_laststate));
9368         fas_printf(NULL,
9369             "pkt_state=0x%b pkt_flags=0x%x pkt_statistics=0x%x",
9370             sp->cmd_pkt->pkt_state, scsi_state_bits, sp->cmd_pkt_flags,
9371             sp->cmd_pkt->pkt_statistics);
9372         if (sp->cmd_pkt->pkt_state & STATE_GOT_STATUS) {
9373                 fas_printf(NULL, "Status=0x%x\n", sp->cmd_pkt->pkt_scbp[0]);
9374         }
9375 }
9376 
9377 /*ARGSUSED*/
9378 static void
9379 fas_short_dump_cmd(struct fas *fas, struct fas_cmd *sp)
9380 {
9381         int i;
9382         uchar_t *cp = (uchar_t *)sp->cmd_pkt->pkt_cdbp;
9383         auto char buf[128];
9384 
9385         buf[0] = '\0';
9386         (void) sprintf(&buf[0], "?%d.%d: cdb=[", Tgt(sp), Lun(sp));
9387         for (i = 0; i < (int)sp->cmd_actual_cdblen; i++) {
9388                 (void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
9389         }
9390         (void) sprintf(&buf[strlen(buf)], " ]");
9391         fas_printf(NULL, "%s", buf);
9392 }
9393 
9394 /*
9395  * state decoding for error messages
9396  */
9397 static char *
9398 fas_state_name(ushort_t state)
9399 {
9400         if (state == STATE_FREE) {
9401                 return ("FREE");
9402         } else if (state & STATE_SELECTING) {
9403                 if (state == STATE_SELECT_NORMAL)
9404                         return ("SELECT");
9405                 else if (state == STATE_SELECT_N_STOP)
9406                         return ("SEL&STOP");
9407                 else if (state == STATE_SELECT_N_SENDMSG)
9408                         return ("SELECT_SNDMSG");
9409                 else
9410                         return ("SEL_NO_ATN");
9411         } else {
9412                 static struct {
9413                         char *sname;
9414                         char state;
9415                 } names[] = {
9416                         "CMD_START",            ACTS_CMD_START,
9417                         "CMD_DONE",             ACTS_CMD_DONE,
9418                         "MSG_OUT",              ACTS_MSG_OUT,
9419                         "MSG_OUT_DONE",         ACTS_MSG_OUT_DONE,
9420                         "MSG_IN",               ACTS_MSG_IN,
9421                         "MSG_IN_MORE",          ACTS_MSG_IN_MORE,
9422                         "MSG_IN_DONE",          ACTS_MSG_IN_DONE,
9423                         "CLEARING",             ACTS_CLEARING,
9424                         "DATA",                 ACTS_DATA,
9425                         "DATA_DONE",            ACTS_DATA_DONE,
9426                         "CMD_CMPLT",            ACTS_C_CMPLT,
9427                         "UNKNOWN",              ACTS_UNKNOWN,
9428                         "RESEL",                ACTS_RESEL,
9429                         "ENDVEC",               ACTS_ENDVEC,
9430                         "RESET",                ACTS_RESET,
9431                         "ABORTING",             ACTS_ABORTING,
9432                         "FROZEN",               ACTS_FROZEN,
9433                         0
9434                 };
9435                 int i;
9436                 for (i = 0; names[i].sname; i++) {
9437                         if (names[i].state == state)
9438                                 return (names[i].sname);
9439                 }
9440         }
9441         return ("<BAD>");
9442 }