Print this page
XXXX introduce drv_sectohz
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/sun/io/scsi/adapters/fas.c
+++ new/usr/src/uts/sun/io/scsi/adapters/fas.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
25 25 */
26 26
27 27
28 28 /*
29 29 * ISSUES
30 30 *
31 31 * - more consistent error messages
32 32 * - report name of device on errors?
33 33 * - if wide target renegotiates sync, back to narrow?
34 34 * - last_msgout is not accurate ????
35 35 * - resolve XXXX
36 36 * - improve msg reject code (use special msg reject handler)
37 37 * - better use of IDE message
38 38 * - keep track if ATN remains asserted and target not going into
39 39 * a msg-out phase
40 40 * - improve comments
41 41 * - no slave accesses when start address is odd and dma hasn't started
42 42 * this affect asserting ATN
43 43 */
44 44
45 45 /*
46 46 * fas - QLogic fas366 wide/fast SCSI Processor HBA driver with
47 47 * tagged and non-tagged queueing support
48 48 */
49 49 #if defined(lint) && !defined(DEBUG)
50 50 #define DEBUG 1
51 51 #define FASDEBUG
52 52 #endif
53 53
54 54 #define DMA_REG_TRACING /* enable dma register access tracing */
55 55
56 56
57 57 /*
58 58 * standard header files
59 59 */
60 60 #include <sys/note.h>
61 61 #include <sys/scsi/scsi.h>
62 62 #include <sys/file.h>
63 63 #include <sys/vtrace.h>
64 64
65 65 /*
66 66 * private header files
67 67 */
68 68 #include <sys/scsi/adapters/fasdma.h>
69 69 #include <sys/scsi/adapters/fasreg.h>
70 70 #include <sys/scsi/adapters/fasvar.h>
71 71 #include <sys/scsi/adapters/fascmd.h>
72 72 #include <sys/scsi/impl/scsi_reset_notify.h>
73 73
74 74 /*
75 75 * tunables
76 76 */
77 77 static int fas_selection_timeout = 250; /* 250 milliseconds */
78 78 static uchar_t fas_default_offset = DEFAULT_OFFSET;
79 79
80 80 /*
81 81 * needed for presto support, do not remove
82 82 */
83 83 static int fas_enable_sbus64 = 1;
84 84
85 85 #ifdef FASDEBUG
86 86 int fasdebug = 0;
87 87 int fasdebug_instance = -1; /* debug all instances */
88 88 static int fas_burstsizes_limit = -1;
89 89 static int fas_no_sync_wide_backoff = 0;
90 90 #endif /* FASDEBUG */
91 91
92 92 /*
93 93 * Local static data protected by global mutex
94 94 */
95 95 static kmutex_t fas_global_mutex; /* to allow concurrent attach */
96 96
97 97 static int fas_scsi_watchdog_tick; /* in seconds, for all */
98 98 /* instances */
99 99 static clock_t fas_tick; /* fas_watch() interval in Hz */
100 100 static timeout_id_t fas_reset_watch; /* timeout id for reset watch */
101 101 static timeout_id_t fas_timeout_id = 0;
102 102 static int fas_timeout_initted = 0;
103 103
104 104 static krwlock_t fas_global_rwlock;
105 105
106 106 static void *fas_state; /* soft state ptr */
107 107 static struct fas *fas_head; /* link all softstate structures */
108 108 static struct fas *fas_tail; /* for fas_watch() */
109 109
110 110 static kmutex_t fas_log_mutex;
111 111 static char fas_log_buf[256];
112 112 _NOTE(MUTEX_PROTECTS_DATA(fas_global_mutex, fas_reset_watch))
113 113 _NOTE(DATA_READABLE_WITHOUT_LOCK(fas_state fas_head fas_tail \
114 114 fas_scsi_watchdog_tick fas_tick))
115 115 _NOTE(SCHEME_PROTECTS_DATA("safe sharing", fas::f_quiesce_timeid))
116 116
117 117 /*
118 118 * dma attribute structure for scsi engine
119 119 */
static ddi_dma_attr_t dma_fasattr	= {
	/* positional init of ddi_dma_attr(9S) fields, in declared order */
	DMA_ATTR_V0, (unsigned long long)0,	/* version; addr_lo */
	(unsigned long long)0xffffffff, (unsigned long long)((1<<24)-1),
					/* addr_hi; count_max (16MB-1) */
	1, DEFAULT_BURSTSIZE, 1,	/* align; burstsizes; minxfer */
	(unsigned long long)0xffffffff, (unsigned long long)0xffffffff,
					/* maxxfer; seg boundary */
	1, 512, 0			/* sgllen; granular; flags */
};
127 127
128 128 /*
129 129 * optional torture test stuff
130 130 */
131 131 #ifdef FASDEBUG
132 132 #define FAS_TEST
133 133 static int fas_ptest_emsgin;
134 134 static int fas_ptest_msgin;
135 135 static int fas_ptest_msg = -1;
136 136 static int fas_ptest_status;
137 137 static int fas_ptest_data_in;
138 138 static int fas_atest;
139 139 static int fas_atest_disc;
140 140 static int fas_atest_reconn;
141 141 static void fas_test_abort(struct fas *fas, int slot);
142 142 static int fas_rtest;
143 143 static int fas_rtest_type;
144 144 static void fas_test_reset(struct fas *fas, int slot);
145 145 static int fas_force_timeout;
146 146 static int fas_btest;
147 147 static int fas_test_stop;
148 148 static int fas_transport_busy;
149 149 static int fas_transport_busy_rqs;
150 150 static int fas_transport_reject;
151 151 static int fas_arqs_failure;
152 152 static int fas_tran_err;
153 153 static int fas_test_untagged;
154 154 static int fas_enable_untagged;
155 155 #endif
156 156
157 157 /*
158 158 * warlock directives
159 159 */
160 160 _NOTE(DATA_READABLE_WITHOUT_LOCK(dma fasdebug))
161 161 _NOTE(SCHEME_PROTECTS_DATA("just test variables", fas_transport_busy))
162 162 _NOTE(SCHEME_PROTECTS_DATA("just test variables", fas_transport_busy_rqs))
163 163 _NOTE(SCHEME_PROTECTS_DATA("just test variables", fas_transport_reject))
164 164 _NOTE(SCHEME_PROTECTS_DATA("just test variables", fas_arqs_failure))
165 165 _NOTE(SCHEME_PROTECTS_DATA("just test variables", fas_tran_err))
166 166 _NOTE(MUTEX_PROTECTS_DATA(fas_log_mutex, fas_log_buf))
167 167 _NOTE(MUTEX_PROTECTS_DATA(fas_global_mutex, fas_reset_watch))
168 168 _NOTE(DATA_READABLE_WITHOUT_LOCK(fas_state fas_head fas_tail \
169 169 fas_scsi_watchdog_tick fas_tick))
170 170
171 171 /*
172 172 * function prototypes
173 173 *
174 174 * scsa functions are exported by means of the transport table:
175 175 */
176 176 static int fas_scsi_tgt_probe(struct scsi_device *sd,
177 177 int (*waitfunc)(void));
178 178 static int fas_scsi_tgt_init(dev_info_t *, dev_info_t *,
179 179 scsi_hba_tran_t *, struct scsi_device *);
180 180 static int fas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
181 181 static int fas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
182 182 static int fas_scsi_reset(struct scsi_address *ap, int level);
183 183 static int fas_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
184 184 static int fas_scsi_setcap(struct scsi_address *ap, char *cap, int value,
185 185 int whom);
186 186 static struct scsi_pkt *fas_scsi_init_pkt(struct scsi_address *ap,
187 187 struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
188 188 int tgtlen, int flags, int (*callback)(), caddr_t arg);
189 189 static void fas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
190 190 static void fas_scsi_dmafree(struct scsi_address *ap,
191 191 struct scsi_pkt *pkt);
192 192 static void fas_scsi_sync_pkt(struct scsi_address *ap,
193 193 struct scsi_pkt *pkt);
194 194
195 195 /*
196 196 * internal functions:
197 197 */
198 198 static int fas_prepare_pkt(struct fas *fas, struct fas_cmd *sp);
199 199 static int fas_alloc_tag(struct fas *fas, struct fas_cmd *sp);
200 200 static int fas_accept_pkt(struct fas *fas, struct fas_cmd *sp, int flag);
201 201 static void fas_empty_waitQ(struct fas *fas);
202 202 static void fas_move_waitQ_to_readyQ(struct fas *fas);
203 203 static void fas_check_waitQ_and_mutex_exit(struct fas *fas);
204 204 static int fas_istart(struct fas *fas);
205 205 static int fas_ustart(struct fas *fas);
206 206 static int fas_startcmd(struct fas *fas, struct fas_cmd *sp);
207 207
208 208 static int fas_pkt_alloc_extern(struct fas *fas, struct fas_cmd *sp,
209 209 int cmdlen, int tgtlen, int statuslen, int kf);
210 210 static void fas_pkt_destroy_extern(struct fas *fas, struct fas_cmd *sp);
211 211 static int fas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags);
212 212 static void fas_kmem_cache_destructor(void *buf, void *cdrarg);
213 213
214 214 static int fas_finish(struct fas *fas);
215 215 static void fas_handle_qfull(struct fas *fas, struct fas_cmd *sp);
216 216 static void fas_restart_cmd(void *);
217 217 static int fas_dopoll(struct fas *fas, int timeout);
218 218 static void fas_runpoll(struct fas *fas, short slot, struct fas_cmd *sp);
219 219 static uint_t fas_intr(caddr_t arg);
220 220 static int fas_intr_svc(struct fas *fas);
221 221 static int fas_phasemanage(struct fas *fas);
222 222 static int fas_handle_unknown(struct fas *fas);
223 223 static int fas_handle_cmd_start(struct fas *fas);
224 224 static int fas_handle_cmd_done(struct fas *fas);
225 225 static int fas_handle_msg_out_start(struct fas *fas);
226 226 static int fas_handle_msg_out_done(struct fas *fas);
227 227 static int fas_handle_clearing(struct fas *fas);
228 228 static int fas_handle_data_start(struct fas *fas);
229 229 static int fas_handle_data_done(struct fas *fas);
230 230 static int fas_handle_c_cmplt(struct fas *fas);
231 231 static int fas_handle_msg_in_start(struct fas *fas);
232 232 static int fas_handle_more_msgin(struct fas *fas);
233 233 static int fas_handle_msg_in_done(struct fas *fas);
234 234 static int fas_onebyte_msg(struct fas *fas);
235 235 static int fas_twobyte_msg(struct fas *fas);
236 236 static int fas_multibyte_msg(struct fas *fas);
237 237 static void fas_revert_to_async(struct fas *fas, int tgt);
238 238 static int fas_finish_select(struct fas *fas);
239 239 static int fas_reselect_preempt(struct fas *fas);
240 240 static int fas_reconnect(struct fas *fas);
241 241 static int fas_handle_selection(struct fas *fas);
242 242 static void fas_head_of_readyQ(struct fas *fas, struct fas_cmd *sp);
243 243 static int fas_handle_gross_err(struct fas *fas);
244 244 static int fas_illegal_cmd_or_bus_reset(struct fas *fas);
245 245 static int fas_check_dma_error(struct fas *fas);
246 246
247 247 static void fas_make_sdtr(struct fas *fas, int msgout_offset, int target);
248 248 static void fas_make_wdtr(struct fas *fas, int msgout_offset, int target,
249 249 int width);
250 250 static void fas_update_props(struct fas *fas, int tgt);
251 251 static void fas_update_this_prop(struct fas *fas, char *property, int value);
252 252
253 253 static int fas_commoncap(struct scsi_address *ap, char *cap, int val,
254 254 int tgtonly, int doset);
255 255
256 256 static void fas_watch(void *arg);
257 257 static void fas_watchsubr(struct fas *fas);
258 258 static void fas_cmd_timeout(struct fas *fas, int slot);
259 259 static void fas_sync_wide_backoff(struct fas *fas, struct fas_cmd *sp,
260 260 int slot);
261 261 static void fas_reset_sync_wide(struct fas *fas);
262 262 static void fas_set_wide_conf3(struct fas *fas, int target, int width);
263 263 static void fas_force_renegotiation(struct fas *fas, int target);
264 264
265 265 static int fas_set_new_window(struct fas *fas, struct fas_cmd *sp);
266 266 static int fas_restore_pointers(struct fas *fas, struct fas_cmd *sp);
267 267 static int fas_next_window(struct fas *fas, struct fas_cmd *sp, uint64_t end);
268 268
269 269 /*PRINTFLIKE3*/
270 270 static void fas_log(struct fas *fas, int level, const char *fmt, ...);
271 271 /*PRINTFLIKE2*/
272 272 static void fas_printf(struct fas *fas, const char *fmt, ...);
273 273 static void fas_printstate(struct fas *fas, char *msg);
274 274 static void fas_dump_cmd(struct fas *fas, struct fas_cmd *sp);
275 275 static void fas_short_dump_cmd(struct fas *fas, struct fas_cmd *sp);
276 276 static char *fas_state_name(ushort_t state);
277 277
278 278 static void fas_makeproxy_cmd(struct fas_cmd *sp,
279 279 struct scsi_address *ap, struct scsi_pkt *pkt, int nmsg, ...);
280 280 static int fas_do_proxy_cmd(struct fas *fas, struct fas_cmd *sp,
281 281 struct scsi_address *ap, char *what);
282 282
283 283 static void fas_internal_reset(struct fas *fas, int reset_action);
284 284 static int fas_alloc_active_slots(struct fas *fas, int slot, int flag);
285 285
286 286 static int fas_abort_curcmd(struct fas *fas);
287 287 static int fas_abort_cmd(struct fas *fas, struct fas_cmd *sp, int slot);
288 288 static int fas_do_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
289 289 static int fas_do_scsi_reset(struct scsi_address *ap, int level);
290 290 static int fas_remove_from_readyQ(struct fas *fas, struct fas_cmd *sp,
291 291 int slot);
292 292 static void fas_flush_readyQ(struct fas *fas, int slot);
293 293 static void fas_flush_tagQ(struct fas *fas, int slot);
294 294 static void fas_flush_cmd(struct fas *fas, struct fas_cmd *sp,
295 295 uchar_t reason, uint_t stat);
296 296 static int fas_abort_connected_cmd(struct fas *fas, struct fas_cmd *sp,
297 297 uchar_t msg);
298 298 static int fas_abort_disconnected_cmd(struct fas *fas, struct scsi_address *ap,
299 299 struct fas_cmd *sp, uchar_t msg, int slot);
300 300 static void fas_mark_packets(struct fas *fas, int slot, uchar_t reason,
301 301 uint_t stat);
302 302 static void fas_set_pkt_reason(struct fas *fas, struct fas_cmd *sp,
303 303 uchar_t reason, uint_t stat);
304 304
305 305 static int fas_reset_bus(struct fas *fas);
306 306 static int fas_reset_recovery(struct fas *fas);
307 307 static int fas_reset_connected_cmd(struct fas *fas, struct scsi_address *ap);
308 308 static int fas_reset_disconnected_cmd(struct fas *fas, struct scsi_address *ap);
309 309 static void fas_start_watch_reset_delay(struct fas *);
310 310 static void fas_setup_reset_delay(struct fas *fas);
311 311 static void fas_watch_reset_delay(void *arg);
312 312 static int fas_watch_reset_delay_subr(struct fas *fas);
313 313 static void fas_reset_cleanup(struct fas *fas, int slot);
314 314 static int fas_scsi_reset_notify(struct scsi_address *ap, int flag,
315 315 void (*callback)(caddr_t), caddr_t arg);
316 316 static int fas_scsi_quiesce(dev_info_t *hba_dip);
317 317 static int fas_scsi_unquiesce(dev_info_t *hba_dip);
318 318
319 319 static void fas_set_throttles(struct fas *fas, int slot,
320 320 int n, int what);
321 321 static void fas_set_all_lun_throttles(struct fas *fas, int slot, int what);
322 322 static void fas_full_throttle(struct fas *fas, int slot);
323 323 static void fas_remove_cmd(struct fas *fas, struct fas_cmd *sp, int timeout);
324 324 static void fas_decrement_ncmds(struct fas *fas, struct fas_cmd *sp);
325 325
326 326 static int fas_quiesce_bus(struct fas *fas);
327 327 static int fas_unquiesce_bus(struct fas *fas);
328 328 static void fas_ncmds_checkdrain(void *arg);
329 329 static int fas_check_outstanding(struct fas *fas);
330 330
331 331 static int fas_create_arq_pkt(struct fas *fas, struct scsi_address *ap);
332 332 static int fas_delete_arq_pkt(struct fas *fas, struct scsi_address *ap);
333 333 static int fas_handle_sts_chk(struct fas *fas, struct fas_cmd *sp);
334 334 void fas_complete_arq_pkt(struct scsi_pkt *pkt);
335 335
336 336 void fas_call_pkt_comp(struct fas *fas, struct fas_cmd *sp);
337 337 void fas_empty_callbackQ(struct fas *fas);
338 338 int fas_init_callbacks(struct fas *fas);
339 339 void fas_destroy_callbacks(struct fas *fas);
340 340
341 341 static int fas_check_dma_error(struct fas *fas);
342 342 static int fas_init_chip(struct fas *fas, uchar_t id);
343 343
344 344 static void fas_read_fifo(struct fas *fas);
345 345 static void fas_write_fifo(struct fas *fas, uchar_t *buf, int length, int pad);
346 346
347 347 #ifdef FASDEBUG
348 348 static void fas_reg_cmd_write(struct fas *fas, uint8_t cmd);
349 349 static void fas_reg_write(struct fas *fas, volatile uint8_t *p, uint8_t what);
350 350 static uint8_t fas_reg_read(struct fas *fas, volatile uint8_t *p);
351 351
352 352 static void fas_dma_reg_write(struct fas *fas, volatile uint32_t *p,
353 353 uint32_t what);
354 354 static uint32_t fas_dma_reg_read(struct fas *fas, volatile uint32_t *p);
355 355 #else
356 356 #define fas_reg_cmd_write(fas, cmd) \
357 357 fas->f_reg->fas_cmd = (cmd), fas->f_last_cmd = (cmd)
358 358 #define fas_reg_write(fas, p, what) *(p) = (what)
359 359 #define fas_reg_read(fas, p) *(p)
360 360 #define fas_dma_reg_write(fas, p, what) *(p) = (what)
361 361 #define fas_dma_reg_read(fas, p) *(p)
362 362 #endif
363 363
364 364 /*
365 365 * autoconfiguration data and routines.
366 366 */
367 367 static int fas_attach(dev_info_t *dev, ddi_attach_cmd_t cmd);
368 368 static int fas_detach(dev_info_t *dev, ddi_detach_cmd_t cmd);
369 369 static int fas_dr_detach(dev_info_t *dev);
370 370
/*
 * dev_ops(9S) vector for the fas driver.  There are no character/block
 * (cb_ops) entry points; all I/O access is through the SCSA transport
 * table built in fas_attach().
 */
static struct dev_ops fas_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt */
	ddi_no_info,		/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	fas_attach,		/* attach */
	fas_detach,		/* detach */
	nodev,			/* reset */
	NULL,			/* driver operations (cb_ops): none */
	NULL,			/* bus operations */
	NULL,			/* power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};
385 385
/*
 * Loadable-module linkage: this module is a device driver, described
 * for mod_install() by the modldrv below.
 */
static struct modldrv modldrv = {
	&mod_driverops, /* Type of module. This one is a driver */
	"FAS SCSI HBA Driver", /* Name of the module. */
	&fas_ops,	/* driver ops */
};
391 391
/* Single-driver module linkage handed to mod_install()/mod_remove(). */
static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};
395 395
396 396 int
397 397 _init(void)
398 398 {
399 399 int rval;
400 400 /* CONSTCOND */
401 401 ASSERT(NO_COMPETING_THREADS);
402 402
403 403 rval = ddi_soft_state_init(&fas_state, sizeof (struct fas),
404 404 FAS_INITIAL_SOFT_SPACE);
405 405 if (rval != 0) {
406 406 return (rval);
407 407 }
408 408
409 409 if ((rval = scsi_hba_init(&modlinkage)) != 0) {
410 410 ddi_soft_state_fini(&fas_state);
411 411 return (rval);
412 412 }
413 413
414 414 mutex_init(&fas_global_mutex, NULL, MUTEX_DRIVER, NULL);
415 415 rw_init(&fas_global_rwlock, NULL, RW_DRIVER, NULL);
416 416
417 417 mutex_init(&fas_log_mutex, NULL, MUTEX_DRIVER, NULL);
418 418
419 419 if ((rval = mod_install(&modlinkage)) != 0) {
420 420 mutex_destroy(&fas_log_mutex);
421 421 rw_destroy(&fas_global_rwlock);
422 422 mutex_destroy(&fas_global_mutex);
423 423 ddi_soft_state_fini(&fas_state);
424 424 scsi_hba_fini(&modlinkage);
425 425 return (rval);
426 426 }
427 427
428 428 return (rval);
429 429 }
430 430
431 431 int
432 432 _fini(void)
433 433 {
434 434 int rval;
435 435 /* CONSTCOND */
436 436 ASSERT(NO_COMPETING_THREADS);
437 437
438 438 if ((rval = mod_remove(&modlinkage)) == 0) {
439 439 ddi_soft_state_fini(&fas_state);
440 440 scsi_hba_fini(&modlinkage);
441 441 mutex_destroy(&fas_log_mutex);
442 442 rw_destroy(&fas_global_rwlock);
443 443 mutex_destroy(&fas_global_mutex);
444 444 }
445 445 return (rval);
446 446 }
447 447
448 448 int
449 449 _info(struct modinfo *modinfop)
450 450 {
451 451 /* CONSTCOND */
452 452 ASSERT(NO_COMPETING_THREADS);
453 453
454 454 return (mod_info(&modlinkage, modinfop));
455 455 }
456 456
457 457 static int
458 458 fas_scsi_tgt_probe(struct scsi_device *sd,
459 459 int (*waitfunc)(void))
460 460 {
461 461 dev_info_t *dip = ddi_get_parent(sd->sd_dev);
462 462 int rval = SCSIPROBE_FAILURE;
463 463 scsi_hba_tran_t *tran;
464 464 struct fas *fas;
465 465 int tgt = sd->sd_address.a_target;
466 466
467 467 tran = ddi_get_driver_private(dip);
468 468 ASSERT(tran != NULL);
469 469 fas = TRAN2FAS(tran);
470 470
471 471 /*
472 472 * force renegotiation since inquiry cmds do not cause
473 473 * check conditions
474 474 */
475 475 mutex_enter(FAS_MUTEX(fas));
476 476 fas_force_renegotiation(fas, tgt);
477 477 mutex_exit(FAS_MUTEX(fas));
478 478 rval = scsi_hba_probe(sd, waitfunc);
479 479
480 480 /*
481 481 * the scsi-options precedence is:
482 482 * target-scsi-options highest
483 483 * device-type-scsi-options
484 484 * per bus scsi-options
485 485 * global scsi-options lowest
486 486 */
487 487 mutex_enter(FAS_MUTEX(fas));
488 488 if ((rval == SCSIPROBE_EXISTS) &&
489 489 ((fas->f_target_scsi_options_defined & (1 << tgt)) == 0)) {
490 490 int options;
491 491
492 492 options = scsi_get_device_type_scsi_options(dip, sd, -1);
493 493 if (options != -1) {
494 494 fas->f_target_scsi_options[tgt] = options;
495 495 fas_log(fas, CE_NOTE,
496 496 "?target%x-scsi-options = 0x%x\n", tgt,
497 497 fas->f_target_scsi_options[tgt]);
498 498 fas_force_renegotiation(fas, tgt);
499 499 }
500 500 }
501 501 mutex_exit(FAS_MUTEX(fas));
502 502
503 503 IPRINTF2("target%x-scsi-options= 0x%x\n",
504 504 tgt, fas->f_target_scsi_options[tgt]);
505 505
506 506 return (rval);
507 507 }
508 508
509 509
510 510 /*ARGSUSED*/
511 511 static int
512 512 fas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
513 513 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
514 514 {
515 515 return (((sd->sd_address.a_target < NTARGETS_WIDE) &&
516 516 (sd->sd_address.a_lun < NLUNS_PER_TARGET)) ?
517 517 DDI_SUCCESS : DDI_FAILURE);
518 518 }
519 519
520 520 /*ARGSUSED*/
521 521 static int
522 522 fas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
523 523 {
524 524 struct fas *fas = NULL;
525 525 volatile struct dma *dmar = NULL;
526 526 volatile struct fasreg *fasreg;
527 527 ddi_dma_attr_t *fas_dma_attr;
528 528 ddi_device_acc_attr_t dev_attr;
529 529
530 530 int instance, id, slot, i, hm_rev;
531 531 size_t rlen;
532 532 uint_t count;
533 533 char buf[64];
534 534 scsi_hba_tran_t *tran = NULL;
535 535 char intr_added = 0;
536 536 char mutex_init_done = 0;
537 537 char hba_attached = 0;
538 538 char bound_handle = 0;
539 539 char *prop_template = "target%d-scsi-options";
540 540 char prop_str[32];
541 541
542 542 /* CONSTCOND */
543 543 ASSERT(NO_COMPETING_THREADS);
544 544
545 545 switch (cmd) {
546 546 case DDI_ATTACH:
547 547 break;
548 548
549 549 case DDI_RESUME:
550 550 if ((tran = ddi_get_driver_private(dip)) == NULL)
551 551 return (DDI_FAILURE);
552 552
553 553 fas = TRAN2FAS(tran);
554 554 if (!fas) {
555 555 return (DDI_FAILURE);
556 556 }
557 557 /*
558 558 * Reset hardware and softc to "no outstanding commands"
559 559 * Note that a check condition can result on first command
560 560 * to a target.
561 561 */
562 562 mutex_enter(FAS_MUTEX(fas));
563 563 fas_internal_reset(fas,
564 564 FAS_RESET_SOFTC|FAS_RESET_FAS|FAS_RESET_DMA);
565 565
566 566 (void) fas_reset_bus(fas);
567 567
568 568 fas->f_suspended = 0;
569 569
570 570 /* make sure that things get started */
571 571 (void) fas_istart(fas);
572 572 fas_check_waitQ_and_mutex_exit(fas);
573 573
574 574 mutex_enter(&fas_global_mutex);
575 575 if (fas_timeout_id == 0) {
576 576 fas_timeout_id = timeout(fas_watch, NULL, fas_tick);
577 577 fas_timeout_initted = 1;
578 578 }
579 579 mutex_exit(&fas_global_mutex);
580 580
581 581 return (DDI_SUCCESS);
582 582
583 583 default:
584 584 return (DDI_FAILURE);
585 585 }
586 586
587 587 instance = ddi_get_instance(dip);
588 588
589 589 /*
590 590 * Since we know that some instantiations of this device can
591 591 * be plugged into slave-only SBus slots, check to see whether
592 592 * this is one such.
593 593 */
594 594 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
595 595 cmn_err(CE_WARN,
596 596 "fas%d: device in slave-only slot", instance);
597 597 return (DDI_FAILURE);
598 598 }
599 599
600 600 if (ddi_intr_hilevel(dip, 0)) {
601 601 /*
602 602 * Interrupt number '0' is a high-level interrupt.
603 603 * At this point you either add a special interrupt
604 604 * handler that triggers a soft interrupt at a lower level,
605 605 * or - more simply and appropriately here - you just
606 606 * fail the attach.
607 607 */
608 608 cmn_err(CE_WARN,
609 609 "fas%d: Device is using a hilevel intr", instance);
610 610 return (DDI_FAILURE);
611 611 }
612 612
613 613 /*
614 614 * Allocate softc information.
615 615 */
616 616 if (ddi_soft_state_zalloc(fas_state, instance) != DDI_SUCCESS) {
617 617 cmn_err(CE_WARN,
618 618 "fas%d: cannot allocate soft state", instance);
619 619 goto fail;
620 620 }
621 621
622 622 fas = (struct fas *)ddi_get_soft_state(fas_state, instance);
623 623
624 624 if (fas == NULL) {
625 625 goto fail;
626 626 }
627 627
628 628 /*
629 629 * map in device registers
630 630 */
631 631 dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
632 632 dev_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
633 633 dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
634 634
635 635 if (ddi_regs_map_setup(dip, (uint_t)0, (caddr_t *)&dmar,
636 636 (off_t)0, (off_t)sizeof (struct dma),
637 637 &dev_attr, &fas->f_dmar_acc_handle) != DDI_SUCCESS) {
638 638 cmn_err(CE_WARN, "fas%d: cannot map dma", instance);
639 639 goto fail;
640 640 }
641 641
642 642 if (ddi_regs_map_setup(dip, (uint_t)1, (caddr_t *)&fasreg,
643 643 (off_t)0, (off_t)sizeof (struct fasreg),
644 644 &dev_attr, &fas->f_regs_acc_handle) != DDI_SUCCESS) {
645 645 cmn_err(CE_WARN,
646 646 "fas%d: unable to map fas366 registers", instance);
647 647 goto fail;
648 648 }
649 649
650 650 fas_dma_attr = &dma_fasattr;
651 651 if (ddi_dma_alloc_handle(dip, fas_dma_attr,
652 652 DDI_DMA_SLEEP, NULL, &fas->f_dmahandle) != DDI_SUCCESS) {
653 653 cmn_err(CE_WARN,
654 654 "fas%d: cannot alloc dma handle", instance);
655 655 goto fail;
656 656 }
657 657
658 658 /*
659 659 * allocate cmdarea and its dma handle
660 660 */
661 661 if (ddi_dma_mem_alloc(fas->f_dmahandle,
662 662 (uint_t)2*FIFOSIZE,
663 663 &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
664 664 NULL, (caddr_t *)&fas->f_cmdarea, &rlen,
665 665 &fas->f_cmdarea_acc_handle) != DDI_SUCCESS) {
666 666 cmn_err(CE_WARN,
667 667 "fas%d: cannot alloc cmd area", instance);
668 668 goto fail;
669 669 }
670 670
671 671 fas->f_reg = fasreg;
672 672 fas->f_dma = dmar;
673 673 fas->f_instance = instance;
674 674
675 675 if (ddi_dma_addr_bind_handle(fas->f_dmahandle,
676 676 NULL, (caddr_t)fas->f_cmdarea,
677 677 rlen, DDI_DMA_RDWR|DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
678 678 &fas->f_dmacookie, &count) != DDI_DMA_MAPPED) {
679 679 cmn_err(CE_WARN,
680 680 "fas%d: cannot bind cmdarea", instance);
681 681 goto fail;
682 682 }
683 683 bound_handle++;
684 684
685 685 ASSERT(count == 1);
686 686
687 687 /*
688 688 * Allocate a transport structure
689 689 */
690 690 tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
691 691
692 692 /* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
693 693 scsi_size_clean(dip); /* SCSI_SIZE_CLEAN_VERIFY ok */
694 694
695 695 /*
696 696 * initialize transport structure
697 697 */
698 698 fas->f_tran = tran;
699 699 fas->f_dev = dip;
700 700 tran->tran_hba_private = fas;
701 701 tran->tran_tgt_private = NULL;
702 702 tran->tran_tgt_init = fas_scsi_tgt_init;
703 703 tran->tran_tgt_probe = fas_scsi_tgt_probe;
704 704 tran->tran_tgt_free = NULL;
705 705 tran->tran_start = fas_scsi_start;
706 706 tran->tran_abort = fas_scsi_abort;
707 707 tran->tran_reset = fas_scsi_reset;
708 708 tran->tran_getcap = fas_scsi_getcap;
709 709 tran->tran_setcap = fas_scsi_setcap;
710 710 tran->tran_init_pkt = fas_scsi_init_pkt;
711 711 tran->tran_destroy_pkt = fas_scsi_destroy_pkt;
712 712 tran->tran_dmafree = fas_scsi_dmafree;
713 713 tran->tran_sync_pkt = fas_scsi_sync_pkt;
714 714 tran->tran_reset_notify = fas_scsi_reset_notify;
715 715 tran->tran_get_bus_addr = NULL;
716 716 tran->tran_get_name = NULL;
717 717 tran->tran_quiesce = fas_scsi_quiesce;
718 718 tran->tran_unquiesce = fas_scsi_unquiesce;
719 719 tran->tran_bus_reset = NULL;
720 720 tran->tran_add_eventcall = NULL;
721 721 tran->tran_get_eventcookie = NULL;
722 722 tran->tran_post_event = NULL;
723 723 tran->tran_remove_eventcall = NULL;
724 724
725 725 fas->f_force_async = 0;
726 726
727 727 /*
728 728 * disable tagged queuing and wide for all targets
729 729 * (will be enabled by target driver if required)
730 730 * sync is enabled by default
731 731 */
732 732 fas->f_nowide = fas->f_notag = ALL_TARGETS;
733 733 fas->f_force_narrow = ALL_TARGETS;
734 734
735 735 /*
736 736 * By default we assume embedded devices and save time
737 737 * checking for timeouts in fas_watch() by skipping
738 738 * the rest of luns
739 739 * If we're talking to any non-embedded devices,
740 740 * we can't cheat and skip over non-zero luns anymore
741 741 * in fas_watch() and fas_ustart().
742 742 */
743 743 fas->f_dslot = NLUNS_PER_TARGET;
744 744
745 745 /*
746 746 * f_active is used for saving disconnected cmds;
747 747 * For tagged targets, we need to increase the size later
748 748 * Only allocate for Lun == 0, if we probe a lun > 0 then
749 749 * we allocate an active structure
750 750 * If TQ gets enabled then we need to increase the size
751 751 * to hold 256 cmds
752 752 */
753 753 for (slot = 0; slot < N_SLOTS; slot += NLUNS_PER_TARGET) {
754 754 (void) fas_alloc_active_slots(fas, slot, KM_SLEEP);
755 755 }
756 756
757 757 /*
758 758 * initialize the qfull retry counts
759 759 */
760 760 for (i = 0; i < NTARGETS_WIDE; i++) {
761 761 fas->f_qfull_retries[i] = QFULL_RETRIES;
762 762 fas->f_qfull_retry_interval[i] =
763 763 drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
764 764
765 765 }
766 766
767 767 /*
768 768 * Initialize throttles.
769 769 */
770 770 fas_set_throttles(fas, 0, N_SLOTS, MAX_THROTTLE);
771 771
772 772 /*
773 773 * Initialize mask of deferred property updates
774 774 */
775 775 fas->f_props_update = 0;
776 776
777 777 /*
778 778 * set host ID
779 779 */
780 780 fas->f_fasconf = DEFAULT_HOSTID;
781 781 id = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "initiator-id", -1);
782 782 if (id == -1) {
783 783 id = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
784 784 "scsi-initiator-id", -1);
785 785 }
786 786 if (id != DEFAULT_HOSTID && id >= 0 && id < NTARGETS_WIDE) {
787 787 fas_log(fas, CE_NOTE, "?initiator SCSI ID now %d\n", id);
788 788 fas->f_fasconf = (uchar_t)id;
789 789 }
790 790
791 791 /*
792 792 * find the burstsize and reduce ours if necessary
793 793 */
794 794 fas->f_dma_attr = fas_dma_attr;
795 795 fas->f_dma_attr->dma_attr_burstsizes &=
796 796 ddi_dma_burstsizes(fas->f_dmahandle);
797 797
798 798 #ifdef FASDEBUG
799 799 fas->f_dma_attr->dma_attr_burstsizes &= fas_burstsizes_limit;
800 800 IPRINTF1("dma burstsize=%x\n", fas->f_dma_attr->dma_attr_burstsizes);
801 801 #endif
802 802 /*
803 803 * Attach this instance of the hba
804 804 */
805 805 if (scsi_hba_attach_setup(dip, fas->f_dma_attr, tran, 0) !=
806 806 DDI_SUCCESS) {
807 807 fas_log(fas, CE_WARN, "scsi_hba_attach_setup failed");
808 808 goto fail;
809 809 }
810 810 hba_attached++;
811 811
812 812 /*
813 813 * if scsi-options property exists, use it
814 814 */
815 815 fas->f_scsi_options = ddi_prop_get_int(DDI_DEV_T_ANY,
816 816 dip, 0, "scsi-options", DEFAULT_SCSI_OPTIONS);
817 817
818 818 /*
819 819 * if scsi-selection-timeout property exists, use it
820 820 */
821 821 fas_selection_timeout = ddi_prop_get_int(DDI_DEV_T_ANY,
822 822 dip, 0, "scsi-selection-timeout", SCSI_DEFAULT_SELECTION_TIMEOUT);
823 823
824 824 /*
825 825 * if hm-rev property doesn't exist, use old scheme for rev
826 826 */
827 827 hm_rev = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
828 828 "hm-rev", -1);
829 829
830 830 if (hm_rev == 0xa0 || hm_rev == -1) {
831 831 if (DMAREV(dmar) != 0) {
832 832 fas->f_hm_rev = 0x20;
833 833 fas_log(fas, CE_WARN,
834 834 "obsolete rev 2.0 FEPS chip, "
835 835 "possible data corruption");
836 836 } else {
837 837 fas->f_hm_rev = 0x10;
838 838 fas_log(fas, CE_WARN,
839 839 "obsolete and unsupported rev 1.0 FEPS chip");
840 840 goto fail;
841 841 }
842 842 } else if (hm_rev == 0x20) {
843 843 fas->f_hm_rev = 0x21;
844 844 fas_log(fas, CE_WARN, "obsolete rev 2.1 FEPS chip");
845 845 } else {
846 846 fas->f_hm_rev = (uchar_t)hm_rev;
847 847 fas_log(fas, CE_NOTE, "?rev %x.%x FEPS chip\n",
848 848 (hm_rev >> 4) & 0xf, hm_rev & 0xf);
849 849 }
850 850
851 851 if ((fas->f_scsi_options & SCSI_OPTIONS_SYNC) == 0) {
852 852 fas->f_nosync = ALL_TARGETS;
853 853 }
854 854
855 855 if ((fas->f_scsi_options & SCSI_OPTIONS_WIDE) == 0) {
856 856 fas->f_nowide = ALL_TARGETS;
857 857 }
858 858
859 859 /*
860 860 * if target<n>-scsi-options property exists, use it;
861 861 * otherwise use the f_scsi_options
862 862 */
863 863 for (i = 0; i < NTARGETS_WIDE; i++) {
864 864 (void) sprintf(prop_str, prop_template, i);
865 865 fas->f_target_scsi_options[i] = ddi_prop_get_int(
866 866 DDI_DEV_T_ANY, dip, 0, prop_str, -1);
867 867
868 868 if (fas->f_target_scsi_options[i] != -1) {
869 869 fas_log(fas, CE_NOTE, "?target%x-scsi-options=0x%x\n",
870 870 i, fas->f_target_scsi_options[i]);
871 871 fas->f_target_scsi_options_defined |= 1 << i;
872 872 } else {
873 873 fas->f_target_scsi_options[i] = fas->f_scsi_options;
874 874 }
875 875 if (((fas->f_target_scsi_options[i] &
876 876 SCSI_OPTIONS_DR) == 0) &&
877 877 (fas->f_target_scsi_options[i] & SCSI_OPTIONS_TAG)) {
878 878 fas->f_target_scsi_options[i] &= ~SCSI_OPTIONS_TAG;
879 879 fas_log(fas, CE_WARN,
880 880 "Disabled TQ since disconnects are disabled");
881 881 }
882 882 }
883 883
884 884 fas->f_scsi_tag_age_limit =
885 885 ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "scsi-tag-age-limit",
886 886 DEFAULT_TAG_AGE_LIMIT);
887 887
888 888 fas->f_scsi_reset_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
889 889 dip, 0, "scsi-reset-delay", SCSI_DEFAULT_RESET_DELAY);
890 890 if (fas->f_scsi_reset_delay == 0) {
891 891 fas_log(fas, CE_NOTE,
892 892 "scsi_reset_delay of 0 is not recommended,"
893 893 " resetting to SCSI_DEFAULT_RESET_DELAY\n");
894 894 fas->f_scsi_reset_delay = SCSI_DEFAULT_RESET_DELAY;
895 895 }
896 896
897 897 /*
898 898 * get iblock cookie and initialize mutexes
899 899 */
900 900 if (ddi_get_iblock_cookie(dip, (uint_t)0, &fas->f_iblock)
901 901 != DDI_SUCCESS) {
902 902 cmn_err(CE_WARN, "fas_attach: cannot get iblock cookie");
903 903 goto fail;
904 904 }
905 905
906 906 mutex_init(&fas->f_mutex, NULL, MUTEX_DRIVER, fas->f_iblock);
907 907 cv_init(&fas->f_cv, NULL, CV_DRIVER, NULL);
908 908
909 909 /*
910 910 * initialize mutex for waitQ
911 911 */
912 912 mutex_init(&fas->f_waitQ_mutex, NULL, MUTEX_DRIVER, fas->f_iblock);
913 913 mutex_init_done++;
914 914
915 915 /*
916 916 * initialize callback mechanism (immediate callback)
917 917 */
918 918 mutex_enter(&fas_global_mutex);
919 919 if (fas_init_callbacks(fas)) {
920 920 mutex_exit(&fas_global_mutex);
921 921 goto fail;
922 922 }
923 923 mutex_exit(&fas_global_mutex);
924 924
925 925 /*
926 926 * kstat_intr support
927 927 */
928 928 (void) sprintf(buf, "fas%d", instance);
929 929 fas->f_intr_kstat = kstat_create("fas", instance, buf, "controller", \
930 930 KSTAT_TYPE_INTR, 1, KSTAT_FLAG_PERSISTENT);
931 931 if (fas->f_intr_kstat)
932 932 kstat_install(fas->f_intr_kstat);
933 933
934 934 /*
935 935 * install interrupt handler
936 936 */
937 937 mutex_enter(FAS_MUTEX(fas));
938 938 if (ddi_add_intr(dip, (uint_t)0, &fas->f_iblock, NULL,
939 939 fas_intr, (caddr_t)fas)) {
940 940 cmn_err(CE_WARN, "fas: cannot add intr");
941 941 mutex_exit(FAS_MUTEX(fas));
942 942 goto fail;
943 943 }
944 944 intr_added++;
945 945
946 946 /*
947 947 * initialize fas chip
948 948 */
949 949 if (fas_init_chip(fas, id)) {
950 950 cmn_err(CE_WARN, "fas: cannot initialize");
951 951 mutex_exit(FAS_MUTEX(fas));
952 952 goto fail;
953 953 }
954 954 mutex_exit(FAS_MUTEX(fas));
955 955
956 956 /*
957 957 * create kmem cache for packets
958 958 */
959 959 (void) sprintf(buf, "fas%d_cache", instance);
960 960 fas->f_kmem_cache = kmem_cache_create(buf,
961 961 EXTCMD_SIZE, 8,
962 962 fas_kmem_cache_constructor, fas_kmem_cache_destructor,
963 963 NULL, (void *)fas, NULL, 0);
964 964 if (fas->f_kmem_cache == NULL) {
965 965 cmn_err(CE_WARN, "fas: cannot create kmem_cache");
966 966 goto fail;
967 967 }
968 968
969 969 /*
970 970 * at this point, we are not going to fail the attach
971 971 * so there is no need to undo the rest:
972 972 *
973 973 * add this fas to the list, this makes debugging easier
974 974 * and fas_watch() needs it to walk thru all fas's
975 975 */
976 976 rw_enter(&fas_global_rwlock, RW_WRITER);
977 977 if (fas_head == NULL) {
978 978 fas_head = fas;
979 979 } else {
980 980 fas_tail->f_next = fas;
981 981 }
982 982 fas_tail = fas; /* point to last fas in list */
983 983 rw_exit(&fas_global_rwlock);
984 984
985 985 /*
986 986 * there is one watchdog handler for all driver instances.
↓ open down ↓ |
986 lines elided |
↑ open up ↑ |
987 987 * start the watchdog if it hasn't been done yet
988 988 */
989 989 mutex_enter(&fas_global_mutex);
990 990 if (fas_scsi_watchdog_tick == 0) {
991 991 fas_scsi_watchdog_tick = ddi_prop_get_int(DDI_DEV_T_ANY,
992 992 dip, 0, "scsi-watchdog-tick", DEFAULT_WD_TICK);
993 993 if (fas_scsi_watchdog_tick != DEFAULT_WD_TICK) {
994 994 fas_log(fas, CE_NOTE, "?scsi-watchdog-tick=%d\n",
995 995 fas_scsi_watchdog_tick);
996 996 }
997 - fas_tick = drv_usectohz((clock_t)
998 - fas_scsi_watchdog_tick * 1000000);
997 + fas_tick = drv_sectohz((clock_t)fas_scsi_watchdog_tick);
999 998 IPRINTF2("fas scsi watchdog tick=%x, fas_tick=%lx\n",
1000 999 fas_scsi_watchdog_tick, fas_tick);
1001 1000 if (fas_timeout_id == 0) {
1002 1001 fas_timeout_id = timeout(fas_watch, NULL, fas_tick);
1003 1002 fas_timeout_initted = 1;
1004 1003 }
1005 1004 }
1006 1005 mutex_exit(&fas_global_mutex);
1007 1006
1008 1007 ddi_report_dev(dip);
1009 1008
1010 1009 return (DDI_SUCCESS);
1011 1010
1012 1011 fail:
1013 1012 cmn_err(CE_WARN, "fas%d: cannot attach", instance);
1014 1013 if (fas) {
1015 1014 for (slot = 0; slot < N_SLOTS; slot++) {
1016 1015 struct f_slots *active = fas->f_active[slot];
1017 1016 if (active) {
1018 1017 kmem_free(active, active->f_size);
1019 1018 fas->f_active[slot] = NULL;
1020 1019 }
1021 1020 }
1022 1021 if (mutex_init_done) {
1023 1022 mutex_destroy(&fas->f_mutex);
1024 1023 mutex_destroy(&fas->f_waitQ_mutex);
1025 1024 cv_destroy(&fas->f_cv);
1026 1025 }
1027 1026 if (intr_added) {
1028 1027 ddi_remove_intr(dip, (uint_t)0, fas->f_iblock);
1029 1028 }
1030 1029 /*
1031 1030 * kstat_intr support
1032 1031 */
1033 1032 if (fas->f_intr_kstat) {
1034 1033 kstat_delete(fas->f_intr_kstat);
1035 1034 }
1036 1035 if (hba_attached) {
1037 1036 (void) scsi_hba_detach(dip);
1038 1037 }
1039 1038 if (tran) {
1040 1039 scsi_hba_tran_free(tran);
1041 1040 }
1042 1041 if (fas->f_kmem_cache) {
1043 1042 kmem_cache_destroy(fas->f_kmem_cache);
1044 1043 }
1045 1044 if (fas->f_cmdarea) {
1046 1045 if (bound_handle) {
1047 1046 (void) ddi_dma_unbind_handle(fas->f_dmahandle);
1048 1047 }
1049 1048 ddi_dma_mem_free(&fas->f_cmdarea_acc_handle);
1050 1049 }
1051 1050 if (fas->f_dmahandle) {
1052 1051 ddi_dma_free_handle(&fas->f_dmahandle);
1053 1052 }
1054 1053 fas_destroy_callbacks(fas);
1055 1054 if (fas->f_regs_acc_handle) {
1056 1055 ddi_regs_map_free(&fas->f_regs_acc_handle);
1057 1056 }
1058 1057 if (fas->f_dmar_acc_handle) {
1059 1058 ddi_regs_map_free(&fas->f_dmar_acc_handle);
1060 1059 }
1061 1060 ddi_soft_state_free(fas_state, instance);
1062 1061
1063 1062 ddi_remove_minor_node(dip, NULL);
1064 1063 }
1065 1064 return (DDI_FAILURE);
1066 1065 }
1067 1066
/*ARGSUSED*/
/*
 * detach(9E) entry point: dispatch on the detach command.
 *
 * DDI_DETACH is delegated to fas_dr_detach().  DDI_SUSPEND quiesces the
 * instance in place: outstanding commands are flushed with a bus reset
 * plus a short poll, dma/chip interrupts are disabled, per-instance
 * timeouts are cancelled, and -- if this was the last non-suspended
 * instance -- the driver-wide watchdog and reset-watch timeouts are
 * cancelled as well.
 */
static int
fas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct fas *fas, *nfas;
	scsi_hba_tran_t *tran;

	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	switch (cmd) {
	case DDI_DETACH:
		return (fas_dr_detach(dip));

	case DDI_SUSPEND:
		if ((tran = ddi_get_driver_private(dip)) == NULL)
			return (DDI_FAILURE);

		fas = TRAN2FAS(tran);
		if (!fas) {
			return (DDI_FAILURE);
		}

		mutex_enter(FAS_MUTEX(fas));

		fas->f_suspended = 1;

		/* flush any commands still in flight before suspending */
		if (fas->f_ncmds) {
			(void) fas_reset_bus(fas);
			(void) fas_dopoll(fas, SHORT_POLL_TIMEOUT);
		}
		/*
		 * disable dma and fas interrupt
		 */
		fas->f_dma_csr &= ~DMA_INTEN;
		fas->f_dma_csr &= ~DMA_ENDVMA;
		fas_dma_reg_write(fas, &fas->f_dma->dma_csr, fas->f_dma_csr);

		mutex_exit(FAS_MUTEX(fas));

		if (fas->f_quiesce_timeid) {
			(void) untimeout(fas->f_quiesce_timeid);
			fas->f_quiesce_timeid = 0;
		}

		if (fas->f_restart_cmd_timeid) {
			(void) untimeout(fas->f_restart_cmd_timeid);
			fas->f_restart_cmd_timeid = 0;
		}

		/* Last fas? */
		rw_enter(&fas_global_rwlock, RW_WRITER);
		for (nfas = fas_head; nfas; nfas = nfas->f_next) {
			if (!nfas->f_suspended) {
				/* another live instance; keep the watchdog */
				rw_exit(&fas_global_rwlock);
				return (DDI_SUCCESS);
			}
		}
		rw_exit(&fas_global_rwlock);

		/*
		 * All instances suspended: stop the global watchdog.  The
		 * id is cleared under fas_global_mutex BEFORE untimeout so
		 * a concurrently firing fas_watch() cannot reschedule it.
		 */
		mutex_enter(&fas_global_mutex);
		if (fas_timeout_id != 0) {
			timeout_id_t tid = fas_timeout_id;
			fas_timeout_id = 0;
			fas_timeout_initted = 0;
			mutex_exit(&fas_global_mutex);
			(void) untimeout(tid);
		} else {
			mutex_exit(&fas_global_mutex);
		}

		/* same pattern for the reset-delay watch timeout */
		mutex_enter(&fas_global_mutex);
		if (fas_reset_watch) {
			timeout_id_t tid = fas_reset_watch;
			fas_reset_watch = 0;
			mutex_exit(&fas_global_mutex);
			(void) untimeout(tid);
		} else {
			mutex_exit(&fas_global_mutex);
		}

		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
	_NOTE(NOT_REACHED)
	/* NOTREACHED */
}
1157 1156
/*
 * DDI_DETACH handler: full teardown of one fas instance.
 *
 * Order matters: interrupts are disabled and the handler removed first,
 * then the instance is unlinked from the global fas list (so fas_watch()
 * can no longer find it), then per-slot command state, timeouts, ARQ
 * packets, locks, dma and register mappings are released, and finally
 * the hba-framework linkage and soft state are freed.
 */
static int
fas_dr_detach(dev_info_t *dip)
{
	struct fas *fas, *f;
	scsi_hba_tran_t *tran;
	short slot;
	int i, j;

	if ((tran = ddi_get_driver_private(dip)) == NULL)
		return (DDI_FAILURE);

	fas = TRAN2FAS(tran);
	if (!fas) {
		return (DDI_FAILURE);
	}

	/*
	 * disable interrupts
	 */
	fas->f_dma_csr &= ~DMA_INTEN;
	fas->f_dma->dma_csr = fas->f_dma_csr;
	ddi_remove_intr(dip, (uint_t)0, fas->f_iblock);

	/*
	 * Remove device instance from the global linked list
	 */
	rw_enter(&fas_global_rwlock, RW_WRITER);

	if (fas_head == fas) {
		/* f tracks the predecessor for the fas_tail fixup below */
		f = fas_head = fas->f_next;
	} else {
		for (f = fas_head; f != (struct fas *)NULL; f = f->f_next) {
			if (f->f_next == fas) {
				f->f_next = fas->f_next;
				break;
			}
		}

		/*
		 * Instance not in softc list. Since the
		 * instance is not there in softc list, don't
		 * enable interrupts, the instance is effectively
		 * unusable.
		 */
		if (f == (struct fas *)NULL) {
			cmn_err(CE_WARN, "fas_dr_detach: fas instance not"
			    " in softc list!");
			rw_exit(&fas_global_rwlock);
			return (DDI_FAILURE);
		}


	}

	if (fas_tail == fas)
		fas_tail = f;

	rw_exit(&fas_global_rwlock);

	if (fas->f_intr_kstat)
		kstat_delete(fas->f_intr_kstat);

	fas_destroy_callbacks(fas);

	scsi_hba_reset_notify_tear_down(fas->f_reset_notify_listf);

	mutex_enter(&fas_global_mutex);
	/*
	 * destroy any outstanding tagged command info
	 */
	for (slot = 0; slot < N_SLOTS; slot++) {
		struct f_slots *active = fas->f_active[slot];
		if (active) {
			ushort_t tag;
			for (tag = 0; tag < active->f_n_slots; tag++) {
				struct fas_cmd *sp = active->f_slot[tag];
				if (sp) {
					struct scsi_pkt *pkt = sp->cmd_pkt;
					if (pkt) {
						(void) fas_scsi_destroy_pkt(
						    &pkt->pkt_address, pkt);
					}
					/* sp freed in fas_scsi_destroy_pkt */
					active->f_slot[tag] = NULL;
				}
			}
			kmem_free(active, active->f_size);
			fas->f_active[slot] = NULL;
		}
		ASSERT(fas->f_tcmds[slot] == 0);
	}

	/*
	 * disallow timeout thread rescheduling
	 */
	fas->f_flags |= FAS_FLG_NOTIMEOUTS;
	mutex_exit(&fas_global_mutex);

	if (fas->f_quiesce_timeid) {
		(void) untimeout(fas->f_quiesce_timeid);
	}

	/*
	 * last fas? ... if active, CANCEL watch threads.
	 */
	mutex_enter(&fas_global_mutex);
	if (fas_head == (struct fas *)NULL) {
		if (fas_timeout_initted) {
			timeout_id_t tid = fas_timeout_id;
			fas_timeout_initted = 0;
			fas_timeout_id = 0;		/* don't resched */
			mutex_exit(&fas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&fas_global_mutex);
		}

		if (fas_reset_watch) {
			mutex_exit(&fas_global_mutex);
			(void) untimeout(fas_reset_watch);
			mutex_enter(&fas_global_mutex);
			fas_reset_watch = 0;
		}
	}
	mutex_exit(&fas_global_mutex);

	if (fas->f_restart_cmd_timeid) {
		(void) untimeout(fas->f_restart_cmd_timeid);
		fas->f_restart_cmd_timeid = 0;
	}

	/*
	 * destroy outstanding ARQ pkts
	 */
	for (i = 0; i < NTARGETS_WIDE; i++) {
		for (j = 0; j < NLUNS_PER_TARGET; j++) {
			/* NOTE: this 'slot' shadows the outer short slot */
			int slot = i * NLUNS_PER_TARGET | j;
			if (fas->f_arq_pkt[slot]) {
				struct scsi_address sa;
				sa.a_hba_tran = NULL;	/* not used */
				sa.a_target = (ushort_t)i;
				sa.a_lun = (uchar_t)j;
				(void) fas_delete_arq_pkt(fas, &sa);
			}
		}
	}

	/*
	 * Remove device MT locks and CV
	 */
	mutex_destroy(&fas->f_waitQ_mutex);
	mutex_destroy(&fas->f_mutex);
	cv_destroy(&fas->f_cv);

	/*
	 * Release miscellaneous device resources
	 */

	if (fas->f_kmem_cache) {
		kmem_cache_destroy(fas->f_kmem_cache);
	}

	if (fas->f_cmdarea != (uchar_t *)NULL) {
		(void) ddi_dma_unbind_handle(fas->f_dmahandle);
		ddi_dma_mem_free(&fas->f_cmdarea_acc_handle);
	}

	if (fas->f_dmahandle != (ddi_dma_handle_t)NULL) {
		ddi_dma_free_handle(&fas->f_dmahandle);
	}

	if (fas->f_regs_acc_handle) {
		ddi_regs_map_free(&fas->f_regs_acc_handle);
	}
	if (fas->f_dmar_acc_handle) {
		ddi_regs_map_free(&fas->f_dmar_acc_handle);
	}

	/*
	 * Remove properties created during attach()
	 */
	ddi_prop_remove_all(dip);

	/*
	 * Delete the DMA limits, transport vectors and remove the device
	 * links to the scsi_transport layer.
	 *	-- ddi_set_driver_private(dip, NULL)
	 */
	(void) scsi_hba_detach(dip);

	/*
	 * Free the scsi_transport structure for this device.
	 */
	scsi_hba_tran_free(tran);

	ddi_soft_state_free(fas_state, ddi_get_instance(dip));

	return (DDI_SUCCESS);
}
1356 1355
/*
 * Quiesce the SCSI bus: hold all throttles so no new commands start,
 * then, if commands are still outstanding, arm the drain-check timeout
 * and wait (interruptibly) for fas_ncmds_checkdrain() to signal that
 * the bus has drained.  Returns 0 when quiesced, -1 if the wait was
 * interrupted by a signal (in which case throttles are restored and
 * command processing is restarted).
 */
static int
fas_quiesce_bus(struct fas *fas)
{
	mutex_enter(FAS_MUTEX(fas));
	IPRINTF("fas_quiesce: QUIESCEing\n");
	IPRINTF3("fas_quiesce: ncmds (%d) ndisc (%d) state (%d)\n",
	    fas->f_ncmds, fas->f_ndisc, fas->f_softstate);
	fas_set_throttles(fas, 0, N_SLOTS, HOLD_THROTTLE);
	if (fas_check_outstanding(fas)) {
		fas->f_softstate |= FAS_SS_DRAINING;
		fas->f_quiesce_timeid = timeout(fas_ncmds_checkdrain,
		    fas, drv_sectohz(FAS_QUIESCE_TIMEOUT));
		if (cv_wait_sig(FAS_CV(fas), FAS_MUTEX(fas)) == 0) {
			/*
			 * quiesce has been interrupted.
			 */
			IPRINTF("fas_quiesce: abort QUIESCE\n");
			fas->f_softstate &= ~FAS_SS_DRAINING;
			fas_set_throttles(fas, 0, N_SLOTS, MAX_THROTTLE);
			(void) fas_istart(fas);
			/* cancel the drain-check timeout if still armed */
			if (fas->f_quiesce_timeid != 0) {
				mutex_exit(FAS_MUTEX(fas));
#ifndef __lock_lint	/* warlock complains but there is a NOTE on this */
				(void) untimeout(fas->f_quiesce_timeid);
				fas->f_quiesce_timeid = 0;
#endif
				return (-1);
			}
			mutex_exit(FAS_MUTEX(fas));
			return (-1);
		} else {
			IPRINTF("fas_quiesce: bus is QUIESCED\n");
			ASSERT(fas->f_quiesce_timeid == 0);
			fas->f_softstate &= ~FAS_SS_DRAINING;
			fas->f_softstate |= FAS_SS_QUIESCED;
			mutex_exit(FAS_MUTEX(fas));
			return (0);
		}
	}
	IPRINTF("fas_quiesce: bus was not busy QUIESCED\n");
	mutex_exit(FAS_MUTEX(fas));
	return (0);
}
1400 1399
/*
 * Undo a quiesce: clear the quiesced state, restore all throttles to
 * MAX_THROTTLE and restart command processing via fas_istart().
 * Always returns 0.
 */
static int
fas_unquiesce_bus(struct fas *fas)
{
	mutex_enter(FAS_MUTEX(fas));
	fas->f_softstate &= ~FAS_SS_QUIESCED;
	fas_set_throttles(fas, 0, N_SLOTS, MAX_THROTTLE);
	(void) fas_istart(fas);
	IPRINTF("fas_quiesce: bus has been UNQUIESCED\n");
	mutex_exit(FAS_MUTEX(fas));

	return (0);
}
1413 1412
/*
 * invoked from timeout() to check the number of outstanding commands
 */
/*
 * Drain-check callback armed by fas_quiesce_bus().  While the DRAINING
 * state is set: if the bus has drained, wake the waiter on FAS_CV;
 * otherwise re-hold the throttles (they may have been reset by a bus
 * reset or fas_runpoll) and re-arm this timeout.
 */
static void
fas_ncmds_checkdrain(void *arg)
{
	struct fas *fas = arg;

	mutex_enter(FAS_MUTEX(fas));
	IPRINTF3("fas_checkdrain: ncmds (%d) ndisc (%d) state (%d)\n",
	    fas->f_ncmds, fas->f_ndisc, fas->f_softstate);
	if (fas->f_softstate & FAS_SS_DRAINING) {
		fas->f_quiesce_timeid = 0;
		if (fas_check_outstanding(fas) == 0) {
			IPRINTF("fas_drain: bus has drained\n");
			/* wake fas_quiesce_bus() blocked on FAS_CV */
			cv_signal(FAS_CV(fas));
		} else {
			/*
			 * throttle may have been reset by a bus reset
			 * or fas_runpoll()
			 * XXX shouldn't be necessary
			 */
			fas_set_throttles(fas, 0, N_SLOTS, HOLD_THROTTLE);
			IPRINTF("fas_drain: rescheduling timeout\n");
			fas->f_quiesce_timeid = timeout(fas_ncmds_checkdrain,
			    fas, drv_sectohz(FAS_QUIESCE_TIMEOUT));
		}
	}
	mutex_exit(FAS_MUTEX(fas));
}
1444 1443
1445 1444 static int
1446 1445 fas_check_outstanding(struct fas *fas)
1447 1446 {
1448 1447 uint_t slot;
1449 1448 uint_t d = ((fas->f_dslot == 0)? 1 : fas->f_dslot);
1450 1449 int ncmds = 0;
1451 1450
1452 1451 ASSERT(mutex_owned(FAS_MUTEX(fas)));
1453 1452
1454 1453 for (slot = 0; slot < N_SLOTS; slot += d)
1455 1454 ncmds += fas->f_tcmds[slot];
1456 1455
1457 1456 return (ncmds);
1458 1457 }
1459 1458
1460 1459
#ifdef FASDEBUG
/*
 * fas register read/write functions with tracing
 */
/*
 * Append one 4-word trace record (type, regno, value, timestamp) to the
 * circular f_reg_trace[] buffer and store a 0xff end marker at the new
 * current index; the index wraps to 0 once it reaches
 * REG_TRACE_BUF_SIZE.
 * NOTE(review): the end marker is written before the wrap check, so the
 * array presumably has slack beyond REG_TRACE_BUF_SIZE entries --
 * confirm against the f_reg_trace declaration.  gethrtime()'s 64-bit
 * value may be truncated to the trace element type.
 */
static void
fas_reg_tracing(struct fas *fas, int type, int regno, uint32_t what)
{
	fas->f_reg_trace[fas->f_reg_trace_index++] = type;
	fas->f_reg_trace[fas->f_reg_trace_index++] = regno;
	fas->f_reg_trace[fas->f_reg_trace_index++] = what;
	fas->f_reg_trace[fas->f_reg_trace_index++] = gethrtime();
	fas->f_reg_trace[fas->f_reg_trace_index] = 0xff;
	if (fas->f_reg_trace_index >= REG_TRACE_BUF_SIZE) {
		fas->f_reg_trace_index = 0;
	}
}
1477 1476
/*
 * Write 'cmd' to the chip command register, remember it in f_last_cmd,
 * record a type-0 trace entry and bump the command-write counter.
 */
static void
fas_reg_cmd_write(struct fas *fas, uint8_t cmd)
{
	volatile struct fasreg *fasreg = fas->f_reg;
	/* register offset relative to the start of the register block */
	int regno = (uintptr_t)&fasreg->fas_cmd - (uintptr_t)fasreg;

	fasreg->fas_cmd = cmd;
	fas->f_last_cmd = cmd;

	EPRINTF1("issuing cmd %x\n", (uchar_t)cmd);
	fas_reg_tracing(fas, 0, regno, cmd);

	fas->f_reg_cmds++;
}
1492 1491
/*
 * Write 'what' to the chip register at 'p', record a type-1 trace
 * entry and bump the register-write counter.
 */
static void
fas_reg_write(struct fas *fas, volatile uint8_t *p, uint8_t what)
{
	/* register offset relative to the start of the register block */
	int regno = (uintptr_t)p - (uintptr_t)fas->f_reg;

	*p = what;

	EPRINTF2("writing reg%x = %x\n", regno, what);
	fas_reg_tracing(fas, 1, regno, what);

	fas->f_reg_writes++;
}
1505 1504
/*
 * Read the chip register at 'p', record a type-2 trace entry, bump the
 * register-read counter and return the value read.
 */
static uint8_t
fas_reg_read(struct fas *fas, volatile uint8_t *p)
{
	uint8_t what;
	/* register offset relative to the start of the register block */
	int regno = (uintptr_t)p - (uintptr_t)fas->f_reg;

	what = *p;

	EPRINTF2("reading reg%x => %x\n", regno, what);
	fas_reg_tracing(fas, 2, regno, what);

	fas->f_reg_reads++;

	return (what);
}
1521 1520
/*
 * dma register access routines
 */
/*
 * Write 'what' to the dma engine register at 'p' and bump the dma-write
 * counter; under DMA_REG_TRACING also record a type-3 trace entry.
 */
static void
fas_dma_reg_write(struct fas *fas, volatile uint32_t *p, uint32_t what)
{
	*p = what;
	fas->f_reg_dma_writes++;

#ifdef DMA_REG_TRACING
{
	int regno = (uintptr_t)p - (uintptr_t)fas->f_dma;
	EPRINTF2("writing dma reg%x = %x\n", regno, what);
	fas_reg_tracing(fas, 3, regno, what);
}
#endif
}
1539 1538
/*
 * Read the dma engine register at 'p' and bump the dma-read counter;
 * under DMA_REG_TRACING also record a type-4 trace entry.
 */
static uint32_t
fas_dma_reg_read(struct fas *fas, volatile uint32_t *p)
{
	uint32_t what = *p;
	fas->f_reg_dma_reads++;

#ifdef DMA_REG_TRACING
{
	int regno = (uintptr_t)p - (uintptr_t)fas->f_dma;
	EPRINTF2("reading dma reg%x => %x\n", regno, what);
	fas_reg_tracing(fas, 4, regno, what);
}
#endif
	return (what);
}
#endif
1556 1555
/*
 * FIFO_EMPTY: nonzero when the chip fifo is empty (stat2 empty flag).
 * FIFO_CNT: number of bytes currently in the chip fifo.
 */
#define FIFO_EMPTY(fas) (fas_reg_read(fas, &fas->f_reg->fas_stat2) & \
	FAS_STAT2_EMPTY)
#define FIFO_CNT(fas) \
	(fas_reg_read(fas, &fas->f_reg->fas_fifo_flag) & FIFO_CNT_MASK)
1561 1560
#ifdef FASDEBUG
/*
 * Assert the SCSI ATN line via the chip CMD_SET_ATN command; under
 * FAS_TEST, drop into the debugger when fas_test_stop > 1.
 */
static void
fas_assert_atn(struct fas *fas)
{
	fas_reg_cmd_write(fas, CMD_SET_ATN);
#ifdef FAS_TEST
	if (fas_test_stop > 1)
		debug_enter("asserted atn");
#endif
}
#else
/* non-debug build: the plain chip command, no test hook */
#define fas_assert_atn(fas)  fas_reg_cmd_write(fas, CMD_SET_ATN)
#endif
1575 1574
/*
 * DMA macros; we use a shadow copy of the dma_csr to save unnecessary
 * reads
 */
/*
 * FAS_DMA_WRITE: program a memory -> scsi-bus transfer.  Loads the chip
 * transfer counter, issues 'cmd', then programs the dma engine count,
 * address (remembered in f_lastdma) and csr (shadow updated with
 * WRITE|ENDVMA|DSBL_DRAIN).  dma must not already be active (ENDVMA
 * clear is asserted).
 */
#define FAS_DMA_WRITE(fas, count, base, cmd) { \
	volatile struct fasreg *fasreg = fas->f_reg; \
	volatile struct dma *dmar = fas->f_dma; \
	ASSERT((fas_dma_reg_read(fas, &dmar->dma_csr) & DMA_ENDVMA) == 0); \
	SET_FAS_COUNT(fasreg, count); \
	fas_reg_cmd_write(fas, cmd); \
	fas_dma_reg_write(fas, &dmar->dma_count, count); \
	fas->f_dma_csr |= \
	    DMA_WRITE | DMA_ENDVMA | DMA_DSBL_DRAIN; \
	fas_dma_reg_write(fas, &dmar->dma_addr, (fas->f_lastdma = base)); \
	fas_dma_reg_write(fas, &dmar->dma_csr, fas->f_dma_csr); \
}

/*
 * FAS_DMA_WRITE_SETUP: same programming as FAS_DMA_WRITE but without
 * issuing a chip command or writing the final csr (transfer is started
 * later).
 */
#define FAS_DMA_WRITE_SETUP(fas, count, base) { \
	volatile struct fasreg *fasreg = fas->f_reg; \
	volatile struct dma *dmar = fas->f_dma; \
	ASSERT((fas_dma_reg_read(fas, &dmar->dma_csr) & DMA_ENDVMA) == 0); \
	SET_FAS_COUNT(fasreg, count); \
	fas_dma_reg_write(fas, &dmar->dma_count, count); \
	fas->f_dma_csr |= \
	    DMA_WRITE | DMA_ENDVMA | DMA_DSBL_DRAIN; \
	fas_dma_reg_write(fas, &dmar->dma_addr, (fas->f_lastdma = base)); \
}


/*
 * FAS_DMA_READ: program a scsi-bus -> memory transfer; chip counter
 * gets 'count' while the dma engine gets 'dmacount'.  DMA_WRITE is kept
 * clear in the shadow csr for the read direction.
 */
#define FAS_DMA_READ(fas, count, base, dmacount, cmd) { \
	volatile struct fasreg *fasreg = fas->f_reg; \
	volatile struct dma *dmar = fas->f_dma; \
	ASSERT((fas_dma_reg_read(fas, &dmar->dma_csr) & DMA_ENDVMA) == 0); \
	SET_FAS_COUNT(fasreg, count); \
	fas_reg_cmd_write(fas, cmd); \
	fas->f_dma_csr |= \
	    (fas->f_dma_csr & ~DMA_WRITE) | DMA_ENDVMA | DMA_DSBL_DRAIN; \
	fas_dma_reg_write(fas, &dmar->dma_count, dmacount); \
	fas_dma_reg_write(fas, &dmar->dma_addr, (fas->f_lastdma = base)); \
	fas_dma_reg_write(fas, &dmar->dma_csr, fas->f_dma_csr); \
}
1617 1616
/*
 * Reset and reinitialize the dma engine: pulse DMA_RESET, rebuild the
 * shadow csr (interrupts, two-cycle, parity/drain disable set;
 * ENDVMA/WRITE cleared), release the reset, write the shadow back and
 * clear the dma address register.
 */
static void
FAS_FLUSH_DMA(struct fas *fas)
{
	fas_dma_reg_write(fas, &fas->f_dma->dma_csr, DMA_RESET);
	fas->f_dma_csr |= (DMA_INTEN|DMA_TWO_CYCLE|DMA_DSBL_PARITY|
	    DMA_DSBL_DRAIN);
	fas->f_dma_csr &= ~(DMA_ENDVMA | DMA_WRITE);
	fas_dma_reg_write(fas, &fas->f_dma->dma_csr, 0);
	fas_dma_reg_write(fas, &fas->f_dma->dma_csr, fas->f_dma_csr);
	fas_dma_reg_write(fas, &fas->f_dma->dma_addr, 0);
}
1629 1628
/*
 * FAS_FLUSH_DMA_HARD checks on REQPEND before taking away the reset
 */
/*
 * Same reset/reinit sequence as FAS_FLUSH_DMA, but busy-waits for the
 * dma engine's REQPEND bit to clear before releasing the reset.
 */
static void
FAS_FLUSH_DMA_HARD(struct fas *fas)
{
	fas_dma_reg_write(fas, &fas->f_dma->dma_csr, DMA_RESET);
	fas->f_dma_csr |= (DMA_INTEN|DMA_TWO_CYCLE|DMA_DSBL_PARITY|
	    DMA_DSBL_DRAIN);
	fas->f_dma_csr &= ~(DMA_ENDVMA | DMA_WRITE);
	while (fas_dma_reg_read(fas, &fas->f_dma->dma_csr) & DMA_REQPEND)
		;
	fas_dma_reg_write(fas, &fas->f_dma->dma_csr, 0);
	fas_dma_reg_write(fas, &fas->f_dma->dma_csr, fas->f_dma_csr);
	fas_dma_reg_write(fas, &fas->f_dma->dma_addr, 0);
}
1646 1645
/*
 * update period, conf3, offset reg, if necessary
 */
/*
 * Write the sync period/offset/conf3 chip registers for 'target' only
 * when one of them differs from the last value written (cached in the
 * f_*_reg_last shadows), avoiding redundant register writes.  Relies
 * on a 'fasreg' variable being in scope at the expansion site.
 */
#define FAS_SET_PERIOD_OFFSET_CONF3_REGS(fas, target) \
{ \
	uchar_t period, offset, conf3; \
	period = fas->f_sync_period[target] & SYNC_PERIOD_MASK; \
	offset = fas->f_offset[target]; \
	conf3 = fas->f_fasconf3[target]; \
	if ((period != fas->f_period_reg_last) || \
	    (offset != fas->f_offset_reg_last) || \
	    (conf3 != fas->f_fasconf3_reg_last)) { \
		fas->f_period_reg_last = period; \
		fas->f_offset_reg_last = offset; \
		fas->f_fasconf3_reg_last = conf3; \
		fas_reg_write(fas, &fasreg->fas_sync_period, period); \
		fas_reg_write(fas, &fasreg->fas_sync_offset, offset); \
		fas_reg_write(fas, &fasreg->fas_conf3, conf3); \
	} \
}
1667 1666
/*
 * fifo read/write routines
 * always read the fifo bytes before reading the interrupt register
 */

/*
 * Drain the chip fifo into f_fifo[].  The fifo flag register reports
 * the count; two data-register reads are done per count (the fifo is
 * apparently two bytes per slot -- wide transfers).  If the input
 * shuttle still holds a byte (FAS_STAT2_ISHUTTLE), a zero pad byte is
 * written to push it out, the byte is read, and the pad is flushed.
 */
static void
fas_read_fifo(struct fas *fas)
{
	int stat = fas->f_stat;
	volatile struct fasreg	 *fasreg = fas->f_reg;
	int	i;

	i = fas_reg_read(fas, &fasreg->fas_fifo_flag) & FIFO_CNT_MASK;
	EPRINTF2("fas_read_fifo: fifo cnt=%x, stat=%x\n", i, stat);
	ASSERT(i <= FIFOSIZE);

	fas->f_fifolen = 0;
	while (i-- > 0) {
		fas->f_fifo[fas->f_fifolen++] = fas_reg_read(fas,
		    &fasreg->fas_fifo_data);
		fas->f_fifo[fas->f_fifolen++] = fas_reg_read(fas,
		    &fasreg->fas_fifo_data);
	}
	if (fas->f_stat2 & FAS_STAT2_ISHUTTLE) {

		/* write pad byte */
		fas_reg_write(fas, &fasreg->fas_fifo_data, 0);
		fas->f_fifo[fas->f_fifolen++] = fas_reg_read(fas,
		    &fasreg->fas_fifo_data);
		/* flush pad byte */
		fas_reg_cmd_write(fas, CMD_FLUSH);
	}
	EPRINTF2("fas_read_fifo: fifo len=%x, stat2=%x\n",
	    fas->f_fifolen, stat);
} /* fas_read_fifo */
1703 1702
1704 1703 static void
1705 1704 fas_write_fifo(struct fas *fas, uchar_t *buf, int length, int pad)
1706 1705 {
1707 1706 int i;
1708 1707 volatile struct fasreg *fasreg = fas->f_reg;
1709 1708
1710 1709 EPRINTF1("writing fifo %x bytes\n", length);
1711 1710 ASSERT(length <= 15);
1712 1711 fas_reg_cmd_write(fas, CMD_FLUSH);
1713 1712 for (i = 0; i < length; i++) {
1714 1713 fas_reg_write(fas, &fasreg->fas_fifo_data, buf[i]);
1715 1714 if (pad) {
1716 1715 fas_reg_write(fas, &fasreg->fas_fifo_data, 0);
1717 1716 }
1718 1717 }
1719 1718 }
1720 1719
/*
 * Hardware and Software internal reset routines
 */
/*
 * One-time chip initialization: validate the clock frequency (only a
 * 40MHz conversion factor is accepted), derive clock-cycle and
 * selection-timeout register values, set up the conf/conf2/conf3
 * registers, reset chip + dma + soft state (deliberately NOT the scsi
 * bus), and seed per-target sync offset/period values from the
 * per-target scsi-options.  Returns 0 on success, -1 on a bad clock
 * frequency.
 */
static int
fas_init_chip(struct fas *fas, uchar_t initiator_id)
{
	int i;
	uchar_t clock_conv;
	uchar_t initial_conf3;
	uint_t ticks;
	static char *prop_cfreq = "clock-frequency";

	/*
	 * Determine clock frequency of attached FAS chip.
	 */
	i = ddi_prop_get_int(DDI_DEV_T_ANY,
	    fas->f_dev, DDI_PROP_DONTPASS, prop_cfreq, -1);
	/* round the frequency up to a multiple of 5MHz */
	clock_conv = (i + FIVE_MEG - 1) / FIVE_MEG;
	if (clock_conv != CLOCK_40MHZ) {
		fas_log(fas, CE_WARN, "Bad clock frequency");
		return (-1);
	}

	fas->f_clock_conv = clock_conv;
	fas->f_clock_cycle = CLOCK_PERIOD(i);
	ticks = FAS_CLOCK_TICK(fas);
	fas->f_stval = FAS_CLOCK_TIMEOUT(ticks, fas_selection_timeout);

	DPRINTF5("%d mhz, clock_conv %d, clock_cycle %d, ticks %d, stval %d\n",
	    i, fas->f_clock_conv, fas->f_clock_cycle,
	    ticks, fas->f_stval);
	/*
	 * set up conf registers
	 */
	fas->f_fasconf |= FAS_CONF_PAREN;
	fas->f_fasconf2 = (uchar_t)(FAS_CONF2_FENABLE | FAS_CONF2_XL32);

	/* initiator ids beyond the narrow range need the extra conf3 id bit */
	if (initiator_id < NTARGETS) {
		initial_conf3 = FAS_CONF3_FASTCLK | FAS_CONF3_ODDBYTE_AUTO;
	} else {
		initial_conf3 = FAS_CONF3_FASTCLK | FAS_CONF3_ODDBYTE_AUTO |
		    FAS_CONF3_IDBIT3;
	}

	for (i = 0; i < NTARGETS_WIDE; i++) {
		fas->f_fasconf3[i] = initial_conf3;
	}

	/*
	 * Avoid resetting the scsi bus since this causes a few seconds
	 * delay per fas in boot and also causes busy conditions in some
	 * tape devices.
	 */
	fas_internal_reset(fas, FAS_RESET_SOFTC|FAS_RESET_FAS|FAS_RESET_DMA);

	/*
	 * initialize period and offset for each target
	 */
	for (i = 0; i < NTARGETS_WIDE; i++) {
		if (fas->f_target_scsi_options[i] & SCSI_OPTIONS_SYNC) {
			fas->f_offset[i] = fas_default_offset |
			    fas->f_req_ack_delay;
		} else {
			fas->f_offset[i] = 0;
		}
		if (fas->f_target_scsi_options[i] & SCSI_OPTIONS_FAST) {
			fas->f_neg_period[i] =
			    (uchar_t)MIN_SYNC_PERIOD(fas);
		} else {
			fas->f_neg_period[i] =
			    (uchar_t)CONVERT_PERIOD(DEFAULT_SYNC_PERIOD);
		}
	}
	return (0);
}
1796 1795
1797 1796 /*
1798 1797 * reset bus, chip, dma, or soft state
1799 1798 */
/*
 * fas_internal_reset - reset the SCSI bus, FAS chip, DMA engine, and/or
 * driver soft state, as selected by the FAS_RESET_* bits in reset_action.
 * The DMA engine is always hard-flushed regardless of reset_action.
 */
static void
fas_internal_reset(struct fas *fas, int reset_action)
{
	volatile struct fasreg *fasreg = fas->f_reg;
	volatile struct dma *dmar = fas->f_dma;

	if (reset_action & FAS_RESET_SCSIBUS) {
		/* reset the bus and start the post-reset settle delay */
		fas_reg_cmd_write(fas, CMD_RESET_SCSI);
		fas_setup_reset_delay(fas);
	}

	FAS_FLUSH_DMA_HARD(fas); /* resets and reinits the dma */

	/*
	 * NOTE: if dma is aborted while active, indefinite hangs
	 * may occur; it is preferable to stop the target first before
	 * flushing the dma
	 */
	if (reset_action & FAS_RESET_DMA) {
		/* pick the largest burst size the parent supports */
		int burstsizes = fas->f_dma_attr->dma_attr_burstsizes;
		if (burstsizes & BURST64) {
			IPRINTF("64 byte burstsize\n");
			fas->f_dma_csr |= DMA_BURST64;
		} else if (burstsizes & BURST32) {
			IPRINTF("32 byte burstsize\n");
			fas->f_dma_csr |= DMA_BURST32;
		} else {
			IPRINTF("16 byte burstsize\n");
		}
		/*
		 * enable 64-bit sbus transfers only on newer chip revs
		 * (f_hm_rev > 0x20), when the tunable allows it, and when
		 * the DDI accepts the 64-bit sbus request
		 */
		if ((fas->f_hm_rev > 0x20) && (fas_enable_sbus64) &&
		    (ddi_dma_set_sbus64(fas->f_dmahandle, burstsizes) ==
		    DDI_SUCCESS)) {
			IPRINTF("enabled 64 bit sbus\n");
			fas->f_dma_csr |= DMA_WIDE_EN;
		}
	}

	if (reset_action & FAS_RESET_FAS) {
		/*
		 * 2 NOPs with DMA are required here
		 * id_code is unreliable if we don't do this)
		 */
		uchar_t idcode, fcode;
		int dmarev;

		fas_reg_cmd_write(fas, CMD_RESET_FAS);
		fas_reg_cmd_write(fas, CMD_NOP | CMD_DMA);
		fas_reg_cmd_write(fas, CMD_NOP | CMD_DMA);

		/*
		 * Re-load chip configurations
		 * Only load registers which are not loaded in fas_startcmd()
		 */
		fas_reg_write(fas, &fasreg->fas_clock_conv,
		    (fas->f_clock_conv & CLOCK_MASK));

		fas_reg_write(fas, &fasreg->fas_timeout, fas->f_stval);

		/*
		 * enable default configurations; read back the id code
		 * (family code in bits 7:3, revision in the low bits)
		 */
		fas->f_idcode = idcode =
		    fas_reg_read(fas, &fasreg->fas_id_code);
		fcode = (uchar_t)(idcode & FAS_FCODE_MASK) >> (uchar_t)3;
		fas->f_type = FAS366;
		IPRINTF2("Family code %d, revision %d\n",
		    fcode, (idcode & FAS_REV_MASK));
		/* DMA channel revision lives in bits 14:11 of the csr */
		dmarev = fas_dma_reg_read(fas, &dmar->dma_csr);
		dmarev = (dmarev >> 11) & 0xf;
		IPRINTF1("DMA channel revision %d\n", dmarev);

		fas_reg_write(fas, &fasreg->fas_conf, fas->f_fasconf);
		fas_reg_write(fas, &fasreg->fas_conf2, fas->f_fasconf2);

		fas->f_req_ack_delay = DEFAULT_REQ_ACK_DELAY;

		/*
		 * Just in case... clear interrupt
		 */
		(void) fas_reg_read(fas, &fasreg->fas_intr);
	}

	if (reset_action & FAS_RESET_SOFTC) {
		/* forget all negotiation and message state */
		fas->f_wdtr_sent = fas->f_sdtr_sent = 0;
		fas->f_wide_known = fas->f_sync_known = 0;
		fas->f_wide_enabled = fas->f_sync_enabled = 0;
		fas->f_omsglen = 0;
		fas->f_cur_msgout[0] = fas->f_last_msgout =
		    fas->f_last_msgin = INVALID_MSG;
		fas->f_abort_msg_sent = fas->f_reset_msg_sent = 0;
		fas->f_next_slot = 0;
		fas->f_current_sp = NULL;
		fas->f_fifolen = 0;
		/* 0xff forces a reload of the per-target regs next start */
		fas->f_fasconf3_reg_last = fas->f_offset_reg_last =
		    fas->f_period_reg_last = 0xff;

		New_state(fas, STATE_FREE);
	}
}
1899 1898
1900 1899
#ifdef FASDEBUG
/*
 * Debug-only sanity check: recount every outstanding command for this
 * instance (both the per-slot ready lists and the active tag arrays)
 * and compare the counts against the cached f_tcmds/f_ncmds counters.
 */
static void
fas_check_ncmds(struct fas *fas)
{
	int slot = 0;
	int count = 0;

	do {
		if (fas->f_active[slot] != NULL) {
			struct fas_cmd *sp;
			ushort_t nslots = fas->f_active[slot]->f_n_slots;
			ushort_t tag;
			int active = 0;

			/* everything queued on this slot's ready list */
			for (sp = fas->f_readyf[slot]; sp != NULL;
			    sp = sp->cmd_forw) {
				count++;
			}
			/* everything currently active (tagged slots) */
			for (tag = 0; tag < nslots; tag++) {
				if (fas->f_active[slot]->f_slot[tag] != 0) {
					active++;
					count++;
				}
			}
			/* per-slot active count must match the cache */
			ASSERT(active == fas->f_tcmds[slot]);
		}
		slot = NEXTSLOT(slot, fas->f_dslot);
	} while (slot != 0);

	if (count != fas->f_ncmds) {
		IPRINTF2("fas_check_ncmds: total=%x, ncmds=%x\n",
		    count, fas->f_ncmds);
	}
	ASSERT(fas->f_ncmds >= fas->f_ndisc);
}
#else
#define	fas_check_ncmds(fas)
#endif
1941 1940
1942 1941 /*
1943 1942 * SCSA Interface functions
1944 1943 *
1945 1944 * Visible to the external world via the transport structure.
1946 1945 *
1947 1946 * fas_scsi_abort: abort a current cmd or all cmds for a target
1948 1947 */
1949 1948 /*ARGSUSED*/
1950 1949 static int
1951 1950 fas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
1952 1951 {
1953 1952 struct fas *fas = ADDR2FAS(ap);
1954 1953 int rval;
1955 1954
1956 1955 IPRINTF2("fas_scsi_abort: target %d.%d\n", ap->a_target, ap->a_lun);
1957 1956
1958 1957 mutex_enter(FAS_MUTEX(fas));
1959 1958 rval = fas_do_scsi_abort(ap, pkt);
1960 1959 fas_check_waitQ_and_mutex_exit(fas);
1961 1960 return (rval);
1962 1961 }
1963 1962
1964 1963 /*
1965 1964 * reset handling: reset bus or target
1966 1965 */
1967 1966 /*ARGSUSED*/
1968 1967 static int
1969 1968 fas_scsi_reset(struct scsi_address *ap, int level)
1970 1969 {
1971 1970 struct fas *fas = ADDR2FAS(ap);
1972 1971 int rval;
1973 1972
1974 1973 IPRINTF3("fas_scsi_reset: target %d.%d, level %d\n",
1975 1974 ap->a_target, ap->a_lun, level);
1976 1975
1977 1976 mutex_enter(FAS_MUTEX(fas));
1978 1977 rval = fas_do_scsi_reset(ap, level);
1979 1978 fas_check_waitQ_and_mutex_exit(fas);
1980 1979 return (rval);
1981 1980 }
1982 1981
1983 1982 /*
1984 1983 * entry point for reset notification setup, to register or to cancel.
1985 1984 */
1986 1985 static int
1987 1986 fas_scsi_reset_notify(struct scsi_address *ap, int flag,
1988 1987 void (*callback)(caddr_t), caddr_t arg)
1989 1988 {
1990 1989 struct fas *fas = ADDR2FAS(ap);
1991 1990
1992 1991 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
1993 1992 &fas->f_mutex, &fas->f_reset_notify_listf));
1994 1993 }
1995 1994
1996 1995 /*
1997 1996 * capability interface
1998 1997 */
/*ARGSUSED*/
static int
fas_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
{
	/* NOTE(review): fas looks unused here but is presumably referenced
	 * by the DPRINTF3 macro expansion under FASDEBUG -- confirm before
	 * removing */
	struct fas *fas = ADDR2FAS(ap);
	DPRINTF3("fas_scsi_getcap: tgt=%x, cap=%s, whom=%x\n",
	    ap->a_target, cap, whom);
	/* delegate to the common get/set capability handler (doset = 0) */
	return (fas_commoncap(ap, cap, 0, whom, 0));
}
2008 2007
/*ARGSUSED*/
static int
fas_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
{
	/* NOTE(review): fas looks unused here but is presumably referenced
	 * by the IPRINTF4 macro expansion under FASDEBUG -- confirm before
	 * removing */
	struct fas *fas = ADDR2FAS(ap);
	IPRINTF4("fas_scsi_setcap: tgt=%x, cap=%s, value=%x, whom=%x\n",
	    ap->a_target, cap, value, whom);
	/* delegate to the common get/set capability handler (doset = 1) */
	return (fas_commoncap(ap, cap, value, whom, 1));
}
2018 2017
2019 2018 /*
2020 2019 * pkt and dma allocation and deallocation
2021 2020 */
2022 2021 /*ARGSUSED*/
2023 2022 static void
2024 2023 fas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
2025 2024 {
2026 2025 struct fas_cmd *cmd = PKT2CMD(pkt);
2027 2026
2028 2027 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAFREE_START,
2029 2028 "fas_scsi_dmafree_start");
2030 2029
2031 2030 if (cmd->cmd_flags & CFLAG_DMAVALID) {
2032 2031 /*
2033 2032 * Free the mapping.
2034 2033 */
2035 2034 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
2036 2035 cmd->cmd_flags ^= CFLAG_DMAVALID;
2037 2036 }
2038 2037 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAFREE_END,
2039 2038 "fas_scsi_dmafree_end");
2040 2039 }
2041 2040
2042 2041 /*ARGSUSED*/
2043 2042 static void
2044 2043 fas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2045 2044 {
2046 2045 struct fas_cmd *sp = PKT2CMD(pkt);
2047 2046
2048 2047 if (sp->cmd_flags & CFLAG_DMAVALID) {
2049 2048 if (ddi_dma_sync(sp->cmd_dmahandle, 0, 0,
2050 2049 (sp->cmd_flags & CFLAG_DMASEND) ?
2051 2050 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU) !=
2052 2051 DDI_SUCCESS) {
2053 2052 fas_log(ADDR2FAS(ap), CE_WARN,
2054 2053 "sync of pkt (%p) failed", (void *)pkt);
2055 2054 }
2056 2055 }
2057 2056 }
2058 2057
2059 2058 /*
2060 2059 * initialize pkt and allocate DVMA resources
2061 2060 */
2062 2061 static struct scsi_pkt *
2063 2062 fas_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
2064 2063 struct buf *bp, int cmdlen, int statuslen, int tgtlen,
2065 2064 int flags, int (*callback)(), caddr_t arg)
2066 2065 {
2067 2066 int kf;
2068 2067 int failure = 1;
2069 2068 struct fas_cmd *cmd;
2070 2069 struct fas *fas = ADDR2FAS(ap);
2071 2070 struct fas_cmd *new_cmd;
2072 2071 int rval;
2073 2072
2074 2073 /* #define FAS_TEST_EXTRN_ALLOC */
2075 2074 #ifdef FAS_TEST_EXTRN_ALLOC
2076 2075 cmdlen *= 4; statuslen *= 4; tgtlen *= 4;
2077 2076 #endif
2078 2077 /*
2079 2078 * if no pkt was passed then allocate a pkt first
2080 2079 */
2081 2080 if (pkt == NULL) {
2082 2081 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_PKTALLOC_START,
2083 2082 "fas_scsi_impl_pktalloc_start");
2084 2083
2085 2084 kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;
2086 2085
2087 2086 /*
2088 2087 * only one size of pkt (with arq).
2089 2088 */
2090 2089 cmd = kmem_cache_alloc(fas->f_kmem_cache, kf);
2091 2090
2092 2091 if (cmd) {
2093 2092
2094 2093 ddi_dma_handle_t save_dma_handle;
2095 2094
2096 2095 save_dma_handle = cmd->cmd_dmahandle;
2097 2096 bzero(cmd, EXTCMD_SIZE);
2098 2097 cmd->cmd_dmahandle = save_dma_handle;
2099 2098
2100 2099 pkt = (struct scsi_pkt *)((uchar_t *)cmd +
2101 2100 sizeof (struct fas_cmd));
2102 2101 cmd->cmd_pkt = pkt;
2103 2102 pkt->pkt_ha_private = (opaque_t)cmd;
2104 2103 pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
2105 2104 pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
2106 2105 pkt->pkt_address = *ap;
2107 2106
2108 2107 pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
2109 2108 pkt->pkt_private = cmd->cmd_pkt_private;
2110 2109
2111 2110 cmd->cmd_cdblen = cmdlen;
2112 2111 cmd->cmd_scblen = statuslen;
2113 2112 cmd->cmd_privlen = tgtlen;
2114 2113 cmd->cmd_slot =
2115 2114 (Tgt(cmd) * NLUNS_PER_TARGET) | Lun(cmd);
2116 2115 failure = 0;
2117 2116 }
2118 2117 if (failure || (cmdlen > sizeof (cmd->cmd_cdb)) ||
2119 2118 (tgtlen > PKT_PRIV_LEN) ||
2120 2119 (statuslen > EXTCMDS_STATUS_SIZE)) {
2121 2120 if (failure == 0) {
2122 2121 /*
2123 2122 * if extern alloc fails, all will be
2124 2123 * deallocated, including cmd
2125 2124 */
2126 2125 failure = fas_pkt_alloc_extern(fas, cmd,
2127 2126 cmdlen, tgtlen, statuslen, kf);
2128 2127 }
2129 2128 if (failure) {
2130 2129 /*
2131 2130 * nothing to deallocate so just return
2132 2131 */
2133 2132 TRACE_0(TR_FAC_SCSI_FAS,
2134 2133 TR_FAS_SCSI_IMPL_PKTALLOC_END,
2135 2134 "fas_scsi_impl_pktalloc_end");
2136 2135 return (NULL);
2137 2136 }
2138 2137 }
2139 2138
2140 2139 new_cmd = cmd;
2141 2140
2142 2141 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_PKTALLOC_END,
2143 2142 "fas_scsi_impl_pktalloc_end");
2144 2143 } else {
2145 2144 cmd = PKT2CMD(pkt);
2146 2145 new_cmd = NULL;
2147 2146 }
2148 2147
2149 2148 /*
2150 2149 * Second step of fas_scsi_init_pkt:
2151 2150 * bind the buf to the handle
2152 2151 */
2153 2152 if (bp && bp->b_bcount != 0 &&
2154 2153 (cmd->cmd_flags & CFLAG_DMAVALID) == 0) {
2155 2154
2156 2155 int cmd_flags, dma_flags;
2157 2156 uint_t dmacookie_count;
2158 2157
2159 2158 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAGET_START,
2160 2159 "fas_scsi_impl_dmaget_start");
2161 2160
2162 2161 cmd_flags = cmd->cmd_flags;
2163 2162
2164 2163 if (bp->b_flags & B_READ) {
2165 2164 cmd_flags &= ~CFLAG_DMASEND;
2166 2165 dma_flags = DDI_DMA_READ | DDI_DMA_PARTIAL;
2167 2166 } else {
2168 2167 cmd_flags |= CFLAG_DMASEND;
2169 2168 dma_flags = DDI_DMA_WRITE | DDI_DMA_PARTIAL;
2170 2169 }
2171 2170 if (flags & PKT_CONSISTENT) {
2172 2171 cmd_flags |= CFLAG_CMDIOPB;
2173 2172 dma_flags |= DDI_DMA_CONSISTENT;
2174 2173 }
2175 2174
2176 2175 /*
2177 2176 * bind the handle to the buf
2178 2177 */
2179 2178 ASSERT(cmd->cmd_dmahandle != NULL);
2180 2179 rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
2181 2180 dma_flags, callback, arg, &cmd->cmd_dmacookie,
2182 2181 &dmacookie_count);
2183 2182
2184 2183 if (rval && rval != DDI_DMA_PARTIAL_MAP) {
2185 2184 switch (rval) {
2186 2185 case DDI_DMA_NORESOURCES:
2187 2186 bioerror(bp, 0);
2188 2187 break;
2189 2188 case DDI_DMA_BADATTR:
2190 2189 case DDI_DMA_NOMAPPING:
2191 2190 bioerror(bp, EFAULT);
2192 2191 break;
2193 2192 case DDI_DMA_TOOBIG:
2194 2193 default:
2195 2194 bioerror(bp, EINVAL);
2196 2195 break;
2197 2196 }
2198 2197 cmd->cmd_flags = cmd_flags & ~CFLAG_DMAVALID;
2199 2198 if (new_cmd) {
2200 2199 fas_scsi_destroy_pkt(ap, pkt);
2201 2200 }
2202 2201 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAGET_END,
2203 2202 "fas_scsi_impl_dmaget_end");
2204 2203 return ((struct scsi_pkt *)NULL);
2205 2204 }
2206 2205 ASSERT(dmacookie_count == 1);
2207 2206 cmd->cmd_dmacount = bp->b_bcount;
2208 2207 cmd->cmd_flags = cmd_flags | CFLAG_DMAVALID;
2209 2208
2210 2209 ASSERT(cmd->cmd_dmahandle != NULL);
2211 2210 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAGET_END,
2212 2211 "fas_scsi_impl_dmaget_end");
2213 2212 }
2214 2213
2215 2214 return (pkt);
2216 2215 }
2217 2216
2218 2217 /*
2219 2218 * unbind dma resources and deallocate the pkt
2220 2219 */
2221 2220 static void
2222 2221 fas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2223 2222 {
2224 2223 struct fas_cmd *sp = PKT2CMD(pkt);
2225 2224 struct fas *fas = ADDR2FAS(ap);
2226 2225
2227 2226 /*
2228 2227 * fas_scsi_impl_dmafree inline to speed things up
2229 2228 */
2230 2229 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAFREE_START,
2231 2230 "fas_scsi_impl_dmafree_start");
2232 2231
2233 2232 if (sp->cmd_flags & CFLAG_DMAVALID) {
2234 2233 /*
2235 2234 * Free the mapping.
2236 2235 */
2237 2236 (void) ddi_dma_unbind_handle(sp->cmd_dmahandle);
2238 2237 sp->cmd_flags ^= CFLAG_DMAVALID;
2239 2238 }
2240 2239
2241 2240 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAFREE_END,
2242 2241 "fas_scsi_impl_dmafree_end");
2243 2242
2244 2243 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_PKTFREE_START,
2245 2244 "fas_scsi_impl_pktfree_start");
2246 2245
2247 2246 if ((sp->cmd_flags &
2248 2247 (CFLAG_FREE | CFLAG_CDBEXTERN | CFLAG_PRIVEXTERN |
2249 2248 CFLAG_SCBEXTERN)) == 0) {
2250 2249 sp->cmd_flags = CFLAG_FREE;
2251 2250 kmem_cache_free(fas->f_kmem_cache, (void *)sp);
2252 2251 } else {
2253 2252 fas_pkt_destroy_extern(fas, sp);
2254 2253 }
2255 2254
2256 2255 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_PKTFREE_END,
2257 2256 "fas_scsi_impl_pktfree_end");
2258 2257 }
2259 2258
2260 2259 /*
2261 2260 * allocate and deallocate external pkt space (ie. not part of fas_cmd) for
2262 2261 * non-standard length cdb, pkt_private, status areas
2263 2262 * if allocation fails, then deallocate all external space and the pkt
2264 2263 */
2265 2264 /* ARGSUSED */
2266 2265 static int
2267 2266 fas_pkt_alloc_extern(struct fas *fas, struct fas_cmd *sp,
2268 2267 int cmdlen, int tgtlen, int statuslen, int kf)
2269 2268 {
2270 2269 caddr_t cdbp, scbp, tgt;
2271 2270 int failure = 0;
2272 2271
2273 2272 tgt = cdbp = scbp = NULL;
2274 2273 if (cmdlen > sizeof (sp->cmd_cdb)) {
2275 2274 if ((cdbp = kmem_zalloc((size_t)cmdlen, kf)) == NULL) {
2276 2275 failure++;
2277 2276 } else {
2278 2277 sp->cmd_pkt->pkt_cdbp = (opaque_t)cdbp;
2279 2278 sp->cmd_flags |= CFLAG_CDBEXTERN;
2280 2279 }
2281 2280 }
2282 2281 if (tgtlen > PKT_PRIV_LEN) {
2283 2282 if ((tgt = kmem_zalloc(tgtlen, kf)) == NULL) {
2284 2283 failure++;
2285 2284 } else {
2286 2285 sp->cmd_flags |= CFLAG_PRIVEXTERN;
2287 2286 sp->cmd_pkt->pkt_private = tgt;
2288 2287 }
2289 2288 }
2290 2289 if (statuslen > EXTCMDS_STATUS_SIZE) {
2291 2290 if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
2292 2291 failure++;
2293 2292 } else {
2294 2293 sp->cmd_flags |= CFLAG_SCBEXTERN;
2295 2294 sp->cmd_pkt->pkt_scbp = (opaque_t)scbp;
2296 2295 }
2297 2296 }
2298 2297 if (failure) {
2299 2298 fas_pkt_destroy_extern(fas, sp);
2300 2299 }
2301 2300 return (failure);
2302 2301 }
2303 2302
2304 2303 /*
2305 2304 * deallocate external pkt space and deallocate the pkt
2306 2305 */
2307 2306 static void
2308 2307 fas_pkt_destroy_extern(struct fas *fas, struct fas_cmd *sp)
2309 2308 {
2310 2309 if (sp->cmd_flags & CFLAG_FREE) {
2311 2310 panic("fas_pkt_destroy_extern: freeing free packet");
2312 2311 _NOTE(NOT_REACHED)
2313 2312 /* NOTREACHED */
2314 2313 }
2315 2314 if (sp->cmd_flags & CFLAG_CDBEXTERN) {
2316 2315 kmem_free((caddr_t)sp->cmd_pkt->pkt_cdbp,
2317 2316 (size_t)sp->cmd_cdblen);
2318 2317 }
2319 2318 if (sp->cmd_flags & CFLAG_SCBEXTERN) {
2320 2319 kmem_free((caddr_t)sp->cmd_pkt->pkt_scbp,
2321 2320 (size_t)sp->cmd_scblen);
2322 2321 }
2323 2322 if (sp->cmd_flags & CFLAG_PRIVEXTERN) {
2324 2323 kmem_free((caddr_t)sp->cmd_pkt->pkt_private,
2325 2324 (size_t)sp->cmd_privlen);
2326 2325 }
2327 2326 sp->cmd_flags = CFLAG_FREE;
2328 2327 kmem_cache_free(fas->f_kmem_cache, (void *)sp);
2329 2328 }
2330 2329
2331 2330 /*
2332 2331 * kmem cache constructor and destructor:
2333 2332 * When constructing, we bzero the cmd and allocate the dma handle
2334 2333 * When destructing, just free the dma handle
2335 2334 */
2336 2335 static int
2337 2336 fas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags)
2338 2337 {
2339 2338 struct fas_cmd *cmd = buf;
2340 2339 struct fas *fas = cdrarg;
2341 2340 int (*callback)(caddr_t) = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP:
2342 2341 DDI_DMA_DONTWAIT;
2343 2342
2344 2343 bzero(buf, EXTCMD_SIZE);
2345 2344
2346 2345 /*
2347 2346 * allocate a dma handle
2348 2347 */
2349 2348 if ((ddi_dma_alloc_handle(fas->f_dev, fas->f_dma_attr, callback,
2350 2349 NULL, &cmd->cmd_dmahandle)) != DDI_SUCCESS) {
2351 2350 return (-1);
2352 2351 }
2353 2352 return (0);
2354 2353 }
2355 2354
2356 2355 /*ARGSUSED*/
2357 2356 static void
2358 2357 fas_kmem_cache_destructor(void *buf, void *cdrarg)
2359 2358 {
2360 2359 struct fas_cmd *cmd = buf;
2361 2360 if (cmd->cmd_dmahandle) {
2362 2361 ddi_dma_free_handle(&cmd->cmd_dmahandle);
2363 2362 }
2364 2363 }
2365 2364
2366 2365 /*
2367 2366 * fas_scsi_start - Accept commands for transport
2368 2367 */
/*
 * fas_scsi_start - SCSA transport entry point: accept a packet for
 * transport, either starting it immediately, polling it (FLAG_NOINTR),
 * or deferring it on the waitQ to be drained at interrupt time.
 *
 * Returns a TRAN_* code from fas_prepare_pkt()/fas_accept_pkt().
 */
static int
fas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct fas_cmd *sp = PKT2CMD(pkt);
	struct fas *fas = ADDR2FAS(ap);
	int rval;
	int intr = 0;	/* nonzero if an interrupt was pending at queue time */

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_START_START, "fas_scsi_start_start");

#ifdef FAS_TEST
	/* fault-injection knobs: force busy/reject returns for testing */
	if (fas_transport_busy > 0) {
		fas_transport_busy--;
		return (TRAN_BUSY);
	}
	if ((fas_transport_busy_rqs > 0) &&
	    (*(sp->cmd_pkt->pkt_cdbp) == SCMD_REQUEST_SENSE)) {
		fas_transport_busy_rqs--;
		return (TRAN_BUSY);
	}
	if (fas_transport_reject > 0) {
		fas_transport_reject--;
		return (TRAN_BADPKT);
	}
#endif
	/*
	 * prepare packet before taking the mutex
	 */
	rval = fas_prepare_pkt(fas, sp);
	if (rval != TRAN_ACCEPT) {
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_START_PREPARE_PKT_END,
		    "fas_scsi_start_end (prepare_pkt)");
		return (rval);
	}

	/*
	 * fas mutex can be held for a long time; therefore, if the mutex is
	 * held, we queue the packet in a waitQ; we now should check
	 * the waitQ on every mutex_exit(FAS_MUTEX(fas)) but we really only
	 * need to do this when the bus is free
	 * don't put NOINTR cmds including proxy cmds in waitQ! These
	 * cmds are handled by fas_runpoll()
	 * if the waitQ is non-empty, queue the pkt anyway to preserve
	 * order
	 * the goal is to queue in waitQ as much as possible so at
	 * interrupt time, we can move the packets to readyQ or start
	 * a packet immediately. It helps to do this at interrupt
	 * time because we can then field more interrupts
	 */
	if ((sp->cmd_pkt_flags & FLAG_NOINTR) == 0) {

		/*
		 * if the bus is not free, we will get an interrupt shortly
		 * so we don't want to take the fas mutex but queue up
		 * the packet in the waitQ
		 * also, if the waitQ is non-empty or there is an interrupt
		 * pending then queue up the packet in the waitQ and let the
		 * interrupt handler empty the waitQ
		 */
		mutex_enter(&fas->f_waitQ_mutex);

		if ((fas->f_state != STATE_FREE) ||
		    fas->f_waitf || (intr = INTPENDING(fas))) {
			goto queue_in_waitQ;
		}

		/*
		 * we didn't queue up in the waitQ, so now try to accept
		 * the packet. if we fail to get the fas mutex, go back to
		 * the waitQ again
		 * do not release the waitQ mutex yet because that
		 * leaves a window where the interrupt handler has
		 * emptied the waitQ but not released the fas mutex yet
		 *
		 * the interrupt handler gets the locks in opposite order
		 * but because we do a tryenter, there is no deadlock
		 *
		 * if another thread has the fas mutex then either this
		 * thread or the other may find the bus free and
		 * empty the waitQ
		 */
		if (mutex_tryenter(FAS_MUTEX(fas))) {
			mutex_exit(&fas->f_waitQ_mutex);
			rval = fas_accept_pkt(fas, sp, TRAN_BUSY_OK);
		} else {
			/*
			 * we didn't get the fas mutex so
			 * the packet has to go in the waitQ now
			 */
			goto queue_in_waitQ;
		}
	} else {
		/*
		 * for polled cmds, we have to take the mutex and
		 * start the packet using fas_runpoll()
		 */
		mutex_enter(FAS_MUTEX(fas));
		rval = fas_accept_pkt(fas, sp, TRAN_BUSY_OK);
	}

	/*
	 * if the bus is free then empty waitQ and release the mutex
	 * (this should be unlikely that the bus is still free after
	 * accepting the packet. it may be the relatively unusual case
	 * that we are throttling)
	 */
	if (fas->f_state == STATE_FREE) {
		FAS_CHECK_WAITQ_AND_FAS_MUTEX_EXIT(fas);
	} else {
		mutex_exit(FAS_MUTEX(fas));
	}

done:
	TRACE_1(TR_FAC_SCSI_FAS, TR_FAS_START_END,
	    "fas_scsi_start_end: fas 0x%p", fas);
	return (rval);

queue_in_waitQ:
	/* append to the tail of the waitQ (f_waitf = head, f_waitb = tail) */
	if (fas->f_waitf == NULL) {
		fas->f_waitb = fas->f_waitf = sp;
		sp->cmd_forw = NULL;
	} else {
		struct fas_cmd *dp = fas->f_waitb;
		dp->cmd_forw = fas->f_waitb = sp;
		sp->cmd_forw = NULL;
	}

	/*
	 * check again the fas mutex
	 * if there was an interrupt then the interrupt
	 * handler will eventually empty the waitQ
	 */
	if ((intr == 0) && (fas->f_state == STATE_FREE) &&
	    mutex_tryenter(FAS_MUTEX(fas))) {
		/*
		 * double check if the bus is still free
		 * (this actually reduced mutex contention a bit)
		 */
		if (fas->f_state == STATE_FREE) {
			fas_empty_waitQ(fas);
		}
		mutex_exit(FAS_MUTEX(fas));
	}
	mutex_exit(&fas->f_waitQ_mutex);

	TRACE_1(TR_FAC_SCSI_FAS, TR_FAS_START_END,
	    "fas_scsi_start_end: fas 0x%p", fas);
	return (rval);
}
2518 2517
2519 2518 /*
2520 2519 * prepare the pkt:
2521 2520 * the pkt may have been resubmitted or just reused so
2522 2521 * initialize some fields, reset the dma window, and do some checks
2523 2522 */
/*
 * fas_prepare_pkt - (re)initialize a packet for transport.
 *
 * The pkt may have been resubmitted or reused, so completion fields are
 * reset, the DMA window is rewound to window 0, and consistency checks
 * are made. Returns TRAN_ACCEPT on success or TRAN_BADPKT when the DMA
 * window cannot be reset (or, under FASDEBUG, when the packet fails
 * tag/completion-function sanity checks).
 */
static int
fas_prepare_pkt(struct fas *fas, struct fas_cmd *sp)
{
	struct scsi_pkt *pkt = CMD2PKT(sp);

	/*
	 * Reinitialize some fields that need it; the packet may
	 * have been resubmitted
	 */
	pkt->pkt_reason = CMD_CMPLT;
	pkt->pkt_state = 0;
	pkt->pkt_statistics = 0;
	pkt->pkt_resid = 0;
	sp->cmd_age = 0;
	sp->cmd_pkt_flags = pkt->pkt_flags;

	/*
	 * Copy the cdb pointer to the pkt wrapper area as we
	 * might modify this pointer. Zero status byte
	 */
	sp->cmd_cdbp = pkt->pkt_cdbp;
	*(pkt->pkt_scbp) = 0;

	if (sp->cmd_flags & CFLAG_DMAVALID) {
		/* nothing transferred yet: resid starts at full count */
		pkt->pkt_resid = sp->cmd_dmacount;

		/*
		 * if the pkt was resubmitted then the
		 * windows may be at the wrong number
		 */
		if (sp->cmd_cur_win) {
			sp->cmd_cur_win = 0;
			if (fas_set_new_window(fas, sp)) {
				IPRINTF("cannot reset window\n");
				return (TRAN_BADPKT);
			}
		}
		sp->cmd_saved_cur_addr =
		    sp->cmd_cur_addr = sp->cmd_dmacookie.dmac_address;

		/*
		 * the common case is just one window, we worry
		 * about multiple windows when we run out of the
		 * current window
		 */
		sp->cmd_nwin = sp->cmd_saved_win = 0;
		sp->cmd_data_count = sp->cmd_saved_data_count = 0;

		/*
		 * consistent packets need to be sync'ed first
		 * (only for data going out)
		 */
		if ((sp->cmd_flags & (CFLAG_CMDIOPB | CFLAG_DMASEND)) ==
		    (CFLAG_CMDIOPB | CFLAG_DMASEND)) {
			(void) ddi_dma_sync(sp->cmd_dmahandle, 0, (uint_t)0,
			    DDI_DMA_SYNC_FORDEV);
		}
	}

	sp->cmd_actual_cdblen = sp->cmd_cdblen;

#ifdef FAS_TEST
#ifndef __lock_lint
	/* fault injection: force one tagged cmd to go out untagged */
	if (fas_test_untagged > 0) {
		if (TAGGED(Tgt(sp))) {
			int slot = sp->cmd_slot;
			sp->cmd_pkt_flags &= ~FLAG_TAGMASK;
			sp->cmd_pkt_flags &= ~FLAG_NODISCON;
			sp->cmd_pkt_flags |= 0x80000000;
			fas_log(fas, CE_NOTE,
			    "starting untagged cmd, target=%d,"
			    " tcmds=%d, sp=0x%p, throttle=%d\n",
			    Tgt(sp), fas->f_tcmds[slot], (void *)sp,
			    fas->f_throttle[slot]);
			fas_test_untagged = -10;
		}
	}
#endif
#endif

#ifdef FASDEBUG
	/* a tagged pkt for a target without tagged queueing is invalid */
	if (NOTAG(Tgt(sp)) && (pkt->pkt_flags & FLAG_TAGMASK)) {
		IPRINTF2("tagged packet for non-tagged target %d.%d\n",
		    Tgt(sp), Lun(sp));
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_PREPARE_PKT_TRAN_BADPKT_END,
		    "fas_prepare_pkt_end (tran_badpkt)");
		return (TRAN_BADPKT);
	}

	/*
	 * the SCSA spec states that it is an error to have no
	 * completion function when FLAG_NOINTR is not set
	 */
	if ((pkt->pkt_comp == NULL) &&
	    ((pkt->pkt_flags & FLAG_NOINTR) == 0)) {
		IPRINTF("intr packet with pkt_comp == 0\n");
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_PREPARE_PKT_TRAN_BADPKT_END,
		    "fas_prepare_pkt_end (tran_badpkt)");
		return (TRAN_BADPKT);
	}
#endif /* FASDEBUG */

	if ((fas->f_target_scsi_options[Tgt(sp)] & SCSI_OPTIONS_DR) == 0) {
		/*
		 * no need to reset tag bits since tag queueing will
		 * not be enabled if disconnects are disabled
		 */
		sp->cmd_pkt_flags |= FLAG_NODISCON;
	}

	/* mark the pkt prepared and in transport */
	sp->cmd_flags = (sp->cmd_flags & ~CFLAG_TRANFLAG) |
	    CFLAG_PREPARED | CFLAG_IN_TRANSPORT;

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_PREPARE_PKT_TRAN_ACCEPT_END,
	    "fas_prepare_pkt_end (tran_accept)");
	return (TRAN_ACCEPT);
}
2641 2640
2642 2641 /*
2643 2642 * emptying the waitQ just before releasing FAS_MUTEX is a bit
2644 2643 * tricky; if we release the waitQ mutex and then the FAS_MUTEX,
2645 2644 * another thread could queue a cmd in the waitQ, just before
2646 2645 * the FAS_MUTEX is released. This cmd is then stuck in the waitQ unless
2647 2646 * another cmd comes in or fas_intr() or fas_watch() checks the waitQ.
2648 2647 * Therefore, by releasing the FAS_MUTEX before releasing the waitQ mutex,
2649 2648 * we prevent fas_scsi_start() filling the waitQ
2650 2649 *
2651 2650 * By setting NO_TRAN_BUSY, we force fas_accept_pkt() to queue up
2652 2651 * the waitQ pkts in the readyQ.
2653 2652 * If a QFull condition occurs, the target driver may set its throttle
2654 2653 * too high because of the requests queued up in the readyQ but this
2655 2654 * is not a big problem. The throttle should be periodically reset anyway.
2656 2655 */
/*
 * fas_empty_waitQ - drain the waitQ by feeding each packet to
 * fas_accept_pkt(NO_TRAN_BUSY).
 *
 * Caller holds f_waitQ_mutex (and FAS_MUTEX); the waitQ is detached
 * locally and the waitQ mutex is dropped while the packets are accepted,
 * then re-taken. Returns with f_waitQ_mutex held. If an interrupt becomes
 * pending mid-drain, the undrained remainder is pushed back on the front
 * of the waitQ and the function returns early.
 */
static void
fas_empty_waitQ(struct fas *fas)
{
	struct fas_cmd *sp;
	int rval;
	struct fas_cmd *waitf, *waitb;

	ASSERT(mutex_owned(&fas->f_waitQ_mutex));
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_EMPTY_WAITQ_START,
	    "fas_empty_waitQ_start");

	while (fas->f_waitf) {

		/* copy waitQ, zero the waitQ and release the mutex */
		waitf = fas->f_waitf;
		waitb = fas->f_waitb;
		fas->f_waitf = fas->f_waitb = NULL;
		mutex_exit(&fas->f_waitQ_mutex);

		do {
			/* pop the head of the detached list */
			sp = waitf;
			waitf = sp->cmd_forw;
			if (waitb == sp) {
				waitb = NULL;
			}

			rval = fas_accept_pkt(fas, sp, NO_TRAN_BUSY);

			/*
			 * If the packet was rejected for other reasons then
			 * complete it here
			 */
			if (rval != TRAN_ACCEPT) {
				ASSERT(rval != TRAN_BUSY);
				fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
				if (sp->cmd_pkt->pkt_comp) {
					sp->cmd_flags |= CFLAG_FINISHED;
					fas_call_pkt_comp(fas, sp);
				}
			}

			if (INTPENDING(fas)) {
				/*
				 * stop processing the waitQ and put back
				 * the remaining packets on the waitQ
				 */
				mutex_enter(&fas->f_waitQ_mutex);
				if (waitf) {
					/* prepend remainder to the waitQ */
					ASSERT(waitb != NULL);
					waitb->cmd_forw = fas->f_waitf;
					fas->f_waitf = waitf;
					if (fas->f_waitb == NULL) {
						fas->f_waitb = waitb;
					}
				}
				return;
			}
		} while (waitf);

		mutex_enter(&fas->f_waitQ_mutex);
	}
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_EMPTY_WAITQ_END,
	    "fas_empty_waitQ_end");
}
2721 2720
/*
 * fas_move_waitQ_to_readyQ - drain the waitQ while FAS_MUTEX is held,
 * taking and releasing the waitQ mutex around the drain.
 */
static void
fas_move_waitQ_to_readyQ(struct fas *fas)
{
	/*
	 * this may actually start cmds but it is most likely
	 * that if waitQ is not empty that the bus is not free
	 */
	ASSERT(mutex_owned(FAS_MUTEX(fas)));
	mutex_enter(&fas->f_waitQ_mutex);
	fas_empty_waitQ(fas);
	mutex_exit(&fas->f_waitQ_mutex);
}
2734 2733
2735 2734
2736 2735 /*
2737 2736 * function wrapper for two frequently used macros. for the non-critical
2738 2737 * path we use the function
2739 2738 */
/*
 * fas_check_waitQ_and_mutex_exit - release FAS_MUTEX, draining the waitQ
 * and running queued completion callbacks on the way out. Function
 * wrapper around two frequently used macros, for the non-critical path.
 */
static void
fas_check_waitQ_and_mutex_exit(struct fas *fas)
{
	_NOTE(LOCK_RELEASED_AS_SIDE_EFFECT(fas->f_mutex))
	FAS_CHECK_WAITQ_AND_FAS_MUTEX_EXIT(fas);
	FAS_EMPTY_CALLBACKQ(fas);
}
2747 2746
2748 2747 /*
2749 2748 * fas_accept_pkt():
2750 2749 * the flag argument is to force fas_accept_pkt to accept the pkt;
2751 2750 * the caller cannot take the pkt back and it has to be queued up in
2752 2751 * the readyQ
2753 2752 */
2754 2753 static int
2755 2754 fas_accept_pkt(struct fas *fas, struct fas_cmd *sp, int flag)
2756 2755 {
2757 2756 short slot = sp->cmd_slot;
2758 2757 int rval = TRAN_ACCEPT;
2759 2758
2760 2759 TRACE_0(TR_FAC_SCSI_FAS, TR__FAS_START_START, "fas_accept_pkt_start");
2761 2760 ASSERT(mutex_owned(FAS_MUTEX(fas)));
2762 2761 ASSERT(fas->f_ncmds >= 0 && fas->f_ndisc >= 0);
2763 2762 ASSERT(fas->f_ncmds >= fas->f_ndisc);
2764 2763 ASSERT(fas->f_tcmds[slot] >= 0);
2765 2764
2766 2765 /*
2767 2766 * prepare packet for transport if this hasn't been done yet and
2768 2767 * do some checks
2769 2768 */
2770 2769 if ((sp->cmd_flags & CFLAG_PREPARED) == 0) {
2771 2770 rval = fas_prepare_pkt(fas, sp);
2772 2771 if (rval != TRAN_ACCEPT) {
2773 2772 IPRINTF1("prepare pkt failed, slot=%x\n", slot);
2774 2773 sp->cmd_flags &= ~CFLAG_TRANFLAG;
2775 2774 goto done;
2776 2775 }
2777 2776 }
2778 2777
2779 2778 if (Lun(sp)) {
2780 2779 EPRINTF("fas_accept_pkt: switching target and lun slot scan\n");
2781 2780 fas->f_dslot = 1;
2782 2781
2783 2782 if ((fas->f_active[slot] == NULL) ||
2784 2783 ((fas->f_active[slot]->f_n_slots != NTAGS) &&
2785 2784 TAGGED(Tgt(sp)))) {
2786 2785 (void) fas_alloc_active_slots(fas, slot, KM_NOSLEEP);
2787 2786 }
2788 2787 if ((fas->f_active[slot] == NULL) ||
2789 2788 (NOTAG(Tgt(sp)) && (sp->cmd_pkt_flags & FLAG_TAGMASK))) {
2790 2789 IPRINTF("fatal error on non-zero lun pkt\n");
2791 2790 return (TRAN_FATAL_ERROR);
2792 2791 }
2793 2792 }
2794 2793
2795 2794 /*
2796 2795 * we accepted the command; increment the count
2797 2796 * (we may still reject later if TRAN_BUSY_OK)
2798 2797 */
2799 2798 fas_check_ncmds(fas);
2800 2799 fas->f_ncmds++;
2801 2800
2802 2801 /*
2803 2802 * if it is a nointr packet, start it now
2804 2803 * (NO_INTR pkts are not queued in the waitQ)
2805 2804 */
2806 2805 if (sp->cmd_pkt_flags & FLAG_NOINTR) {
2807 2806 EPRINTF("starting a nointr cmd\n");
2808 2807 fas_runpoll(fas, slot, sp);
2809 2808 sp->cmd_flags &= ~CFLAG_TRANFLAG;
2810 2809 goto done;
2811 2810 }
2812 2811
2813 2812 /*
2814 2813 * reset the throttle if we were draining
2815 2814 */
2816 2815 if ((fas->f_tcmds[slot] == 0) &&
2817 2816 (fas->f_throttle[slot] == DRAIN_THROTTLE)) {
2818 2817 DPRINTF("reset throttle\n");
2819 2818 ASSERT(fas->f_reset_delay[Tgt(sp)] == 0);
2820 2819 fas_full_throttle(fas, slot);
2821 2820 }
2822 2821
2823 2822 /*
2824 2823 * accept the command:
2825 2824 * If no readyQ and no bus free, and throttle is OK,
2826 2825 * run cmd immediately.
2827 2826 */
2828 2827 #ifdef FASDEBUG
2829 2828 fas->f_total_cmds++;
2830 2829 #endif
2831 2830
2832 2831 if ((fas->f_readyf[slot] == NULL) && (fas->f_state == STATE_FREE) &&
2833 2832 (fas->f_throttle[slot] > fas->f_tcmds[slot])) {
2834 2833 ASSERT(fas->f_current_sp == 0);
2835 2834 (void) fas_startcmd(fas, sp);
2836 2835 goto exit;
2837 2836 } else {
2838 2837 /*
2839 2838 * If FLAG_HEAD is set, run cmd if target and bus are
2840 2839 * available. if first cmd in ready Q is request sense
2841 2840 * then insert after this command, there shouldn't be more
2842 2841 * than one request sense.
2843 2842 */
2844 2843 if (sp->cmd_pkt_flags & FLAG_HEAD) {
2845 2844 struct fas_cmd *ssp = fas->f_readyf[slot];
2846 2845 EPRINTF("que head\n");
2847 2846 if (ssp &&
2848 2847 *(ssp->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
2849 2848 fas_head_of_readyQ(fas, sp);
2850 2849 } else if (ssp) {
2851 2850 struct fas_cmd *dp = ssp->cmd_forw;
2852 2851 ssp->cmd_forw = sp;
2853 2852 sp->cmd_forw = dp;
2854 2853 if (fas->f_readyb[slot] == ssp) {
2855 2854 fas->f_readyb[slot] = sp;
2856 2855 }
2857 2856 } else {
2858 2857 fas->f_readyf[slot] = fas->f_readyb[slot] = sp;
2859 2858 sp->cmd_forw = NULL;
2860 2859 }
2861 2860
2862 2861 /*
2863 2862 * for tagged targets, check for qfull condition and
2864 2863 * return TRAN_BUSY (if permitted), if throttle has been
2865 2864 * exceeded
2866 2865 */
2867 2866 } else if (TAGGED(Tgt(sp)) &&
2868 2867 (fas->f_tcmds[slot] >= fas->f_throttle[slot]) &&
2869 2868 (fas->f_throttle[slot] > HOLD_THROTTLE) &&
2870 2869 (flag == TRAN_BUSY_OK)) {
2871 2870 IPRINTF2(
2872 2871 "transport busy, slot=%x, ncmds=%x\n",
2873 2872 slot, fas->f_ncmds);
2874 2873 rval = TRAN_BUSY;
2875 2874 fas->f_ncmds--;
2876 2875 sp->cmd_flags &=
2877 2876 ~(CFLAG_PREPARED | CFLAG_IN_TRANSPORT);
2878 2877 goto done;
2879 2878 /*
2880 2879 * append to readyQ or start a new readyQ
2881 2880 */
2882 2881 } else if (fas->f_readyf[slot]) {
2883 2882 struct fas_cmd *dp = fas->f_readyb[slot];
2884 2883 ASSERT(dp != 0);
2885 2884 fas->f_readyb[slot] = sp;
2886 2885 sp->cmd_forw = NULL;
2887 2886 dp->cmd_forw = sp;
2888 2887 } else {
2889 2888 fas->f_readyf[slot] = fas->f_readyb[slot] = sp;
2890 2889 sp->cmd_forw = NULL;
2891 2890 }
2892 2891
2893 2892 }
2894 2893
2895 2894 done:
2896 2895 /*
2897 2896 * just in case that the bus is free and we haven't
2898 2897 * been able to restart for some reason
2899 2898 */
2900 2899 if (fas->f_state == STATE_FREE) {
2901 2900 (void) fas_istart(fas);
2902 2901 }
2903 2902
2904 2903 exit:
2905 2904 fas_check_ncmds(fas);
2906 2905 ASSERT(mutex_owned(FAS_MUTEX(fas)));
2907 2906 TRACE_0(TR_FAC_SCSI_FAS, TR__FAS_START_END, "fas_accept_pkt_end");
2908 2907 return (rval);
2909 2908 }
2910 2909
2911 2910 /*
2912 2911 * allocate a tag byte and check for tag aging
2913 2912 */
2914 2913 static char fas_tag_lookup[] =
2915 2914 {0, MSG_HEAD_QTAG, MSG_ORDERED_QTAG, 0, MSG_SIMPLE_QTAG};
2916 2915
/*
 * fas_alloc_tag():
 *	Allocate a tag (1..NTAGS-1; tag 0 is reserved for non-tagged
 *	cmds) for sp, store it in sp->cmd_tag[], and enter sp on the
 *	slot's active list. Also ages the currently-held tag and starts
 *	draining the tag queue when the age limit is exceeded.
 *	Returns 0 on success; -1 when all tags are in use (sp is then
 *	put back at the head of the readyQ).
 *	Caller must hold FAS_MUTEX.
 */
static int
fas_alloc_tag(struct fas *fas, struct fas_cmd *sp)
{
	struct f_slots *tag_slots;
	int tag;
	short slot = sp->cmd_slot;

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_ALLOC_TAG_START, "fas_alloc_tag_start");
	ASSERT(mutex_owned(FAS_MUTEX(fas)));

	tag_slots = fas->f_active[slot];
	ASSERT(tag_slots->f_n_slots == NTAGS);

alloc_tag:
	/* take the next rotor value; wrap past NTAGS back to 1 */
	tag = (fas->f_active[slot]->f_tags)++;
	if (fas->f_active[slot]->f_tags >= NTAGS) {
		/*
		 * we reserve tag 0 for non-tagged cmds
		 */
		fas->f_active[slot]->f_tags = 1;
	}
	EPRINTF1("tagged cmd, tag = %d\n", tag);

	/* Validate tag, should never fail. */
	if (tag_slots->f_slot[tag] == 0) {
		/*
		 * Store assigned tag and tag queue type.
		 * Note, in case of multiple choice, default to simple queue.
		 */
		ASSERT(tag < NTAGS);
		sp->cmd_tag[1] = (uchar_t)tag;
		sp->cmd_tag[0] = fas_tag_lookup[((sp->cmd_pkt_flags &
		    FLAG_TAGMASK) >> 12)];
		EPRINTF1("tag= %d\n", tag);
		tag_slots->f_slot[tag] = sp;
		(fas->f_tcmds[slot])++;
		ASSERT(mutex_owned(FAS_MUTEX(fas)));
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_ALLOC_TAG_END,
		    "fas_alloc_tag_end");
		return (0);

	} else {
		int age, i;

		/*
		 * Check tag age. If timeouts enabled and
		 * tag age greater than 1, print warning msg.
		 * If timeouts enabled and tag age greater than
		 * age limit, begin draining tag que to check for
		 * lost tag cmd.
		 */
		age = tag_slots->f_slot[tag]->cmd_age++;
		if (age >= fas->f_scsi_tag_age_limit &&
		    tag_slots->f_slot[tag]->cmd_pkt->pkt_time) {
			IPRINTF2("tag %d in use, age= %d\n", tag, age);
			DPRINTF("draining tag queue\n");
			if (fas->f_reset_delay[Tgt(sp)] == 0) {
				fas->f_throttle[slot] = DRAIN_THROTTLE;
			}
		}

		/* If tag in use, scan until a free one is found. */
		for (i = 1; i < NTAGS; i++) {
			tag = fas->f_active[slot]->f_tags;
			if (!tag_slots->f_slot[tag]) {
				EPRINTF1("found free tag %d\n", tag);
				break;
			}
			if (++(fas->f_active[slot]->f_tags) >= NTAGS) {
				/*
				 * we reserve tag 0 for non-tagged cmds
				 */
				fas->f_active[slot]->f_tags = 1;
			}
			EPRINTF1("found in use tag %d\n", tag);
		}

		/*
		 * If no free tags, we're in serious trouble.
		 * the target driver submitted more than 255
		 * requests
		 */
		if (tag_slots->f_slot[tag]) {
			IPRINTF1("slot %x: All tags in use!!!\n", slot);
			goto fail;
		}
		/* rotor now points at a free tag; claim it normally */
		goto alloc_tag;
	}

fail:
	fas_head_of_readyQ(fas, sp);

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_ALLOC_TAG_END,
	    "fas_alloc_tag_end");
	return (-1);
}
3013 3012
3014 3013 /*
3015 3014 * Internal Search Routine.
3016 3015 *
3017 3016 * Search for a command to start.
3018 3017 */
3019 3018 static int
3020 3019 fas_istart(struct fas *fas)
3021 3020 {
3022 3021 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_ISTART_START,
3023 3022 "fas_istart_start");
3024 3023 EPRINTF("fas_istart:\n");
3025 3024
3026 3025 if (fas->f_state == STATE_FREE && fas->f_ncmds > fas->f_ndisc) {
3027 3026 (void) fas_ustart(fas);
3028 3027 }
3029 3028 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_ISTART_END,
3030 3029 "fas_istart_end");
3031 3030 return (ACTION_RETURN);
3032 3031 }
3033 3032
/*
 * fas_ustart():
 *	Round-robin scan of the slots, starting at f_next_slot and
 *	stepping by f_dslot, for a readyQ command whose slot throttle
 *	permits another outstanding cmd; start the first one found.
 *	Returns the fas_startcmd() result, or FALSE if no slot had a
 *	startable command. f_next_slot is always advanced so the next
 *	scan begins at a different slot (fairness).
 */
static int
fas_ustart(struct fas *fas)
{
	struct fas_cmd *sp;
	short slot = fas->f_next_slot;
	short start_slot = slot;
	short dslot = fas->f_dslot;

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_USTART_START, "fas_ustart_start");
	EPRINTF1("fas_ustart: start_slot=%x\n", fas->f_next_slot);
	ASSERT(fas->f_current_sp == NULL);
	ASSERT(dslot != 0);
	if (dslot == NLUNS_PER_TARGET) {
		ASSERT((slot % NLUNS_PER_TARGET) == 0);
	}

	/*
	 * if readyQ not empty and we are not draining, then we
	 * can start another cmd
	 */
	do {
		/*
		 * If all cmds drained from tag Q, back to full throttle and
		 * start queueing up new cmds again.
		 */
		if (fas->f_throttle[slot] == DRAIN_THROTTLE &&
		    fas->f_tcmds[slot] == 0) {
			fas_full_throttle(fas, slot);
		}

		if (fas->f_readyf[slot] &&
		    (fas->f_throttle[slot] > fas->f_tcmds[slot])) {
			/* unlink head of this slot's readyQ and start it */
			sp = fas->f_readyf[slot];
			fas->f_readyf[slot] = sp->cmd_forw;
			if (sp->cmd_forw == NULL) {
				fas->f_readyb[slot] = NULL;
			}
			fas->f_next_slot = NEXTSLOT(slot, dslot);
			ASSERT((sp->cmd_pkt_flags & FLAG_NOINTR) == 0);
			TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_USTART_END,
			    "fas_ustart_end");
			return (fas_startcmd(fas, sp));
		} else {
			slot = NEXTSLOT(slot, dslot);
		}
	} while (slot != start_slot);

	EPRINTF("fas_ustart: no cmds to start\n");
	fas->f_next_slot = NEXTSLOT(slot, dslot);
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_USTART_NOT_FOUND_END,
	    "fas_ustart_end (not_found)");
	return (FALSE);
}
3087 3086
3088 3087 /*
3089 3088 * Start a command off
3090 3089 */
3091 3090 static int
3092 3091 fas_startcmd(struct fas *fas, struct fas_cmd *sp)
3093 3092 {
3094 3093 volatile struct fasreg *fasreg = fas->f_reg;
3095 3094 ushort_t nstate;
3096 3095 uchar_t cmd, target, lun;
3097 3096 ushort_t tshift;
3098 3097 volatile uchar_t *tp = fas->f_cmdarea;
3099 3098 struct scsi_pkt *pkt = CMD2PKT(sp);
3100 3099 int slot = sp->cmd_slot;
3101 3100 struct f_slots *slots = fas->f_active[slot];
3102 3101 int i, cdb_len;
3103 3102
3104 3103 #define LOAD_CMDP *(tp++)
3105 3104
3106 3105 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_STARTCMD_START, "fas_startcmd_start");
3107 3106
3108 3107 EPRINTF2("fas_startcmd: sp=0x%p flags=%x\n",
3109 3108 (void *)sp, sp->cmd_pkt_flags);
3110 3109 ASSERT((sp->cmd_flags & CFLAG_FREE) == 0);
3111 3110 ASSERT((sp->cmd_flags & CFLAG_COMPLETED) == 0);
3112 3111 ASSERT(fas->f_current_sp == NULL && fas->f_state == STATE_FREE);
3113 3112 if ((sp->cmd_pkt_flags & FLAG_NOINTR) == 0) {
3114 3113 ASSERT(fas->f_throttle[slot] > 0);
3115 3114 ASSERT(fas->f_reset_delay[Tgt(sp)] == 0);
3116 3115 }
3117 3116
3118 3117 target = Tgt(sp);
3119 3118 lun = Lun(sp);
3120 3119
3121 3120 /*
3122 3121 * if a non-tagged cmd is submitted to an active tagged target
3123 3122 * then drain before submitting this cmd; SCSI-2 allows RQSENSE
3124 3123 * to be untagged
3125 3124 */
3126 3125 if (((sp->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
3127 3126 TAGGED(target) && fas->f_tcmds[slot] &&
3128 3127 ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) &&
3129 3128 (*(sp->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
3130 3129 if ((sp->cmd_pkt_flags & FLAG_NOINTR) == 0) {
3131 3130 struct fas_cmd *dp;
3132 3131
3133 3132 IPRINTF("untagged cmd, start draining\n");
3134 3133
3135 3134 if (fas->f_reset_delay[Tgt(sp)] == 0) {
3136 3135 fas->f_throttle[slot] = DRAIN_THROTTLE;
3137 3136 }
3138 3137 dp = fas->f_readyf[slot];
3139 3138 fas->f_readyf[slot] = sp;
3140 3139 sp->cmd_forw = dp;
3141 3140 if (fas->f_readyb[slot] == NULL) {
3142 3141 fas->f_readyb[slot] = sp;
3143 3142 }
3144 3143 }
3145 3144 return (FALSE);
3146 3145 }
3147 3146
3148 3147 /*
3149 3148 * allocate a tag; if no tag available then put request back
3150 3149 * on the ready queue and return; eventually a cmd returns and we
3151 3150 * get going again or we timeout
3152 3151 */
3153 3152 if (TAGGED(target) && (sp->cmd_pkt_flags & FLAG_TAGMASK)) {
3154 3153 if (fas_alloc_tag(fas, sp)) {
3155 3154 return (FALSE);
3156 3155 }
3157 3156 } else {
3158 3157 /*
3159 3158 * tag slot 0 is reserved for non-tagged cmds
3160 3159 * and should be empty because we have drained
3161 3160 */
3162 3161 if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
3163 3162 ASSERT(fas->f_active[slot]->f_slot[0] == NULL);
3164 3163 fas->f_active[slot]->f_slot[0] = sp;
3165 3164 sp->cmd_tag[1] = 0;
3166 3165 if (*(sp->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
3167 3166 ASSERT(fas->f_tcmds[slot] == 0);
3168 3167 /*
3169 3168 * don't start any other cmd until this
3170 3169 * one is finished. The throttle is reset
3171 3170 * later in fas_watch()
3172 3171 */
3173 3172 fas->f_throttle[slot] = 1;
3174 3173 }
3175 3174 (fas->f_tcmds[slot])++;
3176 3175
3177 3176 }
3178 3177 }
3179 3178
3180 3179 fas->f_current_sp = sp;
3181 3180 fas->f_omsglen = 0;
3182 3181 tshift = 1<<target;
3183 3182 fas->f_sdtr_sent = fas->f_wdtr_sent = 0;
3184 3183 cdb_len = sp->cmd_actual_cdblen;
3185 3184
3186 3185 if (sp->cmd_pkt_flags & FLAG_RENEGOTIATE_WIDE_SYNC) {
3187 3186 fas_force_renegotiation(fas, Tgt(sp));
3188 3187 }
3189 3188
3190 3189 /*
3191 3190 * first send identify message, with or without disconnect priv.
3192 3191 */
3193 3192 if (sp->cmd_pkt_flags & FLAG_NODISCON) {
3194 3193 LOAD_CMDP = fas->f_last_msgout = MSG_IDENTIFY | lun;
3195 3194 ASSERT((sp->cmd_pkt_flags & FLAG_TAGMASK) == 0);
3196 3195 } else {
3197 3196 LOAD_CMDP = fas->f_last_msgout = MSG_DR_IDENTIFY | lun;
3198 3197 }
3199 3198
3200 3199 /*
3201 3200 * normal case, tagQ and we have negotiated wide and sync
3202 3201 * or we don't need to renegotiate because wide and sync
3203 3202 * have been disabled
3204 3203 * (proxy msg's don't have tag flag set)
3205 3204 */
3206 3205 if ((sp->cmd_pkt_flags & FLAG_TAGMASK) &&
3207 3206 ((fas->f_wide_known | fas->f_nowide) &
3208 3207 (fas->f_sync_known | fas->f_nosync) & tshift)) {
3209 3208
3210 3209 EPRINTF("tag cmd\n");
3211 3210 ASSERT((sp->cmd_pkt_flags & FLAG_NODISCON) == 0);
3212 3211
3213 3212 fas->f_last_msgout = LOAD_CMDP = sp->cmd_tag[0];
3214 3213 LOAD_CMDP = sp->cmd_tag[1];
3215 3214
3216 3215 nstate = STATE_SELECT_NORMAL;
3217 3216 cmd = CMD_SEL_ATN3 | CMD_DMA;
3218 3217
3219 3218 /*
3220 3219 * is this a proxy message
3221 3220 */
3222 3221 } else if (sp->cmd_flags & CFLAG_CMDPROXY) {
3223 3222
3224 3223 IPRINTF2("proxy cmd, len=%x, msg=%x\n",
3225 3224 sp->cmd_cdb[FAS_PROXY_DATA],
3226 3225 sp->cmd_cdb[FAS_PROXY_DATA+1]);
3227 3226 /*
3228 3227 * This is a proxy command. It will have
3229 3228 * a message to send as part of post-selection
3230 3229 * (e.g, MSG_ABORT or MSG_DEVICE_RESET)
3231 3230 */
3232 3231 fas->f_omsglen = sp->cmd_cdb[FAS_PROXY_DATA];
3233 3232 for (i = 0; i < (uint_t)fas->f_omsglen; i++) {
3234 3233 fas->f_cur_msgout[i] =
3235 3234 sp->cmd_cdb[FAS_PROXY_DATA+1+i];
3236 3235 }
3237 3236 sp->cmd_cdb[FAS_PROXY_RESULT] = FALSE;
3238 3237 cdb_len = 0;
3239 3238 cmd = CMD_SEL_STOP | CMD_DMA;
3240 3239 nstate = STATE_SELECT_N_SENDMSG;
3241 3240
3242 3241 /*
3243 3242 * always negotiate wide first and sync after wide
3244 3243 */
3245 3244 } else if (((fas->f_wide_known | fas->f_nowide) & tshift) == 0) {
3246 3245 int i = 0;
3247 3246
3248 3247 /* First the tag message bytes */
3249 3248 if (sp->cmd_pkt_flags & FLAG_TAGMASK) {
3250 3249 fas->f_cur_msgout[i++] = sp->cmd_tag[0];
3251 3250 fas->f_cur_msgout[i++] = sp->cmd_tag[1];
3252 3251 }
3253 3252
3254 3253 /*
3255 3254 * Set up to send wide negotiating message. This is getting
3256 3255 * a bit tricky as we dma out the identify message and
3257 3256 * send the other messages via the fifo buffer.
3258 3257 */
3259 3258 EPRINTF1("cmd with wdtr msg, tag=%x\n", sp->cmd_tag[1]);
3260 3259
3261 3260 fas_make_wdtr(fas, i, target, FAS_XFER_WIDTH);
3262 3261
3263 3262 cdb_len = 0;
3264 3263 nstate = STATE_SELECT_N_SENDMSG;
3265 3264 cmd = CMD_SEL_STOP | CMD_DMA;
3266 3265
3267 3266 /*
3268 3267 * negotiate sync xfer rate
3269 3268 */
3270 3269 } else if (((fas->f_sync_known | fas->f_nosync) & tshift) == 0) {
3271 3270 int i = 0;
3272 3271 /*
3273 3272 * Set up to send sync negotiating message. This is getting
3274 3273 * a bit tricky as we dma out the identify message and
3275 3274 * send the other messages via the fifo buffer.
3276 3275 */
3277 3276 if (sp->cmd_pkt_flags & FLAG_TAGMASK) {
3278 3277 fas->f_cur_msgout[i++] = sp->cmd_tag[0];
3279 3278 fas->f_cur_msgout[i++] = sp->cmd_tag[1];
3280 3279 }
3281 3280
3282 3281 fas_make_sdtr(fas, i, target);
3283 3282
3284 3283 cdb_len = 0;
3285 3284 cmd = CMD_SEL_STOP | CMD_DMA;
3286 3285 nstate = STATE_SELECT_N_SENDMSG;
3287 3286
3288 3287 /*
3289 3288 * normal cmds, no negotiations and not a proxy and no TQ
3290 3289 */
3291 3290 } else {
3292 3291
3293 3292 ASSERT((sp->cmd_pkt_flags & FLAG_TAGMASK) == 0);
3294 3293 EPRINTF("std. cmd\n");
3295 3294
3296 3295 nstate = STATE_SELECT_NORMAL;
3297 3296 cmd = CMD_SEL_ATN | CMD_DMA;
3298 3297 }
3299 3298
3300 3299 /*
3301 3300 * Now load cdb (if any)
3302 3301 */
3303 3302 for (i = 0; i < cdb_len; i++) {
3304 3303 LOAD_CMDP = sp->cmd_cdbp[i];
3305 3304 }
3306 3305
3307 3306 /*
3308 3307 * calculate total dma amount:
3309 3308 */
3310 3309 fas->f_lastcount = (uintptr_t)tp - (uintptr_t)fas->f_cmdarea;
3311 3310
3312 3311 /*
3313 3312 * load target id and enable bus id encoding and 32 bit counter
3314 3313 */
3315 3314 fas_reg_write(fas, (uchar_t *)&fasreg->fas_busid,
3316 3315 (target & 0xf) | FAS_BUSID_ENCODID | FAS_BUSID_32BIT_COUNTER);
3317 3316
3318 3317 FAS_SET_PERIOD_OFFSET_CONF3_REGS(fas, target);
3319 3318
3320 3319 fas_reg_cmd_write(fas, CMD_FLUSH);
3321 3320
3322 3321 FAS_DMA_READ(fas, fas->f_lastcount,
3323 3322 fas->f_dmacookie.dmac_address, 16, cmd);
3324 3323
3325 3324 New_state(fas, (int)nstate);
3326 3325
3327 3326 #ifdef FASDEBUG
3328 3327 if (DDEBUGGING) {
3329 3328 fas_dump_cmd(fas, sp);
3330 3329 }
3331 3330 #endif /* FASDEBUG */
3332 3331
3333 3332 /*
3334 3333 * if timeout == 0, then it has no effect on the timeout
3335 3334 * handling; we deal with this when an actual timeout occurs.
3336 3335 */
3337 3336 if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
3338 3337 ASSERT(fas->f_tcmds[slot] >= 1);
3339 3338 }
3340 3339 i = pkt->pkt_time - slots->f_timebase;
3341 3340
3342 3341 if (i == 0) {
3343 3342 EPRINTF("dup timeout\n");
3344 3343 (slots->f_dups)++;
3345 3344 slots->f_timeout = slots->f_timebase;
3346 3345 } else if (i > 0) {
3347 3346 EPRINTF("new timeout\n");
3348 3347 slots->f_timeout = slots->f_timebase = pkt->pkt_time;
3349 3348 slots->f_dups = 1;
3350 3349 }
3351 3350
3352 3351 fas_check_ncmds(fas);
3353 3352
3354 3353 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_STARTCMD_END, "fas_startcmd_end");
3355 3354
3356 3355 return (TRUE);
3357 3356 }
3358 3357
3359 3358 /*
3360 3359 * Interrupt Entry Point.
3361 3360 * Poll interrupts until they go away
3362 3361 */
3363 3362 static uint_t
3364 3363 fas_intr(caddr_t arg)
3365 3364 {
3366 3365 struct fas *fas = (struct fas *)arg;
3367 3366 int rval = DDI_INTR_UNCLAIMED;
3368 3367 int kstat_updated = 0;
3369 3368
3370 3369 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_POLL_START, "fas_intr_start");
3371 3370
3372 3371 do {
3373 3372 mutex_enter(FAS_MUTEX(fas));
3374 3373
3375 3374 do {
3376 3375 if (fas_intr_svc(fas)) {
3377 3376 /*
3378 3377 * do not return immediately here because
3379 3378 * we have to guarantee to always empty
3380 3379 * the waitQ and callbackQ in the interrupt
3381 3380 * handler
3382 3381 */
3383 3382 if (fas->f_polled_intr) {
3384 3383 rval = DDI_INTR_CLAIMED;
3385 3384 fas->f_polled_intr = 0;
3386 3385 }
3387 3386 } else {
3388 3387 rval = DDI_INTR_CLAIMED;
3389 3388 }
3390 3389 } while (INTPENDING(fas));
3391 3390
3392 3391 if (!kstat_updated && fas->f_intr_kstat &&
3393 3392 rval == DDI_INTR_CLAIMED) {
3394 3393 FAS_KSTAT_INTR(fas);
3395 3394 kstat_updated++;
3396 3395 }
3397 3396
3398 3397 /*
3399 3398 * check and empty the waitQ and the callbackQ
3400 3399 */
3401 3400 FAS_CHECK_WAITQ_AND_FAS_MUTEX_EXIT(fas);
3402 3401 FAS_EMPTY_CALLBACKQ(fas);
3403 3402
3404 3403 } while (INTPENDING(fas));
3405 3404
3406 3405 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_POLL_END, "fas_intr_end");
3407 3406
3408 3407 return (rval);
3409 3408 }
3410 3409
3411 3410 /*
3412 3411 * General interrupt service routine.
3413 3412 */
3414 3413 static char *dma_bits = DMA_BITS;
3415 3414
3416 3415 static int
3417 3416 fas_intr_svc(struct fas *fas)
3418 3417 {
3419 3418 static int (*evec[])(struct fas *fas) = {
3420 3419 fas_finish_select,
3421 3420 fas_reconnect,
3422 3421 fas_phasemanage,
3423 3422 fas_finish,
3424 3423 fas_reset_recovery,
3425 3424 fas_istart,
3426 3425 fas_abort_curcmd,
3427 3426 fas_reset_bus,
3428 3427 fas_reset_bus,
3429 3428 fas_handle_selection
3430 3429 };
3431 3430 int action;
3432 3431 uchar_t intr, stat;
3433 3432 volatile struct fasreg *fasreg = fas->f_reg;
3434 3433 int i = 0;
3435 3434
3436 3435 TRACE_0(TR_FAC_SCSI_FAS, TR_FASSVC_START, "fas_intr_svc_start");
3437 3436
3438 3437 /*
3439 3438 * A read of FAS interrupt register clears interrupt,
3440 3439 * so any other volatile information needs to be latched
3441 3440 * up prior to reading the interrupt register.
3442 3441 */
3443 3442 fas->f_stat = fas_reg_read(fas, &fasreg->fas_stat);
3444 3443
3445 3444 EPRINTF2("fas_intr_svc: state=%x stat=%x\n", fas->f_state,
3446 3445 fas->f_stat);
3447 3446
3448 3447 /*
3449 3448 * this wasn't our interrupt?
3450 3449 */
3451 3450 if ((fas->f_stat & FAS_STAT_IPEND) == 0) {
3452 3451 if (fas_check_dma_error(fas)) {
3453 3452 action = ACTION_RESET;
3454 3453 goto start_action;
3455 3454 }
3456 3455 return (-1);
3457 3456 }
3458 3457
3459 3458 /*
3460 3459 * if we are reset state, handle this first
3461 3460 */
3462 3461 if (fas->f_state == ACTS_RESET) {
3463 3462 action = ACTION_FINRST;
3464 3463 goto start_action;
3465 3464 }
3466 3465
3467 3466 /*
3468 3467 * check for gross error. fas366 hardware seems to register
3469 3468 * the gross error bit when a parity error is found. Make sure
3470 3469 * to ignore the gross error bit when a parity error is detected.
3471 3470 */
3472 3471 if ((fas->f_stat & FAS_STAT_GERR) &&
3473 3472 (fas->f_stat & FAS_STAT_PERR) == 0) {
3474 3473 action = fas_handle_gross_err(fas);
3475 3474 goto start_action;
3476 3475 }
3477 3476
3478 3477 /*
3479 3478 * now it is finally safe to read the interrupt register
3480 3479 * if we haven't done so yet
3481 3480 * Note: we don't read step register here but only in
3482 3481 * fas_finish_select(). It is not entirely safe but saves
3483 3482 * redundant PIOs or extra code in this critical path
3484 3483 */
3485 3484 fas->f_intr =
3486 3485 intr = fas_reg_read(fas, (uchar_t *)&fasreg->fas_intr);
3487 3486
3488 3487 /*
3489 3488 * read the fifo if there is something there or still in the
3490 3489 * input shuttle
3491 3490 */
3492 3491 stat = fas->f_stat & FAS_PHASE_MASK;
3493 3492
3494 3493 if ((intr & FAS_INT_RESEL) ||
3495 3494 ((stat != FAS_PHASE_DATA_IN) && (stat != FAS_PHASE_DATA_OUT) &&
3496 3495 ((fas->f_state & STATE_SELECTING) == 0) &&
3497 3496 (fas->f_state != ACTS_DATA_DONE) &&
3498 3497 (fas->f_state != ACTS_C_CMPLT))) {
3499 3498
3500 3499 fas->f_stat2 = fas_reg_read(fas, &fasreg->fas_stat2);
3501 3500
3502 3501 if (((fas->f_stat2 & FAS_STAT2_EMPTY) == 0) ||
3503 3502 (fas->f_stat2 & FAS_STAT2_ISHUTTLE)) {
3504 3503 fas_read_fifo(fas);
3505 3504 }
3506 3505 }
3507 3506
3508 3507 EPRINTF2("fas_intr_svc: intr=%x, stat=%x\n", fas->f_intr, fas->f_stat);
3509 3508 EPRINTF2("dmacsr=%b\n", fas->f_dma->dma_csr, dma_bits);
3510 3509
3511 3510 /*
3512 3511 * Based upon the current state of the host adapter driver
3513 3512 * we should be able to figure out what to do with an interrupt.
3514 3513 *
3515 3514 * The FAS asserts an interrupt with one or more of 8 possible
3516 3515 * bits set in its interrupt register. These conditions are
3517 3516 * SCSI bus reset detected, an illegal command fed to the FAS,
3518 3517 * one of DISCONNECT, BUS SERVICE, FUNCTION COMPLETE conditions
3519 3518 * for the FAS, a Reselection interrupt, or one of Selection
3520 3519 * or Selection with Attention.
3521 3520 *
3522 3521 * Of these possible interrupts, we can deal with some right
3523 3522 * here and now, irrespective of the current state of the driver.
3524 3523 *
3525 3524 * take care of the most likely interrupts first and call the action
3526 3525 * immediately
3527 3526 */
3528 3527 if ((intr & (FAS_INT_RESET|FAS_INT_ILLEGAL|FAS_INT_SEL|FAS_INT_SELATN|
3529 3528 FAS_INT_RESEL)) == 0) {
3530 3529 /*
3531 3530 * The rest of the reasons for an interrupt can
3532 3531 * be handled based purely on the state that the driver
3533 3532 * is currently in now.
3534 3533 */
3535 3534 if (fas->f_state & STATE_SELECTING) {
3536 3535 action = fas_finish_select(fas);
3537 3536
3538 3537 } else if (fas->f_state & STATE_ITPHASES) {
3539 3538 action = fas_phasemanage(fas);
3540 3539
3541 3540 } else {
3542 3541 fas_log(fas, CE_WARN, "spurious interrupt");
3543 3542 action = ACTION_RETURN;
3544 3543 }
3545 3544
3546 3545 } else if ((intr & FAS_INT_RESEL) && ((intr &
3547 3546 (FAS_INT_RESET|FAS_INT_ILLEGAL|FAS_INT_SEL|FAS_INT_SELATN)) == 0)) {
3548 3547
3549 3548 if ((fas->f_state & STATE_SELECTING) == 0) {
3550 3549 ASSERT(fas->f_state == STATE_FREE);
3551 3550 action = fas_reconnect(fas);
3552 3551 } else {
3553 3552 action = fas_reselect_preempt(fas);
3554 3553 }
3555 3554
3556 3555 } else if (intr & (FAS_INT_RESET | FAS_INT_ILLEGAL)) {
3557 3556 action = fas_illegal_cmd_or_bus_reset(fas);
3558 3557
3559 3558 } else if (intr & (FAS_INT_SEL|FAS_INT_SELATN)) {
3560 3559 action = ACTION_SELECT;
3561 3560 }
3562 3561
3563 3562 start_action:
3564 3563 while (action != ACTION_RETURN) {
3565 3564 ASSERT((action >= 0) && (action <= ACTION_SELECT));
3566 3565 TRACE_3(TR_FAC_SCSI_FAS, TR_FASSVC_ACTION_CALL,
3567 3566 "fas_intr_svc call: fas 0x%p, action %d (%d)",
3568 3567 fas, action, i);
3569 3568 i++;
3570 3569 action = (*evec[action])(fas);
3571 3570 }
3572 3571 exit:
3573 3572 TRACE_0(TR_FAC_SCSI_FAS, TR_FASSVC_END, "fas_intr_svc_end");
3574 3573
3575 3574 return (0);
3576 3575 }
3577 3576
3578 3577 /*
3579 3578 * Manage phase transitions.
3580 3579 */
3581 3580 static int
3582 3581 fas_phasemanage(struct fas *fas)
3583 3582 {
3584 3583 ushort_t state;
3585 3584 int action;
3586 3585 static int (*pvecs[])(struct fas *fas) = {
3587 3586 fas_handle_cmd_start,
3588 3587 fas_handle_cmd_done,
3589 3588 fas_handle_msg_out_start,
3590 3589 fas_handle_msg_out_done,
3591 3590 fas_handle_msg_in_start,
3592 3591 fas_handle_more_msgin,
3593 3592 fas_handle_msg_in_done,
3594 3593 fas_handle_clearing,
3595 3594 fas_handle_data_start,
3596 3595 fas_handle_data_done,
3597 3596 fas_handle_c_cmplt,
3598 3597 fas_reconnect,
3599 3598 fas_handle_unknown,
3600 3599 fas_reset_recovery
3601 3600 };
3602 3601 int i = 0;
3603 3602
3604 3603 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_PHASEMANAGE_START,
3605 3604 "fas_phasemanage_start");
3606 3605
3607 3606 do {
3608 3607 EPRINTF1("fas_phasemanage: %s\n",
3609 3608 fas_state_name(fas->f_state & STATE_ITPHASES));
3610 3609
3611 3610 TRACE_2(TR_FAC_SCSI_FAS, TR_FAS_PHASEMANAGE_CALL,
3612 3611 "fas_phasemanage_call: fas 0x%p (%d)", fas, i++);
3613 3612
3614 3613 state = fas->f_state;
3615 3614
3616 3615 if (!(state == STATE_FREE || state > ACTS_ENDVEC)) {
3617 3616 ASSERT(pvecs[state-1] != NULL);
3618 3617 action = (*pvecs[state-1]) (fas);
3619 3618 } else {
3620 3619 fas_log(fas, CE_WARN, "lost state in phasemanage");
3621 3620 action = ACTION_ABORT_ALLCMDS;
3622 3621 }
3623 3622
3624 3623 } while (action == ACTION_PHASEMANAGE);
3625 3624
3626 3625 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_PHASEMANAGE_END,
3627 3626 "fas_phasemanage_end");
3628 3627 return (action);
3629 3628 }
3630 3629
3631 3630 /*
3632 3631 * remove a cmd from active list and if timeout flag is set, then
3633 3632 * adjust timeouts; if a the same cmd will be resubmitted soon, don't
3634 3633 * bother to adjust timeouts (ie. don't set this flag)
3635 3634 */
3636 3635 static void
3637 3636 fas_remove_cmd(struct fas *fas, struct fas_cmd *sp, int new_timeout_flag)
3638 3637 {
3639 3638 int tag = sp->cmd_tag[1];
3640 3639 int slot = sp->cmd_slot;
3641 3640 struct f_slots *tag_slots = fas->f_active[slot];
3642 3641
3643 3642 ASSERT(sp != NULL);
3644 3643 EPRINTF4("remove tag %d slot %d for target %d.%d\n",
3645 3644 tag, slot, Tgt(sp), Lun(sp));
3646 3645
3647 3646 if (sp == tag_slots->f_slot[tag]) {
3648 3647 tag_slots->f_slot[tag] = NULL;
3649 3648 fas->f_tcmds[slot]--;
3650 3649 }
3651 3650 if (fas->f_current_sp == sp) {
3652 3651 fas->f_current_sp = NULL;
3653 3652 }
3654 3653
3655 3654 ASSERT(sp != fas->f_active[sp->cmd_slot]->f_slot[sp->cmd_tag[1]]);
3656 3655
3657 3656 if (new_timeout_flag != NEW_TIMEOUT) {
3658 3657 return;
3659 3658 }
3660 3659
3661 3660 /*
3662 3661 * Figure out what to set tag Q timeout for...
3663 3662 *
3664 3663 * Optimize: If we have duplicate's of same timeout
3665 3664 * we're using, then we'll use it again until we run
3666 3665 * out of duplicates. This should be the normal case
3667 3666 * for block and raw I/O.
3668 3667 * If no duplicates, we have to scan through tag que and
3669 3668 * find the longest timeout value and use it. This is
3670 3669 * going to take a while...
3671 3670 */
3672 3671 if (sp->cmd_pkt->pkt_time == tag_slots->f_timebase) {
3673 3672 if (--(tag_slots->f_dups) <= 0) {
3674 3673 if (fas->f_tcmds[slot]) {
3675 3674 struct fas_cmd *ssp;
3676 3675 uint_t n = 0;
3677 3676 ushort_t t = tag_slots->f_n_slots;
3678 3677 ushort_t i;
3679 3678 /*
3680 3679 * This crude check assumes we don't do
3681 3680 * this too often which seems reasonable
3682 3681 * for block and raw I/O.
3683 3682 */
3684 3683 for (i = 0; i < t; i++) {
3685 3684 ssp = tag_slots->f_slot[i];
3686 3685 if (ssp &&
3687 3686 (ssp->cmd_pkt->pkt_time > n)) {
3688 3687 n = ssp->cmd_pkt->pkt_time;
3689 3688 tag_slots->f_dups = 1;
3690 3689 } else if (ssp &&
3691 3690 (ssp->cmd_pkt->pkt_time == n)) {
3692 3691 tag_slots->f_dups++;
3693 3692 }
3694 3693 }
3695 3694 tag_slots->f_timebase = n;
3696 3695 EPRINTF1("searching, new_timeout= %d\n", n);
3697 3696 } else {
3698 3697 tag_slots->f_dups = 0;
3699 3698 tag_slots->f_timebase = 0;
3700 3699 }
3701 3700 }
3702 3701 }
3703 3702 tag_slots->f_timeout = tag_slots->f_timebase;
3704 3703
3705 3704 ASSERT(fas->f_ncmds >= fas->f_ndisc);
3706 3705 }
3707 3706
3708 3707 /*
3709 3708 * decrement f_ncmds and f_ndisc for this cmd before completing
3710 3709 */
3711 3710 static void
3712 3711 fas_decrement_ncmds(struct fas *fas, struct fas_cmd *sp)
3713 3712 {
3714 3713 ASSERT((sp->cmd_flags & CFLAG_FREE) == 0);
3715 3714 if ((sp->cmd_flags & CFLAG_FINISHED) == 0) {
3716 3715 fas->f_ncmds--;
3717 3716 if (sp->cmd_flags & CFLAG_CMDDISC) {
3718 3717 fas->f_ndisc--;
3719 3718 }
3720 3719 sp->cmd_flags |= CFLAG_FINISHED;
3721 3720 sp->cmd_flags &= ~CFLAG_CMDDISC;
3722 3721 }
3723 3722 ASSERT((fas->f_ncmds >= 0) && (fas->f_ndisc >= 0));
3724 3723 ASSERT(fas->f_ncmds >= fas->f_ndisc);
3725 3724 }
3726 3725
3727 3726 /*
3728 3727 * Most commonly called phase handlers:
3729 3728 *
3730 3729 * Finish routines
3731 3730 */
/*
 * fas_finish(): complete the current command after the target has sent
 * status + command-complete.  Re-enables reselection, handles check
 * condition / queue-full status, computes the data residue, and calls
 * the packet completion callback (directly, or via auto request sense).
 * Returns an ACTION_* code for the caller's state machine.
 */
static int
fas_finish(struct fas *fas)
{
	struct fas_cmd *sp = fas->f_current_sp;
	struct scsi_pkt *pkt = CMD2PKT(sp);
	int action = ACTION_SEARCH;
	struct scsi_status *status =
	    (struct scsi_status *)sp->cmd_pkt->pkt_scbp;

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_START,
	    "fas_finish_start");
	EPRINTF("fas_finish\n");

#ifdef FAS_TEST
	if (fas_test_stop && (sp->cmd_pkt_flags & 0x80000000)) {
		debug_enter("untagged cmd completed");
	}
#endif

	/*
	 * immediately enable reselects
	 */
	fas_reg_cmd_write(fas, CMD_EN_RESEL);
	if (status->sts_chk) {
		/*
		 * In the case that we are getting a check condition
		 * clear our knowledge of synchronous capabilities.
		 * This will unambiguously force a renegotiation
		 * prior to any possible data transfer (we hope),
		 * including the data transfer for a UNIT ATTENTION
		 * condition generated by somebody powering on and
		 * off a target.
		 */
		fas_force_renegotiation(fas, Tgt(sp));
	}

	/*
	 * backoff sync/wide if there were parity errors
	 */
	if (sp->cmd_pkt->pkt_statistics & STAT_PERR) {
		fas_sync_wide_backoff(fas, sp, sp->cmd_slot);
#ifdef FAS_TEST
		if (fas_test_stop) {
			debug_enter("parity error");
		}
#endif
	}

	/*
	 * Free from active list and update counts
	 * We need to clean up this cmd now, just in case fas_ustart()
	 * hits a reset or other fatal transport error
	 */
	fas_check_ncmds(fas);
	fas_remove_cmd(fas, sp, NEW_TIMEOUT);
	fas_decrement_ncmds(fas, sp);
	fas_check_ncmds(fas);

	/*
	 * go to state free and try to start a new cmd now
	 */
	New_state(fas, STATE_FREE);

	/*
	 * only kick off the next command when status was clean (scb == 0)
	 * and no interrupt is already pending on the chip
	 */
	if ((fas->f_ncmds > fas->f_ndisc) && (*((char *)status) == 0) &&
	    (INTPENDING(fas) == 0)) {
		if (fas_ustart(fas)) {
			action = ACTION_RETURN;
		}
	}

	/*
	 * if there was a data xfer then calculate residue and
	 * sync data for consistent memory xfers
	 */
	if (pkt->pkt_state & STATE_XFERRED_DATA) {
		pkt->pkt_resid = sp->cmd_dmacount - sp->cmd_data_count;
		if (sp->cmd_flags & CFLAG_CMDIOPB) {
			/* length 0 means sync the whole mapping */
			(void) ddi_dma_sync(sp->cmd_dmahandle, 0, (uint_t)0,
			    DDI_DMA_SYNC_FORCPU);
		}
		if (pkt->pkt_resid) {
			IPRINTF3("%d.%d finishes with %ld resid\n",
			    Tgt(sp), Lun(sp), pkt->pkt_resid);
		}
	}

	if (sp->cmd_pkt_flags & FLAG_NOINTR) {
		/* polled command: caller completes synchronously */
		fas_call_pkt_comp(fas, sp);
		action = ACTION_RETURN;
	} else {
		/*
		 * start an autorequest sense if there was a check condition.
		 * if arq has not been enabled, fas_handle_sts_chk will
		 * do the callback
		 */
		if (status->sts_chk) {
			if (fas_handle_sts_chk(fas, sp)) {
				/*
				 * we can't start an arq because one is
				 * already in progress. the target is
				 * probably confused
				 */
				action = ACTION_ABORT_CURCMD;
			}
		} else if ((*((char *)status) & STATUS_MASK) ==
		    STATUS_QFULL) {
			fas_handle_qfull(fas, sp);
		} else {
#ifdef FAS_TEST
			/* fault-injection hooks, test builds only */
			if (fas_arqs_failure && (status->sts_chk == 0)) {
				struct scsi_arq_status *arqstat;
				status->sts_chk = 1;
				arqstat = (struct scsi_arq_status *)
				    (sp->cmd_pkt->pkt_scbp);
				arqstat->sts_rqpkt_reason = CMD_TRAN_ERR;
				sp->cmd_pkt->pkt_state |= STATE_ARQ_DONE;
				fas_arqs_failure = 0;
			}
			if (fas_tran_err) {
				sp->cmd_pkt->pkt_reason = CMD_TRAN_ERR;
				fas_tran_err = 0;
			}
#endif
			fas_call_pkt_comp(fas, sp);
		}
	}

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_END, "fas_finish_end");
	return (action);
}
3862 3861
3863 3862 /*
3864 3863 * Complete the process of selecting a target
3865 3864 */
/*
 * fas_finish_select(): complete the process of selecting a target.
 * Examines the chip's sequence-step register to classify how far the
 * selection got, or handles selection timeout/failure.  Returns an
 * ACTION_* code (or dispatches into fas_handle_unknown() on success).
 */
static int
fas_finish_select(struct fas *fas)
{
	volatile struct dma *dmar = fas->f_dma;
	struct fas_cmd *sp = fas->f_current_sp;
	uchar_t intr = fas->f_intr;
	uchar_t step;

	/* sequence-step register tells how far the selection sequence got */
	step = fas_reg_read(fas, &fas->f_reg->fas_step) & FAS_STEP_MASK;

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_SELECT_START,
	    "fas_finish_select_start");
	EPRINTF("fas_finish_select:\n");
	ASSERT(sp != 0);

	/*
	 * Check for DMA gate array errors
	 */
	if ((fas->f_dma_csr = fas_dma_reg_read(fas, &dmar->dma_csr))
	    & DMA_ERRPEND) {
		/*
		 * It would be desirable to set the ATN* line and attempt to
		 * do the whole schmear of INITIATOR DETECTED ERROR here,
		 * but that is too hard to do at present.
		 */
		fas_log(fas, CE_WARN,
		    "Unrecoverable DMA error during selection");
		fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);

		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_SELECT_RESET1_END,
		    "fas_finish_select_end (ACTION_RESET1)");
		return (ACTION_RESET);
	}

	/*
	 * Shut off DMA gate array
	 */
	FAS_FLUSH_DMA(fas);

	/*
	 * Did something respond to selection?
	 */
	if (intr == (FAS_INT_BUS|FAS_INT_FCMP)) {
		/*
		 * We successfully selected a target (we think).
		 * Now we figure out how botched things are
		 * based upon the kind of selection we were
		 * doing and the state of the step register.
		 */
		switch (step) {
		case FAS_STEP_ARBSEL:
			/*
			 * In this case, we selected the target, but went
			 * neither into MESSAGE OUT nor COMMAND phase.
			 * However, this isn't a fatal error, so we just
			 * drive on.
			 *
			 * This might be a good point to note that we have
			 * a target that appears to not accommodate
			 * disconnecting,
			 * but it really isn't worth the effort to distinguish
			 * such targets specially from others.
			 */
			/* FALLTHROUGH */

		case FAS_STEP_SENTID:
			/*
			 * In this case, we selected the target and sent
			 * message byte and have stopped with ATN* still on.
			 * This case should only occur if we use the SELECT
			 * AND STOP command.
			 */
			/* FALLTHROUGH */

		case FAS_STEP_NOTCMD:
			/*
			 * In this case, we either didn't transition to command
			 * phase, or,
			 * if we were using the SELECT WITH ATN3 command,
			 * we possibly didn't send all message bytes.
			 */
			break;

		case FAS_STEP_PCMD:
			/*
			 * In this case, not all command bytes transferred.
			 */
			/* FALLTHROUGH */

		case FAS_STEP_DONE:
			/*
			 * This is the usual 'good' completion point.
			 * If we sent message byte(s), we subtract
			 * off the number of message bytes that were
			 * ahead of the command.
			 */
			sp->cmd_pkt->pkt_state |= STATE_SENT_CMD;
			break;

		default:
			fas_log(fas, CE_WARN,
			    "bad sequence step (0x%x) in selection", step);
			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_FINISH_SELECT_RESET3_END,
			    "fas_finish_select_end (ACTION_RESET3)");
			return (ACTION_RESET);
		}

		/*
		 * OR in common state...
		 */
		sp->cmd_pkt->pkt_state |= (STATE_GOT_BUS|STATE_GOT_TARGET);

		/*
		 * data pointer initialization has already been done
		 */
		New_state(fas, ACTS_UNKNOWN);
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_SELECT_ACTION3_END,
		    "fas_finish_select_end (action3)");
		return (fas_handle_unknown(fas));

	} else if (intr == FAS_INT_DISCON) {
		/*
		 * selection timeout: nobody home at that target id.
		 * make sure we negotiate when this target comes
		 * on line later on
		 */
		fas_force_renegotiation(fas, Tgt(sp));

		fas->f_sdtr_sent = fas->f_wdtr_sent = 0;
		sp->cmd_pkt->pkt_state |= STATE_GOT_BUS;

		/*
		 * Set the throttle to DRAIN_THROTTLE to make
		 * sure any disconnected commands will get timed out
		 * in case the drive dies
		 */

		if (fas->f_reset_delay[Tgt(sp)] == 0) {
			fas->f_throttle[sp->cmd_slot] = DRAIN_THROTTLE;
		}

		fas_set_pkt_reason(fas, sp, CMD_INCOMPLETE, 0);

		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_SELECT_FINISH_END,
		    "fas_finish_select_end (ACTION_FINISH)");
		return (ACTION_FINISH);
	} else {
		/* neither selected nor timed out; chip state is suspect */
		fas_printstate(fas, "undetermined selection failure");
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_SELECT_RESET2_END,
		    "fas_finish_select_end (ACTION_RESET2)");
		return (ACTION_RESET);
	}
	_NOTE(NOT_REACHED)
	/* NOTREACHED */
}
4021 4020
4022 4021 /*
4023 4022 * a selection got preempted by a reselection; shut down dma
4024 4023 * and put back cmd in the ready queue unless NOINTR
4025 4024 */
/*
 * fas_reselect_preempt(): our selection attempt lost arbitration to a
 * target reselecting us.  Shut down DMA, service the reselection via
 * fas_reconnect(), then put the preempted command back at the head of
 * the ready queue (unless it is a proxy command).  Returns the
 * ACTION_* code from fas_reconnect().
 */
static int
fas_reselect_preempt(struct fas *fas)
{
	int rval;

	/*
	 * A reselection attempt glotzed our selection attempt.
	 * we put request back in the ready queue
	 */
	struct fas_cmd *sp = fas->f_current_sp;

	/*
	 * Shut off DMA gate array
	 */
	FAS_FLUSH_DMA(fas);

	/*
	 * service the reconnect now and clean up later
	 */
	New_state(fas, STATE_FREE);
	rval = fas_reconnect(fas);

	/*
	 * If selection for a non-tagged command is preempted, the
	 * command could be stuck because throttle was set to DRAIN,
	 * and a disconnected command timeout follows.
	 */
	if ((sp->cmd_pkt_flags & FLAG_TAGMASK) == 0)
		fas->f_throttle[sp->cmd_slot] = 1;

	if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
		fas_remove_cmd(fas, sp, NEW_TIMEOUT);
	}

	/*
	 * if we attempted to renegotiate on this cmd, undo this now
	 * (the negotiation never completed, so forget it was attempted)
	 */
	if (fas->f_wdtr_sent) {
		fas->f_wide_known &= ~(1<<Tgt(sp));
		fas->f_wdtr_sent = 0;
	}
	if (fas->f_sdtr_sent) {
		fas->f_sync_known &= ~(1<<Tgt(sp));
		fas->f_sdtr_sent = 0;
	}

	/* retry this command first once the bus is free again */
	fas_head_of_readyQ(fas, sp);

	return (rval);
}
4076 4075
4077 4076 /*
4078 4077 * Handle the reconnection of a target
4079 4078 */
/*
 * fas_reconnect(): handle the reconnection (reselection) of a target.
 *
 * Runs in two phases, keyed off fas->f_state:
 *  - default: first interrupt; read target id + IDENTIFY from the fifo,
 *    compute the slot, and for tagged targets start a 2-byte DMA to pull
 *    in the queue-tag message (falls through to ACTS_RESEL).
 *  - ACTS_RESEL: second interrupt; validate phase, fetch the tag bytes
 *    and look up the reconnecting command.
 * On any sanity failure, bad_reselect names the problem and we reset
 * the bus.  Returns an ACTION_* code.
 */
static int
fas_reconnect(struct fas *fas)
{
	volatile struct fasreg *fasreg = fas->f_reg;
	struct fas_cmd *sp = NULL;
	uchar_t target, lun;
	uchar_t tmp;
	uchar_t slot;
	char *bad_reselect = NULL;	/* non-NULL => failure reason */

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_RECONNECT_START,
	    "fas_reconnect_start");
	EPRINTF("fas_reconnect:\n");

	fas_check_ncmds(fas);

	switch (fas->f_state) {
	default:
		/*
		 * Pick up target id from fifo
		 *
		 * There should only be the reselecting target's id
		 * and an identify message in the fifo.
		 */
		target = fas->f_fifo[0];

		/*
		 * we know the target so update period, conf3,
		 * offset reg, if necessary, and accept the msg
		 */
		FAS_SET_PERIOD_OFFSET_CONF3_REGS(fas, target);

		/*
		 * now we can accept the message. an untagged
		 * target will go immediately into data phase so
		 * the period/offset/conf3 registers need to be
		 * updated before accepting the message
		 */
		fas_reg_cmd_write(fas, CMD_MSG_ACPT);

		if (fas->f_fifolen != 2) {
			bad_reselect = "bad reselect bytes";
			break;
		}

		/*
		 * normal initial reconnect; we get another interrupt later
		 * for the tag
		 */
		New_state(fas, ACTS_RESEL);

		if (fas->f_stat & FAS_STAT_PERR) {
			break;
		}

		/*
		 * Check sanity of message.
		 */
		tmp = fas->f_fifo[1];
		fas->f_last_msgin = tmp;

		if (!(IS_IDENTIFY_MSG(tmp)) || (tmp & INI_CAN_DISCON)) {
			bad_reselect = "bad identify msg";
			break;
		}

		/* low bits of the IDENTIFY message carry the lun */
		lun = tmp & (NLUNS_PER_TARGET-1);

		EPRINTF2("fas_reconnect: target=%x, idmsg=%x\n",
		    target, tmp);

		fas->f_resel_slot = slot = (target * NLUNS_PER_TARGET) | lun;

		fas_reg_write(fas, (uchar_t *)&fasreg->fas_busid,
		    (target & 0xf) | FAS_BUSID_ENCODID |
		    FAS_BUSID_32BIT_COUNTER);

		/*
		 * If tag queueing in use, DMA in tag.
		 * Otherwise, we're ready to go.
		 * if tag 0 slot is non-empty, a non-tagged cmd is
		 * reconnecting
		 */
		if (TAGGED(target) && fas->f_tcmds[slot] &&
		    (fas->f_active[slot]->f_slot[0] == NULL)) {
			volatile uchar_t *c =
			    (uchar_t *)fas->f_cmdarea;

			/*
			 * If we've been doing tagged queueing and this
			 * request doesn't do it,
			 * maybe it was disabled for this one. This is rather
			 * dangerous as it blows all pending tagged cmds away.
			 * But if target is confused, then we'll blow up
			 * shortly.
			 */
			/* sentinels; overwritten by the tag-msg DMA below */
			*c++ = INVALID_MSG;
			*c = INVALID_MSG;

			FAS_DMA_WRITE_SETUP(fas, 2,
			    fas->f_dmacookie.dmac_address);

			/*
			 * For tagged queuing, we should still be in msgin
			 * phase.
			 * If not, then either we aren't running tagged
			 * queueing like we thought or the target died.
			 */
			if (INTPENDING(fas) == 0) {
				EPRINTF1("slow reconnect, slot=%x\n", slot);
				TRACE_0(TR_FAC_SCSI_FAS,
				    TR_FAS_RECONNECT_RETURN1_END,
				    "fas_reconnect_end (_RETURN1)");
				return (ACTION_RETURN);
			}

			fas->f_stat = fas_reg_read(fas, &fasreg->fas_stat);
			fas->f_intr = fas_reg_read(fas, &fasreg->fas_intr);
			if (fas->f_intr & (FAS_INT_ILLEGAL | FAS_INT_RESET)) {
				return (fas_illegal_cmd_or_bus_reset(fas));
			}

			if ((fas->f_stat & FAS_PHASE_MASK) !=
			    FAS_PHASE_MSG_IN) {
				bad_reselect = "not in msgin phase";
				break;
			}

			if (fas->f_intr & FAS_INT_DISCON) {
				bad_reselect = "unexpected bus free";
				break;
			}
		} else {
			/* untagged: the command lives in tag slot 0 */
			fas->f_current_sp = sp = fas->f_active[slot]->f_slot[0];
			break;
		}
		/*FALLTHROUGH*/

	case ACTS_RESEL:
	{
		volatile uchar_t *c =
		    (uchar_t *)fas->f_cmdarea;
		struct f_slots *tag_slots;
		int id, tag;
		uint_t i;

		slot = fas->f_resel_slot;
		target = slot/NLUNS_PER_TARGET;

		if ((fas->f_stat & FAS_PHASE_MASK) !=
		    FAS_PHASE_MSG_IN) {
			IPRINTF1("no tag for slot %x\n", slot);
			if (fas->f_intr & ~(FAS_INT_BUS |
			    FAS_INT_FCMP)) {
				New_state(fas, ACTS_UNKNOWN);
				TRACE_0(TR_FAC_SCSI_FAS,
				    TR_FAS_RECONNECT_PHASEMANAGE_END,
				    "fas_reconnect_end (_PHASEMANAGE)");
				return (ACTION_PHASEMANAGE);
			} else {
				bad_reselect = "not in msgin phase";
				break;
			}
		}
		/* DMA in the two queue-tag message bytes */
		fas_reg_cmd_write(fas, CMD_TRAN_INFO|CMD_DMA);
		fas_dma_reg_write(fas, &fas->f_dma->dma_csr,
		    fas->f_dma_csr);

		fas_reg_cmd_write(fas, CMD_MSG_ACPT);

		for (i = 0; i < (uint_t)RECONNECT_TAG_RCV_TIMEOUT;
		    i++) {
			/*
			 * timeout is not very accurate but this
			 * should take no time at all
			 */
			if (INTPENDING(fas)) {
				fas->f_stat = fas_reg_read(fas,
				    (uchar_t *)&fas->f_reg->fas_stat);
				fas->f_intr = fas_reg_read(fas,
				    (uchar_t *)&fas->f_reg->fas_intr);
				if (fas->f_intr & (FAS_INT_RESET |
				    FAS_INT_ILLEGAL)) {
					return (
					    fas_illegal_cmd_or_bus_reset
					    (fas));
				}
				if (fas->f_intr & FAS_INT_FCMP) {
					break;
				}
			}
		}

		if (i == (uint_t)RECONNECT_TAG_RCV_TIMEOUT) {
			bad_reselect = "timeout on receiving tag msg";
			break;
		}

		FAS_FLUSH_DMA(fas);

		/*
		 * we should really do a sync here but that
		 * hurts performance too much; we'll just hang
		 * around till the tag byte flips
		 * This is necessary on any system with an
		 * XBox
		 */
		if (*c == INVALID_MSG) {
			EPRINTF(
			    "fas_reconnect: invalid msg, polling\n");
			/* bounded spin waiting for the DMA'd byte to land */
			for (i = 0; i < 1000000; i++) {
				if (*c != INVALID_MSG)
					break;
			}
		}

		if (fas->f_stat & FAS_STAT_PERR) {
			break;
		}

		/* first byte must be a queue-tag message code */
		if ((fas->f_stat & FAS_STAT_XZERO) == 0 ||
		    (id = *c++) < MSG_SIMPLE_QTAG ||
		    id > MSG_ORDERED_QTAG) {
			/*
			 * Target agreed to do tagged queueing
			 * and lied!
			 * This problem implies the drive firmware is
			 * broken.
			 */
			bad_reselect = "botched tag";
			break;
		}
		tag = *c;

		/* Set ptr to reconnecting scsi pkt */
		tag_slots = fas->f_active[slot];
		if (tag_slots != NULL) {
			sp = tag_slots->f_slot[tag];
		} else {
			bad_reselect = "Invalid tag";
			break;
		}

		fas->f_current_sp = sp;
	}
	}

	if (fas->f_stat & FAS_STAT_PERR) {
		sp = NULL;
		bad_reselect = "Parity error in reconnect msg's";
	}

	/* a reconnecting cmd must be disconnected (or a proxy cmd) */
	if ((sp == NULL ||
#ifdef FAS_TEST
	    (fas_atest_reconn & (1<<Tgt(sp))) ||
#endif
	    (sp->cmd_flags & (CFLAG_CMDDISC|CFLAG_CMDPROXY)) == 0)) {
		/*
		 * this shouldn't really happen, so it is better
		 * to reset the bus; some disks accept the abort
		 * and then still reconnect
		 */
		if (bad_reselect == NULL) {
			bad_reselect = "no command";
		}
#ifdef FAS_TEST
		if (sp && !(fas_atest_reconn & (1<<Tgt(sp))) &&
		    fas_test_stop) {
			debug_enter("bad reconnect");
		} else {
			fas_atest_reconn = 0;
		}
#endif
		goto bad;

		/*
		 * XXX remove this case or make it an ASSERT
		 */
	} else if (sp->cmd_flags & CFLAG_CMDPROXY) {
		/*
		 * If we got here, we were already attempting to
		 * run a polled proxy command for this target.
		 * Set ATN and, copy in the message, and drive
		 * on (ignoring any parity error on the identify).
		 */
		IPRINTF1("fas_reconnect: fielding proxy cmd for %d\n",
		    target);
		fas_assert_atn(fas);
		fas->f_omsglen = sp->cmd_cdb[FAS_PROXY_DATA];
		tmp = 0;
		while (tmp < fas->f_omsglen) {
			fas->f_cur_msgout[tmp] =
			    sp->cmd_cdb[FAS_PROXY_DATA+1+tmp];
			tmp++;
		}
		sp->cmd_cdb[FAS_PROXY_RESULT] = FALSE;

		/*
		 * pretend that the disconnected cmd is still disconnected
		 * (this prevents ndisc from going negative)
		 */
		fas->f_ndisc++;
		ASSERT((fas->f_ncmds >= 0) && (fas->f_ndisc >= 0));
		ASSERT(fas->f_ncmds >= fas->f_ndisc);
	}

	ASSERT(fas->f_resel_slot == slot);
	ASSERT(fas->f_ndisc > 0);
	fas->f_ndisc--;
	sp->cmd_flags &= ~CFLAG_CMDDISC;
	New_state(fas, ACTS_UNKNOWN);

	/*
	 * A reconnect may imply a restore pointers operation
	 * Note that some older disks (Micropolis in Pbox) do not
	 * send a save data ptr on disconnect if all data has been
	 * xferred. So, we cannot restore ptrs yet here.
	 */
	if ((sp->cmd_flags & CFLAG_DMAVALID) &&
	    (sp->cmd_data_count != sp->cmd_saved_data_count)) {
		sp->cmd_flags |= CFLAG_RESTORE_PTRS;
	}

	/*
	 * Return to await the FUNCTION COMPLETE interrupt we
	 * should get out of accepting the IDENTIFY message.
	 */
	EPRINTF2("Reconnecting %d.%d\n", target, slot % NLUNS_PER_TARGET);
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_RECONNECT_RETURN2_END,
	    "fas_reconnect_end (_RETURN2)");
	return (ACTION_RETURN);

bad:
	if (sp && (fas->f_stat & FAS_STAT_PERR)) {
		sp->cmd_pkt->pkt_statistics |= STAT_PERR;
	}
	fas_log(fas, CE_WARN, "target %x: failed reselection (%s)",
	    target, bad_reselect);

#ifdef FASDEBUG
	fas_printstate(fas, "failed reselection");
#endif
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_RECONNECT_RESET5_END,
	    "fas_reconnect_end (_RESET5)");
	return (ACTION_RESET);
}
4426 4425
4427 4426 /*
4428 4427 * handle unknown bus phase
4429 4428 * we don't know what to expect so check status register for current
4430 4429 * phase
4431 4430 */
/*
 * fas_handle_unknown(): handle an unknown bus phase by dispatching on
 * the phase bits of the chip status register, or — when the target has
 * dropped off the bus (FAS_INT_DISCON) — sort out whether the bus free
 * was the expected result of an ABORT/BUS DEVICE RESET message, a
 * rejected negotiation, or a genuinely unexpected disconnect.
 * Returns an ACTION_* code or chains into the per-phase handler.
 */
int
fas_handle_unknown(struct fas *fas)
{
	TRACE_1(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_UNKNOWN_START,
	    "fas_handle_unknown_start: fas 0x%p", fas);
	EPRINTF("fas_handle_unknown:\n");

	if ((fas->f_intr & FAS_INT_DISCON) == 0) {
		/*
		 * we call actions here rather than returning to phasemanage
		 * (this is the most frequently called action)
		 */
		switch (fas->f_stat & FAS_PHASE_MASK) {
		case FAS_PHASE_DATA_IN:
		case FAS_PHASE_DATA_OUT:
			New_state(fas, ACTS_DATA);
			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_HANDLE_UNKNOWN_PHASE_DATA_END,
			    "fas_handle_unknown_end (phase_data)");
			return (fas_handle_data_start(fas));

		case FAS_PHASE_MSG_OUT:
			New_state(fas, ACTS_MSG_OUT);
			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_HANDLE_UNKNOWN_PHASE_MSG_OUT_END,
			    "fas_handle_unknown_end (phase_msg_out)");
			return (fas_handle_msg_out_start(fas));

		case FAS_PHASE_MSG_IN:
			New_state(fas, ACTS_MSG_IN);
			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_HANDLE_UNKNOWN_PHASE_MSG_IN_END,
			    "fas_handle_unknown_end (phase_msg_in)");
			return (fas_handle_msg_in_start(fas));

		case FAS_PHASE_STATUS:
			/* flush stale fifo bytes before the status byte */
			fas_reg_cmd_write(fas, CMD_FLUSH);
#ifdef FAS_TEST
			if (fas_ptest_status & (1<<Tgt(fas->f_current_sp))) {
				fas_assert_atn(fas);
			}
#endif /* FAS_TEST */

			fas_reg_cmd_write(fas, CMD_COMP_SEQ);
			New_state(fas, ACTS_C_CMPLT);

			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_HANDLE_UNKNOWN_PHASE_STATUS_END,
			    "fas_handle_unknown_end (phase_status)");
			return (fas_handle_c_cmplt(fas));

		case FAS_PHASE_COMMAND:
			New_state(fas, ACTS_CMD_START);
			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_HANDLE_UNKNOWN_PHASE_CMD_END,
			    "fas_handle_unknown_end (phase_cmd)");
			return (fas_handle_cmd_start(fas));
		}

		fas_printstate(fas, "Unknown bus phase");
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_UNKNOWN_RESET_END,
		    "fas_handle_unknown_end (reset)");
		return (ACTION_RESET);

	} else {
		/*
		 * Okay. What to do now? Let's try (for the time being)
		 * assuming that the target went south and dropped busy,
		 * as a disconnect implies that either we received
		 * a completion or a disconnect message, or that we
		 * had sent an ABORT OPERATION or BUS DEVICE RESET
		 * message. In either case, we expected the disconnect
		 * and should have fielded it elsewhere.
		 *
		 * If we see a chip disconnect here, this is an unexpected
		 * loss of BSY*. Clean up the state of the chip and return.
		 *
		 */
		int msgout = fas->f_cur_msgout[0];
		struct fas_cmd *sp = fas->f_current_sp;
		int target = Tgt(sp);

		/* for tagged cmds the real message follows the tag bytes */
		if (msgout == MSG_HEAD_QTAG || msgout == MSG_SIMPLE_QTAG) {
			msgout = fas->f_cur_msgout[2];
		}
		EPRINTF4("msgout: %x %x %x, last_msgout=%x\n",
		    fas->f_cur_msgout[0], fas->f_cur_msgout[1],
		    fas->f_cur_msgout[2], fas->f_last_msgout);

		if (msgout == MSG_ABORT || msgout == MSG_ABORT_TAG ||
		    msgout == MSG_DEVICE_RESET) {
			/* expected bus free: our abort/reset msg worked */
			IPRINTF2("Successful %s message to target %d\n",
			    scsi_mname(msgout), Tgt(sp));
			if (sp->cmd_flags & CFLAG_CMDPROXY) {
				sp->cmd_cdb[FAS_PROXY_RESULT] = TRUE;
			}
			if (msgout == MSG_ABORT || msgout == MSG_ABORT_TAG) {
				fas->f_abort_msg_sent++;
				if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
					fas_set_pkt_reason(fas, sp,
					    CMD_ABORTED, STAT_ABORTED);
				}
			} else if (msgout == MSG_DEVICE_RESET) {
				fas->f_reset_msg_sent++;
				if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
					fas_set_pkt_reason(fas, sp,
					    CMD_RESET, STAT_DEV_RESET);
				}
				fas_force_renegotiation(fas, target);
			}
		} else {
			if ((fas->f_last_msgout == MSG_EXTENDED) &&
			    (fas->f_last_msgin == MSG_REJECT)) {
				/*
				 * the target rejected the negotiations,
				 * so resubmit again (no_sync/no_wide
				 * is now set)
				 */
				New_state(fas, STATE_FREE);
				fas_reg_cmd_write(fas, CMD_EN_RESEL);
				fas_remove_cmd(fas, sp, NEW_TIMEOUT);
				fas_decrement_ncmds(fas, sp);
				fas_check_ncmds(fas);
				sp->cmd_flags &= ~CFLAG_TRANFLAG;
				(void) fas_accept_pkt(fas, sp, NO_TRAN_BUSY);
				fas_check_ncmds(fas);
				TRACE_0(TR_FAC_SCSI_FAS,
				    TR_FAS_HANDLE_UNKNOWN_INT_DISCON_END,
				    "fas_handle_unknown_end (int_discon)");
				return (ACTION_SEARCH);

			} else if (fas->f_last_msgout == MSG_EXTENDED) {
				/*
				 * target dropped off the bus during
				 * negotiations
				 */
				fas_reset_sync_wide(fas);
				fas->f_sdtr_sent = fas->f_wdtr_sent = 0;
			}

			fas_set_pkt_reason(fas, sp, CMD_UNX_BUS_FREE, 0);
#ifdef FASDEBUG
			fas_printstate(fas, "unexpected bus free");
#endif
		}
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_UNKNOWN_INT_DISCON_END,
		    "fas_handle_unknown_end (int_discon)");
		return (ACTION_FINISH);
	}
	_NOTE(NOT_REACHED)
	/* NOTREACHED */
}
4584 4583
4585 4584 /*
4586 4585 * handle target disconnecting
4587 4586 */
/*
 * fas_handle_clearing(): handle the target disconnecting from the bus
 * after command complete or a DISCONNECT message.  On a disconnect-msg
 * the command is marked disconnected and a new command may be started;
 * on command-complete the packet is finished via fas_finish().  A
 * target that fails to drop BSY* is treated as a fatal error.
 */
static int
fas_handle_clearing(struct fas *fas)
{
	struct fas_cmd *sp = fas->f_current_sp;

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CLEARING_START,
	    "fas_handle_clearing_start");
	EPRINTF("fas_handle_clearing:\n");

	if (fas->f_laststate == ACTS_C_CMPLT ||
	    fas->f_laststate == ACTS_MSG_IN_DONE) {
		if (INTPENDING(fas)) {
			volatile struct fasreg *fasreg = fas->f_reg;

			fas->f_stat = fas_reg_read(fas,
			    (uchar_t *)&fasreg->fas_stat);
			fas->f_intr = fas_reg_read(fas,
			    (uchar_t *)&fasreg->fas_intr);
			if (fas->f_intr & (FAS_INT_RESET | FAS_INT_ILLEGAL)) {
				return (fas_illegal_cmd_or_bus_reset(fas));
			}
		} else {
			/*
			 * no interrupt yet; change f_laststate so the next
			 * time around we fall straight through
			 */
			fas->f_laststate = ACTS_CLEARING;
			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_HANDLE_CLEARING_RETURN1_END,
			    "fas_handle_clearing_end (ACTION_RETURN1)");
			return (ACTION_RETURN);
		}
	}

	if (fas->f_intr == FAS_INT_DISCON) {
		/*
		 * At this point the FAS chip has disconnected. The bus should
		 * be either quiet or someone may be attempting a reselection
		 * of us (or somebody else). Call the routine that sets the
		 * chip back to a correct and known state.
		 * If the last message in was a disconnect, search
		 * for new work to do, else return to call fas_finish()
		 */
		fas->f_last_msgout = 0xff;
		fas->f_omsglen = 0;
		if (fas->f_last_msgin == MSG_DISCONNECT) {

			fas_reg_cmd_write(fas, CMD_EN_RESEL);

			New_state(fas, STATE_FREE);

			ASSERT(fas->f_current_sp != NULL);
			EPRINTF2("disconnecting %d.%d\n", Tgt(sp), Lun(sp));

			sp->cmd_pkt->pkt_statistics |= STAT_DISCON;
			sp->cmd_flags |= CFLAG_CMDDISC;
			if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
				fas->f_ndisc++;
			}
			ASSERT((fas->f_ncmds >= 0) && (fas->f_ndisc >= 0));
			ASSERT(fas->f_ncmds >= fas->f_ndisc);

			fas->f_current_sp = NULL;

			/*
			 * start a cmd here to save time
			 */
			if ((fas->f_ncmds > fas->f_ndisc) && fas_ustart(fas)) {
				TRACE_0(TR_FAC_SCSI_FAS,
				    TR_FAS_HANDLE_CLEARING_RETURN2_END,
				    "fas_handle_clearing_end (ACTION_RETURN2)");
				return (ACTION_RETURN);
			}


			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_HANDLE_CLEARING_RETURN3_END,
			    "fas_handle_clearing_end (ACTION_RETURN3)");
			return (ACTION_RETURN);
		} else {
			TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CLEARING_END,
			    "fas_handle_clearing_end");
			return (fas_finish(fas));
		}
	} else {
		/*
		 * If the target didn't disconnect from the
		 * bus, that is a gross fatal error.
		 * XXX this can be caused by asserting ATN
		 * XXX check bus phase and if msgout, send a message
		 * NOTE(review): the trace label below says ABORT_CURCMD
		 * but the code returns ACTION_ABORT_ALLCMDS — the trace
		 * string appears stale; confirm before relying on it.
		 */
		fas_log(fas, CE_WARN,
		    "Target %d didn't disconnect after sending %s",
		    Tgt(sp), scsi_mname(fas->f_last_msgin));

		fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);

#ifdef FASDEBUG
		IPRINTF4("msgout: %x %x %x, last_msgout=%x\n",
		    fas->f_cur_msgout[0], fas->f_cur_msgout[1],
		    fas->f_cur_msgout[2], fas->f_last_msgout);
		IPRINTF1("last msgin=%x\n", fas->f_last_msgin);
#endif
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CLEARING_ABORT_END,
		    "fas_handle_clearing_end (ACTION_ABORT_CURCMD)");
		return (ACTION_ABORT_ALLCMDS);
	}
}
4695 4694
/*
 * Handle entry into a SCSI DATA IN/OUT phase: program the next DMA
 * transfer for the current command.
 *
 * Validates that a DMA mapping exists, restores saved data pointers if
 * a prior SAVE DATA POINTERS/disconnect occurred, advances to the next
 * DMA window when the current cookie is exhausted, clips the transfer
 * length to the DMA engine's limits, and finally starts the chip's
 * TRANSFER INFO w/ DMA command in the direction matching the bus phase.
 *
 * Returns ACTION_RETURN on success (state advances to ACTS_DATA_DONE),
 * or ACTION_ABORT_CURCMD on any setup failure.
 */
static int
fas_handle_data_start(struct fas *fas)
{
	uint64_t end;
	uint32_t amt;
	struct fas_cmd *sp = fas->f_current_sp;
	int sending, phase;

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_START,
	    "fas_handle_data_start");
	EPRINTF("fas_handle_data_start:\n");

	/*
	 * A data phase with no valid DMA mapping means the target moved
	 * to a phase we never set up for; fail the command.  The "bad:"
	 * label is also the common bail-out for the window/boundary
	 * errors detected further down.
	 */
	if ((sp->cmd_flags & CFLAG_DMAVALID) == 0) {
		fas_printstate(fas, "unexpected data phase");
	bad:
		fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);

		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_ABORT1_END,
		    "fas_handle_data_end (ACTION_ABORT_CURCMD1)");
		return (ACTION_ABORT_CURCMD);
	} else {
		sending = (sp->cmd_flags & CFLAG_DMASEND)? 1 : 0;
	}

	/*
	 * Re-establish the saved data pointer after a disconnect/
	 * reconnect before computing where this transfer resumes.
	 */
	if (sp->cmd_flags & CFLAG_RESTORE_PTRS) {
		if (fas_restore_pointers(fas, sp)) {
			return (ACTION_ABORT_CURCMD);
		}
		sp->cmd_flags &= ~CFLAG_RESTORE_PTRS;
	}

	/*
	 * And make sure our DMA pointers are in good shape.
	 *
	 * Because SCSI is SCSI, the current DMA pointer has got to be
	 * greater than or equal to our DMA base address. All other cases
	 * that might have affected this always set curaddr to be >=
	 * to the DMA base address.
	 */
	ASSERT(sp->cmd_cur_addr >= sp->cmd_dmacookie.dmac_address);
	/* "end" = one past the last byte covered by the current cookie */
	end = (uint64_t)sp->cmd_dmacookie.dmac_address +
	    (uint64_t)sp->cmd_dmacookie.dmac_size;

	DPRINTF5(
	    "cmd_data_count=%x, dmacount=%x, curaddr=%x, end=%"
	    PRIx64 ", nwin=%x\n",
	    sp->cmd_data_count, sp->cmd_dmacount, sp->cmd_cur_addr, end,
	    sp->cmd_nwin);
	DPRINTF2("dmac_address = %x, dmac_size=%lx\n",
	    sp->cmd_dmacookie.dmac_address, sp->cmd_dmacookie.dmac_size);

	/*
	 * Current cookie exhausted: move to the next DMA window and
	 * recompute the end of the (new) cookie.
	 */
	if (sp->cmd_cur_addr >= end) {
		if (fas_next_window(fas, sp, end)) {
			goto bad;
		}
		end = (uint64_t)sp->cmd_dmacookie.dmac_address +
		    (uint64_t)sp->cmd_dmacookie.dmac_size;
		DPRINTF2("dmac_address=%x, dmac_size=%lx\n",
		    sp->cmd_dmacookie.dmac_address,
		    sp->cmd_dmacookie.dmac_size);
	}

	/*
	 * Transfer length = remainder of the cookie, clipped to the
	 * DMA engine's maximum single-transfer count.
	 */
	amt = end - sp->cmd_cur_addr;
	if (fas->f_dma_attr->dma_attr_count_max < amt) {
		amt = fas->f_dma_attr->dma_attr_count_max;
	}
	DPRINTF3("amt=%x, end=%lx, cur_addr=%x\n", amt, end, sp->cmd_cur_addr);

#ifdef FASDEBUG
	/*
	 * Make sure that we don't cross a boundary we can't handle
	 */
	end = (uint64_t)sp->cmd_cur_addr + (uint64_t)amt - 1;
	if ((end & ~fas->f_dma_attr->dma_attr_seg) !=
	    (sp->cmd_cur_addr & ~fas->f_dma_attr->dma_attr_seg)) {
		EPRINTF3("curaddr %x curaddr+amt %" PRIx64
		    " cntr_max %" PRIx64 "\n",
		    sp->cmd_cur_addr, end, fas->f_dma_attr->dma_attr_seg);
		amt = (end & ~fas->f_dma_attr->dma_attr_seg) - sp->cmd_cur_addr;
		if (amt == 0 || amt > fas->f_dma_attr->dma_attr_count_max) {
			fas_log(fas, CE_WARN, "illegal dma boundary? %x", amt);
			goto bad;
		}
	}
#endif

	/*
	 * Final clip: never program more than what remains in the
	 * current cookie ("end" is reused here as bytes remaining).
	 */
	end = (uint64_t)sp->cmd_dmacookie.dmac_address +
	    (uint64_t)sp->cmd_dmacookie.dmac_size -
	    (uint64_t)sp->cmd_cur_addr;
	if (amt > end) {
		EPRINTF4("ovflow amt %x s.b. %" PRIx64 " curaddr %x count %x\n",
		    amt, end, sp->cmd_cur_addr, sp->cmd_dmacount);
		amt = (uint32_t)end;
	}

	/* remembered so fas_handle_data_done() can compute the residual */
	fas->f_lastcount = amt;

	EPRINTF4("%d.%d cmd 0x%x to xfer %x\n", Tgt(sp), Lun(sp),
	    sp->cmd_pkt->pkt_cdbp[0], amt);

	phase = fas->f_stat & FAS_PHASE_MASK;

	/*
	 * The bus phase must agree with the direction of our mapping;
	 * DMA "write" fills memory from the bus (DATA IN), DMA "read"
	 * drains memory to the bus (DATA OUT).
	 */
	if ((phase == FAS_PHASE_DATA_IN) && !sending) {
		FAS_DMA_WRITE(fas, amt, sp->cmd_cur_addr,
		    CMD_TRAN_INFO|CMD_DMA);
	} else if ((phase == FAS_PHASE_DATA_OUT) && sending) {
		FAS_DMA_READ(fas, amt, sp->cmd_cur_addr, amt,
		    CMD_TRAN_INFO|CMD_DMA);
	} else {
		fas_log(fas, CE_WARN,
		    "unwanted data xfer direction for Target %d", Tgt(sp));
		fas_set_pkt_reason(fas, sp, CMD_DMA_DERR, 0);
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_ABORT2_END,
		    "fas_handle_data_end (ACTION_ABORT_CURCMD2)");
		return (ACTION_ABORT_CURCMD);
	}

#ifdef FAS_TEST
	/* fault injection: force a parity-error path on data-in */
	if (!sending && (fas_ptest_data_in & (1<<Tgt(sp)))) {
		fas_assert_atn(fas);
	}
#endif /* FAS_TEST */

	New_state(fas, ACTS_DATA_DONE);

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_END,
	    "fas_handle_data_end (ACTION_RETURN)");
	return (ACTION_RETURN);
}
4828 4827
/*
 * Complete a DMA data transfer: figure out how many bytes actually
 * moved, update the command's data pointers, and decide what to do
 * next based on the new bus phase.
 *
 * Checks for DMA engine errors and (on receive) SCSI parity errors,
 * computes the true transfer count from the chip's transfer counter,
 * XZERO status bit and (for sends) the residual FIFO count, then either
 * loops back into fas_handle_data_start() if the target is still in a
 * data phase, or falls through to fas_handle_unknown().
 *
 * Returns an ACTION_* code or the result of the follow-on handler.
 */
static int
fas_handle_data_done(struct fas *fas)
{
	volatile struct fasreg *fasreg = fas->f_reg;
	volatile struct dma *dmar = fas->f_dma;
	struct fas_cmd *sp = fas->f_current_sp;
	uint32_t xfer_amt;
	char was_sending;
	uchar_t stat, fifoamt, tgt;

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_DONE_START,
	    "fas_handle_data_done_start");
	EPRINTF("fas_handle_data_done\n");

	tgt = Tgt(sp);
	stat = fas->f_stat;
	was_sending = (sp->cmd_flags & CFLAG_DMASEND) ? 1 : 0;

	/*
	 * Check for DMA errors (parity or memory fault)
	 */
	if ((fas->f_dma_csr = fas_dma_reg_read(fas, &dmar->dma_csr)) &
	    DMA_ERRPEND) {
		/*
		 * It would be desirable to set the ATN* line and attempt to
		 * do the whole schmear of INITIATOR DETECTED ERROR here,
		 * but that is too hard to do at present.
		 */
		fas_log(fas, CE_WARN, "Unrecoverable DMA error on dma %s",
		    (was_sending) ? "send" : "receive");
		fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_DONE_RESET_END,
		    "fas_handle_data_done_end (ACTION_RESET)");
		return (ACTION_RESET);
	}

	/*
	 * Data Receive conditions:
	 *
	 * Check for parity errors. If we have a parity error upon
	 * receive, the FAS chip has asserted ATN* for us already.
	 */
	if (!was_sending) {
#ifdef FAS_TEST
		/* fault injection: pretend the chip saw bad parity */
		if (fas_ptest_data_in & (1<<tgt)) {
			fas_ptest_data_in = 0;
			stat |= FAS_STAT_PERR;
			if (fas_test_stop > 1) {
				debug_enter("ptest_data_in");
			}
		}
#endif /* FAS_TEST */
		if (stat & FAS_STAT_PERR) {
			fas_log(fas, CE_WARN,
			    "SCSI bus DATA IN phase parity error");
			/* queue INITIATOR DETECTED ERROR for msg-out phase */
			fas->f_cur_msgout[0] = MSG_INITIATOR_ERROR;
			fas->f_omsglen = 1;
			sp->cmd_pkt->pkt_statistics |= STAT_PERR;
			sp->cmd_pkt->pkt_reason = CMD_TRAN_ERR;
		}
	}

	FAS_FLUSH_DMA(fas);

	/*
	 * Check to make sure we're still connected to the target.
	 * If the target dropped the bus, that is a fatal error.
	 * We don't even attempt to count what we were transferring
	 * here. Let fas_handle_unknown clean up for us.
	 */
	if (fas->f_intr != FAS_INT_BUS) {
		New_state(fas, ACTS_UNKNOWN);
		TRACE_0(TR_FAC_SCSI_FAS,
		    TR_FAS_HANDLE_DATA_DONE_PHASEMANAGE_END,
		    "fas_handle_data_done_end (ACTION_PHASEMANAGE)");
		return (ACTION_PHASEMANAGE);
	}

	/*
	 * Figure out how far we got.
	 * Latch up fifo amount first and double if wide has been enabled
	 */
	fifoamt = FIFO_CNT(fas);
	if (fas->f_wide_enabled & (1<<tgt)) {
		/* FIFO counts 16-bit words on a wide bus; double for bytes */
		fifoamt = fifoamt << 1;
	}

	/*
	 * XZERO set means the transfer counter expired, i.e. the whole
	 * programmed count moved; otherwise subtract what's left in the
	 * counter from what we programmed in fas_handle_data_start().
	 */
	if (stat & FAS_STAT_XZERO) {
		xfer_amt = fas->f_lastcount;
	} else {
		GET_FAS_COUNT(fasreg, xfer_amt);
		xfer_amt = fas->f_lastcount - xfer_amt;
	}
	DPRINTF4("fifoamt=%x, xfer_amt=%x, lastcount=%x, stat=%x\n",
	    fifoamt, xfer_amt, fas->f_lastcount, stat);


	/*
	 * Unconditionally knock off by the amount left
	 * in the fifo if we were sending out the SCSI bus.
	 *
	 * If we were receiving from the SCSI bus, believe
	 * what the chip told us (either XZERO or by the
	 * value calculated from the counter register).
	 * The reason we don't look at the fifo for
	 * incoming data is that in synchronous mode
	 * the fifo may have further data bytes, and
	 * for async mode we assume that all data in
	 * the fifo will have been transferred before
	 * the fas asserts an interrupt.
	 */
	if (was_sending) {
		xfer_amt -= fifoamt;
	}

#ifdef FASDEBUG
	{
	int phase = stat & FAS_PHASE_MASK;
	fas->f_stat2 = fas_reg_read(fas,
	    (uchar_t *)&fasreg->fas_stat2);

	if (((fas->f_stat & FAS_STAT_XZERO) == 0) &&
	    (phase != FAS_PHASE_DATA_IN) &&
	    (phase != FAS_PHASE_DATA_OUT) &&
	    (fas->f_stat2 & FAS_STAT2_ISHUTTLE)) {
		fas_log(fas, CE_WARN,
		    "input shuttle not empty at end of data phase");
		fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_DONE_RESET_END,
		    "fas_handle_data_done_end (ACTION_RESET)");
		return (ACTION_RESET);
	}
	}
#endif /* FASDEBUG */

	/*
	 * If this was a synchronous transfer, flag it.
	 * Also check for the errata condition of long
	 * last REQ/ pulse for some synchronous targets
	 */
	if (fas->f_offset[tgt]) {
		/*
		 * flag that a synchronous data xfer took place
		 */
		sp->cmd_pkt->pkt_statistics |= STAT_SYNC;

		if (was_sending)
			fas_reg_cmd_write(fas, CMD_FLUSH);
	} else {
		/*
		 * If we aren't doing Synchronous Data Transfers,
		 * definitely offload the fifo.
		 */
		fas_reg_cmd_write(fas, CMD_FLUSH);
	}

	/*
	 * adjust pointers...
	 */
	DPRINTF3("before:cmd_data_count=%x, cmd_cur_addr=%x, xfer_amt=%x\n",
	    sp->cmd_data_count, sp->cmd_cur_addr, xfer_amt);
	sp->cmd_data_count += xfer_amt;
	sp->cmd_cur_addr += xfer_amt;
	sp->cmd_pkt->pkt_state |= STATE_XFERRED_DATA;
	New_state(fas, ACTS_UNKNOWN);
	DPRINTF3("after:cmd_data_count=%x, cmd_cur_addr=%x, xfer_amt=%x\n",
	    sp->cmd_data_count, sp->cmd_cur_addr, xfer_amt);

	/*
	 * Still in a data phase?  Start the next chunk immediately;
	 * otherwise let the unknown-phase handler sort out what's next.
	 */
	stat &= FAS_PHASE_MASK;
	if (stat == FAS_PHASE_DATA_IN || stat == FAS_PHASE_DATA_OUT) {
		fas->f_state = ACTS_DATA;
		TRACE_0(TR_FAC_SCSI_FAS,
		    TR_FAS_HANDLE_DATA_DONE_ACTION1_END,
		    "fas_handle_data_done_end (action1)");
		return (fas_handle_data_start(fas));
	}

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_DONE_ACTION2_END,
	    "fas_handle_data_done_end (action2)");
	return (fas_handle_unknown(fas));
}
5010 5009
/* shared warning text for MESSAGE IN parity errors (logged from two paths) */
static char msginperr[] = "SCSI bus MESSAGE IN phase parity error";
5012 5011
/*
 * Handle the command-complete sequence (CMD_COMP_SEQ): pick up the
 * status byte and, if present, the completion message byte from the
 * chip's FIFO.
 *
 * On a fast target both bytes may already be latched when we get here
 * (laststate == ACTS_UNKNOWN with an interrupt pending); otherwise we
 * return and wait for the interrupt.  Parity errors on either byte are
 * turned into the appropriate outgoing error message.  A COMMAND
 * COMPLETE message sends us to the clearing state; any other message
 * byte is handed to fas_handle_msg_in_done() for full decoding.
 *
 * Returns an ACTION_* code or the result of the follow-on handler.
 */
static int
fas_handle_c_cmplt(struct fas *fas)
{
	struct fas_cmd *sp = fas->f_current_sp;
	volatile struct fasreg *fasreg = fas->f_reg;
	uchar_t sts, msg, intr, perr;

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_C_CMPLT_START,
	    "fas_handle_c_cmplt_start");
	EPRINTF("fas_handle_c_cmplt:\n");


	/*
	 * if target is fast, we can get cmd. completion by the time we get
	 * here. Otherwise, we'll have to taken an interrupt.
	 */
	if (fas->f_laststate == ACTS_UNKNOWN) {
		if (INTPENDING(fas)) {
			fas->f_stat = fas_reg_read(fas,
			    (uchar_t *)&fasreg->fas_stat);
			intr = fas_reg_read(fas, (uchar_t *)&fasreg->fas_intr);
			fas->f_intr = intr;
			if (fas->f_intr & (FAS_INT_RESET | FAS_INT_ILLEGAL)) {
				return (fas_illegal_cmd_or_bus_reset(fas));
			}
		} else {
			/*
			 * change f_laststate for the next time around
			 */
			fas->f_laststate = ACTS_C_CMPLT;
			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_HANDLE_C_CMPLT_RETURN1_END,
			    "fas_handle_c_cmplt_end (ACTION_RETURN1)");
			return (ACTION_RETURN);
		}
	} else {
		intr = fas->f_intr;
	}

#ifdef FAS_TEST
	/* fault injection: force parity errors on status or msg byte */
	if (fas_ptest_status & (1<<Tgt(sp))) {
		fas_ptest_status = 0;
		fas->f_stat |= FAS_STAT_PERR;
		if (fas_test_stop > 1) {
			debug_enter("ptest_status");
		}
	} else if ((fas_ptest_msgin & (1<<Tgt(sp))) && fas_ptest_msg == 0) {
		fas_ptest_msgin = 0;
		fas_ptest_msg = -1;
		fas->f_stat |= FAS_STAT_PERR;
		if (fas_test_stop > 1) {
			debug_enter("ptest_completion");
		}
	}
#endif /* FAS_TEST */

	/* target dropped the bus before we got anything */
	if (intr == FAS_INT_DISCON) {
		New_state(fas, ACTS_UNKNOWN);
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_C_CMPLT_ACTION1_END,
		    "fas_handle_c_cmplt_end (action1)");
		return (fas_handle_unknown(fas));
	}

	if ((perr = (fas->f_stat & FAS_STAT_PERR)) != 0) {
		fas_assert_atn(fas);
		sp->cmd_pkt->pkt_statistics |= STAT_PERR;
	}

	/*
	 * do a msg accept now and read the fifo data
	 */
	if (intr & FAS_INT_FCMP) {
		/*
		 * The FAS manuals state that this sequence completes
		 * with a BUS SERVICE interrupt if just the status
		 * byte was received, else a FUNCTION COMPLETE interrupt
		 * if both status and a message was received.
		 *
		 * if we give the MSG_ACT before reading the msg byte
		 * we get the status byte again and if the status is zero
		 * then we won't detect a failure
		 */
		*(sp->cmd_pkt->pkt_scbp) =
		    sts = fas_reg_read(fas, (uchar_t *)&fasreg->fas_fifo_data);
		fas->f_last_msgin = fas->f_imsgarea[0] =
		    msg = fas_reg_read(fas, (uchar_t *)&fasreg->fas_fifo_data);

		fas_reg_cmd_write(fas, CMD_MSG_ACPT);
		sp->cmd_pkt->pkt_state |= STATE_GOT_STATUS;

		/*
		 * The manuals also state that ATN* is asserted if
		 * bad parity is detected.
		 *
		 * The one case that we cannot handle is where we detect
		 * bad parity for the status byte, but the target refuses
		 * to go to MESSAGE OUT phase right away. This means that
		 * if that happens, we will misconstrue the parity error
		 * to be for the completion message, not the status byte.
		 */
		if (perr) {
			fas_log(fas, CE_WARN, msginperr);
			sp->cmd_pkt->pkt_statistics |= STAT_PERR;

			fas->f_cur_msgout[0] = MSG_MSG_PARITY;
			fas->f_omsglen = 1;
			New_state(fas, ACTS_UNKNOWN);
			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_HANDLE_C_CMPLT_ACTION5_END,
			    "fas_handle_c_cmplt_end (action5)");
			return (ACTION_RETURN);
		}

	} else if (intr == FAS_INT_BUS) {
		/*
		 * We only got the status byte.
		 */
		sts = fas_reg_read(fas, (uchar_t *)&fasreg->fas_fifo_data);
		sp->cmd_pkt->pkt_state |= STATE_GOT_STATUS;
		*(sp->cmd_pkt->pkt_scbp) = sts;
		msg = INVALID_MSG;

		IPRINTF1("fas_handle_cmd_cmplt: sts=%x, no msg byte\n", sts);

		if (perr) {
			/*
			 * If we get a parity error on a status byte
			 * assume that it was a CHECK CONDITION
			 */
			sts = STATUS_CHECK;
			fas_log(fas, CE_WARN,
			    "SCSI bus STATUS phase parity error");
			fas->f_cur_msgout[0] = MSG_INITIATOR_ERROR;
			fas->f_omsglen = 1;
			New_state(fas, ACTS_UNKNOWN);
			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_HANDLE_C_CMPLT_ACTION5_END,
			    "fas_handle_c_cmplt_end (action5)");
			return (fas_handle_unknown(fas));
		}

	} else {
		/* neither FCMP nor BUS SERVICE: something unexpected */
		msg = sts = INVALID_MSG;
		IPRINTF("fas_handle_cmd_cmplt: unexpected intr\n");
		New_state(fas, ACTS_UNKNOWN);
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_C_CMPLT_ACTION2_END,
		    "fas_handle_c_cmplt_end (action2)");
		return (fas_handle_unknown(fas));
	}

	EPRINTF2("fas_handle_c_cmplt: status=%x, msg=%x\n", sts, msg);

	EPRINTF1("Completion Message=%s\n", scsi_mname(msg));
	if (msg == MSG_COMMAND_COMPLETE) {
		/*
		 * Actually, if the message was a 'linked command
		 * complete' message, the target isn't going to be
		 * clearing the bus.
		 */
		New_state(fas, ACTS_CLEARING);
		TRACE_0(TR_FAC_SCSI_FAS,
		    TR_FAS_HANDLE_C_CMPLT_ACTION4_END,
		    "fas_handle_c_cmplt_end (action4)");
		return (fas_handle_clearing(fas));
	} else {
		/*
		 * Not a plain COMMAND COMPLETE: hand the already-latched
		 * byte (f_imsgarea[0]) to the general msg-in decoder.
		 */
		fas->f_imsglen = 1;
		fas->f_imsgindex = 1;
		New_state(fas, ACTS_MSG_IN_DONE);
		TRACE_0(TR_FAC_SCSI_FAS,
		    TR_FAS_HANDLE_C_CMPLT_ACTION3_END,
		    "fas_handle_c_cmplt_end (action3)");
		return (fas_handle_msg_in_done(fas));
	}
}
5187 5186
/*
 * Prepare for accepting a message byte from the fifo.
 *
 * Flushes any stale FIFO contents, issues TRANSFER INFO to clock in one
 * message byte, and primes the one-byte message-in bookkeeping; the
 * byte itself is consumed later in fas_handle_msg_in_done().
 *
 * Always returns ACTION_PHASEMANAGE.
 */
static int
fas_handle_msg_in_start(struct fas *fas)
{
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_START,
	    "fas_handle_msg_in_start");
	EPRINTF("fas_handle_msg_in_start\n");

	/*
	 * Pick up a message byte.
	 * Clear the FIFO so we
	 * don't get confused.
	 */
	if (!FIFO_EMPTY(fas)) {
		fas_reg_cmd_write(fas, CMD_FLUSH);
	}
	fas_reg_cmd_write(fas, CMD_TRAN_INFO);
	fas->f_imsglen = 1;
	fas->f_imsgindex = 0;
	New_state(fas, ACTS_MSG_IN_DONE);

	/*
	 * give a little extra time by returning to phasemanage
	 */
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_END,
	    "fas_handle_msg_in_end (ACTION_PHASEMANAGE)");
	return (ACTION_PHASEMANAGE);
}
5218 5217
/*
 * We come here after issuing a MSG_ACCEPT
 * command and are expecting more message bytes.
 * The FAS should be asserting a BUS SERVICE
 * interrupt status, but may have asserted
 * a different interrupt in the case that
 * the target disconnected and dropped BSY*.
 *
 * In the case that we are eating up message
 * bytes (and throwing them away unread) because
 * we have ATN* asserted (we are trying to send
 * a message), we do not consider it an error
 * if the phase has changed out of MESSAGE IN.
 *
 * Returns ACTION_RETURN when another byte fetch was started, otherwise
 * falls through to fas_handle_unknown().
 */
static int
fas_handle_more_msgin(struct fas *fas)
{
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MORE_MSGIN_START,
	    "fas_handle_more_msgin_start");
	EPRINTF("fas_handle_more_msgin\n");

	if (fas->f_intr & FAS_INT_BUS) {
		if ((fas->f_stat & FAS_PHASE_MASK) == FAS_PHASE_MSG_IN) {
			/*
			 * Fetch another byte of a message in.
			 */
			fas_reg_cmd_write(fas, CMD_TRAN_INFO);
			New_state(fas, ACTS_MSG_IN_DONE);
			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_HANDLE_MORE_MSGIN_RETURN1_END,
			    "fas_handle_more_msgin_end (ACTION_RETURN)");
			return (ACTION_RETURN);
		}

		/*
		 * If we were gobbling up a message and we have
		 * changed phases, handle this silently, else
		 * complain. In either case, we return to let
		 * fas_phasemanage() handle things.
		 *
		 * If it wasn't a BUS SERVICE interrupt,
		 * let fas_phasemanage() find out if the
		 * chip disconnected.
		 */
		if (fas->f_imsglen != 0) {
			fas_log(fas, CE_WARN,
			    "Premature end of extended message");
		}
	}
	New_state(fas, ACTS_UNKNOWN);
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MORE_MSGIN_RETURN2_END,
	    "fas_handle_more_msgin_end (action)");
	return (fas_handle_unknown(fas));
}
5273 5272
/*
 * Consume and decode a MESSAGE IN byte that the chip has latched.
 *
 * This is the core of the message-in state machine: it latches chip
 * status/FIFO contents if an interrupt is pending, takes a fast path
 * for the common single-byte DISCONNECT message, and otherwise feeds
 * bytes into f_imsgarea[], dispatching to the one-byte / two-byte /
 * multi-byte (extended) message decoders once enough bytes are in.
 * Parity errors and FIFO miscounts cause an error message (sndmsg) to
 * be queued for a subsequent MESSAGE OUT phase, with further incoming
 * bytes gobbled and discarded (f_imsglen == 0 sentinel).
 *
 * Returns an ACTION_* code or the result of a follow-on handler.
 */
static int
fas_handle_msg_in_done(struct fas *fas)
{
	struct fas_cmd *sp = fas->f_current_sp;
	volatile struct fasreg *fasreg = fas->f_reg;
	int sndmsg = 0;
	uchar_t msgin;

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_DONE_START,
	    "fas_handle_msg_in_done_start");
	EPRINTF("fas_handle_msg_in_done:\n");
	if (fas->f_laststate == ACTS_MSG_IN) {
		/* first pass after TRANSFER INFO: latch status and FIFO */
		if (INTPENDING(fas)) {
			fas->f_stat = fas_reg_read(fas,
			    (uchar_t *)&fasreg->fas_stat);
			fas->f_stat2 = fas_reg_read(fas,
			    (uchar_t *)&fasreg->fas_stat2);

			fas_read_fifo(fas);

			fas->f_intr = fas_reg_read(fas,
			    (uchar_t *)&fasreg->fas_intr);
			if (fas->f_intr & (FAS_INT_RESET | FAS_INT_ILLEGAL)) {
				return (fas_illegal_cmd_or_bus_reset(fas));
			}
		} else {
			/*
			 * change f_laststate for the next time around
			 */
			fas->f_laststate = ACTS_MSG_IN_DONE;
			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_HANDLE_MSG_IN_DONE_RETURN1_END,
			    "fas_handle_msg_in_done_end (ACTION_RETURN1)");
			return (ACTION_RETURN);
		}
	}

	/*
	 * the most common case is a disconnect message. we do
	 * a fast path for this condition and if it fails then
	 * we go for the detailed error handling
	 */
#ifndef FAS_TEST
	if (((fas->f_laststate == ACTS_MSG_IN) ||
	    (fas->f_laststate == ACTS_MSG_IN_DONE)) &&
	    ((fas->f_intr & FAS_INT_DISCON) == 0) &&
	    ((fas->f_stat & FAS_STAT_PERR) == 0) &&
	    ((sp->cmd_pkt_flags & FLAG_NODISCON) == 0)) {

		if ((fas->f_fifolen == 1) &&
		    (fas->f_imsglen == 1) &&
		    (fas->f_fifo[0] == MSG_DISCONNECT)) {

			fas_reg_cmd_write(fas, CMD_MSG_ACPT);
			fas->f_imsgarea[fas->f_imsgindex++] = fas->f_fifo[0];
			fas->f_last_msgin = MSG_DISCONNECT;
			New_state(fas, ACTS_CLEARING);

			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_HANDLE_MSG_IN_DONE_ACTION_END,
			    "fas_handle_msg_in_done_end (action)");

			return (fas_handle_clearing(fas));
		}
	}
#endif	/* not FAS_TEST */

	/*
	 * We can be called here for both the case where
	 * we had requested the FAS chip to fetch a message
	 * byte from the target (at the target's request).
	 * We can also be called in the case where we had
	 * been using the CMD_COMP_SEQ command to pick up
	 * both a status byte and a completion message from
	 * a target, but where the message wasn't one of
	 * COMMAND COMPLETE, LINKED COMMAND COMPLETE, or
	 * LINKED COMMAND COMPLETE (with flag). This is a
	 * legal (albeit extremely unusual) SCSI bus trans-
	 * -ition, so we have to handle it.
	 */
	if (fas->f_laststate != ACTS_C_CMPLT) {
#ifdef FAS_TEST
	reloop:
#endif	/* FAS_TEST */

		if (fas->f_intr & FAS_INT_DISCON) {
			fas_log(fas, CE_WARN,
			    "premature end of input message");
			New_state(fas, ACTS_UNKNOWN);
			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_HANDLE_MSG_IN_DONE_PHASEMANAGE_END,
			    "fas_handle_msg_in_done_end (ACTION_PHASEMANAGE)");
			return (ACTION_PHASEMANAGE);
		}

		/*
		 * Note that if f_imsglen is zero, then we are skipping
		 * input message bytes, so there is no reason to look for
		 * parity errors.
		 */
		if (fas->f_imsglen != 0 && (fas->f_stat & FAS_STAT_PERR)) {
			fas_log(fas, CE_WARN, msginperr);
			sndmsg = MSG_MSG_PARITY;
			sp->cmd_pkt->pkt_statistics |= STAT_PERR;
			fas_reg_cmd_write(fas, CMD_FLUSH);

		} else if ((msgin = fas->f_fifolen) != 1) {

			/*
			 * If we have got more than one or 0 bytes in the fifo,
			 * that is a gross screwup, and we should let the
			 * target know that we have completely fouled up.
			 */
			fas_printf(fas, "fifocount=%x", msgin);
			fas_printstate(fas, "input message botch");
			sndmsg = MSG_INITIATOR_ERROR;
			fas_reg_cmd_write(fas, CMD_FLUSH);
			fas_log(fas, CE_WARN, "input message botch");

		} else if (fas->f_imsglen == 0) {
			/*
			 * If we are in the middle of gobbling up and throwing
			 * away a message (due to a previous message input
			 * error), drive on.
			 */
			msgin = fas_reg_read(fas,
			    (uchar_t *)&fasreg->fas_fifo_data);
			New_state(fas, ACTS_MSG_IN_MORE);

		} else {
			/* normal case: store the byte for decoding below */
			msgin = fas->f_fifo[0];
			fas->f_imsgarea[fas->f_imsgindex++] = msgin;
		}

	} else {
		/*
		 * In this case, we have been called (from
		 * fas_handle_c_cmplt()) with the message
		 * already stored in the message array.
		 */
		msgin = fas->f_imsgarea[0];
	}

	/*
	 * Process this message byte (but not if we are
	 * going to be trying to send back some error
	 * anyway)
	 */
	if (sndmsg == 0 && fas->f_imsglen != 0) {

		if (fas->f_imsgindex < fas->f_imsglen) {

			/* message not complete yet; fetch another byte */
			EPRINTF2("message byte %d: 0x%x\n",
			    fas->f_imsgindex-1,
			    fas->f_imsgarea[fas->f_imsgindex-1]);

			New_state(fas, ACTS_MSG_IN_MORE);

		} else if (fas->f_imsglen == 1) {

#ifdef FAS_TEST
			/* fault injection: replay this byte with bad parity */
			if ((fas_ptest_msgin & (1<<Tgt(sp))) &&
			    fas_ptest_msg == msgin) {
				fas_ptest_msgin = 0;
				fas_ptest_msg = -1;
				fas_assert_atn(fas);
				fas->f_stat |= FAS_STAT_PERR;
				fas->f_imsgindex -= 1;
				if (fas_test_stop > 1) {
					debug_enter("ptest msgin");
				}
				goto reloop;
			}
#endif	/* FAS_TEST */

			sndmsg = fas_onebyte_msg(fas);

		} else if (fas->f_imsglen == 2) {
#ifdef FAS_TEST
			/* fault injection: bad parity on extended message */
			if (fas_ptest_emsgin & (1<<Tgt(sp))) {
				fas_ptest_emsgin = 0;
				fas_assert_atn(fas);
				fas->f_stat |= FAS_STAT_PERR;
				fas->f_imsgindex -= 1;
				if (fas_test_stop > 1) {
					debug_enter("ptest emsgin");
				}
				goto reloop;
			}
#endif	/* FAS_TEST */

			if (fas->f_imsgarea[0] == MSG_EXTENDED) {
				static char *tool =
				    "Extended message 0x%x is too long";

				/*
				 * Is the incoming message too long
				 * to be stored in our local array?
				 * (msgin here is the extended msg length
				 * byte; +2 covers the header bytes.)
				 */
				if ((int)(msgin+2) > IMSGSIZE) {
					fas_log(fas, CE_WARN,
					    tool, fas->f_imsgarea[0]);
					sndmsg = MSG_REJECT;
				} else {
					fas->f_imsglen = msgin + 2;
					New_state(fas, ACTS_MSG_IN_MORE);
				}
			} else {
				sndmsg = fas_twobyte_msg(fas);
			}

		} else {
			sndmsg = fas_multibyte_msg(fas);
		}
	}

	if (sndmsg < 0) {
		/*
		 * If sndmsg is less than zero, one of the subsidiary
		 * routines needs to return some other state than
		 * ACTION_RETURN.
		 */
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_DONE_SNDMSG_END,
		    "fas_handle_msg_in_done_end (-sndmsg)");
		return (-sndmsg);

	} else if (sndmsg > 0) {
		if (IS_1BYTE_MSG(sndmsg)) {
			fas->f_omsglen = 1;
		}
		fas->f_cur_msgout[0] = (uchar_t)sndmsg;

		/*
		 * The target is not guaranteed to go to message out
		 * phase, period. Moreover, until the entire incoming
		 * message is transferred, the target may (and likely
		 * will) continue to transfer message bytes (which
		 * we will have to ignore).
		 *
		 * In order to do this, we'll go to 'infinite'
		 * message in handling by setting the current input
		 * message length to a sentinel of zero.
		 *
		 * This works regardless of the message we are trying
		 * to send out. At the point in time which we want
		 * to send a message in response to an incoming message
		 * we do not care any more about the incoming message.
		 *
		 * If we are sending a message in response to detecting
		 * a parity error on input, the FAS chip has already
		 * set ATN* for us, but it doesn't hurt to set it here
		 * again anyhow.
		 */
		fas_assert_atn(fas);
		New_state(fas, ACTS_MSG_IN_MORE);
		fas->f_imsglen = 0;
	}

	fas_reg_cmd_write(fas, CMD_FLUSH);

	fas_reg_cmd_write(fas, CMD_MSG_ACPT);

	if ((fas->f_laststate == ACTS_MSG_IN_DONE) &&
	    (fas->f_state == ACTS_CLEARING)) {
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_DONE_ACTION_END,
		    "fas_handle_msg_in_done_end (action)");
		return (fas_handle_clearing(fas));
	}
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_DONE_RETURN2_END,
	    "fas_handle_msg_in_done_end (ACTION_RETURN2)");
	return (ACTION_RETURN);
}
5546 5545
/*
 * Handle a single-byte incoming SCSI message for the current command.
 *
 * Returns the message byte to send back in response (0 when no response
 * is needed), or a negated ACTION_* code (e.g. -ACTION_ABORT_CURCMD)
 * when the caller must take recovery action instead of sending a message.
 */
static int
fas_onebyte_msg(struct fas *fas)
{
	struct fas_cmd *sp = fas->f_current_sp;
	int msgout = 0;
	/* latch the byte; it is also remembered in f_last_msgin */
	uchar_t msgin = fas->f_last_msgin = fas->f_imsgarea[0];
	int tgt = Tgt(sp);

	EPRINTF("fas_onebyte_msg\n");

	if (msgin & MSG_IDENTIFY) {
		/*
		 * How did we get here? We should only see identify
		 * messages on a reconnection, but we'll handle this
		 * fine here (just in case we get this) as long as
		 * we believe that this is a valid identify message.
		 *
		 * For this to be a valid incoming message,
		 * bits 6-4 must must be zero. Also, the
		 * bit that says that I'm an initiator and
		 * can support disconnection cannot possibly
		 * be set here.
		 */

		char garbled = ((msgin & (BAD_IDENTIFY|INI_CAN_DISCON)) != 0);

		fas_log(fas, CE_WARN, "%s message 0x%x from Target %d",
		    garbled ? "Garbled" : "Identify", msgin, tgt);

		if (garbled) {
			/*
			 * If it's a garbled message,
			 * try and tell the target...
			 */
			msgout = MSG_INITIATOR_ERROR;
		} else {
			New_state(fas, ACTS_UNKNOWN);
		}
		return (msgout);

	} else if (IS_2BYTE_MSG(msgin) || IS_EXTENDED_MSG(msgin)) {
		/* multi-byte message: collect the rest via MSG_IN_MORE */
		fas->f_imsglen = 2;
		New_state(fas, ACTS_MSG_IN_MORE);
		return (0);
	}

	New_state(fas, ACTS_UNKNOWN);

	switch (msgin) {
	case MSG_DISCONNECT:
		/*
		 * If we 'cannot' disconnect- reject this message.
		 * Note that we only key off of the pkt_flags here-
		 * the FLAG_NODISCON was set in fas_accept_pkt() if
		 * no disconnect was enabled in scsi_options
		 */
		if (sp->cmd_pkt_flags & FLAG_NODISCON) {
			msgout = MSG_REJECT;
			break;
		}
		/* FALLTHROUGH */
	case MSG_COMMAND_COMPLETE:
		fas->f_state = ACTS_CLEARING;
		break;

	case MSG_NOP:
		break;

	/* XXX Make it a MSG_REJECT handler */
	case MSG_REJECT:
	{
		uchar_t reason = 0;
		uchar_t lastmsg = fas->f_last_msgout;
		/*
		 * The target is rejecting the last message we sent.
		 *
		 * If the last message we attempted to send out was an
		 * extended message, we were trying to negotiate sync
		 * xfers- and we're okay.
		 *
		 * Otherwise, a target has rejected a message that
		 * it should have handled. We will abort the operation
		 * in progress and set the pkt_reason value here to
		 * show why we have completed. The process of aborting
		 * may be via a message or may be via a bus reset (as
		 * a last resort).
		 */
		msgout = (TAGGED(tgt)? MSG_ABORT_TAG : MSG_ABORT);

		switch (lastmsg) {
		case MSG_EXTENDED:
			if (fas->f_wdtr_sent) {
				/*
				 * Disable wide, Target rejected
				 * out WDTR message
				 */
				fas_set_wide_conf3(fas, tgt, 0);
				fas->f_nowide |= (1<<tgt);
				fas->f_wdtr_sent = 0;
				/*
				 * we still want to negotiate sync
				 */
				if ((fas->f_nosync & (1<<tgt)) == 0) {
					fas_assert_atn(fas);
					fas_make_sdtr(fas, 0, tgt);
				}
			} else if (fas->f_sdtr_sent) {
				/* SDTR rejected: stay async from now on */
				fas_reg_cmd_write(fas, CMD_CLR_ATN);
				fas_revert_to_async(fas, tgt);
				fas->f_nosync |= (1<<tgt);
				fas->f_sdtr_sent = 0;
			}
			msgout = 0;
			break;
		case MSG_NOP:
			reason = CMD_NOP_FAIL;
			break;
		case MSG_INITIATOR_ERROR:
			reason = CMD_IDE_FAIL;
			break;
		case MSG_MSG_PARITY:
			reason = CMD_PER_FAIL;
			break;
		case MSG_REJECT:
			reason = CMD_REJECT_FAIL;
			break;
		/* XXX - abort not good, queue full handling or drain (?) */
		case MSG_SIMPLE_QTAG:
		case MSG_ORDERED_QTAG:
		case MSG_HEAD_QTAG:
			msgout = MSG_ABORT;
			reason = CMD_TAG_REJECT;
			break;
		case MSG_DEVICE_RESET:
			reason = CMD_BDR_FAIL;
			msgout = -ACTION_ABORT_CURCMD;
			break;
		case MSG_ABORT:
		case MSG_ABORT_TAG:
			/*
			 * If an RESET/ABORT OPERATION message is rejected
			 * it is time to yank the chain on the bus...
			 */
			reason = CMD_ABORT_FAIL;
			msgout = -ACTION_ABORT_CURCMD;
			break;
		default:
			if (IS_IDENTIFY_MSG(lastmsg)) {
				if (TAGGED(tgt)) {
					/*
					 * this often happens when the
					 * target rejected our tag
					 */
					reason = CMD_TAG_REJECT;
				} else {
					reason = CMD_ID_FAIL;
				}
			} else {
				reason = CMD_TRAN_ERR;
				msgout = -ACTION_ABORT_CURCMD;
			}

			break;
		}

		if (msgout) {
			fas_log(fas, CE_WARN,
			    "Target %d rejects our message '%s'",
			    tgt, scsi_mname(lastmsg));
			fas_set_pkt_reason(fas, sp, reason, 0);
		}

		break;
	}
	case MSG_RESTORE_PTRS:
		sp->cmd_cdbp = sp->cmd_pkt->pkt_cdbp;
		if (sp->cmd_data_count != sp->cmd_saved_data_count) {
			if (fas_restore_pointers(fas, sp)) {
				msgout = -ACTION_ABORT_CURCMD;
			} else if ((sp->cmd_pkt->pkt_reason & CMD_TRAN_ERR) &&
			    (sp->cmd_pkt->pkt_statistics & STAT_PERR) &&
			    (sp->cmd_cur_win == 0) &&
			    (sp->cmd_data_count == 0)) {
				/*
				 * pointers fully rewound after a parity
				 * error: the transfer error is recoverable
				 */
				sp->cmd_pkt->pkt_reason &= ~CMD_TRAN_ERR;
			}
		}
		break;

	case MSG_SAVE_DATA_PTR:
		/* snapshot current data pointers for a later RESTORE */
		sp->cmd_saved_data_count = sp->cmd_data_count;
		sp->cmd_saved_win = sp->cmd_cur_win;
		sp->cmd_saved_cur_addr = sp->cmd_cur_addr;
		break;

	/* These don't make sense for us, and */
	/* will be rejected */
	/* case MSG_INITIATOR_ERROR */
	/* case MSG_ABORT */
	/* case MSG_MSG_PARITY */
	/* case MSG_DEVICE_RESET */
	default:
		msgout = MSG_REJECT;
		fas_log(fas, CE_WARN,
		    "Rejecting message '%s' from Target %d",
		    scsi_mname(msgin), tgt);
		break;
	}

	EPRINTF1("Message in: %s\n", scsi_mname(msgin));

	return (msgout);
}
5759 5758
5760 5759 /*
5761 5760 * phase handlers that are rarely used
5762 5761 */
5763 5762 static int
5764 5763 fas_handle_cmd_start(struct fas *fas)
5765 5764 {
5766 5765 struct fas_cmd *sp = fas->f_current_sp;
5767 5766 volatile uchar_t *tp = fas->f_cmdarea;
5768 5767 int i;
5769 5768 int amt = sp->cmd_cdblen;
5770 5769
5771 5770 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CMD_START_START,
5772 5771 "fas_handle_cmd_start_start");
5773 5772 EPRINTF("fas_handle_cmd: send cmd\n");
5774 5773
5775 5774 for (i = 0; i < amt; i++) {
5776 5775 *tp++ = sp->cmd_cdbp[i];
5777 5776 }
5778 5777 fas_reg_cmd_write(fas, CMD_FLUSH);
5779 5778
5780 5779 FAS_DMA_READ(fas, amt, fas->f_dmacookie.dmac_address, amt,
5781 5780 CMD_TRAN_INFO|CMD_DMA);
5782 5781 fas->f_lastcount = amt;
5783 5782
5784 5783 New_state(fas, ACTS_CMD_DONE);
5785 5784
5786 5785 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CMD_START_END,
5787 5786 "fas_handle_cmd_start_end");
5788 5787 return (ACTION_RETURN);
5789 5788 }
5790 5789
/*
 * Complete the command (CDB) transfer phase.
 *
 * Expects a BUS SERVICE interrupt on success; a DISCONNECT interrupt is
 * tolerated (but does not mark the command as sent); anything else is a
 * transmission error and aborts the current command.
 */
static int
fas_handle_cmd_done(struct fas *fas)
{
	struct fas_cmd *sp = fas->f_current_sp;
	uchar_t intr = fas->f_intr;
	volatile struct dma *dmar = fas->f_dma;

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CMD_DONE_START,
	    "fas_handle_cmd_done_start");
	EPRINTF("fas_handle_cmd_done\n");

	/*
	 * We should have gotten a BUS SERVICE interrupt.
	 * If it isn't that, and it isn't a DISCONNECT
	 * interrupt, we have a "cannot happen" situation.
	 */
	if ((intr & FAS_INT_BUS) == 0) {
		if ((intr & FAS_INT_DISCON) == 0) {
			fas_printstate(fas, "cmd transmission error");
			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_HANDLE_CMD_DONE_ABORT1_END,
			    "fas_handle_cmd_done_end (abort1)");
			return (ACTION_ABORT_CURCMD);
		}
		/* DISCONNECT: fall through without marking the cmd sent */
	} else {
		sp->cmd_pkt->pkt_state |= STATE_SENT_CMD;
	}

	/* snapshot the dma status, then quiesce/flush the dma engine */
	fas->f_dma_csr = fas_dma_reg_read(fas, &dmar->dma_csr);
	FAS_FLUSH_DMA(fas);

	New_state(fas, ACTS_UNKNOWN);
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CMD_DONE_END,
	    "fas_handle_cmd_done_end");
	return (fas_handle_unknown(fas));
}
5827 5826
5828 5827 /*
5829 5828 * Begin to send a message out
5830 5829 */
static int
fas_handle_msg_out_start(struct fas *fas)
{
	struct fas_cmd *sp = fas->f_current_sp;
	uchar_t *msgout = fas->f_cur_msgout;
	uchar_t amt = fas->f_omsglen;

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_OUT_START,
	    "fas_handle_msg_out_start");
	EPRINTF("fas_handle_msg_out_start\n");

	/*
	 * Check to make *sure* that we are really
	 * in MESSAGE OUT phase. If the last state
	 * was ACTS_MSG_OUT_DONE, then we are trying
	 * to resend a message that the target stated
	 * had a parity error in it.
	 *
	 * If this is the case, mark the completion reason as CMD_NOMSGOUT.
	 * XXX: Right now, we just *drive* on. Should we abort the command?
	 */
	if ((fas->f_stat & FAS_PHASE_MASK) != FAS_PHASE_MSG_OUT &&
	    fas->f_laststate == ACTS_MSG_OUT_DONE) {
		fas_log(fas, CE_WARN,
		    "Target %d refused message resend", Tgt(sp));
		fas_set_pkt_reason(fas, sp, CMD_NOMSGOUT, 0);
		New_state(fas, ACTS_UNKNOWN);
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_OUT_PHASEMANAGE_END,
		    "fas_handle_msg_out_end (ACTION_PHASEMANAGE)");
		return (ACTION_PHASEMANAGE);
	}

	/*
	 * Clean the fifo.
	 */
	fas_reg_cmd_write(fas, CMD_FLUSH);

	if (amt == 0) {
		/*
		 * no msg to send
		 */
		*msgout = MSG_NOP;
		amt = fas->f_omsglen = 1;
	}

	/*
	 * If msg only 1 byte, just dump it in the fifo and go. For
	 * multi-byte msgs, dma them to save time. If we have no
	 * msg to send and we're in msg out phase, send a NOP.
	 */
	fas->f_last_msgout = *msgout;

	/*
	 * There is a bug in the fas366 that occasionaly
	 * deasserts the ATN signal prematurely when we send
	 * the sync/wide negotiation bytes out using DMA. The
	 * workaround here is to send the negotiation bytes out
	 * using PIO
	 */
	fas_write_fifo(fas, msgout, fas->f_omsglen, 1);
	fas_reg_cmd_write(fas, CMD_TRAN_INFO);

	EPRINTF2("amt=%x, last_msgout=%x\n", amt, fas->f_last_msgout);

	New_state(fas, ACTS_MSG_OUT_DONE);
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_OUT_END,
	    "fas_handle_msg_out_end");
	return (ACTION_RETURN);
}
5900 5899
/*
 * Finish a message-out phase: decide, from the interrupt and the new
 * bus phase, whether the message was accepted, whether the target
 * disconnected in response (expected for ABORT/RESET messages), or
 * whether the target saw a parity error and wants a resend.
 */
static int
fas_handle_msg_out_done(struct fas *fas)
{
	struct fas_cmd *sp = fas->f_current_sp;
	uchar_t msgout, phase;
	int target = Tgt(sp);
	int amt = fas->f_omsglen;
	int action;

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_OUT_DONE_START,
	    "fas_handle_msg_out_done_start");
	msgout = fas->f_cur_msgout[0];
	/* for tagged commands the real message follows the two tag bytes */
	if ((msgout == MSG_HEAD_QTAG) || (msgout == MSG_SIMPLE_QTAG)) {
		msgout = fas->f_cur_msgout[2];
	}
	EPRINTF4("msgout: %x %x %x, last_msgout=%x\n",
	    fas->f_cur_msgout[0], fas->f_cur_msgout[1],
	    fas->f_cur_msgout[2], fas->f_last_msgout);

	EPRINTF1("fas_handle_msgout_done: msgout=%x\n", msgout);

	/*
	 * flush fifo, just in case some bytes were not sent
	 */
	fas_reg_cmd_write(fas, CMD_FLUSH);

	/*
	 * If the FAS disconnected, then the message we sent caused
	 * the target to decide to drop BSY* and clear the bus.
	 */
	if (fas->f_intr == FAS_INT_DISCON) {
		if (msgout == MSG_DEVICE_RESET || msgout == MSG_ABORT ||
		    msgout == MSG_ABORT_TAG) {
			/*
			 * If we sent a device reset msg, then we need to do
			 * a synch negotiate again unless we have already
			 * inhibited synch.
			 */
			if (msgout == MSG_ABORT || msgout == MSG_ABORT_TAG) {
				fas->f_abort_msg_sent++;
				if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
					fas_set_pkt_reason(fas, sp,
					    CMD_ABORTED, STAT_ABORTED);
				}
			} else if (msgout == MSG_DEVICE_RESET) {
				fas->f_reset_msg_sent++;
				if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
					fas_set_pkt_reason(fas, sp,
					    CMD_RESET, STAT_DEV_RESET);
				}
				fas_force_renegotiation(fas, Tgt(sp));
			}
			EPRINTF2("Successful %s message to target %d\n",
			    scsi_mname(msgout), target);

			if (sp->cmd_flags & CFLAG_CMDPROXY) {
				sp->cmd_cdb[FAS_PROXY_RESULT] = TRUE;
			}
			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_HANDLE_MSG_OUT_DONE_FINISH_END,
			    "fas_handle_msg_out_done_end (ACTION_FINISH)");
			return (ACTION_FINISH);
		}
		/*
		 * If the target dropped busy on any other message, it
		 * wasn't expected. We will let the code in fas_phasemanage()
		 * handle this unexpected bus free event.
		 */
		goto out;
	}

	/*
	 * What phase have we transitioned to?
	 */
	phase = fas->f_stat & FAS_PHASE_MASK;

	/*
	 * If we finish sending a message out, and we are
	 * still in message out phase, then the target has
	 * detected one or more parity errors in the message
	 * we just sent and it is asking us to resend the
	 * previous message.
	 */
	if ((fas->f_intr & FAS_INT_BUS) && phase == FAS_PHASE_MSG_OUT) {
		/*
		 * As per SCSI-2 specification, if the message to
		 * be re-sent is greater than one byte, then we
		 * have to set ATN*.
		 */
		if (amt > 1) {
			fas_assert_atn(fas);
		}
		fas_log(fas, CE_WARN,
		    "SCSI bus MESSAGE OUT phase parity error");
		sp->cmd_pkt->pkt_statistics |= STAT_PERR;
		New_state(fas, ACTS_MSG_OUT);
		TRACE_0(TR_FAC_SCSI_FAS,
		    TR_FAS_HANDLE_MSG_OUT_DONE_PHASEMANAGE_END,
		    "fas_handle_msg_out_done_end (ACTION_PHASEMANAGE)");
		return (ACTION_PHASEMANAGE);
	}


out:
	/* message accepted (or unexpected event): resolve the new phase */
	fas->f_last_msgout = msgout;
	fas->f_omsglen = 0;
	New_state(fas, ACTS_UNKNOWN);
	action = fas_handle_unknown(fas);
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_OUT_DONE_END,
	    "fas_handle_msg_out_done_end");
	return (action);
}
6013 6012
6014 6013 static int
6015 6014 fas_twobyte_msg(struct fas *fas)
6016 6015 {
6017 6016 struct fas_cmd *sp = fas->f_current_sp;
6018 6017
6019 6018 if ((fas->f_imsgarea[0] == MSG_IGNORE_WIDE_RESID) &&
6020 6019 (fas->f_imsgarea[1] == 1)) {
6021 6020 int xfer_amt;
6022 6021
6023 6022 /*
6024 6023 * Knock off one byte if there
6025 6024 * is a last transfer and is even number of bytes
6026 6025 */
6027 6026 xfer_amt = sp->cmd_data_count - sp->cmd_saved_data_count;
6028 6027 if (xfer_amt && (!(xfer_amt & 1))) {
6029 6028 ASSERT(sp->cmd_data_count > 0);
6030 6029 sp->cmd_data_count--;
6031 6030 sp->cmd_cur_addr--;
6032 6031 }
6033 6032 IPRINTF1("ignore wide resid %d\n", fas->f_imsgarea[1]);
6034 6033 New_state(fas, ACTS_UNKNOWN);
6035 6034 return (0);
6036 6035 }
6037 6036
6038 6037 fas_log(fas, CE_WARN,
6039 6038 "Two byte message '%s' 0x%x rejected",
6040 6039 scsi_mname(fas->f_imsgarea[0]), fas->f_imsgarea[1]);
6041 6040 return (MSG_REJECT);
6042 6041 }
6043 6042
6044 6043 /*
6045 6044 * handle receiving extended messages
6046 6045 */
/*
 * Handle a complete incoming extended message: SDTR (synchronous) and
 * WDTR (wide) negotiation, plus rejection of everything else.
 * Returns the message byte to send back, or 0 for none.
 */
static int
fas_multibyte_msg(struct fas *fas)
{
#ifdef FASDEBUG
	static char *mbs =
	    "Target %d now Synchronous at %d.%d MB/s max transmit rate\n";
	static char *mbs1 =
	    "Target %d now Synchronous at %d.0%d MB/s max transmit rate\n";
	static char *mbs2 =
	    "Target %d now Synchronous at %d.00%d MB/s max transmit rate\n";
#endif
	struct fas_cmd *sp = fas->f_current_sp;
	volatile struct fasreg *fasreg = fas->f_reg;
	uchar_t emsg = fas->f_imsgarea[2];	/* extended message code */
	int tgt = Tgt(sp);
	int msgout = 0;

	EPRINTF("fas_multibyte_msg:\n");

	if (emsg == MSG_SYNCHRONOUS) {
		uint_t period, offset, regval;
		uint_t minsync, maxsync, clockval;
		uint_t xfer_freq, xfer_div, xfer_mod, xfer_rate;

		period = fas->f_imsgarea[3] & 0xff;
		offset = fas->f_imsgarea[4] & 0xff;
		minsync = MIN_SYNC_PERIOD(fas);
		maxsync = MAX_SYNC_PERIOD(fas);
		DPRINTF5("sync msg received: %x %x %x %x %x\n",
		    fas->f_imsgarea[0], fas->f_imsgarea[1],
		    fas->f_imsgarea[2], fas->f_imsgarea[3],
		    fas->f_imsgarea[4]);
		DPRINTF3("received period %d offset %d from tgt %d\n",
		    period, offset, tgt);
		DPRINTF3("calculated minsync %d, maxsync %d for tgt %d\n",
		    minsync, maxsync, tgt);
		DPRINTF2("sync period %d, neg period %d\n",
		    fas->f_sync_period[tgt], fas->f_neg_period[tgt]);

		/* odd f_sdtr_sent count means the target initiated SDTR */
		if ((++(fas->f_sdtr_sent)) & 1) {
			/*
			 * In cases where the target negotiates synchronous
			 * mode before we do, and we either have sync mode
			 * disabled, or this target is known to be a weak
			 * signal target, we send back a message indicating
			 * a desire to stay in asynchronous mode (the SCSI-2
			 * spec states that if we have synchronous capability
			 * then we cannot reject a SYNCHRONOUS DATA TRANSFER
			 * REQUEST message).
			 */
			IPRINTF1("SYNC negotiation initiated by target %d\n",
			    tgt);

			msgout = MSG_EXTENDED;

			period =
			    period ? max(period, MIN_SYNC_PERIOD(fas)) : 0;

			if (fas->f_backoff & (1<<tgt)) {
				period = period ?
				    max(period, fas->f_neg_period[tgt]) : 0;
			}
			offset = min(offset, fas_default_offset);
		}
		xfer_freq = regval = 0;

		/*
		 * If the target's offset is bigger than ours,
		 * the target has violated the scsi protocol.
		 */
		if (offset > fas_default_offset) {
			period = offset = 0;
			msgout = MSG_REJECT;
		}

		if (offset && (period > maxsync)) {
			/*
			 * We cannot transmit data in synchronous
			 * mode this slow, so convert to asynchronous
			 * mode.
			 */
			msgout = MSG_EXTENDED;
			period = offset = 0;

		} else if (offset && (period < minsync)) {
			/*
			 * If the target's period is less than ours,
			 * the target has violated the scsi protocol.
			 */
			period = offset = 0;
			msgout = MSG_REJECT;

		} else if (offset) {
			/*
			 * Conversion method for received PERIOD value
			 * to the number of input clock ticks to the FAS.
			 *
			 * We adjust the input period value such that
			 * we always will transmit data *not* faster
			 * than the period value received.
			 */

			clockval = fas->f_clock_cycle / 1000;
			regval = (((period << 2) + clockval - 1) / clockval);

			/*
			 * correction if xfer rate <= 5MB/sec
			 * XXX do we need this?
			 */
			if (regval && (period >= FASTSCSI_THRESHOLD)) {
				regval--;
			}
		}

		fas->f_offset[tgt] = offset;
		fas->f_neg_period[tgt] = period;

		/*
		 * It is now safe to produce a response to a target
		 * initiated sdtr. period and offset have been checked.
		 */
		if (msgout == MSG_EXTENDED) {
			fas_make_sdtr(fas, 0, tgt);
			period = fas->f_neg_period[tgt];
			offset = (fas->f_offset[tgt] & 0xf);
		}

		if (offset) {
			/* program the negotiated period/offset into the chip */
			fas->f_sync_period[tgt] = regval & SYNC_PERIOD_MASK;
			fas_reg_write(fas, (uchar_t *)&fasreg->fas_sync_period,
			    fas->f_sync_period[tgt]);

			fas->f_offset[tgt] = offset | fas->f_req_ack_delay;
			fas_reg_write(fas, (uchar_t *)&fasreg->fas_sync_offset,
			    fas->f_offset[tgt]);

			/*
			 * if transferring > 5 MB/sec then enable
			 * fastscsi in conf3
			 */
			if (period < FASTSCSI_THRESHOLD) {
				fas->f_fasconf3[tgt] |= FAS_CONF3_FASTSCSI;
			} else {
				fas->f_fasconf3[tgt] &= ~FAS_CONF3_FASTSCSI;
			}

			fas_reg_write(fas, (uchar_t *)&fasreg->fas_conf3,
			    fas->f_fasconf3[tgt]);

			DPRINTF4("period %d (%d), offset %d to tgt %d\n",
			    period,
			    fas->f_sync_period[tgt] & SYNC_PERIOD_MASK,
			    fas->f_offset[tgt] & 0xf, tgt);
			DPRINTF1("req/ack delay = %x\n", fas->f_req_ack_delay);
			DPRINTF1("conf3 = %x\n", fas->f_fasconf3[tgt]);
#ifdef FASDEBUG
			/*
			 * Convert input clock cycle per
			 * byte to nanoseconds per byte.
			 * (ns/b), and convert that to
			 * k-bytes/second.
			 */
			xfer_freq = FAS_SYNC_KBPS((regval *
			    fas->f_clock_cycle) / 1000);
			xfer_rate = ((fas->f_nowide & (1<<tgt))? 1 : 2) *
			    xfer_freq;
			xfer_div = xfer_rate / 1000;
			xfer_mod = xfer_rate % 1000;


			if (xfer_mod > 99) {
				IPRINTF3(mbs, tgt, xfer_div, xfer_mod);
			} else if (xfer_mod > 9) {
				IPRINTF3(mbs1, tgt, xfer_div, xfer_mod);
			} else {
				IPRINTF3(mbs2, tgt, xfer_div, xfer_mod);
			}
#endif
			fas->f_sync_enabled |= (1<<tgt);

		} else {
			/*
			 * We are converting back to async mode.
			 */
			fas_revert_to_async(fas, tgt);
		}

		/*
		 * If this target violated the scsi spec, reject the
		 * sdtr msg and don't negotiate sdtr again.
		 */
		if (msgout == MSG_REJECT) {
			fas->f_nosync |= (1<<tgt);
		}

		fas->f_props_update |= (1<<tgt);

	} else if (emsg == MSG_WIDE_DATA_XFER) {
		uchar_t width = fas->f_imsgarea[3] & 0xff;

		DPRINTF4("wide msg received: %x %x %x %x\n",
		    fas->f_imsgarea[0], fas->f_imsgarea[1],
		    fas->f_imsgarea[2], fas->f_imsgarea[3]);

		/* always renegotiate sync after wide */
		msgout = MSG_EXTENDED;

		/* odd f_wdtr_sent count means the target initiated WDTR */
		if ((++(fas->f_wdtr_sent)) & 1) {
			IPRINTF1("Wide negotiation initiated by target %d\n",
			    tgt);
			/*
			 * allow wide neg even if the target driver hasn't
			 * enabled wide yet.
			 */
			fas->f_nowide &= ~(1<<tgt);
			fas_make_wdtr(fas, 0, tgt, width);
			IPRINTF1("sending wide sync %d back\n", width);
			/*
			 * Let us go back to async mode(SCSI spec)
			 * and depend on target to do sync
			 * after wide negotiations.
			 * If target does not do a sync neg and enters
			 * async mode we will negotiate sync on next command
			 */
			fas_revert_to_async(fas, tgt);
			fas->f_sync_known &= ~(1<<tgt);
		} else {
			/*
			 * renegotiate sync after wide
			 */
			fas_set_wide_conf3(fas, tgt, width);
			ASSERT(width <= 1);
			fas->f_wdtr_sent = 0;
			if ((fas->f_nosync & (1<<tgt)) == 0) {
				fas_make_sdtr(fas, 0, tgt);
			} else {
				msgout = 0;
			}
		}

		fas->f_props_update |= (1<<tgt);

	} else if (emsg == MSG_MODIFY_DATA_PTR) {
		msgout = MSG_REJECT;
	} else {
		fas_log(fas, CE_WARN,
		    "Rejecting message %s 0x%x from Target %d",
		    scsi_mname(MSG_EXTENDED), emsg, tgt);
		msgout = MSG_REJECT;
	}
/* NOTE(review): no goto in this function targets this label; looks unused */
out:
	New_state(fas, ACTS_UNKNOWN);
	return (msgout);
}
6301 6300
/*
 * Back off sync negotiation
 * and go to async mode
 */
6306 6305 static void
6307 6306 fas_revert_to_async(struct fas *fas, int tgt)
6308 6307 {
6309 6308 volatile struct fasreg *fasreg = fas->f_reg;
6310 6309
6311 6310 fas->f_sync_period[tgt] = 0;
6312 6311 fas_reg_write(fas, (uchar_t *)&fasreg->fas_sync_period, 0);
6313 6312 fas->f_offset[tgt] = 0;
6314 6313 fas_reg_write(fas, (uchar_t *)&fasreg->fas_sync_offset, 0);
6315 6314 fas->f_fasconf3[tgt] &= ~FAS_CONF3_FASTSCSI;
6316 6315 fas_reg_write(fas, &fasreg->fas_conf3, fas->f_fasconf3[tgt]);
6317 6316 fas->f_sync_enabled &= ~(1<<tgt);
6318 6317 }
6319 6318
6320 6319 /*
6321 6320 * handle an unexpected selection attempt
6322 6321 * XXX look for better way: msg reject, drop off the bus
6323 6322 */
static int
fas_handle_selection(struct fas *fas)
{
	/* drop off the bus, discard any fifo residue, re-arm reselection */
	fas_reg_cmd_write(fas, CMD_DISCONNECT);
	fas_reg_cmd_write(fas, CMD_FLUSH);
	fas_reg_cmd_write(fas, CMD_EN_RESEL);
	return (ACTION_RETURN);
}
6332 6331
6333 6332 /*
6334 6333 * dma window handling
6335 6334 */
6336 6335 static int
6337 6336 fas_restore_pointers(struct fas *fas, struct fas_cmd *sp)
6338 6337 {
6339 6338 if (sp->cmd_data_count != sp->cmd_saved_data_count) {
6340 6339 sp->cmd_data_count = sp->cmd_saved_data_count;
6341 6340 sp->cmd_cur_addr = sp->cmd_saved_cur_addr;
6342 6341
6343 6342 if (sp->cmd_cur_win != sp->cmd_saved_win) {
6344 6343 sp->cmd_cur_win = sp->cmd_saved_win;
6345 6344 if (fas_set_new_window(fas, sp)) {
6346 6345 return (-1);
6347 6346 }
6348 6347 }
6349 6348 DPRINTF1("curaddr=%x\n", sp->cmd_cur_addr);
6350 6349 }
6351 6350 return (0);
6352 6351 }
6353 6352
6354 6353 static int
6355 6354 fas_set_new_window(struct fas *fas, struct fas_cmd *sp)
6356 6355 {
6357 6356 off_t offset;
6358 6357 size_t len;
6359 6358 uint_t count;
6360 6359
6361 6360 if (ddi_dma_getwin(sp->cmd_dmahandle, sp->cmd_cur_win,
6362 6361 &offset, &len, &sp->cmd_dmacookie, &count) != DDI_SUCCESS) {
6363 6362 return (-1);
6364 6363 }
6365 6364
6366 6365 DPRINTF4("new window %x: off=%lx, len=%lx, count=%x\n",
6367 6366 sp->cmd_cur_win, offset, len, count);
6368 6367
6369 6368 ASSERT(count == 1);
6370 6369 return (0);
6371 6370 }
6372 6371
/*
 * Advance to the next dma window for a command. If no further window
 * exists, the transfer has overrun: the command is failed with
 * CMD_DATA_OVR and sync/wide speeds are backed off for the target.
 * Returns 0 on success, -1 on failure/overrun.
 */
static int
fas_next_window(struct fas *fas, struct fas_cmd *sp, uint64_t end)
{

	/* are there more windows? (count them lazily, once per command) */
	if (sp->cmd_nwin == 0) {
		uint_t nwin = 0;
		(void) ddi_dma_numwin(sp->cmd_dmahandle, &nwin);
		sp->cmd_nwin = (uchar_t)nwin;
	}

	DPRINTF5(
	    "cmd_data_count=%x, dmacount=%x, curaddr=%x, end=%lx, nwin=%x\n",
	    sp->cmd_data_count, sp->cmd_dmacount, sp->cmd_cur_addr, end,
	    sp->cmd_nwin);

	if (sp->cmd_cur_win < sp->cmd_nwin) {
		sp->cmd_cur_win++;
		if (fas_set_new_window(fas, sp)) {
			/* back out the increment so state stays consistent */
			fas_printstate(fas, "cannot set new window");
			sp->cmd_cur_win--;
			return (-1);
		}
	/*
	 * if there are no more windows, we have a data overrun condition
	 */
	} else {
		int slot = sp->cmd_slot;

		fas_printstate(fas, "data transfer overrun");
		fas_set_pkt_reason(fas, sp, CMD_DATA_OVR, 0);

		/*
		 * if we get data transfer overruns, assume we have
		 * a weak scsi bus. Note that this won't catch consistent
		 * underruns or other noise related syndromes.
		 */
		fas_sync_wide_backoff(fas, sp, slot);
		return (-1);
	}
	/* start the new window at the beginning of its cookie */
	sp->cmd_cur_addr = sp->cmd_dmacookie.dmac_address;
	DPRINTF1("cur_addr=%x\n", sp->cmd_cur_addr);
	return (0);
}
6417 6416
6418 6417 /*
6419 6418 * dma error handler
6420 6419 */
6421 6420 static int
6422 6421 fas_check_dma_error(struct fas *fas)
6423 6422 {
6424 6423 /*
6425 6424 * was there a dma error that caused fas_intr_svc() to be called?
6426 6425 */
6427 6426 if (fas->f_dma->dma_csr & DMA_ERRPEND) {
6428 6427 /*
6429 6428 * It would be desirable to set the ATN* line and attempt to
6430 6429 * do the whole schmear of INITIATOR DETECTED ERROR here,
6431 6430 * but that is too hard to do at present.
6432 6431 */
6433 6432 fas_log(fas, CE_WARN, "Unrecoverable DMA error");
6434 6433 fas_printstate(fas, "dma error");
6435 6434 fas_set_pkt_reason(fas, fas->f_current_sp, CMD_TRAN_ERR, 0);
6436 6435 return (-1);
6437 6436 }
6438 6437 return (0);
6439 6438 }
6440 6439
6441 6440 /*
6442 6441 * check for gross error or spurious interrupt
6443 6442 */
/*
 * Handle a gross-error indication in the fas status register: log the
 * chip state, fail the current command with CMD_TRAN_ERR, reset the
 * chip, and ask the caller to reset the bus.
 */
static int
fas_handle_gross_err(struct fas *fas)
{
	volatile struct fasreg *fasreg = fas->f_reg;

	fas_log(fas, CE_WARN,
	    "gross error in fas status (%x)", fas->f_stat);

	IPRINTF5("fas_cmd=%x, stat=%x, intr=%x, step=%x, fifoflag=%x\n",
	    fasreg->fas_cmd, fas->f_stat, fas->f_intr, fasreg->fas_step,
	    fasreg->fas_fifo_flag);

	fas_set_pkt_reason(fas, fas->f_current_sp, CMD_TRAN_ERR, 0);

	/* reset the fas chip itself; caller then resets the bus */
	fas_internal_reset(fas, FAS_RESET_FAS);
	return (ACTION_RESET);
}
6461 6460
6462 6461
6463 6462 /*
6464 6463 * handle illegal cmd interrupt or (external) bus reset cleanup
6465 6464 */
6466 6465 static int
6467 6466 fas_illegal_cmd_or_bus_reset(struct fas *fas)
6468 6467 {
6469 6468 /*
6470 6469 * If we detect a SCSI reset, we blow away the current
6471 6470 * command (if there is one) and all disconnected commands
6472 6471 * because we now don't know the state of them at all.
6473 6472 */
6474 6473 ASSERT(fas->f_intr & (FAS_INT_ILLEGAL | FAS_INT_RESET));
6475 6474
6476 6475 if (fas->f_intr & FAS_INT_RESET) {
6477 6476 return (ACTION_FINRST);
6478 6477 }
6479 6478
6480 6479 /*
6481 6480 * Illegal cmd to fas:
6482 6481 * This should not happen. The one situation where
6483 6482 * we can get an ILLEGAL COMMAND interrupt is due to
6484 6483 * a bug in the FAS366 during reselection which we
6485 6484 * should be handling in fas_reconnect().
6486 6485 */
6487 6486 if (fas->f_intr & FAS_INT_ILLEGAL) {
6488 6487 IPRINTF1("lastcmd=%x\n", fas->f_reg->fas_cmd);
6489 6488 fas_printstate(fas, "ILLEGAL bit set");
6490 6489 return (ACTION_RESET);
6491 6490 }
6492 6491 /*NOTREACHED*/
6493 6492 return (ACTION_RETURN);
6494 6493 }
6495 6494
6496 6495 /*
6497 6496 * set throttles for all luns of this target
6498 6497 */
6499 6498 static void
6500 6499 fas_set_throttles(struct fas *fas, int slot, int n, int what)
6501 6500 {
6502 6501 int i;
6503 6502
6504 6503 /*
6505 6504 * if the bus is draining/quiesced, no changes to the throttles
6506 6505 * are allowed. Not allowing change of throttles during draining
6507 6506 * limits error recovery but will reduce draining time
6508 6507 *
6509 6508 * all throttles should have been set to HOLD_THROTTLE
6510 6509 */
6511 6510 if (fas->f_softstate & (FAS_SS_QUIESCED | FAS_SS_DRAINING)) {
6512 6511 return;
6513 6512 }
6514 6513
6515 6514 ASSERT((n == 1) || (n == N_SLOTS) || (n == NLUNS_PER_TARGET));
6516 6515 ASSERT((slot + n) <= N_SLOTS);
6517 6516 if (n == NLUNS_PER_TARGET) {
6518 6517 slot &= ~(NLUNS_PER_TARGET - 1);
6519 6518 }
6520 6519
6521 6520 for (i = slot; i < (slot + n); i++) {
6522 6521 if (what == HOLD_THROTTLE) {
6523 6522 fas->f_throttle[i] = HOLD_THROTTLE;
6524 6523 } else if ((fas->f_reset_delay[i/NLUNS_PER_TARGET]) == 0) {
6525 6524 if (what == MAX_THROTTLE) {
6526 6525 int tshift = 1 << (i/NLUNS_PER_TARGET);
6527 6526 fas->f_throttle[i] = (short)
6528 6527 ((fas->f_notag & tshift)? 1 : what);
6529 6528 } else {
6530 6529 fas->f_throttle[i] = what;
6531 6530 }
6532 6531 }
6533 6532 }
6534 6533 }
6535 6534
/*
 * apply 'what' to the throttles of every LUN of the target owning
 * 'slot'; fas_set_throttles aligns the slot down to LUN 0 itself.
 */
static void
fas_set_all_lun_throttles(struct fas *fas, int slot, int what)
{
	/*
	 * fas_set_throttle will adjust slot to starting at LUN 0
	 */
	fas_set_throttles(fas, slot, NLUNS_PER_TARGET, what);
}
6544 6543
/*
 * restore a single slot's throttle to its maximum
 * (fas_set_throttles lowers this to 1 for untagged targets)
 */
static void
fas_full_throttle(struct fas *fas, int slot)
{
	fas_set_throttles(fas, slot, 1, MAX_THROTTLE);
}
6550 6549
/*
 * run a polled cmd
 *
 * Transport sp (a FLAG_NOINTR command) on the given slot without
 * relying on interrupts: hold the throttles, drain any active
 * commands, start sp and poll the chip until it completes.  On
 * failure the command is failed with CMD_TRAN_ERR and, for non-proxy
 * commands, the bus is reset.
 * NOTE(review): presumably called with FAS_MUTEX held — confirm
 * against the callers.
 */
static void
fas_runpoll(struct fas *fas, short slot, struct fas_cmd *sp)
{
	int limit, i, n;
	int timeout = 0;	/* usecs spent waiting for the slot to drain */

	DPRINTF4("runpoll: slot=%x, cmd=%x, current_sp=0x%p, tcmds=%x\n",
	    slot, *((uchar_t *)sp->cmd_pkt->pkt_cdbp),
	    (void *)fas->f_current_sp, fas->f_tcmds[slot]);

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_RUNPOLL_START, "fas_runpoll_start");

	/*
	 * wait for cmd to complete
	 * don't start new cmds so set throttles to HOLD_THROTTLE
	 */
	while ((sp->cmd_flags & CFLAG_COMPLETED) == 0) {
		if (!(sp->cmd_flags & CFLAG_CMDPROXY)) {
			fas_set_all_lun_throttles(fas, slot, HOLD_THROTTLE);
		}
		/* finish whatever is currently on the bus first */
		if ((fas->f_state != STATE_FREE) || INTPENDING(fas)) {
			if (fas_dopoll(fas, POLL_TIMEOUT) <= 0) {
				IPRINTF("runpoll: timeout on draining\n");
				goto bad;
			}
		}

		ASSERT(fas->f_state == STATE_FREE);
		ASSERT(fas->f_current_sp == NULL);

		/*
		 * if this is not a proxy cmd, don't start the cmd
		 * without draining the active cmd(s)
		 * for proxy cmds, we zap the active cmd and assume
		 * that the caller will take care of this
		 * For tagged cmds, wait with submitting a non-tagged
		 * cmd until the queue has been drained
		 * If the cmd is a request sense, then draining won't
		 * help since we are in contingence allegiance condition
		 */
		if (!(sp->cmd_flags & CFLAG_CMDPROXY)) {
			uchar_t *cmdp = (uchar_t *)sp->cmd_pkt->pkt_cdbp;

			if ((fas->f_tcmds[slot]) &&
			    (NOTAG(Tgt(sp)) ||
			    (((sp->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
			    (*cmdp != SCMD_REQUEST_SENSE)))) {
				if (timeout < POLL_TIMEOUT) {
					/* wait in 100 usec increments */
					timeout += 100;
					drv_usecwait(100);
					continue;
				} else {
					fas_log(fas, CE_WARN,
					    "polled cmd failed (target busy)");
					goto cleanup;
				}
			}
		}

		/*
		 * If the draining of active commands killed the
		 * current polled command, we're done..
		 */
		if (sp->cmd_flags & CFLAG_COMPLETED) {
			break;
		}

		/*
		 * ensure we are not accessing a target too quickly
		 * after a reset. the throttles get set back later
		 * by the reset delay watch; hopefully, we don't go
		 * thru this loop more than once
		 */
		if (fas->f_reset_delay[slot/NLUNS_PER_TARGET]) {
			IPRINTF1("reset delay set for slot %x\n", slot);
			drv_usecwait(fas->f_scsi_reset_delay * 1000);
			/* clear all pending reset delays and re-enable */
			for (i = 0; i < NTARGETS_WIDE; i++) {
				if (fas->f_reset_delay[i]) {
					int s = i * NLUNS_PER_TARGET;
					int e = s + NLUNS_PER_TARGET;
					fas->f_reset_delay[i] = 0;
					for (; s < e; s++) {
						fas_full_throttle(fas, s);
					}
				}
			}
		}

		/*
		 * fas_startcmd() will return false if preempted
		 * or draining
		 */
		if (fas_startcmd(fas, sp) != TRUE) {
			IPRINTF("runpoll: cannot start new cmds\n");
			ASSERT(fas->f_current_sp != sp);
			continue;
		}

		/*
		 * We're now 'running' this command.
		 *
		 * fas_dopoll will always return when
		 * fas->f_state is STATE_FREE, and
		 */
		limit = sp->cmd_pkt->pkt_time * 1000000;
		if (limit == 0) {
			limit = POLL_TIMEOUT;
		}

		/*
		 * if the cmd disconnected, the first call to fas_dopoll
		 * will return with bus free; we go thru the loop one more
		 * time and wait limit usec for the target to reconnect
		 */
		for (i = 0; i <= POLL_TIMEOUT; i += 100) {

			if ((n = fas_dopoll(fas, limit)) <= 0) {
				IPRINTF("runpoll: timeout on polling\n");
				goto bad;
			}

			/*
			 * If a preemption occurred that caused this
			 * command to actually not start, go around
			 * the loop again. If CFLAG_COMPLETED is set, the
			 * command completed
			 */
			if ((sp->cmd_flags & CFLAG_COMPLETED) ||
			    (sp->cmd_pkt->pkt_state == 0)) {
				break;
			}

			/*
			 * the bus may have gone free because the target
			 * disconnected; go thru the loop again
			 */
			ASSERT(fas->f_state == STATE_FREE);
			if (n == 0) {
				/*
				 * bump i, we have waited limit usecs in
				 * fas_dopoll
				 */
				i += limit - 100;
			}
		}

		if ((sp->cmd_flags & CFLAG_COMPLETED) == 0) {

			if (i > POLL_TIMEOUT) {
				IPRINTF("polled timeout on disc. cmd\n");
				goto bad;
			}

			if (sp->cmd_pkt->pkt_state) {
				/*
				 * don't go thru the loop again; the cmd
				 * was already started
				 */
				IPRINTF("fas_runpoll: cmd started??\n");
				goto bad;
			}
		}
	}

	/*
	 * blindly restore throttles which is preferable over
	 * leaving throttle hanging at 0 and no one to clear it
	 */
	if (!(sp->cmd_flags & CFLAG_CMDPROXY)) {
		fas_set_all_lun_throttles(fas, slot, MAX_THROTTLE);
	}

	/*
	 * ensure that the cmd is completely removed
	 */
	fas_remove_cmd(fas, sp, 0);

	/*
	 * If we stored up commands to do, start them off now.
	 */
	if ((fas->f_state == STATE_FREE) &&
	    (!(sp->cmd_flags & CFLAG_CMDPROXY))) {
		(void) fas_ustart(fas);
	}
exit:
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_RUNPOLL_END, "fas_runpoll_end");
	return;

bad:
	fas_log(fas, CE_WARN, "Polled cmd failed");
#ifdef FASDEBUG
	fas_printstate(fas, "fas_runpoll: polled cmd failed");
#endif /* FASDEBUG */

cleanup:
	fas_set_all_lun_throttles(fas, slot, MAX_THROTTLE);

	/*
	 * clean up all traces of this sp because fas_runpoll will return
	 * before fas_reset_recovery() cleans up
	 */
	fas_remove_cmd(fas, sp, NEW_TIMEOUT);
	fas_decrement_ncmds(fas, sp);
	fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);

	if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
		(void) fas_reset_bus(fas);
	}
	goto exit;
}
6764 6763
/*
 * Poll for command completion (i.e., no interrupts)
 * limit is in usec (and will not be very accurate)
 *
 * the assumption is that we only run polled cmds in interrupt context
 * as scsi_transport will filter out FLAG_NOINTR
 *
 * Returns -1 on timeout, otherwise the number of interrupts serviced
 * before the bus went free (0 means the bus was already/went free
 * without a serviced interrupt).
 */
static int
fas_dopoll(struct fas *fas, int limit)
{
	int i, n;

	/*
	 * timeout is not very accurate since we don't know how
	 * long the poll takes
	 * also if the packet gets started fairly late, we may
	 * timeout prematurely
	 * fas_dopoll always returns if f_state transitions to STATE_FREE
	 */
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_DOPOLL_START, "fas_dopoll_start");

	if (limit == 0) {
		limit = POLL_TIMEOUT;
	}

	/* poll in 100 usec steps, servicing any pending interrupt */
	for (n = i = 0; i < limit; i += 100) {
		if (INTPENDING(fas)) {
			fas->f_polled_intr = 1;
			n++;
			(void) fas_intr_svc(fas);
			if (fas->f_state == STATE_FREE)
				break;
		}
		drv_usecwait(100);
	}

	if (i >= limit && fas->f_state != STATE_FREE) {
		fas_printstate(fas, "polled command timeout");
		n = -1;
	}
	TRACE_1(TR_FAC_SCSI_FAS, TR_FAS_DOPOLL_END,
	    "fas_dopoll_end: rval %x", n);
	return (n);
}
6809 6808
/*
 * prepare a sync negotiation message
 *
 * Builds an extended SDTR message (period, offset) at msgout_offset in
 * f_cur_msgout, records the negotiated values in f_neg_period/f_offset
 * and marks sync/wide as known for the target.
 */
static void
fas_make_sdtr(struct fas *fas, int msgout_offset, int target)
{
	uchar_t *p = fas->f_cur_msgout + msgout_offset;
	ushort_t tshift = 1<<target;
	uchar_t period = MIN_SYNC_PERIOD(fas);
	uchar_t offset = fas_default_offset;

	/*
	 * If this target experienced a sync backoff use the
	 * target's sync speed that was adjusted in
	 * fas_sync_wide_backoff. For second sync backoff,
	 * offset will be adjusted below in sanity checks.
	 */
	if (fas->f_backoff & tshift) {
		period = fas->f_neg_period[target];
	}

	/*
	 * If this is a response to a target initiated sdtr,
	 * use the agreed upon values.
	 */
	if (fas->f_sdtr_sent & 1) {
		period = fas->f_neg_period[target];
		offset = fas->f_offset[target];
	}

	/*
	 * If the target driver disabled
	 * sync then make offset = 0
	 */
	if (fas->f_force_async & tshift) {
		offset = 0;
	}

	/*
	 * sanity check of period and offset
	 */
	if (fas->f_target_scsi_options[target] & SCSI_OPTIONS_FAST) {
		if (period < (uchar_t)(DEFAULT_FASTSYNC_PERIOD/4)) {
			period = (uchar_t)(DEFAULT_FASTSYNC_PERIOD/4);
		}
	} else if (fas->f_target_scsi_options[target] & SCSI_OPTIONS_SYNC) {
		if (period < (uchar_t)(DEFAULT_SYNC_PERIOD/4)) {
			period = (uchar_t)(DEFAULT_SYNC_PERIOD/4);
		}
	} else {
		/* neither fast nor sync allowed: force async for good */
		fas->f_nosync |= tshift;
	}

	if (fas->f_nosync & tshift) {
		offset = 0;
	}

	if ((uchar_t)(offset & 0xf) > fas_default_offset) {
		offset = fas_default_offset | fas->f_req_ack_delay;
	}

	fas->f_neg_period[target] = (uchar_t)period;
	fas->f_offset[target] = (uchar_t)offset;

	/* SDTR: extended msg, length 3, code, period, offset */
	*p++ = (uchar_t)MSG_EXTENDED;
	*p++ = (uchar_t)3;
	*p++ = (uchar_t)MSG_SYNCHRONOUS;
	*p++ = period;
	*p++ = offset & 0xf;
	fas->f_omsglen = 5 + msgout_offset;

	IPRINTF2("fas_make_sdtr: period = %x, offset = %x\n",
	    period, offset);
	/*
	 * increment sdtr flag, odd value indicates that we initiated
	 * the negotiation
	 */
	fas->f_sdtr_sent++;

	/*
	 * the target may reject the optional sync message so
	 * to avoid negotiating on every cmd, set sync known here
	 * we should not negotiate wide after sync again
	 */
	fas->f_sync_known |= 1<<target;
	fas->f_wide_known |= 1<<target;
}
6897 6896
6898 6897 /*
6899 6898 * prepare a wide negotiation message
6900 6899 */
6901 6900 static void
6902 6901 fas_make_wdtr(struct fas *fas, int msgout_offset, int target, int width)
6903 6902 {
6904 6903 uchar_t *p = fas->f_cur_msgout + msgout_offset;
6905 6904
6906 6905 if (((fas->f_target_scsi_options[target] & SCSI_OPTIONS_WIDE) == 0) ||
6907 6906 (fas->f_nowide & (1<<target))) {
6908 6907 fas->f_nowide |= 1<<target;
6909 6908 width = 0;
6910 6909 }
6911 6910 if (fas->f_force_narrow & (1<<target)) {
6912 6911 width = 0;
6913 6912 }
6914 6913 width = min(FAS_XFER_WIDTH, width);
6915 6914
6916 6915 *p++ = (uchar_t)MSG_EXTENDED;
6917 6916 *p++ = (uchar_t)2;
6918 6917 *p++ = (uchar_t)MSG_WIDE_DATA_XFER;
6919 6918 *p++ = (uchar_t)width;
6920 6919 fas->f_omsglen = 4 + msgout_offset;
6921 6920 IPRINTF1("fas_make_wdtr: width=%x\n", width);
6922 6921
6923 6922 /*
6924 6923 * increment wdtr flag, odd value indicates that we initiated
6925 6924 * the negotiation
6926 6925 */
6927 6926 fas->f_wdtr_sent++;
6928 6927
6929 6928 /*
6930 6929 * the target may reject the optional wide message so
6931 6930 * to avoid negotiating on every cmd, set wide known here
6932 6931 */
6933 6932 fas->f_wide_known |= 1<<target;
6934 6933
6935 6934 fas_set_wide_conf3(fas, target, width);
6936 6935 }
6937 6936
/*
 * auto request sense support
 * create or destroy an auto request sense packet
 *
 * Allocates a SENSE_LENGTH consistent buffer and a REQUEST SENSE
 * packet for the slot addressed by ap, and stashes it in
 * f_arq_pkt[slot].  Returns 0 (also when a pkt already exists).
 */
static int
fas_create_arq_pkt(struct fas *fas, struct scsi_address *ap)
{
	/*
	 * Allocate a request sense packet using get_pktiopb
	 */
	struct fas_cmd *rqpktp;
	uchar_t slot = ap->a_target * NLUNS_PER_TARGET | ap->a_lun;
	struct buf *bp;
	struct arq_private_data *arq_data;

	/*
	 * if one exists, don't create another
	 */
	if (fas->f_arq_pkt[slot] != 0) {
		return (0);
	}

	/*
	 * it would be nicer if we could allow the target driver
	 * to specify the size but this is easier and OK for most
	 * drivers to use SENSE_LENGTH
	 * Allocate a request sense packet.
	 * NOTE(review): the allocation results are not checked here;
	 * presumably SLEEP_FUNC guarantees success — confirm.
	 */
	bp = scsi_alloc_consistent_buf(ap, (struct buf *)NULL,
	    SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL);
	rqpktp = PKT2CMD(scsi_init_pkt(ap,
	    NULL, bp, CDB_GROUP0, 1, PKT_PRIV_LEN,
	    PKT_CONSISTENT, SLEEP_FUNC, NULL));
	arq_data =
	    (struct arq_private_data *)(rqpktp->cmd_pkt->pkt_private);
	arq_data->arq_save_bp = bp;

	/* build the REQUEST SENSE CDB; run at the head, no disconnect */
	RQ_MAKECOM_G0((CMD2PKT(rqpktp)),
	    FLAG_SENSING | FLAG_HEAD | FLAG_NODISCON,
	    (char)SCMD_REQUEST_SENSE, 0, (char)SENSE_LENGTH);
	rqpktp->cmd_flags |= CFLAG_CMDARQ;
	rqpktp->cmd_slot = slot;
	rqpktp->cmd_pkt->pkt_ha_private = rqpktp;
	fas->f_arq_pkt[slot] = rqpktp;

	/*
	 * we need a function ptr here so abort/reset can
	 * defer callbacks; fas_call_pkt_comp() calls
	 * fas_complete_arq_pkt() directly without releasing the lock
	 * However, since we are not calling back directly thru
	 * pkt_comp, don't check this with warlock
	 */
#ifndef __lock_lint
	rqpktp->cmd_pkt->pkt_comp =
	    (void (*)(struct scsi_pkt *))fas_complete_arq_pkt;
#endif
	return (0);
}
6996 6995
6997 6996 static int
6998 6997 fas_delete_arq_pkt(struct fas *fas, struct scsi_address *ap)
6999 6998 {
7000 6999 struct fas_cmd *rqpktp;
7001 7000 int slot = ap->a_target * NLUNS_PER_TARGET | ap->a_lun;
7002 7001
7003 7002 /*
7004 7003 * if there is still a pkt saved or no rqpkt
7005 7004 * then we cannot deallocate or there is nothing to do
7006 7005 */
7007 7006 if ((rqpktp = fas->f_arq_pkt[slot]) != NULL) {
7008 7007 struct arq_private_data *arq_data =
7009 7008 (struct arq_private_data *)(rqpktp->cmd_pkt->pkt_private);
7010 7009 struct buf *bp = arq_data->arq_save_bp;
7011 7010 /*
7012 7011 * is arq pkt in use?
7013 7012 */
7014 7013 if (arq_data->arq_save_sp) {
7015 7014 return (-1);
7016 7015 }
7017 7016
7018 7017 scsi_destroy_pkt(CMD2PKT(rqpktp));
7019 7018 scsi_free_consistent_buf(bp);
7020 7019 fas->f_arq_pkt[slot] = 0;
7021 7020 }
7022 7021 return (0);
7023 7022 }
7024 7023
/*
 * complete an arq packet by copying over transport info and the actual
 * request sense data; called with mutex held from fas_call_pkt_comp()
 */
void
fas_complete_arq_pkt(struct scsi_pkt *pkt)
{
	struct fas *fas = ADDR2FAS(&pkt->pkt_address);
	struct fas_cmd *sp = pkt->pkt_ha_private;	/* the rq sense cmd */
	struct scsi_arq_status *arqstat;
	struct arq_private_data *arq_data =
	    (struct arq_private_data *)sp->cmd_pkt->pkt_private;
	struct fas_cmd *ssp = arq_data->arq_save_sp;	/* the original cmd */
	struct buf *bp = arq_data->arq_save_bp;
	int slot = sp->cmd_slot;

	DPRINTF1("completing arq pkt sp=0x%p\n", (void *)sp);
	ASSERT(sp == fas->f_arq_pkt[slot]);
	ASSERT(arq_data->arq_save_sp != NULL);
	ASSERT(ssp != fas->f_active[sp->cmd_slot]->f_slot[sp->cmd_tag[1]]);

	/*
	 * copy the rq pkt's transport state and the sense bytes into
	 * the original pkt's scsi_arq_status, then release the arq pkt
	 */
	arqstat = (struct scsi_arq_status *)(ssp->cmd_pkt->pkt_scbp);
	arqstat->sts_rqpkt_status = *((struct scsi_status *)
	    (sp->cmd_pkt->pkt_scbp));
	arqstat->sts_rqpkt_reason = sp->cmd_pkt->pkt_reason;
	arqstat->sts_rqpkt_state = sp->cmd_pkt->pkt_state;
	arqstat->sts_rqpkt_statistics = sp->cmd_pkt->pkt_statistics;
	arqstat->sts_rqpkt_resid = sp->cmd_pkt->pkt_resid;
	arqstat->sts_sensedata =
	    *((struct scsi_extended_sense *)bp->b_un.b_addr);
	ssp->cmd_pkt->pkt_state |= STATE_ARQ_DONE;
	arq_data->arq_save_sp = NULL;

	/*
	 * ASC=0x47 is parity error
	 */
	if (arqstat->sts_sensedata.es_key == KEY_ABORTED_COMMAND &&
	    arqstat->sts_sensedata.es_add_code == 0x47) {
		fas_sync_wide_backoff(fas, sp, slot);
	}

	/* finally complete the original command */
	fas_call_pkt_comp(fas, ssp);
}
7068 7067
/*
 * handle check condition and start an arq packet
 *
 * Returns 0 when the condition was handled (arq started, or the pkt
 * completed directly because no arq is possible), -1 when starting
 * the auto request sense failed.
 */
static int
fas_handle_sts_chk(struct fas *fas, struct fas_cmd *sp)
{
	struct fas_cmd *arqsp = fas->f_arq_pkt[sp->cmd_slot];
	struct arq_private_data *arq_data;
	struct buf *bp;

	/*
	 * no arq pkt for this slot, sp is itself the arq pkt, or the
	 * caller's status area is too small: complete sp as-is
	 */
	if ((arqsp == NULL) || (arqsp == sp) ||
	    (sp->cmd_scblen < sizeof (struct scsi_arq_status))) {
		IPRINTF("no arq packet or cannot arq on arq pkt\n");
		fas_call_pkt_comp(fas, sp);
		return (0);
	}

	arq_data = (struct arq_private_data *)arqsp->cmd_pkt->pkt_private;
	bp = arq_data->arq_save_bp;

	ASSERT(sp->cmd_flags & CFLAG_FINISHED);
	ASSERT(sp != fas->f_active[sp->cmd_slot]->f_slot[sp->cmd_tag[1]]);
	DPRINTF3("start arq for slot=%x, arqsp=0x%p, rqpkt=0x%p\n",
	    sp->cmd_slot, (void *)arqsp, (void *)fas->f_arq_pkt[sp->cmd_slot]);
	if (arq_data->arq_save_sp != NULL) {
		IPRINTF("auto request sense already in progress\n");
		goto fail;
	}

	/* remember whose sense we are fetching */
	arq_data->arq_save_sp = sp;

	bzero(bp->b_un.b_addr, sizeof (struct scsi_extended_sense));

	/*
	 * copy the timeout from the original packet by lack of a better
	 * value
	 * we could take the residue of the timeout but that could cause
	 * premature timeouts perhaps
	 */
	arqsp->cmd_pkt->pkt_time = sp->cmd_pkt->pkt_time;
	arqsp->cmd_flags &= ~CFLAG_TRANFLAG;
	ASSERT(arqsp->cmd_pkt->pkt_comp != NULL);

	/*
	 * make sure that auto request sense always goes out
	 * after queue full and after throttle was set to draining
	 */
	fas_full_throttle(fas, sp->cmd_slot);
	(void) fas_accept_pkt(fas, arqsp, NO_TRAN_BUSY);
	return (0);

fail:
	fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
	fas_log(fas, CE_WARN, "auto request sense failed\n");
	fas_dump_cmd(fas, sp);
	fas_call_pkt_comp(fas, sp);
	return (-1);
}
7127 7126
7128 7127
/*
 * handle qfull condition
 *
 * Either gives up and completes sp (letting the target driver see the
 * QFULL status), or lowers the throttle and requeues sp at the head
 * for another attempt.
 */
static void
fas_handle_qfull(struct fas *fas, struct fas_cmd *sp)
{
	int slot = sp->cmd_slot;

	if ((++sp->cmd_qfull_retries > fas->f_qfull_retries[Tgt(sp)]) ||
	    (fas->f_qfull_retries[Tgt(sp)] == 0)) {
		/*
		 * We have exhausted the retries on QFULL, or,
		 * the target driver has indicated that it
		 * wants to handle QFULL itself by setting
		 * qfull-retries capability to 0. In either case
		 * we want the target driver's QFULL handling
		 * to kick in. We do this by having pkt_reason
		 * as CMD_CMPLT and pkt_scbp as STATUS_QFULL.
		 */
		IPRINTF2("%d.%d: status queue full, retries over\n",
		    Tgt(sp), Lun(sp));
		fas_set_all_lun_throttles(fas, slot, DRAIN_THROTTLE);
		fas_call_pkt_comp(fas, sp);
	} else {
		/* throttle down to just below the outstanding count */
		if (fas->f_reset_delay[Tgt(sp)] == 0) {
			fas->f_throttle[slot] =
			    max((fas->f_tcmds[slot] - 2), 0);
		}
		IPRINTF3("%d.%d: status queue full, new throttle = %d, "
		    "retrying\n", Tgt(sp), Lun(sp), fas->f_throttle[slot]);
		sp->cmd_pkt->pkt_flags |= FLAG_HEAD;
		sp->cmd_flags &= ~CFLAG_TRANFLAG;
		(void) fas_accept_pkt(fas, sp, NO_TRAN_BUSY);

		/*
		 * when target gives queue full status with no commands
		 * outstanding (f_tcmds[] == 0), throttle is set to 0
		 * (HOLD_THROTTLE), and the queue full handling starts
		 * (see psarc/1994/313); if there are commands outstanding,
		 * the throttle is set to (f_tcmds[] - 2)
		 */
		if (fas->f_throttle[slot] == HOLD_THROTTLE) {
			/*
			 * By setting throttle to QFULL_THROTTLE, we
			 * avoid submitting new commands and in
			 * fas_restart_cmd find out slots which need
			 * their throttles to be cleared.
			 */
			fas_set_all_lun_throttles(fas, slot, QFULL_THROTTLE);
			if (fas->f_restart_cmd_timeid == 0) {
				fas->f_restart_cmd_timeid =
				    timeout(fas_restart_cmd, fas,
				    fas->f_qfull_retry_interval[Tgt(sp)]);
			}
		}
	}
}
7186 7185
7187 7186 /*
7188 7187 * invoked from timeout() to restart qfull cmds with throttle == 0
7189 7188 */
7190 7189 static void
7191 7190 fas_restart_cmd(void *fas_arg)
7192 7191 {
7193 7192 struct fas *fas = fas_arg;
7194 7193 int i;
7195 7194
7196 7195 IPRINTF("fas_restart_cmd:\n");
7197 7196
7198 7197 mutex_enter(FAS_MUTEX(fas));
7199 7198 fas->f_restart_cmd_timeid = 0;
7200 7199
7201 7200 for (i = 0; i < N_SLOTS; i += NLUNS_PER_TARGET) {
7202 7201 if (fas->f_reset_delay[i/NLUNS_PER_TARGET] == 0) {
7203 7202 if (fas->f_throttle[i] == QFULL_THROTTLE) {
7204 7203 fas_set_all_lun_throttles(fas,
7205 7204 i, MAX_THROTTLE);
7206 7205 }
7207 7206 }
7208 7207 }
7209 7208
7210 7209 (void) fas_ustart(fas);
7211 7210 mutex_exit(FAS_MUTEX(fas));
7212 7211 }
7213 7212
/*
 * Timeout handling:
 * Command watchdog routines
 */

/*
 * periodic watchdog: for every fas instance run the per-slot timeout
 * scan, repair throttles, flush deferred property updates, then
 * rearm itself via timeout(9F)
 */
/*ARGSUSED*/
static void
fas_watch(void *arg)
{
	struct fas *fas;
	ushort_t props_update = 0;

	rw_enter(&fas_global_rwlock, RW_READER);

	for (fas = fas_head; fas != (struct fas *)NULL; fas = fas->f_next) {

		mutex_enter(FAS_MUTEX(fas));
		IPRINTF2("ncmds=%x, ndisc=%x\n", fas->f_ncmds, fas->f_ndisc);

#ifdef FAS_PIO_COUNTS
		/* dump and reset per-interval PIO statistics */
		if (fas->f_total_cmds) {
			int n = fas->f_total_cmds;

			fas_log(fas, CE_NOTE,
	"total=%d, cmds=%d fas-rd=%d, fas-wrt=%d, dma-rd=%d, dma-wrt=%d\n",
			    fas->f_total_cmds,
			    fas->f_reg_cmds/n,
			    fas->f_reg_reads/n, fas->f_reg_writes/n,
			    fas->f_reg_dma_reads/n, fas->f_reg_dma_writes/n);

			fas->f_reg_reads = fas->f_reg_writes =
			    fas->f_reg_dma_reads = fas->f_reg_dma_writes =
			    fas->f_reg_cmds = fas->f_total_cmds = 0;
		}
#endif
		if (fas->f_ncmds) {
			int i;
			fas_watchsubr(fas);

			/*
			 * reset throttle. the throttle may have been
			 * too low if queue full was caused by
			 * another initiator
			 * Only reset throttle if no cmd active in slot 0
			 * (untagged cmd)
			 */
#ifdef FAS_TEST
			if (fas_enable_untagged) {
				fas_test_untagged++;
			}
#endif
			for (i = 0; i < N_SLOTS; i++) {
				if ((fas->f_throttle[i] > HOLD_THROTTLE) &&
				    (fas->f_active[i] &&
				    (fas->f_active[i]->f_slot[0] == NULL))) {
					fas_full_throttle(fas, i);
				}
			}
		}

		if (fas->f_props_update) {
			int i;
			/*
			 * f_mutex will be released and reentered in
			 * fas_props_update().
			 * Hence we save the fas->f_props_update now and
			 * set to 0 indicating that property has been
			 * updated. This will avoid a race condition with
			 * any thread that runs in interrupt context that
			 * attempts to set the f_props_update to non-zero value
			 */
			props_update = fas->f_props_update;
			fas->f_props_update = 0;
			for (i = 0; i < NTARGETS_WIDE; i++) {
				if (props_update & (1<<i)) {
					fas_update_props(fas, i);
				}
			}
		}
		fas_check_waitQ_and_mutex_exit(fas);

	}
	rw_exit(&fas_global_rwlock);

/* NOTE(review): the 'again' label appears unused within this function */
again:
	mutex_enter(&fas_global_mutex);
	if (fas_timeout_initted && fas_timeout_id) {
		/* rearm the watchdog for the next tick */
		fas_timeout_id = timeout(fas_watch, NULL, fas_tick);
	}
	mutex_exit(&fas_global_mutex);
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_WATCH_END, "fas_watch_end");
}
7306 7305
/*
 * per-instance watchdog scan: age the per-slot timeout counters and
 * kick timeout recovery (fas_cmd_timeout) for any slot that expired;
 * when a slot is close to expiring, drain all queues to quiet the bus
 */
static void
fas_watchsubr(struct fas *fas)
{
	short slot;
	int d = ((fas->f_dslot == 0)? 1 : fas->f_dslot);
	struct f_slots *tag_slots;

	for (slot = 0; slot < N_SLOTS; slot += d) {

#ifdef FAS_TEST
		/* fault-injection hooks, compiled in for testing only */
		if (fas_btest) {
			fas_btest = 0;
			(void) fas_reset_bus(fas);
			return;
		}
		if (fas_force_timeout && fas->f_tcmds[slot]) {
			fas_cmd_timeout(fas, slot);
			fas_force_timeout = 0;
			return;
		}
		fas_test_reset(fas, slot);
		fas_test_abort(fas, slot);
#endif /* FAS_TEST */

		/*
		 * check tagged cmds first
		 */
		tag_slots = fas->f_active[slot];
		DPRINTF3(
		    "fas_watchsubr: slot %x: tcmds=%x, timeout=%x\n",
		    slot, fas->f_tcmds[slot], tag_slots->f_timeout);

		if ((fas->f_tcmds[slot] > 0) && (tag_slots->f_timebase)) {

			/*
			 * skip a freshly (re)started slot for one tick
			 * by letting the timebase warm up first
			 */
			if (tag_slots->f_timebase <=
			    fas_scsi_watchdog_tick) {
				tag_slots->f_timebase +=
				    fas_scsi_watchdog_tick;
				continue;
			}

			tag_slots->f_timeout -= fas_scsi_watchdog_tick;

			if (tag_slots->f_timeout < 0) {
				/* slot expired; recovery may reset the bus */
				fas_cmd_timeout(fas, slot);
				return;
			}
			if ((tag_slots->f_timeout) <=
			    fas_scsi_watchdog_tick) {
				IPRINTF1("pending timeout on slot=%x\n",
				    slot);
				IPRINTF("draining all queues\n");
				fas_set_throttles(fas, 0, N_SLOTS,
				    DRAIN_THROTTLE);
			}
		}
	}
}
7365 7364
/*
 * timeout recovery
 *
 * Fail all still-timed packets on the slot with CMD_TIMEOUT, log what
 * is known about the hang, and abort the offending command (possibly
 * switching to the currently connected command if the chip is stuck).
 */
static void
fas_cmd_timeout(struct fas *fas, int slot)
{
	int d = ((fas->f_dslot == 0)? 1 : fas->f_dslot);
	int target, lun, i, n, tag, ncmds;
	struct fas_cmd *sp = NULL;
	struct fas_cmd *ssp;

	ASSERT(fas->f_tcmds[slot]);

#ifdef FAS_TEST
	if (fas_test_stop) {
		debug_enter("timeout");
	}
#endif

	/*
	 * set throttle back; no more draining necessary
	 */
	for (i = 0; i < N_SLOTS; i += d) {
		if (fas->f_throttle[i] == DRAIN_THROTTLE) {
			fas_full_throttle(fas, i);
		}
	}

	/* for untagged targets, the (single) cmd lives in slot 0 */
	if (NOTAG(slot/NLUNS_PER_TARGET)) {
		sp = fas->f_active[slot]->f_slot[0];
	}

	/*
	 * if no interrupt pending for next second then the current
	 * cmd must be stuck; switch slot and sp to current slot and cmd
	 */
	if (fas->f_current_sp && fas->f_state != STATE_FREE) {
		for (i = 0; (i < 10000) && (INTPENDING(fas) == 0); i++) {
			drv_usecwait(100);
		}
		if (INTPENDING(fas) == 0) {
			slot = fas->f_current_sp->cmd_slot;
			sp = fas->f_current_sp;
		}
	}

	target = slot / NLUNS_PER_TARGET;
	lun = slot % NLUNS_PER_TARGET;

	/*
	 * update all outstanding pkts for this slot
	 */
	n = fas->f_active[slot]->f_n_slots;
	for (ncmds = tag = 0; tag < n; tag++) {
		ssp = fas->f_active[slot]->f_slot[tag];
		if (ssp && ssp->cmd_pkt->pkt_time) {
			fas_set_pkt_reason(fas, ssp, CMD_TIMEOUT,
			    STAT_TIMEOUT | STAT_ABORTED);
			fas_short_dump_cmd(fas, ssp);
			ncmds++;
		}
	}

	/*
	 * no timed-out cmds here?
	 */
	if (ncmds == 0) {
		return;
	}

	/*
	 * dump all we know about this timeout
	 */
	if (sp) {
		if (sp->cmd_flags & CFLAG_CMDDISC) {
			fas_log(fas, CE_WARN,
			    "Disconnected command timeout for Target %d.%d",
			    target, lun);
		} else {
			ASSERT(sp == fas->f_current_sp);
			fas_log(fas, CE_WARN,
			    "Connected command timeout for Target %d.%d",
			    target, lun);
			/*
			 * Current command timeout appears to relate often
			 * to noisy SCSI in synchronous mode.
			 */
			if (fas->f_state == ACTS_DATA_DONE) {
				fas_sync_wide_backoff(fas, sp, slot);
			}
		}
#ifdef FASDEBUG
		fas_printstate(fas, "timeout");
#endif
	} else {
		fas_log(fas, CE_WARN,
		    "Disconnected tagged cmd(s) (%d) timeout for Target %d.%d",
		    fas->f_tcmds[slot], target, lun);
	}

	/* abort the cmd; ACTION_SEARCH means more work can be started */
	if (fas_abort_cmd(fas, sp, slot) == ACTION_SEARCH) {
		(void) fas_istart(fas);
	}
}
7470 7469
7471 7470 /*
7472 7471 * fas_sync_wide_backoff() increases sync period and enables slow
7473 7472 * cable mode.
7474 7473 * the second time, we revert back to narrow/async
7475 7474 * we count on a bus reset to disable wide in the target and will
7476 7475 * never renegotiate wide again
7477 7476 */
static void
fas_sync_wide_backoff(struct fas *fas, struct fas_cmd *sp,
    int slot)
{
	char phase;
	ushort_t state = fas->f_state;
	uchar_t tgt = slot / NLUNS_PER_TARGET;
	uint_t tshift = 1 << tgt;	/* per-target bit for the flag masks */

	/* snapshot the current bus phase (used for debug tracing only) */
	phase = fas_reg_read(fas, &fas->f_reg->fas_stat);
	phase &= FAS_PHASE_MASK;

	IPRINTF4(
	"fas_sync_wide_backoff: target %d: state=%x, phase=%x, sp=0x%p\n",
	    tgt, state, phase, (void *)sp);

#ifdef FASDEBUG
	/* debug knob: suppress all backoff behavior */
	if (fas_no_sync_wide_backoff) {
		return;
	}
#endif

	/*
	 * if this not the first time or sync is disabled
	 * thru scsi_options then disable wide
	 */
	if ((fas->f_backoff & tshift) ||
	    (fas->f_nosync & tshift)) {
		/*
		 * disable wide for just this target
		 */
		if ((fas->f_nowide & tshift) == 0) {
			fas_log(fas, CE_WARN,
			    "Target %d disabled wide SCSI mode", tgt);
		}
		/*
		 * do not reset the bit in f_nowide because that
		 * would not force a renegotiation of wide
		 * and do not change any register value yet because
		 * we may have reconnects before the renegotiations
		 */
		fas->f_target_scsi_options[tgt] &= ~SCSI_OPTIONS_WIDE;
	}

	/*
	 * reduce xfer rate. if this is the first time, reduce by
	 * 100%. second time, disable sync and wide.
	 */
	if (fas->f_offset[tgt] != 0) {
		/*
		 * do not reset the bit in f_nosync because that
		 * would not force a renegotiation of sync
		 */
		if (fas->f_backoff & tshift) {
			/* second backoff for this target: drop sync/fast */
			if ((fas->f_nosync & tshift) == 0) {
				fas_log(fas, CE_WARN,
				    "Target %d reverting to async. mode",
				    tgt);
			}
			fas->f_target_scsi_options[tgt] &=
			    ~(SCSI_OPTIONS_SYNC | SCSI_OPTIONS_FAST);
		} else {
			/* increase period by 100% */
			fas->f_neg_period[tgt] *= 2;

			fas_log(fas, CE_WARN,
			    "Target %d reducing sync. transfer rate", tgt);
		}
	}
	/* remember that this target has been backed off once already */
	fas->f_backoff |= tshift;

	/*
	 * always enable slow cable mode, if not already enabled
	 */
	if ((fas->f_fasconf & FAS_CONF_SLOWMODE) == 0) {
		fas->f_fasconf |= FAS_CONF_SLOWMODE;
		fas_reg_write(fas, &fas->f_reg->fas_conf, fas->f_fasconf);
		IPRINTF("Reverting to slow SCSI cable mode\n");
	}

	/*
	 * Force sync renegotiation and update properties
	 */
	fas_force_renegotiation(fas, tgt);
	fas->f_props_update |= (1<<tgt);
}
7564 7563
7565 7564 /*
7566 7565 * handle failed negotiations (either reject or bus free condition)
7567 7566 */
7568 7567 static void
7569 7568 fas_reset_sync_wide(struct fas *fas)
7570 7569 {
7571 7570 struct fas_cmd *sp = fas->f_current_sp;
7572 7571 int tgt = Tgt(sp);
7573 7572
7574 7573 if (fas->f_wdtr_sent) {
7575 7574 IPRINTF("wide neg message rejected or bus free\n");
7576 7575 fas->f_nowide |= (1<<tgt);
7577 7576 fas->f_fasconf3[tgt] &= ~FAS_CONF3_WIDE;
7578 7577 fas_reg_write(fas, &fas->f_reg->fas_conf3,
7579 7578 fas->f_fasconf3[tgt]);
7580 7579 /*
7581 7580 * clear offset just in case it goes to
7582 7581 * data phase
7583 7582 */
7584 7583 fas_reg_write(fas,
7585 7584 (uchar_t *)&fas->f_reg->fas_sync_offset, 0);
7586 7585 } else if (fas->f_sdtr_sent) {
7587 7586 volatile struct fasreg *fasreg =
7588 7587 fas->f_reg;
7589 7588 IPRINTF("sync neg message rejected or bus free\n");
7590 7589 fas->f_nosync |= (1<<tgt);
7591 7590 fas->f_offset[tgt] = 0;
7592 7591 fas->f_sync_period[tgt] = 0;
7593 7592 fas_reg_write(fas,
7594 7593 (uchar_t *)&fasreg->fas_sync_period, 0);
7595 7594 fas_reg_write(fas,
7596 7595 (uchar_t *)&fasreg->fas_sync_offset, 0);
7597 7596 fas->f_offset[tgt] = 0;
7598 7597 fas->f_fasconf3[tgt] &= ~FAS_CONF3_FASTSCSI;
7599 7598 fas_reg_write(fas, &fasreg->fas_conf3,
7600 7599 fas->f_fasconf3[tgt]);
7601 7600 }
7602 7601
7603 7602 fas_force_renegotiation(fas, tgt);
7604 7603 }
7605 7604
7606 7605 /*
7607 7606 * force wide and sync renegotiation
7608 7607 */
7609 7608 static void
7610 7609 fas_force_renegotiation(struct fas *fas, int target)
7611 7610 {
7612 7611 ushort_t tshift = 1<<target;
7613 7612 fas->f_sync_known &= ~tshift;
7614 7613 fas->f_sync_enabled &= ~tshift;
7615 7614 fas->f_wide_known &= ~tshift;
7616 7615 fas->f_wide_enabled &= ~tshift;
7617 7616 }
7618 7617
7619 7618 /*
7620 7619 * update conf3 register for wide negotiation
7621 7620 */
7622 7621 static void
7623 7622 fas_set_wide_conf3(struct fas *fas, int target, int width)
7624 7623 {
7625 7624 ASSERT(width <= 1);
7626 7625 switch (width) {
7627 7626 case 0:
7628 7627 fas->f_fasconf3[target] &= ~FAS_CONF3_WIDE;
7629 7628 break;
7630 7629 case 1:
7631 7630 fas->f_fasconf3[target] |= FAS_CONF3_WIDE;
7632 7631 fas->f_wide_enabled |= (1<<target);
7633 7632 break;
7634 7633 }
7635 7634
7636 7635 fas_reg_write(fas, &fas->f_reg->fas_conf3, fas->f_fasconf3[target]);
7637 7636 fas->f_fasconf3_reg_last = fas->f_fasconf3[target];
7638 7637 }
7639 7638
7640 7639 /*
7641 7640 * Abort command handling
7642 7641 *
7643 7642 * abort current cmd, either by device reset or immediately with bus reset
7644 7643 * (usually an abort msg doesn't completely solve the problem, therefore
7645 7644 * a device or bus reset is recommended)
7646 7645 */
7647 7646 static int
7648 7647 fas_abort_curcmd(struct fas *fas)
7649 7648 {
7650 7649 if (fas->f_current_sp) {
7651 7650 return (fas_abort_cmd(fas, fas->f_current_sp,
7652 7651 fas->f_current_sp->cmd_slot));
7653 7652 } else {
7654 7653 return (fas_reset_bus(fas));
7655 7654 }
7656 7655 }
7657 7656
7658 7657 static int
7659 7658 fas_abort_cmd(struct fas *fas, struct fas_cmd *sp, int slot)
7660 7659 {
7661 7660 struct scsi_address ap;
7662 7661
7663 7662 ap.a_hba_tran = fas->f_tran;
7664 7663 ap.a_target = slot / NLUNS_PER_TARGET;
7665 7664 ap.a_lun = slot % NLUNS_PER_TARGET;
7666 7665
7667 7666 IPRINTF1("abort cmd 0x%p\n", (void *)sp);
7668 7667
7669 7668 /*
7670 7669 * attempting to abort a connected cmd is usually fruitless, so
7671 7670 * only try disconnected cmds
7672 7671 * a reset is preferable over an abort (see 1161701)
7673 7672 */
7674 7673 if ((fas->f_current_sp && (fas->f_current_sp->cmd_slot != slot)) ||
7675 7674 (fas->f_state == STATE_FREE)) {
7676 7675 IPRINTF2("attempting to reset target %d.%d\n",
7677 7676 ap.a_target, ap.a_lun);
7678 7677 if (fas_do_scsi_reset(&ap, RESET_TARGET)) {
7679 7678 return (ACTION_SEARCH);
7680 7679 }
7681 7680 }
7682 7681
7683 7682 /*
7684 7683 * if the target won't listen, then a retry is useless
7685 7684 * there is also the possibility that the cmd still completed while
7686 7685 * we were trying to reset and the target driver may have done a
7687 7686 * device reset which has blown away this sp.
7688 7687 * well, we've tried, now pull the chain
7689 7688 */
7690 7689 IPRINTF("aborting all cmds by bus reset\n");
7691 7690 return (fas_reset_bus(fas));
7692 7691 }
7693 7692
7694 7693 /*
7695 7694 * fas_do_scsi_abort() assumes that we already have the mutex.
7696 7695 * during the abort, we hold the mutex and prevent callbacks by setting
7697 7696 * completion pointer to NULL. this will also avoid that a target driver
7698 7697 * attempts to do a scsi_abort/reset while we are aborting.
7699 7698 * because the completion pointer is NULL we can still update the
7700 7699 * packet after completion
7701 7700 * the throttle for this slot is cleared either by fas_abort_connected_cmd
7702 7701 * or fas_runpoll which prevents new cmds from starting while aborting
7703 7702 */
static int
fas_do_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct fas *fas = ADDR2FAS(ap);
	struct fas_cmd *sp;
	int rval = FALSE;
	short slot;
	struct fas_cmd *cur_sp = fas->f_current_sp;
	void (*cur_savec)(), (*sp_savec)();	/* saved pkt_comp callbacks */
	int sp_tagged_flag, abort_msg;

	/* pkt == NULL means "abort everything on this target/lun" */
	if (pkt) {
		sp = PKT2CMD(pkt);
		slot = sp->cmd_slot;
		ASSERT(slot == ((ap->a_target * NLUNS_PER_TARGET) | ap->a_lun));
	} else {
		sp = NULL;
		slot = (ap->a_target * NLUNS_PER_TARGET) | ap->a_lun;
	}

	fas_move_waitQ_to_readyQ(fas);

	/*
	 * If no specific command was passed, all cmds here will be aborted
	 * If a specific command was passed as an argument (to be aborted)
	 * only the specified command will be aborted
	 */
	ASSERT(mutex_owned(FAS_MUTEX(fas)));
	IPRINTF4("fas_scsi_abort for slot %x, "
	    "sp=0x%p, pkt_flags=%x, cur_sp=0x%p\n",
	    slot, (void *)sp, (sp? sp->cmd_pkt_flags : 0), (void *)cur_sp);

	/*
	 * first check if the cmd is in the ready queue or
	 * in the active queue
	 */
	if (sp) {
		IPRINTF3("aborting one command 0x%p for %d.%d\n",
		    (void *)sp, ap->a_target, ap->a_lun);
		rval = fas_remove_from_readyQ(fas, sp, slot);
		if (rval) {
			/* never started: complete it as aborted right here */
			IPRINTF("aborted one ready cmd\n");
			fas_set_pkt_reason(fas, sp, CMD_ABORTED, STAT_ABORTED);
			fas_decrement_ncmds(fas, sp);
			fas_call_pkt_comp(fas, sp);
			goto exit;

		} else if ((sp !=
		    fas->f_active[slot]->f_slot[sp->cmd_tag[1]])) {
			/* not in the active queue either: nothing to abort */
			IPRINTF("cmd doesn't exist here\n");
			rval = TRUE;
			goto exit;
		}
	}

	/*
	 * hold off any new commands while attempting to abort
	 * an active cmd
	 */
	fas_set_throttles(fas, slot, 1, HOLD_THROTTLE);

	if (cur_sp) {
		/*
		 * prevent completion on current cmd
		 */
		cur_savec = cur_sp->cmd_pkt->pkt_comp;
		cur_sp->cmd_pkt->pkt_comp = NULL;
	}

	if (sp) {
		/*
		 * the cmd exists here. is it connected or disconnected?
		 * if connected but still selecting then can't abort now.
		 * prevent completion on this cmd
		 */
		sp_tagged_flag = (sp->cmd_pkt_flags & FLAG_TAGMASK);
		abort_msg = (sp_tagged_flag? MSG_ABORT_TAG : MSG_ABORT);
		sp_savec = sp->cmd_pkt->pkt_comp;
		sp->cmd_pkt->pkt_comp = NULL;

		/* connected but not selecting? */
		if ((sp == cur_sp) && (fas->f_state != STATE_FREE) &&
		    (sp->cmd_pkt->pkt_state)) {
			rval = fas_abort_connected_cmd(fas, sp, abort_msg);
		}

		/* if abort connected cmd failed, try abort disconnected */
		if ((rval == 0) &&
		    (sp->cmd_flags & CFLAG_CMDDISC) &&
		    ((sp->cmd_flags & CFLAG_COMPLETED) == 0)) {
			rval = fas_abort_disconnected_cmd(fas, ap, sp,
			    abort_msg, slot);
		}

		if (rval) {
			sp->cmd_flags |= CFLAG_COMPLETED;
			fas_set_pkt_reason(fas, sp, CMD_ABORTED, STAT_ABORTED);
		}

		/* restore the saved completion callback */
		sp->cmd_pkt->pkt_comp = sp_savec;

	} else {
		IPRINTF2("aborting all commands for %d.%d\n",
		    ap->a_target, ap->a_lun);
		abort_msg = MSG_ABORT;

		/* active and not selecting ? */
		if (cur_sp && (fas->f_state != STATE_FREE) &&
		    (cur_sp->cmd_slot == slot) &&
		    cur_sp->cmd_pkt->pkt_state) {
			rval = fas_abort_connected_cmd(fas, cur_sp,
			    abort_msg);
		}
		if (rval == 0) {
			rval = fas_abort_disconnected_cmd(fas, ap,
			    NULL, abort_msg, slot);
		}
	}

	/* NOTE(review): nothing in this function jumps to "done" -- confirm */
done:
	/* complete the current sp */
	if (cur_sp) {
		cur_sp->cmd_pkt->pkt_comp = cur_savec;
		if (cur_sp->cmd_flags & CFLAG_COMPLETED) {
			fas_remove_cmd(fas, cur_sp, NEW_TIMEOUT);
			cur_sp->cmd_flags &= ~CFLAG_COMPLETED;
			fas_decrement_ncmds(fas, cur_sp);
			fas_call_pkt_comp(fas, cur_sp);
		}
	}

	/* complete the sp passed as 2nd arg */
	if (sp && (sp != cur_sp) && (sp->cmd_flags & CFLAG_COMPLETED)) {
		sp->cmd_flags &= ~CFLAG_COMPLETED;
		fas_remove_cmd(fas, sp, NEW_TIMEOUT);
		fas_decrement_ncmds(fas, sp);
		fas_call_pkt_comp(fas, sp);
	}

	/* clean up all cmds for this slot */
	if (rval && (abort_msg == MSG_ABORT)) {
		/*
		 * mark all commands here as aborted
		 * abort msg has been accepted, now cleanup queues;
		 */
		fas_mark_packets(fas, slot, CMD_ABORTED, STAT_ABORTED);
		fas_flush_tagQ(fas, slot);
		fas_flush_readyQ(fas, slot);
	}
	fas_set_throttles(fas, slot, 1, MAX_THROTTLE);

exit:
	/* restart the bus if it went free during the abort */
	if (fas->f_state == STATE_FREE) {
		(void) fas_ustart(fas);
	}

	ASSERT(mutex_owned(FAS_MUTEX(fas)));

#ifdef FASDEBUG
	if (rval && fas_test_stop) {
		debug_enter("abort succeeded");
	}
#endif
	return (rval);
}
7869 7868
7870 7869 /*
7871 7870 * mark all packets with new reason and update statistics
7872 7871 */
static void
fas_mark_packets(struct fas *fas, int slot, uchar_t reason, uint_t stat)
{
	struct fas_cmd *sp = fas->f_readyf[slot];

	/*
	 * first the ready queue.
	 * NOTE(review): ready cmds are marked STAT_ABORTED unconditionally,
	 * ignoring the stat argument -- presumably because they never made
	 * it onto the bus; confirm this is intentional.
	 */
	while (sp != 0) {
		fas_set_pkt_reason(fas, sp, reason, STAT_ABORTED);
		sp = sp->cmd_forw;
	}
	/* then every active cmd in this slot gets reason + stat */
	if (fas->f_tcmds[slot]) {
		int n = 0;
		ushort_t tag;

		for (tag = 0; tag < fas->f_active[slot]->f_n_slots; tag++) {
			if ((sp = fas->f_active[slot]->f_slot[tag]) != 0) {
				fas_set_pkt_reason(fas, sp, reason, stat);
				n++;
			}
		}
		/* active count bookkeeping must agree with the table */
		ASSERT(fas->f_tcmds[slot] == n);
	}
}
7895 7894
7896 7895 /*
7897 7896 * set pkt_reason and OR in pkt_statistics flag
7898 7897 */
7899 7898 static void
7900 7899 fas_set_pkt_reason(struct fas *fas, struct fas_cmd *sp, uchar_t reason,
7901 7900 uint_t stat)
7902 7901 {
7903 7902 if (sp) {
7904 7903 if (sp->cmd_pkt->pkt_reason == CMD_CMPLT) {
7905 7904 sp->cmd_pkt->pkt_reason = reason;
7906 7905 }
7907 7906 sp->cmd_pkt->pkt_statistics |= stat;
7908 7907 IPRINTF3("sp=0x%p, pkt_reason=%x, pkt_stat=%x\n",
7909 7908 (void *)sp, reason, sp->cmd_pkt->pkt_statistics);
7910 7909 }
7911 7910 }
7912 7911
7913 7912 /*
7914 7913 * delete specified cmd from the ready queue
7915 7914 */
7916 7915 static int
7917 7916 fas_remove_from_readyQ(struct fas *fas, struct fas_cmd *sp, int slot)
7918 7917 {
7919 7918 struct fas_cmd *ssp, *psp;
7920 7919
7921 7920 /*
7922 7921 * command has not been started yet and is still in the ready queue
7923 7922 */
7924 7923 if (sp) {
7925 7924 ASSERT(fas->f_ncmds > 0);
7926 7925 /*
7927 7926 * find packet on the ready queue and remove it
7928 7927 */
7929 7928 for (psp = NULL, ssp = fas->f_readyf[slot]; ssp != NULL;
7930 7929 psp = ssp, ssp = ssp->cmd_forw) {
7931 7930 if (ssp == sp) {
7932 7931 if (fas->f_readyf[slot] == sp) {
7933 7932 fas->f_readyf[slot] = sp->cmd_forw;
7934 7933 } else {
7935 7934 psp->cmd_forw = sp->cmd_forw;
7936 7935 }
7937 7936 if (fas->f_readyb[slot] == sp) {
7938 7937 fas->f_readyb[slot] = psp;
7939 7938 }
7940 7939 return (TRUE);
7941 7940 }
7942 7941 }
7943 7942 }
7944 7943 return (FALSE);
7945 7944 }
7946 7945
7947 7946 /*
7948 7947 * add cmd to to head of the readyQ
7949 7948 * due to tag allocation failure or preemption we have to return
7950 7949 * this cmd to the readyQ
7951 7950 */
7952 7951 static void
7953 7952 fas_head_of_readyQ(struct fas *fas, struct fas_cmd *sp)
7954 7953 {
7955 7954 /*
7956 7955 * never return a NOINTR pkt to the readyQ
7957 7956 * (fas_runpoll will resubmit)
7958 7957 */
7959 7958 if ((sp->cmd_pkt_flags & FLAG_NOINTR) == 0) {
7960 7959 struct fas_cmd *dp;
7961 7960 int slot = sp->cmd_slot;
7962 7961
7963 7962 dp = fas->f_readyf[slot];
7964 7963 fas->f_readyf[slot] = sp;
7965 7964 sp->cmd_forw = dp;
7966 7965 if (fas->f_readyb[slot] == NULL) {
7967 7966 fas->f_readyb[slot] = sp;
7968 7967 }
7969 7968 }
7970 7969 }
7971 7970
7972 7971 /*
7973 7972 * flush cmds in ready queue
7974 7973 */
7975 7974 static void
7976 7975 fas_flush_readyQ(struct fas *fas, int slot)
7977 7976 {
7978 7977 if (fas->f_readyf[slot]) {
7979 7978 struct fas_cmd *sp, *nsp;
7980 7979
7981 7980 IPRINTF1("flushing ready queue, slot=%x\n", slot);
7982 7981 ASSERT(fas->f_ncmds > 0);
7983 7982
7984 7983 sp = fas->f_readyf[slot];
7985 7984 fas->f_readyf[slot] = fas->f_readyb[slot] = NULL;
7986 7985
7987 7986 while (sp != 0) {
7988 7987 /*
7989 7988 * save the forward pointer before calling
7990 7989 * the completion routine
7991 7990 */
7992 7991 nsp = sp->cmd_forw;
7993 7992 ASSERT((sp->cmd_flags & CFLAG_FREE) == 0);
7994 7993 ASSERT(Tgt(sp) == slot/NLUNS_PER_TARGET);
7995 7994 fas_decrement_ncmds(fas, sp);
7996 7995 fas_call_pkt_comp(fas, sp);
7997 7996 sp = nsp;
7998 7997 }
7999 7998 fas_check_ncmds(fas);
8000 7999 }
8001 8000 }
8002 8001
8003 8002 /*
8004 8003 * cleanup the tag queue
8005 8004 * preserve some order by starting with the oldest tag
8006 8005 */
static void
fas_flush_tagQ(struct fas *fas, int slot)
{
	ushort_t tag, starttag;
	struct fas_cmd *sp;
	struct f_slots *tagque = fas->f_active[slot];

	/* slot never had an active table allocated: nothing to flush */
	if (tagque == NULL) {
		return;
	}

	DPRINTF2("flushing entire tag queue, slot=%x, tcmds=%x\n",
	    slot, fas->f_tcmds[slot]);

#ifdef FASDEBUG
	/* sanity pass: count active cmds and check their flags first */
	{
		int n = 0;
		for (tag = 0; tag < fas->f_active[slot]->f_n_slots; tag++) {
			if ((sp = tagque->f_slot[tag]) != 0) {
				n++;
				ASSERT((sp->cmd_flags & CFLAG_FREE) == 0);
				if (sp->cmd_pkt->pkt_reason == CMD_CMPLT) {
					if ((sp->cmd_flags & CFLAG_FINISHED) ==
					    0) {
						debug_enter("fas_flush_tagQ");
					}
				}
			}
		}
		ASSERT(fas->f_tcmds[slot] == n);
	}
#endif
	/*
	 * start at f_tags (the oldest outstanding tag) and walk the
	 * table circularly so completions run in rough FIFO order
	 */
	tag = starttag = fas->f_active[slot]->f_tags;

	do {
		if ((sp = tagque->f_slot[tag]) != 0) {
			/* reason/stat of 0 leaves pkt_reason untouched */
			fas_flush_cmd(fas, sp, 0, 0);
		}
		tag = ((ushort_t)(tag + 1)) %
		    (ushort_t)fas->f_active[slot]->f_n_slots;
	} while (tag != starttag);

	ASSERT(fas->f_tcmds[slot] == 0);
	EPRINTF2("ncmds = %x, ndisc=%x\n", fas->f_ncmds, fas->f_ndisc);
	fas_check_ncmds(fas);
}
8053 8052
8054 8053 /*
8055 8054 * cleanup one active command
8056 8055 */
static void
fas_flush_cmd(struct fas *fas, struct fas_cmd *sp, uchar_t reason,
    uint_t stat)
{
	short slot = sp->cmd_slot;

	ASSERT(fas->f_ncmds > 0);
	ASSERT((sp->cmd_flags & CFLAG_FREE) == 0);
	ASSERT(sp == fas->f_active[slot]->f_slot[sp->cmd_tag[1]]);

	/*
	 * order matters: take the cmd off the active list and out of
	 * timeout accounting before running the completion callback
	 */
	fas_remove_cmd(fas, sp, NEW_TIMEOUT);
	fas_decrement_ncmds(fas, sp);
	fas_set_pkt_reason(fas, sp, reason, stat);
	fas_call_pkt_comp(fas, sp);

	EPRINTF2("ncmds = %x, ndisc=%x\n", fas->f_ncmds, fas->f_ndisc);
	fas_check_ncmds(fas);
}
8075 8074
8076 8075 /*
8077 8076 * prepare a proxy cmd (a cmd sent on behalf of the target driver,
8078 8077 * usually for error recovery or abort/reset)
8079 8078 */
static void
fas_makeproxy_cmd(struct fas_cmd *sp, struct scsi_address *ap,
    struct scsi_pkt *pkt, int nmsgs, ...)
{
	va_list vap;
	int i;

	/* msg bytes are smuggled in the (otherwise unused) cdb area */
	ASSERT(nmsgs <= (CDB_GROUP5 - CDB_GROUP0 - 3));

	bzero(sp, sizeof (*sp));
	bzero(pkt, scsi_pkt_size());

	/* wire the caller-supplied pkt and cmd structs to each other */
	pkt->pkt_address = *ap;
	pkt->pkt_cdbp = (opaque_t)&sp->cmd_cdb[0];
	pkt->pkt_scbp = (opaque_t)&sp->cmd_scb;
	pkt->pkt_ha_private = (opaque_t)sp;
	sp->cmd_pkt = pkt;
	sp->cmd_scblen = 1;
	/* proxy cmds always run polled (no interrupt completion) */
	sp->cmd_pkt_flags = pkt->pkt_flags = FLAG_NOINTR;
	sp->cmd_flags = CFLAG_CMDPROXY;
	sp->cmd_cdb[FAS_PROXY_TYPE] = FAS_PROXY_SNDMSG;
	sp->cmd_cdb[FAS_PROXY_RESULT] = FALSE;
	sp->cmd_cdb[FAS_PROXY_DATA] = (char)nmsgs;

	/* copy the variable msg bytes in after the count byte */
	va_start(vap, nmsgs);
	for (i = 0; i < nmsgs; i++) {
		sp->cmd_cdb[FAS_PROXY_DATA + 1 + i] = (uchar_t)va_arg(vap, int);
	}
	va_end(vap);
}
8110 8109
8111 8110 /*
8112 8111 * send a proxy cmd and check the result
8113 8112 */
8114 8113 static int
8115 8114 fas_do_proxy_cmd(struct fas *fas, struct fas_cmd *sp,
8116 8115 struct scsi_address *ap, char *what)
8117 8116 {
8118 8117 int rval;
8119 8118
8120 8119 IPRINTF3("Sending proxy %s message to %d.%d\n", what,
8121 8120 ap->a_target, ap->a_lun);
8122 8121 if (fas_accept_pkt(fas, sp, TRAN_BUSY_OK) == TRAN_ACCEPT &&
8123 8122 sp->cmd_pkt->pkt_reason == CMD_CMPLT &&
8124 8123 sp->cmd_cdb[FAS_PROXY_RESULT] == TRUE) {
8125 8124 IPRINTF3("Proxy %s succeeded for %d.%d\n", what,
8126 8125 ap->a_target, ap->a_lun);
8127 8126 ASSERT(fas->f_current_sp != sp);
8128 8127 rval = TRUE;
8129 8128 } else {
8130 8129 IPRINTF5(
8131 8130 "Proxy %s failed for %d.%d, result=%x, reason=%x\n", what,
8132 8131 ap->a_target, ap->a_lun, sp->cmd_cdb[FAS_PROXY_RESULT],
8133 8132 sp->cmd_pkt->pkt_reason);
8134 8133 ASSERT(fas->f_current_sp != sp);
8135 8134 rval = FALSE;
8136 8135 }
8137 8136 return (rval);
8138 8137 }
8139 8138
8140 8139 /*
8141 8140 * abort a connected command by sending an abort msg; hold off on
8142 8141 * starting new cmds by setting throttles to HOLD_THROTTLE
8143 8142 */
static int
fas_abort_connected_cmd(struct fas *fas, struct fas_cmd *sp, uchar_t msg)
{
	int rval = FALSE;
	int flags = sp->cmd_pkt_flags;	/* restored before returning */

	/*
	 * if reset delay active we cannot access the target.
	 */
	if (fas->f_reset_delay[Tgt(sp)]) {
		return (rval);
	}

	/*
	 * only abort while in data phase; otherwise we mess up msg phase
	 */
	if (!((fas->f_state == ACTS_DATA) ||
	    (fas->f_state == ACTS_DATA_DONE))) {
		return (rval);
	}


	IPRINTF3("Sending abort message %s to connected %d.%d\n",
	    scsi_mname(msg), Tgt(sp), Lun(sp));


	/* queue the abort msg, raise ATN, and poll the exchange through */
	fas->f_abort_msg_sent = 0;
	fas->f_omsglen = 1;
	fas->f_cur_msgout[0] = msg;
	sp->cmd_pkt_flags |= FLAG_NOINTR;	/* force polled completion */
	fas_assert_atn(fas);

	(void) fas_dopoll(fas, SHORT_POLL_TIMEOUT);

	/*
	 * now check if the msg was taken
	 * e_abort is set in fas_handle_msg_out_done when the abort
	 * msg has actually gone out (ie. msg out phase occurred
	 */
	if (fas->f_abort_msg_sent && (sp->cmd_flags & CFLAG_COMPLETED)) {
		IPRINTF2("target %d.%d aborted\n",
		    Tgt(sp), Lun(sp));
		rval = TRUE;
	} else {
		IPRINTF2("target %d.%d did not abort\n",
		    Tgt(sp), Lun(sp));
	}
	/* restore caller's pkt flags and clear the pending msg-out */
	sp->cmd_pkt_flags = flags;
	fas->f_omsglen = 0;
	return (rval);
}
8195 8194
8196 8195 /*
8197 8196 * abort a disconnected command; if it is a tagged command, we need
8198 8197 * to include the tag
8199 8198 */
static int
fas_abort_disconnected_cmd(struct fas *fas, struct scsi_address *ap,
    struct fas_cmd *sp, uchar_t msg, int slot)
{
	auto struct fas_cmd local;	/* stack-resident proxy cmd */
	struct fas_cmd *proxy_cmdp = &local;
	struct scsi_pkt *pkt;
	int rval;
	int target = ap->a_target;

	/*
	 * if reset delay is active, we cannot start a selection
	 * and there shouldn't be a cmd outstanding
	 */
	if (fas->f_reset_delay[target] != 0) {
		return (FALSE);
	}

	if (sp)
		ASSERT(sp->cmd_slot == slot);

	IPRINTF1("aborting disconnected tagged cmd(s) with %s\n",
	    scsi_mname(msg));
	pkt = kmem_alloc(scsi_pkt_size(), KM_SLEEP);
	/* a tagged abort must identify its tag: SIMPLE_QTAG, tag, abort */
	if (sp && (TAGGED(target) && (msg == MSG_ABORT_TAG))) {
		int tag = sp->cmd_tag[1];
		ASSERT(sp == fas->f_active[slot]->f_slot[tag]);
		fas_makeproxy_cmd(proxy_cmdp, ap, pkt, 3,
		    MSG_SIMPLE_QTAG, tag, msg);
	} else {
		/* untagged: the single abort/abort-tag msg suffices */
		fas_makeproxy_cmd(proxy_cmdp, ap, pkt, 1, msg);
	}

	/* run the polled proxy cmd, then release the scratch pkt */
	rval = fas_do_proxy_cmd(fas, proxy_cmdp, ap, scsi_mname(msg));
	kmem_free(pkt, scsi_pkt_size());
	return (rval);
}
8237 8236
8238 8237 /*
8239 8238 * reset handling:
8240 8239 * fas_do_scsi_reset assumes that we have already entered the mutex
8241 8240 */
static int
fas_do_scsi_reset(struct scsi_address *ap, int level)
{
	int rval = FALSE;
	struct fas *fas = ADDR2FAS(ap);
	short slot = (ap->a_target * NLUNS_PER_TARGET) | ap->a_lun;

	ASSERT(mutex_owned(FAS_MUTEX(fas)));
	IPRINTF3("fas_scsi_reset for slot %x, level=%x, tcmds=%x\n",
	    slot, level, fas->f_tcmds[slot]);

	fas_move_waitQ_to_readyQ(fas);

	if (level == RESET_ALL) {
		/*
		 * We know that fas_reset_bus() returns ACTION_RETURN.
		 */
		(void) fas_reset_bus(fas);

		/*
		 * Now call fas_dopoll() to field the reset interrupt
		 * which will then call fas_reset_recovery which will
		 * call the completion function for all commands.
		 */
		if (fas_dopoll(fas, SHORT_POLL_TIMEOUT) <= 0) {
			/*
			 * reset fas
			 */
			/* first poll failed: reset the chip and retry once */
			fas_internal_reset(fas, FAS_RESET_FAS);
			(void) fas_reset_bus(fas);
			if (fas_dopoll(fas, SHORT_POLL_TIMEOUT) <= 0) {
				fas_log(fas,
				    CE_WARN, "reset scsi bus failed");
				New_state(fas, STATE_FREE);
			} else {
				rval = TRUE;
			}
		} else {
			rval = TRUE;
		}

	} else {
		/* RESET_TARGET: reset just the target addressed by ap */
		struct fas_cmd *cur_sp = fas->f_current_sp;
		void (*savec)() = NULL;

		/*
		 * prevent new commands from starting
		 */
		fas_set_all_lun_throttles(fas, slot, HOLD_THROTTLE);

		/*
		 * zero pkt_comp so it won't complete during the reset and
		 * we can still update the packet after the reset.
		 */
		if (cur_sp) {
			savec = cur_sp->cmd_pkt->pkt_comp;
			cur_sp->cmd_pkt->pkt_comp = NULL;
		}

		/*
		 * is this a connected cmd but not selecting?
		 */
		if (cur_sp && (fas->f_state != STATE_FREE) &&
		    (cur_sp->cmd_pkt->pkt_state != 0) &&
		    (ap->a_target == (Tgt(cur_sp)))) {
			rval = fas_reset_connected_cmd(fas, ap);
		}

		/*
		 * if not connected or fas_reset_connected_cmd() failed,
		 * attempt a reset_disconnected_cmd
		 */
		if (rval == FALSE) {
			rval = fas_reset_disconnected_cmd(fas, ap);
		}

		/*
		 * cleanup if reset was successful
		 * complete the current sp first.
		 */
		if (cur_sp) {
			cur_sp->cmd_pkt->pkt_comp = savec;
			if (cur_sp->cmd_flags & CFLAG_COMPLETED) {
				/* only our target's cmd gets the reset stat */
				if (ap->a_target == (Tgt(cur_sp))) {
					fas_set_pkt_reason(fas, cur_sp,
					    CMD_RESET, STAT_DEV_RESET);
				}
				fas_remove_cmd(fas, cur_sp, NEW_TIMEOUT);
				cur_sp->cmd_flags &= ~CFLAG_COMPLETED;
				fas_decrement_ncmds(fas, cur_sp);
				fas_call_pkt_comp(fas, cur_sp);
			}
		}

		if (rval == TRUE) {
			fas_reset_cleanup(fas, slot);
		} else {
			IPRINTF1("fas_scsi_reset failed for slot %x\n", slot);

			/*
			 * restore throttles to max throttle, regardless
			 * of what it was (fas_set_throttles() will deal
			 * with reset delay active)
			 * restoring to the old throttle is not
			 * a such a good idea
			 */
			fas_set_all_lun_throttles(fas, slot, MAX_THROTTLE);

		}

		if (fas->f_state == STATE_FREE) {
			(void) fas_ustart(fas);
		}
	}
	/* NOTE(review): no goto targets "exit" in this function -- confirm */
exit:
	ASSERT(mutex_owned(FAS_MUTEX(fas)));
	ASSERT(fas->f_ncmds >= fas->f_ndisc);

#ifdef FASDEBUG
	if (rval && fas_test_stop) {
		debug_enter("reset succeeded");
	}
#endif
	return (rval);
}
8367 8366
8368 8367 /*
8369 8368 * reset delay is handled by a separate watchdog; this ensures that
8370 8369 * regardless of fas_scsi_watchdog_tick, the reset delay will not change
8371 8370 */
static void
fas_start_watch_reset_delay(struct fas *fas)
{
	mutex_enter(&fas_global_mutex);
	/* a single reset-delay watchdog serves all fas instances */
	if ((fas_reset_watch == 0) && FAS_CAN_SCHED) {
		/* tick appears to be in ms, hence the *1000 for usectohz */
		fas_reset_watch = timeout(fas_watch_reset_delay, NULL,
		    drv_usectohz((clock_t)FAS_WATCH_RESET_DELAY_TICK * 1000));
	}
	/* watchdog must be armed unless timeouts are disabled entirely */
	ASSERT((fas_reset_watch != 0) || (fas->f_flags & FAS_FLG_NOTIMEOUTS));
	mutex_exit(&fas_global_mutex);
}
8383 8382
8384 8383 /*
8385 8384 * set throttles to HOLD and set reset_delay for all target/luns
8386 8385 */
8387 8386 static void
8388 8387 fas_setup_reset_delay(struct fas *fas)
8389 8388 {
8390 8389 if (!ddi_in_panic()) {
8391 8390 int i;
8392 8391
8393 8392 fas_set_throttles(fas, 0, N_SLOTS, HOLD_THROTTLE);
8394 8393 for (i = 0; i < NTARGETS_WIDE; i++) {
8395 8394 fas->f_reset_delay[i] = fas->f_scsi_reset_delay;
8396 8395 }
8397 8396 fas_start_watch_reset_delay(fas);
8398 8397 } else {
8399 8398 drv_usecwait(fas->f_scsi_reset_delay * 1000);
8400 8399 }
8401 8400 }
8402 8401
8403 8402 /*
8404 8403 * fas_watch_reset_delay(_subr) is invoked by timeout() and checks every
8405 8404 * fas instance for active reset delays
8406 8405 */
8407 8406 /*ARGSUSED*/
8408 8407 static void
8409 8408 fas_watch_reset_delay(void *arg)
8410 8409 {
8411 8410 struct fas *fas;
8412 8411 struct fas *lfas; /* last not_done fas */
8413 8412 int not_done = 0;
8414 8413
8415 8414 mutex_enter(&fas_global_mutex);
8416 8415 fas_reset_watch = 0;
8417 8416 mutex_exit(&fas_global_mutex);
8418 8417
8419 8418 rw_enter(&fas_global_rwlock, RW_READER);
8420 8419 for (fas = fas_head; fas != (struct fas *)NULL; fas = fas->f_next) {
8421 8420 if (fas->f_tran == 0) {
8422 8421 continue;
8423 8422 }
8424 8423 mutex_enter(FAS_MUTEX(fas));
8425 8424 not_done += fas_watch_reset_delay_subr(fas);
8426 8425 lfas = fas;
8427 8426 fas_check_waitQ_and_mutex_exit(fas);
8428 8427 }
8429 8428 rw_exit(&fas_global_rwlock);
8430 8429 if (not_done) {
8431 8430 ASSERT(lfas != NULL);
8432 8431 fas_start_watch_reset_delay(lfas);
8433 8432 }
8434 8433 }
8435 8434
/*
 * Decrement the reset-delay countdown for every target on this
 * instance.  Targets whose delay expires get all their lun throttles
 * restored to MAX_THROTTLE; if at least one expired and the bus is
 * free, kick off the next command.
 *
 * Returns 0 when no delays remain, -1 (accumulated by the caller into
 * not_done) when at least one target is still counting down.
 */
static int
fas_watch_reset_delay_subr(struct fas *fas)
{
	short slot, s;
	int start_slot = -1;
	int done = 0;

	for (slot = 0; slot < N_SLOTS; slot += NLUNS_PER_TARGET) {

		/*
		 * check if a reset delay is active; if so back to full throttle
		 * which will unleash the cmds in the ready Q
		 */
		s = slot/NLUNS_PER_TARGET;
		if (fas->f_reset_delay[s] != 0) {
			EPRINTF2("target%d: reset delay=%d\n", s,
			    fas->f_reset_delay[s]);
			fas->f_reset_delay[s] -= FAS_WATCH_RESET_DELAY_TICK;
			if (fas->f_reset_delay[s] <= 0) {
				/*
				 * clear throttle for all luns on this target
				 */
				fas->f_reset_delay[s] = 0;
				fas_set_all_lun_throttles(fas,
				    slot, MAX_THROTTLE);
				IPRINTF1("reset delay completed, slot=%x\n",
				    slot);
				/* remember that at least one target expired */
				if (start_slot == -1) {
					start_slot = slot;
				}
			} else {
				done = -1;
			}
		}
	}

	/*
	 * start a cmd if a reset delay expired
	 */
	if (start_slot != -1 && fas->f_state == STATE_FREE) {
		(void) fas_ustart(fas);
	}
	return (done);
}
8480 8479
/*
 * cleanup after a device reset. this affects all target's luns
 *
 * Marks every outstanding packet on all luns of the target as
 * CMD_RESET/STAT_DEV_RESET, flushes the tag and ready queues, sets up
 * the post-reset delay (or busy-waits it out in panic context), and
 * forces sync/wide renegotiation on the next command to this target.
 */
static void
fas_reset_cleanup(struct fas *fas, int slot)
{
	/*
	 * reset msg has been accepted, now cleanup queues;
	 * for all luns of this target
	 */
	int i, start, end;
	int target = slot/NLUNS_PER_TARGET;

	/* [start, end) spans every lun slot belonging to this target */
	start = slot & ~(NLUNS_PER_TARGET-1);
	end = start + NLUNS_PER_TARGET;
	IPRINTF4("fas_reset_cleanup: slot %x, start=%x, end=%x, tcmds=%x\n",
	    slot, start, end, fas->f_tcmds[slot]);

	/* the reset target must not still be in the selection phase */
	ASSERT(!(fas->f_current_sp &&
	    (fas->f_current_sp->cmd_slot == slot) &&
	    (fas->f_state & STATE_SELECTING)));

	/*
	 * if we are not in panic set up a reset delay for this target,
	 * a zero throttle forces all new requests into the ready Q
	 */
	if (!ddi_in_panic()) {
		fas_set_all_lun_throttles(fas, start, HOLD_THROTTLE);
		fas->f_reset_delay[target] = fas->f_scsi_reset_delay;
		fas_start_watch_reset_delay(fas);
	} else {
		/* delay is in msec, drv_usecwait wants usec */
		drv_usecwait(fas->f_scsi_reset_delay * 1000);
	}

	for (i = start; i < end; i++) {
		fas_mark_packets(fas, i, CMD_RESET, STAT_DEV_RESET);
		fas_flush_tagQ(fas, i);
		fas_flush_readyQ(fas, i);
		/*
		 * sanity-check the auto-request-sense packet for this lun:
		 * if it has a completion routine, no command may still be
		 * parked in its save area
		 */
		if (fas->f_arq_pkt[i]) {
			struct fas_cmd *sp = fas->f_arq_pkt[i];
			struct arq_private_data *arq_data =
			    (struct arq_private_data *)
			    (sp->cmd_pkt->pkt_private);
			if (sp->cmd_pkt->pkt_comp) {
				ASSERT(arq_data->arq_save_sp == NULL);
			}
		}
		ASSERT(fas->f_tcmds[i] == 0);
	}
	ASSERT(fas->f_ncmds >= fas->f_ndisc);

	/* renegotiate sync/wide on the next command to this target */
	fas_force_renegotiation(fas, target);
}
8534 8533
/*
 * reset a currently disconnected target
 *
 * Builds a proxy command on the stack around a freshly allocated
 * scsi_pkt carrying a single MSG_DEVICE_RESET message, runs it through
 * the proxy-command machinery, and returns its result (TRUE/FALSE per
 * fas_do_proxy_cmd -- TODO confirm return convention).
 */
static int
fas_reset_disconnected_cmd(struct fas *fas, struct scsi_address *ap)
{
	auto struct fas_cmd local;
	struct fas_cmd *sp = &local;
	struct scsi_pkt *pkt;
	int rval;

	pkt = kmem_alloc(scsi_pkt_size(), KM_SLEEP);
	fas_makeproxy_cmd(sp, ap, pkt, 1, MSG_DEVICE_RESET);
	rval = fas_do_proxy_cmd(fas, sp, ap, scsi_mname(MSG_DEVICE_RESET));
	kmem_free(pkt, scsi_pkt_size());
	return (rval);
}
8552 8551
/*
 * reset a target with a currently connected command
 * Assert ATN and send MSG_DEVICE_RESET, zero throttles temporarily
 * to prevent new cmds from starting regardless of the outcome
 *
 * Returns TRUE if the reset message was actually taken by the target
 * (message-out phase occurred and the command completed), else FALSE.
 */
static int
fas_reset_connected_cmd(struct fas *fas, struct scsi_address *ap)
{
	int rval = FALSE;
	struct fas_cmd *sp = fas->f_current_sp;
	int flags = sp->cmd_pkt_flags;

	/*
	 * only attempt to reset in data phase; during other phases
	 * asserting ATN may just cause confusion
	 */
	if (!((fas->f_state == ACTS_DATA) ||
	    (fas->f_state == ACTS_DATA_DONE))) {
		return (rval);
	}

	IPRINTF2("Sending reset message to connected %d.%d\n",
	    ap->a_target, ap->a_lun);
	fas->f_reset_msg_sent = 0;
	fas->f_omsglen = 1;
	fas->f_cur_msgout[0] = MSG_DEVICE_RESET;
	/* run the remainder of this command polled, not interrupt driven */
	sp->cmd_pkt_flags |= FLAG_NOINTR;

	fas_assert_atn(fas);

	/*
	 * poll for interrupts until bus free
	 */
	(void) fas_dopoll(fas, SHORT_POLL_TIMEOUT);

	/*
	 * now check if the msg was taken
	 * f_reset is set in fas_handle_msg_out_done when
	 * msg has actually gone out (ie. msg out phase occurred)
	 */
	if (fas->f_reset_msg_sent && (sp->cmd_flags & CFLAG_COMPLETED)) {
		IPRINTF2("target %d.%d reset\n", ap->a_target, ap->a_lun);
		rval = TRUE;
	} else {
		IPRINTF2("target %d.%d did not reset\n",
		    ap->a_target, ap->a_lun);
	}
	/* restore the caller's pkt flags and clear the pending msg-out */
	sp->cmd_pkt_flags = flags;
	fas->f_omsglen = 0;

	return (rval);
}
8605 8604
/*
 * reset the scsi bus to blow all commands away
 *
 * Moves the FSM to ACTS_RESET and triggers a hardware bus reset; the
 * resulting SCSI RESET interrupt (handled in fas_reset_recovery)
 * performs the actual cleanup.  Always returns ACTION_RETURN.
 */
static int
fas_reset_bus(struct fas *fas)
{
	IPRINTF("fas_reset_bus:\n");
	New_state(fas, ACTS_RESET);

	fas_internal_reset(fas, FAS_RESET_SCSIBUS);

	/*
	 * Now that we've reset the SCSI bus, we'll take a SCSI RESET
	 * interrupt and use that to clean up the state of things.
	 */
	return (ACTION_RETURN);
}
8623 8622
/*
 * fas_reset_recovery is called on the reset interrupt and cleans
 * up all cmds (active or waiting)
 *
 * Sequence: force renegotiation, flush the DMA engine, set up reset
 * delays, drain latched chip interrupts, reset the chip, and then --
 * if commands were outstanding -- reset the soft state, flag every
 * queued command as bus-reset, zero all slot timeouts, and finally run
 * the registered reset-notification callbacks.
 *
 * Returns ACTION_SEARCH when commands were outstanding (caller should
 * look for more work), ACTION_RETURN when there was nothing to do.
 */
static int
fas_reset_recovery(struct fas *fas)
{
	short slot, start_slot;
	int i;
	int rval = ACTION_SEARCH;
	int max_loop = 0;

	IPRINTF("fas_reset_recovery:\n");
	fas_check_ncmds(fas);

	/*
	 * renegotiate wide and sync for all targets
	 */
	fas->f_sync_known = fas->f_wide_known = 0;

	/*
	 * reset dma engine
	 */
	FAS_FLUSH_DMA_HARD(fas);

	/*
	 * set throttles and reset delay
	 */
	fas_setup_reset_delay(fas);

	/*
	 * clear interrupts until they go away
	 * (bounded spin so a stuck interrupt line cannot hang us)
	 */
	while (INTPENDING(fas) && (max_loop < FAS_RESET_SPIN_MAX_LOOP)) {
		volatile struct fasreg *fasreg = fas->f_reg;
		fas->f_stat = fas_reg_read(fas, &fasreg->fas_stat);
		fas->f_stat2 = fas_reg_read(fas, &fasreg->fas_stat2);
		fas->f_step = fas_reg_read(fas, &fasreg->fas_step);
		fas->f_intr = fas_reg_read(fas, &fasreg->fas_intr);
		drv_usecwait(FAS_RESET_SPIN_DELAY_USEC);
		max_loop++;
	}

	if (max_loop >= FAS_RESET_SPIN_MAX_LOOP) {
		fas_log(fas, CE_WARN, "Resetting SCSI bus failed");
	}

	/* flush the chip's FIFO */
	fas_reg_cmd_write(fas, CMD_FLUSH);

	/*
	 * reset the chip, this shouldn't be necessary but sometimes
	 * we get a hang in the next data in phase
	 */
	fas_internal_reset(fas, FAS_RESET_FAS);

	/*
	 * reset was expected? if not, it must be external bus reset
	 */
	if (fas->f_state != ACTS_RESET) {
		if (fas->f_ncmds) {
			fas_log(fas, CE_WARN, "external SCSI bus reset");
		}
	}

	/* nothing outstanding: no queues to flush, just go back to free */
	if (fas->f_ncmds == 0) {
		rval = ACTION_RETURN;
		goto done;
	}

	/*
	 * completely reset the state of the softc data.
	 */
	fas_internal_reset(fas, FAS_RESET_SOFTC);

	/*
	 * Hold the state of the host adapter open
	 */
	New_state(fas, ACTS_FROZEN);

	/*
	 * for right now just claim that all
	 * commands have been destroyed by a SCSI reset
	 * and let already set reason fields or callers
	 * decide otherwise for specific commands.
	 */
	start_slot = fas->f_next_slot;
	slot = start_slot;
	do {
		fas_check_ncmds(fas);
		fas_mark_packets(fas, slot, CMD_RESET, STAT_BUS_RESET);
		fas_flush_tagQ(fas, slot);
		fas_flush_readyQ(fas, slot);
		/*
		 * sanity-check the ARQ packet: if it has a completion
		 * routine, nothing may still be parked in its save area
		 */
		if (fas->f_arq_pkt[slot]) {
			struct fas_cmd *sp = fas->f_arq_pkt[slot];
			struct arq_private_data *arq_data =
			    (struct arq_private_data *)
			    (sp->cmd_pkt->pkt_private);
			if (sp->cmd_pkt->pkt_comp) {
				ASSERT(arq_data->arq_save_sp == NULL);
			}
		}
		slot = NEXTSLOT(slot, fas->f_dslot);
	} while (slot != start_slot);

	fas_check_ncmds(fas);

	/*
	 * reset timeouts
	 */
	for (i = 0; i < N_SLOTS; i++) {
		if (fas->f_active[i]) {
			fas->f_active[i]->f_timebase = 0;
			fas->f_active[i]->f_timeout = 0;
			fas->f_active[i]->f_dups = 0;
		}
	}

done:
	/*
	 * Move the state back to free...
	 */
	New_state(fas, STATE_FREE);
	ASSERT(fas->f_ncmds >= fas->f_ndisc);

	/*
	 * perform the reset notification callbacks that are registered.
	 */
	(void) scsi_hba_reset_notify_callback(&fas->f_mutex,
	    &fas->f_reset_notify_listf);

	/*
	 * if reset delay is still active a search is meaningless
	 * but do it anyway
	 */
	return (rval);
}
8760 8759
8761 8760 /*
8762 8761 * hba_tran ops for quiesce and unquiesce
8763 8762 */
8764 8763 static int
8765 8764 fas_scsi_quiesce(dev_info_t *dip)
8766 8765 {
8767 8766 struct fas *fas;
8768 8767 scsi_hba_tran_t *tran;
8769 8768
8770 8769 tran = ddi_get_driver_private(dip);
8771 8770 if ((tran == NULL) || ((fas = TRAN2FAS(tran)) == NULL)) {
8772 8771 return (-1);
8773 8772 }
8774 8773
8775 8774 return (fas_quiesce_bus(fas));
8776 8775 }
8777 8776
8778 8777 static int
8779 8778 fas_scsi_unquiesce(dev_info_t *dip)
8780 8779 {
8781 8780 struct fas *fas;
8782 8781 scsi_hba_tran_t *tran;
8783 8782
8784 8783 tran = ddi_get_driver_private(dip);
8785 8784 if ((tran == NULL) || ((fas = TRAN2FAS(tran)) == NULL)) {
8786 8785 return (-1);
8787 8786 }
8788 8787
8789 8788 return (fas_unquiesce_bus(fas));
8790 8789 }
8791 8790
#ifdef FAS_TEST
/*
 * torture test functions
 */
/*
 * Fire a test reset at a target when its bit is set in fas_rtest.
 * fas_rtest_type selects the flavor: 1 = target reset in DATA_DONE,
 * 2 = bus reset in DATA_DONE, anything else = plain target reset.
 * On success the test mask is cleared so the test fires only once.
 */
static void
fas_test_reset(struct fas *fas, int slot)
{
	struct scsi_address ap;
	char target = slot/NLUNS_PER_TARGET;

	if (fas_rtest & (1 << target)) {
		/* build a minimal address for lun 0 of this target */
		ap.a_hba_tran = fas->f_tran;
		ap.a_target = target;
		ap.a_lun = 0;
		if ((fas_rtest_type == 1) &&
		    (fas->f_state == ACTS_DATA_DONE)) {
			if (fas_do_scsi_reset(&ap, RESET_TARGET)) {
				fas_rtest = 0;
			}
		} else if ((fas_rtest_type == 2) &&
		    (fas->f_state == ACTS_DATA_DONE)) {
			if (fas_do_scsi_reset(&ap, RESET_ALL)) {
				fas_rtest = 0;
			}
		} else {
			if (fas_do_scsi_reset(&ap, RESET_TARGET)) {
				fas_rtest = 0;
			}
		}
	}
}
8823 8822
/*
 * Fire a test abort at a target when its bit is set in fas_atest.
 * fas_atest_disc selects which command to abort: 0 = the connected
 * command, 1 = a disconnected command (untagged or oldest tag),
 * 2 = abort-all (NULL pkt), 3-5 = commands in the ready queue,
 * 6 = connected command in DATA_DONE, 7 = escalate abort-all twice
 * and then reset the target.  On success fas_atest is cleared so the
 * test fires only once.
 */
static void
fas_test_abort(struct fas *fas, int slot)
{
	struct fas_cmd *sp = fas->f_current_sp;
	struct scsi_address ap;
	char target = slot/NLUNS_PER_TARGET;
	struct scsi_pkt *pkt = NULL;

	if (fas_atest & (1 << target)) {
		ap.a_hba_tran = fas->f_tran;
		ap.a_target = target;
		ap.a_lun = 0;

		if ((fas_atest_disc == 0) && sp &&
		    (sp->cmd_slot == slot) &&
		    ((sp->cmd_flags & CFLAG_CMDDISC) == 0)) {
			/* abort the currently connected command */
			pkt = sp->cmd_pkt;
		} else if ((fas_atest_disc == 1) && NOTAG(target)) {
			/* untagged target: the one disconnected command */
			sp = fas->f_active[slot]->f_slot[0];
			if (sp && (sp->cmd_flags & CFLAG_CMDDISC)) {
				pkt = sp->cmd_pkt;
			}
		} else if ((fas_atest_disc == 1) && (sp == 0) &&
		    TAGGED(target) &&
		    (fas->f_tcmds[slot] != 0)) {
			int tag;
			/*
			 * find the oldest tag
			 */
			for (tag = NTAGS-1; tag >= 0; tag--) {
				if ((sp = fas->f_active[slot]->f_slot[tag])
				    != 0)
					break;
			}
			if (sp) {
				pkt = sp->cmd_pkt;
				ASSERT(sp->cmd_slot == slot);
			} else {
				return;
			}
		} else if (fas_atest_disc == 2 && (sp == 0) &&
		    (fas->f_tcmds[slot] != 0)) {
			/* NULL pkt means abort all commands for the lun */
			pkt = NULL;
		} else if (fas_atest_disc == 2 && NOTAG(target)) {
			pkt = NULL;
		} else if (fas_atest_disc == 3 && fas->f_readyf[slot]) {
			/* head of the ready queue */
			pkt = fas->f_readyf[slot]->cmd_pkt;
		} else if (fas_atest_disc == 4 &&
		    fas->f_readyf[slot] && fas->f_readyf[slot]->cmd_forw) {
			/* second entry in the ready queue */
			pkt = fas->f_readyf[slot]->cmd_forw->cmd_pkt;
		} else if (fas_atest_disc == 5 && fas->f_readyb[slot]) {
			/* tail of the ready queue */
			pkt = fas->f_readyb[slot]->cmd_pkt;
		} else if ((fas_atest_disc == 6) && sp &&
		    (sp->cmd_slot == slot) &&
		    (fas->f_state == ACTS_DATA_DONE)) {
			pkt = sp->cmd_pkt;
		} else if (fas_atest_disc == 7) {
			/* escalation test: abort-all twice, then reset */
			if (fas_do_scsi_abort(&ap, NULL)) {
				if (fas_do_scsi_abort(&ap, NULL)) {
					if (fas_do_scsi_reset(&ap,
					    RESET_TARGET)) {
						fas_atest = 0;
					}
				}
			}
			return;
		} else {
			return;
		}

		fas_log(fas, CE_NOTE, "aborting pkt=0x%p state=%x\n",
		    (void *)pkt, (pkt != NULL? pkt->pkt_state : 0));
		if (fas_do_scsi_abort(&ap, pkt)) {
			fas_atest = 0;
		}
	}
}
#endif /* FAS_TEST */
8902 8901
/*
 * capability interface
 *
 * Common worker for getcap/setcap.  cap is the capability string,
 * val the value to set (or ignored on get), tgtonly selects per-target
 * scope, doset selects set (1) vs get (0).  Returns the capability
 * value on get, TRUE/FALSE on set, UNDEFINED for unknown capabilities.
 * Runs under FAS_MUTEX; drops it via the waitQ-checking exit path.
 */
static int
fas_commoncap(struct scsi_address *ap, char *cap, int val,
    int tgtonly, int doset)
{
	struct fas *fas = ADDR2FAS(ap);
	int cidx;
	int target = ap->a_target;
	ushort_t tshift = (1<<target);	/* this target's bit */
	ushort_t ntshift = ~tshift;	/* mask clearing this target's bit */
	int rval = FALSE;

	mutex_enter(FAS_MUTEX(fas));

	if (cap == (char *)0) {
		goto exit;
	}

	cidx = scsi_hba_lookup_capstr(cap);
	if (cidx == -1) {
		rval = UNDEFINED;
	} else if (doset) {
		/*
		 * we usually don't allow setting capabilities for
		 * other targets!
		 */
		if (!tgtonly) {
			goto exit;
		}
		switch (cidx) {
		case SCSI_CAP_DMA_MAX:
		case SCSI_CAP_MSG_OUT:
		case SCSI_CAP_PARITY:
		case SCSI_CAP_INITIATOR_ID:
		case SCSI_CAP_LINKED_CMDS:
		case SCSI_CAP_UNTAGGED_QING:
		case SCSI_CAP_RESET_NOTIFICATION:
			/*
			 * None of these are settable via
			 * the capability interface.
			 */
			break;

		case SCSI_CAP_DISCONNECT:
			/* NOTE(review): returns FALSE even on success */
			if (val)
				fas->f_target_scsi_options[ap->a_target] |=
				    SCSI_OPTIONS_DR;
			else
				fas->f_target_scsi_options[ap->a_target] &=
				    ~SCSI_OPTIONS_DR;

			break;

		case SCSI_CAP_SYNCHRONOUS:
			if (val) {
				fas->f_force_async &= ~tshift;
			} else {
				fas->f_force_async |= tshift;
			}
			/* take effect on the next command to the target */
			fas_force_renegotiation(fas, target);
			rval = TRUE;
			break;

		case SCSI_CAP_TAGGED_QING:
		{
			int slot = target * NLUNS_PER_TARGET | ap->a_lun;
			ushort_t old_notag = fas->f_notag;

			/* do not allow with active tgt */
			if (fas->f_tcmds[slot]) {
				break;
			}

			/* NOTE(review): redundant recomputation of slot */
			slot = target * NLUNS_PER_TARGET | ap->a_lun;

			if (val) {
				if (fas->f_target_scsi_options[target] &
				    SCSI_OPTIONS_TAG) {
					IPRINTF1("target %d: TQ enabled\n",
					    target);
					fas->f_notag &= ntshift;
				} else {
					break;
				}
			} else {
				IPRINTF1("target %d: TQ disabled\n",
				    target);
				fas->f_notag |= tshift;
			}

			/* resize the slot array; roll back notag on failure */
			if (val && fas_alloc_active_slots(fas, slot,
			    KM_NOSLEEP)) {
				fas->f_notag = old_notag;
				break;
			}

			fas_set_all_lun_throttles(fas, slot, MAX_THROTTLE);

			fas_update_props(fas, target);
			rval = TRUE;
			break;
		}

		case SCSI_CAP_WIDE_XFER:
			if (val) {
				if (fas->f_target_scsi_options[target] &
				    SCSI_OPTIONS_WIDE) {
					fas->f_nowide &= ntshift;
					fas->f_force_narrow &= ~tshift;
				} else {
					break;
				}
			} else {
				fas->f_force_narrow |= tshift;
			}
			fas_force_renegotiation(fas, target);
			rval = TRUE;
			break;

		case SCSI_CAP_ARQ:
			if (val) {
				if (fas_create_arq_pkt(fas, ap)) {
					break;
				}
			} else {
				if (fas_delete_arq_pkt(fas, ap)) {
					break;
				}
			}
			rval = TRUE;
			break;

		case SCSI_CAP_QFULL_RETRIES:
			fas->f_qfull_retries[target] = (uchar_t)val;
			rval = TRUE;
			break;

		case SCSI_CAP_QFULL_RETRY_INTERVAL:
			/* stored in ticks; val is in milliseconds */
			fas->f_qfull_retry_interval[target] =
			    drv_usectohz(val * 1000);
			rval = TRUE;
			break;

		default:
			rval = UNDEFINED;
			break;
		}

	} else if (doset == 0) {
		int slot = target * NLUNS_PER_TARGET | ap->a_lun;

		switch (cidx) {
		case SCSI_CAP_DMA_MAX:
			/* very high limit because of multiple dma windows */
			rval = 1<<30;
			break;
		case SCSI_CAP_MSG_OUT:
			rval = TRUE;
			break;
		case SCSI_CAP_DISCONNECT:
			if (tgtonly &&
			    (fas->f_target_scsi_options[target] &
			    SCSI_OPTIONS_DR)) {
				rval = TRUE;
			}
			break;
		case SCSI_CAP_SYNCHRONOUS:
			/* a nonzero negotiated offset implies sync mode */
			if (tgtonly && fas->f_offset[target]) {
				rval = TRUE;
			}
			break;
		case SCSI_CAP_PARITY:
			rval = TRUE;
			break;
		case SCSI_CAP_INITIATOR_ID:
			rval = MY_ID(fas);
			break;
		case SCSI_CAP_TAGGED_QING:
			if (tgtonly && ((fas->f_notag & tshift) == 0)) {
				rval = TRUE;
			}
			break;
		case SCSI_CAP_WIDE_XFER:
			if ((tgtonly && (fas->f_nowide & tshift) == 0)) {
				rval = TRUE;
			}
			break;
		case SCSI_CAP_UNTAGGED_QING:
			rval = TRUE;
			break;
		case SCSI_CAP_ARQ:
			if (tgtonly && fas->f_arq_pkt[slot]) {
				rval = TRUE;
			}
			break;
		case SCSI_CAP_LINKED_CMDS:
			break;
		case SCSI_CAP_RESET_NOTIFICATION:
			rval = TRUE;
			break;
		case SCSI_CAP_QFULL_RETRIES:
			rval = fas->f_qfull_retries[target];
			break;
		case SCSI_CAP_QFULL_RETRY_INTERVAL:
			/* convert stored ticks back to milliseconds */
			rval = drv_hztousec(
			    fas->f_qfull_retry_interval[target]) /
			    1000;
			break;

		default:
			rval = UNDEFINED;
			break;
		}
	}
exit:
	/* NOTE(review): also runs on the get path when val && tgtonly */
	if (val && tgtonly) {
		fas_update_props(fas, target);
	}
	fas_check_waitQ_and_mutex_exit(fas);

	if (doset) {
		IPRINTF6(
	"fas_commoncap:tgt=%x,cap=%s,tgtonly=%x,doset=%x,val=%x,rval=%x\n",
		    target, cap, tgtonly, doset, val, rval);
	}
	return (rval);
}
9132 9131
/*
 * property management
 * fas_update_props:
 *	create/update sync/wide/TQ/scsi-options properties for this target
 *
 * Computes the negotiated transfer rate from the sync period register
 * value and clock cycle (doubled for wide), then publishes the
 * targetN-sync-speed, targetN-wide and targetN-TQ devinfo properties.
 */
static void
fas_update_props(struct fas *fas, int tgt)
{
	char property[32];
	uint_t xfer_speed = 0;
	uint_t xfer_rate = 0;
	int wide_enabled, tq_enabled;
	uint_t regval = fas->f_sync_period[tgt];
	int offset = fas->f_offset[tgt];

	wide_enabled = ((fas->f_nowide & (1<<tgt)) == 0);
	/* offset == 0 or period == 0 means async: rate stays 0 */
	if (offset && regval) {
		xfer_speed =
		    FAS_SYNC_KBPS((regval * fas->f_clock_cycle) / 1000);
		xfer_rate = ((wide_enabled)? 2 : 1) * xfer_speed;
	}
	(void) sprintf(property, "target%x-sync-speed", tgt);
	fas_update_this_prop(fas, property, xfer_rate);

	(void) sprintf(property, "target%x-wide", tgt);
	fas_update_this_prop(fas, property, wide_enabled);

	(void) sprintf(property, "target%x-TQ", tgt);
	tq_enabled = ((fas->f_notag & (1<<tgt))? 0 : 1);
	fas_update_this_prop(fas, property, tq_enabled);

}
9165 9164
/*
 * Publish one integer devinfo property.  Must be entered with
 * FAS_MUTEX held; the mutex is temporarily dropped around
 * ddi_prop_update_int() because that call may block, and reacquired
 * before return -- callers must tolerate the window.
 */
static void
fas_update_this_prop(struct fas *fas, char *property, int value)
{
	dev_info_t *dip = fas->f_dev;

	IPRINTF2("update prop: %s value=%x\n", property, value);
	ASSERT(mutex_owned(FAS_MUTEX(fas)));
	/*
	 * We cannot hold any mutex at this point because the call to
	 * ddi_prop_update_int() may block.
	 */
	mutex_exit(FAS_MUTEX(fas));
	if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
	    property, value) != DDI_PROP_SUCCESS) {
		IPRINTF1("cannot modify/create %s property\n", property);
	}
	mutex_enter(FAS_MUTEX(fas));
}
9184 9183
/*
 * allocate active slots array, size is dependent on whether tagQ enabled
 *
 * Replaces the slot's f_slots array with one sized for the target's
 * current tagged-queueing mode (1 entry untagged, NTAGS tagged).
 * Refused while commands are outstanding on the slot.  Frees the old
 * array only after the new one is installed.  Returns 0 on success,
 * -1 on refusal or allocation failure (old array left in place).
 */
static int
fas_alloc_active_slots(struct fas *fas, int slot, int flag)
{
	int target = slot / NLUNS_PER_TARGET;
	struct f_slots *old_active = fas->f_active[slot];
	struct f_slots *new_active;
	ushort_t size;
	int rval = -1;

	/* resizing under outstanding commands would orphan them */
	if (fas->f_tcmds[slot]) {
		IPRINTF("cannot change size of active slots array\n");
		return (rval);
	}

	size = ((NOTAG(target)) ? FAS_F_SLOT_SIZE : FAS_F_SLOTS_SIZE_TQ);
	EPRINTF4(
	    "fas_alloc_active_slots: target=%x size=%x, old=0x%p, oldsize=%x\n",
	    target, size, (void *)old_active,
	    ((old_active == NULL) ? -1 : old_active->f_size));

	new_active = kmem_zalloc(size, flag);
	if (new_active == NULL) {
		IPRINTF("new active alloc failed\n");
	} else {
		fas->f_active[slot] = new_active;
		fas->f_active[slot]->f_n_slots = (NOTAG(target) ? 1 : NTAGS);
		fas->f_active[slot]->f_size = size;
		/*
		 * reserve tag 0 for non-tagged cmds to tagged targets
		 */
		if (TAGGED(target)) {
			fas->f_active[slot]->f_tags = 1;
		}
		if (old_active) {
			kmem_free((caddr_t)old_active, old_active->f_size);
		}
		rval = 0;
	}
	return (rval);
}
9228 9227
/*
 * Error logging, printing, and debug print routines
 */
/* label prepended by scsi_log() to every message from this driver */
static char *fas_label = "fas";
9233 9232
/*
 * printf-style logging through scsi_log() at the given severity.
 * Serializes on fas_log_mutex because the shared fas_log_buf staging
 * buffer is global.  A NULL fas logs without a devinfo node.
 *
 * NOTE(review): vsprintf into fas_log_buf is unbounded; assumes all
 * callers format less than the buffer size -- TODO confirm.
 */
/*PRINTFLIKE3*/
static void
fas_log(struct fas *fas, int level, const char *fmt, ...)
{
	dev_info_t *dev;
	va_list ap;

	if (fas) {
		dev = fas->f_dev;
	} else {
		dev = 0;
	}

	mutex_enter(&fas_log_mutex);

	va_start(ap, fmt);
	(void) vsprintf(fas_log_buf, fmt, ap);
	va_end(ap);

	/* CE_CONT messages get an explicit trailing newline */
	if (level == CE_CONT) {
		scsi_log(dev, fas_label, level, "%s\n", fas_log_buf);
	} else {
		scsi_log(dev, fas_label, level, "%s", fas_log_buf);
	}

	mutex_exit(&fas_log_mutex);
}
9261 9260
/*
 * Convenience logger: CE_NOTE with the instance's devinfo when fas is
 * non-NULL, CE_CONT (with trailing newline) otherwise.  Shares
 * fas_log_buf and fas_log_mutex with fas_log().
 */
/*PRINTFLIKE2*/
static void
fas_printf(struct fas *fas, const char *fmt, ...)
{
	dev_info_t *dev = 0;
	va_list ap;
	int level = CE_CONT;

	mutex_enter(&fas_log_mutex);

	va_start(ap, fmt);
	(void) vsprintf(fas_log_buf, fmt, ap);
	va_end(ap);

	if (fas) {
		dev = fas->f_dev;
		level = CE_NOTE;
		scsi_log(dev, fas_label, level, "%s", fas_log_buf);
	} else {
		/* dev is NULL here; scsi_log presumably tolerates that */
		scsi_log(dev, fas_label, level, "%s\n", fas_log_buf);
	}

	mutex_exit(&fas_log_mutex);
}
9286 9285
#ifdef FASDEBUG
/*
 * Debug-build logger: formats into the shared fas_log_buf under
 * fas_log_mutex and emits at SCSI_DEBUG level, tagged with the
 * instance's devinfo node when fas is non-NULL.
 */
/*PRINTFLIKE2*/
void
fas_dprintf(struct fas *fas, const char *fmt, ...)
{
	dev_info_t *dev = 0;
	va_list ap;

	if (fas) {
		dev = fas->f_dev;
	}

	mutex_enter(&fas_log_mutex);

	va_start(ap, fmt);
	(void) vsprintf(fas_log_buf, fmt, ap);
	va_end(ap);

	scsi_log(dev, fas_label, SCSI_DEBUG, "%s", fas_log_buf);

	mutex_exit(&fas_log_mutex);
}
#endif
9310 9309
9311 9310
/*
 * Log a full diagnostic snapshot: latched status/interrupt bits, last
 * messages, DMA engine registers, live chip registers, and -- if a
 * command is connected -- a dump of that command.  Used on error paths.
 */
static void
fas_printstate(struct fas *fas, char *msg)
{
	volatile struct fasreg *fasreg = fas->f_reg;
	volatile struct dma *dmar = fas->f_dma;
	uint_t csr = fas_dma_reg_read(fas, &dmar->dma_csr);
	uint_t count = fas_dma_reg_read(fas, &dmar->dma_count);
	uint_t addr = fas_dma_reg_read(fas, &dmar->dma_addr);
	uint_t test = fas_dma_reg_read(fas, &dmar->dma_test);
	uint_t fas_cnt;

	fas_log(fas, CE_WARN, "%s: current fas state:", msg);
	fas_printf(NULL, "Latched stat=0x%b intr=0x%b",
	    fas->f_stat, FAS_STAT_BITS, fas->f_intr, FAS_INT_BITS);
	fas_printf(NULL, "last msgout: %s, last msgin: %s",
	    scsi_mname(fas->f_last_msgout), scsi_mname(fas->f_last_msgin));
	fas_printf(NULL, "DMA csr=0x%b", csr, dma_bits);
	fas_printf(NULL,
	    "addr=%x dmacnt=%x test=%x last=%x last_cnt=%x",
	    addr, count, test, fas->f_lastdma, fas->f_lastcount);

	/* assemble the 32-bit transfer counter from the chip registers */
	GET_FAS_COUNT(fasreg, fas_cnt);
	fas_printf(NULL, "fas state:");
	fas_printf(NULL, "\tcount(32)=%x cmd=%x stat=%x stat2=%x intr=%x",
	    fas_cnt, fasreg->fas_cmd, fasreg->fas_stat, fasreg->fas_stat2,
	    fasreg->fas_intr);
	fas_printf(NULL,
	    "\tstep=%x fifoflag=%x conf=%x test=%x conf2=%x conf3=%x",
	    fasreg->fas_step, fasreg->fas_fifo_flag, fasreg->fas_conf,
	    fasreg->fas_test, fasreg->fas_conf2, fasreg->fas_conf3);

	if (fas->f_current_sp) {
		fas_dump_cmd(fas, fas->f_current_sp);
	}
}
9347 9346
9348 9347 /*
9349 9348 * dump all we know about a cmd
9350 9349 */
9351 9350 static void
9352 9351 fas_dump_cmd(struct fas *fas, struct fas_cmd *sp)
9353 9352 {
9354 9353 int i;
9355 9354 uchar_t *cp = (uchar_t *)sp->cmd_pkt->pkt_cdbp;
9356 9355 auto char buf[128];
9357 9356
9358 9357 buf[0] = '\0';
9359 9358 fas_printf(NULL, "Cmd dump for Target %d Lun %d:",
9360 9359 Tgt(sp), Lun(sp));
9361 9360 (void) sprintf(&buf[0], " cdb=[");
9362 9361 for (i = 0; i < (int)sp->cmd_actual_cdblen; i++) {
9363 9362 (void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
9364 9363 }
9365 9364 (void) sprintf(&buf[strlen(buf)], " ]");
9366 9365 fas_printf(NULL, buf);
9367 9366 fas_printf(NULL, "State=%s Last State=%s",
9368 9367 fas_state_name(fas->f_state), fas_state_name(fas->f_laststate));
9369 9368 fas_printf(NULL,
9370 9369 "pkt_state=0x%b pkt_flags=0x%x pkt_statistics=0x%x",
9371 9370 sp->cmd_pkt->pkt_state, scsi_state_bits, sp->cmd_pkt_flags,
9372 9371 sp->cmd_pkt->pkt_statistics);
9373 9372 if (sp->cmd_pkt->pkt_state & STATE_GOT_STATUS) {
9374 9373 fas_printf(NULL, "Status=0x%x\n", sp->cmd_pkt->pkt_scbp[0]);
9375 9374 }
9376 9375 }
9377 9376
9378 9377 /*ARGSUSED*/
9379 9378 static void
9380 9379 fas_short_dump_cmd(struct fas *fas, struct fas_cmd *sp)
9381 9380 {
9382 9381 int i;
9383 9382 uchar_t *cp = (uchar_t *)sp->cmd_pkt->pkt_cdbp;
9384 9383 auto char buf[128];
9385 9384
9386 9385 buf[0] = '\0';
9387 9386 (void) sprintf(&buf[0], "?%d.%d: cdb=[", Tgt(sp), Lun(sp));
9388 9387 for (i = 0; i < (int)sp->cmd_actual_cdblen; i++) {
9389 9388 (void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
9390 9389 }
9391 9390 (void) sprintf(&buf[strlen(buf)], " ]");
9392 9391 fas_printf(NULL, buf);
9393 9392 }
9394 9393
9395 9394 /*
9396 9395 * state decoding for error messages
9397 9396 */
9398 9397 static char *
9399 9398 fas_state_name(ushort_t state)
9400 9399 {
9401 9400 if (state == STATE_FREE) {
9402 9401 return ("FREE");
9403 9402 } else if (state & STATE_SELECTING) {
9404 9403 if (state == STATE_SELECT_NORMAL)
9405 9404 return ("SELECT");
9406 9405 else if (state == STATE_SELECT_N_STOP)
9407 9406 return ("SEL&STOP");
9408 9407 else if (state == STATE_SELECT_N_SENDMSG)
9409 9408 return ("SELECT_SNDMSG");
9410 9409 else
9411 9410 return ("SEL_NO_ATN");
9412 9411 } else {
9413 9412 static struct {
9414 9413 char *sname;
9415 9414 char state;
9416 9415 } names[] = {
9417 9416 "CMD_START", ACTS_CMD_START,
9418 9417 "CMD_DONE", ACTS_CMD_DONE,
9419 9418 "MSG_OUT", ACTS_MSG_OUT,
9420 9419 "MSG_OUT_DONE", ACTS_MSG_OUT_DONE,
9421 9420 "MSG_IN", ACTS_MSG_IN,
9422 9421 "MSG_IN_MORE", ACTS_MSG_IN_MORE,
9423 9422 "MSG_IN_DONE", ACTS_MSG_IN_DONE,
9424 9423 "CLEARING", ACTS_CLEARING,
9425 9424 "DATA", ACTS_DATA,
9426 9425 "DATA_DONE", ACTS_DATA_DONE,
9427 9426 "CMD_CMPLT", ACTS_C_CMPLT,
9428 9427 "UNKNOWN", ACTS_UNKNOWN,
9429 9428 "RESEL", ACTS_RESEL,
9430 9429 "ENDVEC", ACTS_ENDVEC,
9431 9430 "RESET", ACTS_RESET,
9432 9431 "ABORTING", ACTS_ABORTING,
9433 9432 "FROZEN", ACTS_FROZEN,
9434 9433 0
9435 9434 };
9436 9435 int i;
9437 9436 for (i = 0; names[i].sname; i++) {
9438 9437 if (names[i].state == state)
9439 9438 return (names[i].sname);
9440 9439 }
9441 9440 }
9442 9441 return ("<BAD>");
9443 9442 }
↓ open down ↓ |
7994 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX