Print this page
XXXX introduce drv_sectohz
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_clock.c
+++ new/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_clock.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at
9 9 * http://www.opensource.org/licenses/cddl1.txt.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2004-2012 Emulex. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 #include <emlxs.h>
28 28
29 29 /* Timer period in seconds */
30 30 #define EMLXS_TIMER_PERIOD 1 /* secs */
31 31 #define EMLXS_PKT_PERIOD 5 /* secs */
32 32 #define EMLXS_UB_PERIOD 60 /* secs */
33 33
34 34 EMLXS_MSG_DEF(EMLXS_CLOCK_C);
35 35
36 36
37 37 static void emlxs_timer_check_loopback(emlxs_hba_t *hba);
38 38
39 39 #ifdef DHCHAP_SUPPORT
40 40 static void emlxs_timer_check_dhchap(emlxs_port_t *port);
41 41 #endif /* DHCHAP_SUPPORT */
42 42
43 43 static void emlxs_timer_check_pools(emlxs_hba_t *hba);
44 44 static void emlxs_timer(void *arg);
45 45 static void emlxs_timer_check_fw_update(emlxs_hba_t *hba);
46 46 static void emlxs_timer_check_heartbeat(emlxs_hba_t *hba);
47 47 static uint32_t emlxs_timer_check_pkts(emlxs_hba_t *hba, uint8_t *flag);
48 48 static void emlxs_timer_check_nodes(emlxs_port_t *port, uint8_t *flag);
49 49 static void emlxs_timer_check_linkup(emlxs_hba_t *hba);
50 50 static void emlxs_timer_check_discovery(emlxs_port_t *port);
51 51 static void emlxs_timer_check_clean_address(emlxs_port_t *port);
52 52 static void emlxs_timer_check_ub(emlxs_port_t *port);
53 53 static void emlxs_timer_check_channels(emlxs_hba_t *hba, uint8_t *flag);
54 54 static uint32_t emlxs_pkt_chip_timeout(emlxs_port_t *port, emlxs_buf_t *sbp,
55 55 Q *abortq, uint8_t *flag);
56 56
57 57 #ifdef TX_WATCHDOG
58 58 static void emlxs_tx_watchdog(emlxs_hba_t *hba);
59 59 #endif /* TX_WATCHDOG */
↓ open down ↓ |
59 lines elided |
↑ open up ↑ |
60 60
61 61 extern clock_t
62 62 emlxs_timeout(emlxs_hba_t *hba, uint32_t timeout)
63 63 {
64 64 emlxs_config_t *cfg = &CFG;
65 65 clock_t time;
66 66
67 67 /* Set thread timeout */
68 68 if (cfg[CFG_TIMEOUT_ENABLE].current) {
69 69 (void) drv_getparm(LBOLT, &time);
70 - time += (timeout * drv_usectohz(1000000));
70 + time += drv_sectohz(timeout);
71 71 } else {
72 72 time = -1;
73 73 }
74 74
75 75 return (time);
76 76
77 77 } /* emlxs_timeout() */
78 78
79 79
/*
 * timeout(9F) callback implementing the adapter's periodic service
 * timer (one tick per EMLXS_TIMER_PERIOD seconds).  Runs the standard
 * housekeeping checks and then re-arms itself unless a kill request
 * has been posted by emlxs_timer_stop().
 */
static void
emlxs_timer(void *arg)
{
	emlxs_hba_t *hba = (emlxs_hba_t *)arg;
	emlxs_port_t *port = &PPORT;

	/* Unlocked fast-path check; the state is re-examined under the */
	/* timer lock below before any decision is committed */
	if (!hba->timer_id) {
		return;
	}

	mutex_enter(&EMLXS_TIMER_LOCK);

	/* Only one timer thread is allowed */
	if (hba->timer_flags & EMLXS_TIMER_BUSY) {
		mutex_exit(&EMLXS_TIMER_LOCK);
		return;
	}

	/* Check if a kill request has been made */
	if (hba->timer_flags & EMLXS_TIMER_KILL) {
		/* Acknowledge the kill: emlxs_timer_stop() polls timer_id */
		hba->timer_id = 0;
		hba->timer_tics = 0;
		hba->timer_flags |= EMLXS_TIMER_ENDED;

		mutex_exit(&EMLXS_TIMER_LOCK);
		return;
	}

	hba->timer_flags |= (EMLXS_TIMER_BUSY | EMLXS_TIMER_STARTED);
	hba->timer_tics = DRV_TIME;

	/* Check io_active count (Safety net) */
	if (hba->io_active & 0x80000000) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
		    "Timer: io_active=0x%x. Reset to zero.", hba->io_active);
		hba->io_active = 0;
	}

	mutex_exit(&EMLXS_TIMER_LOCK);

	/* Poll the SLI layer for error attention conditions */
	EMLXS_SLI_POLL_ERRATT(hba);

	/* Perform standard checks */
	emlxs_timer_checks(hba);

	/* Restart the timer */
	mutex_enter(&EMLXS_TIMER_LOCK);

	hba->timer_flags &= ~EMLXS_TIMER_BUSY;

	/* If timer is still enabled, restart it */
	if (!(hba->timer_flags & EMLXS_TIMER_KILL)) {
		hba->timer_id =
		    timeout(emlxs_timer, (void *)hba,
		    drv_sectohz(EMLXS_TIMER_PERIOD));
	} else {
		hba->timer_id = 0;
		hba->timer_flags |= EMLXS_TIMER_ENDED;
	}

	mutex_exit(&EMLXS_TIMER_LOCK);

	return;

} /* emlxs_timer() */
145 145
146 146
/*
 * Run all periodic housekeeping for one adapter.  Called from
 * emlxs_timer() once per tick after the HBA has reached at least the
 * FC_LINK_DOWN state.  The call order below is significant: packet
 * timeout checks can trigger a reset (in which case we bail out), and
 * channel servicing must run last so it sees every flag set by the
 * earlier checks.
 */
extern void
emlxs_timer_checks(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint8_t flag[MAX_CHANNEL];	/* per-channel "needs service" flags */
	uint32_t i;
	uint32_t rc;

	/* Exit if we are still initializing */
	if (hba->state < FC_LINK_DOWN) {
		return;
	}

	bzero((void *)flag, sizeof (flag));

	/* Check SLI level timeouts */
	EMLXS_SLI_TIMER(hba);

	/* Check event queue */
	emlxs_timer_check_events(hba);

	/* Check heartbeat timer */
	emlxs_timer_check_heartbeat(hba);

	/* Check fw update timer */
	emlxs_timer_check_fw_update(hba);

#ifdef IDLE_TIMER
	emlxs_pm_idle_timer(hba);
#endif /* IDLE_TIMER */

	/* Check for loopback timeouts */
	emlxs_timer_check_loopback(hba);

	/* Check for packet timeouts */
	rc = emlxs_timer_check_pkts(hba, flag);

	if (rc) {
		/* Link or adapter is being reset */
		return;
	}

	/* Check for linkup timeout */
	emlxs_timer_check_linkup(hba);

	/* Check the ports */
	for (i = 0; i < MAX_VPORTS; i++) {
		port = &VPORT(i);

		/* Skip virtual ports not bound to the transport */
		if (!(port->flag & EMLXS_PORT_BOUND)) {
			continue;
		}

		/* Check for node gate timeouts */
		emlxs_timer_check_nodes(port, flag);

		/* Check for clean address bit delay timeout */
		emlxs_timer_check_clean_address(port);

		/* Check for tape discovery timeout */
		emlxs_timer_check_discovery(port);

		/* Check for UB timeouts */
		emlxs_timer_check_ub(port);

#ifdef DHCHAP_SUPPORT
		/* Check for DHCHAP authentication timeouts */
		emlxs_timer_check_dhchap(port);
#endif /* DHCHAP_SUPPORT */

	}

	/* Check memory pools */
	emlxs_timer_check_pools(hba);

	/* Check for IO channel service timeouts */
	/* Always do this last */
	emlxs_timer_check_channels(hba, flag);

	return;

} /* emlxs_timer_checks() */
229 229
230 230
/*
 * Arm the adapter service timer if it is not already running.  The
 * first timer_id check is an unlocked fast path; the state is
 * re-verified under EMLXS_TIMER_LOCK before the timeout(9F) call is
 * actually scheduled.
 */
extern void
emlxs_timer_start(emlxs_hba_t *hba)
{
	if (hba->timer_id) {
		return;
	}

	/* Restart the timer */
	mutex_enter(&EMLXS_TIMER_LOCK);
	if (!hba->timer_id) {
		hba->timer_flags = 0;
		hba->timer_id =
		    timeout(emlxs_timer, (void *)hba, drv_sectohz(1));
	}
	mutex_exit(&EMLXS_TIMER_LOCK);

} /* emlxs_timer_start() */
248 248
249 249
/*
 * Request termination of the adapter service timer and wait for the
 * running/pending emlxs_timer() callback to acknowledge the kill
 * (it clears hba->timer_id on its way out).  The lock is dropped
 * around each 0.5 second poll so the timer callback can make
 * progress.
 */
extern void
emlxs_timer_stop(emlxs_hba_t *hba)
{
	if (!hba->timer_id) {
		return;
	}

	mutex_enter(&EMLXS_TIMER_LOCK);
	hba->timer_flags |= EMLXS_TIMER_KILL;

	while (hba->timer_id) {
		mutex_exit(&EMLXS_TIMER_LOCK);
		delay(drv_usectohz(500000));
		mutex_enter(&EMLXS_TIMER_LOCK);
	}
	mutex_exit(&EMLXS_TIMER_LOCK);

	return;

} /* emlxs_timer_stop() */
270 270
271 271
/*
 * Scan for timed-out packets, every EMLXS_PKT_PERIOD ticks.
 *
 * Phase 1 (under EMLXS_TX_CHANNEL_LOCK): walk each channel's node
 * transmit queues (priority and normal) and move every IOCB whose
 * timeout tick has passed onto a local queue, then release its iotag
 * or XRI.  Phase 2 (no locks): complete those packets with an abort
 * or link-down status.  Phase 3 (under EMLXS_FCTAB_LOCK): scan the
 * chip's outstanding-IO table and hand expired IOs to
 * emlxs_pkt_chip_timeout(), which may queue abort IOCBs and/or
 * request a reset.  Any queued aborts are then submitted and the
 * flagged channels serviced.
 *
 * Returns 0 normally; 1 when a link reset has been requested, 2 when
 * an adapter (port) reset has been requested.  A nonzero return tells
 * the caller to stop further timer processing this tick.
 */
static uint32_t
emlxs_timer_check_pkts(emlxs_hba_t *hba, uint8_t *flag)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	Q tmo;			/* local queue of timed-out TXQ iocbs */
	int32_t channelno;
	CHANNEL *cp;
	NODELIST *nlp;
	IOCBQ *prev;
	IOCBQ *next;
	IOCB *iocb;
	IOCBQ *iocbq;
	emlxs_buf_t *sbp;
	fc_packet_t *pkt;
	Q abort;		/* abort iocbs built by chip timeout check */
	uint32_t iotag;
	uint32_t rc;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return (0);
	}

	/* Rate-limit this scan to once every EMLXS_PKT_PERIOD ticks */
	if (hba->pkt_timer > hba->timer_tics) {
		return (0);
	}

	hba->pkt_timer = hba->timer_tics + EMLXS_PKT_PERIOD;


	bzero((void *)&tmo, sizeof (Q));

	/*
	 * We must hold the locks here because we never know when an iocb
	 * will be removed out from under us
	 */

	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		cp = &hba->chan[channelno];

		/* Scan the tx queues for each active node on the channel */

		/* Get the first node */
		nlp = (NODELIST *)cp->nodeq.q_first;

		while (nlp) {
			/* Scan the node's priority tx queue */
			prev = NULL;
			iocbq = (IOCBQ *)nlp->nlp_ptx[channelno].q_first;

			while (iocbq) {
				next = (IOCBQ *)iocbq->next;
				iocb = &iocbq->iocb;
				sbp = (emlxs_buf_t *)iocbq->sbp;

				/* Check if iocb has timed out */
				if (sbp && hba->timer_tics >= sbp->ticks) {
					/* iocb timed out, now deque it */
					if (next == NULL) {
						nlp->nlp_ptx[channelno].q_last =
						    (uint8_t *)prev;
					}

					if (prev == NULL) {
						nlp->nlp_ptx[channelno].
						    q_first = (uint8_t *)next;
					} else {
						prev->next = next;
					}

					iocbq->next = NULL;
					nlp->nlp_ptx[channelno].q_cnt--;

					/* Add this iocb to our local */
					/* timout queue */

					/*
					 * This way we don't hold the TX_CHANNEL
					 * lock too long
					 */

					if (tmo.q_first) {
						((IOCBQ *)tmo.q_last)->next =
						    iocbq;
						tmo.q_last =
						    (uint8_t *)iocbq;
						tmo.q_cnt++;
					} else {
						tmo.q_first =
						    (uint8_t *)iocbq;
						tmo.q_last =
						    (uint8_t *)iocbq;
						tmo.q_cnt = 1;
					}
					iocbq->next = NULL;

				} else {
					prev = iocbq;
				}

				iocbq = next;

			} /* while (iocbq) */


			/* Scan the node's tx queue */
			prev = NULL;
			iocbq = (IOCBQ *)nlp->nlp_tx[channelno].q_first;

			while (iocbq) {
				next = (IOCBQ *)iocbq->next;
				iocb = &iocbq->iocb;
				sbp = (emlxs_buf_t *)iocbq->sbp;

				/* Check if iocb has timed out */
				if (sbp && hba->timer_tics >= sbp->ticks) {
					/* iocb timed out, now deque it */
					if (next == NULL) {
						nlp->nlp_tx[channelno].q_last =
						    (uint8_t *)prev;
					}

					if (prev == NULL) {
						nlp->nlp_tx[channelno].q_first =
						    (uint8_t *)next;
					} else {
						prev->next = next;
					}

					iocbq->next = NULL;
					nlp->nlp_tx[channelno].q_cnt--;

					/* Add this iocb to our local */
					/* timout queue */

					/*
					 * This way we don't hold the TX_CHANNEL
					 * lock too long
					 */

					if (tmo.q_first) {
						((IOCBQ *)tmo.q_last)->next =
						    iocbq;
						tmo.q_last =
						    (uint8_t *)iocbq;
						tmo.q_cnt++;
					} else {
						tmo.q_first =
						    (uint8_t *)iocbq;
						tmo.q_last =
						    (uint8_t *)iocbq;
						tmo.q_cnt = 1;
					}
					iocbq->next = NULL;

				} else {
					prev = iocbq;
				}

				iocbq = next;

			} /* while (iocbq) */

			/* The node queue is circular; stop at the tail */
			if (nlp == (NODELIST *)cp->nodeq.q_last) {
				nlp = NULL;
			} else {
				nlp = nlp->nlp_next[channelno];
			}

		} /* while (nlp) */

	} /* end of for */

	/* Now cleanup the iocb's */
	iocbq = (IOCBQ *)tmo.q_first;
	while (iocbq) {
		/* Free the IoTag and the bmp */
		iocb = &iocbq->iocb;
		channelno = ((CHANNEL *)iocbq->channel)->channelno;
		sbp = iocbq->sbp;
		if (sbp && (sbp != STALE_PACKET)) {
			if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
				emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
			} else {
				(void) emlxs_unregister_pkt(
				    (CHANNEL *)iocbq->channel,
				    iocb->ULPIOTAG, 0);
			}

			mutex_enter(&sbp->mtx);
			sbp->pkt_flags |= PACKET_IN_TIMEOUT;
			mutex_exit(&sbp->mtx);
		}

		iocbq = (IOCBQ *)iocbq->next;

	} /* end of while */

	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

	/* Now complete the transmit timeouts outside the locks */
	iocbq = (IOCBQ *)tmo.q_first;
	while (iocbq) {
		/* Save the next iocbq for now */
		next = (IOCBQ *)iocbq->next;

		/* Unlink this iocbq */
		iocbq->next = NULL;

		/* Get the pkt */
		sbp = (emlxs_buf_t *)iocbq->sbp;

		if (sbp) {
			/* Warning: Some FCT sbp's don't have */
			/* fc_packet objects */
			pkt = PRIV2PKT(sbp);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
			    "TXQ abort: sbp=%p iotag=%d tmo=%d", sbp,
			    sbp->iotag, (pkt) ? pkt->pkt_timeout : 0);

			if (hba->state >= FC_LINK_UP) {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_ABORT_TIMEOUT, 1);
			} else {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_LINK_DOWN, 1);
			}

		}

		iocbq = next;

	} /* end of while */



	/* Now check the chip */
	bzero((void *)&abort, sizeof (Q));

	/* Check the HBA for outstanding IOs */
	rc = 0;
	mutex_enter(&EMLXS_FCTAB_LOCK);
	for (iotag = 1; iotag < hba->max_iotag; iotag++) {
		sbp = hba->fc_table[iotag];

		if (!sbp || sbp == STALE_PACKET) {
			continue;
		}

		/* Check if IO is valid */
		if (!(sbp->pkt_flags & PACKET_VALID) ||
		    (sbp->pkt_flags & (PACKET_ULP_OWNED|
		    PACKET_COMPLETED|PACKET_IN_COMPLETION))) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
			    "timer_check_pkts: Invalid IO found. iotag=%d",
			    iotag);

			/* Scrub the stale entry from the table */
			hba->fc_table[iotag] = STALE_PACKET;
			hba->io_count--;
			continue;
		}

		if ((sbp->pkt_flags & PACKET_IN_CHIPQ) &&
		    (hba->timer_tics >= sbp->ticks)) {
			rc = emlxs_pkt_chip_timeout(sbp->iocbq.port,
			    sbp, &abort, flag);

			/* Nonzero rc means a reset was requested; stop */
			if (rc) {
				break;
			}
		}
	}
	mutex_exit(&EMLXS_FCTAB_LOCK);

	/* Now put the iocb's on the tx queue */
	iocbq = (IOCBQ *)abort.q_first;
	while (iocbq) {
		/* Save the next iocbq for now */
		next = (IOCBQ *)iocbq->next;

		/* Unlink this iocbq */
		iocbq->next = NULL;

		/* Send this iocbq */
		emlxs_tx_put(iocbq, 1);

		iocbq = next;
	}

	/* Now trigger IO channel service to send these abort iocbq */
	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		if (!flag[channelno]) {
			continue;
		}
		cp = &hba->chan[channelno];

		EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, 0);
	}

	if (rc == 1) {
		mutex_enter(&EMLXS_PORT_LOCK);
		/* If a link reset or port reset is already requested, exit */
		if (!(hba->reset_request & (FC_LINK_RESET | FC_PORT_RESET))) {
			hba->reset_request |= FC_LINK_RESET;
			mutex_exit(&EMLXS_PORT_LOCK);
			/* Spawn a thread to reset the link */
			emlxs_thread_spawn(hba, emlxs_reset_link_thread, NULL,
			    NULL);
			goto exit;
		}
		mutex_exit(&EMLXS_PORT_LOCK);
	} else if (rc == 2) {
		mutex_enter(&EMLXS_PORT_LOCK);
		/* If a port reset is already requested, exit */
		if (!(hba->reset_request & FC_PORT_RESET)) {
			hba->reset_request |= FC_PORT_RESET;
			mutex_exit(&EMLXS_PORT_LOCK);
			/* Spawn a thread to reset the adapter */
			emlxs_thread_spawn(hba, emlxs_restart_thread, NULL,
			    NULL);
			goto exit;
		}
		mutex_exit(&EMLXS_PORT_LOCK);
	}

exit:
	return (rc);

} /* emlxs_timer_check_pkts() */
604 604
605 605
/*
 * Check each IO channel for a service timeout.  A channel whose
 * timeout tick has passed while work remains queued (cp->nodeq) gets
 * its per-channel flag set so IOCB servicing is triggered below, and
 * its timeout is pushed out another 10 ticks.  A watchdog message is
 * logged when this happens with the link up, since stalled work with
 * a healthy link is unexpected.
 */
static void
emlxs_timer_check_channels(emlxs_hba_t *hba, uint8_t *flag)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	int32_t channelno;
	CHANNEL *cp;
	uint32_t logit;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}

	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		cp = &hba->chan[channelno];

		logit = 0;

		/* Check for channel timeout now */
		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
		if (cp->timeout && (hba->timer_tics >= cp->timeout)) {
			/* Check if there is work to do on channel and */
			/* the link is still up */
			if (cp->nodeq.q_first) {
				flag[channelno] = 1;
				cp->timeout = hba->timer_tics + 10;

				if (hba->state >= FC_LINK_UP) {
					logit = 1;
				}
			} else {
				/* No pending work; disarm the timeout */
				cp->timeout = 0;
			}
		}
		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

		/* Log outside the lock */
		if (logit) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_chan_watchdog_msg,
			    "IO Channel %d cnt=%d,%d",
			    channelno,
			    hba->channel_tx_count,
			    hba->io_count);
		}

		/*
		 * If IO channel flag is set, request iocb servicing
		 * here to send any iocb's that may still be queued
		 */
		if (flag[channelno]) {
			EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, 0);
		}
	}

	return;

} /* emlxs_timer_check_channels() */
663 663
664 664
/*
 * Scan every node in the port's hash table for expired per-channel
 * gate timers and expired forced-RSCN timers.  emlxs_node_timeout()
 * cannot be called with node_rwlock held, so the table is unlocked
 * first and then rescanned from the top after each timeout call,
 * until a full pass finds no expired node timer.
 */
static void
emlxs_timer_check_nodes(emlxs_port_t *port, uint8_t *flag)
{
	emlxs_hba_t *hba = HBA;
	uint32_t found;
	uint32_t i;
	NODELIST *nlp;
	int32_t channelno;

	for (;;) {
		/* Check node gate flag for expiration */
		found = 0;

		/*
		 * We need to lock, scan, and unlock because we can't hold the
		 * lock while we call node_open
		 */
		rw_enter(&port->node_rwlock, RW_READER);
		for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
			nlp = port->node_table[i];
			while (nlp != NULL) {
#ifdef NODE_THROTTLE_SUPPORT
				/* Check io_active count (Safety net) */
				if (nlp->io_active & 0x80000000) {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_sli_debug_msg,
					    "timer_check_nodes: did=%06x "
					    "io_active=0x%x. Reset to zero.",
					    nlp->nlp_DID, nlp->io_active);

					nlp->io_active = 0;
				}
#endif /* NODE_THROTTLE_SUPPORT */

				for (channelno = 0;
				    channelno < hba->chan_count;
				    channelno++) {
					/* Check if the node timer is active */
					/* and if timer has expired */
					if (nlp->nlp_tics[channelno] &&
					    (hba->timer_tics >=
					    nlp->nlp_tics[channelno])) {
						/* If so, set the flag and */
						/* break out */
						found = 1;
						flag[channelno] = 1;
						break;
					}
				}

				if (nlp->nlp_force_rscn &&
				    (hba->timer_tics >= nlp->nlp_force_rscn)) {
					nlp->nlp_force_rscn = 0;
					/*
					 * Generate an RSCN to
					 * wakeup ULP
					 */
					(void) emlxs_generate_rscn(port,
					    nlp->nlp_DID);
				}

				if (found) {
					break;
				}

				nlp = nlp->nlp_list_next;
			}

			if (found) {
				break;
			}

		}
		rw_exit(&port->node_rwlock);

		/* Full pass with no expired timers: done */
		if (!found) {
			break;
		}

		/* Handle the expired node timer outside the rwlock */
		emlxs_node_timeout(port, nlp, channelno);
	}

} /* emlxs_timer_check_nodes() */
748 748
749 749
750 750 static void
751 751 emlxs_timer_check_loopback(emlxs_hba_t *hba)
752 752 {
753 753 emlxs_port_t *port = &PPORT;
754 754 emlxs_config_t *cfg = &CFG;
755 755 int32_t reset = 0;
756 756
757 757 if (!cfg[CFG_TIMEOUT_ENABLE].current) {
758 758 return;
759 759 }
760 760
761 761 /* Check the loopback timer for expiration */
762 762 mutex_enter(&EMLXS_PORT_LOCK);
763 763
764 764 if (!hba->loopback_tics || (hba->timer_tics < hba->loopback_tics)) {
765 765 mutex_exit(&EMLXS_PORT_LOCK);
766 766 return;
767 767 }
768 768
769 769 hba->loopback_tics = 0;
770 770
771 771 if (hba->flag & FC_LOOPBACK_MODE) {
772 772 reset = 1;
773 773 }
774 774
775 775 mutex_exit(&EMLXS_PORT_LOCK);
776 776
777 777 if (reset) {
778 778 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_dfc_debug_msg,
779 779 "LOOPBACK_MODE: Expired. Resetting...");
780 780 (void) emlxs_reset(port, FC_FCA_LINK_RESET);
781 781 }
782 782
783 783 return;
784 784
785 785 } /* emlxs_timer_check_loopback() */
786 786
787 787
/*
 * Complete deferred linkup processing.  Once the linkup timer expires
 * (and, on SLI4-capable adapters, the mailbox queue has drained),
 * make the emlxs_port_online() callback -- but only if the link is
 * still up when the timer is consumed.
 */
static void
emlxs_timer_check_linkup(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t linkup;

	/* Check if all mbox commands from previous activity are processed */
	if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
		mutex_enter(&EMLXS_MBOX_LOCK);
		if (hba->mbox_queue.q_first) {
			/* Mailbox work still pending; try again next tick */
			mutex_exit(&EMLXS_MBOX_LOCK);
			return;
		}
		mutex_exit(&EMLXS_MBOX_LOCK);
	}

	/* Check the linkup timer for expiration */
	mutex_enter(&EMLXS_PORT_LOCK);
	linkup = 0;
	if (hba->linkup_timer && (hba->timer_tics >= hba->linkup_timer)) {
		hba->linkup_timer = 0;

		/* Make sure link is still ready */
		if (hba->state >= FC_LINK_UP) {
			linkup = 1;
		}
	}
	mutex_exit(&EMLXS_PORT_LOCK);

	/* Make the linkup callback (outside the port lock) */
	if (linkup) {
		emlxs_port_online(port);
	}
	return;

} /* emlxs_timer_check_linkup() */
824 824
825 825
/*
 * Adapter heartbeat check, run every 5 ticks.  If no adapter
 * interrupt has been observed since the last check (heartbeat_flag is
 * clear) and neither a mailbox command nor a previous heartbeat is
 * outstanding, issue a HEARTBEAT mailbox command; the mailbox
 * timeout machinery will then detect a non-responsive adapter.
 */
static void
emlxs_timer_check_heartbeat(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MAILBOXQ *mbq;
	emlxs_config_t *cfg = &CFG;
	int rc;

	if (!cfg[CFG_HEARTBEAT_ENABLE].current) {
		return;
	}

	if (hba->timer_tics < hba->heartbeat_timer) {
		return;
	}

	hba->heartbeat_timer = hba->timer_tics + 5;

	/* Return if adapter interrupts have occurred */
	if (hba->heartbeat_flag) {
		hba->heartbeat_flag = 0;
		return;
	}
	/* No adapter interrupts have occured for 5 seconds now */

	/* Return if mailbox is busy */
	/* This means the mailbox timer routine is watching for problems */
	if (hba->mbox_timer) {
		return;
	}

	/* Return if heartbeat is still outstanding */
	if (hba->heartbeat_active) {
		return;
	}

	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX)) == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Unable to allocate heartbeat mailbox.");
		return;
	}

	emlxs_mb_heartbeat(hba, mbq);
	hba->heartbeat_active = 1;

	rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_NOWAIT, 0);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		/* Command was not queued; release the mailbox */
		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
	}

	return;

} /* emlxs_timer_check_heartbeat() */
879 879
880 880
881 881 static void
882 882 emlxs_timer_check_fw_update(emlxs_hba_t *hba)
883 883 {
884 884 emlxs_port_t *port = &PPORT;
885 885 char msgbuf[128];
886 886
887 887 if (!(hba->fw_flag & FW_UPDATE_NEEDED)) {
888 888 hba->fw_timer = 0;
889 889 return;
890 890 }
891 891
892 892 if (hba->timer_tics < hba->fw_timer) {
893 893 return;
894 894 }
895 895
896 896 if (port->mode == MODE_TARGET) {
897 897 (void) strncpy(msgbuf,
898 898 "To trigger an update, a manual HBA or link reset "
899 899 "using emlxadm is required.",
900 900 (sizeof (msgbuf)-1));
901 901 } else {
902 902 (void) strncpy(msgbuf,
903 903 "To trigger an update, a manual HBA or link reset "
904 904 "using luxadm, fcadm, or emlxadm is required.",
905 905 (sizeof (msgbuf)-1));
906 906 }
907 907
908 908 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fw_update_msg, msgbuf);
909 909
910 910 /* Force message to console */
911 911 cmn_err(CE_WARN,
912 912 "^%s%d: Firmware update required.\n\t(%s)\n",
913 913 DRIVER_NAME, hba->ddiinst, msgbuf);
914 914
915 915 /* Set timer for 24 hours */
916 916 hba->fw_timer = hba->timer_tics + (60 * 60 * 24);
917 917
918 918 return;
919 919
920 920 } /* emlxs_timer_check_fw_update() */
921 921
922 922
/*
 * Handle the discovery (tape/FCP2) timeout.  While the discovery
 * timer has expired with the link up, flush (unregister) FCP2 nodes
 * whose FCP channel is still closed, one per loop iteration.  When no
 * such node remains, send CLEAR_LA (SLI3 and earlier only) to resume
 * normal link processing and clear the discovery timer.
 */
static void
emlxs_timer_check_discovery(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg = &CFG;
	int32_t send_clear_la;
	uint32_t found;
	uint32_t i;
	NODELIST *nlp;
	MAILBOXQ *mbox;
	int rc;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}

	/* Check the discovery timer for expiration */
	send_clear_la = 0;
	mutex_enter(&EMLXS_PORT_LOCK);
	while (hba->discovery_timer &&
	    (hba->timer_tics >= hba->discovery_timer) &&
	    (hba->state == FC_LINK_UP)) {
		send_clear_la = 1;

		/* Perform a flush on fcp2 nodes that are still closed */
		found = 0;
		rw_enter(&port->node_rwlock, RW_READER);
		for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
			nlp = port->node_table[i];
			while (nlp != NULL) {
				if ((nlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
				    (nlp->nlp_flag[hba->channel_fcp] &
				    NLP_CLOSED)) {
					found = 1;
					break;

				}
				nlp = nlp->nlp_list_next;
			}

			if (found) {
				break;
			}
		}
		rw_exit(&port->node_rwlock);

		if (!found) {
			break;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_missing_msg,
		    "FCP2 device (did=%06x) missing. Flushing...",
		    nlp->nlp_DID);

		/* Unregister outside the port lock */
		mutex_exit(&EMLXS_PORT_LOCK);

		(void) EMLXS_SLI_UNREG_NODE(port, nlp, NULL, NULL, NULL);

		mutex_enter(&EMLXS_PORT_LOCK);

	}
	mutex_exit(&EMLXS_PORT_LOCK);

	/* Try to send clear link attention, if needed */
	if ((hba->sli_mode < EMLXS_HBA_SLI4_MODE) && (send_clear_la == 1) &&
	    (mbox = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
		mutex_enter(&EMLXS_PORT_LOCK);

		/*
		 * If state is not FC_LINK_UP, then either the link has gone
		 * down or a FC_CLEAR_LA has already been issued
		 */
		if (hba->state != FC_LINK_UP) {
			mutex_exit(&EMLXS_PORT_LOCK);
			emlxs_mem_put(hba, MEM_MBOX, (void *)mbox);
		} else {
			/* Change state and clear discovery timer */
			EMLXS_STATE_CHANGE_LOCKED(hba, FC_CLEAR_LA);

			hba->discovery_timer = 0;

			mutex_exit(&EMLXS_PORT_LOCK);

			/* Prepare and send the CLEAR_LA command */
			emlxs_mb_clear_la(hba, mbox);

			rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbox, MBX_NOWAIT, 0);
			if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
				/* Not queued; release the mailbox */
				emlxs_mem_put(hba, MEM_MBOX, (void *)mbox);
			}
		}
	}

	return;

} /* emlxs_timer_check_discovery() */
1019 1019
1020 1020
/*
 * Handle expiration of the clean-address delay.  When the timer fires
 * (or was never armed) and a deferred packet is pending in
 * port->clean_address_sbp, attempt to re-register the fabric DID.  If
 * registration is started, the packet completes later when login
 * finishes; otherwise it is failed here with IOERR_NO_RESOURCES.
 */
static void
emlxs_timer_check_clean_address(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	emlxs_buf_t *sbp;

	/* Timer armed but not yet expired: nothing to do */
	if (port->clean_address_timer &&
	    (hba->timer_tics < port->clean_address_timer)) {
		return;
	}
	port->clean_address_timer = 0;

	/* Claim the deferred packet, if any */
	sbp = port->clean_address_sbp;
	if (!sbp) {
		return;
	}
	port->clean_address_sbp = 0;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_completion_msg,
	    "Clean Address timeout: sid=%x prev=%x RATOV %d",
	    port->did, port->prev_did, hba->fc_ratov);

	if (EMLXS_SLI_REG_DID(port, FABRIC_DID, &port->fabric_sparam,
	    sbp, NULL, NULL) == 0) {
		/* Deferred completion of this pkt until */
		/* login is complete */
		return;
	}

	/* Registration could not be started; fail the packet now */
	emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
	    IOERR_NO_RESOURCES, 1);

	return;

} /* emlxs_timer_check_clean_address() */
1056 1056
1057 1057 extern void
1058 1058 emlxs_timer_cancel_clean_address(emlxs_port_t *port)
1059 1059 {
1060 1060 emlxs_hba_t *hba = HBA;
1061 1061 emlxs_buf_t *sbp;
1062 1062
1063 1063 port->clean_address_timer = 0;
1064 1064 sbp = port->clean_address_sbp;
1065 1065 if (!sbp) {
1066 1066 return;
1067 1067 }
1068 1068 port->clean_address_sbp = 0;
1069 1069
1070 1070 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_completion_msg,
1071 1071 "Clean Address cancel: sid=%x prev=%x RATOV %d",
1072 1072 port->did, port->prev_did, hba->fc_ratov);
1073 1073
1074 1074 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
1075 1075 IOERR_LINK_DOWN, 1);
1076 1076
1077 1077 } /* emlxs_timer_cancel_clean_address() */
1078 1078
/*
 * Scan all unsolicited buffer pools, every EMLXS_UB_PERIOD ticks, for
 * buffers held by the ULP longer than their timeout.  A stale buffer
 * is flagged EMLXS_UB_TIMEOUT, logged, and given a longer timeout
 * (up to 10 min, then 30 min, then doubling) so the same buffer is
 * not reported on every pass.
 */
static void
emlxs_timer_check_ub(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	emlxs_unsol_buf_t *ulistp;
	fc_unsol_buf_t *ubp;
	emlxs_ub_priv_t *ub_priv;
	uint32_t i;

	/* Rate-limit this scan */
	if (port->ub_timer > hba->timer_tics) {
		return;
	}

	port->ub_timer = hba->timer_tics + EMLXS_UB_PERIOD;

	/* Check the unsolicited buffers */
	mutex_enter(&EMLXS_UB_LOCK);

	ulistp = port->ub_pool;
	while (ulistp) {
		/* Check buffers in this pool */
		for (i = 0; i < ulistp->pool_nentries; i++) {
			ubp = (fc_unsol_buf_t *)&ulistp->fc_ubufs[i];
			ub_priv = ubp->ub_fca_private;

			/* Only buffers currently held by the ULP matter */
			if (!(ub_priv->flags & EMLXS_UB_IN_USE)) {
				continue;
			}

			/* If buffer has timed out, print message and */
			/* increase timeout */
			if ((ub_priv->time + ub_priv->timeout) <=
			    hba->timer_tics) {
				ub_priv->flags |= EMLXS_UB_TIMEOUT;

				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_sfs_debug_msg,
				    "Stale UB buffer detected (%d mins): "
				    "buffer=%p (%x,%x,%x,%x)",
				    (ub_priv->timeout / 60), ubp,
				    ubp->ub_frame.type, ubp->ub_frame.s_id,
				    ubp->ub_frame.ox_id, ubp->ub_frame.rx_id);

				/* Increase timeout period */

				/* If timeout was 5 mins or less, */
				/* increase it to 10 mins */
				if (ub_priv->timeout <= (5 * 60)) {
					ub_priv->timeout = (10 * 60);
				}
				/* If timeout was 10 mins or less, */
				/* increase it to 30 mins */
				else if (ub_priv->timeout <= (10 * 60)) {
					ub_priv->timeout = (30 * 60);
				}
				/* Otherwise double it. */
				else {
					ub_priv->timeout *= 2;
				}
			}
		}

		ulistp = ulistp->pool_next;
	}

	mutex_exit(&EMLXS_UB_LOCK);

	return;

} /* emlxs_timer_check_ub() */
1149 1149
1150 1150
/*
 * Escalating recovery for a packet that has timed out on the chip.
 * EMLXS_FCTAB_LOCK must be held to call this.
 *
 * Each call advances sbp->abort_attempts one step:
 *   attempt 0:  abort the XRI (link up) or close it (link down)
 *   attempt 1:  force-close the XRI
 *   attempt 2:  give up on the exchange; request a link reset
 *   attempt 3+: request a full adapter reset
 *
 * Any abort/close IOCB built here is appended to the caller's abortq
 * for later submission, and flag[] is marked for the channel so the
 * caller knows work was queued.
 *
 * Returns: 0 = abort/close queued, 1 = caller should reset the link,
 * 2 = caller should reset the adapter.
 */
static uint32_t
emlxs_pkt_chip_timeout(emlxs_port_t *port, emlxs_buf_t *sbp, Q *abortq,
    uint8_t *flag)
{
	emlxs_hba_t *hba = HBA;
	CHANNEL *cp = (CHANNEL *)sbp->channel;
	IOCBQ *iocbq = NULL;
	fc_packet_t *pkt;
	uint32_t rc = 0;

	mutex_enter(&sbp->mtx);

	/* Warning: Some FCT sbp's don't have fc_packet objects */
	pkt = PRIV2PKT(sbp);

	switch (sbp->abort_attempts) {
	case 0:

		/* Create the abort IOCB */
		if (hba->state >= FC_LINK_UP) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
			    "chipQ: 1:Aborting. sbp=%p iotag=%d tmo=%d "
			    "flags=%x",
			    sbp, sbp->iotag,
			    (pkt) ? pkt->pkt_timeout : 0, sbp->pkt_flags);

			iocbq =
			    emlxs_create_abort_xri_cn(port, sbp->node,
			    sbp->iotag, cp, sbp->class, ABORT_TYPE_ABTS);

			/* The adapter will make 2 attempts to send ABTS */
			/* with 2*ratov timeout each time */
			sbp->ticks =
			    hba->timer_tics + (4 * hba->fc_ratov) + 10;
		} else {
			/* Link is down: ABTS cannot be delivered, so */
			/* just close the exchange locally */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
			    "chipQ: 1:Closing. sbp=%p iotag=%d tmo=%d "
			    "flags=%x",
			    sbp, sbp->iotag,
			    (pkt) ? pkt->pkt_timeout : 0, sbp->pkt_flags);

			iocbq =
			    emlxs_create_close_xri_cn(port, sbp->node,
			    sbp->iotag, cp);

			sbp->ticks = hba->timer_tics + 30;
		}

		/* set the flags */
		sbp->pkt_flags |= (PACKET_IN_TIMEOUT | PACKET_XRI_CLOSED);

		flag[cp->channelno] = 1;
		rc = 0;

		break;

	case 1:

		/* Abort did not complete in time: force-close the XRI */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
		    "chipQ: 2:Closing. sbp=%p iotag=%d", sbp, sbp->iotag);

		iocbq =
		    emlxs_create_close_xri_cn(port, sbp->node, sbp->iotag,
		    cp);

		sbp->ticks = hba->timer_tics + 30;

		flag[cp->channelno] = 1;
		rc = 0;

		break;

	case 2:

		/* Close also failed: escalate to a link reset */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
		    "chipQ: 3:Resetting link. sbp=%p iotag=%d", sbp,
		    sbp->iotag);

		sbp->ticks = hba->timer_tics + 60;
		rc = 1;

		break;

	default:

		/* Nothing else worked: escalate to an adapter reset */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
		    "chipQ: %d:Resetting adapter. sbp=%p iotag=%d",
		    sbp->abort_attempts, sbp, sbp->iotag);

		sbp->ticks = hba->timer_tics + 60;
		rc = 2;

		break;
	}

	sbp->abort_attempts++;
	mutex_exit(&sbp->mtx);

	/* Append any abort/close IOCB to the caller's queue */
	if (iocbq) {
		if (abortq->q_first) {
			((IOCBQ *)abortq->q_last)->next = iocbq;
			abortq->q_last = (uint8_t *)iocbq;
			abortq->q_cnt++;
		} else {
			abortq->q_first = (uint8_t *)iocbq;
			abortq->q_last = (uint8_t *)iocbq;
			abortq->q_cnt = 1;
		}
		iocbq->next = NULL;
	}

	return (rc);

} /* emlxs_pkt_chip_timeout() */
1266 1266
1267 1267
1268 1268 static void
1269 1269 emlxs_timer_check_pools(emlxs_hba_t *hba)
1270 1270 {
1271 1271 uint32_t i;
1272 1272 MEMSEG *seg;
1273 1273 emlxs_config_t *cfg = &CFG;
1274 1274
1275 1275 if (cfg[CFG_MEM_DYNAMIC].current == 0) {
1276 1276 return;
1277 1277 }
1278 1278
1279 1279 if (hba->mem_timer > hba->timer_tics) {
1280 1280 return;
1281 1281 }
1282 1282
1283 1283 hba->mem_timer = hba->timer_tics + cfg[CFG_MEM_DYNAMIC].current;
1284 1284
1285 1285 seg = hba->memseg;
1286 1286 for (i = 0; i < FC_MAX_SEG; i++, seg++) {
1287 1287 if (seg->fc_memflag & FC_MEMSEG_DYNAMIC) {
1288 1288 emlxs_mem_pool_clean(hba, seg);
1289 1289 }
1290 1290 }
1291 1291
1292 1292 #ifdef SFCT_SUPPORT
1293 1293 {
1294 1294 uint32_t j;
1295 1295 emlxs_port_t *port;
1296 1296
1297 1297 for (i = 0; i < MAX_VPORTS; i++) {
1298 1298 port = &VPORT(i);
1299 1299
1300 1300 if (!(port->flag & EMLXS_PORT_BOUND) ||
1301 1301 !(port->flag & EMLXS_TGT_ENABLED) ||
1302 1302 !port->fct_memseg) {
1303 1303 continue;
1304 1304 }
1305 1305
1306 1306 seg = port->fct_memseg;
1307 1307 for (j = 0; j < port->fct_memseg_cnt; j++, seg++) {
1308 1308 if (seg->fc_memflag & FC_MEMSEG_DYNAMIC) {
1309 1309 emlxs_mem_pool_clean(hba, seg);
1310 1310 }
1311 1311 }
1312 1312 }
1313 1313 }
1314 1314 #endif /* SFCT_SUPPORT */
1315 1315
1316 1316 return;
1317 1317
1318 1318 } /* emlxs_timer_check_pools() */
1319 1319
1320 1320
1321 1321 #ifdef TX_WATCHDOG
1322 1322
1323 1323 static void
1324 1324 emlxs_tx_watchdog(emlxs_hba_t *hba)
1325 1325 {
1326 1326 emlxs_port_t *port = &PPORT;
1327 1327 NODELIST *nlp;
1328 1328 uint32_t channelno;
1329 1329 CHANNEL *cp;
1330 1330 IOCBQ *next;
1331 1331 IOCBQ *iocbq;
1332 1332 IOCB *iocb;
1333 1333 uint32_t found;
1334 1334 MATCHMAP *bmp;
1335 1335 Q abort;
1336 1336 uint32_t iotag;
1337 1337 emlxs_buf_t *sbp;
1338 1338 fc_packet_t *pkt = NULL;
1339 1339 uint32_t cmd;
1340 1340 uint32_t did;
1341 1341
1342 1342 bzero((void *)&abort, sizeof (Q));
1343 1343
1344 1344 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
1345 1345
1346 1346 mutex_enter(&EMLXS_FCTAB_LOCK);
1347 1347 for (iotag = 1; iotag < hba->max_iotag; iotag++) {
1348 1348 sbp = hba->fc_table[iotag];
1349 1349 if (sbp && (sbp != STALE_PACKET) &&
1350 1350 (sbp->pkt_flags & PACKET_IN_TXQ)) {
1351 1351 nlp = sbp->node;
1352 1352 iocbq = &sbp->iocbq;
1353 1353
1354 1354 channelno = (CHANNEL *)(sbp->channel)->channelno;
1355 1355 if (iocbq->flag & IOCB_PRIORITY) {
1356 1356 iocbq =
1357 1357 (IOCBQ *)nlp->nlp_ptx[channelno].
1358 1358 q_first;
1359 1359 } else {
1360 1360 iocbq =
1361 1361 (IOCBQ *)nlp->nlp_tx[channelno].
1362 1362 q_first;
1363 1363 }
1364 1364
1365 1365 /* Find a matching entry */
1366 1366 found = 0;
1367 1367 while (iocbq) {
1368 1368 if (iocbq == &sbp->iocbq) {
1369 1369 found = 1;
1370 1370 break;
1371 1371 }
1372 1372
1373 1373 iocbq = (IOCBQ *)iocbq->next;
1374 1374 }
1375 1375
1376 1376 if (!found) {
1377 1377 if (!(sbp->pkt_flags & PACKET_STALE)) {
1378 1378 mutex_enter(&sbp->mtx);
1379 1379 sbp->pkt_flags |=
1380 1380 PACKET_STALE;
1381 1381 mutex_exit(&sbp->mtx);
1382 1382 } else {
1383 1383 if (abort.q_first == 0) {
1384 1384 abort.q_first =
1385 1385 &sbp->iocbq;
1386 1386 } else {
1387 1387 ((IOCBQ *)abort.
1388 1388 q_last)->next =
1389 1389 &sbp->iocbq;
1390 1390 }
1391 1391
1392 1392 abort.q_last = &sbp->iocbq;
1393 1393 abort.q_cnt++;
1394 1394 }
1395 1395
1396 1396 } else {
1397 1397 if ((sbp->pkt_flags & PACKET_STALE)) {
1398 1398 mutex_enter(&sbp->mtx);
1399 1399 sbp->pkt_flags &=
1400 1400 ~PACKET_STALE;
1401 1401 mutex_exit(&sbp->mtx);
1402 1402 }
1403 1403 }
1404 1404 }
1405 1405 }
1406 1406 mutex_exit(&EMLXS_FCTAB_LOCK);
1407 1407
1408 1408 iocbq = (IOCBQ *)abort.q_first;
1409 1409 while (iocbq) {
1410 1410 next = (IOCBQ *)iocbq->next;
1411 1411 iocbq->next = NULL;
1412 1412 sbp = (emlxs_buf_t *)iocbq->sbp;
1413 1413
1414 1414 pkt = PRIV2PKT(sbp);
1415 1415 if (pkt) {
1416 1416 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
1417 1417 cmd = *((uint32_t *)pkt->pkt_cmd);
1418 1418 cmd = LE_SWAP32(cmd);
1419 1419 }
1420 1420
1421 1421
1422 1422 emlxs_tx_put(iocbq, 0);
1423 1423
1424 1424 iocbq = next;
1425 1425
1426 1426 } /* end of while */
1427 1427
1428 1428 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
1429 1429
1430 1430 return;
1431 1431
1432 1432 } /* emlxs_tx_watchdog() */
1433 1433
1434 1434 #endif /* TX_WATCHDOG */
1435 1435
1436 1436
1437 1437 #ifdef DHCHAP_SUPPORT
1438 1438
1439 1439 static void
1440 1440 emlxs_timer_check_dhchap(emlxs_port_t *port)
1441 1441 {
1442 1442 emlxs_hba_t *hba = HBA;
1443 1443 uint32_t i;
1444 1444 NODELIST *ndlp = NULL;
1445 1445
1446 1446 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
1447 1447 ndlp = port->node_table[i];
1448 1448
1449 1449 if (!ndlp) {
1450 1450 continue;
1451 1451 }
1452 1452
1453 1453 /* Check authentication response timeout */
1454 1454 if (ndlp->node_dhc.nlp_authrsp_tmo &&
1455 1455 (hba->timer_tics >= ndlp->node_dhc.nlp_authrsp_tmo)) {
1456 1456 /* Trigger authresp timeout handler */
1457 1457 (void) emlxs_dhc_authrsp_timeout(port, ndlp, NULL);
1458 1458 }
1459 1459
1460 1460 /* Check reauthentication timeout */
1461 1461 if (ndlp->node_dhc.nlp_reauth_tmo &&
1462 1462 (hba->timer_tics >= ndlp->node_dhc.nlp_reauth_tmo)) {
1463 1463 /* Trigger reauth timeout handler */
1464 1464 emlxs_dhc_reauth_timeout(port, NULL, ndlp);
1465 1465 }
1466 1466 }
1467 1467 return;
1468 1468
1469 1469 } /* emlxs_timer_check_dhchap */
1470 1470
1471 1471 #endif /* DHCHAP_SUPPORT */
↓ open down ↓ |
1218 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX