1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2010 Emulex. All rights reserved. 24 * Use is subject to license terms. 25 * Copyright (c) 2011 Bayard G. Bell. All rights reserved. 26 * Copyright 2014 Nexenta Systems, Inc. All rights reserved. 27 */ 28 29 30 #define DEF_ICFG 1 31 32 #include <emlxs.h> 33 #include <emlxs_version.h> 34 35 36 char emlxs_revision[] = EMLXS_REVISION; 37 char emlxs_version[] = EMLXS_VERSION; 38 char emlxs_name[] = EMLXS_NAME; 39 char emlxs_label[] = EMLXS_LABEL; 40 41 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */ 42 EMLXS_MSG_DEF(EMLXS_SOLARIS_C); 43 44 #ifdef MENLO_SUPPORT 45 static int32_t emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp); 46 #endif /* MENLO_SUPPORT */ 47 48 static void emlxs_fca_attach(emlxs_hba_t *hba); 49 static void emlxs_fca_detach(emlxs_hba_t *hba); 50 static void emlxs_drv_banner(emlxs_hba_t *hba); 51 52 static int32_t emlxs_get_props(emlxs_hba_t *hba); 53 static int32_t emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp, 54 uint32_t *pkt_flags); 55 static int32_t emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp); 56 static int32_t emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp); 57 static int32_t emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp); 58 static int32_t emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp); 59 static int32_t emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp); 60 static int32_t emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp); 61 static int32_t emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp); 62 static uint32_t emlxs_add_instance(int32_t ddiinst); 63 static void emlxs_iodone(emlxs_buf_t *sbp); 64 static int emlxs_pm_lower_power(dev_info_t *dip); 65 static int emlxs_pm_raise_power(dev_info_t *dip); 66 static void emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag, 67 uint32_t failed); 68 static void emlxs_iodone_server(void *arg1, void *arg2, void *arg3); 69 static uint32_t emlxs_integrity_check(emlxs_hba_t *hba); 70 static uint32_t emlxs_test(emlxs_hba_t *hba, uint32_t test_code, 71 uint32_t args, uint32_t *arg); 72 73 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4) 74 static void emlxs_read_vport_prop(emlxs_hba_t *hba); 75 #endif /* EMLXS_MODREV3 || EMLXS_MODREV4 */ 76 77 78 79 extern int 80 emlxs_msiid_to_chan(emlxs_hba_t *hba, int msi_id); 81 extern int 82 emlxs_select_msiid(emlxs_hba_t *hba); 83 84 /* 85 * Driver Entry Routines. 
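 *
 * These are the character/bus entry points that get wired into the
 * emlxs_cb_ops and emlxs_ops (dev_ops) vectors further below.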
86 */ 87 static int32_t emlxs_detach(dev_info_t *, ddi_detach_cmd_t); 88 static int32_t emlxs_attach(dev_info_t *, ddi_attach_cmd_t); 89 static int32_t emlxs_open(dev_t *, int32_t, int32_t, cred_t *); 90 static int32_t emlxs_close(dev_t, int32_t, int32_t, cred_t *); 91 static int32_t emlxs_ioctl(dev_t, int32_t, intptr_t, int32_t, 92 cred_t *, int32_t *); 93 static int32_t emlxs_info(dev_info_t *, ddi_info_cmd_t, void *, void **); 94 95 96 /* 97 * FC_AL Transport Functions. 98 */ 99 static opaque_t emlxs_fca_bind_port(dev_info_t *, fc_fca_port_info_t *, 100 fc_fca_bind_info_t *); 101 static void emlxs_fca_unbind_port(opaque_t); 102 static void emlxs_initialize_pkt(emlxs_port_t *, emlxs_buf_t *); 103 static int32_t emlxs_fca_get_cap(opaque_t, char *, void *); 104 static int32_t emlxs_fca_set_cap(opaque_t, char *, void *); 105 static int32_t emlxs_fca_get_map(opaque_t, fc_lilpmap_t *); 106 static int32_t emlxs_fca_ub_alloc(opaque_t, uint64_t *, uint32_t, 107 uint32_t *, uint32_t); 108 static int32_t emlxs_fca_ub_free(opaque_t, uint32_t, uint64_t *); 109 110 static opaque_t emlxs_fca_get_device(opaque_t, fc_portid_t); 111 static int32_t emlxs_fca_notify(opaque_t, uint32_t); 112 static void emlxs_ub_els_reject(emlxs_port_t *, fc_unsol_buf_t *); 113 114 /* 115 * Driver Internal Functions. 116 */ 117 118 static void emlxs_poll(emlxs_port_t *, emlxs_buf_t *); 119 static int32_t emlxs_power(dev_info_t *, int32_t, int32_t); 120 #ifdef EMLXS_I386 121 #ifdef S11 122 static int32_t emlxs_quiesce(dev_info_t *); 123 #endif 124 #endif 125 static int32_t emlxs_hba_resume(dev_info_t *); 126 static int32_t emlxs_hba_suspend(dev_info_t *); 127 static int32_t emlxs_hba_detach(dev_info_t *); 128 static int32_t emlxs_hba_attach(dev_info_t *); 129 static void emlxs_lock_destroy(emlxs_hba_t *); 130 static void emlxs_lock_init(emlxs_hba_t *); 131 132 char *emlxs_pm_components[] = { 133 "NAME=emlxx000", 134 "0=Device D3 State", 135 "1=Device D0 State" 136 }; 137 138 139 /* 140 * Default emlx dma limits 141 */ 142 ddi_dma_lim_t emlxs_dma_lim = { 143 (uint32_t)0, /* dlim_addr_lo */ 144 (uint32_t)0xffffffff, /* dlim_addr_hi */ 145 (uint_t)0x00ffffff, /* dlim_cntr_max */ 146 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dlim_burstsizes */ 147 1, /* dlim_minxfer */ 148 0x00ffffff /* dlim_dmaspeed */ 149 }; 150 151 /* 152 * Be careful when using these attributes; the defaults listed below are 153 * (almost) the most general case, permitting allocation in almost any 154 * way supported by the LightPulse family. The sole exception is the 155 * alignment specified as requiring memory allocation on a 4-byte boundary; 156 * the Lightpulse can DMA memory on any byte boundary. 157 * 158 * The LightPulse family currently is limited to 16M transfers; 159 * this restriction affects the dma_attr_count_max and dma_attr_maxxfer fields. 
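 *
 * As an illustrative sketch only (not part of this driver), attributes like
 * these are what get handed to the DDI DMA framework when a handle is
 * allocated, e.g.:
 *
 *	ddi_dma_handle_t h;
 *	if (ddi_dma_alloc_handle(dip, &emlxs_dma_attr, DDI_DMA_SLEEP,
 *	    NULL, &h) != DDI_SUCCESS) {
 *		... fail the allocation ...
 *	}
 *
 * The 0x00ffffff values in dma_attr_count_max and dma_attr_maxxfer below
 * are what encode the 16M transfer restriction mentioned above.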
 */
ddi_dma_attr_t emlxs_dma_attr = {
	DMA_ATTR_V0,				/* dma_attr_version */
	(uint64_t)0,				/* dma_attr_addr_lo */
	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
	1,					/* dma_attr_align */
	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
	1,					/* dma_attr_minxfer */
	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
	(uint64_t)0xffffffff,			/* dma_attr_seg */
	EMLXS_SGLLEN,				/* dma_attr_sgllen */
	1,					/* dma_attr_granular */
	0					/* dma_attr_flags */
};

ddi_dma_attr_t emlxs_dma_attr_ro = {
	DMA_ATTR_V0,				/* dma_attr_version */
	(uint64_t)0,				/* dma_attr_addr_lo */
	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
	1,					/* dma_attr_align */
	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
	1,					/* dma_attr_minxfer */
	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
	(uint64_t)0xffffffff,			/* dma_attr_seg */
	EMLXS_SGLLEN,				/* dma_attr_sgllen */
	1,					/* dma_attr_granular */
	DDI_DMA_RELAXED_ORDERING		/* dma_attr_flags */
};

ddi_dma_attr_t emlxs_dma_attr_1sg = {
	DMA_ATTR_V0,				/* dma_attr_version */
	(uint64_t)0,				/* dma_attr_addr_lo */
	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
	1,					/* dma_attr_align */
	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
	1,					/* dma_attr_minxfer */
	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
	(uint64_t)0xffffffff,			/* dma_attr_seg */
	1,					/* dma_attr_sgllen */
	1,					/* dma_attr_granular */
	0					/* dma_attr_flags */
};

#if (EMLXS_MODREV >= EMLXS_MODREV3)
ddi_dma_attr_t emlxs_dma_attr_fcip_rsp = {
	DMA_ATTR_V0,				/* dma_attr_version */
	(uint64_t)0,				/* dma_attr_addr_lo */
	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
	1,					/* dma_attr_align */
	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
	1,					/* dma_attr_minxfer */
	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
	(uint64_t)0xffffffff,			/* dma_attr_seg */
	EMLXS_SGLLEN,				/* dma_attr_sgllen */
	1,					/* dma_attr_granular */
	0					/* dma_attr_flags */
};
#endif	/* >= EMLXS_MODREV3 */

/*
 * DDI access attributes for device
 */
ddi_device_acc_attr_t emlxs_dev_acc_attr = {
	DDI_DEVICE_ATTR_V1,	/* devacc_attr_version */
	DDI_STRUCTURE_LE_ACC,	/* PCI is Little Endian */
	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder */
	DDI_DEFAULT_ACC		/* devacc_attr_access */
};

/*
 * DDI access attributes for data
 */
ddi_device_acc_attr_t emlxs_data_acc_attr = {
	DDI_DEVICE_ATTR_V1,	/* devacc_attr_version */
	DDI_NEVERSWAP_ACC,	/* don't swap for Data */
	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder */
	DDI_DEFAULT_ACC		/* devacc_attr_access */
};

/*
 * Fill in the FC Transport structure,
 * as defined in the Fibre Channel Transport Programming Guide.
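 *
 * A sketch of how this table is typically consumed (illustrative only; the
 * real registration happens in the attach path, using the Leadville
 * fc_fca_attach() symbol resolved below via ddi_modsym()):
 *
 *	The iblock slot is filled in once interrupts have been added, then:
 *
 *	if (MODSYM(fc_fca_attach)(dip, &emlxs_fca_tran) != DDI_SUCCESS) {
 *		... fail or tear down the attach ...
 *	}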
 */
#if (EMLXS_MODREV == EMLXS_MODREV5)
static fc_fca_tran_t emlxs_fca_tran = {
	FCTL_FCA_MODREV_5,		/* fca_version, with SUN NPIV support */
	MAX_VPORTS,			/* fca number of ports */
	sizeof (emlxs_buf_t),		/* fca pkt size */
	2048,				/* fca cmd max */
	&emlxs_dma_lim,			/* fca dma limits */
	0,				/* fca iblock, to be filled in later */
	&emlxs_dma_attr,		/* fca dma attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
	&emlxs_data_acc_attr,		/* fca access attributes */
	0,				/* fca_num_npivports */
	{0, 0, 0, 0, 0, 0, 0, 0},	/* Physical port WWPN */
	emlxs_fca_bind_port,
	emlxs_fca_unbind_port,
	emlxs_fca_pkt_init,
	emlxs_fca_pkt_uninit,
	emlxs_fca_transport,
	emlxs_fca_get_cap,
	emlxs_fca_set_cap,
	emlxs_fca_get_map,
	emlxs_fca_transport,
	emlxs_fca_ub_alloc,
	emlxs_fca_ub_free,
	emlxs_fca_ub_release,
	emlxs_fca_pkt_abort,
	emlxs_fca_reset,
	emlxs_fca_port_manage,
	emlxs_fca_get_device,
	emlxs_fca_notify
};
#endif	/* EMLXS_MODREV5 */


#if (EMLXS_MODREV == EMLXS_MODREV4)
static fc_fca_tran_t emlxs_fca_tran = {
	FCTL_FCA_MODREV_4,		/* fca_version */
	MAX_VPORTS,			/* fca number of ports */
	sizeof (emlxs_buf_t),		/* fca pkt size */
	2048,				/* fca cmd max */
	&emlxs_dma_lim,			/* fca dma limits */
	0,				/* fca iblock, to be filled in later */
	&emlxs_dma_attr,		/* fca dma attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
	&emlxs_data_acc_attr,		/* fca access attributes */
	emlxs_fca_bind_port,
	emlxs_fca_unbind_port,
	emlxs_fca_pkt_init,
	emlxs_fca_pkt_uninit,
	emlxs_fca_transport,
	emlxs_fca_get_cap,
	emlxs_fca_set_cap,
	emlxs_fca_get_map,
	emlxs_fca_transport,
	emlxs_fca_ub_alloc,
	emlxs_fca_ub_free,
	emlxs_fca_ub_release,
	emlxs_fca_pkt_abort,
	emlxs_fca_reset,
	emlxs_fca_port_manage,
	emlxs_fca_get_device,
	emlxs_fca_notify
};
#endif	/* EMLXS_MODREV4 */


#if (EMLXS_MODREV == EMLXS_MODREV3)
static fc_fca_tran_t emlxs_fca_tran = {
	FCTL_FCA_MODREV_3,		/* fca_version */
	MAX_VPORTS,			/* fca number of ports */
	sizeof (emlxs_buf_t),		/* fca pkt size */
	2048,				/* fca cmd max */
	&emlxs_dma_lim,			/* fca dma limits */
	0,				/* fca iblock, to be filled in later */
	&emlxs_dma_attr,		/* fca dma attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
	&emlxs_data_acc_attr,		/* fca access attributes */
	emlxs_fca_bind_port,
	emlxs_fca_unbind_port,
	emlxs_fca_pkt_init,
	emlxs_fca_pkt_uninit,
	emlxs_fca_transport,
	emlxs_fca_get_cap,
	emlxs_fca_set_cap,
	emlxs_fca_get_map,
	emlxs_fca_transport,
	emlxs_fca_ub_alloc,
	emlxs_fca_ub_free,
	emlxs_fca_ub_release,
	emlxs_fca_pkt_abort,
	emlxs_fca_reset,
	emlxs_fca_port_manage,
	emlxs_fca_get_device,
	emlxs_fca_notify
};
#endif	/* EMLXS_MODREV3 */


#if (EMLXS_MODREV == EMLXS_MODREV2)
static fc_fca_tran_t emlxs_fca_tran = {
	FCTL_FCA_MODREV_2,		/* fca_version */
	MAX_VPORTS,			/* number of ports */
	sizeof (emlxs_buf_t),		/* pkt size */
	2048,				/* max cmds */
	&emlxs_dma_lim,			/* DMA limits */
	0,				/* iblock, to be filled in later */
	&emlxs_dma_attr,		/* dma attributes */
	&emlxs_data_acc_attr,		/* access attributes */
	emlxs_fca_bind_port,
	emlxs_fca_unbind_port,
	emlxs_fca_pkt_init,
	emlxs_fca_pkt_uninit,
	emlxs_fca_transport,
	emlxs_fca_get_cap,
	emlxs_fca_set_cap,
	emlxs_fca_get_map,
	emlxs_fca_transport,
	emlxs_fca_ub_alloc,
	emlxs_fca_ub_free,
	emlxs_fca_ub_release,
	emlxs_fca_pkt_abort,
	emlxs_fca_reset,
	emlxs_fca_port_manage,
	emlxs_fca_get_device,
	emlxs_fca_notify
};
#endif	/* EMLXS_MODREV2 */

/*
 * State pointer which the implementation uses as a place to
 * hang a set of per-driver structures.
 */
void *emlxs_soft_state = NULL;

/*
 * Driver Global variables.
 */
int32_t emlxs_scsi_reset_delay = 3000;	/* milliseconds */

emlxs_device_t emlxs_device;

uint32_t emlxs_instance[MAX_FC_BRDS];	/* uses emlxs_device.lock */
uint32_t emlxs_instance_count = 0;	/* uses emlxs_device.lock */
uint32_t emlxs_instance_flag = 0;	/* uses emlxs_device.lock */
#define	EMLXS_FW_SHOW		0x00000001


/*
 * Single private "global" lock used to gain access to
 * the hba_list and/or any other case where we need to be
 * single-threaded.
 */
uint32_t emlxs_diag_state;

/*
 * CB ops vector. Used for administration only.
 */
static struct cb_ops emlxs_cb_ops = {
	emlxs_open,	/* cb_open */
	emlxs_close,	/* cb_close */
	nodev,		/* cb_strategy */
	nodev,		/* cb_print */
	nodev,		/* cb_dump */
	nodev,		/* cb_read */
	nodev,		/* cb_write */
	emlxs_ioctl,	/* cb_ioctl */
	nodev,		/* cb_devmap */
	nodev,		/* cb_mmap */
	nodev,		/* cb_segmap */
	nochpoll,	/* cb_chpoll */
	ddi_prop_op,	/* cb_prop_op */
	0,		/* cb_stream */
#ifdef _LP64
	D_64BIT | D_HOTPLUG | D_MP | D_NEW,	/* cb_flag */
#else
	D_HOTPLUG | D_MP | D_NEW,		/* cb_flag */
#endif
	CB_REV,		/* rev */
	nodev,		/* cb_aread */
	nodev		/* cb_awrite */
};

static struct dev_ops emlxs_ops = {
	DEVO_REV,	/* rev */
	0,		/* refcnt */
	emlxs_info,	/* getinfo */
	nulldev,	/* identify */
	nulldev,	/* probe */
	emlxs_attach,	/* attach */
	emlxs_detach,	/* detach */
	nodev,		/* reset */
	&emlxs_cb_ops,	/* devo_cb_ops */
	NULL,		/* devo_bus_ops */
	emlxs_power,	/* power ops */
#ifdef EMLXS_I386
#ifdef S11
	emlxs_quiesce,	/* quiesce */
#endif
#endif
};

#include <sys/modctl.h>
extern struct mod_ops mod_driverops;

#ifdef SAN_DIAG_SUPPORT
extern kmutex_t		sd_bucket_mutex;
extern sd_bucket_info_t	sd_bucket;
#endif /* SAN_DIAG_SUPPORT */

/*
 * Module linkage information for the kernel.
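 *
 * For reference, the structures above and below chain together as:
 *
 *	emlxs_modlinkage -> emlxs_modldrv -> emlxs_ops (dev_ops)
 *	                                       -> emlxs_cb_ops (cb_ops:
 *	                                          open/close/ioctl admin path)
 *
 * _init() below passes emlxs_modlinkage to mod_install(9F), after first
 * letting fc_fca_init() adjust the dev_ops vector.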
477 */ 478 static struct modldrv emlxs_modldrv = { 479 &mod_driverops, /* module type - driver */ 480 emlxs_name, /* module name */ 481 &emlxs_ops, /* driver ops */ 482 }; 483 484 485 /* 486 * Driver module linkage structure 487 */ 488 static struct modlinkage emlxs_modlinkage = { 489 MODREV_1, /* ml_rev - must be MODREV_1 */ 490 &emlxs_modldrv, /* ml_linkage */ 491 NULL /* end of driver linkage */ 492 }; 493 494 495 /* We only need to add entries for non-default return codes. */ 496 /* Entries do not need to be in order. */ 497 /* Default: FC_PKT_TRAN_ERROR, FC_REASON_ABORTED, */ 498 /* FC_EXPLN_NONE, FC_ACTION_RETRYABLE */ 499 500 emlxs_xlat_err_t emlxs_iostat_tbl[] = { 501 /* {f/w code, pkt_state, pkt_reason, */ 502 /* pkt_expln, pkt_action} */ 503 504 /* 0x00 - Do not remove */ 505 {IOSTAT_SUCCESS, FC_PKT_SUCCESS, FC_REASON_NONE, 506 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 507 508 /* 0x01 - Do not remove */ 509 {IOSTAT_FCP_RSP_ERROR, FC_PKT_SUCCESS, FC_REASON_NONE, 510 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 511 512 /* 0x02 */ 513 {IOSTAT_REMOTE_STOP, FC_PKT_REMOTE_STOP, FC_REASON_ABTS, 514 FC_EXPLN_NONE, FC_ACTION_NON_RETRYABLE}, 515 516 /* 517 * This is a default entry. 518 * The real codes are written dynamically in emlxs_els.c 519 */ 520 /* 0x09 */ 521 {IOSTAT_LS_RJT, FC_PKT_LS_RJT, FC_REASON_CMD_UNABLE, 522 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 523 524 /* Special error code */ 525 /* 0x10 */ 526 {IOSTAT_DATA_OVERRUN, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN, 527 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 528 529 /* Special error code */ 530 /* 0x11 */ 531 {IOSTAT_DATA_UNDERRUN, FC_PKT_TRAN_ERROR, FC_REASON_ABORTED, 532 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 533 534 /* CLASS 2 only */ 535 /* 0x04 */ 536 {IOSTAT_NPORT_RJT, FC_PKT_NPORT_RJT, FC_REASON_PROTOCOL_ERROR, 537 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 538 539 /* CLASS 2 only */ 540 /* 0x05 */ 541 {IOSTAT_FABRIC_RJT, FC_PKT_FABRIC_RJT, FC_REASON_PROTOCOL_ERROR, 542 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 543 544 /* CLASS 2 only */ 545 /* 0x06 */ 546 {IOSTAT_NPORT_BSY, FC_PKT_NPORT_BSY, FC_REASON_PHYSICAL_BUSY, 547 FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY}, 548 549 /* CLASS 2 only */ 550 /* 0x07 */ 551 {IOSTAT_FABRIC_BSY, FC_PKT_FABRIC_BSY, FC_REASON_FABRIC_BSY, 552 FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY}, 553 }; 554 555 #define IOSTAT_MAX (sizeof (emlxs_iostat_tbl)/sizeof (emlxs_xlat_err_t)) 556 557 558 /* We only need to add entries for non-default return codes. */ 559 /* Entries do not need to be in order. 
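 *
 * A sketch of the lookup pattern these translation tables support
 * (illustrative only; member names are assumed here, and the real
 * translation code lives elsewhere in the driver):
 *
 *	Start from the default pkt_state/pkt_reason/pkt_expln/pkt_action
 *	values documented below, then scan the table:
 *
 *	for (i = 0; i < IOERR_MAX; i++) {
 *		if (emlxs_ioerr_tbl[i].emlxs_status == fw_code) {
 *			use that entry's pkt_* values instead;
 *			break;
 *		}
 *	}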
*/ 560 /* Default: FC_PKT_TRAN_ERROR, FC_REASON_ABORTED, */ 561 /* FC_EXPLN_NONE, FC_ACTION_RETRYABLE} */ 562 563 emlxs_xlat_err_t emlxs_ioerr_tbl[] = { 564 /* {f/w code, pkt_state, pkt_reason, */ 565 /* pkt_expln, pkt_action} */ 566 567 /* 0x01 */ 568 {IOERR_MISSING_CONTINUE, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN, 569 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 570 571 /* 0x02 */ 572 {IOERR_SEQUENCE_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT, 573 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 574 575 /* 0x04 */ 576 {IOERR_INVALID_RPI, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE, 577 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 578 579 /* 0x05 */ 580 {IOERR_NO_XRI, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED, 581 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 582 583 /* 0x06 */ 584 {IOERR_ILLEGAL_COMMAND, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ, 585 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 586 587 /* 0x07 */ 588 {IOERR_XCHG_DROPPED, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED, 589 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 590 591 /* 0x08 */ 592 {IOERR_ILLEGAL_FIELD, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ, 593 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 594 595 /* 0x0B */ 596 {IOERR_RCV_BUFFER_WAITING, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM, 597 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 598 599 /* 0x0D */ 600 {IOERR_TX_DMA_FAILED, FC_PKT_LOCAL_RJT, FC_REASON_DMA_ERROR, 601 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 602 603 /* 0x0E */ 604 {IOERR_RX_DMA_FAILED, FC_PKT_LOCAL_RJT, FC_REASON_DMA_ERROR, 605 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 606 607 /* 0x0F */ 608 {IOERR_ILLEGAL_FRAME, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_FRAME, 609 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 610 611 /* 0x11 */ 612 {IOERR_NO_RESOURCES, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM, 613 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 614 615 /* 0x13 */ 616 {IOERR_ILLEGAL_LENGTH, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_LENGTH, 617 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 618 619 /* 0x14 */ 620 {IOERR_UNSUPPORTED_FEATURE, FC_PKT_LOCAL_RJT, FC_REASON_UNSUPPORTED, 621 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 622 623 /* 0x15 */ 624 {IOERR_ABORT_IN_PROGRESS, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED, 625 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 626 627 /* 0x16 */ 628 {IOERR_ABORT_REQUESTED, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED, 629 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 630 631 /* 0x17 */ 632 {IOERR_RCV_BUFFER_TIMEOUT, FC_PKT_LOCAL_RJT, FC_REASON_RX_BUF_TIMEOUT, 633 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 634 635 /* 0x18 */ 636 {IOERR_LOOP_OPEN_FAILURE, FC_PKT_LOCAL_RJT, FC_REASON_FCAL_OPN_FAIL, 637 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 638 639 /* 0x1A */ 640 {IOERR_LINK_DOWN, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE, 641 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 642 643 /* 0x21 */ 644 {IOERR_BAD_HOST_ADDRESS, FC_PKT_LOCAL_RJT, FC_REASON_BAD_SID, 645 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 646 647 /* Occurs at link down */ 648 /* 0x28 */ 649 {IOERR_BUFFER_SHORTAGE, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE, 650 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 651 652 /* 0xF0 */ 653 {IOERR_ABORT_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT, 654 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 655 }; 656 657 #define IOERR_MAX (sizeof (emlxs_ioerr_tbl)/sizeof (emlxs_xlat_err_t)) 658 659 660 661 emlxs_table_t emlxs_error_table[] = { 662 {IOERR_SUCCESS, "No error."}, 663 {IOERR_MISSING_CONTINUE, "Missing continue."}, 664 {IOERR_SEQUENCE_TIMEOUT, "Sequence timeout."}, 665 {IOERR_INTERNAL_ERROR, "Internal error."}, 666 {IOERR_INVALID_RPI, "Invalid RPI."}, 667 {IOERR_NO_XRI, "No XRI."}, 668 {IOERR_ILLEGAL_COMMAND, "Illegal command."}, 669 {IOERR_XCHG_DROPPED, "Exchange dropped."}, 670 
	{IOERR_ILLEGAL_FIELD,		"Illegal field."},
	{IOERR_RCV_BUFFER_WAITING,	"RX buffer waiting."},
	{IOERR_TX_DMA_FAILED,		"TX DMA failed."},
	{IOERR_RX_DMA_FAILED,		"RX DMA failed."},
	{IOERR_ILLEGAL_FRAME,		"Illegal frame."},
	{IOERR_NO_RESOURCES,		"No resources."},
	{IOERR_ILLEGAL_LENGTH,		"Illegal length."},
	{IOERR_UNSUPPORTED_FEATURE,	"Unsupported feature."},
	{IOERR_ABORT_IN_PROGRESS,	"Abort in progress."},
	{IOERR_ABORT_REQUESTED,		"Abort requested."},
	{IOERR_RCV_BUFFER_TIMEOUT,	"RX buffer timeout."},
	{IOERR_LOOP_OPEN_FAILURE,	"Loop open failed."},
	{IOERR_RING_RESET,		"Ring reset."},
	{IOERR_LINK_DOWN,		"Link down."},
	{IOERR_CORRUPTED_DATA,		"Corrupted data."},
	{IOERR_CORRUPTED_RPI,		"Corrupted RPI."},
	{IOERR_OUT_OF_ORDER_DATA,	"Out-of-order data."},
	{IOERR_OUT_OF_ORDER_ACK,	"Out-of-order ack."},
	{IOERR_DUP_FRAME,		"Duplicate frame."},
	{IOERR_LINK_CONTROL_FRAME,	"Link control frame."},
	{IOERR_BAD_HOST_ADDRESS,	"Bad host address."},
	{IOERR_RCV_HDRBUF_WAITING,	"RX header buffer waiting."},
	{IOERR_MISSING_HDR_BUFFER,	"Missing header buffer."},
	{IOERR_MSEQ_CHAIN_CORRUPTED,	"MSEQ chain corrupted."},
	{IOERR_ABORTMULT_REQUESTED,	"Abort multiple requested."},
	{IOERR_BUFFER_SHORTAGE,		"Buffer shortage."},
	{IOERR_XRIBUF_WAITING,		"XRI buffer shortage"},
	{IOERR_XRIBUF_MISSING,		"XRI buffer missing"},
	{IOERR_ROFFSET_INVAL,		"Relative offset invalid."},
	{IOERR_ROFFSET_MISSING,		"Relative offset missing."},
	{IOERR_INSUF_BUFFER,		"Buffer too small."},
	{IOERR_MISSING_SI,		"ELS frame missing SI"},
	{IOERR_MISSING_ES,		"Exhausted burst without ES"},
	{IOERR_INCOMP_XFER,		"Transfer incomplete."},
	{IOERR_ABORT_TIMEOUT,		"Abort timeout."}

}; /* emlxs_error_table */


emlxs_table_t emlxs_state_table[] = {
	{IOSTAT_SUCCESS,		"Success."},
	{IOSTAT_FCP_RSP_ERROR,		"FCP response error."},
	{IOSTAT_REMOTE_STOP,		"Remote stop."},
	{IOSTAT_LOCAL_REJECT,		"Local reject."},
	{IOSTAT_NPORT_RJT,		"NPort reject."},
	{IOSTAT_FABRIC_RJT,		"Fabric reject."},
	{IOSTAT_NPORT_BSY,		"Nport busy."},
	{IOSTAT_FABRIC_BSY,		"Fabric busy."},
	{IOSTAT_INTERMED_RSP,		"Intermediate response."},
	{IOSTAT_LS_RJT,			"LS reject."},
	{IOSTAT_CMD_REJECT,		"Cmd reject."},
	{IOSTAT_FCP_TGT_LENCHK,		"TGT length check."},
	{IOSTAT_NEED_BUFF_ENTRY,	"Need buffer entry."},
	{IOSTAT_DATA_UNDERRUN,		"Data underrun."},
	{IOSTAT_DATA_OVERRUN,		"Data overrun."},

}; /* emlxs_state_table */


#ifdef MENLO_SUPPORT
emlxs_table_t emlxs_menlo_cmd_table[] = {
	{MENLO_CMD_INITIALIZE,		"MENLO_INIT"},
	{MENLO_CMD_FW_DOWNLOAD,		"MENLO_FW_DOWNLOAD"},
	{MENLO_CMD_READ_MEMORY,		"MENLO_READ_MEM"},
	{MENLO_CMD_WRITE_MEMORY,	"MENLO_WRITE_MEM"},
	{MENLO_CMD_FTE_INSERT,		"MENLO_FTE_INSERT"},
	{MENLO_CMD_FTE_DELETE,		"MENLO_FTE_DELETE"},

	{MENLO_CMD_GET_INIT,		"MENLO_GET_INIT"},
	{MENLO_CMD_GET_CONFIG,		"MENLO_GET_CONFIG"},
	{MENLO_CMD_GET_PORT_STATS,	"MENLO_GET_PORT_STATS"},
	{MENLO_CMD_GET_LIF_STATS,	"MENLO_GET_LIF_STATS"},
	{MENLO_CMD_GET_ASIC_STATS,	"MENLO_GET_ASIC_STATS"},
	{MENLO_CMD_GET_LOG_CONFIG,	"MENLO_GET_LOG_CFG"},
	{MENLO_CMD_GET_LOG_DATA,	"MENLO_GET_LOG_DATA"},
	{MENLO_CMD_GET_PANIC_LOG,	"MENLO_GET_PANIC_LOG"},
	{MENLO_CMD_GET_LB_MODE,		"MENLO_GET_LB_MODE"},

	{MENLO_CMD_SET_PAUSE,		"MENLO_SET_PAUSE"},
	{MENLO_CMD_SET_FCOE_COS,	"MENLO_SET_FCOE_COS"},
	{MENLO_CMD_SET_UIF_PORT_TYPE,	"MENLO_SET_UIF_TYPE"},

	{MENLO_CMD_DIAGNOSTICS,
"MENLO_DIAGNOSTICS"}, 753 {MENLO_CMD_LOOPBACK, "MENLO_LOOPBACK"}, 754 755 {MENLO_CMD_RESET, "MENLO_RESET"}, 756 {MENLO_CMD_SET_MODE, "MENLO_SET_MODE"} 757 758 }; /* emlxs_menlo_cmd_table */ 759 760 emlxs_table_t emlxs_menlo_rsp_table[] = { 761 {MENLO_RSP_SUCCESS, "SUCCESS"}, 762 {MENLO_ERR_FAILED, "FAILED"}, 763 {MENLO_ERR_INVALID_CMD, "INVALID_CMD"}, 764 {MENLO_ERR_INVALID_CREDIT, "INVALID_CREDIT"}, 765 {MENLO_ERR_INVALID_SIZE, "INVALID_SIZE"}, 766 {MENLO_ERR_INVALID_ADDRESS, "INVALID_ADDRESS"}, 767 {MENLO_ERR_INVALID_CONTEXT, "INVALID_CONTEXT"}, 768 {MENLO_ERR_INVALID_LENGTH, "INVALID_LENGTH"}, 769 {MENLO_ERR_INVALID_TYPE, "INVALID_TYPE"}, 770 {MENLO_ERR_INVALID_DATA, "INVALID_DATA"}, 771 {MENLO_ERR_INVALID_VALUE1, "INVALID_VALUE1"}, 772 {MENLO_ERR_INVALID_VALUE2, "INVALID_VALUE2"}, 773 {MENLO_ERR_INVALID_MASK, "INVALID_MASK"}, 774 {MENLO_ERR_CHECKSUM, "CHECKSUM_ERROR"}, 775 {MENLO_ERR_UNKNOWN_FCID, "UNKNOWN_FCID"}, 776 {MENLO_ERR_UNKNOWN_WWN, "UNKNOWN_WWN"}, 777 {MENLO_ERR_BUSY, "BUSY"}, 778 779 }; /* emlxs_menlo_rsp_table */ 780 781 #endif /* MENLO_SUPPORT */ 782 783 784 emlxs_table_t emlxs_mscmd_table[] = { 785 {SLI_CT_RESPONSE_FS_ACC, "CT_ACC"}, 786 {SLI_CT_RESPONSE_FS_RJT, "CT_RJT"}, 787 {MS_GTIN, "MS_GTIN"}, 788 {MS_GIEL, "MS_GIEL"}, 789 {MS_GIET, "MS_GIET"}, 790 {MS_GDID, "MS_GDID"}, 791 {MS_GMID, "MS_GMID"}, 792 {MS_GFN, "MS_GFN"}, 793 {MS_GIELN, "MS_GIELN"}, 794 {MS_GMAL, "MS_GMAL"}, 795 {MS_GIEIL, "MS_GIEIL"}, 796 {MS_GPL, "MS_GPL"}, 797 {MS_GPT, "MS_GPT"}, 798 {MS_GPPN, "MS_GPPN"}, 799 {MS_GAPNL, "MS_GAPNL"}, 800 {MS_GPS, "MS_GPS"}, 801 {MS_GPSC, "MS_GPSC"}, 802 {MS_GATIN, "MS_GATIN"}, 803 {MS_GSES, "MS_GSES"}, 804 {MS_GPLNL, "MS_GPLNL"}, 805 {MS_GPLT, "MS_GPLT"}, 806 {MS_GPLML, "MS_GPLML"}, 807 {MS_GPAB, "MS_GPAB"}, 808 {MS_GNPL, "MS_GNPL"}, 809 {MS_GPNL, "MS_GPNL"}, 810 {MS_GPFCP, "MS_GPFCP"}, 811 {MS_GPLI, "MS_GPLI"}, 812 {MS_GNID, "MS_GNID"}, 813 {MS_RIELN, "MS_RIELN"}, 814 {MS_RPL, "MS_RPL"}, 815 {MS_RPLN, "MS_RPLN"}, 816 {MS_RPLT, "MS_RPLT"}, 817 {MS_RPLM, "MS_RPLM"}, 818 {MS_RPAB, "MS_RPAB"}, 819 {MS_RPFCP, "MS_RPFCP"}, 820 {MS_RPLI, "MS_RPLI"}, 821 {MS_DPL, "MS_DPL"}, 822 {MS_DPLN, "MS_DPLN"}, 823 {MS_DPLM, "MS_DPLM"}, 824 {MS_DPLML, "MS_DPLML"}, 825 {MS_DPLI, "MS_DPLI"}, 826 {MS_DPAB, "MS_DPAB"}, 827 {MS_DPALL, "MS_DPALL"} 828 829 }; /* emlxs_mscmd_table */ 830 831 832 emlxs_table_t emlxs_ctcmd_table[] = { 833 {SLI_CT_RESPONSE_FS_ACC, "CT_ACC"}, 834 {SLI_CT_RESPONSE_FS_RJT, "CT_RJT"}, 835 {SLI_CTNS_GA_NXT, "GA_NXT"}, 836 {SLI_CTNS_GPN_ID, "GPN_ID"}, 837 {SLI_CTNS_GNN_ID, "GNN_ID"}, 838 {SLI_CTNS_GCS_ID, "GCS_ID"}, 839 {SLI_CTNS_GFT_ID, "GFT_ID"}, 840 {SLI_CTNS_GSPN_ID, "GSPN_ID"}, 841 {SLI_CTNS_GPT_ID, "GPT_ID"}, 842 {SLI_CTNS_GID_PN, "GID_PN"}, 843 {SLI_CTNS_GID_NN, "GID_NN"}, 844 {SLI_CTNS_GIP_NN, "GIP_NN"}, 845 {SLI_CTNS_GIPA_NN, "GIPA_NN"}, 846 {SLI_CTNS_GSNN_NN, "GSNN_NN"}, 847 {SLI_CTNS_GNN_IP, "GNN_IP"}, 848 {SLI_CTNS_GIPA_IP, "GIPA_IP"}, 849 {SLI_CTNS_GID_FT, "GID_FT"}, 850 {SLI_CTNS_GID_PT, "GID_PT"}, 851 {SLI_CTNS_RPN_ID, "RPN_ID"}, 852 {SLI_CTNS_RNN_ID, "RNN_ID"}, 853 {SLI_CTNS_RCS_ID, "RCS_ID"}, 854 {SLI_CTNS_RFT_ID, "RFT_ID"}, 855 {SLI_CTNS_RSPN_ID, "RSPN_ID"}, 856 {SLI_CTNS_RPT_ID, "RPT_ID"}, 857 {SLI_CTNS_RIP_NN, "RIP_NN"}, 858 {SLI_CTNS_RIPA_NN, "RIPA_NN"}, 859 {SLI_CTNS_RSNN_NN, "RSNN_NN"}, 860 {SLI_CTNS_DA_ID, "DA_ID"}, 861 {SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */ 862 863 }; /* emlxs_ctcmd_table */ 864 865 866 867 emlxs_table_t emlxs_rmcmd_table[] = { 868 {SLI_CT_RESPONSE_FS_ACC, "CT_ACC"}, 869 {SLI_CT_RESPONSE_FS_RJT, "CT_RJT"}, 870 
{CT_OP_GSAT, "RM_GSAT"}, 871 {CT_OP_GHAT, "RM_GHAT"}, 872 {CT_OP_GPAT, "RM_GPAT"}, 873 {CT_OP_GDAT, "RM_GDAT"}, 874 {CT_OP_GPST, "RM_GPST"}, 875 {CT_OP_GDP, "RM_GDP"}, 876 {CT_OP_GDPG, "RM_GDPG"}, 877 {CT_OP_GEPS, "RM_GEPS"}, 878 {CT_OP_GLAT, "RM_GLAT"}, 879 {CT_OP_SSAT, "RM_SSAT"}, 880 {CT_OP_SHAT, "RM_SHAT"}, 881 {CT_OP_SPAT, "RM_SPAT"}, 882 {CT_OP_SDAT, "RM_SDAT"}, 883 {CT_OP_SDP, "RM_SDP"}, 884 {CT_OP_SBBS, "RM_SBBS"}, 885 {CT_OP_RPST, "RM_RPST"}, 886 {CT_OP_VFW, "RM_VFW"}, 887 {CT_OP_DFW, "RM_DFW"}, 888 {CT_OP_RES, "RM_RES"}, 889 {CT_OP_RHD, "RM_RHD"}, 890 {CT_OP_UFW, "RM_UFW"}, 891 {CT_OP_RDP, "RM_RDP"}, 892 {CT_OP_GHDR, "RM_GHDR"}, 893 {CT_OP_CHD, "RM_CHD"}, 894 {CT_OP_SSR, "RM_SSR"}, 895 {CT_OP_RSAT, "RM_RSAT"}, 896 {CT_OP_WSAT, "RM_WSAT"}, 897 {CT_OP_RSAH, "RM_RSAH"}, 898 {CT_OP_WSAH, "RM_WSAH"}, 899 {CT_OP_RACT, "RM_RACT"}, 900 {CT_OP_WACT, "RM_WACT"}, 901 {CT_OP_RKT, "RM_RKT"}, 902 {CT_OP_WKT, "RM_WKT"}, 903 {CT_OP_SSC, "RM_SSC"}, 904 {CT_OP_QHBA, "RM_QHBA"}, 905 {CT_OP_GST, "RM_GST"}, 906 {CT_OP_GFTM, "RM_GFTM"}, 907 {CT_OP_SRL, "RM_SRL"}, 908 {CT_OP_SI, "RM_SI"}, 909 {CT_OP_SRC, "RM_SRC"}, 910 {CT_OP_GPB, "RM_GPB"}, 911 {CT_OP_SPB, "RM_SPB"}, 912 {CT_OP_RPB, "RM_RPB"}, 913 {CT_OP_RAPB, "RM_RAPB"}, 914 {CT_OP_GBC, "RM_GBC"}, 915 {CT_OP_GBS, "RM_GBS"}, 916 {CT_OP_SBS, "RM_SBS"}, 917 {CT_OP_GANI, "RM_GANI"}, 918 {CT_OP_GRV, "RM_GRV"}, 919 {CT_OP_GAPBS, "RM_GAPBS"}, 920 {CT_OP_APBC, "RM_APBC"}, 921 {CT_OP_GDT, "RM_GDT"}, 922 {CT_OP_GDLMI, "RM_GDLMI"}, 923 {CT_OP_GANA, "RM_GANA"}, 924 {CT_OP_GDLV, "RM_GDLV"}, 925 {CT_OP_GWUP, "RM_GWUP"}, 926 {CT_OP_GLM, "RM_GLM"}, 927 {CT_OP_GABS, "RM_GABS"}, 928 {CT_OP_SABS, "RM_SABS"}, 929 {CT_OP_RPR, "RM_RPR"}, 930 {SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */ 931 932 }; /* emlxs_rmcmd_table */ 933 934 935 emlxs_table_t emlxs_elscmd_table[] = { 936 {ELS_CMD_ACC, "ACC"}, 937 {ELS_CMD_LS_RJT, "LS_RJT"}, 938 {ELS_CMD_PLOGI, "PLOGI"}, 939 {ELS_CMD_FLOGI, "FLOGI"}, 940 {ELS_CMD_LOGO, "LOGO"}, 941 {ELS_CMD_ABTX, "ABTX"}, 942 {ELS_CMD_RCS, "RCS"}, 943 {ELS_CMD_RES, "RES"}, 944 {ELS_CMD_RSS, "RSS"}, 945 {ELS_CMD_RSI, "RSI"}, 946 {ELS_CMD_ESTS, "ESTS"}, 947 {ELS_CMD_ESTC, "ESTC"}, 948 {ELS_CMD_ADVC, "ADVC"}, 949 {ELS_CMD_RTV, "RTV"}, 950 {ELS_CMD_RLS, "RLS"}, 951 {ELS_CMD_ECHO, "ECHO"}, 952 {ELS_CMD_TEST, "TEST"}, 953 {ELS_CMD_RRQ, "RRQ"}, 954 {ELS_CMD_REC, "REC"}, 955 {ELS_CMD_PRLI, "PRLI"}, 956 {ELS_CMD_PRLO, "PRLO"}, 957 {ELS_CMD_SCN, "SCN"}, 958 {ELS_CMD_TPLS, "TPLS"}, 959 {ELS_CMD_GPRLO, "GPRLO"}, 960 {ELS_CMD_GAID, "GAID"}, 961 {ELS_CMD_FACT, "FACT"}, 962 {ELS_CMD_FDACT, "FDACT"}, 963 {ELS_CMD_NACT, "NACT"}, 964 {ELS_CMD_NDACT, "NDACT"}, 965 {ELS_CMD_QoSR, "QoSR"}, 966 {ELS_CMD_RVCS, "RVCS"}, 967 {ELS_CMD_PDISC, "PDISC"}, 968 {ELS_CMD_FDISC, "FDISC"}, 969 {ELS_CMD_ADISC, "ADISC"}, 970 {ELS_CMD_FARP, "FARP"}, 971 {ELS_CMD_FARPR, "FARPR"}, 972 {ELS_CMD_FAN, "FAN"}, 973 {ELS_CMD_RSCN, "RSCN"}, 974 {ELS_CMD_SCR, "SCR"}, 975 {ELS_CMD_LINIT, "LINIT"}, 976 {ELS_CMD_RNID, "RNID"}, 977 {ELS_CMD_AUTH, "AUTH"} 978 979 }; /* emlxs_elscmd_table */ 980 981 982 /* 983 * 984 * Device Driver Entry Routines 985 * 986 */ 987 988 #ifdef MODSYM_SUPPORT 989 static void emlxs_fca_modclose(); 990 static int emlxs_fca_modopen(); 991 emlxs_modsym_t emlxs_modsym; /* uses emlxs_device.lock */ 992 993 static int 994 emlxs_fca_modopen() 995 { 996 int err; 997 998 if (emlxs_modsym.mod_fctl) { 999 return (0); 1000 } 1001 1002 /* Leadville (fctl) */ 1003 err = 0; 1004 emlxs_modsym.mod_fctl = 1005 ddi_modopen("misc/fctl", KRTLD_MODE_FIRST, &err); 1006 if 
(!emlxs_modsym.mod_fctl) { 1007 cmn_err(CE_WARN, 1008 "?%s: misc/fctl: ddi_modopen misc/fctl failed: error=%d", 1009 DRIVER_NAME, err); 1010 1011 goto failed; 1012 } 1013 1014 err = 0; 1015 /* Check if the fctl fc_fca_attach is present */ 1016 emlxs_modsym.fc_fca_attach = 1017 (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_attach", 1018 &err); 1019 if ((void *)emlxs_modsym.fc_fca_attach == NULL) { 1020 cmn_err(CE_WARN, 1021 "?%s: misc/fctl: fc_fca_attach not present", DRIVER_NAME); 1022 goto failed; 1023 } 1024 1025 err = 0; 1026 /* Check if the fctl fc_fca_detach is present */ 1027 emlxs_modsym.fc_fca_detach = 1028 (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_detach", 1029 &err); 1030 if ((void *)emlxs_modsym.fc_fca_detach == NULL) { 1031 cmn_err(CE_WARN, 1032 "?%s: misc/fctl: fc_fca_detach not present", DRIVER_NAME); 1033 goto failed; 1034 } 1035 1036 err = 0; 1037 /* Check if the fctl fc_fca_init is present */ 1038 emlxs_modsym.fc_fca_init = 1039 (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_init", &err); 1040 if ((void *)emlxs_modsym.fc_fca_init == NULL) { 1041 cmn_err(CE_WARN, 1042 "?%s: misc/fctl: fc_fca_init not present", DRIVER_NAME); 1043 goto failed; 1044 } 1045 1046 return (0); 1047 1048 failed: 1049 1050 emlxs_fca_modclose(); 1051 1052 return (1); 1053 1054 1055 } /* emlxs_fca_modopen() */ 1056 1057 1058 static void 1059 emlxs_fca_modclose() 1060 { 1061 if (emlxs_modsym.mod_fctl) { 1062 (void) ddi_modclose(emlxs_modsym.mod_fctl); 1063 emlxs_modsym.mod_fctl = 0; 1064 } 1065 1066 emlxs_modsym.fc_fca_attach = NULL; 1067 emlxs_modsym.fc_fca_detach = NULL; 1068 emlxs_modsym.fc_fca_init = NULL; 1069 1070 return; 1071 1072 } /* emlxs_fca_modclose() */ 1073 1074 #endif /* MODSYM_SUPPORT */ 1075 1076 1077 1078 /* 1079 * Global driver initialization, called once when driver is loaded 1080 */ 1081 int 1082 _init(void) 1083 { 1084 int ret; 1085 char buf[64]; 1086 1087 /* 1088 * First init call for this driver, 1089 * so initialize the emlxs_dev_ctl structure. 1090 */ 1091 bzero(&emlxs_device, sizeof (emlxs_device)); 1092 1093 #ifdef MODSYM_SUPPORT 1094 bzero(&emlxs_modsym, sizeof (emlxs_modsym_t)); 1095 #endif /* MODSYM_SUPPORT */ 1096 1097 (void) sprintf(buf, "%s_device mutex", DRIVER_NAME); 1098 mutex_init(&emlxs_device.lock, buf, MUTEX_DRIVER, NULL); 1099 1100 (void) drv_getparm(LBOLT, &emlxs_device.log_timestamp); 1101 emlxs_device.drv_timestamp = gethrtime(); 1102 1103 for (ret = 0; ret < MAX_FC_BRDS; ret++) { 1104 emlxs_instance[ret] = (uint32_t)-1; 1105 } 1106 1107 /* 1108 * Provide for one ddiinst of the emlxs_dev_ctl structure 1109 * for each possible board in the system. 1110 */ 1111 if ((ret = ddi_soft_state_init(&emlxs_soft_state, 1112 sizeof (emlxs_hba_t), MAX_FC_BRDS)) != 0) { 1113 cmn_err(CE_WARN, 1114 "?%s: _init: ddi_soft_state_init failed. 
rval=%x", 1115 DRIVER_NAME, ret); 1116 1117 return (ret); 1118 } 1119 1120 #ifdef MODSYM_SUPPORT 1121 /* Open SFS */ 1122 (void) emlxs_fca_modopen(); 1123 #endif /* MODSYM_SUPPORT */ 1124 1125 /* Setup devops for SFS */ 1126 MODSYM(fc_fca_init)(&emlxs_ops); 1127 1128 if ((ret = mod_install(&emlxs_modlinkage)) != 0) { 1129 (void) ddi_soft_state_fini(&emlxs_soft_state); 1130 #ifdef MODSYM_SUPPORT 1131 /* Close SFS */ 1132 emlxs_fca_modclose(); 1133 #endif /* MODSYM_SUPPORT */ 1134 1135 return (ret); 1136 } 1137 1138 #ifdef SAN_DIAG_SUPPORT 1139 (void) sprintf(buf, "%s_sd_bucket mutex", DRIVER_NAME); 1140 mutex_init(&sd_bucket_mutex, buf, MUTEX_DRIVER, NULL); 1141 #endif /* SAN_DIAG_SUPPORT */ 1142 1143 return (ret); 1144 1145 } /* _init() */ 1146 1147 1148 /* 1149 * Called when driver is unloaded. 1150 */ 1151 int 1152 _fini(void) 1153 { 1154 int ret; 1155 1156 if ((ret = mod_remove(&emlxs_modlinkage)) != 0) { 1157 return (ret); 1158 } 1159 #ifdef MODSYM_SUPPORT 1160 /* Close SFS */ 1161 emlxs_fca_modclose(); 1162 #endif /* MODSYM_SUPPORT */ 1163 1164 /* 1165 * Destroy the soft state structure 1166 */ 1167 (void) ddi_soft_state_fini(&emlxs_soft_state); 1168 1169 /* Destroy the global device lock */ 1170 mutex_destroy(&emlxs_device.lock); 1171 1172 #ifdef SAN_DIAG_SUPPORT 1173 mutex_destroy(&sd_bucket_mutex); 1174 #endif /* SAN_DIAG_SUPPORT */ 1175 1176 return (ret); 1177 1178 } /* _fini() */ 1179 1180 1181 1182 int 1183 _info(struct modinfo *modinfop) 1184 { 1185 1186 return (mod_info(&emlxs_modlinkage, modinfop)); 1187 1188 } /* _info() */ 1189 1190 1191 /* 1192 * Attach an ddiinst of an emlx host adapter. 1193 * Allocate data structures, initialize the adapter and we're ready to fly. 1194 */ 1195 static int 1196 emlxs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 1197 { 1198 emlxs_hba_t *hba; 1199 int ddiinst; 1200 int emlxinst; 1201 int rval; 1202 1203 switch (cmd) { 1204 case DDI_ATTACH: 1205 /* If successful this will set EMLXS_PM_IN_ATTACH */ 1206 rval = emlxs_hba_attach(dip); 1207 break; 1208 1209 case DDI_PM_RESUME: 1210 /* This will resume the driver */ 1211 rval = emlxs_pm_raise_power(dip); 1212 break; 1213 1214 case DDI_RESUME: 1215 /* This will resume the driver */ 1216 rval = emlxs_hba_resume(dip); 1217 break; 1218 1219 default: 1220 rval = DDI_FAILURE; 1221 } 1222 1223 if (rval == DDI_SUCCESS) { 1224 ddiinst = ddi_get_instance(dip); 1225 emlxinst = emlxs_get_instance(ddiinst); 1226 hba = emlxs_device.hba[emlxinst]; 1227 1228 if ((hba != NULL) && (hba != (emlxs_hba_t *)-1)) { 1229 1230 /* Enable driver dump feature */ 1231 mutex_enter(&EMLXS_PORT_LOCK); 1232 hba->flag |= FC_DUMP_SAFE; 1233 mutex_exit(&EMLXS_PORT_LOCK); 1234 } 1235 } 1236 1237 return (rval); 1238 1239 } /* emlxs_attach() */ 1240 1241 1242 /* 1243 * Detach/prepare driver to unload (see detach(9E)). 
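 *
 * The cmd values handled here mirror the attach(9E) cases above:
 *
 *	DDI_DETACH	<->  DDI_ATTACH		(emlxs_hba_detach/attach)
 *	DDI_SUSPEND	<->  DDI_RESUME		(emlxs_hba_suspend/resume)
 *	DDI_PM_SUSPEND	<->  DDI_PM_RESUME	(emlxs_pm_lower/raise_power)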
1244 */ 1245 static int 1246 emlxs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 1247 { 1248 emlxs_hba_t *hba; 1249 emlxs_port_t *port; 1250 int ddiinst; 1251 int emlxinst; 1252 int rval; 1253 1254 ddiinst = ddi_get_instance(dip); 1255 emlxinst = emlxs_get_instance(ddiinst); 1256 hba = emlxs_device.hba[emlxinst]; 1257 1258 if (hba == NULL) { 1259 cmn_err(CE_WARN, "?%s: Detach: NULL device.", DRIVER_NAME); 1260 1261 return (DDI_FAILURE); 1262 } 1263 1264 if (hba == (emlxs_hba_t *)-1) { 1265 cmn_err(CE_WARN, "?%s: Detach: Device attach failed.", 1266 DRIVER_NAME); 1267 1268 return (DDI_FAILURE); 1269 } 1270 1271 port = &PPORT; 1272 rval = DDI_SUCCESS; 1273 1274 /* Check driver dump */ 1275 mutex_enter(&EMLXS_PORT_LOCK); 1276 1277 if (hba->flag & FC_DUMP_ACTIVE) { 1278 mutex_exit(&EMLXS_PORT_LOCK); 1279 1280 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg, 1281 "emlxs_detach: Driver busy. Driver dump active."); 1282 1283 return (DDI_FAILURE); 1284 } 1285 1286 #ifdef SFCT_SUPPORT 1287 if (port->tgt_mode && ((port->fct_flags & FCT_STATE_PORT_ONLINE) || 1288 (port->fct_flags & FCT_STATE_NOT_ACKED))) { 1289 mutex_exit(&EMLXS_PORT_LOCK); 1290 1291 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg, 1292 "emlxs_detach: Driver busy. Target mode active."); 1293 1294 return (DDI_FAILURE); 1295 } 1296 #endif /* SFCT_SUPPORT */ 1297 1298 if (port->ini_mode && (port->flag & EMLXS_PORT_BOUND)) { 1299 mutex_exit(&EMLXS_PORT_LOCK); 1300 1301 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg, 1302 "emlxs_detach: Driver busy. Initiator mode active."); 1303 1304 return (DDI_FAILURE); 1305 } 1306 1307 hba->flag &= ~FC_DUMP_SAFE; 1308 1309 mutex_exit(&EMLXS_PORT_LOCK); 1310 1311 switch (cmd) { 1312 case DDI_DETACH: 1313 1314 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg, 1315 "DDI_DETACH"); 1316 1317 rval = emlxs_hba_detach(dip); 1318 1319 if (rval != DDI_SUCCESS) { 1320 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg, 1321 "Unable to detach."); 1322 } 1323 break; 1324 1325 1326 case DDI_PM_SUSPEND: 1327 1328 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg, 1329 "DDI_PM_SUSPEND"); 1330 1331 /* This will suspend the driver */ 1332 rval = emlxs_pm_lower_power(dip); 1333 1334 if (rval != DDI_SUCCESS) { 1335 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg, 1336 "Unable to lower power."); 1337 } 1338 1339 break; 1340 1341 1342 case DDI_SUSPEND: 1343 1344 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg, 1345 "DDI_SUSPEND"); 1346 1347 /* Suspend the driver */ 1348 rval = emlxs_hba_suspend(dip); 1349 1350 if (rval != DDI_SUCCESS) { 1351 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg, 1352 "Unable to suspend driver."); 1353 } 1354 break; 1355 1356 1357 default: 1358 cmn_err(CE_WARN, "?%s: Detach: Unknown cmd received. 
cmd=%x", 1359 DRIVER_NAME, cmd); 1360 rval = DDI_FAILURE; 1361 } 1362 1363 if (rval == DDI_FAILURE) { 1364 /* Re-Enable driver dump feature */ 1365 mutex_enter(&EMLXS_PORT_LOCK); 1366 hba->flag |= FC_DUMP_SAFE; 1367 mutex_exit(&EMLXS_PORT_LOCK); 1368 } 1369 1370 return (rval); 1371 1372 } /* emlxs_detach() */ 1373 1374 1375 /* EMLXS_PORT_LOCK must be held when calling this */ 1376 extern void 1377 emlxs_port_init(emlxs_port_t *port) 1378 { 1379 emlxs_hba_t *hba = HBA; 1380 1381 /* Initialize the base node */ 1382 bzero((caddr_t)&port->node_base, sizeof (NODELIST)); 1383 port->node_base.nlp_Rpi = 0; 1384 port->node_base.nlp_DID = 0xffffff; 1385 port->node_base.nlp_list_next = NULL; 1386 port->node_base.nlp_list_prev = NULL; 1387 port->node_base.nlp_active = 1; 1388 port->node_base.nlp_base = 1; 1389 port->node_count = 0; 1390 1391 if (!(port->flag & EMLXS_PORT_ENABLE)) { 1392 uint8_t dummy_wwn[8] = 1393 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 1394 1395 bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwnn, 1396 sizeof (NAME_TYPE)); 1397 bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwpn, 1398 sizeof (NAME_TYPE)); 1399 } 1400 1401 if (!(port->flag & EMLXS_PORT_CONFIG)) { 1402 (void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn, 256); 1403 (void) strncpy((caddr_t)port->spn, (caddr_t)hba->spn, 256); 1404 } 1405 1406 bcopy((caddr_t)&hba->sparam, (caddr_t)&port->sparam, 1407 sizeof (SERV_PARM)); 1408 bcopy((caddr_t)&port->wwnn, (caddr_t)&port->sparam.nodeName, 1409 sizeof (NAME_TYPE)); 1410 bcopy((caddr_t)&port->wwpn, (caddr_t)&port->sparam.portName, 1411 sizeof (NAME_TYPE)); 1412 1413 return; 1414 1415 } /* emlxs_port_init() */ 1416 1417 1418 void 1419 emlxs_disable_pcie_ce_err(emlxs_hba_t *hba) 1420 { 1421 uint16_t reg; 1422 1423 if (!hba->pci_cap_offset[PCI_CAP_ID_PCI_E]) { 1424 return; 1425 } 1426 1427 /* Turn off the Correctable Error Reporting */ 1428 /* (the Device Control Register, bit 0). 
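 *
 * hba->pci_cap_offset[PCI_CAP_ID_PCI_E] is assumed to have been located
 * earlier by walking the standard PCI capability list, roughly
 * (illustrative only, with h being the PCI config access handle):
 *
 *	uint8_t ptr = pci_config_get8(h, PCI_CONF_CAP_PTR);
 *	while ((ptr != 0) && (ptr != 0xff)) {
 *		if (pci_config_get8(h, ptr) == PCI_CAP_ID_PCI_E)
 *			break;			(PCIe capability found)
 *		ptr = pci_config_get8(h, ptr + 1);
 *	}
 *
 * The driver caches that offset elsewhere; only the cached value is used
 * below.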
*/ 1429 reg = ddi_get16(hba->pci_acc_handle, 1430 (uint16_t *)(hba->pci_addr + 1431 hba->pci_cap_offset[PCI_CAP_ID_PCI_E] + 1432 PCIE_DEVCTL_OFFSET)); 1433 1434 reg &= ~1; 1435 1436 (void) ddi_put16(hba->pci_acc_handle, 1437 (uint16_t *)(hba->pci_addr + 1438 hba->pci_cap_offset[PCI_CAP_ID_PCI_E] + 1439 PCIE_DEVCTL_OFFSET), 1440 reg); 1441 1442 return; 1443 1444 } /* emlxs_disable_pcie_ce_err() */ 1445 1446 1447 /* 1448 * emlxs_fca_bind_port 1449 * 1450 * Arguments: 1451 * 1452 * dip: the dev_info pointer for the ddiinst 1453 * port_info: pointer to info handed back to the transport 1454 * bind_info: pointer to info from the transport 1455 * 1456 * Return values: a port handle for this port, NULL for failure 1457 * 1458 */ 1459 static opaque_t 1460 emlxs_fca_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info, 1461 fc_fca_bind_info_t *bind_info) 1462 { 1463 emlxs_hba_t *hba; 1464 emlxs_port_t *port; 1465 emlxs_port_t *vport; 1466 int ddiinst; 1467 emlxs_vpd_t *vpd; 1468 emlxs_config_t *cfg; 1469 char *dptr; 1470 char buffer[16]; 1471 uint32_t length; 1472 uint32_t len; 1473 char topology[32]; 1474 char linkspeed[32]; 1475 1476 ddiinst = ddi_get_instance(dip); 1477 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 1478 port = &PPORT; 1479 1480 ddiinst = hba->ddiinst; 1481 vpd = &VPD; 1482 cfg = &CFG; 1483 1484 mutex_enter(&EMLXS_PORT_LOCK); 1485 1486 if (bind_info->port_num > 0) { 1487 #if (EMLXS_MODREV >= EMLXS_MODREV5) 1488 if (!(hba->flag & FC_NPIV_ENABLED) || 1489 !(bind_info->port_npiv) || 1490 (bind_info->port_num > hba->vpi_max)) 1491 #elif (EMLXS_MODREV >= EMLXS_MODREV3) 1492 if (!(hba->flag & FC_NPIV_ENABLED) || 1493 (bind_info->port_num > hba->vpi_high)) 1494 #endif 1495 { 1496 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 1497 "fca_bind_port: Port %d not supported.", 1498 bind_info->port_num); 1499 1500 mutex_exit(&EMLXS_PORT_LOCK); 1501 1502 port_info->pi_error = FC_OUTOFBOUNDS; 1503 return (NULL); 1504 } 1505 } 1506 1507 /* Get true port pointer */ 1508 port = &VPORT(bind_info->port_num); 1509 1510 if (port->tgt_mode) { 1511 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 1512 "fca_bind_port: Port %d is in target mode.", 1513 bind_info->port_num); 1514 1515 mutex_exit(&EMLXS_PORT_LOCK); 1516 1517 port_info->pi_error = FC_OUTOFBOUNDS; 1518 return (NULL); 1519 } 1520 1521 if (!port->ini_mode) { 1522 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 1523 "fca_bind_port: Port %d is not in initiator mode.", 1524 bind_info->port_num); 1525 1526 mutex_exit(&EMLXS_PORT_LOCK); 1527 1528 port_info->pi_error = FC_OUTOFBOUNDS; 1529 return (NULL); 1530 } 1531 1532 /* Make sure the port is not already bound to the transport */ 1533 if (port->flag & EMLXS_PORT_BOUND) { 1534 1535 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 1536 "fca_bind_port: Port %d already bound. 
flag=%x", 1537 bind_info->port_num, port->flag); 1538 1539 mutex_exit(&EMLXS_PORT_LOCK); 1540 1541 port_info->pi_error = FC_ALREADY; 1542 return (NULL); 1543 } 1544 1545 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 1546 "fca_bind_port: Port %d: port_info=%p bind_info=%p", 1547 bind_info->port_num, port_info, bind_info); 1548 1549 #if (EMLXS_MODREV >= EMLXS_MODREV5) 1550 if (bind_info->port_npiv) { 1551 bcopy((caddr_t)&bind_info->port_nwwn, (caddr_t)&port->wwnn, 1552 sizeof (NAME_TYPE)); 1553 bcopy((caddr_t)&bind_info->port_pwwn, (caddr_t)&port->wwpn, 1554 sizeof (NAME_TYPE)); 1555 if (port->snn[0] == 0) { 1556 (void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn, 1557 256); 1558 } 1559 1560 if (port->spn[0] == 0) { 1561 (void) sprintf((caddr_t)port->spn, "%s VPort-%d", 1562 (caddr_t)hba->spn, port->vpi); 1563 } 1564 port->flag |= (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE); 1565 } 1566 #endif /* >= EMLXS_MODREV5 */ 1567 1568 /* 1569 * Restricted login should apply both physical and 1570 * virtual ports. 1571 */ 1572 if (cfg[CFG_VPORT_RESTRICTED].current) { 1573 port->flag |= EMLXS_PORT_RESTRICTED; 1574 } 1575 1576 /* Perform generic port initialization */ 1577 emlxs_port_init(port); 1578 1579 /* Perform SFS specific initialization */ 1580 port->ulp_handle = bind_info->port_handle; 1581 port->ulp_statec_cb = bind_info->port_statec_cb; 1582 port->ulp_unsol_cb = bind_info->port_unsol_cb; 1583 port->ub_count = EMLXS_UB_TOKEN_OFFSET; 1584 port->ub_pool = NULL; 1585 1586 /* Update the port info structure */ 1587 1588 /* Set the topology and state */ 1589 if ((hba->state < FC_LINK_UP) || 1590 ((port->vpi > 0) && (!(port->flag & EMLXS_PORT_ENABLE) || 1591 !(hba->flag & FC_NPIV_SUPPORTED)))) { 1592 port_info->pi_port_state = FC_STATE_OFFLINE; 1593 port_info->pi_topology = FC_TOP_UNKNOWN; 1594 } else if ((hba->sli_mode == EMLXS_HBA_SLI4_MODE) && 1595 (port->VPIobj.state == VPI_STATE_OFFLINE)) { 1596 port_info->pi_port_state = FC_STATE_OFFLINE; 1597 port_info->pi_topology = FC_TOP_UNKNOWN; 1598 } 1599 #ifdef MENLO_SUPPORT 1600 else if (hba->flag & FC_MENLO_MODE) { 1601 port_info->pi_port_state = FC_STATE_OFFLINE; 1602 port_info->pi_topology = FC_TOP_UNKNOWN; 1603 } 1604 #endif /* MENLO_SUPPORT */ 1605 else { 1606 /* Check for loop topology */ 1607 if (hba->topology == TOPOLOGY_LOOP) { 1608 port_info->pi_port_state = FC_STATE_LOOP; 1609 (void) strcpy(topology, ", loop"); 1610 1611 if (hba->flag & FC_FABRIC_ATTACHED) { 1612 port_info->pi_topology = FC_TOP_PUBLIC_LOOP; 1613 } else { 1614 port_info->pi_topology = FC_TOP_PRIVATE_LOOP; 1615 } 1616 } else { 1617 port_info->pi_topology = FC_TOP_FABRIC; 1618 port_info->pi_port_state = FC_STATE_ONLINE; 1619 (void) strcpy(topology, ", fabric"); 1620 } 1621 1622 /* Set the link speed */ 1623 switch (hba->linkspeed) { 1624 case 0: 1625 (void) strcpy(linkspeed, "Gb"); 1626 port_info->pi_port_state |= FC_STATE_1GBIT_SPEED; 1627 break; 1628 1629 case LA_1GHZ_LINK: 1630 (void) strcpy(linkspeed, "1Gb"); 1631 port_info->pi_port_state |= FC_STATE_1GBIT_SPEED; 1632 break; 1633 case LA_2GHZ_LINK: 1634 (void) strcpy(linkspeed, "2Gb"); 1635 port_info->pi_port_state |= FC_STATE_2GBIT_SPEED; 1636 break; 1637 case LA_4GHZ_LINK: 1638 (void) strcpy(linkspeed, "4Gb"); 1639 port_info->pi_port_state |= FC_STATE_4GBIT_SPEED; 1640 break; 1641 case LA_8GHZ_LINK: 1642 (void) strcpy(linkspeed, "8Gb"); 1643 port_info->pi_port_state |= FC_STATE_8GBIT_SPEED; 1644 break; 1645 case LA_10GHZ_LINK: 1646 (void) strcpy(linkspeed, "10Gb"); 1647 port_info->pi_port_state |= FC_STATE_10GBIT_SPEED; 
1648 break; 1649 default: 1650 (void) sprintf(linkspeed, "unknown(0x%x)", 1651 hba->linkspeed); 1652 break; 1653 } 1654 1655 /* Adjusting port context for link up messages */ 1656 vport = port; 1657 port = &PPORT; 1658 if (vport->vpi == 0) { 1659 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg, "%s%s", 1660 linkspeed, topology); 1661 } else if (!(hba->flag & FC_NPIV_LINKUP)) { 1662 hba->flag |= FC_NPIV_LINKUP; 1663 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_npiv_link_up_msg, 1664 "%s%s", linkspeed, topology); 1665 } 1666 port = vport; 1667 1668 } 1669 1670 /* PCIE Correctable Error Reporting workaround */ 1671 if (((hba->model_info.chip == EMLXS_BE2_CHIP) || 1672 (hba->model_info.chip == EMLXS_BE3_CHIP)) && 1673 (bind_info->port_num == 0)) { 1674 emlxs_disable_pcie_ce_err(hba); 1675 } 1676 1677 /* Save initial state */ 1678 port->ulp_statec = port_info->pi_port_state; 1679 1680 /* 1681 * The transport needs a copy of the common service parameters 1682 * for this port. The transport can get any updates through 1683 * the getcap entry point. 1684 */ 1685 bcopy((void *) &port->sparam, 1686 (void *) &port_info->pi_login_params.common_service, 1687 sizeof (SERV_PARM)); 1688 1689 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 1690 /* Swap the service parameters for ULP */ 1691 emlxs_swap_service_params((SERV_PARM *)&port_info->pi_login_params. 1692 common_service); 1693 #endif /* EMLXS_MODREV2X */ 1694 1695 port_info->pi_login_params.common_service.btob_credit = 0xffff; 1696 1697 bcopy((void *) &port->wwnn, 1698 (void *) &port_info->pi_login_params.node_ww_name, 1699 sizeof (NAME_TYPE)); 1700 1701 bcopy((void *) &port->wwpn, 1702 (void *) &port_info->pi_login_params.nport_ww_name, 1703 sizeof (NAME_TYPE)); 1704 1705 /* 1706 * We need to turn off CLASS2 support. 1707 * Otherwise, FC transport will use CLASS2 as default class 1708 * and never try with CLASS3. 
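 *
 * (The 0x0080/0x8000 masks cleared below correspond to the class-validity
 * bit of each class's service-parameter options word; which form applies
 * depends on the byte order of the login-params layout for this MODREV.)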
1709 */ 1710 #if (EMLXS_MODREV >= EMLXS_MODREV3) 1711 #if (EMLXS_MODREVX >= EMLXS_MODREV3X) 1712 if ((port_info->pi_login_params.class_1.class_opt) & 0x0080) { 1713 port_info->pi_login_params.class_1.class_opt &= ~0x0080; 1714 } 1715 1716 if ((port_info->pi_login_params.class_2.class_opt) & 0x0080) { 1717 port_info->pi_login_params.class_2.class_opt &= ~0x0080; 1718 } 1719 #else /* EMLXS_SPARC or EMLXS_MODREV2X */ 1720 if ((port_info->pi_login_params.class_1.class_opt) & 0x8000) { 1721 port_info->pi_login_params.class_1.class_opt &= ~0x8000; 1722 } 1723 1724 if ((port_info->pi_login_params.class_2.class_opt) & 0x8000) { 1725 port_info->pi_login_params.class_2.class_opt &= ~0x8000; 1726 } 1727 #endif /* >= EMLXS_MODREV3X */ 1728 #endif /* >= EMLXS_MODREV3 */ 1729 1730 1731 #if (EMLXS_MODREV <= EMLXS_MODREV2) 1732 if ((port_info->pi_login_params.class_1.data[0]) & 0x80) { 1733 port_info->pi_login_params.class_1.data[0] &= ~0x80; 1734 } 1735 1736 if ((port_info->pi_login_params.class_2.data[0]) & 0x80) { 1737 port_info->pi_login_params.class_2.data[0] &= ~0x80; 1738 } 1739 #endif /* <= EMLXS_MODREV2 */ 1740 1741 /* Additional parameters */ 1742 port_info->pi_s_id.port_id = port->did; 1743 port_info->pi_s_id.priv_lilp_posit = 0; 1744 port_info->pi_hard_addr.hard_addr = cfg[CFG_ASSIGN_ALPA].current; 1745 1746 /* Initialize the RNID parameters */ 1747 bzero(&port_info->pi_rnid_params, sizeof (port_info->pi_rnid_params)); 1748 1749 (void) sprintf((char *)port_info->pi_rnid_params.params.global_id, 1750 "%01x%01x%02x%02x%02x%02x%02x%02x%02x", hba->wwpn.nameType, 1751 hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0], 1752 hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3], 1753 hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]); 1754 1755 port_info->pi_rnid_params.params.unit_type = RNID_HBA; 1756 port_info->pi_rnid_params.params.port_id = port->did; 1757 port_info->pi_rnid_params.params.ip_version = RNID_IPV4; 1758 1759 /* Initialize the port attributes */ 1760 bzero(&port_info->pi_attrs, sizeof (port_info->pi_attrs)); 1761 1762 (void) strcpy(port_info->pi_attrs.manufacturer, "Emulex"); 1763 1764 port_info->pi_rnid_params.status = FC_SUCCESS; 1765 1766 (void) strcpy(port_info->pi_attrs.serial_number, vpd->serial_num); 1767 1768 (void) sprintf(port_info->pi_attrs.firmware_version, "%s (%s)", 1769 vpd->fw_version, vpd->fw_label); 1770 1771 #ifdef EMLXS_I386 1772 (void) sprintf(port_info->pi_attrs.option_rom_version, 1773 "Boot:%s", vpd->boot_version); 1774 #else /* EMLXS_SPARC */ 1775 (void) sprintf(port_info->pi_attrs.option_rom_version, 1776 "Boot:%s Fcode:%s", vpd->boot_version, vpd->fcode_version); 1777 #endif /* EMLXS_I386 */ 1778 1779 1780 (void) sprintf(port_info->pi_attrs.driver_version, "%s (%s)", 1781 emlxs_version, emlxs_revision); 1782 1783 (void) strcpy(port_info->pi_attrs.driver_name, DRIVER_NAME); 1784 1785 port_info->pi_attrs.vendor_specific_id = 1786 ((hba->model_info.device_id << 16) | PCI_VENDOR_ID_EMULEX); 1787 1788 port_info->pi_attrs.supported_cos = LE_SWAP32(FC_NS_CLASS3); 1789 1790 port_info->pi_attrs.max_frame_size = FF_FRAME_SIZE; 1791 1792 #if (EMLXS_MODREV >= EMLXS_MODREV5) 1793 1794 port_info->pi_rnid_params.params.num_attached = 0; 1795 1796 /* 1797 * Copy the serial number string (right most 16 chars) into the right 1798 * justified local buffer 1799 */ 1800 bzero(buffer, sizeof (buffer)); 1801 length = strlen(vpd->serial_num); 1802 len = (length > 16) ? 
16 : length; 1803 bcopy(&vpd->serial_num[(length - len)], 1804 &buffer[(sizeof (buffer) - len)], len); 1805 1806 port_info->pi_attrs.hba_fru_details.port_index = vpd->port_index; 1807 1808 #endif /* >= EMLXS_MODREV5 */ 1809 1810 #if ((EMLXS_MODREV == EMLXS_MODREV3) || (EMLXS_MODREV == EMLXS_MODREV4)) 1811 1812 port_info->pi_rnid_params.params.num_attached = 0; 1813 1814 if (hba->flag & FC_NPIV_ENABLED) { 1815 uint8_t byte; 1816 uint8_t *wwpn; 1817 uint32_t i; 1818 uint32_t j; 1819 1820 /* Copy the WWPN as a string into the local buffer */ 1821 wwpn = (uint8_t *)&hba->wwpn; 1822 for (i = 0; i < 16; i++) { 1823 byte = *wwpn++; 1824 j = ((byte & 0xf0) >> 4); 1825 if (j <= 9) { 1826 buffer[i] = 1827 (char)((uint8_t)'0' + (uint8_t)j); 1828 } else { 1829 buffer[i] = 1830 (char)((uint8_t)'A' + (uint8_t)(j - 1831 10)); 1832 } 1833 1834 i++; 1835 j = (byte & 0xf); 1836 if (j <= 9) { 1837 buffer[i] = 1838 (char)((uint8_t)'0' + (uint8_t)j); 1839 } else { 1840 buffer[i] = 1841 (char)((uint8_t)'A' + (uint8_t)(j - 1842 10)); 1843 } 1844 } 1845 1846 port_info->pi_attrs.hba_fru_details.port_index = port->vpi; 1847 } else { 1848 /* Copy the serial number string (right most 16 chars) */ 1849 /* into the right justified local buffer */ 1850 bzero(buffer, sizeof (buffer)); 1851 length = strlen(vpd->serial_num); 1852 len = (length > 16) ? 16 : length; 1853 bcopy(&vpd->serial_num[(length - len)], 1854 &buffer[(sizeof (buffer) - len)], len); 1855 1856 port_info->pi_attrs.hba_fru_details.port_index = 1857 vpd->port_index; 1858 } 1859 1860 #endif /* == EMLXS_MODREV3 || EMLXS_MODREV4 */ 1861 1862 #if (EMLXS_MODREV >= EMLXS_MODREV3) 1863 1864 dptr = (char *)&port_info->pi_attrs.hba_fru_details.high; 1865 dptr[0] = buffer[0]; 1866 dptr[1] = buffer[1]; 1867 dptr[2] = buffer[2]; 1868 dptr[3] = buffer[3]; 1869 dptr[4] = buffer[4]; 1870 dptr[5] = buffer[5]; 1871 dptr[6] = buffer[6]; 1872 dptr[7] = buffer[7]; 1873 port_info->pi_attrs.hba_fru_details.high = 1874 LE_SWAP64(port_info->pi_attrs.hba_fru_details.high); 1875 1876 dptr = (char *)&port_info->pi_attrs.hba_fru_details.low; 1877 dptr[0] = buffer[8]; 1878 dptr[1] = buffer[9]; 1879 dptr[2] = buffer[10]; 1880 dptr[3] = buffer[11]; 1881 dptr[4] = buffer[12]; 1882 dptr[5] = buffer[13]; 1883 dptr[6] = buffer[14]; 1884 dptr[7] = buffer[15]; 1885 port_info->pi_attrs.hba_fru_details.low = 1886 LE_SWAP64(port_info->pi_attrs.hba_fru_details.low); 1887 1888 #endif /* >= EMLXS_MODREV3 */ 1889 1890 #if (EMLXS_MODREV >= EMLXS_MODREV4) 1891 (void) strncpy((caddr_t)port_info->pi_attrs.sym_node_name, 1892 (caddr_t)port->snn, FCHBA_SYMB_NAME_LEN); 1893 (void) strncpy((caddr_t)port_info->pi_attrs.sym_port_name, 1894 (caddr_t)port->spn, FCHBA_SYMB_NAME_LEN); 1895 #endif /* >= EMLXS_MODREV4 */ 1896 1897 (void) sprintf(port_info->pi_attrs.hardware_version, "%x", vpd->biuRev); 1898 1899 /* Set the hba speed limit */ 1900 if (vpd->link_speed & LMT_10GB_CAPABLE) { 1901 port_info->pi_attrs.supported_speed |= 1902 FC_HBA_PORTSPEED_10GBIT; 1903 } 1904 if (vpd->link_speed & LMT_8GB_CAPABLE) { 1905 port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_8GBIT; 1906 } 1907 if (vpd->link_speed & LMT_4GB_CAPABLE) { 1908 port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_4GBIT; 1909 } 1910 if (vpd->link_speed & LMT_2GB_CAPABLE) { 1911 port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_2GBIT; 1912 } 1913 if (vpd->link_speed & LMT_1GB_CAPABLE) { 1914 port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_1GBIT; 1915 } 1916 1917 /* Set the hba model info */ 1918 (void) 
strcpy(port_info->pi_attrs.model, hba->model_info.model); 1919 (void) strcpy(port_info->pi_attrs.model_description, 1920 hba->model_info.model_desc); 1921 1922 1923 /* Log information */ 1924 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1925 "Bind info: port_num = %d", bind_info->port_num); 1926 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1927 "Bind info: port_handle = %p", bind_info->port_handle); 1928 1929 #if (EMLXS_MODREV >= EMLXS_MODREV5) 1930 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1931 "Bind info: port_npiv = %d", bind_info->port_npiv); 1932 #endif /* >= EMLXS_MODREV5 */ 1933 1934 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1935 "Port info: pi_topology = %x", port_info->pi_topology); 1936 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1937 "Port info: pi_error = %x", port_info->pi_error); 1938 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1939 "Port info: pi_port_state = %x", port_info->pi_port_state); 1940 1941 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1942 "Port info: port_id = %x", port_info->pi_s_id.port_id); 1943 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1944 "Port info: priv_lilp_posit = %x", 1945 port_info->pi_s_id.priv_lilp_posit); 1946 1947 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1948 "Port info: hard_addr = %x", 1949 port_info->pi_hard_addr.hard_addr); 1950 1951 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1952 "Port info: rnid.status = %x", 1953 port_info->pi_rnid_params.status); 1954 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1955 "Port info: rnid.global_id = %16s", 1956 port_info->pi_rnid_params.params.global_id); 1957 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1958 "Port info: rnid.unit_type = %x", 1959 port_info->pi_rnid_params.params.unit_type); 1960 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1961 "Port info: rnid.port_id = %x", 1962 port_info->pi_rnid_params.params.port_id); 1963 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1964 "Port info: rnid.num_attached = %x", 1965 port_info->pi_rnid_params.params.num_attached); 1966 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1967 "Port info: rnid.ip_version = %x", 1968 port_info->pi_rnid_params.params.ip_version); 1969 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1970 "Port info: rnid.udp_port = %x", 1971 port_info->pi_rnid_params.params.udp_port); 1972 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1973 "Port info: rnid.ip_addr = %16s", 1974 port_info->pi_rnid_params.params.ip_addr); 1975 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1976 "Port info: rnid.spec_id_resv = %x", 1977 port_info->pi_rnid_params.params.specific_id_resv); 1978 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1979 "Port info: rnid.topo_flags = %x", 1980 port_info->pi_rnid_params.params.topo_flags); 1981 1982 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1983 "Port info: manufacturer = %s", 1984 port_info->pi_attrs.manufacturer); 1985 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1986 "Port info: serial_num = %s", 1987 port_info->pi_attrs.serial_number); 1988 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1989 "Port info: model = %s", port_info->pi_attrs.model); 1990 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1991 "Port info: model_description = %s", 1992 port_info->pi_attrs.model_description); 1993 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1994 "Port info: hardware_version = %s", 1995 port_info->pi_attrs.hardware_version); 1996 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1997 "Port 
info: driver_version = %s", 1998 port_info->pi_attrs.driver_version); 1999 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2000 "Port info: option_rom_version = %s", 2001 port_info->pi_attrs.option_rom_version); 2002 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2003 "Port info: firmware_version = %s", 2004 port_info->pi_attrs.firmware_version); 2005 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2006 "Port info: driver_name = %s", 2007 port_info->pi_attrs.driver_name); 2008 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2009 "Port info: vendor_specific_id = %x", 2010 port_info->pi_attrs.vendor_specific_id); 2011 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2012 "Port info: supported_cos = %x", 2013 port_info->pi_attrs.supported_cos); 2014 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2015 "Port info: supported_speed = %x", 2016 port_info->pi_attrs.supported_speed); 2017 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2018 "Port info: max_frame_size = %x", 2019 port_info->pi_attrs.max_frame_size); 2020 2021 #if (EMLXS_MODREV >= EMLXS_MODREV3) 2022 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2023 "Port info: fru_port_index = %x", 2024 port_info->pi_attrs.hba_fru_details.port_index); 2025 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2026 "Port info: fru_high = %llx", 2027 port_info->pi_attrs.hba_fru_details.high); 2028 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2029 "Port info: fru_low = %llx", 2030 port_info->pi_attrs.hba_fru_details.low); 2031 #endif /* >= EMLXS_MODREV3 */ 2032 2033 #if (EMLXS_MODREV >= EMLXS_MODREV4) 2034 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2035 "Port info: sym_node_name = %s", 2036 port_info->pi_attrs.sym_node_name); 2037 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2038 "Port info: sym_port_name = %s", 2039 port_info->pi_attrs.sym_port_name); 2040 #endif /* >= EMLXS_MODREV4 */ 2041 2042 /* Set the bound flag */ 2043 port->flag |= EMLXS_PORT_BOUND; 2044 hba->num_of_ports++; 2045 2046 mutex_exit(&EMLXS_PORT_LOCK); 2047 2048 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) { 2049 (void) emlxs_vpi_port_bind_notify(port); 2050 } 2051 2052 return ((opaque_t)port); 2053 2054 } /* emlxs_fca_bind_port() */ 2055 2056 2057 static void 2058 emlxs_fca_unbind_port(opaque_t fca_port_handle) 2059 { 2060 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2061 emlxs_hba_t *hba = HBA; 2062 2063 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2064 "fca_unbind_port: port=%p", port); 2065 2066 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) { 2067 (void) emlxs_vpi_port_unbind_notify(port, 1); 2068 } 2069 2070 /* Destroy & flush all port nodes, if they exist */ 2071 if (port->node_count) { 2072 (void) emlxs_mb_unreg_node(port, 0, 0, 0, 0); 2073 } 2074 2075 #if (EMLXS_MODREV >= EMLXS_MODREV5) 2076 if ((hba->sli_mode <= EMLXS_HBA_SLI3_MODE) && 2077 (hba->flag & FC_NPIV_ENABLED) && 2078 (port->flag & (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE))) { 2079 (void) emlxs_mb_unreg_vpi(port); 2080 } 2081 #endif 2082 2083 mutex_enter(&EMLXS_PORT_LOCK); 2084 2085 if (!(port->flag & EMLXS_PORT_BOUND)) { 2086 mutex_exit(&EMLXS_PORT_LOCK); 2087 return; 2088 } 2089 2090 port->flag &= ~EMLXS_PORT_BOUND; 2091 hba->num_of_ports--; 2092 2093 port->ulp_handle = 0; 2094 port->ulp_statec = FC_STATE_OFFLINE; 2095 port->ulp_statec_cb = NULL; 2096 port->ulp_unsol_cb = NULL; 2097 2098 mutex_exit(&EMLXS_PORT_LOCK); 2099 2100 return; 2101 2102 } /* emlxs_fca_unbind_port() */ 2103 2104 2105 /*ARGSUSED*/ 2106 extern int 2107 emlxs_fca_pkt_init(opaque_t 
fca_port_handle, fc_packet_t *pkt, int32_t sleep) 2108 { 2109 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2110 emlxs_hba_t *hba = HBA; 2111 emlxs_buf_t *sbp = (emlxs_buf_t *)pkt->pkt_fca_private; 2112 2113 if (!sbp) { 2114 return (FC_FAILURE); 2115 } 2116 bzero((void *)sbp, sizeof (emlxs_buf_t)); 2117 2118 mutex_init(&sbp->mtx, NULL, MUTEX_DRIVER, DDI_INTR_PRI(hba->intr_arg)); 2119 sbp->pkt_flags = 2120 PACKET_VALID | PACKET_ULP_OWNED; 2121 sbp->port = port; 2122 sbp->pkt = pkt; 2123 sbp->iocbq.sbp = sbp; 2124 2125 return (FC_SUCCESS); 2126 2127 } /* emlxs_fca_pkt_init() */ 2128 2129 2130 2131 static void 2132 emlxs_initialize_pkt(emlxs_port_t *port, emlxs_buf_t *sbp) 2133 { 2134 emlxs_hba_t *hba = HBA; 2135 emlxs_config_t *cfg = &CFG; 2136 fc_packet_t *pkt = PRIV2PKT(sbp); 2137 uint32_t *iptr; 2138 2139 mutex_enter(&sbp->mtx); 2140 2141 /* Reinitialize */ 2142 sbp->pkt = pkt; 2143 sbp->port = port; 2144 sbp->bmp = NULL; 2145 sbp->pkt_flags &= (PACKET_VALID | PACKET_ALLOCATED); 2146 sbp->iotag = 0; 2147 sbp->ticks = 0; 2148 sbp->abort_attempts = 0; 2149 sbp->fpkt = NULL; 2150 sbp->flush_count = 0; 2151 sbp->next = NULL; 2152 2153 if (!port->tgt_mode) { 2154 sbp->node = NULL; 2155 sbp->did = 0; 2156 sbp->lun = EMLXS_LUN_NONE; 2157 sbp->class = 0; 2158 sbp->class = 0; 2159 sbp->channel = NULL; 2160 } 2161 2162 bzero((void *)&sbp->iocbq, sizeof (IOCBQ)); 2163 sbp->iocbq.sbp = sbp; 2164 2165 if ((pkt->pkt_tran_flags & FC_TRAN_NO_INTR) || !pkt->pkt_comp || 2166 ddi_in_panic()) { 2167 sbp->pkt_flags |= PACKET_POLLED; 2168 } 2169 2170 /* Prepare the fc packet */ 2171 pkt->pkt_state = FC_PKT_SUCCESS; 2172 pkt->pkt_reason = 0; 2173 pkt->pkt_action = 0; 2174 pkt->pkt_expln = 0; 2175 pkt->pkt_data_resid = 0; 2176 pkt->pkt_resp_resid = 0; 2177 2178 /* Make sure all pkt's have a proper timeout */ 2179 if (!cfg[CFG_TIMEOUT_ENABLE].current) { 2180 /* This disables all IOCB on chip timeouts */ 2181 pkt->pkt_timeout = 0x80000000; 2182 } else if (pkt->pkt_timeout == 0 || pkt->pkt_timeout == 0xffffffff) { 2183 pkt->pkt_timeout = 60; 2184 } 2185 2186 /* Clear the response buffer */ 2187 if (pkt->pkt_rsplen) { 2188 /* Check for FCP commands */ 2189 if ((pkt->pkt_tran_type == FC_PKT_FCP_READ) || 2190 (pkt->pkt_tran_type == FC_PKT_FCP_WRITE)) { 2191 iptr = (uint32_t *)pkt->pkt_resp; 2192 iptr[2] = 0; 2193 iptr[3] = 0; 2194 } else { 2195 bzero(pkt->pkt_resp, pkt->pkt_rsplen); 2196 } 2197 } 2198 2199 mutex_exit(&sbp->mtx); 2200 2201 return; 2202 2203 } /* emlxs_initialize_pkt() */ 2204 2205 2206 2207 /* 2208 * We may not need this routine 2209 */ 2210 /*ARGSUSED*/ 2211 extern int 2212 emlxs_fca_pkt_uninit(opaque_t fca_port_handle, fc_packet_t *pkt) 2213 { 2214 emlxs_buf_t *sbp = PKT2PRIV(pkt); 2215 2216 if (!sbp) { 2217 return (FC_FAILURE); 2218 } 2219 2220 if (!(sbp->pkt_flags & PACKET_VALID)) { 2221 return (FC_FAILURE); 2222 } 2223 sbp->pkt_flags &= ~PACKET_VALID; 2224 mutex_destroy(&sbp->mtx); 2225 2226 return (FC_SUCCESS); 2227 2228 } /* emlxs_fca_pkt_uninit() */ 2229 2230 2231 static int 2232 emlxs_fca_get_cap(opaque_t fca_port_handle, char *cap, void *ptr) 2233 { 2234 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2235 emlxs_hba_t *hba = HBA; 2236 int32_t rval; 2237 2238 if (!(port->flag & EMLXS_PORT_BOUND)) { 2239 return (FC_CAP_ERROR); 2240 } 2241 2242 if (strcmp(cap, FC_NODE_WWN) == 0) { 2243 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2244 "fca_get_cap: FC_NODE_WWN"); 2245 2246 bcopy((void *)&hba->wwnn, (void *)ptr, sizeof (NAME_TYPE)); 2247 rval = FC_CAP_FOUND; 2248 2249 } else if 
(strcmp(cap, FC_LOGIN_PARAMS) == 0) { 2250 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2251 "fca_get_cap: FC_LOGIN_PARAMS"); 2252 2253 /* 2254 * We need to turn off CLASS2 support. 2255 * Otherwise, FC transport will use CLASS2 as default class 2256 * and never try with CLASS3. 2257 */ 2258 hba->sparam.cls2.classValid = 0; 2259 2260 bcopy((void *)&hba->sparam, (void *)ptr, sizeof (SERV_PARM)); 2261 2262 rval = FC_CAP_FOUND; 2263 2264 } else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) { 2265 int32_t *num_bufs; 2266 emlxs_config_t *cfg = &CFG; 2267 2268 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2269 "fca_get_cap: FC_CAP_UNSOL_BUF (%d)", 2270 cfg[CFG_UB_BUFS].current); 2271 2272 num_bufs = (int32_t *)ptr; 2273 2274 /* We multiply by MAX_VPORTS because ULP uses a */ 2275 /* formula to calculate ub bufs from this */ 2276 *num_bufs = (cfg[CFG_UB_BUFS].current * MAX_VPORTS); 2277 2278 rval = FC_CAP_FOUND; 2279 2280 } else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) { 2281 int32_t *size; 2282 2283 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2284 "fca_get_cap: FC_CAP_PAYLOAD_SIZE"); 2285 2286 size = (int32_t *)ptr; 2287 *size = -1; 2288 rval = FC_CAP_FOUND; 2289 2290 } else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) { 2291 fc_reset_action_t *action; 2292 2293 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2294 "fca_get_cap: FC_CAP_POST_RESET_BEHAVIOR"); 2295 2296 action = (fc_reset_action_t *)ptr; 2297 *action = FC_RESET_RETURN_ALL; 2298 rval = FC_CAP_FOUND; 2299 2300 } else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) { 2301 fc_dma_behavior_t *behavior; 2302 2303 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2304 "fca_get_cap: FC_CAP_NOSTREAM_ON_UNALIGN_BUF"); 2305 2306 behavior = (fc_dma_behavior_t *)ptr; 2307 *behavior = FC_ALLOW_STREAMING; 2308 rval = FC_CAP_FOUND; 2309 2310 } else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) { 2311 fc_fcp_dma_t *fcp_dma; 2312 2313 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2314 "fca_get_cap: FC_CAP_FCP_DMA"); 2315 2316 fcp_dma = (fc_fcp_dma_t *)ptr; 2317 *fcp_dma = FC_DVMA_SPACE; 2318 rval = FC_CAP_FOUND; 2319 2320 } else { 2321 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2322 "fca_get_cap: Unknown capability. 
[%s]", cap); 2323 2324 rval = FC_CAP_ERROR; 2325 2326 } 2327 2328 return (rval); 2329 2330 } /* emlxs_fca_get_cap() */ 2331 2332 2333 2334 static int 2335 emlxs_fca_set_cap(opaque_t fca_port_handle, char *cap, void *ptr) 2336 { 2337 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2338 2339 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2340 "fca_set_cap: cap=[%s] arg=%p", cap, ptr); 2341 2342 return (FC_CAP_ERROR); 2343 2344 } /* emlxs_fca_set_cap() */ 2345 2346 2347 static opaque_t 2348 emlxs_fca_get_device(opaque_t fca_port_handle, fc_portid_t d_id) 2349 { 2350 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2351 2352 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2353 "fca_get_device: did=%x", d_id.port_id); 2354 2355 return (NULL); 2356 2357 } /* emlxs_fca_get_device() */ 2358 2359 2360 static int32_t 2361 emlxs_fca_notify(opaque_t fca_port_handle, uint32_t cmd) 2362 { 2363 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2364 2365 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, "fca_notify: cmd=%x", 2366 cmd); 2367 2368 return (FC_SUCCESS); 2369 2370 } /* emlxs_fca_notify */ 2371 2372 2373 2374 static int 2375 emlxs_fca_get_map(opaque_t fca_port_handle, fc_lilpmap_t *mapbuf) 2376 { 2377 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2378 emlxs_hba_t *hba = HBA; 2379 uint32_t lilp_length; 2380 2381 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2382 "fca_get_map: mapbuf=%p length=%d (%X,%X,%X,%X)", mapbuf, 2383 port->alpa_map[0], port->alpa_map[1], port->alpa_map[2], 2384 port->alpa_map[3], port->alpa_map[4]); 2385 2386 if (!(port->flag & EMLXS_PORT_BOUND)) { 2387 return (FC_NOMAP); 2388 } 2389 2390 if (hba->topology != TOPOLOGY_LOOP) { 2391 return (FC_NOMAP); 2392 } 2393 2394 /* Check if alpa map is available */ 2395 if (port->alpa_map[0] != 0) { 2396 mapbuf->lilp_magic = MAGIC_LILP; 2397 } else { /* No LILP map available */ 2398 2399 /* Set lilp_magic to MAGIC_LISA and this will */ 2400 /* trigger an ALPA scan in ULP */ 2401 mapbuf->lilp_magic = MAGIC_LISA; 2402 } 2403 2404 mapbuf->lilp_myalpa = port->did; 2405 2406 /* The first byte of the alpa_map is the lilp map length */ 2407 /* Add one to include the lilp length byte itself */ 2408 lilp_length = (uint32_t)port->alpa_map[0] + 1; 2409 2410 /* Make sure the max transfer is 128 bytes */ 2411 if (lilp_length > 128) { 2412 lilp_length = 128; 2413 } 2414 2415 /* We start copying from the lilp_length field */ 2416 /* in order to get a word aligned address */ 2417 bcopy((void *)&port->alpa_map, (void *)&mapbuf->lilp_length, 2418 lilp_length); 2419 2420 return (FC_SUCCESS); 2421 2422 } /* emlxs_fca_get_map() */ 2423 2424 2425 2426 extern int 2427 emlxs_fca_transport(opaque_t fca_port_handle, fc_packet_t *pkt) 2428 { 2429 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2430 emlxs_hba_t *hba = HBA; 2431 emlxs_buf_t *sbp; 2432 uint32_t rval; 2433 uint32_t pkt_flags; 2434 2435 /* Make sure adapter is online */ 2436 if (!(hba->flag & FC_ONLINE_MODE)) { 2437 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg, 2438 "Adapter offline."); 2439 2440 return (FC_OFFLINE); 2441 } 2442 2443 /* Validate packet */ 2444 sbp = PKT2PRIV(pkt); 2445 2446 /* Make sure ULP was told that the port was online */ 2447 if ((port->ulp_statec == FC_STATE_OFFLINE) && 2448 !(sbp->pkt_flags & PACKET_ALLOCATED)) { 2449 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 2450 "Port offline."); 2451 2452 return (FC_OFFLINE); 2453 } 2454 2455 if (sbp->port != port) { 2456 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg, 2457 "Invalid port 
handle. sbp=%p port=%p flags=%x", sbp, 2458 sbp->port, sbp->pkt_flags); 2459 return (FC_BADPACKET); 2460 } 2461 2462 if (!(sbp->pkt_flags & (PACKET_VALID | PACKET_ULP_OWNED))) { 2463 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg, 2464 "Invalid packet flags. sbp=%p port=%p flags=%x", sbp, 2465 sbp->port, sbp->pkt_flags); 2466 return (FC_BADPACKET); 2467 } 2468 #ifdef SFCT_SUPPORT 2469 if (port->tgt_mode && !sbp->fct_cmd && 2470 !(sbp->pkt_flags & PACKET_ALLOCATED)) { 2471 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg, 2472 "Packet blocked. Target mode."); 2473 return (FC_TRANSPORT_ERROR); 2474 } 2475 #endif /* SFCT_SUPPORT */ 2476 2477 #ifdef IDLE_TIMER 2478 emlxs_pm_busy_component(hba); 2479 #endif /* IDLE_TIMER */ 2480 2481 /* Prepare the packet for transport */ 2482 emlxs_initialize_pkt(port, sbp); 2483 2484 /* Save a copy of the pkt flags. */ 2485 /* We will check the polling flag later */ 2486 pkt_flags = sbp->pkt_flags; 2487 2488 /* Send the packet */ 2489 switch (pkt->pkt_tran_type) { 2490 case FC_PKT_FCP_READ: 2491 case FC_PKT_FCP_WRITE: 2492 rval = emlxs_send_fcp_cmd(port, sbp, &pkt_flags); 2493 break; 2494 2495 case FC_PKT_IP_WRITE: 2496 case FC_PKT_BROADCAST: 2497 rval = emlxs_send_ip(port, sbp); 2498 break; 2499 2500 case FC_PKT_EXCHANGE: 2501 switch (pkt->pkt_cmd_fhdr.type) { 2502 case FC_TYPE_SCSI_FCP: 2503 rval = emlxs_send_fcp_cmd(port, sbp, &pkt_flags); 2504 break; 2505 2506 case FC_TYPE_FC_SERVICES: 2507 rval = emlxs_send_ct(port, sbp); 2508 break; 2509 2510 #ifdef MENLO_SUPPORT 2511 case EMLXS_MENLO_TYPE: 2512 rval = emlxs_send_menlo(port, sbp); 2513 break; 2514 #endif /* MENLO_SUPPORT */ 2515 2516 default: 2517 rval = emlxs_send_els(port, sbp); 2518 } 2519 break; 2520 2521 case FC_PKT_OUTBOUND: 2522 switch (pkt->pkt_cmd_fhdr.type) { 2523 #ifdef SFCT_SUPPORT 2524 case FC_TYPE_SCSI_FCP: 2525 rval = emlxs_send_fct_status(port, sbp); 2526 break; 2527 2528 case FC_TYPE_BASIC_LS: 2529 rval = emlxs_send_fct_abort(port, sbp); 2530 break; 2531 #endif /* SFCT_SUPPORT */ 2532 2533 case FC_TYPE_FC_SERVICES: 2534 rval = emlxs_send_ct_rsp(port, sbp); 2535 break; 2536 #ifdef MENLO_SUPPORT 2537 case EMLXS_MENLO_TYPE: 2538 rval = emlxs_send_menlo(port, sbp); 2539 break; 2540 #endif /* MENLO_SUPPORT */ 2541 2542 default: 2543 rval = emlxs_send_els_rsp(port, sbp); 2544 } 2545 break; 2546 2547 default: 2548 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg, 2549 "Unsupported pkt_tran_type. type=%x", pkt->pkt_tran_type); 2550 rval = FC_TRANSPORT_ERROR; 2551 break; 2552 } 2553 2554 /* Check if send was not successful */ 2555 if (rval != FC_SUCCESS) { 2556 /* Return packet to ULP */ 2557 mutex_enter(&sbp->mtx); 2558 sbp->pkt_flags |= PACKET_ULP_OWNED; 2559 mutex_exit(&sbp->mtx); 2560 2561 return (rval); 2562 } 2563 2564 /* Check if this packet should be polled for completion before */ 2565 /* returning. This check must be done with a saved copy of the */ 2566 /* pkt_flags because the packet itself could already be freed from */ 2567 /* memory if it was not polled. 
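	 * A non-polled packet may already have been completed back to ULP
	 * (and freed) by the time the send routine returns, so only this
	 * saved copy of the flags is safe to examine below.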
*/ 2568 if (pkt_flags & PACKET_POLLED) { 2569 emlxs_poll(port, sbp); 2570 } 2571 2572 return (FC_SUCCESS); 2573 2574 } /* emlxs_fca_transport() */ 2575 2576 2577 2578 static void 2579 emlxs_poll(emlxs_port_t *port, emlxs_buf_t *sbp) 2580 { 2581 emlxs_hba_t *hba = HBA; 2582 fc_packet_t *pkt = PRIV2PKT(sbp); 2583 clock_t timeout; 2584 clock_t time; 2585 uint32_t att_bit; 2586 CHANNEL *cp; 2587 int in_panic = 0; 2588 2589 mutex_enter(&EMLXS_PORT_LOCK); 2590 hba->io_poll_count++; 2591 mutex_exit(&EMLXS_PORT_LOCK); 2592 2593 /* Check for panic situation */ 2594 cp = (CHANNEL *)sbp->channel; 2595 2596 if (ddi_in_panic()) { 2597 in_panic = 1; 2598 /* 2599 * In panic situations there will be one thread with 2600 * no interrrupts (hard or soft) and no timers 2601 */ 2602 2603 /* 2604 * We must manually poll everything in this thread 2605 * to keep the driver going. 2606 */ 2607 if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) { 2608 switch (cp->channelno) { 2609 case FC_FCP_RING: 2610 att_bit = HA_R0ATT; 2611 break; 2612 2613 case FC_IP_RING: 2614 att_bit = HA_R1ATT; 2615 break; 2616 2617 case FC_ELS_RING: 2618 att_bit = HA_R2ATT; 2619 break; 2620 2621 case FC_CT_RING: 2622 att_bit = HA_R3ATT; 2623 break; 2624 } 2625 } 2626 2627 /* Keep polling the chip until our IO is completed */ 2628 /* Driver's timer will not function during panics. */ 2629 /* Therefore, timer checks must be performed manually. */ 2630 (void) drv_getparm(LBOLT, &time); 2631 timeout = time + drv_usectohz(1000000); 2632 while (!(sbp->pkt_flags & PACKET_COMPLETED)) { 2633 if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) { 2634 EMLXS_SLI_POLL_INTR(hba, att_bit); 2635 } else { 2636 EMLXS_SLI_POLL_INTR(hba, 0); 2637 } 2638 (void) drv_getparm(LBOLT, &time); 2639 2640 /* Trigger timer checks periodically */ 2641 if (time >= timeout) { 2642 emlxs_timer_checks(hba); 2643 timeout = time + drv_usectohz(1000000); 2644 } 2645 } 2646 } else { 2647 /* Wait for IO completion */ 2648 /* The driver's timer will detect */ 2649 /* any timeout and abort the I/O. */ 2650 mutex_enter(&EMLXS_PKT_LOCK); 2651 while (!(sbp->pkt_flags & PACKET_COMPLETED)) { 2652 cv_wait(&EMLXS_PKT_CV, &EMLXS_PKT_LOCK); 2653 } 2654 mutex_exit(&EMLXS_PKT_LOCK); 2655 } 2656 2657 /* Check for fcp reset pkt */ 2658 if (sbp->pkt_flags & PACKET_FCP_RESET) { 2659 if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) { 2660 /* Flush the IO's on the chipq */ 2661 (void) emlxs_chipq_node_flush(port, 2662 &hba->chan[hba->channel_fcp], 2663 sbp->node, sbp); 2664 } else { 2665 /* Flush the IO's on the chipq for this lun */ 2666 (void) emlxs_chipq_lun_flush(port, 2667 sbp->node, sbp->lun, sbp); 2668 } 2669 2670 if (sbp->flush_count == 0) { 2671 emlxs_node_open(port, sbp->node, hba->channel_fcp); 2672 goto done; 2673 } 2674 2675 /* Set the timeout so the flush has time to complete */ 2676 timeout = emlxs_timeout(hba, 60); 2677 (void) drv_getparm(LBOLT, &time); 2678 while ((time < timeout) && sbp->flush_count > 0) { 2679 delay(drv_usectohz(500000)); 2680 (void) drv_getparm(LBOLT, &time); 2681 } 2682 2683 if (sbp->flush_count == 0) { 2684 emlxs_node_open(port, sbp->node, hba->channel_fcp); 2685 goto done; 2686 } 2687 2688 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg, 2689 "sbp=%p flush_count=%d. 
Waiting...", sbp, 2690 sbp->flush_count); 2691 2692 /* Let's try this one more time */ 2693 2694 if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) { 2695 /* Flush the IO's on the chipq */ 2696 (void) emlxs_chipq_node_flush(port, 2697 &hba->chan[hba->channel_fcp], 2698 sbp->node, sbp); 2699 } else { 2700 /* Flush the IO's on the chipq for this lun */ 2701 (void) emlxs_chipq_lun_flush(port, 2702 sbp->node, sbp->lun, sbp); 2703 } 2704 2705 /* Reset the timeout so the flush has time to complete */ 2706 timeout = emlxs_timeout(hba, 60); 2707 (void) drv_getparm(LBOLT, &time); 2708 while ((time < timeout) && sbp->flush_count > 0) { 2709 delay(drv_usectohz(500000)); 2710 (void) drv_getparm(LBOLT, &time); 2711 } 2712 2713 if (sbp->flush_count == 0) { 2714 emlxs_node_open(port, sbp->node, hba->channel_fcp); 2715 goto done; 2716 } 2717 2718 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg, 2719 "sbp=%p flush_count=%d. Resetting link.", sbp, 2720 sbp->flush_count); 2721 2722 /* Let's first try to reset the link */ 2723 (void) emlxs_reset(port, FC_FCA_LINK_RESET); 2724 2725 if (sbp->flush_count == 0) { 2726 goto done; 2727 } 2728 2729 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg, 2730 "sbp=%p flush_count=%d. Resetting HBA.", sbp, 2731 sbp->flush_count); 2732 2733 /* If that doesn't work, reset the adapter */ 2734 (void) emlxs_reset(port, FC_FCA_RESET); 2735 2736 if (sbp->flush_count != 0) { 2737 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg, 2738 "sbp=%p flush_count=%d. Giving up.", sbp, 2739 sbp->flush_count); 2740 } 2741 2742 } 2743 /* PACKET_FCP_RESET */ 2744 done: 2745 2746 /* Packet has been declared completed and is now ready to be returned */ 2747 2748 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 2749 emlxs_unswap_pkt(sbp); 2750 #endif /* EMLXS_MODREV2X */ 2751 2752 mutex_enter(&sbp->mtx); 2753 sbp->pkt_flags |= PACKET_ULP_OWNED; 2754 mutex_exit(&sbp->mtx); 2755 2756 mutex_enter(&EMLXS_PORT_LOCK); 2757 hba->io_poll_count--; 2758 mutex_exit(&EMLXS_PORT_LOCK); 2759 2760 #ifdef FMA_SUPPORT 2761 if (!in_panic) { 2762 emlxs_check_dma(hba, sbp); 2763 } 2764 #endif 2765 2766 /* Make ULP completion callback if required */ 2767 if (pkt->pkt_comp) { 2768 cp->ulpCmplCmd++; 2769 (*pkt->pkt_comp) (pkt); 2770 } 2771 2772 #ifdef FMA_SUPPORT 2773 if (hba->flag & FC_DMA_CHECK_ERROR) { 2774 emlxs_thread_spawn(hba, emlxs_restart_thread, 2775 NULL, NULL); 2776 } 2777 #endif 2778 2779 return; 2780 2781 } /* emlxs_poll() */ 2782 2783 2784 static int 2785 emlxs_fca_ub_alloc(opaque_t fca_port_handle, uint64_t tokens[], uint32_t size, 2786 uint32_t *count, uint32_t type) 2787 { 2788 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2789 emlxs_hba_t *hba = HBA; 2790 2791 char *err = NULL; 2792 emlxs_unsol_buf_t *pool; 2793 emlxs_unsol_buf_t *new_pool; 2794 int32_t i; 2795 int result; 2796 uint32_t free_resv; 2797 uint32_t free; 2798 emlxs_config_t *cfg = &CFG; 2799 fc_unsol_buf_t *ubp; 2800 emlxs_ub_priv_t *ub_priv; 2801 int rc; 2802 2803 if (port->tgt_mode) { 2804 if (tokens && count) { 2805 bzero(tokens, (sizeof (uint64_t) * (*count))); 2806 } 2807 return (FC_SUCCESS); 2808 } 2809 2810 if (!(port->flag & EMLXS_PORT_BOUND)) { 2811 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2812 "fca_ub_alloc failed: Port not bound! 
size=%x count=%d " 2813 "type=%x", size, *count, type); 2814 2815 return (FC_FAILURE); 2816 } 2817 2818 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2819 "fca_ub_alloc: size=%x count=%d type=%x", size, *count, type); 2820 2821 if (count && (*count > EMLXS_MAX_UBUFS)) { 2822 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, 2823 "fca_ub_alloc failed: Too many unsolicted buffers " 2824 "requested. count=%x", *count); 2825 2826 return (FC_FAILURE); 2827 2828 } 2829 2830 if (tokens == NULL) { 2831 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, 2832 "fca_ub_alloc failed: Token array is NULL."); 2833 2834 return (FC_FAILURE); 2835 } 2836 2837 /* Clear the token array */ 2838 bzero(tokens, (sizeof (uint64_t) * (*count))); 2839 2840 free_resv = 0; 2841 free = *count; 2842 switch (type) { 2843 case FC_TYPE_BASIC_LS: 2844 err = "BASIC_LS"; 2845 break; 2846 case FC_TYPE_EXTENDED_LS: 2847 err = "EXTENDED_LS"; 2848 free = *count / 2; /* Hold 50% for normal use */ 2849 free_resv = *count - free; /* Reserve 50% for RSCN use */ 2850 break; 2851 case FC_TYPE_IS8802: 2852 err = "IS8802"; 2853 break; 2854 case FC_TYPE_IS8802_SNAP: 2855 err = "IS8802_SNAP"; 2856 2857 if (cfg[CFG_NETWORK_ON].current == 0) { 2858 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2859 "fca_ub_alloc failed: IP support is disabled."); 2860 2861 return (FC_FAILURE); 2862 } 2863 break; 2864 case FC_TYPE_SCSI_FCP: 2865 err = "SCSI_FCP"; 2866 break; 2867 case FC_TYPE_SCSI_GPP: 2868 err = "SCSI_GPP"; 2869 break; 2870 case FC_TYPE_HIPP_FP: 2871 err = "HIPP_FP"; 2872 break; 2873 case FC_TYPE_IPI3_MASTER: 2874 err = "IPI3_MASTER"; 2875 break; 2876 case FC_TYPE_IPI3_SLAVE: 2877 err = "IPI3_SLAVE"; 2878 break; 2879 case FC_TYPE_IPI3_PEER: 2880 err = "IPI3_PEER"; 2881 break; 2882 case FC_TYPE_FC_SERVICES: 2883 err = "FC_SERVICES"; 2884 break; 2885 } 2886 2887 mutex_enter(&EMLXS_UB_LOCK); 2888 2889 /* 2890 * Walk through the list of the unsolicited buffers 2891 * for this ddiinst of emlx. 2892 */ 2893 2894 pool = port->ub_pool; 2895 2896 /* 2897 * The emlxs_fca_ub_alloc() can be called more than once with different 2898 * size. We will reject the call if there are 2899 * duplicate size with the same FC-4 type. 
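	 *
	 * Illustrative example only (hypothetical ULP usage; the handle and
	 * variable names below are not part of this driver):
	 *
	 *	(void) emlxs_fca_ub_alloc(fca_handle, toks_a, 0x800, &cnt_a,
	 *	    FC_TYPE_EXTENDED_LS);
	 *	(void) emlxs_fca_ub_alloc(fca_handle, toks_b, 0x1000, &cnt_b,
	 *	    FC_TYPE_EXTENDED_LS);
	 *	(void) emlxs_fca_ub_alloc(fca_handle, toks_a, 0x800, &cnt_a,
	 *	    FC_TYPE_EXTENDED_LS);
	 *
	 * The first two calls create separate pools because the sizes
	 * differ; the third is rejected below with FC_FAILURE because an
	 * EXTENDED_LS pool of size 0x800 already exists.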
2900 */ 2901 while (pool) { 2902 if ((pool->pool_type == type) && 2903 (pool->pool_buf_size == size)) { 2904 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, 2905 "fca_ub_alloc failed: Unsolicited buffer pool " 2906 "for %s of size 0x%x bytes already exists.", 2907 err, size); 2908 2909 result = FC_FAILURE; 2910 goto fail; 2911 } 2912 2913 pool = pool->pool_next; 2914 } 2915 2916 mutex_exit(&EMLXS_UB_LOCK); 2917 2918 new_pool = (emlxs_unsol_buf_t *)kmem_zalloc(sizeof (emlxs_unsol_buf_t), 2919 KM_SLEEP); 2920 2921 new_pool->pool_next = NULL; 2922 new_pool->pool_type = type; 2923 new_pool->pool_buf_size = size; 2924 new_pool->pool_nentries = *count; 2925 new_pool->pool_available = new_pool->pool_nentries; 2926 new_pool->pool_free = free; 2927 new_pool->pool_free_resv = free_resv; 2928 new_pool->fc_ubufs = 2929 kmem_zalloc((sizeof (fc_unsol_buf_t) * (*count)), KM_SLEEP); 2930 2931 new_pool->pool_first_token = port->ub_count; 2932 new_pool->pool_last_token = port->ub_count + new_pool->pool_nentries; 2933 2934 for (i = 0; i < new_pool->pool_nentries; i++) { 2935 ubp = (fc_unsol_buf_t *)&new_pool->fc_ubufs[i]; 2936 ubp->ub_port_handle = port->ulp_handle; 2937 ubp->ub_token = (uint64_t)((unsigned long)ubp); 2938 ubp->ub_bufsize = size; 2939 ubp->ub_class = FC_TRAN_CLASS3; 2940 ubp->ub_port_private = NULL; 2941 ubp->ub_fca_private = 2942 (emlxs_ub_priv_t *)kmem_zalloc(sizeof (emlxs_ub_priv_t), 2943 KM_SLEEP); 2944 2945 /* 2946 * Initialize emlxs_ub_priv_t 2947 */ 2948 ub_priv = ubp->ub_fca_private; 2949 ub_priv->ubp = ubp; 2950 ub_priv->port = port; 2951 ub_priv->flags = EMLXS_UB_FREE; 2952 ub_priv->available = 1; 2953 ub_priv->pool = new_pool; 2954 ub_priv->time = 0; 2955 ub_priv->timeout = 0; 2956 ub_priv->token = port->ub_count; 2957 ub_priv->cmd = 0; 2958 2959 /* Allocate the actual buffer */ 2960 ubp->ub_buffer = (caddr_t)kmem_zalloc(size, KM_SLEEP); 2961 2962 2963 tokens[i] = (uint64_t)((unsigned long)ubp); 2964 port->ub_count++; 2965 } 2966 2967 mutex_enter(&EMLXS_UB_LOCK); 2968 2969 /* Add the pool to the top of the pool list */ 2970 new_pool->pool_prev = NULL; 2971 new_pool->pool_next = port->ub_pool; 2972 2973 if (port->ub_pool) { 2974 port->ub_pool->pool_prev = new_pool; 2975 } 2976 port->ub_pool = new_pool; 2977 2978 /* Set the post counts */ 2979 if (type == FC_TYPE_IS8802_SNAP) { 2980 MAILBOXQ *mbox; 2981 2982 port->ub_post[hba->channel_ip] += new_pool->pool_nentries; 2983 2984 if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba, 2985 MEM_MBOX, 1))) { 2986 emlxs_mb_config_farp(hba, mbox); 2987 rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, 2988 mbox, MBX_NOWAIT, 0); 2989 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 2990 emlxs_mem_put(hba, MEM_MBOX, (void *)mbox); 2991 } 2992 } 2993 port->flag |= EMLXS_PORT_IP_UP; 2994 } else if (type == FC_TYPE_EXTENDED_LS) { 2995 port->ub_post[hba->channel_els] += new_pool->pool_nentries; 2996 } else if (type == FC_TYPE_FC_SERVICES) { 2997 port->ub_post[hba->channel_ct] += new_pool->pool_nentries; 2998 } 2999 3000 mutex_exit(&EMLXS_UB_LOCK); 3001 3002 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 3003 "%d unsolicited buffers allocated for %s of size 0x%x bytes.", 3004 *count, err, size); 3005 3006 return (FC_SUCCESS); 3007 3008 fail: 3009 3010 /* Clean the pool */ 3011 for (i = 0; tokens[i] != NULL; i++) { 3012 /* Get the buffer object */ 3013 ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]); 3014 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private; 3015 3016 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg, 3017 "fca_ub_alloc failed: Freed buffer=%p token=%x size=%x " 
3018 "type=%x ", ubp, ub_priv->token, ubp->ub_bufsize, type); 3019 3020 /* Free the actual buffer */ 3021 kmem_free(ubp->ub_buffer, ubp->ub_bufsize); 3022 3023 /* Free the private area of the buffer object */ 3024 kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t)); 3025 3026 tokens[i] = 0; 3027 port->ub_count--; 3028 } 3029 3030 /* Free the array of buffer objects in the pool */ 3031 kmem_free((caddr_t)new_pool->fc_ubufs, 3032 (sizeof (fc_unsol_buf_t) * new_pool->pool_nentries)); 3033 3034 /* Free the pool object */ 3035 kmem_free((caddr_t)new_pool, sizeof (emlxs_unsol_buf_t)); 3036 3037 mutex_exit(&EMLXS_UB_LOCK); 3038 3039 return (result); 3040 3041 } /* emlxs_fca_ub_alloc() */ 3042 3043 3044 static void 3045 emlxs_ub_els_reject(emlxs_port_t *port, fc_unsol_buf_t *ubp) 3046 { 3047 emlxs_hba_t *hba = HBA; 3048 emlxs_ub_priv_t *ub_priv; 3049 fc_packet_t *pkt; 3050 ELS_PKT *els; 3051 uint32_t sid; 3052 3053 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private; 3054 3055 if (hba->state <= FC_LINK_DOWN) { 3056 emlxs_abort_els_exchange(hba, port, ubp->ub_frame.rx_id); 3057 return; 3058 } 3059 3060 if (!(pkt = emlxs_pkt_alloc(port, sizeof (uint32_t) + 3061 sizeof (LS_RJT), 0, 0, KM_NOSLEEP))) { 3062 emlxs_abort_els_exchange(hba, port, ubp->ub_frame.rx_id); 3063 return; 3064 } 3065 3066 sid = LE_SWAP24_LO(ubp->ub_frame.s_id); 3067 3068 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg, 3069 "%s dropped: sid=%x. Rejecting.", 3070 emlxs_elscmd_xlate(ub_priv->cmd), sid); 3071 3072 pkt->pkt_tran_type = FC_PKT_OUTBOUND; 3073 pkt->pkt_timeout = (2 * hba->fc_ratov); 3074 3075 if ((uint32_t)ubp->ub_class == FC_TRAN_CLASS2) { 3076 pkt->pkt_tran_flags &= ~FC_TRAN_CLASS3; 3077 pkt->pkt_tran_flags |= FC_TRAN_CLASS2; 3078 } 3079 3080 /* Build the fc header */ 3081 pkt->pkt_cmd_fhdr.d_id = ubp->ub_frame.s_id; 3082 pkt->pkt_cmd_fhdr.r_ctl = 3083 R_CTL_EXTENDED_SVC | R_CTL_SOLICITED_CONTROL; 3084 pkt->pkt_cmd_fhdr.s_id = LE_SWAP24_LO(port->did); 3085 pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS; 3086 pkt->pkt_cmd_fhdr.f_ctl = 3087 F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ | F_CTL_END_SEQ; 3088 pkt->pkt_cmd_fhdr.seq_id = 0; 3089 pkt->pkt_cmd_fhdr.df_ctl = 0; 3090 pkt->pkt_cmd_fhdr.seq_cnt = 0; 3091 pkt->pkt_cmd_fhdr.ox_id = (ub_priv->cmd >> ELS_CMD_SHIFT) & 0xff; 3092 pkt->pkt_cmd_fhdr.rx_id = ubp->ub_frame.rx_id; 3093 pkt->pkt_cmd_fhdr.ro = 0; 3094 3095 /* Build the command */ 3096 els = (ELS_PKT *) pkt->pkt_cmd; 3097 els->elsCode = 0x01; 3098 els->un.lsRjt.un.b.lsRjtRsvd0 = 0; 3099 els->un.lsRjt.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 3100 els->un.lsRjt.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; 3101 els->un.lsRjt.un.b.vendorUnique = 0x02; 3102 3103 /* Send the pkt later in another thread */ 3104 (void) emlxs_pkt_send(pkt, 0); 3105 3106 return; 3107 3108 } /* emlxs_ub_els_reject() */ 3109 3110 extern int 3111 emlxs_fca_ub_release(opaque_t fca_port_handle, uint32_t count, 3112 uint64_t tokens[]) 3113 { 3114 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 3115 emlxs_hba_t *hba = HBA; 3116 fc_unsol_buf_t *ubp; 3117 emlxs_ub_priv_t *ub_priv; 3118 uint32_t i; 3119 uint32_t time; 3120 emlxs_unsol_buf_t *pool; 3121 3122 if (count == 0) { 3123 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3124 "fca_ub_release: Nothing to do. count=%d", count); 3125 3126 return (FC_SUCCESS); 3127 } 3128 3129 if (!(port->flag & EMLXS_PORT_BOUND)) { 3130 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3131 "fca_ub_release failed: Port not bound. 
count=%d " 3132 "token[0]=%p", 3133 count, tokens[0]); 3134 3135 return (FC_UNBOUND); 3136 } 3137 3138 mutex_enter(&EMLXS_UB_LOCK); 3139 3140 if (!port->ub_pool) { 3141 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3142 "fca_ub_release failed: No pools! count=%d token[0]=%p", 3143 count, tokens[0]); 3144 3145 mutex_exit(&EMLXS_UB_LOCK); 3146 return (FC_UB_BADTOKEN); 3147 } 3148 3149 for (i = 0; i < count; i++) { 3150 ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]); 3151 3152 if (!ubp) { 3153 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3154 "fca_ub_release failed: count=%d tokens[%d]=0", 3155 count, i); 3156 3157 mutex_exit(&EMLXS_UB_LOCK); 3158 return (FC_UB_BADTOKEN); 3159 } 3160 3161 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private; 3162 3163 if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) { 3164 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3165 "fca_ub_release failed: Dead buffer found. ubp=%p", 3166 ubp); 3167 3168 mutex_exit(&EMLXS_UB_LOCK); 3169 return (FC_UB_BADTOKEN); 3170 } 3171 3172 if (ub_priv->flags == EMLXS_UB_FREE) { 3173 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3174 "fca_ub_release: Buffer already free! ubp=%p " 3175 "token=%x", 3176 ubp, ub_priv->token); 3177 3178 continue; 3179 } 3180 3181 /* Check for dropped els buffer */ 3182 /* ULP will do this sometimes without sending a reply */ 3183 if ((ubp->ub_frame.r_ctl == FC_ELS_REQ) && 3184 !(ub_priv->flags & EMLXS_UB_REPLY)) { 3185 emlxs_ub_els_reject(port, ubp); 3186 } 3187 3188 /* Mark the buffer free */ 3189 ub_priv->flags = EMLXS_UB_FREE; 3190 bzero(ubp->ub_buffer, ubp->ub_bufsize); 3191 3192 time = hba->timer_tics - ub_priv->time; 3193 ub_priv->time = 0; 3194 ub_priv->timeout = 0; 3195 3196 pool = ub_priv->pool; 3197 3198 if (ub_priv->flags & EMLXS_UB_RESV) { 3199 pool->pool_free_resv++; 3200 } else { 3201 pool->pool_free++; 3202 } 3203 3204 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg, 3205 "fca_ub_release: ubp=%p token=%x time=%d av=%d " 3206 "(%d,%d,%d,%d)", 3207 ubp, ub_priv->token, time, ub_priv->available, 3208 pool->pool_nentries, pool->pool_available, 3209 pool->pool_free, pool->pool_free_resv); 3210 3211 /* Check if pool can be destroyed now */ 3212 if ((pool->pool_available == 0) && 3213 (pool->pool_free + pool->pool_free_resv == 3214 pool->pool_nentries)) { 3215 emlxs_ub_destroy(port, pool); 3216 } 3217 } 3218 3219 mutex_exit(&EMLXS_UB_LOCK); 3220 3221 return (FC_SUCCESS); 3222 3223 } /* emlxs_fca_ub_release() */ 3224 3225 3226 static int 3227 emlxs_fca_ub_free(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[]) 3228 { 3229 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 3230 emlxs_unsol_buf_t *pool; 3231 fc_unsol_buf_t *ubp; 3232 emlxs_ub_priv_t *ub_priv; 3233 uint32_t i; 3234 3235 if (port->tgt_mode) { 3236 return (FC_SUCCESS); 3237 } 3238 3239 if (count == 0) { 3240 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3241 "fca_ub_free: Nothing to do. count=%d token[0]=%p", count, 3242 tokens[0]); 3243 3244 return (FC_SUCCESS); 3245 } 3246 3247 if (!(port->flag & EMLXS_PORT_BOUND)) { 3248 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3249 "fca_ub_free: Port not bound. count=%d token[0]=%p", count, 3250 tokens[0]); 3251 3252 return (FC_SUCCESS); 3253 } 3254 3255 mutex_enter(&EMLXS_UB_LOCK); 3256 3257 if (!port->ub_pool) { 3258 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3259 "fca_ub_free failed: No pools! 
count=%d token[0]=%p", count, 3260 tokens[0]); 3261 3262 mutex_exit(&EMLXS_UB_LOCK); 3263 return (FC_UB_BADTOKEN); 3264 } 3265 3266 /* Process buffer list */ 3267 for (i = 0; i < count; i++) { 3268 ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]); 3269 3270 if (!ubp) { 3271 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3272 "fca_ub_free failed: count=%d tokens[%d]=0", count, 3273 i); 3274 3275 mutex_exit(&EMLXS_UB_LOCK); 3276 return (FC_UB_BADTOKEN); 3277 } 3278 3279 /* Mark buffer unavailable */ 3280 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private; 3281 3282 if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) { 3283 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3284 "fca_ub_free failed: Dead buffer found. ubp=%p", 3285 ubp); 3286 3287 mutex_exit(&EMLXS_UB_LOCK); 3288 return (FC_UB_BADTOKEN); 3289 } 3290 3291 ub_priv->available = 0; 3292 3293 /* Mark one less buffer available in the parent pool */ 3294 pool = ub_priv->pool; 3295 3296 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg, 3297 "fca_ub_free: ubp=%p token=%x (%d,%d,%d,%d)", ubp, 3298 ub_priv->token, pool->pool_nentries, 3299 pool->pool_available - 1, pool->pool_free, 3300 pool->pool_free_resv); 3301 3302 if (pool->pool_available) { 3303 pool->pool_available--; 3304 3305 /* Check if pool can be destroyed */ 3306 if ((pool->pool_available == 0) && 3307 (pool->pool_free + pool->pool_free_resv == 3308 pool->pool_nentries)) { 3309 emlxs_ub_destroy(port, pool); 3310 } 3311 } 3312 } 3313 3314 mutex_exit(&EMLXS_UB_LOCK); 3315 3316 return (FC_SUCCESS); 3317 3318 } /* emlxs_fca_ub_free() */ 3319 3320 3321 /* EMLXS_UB_LOCK must be held when calling this routine */ 3322 extern void 3323 emlxs_ub_destroy(emlxs_port_t *port, emlxs_unsol_buf_t *pool) 3324 { 3325 emlxs_hba_t *hba = HBA; 3326 emlxs_unsol_buf_t *next; 3327 emlxs_unsol_buf_t *prev; 3328 fc_unsol_buf_t *ubp; 3329 uint32_t i; 3330 3331 /* Remove the pool object from the pool list */ 3332 next = pool->pool_next; 3333 prev = pool->pool_prev; 3334 3335 if (port->ub_pool == pool) { 3336 port->ub_pool = next; 3337 } 3338 3339 if (prev) { 3340 prev->pool_next = next; 3341 } 3342 3343 if (next) { 3344 next->pool_prev = prev; 3345 } 3346 3347 pool->pool_prev = NULL; 3348 pool->pool_next = NULL; 3349 3350 /* Clear the post counts */ 3351 switch (pool->pool_type) { 3352 case FC_TYPE_IS8802_SNAP: 3353 port->ub_post[hba->channel_ip] -= pool->pool_nentries; 3354 break; 3355 3356 case FC_TYPE_EXTENDED_LS: 3357 port->ub_post[hba->channel_els] -= pool->pool_nentries; 3358 break; 3359 3360 case FC_TYPE_FC_SERVICES: 3361 port->ub_post[hba->channel_ct] -= pool->pool_nentries; 3362 break; 3363 } 3364 3365 /* Now free the pool memory */ 3366 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3367 "ub_destroy: pool=%p type=%d size=%d count=%d", pool, 3368 pool->pool_type, pool->pool_buf_size, pool->pool_nentries); 3369 3370 /* Process the array of buffer objects in the pool */ 3371 for (i = 0; i < pool->pool_nentries; i++) { 3372 /* Get the buffer object */ 3373 ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i]; 3374 3375 /* Free the memory the buffer object represents */ 3376 kmem_free(ubp->ub_buffer, ubp->ub_bufsize); 3377 3378 /* Free the private area of the buffer object */ 3379 kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t)); 3380 } 3381 3382 /* Free the array of buffer objects in the pool */ 3383 kmem_free((caddr_t)pool->fc_ubufs, 3384 (sizeof (fc_unsol_buf_t)*pool->pool_nentries)); 3385 3386 /* Free the pool object */ 3387 kmem_free((caddr_t)pool, sizeof (emlxs_unsol_buf_t)); 3388 3389 
return; 3390 3391 } /* emlxs_ub_destroy() */ 3392 3393 3394 /*ARGSUSED*/ 3395 extern int 3396 emlxs_fca_pkt_abort(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep) 3397 { 3398 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 3399 emlxs_hba_t *hba = HBA; 3400 emlxs_config_t *cfg = &CFG; 3401 3402 emlxs_buf_t *sbp; 3403 NODELIST *nlp; 3404 NODELIST *prev_nlp; 3405 uint8_t channelno; 3406 CHANNEL *cp; 3407 clock_t timeout; 3408 clock_t time; 3409 int32_t pkt_ret; 3410 IOCBQ *iocbq; 3411 IOCBQ *next; 3412 IOCBQ *prev; 3413 uint32_t found; 3414 uint32_t att_bit; 3415 uint32_t pass = 0; 3416 3417 sbp = (emlxs_buf_t *)pkt->pkt_fca_private; 3418 iocbq = &sbp->iocbq; 3419 nlp = (NODELIST *)sbp->node; 3420 cp = (CHANNEL *)sbp->channel; 3421 channelno = (cp) ? cp->channelno : 0; 3422 3423 if (!(port->flag & EMLXS_PORT_BOUND)) { 3424 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg, 3425 "Port not bound."); 3426 return (FC_UNBOUND); 3427 } 3428 3429 if (!(hba->flag & FC_ONLINE_MODE)) { 3430 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg, 3431 "Adapter offline."); 3432 return (FC_OFFLINE); 3433 } 3434 3435 /* ULP requires the aborted pkt to be completed */ 3436 /* back to ULP before returning from this call. */ 3437 /* SUN knows of problems with this call so they suggested that we */ 3438 /* always return a FC_FAILURE for this call, until it is worked out. */ 3439 3440 /* Check if pkt is no good */ 3441 if (!(sbp->pkt_flags & PACKET_VALID) || 3442 (sbp->pkt_flags & PACKET_ULP_OWNED)) { 3443 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg, 3444 "Bad sbp. flags=%x", sbp->pkt_flags); 3445 return (FC_FAILURE); 3446 } 3447 3448 /* Tag this now */ 3449 /* This will prevent any thread except ours from completing it */ 3450 mutex_enter(&sbp->mtx); 3451 3452 /* Check again if we still own this */ 3453 if (!(sbp->pkt_flags & PACKET_VALID) || 3454 (sbp->pkt_flags & PACKET_ULP_OWNED)) { 3455 mutex_exit(&sbp->mtx); 3456 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg, 3457 "Bad sbp. flags=%x", sbp->pkt_flags); 3458 return (FC_FAILURE); 3459 } 3460 3461 /* Check if pkt is a real polled command */ 3462 if (!(sbp->pkt_flags & PACKET_IN_ABORT) && 3463 (sbp->pkt_flags & PACKET_POLLED)) { 3464 mutex_exit(&sbp->mtx); 3465 3466 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg, 3467 "Attempting to abort a polled I/O. sbp=%p flags=%x", sbp, 3468 sbp->pkt_flags); 3469 return (FC_FAILURE); 3470 } 3471 3472 sbp->pkt_flags |= PACKET_POLLED; 3473 sbp->pkt_flags |= PACKET_IN_ABORT; 3474 3475 if (sbp->pkt_flags & (PACKET_IN_COMPLETION | PACKET_IN_FLUSH | 3476 PACKET_IN_TIMEOUT)) { 3477 mutex_exit(&sbp->mtx); 3478 3479 /* Do nothing, pkt already on its way out */ 3480 goto done; 3481 } 3482 3483 mutex_exit(&sbp->mtx); 3484 3485 begin: 3486 pass++; 3487 3488 mutex_enter(&EMLXS_TX_CHANNEL_LOCK); 3489 3490 if (sbp->pkt_flags & PACKET_IN_TXQ) { 3491 /* Find it on the queue */ 3492 found = 0; 3493 if (iocbq->flag & IOCB_PRIORITY) { 3494 /* Search the priority queue */ 3495 prev = NULL; 3496 next = (IOCBQ *) nlp->nlp_ptx[channelno].q_first; 3497 3498 while (next) { 3499 if (next == iocbq) { 3500 /* Remove it */ 3501 if (prev) { 3502 prev->next = iocbq->next; 3503 } 3504 3505 if (nlp->nlp_ptx[channelno].q_last == 3506 (void *)iocbq) { 3507 nlp->nlp_ptx[channelno].q_last = 3508 (void *)prev; 3509 } 3510 3511 if (nlp->nlp_ptx[channelno].q_first == 3512 (void *)iocbq) { 3513 nlp->nlp_ptx[channelno]. 
3514 q_first = 3515 (void *)iocbq->next; 3516 } 3517 3518 nlp->nlp_ptx[channelno].q_cnt--; 3519 iocbq->next = NULL; 3520 found = 1; 3521 break; 3522 } 3523 3524 prev = next; 3525 next = next->next; 3526 } 3527 } else { 3528 /* Search the normal queue */ 3529 prev = NULL; 3530 next = (IOCBQ *) nlp->nlp_tx[channelno].q_first; 3531 3532 while (next) { 3533 if (next == iocbq) { 3534 /* Remove it */ 3535 if (prev) { 3536 prev->next = iocbq->next; 3537 } 3538 3539 if (nlp->nlp_tx[channelno].q_last == 3540 (void *)iocbq) { 3541 nlp->nlp_tx[channelno].q_last = 3542 (void *)prev; 3543 } 3544 3545 if (nlp->nlp_tx[channelno].q_first == 3546 (void *)iocbq) { 3547 nlp->nlp_tx[channelno].q_first = 3548 (void *)iocbq->next; 3549 } 3550 3551 nlp->nlp_tx[channelno].q_cnt--; 3552 iocbq->next = NULL; 3553 found = 1; 3554 break; 3555 } 3556 3557 prev = next; 3558 next = (IOCBQ *) next->next; 3559 } 3560 } 3561 3562 if (!found) { 3563 mutex_exit(&EMLXS_TX_CHANNEL_LOCK); 3564 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg, 3565 "I/O not found in driver. sbp=%p flags=%x", sbp, 3566 sbp->pkt_flags); 3567 goto done; 3568 } 3569 3570 /* Check if node still needs servicing */ 3571 if ((nlp->nlp_ptx[channelno].q_first) || 3572 (nlp->nlp_tx[channelno].q_first && 3573 !(nlp->nlp_flag[channelno] & NLP_CLOSED))) { 3574 3575 /* 3576 * If this is the base node, 3577 * then don't shift the pointers 3578 */ 3579 /* We want to drain the base node before moving on */ 3580 if (!nlp->nlp_base) { 3581 /* Just shift channel queue */ 3582 /* pointers to next node */ 3583 cp->nodeq.q_last = (void *) nlp; 3584 cp->nodeq.q_first = nlp->nlp_next[channelno]; 3585 } 3586 } else { 3587 /* Remove node from channel queue */ 3588 3589 /* If this is the only node on list */ 3590 if (cp->nodeq.q_first == (void *)nlp && 3591 cp->nodeq.q_last == (void *)nlp) { 3592 cp->nodeq.q_last = NULL; 3593 cp->nodeq.q_first = NULL; 3594 cp->nodeq.q_cnt = 0; 3595 } else if (cp->nodeq.q_first == (void *)nlp) { 3596 cp->nodeq.q_first = nlp->nlp_next[channelno]; 3597 ((NODELIST *) cp->nodeq.q_last)-> 3598 nlp_next[channelno] = cp->nodeq.q_first; 3599 cp->nodeq.q_cnt--; 3600 } else { 3601 /* 3602 * This is a little more difficult find the 3603 * previous node in the circular channel queue 3604 */ 3605 prev_nlp = nlp; 3606 while (prev_nlp->nlp_next[channelno] != nlp) { 3607 prev_nlp = prev_nlp-> 3608 nlp_next[channelno]; 3609 } 3610 3611 prev_nlp->nlp_next[channelno] = 3612 nlp->nlp_next[channelno]; 3613 3614 if (cp->nodeq.q_last == (void *)nlp) { 3615 cp->nodeq.q_last = (void *)prev_nlp; 3616 } 3617 cp->nodeq.q_cnt--; 3618 3619 } 3620 3621 /* Clear node */ 3622 nlp->nlp_next[channelno] = NULL; 3623 } 3624 3625 /* Free the ULPIOTAG and the bmp */ 3626 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) { 3627 emlxs_sli4_free_xri(hba, sbp, sbp->xrip, 1); 3628 } else { 3629 (void) emlxs_unregister_pkt(cp, sbp->iotag, 1); 3630 } 3631 3632 3633 mutex_exit(&EMLXS_TX_CHANNEL_LOCK); 3634 3635 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 3636 IOERR_ABORT_REQUESTED, 1); 3637 3638 goto done; 3639 } 3640 3641 mutex_exit(&EMLXS_TX_CHANNEL_LOCK); 3642 3643 3644 /* Check the chip queue */ 3645 mutex_enter(&EMLXS_FCTAB_LOCK); 3646 3647 if ((sbp->pkt_flags & PACKET_IN_CHIPQ) && 3648 !(sbp->pkt_flags & PACKET_XRI_CLOSED) && 3649 (sbp == hba->fc_table[sbp->iotag])) { 3650 3651 /* Create the abort IOCB */ 3652 if (hba->state >= FC_LINK_UP) { 3653 iocbq = 3654 emlxs_create_abort_xri_cn(port, sbp->node, 3655 sbp->iotag, cp, sbp->class, ABORT_TYPE_ABTS); 3656 3657 mutex_enter(&sbp->mtx); 
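			/* Mark the exchange closed and give the ABTS */
			/* (4 * fc_ratov) + 10 timer ticks to complete */
			/* before the driver timer forces local completion */
			/* of this packet. */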
3658 sbp->pkt_flags |= PACKET_XRI_CLOSED; 3659 sbp->ticks = 3660 hba->timer_tics + (4 * hba->fc_ratov) + 10; 3661 sbp->abort_attempts++; 3662 mutex_exit(&sbp->mtx); 3663 } else { 3664 iocbq = 3665 emlxs_create_close_xri_cn(port, sbp->node, 3666 sbp->iotag, cp); 3667 3668 mutex_enter(&sbp->mtx); 3669 sbp->pkt_flags |= PACKET_XRI_CLOSED; 3670 sbp->ticks = hba->timer_tics + 30; 3671 sbp->abort_attempts++; 3672 mutex_exit(&sbp->mtx); 3673 } 3674 3675 mutex_exit(&EMLXS_FCTAB_LOCK); 3676 3677 /* Send this iocbq */ 3678 if (iocbq) { 3679 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq); 3680 iocbq = NULL; 3681 } 3682 3683 goto done; 3684 } 3685 3686 mutex_exit(&EMLXS_FCTAB_LOCK); 3687 3688 /* Pkt was not on any queues */ 3689 3690 /* Check again if we still own this */ 3691 if (!(sbp->pkt_flags & PACKET_VALID) || 3692 (sbp->pkt_flags & 3693 (PACKET_ULP_OWNED | PACKET_IN_COMPLETION | 3694 PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) { 3695 goto done; 3696 } 3697 3698 if (!sleep) { 3699 return (FC_FAILURE); 3700 } 3701 3702 /* Apparently the pkt was not found. Let's delay and try again */ 3703 if (pass < 5) { 3704 delay(drv_usectohz(5000000)); /* 5 seconds */ 3705 3706 /* Check again if we still own this */ 3707 if (!(sbp->pkt_flags & PACKET_VALID) || 3708 (sbp->pkt_flags & 3709 (PACKET_ULP_OWNED | PACKET_IN_COMPLETION | 3710 PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) { 3711 goto done; 3712 } 3713 3714 goto begin; 3715 } 3716 3717 force_it: 3718 3719 /* Force the completion now */ 3720 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3721 "Abort: Completing an IO thats not outstanding: %x", sbp->iotag); 3722 3723 /* Now complete it */ 3724 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, IOERR_ABORT_REQUESTED, 3725 1); 3726 3727 done: 3728 3729 /* Now wait for the pkt to complete */ 3730 if (!(sbp->pkt_flags & PACKET_COMPLETED)) { 3731 /* Set thread timeout */ 3732 timeout = emlxs_timeout(hba, 30); 3733 3734 /* Check for panic situation */ 3735 if (ddi_in_panic()) { 3736 3737 /* 3738 * In panic situations there will be one thread with no 3739 * interrrupts (hard or soft) and no timers 3740 */ 3741 3742 /* 3743 * We must manually poll everything in this thread 3744 * to keep the driver going. 3745 */ 3746 3747 cp = (CHANNEL *)sbp->channel; 3748 switch (cp->channelno) { 3749 case FC_FCP_RING: 3750 att_bit = HA_R0ATT; 3751 break; 3752 3753 case FC_IP_RING: 3754 att_bit = HA_R1ATT; 3755 break; 3756 3757 case FC_ELS_RING: 3758 att_bit = HA_R2ATT; 3759 break; 3760 3761 case FC_CT_RING: 3762 att_bit = HA_R3ATT; 3763 break; 3764 } 3765 3766 /* Keep polling the chip until our IO is completed */ 3767 (void) drv_getparm(LBOLT, &time); 3768 while ((time < timeout) && 3769 !(sbp->pkt_flags & PACKET_COMPLETED)) { 3770 EMLXS_SLI_POLL_INTR(hba, att_bit); 3771 (void) drv_getparm(LBOLT, &time); 3772 } 3773 } else { 3774 /* Wait for IO completion or timeout */ 3775 mutex_enter(&EMLXS_PKT_LOCK); 3776 pkt_ret = 0; 3777 while ((pkt_ret != -1) && 3778 !(sbp->pkt_flags & PACKET_COMPLETED)) { 3779 pkt_ret = 3780 cv_timedwait(&EMLXS_PKT_CV, 3781 &EMLXS_PKT_LOCK, timeout); 3782 } 3783 mutex_exit(&EMLXS_PKT_LOCK); 3784 } 3785 3786 /* Check if timeout occured. This is not good. */ 3787 /* Something happened to our IO. 
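		 * If so, force the completion locally via force_it
		 * (LOCAL_REJECT / ABORT_REQUESTED) so that ULP always
		 * gets its packet back.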
*/ 3788 if (!(sbp->pkt_flags & PACKET_COMPLETED)) { 3789 /* Force the completion now */ 3790 goto force_it; 3791 } 3792 } 3793 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 3794 emlxs_unswap_pkt(sbp); 3795 #endif /* EMLXS_MODREV2X */ 3796 3797 /* Check again if we still own this */ 3798 if ((sbp->pkt_flags & PACKET_VALID) && 3799 !(sbp->pkt_flags & PACKET_ULP_OWNED)) { 3800 mutex_enter(&sbp->mtx); 3801 if ((sbp->pkt_flags & PACKET_VALID) && 3802 !(sbp->pkt_flags & PACKET_ULP_OWNED)) { 3803 sbp->pkt_flags |= PACKET_ULP_OWNED; 3804 } 3805 mutex_exit(&sbp->mtx); 3806 } 3807 3808 #ifdef ULP_PATCH5 3809 if (cfg[CFG_ENABLE_PATCH].current & ULP_PATCH5) { 3810 return (FC_FAILURE); 3811 } 3812 #endif /* ULP_PATCH5 */ 3813 3814 return (FC_SUCCESS); 3815 3816 } /* emlxs_fca_pkt_abort() */ 3817 3818 3819 static void 3820 emlxs_abort_all(emlxs_hba_t *hba, uint32_t *tx, uint32_t *chip) 3821 { 3822 emlxs_port_t *port = &PPORT; 3823 fc_packet_t *pkt; 3824 emlxs_buf_t *sbp; 3825 uint32_t i; 3826 uint32_t flg; 3827 uint32_t rc; 3828 uint32_t txcnt; 3829 uint32_t chipcnt; 3830 3831 txcnt = 0; 3832 chipcnt = 0; 3833 3834 mutex_enter(&EMLXS_FCTAB_LOCK); 3835 for (i = 0; i < hba->max_iotag; i++) { 3836 sbp = hba->fc_table[i]; 3837 if (sbp == NULL || sbp == STALE_PACKET) { 3838 continue; 3839 } 3840 flg = (sbp->pkt_flags & PACKET_IN_CHIPQ); 3841 pkt = PRIV2PKT(sbp); 3842 mutex_exit(&EMLXS_FCTAB_LOCK); 3843 rc = emlxs_fca_pkt_abort(port, pkt, 0); 3844 if (rc == FC_SUCCESS) { 3845 if (flg) { 3846 chipcnt++; 3847 } else { 3848 txcnt++; 3849 } 3850 } 3851 mutex_enter(&EMLXS_FCTAB_LOCK); 3852 } 3853 mutex_exit(&EMLXS_FCTAB_LOCK); 3854 *tx = txcnt; 3855 *chip = chipcnt; 3856 } /* emlxs_abort_all() */ 3857 3858 3859 extern int32_t 3860 emlxs_reset(emlxs_port_t *port, uint32_t cmd) 3861 { 3862 emlxs_hba_t *hba = HBA; 3863 int rval; 3864 int ret; 3865 clock_t timeout; 3866 3867 switch (cmd) { 3868 case FC_FCA_LINK_RESET: 3869 3870 if (!(hba->flag & FC_ONLINE_MODE) || 3871 (hba->state <= FC_LINK_DOWN)) { 3872 return (FC_SUCCESS); 3873 } 3874 3875 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3876 "Resetting Link."); 3877 3878 mutex_enter(&EMLXS_LINKUP_LOCK); 3879 hba->linkup_wait_flag = TRUE; 3880 mutex_exit(&EMLXS_LINKUP_LOCK); 3881 3882 if (emlxs_reset_link(hba, 1, 1)) { 3883 mutex_enter(&EMLXS_LINKUP_LOCK); 3884 hba->linkup_wait_flag = FALSE; 3885 mutex_exit(&EMLXS_LINKUP_LOCK); 3886 3887 return (FC_FAILURE); 3888 } 3889 3890 mutex_enter(&EMLXS_LINKUP_LOCK); 3891 timeout = emlxs_timeout(hba, 60); 3892 ret = 0; 3893 while ((ret != -1) && (hba->linkup_wait_flag == TRUE)) { 3894 ret = 3895 cv_timedwait(&EMLXS_LINKUP_CV, &EMLXS_LINKUP_LOCK, 3896 timeout); 3897 } 3898 3899 hba->linkup_wait_flag = FALSE; 3900 mutex_exit(&EMLXS_LINKUP_LOCK); 3901 3902 if (ret == -1) { 3903 return (FC_FAILURE); 3904 } 3905 3906 return (FC_SUCCESS); 3907 3908 case FC_FCA_CORE: 3909 #ifdef DUMP_SUPPORT 3910 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3911 "Dumping Core."); 3912 3913 /* Schedule a USER dump */ 3914 emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0); 3915 3916 /* Wait for dump to complete */ 3917 emlxs_dump_wait(hba); 3918 3919 return (FC_SUCCESS); 3920 #endif /* DUMP_SUPPORT */ 3921 3922 case FC_FCA_RESET: 3923 case FC_FCA_RESET_CORE: 3924 3925 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3926 "Resetting Adapter."); 3927 3928 rval = FC_SUCCESS; 3929 3930 if (emlxs_offline(hba) == 0) { 3931 (void) emlxs_online(hba); 3932 } else { 3933 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3934 "Adapter reset failed. 
Device busy."); 3935 3936 rval = FC_DEVICE_BUSY; 3937 } 3938 3939 return (rval); 3940 3941 default: 3942 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3943 "emlxs_reset: Unknown command. cmd=%x", cmd); 3944 3945 break; 3946 } 3947 3948 return (FC_FAILURE); 3949 3950 } /* emlxs_reset() */ 3951 3952 3953 extern int32_t 3954 emlxs_fca_reset(opaque_t fca_port_handle, uint32_t cmd) 3955 { 3956 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 3957 emlxs_hba_t *hba = HBA; 3958 int32_t rval; 3959 3960 if (!(port->flag & EMLXS_PORT_BOUND)) { 3961 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3962 "fca_reset: Port not bound."); 3963 3964 return (FC_UNBOUND); 3965 } 3966 3967 switch (cmd) { 3968 case FC_FCA_LINK_RESET: 3969 if (hba->fw_flag & FW_UPDATE_NEEDED) { 3970 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3971 "fca_reset: FC_FCA_LINK_RESET -> FC_FCA_RESET"); 3972 cmd = FC_FCA_RESET; 3973 } else { 3974 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3975 "fca_reset: FC_FCA_LINK_RESET"); 3976 } 3977 break; 3978 3979 case FC_FCA_CORE: 3980 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3981 "fca_reset: FC_FCA_CORE"); 3982 break; 3983 3984 case FC_FCA_RESET: 3985 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3986 "fca_reset: FC_FCA_RESET"); 3987 break; 3988 3989 case FC_FCA_RESET_CORE: 3990 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3991 "fca_reset: FC_FCA_RESET_CORE"); 3992 break; 3993 3994 default: 3995 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3996 "fca_reset: Unknown command. cmd=%x", cmd); 3997 return (FC_FAILURE); 3998 } 3999 4000 if (hba->fw_flag & FW_UPDATE_NEEDED) { 4001 hba->fw_flag |= FW_UPDATE_KERNEL; 4002 } 4003 4004 rval = emlxs_reset(port, cmd); 4005 4006 return (rval); 4007 4008 } /* emlxs_fca_reset() */ 4009 4010 4011 extern int 4012 emlxs_fca_port_manage(opaque_t fca_port_handle, fc_fca_pm_t *pm) 4013 { 4014 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 4015 emlxs_hba_t *hba = HBA; 4016 int32_t ret; 4017 emlxs_vpd_t *vpd = &VPD; 4018 4019 4020 ret = FC_SUCCESS; 4021 4022 if (!(port->flag & EMLXS_PORT_BOUND)) { 4023 return (FC_UNBOUND); 4024 } 4025 4026 4027 #ifdef IDLE_TIMER 4028 emlxs_pm_busy_component(hba); 4029 #endif /* IDLE_TIMER */ 4030 4031 switch (pm->pm_cmd_code) { 4032 4033 case FC_PORT_GET_FW_REV: 4034 { 4035 char buffer[128]; 4036 4037 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4038 "fca_port_manage: FC_PORT_GET_FW_REV"); 4039 4040 (void) sprintf(buffer, "%s %s", hba->model_info.model, 4041 vpd->fw_version); 4042 bzero(pm->pm_data_buf, pm->pm_data_len); 4043 4044 if (pm->pm_data_len < strlen(buffer) + 1) { 4045 ret = FC_NOMEM; 4046 4047 break; 4048 } 4049 4050 (void) strcpy(pm->pm_data_buf, buffer); 4051 break; 4052 } 4053 4054 case FC_PORT_GET_FCODE_REV: 4055 { 4056 char buffer[128]; 4057 4058 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4059 "fca_port_manage: FC_PORT_GET_FCODE_REV"); 4060 4061 /* Force update here just to be sure */ 4062 emlxs_get_fcode_version(hba); 4063 4064 (void) sprintf(buffer, "%s %s", hba->model_info.model, 4065 vpd->fcode_version); 4066 bzero(pm->pm_data_buf, pm->pm_data_len); 4067 4068 if (pm->pm_data_len < strlen(buffer) + 1) { 4069 ret = FC_NOMEM; 4070 break; 4071 } 4072 4073 (void) strcpy(pm->pm_data_buf, buffer); 4074 break; 4075 } 4076 4077 case FC_PORT_GET_DUMP_SIZE: 4078 { 4079 #ifdef DUMP_SUPPORT 4080 uint32_t dump_size = 0; 4081 4082 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4083 "fca_port_manage: FC_PORT_GET_DUMP_SIZE"); 4084 4085 if (pm->pm_data_len < sizeof (uint32_t)) { 4086 ret = 
FC_NOMEM; 4087 break; 4088 } 4089 4090 (void) emlxs_get_dump(hba, NULL, &dump_size); 4091 4092 *((uint32_t *)pm->pm_data_buf) = dump_size; 4093 4094 #else 4095 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4096 "fca_port_manage: FC_PORT_GET_DUMP_SIZE unsupported."); 4097 4098 #endif /* DUMP_SUPPORT */ 4099 4100 break; 4101 } 4102 4103 case FC_PORT_GET_DUMP: 4104 { 4105 #ifdef DUMP_SUPPORT 4106 uint32_t dump_size = 0; 4107 4108 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4109 "fca_port_manage: FC_PORT_GET_DUMP"); 4110 4111 (void) emlxs_get_dump(hba, NULL, &dump_size); 4112 4113 if (pm->pm_data_len < dump_size) { 4114 ret = FC_NOMEM; 4115 break; 4116 } 4117 4118 (void) emlxs_get_dump(hba, (uint8_t *)pm->pm_data_buf, 4119 (uint32_t *)&dump_size); 4120 #else 4121 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4122 "fca_port_manage: FC_PORT_GET_DUMP unsupported."); 4123 4124 #endif /* DUMP_SUPPORT */ 4125 4126 break; 4127 } 4128 4129 case FC_PORT_FORCE_DUMP: 4130 { 4131 #ifdef DUMP_SUPPORT 4132 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4133 "fca_port_manage: FC_PORT_FORCE_DUMP"); 4134 4135 /* Schedule a USER dump */ 4136 emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0); 4137 4138 /* Wait for dump to complete */ 4139 emlxs_dump_wait(hba); 4140 #else 4141 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4142 "fca_port_manage: FC_PORT_FORCE_DUMP unsupported."); 4143 4144 #endif /* DUMP_SUPPORT */ 4145 break; 4146 } 4147 4148 case FC_PORT_LINK_STATE: 4149 { 4150 uint32_t *link_state; 4151 4152 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4153 "fca_port_manage: FC_PORT_LINK_STATE"); 4154 4155 if (pm->pm_stat_len != sizeof (*link_state)) { 4156 ret = FC_NOMEM; 4157 break; 4158 } 4159 4160 if (pm->pm_cmd_buf != NULL) { 4161 /* 4162 * Can't look beyond the FCA port. 
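 * (A non-NULL pm_cmd_buf presumably identifies a remote port; this FCA can
 * only report the state of its own link, so such requests are rejected
 * below with FC_INVALID_REQUEST.)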
4163 */ 4164 ret = FC_INVALID_REQUEST; 4165 break; 4166 } 4167 4168 link_state = (uint32_t *)pm->pm_stat_buf; 4169 4170 /* Set the state */ 4171 if (hba->state >= FC_LINK_UP) { 4172 /* Check for loop topology */ 4173 if (hba->topology == TOPOLOGY_LOOP) { 4174 *link_state = FC_STATE_LOOP; 4175 } else { 4176 *link_state = FC_STATE_ONLINE; 4177 } 4178 4179 /* Set the link speed */ 4180 switch (hba->linkspeed) { 4181 case LA_2GHZ_LINK: 4182 *link_state |= FC_STATE_2GBIT_SPEED; 4183 break; 4184 case LA_4GHZ_LINK: 4185 *link_state |= FC_STATE_4GBIT_SPEED; 4186 break; 4187 case LA_8GHZ_LINK: 4188 *link_state |= FC_STATE_8GBIT_SPEED; 4189 break; 4190 case LA_10GHZ_LINK: 4191 *link_state |= FC_STATE_10GBIT_SPEED; 4192 break; 4193 case LA_1GHZ_LINK: 4194 default: 4195 *link_state |= FC_STATE_1GBIT_SPEED; 4196 break; 4197 } 4198 } else { 4199 *link_state = FC_STATE_OFFLINE; 4200 } 4201 4202 break; 4203 } 4204 4205 4206 case FC_PORT_ERR_STATS: 4207 case FC_PORT_RLS: 4208 { 4209 MAILBOXQ *mbq; 4210 MAILBOX *mb; 4211 fc_rls_acc_t *bp; 4212 4213 if (!(hba->flag & FC_ONLINE_MODE)) { 4214 return (FC_OFFLINE); 4215 } 4216 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4217 "fca_port_manage: FC_PORT_RLS / FC_PORT_ERR_STATS"); 4218 4219 if (pm->pm_data_len < sizeof (fc_rls_acc_t)) { 4220 ret = FC_NOMEM; 4221 break; 4222 } 4223 4224 if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, 4225 MEM_MBOX, 1)) == 0) { 4226 ret = FC_NOMEM; 4227 break; 4228 } 4229 mb = (MAILBOX *)mbq; 4230 4231 emlxs_mb_read_lnk_stat(hba, mbq); 4232 if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0) 4233 != MBX_SUCCESS) { 4234 ret = FC_PBUSY; 4235 } else { 4236 bp = (fc_rls_acc_t *)pm->pm_data_buf; 4237 4238 bp->rls_link_fail = mb->un.varRdLnk.linkFailureCnt; 4239 bp->rls_sync_loss = mb->un.varRdLnk.lossSyncCnt; 4240 bp->rls_sig_loss = mb->un.varRdLnk.lossSignalCnt; 4241 bp->rls_prim_seq_err = mb->un.varRdLnk.primSeqErrCnt; 4242 bp->rls_invalid_word = 4243 mb->un.varRdLnk.invalidXmitWord; 4244 bp->rls_invalid_crc = mb->un.varRdLnk.crcCnt; 4245 } 4246 4247 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq); 4248 break; 4249 } 4250 4251 case FC_PORT_DOWNLOAD_FW: 4252 if (!(hba->flag & FC_ONLINE_MODE)) { 4253 return (FC_OFFLINE); 4254 } 4255 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4256 "fca_port_manage: FC_PORT_DOWNLOAD_FW"); 4257 ret = emlxs_fw_download(hba, pm->pm_data_buf, 4258 pm->pm_data_len, 1); 4259 break; 4260 4261 case FC_PORT_DOWNLOAD_FCODE: 4262 if (!(hba->flag & FC_ONLINE_MODE)) { 4263 return (FC_OFFLINE); 4264 } 4265 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4266 "fca_port_manage: FC_PORT_DOWNLOAD_FCODE"); 4267 ret = emlxs_fw_download(hba, pm->pm_data_buf, 4268 pm->pm_data_len, 1); 4269 break; 4270 4271 case FC_PORT_DIAG: 4272 { 4273 uint32_t errno = 0; 4274 uint32_t did = 0; 4275 uint32_t pattern = 0; 4276 4277 switch (pm->pm_cmd_flags) { 4278 case EMLXS_DIAG_BIU: 4279 4280 if (!(hba->flag & FC_ONLINE_MODE)) { 4281 return (FC_OFFLINE); 4282 } 4283 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4284 "fca_port_manage: EMLXS_DIAG_BIU"); 4285 4286 if (pm->pm_data_len) { 4287 pattern = *((uint32_t *)pm->pm_data_buf); 4288 } 4289 4290 errno = emlxs_diag_biu_run(hba, pattern); 4291 4292 if (pm->pm_stat_len == sizeof (errno)) { 4293 *(int *)pm->pm_stat_buf = errno; 4294 } 4295 4296 break; 4297 4298 4299 case EMLXS_DIAG_POST: 4300 4301 if (!(hba->flag & FC_ONLINE_MODE)) { 4302 return (FC_OFFLINE); 4303 } 4304 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4305 "fca_port_manage: EMLXS_DIAG_POST"); 4306 4307 errno = 
emlxs_diag_post_run(hba); 4308 4309 if (pm->pm_stat_len == sizeof (errno)) { 4310 *(int *)pm->pm_stat_buf = errno; 4311 } 4312 4313 break; 4314 4315 4316 case EMLXS_DIAG_ECHO: 4317 4318 if (!(hba->flag & FC_ONLINE_MODE)) { 4319 return (FC_OFFLINE); 4320 } 4321 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4322 "fca_port_manage: EMLXS_DIAG_ECHO"); 4323 4324 if (pm->pm_cmd_len != sizeof (uint32_t)) { 4325 ret = FC_INVALID_REQUEST; 4326 break; 4327 } 4328 4329 did = *((uint32_t *)pm->pm_cmd_buf); 4330 4331 if (pm->pm_data_len) { 4332 pattern = *((uint32_t *)pm->pm_data_buf); 4333 } 4334 4335 errno = emlxs_diag_echo_run(port, did, pattern); 4336 4337 if (pm->pm_stat_len == sizeof (errno)) { 4338 *(int *)pm->pm_stat_buf = errno; 4339 } 4340 4341 break; 4342 4343 4344 case EMLXS_PARM_GET_NUM: 4345 { 4346 uint32_t *num; 4347 emlxs_config_t *cfg; 4348 uint32_t i; 4349 uint32_t count; 4350 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4351 "fca_port_manage: EMLXS_PARM_GET_NUM"); 4352 4353 if (pm->pm_stat_len < sizeof (uint32_t)) { 4354 ret = FC_NOMEM; 4355 break; 4356 } 4357 4358 num = (uint32_t *)pm->pm_stat_buf; 4359 count = 0; 4360 cfg = &CFG; 4361 for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) { 4362 if (!(cfg->flags & PARM_HIDDEN)) { 4363 count++; 4364 } 4365 4366 } 4367 4368 *num = count; 4369 4370 break; 4371 } 4372 4373 case EMLXS_PARM_GET_LIST: 4374 { 4375 emlxs_parm_t *parm; 4376 emlxs_config_t *cfg; 4377 uint32_t i; 4378 uint32_t max_count; 4379 4380 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4381 "fca_port_manage: EMLXS_PARM_GET_LIST"); 4382 4383 if (pm->pm_stat_len < sizeof (emlxs_parm_t)) { 4384 ret = FC_NOMEM; 4385 break; 4386 } 4387 4388 max_count = pm->pm_stat_len / sizeof (emlxs_parm_t); 4389 4390 parm = (emlxs_parm_t *)pm->pm_stat_buf; 4391 cfg = &CFG; 4392 for (i = 0; i < NUM_CFG_PARAM && max_count; i++, 4393 cfg++) { 4394 if (!(cfg->flags & PARM_HIDDEN)) { 4395 (void) strcpy(parm->label, cfg->string); 4396 parm->min = cfg->low; 4397 parm->max = cfg->hi; 4398 parm->def = cfg->def; 4399 parm->current = cfg->current; 4400 parm->flags = cfg->flags; 4401 (void) strcpy(parm->help, cfg->help); 4402 parm++; 4403 max_count--; 4404 } 4405 } 4406 4407 break; 4408 } 4409 4410 case EMLXS_PARM_GET: 4411 { 4412 emlxs_parm_t *parm_in; 4413 emlxs_parm_t *parm_out; 4414 emlxs_config_t *cfg; 4415 uint32_t i; 4416 uint32_t len; 4417 4418 if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) { 4419 EMLXS_MSGF(EMLXS_CONTEXT, 4420 &emlxs_sfs_debug_msg, 4421 "fca_port_manage: EMLXS_PARM_GET. " 4422 "inbuf too small."); 4423 4424 ret = FC_BADCMD; 4425 break; 4426 } 4427 4428 if (pm->pm_stat_len < sizeof (emlxs_parm_t)) { 4429 EMLXS_MSGF(EMLXS_CONTEXT, 4430 &emlxs_sfs_debug_msg, 4431 "fca_port_manage: EMLXS_PARM_GET. 
" 4432 "outbuf too small"); 4433 4434 ret = FC_BADCMD; 4435 break; 4436 } 4437 4438 parm_in = (emlxs_parm_t *)pm->pm_cmd_buf; 4439 parm_out = (emlxs_parm_t *)pm->pm_stat_buf; 4440 len = strlen(parm_in->label); 4441 cfg = &CFG; 4442 ret = FC_BADOBJECT; 4443 4444 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4445 "fca_port_manage: EMLXS_PARM_GET: %s", 4446 parm_in->label); 4447 4448 for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) { 4449 if (len == strlen(cfg->string) && 4450 (strcmp(parm_in->label, 4451 cfg->string) == 0)) { 4452 (void) strcpy(parm_out->label, 4453 cfg->string); 4454 parm_out->min = cfg->low; 4455 parm_out->max = cfg->hi; 4456 parm_out->def = cfg->def; 4457 parm_out->current = cfg->current; 4458 parm_out->flags = cfg->flags; 4459 (void) strcpy(parm_out->help, 4460 cfg->help); 4461 4462 ret = FC_SUCCESS; 4463 break; 4464 } 4465 } 4466 4467 break; 4468 } 4469 4470 case EMLXS_PARM_SET: 4471 { 4472 emlxs_parm_t *parm_in; 4473 emlxs_parm_t *parm_out; 4474 emlxs_config_t *cfg; 4475 uint32_t i; 4476 uint32_t len; 4477 4478 if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) { 4479 EMLXS_MSGF(EMLXS_CONTEXT, 4480 &emlxs_sfs_debug_msg, 4481 "fca_port_manage: EMLXS_PARM_GET. " 4482 "inbuf too small."); 4483 4484 ret = FC_BADCMD; 4485 break; 4486 } 4487 4488 if (pm->pm_stat_len < sizeof (emlxs_parm_t)) { 4489 EMLXS_MSGF(EMLXS_CONTEXT, 4490 &emlxs_sfs_debug_msg, 4491 "fca_port_manage: EMLXS_PARM_GET. " 4492 "outbuf too small"); 4493 ret = FC_BADCMD; 4494 break; 4495 } 4496 4497 parm_in = (emlxs_parm_t *)pm->pm_cmd_buf; 4498 parm_out = (emlxs_parm_t *)pm->pm_stat_buf; 4499 len = strlen(parm_in->label); 4500 cfg = &CFG; 4501 ret = FC_BADOBJECT; 4502 4503 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4504 "fca_port_manage: EMLXS_PARM_SET: %s=0x%x,%d", 4505 parm_in->label, parm_in->current, 4506 parm_in->current); 4507 4508 for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) { 4509 /* Find matching parameter string */ 4510 if (len == strlen(cfg->string) && 4511 (strcmp(parm_in->label, 4512 cfg->string) == 0)) { 4513 /* Attempt to update parameter */ 4514 if (emlxs_set_parm(hba, i, 4515 parm_in->current) == FC_SUCCESS) { 4516 (void) strcpy(parm_out->label, 4517 cfg->string); 4518 parm_out->min = cfg->low; 4519 parm_out->max = cfg->hi; 4520 parm_out->def = cfg->def; 4521 parm_out->current = 4522 cfg->current; 4523 parm_out->flags = cfg->flags; 4524 (void) strcpy(parm_out->help, 4525 cfg->help); 4526 4527 ret = FC_SUCCESS; 4528 } 4529 4530 break; 4531 } 4532 } 4533 4534 break; 4535 } 4536 4537 case EMLXS_LOG_GET: 4538 { 4539 emlxs_log_req_t *req; 4540 emlxs_log_resp_t *resp; 4541 uint32_t len; 4542 4543 /* Check command size */ 4544 if (pm->pm_cmd_len < sizeof (emlxs_log_req_t)) { 4545 ret = FC_BADCMD; 4546 break; 4547 } 4548 4549 /* Get the request */ 4550 req = (emlxs_log_req_t *)pm->pm_cmd_buf; 4551 4552 /* Calculate the response length from the request */ 4553 len = sizeof (emlxs_log_resp_t) + 4554 (req->count * MAX_LOG_MSG_LENGTH); 4555 4556 /* Check the response buffer length */ 4557 if (pm->pm_stat_len < len) { 4558 ret = FC_BADCMD; 4559 break; 4560 } 4561 4562 /* Get the response pointer */ 4563 resp = (emlxs_log_resp_t *)pm->pm_stat_buf; 4564 4565 /* Get the request log enties */ 4566 (void) emlxs_msg_log_get(hba, req, resp); 4567 4568 ret = FC_SUCCESS; 4569 break; 4570 } 4571 4572 case EMLXS_GET_BOOT_REV: 4573 { 4574 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4575 "fca_port_manage: EMLXS_GET_BOOT_REV"); 4576 4577 if (pm->pm_stat_len < strlen(vpd->boot_version)) { 4578 ret = FC_NOMEM; 4579 break; 
4580 } 4581 4582 bzero(pm->pm_stat_buf, pm->pm_stat_len); 4583 (void) sprintf(pm->pm_stat_buf, "%s %s", 4584 hba->model_info.model, vpd->boot_version); 4585 4586 break; 4587 } 4588 4589 case EMLXS_DOWNLOAD_BOOT: 4590 if (!(hba->flag & FC_ONLINE_MODE)) { 4591 return (FC_OFFLINE); 4592 } 4593 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4594 "fca_port_manage: EMLXS_DOWNLOAD_BOOT"); 4595 4596 ret = emlxs_fw_download(hba, pm->pm_data_buf, 4597 pm->pm_data_len, 1); 4598 break; 4599 4600 case EMLXS_DOWNLOAD_CFL: 4601 { 4602 uint32_t *buffer; 4603 uint32_t region; 4604 uint32_t length; 4605 4606 if (!(hba->flag & FC_ONLINE_MODE)) { 4607 return (FC_OFFLINE); 4608 } 4609 4610 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4611 "fca_port_manage: EMLXS_DOWNLOAD_CFL"); 4612 4613 /* Extract the region number from the first word. */ 4614 buffer = (uint32_t *)pm->pm_data_buf; 4615 region = *buffer++; 4616 4617 /* Adjust the image length for the header word */ 4618 length = pm->pm_data_len - 4; 4619 4620 ret = 4621 emlxs_cfl_download(hba, region, (caddr_t)buffer, 4622 length); 4623 break; 4624 } 4625 4626 case EMLXS_VPD_GET: 4627 { 4628 emlxs_vpd_desc_t *vpd_out; 4629 4630 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4631 "fca_port_manage: EMLXS_VPD_GET"); 4632 4633 if (pm->pm_stat_len < sizeof (emlxs_vpd_desc_t)) { 4634 ret = FC_BADCMD; 4635 break; 4636 } 4637 4638 vpd_out = (emlxs_vpd_desc_t *)pm->pm_stat_buf; 4639 bzero(vpd_out, sizeof (emlxs_vpd_desc_t)); 4640 4641 (void) strncpy(vpd_out->id, vpd->id, 4642 sizeof (vpd_out->id)); 4643 (void) strncpy(vpd_out->part_num, vpd->part_num, 4644 sizeof (vpd_out->part_num)); 4645 (void) strncpy(vpd_out->eng_change, vpd->eng_change, 4646 sizeof (vpd_out->eng_change)); 4647 (void) strncpy(vpd_out->manufacturer, vpd->manufacturer, 4648 sizeof (vpd_out->manufacturer)); 4649 (void) strncpy(vpd_out->serial_num, vpd->serial_num, 4650 sizeof (vpd_out->serial_num)); 4651 (void) strncpy(vpd_out->model, vpd->model, 4652 sizeof (vpd_out->model)); 4653 (void) strncpy(vpd_out->model_desc, vpd->model_desc, 4654 sizeof (vpd_out->model_desc)); 4655 (void) strncpy(vpd_out->port_num, vpd->port_num, 4656 sizeof (vpd_out->port_num)); 4657 (void) strncpy(vpd_out->prog_types, vpd->prog_types, 4658 sizeof (vpd_out->prog_types)); 4659 4660 ret = FC_SUCCESS; 4661 4662 break; 4663 } 4664 4665 case EMLXS_GET_FCIO_REV: 4666 { 4667 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4668 "fca_port_manage: EMLXS_GET_FCIO_REV"); 4669 4670 if (pm->pm_stat_len < sizeof (uint32_t)) { 4671 ret = FC_NOMEM; 4672 break; 4673 } 4674 4675 bzero(pm->pm_stat_buf, pm->pm_stat_len); 4676 *(uint32_t *)pm->pm_stat_buf = FCIO_REV; 4677 4678 break; 4679 } 4680 4681 case EMLXS_GET_DFC_REV: 4682 { 4683 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4684 "fca_port_manage: EMLXS_GET_DFC_REV"); 4685 4686 if (pm->pm_stat_len < sizeof (uint32_t)) { 4687 ret = FC_NOMEM; 4688 break; 4689 } 4690 4691 bzero(pm->pm_stat_buf, pm->pm_stat_len); 4692 *(uint32_t *)pm->pm_stat_buf = DFC_REV; 4693 4694 break; 4695 } 4696 4697 case EMLXS_SET_BOOT_STATE: 4698 case EMLXS_SET_BOOT_STATE_old: 4699 { 4700 uint32_t state; 4701 4702 if (!(hba->flag & FC_ONLINE_MODE)) { 4703 return (FC_OFFLINE); 4704 } 4705 if (pm->pm_cmd_len < sizeof (uint32_t)) { 4706 EMLXS_MSGF(EMLXS_CONTEXT, 4707 &emlxs_sfs_debug_msg, 4708 "fca_port_manage: EMLXS_SET_BOOT_STATE"); 4709 ret = FC_BADCMD; 4710 break; 4711 } 4712 4713 state = *(uint32_t *)pm->pm_cmd_buf; 4714 4715 if (state == 0) { 4716 EMLXS_MSGF(EMLXS_CONTEXT, 4717 &emlxs_sfs_debug_msg, 4718 
"fca_port_manage: EMLXS_SET_BOOT_STATE: " 4719 "Disable"); 4720 ret = emlxs_boot_code_disable(hba); 4721 } else { 4722 EMLXS_MSGF(EMLXS_CONTEXT, 4723 &emlxs_sfs_debug_msg, 4724 "fca_port_manage: EMLXS_SET_BOOT_STATE: " 4725 "Enable"); 4726 ret = emlxs_boot_code_enable(hba); 4727 } 4728 4729 break; 4730 } 4731 4732 case EMLXS_GET_BOOT_STATE: 4733 case EMLXS_GET_BOOT_STATE_old: 4734 { 4735 if (!(hba->flag & FC_ONLINE_MODE)) { 4736 return (FC_OFFLINE); 4737 } 4738 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4739 "fca_port_manage: EMLXS_GET_BOOT_STATE"); 4740 4741 if (pm->pm_stat_len < sizeof (uint32_t)) { 4742 ret = FC_NOMEM; 4743 break; 4744 } 4745 bzero(pm->pm_stat_buf, pm->pm_stat_len); 4746 4747 ret = emlxs_boot_code_state(hba); 4748 4749 if (ret == FC_SUCCESS) { 4750 *(uint32_t *)pm->pm_stat_buf = 1; 4751 ret = FC_SUCCESS; 4752 } else if (ret == FC_FAILURE) { 4753 ret = FC_SUCCESS; 4754 } 4755 4756 break; 4757 } 4758 4759 case EMLXS_HW_ERROR_TEST: 4760 { 4761 /* 4762 * This command is used for simulating HW ERROR 4763 * on SLI4 only. 4764 */ 4765 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) { 4766 ret = FC_INVALID_REQUEST; 4767 break; 4768 } 4769 hba->sli.sli4.flag |= EMLXS_SLI4_HW_ERROR; 4770 break; 4771 } 4772 4773 case EMLXS_MB_TIMEOUT_TEST: 4774 { 4775 if (!(hba->flag & FC_ONLINE_MODE)) { 4776 return (FC_OFFLINE); 4777 } 4778 4779 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4780 "fca_port_manage: EMLXS_HW_ERROR_TEST"); 4781 4782 /* Trigger a mailbox timeout */ 4783 hba->mbox_timer = hba->timer_tics; 4784 4785 break; 4786 } 4787 4788 case EMLXS_TEST_CODE: 4789 { 4790 uint32_t *cmd; 4791 4792 if (!(hba->flag & FC_ONLINE_MODE)) { 4793 return (FC_OFFLINE); 4794 } 4795 4796 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4797 "fca_port_manage: EMLXS_TEST_CODE"); 4798 4799 if (pm->pm_cmd_len < sizeof (uint32_t)) { 4800 EMLXS_MSGF(EMLXS_CONTEXT, 4801 &emlxs_sfs_debug_msg, 4802 "fca_port_manage: EMLXS_TEST_CODE. 
" 4803 "inbuf to small."); 4804 4805 ret = FC_BADCMD; 4806 break; 4807 } 4808 4809 cmd = (uint32_t *)pm->pm_cmd_buf; 4810 4811 ret = emlxs_test(hba, cmd[0], 4812 (pm->pm_cmd_len/sizeof (uint32_t)) - 1, &cmd[1]); 4813 4814 break; 4815 } 4816 4817 case EMLXS_BAR_IO: 4818 { 4819 uint32_t *cmd; 4820 uint32_t *datap; 4821 uint32_t offset; 4822 caddr_t addr; 4823 uint32_t i; 4824 uint32_t tx_cnt; 4825 uint32_t chip_cnt; 4826 4827 cmd = (uint32_t *)pm->pm_cmd_buf; 4828 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4829 "fca_port_manage: EMLXS_BAR_IO %x %x %x", 4830 cmd[0], cmd[1], cmd[2]); 4831 4832 offset = cmd[1]; 4833 4834 ret = FC_SUCCESS; 4835 4836 switch (cmd[0]) { 4837 case 2: /* bar1read */ 4838 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) { 4839 return (FC_BADCMD); 4840 } 4841 4842 /* Registers in this range are invalid */ 4843 if ((offset >= 0x4C00) && (offset < 0x5000)) { 4844 return (FC_BADCMD); 4845 } 4846 if ((offset >= 0x5800) || (offset & 0x3)) { 4847 return (FC_BADCMD); 4848 } 4849 datap = (uint32_t *)pm->pm_stat_buf; 4850 4851 for (i = 0; i < pm->pm_stat_len; 4852 i += sizeof (uint32_t)) { 4853 if ((offset >= 0x4C00) && 4854 (offset < 0x5000)) { 4855 pm->pm_stat_len = i; 4856 break; 4857 } 4858 if (offset >= 0x5800) { 4859 pm->pm_stat_len = i; 4860 break; 4861 } 4862 addr = hba->sli.sli4.bar1_addr + offset; 4863 *datap = READ_BAR1_REG(hba, addr); 4864 datap++; 4865 offset += sizeof (uint32_t); 4866 } 4867 #ifdef FMA_SUPPORT 4868 /* Access handle validation */ 4869 EMLXS_CHK_ACC_HANDLE(hba, 4870 hba->sli.sli4.bar1_acc_handle); 4871 #endif /* FMA_SUPPORT */ 4872 break; 4873 case 3: /* bar2read */ 4874 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) { 4875 return (FC_BADCMD); 4876 } 4877 if ((offset >= 0x1000) || (offset & 0x3)) { 4878 return (FC_BADCMD); 4879 } 4880 datap = (uint32_t *)pm->pm_stat_buf; 4881 4882 for (i = 0; i < pm->pm_stat_len; 4883 i += sizeof (uint32_t)) { 4884 *datap = READ_BAR2_REG(hba, 4885 hba->sli.sli4.bar2_addr + offset); 4886 datap++; 4887 offset += sizeof (uint32_t); 4888 } 4889 #ifdef FMA_SUPPORT 4890 /* Access handle validation */ 4891 EMLXS_CHK_ACC_HANDLE(hba, 4892 hba->sli.sli4.bar2_acc_handle); 4893 #endif /* FMA_SUPPORT */ 4894 break; 4895 case 4: /* bar1write */ 4896 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) { 4897 return (FC_BADCMD); 4898 } 4899 WRITE_BAR1_REG(hba, hba->sli.sli4.bar1_addr + 4900 offset, cmd[2]); 4901 #ifdef FMA_SUPPORT 4902 /* Access handle validation */ 4903 EMLXS_CHK_ACC_HANDLE(hba, 4904 hba->sli.sli4.bar1_acc_handle); 4905 #endif /* FMA_SUPPORT */ 4906 break; 4907 case 5: /* bar2write */ 4908 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) { 4909 return (FC_BADCMD); 4910 } 4911 WRITE_BAR2_REG(hba, hba->sli.sli4.bar2_addr + 4912 offset, cmd[2]); 4913 #ifdef FMA_SUPPORT 4914 /* Access handle validation */ 4915 EMLXS_CHK_ACC_HANDLE(hba, 4916 hba->sli.sli4.bar2_acc_handle); 4917 #endif /* FMA_SUPPORT */ 4918 break; 4919 case 6: /* dumpbsmbox */ 4920 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) { 4921 return (FC_BADCMD); 4922 } 4923 if (offset != 0) { 4924 return (FC_BADCMD); 4925 } 4926 4927 bcopy((caddr_t)hba->sli.sli4.bootstrapmb.virt, 4928 (caddr_t)pm->pm_stat_buf, 256); 4929 break; 4930 case 7: /* pciread */ 4931 if ((offset >= 0x200) || (offset & 0x3)) { 4932 return (FC_BADCMD); 4933 } 4934 datap = (uint32_t *)pm->pm_stat_buf; 4935 for (i = 0; i < pm->pm_stat_len; 4936 i += sizeof (uint32_t)) { 4937 *datap = ddi_get32(hba->pci_acc_handle, 4938 (uint32_t *)(hba->pci_addr + 4939 offset)); 4940 datap++; 4941 offset += sizeof (uint32_t); 4942 } 
4943 #ifdef FMA_SUPPORT 4944 /* Access handle validation */ 4945 EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle); 4946 #endif /* FMA_SUPPORT */ 4947 break; 4948 case 8: /* abortall */ 4949 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) { 4950 return (FC_BADCMD); 4951 } 4952 emlxs_abort_all(hba, &tx_cnt, &chip_cnt); 4953 datap = (uint32_t *)pm->pm_stat_buf; 4954 *datap++ = tx_cnt; 4955 *datap = chip_cnt; 4956 break; 4957 default: 4958 ret = FC_BADCMD; 4959 break; 4960 } 4961 break; 4962 } 4963 4964 default: 4965 4966 ret = FC_INVALID_REQUEST; 4967 break; 4968 } 4969 4970 break; 4971 4972 } 4973 4974 case FC_PORT_INITIALIZE: 4975 if (!(hba->flag & FC_ONLINE_MODE)) { 4976 return (FC_OFFLINE); 4977 } 4978 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4979 "fca_port_manage: FC_PORT_INITIALIZE"); 4980 break; 4981 4982 case FC_PORT_LOOPBACK: 4983 if (!(hba->flag & FC_ONLINE_MODE)) { 4984 return (FC_OFFLINE); 4985 } 4986 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4987 "fca_port_manage: FC_PORT_LOOPBACK"); 4988 break; 4989 4990 case FC_PORT_BYPASS: 4991 if (!(hba->flag & FC_ONLINE_MODE)) { 4992 return (FC_OFFLINE); 4993 } 4994 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4995 "fca_port_manage: FC_PORT_BYPASS"); 4996 ret = FC_INVALID_REQUEST; 4997 break; 4998 4999 case FC_PORT_UNBYPASS: 5000 if (!(hba->flag & FC_ONLINE_MODE)) { 5001 return (FC_OFFLINE); 5002 } 5003 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5004 "fca_port_manage: FC_PORT_UNBYPASS"); 5005 ret = FC_INVALID_REQUEST; 5006 break; 5007 5008 case FC_PORT_GET_NODE_ID: 5009 { 5010 fc_rnid_t *rnid; 5011 5012 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5013 "fca_port_manage: FC_PORT_GET_NODE_ID"); 5014 5015 bzero(pm->pm_data_buf, pm->pm_data_len); 5016 5017 if (pm->pm_data_len < sizeof (fc_rnid_t)) { 5018 ret = FC_NOMEM; 5019 break; 5020 } 5021 5022 rnid = (fc_rnid_t *)pm->pm_data_buf; 5023 5024 (void) sprintf((char *)rnid->global_id, 5025 "%01x%01x%02x%02x%02x%02x%02x%02x%02x", 5026 hba->wwpn.nameType, hba->wwpn.IEEEextMsn, 5027 hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0], 5028 hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3], 5029 hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]); 5030 5031 rnid->unit_type = RNID_HBA; 5032 rnid->port_id = port->did; 5033 rnid->ip_version = RNID_IPV4; 5034 5035 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5036 "GET_NODE_ID: wwpn: %s", rnid->global_id); 5037 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5038 "GET_NODE_ID: unit_type: 0x%x", rnid->unit_type); 5039 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5040 "GET_NODE_ID: port_id: 0x%x", rnid->port_id); 5041 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5042 "GET_NODE_ID: num_attach: %d", rnid->num_attached); 5043 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5044 "GET_NODE_ID: ip_version: 0x%x", rnid->ip_version); 5045 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5046 "GET_NODE_ID: udp_port: 0x%x", rnid->udp_port); 5047 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5048 "GET_NODE_ID: ip_addr: %s", rnid->ip_addr); 5049 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5050 "GET_NODE_ID: resv: 0x%x", rnid->specific_id_resv); 5051 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5052 "GET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags); 5053 5054 ret = FC_SUCCESS; 5055 break; 5056 } 5057 5058 case FC_PORT_SET_NODE_ID: 5059 { 5060 fc_rnid_t *rnid; 5061 5062 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5063 "fca_port_manage: FC_PORT_SET_NODE_ID"); 5064 5065 if (pm->pm_data_len < sizeof (fc_rnid_t)) { 5066 ret = FC_NOMEM; 5067 break; 5068 } 
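/*
 * The caller supplies a complete fc_rnid_t in pm_data_buf. As implemented
 * here, the fields are only logged for debugging and the request completes
 * with FC_SUCCESS; FC_PORT_GET_NODE_ID (above) is the counterpart that
 * builds global_id from the adapter WWPN.
 */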
5069 5070 rnid = (fc_rnid_t *)pm->pm_data_buf; 5071 5072 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5073 "SET_NODE_ID: wwpn: %s", rnid->global_id); 5074 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5075 "SET_NODE_ID: unit_type: 0x%x", rnid->unit_type); 5076 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5077 "SET_NODE_ID: port_id: 0x%x", rnid->port_id); 5078 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5079 "SET_NODE_ID: num_attach: %d", rnid->num_attached); 5080 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5081 "SET_NODE_ID: ip_version: 0x%x", rnid->ip_version); 5082 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5083 "SET_NODE_ID: udp_port: 0x%x", rnid->udp_port); 5084 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5085 "SET_NODE_ID: ip_addr: %s", rnid->ip_addr); 5086 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5087 "SET_NODE_ID: resv: 0x%x", rnid->specific_id_resv); 5088 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5089 "SET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags); 5090 5091 ret = FC_SUCCESS; 5092 break; 5093 } 5094 5095 #ifdef S11 5096 case FC_PORT_GET_P2P_INFO: 5097 { 5098 fc_fca_p2p_info_t *p2p_info; 5099 NODELIST *ndlp; 5100 5101 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5102 "fca_port_manage: FC_PORT_GET_P2P_INFO"); 5103 5104 bzero(pm->pm_data_buf, pm->pm_data_len); 5105 5106 if (pm->pm_data_len < sizeof (fc_fca_p2p_info_t)) { 5107 ret = FC_NOMEM; 5108 break; 5109 } 5110 5111 p2p_info = (fc_fca_p2p_info_t *)pm->pm_data_buf; 5112 5113 if (hba->state >= FC_LINK_UP) { 5114 if ((hba->topology == TOPOLOGY_PT_PT) && 5115 (hba->flag & FC_PT_TO_PT)) { 5116 p2p_info->fca_d_id = port->did; 5117 p2p_info->d_id = port->rdid; 5118 5119 ndlp = emlxs_node_find_did(port, 5120 port->rdid); 5121 5122 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5123 "FC_PORT_GET_P2P_INFO: fca_d_id: 0x%x, " 5124 "d_id: 0x%x, ndlp: 0x%p", port->did, 5125 port->rdid, ndlp); 5126 if (ndlp) { 5127 bcopy(&ndlp->nlp_portname, 5128 (caddr_t)&p2p_info->pwwn, 5129 sizeof (la_wwn_t)); 5130 bcopy(&ndlp->nlp_nodename, 5131 (caddr_t)&p2p_info->nwwn, 5132 sizeof (la_wwn_t)); 5133 5134 ret = FC_SUCCESS; 5135 break; 5136 5137 } 5138 } 5139 } 5140 5141 ret = FC_FAILURE; 5142 break; 5143 } 5144 #endif /* S11 */ 5145 5146 default: 5147 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5148 "fca_port_manage: code=%x", pm->pm_cmd_code); 5149 ret = FC_INVALID_REQUEST; 5150 break; 5151 5152 } 5153 5154 return (ret); 5155 5156 } /* emlxs_fca_port_manage() */ 5157 5158 5159 /*ARGSUSED*/ 5160 static uint32_t 5161 emlxs_test(emlxs_hba_t *hba, uint32_t test_code, uint32_t args, 5162 uint32_t *arg) 5163 { 5164 uint32_t rval = 0; 5165 emlxs_port_t *port = &PPORT; 5166 5167 switch (test_code) { 5168 #ifdef TEST_SUPPORT 5169 case 1: /* SCSI underrun */ 5170 { 5171 hba->underrun_counter = (args)? arg[0]:1; 5172 break; 5173 } 5174 #endif /* TEST_SUPPORT */ 5175 5176 default: 5177 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5178 "emlxs_test: Unsupported test code. (0x%x)", test_code); 5179 rval = FC_INVALID_REQUEST; 5180 } 5181 5182 return (rval); 5183 5184 } /* emlxs_test() */ 5185 5186 5187 /* 5188 * Given the device number, return the devinfo pointer or the ddiinst number. 5189 * Note: this routine must be successful on DDI_INFO_DEVT2INSTANCE even 5190 * before attach. 5191 * 5192 * Translate "dev_t" to a pointer to the associated "dev_info_t". 
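 *
 * For example, DDI_INFO_DEVT2INSTANCE only maps the minor number to the
 * instance (getminor(arg)), which requires no soft state and therefore works
 * before attach; DDI_INFO_DEVT2DEVINFO additionally looks up the soft state
 * to resolve the dip and returns NULL if the instance is not attached.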
5193 */ 5194 /*ARGSUSED*/ 5195 static int 5196 emlxs_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 5197 { 5198 emlxs_hba_t *hba; 5199 int32_t ddiinst; 5200 5201 ddiinst = getminor((dev_t)arg); 5202 5203 switch (infocmd) { 5204 case DDI_INFO_DEVT2DEVINFO: 5205 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 5206 if (hba) 5207 *result = hba->dip; 5208 else 5209 *result = NULL; 5210 break; 5211 5212 case DDI_INFO_DEVT2INSTANCE: 5213 *result = (void *)((unsigned long)ddiinst); 5214 break; 5215 5216 default: 5217 return (DDI_FAILURE); 5218 } 5219 5220 return (DDI_SUCCESS); 5221 5222 } /* emlxs_info() */ 5223 5224 5225 static int32_t 5226 emlxs_power(dev_info_t *dip, int32_t comp, int32_t level) 5227 { 5228 emlxs_hba_t *hba; 5229 emlxs_port_t *port; 5230 int32_t ddiinst; 5231 int rval = DDI_SUCCESS; 5232 5233 ddiinst = ddi_get_instance(dip); 5234 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 5235 port = &PPORT; 5236 5237 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5238 "fca_power: comp=%x level=%x", comp, level); 5239 5240 if (hba == NULL || comp != EMLXS_PM_ADAPTER) { 5241 return (DDI_FAILURE); 5242 } 5243 5244 mutex_enter(&EMLXS_PM_LOCK); 5245 5246 /* If we are already at the proper level then return success */ 5247 if (hba->pm_level == level) { 5248 mutex_exit(&EMLXS_PM_LOCK); 5249 return (DDI_SUCCESS); 5250 } 5251 5252 switch (level) { 5253 case EMLXS_PM_ADAPTER_UP: 5254 5255 /* 5256 * If we are already in emlxs_attach, 5257 * let emlxs_hba_attach take care of things 5258 */ 5259 if (hba->pm_state & EMLXS_PM_IN_ATTACH) { 5260 hba->pm_level = EMLXS_PM_ADAPTER_UP; 5261 break; 5262 } 5263 5264 /* Check if adapter is suspended */ 5265 if (hba->pm_state & EMLXS_PM_SUSPENDED) { 5266 hba->pm_level = EMLXS_PM_ADAPTER_UP; 5267 5268 /* Try to resume the port */ 5269 rval = emlxs_hba_resume(dip); 5270 5271 if (rval != DDI_SUCCESS) { 5272 hba->pm_level = EMLXS_PM_ADAPTER_DOWN; 5273 } 5274 break; 5275 } 5276 5277 /* Set adapter up */ 5278 hba->pm_level = EMLXS_PM_ADAPTER_UP; 5279 break; 5280 5281 case EMLXS_PM_ADAPTER_DOWN: 5282 5283 5284 /* 5285 * If we are already in emlxs_detach, 5286 * let emlxs_hba_detach take care of things 5287 */ 5288 if (hba->pm_state & EMLXS_PM_IN_DETACH) { 5289 hba->pm_level = EMLXS_PM_ADAPTER_DOWN; 5290 break; 5291 } 5292 5293 /* Check if adapter is not suspended */ 5294 if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) { 5295 hba->pm_level = EMLXS_PM_ADAPTER_DOWN; 5296 5297 /* Try to suspend the port */ 5298 rval = emlxs_hba_suspend(dip); 5299 5300 if (rval != DDI_SUCCESS) { 5301 hba->pm_level = EMLXS_PM_ADAPTER_UP; 5302 } 5303 5304 break; 5305 } 5306 5307 /* Set adapter down */ 5308 hba->pm_level = EMLXS_PM_ADAPTER_DOWN; 5309 break; 5310 5311 default: 5312 rval = DDI_FAILURE; 5313 break; 5314 5315 } 5316 5317 mutex_exit(&EMLXS_PM_LOCK); 5318 5319 return (rval); 5320 5321 } /* emlxs_power() */ 5322 5323 5324 #ifdef EMLXS_I386 5325 #ifdef S11 5326 /* 5327 * quiesce(9E) entry point. 5328 * 5329 * This function is called when the system is single-threaded at high PIL 5330 * with preemption disabled. Therefore, this function must not block. 5331 * 5332 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 5333 * DDI_FAILURE indicates an error condition and should almost never happen.
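 *
 * As a brief sketch of the implementation below: quiesce issues
 * EMLXS_SLI_HBA_RESET(hba, 1, 1, 1), the last argument indicating a
 * quiesce-context reset, and returns DDI_SUCCESS only if that reset
 * completes.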
5334 */ 5335 static int 5336 emlxs_quiesce(dev_info_t *dip) 5337 { 5338 emlxs_hba_t *hba; 5339 emlxs_port_t *port; 5340 int32_t ddiinst; 5341 int rval = DDI_SUCCESS; 5342 5343 ddiinst = ddi_get_instance(dip); 5344 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 5345 port = &PPORT; 5346 5347 if (hba == NULL || port == NULL) { 5348 return (DDI_FAILURE); 5349 } 5350 5351 /* The fourth arg 1 indicates the call is from quiesce */ 5352 if (EMLXS_SLI_HBA_RESET(hba, 1, 1, 1) == 0) { 5353 return (rval); 5354 } else { 5355 return (DDI_FAILURE); 5356 } 5357 5358 } /* emlxs_quiesce */ 5359 #endif 5360 #endif /* EMLXS_I386 */ 5361 5362 5363 static int 5364 emlxs_open(dev_t *dev_p, int32_t flag, int32_t otype, cred_t *cred_p) 5365 { 5366 emlxs_hba_t *hba; 5367 emlxs_port_t *port; 5368 int ddiinst; 5369 5370 ddiinst = getminor(*dev_p); 5371 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 5372 5373 if (hba == NULL) { 5374 return (ENXIO); 5375 } 5376 5377 port = &PPORT; 5378 5379 if (hba->pm_state & EMLXS_PM_SUSPENDED) { 5380 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg, 5381 "open failed: Driver suspended."); 5382 return (ENXIO); 5383 } 5384 5385 if (otype != OTYP_CHR) { 5386 return (EINVAL); 5387 } 5388 5389 if (drv_priv(cred_p)) { 5390 return (EPERM); 5391 } 5392 5393 mutex_enter(&EMLXS_IOCTL_LOCK); 5394 5395 if (hba->ioctl_flags & EMLXS_OPEN_EXCLUSIVE) { 5396 mutex_exit(&EMLXS_IOCTL_LOCK); 5397 return (EBUSY); 5398 } 5399 5400 if (flag & FEXCL) { 5401 if (hba->ioctl_flags & EMLXS_OPEN) { 5402 mutex_exit(&EMLXS_IOCTL_LOCK); 5403 return (EBUSY); 5404 } 5405 5406 hba->ioctl_flags |= EMLXS_OPEN_EXCLUSIVE; 5407 } 5408 5409 hba->ioctl_flags |= EMLXS_OPEN; 5410 5411 mutex_exit(&EMLXS_IOCTL_LOCK); 5412 5413 return (0); 5414 5415 } /* emlxs_open() */ 5416 5417 5418 /*ARGSUSED*/ 5419 static int 5420 emlxs_close(dev_t dev, int32_t flag, int32_t otype, cred_t *cred_p) 5421 { 5422 emlxs_hba_t *hba; 5423 int ddiinst; 5424 5425 ddiinst = getminor(dev); 5426 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 5427 5428 if (hba == NULL) { 5429 return (ENXIO); 5430 } 5431 5432 if (otype != OTYP_CHR) { 5433 return (EINVAL); 5434 } 5435 5436 mutex_enter(&EMLXS_IOCTL_LOCK); 5437 5438 if (!(hba->ioctl_flags & EMLXS_OPEN)) { 5439 mutex_exit(&EMLXS_IOCTL_LOCK); 5440 return (ENODEV); 5441 } 5442 5443 hba->ioctl_flags &= ~EMLXS_OPEN; 5444 hba->ioctl_flags &= ~EMLXS_OPEN_EXCLUSIVE; 5445 5446 mutex_exit(&EMLXS_IOCTL_LOCK); 5447 5448 return (0); 5449 5450 } /* emlxs_close() */ 5451 5452 5453 /*ARGSUSED*/ 5454 static int 5455 emlxs_ioctl(dev_t dev, int32_t cmd, intptr_t arg, int32_t mode, 5456 cred_t *cred_p, int32_t *rval_p) 5457 { 5458 emlxs_hba_t *hba; 5459 emlxs_port_t *port; 5460 int rval = 0; /* return code */ 5461 int ddiinst; 5462 5463 ddiinst = getminor(dev); 5464 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 5465 5466 if (hba == NULL) { 5467 return (ENXIO); 5468 } 5469 5470 port = &PPORT; 5471 5472 if (hba->pm_state & EMLXS_PM_SUSPENDED) { 5473 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg, 5474 "ioctl failed: Driver suspended."); 5475 5476 return (ENXIO); 5477 } 5478 5479 mutex_enter(&EMLXS_IOCTL_LOCK); 5480 if (!(hba->ioctl_flags & EMLXS_OPEN)) { 5481 mutex_exit(&EMLXS_IOCTL_LOCK); 5482 return (ENXIO); 5483 } 5484 mutex_exit(&EMLXS_IOCTL_LOCK); 5485 5486 #ifdef IDLE_TIMER 5487 emlxs_pm_busy_component(hba); 5488 #endif /* IDLE_TIMER */ 5489 5490 switch (cmd) { 5491 case EMLXS_DFC_COMMAND: 5492 rval = emlxs_dfc_manage(hba, (void *)arg, mode); 5493 break; 5494 5495 default: 5496 
EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg, 5497 "ioctl: Invalid command received. cmd=%x", cmd); 5498 rval = EINVAL; 5499 } 5500 5501 done: 5502 return (rval); 5503 5504 } /* emlxs_ioctl() */ 5505 5506 5507 5508 /* 5509 * 5510 * Device Driver Common Routines 5511 * 5512 */ 5513 5514 /* EMLXS_PM_LOCK must be held for this call */ 5515 static int 5516 emlxs_hba_resume(dev_info_t *dip) 5517 { 5518 emlxs_hba_t *hba; 5519 emlxs_port_t *port; 5520 int ddiinst; 5521 5522 ddiinst = ddi_get_instance(dip); 5523 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 5524 port = &PPORT; 5525 5526 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_msg, NULL); 5527 5528 if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) { 5529 return (DDI_SUCCESS); 5530 } 5531 5532 hba->pm_state &= ~EMLXS_PM_SUSPENDED; 5533 5534 /* Take the adapter online */ 5535 if (emlxs_power_up(hba)) { 5536 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_failed_msg, 5537 "Unable to take adapter online."); 5538 5539 hba->pm_state |= EMLXS_PM_SUSPENDED; 5540 5541 return (DDI_FAILURE); 5542 } 5543 5544 return (DDI_SUCCESS); 5545 5546 } /* emlxs_hba_resume() */ 5547 5548 5549 /* EMLXS_PM_LOCK must be held for this call */ 5550 static int 5551 emlxs_hba_suspend(dev_info_t *dip) 5552 { 5553 emlxs_hba_t *hba; 5554 emlxs_port_t *port; 5555 int ddiinst; 5556 5557 ddiinst = ddi_get_instance(dip); 5558 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 5559 port = &PPORT; 5560 5561 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_msg, NULL); 5562 5563 if (hba->pm_state & EMLXS_PM_SUSPENDED) { 5564 return (DDI_SUCCESS); 5565 } 5566 5567 hba->pm_state |= EMLXS_PM_SUSPENDED; 5568 5569 /* Take the adapter offline */ 5570 if (emlxs_power_down(hba)) { 5571 hba->pm_state &= ~EMLXS_PM_SUSPENDED; 5572 5573 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_failed_msg, 5574 "Unable to take adapter offline."); 5575 5576 return (DDI_FAILURE); 5577 } 5578 5579 return (DDI_SUCCESS); 5580 5581 } /* emlxs_hba_suspend() */ 5582 5583 5584 5585 static void 5586 emlxs_lock_init(emlxs_hba_t *hba) 5587 { 5588 emlxs_port_t *port = &PPORT; 5589 int32_t ddiinst; 5590 char buf[64]; 5591 uint32_t i; 5592 5593 ddiinst = hba->ddiinst; 5594 5595 /* Initialize the power management */ 5596 (void) sprintf(buf, "%s%d_pm_lock mutex", DRIVER_NAME, ddiinst); 5597 mutex_init(&EMLXS_PM_LOCK, buf, MUTEX_DRIVER, 5598 DDI_INTR_PRI(hba->intr_arg)); 5599 5600 (void) sprintf(buf, "%s%d_adap_lock mutex", DRIVER_NAME, ddiinst); 5601 mutex_init(&EMLXS_TIMER_LOCK, buf, MUTEX_DRIVER, 5602 DDI_INTR_PRI(hba->intr_arg)); 5603 5604 (void) sprintf(buf, "%s%d_adap_lock cv", DRIVER_NAME, ddiinst); 5605 cv_init(&hba->timer_lock_cv, buf, CV_DRIVER, NULL); 5606 5607 (void) sprintf(buf, "%s%d_port_lock mutex", DRIVER_NAME, ddiinst); 5608 mutex_init(&EMLXS_PORT_LOCK, buf, MUTEX_DRIVER, 5609 DDI_INTR_PRI(hba->intr_arg)); 5610 5611 (void) sprintf(buf, "%s%d_mbox_lock mutex", DRIVER_NAME, ddiinst); 5612 mutex_init(&EMLXS_MBOX_LOCK, buf, MUTEX_DRIVER, 5613 DDI_INTR_PRI(hba->intr_arg)); 5614 5615 (void) sprintf(buf, "%s%d_mbox_lock cv", DRIVER_NAME, ddiinst); 5616 cv_init(&EMLXS_MBOX_CV, buf, CV_DRIVER, NULL); 5617 5618 (void) sprintf(buf, "%s%d_linkup_lock mutex", DRIVER_NAME, ddiinst); 5619 mutex_init(&EMLXS_LINKUP_LOCK, buf, MUTEX_DRIVER, 5620 DDI_INTR_PRI(hba->intr_arg)); 5621 5622 (void) sprintf(buf, "%s%d_linkup_lock cv", DRIVER_NAME, ddiinst); 5623 cv_init(&EMLXS_LINKUP_CV, buf, CV_DRIVER, NULL); 5624 5625 (void) sprintf(buf, "%s%d_tx channel_lock mutex", DRIVER_NAME, ddiinst); 5626 mutex_init(&EMLXS_TX_CHANNEL_LOCK, buf, MUTEX_DRIVER, 
5627 DDI_INTR_PRI(hba->intr_arg)); 5628 5629 for (i = 0; i < MAX_RINGS; i++) { 5630 (void) sprintf(buf, "%s%d_cmd_ring%d_lock mutex", DRIVER_NAME, 5631 ddiinst, i); 5632 mutex_init(&EMLXS_CMD_RING_LOCK(i), buf, MUTEX_DRIVER, 5633 DDI_INTR_PRI(hba->intr_arg)); 5634 } 5635 5636 5637 for (i = 0; i < EMLXS_MAX_WQS; i++) { 5638 (void) sprintf(buf, "%s%d wq_cq_eq%d lock mutex", DRIVER_NAME, 5639 ddiinst, i); 5640 mutex_init(&EMLXS_QUE_LOCK(i), buf, MUTEX_DRIVER, 5641 DDI_INTR_PRI(hba->intr_arg)); 5642 } 5643 5644 (void) sprintf(buf, "%s%d_msiid lock mutex", DRIVER_NAME, ddiinst); 5645 mutex_init(&EMLXS_MSIID_LOCK, buf, MUTEX_DRIVER, 5646 DDI_INTR_PRI(hba->intr_arg)); 5647 5648 (void) sprintf(buf, "%s%d_fctab_lock mutex", DRIVER_NAME, ddiinst); 5649 mutex_init(&EMLXS_FCTAB_LOCK, buf, MUTEX_DRIVER, 5650 DDI_INTR_PRI(hba->intr_arg)); 5651 5652 (void) sprintf(buf, "%s%d_memget_lock mutex", DRIVER_NAME, ddiinst); 5653 mutex_init(&EMLXS_MEMGET_LOCK, buf, MUTEX_DRIVER, 5654 DDI_INTR_PRI(hba->intr_arg)); 5655 5656 (void) sprintf(buf, "%s%d_memput_lock mutex", DRIVER_NAME, ddiinst); 5657 mutex_init(&EMLXS_MEMPUT_LOCK, buf, MUTEX_DRIVER, 5658 DDI_INTR_PRI(hba->intr_arg)); 5659 5660 (void) sprintf(buf, "%s%d_ioctl_lock mutex", DRIVER_NAME, ddiinst); 5661 mutex_init(&EMLXS_IOCTL_LOCK, buf, MUTEX_DRIVER, 5662 DDI_INTR_PRI(hba->intr_arg)); 5663 5664 #ifdef DUMP_SUPPORT 5665 (void) sprintf(buf, "%s%d_dump mutex", DRIVER_NAME, ddiinst); 5666 mutex_init(&EMLXS_DUMP_LOCK, buf, MUTEX_DRIVER, 5667 DDI_INTR_PRI(hba->intr_arg)); 5668 #endif /* DUMP_SUPPORT */ 5669 5670 (void) sprintf(buf, "%s%d_thread_lock mutex", DRIVER_NAME, ddiinst); 5671 mutex_init(&EMLXS_SPAWN_LOCK, buf, MUTEX_DRIVER, 5672 DDI_INTR_PRI(hba->intr_arg)); 5673 5674 /* Create per port locks */ 5675 for (i = 0; i < MAX_VPORTS; i++) { 5676 port = &VPORT(i); 5677 5678 rw_init(&port->node_rwlock, NULL, RW_DRIVER, NULL); 5679 5680 if (i == 0) { 5681 (void) sprintf(buf, "%s%d_pkt_lock mutex", DRIVER_NAME, 5682 ddiinst); 5683 mutex_init(&EMLXS_PKT_LOCK, buf, MUTEX_DRIVER, 5684 DDI_INTR_PRI(hba->intr_arg)); 5685 5686 (void) sprintf(buf, "%s%d_pkt_lock cv", DRIVER_NAME, 5687 ddiinst); 5688 cv_init(&EMLXS_PKT_CV, buf, CV_DRIVER, NULL); 5689 5690 (void) sprintf(buf, "%s%d_ub_lock mutex", DRIVER_NAME, 5691 ddiinst); 5692 mutex_init(&EMLXS_UB_LOCK, buf, MUTEX_DRIVER, 5693 DDI_INTR_PRI(hba->intr_arg)); 5694 } else { 5695 (void) sprintf(buf, "%s%d.%d_pkt_lock mutex", 5696 DRIVER_NAME, ddiinst, port->vpi); 5697 mutex_init(&EMLXS_PKT_LOCK, buf, MUTEX_DRIVER, 5698 DDI_INTR_PRI(hba->intr_arg)); 5699 5700 (void) sprintf(buf, "%s%d.%d_pkt_lock cv", DRIVER_NAME, 5701 ddiinst, port->vpi); 5702 cv_init(&EMLXS_PKT_CV, buf, CV_DRIVER, NULL); 5703 5704 (void) sprintf(buf, "%s%d.%d_ub_lock mutex", 5705 DRIVER_NAME, ddiinst, port->vpi); 5706 mutex_init(&EMLXS_UB_LOCK, buf, MUTEX_DRIVER, 5707 DDI_INTR_PRI(hba->intr_arg)); 5708 } 5709 } 5710 5711 return; 5712 5713 } /* emlxs_lock_init() */ 5714 5715 5716 5717 static void 5718 emlxs_lock_destroy(emlxs_hba_t *hba) 5719 { 5720 emlxs_port_t *port = &PPORT; 5721 uint32_t i; 5722 5723 mutex_destroy(&EMLXS_TIMER_LOCK); 5724 cv_destroy(&hba->timer_lock_cv); 5725 5726 mutex_destroy(&EMLXS_PORT_LOCK); 5727 5728 cv_destroy(&EMLXS_MBOX_CV); 5729 cv_destroy(&EMLXS_LINKUP_CV); 5730 5731 mutex_destroy(&EMLXS_LINKUP_LOCK); 5732 mutex_destroy(&EMLXS_MBOX_LOCK); 5733 5734 mutex_destroy(&EMLXS_TX_CHANNEL_LOCK); 5735 5736 for (i = 0; i < MAX_RINGS; i++) { 5737 mutex_destroy(&EMLXS_CMD_RING_LOCK(i)); 5738 } 5739 5740 for (i = 0; i < EMLXS_MAX_WQS; 
i++) { 5741 mutex_destroy(&EMLXS_QUE_LOCK(i)); 5742 } 5743 5744 mutex_destroy(&EMLXS_MSIID_LOCK); 5745 5746 mutex_destroy(&EMLXS_FCTAB_LOCK); 5747 mutex_destroy(&EMLXS_MEMGET_LOCK); 5748 mutex_destroy(&EMLXS_MEMPUT_LOCK); 5749 mutex_destroy(&EMLXS_IOCTL_LOCK); 5750 mutex_destroy(&EMLXS_SPAWN_LOCK); 5751 mutex_destroy(&EMLXS_PM_LOCK); 5752 5753 #ifdef DUMP_SUPPORT 5754 mutex_destroy(&EMLXS_DUMP_LOCK); 5755 #endif /* DUMP_SUPPORT */ 5756 5757 /* Destroy per port locks */ 5758 for (i = 0; i < MAX_VPORTS; i++) { 5759 port = &VPORT(i); 5760 rw_destroy(&port->node_rwlock); 5761 mutex_destroy(&EMLXS_PKT_LOCK); 5762 cv_destroy(&EMLXS_PKT_CV); 5763 mutex_destroy(&EMLXS_UB_LOCK); 5764 } 5765 5766 return; 5767 5768 } /* emlxs_lock_destroy() */ 5769 5770 5771 /* init_flag values */ 5772 #define ATTACH_SOFT_STATE 0x00000001 5773 #define ATTACH_FCA_TRAN 0x00000002 5774 #define ATTACH_HBA 0x00000004 5775 #define ATTACH_LOG 0x00000008 5776 #define ATTACH_MAP_BUS 0x00000010 5777 #define ATTACH_INTR_INIT 0x00000020 5778 #define ATTACH_PROP 0x00000040 5779 #define ATTACH_LOCK 0x00000080 5780 #define ATTACH_THREAD 0x00000100 5781 #define ATTACH_INTR_ADD 0x00000200 5782 #define ATTACH_ONLINE 0x00000400 5783 #define ATTACH_NODE 0x00000800 5784 #define ATTACH_FCT 0x00001000 5785 #define ATTACH_FCA 0x00002000 5786 #define ATTACH_KSTAT 0x00004000 5787 #define ATTACH_DHCHAP 0x00008000 5788 #define ATTACH_FM 0x00010000 5789 #define ATTACH_MAP_SLI 0x00020000 5790 #define ATTACH_SPAWN 0x00040000 5791 #define ATTACH_EVENTS 0x00080000 5792 5793 static void 5794 emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag, uint32_t failed) 5795 { 5796 emlxs_hba_t *hba = NULL; 5797 int ddiinst; 5798 5799 ddiinst = ddi_get_instance(dip); 5800 5801 if (init_flag & ATTACH_HBA) { 5802 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 5803 5804 if (init_flag & ATTACH_SPAWN) { 5805 emlxs_thread_spawn_destroy(hba); 5806 } 5807 5808 if (init_flag & ATTACH_EVENTS) { 5809 (void) emlxs_event_queue_destroy(hba); 5810 } 5811 5812 if (init_flag & ATTACH_ONLINE) { 5813 (void) emlxs_offline(hba); 5814 } 5815 5816 if (init_flag & ATTACH_INTR_ADD) { 5817 (void) EMLXS_INTR_REMOVE(hba); 5818 } 5819 #ifdef SFCT_SUPPORT 5820 if (init_flag & ATTACH_FCT) { 5821 emlxs_fct_detach(hba); 5822 if (hba->tgt_mode) { 5823 emlxs_fct_modclose(); 5824 } 5825 } 5826 #endif /* SFCT_SUPPORT */ 5827 5828 #ifdef DHCHAP_SUPPORT 5829 if (init_flag & ATTACH_DHCHAP) { 5830 emlxs_dhc_detach(hba); 5831 } 5832 #endif /* DHCHAP_SUPPORT */ 5833 5834 if (init_flag & ATTACH_KSTAT) { 5835 kstat_delete(hba->kstat); 5836 } 5837 5838 if (init_flag & ATTACH_FCA) { 5839 emlxs_fca_detach(hba); 5840 } 5841 5842 if (init_flag & ATTACH_NODE) { 5843 (void) ddi_remove_minor_node(hba->dip, "devctl"); 5844 } 5845 5846 if (init_flag & ATTACH_THREAD) { 5847 emlxs_thread_destroy(&hba->iodone_thread); 5848 } 5849 5850 if (init_flag & ATTACH_PROP) { 5851 (void) ddi_prop_remove_all(hba->dip); 5852 } 5853 5854 if (init_flag & ATTACH_LOCK) { 5855 emlxs_lock_destroy(hba); 5856 } 5857 5858 if (init_flag & ATTACH_INTR_INIT) { 5859 (void) EMLXS_INTR_UNINIT(hba); 5860 } 5861 5862 if (init_flag & ATTACH_MAP_BUS) { 5863 emlxs_unmap_bus(hba); 5864 } 5865 5866 if (init_flag & ATTACH_MAP_SLI) { 5867 EMLXS_SLI_UNMAP_HDW(hba); 5868 } 5869 5870 #ifdef FMA_SUPPORT 5871 if (init_flag & ATTACH_FM) { 5872 emlxs_fm_fini(hba); 5873 } 5874 #endif /* FMA_SUPPORT */ 5875 5876 if (init_flag & ATTACH_LOG) { 5877 emlxs_msg_log_destroy(hba); 5878 } 5879 5880 if (init_flag & ATTACH_FCA_TRAN) { 5881 (void) 
ddi_set_driver_private(hba->dip, NULL); 5882 kmem_free(hba->fca_tran, sizeof (fc_fca_tran_t)); 5883 hba->fca_tran = NULL; 5884 } 5885 5886 if (init_flag & ATTACH_HBA) { 5887 emlxs_device.log[hba->emlxinst] = 0; 5888 emlxs_device.hba[hba->emlxinst] = 5889 (emlxs_hba_t *)((unsigned long)((failed) ? -1 : 0)); 5890 #ifdef DUMP_SUPPORT 5891 emlxs_device.dump_txtfile[hba->emlxinst] = 0; 5892 emlxs_device.dump_dmpfile[hba->emlxinst] = 0; 5893 emlxs_device.dump_ceefile[hba->emlxinst] = 0; 5894 #endif /* DUMP_SUPPORT */ 5895 5896 } 5897 } 5898 5899 if (init_flag & ATTACH_SOFT_STATE) { 5900 (void) ddi_soft_state_free(emlxs_soft_state, ddiinst); 5901 } 5902 5903 return; 5904 5905 } /* emlxs_driver_remove() */ 5906 5907 5908 5909 /* This determines which ports will be initiator mode */ 5910 static void 5911 emlxs_fca_init(emlxs_hba_t *hba) 5912 { 5913 emlxs_port_t *port = &PPORT; 5914 emlxs_port_t *vport; 5915 uint32_t i; 5916 5917 if (!hba->ini_mode) { 5918 return; 5919 } 5920 /* Check if SFS present */ 5921 if (((void *)MODSYM(fc_fca_init) == NULL) || 5922 ((void *)MODSYM(fc_fca_attach) == NULL)) { 5923 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 5924 "SFS not present. Initiator mode disabled."); 5925 goto failed; 5926 } 5927 5928 /* Check if our SFS driver interface matches the current SFS stack */ 5929 if (MODSYM(fc_fca_attach) (hba->dip, hba->fca_tran) != DDI_SUCCESS) { 5930 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 5931 "SFS/FCA version mismatch. FCA=0x%x", 5932 hba->fca_tran->fca_version); 5933 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 5934 "SFS present. Initiator mode disabled."); 5935 5936 goto failed; 5937 } 5938 5939 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 5940 "SFS present. Initiator mode enabled."); 5941 5942 return; 5943 5944 failed: 5945 5946 hba->ini_mode = 0; 5947 for (i = 0; i < MAX_VPORTS; i++) { 5948 vport = &VPORT(i); 5949 vport->ini_mode = 0; 5950 } 5951 5952 return; 5953 5954 } /* emlxs_fca_init() */ 5955 5956 5957 /* This determines which ports will be initiator or target mode */ 5958 static void 5959 emlxs_set_mode(emlxs_hba_t *hba) 5960 { 5961 emlxs_port_t *port = &PPORT; 5962 emlxs_port_t *vport; 5963 uint32_t i; 5964 uint32_t tgt_mode = 0; 5965 5966 #ifdef SFCT_SUPPORT 5967 emlxs_config_t *cfg; 5968 5969 cfg = &hba->config[CFG_TARGET_MODE]; 5970 tgt_mode = cfg->current; 5971 5972 if (tgt_mode) { 5973 if (emlxs_fct_modopen() != 0) { 5974 tgt_mode = 0; 5975 } 5976 } 5977 5978 port->fct_flags = 0; 5979 #endif /* SFCT_SUPPORT */ 5980 5981 /* Initialize physical port */ 5982 if (tgt_mode) { 5983 hba->tgt_mode = 1; 5984 hba->ini_mode = 0; 5985 5986 port->tgt_mode = 1; 5987 port->ini_mode = 0; 5988 } else { 5989 hba->tgt_mode = 0; 5990 hba->ini_mode = 1; 5991 5992 port->tgt_mode = 0; 5993 port->ini_mode = 1; 5994 } 5995 5996 /* Initialize virtual ports */ 5997 /* Virtual ports take on the mode of the parent physical port */ 5998 for (i = 1; i < MAX_VPORTS; i++) { 5999 vport = &VPORT(i); 6000 6001 #ifdef SFCT_SUPPORT 6002 vport->fct_flags = 0; 6003 #endif /* SFCT_SUPPORT */ 6004 6005 vport->ini_mode = port->ini_mode; 6006 vport->tgt_mode = port->tgt_mode; 6007 } 6008 6009 /* Check if initiator mode is requested */ 6010 if (hba->ini_mode) { 6011 emlxs_fca_init(hba); 6012 } else { 6013 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 6014 "Initiator mode not enabled."); 6015 } 6016 6017 #ifdef SFCT_SUPPORT 6018 /* Check if target mode is requested */ 6019 if (hba->tgt_mode) { 6020 emlxs_fct_init(hba); 6021 } else { 6022 EMLXS_MSGF(EMLXS_CONTEXT, 
&emlxs_attach_debug_msg, 6023 "Target mode not enabled."); 6024 } 6025 #endif /* SFCT_SUPPORT */ 6026 6027 return; 6028 6029 } /* emlxs_set_mode() */ 6030 6031 6032 6033 static void 6034 emlxs_fca_attach(emlxs_hba_t *hba) 6035 { 6036 /* Update our transport structure */ 6037 hba->fca_tran->fca_iblock = (ddi_iblock_cookie_t *)&hba->intr_arg; 6038 hba->fca_tran->fca_cmd_max = hba->io_throttle; 6039 6040 #if (EMLXS_MODREV >= EMLXS_MODREV5) 6041 bcopy((caddr_t)&hba->wwpn, (caddr_t)&hba->fca_tran->fca_perm_pwwn, 6042 sizeof (NAME_TYPE)); 6043 #endif /* >= EMLXS_MODREV5 */ 6044 6045 return; 6046 6047 } /* emlxs_fca_attach() */ 6048 6049 6050 static void 6051 emlxs_fca_detach(emlxs_hba_t *hba) 6052 { 6053 uint32_t i; 6054 emlxs_port_t *vport; 6055 6056 if (hba->ini_mode) { 6057 if ((void *)MODSYM(fc_fca_detach) != NULL) { 6058 MODSYM(fc_fca_detach)(hba->dip); 6059 } 6060 6061 hba->ini_mode = 0; 6062 6063 for (i = 0; i < MAX_VPORTS; i++) { 6064 vport = &VPORT(i); 6065 vport->ini_mode = 0; 6066 } 6067 } 6068 6069 return; 6070 6071 } /* emlxs_fca_detach() */ 6072 6073 6074 6075 static void 6076 emlxs_drv_banner(emlxs_hba_t *hba) 6077 { 6078 emlxs_port_t *port = &PPORT; 6079 uint32_t i; 6080 char sli_mode[16]; 6081 char msi_mode[16]; 6082 char npiv_mode[16]; 6083 emlxs_vpd_t *vpd = &VPD; 6084 emlxs_config_t *cfg = &CFG; 6085 uint8_t *wwpn; 6086 uint8_t *wwnn; 6087 uint32_t fw_show = 0; 6088 6089 /* Display firmware library one time for all driver instances */ 6090 mutex_enter(&emlxs_device.lock); 6091 if (! (emlxs_instance_flag & EMLXS_FW_SHOW)) { 6092 emlxs_instance_flag |= EMLXS_FW_SHOW; 6093 fw_show = 1; 6094 } 6095 mutex_exit(&emlxs_device.lock); 6096 6097 if (fw_show) { 6098 emlxs_fw_show(hba); 6099 } 6100 6101 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s (%s)", emlxs_label, 6102 emlxs_revision); 6103 6104 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, 6105 "%s Dev_id:%x Sub_id:%x Id:%d", hba->model_info.model, 6106 hba->model_info.device_id, hba->model_info.ssdid, 6107 hba->model_info.id); 6108 6109 #ifdef EMLXS_I386 6110 6111 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, 6112 "Firmware:%s (%s) Boot:%s", vpd->fw_version, vpd->fw_label, 6113 vpd->boot_version); 6114 6115 #else /* EMLXS_SPARC */ 6116 6117 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, 6118 "Firmware:%s (%s) Boot:%s Fcode:%s", vpd->fw_version, 6119 vpd->fw_label, vpd->boot_version, vpd->fcode_version); 6120 6121 #endif /* EMLXS_I386 */ 6122 6123 if (hba->sli_mode > 3) { 6124 (void) sprintf(sli_mode, "SLI:%d(%s)", hba->sli_mode, 6125 ((hba->flag & FC_FIP_SUPPORTED) ? "FIP" : "nonFIP")); 6126 } else { 6127 (void) sprintf(sli_mode, "SLI:%d", hba->sli_mode); 6128 } 6129 6130 (void) strcpy(msi_mode, " INTX:1"); 6131 6132 #ifdef MSI_SUPPORT 6133 if (hba->intr_flags & EMLXS_MSI_ENABLED) { 6134 switch (hba->intr_type) { 6135 case DDI_INTR_TYPE_FIXED: 6136 (void) strcpy(msi_mode, " MSI:0"); 6137 break; 6138 6139 case DDI_INTR_TYPE_MSI: 6140 (void) sprintf(msi_mode, " MSI:%d", hba->intr_count); 6141 break; 6142 6143 case DDI_INTR_TYPE_MSIX: 6144 (void) sprintf(msi_mode, " MSIX:%d", hba->intr_count); 6145 break; 6146 } 6147 } 6148 #endif 6149 6150 (void) strcpy(npiv_mode, ""); 6151 6152 if (hba->flag & FC_NPIV_ENABLED) { 6153 (void) sprintf(npiv_mode, " NPIV:%d", hba->vpi_max+1); 6154 } else { 6155 (void) strcpy(npiv_mode, " NPIV:0"); 6156 } 6157 6158 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s%s%s%s%s", 6159 sli_mode, msi_mode, npiv_mode, 6160 ((hba->ini_mode)? " FCA":""), ((hba->tgt_mode)? 
" FCT":"")); 6161 6162 wwpn = (uint8_t *)&hba->wwpn; 6163 wwnn = (uint8_t *)&hba->wwnn; 6164 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, 6165 "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X " 6166 "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X", 6167 wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5], wwpn[6], 6168 wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3], wwnn[4], wwnn[5], 6169 wwnn[6], wwnn[7]); 6170 6171 for (i = 0; i < MAX_VPORTS; i++) { 6172 port = &VPORT(i); 6173 6174 if (!(port->flag & EMLXS_PORT_CONFIG)) { 6175 continue; 6176 } 6177 6178 wwpn = (uint8_t *)&port->wwpn; 6179 wwnn = (uint8_t *)&port->wwnn; 6180 6181 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, 6182 "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X " 6183 "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X", 6184 wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5], 6185 wwpn[6], wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3], 6186 wwnn[4], wwnn[5], wwnn[6], wwnn[7]); 6187 } 6188 port = &PPORT; 6189 6190 /* 6191 * No dependency for Restricted login parameter. 6192 */ 6193 if ((cfg[CFG_VPORT_RESTRICTED].current) && (port->ini_mode)) { 6194 port->flag |= EMLXS_PORT_RESTRICTED; 6195 } else { 6196 port->flag &= ~EMLXS_PORT_RESTRICTED; 6197 } 6198 6199 /* 6200 * Announce the device: ddi_report_dev() prints a banner at boot time, 6201 * announcing the device pointed to by dip. 6202 */ 6203 (void) ddi_report_dev(hba->dip); 6204 6205 return; 6206 6207 } /* emlxs_drv_banner() */ 6208 6209 6210 extern void 6211 emlxs_get_fcode_version(emlxs_hba_t *hba) 6212 { 6213 emlxs_vpd_t *vpd = &VPD; 6214 char *prop_str; 6215 int status; 6216 6217 /* Setup fcode version property */ 6218 prop_str = NULL; 6219 status = 6220 ddi_prop_lookup_string(DDI_DEV_T_ANY, (dev_info_t *)hba->dip, 0, 6221 "fcode-version", (char **)&prop_str); 6222 6223 if (status == DDI_PROP_SUCCESS) { 6224 bcopy(prop_str, vpd->fcode_version, strlen(prop_str)); 6225 (void) ddi_prop_free((void *)prop_str); 6226 } else { 6227 (void) strcpy(vpd->fcode_version, "none"); 6228 } 6229 6230 return; 6231 6232 } /* emlxs_get_fcode_version() */ 6233 6234 6235 static int 6236 emlxs_hba_attach(dev_info_t *dip) 6237 { 6238 emlxs_hba_t *hba; 6239 emlxs_port_t *port; 6240 emlxs_config_t *cfg; 6241 char *prop_str; 6242 int ddiinst; 6243 int32_t emlxinst; 6244 int status; 6245 uint32_t rval; 6246 uint32_t init_flag = 0; 6247 char local_pm_components[32]; 6248 #ifdef EMLXS_I386 6249 uint32_t i; 6250 #endif /* EMLXS_I386 */ 6251 6252 ddiinst = ddi_get_instance(dip); 6253 emlxinst = emlxs_add_instance(ddiinst); 6254 6255 if (emlxinst >= MAX_FC_BRDS) { 6256 cmn_err(CE_WARN, 6257 "?%s: fca_hba_attach failed. Too many driver ddiinsts. " 6258 "inst=%x", DRIVER_NAME, ddiinst); 6259 return (DDI_FAILURE); 6260 } 6261 6262 if (emlxs_device.hba[emlxinst] == (emlxs_hba_t *)-1) { 6263 return (DDI_FAILURE); 6264 } 6265 6266 if (emlxs_device.hba[emlxinst]) { 6267 return (DDI_SUCCESS); 6268 } 6269 6270 /* An adapter can accidentally be plugged into a slave-only PCI slot */ 6271 if (ddi_slaveonly(dip) == DDI_SUCCESS) { 6272 cmn_err(CE_WARN, 6273 "?%s%d: fca_hba_attach failed. Device in slave-only slot.", 6274 DRIVER_NAME, ddiinst); 6275 return (DDI_FAILURE); 6276 } 6277 6278 /* Allocate emlxs_dev_ctl structure. */ 6279 if (ddi_soft_state_zalloc(emlxs_soft_state, ddiinst) != DDI_SUCCESS) { 6280 cmn_err(CE_WARN, 6281 "?%s%d: fca_hba_attach failed. 
Unable to allocate soft " 6282 "state.", DRIVER_NAME, ddiinst); 6283 return (DDI_FAILURE); 6284 } 6285 init_flag |= ATTACH_SOFT_STATE; 6286 6287 if ((hba = (emlxs_hba_t *)ddi_get_soft_state(emlxs_soft_state, 6288 ddiinst)) == NULL) { 6289 cmn_err(CE_WARN, 6290 "?%s%d: fca_hba_attach failed. Unable to get soft state.", 6291 DRIVER_NAME, ddiinst); 6292 goto failed; 6293 } 6294 bzero((char *)hba, sizeof (emlxs_hba_t)); 6295 6296 emlxs_device.hba[emlxinst] = hba; 6297 emlxs_device.log[emlxinst] = &hba->log; 6298 6299 #ifdef DUMP_SUPPORT 6300 emlxs_device.dump_txtfile[emlxinst] = &hba->dump_txtfile; 6301 emlxs_device.dump_dmpfile[emlxinst] = &hba->dump_dmpfile; 6302 emlxs_device.dump_ceefile[emlxinst] = &hba->dump_ceefile; 6303 #endif /* DUMP_SUPPORT */ 6304 6305 hba->dip = dip; 6306 hba->emlxinst = emlxinst; 6307 hba->ddiinst = ddiinst; 6308 hba->ini_mode = 0; 6309 hba->tgt_mode = 0; 6310 6311 init_flag |= ATTACH_HBA; 6312 6313 /* Enable the physical port on this HBA */ 6314 port = &PPORT; 6315 port->hba = hba; 6316 port->vpi = 0; 6317 port->flag |= EMLXS_PORT_ENABLE; 6318 6319 /* Allocate a transport structure */ 6320 hba->fca_tran = 6321 (fc_fca_tran_t *)kmem_zalloc(sizeof (fc_fca_tran_t), KM_NOSLEEP); 6322 if (hba->fca_tran == NULL) { 6323 cmn_err(CE_WARN, 6324 "?%s%d: fca_hba_attach failed. Unable to allocate fca_tran " 6325 "memory.", DRIVER_NAME, ddiinst); 6326 goto failed; 6327 } 6328 bcopy((caddr_t)&emlxs_fca_tran, (caddr_t)hba->fca_tran, 6329 sizeof (fc_fca_tran_t)); 6330 6331 /* 6332 * Copy the global ddi_dma_attr to the local hba fields 6333 */ 6334 bcopy((caddr_t)&emlxs_dma_attr, (caddr_t)&hba->dma_attr, 6335 sizeof (ddi_dma_attr_t)); 6336 bcopy((caddr_t)&emlxs_dma_attr_ro, (caddr_t)&hba->dma_attr_ro, 6337 sizeof (ddi_dma_attr_t)); 6338 bcopy((caddr_t)&emlxs_dma_attr_1sg, (caddr_t)&hba->dma_attr_1sg, 6339 sizeof (ddi_dma_attr_t)); 6340 bcopy((caddr_t)&emlxs_dma_attr_fcip_rsp, 6341 (caddr_t)&hba->dma_attr_fcip_rsp, sizeof (ddi_dma_attr_t)); 6342 6343 /* Reset the fca_tran dma_attr fields to the per-hba copies */ 6344 hba->fca_tran->fca_dma_attr = &hba->dma_attr; 6345 hba->fca_tran->fca_dma_fcp_cmd_attr = &hba->dma_attr_1sg; 6346 hba->fca_tran->fca_dma_fcp_rsp_attr = &hba->dma_attr_1sg; 6347 hba->fca_tran->fca_dma_fcp_data_attr = &hba->dma_attr_ro; 6348 hba->fca_tran->fca_dma_fcip_cmd_attr = &hba->dma_attr_1sg; 6349 hba->fca_tran->fca_dma_fcip_rsp_attr = &hba->dma_attr_fcip_rsp; 6350 hba->fca_tran->fca_dma_fcsm_cmd_attr = &hba->dma_attr_1sg; 6351 hba->fca_tran->fca_dma_fcsm_rsp_attr = &hba->dma_attr; 6352 6353 /* Set the transport structure pointer in our dip */ 6354 /* SFS may panic if we are in target only mode */ 6355 /* We will update the transport structure later */ 6356 (void) ddi_set_driver_private(dip, (caddr_t)&emlxs_fca_tran); 6357 init_flag |= ATTACH_FCA_TRAN; 6358 6359 /* Perform driver integrity check */ 6360 rval = emlxs_integrity_check(hba); 6361 if (rval) { 6362 cmn_err(CE_WARN, 6363 "?%s%d: fca_hba_attach failed. Driver integrity check " 6364 "failed. %d error(s) found.", DRIVER_NAME, ddiinst, rval); 6365 goto failed; 6366 } 6367 6368 cfg = &CFG; 6369 6370 bcopy((uint8_t *)&emlxs_cfg, (uint8_t *)cfg, sizeof (emlxs_cfg)); 6371 #ifdef MSI_SUPPORT 6372 if ((void *)&ddi_intr_get_supported_types != NULL) { 6373 hba->intr_flags |= EMLXS_MSI_ENABLED; 6374 } 6375 #endif /* MSI_SUPPORT */ 6376 6377 6378 /* Create the msg log file */ 6379 if (emlxs_msg_log_create(hba) == 0) { 6380 cmn_err(CE_WARN, 6381 "?%s%d: fca_hba_attach failed. 
Unable to create message " 6382 "log", DRIVER_NAME, ddiinst); 6383 goto failed; 6384 6385 } 6386 init_flag |= ATTACH_LOG; 6387 6388 /* We can begin to use EMLXS_MSGF from this point on */ 6389 6390 /* 6391 * Find the I/O bus type If it is not a SBUS card, 6392 * then it is a PCI card. Default is PCI_FC (0). 6393 */ 6394 prop_str = NULL; 6395 status = ddi_prop_lookup_string(DDI_DEV_T_ANY, 6396 (dev_info_t *)dip, 0, "name", (char **)&prop_str); 6397 6398 if (status == DDI_PROP_SUCCESS) { 6399 if (strncmp(prop_str, "lpfs", 4) == 0) { 6400 hba->bus_type = SBUS_FC; 6401 } 6402 6403 (void) ddi_prop_free((void *)prop_str); 6404 } 6405 6406 /* 6407 * Copy DDS from the config method and update configuration parameters 6408 */ 6409 (void) emlxs_get_props(hba); 6410 6411 #ifdef FMA_SUPPORT 6412 hba->fm_caps = cfg[CFG_FM_CAPS].current; 6413 6414 emlxs_fm_init(hba); 6415 6416 init_flag |= ATTACH_FM; 6417 #endif /* FMA_SUPPORT */ 6418 6419 if (emlxs_map_bus(hba)) { 6420 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6421 "Unable to map memory"); 6422 goto failed; 6423 6424 } 6425 init_flag |= ATTACH_MAP_BUS; 6426 6427 /* Attempt to identify the adapter */ 6428 rval = emlxs_init_adapter_info(hba); 6429 6430 if (rval == 0) { 6431 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6432 "Unable to get adapter info. Id:%d Device id:0x%x " 6433 "Model:%s", hba->model_info.id, 6434 hba->model_info.device_id, hba->model_info.model); 6435 goto failed; 6436 } 6437 #define FILTER_ORACLE_BRANDED 6438 #ifdef FILTER_ORACLE_BRANDED 6439 6440 /* Sun-branded adapters are not supported */ 6441 if (hba->model_info.flags & EMLXS_SUN_BRANDED) { 6442 hba->model_info.flags |= EMLXS_NOT_SUPPORTED; 6443 } 6444 #endif /* FILTER_ORACLE_BRANDED */ 6445 6446 /* Check if adapter is not supported */ 6447 if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) { 6448 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6449 "Unsupported adapter found. Id:%d Device id:0x%x " 6450 "SSDID:0x%x Model:%s", hba->model_info.id, 6451 hba->model_info.device_id, 6452 hba->model_info.ssdid, hba->model_info.model); 6453 goto failed; 6454 } 6455 if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) { 6456 hba->sli.sli4.mem_sgl_size = MEM_SGL_SIZE; 6457 #ifdef EMLXS_I386 6458 /* 6459 * TigerShark has 64K limit for SG element size 6460 * Do this for x86 alone. For SPARC, the driver 6461 * breaks up the single SGE later on. 
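 *
 * Note (inferred from the constants below, not from adapter documentation):
 * the max-xfer-size thresholds used in the SGL/BPL sizing just below appear
 * to assume 12-byte entries, two of them reserved, with each remaining
 * entry covering one 4 KB page, so a buffer of N bytes covers
 * ((N / 12) - 2) * 4096 bytes of transfer:
 *
 *	1024-byte buffer -> ((1024 / 12) - 2) * 4096 = 339968 bytes
 *	2048-byte buffer -> ((2048 / 12) - 2) * 4096 = 688128 bytes
 *
 * Hence a max_xfer_size above 688128 selects a 4096-byte buffer, above
 * 339968 a 2048-byte buffer, and anything smaller the 1024-byte default.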
6462 */ 6463 hba->dma_attr_ro.dma_attr_count_max = 0xffff; 6464 6465 i = cfg[CFG_MAX_XFER_SIZE].current; 6466 /* Update SGL size based on max_xfer_size */ 6467 if (i > 688128) { 6468 /* 688128 = (((2048 / 12) - 2) * 4096) */ 6469 hba->sli.sli4.mem_sgl_size = 4096; 6470 } else if (i > 339968) { 6471 /* 339968 = (((1024 / 12) - 2) * 4096) */ 6472 hba->sli.sli4.mem_sgl_size = 2048; 6473 } else { 6474 hba->sli.sli4.mem_sgl_size = 1024; 6475 } 6476 i = SGL_TO_SGLLEN(hba->sli.sli4.mem_sgl_size); 6477 #endif /* EMLXS_I386 */ 6478 } else { 6479 hba->sli.sli3.mem_bpl_size = MEM_BPL_SIZE; 6480 #ifdef EMLXS_I386 6481 i = cfg[CFG_MAX_XFER_SIZE].current; 6482 /* Update BPL size based on max_xfer_size */ 6483 if (i > 688128) { 6484 /* 688128 = (((2048 / 12) - 2) * 4096) */ 6485 hba->sli.sli3.mem_bpl_size = 4096; 6486 } else if (i > 339968) { 6487 /* 339968 = (((1024 / 12) - 2) * 4096) */ 6488 hba->sli.sli3.mem_bpl_size = 2048; 6489 } else { 6490 hba->sli.sli3.mem_bpl_size = 1024; 6491 } 6492 i = BPL_TO_SGLLEN(hba->sli.sli3.mem_bpl_size); 6493 #endif /* EMLXS_I386 */ 6494 } 6495 6496 #ifdef EMLXS_I386 6497 /* Update dma_attr_sgllen based on BPL size */ 6498 hba->dma_attr.dma_attr_sgllen = i; 6499 hba->dma_attr_ro.dma_attr_sgllen = i; 6500 hba->dma_attr_fcip_rsp.dma_attr_sgllen = i; 6501 #endif /* EMLXS_I386 */ 6502 6503 if (EMLXS_SLI_MAP_HDW(hba)) { 6504 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6505 "Unable to map memory"); 6506 goto failed; 6507 6508 } 6509 init_flag |= ATTACH_MAP_SLI; 6510 6511 /* Initialize the interrupts. But don't add them yet */ 6512 status = EMLXS_INTR_INIT(hba, 0); 6513 if (status != DDI_SUCCESS) { 6514 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6515 "Unable to initalize interrupt(s)."); 6516 goto failed; 6517 6518 } 6519 init_flag |= ATTACH_INTR_INIT; 6520 6521 /* Initialize LOCKs */ 6522 emlxs_msg_lock_reinit(hba); 6523 emlxs_lock_init(hba); 6524 init_flag |= ATTACH_LOCK; 6525 6526 /* Create the event queue */ 6527 if (emlxs_event_queue_create(hba) == 0) { 6528 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6529 "Unable to create event queue"); 6530 6531 goto failed; 6532 6533 } 6534 init_flag |= ATTACH_EVENTS; 6535 6536 /* Initialize the power management */ 6537 mutex_enter(&EMLXS_PM_LOCK); 6538 hba->pm_state = EMLXS_PM_IN_ATTACH; 6539 hba->pm_level = EMLXS_PM_ADAPTER_DOWN; 6540 hba->pm_busy = 0; 6541 #ifdef IDLE_TIMER 6542 hba->pm_active = 1; 6543 hba->pm_idle_timer = 0; 6544 #endif /* IDLE_TIMER */ 6545 mutex_exit(&EMLXS_PM_LOCK); 6546 6547 /* Set the pm component name */ 6548 (void) sprintf(local_pm_components, "NAME=%s%d", DRIVER_NAME, 6549 ddiinst); 6550 emlxs_pm_components[0] = local_pm_components; 6551 6552 /* Check if power management support is enabled */ 6553 if (cfg[CFG_PM_SUPPORT].current) { 6554 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip, 6555 "pm-components", emlxs_pm_components, 6556 sizeof (emlxs_pm_components) / 6557 sizeof (emlxs_pm_components[0])) != 6558 DDI_PROP_SUCCESS) { 6559 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6560 "Unable to create pm components."); 6561 goto failed; 6562 } 6563 } 6564 6565 /* Needed for suspend and resume support */ 6566 (void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "pm-hardware-state", 6567 "needs-suspend-resume"); 6568 init_flag |= ATTACH_PROP; 6569 6570 emlxs_thread_spawn_create(hba); 6571 init_flag |= ATTACH_SPAWN; 6572 6573 emlxs_thread_create(hba, &hba->iodone_thread); 6574 6575 init_flag |= ATTACH_THREAD; 6576 6577 /* Setup initiator / target ports */ 6578 
emlxs_set_mode(hba); 6579 6580 /* If driver did not attach to either stack, */ 6581 /* then driver attach failed */ 6582 if (!hba->tgt_mode && !hba->ini_mode) { 6583 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6584 "Driver interfaces not enabled."); 6585 goto failed; 6586 } 6587 6588 /* 6589 * Initialize HBA 6590 */ 6591 6592 /* Set initial state */ 6593 mutex_enter(&EMLXS_PORT_LOCK); 6594 emlxs_diag_state = DDI_OFFDI; 6595 hba->flag |= FC_OFFLINE_MODE; 6596 hba->flag &= ~(FC_ONLINE_MODE | FC_ONLINING_MODE | FC_OFFLINING_MODE); 6597 mutex_exit(&EMLXS_PORT_LOCK); 6598 6599 if (status = emlxs_online(hba)) { 6600 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6601 "Unable to initialize adapter."); 6602 goto failed; 6603 } 6604 init_flag |= ATTACH_ONLINE; 6605 6606 /* This is to ensure that the model property is properly set */ 6607 (void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "model", 6608 hba->model_info.model); 6609 6610 /* Create the device node. */ 6611 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, ddiinst, NULL, 0) == 6612 DDI_FAILURE) { 6613 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6614 "Unable to create device node."); 6615 goto failed; 6616 } 6617 init_flag |= ATTACH_NODE; 6618 6619 /* Attach initiator now */ 6620 /* This must come after emlxs_online() */ 6621 emlxs_fca_attach(hba); 6622 init_flag |= ATTACH_FCA; 6623 6624 /* Initialize kstat information */ 6625 hba->kstat = kstat_create(DRIVER_NAME, 6626 ddiinst, "statistics", "controller", 6627 KSTAT_TYPE_RAW, sizeof (emlxs_stats_t), 6628 KSTAT_FLAG_VIRTUAL); 6629 6630 if (hba->kstat == NULL) { 6631 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 6632 "kstat_create failed."); 6633 } else { 6634 hba->kstat->ks_data = (void *)&hba->stats; 6635 kstat_install(hba->kstat); 6636 init_flag |= ATTACH_KSTAT; 6637 } 6638 6639 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4) 6640 /* Setup virtual port properties */ 6641 emlxs_read_vport_prop(hba); 6642 #endif /* EMLXS_MODREV3 || EMLXS_MODREV4 */ 6643 6644 6645 #ifdef DHCHAP_SUPPORT 6646 emlxs_dhc_attach(hba); 6647 init_flag |= ATTACH_DHCHAP; 6648 #endif /* DHCHAP_SUPPORT */ 6649 6650 /* Display the driver banner now */ 6651 emlxs_drv_banner(hba); 6652 6653 /* Raise the power level */ 6654 6655 /* 6656 * This will not execute emlxs_hba_resume because 6657 * EMLXS_PM_IN_ATTACH is set 6658 */ 6659 if (emlxs_pm_raise_power(dip) != DDI_SUCCESS) { 6660 /* Set power up anyway. This should not happen! 
*/ 6661 mutex_enter(&EMLXS_PM_LOCK); 6662 hba->pm_level = EMLXS_PM_ADAPTER_UP; 6663 hba->pm_state &= ~EMLXS_PM_IN_ATTACH; 6664 mutex_exit(&EMLXS_PM_LOCK); 6665 } else { 6666 mutex_enter(&EMLXS_PM_LOCK); 6667 hba->pm_state &= ~EMLXS_PM_IN_ATTACH; 6668 mutex_exit(&EMLXS_PM_LOCK); 6669 } 6670 6671 #ifdef SFCT_SUPPORT 6672 /* Do this last */ 6673 emlxs_fct_attach(hba); 6674 init_flag |= ATTACH_FCT; 6675 #endif /* SFCT_SUPPORT */ 6676 6677 return (DDI_SUCCESS); 6678 6679 failed: 6680 6681 emlxs_driver_remove(dip, init_flag, 1); 6682 6683 return (DDI_FAILURE); 6684 6685 } /* emlxs_hba_attach() */ 6686 6687 6688 static int 6689 emlxs_hba_detach(dev_info_t *dip) 6690 { 6691 emlxs_hba_t *hba; 6692 emlxs_port_t *port; 6693 int ddiinst; 6694 int count; 6695 uint32_t init_flag = (uint32_t)-1; 6696 6697 ddiinst = ddi_get_instance(dip); 6698 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 6699 port = &PPORT; 6700 6701 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg, NULL); 6702 6703 mutex_enter(&EMLXS_PM_LOCK); 6704 hba->pm_state |= EMLXS_PM_IN_DETACH; 6705 mutex_exit(&EMLXS_PM_LOCK); 6706 6707 /* Lower the power level */ 6708 /* 6709 * This will not suspend the driver since the 6710 * EMLXS_PM_IN_DETACH has been set 6711 */ 6712 if (emlxs_pm_lower_power(dip) != DDI_SUCCESS) { 6713 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg, 6714 "Unable to lower power."); 6715 6716 mutex_enter(&EMLXS_PM_LOCK); 6717 hba->pm_state &= ~EMLXS_PM_IN_DETACH; 6718 mutex_exit(&EMLXS_PM_LOCK); 6719 6720 return (DDI_FAILURE); 6721 } 6722 6723 /* Take the adapter offline first, if not already */ 6724 if (emlxs_offline(hba) != 0) { 6725 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg, 6726 "Unable to take adapter offline."); 6727 6728 mutex_enter(&EMLXS_PM_LOCK); 6729 hba->pm_state &= ~EMLXS_PM_IN_DETACH; 6730 mutex_exit(&EMLXS_PM_LOCK); 6731 6732 (void) emlxs_pm_raise_power(dip); 6733 6734 return (DDI_FAILURE); 6735 } 6736 /* Check ub buffer pools */ 6737 if (port->ub_pool) { 6738 mutex_enter(&EMLXS_UB_LOCK); 6739 6740 /* Wait up to 10 seconds for all ub pools to be freed */ 6741 count = 10 * 2; 6742 while (port->ub_pool && count) { 6743 mutex_exit(&EMLXS_UB_LOCK); 6744 delay(drv_usectohz(500000)); /* half second wait */ 6745 count--; 6746 mutex_enter(&EMLXS_UB_LOCK); 6747 } 6748 6749 if (port->ub_pool) { 6750 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 6751 "fca_unbind_port: Unsolicited buffers still " 6752 "active. port=%p. Destroying...", port); 6753 6754 /* Destroy all pools */ 6755 while (port->ub_pool) { 6756 emlxs_ub_destroy(port, port->ub_pool); 6757 } 6758 } 6759 6760 mutex_exit(&EMLXS_UB_LOCK); 6761 } 6762 init_flag &= ~ATTACH_ONLINE; 6763 6764 /* Remove the driver instance */ 6765 emlxs_driver_remove(dip, init_flag, 0); 6766 6767 return (DDI_SUCCESS); 6768 6769 } /* emlxs_hba_detach() */ 6770 6771 6772 extern int 6773 emlxs_map_bus(emlxs_hba_t *hba) 6774 { 6775 emlxs_port_t *port = &PPORT; 6776 dev_info_t *dip; 6777 ddi_device_acc_attr_t dev_attr; 6778 int status; 6779 6780 dip = (dev_info_t *)hba->dip; 6781 dev_attr = emlxs_dev_acc_attr; 6782 6783 if (hba->bus_type == SBUS_FC) { 6784 if (hba->pci_acc_handle == 0) { 6785 status = ddi_regs_map_setup(dip, 6786 SBUS_DFLY_PCI_CFG_RINDEX, 6787 (caddr_t *)&hba->pci_addr, 6788 0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle); 6789 if (status != DDI_SUCCESS) { 6790 EMLXS_MSGF(EMLXS_CONTEXT, 6791 &emlxs_attach_failed_msg, 6792 "(SBUS) ddi_regs_map_setup PCI failed. 
" 6793 "status=%x", status); 6794 goto failed; 6795 } 6796 } 6797 6798 if (hba->sbus_pci_handle == 0) { 6799 status = ddi_regs_map_setup(dip, 6800 SBUS_TITAN_PCI_CFG_RINDEX, 6801 (caddr_t *)&hba->sbus_pci_addr, 6802 0, 0, &dev_attr, &hba->sbus_pci_handle); 6803 if (status != DDI_SUCCESS) { 6804 EMLXS_MSGF(EMLXS_CONTEXT, 6805 &emlxs_attach_failed_msg, 6806 "(SBUS) ddi_regs_map_setup TITAN PCI " 6807 "failed. status=%x", status); 6808 goto failed; 6809 } 6810 } 6811 6812 } else { /* ****** PCI ****** */ 6813 6814 if (hba->pci_acc_handle == 0) { 6815 status = ddi_regs_map_setup(dip, 6816 PCI_CFG_RINDEX, 6817 (caddr_t *)&hba->pci_addr, 6818 0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle); 6819 if (status != DDI_SUCCESS) { 6820 EMLXS_MSGF(EMLXS_CONTEXT, 6821 &emlxs_attach_failed_msg, 6822 "(PCI) ddi_regs_map_setup PCI failed. " 6823 "status=%x", status); 6824 goto failed; 6825 } 6826 } 6827 #ifdef EMLXS_I386 6828 /* Setting up PCI configure space */ 6829 (void) ddi_put16(hba->pci_acc_handle, 6830 (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER), 6831 CMD_CFG_VALUE | CMD_IO_ENBL); 6832 6833 #ifdef FMA_SUPPORT 6834 if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle) 6835 != DDI_FM_OK) { 6836 EMLXS_MSGF(EMLXS_CONTEXT, 6837 &emlxs_invalid_access_handle_msg, NULL); 6838 goto failed; 6839 } 6840 #endif /* FMA_SUPPORT */ 6841 6842 #endif /* EMLXS_I386 */ 6843 6844 } 6845 return (0); 6846 6847 failed: 6848 6849 emlxs_unmap_bus(hba); 6850 return (ENOMEM); 6851 6852 } /* emlxs_map_bus() */ 6853 6854 6855 extern void 6856 emlxs_unmap_bus(emlxs_hba_t *hba) 6857 { 6858 if (hba->pci_acc_handle) { 6859 (void) ddi_regs_map_free(&hba->pci_acc_handle); 6860 hba->pci_acc_handle = 0; 6861 } 6862 6863 if (hba->sbus_pci_handle) { 6864 (void) ddi_regs_map_free(&hba->sbus_pci_handle); 6865 hba->sbus_pci_handle = 0; 6866 } 6867 6868 return; 6869 6870 } /* emlxs_unmap_bus() */ 6871 6872 6873 static int 6874 emlxs_get_props(emlxs_hba_t *hba) 6875 { 6876 emlxs_config_t *cfg; 6877 uint32_t i; 6878 char string[256]; 6879 uint32_t new_value; 6880 6881 /* Initialize each parameter */ 6882 for (i = 0; i < NUM_CFG_PARAM; i++) { 6883 cfg = &hba->config[i]; 6884 6885 /* Ensure strings are terminated */ 6886 cfg->string[(EMLXS_CFG_STR_SIZE-1)] = 0; 6887 cfg->help[(EMLXS_CFG_HELP_SIZE-1)] = 0; 6888 6889 /* Set the current value to the default value */ 6890 new_value = cfg->def; 6891 6892 /* First check for the global setting */ 6893 new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, 6894 (void *)hba->dip, DDI_PROP_DONTPASS, 6895 cfg->string, new_value); 6896 6897 /* Now check for the per adapter ddiinst setting */ 6898 (void) sprintf(string, "%s%d-%s", DRIVER_NAME, hba->ddiinst, 6899 cfg->string); 6900 6901 new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, 6902 (void *)hba->dip, DDI_PROP_DONTPASS, string, new_value); 6903 6904 /* Now check the parameter */ 6905 cfg->current = emlxs_check_parm(hba, i, new_value); 6906 } 6907 6908 return (0); 6909 6910 } /* emlxs_get_props() */ 6911 6912 6913 extern uint32_t 6914 emlxs_check_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value) 6915 { 6916 emlxs_port_t *port = &PPORT; 6917 uint32_t i; 6918 emlxs_config_t *cfg; 6919 emlxs_vpd_t *vpd = &VPD; 6920 6921 if (index > NUM_CFG_PARAM) { 6922 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 6923 "emlxs_check_parm failed. 
Invalid index = %d", index); 6924 6925 return (new_value); 6926 } 6927 6928 cfg = &hba->config[index]; 6929 6930 if (new_value > cfg->hi) { 6931 new_value = cfg->def; 6932 } else if (new_value < cfg->low) { 6933 new_value = cfg->def; 6934 } 6935 6936 /* Perform additional checks */ 6937 switch (index) { 6938 case CFG_NPIV_ENABLE: 6939 if (hba->tgt_mode) { 6940 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg, 6941 "enable-npiv: Not supported in target mode. " 6942 "Disabling."); 6943 6944 new_value = 0; 6945 } 6946 break; 6947 6948 #ifdef DHCHAP_SUPPORT 6949 case CFG_AUTH_ENABLE: 6950 if (hba->tgt_mode) { 6951 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg, 6952 "enable-auth: Not supported in target mode. " 6953 "Disabling."); 6954 6955 new_value = 0; 6956 } 6957 break; 6958 #endif /* DHCHAP_SUPPORT */ 6959 6960 case CFG_NUM_NODES: 6961 switch (new_value) { 6962 case 1: 6963 case 2: 6964 /* Must have at least 3 if not 0 */ 6965 return (3); 6966 6967 default: 6968 break; 6969 } 6970 break; 6971 6972 case CFG_FW_CHECK: 6973 /* The 0x2 bit implies the 0x1 bit will also be set */ 6974 if (new_value & 0x2) { 6975 new_value |= 0x1; 6976 } 6977 6978 /* The 0x4 bit should not be set if 0x1 or 0x2 is not set */ 6979 if (!(new_value & 0x3) && (new_value & 0x4)) { 6980 new_value &= ~0x4; 6981 } 6982 break; 6983 6984 case CFG_LINK_SPEED: 6985 if (vpd->link_speed) { 6986 switch (new_value) { 6987 case 0: 6988 break; 6989 6990 case 1: 6991 if (!(vpd->link_speed & LMT_1GB_CAPABLE)) { 6992 new_value = 0; 6993 6994 EMLXS_MSGF(EMLXS_CONTEXT, 6995 &emlxs_init_msg, 6996 "link-speed: 1Gb not supported " 6997 "by adapter. Switching to auto " 6998 "detect."); 6999 } 7000 break; 7001 7002 case 2: 7003 if (!(vpd->link_speed & LMT_2GB_CAPABLE)) { 7004 new_value = 0; 7005 7006 EMLXS_MSGF(EMLXS_CONTEXT, 7007 &emlxs_init_msg, 7008 "link-speed: 2Gb not supported " 7009 "by adapter. Switching to auto " 7010 "detect."); 7011 } 7012 break; 7013 case 4: 7014 if (!(vpd->link_speed & LMT_4GB_CAPABLE)) { 7015 new_value = 0; 7016 7017 EMLXS_MSGF(EMLXS_CONTEXT, 7018 &emlxs_init_msg, 7019 "link-speed: 4Gb not supported " 7020 "by adapter. Switching to auto " 7021 "detect."); 7022 } 7023 break; 7024 7025 case 8: 7026 if (!(vpd->link_speed & LMT_8GB_CAPABLE)) { 7027 new_value = 0; 7028 7029 EMLXS_MSGF(EMLXS_CONTEXT, 7030 &emlxs_init_msg, 7031 "link-speed: 8Gb not supported " 7032 "by adapter. Switching to auto " 7033 "detect."); 7034 } 7035 break; 7036 7037 case 10: 7038 if (!(vpd->link_speed & LMT_10GB_CAPABLE)) { 7039 new_value = 0; 7040 7041 EMLXS_MSGF(EMLXS_CONTEXT, 7042 &emlxs_init_msg, 7043 "link-speed: 10Gb not supported " 7044 "by adapter. Switching to auto " 7045 "detect."); 7046 } 7047 break; 7048 7049 default: 7050 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg, 7051 "link-speed: Invalid value=%d provided. 
" 7052 "Switching to auto detect.", 7053 new_value); 7054 7055 new_value = 0; 7056 } 7057 } else { /* Perform basic validity check */ 7058 7059 /* Perform additional check on link speed */ 7060 switch (new_value) { 7061 case 0: 7062 case 1: 7063 case 2: 7064 case 4: 7065 case 8: 7066 case 10: 7067 /* link-speed is a valid choice */ 7068 break; 7069 7070 default: 7071 new_value = cfg->def; 7072 } 7073 } 7074 break; 7075 7076 case CFG_TOPOLOGY: 7077 /* Perform additional check on topology */ 7078 switch (new_value) { 7079 case 0: 7080 case 2: 7081 case 4: 7082 case 6: 7083 /* topology is a valid choice */ 7084 break; 7085 7086 default: 7087 return (cfg->def); 7088 } 7089 break; 7090 7091 #ifdef DHCHAP_SUPPORT 7092 case CFG_AUTH_TYPE: 7093 { 7094 uint32_t shift; 7095 uint32_t mask; 7096 7097 /* Perform additional check on auth type */ 7098 shift = 12; 7099 mask = 0xF000; 7100 for (i = 0; i < 4; i++) { 7101 if (((new_value & mask) >> shift) > DFC_AUTH_TYPE_MAX) { 7102 return (cfg->def); 7103 } 7104 7105 shift -= 4; 7106 mask >>= 4; 7107 } 7108 break; 7109 } 7110 7111 case CFG_AUTH_HASH: 7112 { 7113 uint32_t shift; 7114 uint32_t mask; 7115 7116 /* Perform additional check on auth hash */ 7117 shift = 12; 7118 mask = 0xF000; 7119 for (i = 0; i < 4; i++) { 7120 if (((new_value & mask) >> shift) > DFC_AUTH_HASH_MAX) { 7121 return (cfg->def); 7122 } 7123 7124 shift -= 4; 7125 mask >>= 4; 7126 } 7127 break; 7128 } 7129 7130 case CFG_AUTH_GROUP: 7131 { 7132 uint32_t shift; 7133 uint32_t mask; 7134 7135 /* Perform additional check on auth group */ 7136 shift = 28; 7137 mask = 0xF0000000; 7138 for (i = 0; i < 8; i++) { 7139 if (((new_value & mask) >> shift) > 7140 DFC_AUTH_GROUP_MAX) { 7141 return (cfg->def); 7142 } 7143 7144 shift -= 4; 7145 mask >>= 4; 7146 } 7147 break; 7148 } 7149 7150 case CFG_AUTH_INTERVAL: 7151 if (new_value < 10) { 7152 return (10); 7153 } 7154 break; 7155 7156 7157 #endif /* DHCHAP_SUPPORT */ 7158 7159 } /* switch */ 7160 7161 return (new_value); 7162 7163 } /* emlxs_check_parm() */ 7164 7165 7166 extern uint32_t 7167 emlxs_set_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value) 7168 { 7169 emlxs_port_t *port = &PPORT; 7170 emlxs_port_t *vport; 7171 uint32_t vpi; 7172 emlxs_config_t *cfg; 7173 uint32_t old_value; 7174 7175 if (index > NUM_CFG_PARAM) { 7176 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 7177 "emlxs_set_parm failed. Invalid index = %d", index); 7178 7179 return ((uint32_t)FC_FAILURE); 7180 } 7181 7182 cfg = &hba->config[index]; 7183 7184 if (!(cfg->flags & PARM_DYNAMIC)) { 7185 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 7186 "emlxs_set_parm failed. %s is not dynamic.", cfg->string); 7187 7188 return ((uint32_t)FC_FAILURE); 7189 } 7190 7191 /* Check new value */ 7192 old_value = new_value; 7193 new_value = emlxs_check_parm(hba, index, new_value); 7194 7195 if (old_value != new_value) { 7196 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 7197 "emlxs_set_parm: %s invalid. 0x%x --> 0x%x", 7198 cfg->string, old_value, new_value); 7199 } 7200 7201 /* Return now if no actual change */ 7202 if (new_value == cfg->current) { 7203 return (FC_SUCCESS); 7204 } 7205 7206 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 7207 "emlxs_set_parm: %s changing. 
0x%x --> 0x%x", 7208 cfg->string, cfg->current, new_value); 7209 7210 old_value = cfg->current; 7211 cfg->current = new_value; 7212 7213 /* React to change if needed */ 7214 switch (index) { 7215 7216 case CFG_PCI_MAX_READ: 7217 /* Update MXR */ 7218 emlxs_pcix_mxr_update(hba, 1); 7219 break; 7220 7221 case CFG_SLI_MODE: 7222 /* Check SLI mode */ 7223 if ((hba->sli_mode == 3) && (new_value == 2)) { 7224 /* All vports must be disabled first */ 7225 for (vpi = 1; vpi < MAX_VPORTS; vpi++) { 7226 vport = &VPORT(vpi); 7227 7228 if (vport->flag & EMLXS_PORT_ENABLE) { 7229 /* Reset current value */ 7230 cfg->current = old_value; 7231 7232 EMLXS_MSGF(EMLXS_CONTEXT, 7233 &emlxs_sfs_debug_msg, 7234 "emlxs_set_parm failed. %s: vpi=%d " 7235 "still enabled. Value restored to " 7236 "0x%x.", cfg->string, vpi, 7237 old_value); 7238 7239 return (2); 7240 } 7241 } 7242 } 7243 break; 7244 7245 case CFG_NPIV_ENABLE: 7246 /* Check if NPIV is being disabled */ 7247 if ((old_value == 1) && (new_value == 0)) { 7248 /* All vports must be disabled first */ 7249 for (vpi = 1; vpi < MAX_VPORTS; vpi++) { 7250 vport = &VPORT(vpi); 7251 7252 if (vport->flag & EMLXS_PORT_ENABLE) { 7253 /* Reset current value */ 7254 cfg->current = old_value; 7255 7256 EMLXS_MSGF(EMLXS_CONTEXT, 7257 &emlxs_sfs_debug_msg, 7258 "emlxs_set_parm failed. %s: vpi=%d " 7259 "still enabled. Value restored to " 7260 "0x%x.", cfg->string, vpi, 7261 old_value); 7262 7263 return (2); 7264 } 7265 } 7266 } 7267 7268 /* Trigger adapter reset */ 7269 /* (void) emlxs_reset(port, FC_FCA_RESET); */ 7270 7271 break; 7272 7273 7274 case CFG_VPORT_RESTRICTED: 7275 for (vpi = 0; vpi < MAX_VPORTS; vpi++) { 7276 vport = &VPORT(vpi); 7277 7278 if (!(vport->flag & EMLXS_PORT_CONFIG)) { 7279 continue; 7280 } 7281 7282 if (vport->options & EMLXS_OPT_RESTRICT_MASK) { 7283 continue; 7284 } 7285 7286 if (new_value) { 7287 vport->flag |= EMLXS_PORT_RESTRICTED; 7288 } else { 7289 vport->flag &= ~EMLXS_PORT_RESTRICTED; 7290 } 7291 } 7292 7293 break; 7294 7295 #ifdef DHCHAP_SUPPORT 7296 case CFG_AUTH_ENABLE: 7297 (void) emlxs_reset(port, FC_FCA_LINK_RESET); 7298 break; 7299 7300 case CFG_AUTH_TMO: 7301 hba->auth_cfg.authentication_timeout = cfg->current; 7302 break; 7303 7304 case CFG_AUTH_MODE: 7305 hba->auth_cfg.authentication_mode = cfg->current; 7306 break; 7307 7308 case CFG_AUTH_BIDIR: 7309 hba->auth_cfg.bidirectional = cfg->current; 7310 break; 7311 7312 case CFG_AUTH_TYPE: 7313 hba->auth_cfg.authentication_type_priority[0] = 7314 (cfg->current & 0xF000) >> 12; 7315 hba->auth_cfg.authentication_type_priority[1] = 7316 (cfg->current & 0x0F00) >> 8; 7317 hba->auth_cfg.authentication_type_priority[2] = 7318 (cfg->current & 0x00F0) >> 4; 7319 hba->auth_cfg.authentication_type_priority[3] = 7320 (cfg->current & 0x000F); 7321 break; 7322 7323 case CFG_AUTH_HASH: 7324 hba->auth_cfg.hash_priority[0] = 7325 (cfg->current & 0xF000) >> 12; 7326 hba->auth_cfg.hash_priority[1] = (cfg->current & 0x0F00)>>8; 7327 hba->auth_cfg.hash_priority[2] = (cfg->current & 0x00F0)>>4; 7328 hba->auth_cfg.hash_priority[3] = (cfg->current & 0x000F); 7329 break; 7330 7331 case CFG_AUTH_GROUP: 7332 hba->auth_cfg.dh_group_priority[0] = 7333 (cfg->current & 0xF0000000) >> 28; 7334 hba->auth_cfg.dh_group_priority[1] = 7335 (cfg->current & 0x0F000000) >> 24; 7336 hba->auth_cfg.dh_group_priority[2] = 7337 (cfg->current & 0x00F00000) >> 20; 7338 hba->auth_cfg.dh_group_priority[3] = 7339 (cfg->current & 0x000F0000) >> 16; 7340 hba->auth_cfg.dh_group_priority[4] = 7341 (cfg->current & 0x0000F000) >> 12; 
7342 hba->auth_cfg.dh_group_priority[5] = 7343 (cfg->current & 0x00000F00) >> 8; 7344 hba->auth_cfg.dh_group_priority[6] = 7345 (cfg->current & 0x000000F0) >> 4; 7346 hba->auth_cfg.dh_group_priority[7] = 7347 (cfg->current & 0x0000000F); 7348 break; 7349 7350 case CFG_AUTH_INTERVAL: 7351 hba->auth_cfg.reauthenticate_time_interval = cfg->current; 7352 break; 7353 #endif /* DHCHAP_SUPPORT */ 7354 7355 } 7356 7357 return (FC_SUCCESS); 7358 7359 } /* emlxs_set_parm() */ 7360 7361 7362 /* 7363 * emlxs_mem_alloc OS specific routine for memory allocation / mapping 7364 * 7365 * The buf_info->flags field describes the memory operation requested. 7366 * 7367 * FC_MBUF_PHYSONLY set requests a supplied virtual address be mapped for DMA 7368 * Virtual address is supplied in buf_info->virt 7369 * DMA mapping flag is in buf_info->align 7370 * (DMA_READ_ONLY, DMA_WRITE_ONLY, DMA_READ_WRITE) 7371 * The mapped physical address is returned buf_info->phys 7372 * 7373 * FC_MBUF_PHYSONLY cleared requests memory be allocated for driver use and 7374 * if FC_MBUF_DMA is set the memory is also mapped for DMA 7375 * The byte alignment of the memory request is supplied in buf_info->align 7376 * The byte size of the memory request is supplied in buf_info->size 7377 * The virtual address is returned buf_info->virt 7378 * The mapped physical address is returned buf_info->phys (for FC_MBUF_DMA) 7379 */ 7380 extern uint8_t * 7381 emlxs_mem_alloc(emlxs_hba_t *hba, MBUF_INFO *buf_info) 7382 { 7383 emlxs_port_t *port = &PPORT; 7384 ddi_dma_attr_t dma_attr; 7385 ddi_device_acc_attr_t dev_attr; 7386 uint_t cookie_count; 7387 size_t dma_reallen; 7388 ddi_dma_cookie_t dma_cookie; 7389 uint_t dma_flag; 7390 int status; 7391 7392 dma_attr = hba->dma_attr_1sg; 7393 dev_attr = emlxs_data_acc_attr; 7394 7395 if (buf_info->flags & FC_MBUF_SNGLSG) { 7396 dma_attr.dma_attr_sgllen = 1; 7397 } 7398 7399 if (buf_info->flags & FC_MBUF_DMA32) { 7400 dma_attr.dma_attr_addr_hi = (uint64_t)0xffffffff; 7401 } 7402 7403 if (buf_info->flags & FC_MBUF_PHYSONLY) { 7404 7405 if (buf_info->virt == NULL) { 7406 goto done; 7407 } 7408 7409 /* 7410 * Allocate the DMA handle for this DMA object 7411 */ 7412 status = ddi_dma_alloc_handle((void *)hba->dip, 7413 &dma_attr, DDI_DMA_DONTWAIT, 7414 NULL, (ddi_dma_handle_t *)&buf_info->dma_handle); 7415 if (status != DDI_SUCCESS) { 7416 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 7417 "ddi_dma_alloc_handle failed: size=%x align=%x " 7418 "flags=%x", buf_info->size, buf_info->align, 7419 buf_info->flags); 7420 7421 buf_info->phys = 0; 7422 buf_info->dma_handle = 0; 7423 goto done; 7424 } 7425 7426 switch (buf_info->align) { 7427 case DMA_READ_WRITE: 7428 dma_flag = (DDI_DMA_RDWR | DDI_DMA_CONSISTENT); 7429 break; 7430 case DMA_READ_ONLY: 7431 dma_flag = (DDI_DMA_READ | DDI_DMA_CONSISTENT); 7432 break; 7433 case DMA_WRITE_ONLY: 7434 dma_flag = (DDI_DMA_WRITE | DDI_DMA_CONSISTENT); 7435 break; 7436 } 7437 7438 /* Map this page of memory */ 7439 status = ddi_dma_addr_bind_handle( 7440 (ddi_dma_handle_t)buf_info->dma_handle, NULL, 7441 (caddr_t)buf_info->virt, (size_t)buf_info->size, 7442 dma_flag, DDI_DMA_DONTWAIT, NULL, &dma_cookie, 7443 &cookie_count); 7444 7445 if (status != DDI_DMA_MAPPED || (cookie_count > 1)) { 7446 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 7447 "ddi_dma_addr_bind_handle failed: status=%x " 7448 "count=%x flags=%x", status, cookie_count, 7449 buf_info->flags); 7450 7451 (void) ddi_dma_free_handle( 7452 (ddi_dma_handle_t *)&buf_info->dma_handle); 7453 buf_info->phys = 
0; 7454 buf_info->dma_handle = 0; 7455 goto done; 7456 } 7457 7458 if (hba->bus_type == SBUS_FC) { 7459 7460 int32_t burstsizes_limit = 0xff; 7461 int32_t ret_burst; 7462 7463 ret_burst = ddi_dma_burstsizes( 7464 buf_info->dma_handle) & burstsizes_limit; 7465 if (ddi_dma_set_sbus64(buf_info->dma_handle, 7466 ret_burst) == DDI_FAILURE) { 7467 EMLXS_MSGF(EMLXS_CONTEXT, 7468 &emlxs_mem_alloc_failed_msg, 7469 "ddi_dma_set_sbus64 failed."); 7470 } 7471 } 7472 7473 /* Save Physical address */ 7474 buf_info->phys = dma_cookie.dmac_laddress; 7475 7476 /* 7477 * Just to be sure, let's add this 7478 */ 7479 EMLXS_MPDATA_SYNC((ddi_dma_handle_t)buf_info->dma_handle, 7480 (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV); 7481 7482 } else if (buf_info->flags & (FC_MBUF_DMA|FC_MBUF_DMA32)) { 7483 7484 dma_attr.dma_attr_align = buf_info->align; 7485 7486 /* 7487 * Allocate the DMA handle for this DMA object 7488 */ 7489 status = ddi_dma_alloc_handle((void *)hba->dip, &dma_attr, 7490 DDI_DMA_DONTWAIT, NULL, 7491 (ddi_dma_handle_t *)&buf_info->dma_handle); 7492 if (status != DDI_SUCCESS) { 7493 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 7494 "ddi_dma_alloc_handle failed: size=%x align=%x " 7495 "flags=%x", buf_info->size, buf_info->align, 7496 buf_info->flags); 7497 7498 buf_info->virt = NULL; 7499 buf_info->phys = 0; 7500 buf_info->data_handle = 0; 7501 buf_info->dma_handle = 0; 7502 goto done; 7503 } 7504 7505 status = ddi_dma_mem_alloc( 7506 (ddi_dma_handle_t)buf_info->dma_handle, 7507 (size_t)buf_info->size, &dev_attr, DDI_DMA_CONSISTENT, 7508 DDI_DMA_DONTWAIT, NULL, (caddr_t *)&buf_info->virt, 7509 &dma_reallen, (ddi_acc_handle_t *)&buf_info->data_handle); 7510 7511 if ((status != DDI_SUCCESS) || (buf_info->size > dma_reallen)) { 7512 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 7513 "ddi_dma_mem_alloc failed: size=%x align=%x " 7514 "flags=%x", buf_info->size, buf_info->align, 7515 buf_info->flags); 7516 7517 (void) ddi_dma_free_handle( 7518 (ddi_dma_handle_t *)&buf_info->dma_handle); 7519 7520 buf_info->virt = NULL; 7521 buf_info->phys = 0; 7522 buf_info->data_handle = 0; 7523 buf_info->dma_handle = 0; 7524 goto done; 7525 } 7526 7527 /* Map this page of memory */ 7528 status = ddi_dma_addr_bind_handle( 7529 (ddi_dma_handle_t)buf_info->dma_handle, NULL, 7530 (caddr_t)buf_info->virt, (size_t)buf_info->size, 7531 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL, 7532 &dma_cookie, &cookie_count); 7533 7534 if (status != DDI_DMA_MAPPED || (cookie_count > 1)) { 7535 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 7536 "ddi_dma_addr_bind_handle failed: status=%x " 7537 "count=%d size=%x align=%x flags=%x", status, 7538 cookie_count, buf_info->size, buf_info->align, 7539 buf_info->flags); 7540 7541 (void) ddi_dma_mem_free( 7542 (ddi_acc_handle_t *)&buf_info->data_handle); 7543 (void) ddi_dma_free_handle( 7544 (ddi_dma_handle_t *)&buf_info->dma_handle); 7545 7546 buf_info->virt = NULL; 7547 buf_info->phys = 0; 7548 buf_info->dma_handle = 0; 7549 buf_info->data_handle = 0; 7550 goto done; 7551 } 7552 7553 if (hba->bus_type == SBUS_FC) { 7554 int32_t burstsizes_limit = 0xff; 7555 int32_t ret_burst; 7556 7557 ret_burst = 7558 ddi_dma_burstsizes(buf_info-> 7559 dma_handle) & burstsizes_limit; 7560 if (ddi_dma_set_sbus64(buf_info->dma_handle, 7561 ret_burst) == DDI_FAILURE) { 7562 EMLXS_MSGF(EMLXS_CONTEXT, 7563 &emlxs_mem_alloc_failed_msg, 7564 "ddi_dma_set_sbus64 failed."); 7565 } 7566 } 7567 7568 /* Save Physical address */ 7569 buf_info->phys = 
dma_cookie.dmac_laddress; 7570 7571 /* Just to be sure, let's add this */ 7572 EMLXS_MPDATA_SYNC((ddi_dma_handle_t)buf_info->dma_handle, 7573 (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV); 7574 7575 } else { /* allocate virtual memory */ 7576 7577 buf_info->virt = 7578 kmem_zalloc((size_t)buf_info->size, KM_NOSLEEP); 7579 buf_info->phys = 0; 7580 buf_info->data_handle = 0; 7581 buf_info->dma_handle = 0; 7582 7583 if (buf_info->virt == (uint32_t *)0) { 7584 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 7585 "size=%x flags=%x", buf_info->size, 7586 buf_info->flags); 7587 } 7588 7589 } 7590 7591 done: 7592 7593 return ((uint8_t *)buf_info->virt); 7594 7595 } /* emlxs_mem_alloc() */ 7596 7597 7598 7599 /* 7600 * emlxs_mem_free: 7601 * 7602 * OS specific routine for memory de-allocation / unmapping 7603 * 7604 * The buf_info->flags field describes the memory operation requested. 7605 * 7606 * FC_MBUF_PHYSONLY set requests a supplied virtual address be unmapped 7607 * for DMA, but not freed. The mapped physical address to be unmapped is in 7608 * buf_info->phys 7609 * 7610 * FC_MBUF_PHYSONLY cleared requests memory be freed and unmapped for DMA only 7611 * if FC_MBUF_DMA is set. The mapped physical address to be unmapped is in 7612 * buf_info->phys. The virtual address to be freed is in buf_info->virt 7613 */ 7614 /*ARGSUSED*/ 7615 extern void 7616 emlxs_mem_free(emlxs_hba_t *hba, MBUF_INFO *buf_info) 7617 { 7618 if (buf_info->flags & FC_MBUF_PHYSONLY) { 7619 7620 if (buf_info->dma_handle) { 7621 (void) ddi_dma_unbind_handle(buf_info->dma_handle); 7622 (void) ddi_dma_free_handle( 7623 (ddi_dma_handle_t *)&buf_info->dma_handle); 7624 buf_info->dma_handle = NULL; 7625 } 7626 7627 } else if (buf_info->flags & (FC_MBUF_DMA|FC_MBUF_DMA32)) { 7628 7629 if (buf_info->dma_handle) { 7630 (void) ddi_dma_unbind_handle(buf_info->dma_handle); 7631 (void) ddi_dma_mem_free( 7632 (ddi_acc_handle_t *)&buf_info->data_handle); 7633 (void) ddi_dma_free_handle( 7634 (ddi_dma_handle_t *)&buf_info->dma_handle); 7635 buf_info->dma_handle = NULL; 7636 buf_info->data_handle = NULL; 7637 } 7638 7639 } else { /* allocate virtual memory */ 7640 7641 if (buf_info->virt) { 7642 kmem_free(buf_info->virt, (size_t)buf_info->size); 7643 buf_info->virt = NULL; 7644 } 7645 } 7646 7647 } /* emlxs_mem_free() */ 7648 7649 7650 static int 7651 emlxs_select_fcp_channel(emlxs_hba_t *hba, NODELIST *ndlp, int reset) 7652 { 7653 int channel; 7654 int msi_id; 7655 7656 7657 /* IO to an FCP2 device, or a device reset, always uses the fcp channel */ 7658 if ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) || reset) { 7659 return (hba->channel_fcp); 7660 } 7661 7662 7663 msi_id = emlxs_select_msiid(hba); 7664 channel = emlxs_msiid_to_chan(hba, msi_id); 7665 7666 7667 7668 /* If channel is closed, then try fcp channel */ 7669 if (ndlp->nlp_flag[channel] & NLP_CLOSED) { 7670 channel = hba->channel_fcp; 7671 } 7672 return (channel); 7673 7674 } 7675 7676 static int32_t 7677 emlxs_fast_target_reset(emlxs_port_t *port, emlxs_buf_t *sbp, NODELIST *ndlp) 7678 { 7679 emlxs_hba_t *hba = HBA; 7680 fc_packet_t *pkt; 7681 emlxs_config_t *cfg; 7682 MAILBOXQ *mbq; 7683 MAILBOX *mb; 7684 uint32_t rc; 7685 7686 /* 7687 * This routine provides an alternative target reset processing 7688 * method. Instead of sending an actual target reset to the 7689 * NPort, we will first unreg the login to that NPort. This 7690 * will cause all the outstanding IOs to quickly complete with 7691 * a NO RPI local error.
Next we will force the ULP to relogin 7692 * to the NPort by sending an RSCN (for that NPort) to the 7693 * upper layer. This method should result in a fast target 7694 * reset, as far as I/O completion is concerned; however, since an actual 7695 * target reset is not sent to the NPort, it is not 100% 7696 * compatible. Things like reservations will not be broken. 7697 * By default this option is DISABLED, and it is only enabled through 7698 * a hidden configuration parameter (fast-tgt-reset). 7699 */ 7700 rc = FC_TRAN_BUSY; 7701 pkt = PRIV2PKT(sbp); 7702 cfg = &CFG; 7703 7704 if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))) { 7705 /* issue the mbox cmd to the sli */ 7706 mb = (MAILBOX *) mbq->mbox; 7707 bzero((void *) mb, MAILBOX_CMD_BSIZE); 7708 mb->un.varUnregLogin.rpi = (uint16_t)ndlp->nlp_Rpi; 7709 #ifdef SLI3_SUPPORT 7710 mb->un.varUnregLogin.vpi = port->vpi; 7711 #endif /* SLI3_SUPPORT */ 7712 mb->mbxCommand = MBX_UNREG_LOGIN; 7713 mb->mbxOwner = OWN_HOST; 7714 7715 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 7716 "Fast Target Reset: unreg rpi=x%x tmr %d", ndlp->nlp_Rpi, 7717 cfg[CFG_FAST_TGT_RESET_TMR].current); 7718 7719 if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0) 7720 == MBX_SUCCESS) { 7721 7722 ndlp->nlp_Rpi = 0; 7723 7724 mutex_enter(&sbp->mtx); 7725 sbp->node = (void *)ndlp; 7726 sbp->did = ndlp->nlp_DID; 7727 mutex_exit(&sbp->mtx); 7728 7729 if (pkt->pkt_rsplen) { 7730 bzero((uint8_t *)pkt->pkt_resp, 7731 pkt->pkt_rsplen); 7732 } 7733 if (cfg[CFG_FAST_TGT_RESET_TMR].current) { 7734 ndlp->nlp_force_rscn = hba->timer_tics + 7735 cfg[CFG_FAST_TGT_RESET_TMR].current; 7736 } 7737 7738 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 0); 7739 } 7740 7741 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq); 7742 rc = FC_SUCCESS; 7743 } 7744 return (rc); 7745 } 7746 7747 static int32_t 7748 emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp, uint32_t *pkt_flags) 7749 { 7750 emlxs_hba_t *hba = HBA; 7751 fc_packet_t *pkt; 7752 emlxs_config_t *cfg; 7753 IOCBQ *iocbq; 7754 IOCB *iocb; 7755 CHANNEL *cp; 7756 NODELIST *ndlp; 7757 char *cmd; 7758 uint16_t lun; 7759 FCP_CMND *fcp_cmd; 7760 uint32_t did; 7761 uint32_t reset = 0; 7762 int channel; 7763 int32_t rval; 7764 7765 pkt = PRIV2PKT(sbp); 7766 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id); 7767 7768 /* Find target node object */ 7769 ndlp = emlxs_node_find_did(port, did); 7770 7771 if (!ndlp || !ndlp->nlp_active) { 7772 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg, 7773 "Node not found.
did=%x", did); 7774 7775 return (FC_BADPACKET); 7776 } 7777 7778 /* When the fcp channel is closed we stop accepting any FCP cmd */ 7779 if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) { 7780 return (FC_TRAN_BUSY); 7781 } 7782 7783 /* Snoop for target or lun reset first */ 7784 /* We always use FCP channel to send out target/lun reset fcp cmds */ 7785 /* interrupt affinity only applies to non tgt lun reset fcp cmd */ 7786 7787 cmd = (char *)pkt->pkt_cmd; 7788 lun = *((uint16_t *)cmd); 7789 lun = LE_SWAP16(lun); 7790 7791 iocbq = &sbp->iocbq; 7792 iocb = &iocbq->iocb; 7793 iocbq->node = (void *) ndlp; 7794 7795 /* Check for target reset */ 7796 if (cmd[10] & 0x20) { 7797 /* prepare iocb */ 7798 if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp, 7799 hba->channel_fcp)) != FC_SUCCESS) { 7800 7801 if (rval == 0xff) { 7802 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 7803 0, 1); 7804 rval = FC_SUCCESS; 7805 } 7806 7807 return (rval); 7808 } 7809 7810 mutex_enter(&sbp->mtx); 7811 sbp->pkt_flags |= PACKET_FCP_TGT_RESET; 7812 sbp->pkt_flags |= PACKET_POLLED; 7813 *pkt_flags = sbp->pkt_flags; 7814 mutex_exit(&sbp->mtx); 7815 7816 #ifdef SAN_DIAG_SUPPORT 7817 emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_TGTRESET, 7818 (HBA_WWN *)&ndlp->nlp_portname, -1); 7819 #endif /* SAN_DIAG_SUPPORT */ 7820 7821 iocbq->flag |= IOCB_PRIORITY; 7822 7823 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 7824 "Target Reset: did=%x", did); 7825 7826 cfg = &CFG; 7827 if (cfg[CFG_FAST_TGT_RESET].current) { 7828 if (emlxs_fast_target_reset(port, sbp, ndlp) == 7829 FC_SUCCESS) { 7830 return (FC_SUCCESS); 7831 } 7832 } 7833 7834 /* Close the node for any further normal IO */ 7835 emlxs_node_close(port, ndlp, hba->channel_fcp, 7836 pkt->pkt_timeout); 7837 7838 /* Flush the IO's on the tx queues */ 7839 (void) emlxs_tx_node_flush(port, ndlp, 7840 &hba->chan[hba->channel_fcp], 0, sbp); 7841 7842 /* This is the target reset fcp cmd */ 7843 reset = 1; 7844 } 7845 7846 /* Check for lun reset */ 7847 else if (cmd[10] & 0x10) { 7848 /* prepare iocb */ 7849 if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp, 7850 hba->channel_fcp)) != FC_SUCCESS) { 7851 7852 if (rval == 0xff) { 7853 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 7854 0, 1); 7855 rval = FC_SUCCESS; 7856 } 7857 7858 return (rval); 7859 } 7860 7861 mutex_enter(&sbp->mtx); 7862 sbp->pkt_flags |= PACKET_FCP_LUN_RESET; 7863 sbp->pkt_flags |= PACKET_POLLED; 7864 *pkt_flags = sbp->pkt_flags; 7865 mutex_exit(&sbp->mtx); 7866 7867 #ifdef SAN_DIAG_SUPPORT 7868 emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_LUNRESET, 7869 (HBA_WWN *)&ndlp->nlp_portname, lun); 7870 #endif /* SAN_DIAG_SUPPORT */ 7871 7872 iocbq->flag |= IOCB_PRIORITY; 7873 7874 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 7875 "LUN Reset: did=%x lun=%02x LUN=%02x02x", did, lun, 7876 cmd[0], cmd[1]); 7877 7878 /* Flush the IO's on the tx queues for this lun */ 7879 (void) emlxs_tx_lun_flush(port, ndlp, lun, sbp); 7880 7881 /* This is the lun reset fcp cmd */ 7882 reset = 1; 7883 } 7884 7885 channel = emlxs_select_fcp_channel(hba, ndlp, reset); 7886 7887 #ifdef SAN_DIAG_SUPPORT 7888 sbp->sd_start_time = gethrtime(); 7889 #endif /* SAN_DIAG_SUPPORT */ 7890 7891 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 7892 emlxs_swap_fcp_pkt(sbp); 7893 #endif /* EMLXS_MODREV2X */ 7894 7895 fcp_cmd = (FCP_CMND *) pkt->pkt_cmd; 7896 7897 if (fcp_cmd->fcpCntl1 == FCP_QTYPE_UNTAGGED) { 7898 fcp_cmd->fcpCntl1 = FCP_QTYPE_SIMPLE; 7899 } 7900 7901 if (reset == 0) { 7902 /* 7903 * tgt lun reset fcp cmd has been prepared 7904 * separately in 
the beginning 7905 */ 7906 if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp, 7907 channel)) != FC_SUCCESS) { 7908 7909 if (rval == 0xff) { 7910 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 7911 0, 1); 7912 rval = FC_SUCCESS; 7913 } 7914 7915 return (rval); 7916 } 7917 } 7918 7919 cp = &hba->chan[channel]; 7920 cp->ulpSendCmd++; 7921 7922 /* Initialize sbp */ 7923 mutex_enter(&sbp->mtx); 7924 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 7925 ((pkt->pkt_timeout > 0xff) ? 0 : 10); 7926 sbp->node = (void *)ndlp; 7927 sbp->lun = lun; 7928 sbp->class = iocb->ULPCLASS; 7929 sbp->did = ndlp->nlp_DID; 7930 mutex_exit(&sbp->mtx); 7931 7932 if (pkt->pkt_cmdlen) { 7933 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen, 7934 DDI_DMA_SYNC_FORDEV); 7935 } 7936 7937 if (pkt->pkt_datalen && pkt->pkt_tran_type == FC_PKT_FCP_WRITE) { 7938 EMLXS_MPDATA_SYNC(pkt->pkt_data_dma, 0, pkt->pkt_datalen, 7939 DDI_DMA_SYNC_FORDEV); 7940 } 7941 7942 HBASTATS.FcpIssued++; 7943 7944 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq); 7945 return (FC_SUCCESS); 7946 7947 } /* emlxs_send_fcp_cmd() */ 7948 7949 7950 7951 7952 /* 7953 * We have to make sure this setup works for INTX, MSI, and MSIX 7954 * For INTX, intr_count is always 1 7955 * For MSI, intr_count is always 2 by default 7956 * For MSIX, intr_count is configurable (1, 2, 4, 8) for now. 7957 */ 7958 extern int 7959 emlxs_select_msiid(emlxs_hba_t *hba) 7960 { 7961 int msiid = 0; 7962 7963 /* We use round-robin */ 7964 mutex_enter(&EMLXS_MSIID_LOCK); 7965 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) { 7966 msiid = hba->last_msiid; 7967 hba->last_msiid ++; 7968 if (hba->last_msiid >= hba->intr_count) { 7969 hba->last_msiid = 0; 7970 } 7971 } else { 7972 /* This should work for INTX and MSI also */ 7973 /* For SLI3 the chan_count is always 4 */ 7974 /* For SLI3 the msiid is limited to chan_count */ 7975 msiid = hba->last_msiid; 7976 hba->last_msiid ++; 7977 if (hba->intr_count > hba->chan_count) { 7978 if (hba->last_msiid >= hba->chan_count) { 7979 hba->last_msiid = 0; 7980 } 7981 } else { 7982 if (hba->last_msiid >= hba->intr_count) { 7983 hba->last_msiid = 0; 7984 } 7985 } 7986 } 7987 mutex_exit(&EMLXS_MSIID_LOCK); 7988 7989 return (msiid); 7990 } /* emlxs_select_msiid */ 7991 7992 7993 /* 7994 * A channel has an association with an msi id. 7995 * One msi id could be associated with multiple channels.
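 *
 * Illustrative aside, not part of the driver: in SLI4 mode the routine
 * below cycles an EQ's lastwq index through the WQ/channel range owned by
 * that msi id, i.e. [msi_id * num_wq, (msi_id + 1) * num_wq).  Assuming
 * lastwq starts at msi_id * num_wq and cfg[CFG_NUM_WQ].current is 4,
 * msi id 1 would return channels 4, 5, 6, 7, 4, ...  A stripped-down
 * sketch of that wrap-around (the helper name is made up; the arithmetic
 * mirrors the SLI4 branch below, minus the lastwq_lock):
 */
static int
example_next_chan(int msi_id, int num_wq, int *lastwq)
{
	int chan = *lastwq;

	(*lastwq)++;
	if (*lastwq >= ((msi_id + 1) * num_wq)) {
		*lastwq -= num_wq;	/* wrap back to msi_id * num_wq */
	}

	return (chan);
}

/*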
7996 */ 7997 extern int 7998 emlxs_msiid_to_chan(emlxs_hba_t *hba, int msi_id) 7999 { 8000 emlxs_config_t *cfg = &CFG; 8001 EQ_DESC_t *eqp; 8002 int chan; 8003 int num_wq; 8004 8005 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) { 8006 /* For SLI4 round robin all WQs associated with the msi_id */ 8007 eqp = &hba->sli.sli4.eq[msi_id]; 8008 8009 mutex_enter(&eqp->lastwq_lock); 8010 chan = eqp->lastwq; 8011 eqp->lastwq++; 8012 num_wq = cfg[CFG_NUM_WQ].current; 8013 if (eqp->lastwq >= ((msi_id + 1) * num_wq)) { 8014 eqp->lastwq -= num_wq; 8015 } 8016 mutex_exit(&eqp->lastwq_lock); 8017 8018 return (chan); 8019 } else { 8020 /* This is for SLI3 mode */ 8021 return (hba->msi2chan[msi_id]); 8022 } 8023 8024 } /* emlxs_msiid_to_chan */ 8025 8026 8027 #ifdef SFCT_SUPPORT 8028 static int32_t 8029 emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp) 8030 { 8031 emlxs_hba_t *hba = HBA; 8032 fc_packet_t *pkt; 8033 IOCBQ *iocbq; 8034 IOCB *iocb; 8035 NODELIST *ndlp; 8036 CHANNEL *cp; 8037 uint16_t iotag; 8038 uint32_t did; 8039 ddi_dma_cookie_t *cp_cmd; 8040 8041 pkt = PRIV2PKT(sbp); 8042 8043 did = sbp->did; 8044 ndlp = sbp->node; 8045 8046 iocbq = &sbp->iocbq; 8047 iocb = &iocbq->iocb; 8048 8049 /* Make sure node is still active */ 8050 if (!ndlp->nlp_active) { 8051 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg, 8052 "*Node not found. did=%x", did); 8053 8054 return (FC_BADPACKET); 8055 } 8056 8057 /* If gate is closed */ 8058 if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) { 8059 return (FC_TRAN_BUSY); 8060 } 8061 8062 /* Get the iotag by registering the packet */ 8063 iotag = emlxs_register_pkt(sbp->channel, sbp); 8064 8065 if (!iotag) { 8066 /* No more command slots available, retry later */ 8067 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 8068 "*Adapter Busy. Unable to allocate iotag: did=0x%x", did); 8069 8070 return (FC_TRAN_BUSY); 8071 } 8072 8073 /* Point of no return */ 8074 8075 cp = sbp->channel; 8076 cp->ulpSendCmd++; 8077 8078 #if (EMLXS_MODREV >= EMLXS_MODREV3) 8079 cp_cmd = pkt->pkt_cmd_cookie; 8080 #else 8081 cp_cmd = &pkt->pkt_cmd_cookie; 8082 #endif /* >= EMLXS_MODREV3 */ 8083 8084 iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(cp_cmd->dmac_laddress); 8085 iocb->un.fcpt64.bdl.addrLow = PADDR_LO(cp_cmd->dmac_laddress); 8086 iocb->un.fcpt64.bdl.bdeSize = pkt->pkt_cmdlen; 8087 iocb->un.fcpt64.bdl.bdeFlags = 0; 8088 8089 if (hba->sli_mode < 3) { 8090 iocb->ULPBDECOUNT = 1; 8091 iocb->ULPLE = 1; 8092 } else { /* SLI3 */ 8093 8094 iocb->ULPBDECOUNT = 0; 8095 iocb->ULPLE = 0; 8096 iocb->unsli3.ext_iocb.ebde_count = 0; 8097 } 8098 8099 /* Initalize iocbq */ 8100 iocbq->port = (void *)port; 8101 iocbq->node = (void *)ndlp; 8102 iocbq->channel = (void *)cp; 8103 8104 /* Initalize iocb */ 8105 iocb->ULPCONTEXT = (uint16_t)pkt->pkt_cmd_fhdr.rx_id; 8106 iocb->ULPIOTAG = iotag; 8107 iocb->ULPRSVDBYTE = 8108 ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout); 8109 iocb->ULPOWNER = OWN_CHIP; 8110 iocb->ULPCLASS = sbp->class; 8111 iocb->ULPCOMMAND = CMD_FCP_TRSP64_CX; 8112 8113 /* Set the pkt timer */ 8114 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 8115 ((pkt->pkt_timeout > 0xff) ? 
0 : 10); 8116 8117 if (pkt->pkt_cmdlen) { 8118 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen, 8119 DDI_DMA_SYNC_FORDEV); 8120 } 8121 8122 HBASTATS.FcpIssued++; 8123 8124 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq); 8125 8126 return (FC_SUCCESS); 8127 8128 } /* emlxs_send_fct_status() */ 8129 8130 8131 static int32_t 8132 emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp) 8133 { 8134 emlxs_hba_t *hba = HBA; 8135 fc_packet_t *pkt; 8136 IOCBQ *iocbq; 8137 IOCB *iocb; 8138 NODELIST *ndlp; 8139 uint16_t iotag; 8140 uint32_t did; 8141 8142 pkt = PRIV2PKT(sbp); 8143 8144 did = sbp->did; 8145 ndlp = sbp->node; 8146 8147 8148 iocbq = &sbp->iocbq; 8149 iocb = &iocbq->iocb; 8150 8151 /* Make sure node is still active */ 8152 if ((ndlp == NULL) || (!ndlp->nlp_active)) { 8153 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg, 8154 "*Node not found. did=%x", did); 8155 8156 return (FC_BADPACKET); 8157 } 8158 8159 /* If gate is closed */ 8160 if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) { 8161 return (FC_TRAN_BUSY); 8162 } 8163 8164 /* Get the iotag by registering the packet */ 8165 iotag = emlxs_register_pkt(sbp->channel, sbp); 8166 8167 if (!iotag) { 8168 /* No more command slots available, retry later */ 8169 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 8170 "*Adapter Busy. Unable to allocate iotag: did=0x%x", did); 8171 8172 return (FC_TRAN_BUSY); 8173 } 8174 8175 /* Point of no return */ 8176 iocbq->port = (void *)port; 8177 iocbq->node = (void *)ndlp; 8178 iocbq->channel = (void *)sbp->channel; 8179 ((CHANNEL *)sbp->channel)->ulpSendCmd++; 8180 8181 /* 8182 * Don't give the abort priority, we want the IOCB 8183 * we are aborting to be processed first. 8184 */ 8185 iocbq->flag |= IOCB_SPECIAL; 8186 8187 iocb->ULPCONTEXT = pkt->pkt_cmd_fhdr.rx_id; 8188 iocb->ULPIOTAG = iotag; 8189 iocb->ULPLE = 1; 8190 iocb->ULPCLASS = sbp->class; 8191 iocb->ULPOWNER = OWN_CHIP; 8192 8193 if (hba->state >= FC_LINK_UP) { 8194 /* Create the abort IOCB */ 8195 iocb->un.acxri.abortType = ABORT_TYPE_ABTS; 8196 iocb->ULPCOMMAND = CMD_ABORT_XRI_CX; 8197 8198 } else { 8199 /* Create the close IOCB */ 8200 iocb->ULPCOMMAND = CMD_CLOSE_XRI_CX; 8201 8202 } 8203 8204 iocb->ULPRSVDBYTE = 8205 ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout); 8206 /* Set the pkt timer */ 8207 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 8208 ((pkt->pkt_timeout > 0xff) ? 0 : 10); 8209 8210 EMLXS_SLI_ISSUE_IOCB_CMD(hba, sbp->channel, iocbq); 8211 8212 return (FC_SUCCESS); 8213 8214 } /* emlxs_send_fct_abort() */ 8215 8216 #endif /* SFCT_SUPPORT */ 8217 8218 8219 static int32_t 8220 emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp) 8221 { 8222 emlxs_hba_t *hba = HBA; 8223 fc_packet_t *pkt; 8224 IOCBQ *iocbq; 8225 IOCB *iocb; 8226 CHANNEL *cp; 8227 uint32_t i; 8228 NODELIST *ndlp; 8229 uint32_t did; 8230 int32_t rval; 8231 8232 pkt = PRIV2PKT(sbp); 8233 cp = &hba->chan[hba->channel_ip]; 8234 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id); 8235 8236 /* Check if node exists */ 8237 /* Broadcast did is always a success */ 8238 ndlp = emlxs_node_find_did(port, did); 8239 8240 if (!ndlp || !ndlp->nlp_active) { 8241 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg, 8242 "Node not found. did=0x%x", did); 8243 8244 return (FC_BADPACKET); 8245 } 8246 8247 /* Check if gate is temporarily closed */ 8248 if (ndlp->nlp_flag[hba->channel_ip] & NLP_CLOSED) { 8249 return (FC_TRAN_BUSY); 8250 } 8251 8252 /* Check if an exchange has been created */ 8253 if ((ndlp->nlp_Xri == 0) && (did != BCAST_DID)) { 8254 /* No exchange. 
Try creating one */ 8255 (void) emlxs_create_xri(port, cp, ndlp); 8256 8257 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 8258 "Adapter Busy. Exchange not found. did=0x%x", did); 8259 8260 return (FC_TRAN_BUSY); 8261 } 8262 8263 /* ULP PATCH: pkt_cmdlen was found to be set to zero */ 8264 /* on BROADCAST commands */ 8265 if (pkt->pkt_cmdlen == 0) { 8266 /* Set the pkt_cmdlen to the cookie size */ 8267 #if (EMLXS_MODREV >= EMLXS_MODREV3) 8268 for (i = 0; i < pkt->pkt_cmd_cookie_cnt; i++) { 8269 pkt->pkt_cmdlen += pkt->pkt_cmd_cookie[i].dmac_size; 8270 } 8271 #else 8272 pkt->pkt_cmdlen = pkt->pkt_cmd_cookie.dmac_size; 8273 #endif /* >= EMLXS_MODREV3 */ 8274 8275 } 8276 8277 iocbq = &sbp->iocbq; 8278 iocb = &iocbq->iocb; 8279 8280 iocbq->node = (void *)ndlp; 8281 if ((rval = EMLXS_SLI_PREP_IP_IOCB(port, sbp)) != FC_SUCCESS) { 8282 8283 if (rval == 0xff) { 8284 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1); 8285 rval = FC_SUCCESS; 8286 } 8287 8288 return (rval); 8289 } 8290 8291 cp->ulpSendCmd++; 8292 8293 /* Initalize sbp */ 8294 mutex_enter(&sbp->mtx); 8295 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 8296 ((pkt->pkt_timeout > 0xff) ? 0 : 10); 8297 sbp->node = (void *)ndlp; 8298 sbp->lun = EMLXS_LUN_NONE; 8299 sbp->class = iocb->ULPCLASS; 8300 sbp->did = did; 8301 mutex_exit(&sbp->mtx); 8302 8303 if (pkt->pkt_cmdlen) { 8304 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen, 8305 DDI_DMA_SYNC_FORDEV); 8306 } 8307 8308 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq); 8309 8310 return (FC_SUCCESS); 8311 8312 } /* emlxs_send_ip() */ 8313 8314 8315 static int32_t 8316 emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp) 8317 { 8318 emlxs_hba_t *hba = HBA; 8319 emlxs_port_t *vport; 8320 fc_packet_t *pkt; 8321 IOCBQ *iocbq; 8322 CHANNEL *cp; 8323 uint32_t cmd; 8324 int i; 8325 ELS_PKT *els_pkt; 8326 NODELIST *ndlp; 8327 uint32_t did; 8328 char fcsp_msg[32]; 8329 int rc; 8330 int32_t rval; 8331 emlxs_config_t *cfg = &CFG; 8332 8333 fcsp_msg[0] = 0; 8334 pkt = PRIV2PKT(sbp); 8335 els_pkt = (ELS_PKT *)pkt->pkt_cmd; 8336 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id); 8337 8338 iocbq = &sbp->iocbq; 8339 8340 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 8341 emlxs_swap_els_pkt(sbp); 8342 #endif /* EMLXS_MODREV2X */ 8343 8344 cmd = *((uint32_t *)pkt->pkt_cmd); 8345 cmd &= ELS_CMD_MASK; 8346 8347 /* Point of no return, except for ADISC & PLOGI */ 8348 8349 /* Check node */ 8350 switch (cmd) { 8351 case ELS_CMD_FLOGI: 8352 case ELS_CMD_FDISC: 8353 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) { 8354 8355 if (emlxs_vpi_logi_notify(port, sbp)) { 8356 pkt->pkt_state = FC_PKT_LOCAL_RJT; 8357 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 8358 emlxs_unswap_pkt(sbp); 8359 #endif /* EMLXS_MODREV2X */ 8360 return (FC_FAILURE); 8361 } 8362 } else { 8363 /* 8364 * If FLOGI is already complete, then we 8365 * should not be receiving another FLOGI. 8366 * Reset the link to recover. 
8367 */ 8368 if (port->flag & EMLXS_PORT_FLOGI_CMPL) { 8369 pkt->pkt_state = FC_PKT_LOCAL_RJT; 8370 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 8371 emlxs_unswap_pkt(sbp); 8372 #endif /* EMLXS_MODREV2X */ 8373 8374 (void) emlxs_reset(port, FC_FCA_LINK_RESET); 8375 return (FC_FAILURE); 8376 } 8377 8378 if (port->vpi > 0) { 8379 *((uint32_t *)pkt->pkt_cmd) = ELS_CMD_FDISC; 8380 } 8381 } 8382 8383 /* Command may have been changed */ 8384 cmd = *((uint32_t *)pkt->pkt_cmd); 8385 cmd &= ELS_CMD_MASK; 8386 8387 if (hba->flag & FC_NPIV_DELAY_REQUIRED) { 8388 sbp->pkt_flags |= PACKET_DELAY_REQUIRED; 8389 } 8390 8391 ndlp = NULL; 8392 8393 /* We will process these cmds at the bottom of this routine */ 8394 break; 8395 8396 case ELS_CMD_PLOGI: 8397 /* Make sure we don't log into ourself */ 8398 for (i = 0; i < MAX_VPORTS; i++) { 8399 vport = &VPORT(i); 8400 8401 if (!(vport->flag & EMLXS_PORT_BOUND)) { 8402 continue; 8403 } 8404 8405 if (did == vport->did) { 8406 pkt->pkt_state = FC_PKT_NPORT_RJT; 8407 8408 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 8409 emlxs_unswap_pkt(sbp); 8410 #endif /* EMLXS_MODREV2X */ 8411 8412 return (FC_FAILURE); 8413 } 8414 } 8415 8416 ndlp = NULL; 8417 8418 /* Check if this is the first PLOGI */ 8419 /* after a PT_TO_PT connection */ 8420 if ((hba->flag & FC_PT_TO_PT) && (port->did == 0)) { 8421 MAILBOXQ *mbox; 8422 8423 /* ULP bug fix */ 8424 if (pkt->pkt_cmd_fhdr.s_id == 0) { 8425 pkt->pkt_cmd_fhdr.s_id = 8426 pkt->pkt_cmd_fhdr.d_id - FP_DEFAULT_DID + 8427 FP_DEFAULT_SID; 8428 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg, 8429 "PLOGI: P2P Fix. sid=0-->%x did=%x", 8430 pkt->pkt_cmd_fhdr.s_id, 8431 pkt->pkt_cmd_fhdr.d_id); 8432 } 8433 8434 mutex_enter(&EMLXS_PORT_LOCK); 8435 port->did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.s_id); 8436 mutex_exit(&EMLXS_PORT_LOCK); 8437 8438 /* Update our service parms */ 8439 if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba, 8440 MEM_MBOX, 1))) { 8441 emlxs_mb_config_link(hba, mbox); 8442 8443 rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, 8444 mbox, MBX_NOWAIT, 0); 8445 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 8446 emlxs_mem_put(hba, MEM_MBOX, 8447 (void *)mbox); 8448 } 8449 8450 } 8451 } 8452 8453 /* We will process these cmds at the bottom of this routine */ 8454 break; 8455 8456 default: 8457 ndlp = emlxs_node_find_did(port, did); 8458 8459 /* If an ADISC is being sent and we have no node, */ 8460 /* then we must fail the ADISC now */ 8461 if (!ndlp && (cmd == ELS_CMD_ADISC) && !port->tgt_mode) { 8462 8463 /* Build the LS_RJT response */ 8464 els_pkt = (ELS_PKT *)pkt->pkt_resp; 8465 els_pkt->elsCode = 0x01; 8466 els_pkt->un.lsRjt.un.b.lsRjtRsvd0 = 0; 8467 els_pkt->un.lsRjt.un.b.lsRjtRsnCode = 8468 LSRJT_LOGICAL_ERR; 8469 els_pkt->un.lsRjt.un.b.lsRjtRsnCodeExp = 8470 LSEXP_NOTHING_MORE; 8471 els_pkt->un.lsRjt.un.b.vendorUnique = 0x03; 8472 8473 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 8474 "ADISC Rejected. Node not found. 
did=0x%x", did); 8475 8476 if (sbp->channel == NULL) { 8477 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) { 8478 sbp->channel = 8479 &hba->chan[hba->channel_els]; 8480 } else { 8481 sbp->channel = 8482 &hba->chan[FC_ELS_RING]; 8483 } 8484 } 8485 8486 /* Return this as rejected by the target */ 8487 emlxs_pkt_complete(sbp, IOSTAT_LS_RJT, 0, 1); 8488 8489 return (FC_SUCCESS); 8490 } 8491 } 8492 8493 /* DID == BCAST_DID is special case to indicate that */ 8494 /* RPI is being passed in seq_id field */ 8495 /* This is used by emlxs_send_logo() for target mode */ 8496 8497 /* Initalize iocbq */ 8498 iocbq->node = (void *)ndlp; 8499 if ((rval = EMLXS_SLI_PREP_ELS_IOCB(port, sbp)) != FC_SUCCESS) { 8500 8501 if (rval == 0xff) { 8502 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1); 8503 rval = FC_SUCCESS; 8504 } 8505 8506 return (rval); 8507 } 8508 8509 cp = &hba->chan[hba->channel_els]; 8510 cp->ulpSendCmd++; 8511 8512 /* Check cmd */ 8513 switch (cmd) { 8514 case ELS_CMD_PRLI: 8515 /* 8516 * if our firmware version is 3.20 or later, 8517 * set the following bits for FC-TAPE support. 8518 */ 8519 if (port->ini_mode && 8520 (hba->vpd.feaLevelHigh >= 0x02) && 8521 (cfg[CFG_ADISC_SUPPORT].current != 0)) { 8522 els_pkt->un.prli.ConfmComplAllowed = 1; 8523 els_pkt->un.prli.Retry = 1; 8524 els_pkt->un.prli.TaskRetryIdReq = 1; 8525 } else { 8526 els_pkt->un.prli.ConfmComplAllowed = 0; 8527 els_pkt->un.prli.Retry = 0; 8528 els_pkt->un.prli.TaskRetryIdReq = 0; 8529 } 8530 8531 break; 8532 8533 /* This is a patch for the ULP stack. */ 8534 8535 /* 8536 * ULP only reads our service parameters once during bind_port, 8537 * but the service parameters change due to topology. 8538 */ 8539 case ELS_CMD_FLOGI: 8540 case ELS_CMD_FDISC: 8541 case ELS_CMD_PLOGI: 8542 case ELS_CMD_PDISC: 8543 /* Copy latest service parameters to payload */ 8544 bcopy((void *) &port->sparam, 8545 (void *)&els_pkt->un.logi, sizeof (SERV_PARM)); 8546 8547 if ((hba->flag & FC_NPIV_ENABLED) && 8548 (hba->flag & FC_NPIV_SUPPORTED) && 8549 (cmd == ELS_CMD_PLOGI)) { 8550 SERV_PARM *sp; 8551 emlxs_vvl_fmt_t *vvl; 8552 8553 sp = (SERV_PARM *)&els_pkt->un.logi; 8554 sp->VALID_VENDOR_VERSION = 1; 8555 vvl = (emlxs_vvl_fmt_t *)&sp->vendorVersion[0]; 8556 vvl->un0.w0.oui = 0x0000C9; 8557 vvl->un0.word0 = LE_SWAP32(vvl->un0.word0); 8558 vvl->un1.w1.vport = (port->vpi > 0) ? 1 : 0; 8559 vvl->un1.word1 = LE_SWAP32(vvl->un1.word1); 8560 } 8561 8562 #ifdef DHCHAP_SUPPORT 8563 emlxs_dhc_init_sp(port, did, 8564 (SERV_PARM *)&els_pkt->un.logi, (char **)&fcsp_msg); 8565 #endif /* DHCHAP_SUPPORT */ 8566 8567 break; 8568 } 8569 8570 /* Initialize the sbp */ 8571 mutex_enter(&sbp->mtx); 8572 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 8573 ((pkt->pkt_timeout > 0xff) ? 
0 : 10); 8574 sbp->node = (void *)ndlp; 8575 sbp->lun = EMLXS_LUN_NONE; 8576 sbp->did = did; 8577 mutex_exit(&sbp->mtx); 8578 8579 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg, "%s: sid=%x did=%x %s", 8580 emlxs_elscmd_xlate(cmd), port->did, did, fcsp_msg); 8581 8582 if (pkt->pkt_cmdlen) { 8583 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen, 8584 DDI_DMA_SYNC_FORDEV); 8585 } 8586 8587 /* Check node */ 8588 switch (cmd) { 8589 case ELS_CMD_FLOGI: 8590 case ELS_CMD_FDISC: 8591 if (port->ini_mode) { 8592 /* Make sure fabric node is destroyed */ 8593 /* It should already have been destroyed at link down */ 8594 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) { 8595 ndlp = emlxs_node_find_did(port, FABRIC_DID); 8596 if (ndlp) { 8597 if (emlxs_mb_unreg_node(port, ndlp, 8598 NULL, NULL, iocbq) == 0) { 8599 /* Deferring iocb tx until */ 8600 /* completion of unreg */ 8601 return (FC_SUCCESS); 8602 } 8603 } 8604 } 8605 } 8606 break; 8607 8608 case ELS_CMD_PLOGI: 8609 8610 ndlp = emlxs_node_find_did(port, did); 8611 8612 if (ndlp && ndlp->nlp_active) { 8613 /* Close the node for any further normal IO */ 8614 emlxs_node_close(port, ndlp, hba->channel_fcp, 8615 pkt->pkt_timeout + 10); 8616 emlxs_node_close(port, ndlp, hba->channel_ip, 8617 pkt->pkt_timeout + 10); 8618 8619 /* Flush tx queues */ 8620 (void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0); 8621 8622 /* Flush chip queues */ 8623 (void) emlxs_chipq_node_flush(port, 0, ndlp, 0); 8624 } 8625 8626 break; 8627 8628 case ELS_CMD_PRLI: 8629 8630 ndlp = emlxs_node_find_did(port, did); 8631 8632 if (ndlp && ndlp->nlp_active) { 8633 /* 8634 * Close the node for any further FCP IO; 8635 * Flush all outstanding I/O only if 8636 * "Establish Image Pair" bit is set. 8637 */ 8638 emlxs_node_close(port, ndlp, hba->channel_fcp, 8639 pkt->pkt_timeout + 10); 8640 8641 if (els_pkt->un.prli.estabImagePair) { 8642 /* Flush tx queues */ 8643 (void) emlxs_tx_node_flush(port, ndlp, 8644 &hba->chan[hba->channel_fcp], 0, 0); 8645 8646 /* Flush chip queues */ 8647 (void) emlxs_chipq_node_flush(port, 8648 &hba->chan[hba->channel_fcp], ndlp, 0); 8649 } 8650 } 8651 8652 break; 8653 8654 } 8655 8656 HBASTATS.ElsCmdIssued++; 8657 8658 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq); 8659 8660 return (FC_SUCCESS); 8661 8662 } /* emlxs_send_els() */ 8663 8664 8665 8666 8667 static int32_t 8668 emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp) 8669 { 8670 emlxs_hba_t *hba = HBA; 8671 emlxs_config_t *cfg = &CFG; 8672 fc_packet_t *pkt; 8673 IOCBQ *iocbq; 8674 IOCB *iocb; 8675 NODELIST *ndlp; 8676 CHANNEL *cp; 8677 int i; 8678 uint32_t cmd; 8679 uint32_t ucmd; 8680 ELS_PKT *els_pkt; 8681 fc_unsol_buf_t *ubp; 8682 emlxs_ub_priv_t *ub_priv; 8683 uint32_t did; 8684 char fcsp_msg[32]; 8685 uint8_t *ub_buffer; 8686 int32_t rval; 8687 8688 fcsp_msg[0] = 0; 8689 pkt = PRIV2PKT(sbp); 8690 els_pkt = (ELS_PKT *)pkt->pkt_cmd; 8691 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id); 8692 8693 iocbq = &sbp->iocbq; 8694 iocb = &iocbq->iocb; 8695 8696 /* Acquire the unsolicited command this pkt is replying to */ 8697 if (pkt->pkt_cmd_fhdr.ox_id < EMLXS_UB_TOKEN_OFFSET) { 8698 /* This is for auto replies when no ub's are used */ 8699 ucmd = pkt->pkt_cmd_fhdr.ox_id << ELS_CMD_SHIFT; 8700 ubp = NULL; 8701 ub_priv = NULL; 8702 ub_buffer = NULL; 8703 8704 #ifdef SFCT_SUPPORT 8705 if (sbp->fct_cmd) { 8706 fct_els_t *els = 8707 (fct_els_t *)sbp->fct_cmd->cmd_specific; 8708 ub_buffer = (uint8_t *)els->els_req_payload; 8709 } 8710 #endif /* SFCT_SUPPORT */ 8711 8712 } else { 8713 /* Find the ub buffer that goes 
with this reply */ 8714 if (!(ubp = emlxs_ub_find(port, pkt->pkt_cmd_fhdr.ox_id))) { 8715 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg, 8716 "ELS reply: Invalid oxid=%x", 8717 pkt->pkt_cmd_fhdr.ox_id); 8718 return (FC_BADPACKET); 8719 } 8720 8721 ub_buffer = (uint8_t *)ubp->ub_buffer; 8722 ub_priv = ubp->ub_fca_private; 8723 ucmd = ub_priv->cmd; 8724 8725 ub_priv->flags |= EMLXS_UB_REPLY; 8726 8727 /* Reset oxid to ELS command */ 8728 /* We do this because the ub is only valid */ 8729 /* until we return from this thread */ 8730 pkt->pkt_cmd_fhdr.ox_id = (ucmd >> ELS_CMD_SHIFT) & 0xff; 8731 } 8732 8733 /* Save the result */ 8734 sbp->ucmd = ucmd; 8735 8736 if (sbp->channel == NULL) { 8737 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) { 8738 sbp->channel = &hba->chan[hba->channel_els]; 8739 } else { 8740 sbp->channel = &hba->chan[FC_ELS_RING]; 8741 } 8742 } 8743 8744 /* Check for interceptions */ 8745 switch (ucmd) { 8746 8747 #ifdef ULP_PATCH2 8748 case ELS_CMD_LOGO: 8749 if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH2)) { 8750 break; 8751 } 8752 8753 /* Check if this was generated by ULP and not us */ 8754 if (!(sbp->pkt_flags & PACKET_ALLOCATED)) { 8755 8756 /* 8757 * Since we replied to this already, 8758 * we won't need to send this now 8759 */ 8760 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1); 8761 8762 return (FC_SUCCESS); 8763 } 8764 8765 break; 8766 #endif /* ULP_PATCH2 */ 8767 8768 #ifdef ULP_PATCH3 8769 case ELS_CMD_PRLI: 8770 if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH3)) { 8771 break; 8772 } 8773 8774 /* Check if this was generated by ULP and not us */ 8775 if (!(sbp->pkt_flags & PACKET_ALLOCATED)) { 8776 8777 /* 8778 * Since we replied to this already, 8779 * we won't need to send this now 8780 */ 8781 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1); 8782 8783 return (FC_SUCCESS); 8784 } 8785 8786 break; 8787 #endif /* ULP_PATCH3 */ 8788 8789 8790 #ifdef ULP_PATCH4 8791 case ELS_CMD_PRLO: 8792 if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH4)) { 8793 break; 8794 } 8795 8796 /* Check if this was generated by ULP and not us */ 8797 if (!(sbp->pkt_flags & PACKET_ALLOCATED)) { 8798 /* 8799 * Since we replied to this already, 8800 * we won't need to send this now 8801 */ 8802 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1); 8803 8804 return (FC_SUCCESS); 8805 } 8806 8807 break; 8808 #endif /* ULP_PATCH4 */ 8809 8810 #ifdef ULP_PATCH6 8811 case ELS_CMD_RSCN: 8812 if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH6)) { 8813 break; 8814 } 8815 8816 /* Check if this RSCN was generated by us */ 8817 if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) { 8818 cmd = *((uint32_t *)pkt->pkt_cmd); 8819 cmd = LE_SWAP32(cmd); 8820 cmd &= ELS_CMD_MASK; 8821 8822 /* 8823 * If ULP is accepting this, 8824 * then close affected node 8825 */ 8826 if (port->ini_mode && ub_buffer && cmd 8827 == ELS_CMD_ACC) { 8828 fc_rscn_t *rscn; 8829 uint32_t count; 8830 uint32_t *lp; 8831 8832 /* 8833 * Only the Leadville code path will 8834 * come thru here. The RSCN data is NOT 8835 * swapped properly for the Comstar code 8836 * path. 8837 */ 8838 lp = (uint32_t *)ub_buffer; 8839 rscn = (fc_rscn_t *)lp++; 8840 count = 8841 ((rscn->rscn_payload_len - 4) / 4); 8842 8843 /* Close affected ports */ 8844 for (i = 0; i < count; i++, lp++) { 8845 (void) emlxs_port_offline(port, 8846 *lp); 8847 } 8848 } 8849 8850 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg, 8851 "RSCN %s: did=%x oxid=%x rxid=%x. 
" 8852 "Intercepted.", emlxs_elscmd_xlate(cmd), 8853 did, pkt->pkt_cmd_fhdr.ox_id, 8854 pkt->pkt_cmd_fhdr.rx_id); 8855 8856 /* 8857 * Since we generated this RSCN, 8858 * we won't need to send this reply 8859 */ 8860 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1); 8861 8862 return (FC_SUCCESS); 8863 } 8864 8865 break; 8866 #endif /* ULP_PATCH6 */ 8867 8868 case ELS_CMD_PLOGI: 8869 /* Check if this PLOGI was generated by us */ 8870 if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) { 8871 cmd = *((uint32_t *)pkt->pkt_cmd); 8872 cmd = LE_SWAP32(cmd); 8873 cmd &= ELS_CMD_MASK; 8874 8875 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg, 8876 "PLOGI %s: did=%x oxid=%x rxid=%x. " 8877 "Intercepted.", emlxs_elscmd_xlate(cmd), 8878 did, pkt->pkt_cmd_fhdr.ox_id, 8879 pkt->pkt_cmd_fhdr.rx_id); 8880 8881 /* 8882 * Since we generated this PLOGI, 8883 * we won't need to send this reply 8884 */ 8885 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1); 8886 8887 return (FC_SUCCESS); 8888 } 8889 8890 break; 8891 } 8892 8893 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 8894 emlxs_swap_els_pkt(sbp); 8895 #endif /* EMLXS_MODREV2X */ 8896 8897 8898 cmd = *((uint32_t *)pkt->pkt_cmd); 8899 cmd &= ELS_CMD_MASK; 8900 8901 /* Check if modifications are needed */ 8902 switch (ucmd) { 8903 case (ELS_CMD_PRLI): 8904 8905 if (cmd == ELS_CMD_ACC) { 8906 /* This is a patch for the ULP stack. */ 8907 /* ULP does not keep track of FCP2 support */ 8908 if (port->ini_mode && 8909 (hba->vpd.feaLevelHigh >= 0x02) && 8910 (cfg[CFG_ADISC_SUPPORT].current != 0)) { 8911 els_pkt->un.prli.ConfmComplAllowed = 1; 8912 els_pkt->un.prli.Retry = 1; 8913 els_pkt->un.prli.TaskRetryIdReq = 1; 8914 } else { 8915 els_pkt->un.prli.ConfmComplAllowed = 0; 8916 els_pkt->un.prli.Retry = 0; 8917 els_pkt->un.prli.TaskRetryIdReq = 0; 8918 } 8919 } 8920 8921 break; 8922 8923 case ELS_CMD_FLOGI: 8924 case ELS_CMD_PLOGI: 8925 case ELS_CMD_FDISC: 8926 case ELS_CMD_PDISC: 8927 8928 if (cmd == ELS_CMD_ACC) { 8929 /* This is a patch for the ULP stack. */ 8930 8931 /* 8932 * ULP only reads our service parameters 8933 * once during bind_port, but the service 8934 * parameters change due to topology. 8935 */ 8936 8937 /* Copy latest service parameters to payload */ 8938 bcopy((void *)&port->sparam, 8939 (void *)&els_pkt->un.logi, sizeof (SERV_PARM)); 8940 8941 #ifdef DHCHAP_SUPPORT 8942 emlxs_dhc_init_sp(port, did, 8943 (SERV_PARM *)&els_pkt->un.logi, (char **)&fcsp_msg); 8944 #endif /* DHCHAP_SUPPORT */ 8945 8946 } 8947 8948 break; 8949 8950 } 8951 8952 /* Initalize iocbq */ 8953 iocbq->node = (void *)NULL; 8954 if ((rval = EMLXS_SLI_PREP_ELS_IOCB(port, sbp)) != FC_SUCCESS) { 8955 8956 if (rval == 0xff) { 8957 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1); 8958 rval = FC_SUCCESS; 8959 } 8960 8961 return (rval); 8962 } 8963 8964 cp = &hba->chan[hba->channel_els]; 8965 cp->ulpSendCmd++; 8966 8967 /* Initalize sbp */ 8968 mutex_enter(&sbp->mtx); 8969 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 8970 ((pkt->pkt_timeout > 0xff) ? 
0 : 10); 8971 sbp->node = (void *) NULL; 8972 sbp->lun = EMLXS_LUN_NONE; 8973 sbp->class = iocb->ULPCLASS; 8974 sbp->did = did; 8975 mutex_exit(&sbp->mtx); 8976 8977 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg, 8978 "%s %s: did=%x oxid=%x rxid=%x %s", emlxs_elscmd_xlate(ucmd), 8979 emlxs_elscmd_xlate(cmd), did, pkt->pkt_cmd_fhdr.ox_id, 8980 pkt->pkt_cmd_fhdr.rx_id, fcsp_msg); 8981 8982 /* Process nodes */ 8983 switch (ucmd) { 8984 case ELS_CMD_RSCN: 8985 { 8986 if (port->ini_mode && ub_buffer && cmd == ELS_CMD_ACC) { 8987 fc_rscn_t *rscn; 8988 uint32_t count; 8989 uint32_t *lp = NULL; 8990 8991 /* 8992 * Only the Leadville code path will come thru 8993 * here. The RSCN data is NOT swapped properly 8994 * for the Comstar code path. 8995 */ 8996 lp = (uint32_t *)ub_buffer; 8997 rscn = (fc_rscn_t *)lp++; 8998 count = ((rscn->rscn_payload_len - 4) / 4); 8999 9000 /* Close affected ports */ 9001 for (i = 0; i < count; i++, lp++) { 9002 (void) emlxs_port_offline(port, *lp); 9003 } 9004 } 9005 break; 9006 } 9007 case ELS_CMD_PLOGI: 9008 9009 if (cmd == ELS_CMD_ACC) { 9010 ndlp = emlxs_node_find_did(port, did); 9011 9012 if (ndlp && ndlp->nlp_active) { 9013 /* Close the node for any further normal IO */ 9014 emlxs_node_close(port, ndlp, hba->channel_fcp, 9015 pkt->pkt_timeout + 10); 9016 emlxs_node_close(port, ndlp, hba->channel_ip, 9017 pkt->pkt_timeout + 10); 9018 9019 /* Flush tx queue */ 9020 (void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0); 9021 9022 /* Flush chip queue */ 9023 (void) emlxs_chipq_node_flush(port, 0, ndlp, 0); 9024 } 9025 } 9026 9027 break; 9028 9029 case ELS_CMD_PRLI: 9030 9031 if (cmd == ELS_CMD_ACC) { 9032 ndlp = emlxs_node_find_did(port, did); 9033 9034 if (ndlp && ndlp->nlp_active) { 9035 /* Close the node for any further normal IO */ 9036 emlxs_node_close(port, ndlp, hba->channel_fcp, 9037 pkt->pkt_timeout + 10); 9038 9039 /* Flush tx queues */ 9040 (void) emlxs_tx_node_flush(port, ndlp, 9041 &hba->chan[hba->channel_fcp], 0, 0); 9042 9043 /* Flush chip queues */ 9044 (void) emlxs_chipq_node_flush(port, 9045 &hba->chan[hba->channel_fcp], ndlp, 0); 9046 } 9047 } 9048 9049 break; 9050 9051 case ELS_CMD_PRLO: 9052 9053 if (cmd == ELS_CMD_ACC) { 9054 ndlp = emlxs_node_find_did(port, did); 9055 9056 if (ndlp && ndlp->nlp_active) { 9057 /* Close the node for any further normal IO */ 9058 emlxs_node_close(port, ndlp, 9059 hba->channel_fcp, 60); 9060 9061 /* Flush tx queues */ 9062 (void) emlxs_tx_node_flush(port, ndlp, 9063 &hba->chan[hba->channel_fcp], 0, 0); 9064 9065 /* Flush chip queues */ 9066 (void) emlxs_chipq_node_flush(port, 9067 &hba->chan[hba->channel_fcp], ndlp, 0); 9068 } 9069 } 9070 9071 break; 9072 9073 case ELS_CMD_LOGO: 9074 9075 if (cmd == ELS_CMD_ACC) { 9076 ndlp = emlxs_node_find_did(port, did); 9077 9078 if (ndlp && ndlp->nlp_active) { 9079 /* Close the node for any further normal IO */ 9080 emlxs_node_close(port, ndlp, 9081 hba->channel_fcp, 60); 9082 emlxs_node_close(port, ndlp, 9083 hba->channel_ip, 60); 9084 9085 /* Flush tx queues */ 9086 (void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0); 9087 9088 /* Flush chip queues */ 9089 (void) emlxs_chipq_node_flush(port, 0, ndlp, 0); 9090 } 9091 } 9092 9093 break; 9094 } 9095 9096 if (pkt->pkt_cmdlen) { 9097 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen, 9098 DDI_DMA_SYNC_FORDEV); 9099 } 9100 9101 HBASTATS.ElsRspIssued++; 9102 9103 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq); 9104 9105 return (FC_SUCCESS); 9106 9107 } /* emlxs_send_els_rsp() */ 9108 9109 9110 #ifdef MENLO_SUPPORT 9111 static int32_t 9112 
emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp) 9113 { 9114 emlxs_hba_t *hba = HBA; 9115 fc_packet_t *pkt; 9116 IOCBQ *iocbq; 9117 IOCB *iocb; 9118 CHANNEL *cp; 9119 NODELIST *ndlp; 9120 uint32_t did; 9121 uint32_t *lp; 9122 int32_t rval; 9123 9124 pkt = PRIV2PKT(sbp); 9125 did = EMLXS_MENLO_DID; 9126 lp = (uint32_t *)pkt->pkt_cmd; 9127 9128 iocbq = &sbp->iocbq; 9129 iocb = &iocbq->iocb; 9130 9131 ndlp = emlxs_node_find_did(port, did); 9132 9133 if (!ndlp || !ndlp->nlp_active) { 9134 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg, 9135 "Node not found. did=0x%x", did); 9136 9137 return (FC_BADPACKET); 9138 } 9139 9140 iocbq->node = (void *) ndlp; 9141 if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) { 9142 9143 if (rval == 0xff) { 9144 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1); 9145 rval = FC_SUCCESS; 9146 } 9147 9148 return (rval); 9149 } 9150 9151 cp = &hba->chan[hba->channel_ct]; 9152 cp->ulpSendCmd++; 9153 9154 if (pkt->pkt_tran_type == FC_PKT_EXCHANGE) { 9155 /* Cmd phase */ 9156 9157 /* Initalize iocb */ 9158 iocb->un.genreq64.param = pkt->pkt_cmd_fhdr.d_id; 9159 iocb->ULPCONTEXT = 0; 9160 iocb->ULPPU = 3; 9161 9162 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg, 9163 "%s: [%08x,%08x,%08x,%08x]", 9164 emlxs_menlo_cmd_xlate(BE_SWAP32(lp[0])), BE_SWAP32(lp[1]), 9165 BE_SWAP32(lp[2]), BE_SWAP32(lp[3]), BE_SWAP32(lp[4])); 9166 9167 } else { /* FC_PKT_OUTBOUND */ 9168 9169 /* MENLO_CMD_FW_DOWNLOAD Data Phase */ 9170 iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CX; 9171 9172 /* Initalize iocb */ 9173 iocb->un.genreq64.param = 0; 9174 iocb->ULPCONTEXT = pkt->pkt_cmd_fhdr.rx_id; 9175 iocb->ULPPU = 1; 9176 9177 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg, 9178 "%s: Data: rxid=0x%x size=%d", 9179 emlxs_menlo_cmd_xlate(MENLO_CMD_FW_DOWNLOAD), 9180 pkt->pkt_cmd_fhdr.rx_id, pkt->pkt_cmdlen); 9181 } 9182 9183 /* Initalize sbp */ 9184 mutex_enter(&sbp->mtx); 9185 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 9186 ((pkt->pkt_timeout > 0xff) ? 0 : 10); 9187 sbp->node = (void *) ndlp; 9188 sbp->lun = EMLXS_LUN_NONE; 9189 sbp->class = iocb->ULPCLASS; 9190 sbp->did = did; 9191 mutex_exit(&sbp->mtx); 9192 9193 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen, 9194 DDI_DMA_SYNC_FORDEV); 9195 9196 HBASTATS.CtCmdIssued++; 9197 9198 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq); 9199 9200 return (FC_SUCCESS); 9201 9202 } /* emlxs_send_menlo() */ 9203 #endif /* MENLO_SUPPORT */ 9204 9205 9206 static int32_t 9207 emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp) 9208 { 9209 emlxs_hba_t *hba = HBA; 9210 fc_packet_t *pkt; 9211 IOCBQ *iocbq; 9212 IOCB *iocb; 9213 NODELIST *ndlp; 9214 uint32_t did; 9215 CHANNEL *cp; 9216 int32_t rval; 9217 9218 pkt = PRIV2PKT(sbp); 9219 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id); 9220 9221 iocbq = &sbp->iocbq; 9222 iocb = &iocbq->iocb; 9223 9224 ndlp = emlxs_node_find_did(port, did); 9225 9226 if (!ndlp || !ndlp->nlp_active) { 9227 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg, 9228 "Node not found. 
did=0x%x", did); 9229 9230 return (FC_BADPACKET); 9231 } 9232 9233 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 9234 emlxs_swap_ct_pkt(sbp); 9235 #endif /* EMLXS_MODREV2X */ 9236 9237 iocbq->node = (void *)ndlp; 9238 if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) { 9239 9240 if (rval == 0xff) { 9241 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1); 9242 rval = FC_SUCCESS; 9243 } 9244 9245 return (rval); 9246 } 9247 9248 cp = &hba->chan[hba->channel_ct]; 9249 cp->ulpSendCmd++; 9250 9251 /* Initalize sbp */ 9252 mutex_enter(&sbp->mtx); 9253 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 9254 ((pkt->pkt_timeout > 0xff) ? 0 : 10); 9255 sbp->node = (void *)ndlp; 9256 sbp->lun = EMLXS_LUN_NONE; 9257 sbp->class = iocb->ULPCLASS; 9258 sbp->did = did; 9259 mutex_exit(&sbp->mtx); 9260 9261 if (did == NAMESERVER_DID) { 9262 SLI_CT_REQUEST *CtCmd; 9263 uint32_t *lp0; 9264 9265 CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd; 9266 lp0 = (uint32_t *)pkt->pkt_cmd; 9267 9268 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg, 9269 "%s: did=%x [%08x,%08x]", 9270 emlxs_ctcmd_xlate( 9271 LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)), 9272 did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5])); 9273 9274 if (hba->flag & FC_NPIV_DELAY_REQUIRED) { 9275 sbp->pkt_flags |= PACKET_DELAY_REQUIRED; 9276 } 9277 9278 } else if (did == FDMI_DID) { 9279 SLI_CT_REQUEST *CtCmd; 9280 uint32_t *lp0; 9281 9282 CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd; 9283 lp0 = (uint32_t *)pkt->pkt_cmd; 9284 9285 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg, 9286 "%s: did=%x [%08x,%08x]", 9287 emlxs_mscmd_xlate( 9288 LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)), 9289 did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5])); 9290 } else { 9291 SLI_CT_REQUEST *CtCmd; 9292 uint32_t *lp0; 9293 9294 CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd; 9295 lp0 = (uint32_t *)pkt->pkt_cmd; 9296 9297 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg, 9298 "%s: did=%x [%08x,%08x]", 9299 emlxs_rmcmd_xlate( 9300 LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)), 9301 did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5])); 9302 } 9303 9304 if (pkt->pkt_cmdlen) { 9305 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen, 9306 DDI_DMA_SYNC_FORDEV); 9307 } 9308 9309 HBASTATS.CtCmdIssued++; 9310 9311 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq); 9312 9313 return (FC_SUCCESS); 9314 9315 } /* emlxs_send_ct() */ 9316 9317 9318 static int32_t 9319 emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp) 9320 { 9321 emlxs_hba_t *hba = HBA; 9322 fc_packet_t *pkt; 9323 CHANNEL *cp; 9324 IOCBQ *iocbq; 9325 IOCB *iocb; 9326 uint32_t *cmd; 9327 SLI_CT_REQUEST *CtCmd; 9328 int32_t rval; 9329 9330 pkt = PRIV2PKT(sbp); 9331 CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd; 9332 cmd = (uint32_t *)pkt->pkt_cmd; 9333 9334 iocbq = &sbp->iocbq; 9335 iocb = &iocbq->iocb; 9336 9337 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 9338 emlxs_swap_ct_pkt(sbp); 9339 #endif /* EMLXS_MODREV2X */ 9340 9341 if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) { 9342 9343 if (rval == 0xff) { 9344 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1); 9345 rval = FC_SUCCESS; 9346 } 9347 9348 return (rval); 9349 } 9350 9351 cp = &hba->chan[hba->channel_ct]; 9352 cp->ulpSendCmd++; 9353 9354 /* Initalize sbp */ 9355 mutex_enter(&sbp->mtx); 9356 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 9357 ((pkt->pkt_timeout > 0xff) ? 
0 : 10); 9358 sbp->node = NULL; 9359 sbp->lun = EMLXS_LUN_NONE; 9360 sbp->class = iocb->ULPCLASS; 9361 mutex_exit(&sbp->mtx); 9362 9363 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_reply_msg, 9364 "%s: Rsn=%x Exp=%x [%08x,%08x] rxid=%x ", 9365 emlxs_rmcmd_xlate(LE_SWAP16( 9366 CtCmd->CommandResponse.bits.CmdRsp)), 9367 CtCmd->ReasonCode, CtCmd->Explanation, 9368 LE_SWAP32(cmd[4]), LE_SWAP32(cmd[5]), 9369 pkt->pkt_cmd_fhdr.rx_id); 9370 9371 if (pkt->pkt_cmdlen) { 9372 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen, 9373 DDI_DMA_SYNC_FORDEV); 9374 } 9375 9376 HBASTATS.CtRspIssued++; 9377 9378 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq); 9379 9380 return (FC_SUCCESS); 9381 9382 } /* emlxs_send_ct_rsp() */ 9383 9384 9385 /* 9386 * emlxs_get_instance() 9387 * Given a ddi ddiinst, return a Fibre Channel (emlx) ddiinst. 9388 */ 9389 extern uint32_t 9390 emlxs_get_instance(int32_t ddiinst) 9391 { 9392 uint32_t i; 9393 uint32_t inst; 9394 9395 mutex_enter(&emlxs_device.lock); 9396 9397 inst = MAX_FC_BRDS; 9398 for (i = 0; i < emlxs_instance_count; i++) { 9399 if (emlxs_instance[i] == ddiinst) { 9400 inst = i; 9401 break; 9402 } 9403 } 9404 9405 mutex_exit(&emlxs_device.lock); 9406 9407 return (inst); 9408 9409 } /* emlxs_get_instance() */ 9410 9411 9412 /* 9413 * emlxs_add_instance() 9414 * Given a ddi ddiinst, create a Fibre Channel (emlx) ddiinst. 9415 * emlx ddiinsts are the order that emlxs_attach gets called, starting at 0. 9416 */ 9417 static uint32_t 9418 emlxs_add_instance(int32_t ddiinst) 9419 { 9420 uint32_t i; 9421 9422 mutex_enter(&emlxs_device.lock); 9423 9424 /* First see if the ddiinst already exists */ 9425 for (i = 0; i < emlxs_instance_count; i++) { 9426 if (emlxs_instance[i] == ddiinst) { 9427 break; 9428 } 9429 } 9430 9431 /* If it doesn't already exist, add it */ 9432 if (i >= emlxs_instance_count) { 9433 if ((i = emlxs_instance_count) < MAX_FC_BRDS) { 9434 emlxs_instance[i] = ddiinst; 9435 emlxs_instance_count++; 9436 emlxs_device.hba_count = emlxs_instance_count; 9437 } 9438 } 9439 9440 mutex_exit(&emlxs_device.lock); 9441 9442 return (i); 9443 9444 } /* emlxs_add_instance() */ 9445 9446 9447 /*ARGSUSED*/ 9448 extern void 9449 emlxs_pkt_complete(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat, 9450 uint32_t doneq) 9451 { 9452 emlxs_hba_t *hba; 9453 emlxs_port_t *port; 9454 emlxs_buf_t *fpkt; 9455 9456 port = sbp->port; 9457 9458 if (!port) { 9459 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_completion_error_msg, 9460 "NULL port found. sbp=%p flags=%x", sbp, sbp->pkt_flags); 9461 9462 return; 9463 } 9464 9465 hba = HBA; 9466 9467 if ((hba->sli_mode == EMLXS_HBA_SLI4_MODE) && 9468 (sbp->iotag)) { 9469 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg, 9470 "WARNING: Completing IO with iotag. sbp=%p iotag=%x " 9471 "xri_flags=%x", 9472 sbp, sbp->iotag, ((sbp->xrip)? sbp->xrip->flag:0)); 9473 9474 emlxs_sli4_free_xri(hba, sbp, sbp->xrip, 1); 9475 } 9476 9477 mutex_enter(&sbp->mtx); 9478 9479 /* Check for error conditions */ 9480 if (sbp->pkt_flags & (PACKET_ULP_OWNED | PACKET_COMPLETED | 9481 PACKET_IN_DONEQ | PACKET_IN_COMPLETION | 9482 PACKET_IN_TXQ | PACKET_IN_CHIPQ)) { 9483 if (sbp->pkt_flags & PACKET_ULP_OWNED) { 9484 EMLXS_MSGF(EMLXS_CONTEXT, 9485 &emlxs_pkt_completion_error_msg, 9486 "Packet already returned. sbp=%p flags=%x", sbp, 9487 sbp->pkt_flags); 9488 } 9489 9490 else if (sbp->pkt_flags & PACKET_COMPLETED) { 9491 EMLXS_MSGF(EMLXS_CONTEXT, 9492 &emlxs_pkt_completion_error_msg, 9493 "Packet already completed. 
sbp=%p flags=%x", sbp, 9494 sbp->pkt_flags); 9495 } 9496 9497 else if (sbp->pkt_flags & PACKET_IN_DONEQ) { 9498 EMLXS_MSGF(EMLXS_CONTEXT, 9499 &emlxs_pkt_completion_error_msg, 9500 "Pkt already on done queue. sbp=%p flags=%x", sbp, 9501 sbp->pkt_flags); 9502 } 9503 9504 else if (sbp->pkt_flags & PACKET_IN_COMPLETION) { 9505 EMLXS_MSGF(EMLXS_CONTEXT, 9506 &emlxs_pkt_completion_error_msg, 9507 "Packet already in completion. sbp=%p flags=%x", 9508 sbp, sbp->pkt_flags); 9509 } 9510 9511 else if (sbp->pkt_flags & PACKET_IN_CHIPQ) { 9512 EMLXS_MSGF(EMLXS_CONTEXT, 9513 &emlxs_pkt_completion_error_msg, 9514 "Packet still on chip queue. sbp=%p flags=%x", 9515 sbp, sbp->pkt_flags); 9516 } 9517 9518 else if (sbp->pkt_flags & PACKET_IN_TXQ) { 9519 EMLXS_MSGF(EMLXS_CONTEXT, 9520 &emlxs_pkt_completion_error_msg, 9521 "Packet still on tx queue. sbp=%p flags=%x", sbp, 9522 sbp->pkt_flags); 9523 } 9524 9525 mutex_exit(&sbp->mtx); 9526 return; 9527 } 9528 9529 /* Packet is now in completion */ 9530 sbp->pkt_flags |= PACKET_IN_COMPLETION; 9531 9532 /* Set the state if not already set */ 9533 if (!(sbp->pkt_flags & PACKET_STATE_VALID)) { 9534 emlxs_set_pkt_state(sbp, iostat, localstat, 0); 9535 } 9536 9537 /* Check for parent flush packet */ 9538 /* If pkt has a parent flush packet then adjust its count now */ 9539 fpkt = sbp->fpkt; 9540 if (fpkt) { 9541 /* 9542 * We will try to NULL sbp->fpkt inside the 9543 * fpkt's mutex if possible 9544 */ 9545 9546 if (!(fpkt->pkt_flags & PACKET_ULP_OWNED)) { 9547 mutex_enter(&fpkt->mtx); 9548 if (fpkt->flush_count) { 9549 fpkt->flush_count--; 9550 } 9551 sbp->fpkt = NULL; 9552 mutex_exit(&fpkt->mtx); 9553 } else { /* fpkt has been returned already */ 9554 9555 sbp->fpkt = NULL; 9556 } 9557 } 9558 9559 /* If pkt is polled, then wake up sleeping thread */ 9560 if (sbp->pkt_flags & PACKET_POLLED) { 9561 /* Don't set the PACKET_ULP_OWNED flag here */ 9562 /* because the polling thread will do it */ 9563 sbp->pkt_flags |= PACKET_COMPLETED; 9564 mutex_exit(&sbp->mtx); 9565 9566 /* Wake up sleeping thread */ 9567 mutex_enter(&EMLXS_PKT_LOCK); 9568 cv_broadcast(&EMLXS_PKT_CV); 9569 mutex_exit(&EMLXS_PKT_LOCK); 9570 } 9571 9572 /* If packet was generated by our driver, */ 9573 /* then complete it immediately */ 9574 else if (sbp->pkt_flags & PACKET_ALLOCATED) { 9575 mutex_exit(&sbp->mtx); 9576 9577 emlxs_iodone(sbp); 9578 } 9579 9580 /* Put the pkt on the done queue for callback */ 9581 /* completion in another thread */ 9582 else { 9583 sbp->pkt_flags |= PACKET_IN_DONEQ; 9584 sbp->next = NULL; 9585 mutex_exit(&sbp->mtx); 9586 9587 /* Put pkt on doneq, so I/O's will be completed in order */ 9588 mutex_enter(&EMLXS_PORT_LOCK); 9589 if (hba->iodone_tail == NULL) { 9590 hba->iodone_list = sbp; 9591 hba->iodone_count = 1; 9592 } else { 9593 hba->iodone_tail->next = sbp; 9594 hba->iodone_count++; 9595 } 9596 hba->iodone_tail = sbp; 9597 mutex_exit(&EMLXS_PORT_LOCK); 9598 9599 /* Trigger a thread to service the doneq */ 9600 emlxs_thread_trigger1(&hba->iodone_thread, 9601 emlxs_iodone_server); 9602 } 9603 9604 return; 9605 9606 } /* emlxs_pkt_complete() */ 9607 9608 9609 #ifdef SAN_DIAG_SUPPORT 9610 /* 9611 * This routine is called with EMLXS_PORT_LOCK held so we can just increment 9612 * normally. Don't have to use atomic operations. 
9613 */ 9614 extern void 9615 emlxs_update_sd_bucket(emlxs_buf_t *sbp) 9616 { 9617 emlxs_port_t *vport; 9618 fc_packet_t *pkt; 9619 uint32_t did; 9620 hrtime_t t; 9621 hrtime_t delta_time; 9622 int i; 9623 NODELIST *ndlp; 9624 9625 vport = sbp->port; 9626 9627 if ((sd_bucket.search_type == 0) || 9628 (vport->sd_io_latency_state != SD_COLLECTING)) 9629 return; 9630 9631 /* Compute the iolatency time in microseconds */ 9632 t = gethrtime(); 9633 delta_time = t - sbp->sd_start_time; 9634 pkt = PRIV2PKT(sbp); 9635 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id); 9636 ndlp = emlxs_node_find_did(vport, did); 9637 9638 if (ndlp) { 9639 if (delta_time >= 9640 sd_bucket.values[SD_IO_LATENCY_MAX_BUCKETS - 1]) 9641 ndlp->sd_dev_bucket[SD_IO_LATENCY_MAX_BUCKETS - 1]. 9642 count++; 9643 else if (delta_time <= sd_bucket.values[0]) 9644 ndlp->sd_dev_bucket[0].count++; 9645 else { 9646 for (i = 1; i < SD_IO_LATENCY_MAX_BUCKETS; i++) { 9647 if ((delta_time > sd_bucket.values[i-1]) && 9648 (delta_time <= sd_bucket.values[i])) { 9649 ndlp->sd_dev_bucket[i].count++; 9650 break; 9651 } 9652 } 9653 } 9654 } 9655 } 9656 #endif /* SAN_DIAG_SUPPORT */ 9657 9658 /*ARGSUSED*/ 9659 static void 9660 emlxs_iodone_server(void *arg1, void *arg2, void *arg3) 9661 { 9662 emlxs_hba_t *hba = (emlxs_hba_t *)arg1; 9663 emlxs_buf_t *sbp; 9664 9665 mutex_enter(&EMLXS_PORT_LOCK); 9666 9667 /* Remove one pkt from the doneq head and complete it */ 9668 while ((sbp = hba->iodone_list) != NULL) { 9669 if ((hba->iodone_list = sbp->next) == NULL) { 9670 hba->iodone_tail = NULL; 9671 hba->iodone_count = 0; 9672 } else { 9673 hba->iodone_count--; 9674 } 9675 9676 mutex_exit(&EMLXS_PORT_LOCK); 9677 9678 /* Prepare the pkt for completion */ 9679 mutex_enter(&sbp->mtx); 9680 sbp->next = NULL; 9681 sbp->pkt_flags &= ~PACKET_IN_DONEQ; 9682 mutex_exit(&sbp->mtx); 9683 9684 /* Complete the IO now */ 9685 emlxs_iodone(sbp); 9686 9687 /* Reacquire lock and check if more work is to be done */ 9688 mutex_enter(&EMLXS_PORT_LOCK); 9689 } 9690 9691 mutex_exit(&EMLXS_PORT_LOCK); 9692 9693 #ifdef FMA_SUPPORT 9694 if (hba->flag & FC_DMA_CHECK_ERROR) { 9695 emlxs_thread_spawn(hba, emlxs_restart_thread, 9696 NULL, NULL); 9697 } 9698 #endif /* FMA_SUPPORT */ 9699 9700 return; 9701 9702 } /* End emlxs_iodone_server */ 9703 9704 9705 static void 9706 emlxs_iodone(emlxs_buf_t *sbp) 9707 { 9708 #ifdef FMA_SUPPORT 9709 emlxs_port_t *port = sbp->port; 9710 emlxs_hba_t *hba = port->hba; 9711 #endif /* FMA_SUPPORT */ 9712 9713 fc_packet_t *pkt; 9714 CHANNEL *cp; 9715 9716 pkt = PRIV2PKT(sbp); 9717 9718 /* Check one more time that the pkt has not already been returned */ 9719 if (sbp->pkt_flags & PACKET_ULP_OWNED) { 9720 return; 9721 } 9722 9723 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 9724 emlxs_unswap_pkt(sbp); 9725 #endif /* EMLXS_MODREV2X */ 9726 9727 mutex_enter(&sbp->mtx); 9728 sbp->pkt_flags |= (PACKET_COMPLETED | PACKET_ULP_OWNED); 9729 mutex_exit(&sbp->mtx); 9730 9731 if (pkt->pkt_comp) { 9732 #ifdef FMA_SUPPORT 9733 emlxs_check_dma(hba, sbp); 9734 #endif /* FMA_SUPPORT */ 9735 9736 if (sbp->channel) { 9737 cp = (CHANNEL *)sbp->channel; 9738 cp->ulpCmplCmd++; 9739 } 9740 9741 (*pkt->pkt_comp) (pkt); 9742 } 9743 9744 return; 9745 9746 } /* emlxs_iodone() */ 9747 9748 9749 9750 extern fc_unsol_buf_t * 9751 emlxs_ub_find(emlxs_port_t *port, uint32_t token) 9752 { 9753 emlxs_unsol_buf_t *pool; 9754 fc_unsol_buf_t *ubp; 9755 emlxs_ub_priv_t *ub_priv; 9756 9757 /* Check if this is a valid ub token */ 9758 if (token < EMLXS_UB_TOKEN_OFFSET) { 9759 return (NULL); 9760 } 
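	/*
	 * In effect the pool walk below is a direct index lookup: each
	 * unsolicited buffer pool owns a contiguous token range, and for
	 * the pool whose [pool_first_token, pool_last_token] range covers
	 * the token,
	 *
	 *	ubp = &pool->fc_ubufs[token - pool->pool_first_token];
	 *
	 * The buffer is returned only if its private token matches and it
	 * is still marked EMLXS_UB_IN_USE.
	 */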
9761 9762 mutex_enter(&EMLXS_UB_LOCK); 9763 9764 pool = port->ub_pool; 9765 while (pool) { 9766 /* Find a pool with the proper token range */ 9767 if (token >= pool->pool_first_token && 9768 token <= pool->pool_last_token) { 9769 ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[(token - 9770 pool->pool_first_token)]; 9771 ub_priv = ubp->ub_fca_private; 9772 9773 if (ub_priv->token != token) { 9774 EMLXS_MSGF(EMLXS_CONTEXT, 9775 &emlxs_sfs_debug_msg, 9776 "ub_find: Invalid token=%x", ubp, token, 9777 ub_priv->token); 9778 9779 ubp = NULL; 9780 } 9781 9782 else if (!(ub_priv->flags & EMLXS_UB_IN_USE)) { 9783 EMLXS_MSGF(EMLXS_CONTEXT, 9784 &emlxs_sfs_debug_msg, 9785 "ub_find: Buffer not in use. buffer=%p " 9786 "token=%x", ubp, token); 9787 9788 ubp = NULL; 9789 } 9790 9791 mutex_exit(&EMLXS_UB_LOCK); 9792 9793 return (ubp); 9794 } 9795 9796 pool = pool->pool_next; 9797 } 9798 9799 mutex_exit(&EMLXS_UB_LOCK); 9800 9801 return (NULL); 9802 9803 } /* emlxs_ub_find() */ 9804 9805 9806 9807 extern fc_unsol_buf_t * 9808 emlxs_ub_get(emlxs_port_t *port, uint32_t size, uint32_t type, 9809 uint32_t reserve) 9810 { 9811 emlxs_hba_t *hba = HBA; 9812 emlxs_unsol_buf_t *pool; 9813 fc_unsol_buf_t *ubp; 9814 emlxs_ub_priv_t *ub_priv; 9815 uint32_t i; 9816 uint32_t resv_flag; 9817 uint32_t pool_free; 9818 uint32_t pool_free_resv; 9819 9820 mutex_enter(&EMLXS_UB_LOCK); 9821 9822 pool = port->ub_pool; 9823 while (pool) { 9824 /* Find a pool of the appropriate type and size */ 9825 if ((pool->pool_available == 0) || 9826 (pool->pool_type != type) || 9827 (pool->pool_buf_size < size)) { 9828 goto next_pool; 9829 } 9830 9831 9832 /* Adjust free counts based on availablity */ 9833 /* The free reserve count gets first priority */ 9834 pool_free_resv = 9835 min(pool->pool_free_resv, pool->pool_available); 9836 pool_free = 9837 min(pool->pool_free, 9838 (pool->pool_available - pool_free_resv)); 9839 9840 /* Initialize reserve flag */ 9841 resv_flag = reserve; 9842 9843 if (resv_flag) { 9844 if (pool_free_resv == 0) { 9845 if (pool_free == 0) { 9846 goto next_pool; 9847 } 9848 resv_flag = 0; 9849 } 9850 } else if (pool_free == 0) { 9851 goto next_pool; 9852 } 9853 9854 /* Find next available free buffer in this pool */ 9855 for (i = 0; i < pool->pool_nentries; i++) { 9856 ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i]; 9857 ub_priv = ubp->ub_fca_private; 9858 9859 if (!ub_priv->available || 9860 ub_priv->flags != EMLXS_UB_FREE) { 9861 continue; 9862 } 9863 9864 ub_priv->time = hba->timer_tics; 9865 9866 /* Timeout in 5 minutes */ 9867 ub_priv->timeout = (5 * 60); 9868 9869 ub_priv->flags = EMLXS_UB_IN_USE; 9870 9871 /* Alloc the buffer from the pool */ 9872 if (resv_flag) { 9873 ub_priv->flags |= EMLXS_UB_RESV; 9874 pool->pool_free_resv--; 9875 } else { 9876 pool->pool_free--; 9877 } 9878 9879 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg, 9880 "ub_get: ubp=%p token=%x (%d,%d,%d,%d)", ubp, 9881 ub_priv->token, pool->pool_nentries, 9882 pool->pool_available, pool->pool_free, 9883 pool->pool_free_resv); 9884 9885 mutex_exit(&EMLXS_UB_LOCK); 9886 9887 return (ubp); 9888 } 9889 next_pool: 9890 9891 pool = pool->pool_next; 9892 } 9893 9894 mutex_exit(&EMLXS_UB_LOCK); 9895 9896 return (NULL); 9897 9898 } /* emlxs_ub_get() */ 9899 9900 9901 9902 extern void 9903 emlxs_set_pkt_state(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat, 9904 uint32_t lock) 9905 { 9906 fc_packet_t *pkt; 9907 fcp_rsp_t *fcp_rsp; 9908 uint32_t i; 9909 emlxs_xlat_err_t *tptr; 9910 emlxs_xlat_err_t *entry; 9911 9912 9913 pkt = PRIV2PKT(sbp); 9914 9915 if 
(lock) { 9916 mutex_enter(&sbp->mtx); 9917 } 9918 9919 if (!(sbp->pkt_flags & PACKET_STATE_VALID)) { 9920 sbp->pkt_flags |= PACKET_STATE_VALID; 9921 9922 /* Perform table lookup */ 9923 entry = NULL; 9924 if (iostat != IOSTAT_LOCAL_REJECT) { 9925 tptr = emlxs_iostat_tbl; 9926 for (i = 0; i < IOSTAT_MAX; i++, tptr++) { 9927 if (iostat == tptr->emlxs_status) { 9928 entry = tptr; 9929 break; 9930 } 9931 } 9932 } else { /* iostate == IOSTAT_LOCAL_REJECT */ 9933 9934 tptr = emlxs_ioerr_tbl; 9935 for (i = 0; i < IOERR_MAX; i++, tptr++) { 9936 if (localstat == tptr->emlxs_status) { 9937 entry = tptr; 9938 break; 9939 } 9940 } 9941 } 9942 9943 if (entry) { 9944 pkt->pkt_state = entry->pkt_state; 9945 pkt->pkt_reason = entry->pkt_reason; 9946 pkt->pkt_expln = entry->pkt_expln; 9947 pkt->pkt_action = entry->pkt_action; 9948 } else { 9949 /* Set defaults */ 9950 pkt->pkt_state = FC_PKT_TRAN_ERROR; 9951 pkt->pkt_reason = FC_REASON_ABORTED; 9952 pkt->pkt_expln = FC_EXPLN_NONE; 9953 pkt->pkt_action = FC_ACTION_RETRYABLE; 9954 } 9955 9956 9957 /* Set the residual counts and response frame */ 9958 /* Check if response frame was received from the chip */ 9959 /* If so, then the residual counts will already be set */ 9960 if (!(sbp->pkt_flags & (PACKET_FCP_RSP_VALID | 9961 PACKET_CT_RSP_VALID | PACKET_ELS_RSP_VALID))) { 9962 /* We have to create the response frame */ 9963 if (iostat == IOSTAT_SUCCESS) { 9964 pkt->pkt_resp_resid = 0; 9965 pkt->pkt_data_resid = 0; 9966 9967 if ((pkt->pkt_cmd_fhdr.type == 9968 FC_TYPE_SCSI_FCP) && pkt->pkt_rsplen && 9969 pkt->pkt_resp) { 9970 fcp_rsp = (fcp_rsp_t *)pkt->pkt_resp; 9971 9972 fcp_rsp->fcp_u.fcp_status. 9973 rsp_len_set = 1; 9974 fcp_rsp->fcp_response_len = 8; 9975 } 9976 } else { 9977 /* Otherwise assume no data */ 9978 /* and no response received */ 9979 pkt->pkt_data_resid = pkt->pkt_datalen; 9980 pkt->pkt_resp_resid = pkt->pkt_rsplen; 9981 } 9982 } 9983 } 9984 9985 if (lock) { 9986 mutex_exit(&sbp->mtx); 9987 } 9988 9989 return; 9990 9991 } /* emlxs_set_pkt_state() */ 9992 9993 9994 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 9995 9996 extern void 9997 emlxs_swap_service_params(SERV_PARM *sp) 9998 { 9999 uint16_t *p; 10000 int size; 10001 int i; 10002 10003 size = (sizeof (CSP) - 4) / 2; 10004 p = (uint16_t *)&sp->cmn; 10005 for (i = 0; i < size; i++) { 10006 p[i] = LE_SWAP16(p[i]); 10007 } 10008 sp->cmn.e_d_tov = LE_SWAP32(sp->cmn.e_d_tov); 10009 10010 size = sizeof (CLASS_PARMS) / 2; 10011 p = (uint16_t *)&sp->cls1; 10012 for (i = 0; i < size; i++, p++) { 10013 *p = LE_SWAP16(*p); 10014 } 10015 10016 size = sizeof (CLASS_PARMS) / 2; 10017 p = (uint16_t *)&sp->cls2; 10018 for (i = 0; i < size; i++, p++) { 10019 *p = LE_SWAP16(*p); 10020 } 10021 10022 size = sizeof (CLASS_PARMS) / 2; 10023 p = (uint16_t *)&sp->cls3; 10024 for (i = 0; i < size; i++, p++) { 10025 *p = LE_SWAP16(*p); 10026 } 10027 10028 size = sizeof (CLASS_PARMS) / 2; 10029 p = (uint16_t *)&sp->cls4; 10030 for (i = 0; i < size; i++, p++) { 10031 *p = LE_SWAP16(*p); 10032 } 10033 10034 return; 10035 10036 } /* emlxs_swap_service_params() */ 10037 10038 extern void 10039 emlxs_unswap_pkt(emlxs_buf_t *sbp) 10040 { 10041 if (sbp->pkt_flags & PACKET_FCP_SWAPPED) { 10042 emlxs_swap_fcp_pkt(sbp); 10043 } 10044 10045 else if (sbp->pkt_flags & PACKET_ELS_SWAPPED) { 10046 emlxs_swap_els_pkt(sbp); 10047 } 10048 10049 else if (sbp->pkt_flags & PACKET_CT_SWAPPED) { 10050 emlxs_swap_ct_pkt(sbp); 10051 } 10052 10053 } /* emlxs_unswap_pkt() */ 10054 10055 10056 extern void 10057 emlxs_swap_fcp_pkt(emlxs_buf_t *sbp) 
10058 { 10059 fc_packet_t *pkt; 10060 FCP_CMND *cmd; 10061 fcp_rsp_t *rsp; 10062 uint16_t *lunp; 10063 uint32_t i; 10064 10065 mutex_enter(&sbp->mtx); 10066 10067 if (sbp->pkt_flags & PACKET_ALLOCATED) { 10068 mutex_exit(&sbp->mtx); 10069 return; 10070 } 10071 10072 if (sbp->pkt_flags & PACKET_FCP_SWAPPED) { 10073 sbp->pkt_flags &= ~PACKET_FCP_SWAPPED; 10074 } else { 10075 sbp->pkt_flags |= PACKET_FCP_SWAPPED; 10076 } 10077 10078 mutex_exit(&sbp->mtx); 10079 10080 pkt = PRIV2PKT(sbp); 10081 10082 cmd = (FCP_CMND *)pkt->pkt_cmd; 10083 rsp = (pkt->pkt_rsplen && 10084 (sbp->pkt_flags & PACKET_FCP_RSP_VALID)) ? 10085 (fcp_rsp_t *)pkt->pkt_resp : NULL; 10086 10087 /* The size of data buffer needs to be swapped. */ 10088 cmd->fcpDl = LE_SWAP32(cmd->fcpDl); 10089 10090 /* 10091 * Swap first 2 words of FCP CMND payload. 10092 */ 10093 lunp = (uint16_t *)&cmd->fcpLunMsl; 10094 for (i = 0; i < 4; i++) { 10095 lunp[i] = LE_SWAP16(lunp[i]); 10096 } 10097 10098 if (rsp) { 10099 rsp->fcp_resid = LE_SWAP32(rsp->fcp_resid); 10100 rsp->fcp_sense_len = LE_SWAP32(rsp->fcp_sense_len); 10101 rsp->fcp_response_len = LE_SWAP32(rsp->fcp_response_len); 10102 } 10103 10104 return; 10105 10106 } /* emlxs_swap_fcp_pkt() */ 10107 10108 10109 extern void 10110 emlxs_swap_els_pkt(emlxs_buf_t *sbp) 10111 { 10112 fc_packet_t *pkt; 10113 uint32_t *cmd; 10114 uint32_t *rsp; 10115 uint32_t command; 10116 uint16_t *c; 10117 uint32_t i; 10118 uint32_t swapped; 10119 10120 mutex_enter(&sbp->mtx); 10121 10122 if (sbp->pkt_flags & PACKET_ALLOCATED) { 10123 mutex_exit(&sbp->mtx); 10124 return; 10125 } 10126 10127 if (sbp->pkt_flags & PACKET_ELS_SWAPPED) { 10128 sbp->pkt_flags &= ~PACKET_ELS_SWAPPED; 10129 swapped = 1; 10130 } else { 10131 sbp->pkt_flags |= PACKET_ELS_SWAPPED; 10132 swapped = 0; 10133 } 10134 10135 mutex_exit(&sbp->mtx); 10136 10137 pkt = PRIV2PKT(sbp); 10138 10139 cmd = (uint32_t *)pkt->pkt_cmd; 10140 rsp = (pkt->pkt_rsplen && 10141 (sbp->pkt_flags & PACKET_ELS_RSP_VALID)) ? 
10142 (uint32_t *)pkt->pkt_resp : NULL; 10143 10144 if (!swapped) { 10145 cmd[0] = LE_SWAP32(cmd[0]); 10146 command = cmd[0] & ELS_CMD_MASK; 10147 } else { 10148 command = cmd[0] & ELS_CMD_MASK; 10149 cmd[0] = LE_SWAP32(cmd[0]); 10150 } 10151 10152 if (rsp) { 10153 rsp[0] = LE_SWAP32(rsp[0]); 10154 } 10155 10156 switch (command) { 10157 case ELS_CMD_ACC: 10158 if (sbp->ucmd == ELS_CMD_ADISC) { 10159 /* Hard address of originator */ 10160 cmd[1] = LE_SWAP32(cmd[1]); 10161 10162 /* N_Port ID of originator */ 10163 cmd[6] = LE_SWAP32(cmd[6]); 10164 } 10165 break; 10166 10167 case ELS_CMD_PLOGI: 10168 case ELS_CMD_FLOGI: 10169 case ELS_CMD_FDISC: 10170 if (rsp) { 10171 emlxs_swap_service_params((SERV_PARM *) & rsp[1]); 10172 } 10173 break; 10174 10175 case ELS_CMD_LOGO: 10176 cmd[1] = LE_SWAP32(cmd[1]); /* N_Port ID */ 10177 break; 10178 10179 case ELS_CMD_RLS: 10180 cmd[1] = LE_SWAP32(cmd[1]); 10181 10182 if (rsp) { 10183 for (i = 0; i < 6; i++) { 10184 rsp[1 + i] = LE_SWAP32(rsp[1 + i]); 10185 } 10186 } 10187 break; 10188 10189 case ELS_CMD_ADISC: 10190 cmd[1] = LE_SWAP32(cmd[1]); /* Hard address of originator */ 10191 cmd[6] = LE_SWAP32(cmd[6]); /* N_Port ID of originator */ 10192 break; 10193 10194 case ELS_CMD_PRLI: 10195 c = (uint16_t *)&cmd[1]; 10196 c[1] = LE_SWAP16(c[1]); 10197 10198 cmd[4] = LE_SWAP32(cmd[4]); 10199 10200 if (rsp) { 10201 rsp[4] = LE_SWAP32(rsp[4]); 10202 } 10203 break; 10204 10205 case ELS_CMD_SCR: 10206 cmd[1] = LE_SWAP32(cmd[1]); 10207 break; 10208 10209 case ELS_CMD_LINIT: 10210 if (rsp) { 10211 rsp[1] = LE_SWAP32(rsp[1]); 10212 } 10213 break; 10214 10215 default: 10216 break; 10217 } 10218 10219 return; 10220 10221 } /* emlxs_swap_els_pkt() */ 10222 10223 10224 extern void 10225 emlxs_swap_ct_pkt(emlxs_buf_t *sbp) 10226 { 10227 fc_packet_t *pkt; 10228 uint32_t *cmd; 10229 uint32_t *rsp; 10230 uint32_t command; 10231 uint32_t i; 10232 uint32_t swapped; 10233 10234 mutex_enter(&sbp->mtx); 10235 10236 if (sbp->pkt_flags & PACKET_ALLOCATED) { 10237 mutex_exit(&sbp->mtx); 10238 return; 10239 } 10240 10241 if (sbp->pkt_flags & PACKET_CT_SWAPPED) { 10242 sbp->pkt_flags &= ~PACKET_CT_SWAPPED; 10243 swapped = 1; 10244 } else { 10245 sbp->pkt_flags |= PACKET_CT_SWAPPED; 10246 swapped = 0; 10247 } 10248 10249 mutex_exit(&sbp->mtx); 10250 10251 pkt = PRIV2PKT(sbp); 10252 10253 cmd = (uint32_t *)pkt->pkt_cmd; 10254 rsp = (pkt->pkt_rsplen && 10255 (sbp->pkt_flags & PACKET_CT_RSP_VALID)) ? 
10256 (uint32_t *)pkt->pkt_resp : NULL; 10257 10258 if (!swapped) { 10259 cmd[0] = 0x01000000; 10260 command = cmd[2]; 10261 } 10262 10263 cmd[0] = LE_SWAP32(cmd[0]); 10264 cmd[1] = LE_SWAP32(cmd[1]); 10265 cmd[2] = LE_SWAP32(cmd[2]); 10266 cmd[3] = LE_SWAP32(cmd[3]); 10267 10268 if (swapped) { 10269 command = cmd[2]; 10270 } 10271 10272 switch ((command >> 16)) { 10273 case SLI_CTNS_GA_NXT: 10274 cmd[4] = LE_SWAP32(cmd[4]); 10275 break; 10276 10277 case SLI_CTNS_GPN_ID: 10278 case SLI_CTNS_GNN_ID: 10279 case SLI_CTNS_RPN_ID: 10280 case SLI_CTNS_RNN_ID: 10281 case SLI_CTNS_RSPN_ID: 10282 cmd[4] = LE_SWAP32(cmd[4]); 10283 break; 10284 10285 case SLI_CTNS_RCS_ID: 10286 case SLI_CTNS_RPT_ID: 10287 cmd[4] = LE_SWAP32(cmd[4]); 10288 cmd[5] = LE_SWAP32(cmd[5]); 10289 break; 10290 10291 case SLI_CTNS_RFT_ID: 10292 cmd[4] = LE_SWAP32(cmd[4]); 10293 10294 /* Swap FC4 types */ 10295 for (i = 0; i < 8; i++) { 10296 cmd[5 + i] = LE_SWAP32(cmd[5 + i]); 10297 } 10298 break; 10299 10300 case SLI_CTNS_GFT_ID: 10301 if (rsp) { 10302 /* Swap FC4 types */ 10303 for (i = 0; i < 8; i++) { 10304 rsp[4 + i] = LE_SWAP32(rsp[4 + i]); 10305 } 10306 } 10307 break; 10308 10309 case SLI_CTNS_GCS_ID: 10310 case SLI_CTNS_GSPN_ID: 10311 case SLI_CTNS_GSNN_NN: 10312 case SLI_CTNS_GIP_NN: 10313 case SLI_CTNS_GIPA_NN: 10314 10315 case SLI_CTNS_GPT_ID: 10316 case SLI_CTNS_GID_NN: 10317 case SLI_CTNS_GNN_IP: 10318 case SLI_CTNS_GIPA_IP: 10319 case SLI_CTNS_GID_FT: 10320 case SLI_CTNS_GID_PT: 10321 case SLI_CTNS_GID_PN: 10322 case SLI_CTNS_RIP_NN: 10323 case SLI_CTNS_RIPA_NN: 10324 case SLI_CTNS_RSNN_NN: 10325 case SLI_CTNS_DA_ID: 10326 case SLI_CT_RESPONSE_FS_RJT: 10327 case SLI_CT_RESPONSE_FS_ACC: 10328 10329 default: 10330 break; 10331 } 10332 return; 10333 10334 } /* emlxs_swap_ct_pkt() */ 10335 10336 10337 extern void 10338 emlxs_swap_els_ub(fc_unsol_buf_t *ubp) 10339 { 10340 emlxs_ub_priv_t *ub_priv; 10341 fc_rscn_t *rscn; 10342 uint32_t count; 10343 uint32_t i; 10344 uint32_t *lp; 10345 la_els_logi_t *logi; 10346 10347 ub_priv = ubp->ub_fca_private; 10348 10349 switch (ub_priv->cmd) { 10350 case ELS_CMD_RSCN: 10351 rscn = (fc_rscn_t *)ubp->ub_buffer; 10352 10353 rscn->rscn_payload_len = LE_SWAP16(rscn->rscn_payload_len); 10354 10355 count = ((rscn->rscn_payload_len - 4) / 4); 10356 lp = (uint32_t *)ubp->ub_buffer + 1; 10357 for (i = 0; i < count; i++, lp++) { 10358 *lp = LE_SWAP32(*lp); 10359 } 10360 10361 break; 10362 10363 case ELS_CMD_FLOGI: 10364 case ELS_CMD_PLOGI: 10365 case ELS_CMD_FDISC: 10366 case ELS_CMD_PDISC: 10367 logi = (la_els_logi_t *)ubp->ub_buffer; 10368 emlxs_swap_service_params( 10369 (SERV_PARM *)&logi->common_service); 10370 break; 10371 10372 /* ULP handles this */ 10373 case ELS_CMD_LOGO: 10374 case ELS_CMD_PRLI: 10375 case ELS_CMD_PRLO: 10376 case ELS_CMD_ADISC: 10377 default: 10378 break; 10379 } 10380 10381 return; 10382 10383 } /* emlxs_swap_els_ub() */ 10384 10385 10386 #endif /* EMLXS_MODREV2X */ 10387 10388 10389 extern char * 10390 emlxs_elscmd_xlate(uint32_t elscmd) 10391 { 10392 static char buffer[32]; 10393 uint32_t i; 10394 uint32_t count; 10395 10396 count = sizeof (emlxs_elscmd_table) / sizeof (emlxs_table_t); 10397 for (i = 0; i < count; i++) { 10398 if (elscmd == emlxs_elscmd_table[i].code) { 10399 return (emlxs_elscmd_table[i].string); 10400 } 10401 } 10402 10403 (void) sprintf(buffer, "ELS=0x%x", elscmd); 10404 return (buffer); 10405 10406 } /* emlxs_elscmd_xlate() */ 10407 10408 10409 extern char * 10410 emlxs_ctcmd_xlate(uint32_t ctcmd) 10411 { 10412 static char buffer[32]; 
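	/*
	 * Note: as in emlxs_elscmd_xlate() above, an unrecognized code is
	 * formatted into this single static scratch buffer, so the string
	 * returned for an unknown code is overwritten by the next such call.
	 */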
10413 uint32_t i; 10414 uint32_t count; 10415 10416 count = sizeof (emlxs_ctcmd_table) / sizeof (emlxs_table_t); 10417 for (i = 0; i < count; i++) { 10418 if (ctcmd == emlxs_ctcmd_table[i].code) { 10419 return (emlxs_ctcmd_table[i].string); 10420 } 10421 } 10422 10423 (void) sprintf(buffer, "cmd=0x%x", ctcmd); 10424 return (buffer); 10425 10426 } /* emlxs_ctcmd_xlate() */ 10427 10428 10429 #ifdef MENLO_SUPPORT 10430 extern char * 10431 emlxs_menlo_cmd_xlate(uint32_t cmd) 10432 { 10433 static char buffer[32]; 10434 uint32_t i; 10435 uint32_t count; 10436 10437 count = sizeof (emlxs_menlo_cmd_table) / sizeof (emlxs_table_t); 10438 for (i = 0; i < count; i++) { 10439 if (cmd == emlxs_menlo_cmd_table[i].code) { 10440 return (emlxs_menlo_cmd_table[i].string); 10441 } 10442 } 10443 10444 (void) sprintf(buffer, "Cmd=0x%x", cmd); 10445 return (buffer); 10446 10447 } /* emlxs_menlo_cmd_xlate() */ 10448 10449 extern char * 10450 emlxs_menlo_rsp_xlate(uint32_t rsp) 10451 { 10452 static char buffer[32]; 10453 uint32_t i; 10454 uint32_t count; 10455 10456 count = sizeof (emlxs_menlo_rsp_table) / sizeof (emlxs_table_t); 10457 for (i = 0; i < count; i++) { 10458 if (rsp == emlxs_menlo_rsp_table[i].code) { 10459 return (emlxs_menlo_rsp_table[i].string); 10460 } 10461 } 10462 10463 (void) sprintf(buffer, "Rsp=0x%x", rsp); 10464 return (buffer); 10465 10466 } /* emlxs_menlo_rsp_xlate() */ 10467 10468 #endif /* MENLO_SUPPORT */ 10469 10470 10471 extern char * 10472 emlxs_rmcmd_xlate(uint32_t rmcmd) 10473 { 10474 static char buffer[32]; 10475 uint32_t i; 10476 uint32_t count; 10477 10478 count = sizeof (emlxs_rmcmd_table) / sizeof (emlxs_table_t); 10479 for (i = 0; i < count; i++) { 10480 if (rmcmd == emlxs_rmcmd_table[i].code) { 10481 return (emlxs_rmcmd_table[i].string); 10482 } 10483 } 10484 10485 (void) sprintf(buffer, "RM=0x%x", rmcmd); 10486 return (buffer); 10487 10488 } /* emlxs_rmcmd_xlate() */ 10489 10490 10491 10492 extern char * 10493 emlxs_mscmd_xlate(uint16_t mscmd) 10494 { 10495 static char buffer[32]; 10496 uint32_t i; 10497 uint32_t count; 10498 10499 count = sizeof (emlxs_mscmd_table) / sizeof (emlxs_table_t); 10500 for (i = 0; i < count; i++) { 10501 if (mscmd == emlxs_mscmd_table[i].code) { 10502 return (emlxs_mscmd_table[i].string); 10503 } 10504 } 10505 10506 (void) sprintf(buffer, "Cmd=0x%x", mscmd); 10507 return (buffer); 10508 10509 } /* emlxs_mscmd_xlate() */ 10510 10511 10512 extern char * 10513 emlxs_state_xlate(uint8_t state) 10514 { 10515 static char buffer[32]; 10516 uint32_t i; 10517 uint32_t count; 10518 10519 count = sizeof (emlxs_state_table) / sizeof (emlxs_table_t); 10520 for (i = 0; i < count; i++) { 10521 if (state == emlxs_state_table[i].code) { 10522 return (emlxs_state_table[i].string); 10523 } 10524 } 10525 10526 (void) sprintf(buffer, "State=0x%x", state); 10527 return (buffer); 10528 10529 } /* emlxs_state_xlate() */ 10530 10531 10532 extern char * 10533 emlxs_error_xlate(uint8_t errno) 10534 { 10535 static char buffer[32]; 10536 uint32_t i; 10537 uint32_t count; 10538 10539 count = sizeof (emlxs_error_table) / sizeof (emlxs_table_t); 10540 for (i = 0; i < count; i++) { 10541 if (errno == emlxs_error_table[i].code) { 10542 return (emlxs_error_table[i].string); 10543 } 10544 } 10545 10546 (void) sprintf(buffer, "Errno=0x%x", errno); 10547 return (buffer); 10548 10549 } /* emlxs_error_xlate() */ 10550 10551 10552 static int 10553 emlxs_pm_lower_power(dev_info_t *dip) 10554 { 10555 int ddiinst; 10556 int emlxinst; 10557 emlxs_config_t *cfg; 10558 int32_t rval; 10559 
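	/* hba is resolved below from the dev_info's ddi instance via */
	/* emlxs_get_instance() and the emlxs_device.hba[] table. */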
emlxs_hba_t *hba; 10560 10561 ddiinst = ddi_get_instance(dip); 10562 emlxinst = emlxs_get_instance(ddiinst); 10563 hba = emlxs_device.hba[emlxinst]; 10564 cfg = &CFG; 10565 10566 rval = DDI_SUCCESS; 10567 10568 /* Lower the power level */ 10569 if (cfg[CFG_PM_SUPPORT].current) { 10570 rval = 10571 pm_lower_power(dip, EMLXS_PM_ADAPTER, 10572 EMLXS_PM_ADAPTER_DOWN); 10573 } else { 10574 /* We do not have kernel support of power management enabled */ 10575 /* therefore, call our power management routine directly */ 10576 rval = 10577 emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_DOWN); 10578 } 10579 10580 return (rval); 10581 10582 } /* emlxs_pm_lower_power() */ 10583 10584 10585 static int 10586 emlxs_pm_raise_power(dev_info_t *dip) 10587 { 10588 int ddiinst; 10589 int emlxinst; 10590 emlxs_config_t *cfg; 10591 int32_t rval; 10592 emlxs_hba_t *hba; 10593 10594 ddiinst = ddi_get_instance(dip); 10595 emlxinst = emlxs_get_instance(ddiinst); 10596 hba = emlxs_device.hba[emlxinst]; 10597 cfg = &CFG; 10598 10599 /* Raise the power level */ 10600 if (cfg[CFG_PM_SUPPORT].current) { 10601 rval = 10602 pm_raise_power(dip, EMLXS_PM_ADAPTER, 10603 EMLXS_PM_ADAPTER_UP); 10604 } else { 10605 /* We do not have kernel support of power management enabled */ 10606 /* therefore, call our power management routine directly */ 10607 rval = 10608 emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_UP); 10609 } 10610 10611 return (rval); 10612 10613 } /* emlxs_pm_raise_power() */ 10614 10615 10616 #ifdef IDLE_TIMER 10617 10618 extern int 10619 emlxs_pm_busy_component(emlxs_hba_t *hba) 10620 { 10621 emlxs_config_t *cfg = &CFG; 10622 int rval; 10623 10624 hba->pm_active = 1; 10625 10626 if (hba->pm_busy) { 10627 return (DDI_SUCCESS); 10628 } 10629 10630 mutex_enter(&EMLXS_PM_LOCK); 10631 10632 if (hba->pm_busy) { 10633 mutex_exit(&EMLXS_PM_LOCK); 10634 return (DDI_SUCCESS); 10635 } 10636 hba->pm_busy = 1; 10637 10638 mutex_exit(&EMLXS_PM_LOCK); 10639 10640 /* Attempt to notify system that we are busy */ 10641 if (cfg[CFG_PM_SUPPORT].current) { 10642 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 10643 "pm_busy_component."); 10644 10645 rval = pm_busy_component(dip, EMLXS_PM_ADAPTER); 10646 10647 if (rval != DDI_SUCCESS) { 10648 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 10649 "pm_busy_component failed. ret=%d", rval); 10650 10651 /* If this attempt failed then clear our flags */ 10652 mutex_enter(&EMLXS_PM_LOCK); 10653 hba->pm_busy = 0; 10654 mutex_exit(&EMLXS_PM_LOCK); 10655 10656 return (rval); 10657 } 10658 } 10659 10660 return (DDI_SUCCESS); 10661 10662 } /* emlxs_pm_busy_component() */ 10663 10664 10665 extern int 10666 emlxs_pm_idle_component(emlxs_hba_t *hba) 10667 { 10668 emlxs_config_t *cfg = &CFG; 10669 int rval; 10670 10671 if (!hba->pm_busy) { 10672 return (DDI_SUCCESS); 10673 } 10674 10675 mutex_enter(&EMLXS_PM_LOCK); 10676 10677 if (!hba->pm_busy) { 10678 mutex_exit(&EMLXS_PM_LOCK); 10679 return (DDI_SUCCESS); 10680 } 10681 hba->pm_busy = 0; 10682 10683 mutex_exit(&EMLXS_PM_LOCK); 10684 10685 if (cfg[CFG_PM_SUPPORT].current) { 10686 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 10687 "pm_idle_component."); 10688 10689 rval = pm_idle_component(dip, EMLXS_PM_ADAPTER); 10690 10691 if (rval != DDI_SUCCESS) { 10692 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 10693 "pm_idle_component failed. 
ret=%d", rval); 10694 10695 /* If this attempt failed then */ 10696 /* reset our flags for another attempt */ 10697 mutex_enter(&EMLXS_PM_LOCK); 10698 hba->pm_busy = 1; 10699 mutex_exit(&EMLXS_PM_LOCK); 10700 10701 return (rval); 10702 } 10703 } 10704 10705 return (DDI_SUCCESS); 10706 10707 } /* emlxs_pm_idle_component() */ 10708 10709 10710 extern void 10711 emlxs_pm_idle_timer(emlxs_hba_t *hba) 10712 { 10713 emlxs_config_t *cfg = &CFG; 10714 10715 if (hba->pm_active) { 10716 /* Clear active flag and reset idle timer */ 10717 mutex_enter(&EMLXS_PM_LOCK); 10718 hba->pm_active = 0; 10719 hba->pm_idle_timer = 10720 hba->timer_tics + cfg[CFG_PM_IDLE].current; 10721 mutex_exit(&EMLXS_PM_LOCK); 10722 } 10723 10724 /* Check for idle timeout */ 10725 else if (hba->timer_tics >= hba->pm_idle_timer) { 10726 if (emlxs_pm_idle_component(hba) == DDI_SUCCESS) { 10727 mutex_enter(&EMLXS_PM_LOCK); 10728 hba->pm_idle_timer = 10729 hba->timer_tics + cfg[CFG_PM_IDLE].current; 10730 mutex_exit(&EMLXS_PM_LOCK); 10731 } 10732 } 10733 10734 return; 10735 10736 } /* emlxs_pm_idle_timer() */ 10737 10738 #endif /* IDLE_TIMER */ 10739 10740 10741 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4) 10742 static void 10743 emlxs_read_vport_prop(emlxs_hba_t *hba) 10744 { 10745 emlxs_port_t *port = &PPORT; 10746 emlxs_config_t *cfg = &CFG; 10747 char **arrayp; 10748 uint8_t *s; 10749 uint8_t *np; 10750 NAME_TYPE pwwpn; 10751 NAME_TYPE wwnn; 10752 NAME_TYPE wwpn; 10753 uint32_t vpi; 10754 uint32_t cnt; 10755 uint32_t rval; 10756 uint32_t i; 10757 uint32_t j; 10758 uint32_t c1; 10759 uint32_t sum; 10760 uint32_t errors; 10761 char buffer[64]; 10762 10763 /* Check for the per adapter vport setting */ 10764 (void) sprintf(buffer, "%s%d-vport", DRIVER_NAME, hba->ddiinst); 10765 cnt = 0; 10766 arrayp = NULL; 10767 rval = 10768 ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip, 10769 (DDI_PROP_DONTPASS), buffer, &arrayp, &cnt); 10770 10771 if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) { 10772 /* Check for the global vport setting */ 10773 cnt = 0; 10774 arrayp = NULL; 10775 rval = 10776 ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip, 10777 (DDI_PROP_DONTPASS), "vport", &arrayp, &cnt); 10778 } 10779 10780 if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) { 10781 return; 10782 } 10783 10784 for (i = 0; i < cnt; i++) { 10785 errors = 0; 10786 s = (uint8_t *)arrayp[i]; 10787 10788 if (!s) { 10789 break; 10790 } 10791 10792 np = (uint8_t *)&pwwpn; 10793 for (j = 0; j < sizeof (NAME_TYPE); j++) { 10794 c1 = *s++; 10795 if ((c1 >= '0') && (c1 <= '9')) { 10796 sum = ((c1 - '0') << 4); 10797 } else if ((c1 >= 'a') && (c1 <= 'f')) { 10798 sum = ((c1 - 'a' + 10) << 4); 10799 } else if ((c1 >= 'A') && (c1 <= 'F')) { 10800 sum = ((c1 - 'A' + 10) << 4); 10801 } else { 10802 EMLXS_MSGF(EMLXS_CONTEXT, 10803 &emlxs_attach_debug_msg, 10804 "Config error: Invalid PWWPN found. " 10805 "entry=%d byte=%d hi_nibble=%c", 10806 i, j, c1); 10807 errors++; 10808 } 10809 10810 c1 = *s++; 10811 if ((c1 >= '0') && (c1 <= '9')) { 10812 sum |= (c1 - '0'); 10813 } else if ((c1 >= 'a') && (c1 <= 'f')) { 10814 sum |= (c1 - 'a' + 10); 10815 } else if ((c1 >= 'A') && (c1 <= 'F')) { 10816 sum |= (c1 - 'A' + 10); 10817 } else { 10818 EMLXS_MSGF(EMLXS_CONTEXT, 10819 &emlxs_attach_debug_msg, 10820 "Config error: Invalid PWWPN found. 
" 10821 "entry=%d byte=%d lo_nibble=%c", 10822 i, j, c1); 10823 errors++; 10824 } 10825 10826 *np++ = (uint8_t)sum; 10827 } 10828 10829 if (*s++ != ':') { 10830 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 10831 "Config error: Invalid delimiter after PWWPN. " 10832 "entry=%d", i); 10833 goto out; 10834 } 10835 10836 np = (uint8_t *)&wwnn; 10837 for (j = 0; j < sizeof (NAME_TYPE); j++) { 10838 c1 = *s++; 10839 if ((c1 >= '0') && (c1 <= '9')) { 10840 sum = ((c1 - '0') << 4); 10841 } else if ((c1 >= 'a') && (c1 <= 'f')) { 10842 sum = ((c1 - 'a' + 10) << 4); 10843 } else if ((c1 >= 'A') && (c1 <= 'F')) { 10844 sum = ((c1 - 'A' + 10) << 4); 10845 } else { 10846 EMLXS_MSGF(EMLXS_CONTEXT, 10847 &emlxs_attach_debug_msg, 10848 "Config error: Invalid WWNN found. " 10849 "entry=%d byte=%d hi_nibble=%c", 10850 i, j, c1); 10851 errors++; 10852 } 10853 10854 c1 = *s++; 10855 if ((c1 >= '0') && (c1 <= '9')) { 10856 sum |= (c1 - '0'); 10857 } else if ((c1 >= 'a') && (c1 <= 'f')) { 10858 sum |= (c1 - 'a' + 10); 10859 } else if ((c1 >= 'A') && (c1 <= 'F')) { 10860 sum |= (c1 - 'A' + 10); 10861 } else { 10862 EMLXS_MSGF(EMLXS_CONTEXT, 10863 &emlxs_attach_debug_msg, 10864 "Config error: Invalid WWNN found. " 10865 "entry=%d byte=%d lo_nibble=%c", 10866 i, j, c1); 10867 errors++; 10868 } 10869 10870 *np++ = (uint8_t)sum; 10871 } 10872 10873 if (*s++ != ':') { 10874 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 10875 "Config error: Invalid delimiter after WWNN. " 10876 "entry=%d", i); 10877 goto out; 10878 } 10879 10880 np = (uint8_t *)&wwpn; 10881 for (j = 0; j < sizeof (NAME_TYPE); j++) { 10882 c1 = *s++; 10883 if ((c1 >= '0') && (c1 <= '9')) { 10884 sum = ((c1 - '0') << 4); 10885 } else if ((c1 >= 'a') && (c1 <= 'f')) { 10886 sum = ((c1 - 'a' + 10) << 4); 10887 } else if ((c1 >= 'A') && (c1 <= 'F')) { 10888 sum = ((c1 - 'A' + 10) << 4); 10889 } else { 10890 EMLXS_MSGF(EMLXS_CONTEXT, 10891 &emlxs_attach_debug_msg, 10892 "Config error: Invalid WWPN found. " 10893 "entry=%d byte=%d hi_nibble=%c", 10894 i, j, c1); 10895 10896 errors++; 10897 } 10898 10899 c1 = *s++; 10900 if ((c1 >= '0') && (c1 <= '9')) { 10901 sum |= (c1 - '0'); 10902 } else if ((c1 >= 'a') && (c1 <= 'f')) { 10903 sum |= (c1 - 'a' + 10); 10904 } else if ((c1 >= 'A') && (c1 <= 'F')) { 10905 sum |= (c1 - 'A' + 10); 10906 } else { 10907 EMLXS_MSGF(EMLXS_CONTEXT, 10908 &emlxs_attach_debug_msg, 10909 "Config error: Invalid WWPN found. " 10910 "entry=%d byte=%d lo_nibble=%c", 10911 i, j, c1); 10912 10913 errors++; 10914 } 10915 10916 *np++ = (uint8_t)sum; 10917 } 10918 10919 if (*s++ != ':') { 10920 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 10921 "Config error: Invalid delimiter after WWPN. " 10922 "entry=%d", i); 10923 10924 goto out; 10925 } 10926 10927 sum = 0; 10928 do { 10929 c1 = *s++; 10930 if ((c1 < '0') || (c1 > '9')) { 10931 EMLXS_MSGF(EMLXS_CONTEXT, 10932 &emlxs_attach_debug_msg, 10933 "Config error: Invalid VPI found. 
" 10934 "entry=%d c=%c vpi=%d", i, c1, sum); 10935 10936 goto out; 10937 } 10938 10939 sum = (sum * 10) + (c1 - '0'); 10940 10941 } while (*s != 0); 10942 10943 vpi = sum; 10944 10945 if (errors) { 10946 continue; 10947 } 10948 10949 /* Entry has been read */ 10950 10951 /* Check if the physical port wwpn */ 10952 /* matches our physical port wwpn */ 10953 if (bcmp((caddr_t)&hba->wwpn, (caddr_t)&pwwpn, 8)) { 10954 continue; 10955 } 10956 10957 /* Check vpi range */ 10958 if ((vpi == 0) || (vpi >= MAX_VPORTS)) { 10959 continue; 10960 } 10961 10962 /* Check if port has already been configured */ 10963 if (hba->port[vpi].flag & EMLXS_PORT_CONFIG) { 10964 continue; 10965 } 10966 10967 /* Set the highest configured vpi */ 10968 if (vpi > hba->vpi_high) { 10969 hba->vpi_high = vpi; 10970 } 10971 10972 bcopy((caddr_t)&wwnn, (caddr_t)&hba->port[vpi].wwnn, 10973 sizeof (NAME_TYPE)); 10974 bcopy((caddr_t)&wwpn, (caddr_t)&hba->port[vpi].wwpn, 10975 sizeof (NAME_TYPE)); 10976 10977 if (hba->port[vpi].snn[0] == 0) { 10978 (void) strncpy((caddr_t)hba->port[vpi].snn, 10979 (caddr_t)hba->snn, 256); 10980 } 10981 10982 if (hba->port[vpi].spn[0] == 0) { 10983 (void) sprintf((caddr_t)hba->port[vpi].spn, 10984 "%s VPort-%d", 10985 (caddr_t)hba->spn, vpi); 10986 } 10987 10988 hba->port[vpi].flag |= 10989 (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE); 10990 10991 if (cfg[CFG_VPORT_RESTRICTED].current) { 10992 hba->port[vpi].flag |= EMLXS_PORT_RESTRICTED; 10993 } 10994 } 10995 10996 out: 10997 10998 (void) ddi_prop_free((void *) arrayp); 10999 return; 11000 11001 } /* emlxs_read_vport_prop() */ 11002 #endif /* EMLXS_MODREV3 || EMLXS_MODREV4 */ 11003 11004 11005 extern char * 11006 emlxs_wwn_xlate(char *buffer, uint8_t *wwn) 11007 { 11008 (void) sprintf(buffer, "%02x%02x%02x%02x%02x%02x%02x%02x", 11009 wwn[0] & 0xff, wwn[1] & 0xff, wwn[2] & 0xff, wwn[3] & 0xff, 11010 wwn[4] & 0xff, wwn[5] & 0xff, wwn[6] & 0xff, wwn[7] & 0xff); 11011 11012 return (buffer); 11013 11014 } /* emlxs_wwn_xlate() */ 11015 11016 11017 /* This is called at port online and offline */ 11018 extern void 11019 emlxs_ub_flush(emlxs_port_t *port) 11020 { 11021 emlxs_hba_t *hba = HBA; 11022 fc_unsol_buf_t *ubp; 11023 emlxs_ub_priv_t *ub_priv; 11024 emlxs_ub_priv_t *next; 11025 11026 /* Return if nothing to do */ 11027 if (!port->ub_wait_head) { 11028 return; 11029 } 11030 11031 mutex_enter(&EMLXS_PORT_LOCK); 11032 ub_priv = port->ub_wait_head; 11033 port->ub_wait_head = NULL; 11034 port->ub_wait_tail = NULL; 11035 mutex_exit(&EMLXS_PORT_LOCK); 11036 11037 while (ub_priv) { 11038 next = ub_priv->next; 11039 ubp = ub_priv->ubp; 11040 11041 /* Check if ULP is online and we have a callback function */ 11042 if ((port->ulp_statec != FC_STATE_OFFLINE) && 11043 port->ulp_unsol_cb) { 11044 /* Send ULP the ub buffer */ 11045 port->ulp_unsol_cb(port->ulp_handle, ubp, 11046 ubp->ub_frame.type); 11047 } else { /* Drop the buffer */ 11048 11049 (void) emlxs_fca_ub_release(port, 1, &ubp->ub_token); 11050 } 11051 11052 ub_priv = next; 11053 11054 } /* while () */ 11055 11056 return; 11057 11058 } /* emlxs_ub_flush() */ 11059 11060 11061 extern void 11062 emlxs_ub_callback(emlxs_port_t *port, fc_unsol_buf_t *ubp) 11063 { 11064 emlxs_hba_t *hba = HBA; 11065 emlxs_ub_priv_t *ub_priv; 11066 11067 ub_priv = ubp->ub_fca_private; 11068 11069 /* Check if ULP is online */ 11070 if (port->ulp_statec != FC_STATE_OFFLINE) { 11071 if (port->ulp_unsol_cb) { 11072 port->ulp_unsol_cb(port->ulp_handle, ubp, 11073 ubp->ub_frame.type); 11074 } else { 11075 (void) 
emlxs_fca_ub_release(port, 1, &ubp->ub_token); 11076 } 11077 11078 return; 11079 } else { /* ULP offline */ 11080 11081 if (hba->state >= FC_LINK_UP) { 11082 /* Add buffer to queue tail */ 11083 mutex_enter(&EMLXS_PORT_LOCK); 11084 11085 if (port->ub_wait_tail) { 11086 port->ub_wait_tail->next = ub_priv; 11087 } 11088 port->ub_wait_tail = ub_priv; 11089 11090 if (!port->ub_wait_head) { 11091 port->ub_wait_head = ub_priv; 11092 } 11093 11094 mutex_exit(&EMLXS_PORT_LOCK); 11095 } else { 11096 (void) emlxs_fca_ub_release(port, 1, &ubp->ub_token); 11097 } 11098 } 11099 11100 return; 11101 11102 } /* emlxs_ub_callback() */ 11103 11104 11105 static uint32_t 11106 emlxs_integrity_check(emlxs_hba_t *hba) 11107 { 11108 uint32_t size; 11109 uint32_t errors = 0; 11110 int ddiinst = hba->ddiinst; 11111 11112 size = 16; 11113 if (sizeof (ULP_BDL) != size) { 11114 cmn_err(CE_WARN, "?%s%d: ULP_BDL size incorrect. %d != 16", 11115 DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDL)); 11116 11117 errors++; 11118 } 11119 size = 8; 11120 if (sizeof (ULP_BDE) != size) { 11121 cmn_err(CE_WARN, "?%s%d: ULP_BDE size incorrect. %d != 8", 11122 DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE)); 11123 11124 errors++; 11125 } 11126 size = 12; 11127 if (sizeof (ULP_BDE64) != size) { 11128 cmn_err(CE_WARN, "?%s%d: ULP_BDE64 size incorrect. %d != 12", 11129 DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE64)); 11130 11131 errors++; 11132 } 11133 size = 16; 11134 if (sizeof (HBQE_t) != size) { 11135 cmn_err(CE_WARN, "?%s%d: HBQE size incorrect. %d != 16", 11136 DRIVER_NAME, ddiinst, (int)sizeof (HBQE_t)); 11137 11138 errors++; 11139 } 11140 size = 8; 11141 if (sizeof (HGP) != size) { 11142 cmn_err(CE_WARN, "?%s%d: HGP size incorrect. %d != 8", 11143 DRIVER_NAME, ddiinst, (int)sizeof (HGP)); 11144 11145 errors++; 11146 } 11147 if (sizeof (PGP) != size) { 11148 cmn_err(CE_WARN, "?%s%d: PGP size incorrect. %d != 8", 11149 DRIVER_NAME, ddiinst, (int)sizeof (PGP)); 11150 11151 errors++; 11152 } 11153 size = 4; 11154 if (sizeof (WORD5) != size) { 11155 cmn_err(CE_WARN, "?%s%d: WORD5 size incorrect. %d != 4", 11156 DRIVER_NAME, ddiinst, (int)sizeof (WORD5)); 11157 11158 errors++; 11159 } 11160 size = 124; 11161 if (sizeof (MAILVARIANTS) != size) { 11162 cmn_err(CE_WARN, "?%s%d: MAILVARIANTS size incorrect. " 11163 "%d != 124", DRIVER_NAME, ddiinst, 11164 (int)sizeof (MAILVARIANTS)); 11165 11166 errors++; 11167 } 11168 size = 128; 11169 if (sizeof (SLI1_DESC) != size) { 11170 cmn_err(CE_WARN, "?%s%d: SLI1_DESC size incorrect. %d != 128", 11171 DRIVER_NAME, ddiinst, (int)sizeof (SLI1_DESC)); 11172 11173 errors++; 11174 } 11175 if (sizeof (SLI2_DESC) != size) { 11176 cmn_err(CE_WARN, "?%s%d: SLI2_DESC size incorrect. %d != 128", 11177 DRIVER_NAME, ddiinst, (int)sizeof (SLI2_DESC)); 11178 11179 errors++; 11180 } 11181 size = MBOX_SIZE; 11182 if (sizeof (MAILBOX) != size) { 11183 cmn_err(CE_WARN, "?%s%d: MAILBOX size incorrect. %d != %d", 11184 DRIVER_NAME, ddiinst, (int)sizeof (MAILBOX), MBOX_SIZE); 11185 11186 errors++; 11187 } 11188 size = PCB_SIZE; 11189 if (sizeof (PCB) != size) { 11190 cmn_err(CE_WARN, "?%s%d: PCB size incorrect. %d != %d", 11191 DRIVER_NAME, ddiinst, (int)sizeof (PCB), PCB_SIZE); 11192 11193 errors++; 11194 } 11195 size = 260; 11196 if (sizeof (ATTRIBUTE_ENTRY) != size) { 11197 cmn_err(CE_WARN, "?%s%d: ATTRIBUTE_ENTRY size incorrect. 
" 11198 "%d != 260", DRIVER_NAME, ddiinst, 11199 (int)sizeof (ATTRIBUTE_ENTRY)); 11200 11201 errors++; 11202 } 11203 size = SLI_SLIM1_SIZE; 11204 if (sizeof (SLIM1) != size) { 11205 cmn_err(CE_WARN, "?%s%d: SLIM1 size incorrect. %d != %d", 11206 DRIVER_NAME, ddiinst, (int)sizeof (SLIM1), SLI_SLIM1_SIZE); 11207 11208 errors++; 11209 } 11210 size = SLI3_IOCB_CMD_SIZE; 11211 if (sizeof (IOCB) != size) { 11212 cmn_err(CE_WARN, "?%s%d: IOCB size incorrect. %d != %d", 11213 DRIVER_NAME, ddiinst, (int)sizeof (IOCB), 11214 SLI3_IOCB_CMD_SIZE); 11215 11216 errors++; 11217 } 11218 11219 size = SLI_SLIM2_SIZE; 11220 if (sizeof (SLIM2) != size) { 11221 cmn_err(CE_WARN, "?%s%d: SLIM2 size incorrect. %d != %d", 11222 DRIVER_NAME, ddiinst, (int)sizeof (SLIM2), 11223 SLI_SLIM2_SIZE); 11224 11225 errors++; 11226 } 11227 return (errors); 11228 11229 } /* emlxs_integrity_check() */ 11230 11231 11232 #ifdef FMA_SUPPORT 11233 /* 11234 * FMA support 11235 */ 11236 11237 extern void 11238 emlxs_fm_init(emlxs_hba_t *hba) 11239 { 11240 ddi_iblock_cookie_t iblk; 11241 11242 if (hba->fm_caps == DDI_FM_NOT_CAPABLE) { 11243 return; 11244 } 11245 11246 if (DDI_FM_ACC_ERR_CAP(hba->fm_caps)) { 11247 emlxs_dev_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC; 11248 emlxs_data_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC; 11249 } 11250 11251 if (DDI_FM_DMA_ERR_CAP(hba->fm_caps)) { 11252 hba->dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 11253 hba->dma_attr_ro.dma_attr_flags |= DDI_DMA_FLAGERR; 11254 hba->dma_attr_1sg.dma_attr_flags |= DDI_DMA_FLAGERR; 11255 hba->dma_attr_fcip_rsp.dma_attr_flags |= DDI_DMA_FLAGERR; 11256 } else { 11257 hba->dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR; 11258 hba->dma_attr_ro.dma_attr_flags &= ~DDI_DMA_FLAGERR; 11259 hba->dma_attr_1sg.dma_attr_flags &= ~DDI_DMA_FLAGERR; 11260 hba->dma_attr_fcip_rsp.dma_attr_flags &= ~DDI_DMA_FLAGERR; 11261 } 11262 11263 ddi_fm_init(hba->dip, &hba->fm_caps, &iblk); 11264 11265 if (DDI_FM_EREPORT_CAP(hba->fm_caps) || 11266 DDI_FM_ERRCB_CAP(hba->fm_caps)) { 11267 pci_ereport_setup(hba->dip); 11268 } 11269 11270 if (DDI_FM_ERRCB_CAP(hba->fm_caps)) { 11271 ddi_fm_handler_register(hba->dip, emlxs_fm_error_cb, 11272 (void *)hba); 11273 } 11274 11275 } /* emlxs_fm_init() */ 11276 11277 11278 extern void 11279 emlxs_fm_fini(emlxs_hba_t *hba) 11280 { 11281 if (hba->fm_caps == DDI_FM_NOT_CAPABLE) { 11282 return; 11283 } 11284 11285 if (DDI_FM_EREPORT_CAP(hba->fm_caps) || 11286 DDI_FM_ERRCB_CAP(hba->fm_caps)) { 11287 pci_ereport_teardown(hba->dip); 11288 } 11289 11290 if (DDI_FM_ERRCB_CAP(hba->fm_caps)) { 11291 ddi_fm_handler_unregister(hba->dip); 11292 } 11293 11294 (void) ddi_fm_fini(hba->dip); 11295 11296 } /* emlxs_fm_fini() */ 11297 11298 11299 extern int 11300 emlxs_fm_check_acc_handle(emlxs_hba_t *hba, ddi_acc_handle_t handle) 11301 { 11302 ddi_fm_error_t err; 11303 11304 if (!DDI_FM_ACC_ERR_CAP(hba->fm_caps)) { 11305 return (DDI_FM_OK); 11306 } 11307 11308 /* Some S10 versions do not define the ahi_err structure */ 11309 if (((ddi_acc_impl_t *)handle)->ahi_err == NULL) { 11310 return (DDI_FM_OK); 11311 } 11312 11313 err.fme_status = DDI_FM_OK; 11314 (void) ddi_fm_acc_err_get(handle, &err, DDI_FME_VERSION); 11315 11316 /* Some S10 versions do not define the ddi_fm_acc_err_clear function */ 11317 if ((void *)&ddi_fm_acc_err_clear != NULL) { 11318 (void) ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 11319 } 11320 11321 return (err.fme_status); 11322 11323 } /* emlxs_fm_check_acc_handle() */ 11324 11325 11326 extern int 11327 emlxs_fm_check_dma_handle(emlxs_hba_t *hba, 


extern int
emlxs_fm_check_acc_handle(emlxs_hba_t *hba, ddi_acc_handle_t handle)
{
	ddi_fm_error_t err;

	if (!DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
		return (DDI_FM_OK);
	}

	/* Some S10 versions do not define the ahi_err structure */
	if (((ddi_acc_impl_t *)handle)->ahi_err == NULL) {
		return (DDI_FM_OK);
	}

	err.fme_status = DDI_FM_OK;
	(void) ddi_fm_acc_err_get(handle, &err, DDI_FME_VERSION);

	/* Some S10 versions do not define the ddi_fm_acc_err_clear function */
	if ((void *)&ddi_fm_acc_err_clear != NULL) {
		(void) ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
	}

	return (err.fme_status);

} /* emlxs_fm_check_acc_handle() */


extern int
emlxs_fm_check_dma_handle(emlxs_hba_t *hba, ddi_dma_handle_t handle)
{
	ddi_fm_error_t err;

	if (!DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
		return (DDI_FM_OK);
	}

	err.fme_status = DDI_FM_OK;
	(void) ddi_fm_dma_err_get(handle, &err, DDI_FME_VERSION);

	return (err.fme_status);

} /* emlxs_fm_check_dma_handle() */


extern void
emlxs_fm_ereport(emlxs_hba_t *hba, char *detail)
{
	uint64_t ena;
	char buf[FM_MAX_CLASS];

	if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) {
		return;
	}

	if (detail == NULL) {
		return;
	}

	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
	ena = fm_ena_generate(0, FM_ENA_FMT1);

	ddi_fm_ereport_post(hba->dip, buf, ena, DDI_NOSLEEP,
	    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);

} /* emlxs_fm_ereport() */


extern void
emlxs_fm_service_impact(emlxs_hba_t *hba, int impact)
{
	if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) {
		return;
	}

	if (impact == 0) {
		return;
	}

	if ((hba->pm_state & EMLXS_PM_IN_DETACH) &&
	    (impact == DDI_SERVICE_DEGRADED)) {
		impact = DDI_SERVICE_UNAFFECTED;
	}

	ddi_fm_service_impact(hba->dip, impact);

	return;

} /* emlxs_fm_service_impact() */


/*
 * The I/O fault service error handling callback function
 */
/*ARGSUSED*/
extern int
emlxs_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
    const void *impl_data)
{
	/*
	 * As the driver can always deal with an error in any DMA or
	 * access handle, we can just return the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);

} /* emlxs_fm_error_cb() */


extern void
emlxs_check_dma(emlxs_hba_t *hba, emlxs_buf_t *sbp)
{
	emlxs_port_t *port = sbp->port;
	fc_packet_t *pkt = PRIV2PKT(sbp);

	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
		if (emlxs_fm_check_dma_handle(hba,
		    hba->sli.sli4.slim2.dma_handle)
		    != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_dma_handle_msg,
			    "slim2: hdl=%p",
			    hba->sli.sli4.slim2.dma_handle);

			mutex_enter(&EMLXS_PORT_LOCK);
			hba->flag |= FC_DMA_CHECK_ERROR;
			mutex_exit(&EMLXS_PORT_LOCK);
		}
	} else {
		if (emlxs_fm_check_dma_handle(hba,
		    hba->sli.sli3.slim2.dma_handle)
		    != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_dma_handle_msg,
			    "slim2: hdl=%p",
			    hba->sli.sli3.slim2.dma_handle);

			mutex_enter(&EMLXS_PORT_LOCK);
			hba->flag |= FC_DMA_CHECK_ERROR;
			mutex_exit(&EMLXS_PORT_LOCK);
		}
	}

	if (hba->flag & FC_DMA_CHECK_ERROR) {
		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_DMA_ERROR;
		pkt->pkt_expln = FC_EXPLN_NONE;
		pkt->pkt_action = FC_ACTION_RETRYABLE;
		return;
	}

	if (pkt->pkt_cmdlen) {
		if (emlxs_fm_check_dma_handle(hba, pkt->pkt_cmd_dma)
		    != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_dma_handle_msg,
			    "pkt_cmd_dma: hdl=%p",
			    pkt->pkt_cmd_dma);

			pkt->pkt_state = FC_PKT_TRAN_ERROR;
			pkt->pkt_reason = FC_REASON_DMA_ERROR;
			pkt->pkt_expln = FC_EXPLN_NONE;
			pkt->pkt_action = FC_ACTION_RETRYABLE;

			return;
		}
	}

	if (pkt->pkt_rsplen) {
		if (emlxs_fm_check_dma_handle(hba, pkt->pkt_resp_dma)
		    != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_dma_handle_msg,
			    "pkt_resp_dma: hdl=%p",
			    pkt->pkt_resp_dma);

			pkt->pkt_state = FC_PKT_TRAN_ERROR;
			pkt->pkt_reason = FC_REASON_DMA_ERROR;
			pkt->pkt_expln = FC_EXPLN_NONE;
			pkt->pkt_action = FC_ACTION_RETRYABLE;

			return;
		}
	}

	if (pkt->pkt_datalen) {
		if (emlxs_fm_check_dma_handle(hba, pkt->pkt_data_dma)
		    != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_dma_handle_msg,
			    "pkt_data_dma: hdl=%p",
			    pkt->pkt_data_dma);

			pkt->pkt_state = FC_PKT_TRAN_ERROR;
			pkt->pkt_reason = FC_REASON_DMA_ERROR;
			pkt->pkt_expln = FC_EXPLN_NONE;
			pkt->pkt_action = FC_ACTION_RETRYABLE;

			return;
		}
	}

	return;

} /* emlxs_check_dma() */
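

/*
 * Illustrative sketch only (not part of the original source): a typical way
 * the helpers above are combined after a failed access check, following the
 * standard DDI FMA pattern of posting an ereport and then reporting the
 * service impact.  The access handle name is hypothetical.
 */
#if 0
	if (emlxs_fm_check_acc_handle(hba, some_acc_handle) != DDI_FM_OK) {
		emlxs_fm_ereport(hba, DDI_FM_DEVICE_NO_RESPONSE);
		emlxs_fm_service_impact(hba, DDI_SERVICE_DEGRADED);
	}
#endif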
#endif	/* FMA_SUPPORT */


extern void
emlxs_swap32_buffer(uint8_t *buffer, uint32_t size)
{
	uint32_t word;
	uint32_t *wptr;
	uint32_t i;

	wptr = (uint32_t *)buffer;

	/* Round the byte count up to a whole number of 32-bit words */
	size += (size % 4) ? (4 - (size % 4)) : 0;
	for (i = 0; i < size / 4; i++) {
		word = *wptr;
		*wptr++ = SWAP32(word);
	}

	return;

} /* emlxs_swap32_buffer() */


extern void
emlxs_swap32_bcopy(uint8_t *src, uint8_t *dst, uint32_t size)
{
	uint32_t word;
	uint32_t *sptr;
	uint32_t *dptr;
	uint32_t i;

	sptr = (uint32_t *)src;
	dptr = (uint32_t *)dst;

	/* Round the byte count up to a whole number of 32-bit words */
	size += (size % 4) ? (4 - (size % 4)) : 0;
	for (i = 0; i < size / 4; i++) {
		word = *sptr++;
		*dptr++ = SWAP32(word);
	}

	return;

} /* emlxs_swap32_bcopy() */
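

/*
 * Illustrative usage sketch only (not part of the original source).  Both
 * routines above round the byte count up to a multiple of four and operate
 * on whole 32-bit words, so callers must supply buffers padded to a 4-byte
 * boundary.  The buffer name below is hypothetical.
 */
#if 0
	uint8_t payload[16];	/* already a multiple of 4 bytes */

	/* Byte-swap each 32-bit word of the payload in place */
	emlxs_swap32_buffer(payload, sizeof (payload));
#endif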