1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright 2010 Emulex.  All rights reserved.
  24  * Use is subject to license terms.
  25  */
  26 
  27 
  28 #include <emlxs.h>
  29 
  30 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
  31 EMLXS_MSG_DEF(EMLXS_SLI4_C);
  32 
  33 static int              emlxs_sli4_create_queues(emlxs_hba_t *hba,
  34                                 MAILBOXQ *mbq);
  35 static int              emlxs_sli4_post_hdr_tmplates(emlxs_hba_t *hba,
  36                                 MAILBOXQ *mbq);
  37 static int              emlxs_sli4_post_sgl_pages(emlxs_hba_t *hba,
  38                                 MAILBOXQ *mbq);
  39 
  40 static int              emlxs_sli4_read_eq(emlxs_hba_t *hba, EQ_DESC_t *eq);
  41 
  42 extern void             emlxs_parse_prog_types(emlxs_hba_t *hba, char *types);
  43 
  44 extern int32_t          emlxs_parse_vpd(emlxs_hba_t *hba, uint8_t *vpd,
  45                                 uint32_t size);
  46 extern void             emlxs_decode_label(char *label, char *buffer, int bige);
  47 
  48 extern void             emlxs_build_prog_types(emlxs_hba_t *hba,
  49                                 char *prog_types);
  50 
  51 extern int              emlxs_pci_model_count;
  52 
  53 extern emlxs_model_t    emlxs_pci_model[];
  54 
  55 static int              emlxs_sli4_map_hdw(emlxs_hba_t *hba);
  56 
  57 static void             emlxs_sli4_unmap_hdw(emlxs_hba_t *hba);
  58 
  59 static int32_t          emlxs_sli4_online(emlxs_hba_t *hba);
  60 
  61 static void             emlxs_sli4_offline(emlxs_hba_t *hba);
  62 
  63 static uint32_t         emlxs_sli4_hba_reset(emlxs_hba_t *hba, uint32_t restart,
  64                                 uint32_t skip_post, uint32_t quiesce);
  65 static void             emlxs_sli4_hba_kill(emlxs_hba_t *hba);
  66 
  67 static uint32_t         emlxs_sli4_hba_init(emlxs_hba_t *hba);
  68 
  69 static uint32_t         emlxs_sli4_bde_setup(emlxs_port_t *port,
  70                                 emlxs_buf_t *sbp);
  71 
  72 
  73 static void             emlxs_sli4_issue_iocb_cmd(emlxs_hba_t *hba,
  74                                 CHANNEL *cp, IOCBQ *iocb_cmd);
  75 static uint32_t         emlxs_sli4_issue_mbox_cmd(emlxs_hba_t *hba,
  76                                 MAILBOXQ *mbq, int32_t flg, uint32_t tmo);
  77 static uint32_t         emlxs_sli4_issue_mbox_cmd4quiesce(emlxs_hba_t *hba,
  78                                 MAILBOXQ *mbq, int32_t flg, uint32_t tmo);
  79 #ifdef SFCT_SUPPORT
  80 static uint32_t         emlxs_sli4_prep_fct_iocb(emlxs_port_t *port,
  81                                 emlxs_buf_t *cmd_sbp, int channel);
  82 #endif /* SFCT_SUPPORT */
  83 
  84 static uint32_t         emlxs_sli4_prep_fcp_iocb(emlxs_port_t *port,
  85                                 emlxs_buf_t *sbp, int ring);
  86 static uint32_t         emlxs_sli4_prep_ip_iocb(emlxs_port_t *port,
  87                                 emlxs_buf_t *sbp);
  88 static uint32_t         emlxs_sli4_prep_els_iocb(emlxs_port_t *port,
  89                                 emlxs_buf_t *sbp);
  90 static uint32_t         emlxs_sli4_prep_ct_iocb(emlxs_port_t *port,
  91                                 emlxs_buf_t *sbp);
  92 static void             emlxs_sli4_poll_intr(emlxs_hba_t *hba,
  93                                 uint32_t att_bit);
  94 static int32_t          emlxs_sli4_intx_intr(char *arg);
  95 
  96 #ifdef MSI_SUPPORT
  97 static uint32_t         emlxs_sli4_msi_intr(char *arg1, char *arg2);
  98 #endif /* MSI_SUPPORT */
  99 
 100 static void             emlxs_sli4_resource_free(emlxs_hba_t *hba);
 101 
 102 static int              emlxs_sli4_resource_alloc(emlxs_hba_t *hba);
 103 
 104 static XRIobj_t         *emlxs_sli4_alloc_xri(emlxs_hba_t *hba,
 105                                 emlxs_buf_t *sbp, RPIobj_t *rpip);
 106 static void             emlxs_sli4_enable_intr(emlxs_hba_t *hba);
 107 
 108 static void             emlxs_sli4_disable_intr(emlxs_hba_t *hba, uint32_t att);
 109 
 110 extern void             emlxs_sli4_timer(emlxs_hba_t *hba);
 111 
 112 static void             emlxs_sli4_timer_check_mbox(emlxs_hba_t *hba);
 113 
 114 static void             emlxs_sli4_poll_erratt(emlxs_hba_t *hba);
 115 
 116 static XRIobj_t         *emlxs_sli4_register_xri(emlxs_hba_t *hba,
 117                                 emlxs_buf_t *sbp, uint16_t xri);
 118 
 119 static XRIobj_t         *emlxs_sli4_reserve_xri(emlxs_hba_t *hba,
 120                                 RPIobj_t *rpip);
 121 static int              emlxs_check_hdw_ready(emlxs_hba_t *);
 122 
 123 
 124 /* Define SLI4 API functions */
 125 emlxs_sli_api_t emlxs_sli4_api = {
 126         emlxs_sli4_map_hdw,
 127         emlxs_sli4_unmap_hdw,
 128         emlxs_sli4_online,
 129         emlxs_sli4_offline,
 130         emlxs_sli4_hba_reset,
 131         emlxs_sli4_hba_kill,
 132         emlxs_sli4_issue_iocb_cmd,
 133         emlxs_sli4_issue_mbox_cmd,
 134 #ifdef SFCT_SUPPORT
 135         emlxs_sli4_prep_fct_iocb,
 136 #else
 137         NULL,
 138 #endif /* SFCT_SUPPORT */
 139         emlxs_sli4_prep_fcp_iocb,
 140         emlxs_sli4_prep_ip_iocb,
 141         emlxs_sli4_prep_els_iocb,
 142         emlxs_sli4_prep_ct_iocb,
 143         emlxs_sli4_poll_intr,
 144         emlxs_sli4_intx_intr,
 145         emlxs_sli4_msi_intr,
 146         emlxs_sli4_disable_intr,
 147         emlxs_sli4_timer,
 148         emlxs_sli4_poll_erratt
 149 };
 150 
 151 
 152 /* ************************************************************************** */
 153 
 154 
 155 /*
 156  * emlxs_sli4_online()
 157  *
 158  * This routine will start initialization of the SLI4 HBA.
 159  */
 160 static int32_t
 161 emlxs_sli4_online(emlxs_hba_t *hba)
 162 {
 163         emlxs_port_t *port = &PPORT;
 164         emlxs_config_t *cfg;
 165         emlxs_vpd_t *vpd;
 166         MAILBOXQ *mbq = NULL;
 167         MAILBOX4 *mb  = NULL;
 168         MATCHMAP *mp  = NULL;
 169         uint32_t i;
 170         uint32_t j;
 171         uint32_t rval = 0;
 172         uint8_t *vpd_data;
 173         uint32_t sli_mode;
 174         uint8_t *outptr;
 175         uint32_t status;
 176         uint32_t fw_check;
 177         uint32_t kern_update = 0;
 178         emlxs_firmware_t hba_fw;
 179         emlxs_firmware_t *fw;
 180         uint16_t ssvid;
 181 
 182         cfg = &CFG;
 183         vpd = &VPD;
 184 
 185         sli_mode = EMLXS_HBA_SLI4_MODE;
 186         hba->sli_mode = sli_mode;
 187 
 188         /* Set the fw_check flag */
 189         fw_check = cfg[CFG_FW_CHECK].current;
 190 
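              /*
               * fw_check is a bit mask from the fw-check config parameter:
               * 0x1 checks Sun-branded adapters, 0x2 checks any adapter
               * with a firmware id, and 0x4 forces a kernel (boot code)
               * update as well.
               */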
 191         if ((fw_check & 0x04) ||
 192             (hba->fw_flag & FW_UPDATE_KERNEL)) {
 193                 kern_update = 1;
 194         }
 195 
 196         hba->mbox_queue_flag = 0;
 197         hba->fc_edtov = FF_DEF_EDTOV;
 198         hba->fc_ratov = FF_DEF_RATOV;
 199         hba->fc_altov = FF_DEF_ALTOV;
 200         hba->fc_arbtov = FF_DEF_ARBTOV;
 201 
 202         /* Target mode not supported */
 203         if (hba->tgt_mode) {
 204                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
 205                     "Target mode not supported in SLI4.");
 206 
 207                 return (ENOMEM);
 208         }
 209 
 210         /* Networking not supported */
 211         if (cfg[CFG_NETWORK_ON].current) {
 212                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
 213                     "Networking not supported in SLI4, turning it off");
 214                 cfg[CFG_NETWORK_ON].current = 0;
 215         }
 216 
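              /*
               * One I/O channel (WQ) is created per interrupt vector for
               * each configured num-wq; if that exceeds MAX_CHANNEL, num-wq
               * is forced back to 1 below.
               */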
 217         hba->chan_count = hba->intr_count * cfg[CFG_NUM_WQ].current;
 218         if (hba->chan_count > MAX_CHANNEL) {
 219                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
 220                     "Max channels exceeded, dropping num-wq from %d to 1",
 221                     cfg[CFG_NUM_WQ].current);
 222                 cfg[CFG_NUM_WQ].current = 1;
 223                 hba->chan_count = hba->intr_count * cfg[CFG_NUM_WQ].current;
 224         }
 225         hba->channel_fcp = 0; /* First channel */
 226 
 227         /* Default channel for everything else is the last channel */
 228         hba->channel_ip = hba->chan_count - 1;
 229         hba->channel_els = hba->chan_count - 1;
 230         hba->channel_ct = hba->chan_count - 1;
 231 
 232         hba->fc_iotag = 1;
 233         hba->io_count = 0;
 234         hba->channel_tx_count = 0;
 235 
 236         /* Initialize the local dump region buffer */
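              /*
               * This DMA buffer is reused below as the response buffer for
               * the DUMP mailbox commands (VPD data and FCoE parameters).
               */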
 237         bzero(&hba->sli.sli4.dump_region, sizeof (MBUF_INFO));
 238         hba->sli.sli4.dump_region.size = EMLXS_DUMP_REGION_SIZE;
 239         hba->sli.sli4.dump_region.flags = FC_MBUF_DMA | FC_MBUF_SNGLSG
 240             | FC_MBUF_DMA32;
 241         hba->sli.sli4.dump_region.align = ddi_ptob(hba->dip, 1L);
 242 
 243         (void) emlxs_mem_alloc(hba, &hba->sli.sli4.dump_region);
 244 
 245         if (hba->sli.sli4.dump_region.virt == NULL) {
 246                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
 247                     "Unable to allocate dump region buffer.");
 248 
 249                 return (ENOMEM);
 250         }
 251 
 252         /*
 253          * Get a buffer which will be used repeatedly for mailbox commands
 254          */
 255         mbq = (MAILBOXQ *) kmem_zalloc((sizeof (MAILBOXQ)), KM_SLEEP);
 256 
 257         mb = (MAILBOX4 *)mbq;
 258 
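      /*
       * The firmware update path below jumps back here to reset and
       * reinitialize the adapter after a download.
       */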
 259 reset:
 260         /* Reset & Initialize the adapter */
 261         if (emlxs_sli4_hba_init(hba)) {
 262                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
 263                     "Unable to init hba.");
 264 
 265                 rval = EIO;
 266                 goto failed1;
 267         }
 268 
 269 #ifdef FMA_SUPPORT
 270         /* Access handle validation */
 271         if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
 272             != DDI_FM_OK) ||
 273             (emlxs_fm_check_acc_handle(hba, hba->sli.sli4.bar1_acc_handle)
 274             != DDI_FM_OK) ||
 275             (emlxs_fm_check_acc_handle(hba, hba->sli.sli4.bar2_acc_handle)
 276             != DDI_FM_OK)) {
 277                 EMLXS_MSGF(EMLXS_CONTEXT,
 278                     &emlxs_invalid_access_handle_msg, NULL);
 279 
 280                 rval = EIO;
 281                 goto failed1;
 282         }
 283 #endif  /* FMA_SUPPORT */
 284 
 285         /*
 286          * Setup and issue mailbox READ REV command
 287          */
 288         vpd->opFwRev = 0;
 289         vpd->postKernRev = 0;
 290         vpd->sli1FwRev = 0;
 291         vpd->sli2FwRev = 0;
 292         vpd->sli3FwRev = 0;
 293         vpd->sli4FwRev = 0;
 294 
 295         vpd->postKernName[0] = 0;
 296         vpd->opFwName[0] = 0;
 297         vpd->sli1FwName[0] = 0;
 298         vpd->sli2FwName[0] = 0;
 299         vpd->sli3FwName[0] = 0;
 300         vpd->sli4FwName[0] = 0;
 301 
 302         vpd->opFwLabel[0] = 0;
 303         vpd->sli1FwLabel[0] = 0;
 304         vpd->sli2FwLabel[0] = 0;
 305         vpd->sli3FwLabel[0] = 0;
 306         vpd->sli4FwLabel[0] = 0;
 307 
 308         EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
 309 
 310         emlxs_mb_read_rev(hba, mbq, 0);
 311         if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
 312                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
 313                     "Unable to read rev. Mailbox cmd=%x status=%x",
 314                     mb->mbxCommand, mb->mbxStatus);
 315 
 316                 rval = EIO;
 317                 goto failed1;
 318 
 319         }
 320 
  321         emlxs_data_dump(port, "RD_REV", (uint32_t *)mb, 18, 0);
 322         if (mb->un.varRdRev4.sliLevel != 4) {
 323                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
 324                     "Invalid read rev Version for SLI4: 0x%x",
 325                     mb->un.varRdRev4.sliLevel);
 326 
 327                 rval = EIO;
 328                 goto failed1;
 329         }
 330 
 331         switch (mb->un.varRdRev4.dcbxMode) {
 332         case EMLXS_DCBX_MODE_CIN:       /* Mapped to nonFIP mode */
 333                 hba->flag &= ~FC_FIP_SUPPORTED;
 334                 break;
 335 
 336         case EMLXS_DCBX_MODE_CEE:       /* Mapped to FIP mode */
 337                 hba->flag |= FC_FIP_SUPPORTED;
 338                 break;
 339 
 340         default:
 341                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
 342                     "Invalid read rev dcbx mode for SLI4: 0x%x",
 343                     mb->un.varRdRev4.dcbxMode);
 344 
 345                 rval = EIO;
 346                 goto failed1;
 347         }
 348 
 349 
 350         /* Save information as VPD data */
 351         vpd->rBit = 1;
 352 
 353         vpd->sli4FwRev = (mb->un.varRdRev4.ULPFwId);
 354         bcopy((char *)mb->un.varRdRev4.ULPFwName, vpd->sli4FwName, 16);
 355 
 356         vpd->opFwRev = (mb->un.varRdRev4.ULPFwId);
 357         bcopy((char *)mb->un.varRdRev4.ULPFwName, vpd->opFwName, 16);
 358 
 359         vpd->postKernRev = (mb->un.varRdRev4.ARMFwId);
 360         bcopy((char *)mb->un.varRdRev4.ARMFwName, vpd->postKernName, 16);
 361 
 362         vpd->fcphHigh = mb->un.varRdRev4.fcphHigh;
 363         vpd->fcphLow = mb->un.varRdRev4.fcphLow;
 364         vpd->feaLevelHigh = mb->un.varRdRev4.feaLevelHigh;
 365         vpd->feaLevelLow = mb->un.varRdRev4.feaLevelLow;
 366 
 367         /* Decode FW labels */
 368         emlxs_decode_label(vpd->sli4FwName, vpd->sli4FwName, 0);
 369         emlxs_decode_label(vpd->opFwName, vpd->opFwName, 0);
 370         emlxs_decode_label(vpd->postKernName, vpd->postKernName, 0);
 371 
 372         if (hba->model_info.chip == EMLXS_BE2_CHIP) {
 373                 (void) strcpy(vpd->sli4FwLabel, "be2.ufi");
 374         } else if (hba->model_info.chip == EMLXS_BE3_CHIP) {
 375                 (void) strcpy(vpd->sli4FwLabel, "be3.ufi");
 376         } else {
 377                 (void) strcpy(vpd->sli4FwLabel, "sli4.fw");
 378         }
 379 
 380         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
 381             "VPD ULP:%08x %s ARM:%08x %s f:%d %d %d %d : dcbx %d",
 382             vpd->opFwRev, vpd->opFwName, vpd->postKernRev, vpd->postKernName,
 383             vpd->fcphHigh, vpd->fcphLow, vpd->feaLevelHigh, vpd->feaLevelLow,
 384             mb->un.varRdRev4.dcbxMode);
 385 
 386         /* No key information is needed for SLI4 products */
 387 
 388         /* Get adapter VPD information */
 389         vpd->port_index = (uint32_t)-1;
 390 
 391         /* Reuse mbq from previous mbox */
 392         bzero(mbq, sizeof (MAILBOXQ));
 393 
 394         emlxs_mb_dump_vpd(hba, mbq, 0);
 395         vpd_data = hba->sli.sli4.dump_region.virt;
 396 
 397         if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
 398             MBX_SUCCESS) {
 399                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
 400                     "No VPD found. status=%x", mb->mbxStatus);
 401         } else {
 402                 EMLXS_MSGF(EMLXS_CONTEXT,
 403                     &emlxs_init_debug_msg,
 404                     "VPD dumped. rsp_cnt=%d status=%x",
 405                     mb->un.varDmp4.rsp_cnt, mb->mbxStatus);
 406 
 407                 if (mb->un.varDmp4.rsp_cnt) {
 408                         EMLXS_MPDATA_SYNC(hba->sli.sli4.dump_region.dma_handle,
 409                             0, mb->un.varDmp4.rsp_cnt, DDI_DMA_SYNC_FORKERNEL);
 410 
 411 #ifdef FMA_SUPPORT
 412                         if (hba->sli.sli4.dump_region.dma_handle) {
 413                                 if (emlxs_fm_check_dma_handle(hba,
 414                                     hba->sli.sli4.dump_region.dma_handle)
 415                                     != DDI_FM_OK) {
 416                                         EMLXS_MSGF(EMLXS_CONTEXT,
 417                                             &emlxs_invalid_dma_handle_msg,
 418                                             "emlxs_sli4_online: hdl=%p",
 419                                             hba->sli.sli4.dump_region.
 420                                             dma_handle);
 421                                         rval = EIO;
 422                                         goto failed1;
 423                                 }
 424                         }
 425 #endif /* FMA_SUPPORT */
 426 
 427                 }
 428         }
 429 
 430         if (vpd_data[0]) {
 431                 (void) emlxs_parse_vpd(hba, (uint8_t *)vpd_data,
 432                     mb->un.varDmp4.rsp_cnt);
 433 
 434                 /*
 435                  * If there is a VPD part number, and it does not
 436                  * match the current default HBA model info,
 437                  * replace the default data with an entry that
 438                  * does match.
 439                  *
  440                  * After emlxs_parse_vpd(), model holds the VPD value
  441                  * for V2 and part_num holds the value for PN. These
  442                  * two values are NOT necessarily the same.
 443                  */
 444 
 445                 rval = 0;
 446                 if ((vpd->model[0] != 0) &&
 447                     (strcmp(&vpd->model[0], hba->model_info.model) != 0)) {
 448 
 449                         /* First scan for a V2 match */
 450 
 451                         for (i = 1; i < emlxs_pci_model_count; i++) {
 452                                 if (strcmp(&vpd->model[0],
 453                                     emlxs_pci_model[i].model) == 0) {
 454                                         bcopy(&emlxs_pci_model[i],
 455                                             &hba->model_info,
 456                                             sizeof (emlxs_model_t));
 457                                         rval = 1;
 458                                         break;
 459                                 }
 460                         }
 461                 }
 462 
 463                 if (!rval && (vpd->part_num[0] != 0) &&
 464                     (strcmp(&vpd->part_num[0], hba->model_info.model) != 0)) {
 465 
 466                         /* Next scan for a PN match */
 467 
 468                         for (i = 1; i < emlxs_pci_model_count; i++) {
 469                                 if (strcmp(&vpd->part_num[0],
 470                                     emlxs_pci_model[i].model) == 0) {
 471                                         bcopy(&emlxs_pci_model[i],
 472                                             &hba->model_info,
 473                                             sizeof (emlxs_model_t));
 474                                         break;
 475                                 }
 476                         }
 477                 }
 478 
 479                 /* HP CNA port indices start at 1 instead of 0 */
 480                 if ((hba->model_info.chip == EMLXS_BE2_CHIP) ||
 481                     (hba->model_info.chip == EMLXS_BE3_CHIP)) {
 482 
 483                         ssvid = ddi_get16(hba->pci_acc_handle,
 484                             (uint16_t *)(hba->pci_addr + PCI_SSVID_REGISTER));
 485 
 486                         if ((ssvid == PCI_SSVID_HP) && (vpd->port_index > 0)) {
 487                                 vpd->port_index--;
 488                         }
 489                 }
 490 
 491                 /*
  492                  * Now let's update hba->model_info with the real
 493                  * VPD data, if any.
 494                  */
 495 
 496                 /*
 497                  * Replace the default model description with vpd data
 498                  */
 499                 if (vpd->model_desc[0] != 0) {
 500                         (void) strcpy(hba->model_info.model_desc,
 501                             vpd->model_desc);
 502                 }
 503 
 504                 /* Replace the default model with vpd data */
 505                 if (vpd->model[0] != 0) {
 506                         (void) strcpy(hba->model_info.model, vpd->model);
 507                 }
 508 
 509                 /* Replace the default program types with vpd data */
 510                 if (vpd->prog_types[0] != 0) {
 511                         emlxs_parse_prog_types(hba, vpd->prog_types);
 512                 }
 513         }
 514 
 515         /*
  516          * Since the adapter model may have changed with the VPD data,
  517          * double-check that the adapter is actually supported.
 518          */
 519         if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
 520                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
 521                     "Unsupported adapter found.  "
 522                     "Id:%d  Device id:0x%x  SSDID:0x%x  Model:%s",
 523                     hba->model_info.id, hba->model_info.device_id,
 524                     hba->model_info.ssdid, hba->model_info.model);
 525 
 526                 rval = EIO;
 527                 goto failed1;
 528         }
 529 
 530         (void) strcpy(vpd->boot_version, vpd->sli4FwName);
 531 
 532         /* Get fcode version property */
 533         emlxs_get_fcode_version(hba);
 534 
 535         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
 536             "Firmware: kern=%08x stub=%08x sli1=%08x", vpd->postKernRev,
 537             vpd->opFwRev, vpd->sli1FwRev);
 538 
 539         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
 540             "Firmware: sli2=%08x sli3=%08x sli4=%08x fl=%x", vpd->sli2FwRev,
 541             vpd->sli3FwRev, vpd->sli4FwRev, vpd->feaLevelHigh);
 542 
 543         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
 544             "BIOS: boot=%s fcode=%s", vpd->boot_version, vpd->fcode_version);
 545 
 546         /*
 547          * If firmware checking is enabled and the adapter model indicates
 548          * a firmware image, then perform firmware version check
 549          */
 550         hba->fw_flag = 0;
 551         hba->fw_timer = 0;
 552 
 553         if (((fw_check & 0x1) && (hba->model_info.flags & EMLXS_SUN_BRANDED) &&
 554             hba->model_info.fwid) || ((fw_check & 0x2) &&
 555             hba->model_info.fwid)) {
 556 
 557                 /* Find firmware image indicated by adapter model */
 558                 fw = NULL;
 559                 for (i = 0; i < emlxs_fw_count; i++) {
 560                         if (emlxs_fw_table[i].id == hba->model_info.fwid) {
 561                                 fw = &emlxs_fw_table[i];
 562                                 break;
 563                         }
 564                 }
 565 
 566                 /*
 567                  * If the image was found, then verify current firmware
 568                  * versions of adapter
 569                  */
 570                 if (fw) {
 571 
 572                         /* Obtain current firmware version info */
 573                         if ((hba->model_info.chip == EMLXS_BE2_CHIP) ||
 574                             (hba->model_info.chip == EMLXS_BE3_CHIP)) {
 575                                 (void) emlxs_sli4_read_fw_version(hba, &hba_fw);
 576                         } else {
 577                                 hba_fw.kern = vpd->postKernRev;
 578                                 hba_fw.stub = vpd->opFwRev;
 579                                 hba_fw.sli1 = vpd->sli1FwRev;
 580                                 hba_fw.sli2 = vpd->sli2FwRev;
 581                                 hba_fw.sli3 = vpd->sli3FwRev;
 582                                 hba_fw.sli4 = vpd->sli4FwRev;
 583                         }
 584 
 585                         if (!kern_update &&
 586                             ((fw->kern && (hba_fw.kern != fw->kern)) ||
 587                             (fw->stub && (hba_fw.stub != fw->stub)))) {
 588 
 589                                 hba->fw_flag |= FW_UPDATE_NEEDED;
 590 
 591                         } else if ((fw->kern && (hba_fw.kern != fw->kern)) ||
 592                             (fw->stub && (hba_fw.stub != fw->stub)) ||
 593                             (fw->sli1 && (hba_fw.sli1 != fw->sli1)) ||
 594                             (fw->sli2 && (hba_fw.sli2 != fw->sli2)) ||
 595                             (fw->sli3 && (hba_fw.sli3 != fw->sli3)) ||
 596                             (fw->sli4 && (hba_fw.sli4 != fw->sli4))) {
 597 
 598                                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
 599                                     "Firmware update needed. "
 600                                     "Updating. id=%d fw=%d",
 601                                     hba->model_info.id, hba->model_info.fwid);
 602 
 603 #ifdef MODFW_SUPPORT
 604                                 /*
  605                                  * Load the firmware image now.
  606                                  * If MODFW_SUPPORT is not defined, the
  607                                  * firmware image will already be defined
  608                                  * in the emlxs_fw_table.
 609                                  */
 610                                 emlxs_fw_load(hba, fw);
 611 #endif /* MODFW_SUPPORT */
 612 
 613                                 if (fw->image && fw->size) {
 614                                         if (emlxs_fw_download(hba,
 615                                             (char *)fw->image, fw->size, 0)) {
 616                                                 EMLXS_MSGF(EMLXS_CONTEXT,
 617                                                     &emlxs_init_msg,
 618                                                     "Firmware update failed.");
 619 
 620                                                 hba->fw_flag |=
 621                                                     FW_UPDATE_NEEDED;
 622                                         }
 623 #ifdef MODFW_SUPPORT
 624                                         /*
 625                                          * Unload the firmware image from
 626                                          * kernel memory
 627                                          */
 628                                         emlxs_fw_unload(hba, fw);
 629 #endif /* MODFW_SUPPORT */
 630 
 631                                         fw_check = 0;
 632 
 633                                         goto reset;
 634                                 }
 635 
 636                                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
 637                                     "Firmware image unavailable.");
 638                         } else {
 639                                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
 640                                     "Firmware update not needed.");
 641                         }
 642                 } else {
 643                         /*
 644                          * This means either the adapter database is not
 645                          * correct or a firmware image is missing from the
  646                          * build.
 647                          */
 648                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
 649                             "Firmware image unavailable. id=%d fw=%d",
 650                             hba->model_info.id, hba->model_info.fwid);
 651                 }
 652         }
 653 
 654         /* Reuse mbq from previous mbox */
 655         bzero(mbq, sizeof (MAILBOXQ));
 656 
 657         emlxs_mb_dump_fcoe(hba, mbq, 0);
 658 
 659         if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
 660             MBX_SUCCESS) {
 661                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
 662                     "No FCOE info found. status=%x", mb->mbxStatus);
 663         } else {
 664                 EMLXS_MSGF(EMLXS_CONTEXT,
 665                     &emlxs_init_debug_msg,
 666                     "FCOE info dumped. rsp_cnt=%d status=%x",
 667                     mb->un.varDmp4.rsp_cnt, mb->mbxStatus);
 668                 (void) emlxs_parse_fcoe(hba,
 669                     (uint8_t *)hba->sli.sli4.dump_region.virt,
 670                     mb->un.varDmp4.rsp_cnt);
 671         }
 672 
 673         /* Reuse mbq from previous mbox */
 674         bzero(mbq, sizeof (MAILBOXQ));
 675 
 676         emlxs_mb_request_features(hba, mbq);
 677 
 678         if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
 679                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
 680                     "Unable to REQUEST_FEATURES. Mailbox cmd=%x status=%x",
 681                     mb->mbxCommand, mb->mbxStatus);
 682 
 683                 rval = EIO;
 684                 goto failed1;
 685         }
  686         emlxs_data_dump(port, "REQ_FEATURE", (uint32_t *)mb, 6, 0);
 687 
 688         /* Make sure we get the features we requested */
 689         if (mb->un.varReqFeatures.featuresRequested !=
 690             mb->un.varReqFeatures.featuresEnabled) {
 691 
 692                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
 693                     "Unable to get REQUESTed_FEATURES. want:x%x  got:x%x",
 694                     mb->un.varReqFeatures.featuresRequested,
 695                     mb->un.varReqFeatures.featuresEnabled);
 696 
 697                 rval = EIO;
 698                 goto failed1;
 699         }
 700 
 701         if (mb->un.varReqFeatures.featuresEnabled & SLI4_FEATURE_NPIV) {
 702                 hba->flag |= FC_NPIV_ENABLED;
 703         }
 704 
 705         /* Check enable-npiv driver parameter for now */
 706         if (cfg[CFG_NPIV_ENABLE].current) {
 707                 hba->flag |= FC_NPIV_ENABLED;
 708         }
 709 
 710         /* Reuse mbq from previous mbox */
 711         bzero(mbq, sizeof (MAILBOXQ));
 712 
 713         emlxs_mb_read_config(hba, mbq);
 714         if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
 715                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
 716                     "Unable to READ_CONFIG. Mailbox cmd=%x status=%x",
 717                     mb->mbxCommand, mb->mbxStatus);
 718 
 719                 rval = EIO;
 720                 goto failed1;
 721         }
  722         emlxs_data_dump(port, "READ_CONFIG4", (uint32_t *)mb, 18, 0);
 723 
 724         hba->sli.sli4.XRICount = (mb->un.varRdConfig4.XRICount);
 725         hba->sli.sli4.XRIBase = (mb->un.varRdConfig4.XRIBase);
 726         hba->sli.sli4.RPICount = (mb->un.varRdConfig4.RPICount);
 727         hba->sli.sli4.RPIBase = (mb->un.varRdConfig4.RPIBase);
 728         hba->sli.sli4.VPICount = (mb->un.varRdConfig4.VPICount);
 729         hba->sli.sli4.VPIBase = (mb->un.varRdConfig4.VPIBase);
 730         hba->sli.sli4.VFICount = (mb->un.varRdConfig4.VFICount);
 731         hba->sli.sli4.VFIBase = (mb->un.varRdConfig4.VFIBase);
 732         hba->sli.sli4.FCFICount = (mb->un.varRdConfig4.FCFICount);
 733 
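              /*
               * One VPI is consumed by the physical port, so vpi_max below
               * reflects the VPIs left for NPIV vports.
               */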
 734         if (hba->sli.sli4.VPICount) {
 735                 hba->vpi_max = min(hba->sli.sli4.VPICount, MAX_VPORTS) - 1;
 736         }
 737         hba->vpi_base = mb->un.varRdConfig4.VPIBase;
 738 
 739         /* Set the max node count */
 740         if (cfg[CFG_NUM_NODES].current > 0) {
 741                 hba->max_nodes =
 742                     min(cfg[CFG_NUM_NODES].current,
 743                     hba->sli.sli4.RPICount);
 744         } else {
 745                 hba->max_nodes = hba->sli.sli4.RPICount;
 746         }
 747 
 748         /* Set the io throttle */
 749         hba->io_throttle = hba->sli.sli4.XRICount - IO_THROTTLE_RESERVE;
 750         hba->max_iotag = hba->sli.sli4.XRICount;
 751 
 752         /* Save the link speed capabilities */
 753         vpd->link_speed = (uint16_t)mb->un.varRdConfig4.lmt;
 754         emlxs_process_link_speed(hba);
 755 
 756         /*
 757          * Allocate some memory for buffers
 758          */
 759         if (emlxs_mem_alloc_buffer(hba) == 0) {
 760                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
 761                     "Unable to allocate memory buffers.");
 762 
 763                 rval = ENOMEM;
 764                 goto failed1;
 765         }
 766 
 767         /*
 768          * OutOfRange (oor) iotags are used for abort or close
  769          * XRI commands, or for any WQE that does not require an SGL.
 770          */
 771         hba->fc_oor_iotag = hba->max_iotag;
 772 
 773         if (emlxs_sli4_resource_alloc(hba)) {
 774                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
 775                     "Unable to allocate resources.");
 776 
 777                 rval = ENOMEM;
 778                 goto failed2;
 779         }
  780         emlxs_data_dump(port, "XRIp", (uint32_t *)hba->sli.sli4.XRIp, 18, 0);
 781 
 782 #if (EMLXS_MODREV >= EMLXS_MODREV5)
 783         if ((cfg[CFG_NPIV_ENABLE].current) && (hba->flag & FC_NPIV_ENABLED)) {
 784                 hba->fca_tran->fca_num_npivports = hba->vpi_max;
 785         }
 786 #endif /* >= EMLXS_MODREV5 */
 787 
 788         /* Reuse mbq from previous mbox */
 789         bzero(mbq, sizeof (MAILBOXQ));
 790 
 791         if (emlxs_sli4_post_sgl_pages(hba, mbq)) {
 792                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
 793                     "Unable to post sgl pages.");
 794 
 795                 rval = EIO;
 796                 goto failed3;
 797         }
 798 
 799         /* Reuse mbq from previous mbox */
 800         bzero(mbq, sizeof (MAILBOXQ));
 801 
 802         if (emlxs_sli4_post_hdr_tmplates(hba, mbq)) {
 803                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
 804                     "Unable to post header templates.");
 805 
 806                 rval = EIO;
 807                 goto failed3;
 808         }
 809 
 810         /*
  811          * Add our interrupt routine to the kernel's interrupt chain and
  812          * enable it. If MSI is enabled, this causes Solaris to program
  813          * the MSI address and data registers in PCI config space.
 814          */
 815         if (EMLXS_INTR_ADD(hba) != DDI_SUCCESS) {
 816                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
 817                     "Unable to add interrupt(s).");
 818 
 819                 rval = EIO;
 820                 goto failed3;
 821         }
 822 
 823         /* Reuse mbq from previous mbox */
 824         bzero(mbq, sizeof (MAILBOXQ));
 825 
 826         /* This MUST be done after EMLXS_INTR_ADD */
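              /*
               * One EQ is bound to each interrupt vector, so the final
               * interrupt count must be known before the queues are created.
               */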
 827         if (emlxs_sli4_create_queues(hba, mbq)) {
 828                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
 829                     "Unable to create queues.");
 830 
 831                 rval = EIO;
 832                 goto failed3;
 833         }
 834 
 835         EMLXS_STATE_CHANGE(hba, FC_INIT_CFGPORT);
 836 
 837         /* Get and save the current firmware version (based on sli_mode) */
 838         emlxs_decode_firmware_rev(hba, vpd);
 839 
 840 
 841         EMLXS_STATE_CHANGE(hba, FC_INIT_INITLINK);
 842 
 843         /* Reuse mbq from previous mbox */
 844         bzero(mbq, sizeof (MAILBOXQ));
 845 
 846         /*
 847          * We need to get login parameters for NID
 848          */
 849         (void) emlxs_mb_read_sparam(hba, mbq);
 850         mp = (MATCHMAP *)mbq->bp;
 851         if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
 852                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
 853                     "Unable to read parameters. Mailbox cmd=%x status=%x",
 854                     mb->mbxCommand, mb->mbxStatus);
 855 
 856                 rval = EIO;
 857                 goto failed3;
 858         }
 859 
 860         /* Free the buffer since we were polling */
 861         emlxs_mem_put(hba, MEM_BUF, (void *)mp);
 862         mp = NULL;
 863 
 864         /* If no serial number in VPD data, then use the WWPN */
 865         if (vpd->serial_num[0] == 0) {
 866                 outptr = (uint8_t *)&hba->wwpn.IEEE[0];
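                      /* Encode each WWPN byte as two uppercase hex digits */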
 867                 for (i = 0; i < 12; i++) {
 868                         status = *outptr++;
 869                         j = ((status & 0xf0) >> 4);
 870                         if (j <= 9) {
 871                                 vpd->serial_num[i] =
 872                                     (char)((uint8_t)'0' + (uint8_t)j);
 873                         } else {
 874                                 vpd->serial_num[i] =
 875                                     (char)((uint8_t)'A' + (uint8_t)(j - 10));
 876                         }
 877 
 878                         i++;
 879                         j = (status & 0xf);
 880                         if (j <= 9) {
 881                                 vpd->serial_num[i] =
 882                                     (char)((uint8_t)'0' + (uint8_t)j);
 883                         } else {
 884                                 vpd->serial_num[i] =
 885                                     (char)((uint8_t)'A' + (uint8_t)(j - 10));
 886                         }
 887                 }
 888 
 889                 /*
  890                  * Set port number and port index to zero.
  891                  * The WWNs are unique to each port and therefore port_num
  892                  * must equal zero. This affects the hba_fru_details
  893                  * structure in fca_bind_port().
 894                  */
 895                 vpd->port_num[0] = 0;
 896                 vpd->port_index = 0;
 897         }
 898 
 899         /* Make attempt to set a port index */
 900         if (vpd->port_index == (uint32_t)-1) {
 901                 dev_info_t *p_dip;
 902                 dev_info_t *c_dip;
 903 
 904                 p_dip = ddi_get_parent(hba->dip);
 905                 c_dip = ddi_get_child(p_dip);
 906 
 907                 vpd->port_index = 0;
 908                 while (c_dip && (hba->dip != c_dip)) {
 909                         c_dip = ddi_get_next_sibling(c_dip);
 910 
 911                         if (strcmp(ddi_get_name(c_dip), "ethernet")) {
 912                                 vpd->port_index++;
 913                         }
 914                 }
 915         }
 916 
 917         if (vpd->port_num[0] == 0) {
 918                 if (hba->model_info.channels > 1) {
 919                         (void) sprintf(vpd->port_num, "%d", vpd->port_index);
 920                 }
 921         }
 922 
 923         if (vpd->id[0] == 0) {
 924                 (void) sprintf(vpd->id, "%s %d",
 925                     hba->model_info.model_desc, vpd->port_index);
 926 
 927         }
 928 
 929         if (vpd->manufacturer[0] == 0) {
 930                 (void) strcpy(vpd->manufacturer, hba->model_info.manufacturer);
 931         }
 932 
 933         if (vpd->part_num[0] == 0) {
 934                 (void) strcpy(vpd->part_num, hba->model_info.model);
 935         }
 936 
 937         if (vpd->model_desc[0] == 0) {
 938                 (void) sprintf(vpd->model_desc, "%s %d",
 939                     hba->model_info.model_desc, vpd->port_index);
 940         }
 941 
 942         if (vpd->model[0] == 0) {
 943                 (void) strcpy(vpd->model, hba->model_info.model);
 944         }
 945 
 946         if (vpd->prog_types[0] == 0) {
 947                 emlxs_build_prog_types(hba, vpd->prog_types);
 948         }
 949 
 950         /* Create the symbolic names */
 951         (void) sprintf(hba->snn, "Emulex %s FV%s DV%s %s",
 952             hba->model_info.model, hba->vpd.fw_version, emlxs_version,
 953             (char *)utsname.nodename);
 954 
 955         (void) sprintf(hba->spn,
 956             "Emulex PPN-%01x%01x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
 957             hba->wwpn.nameType, hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb,
 958             hba->wwpn.IEEE[0], hba->wwpn.IEEE[1], hba->wwpn.IEEE[2],
 959             hba->wwpn.IEEE[3], hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
 960 
 961 
 962         EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN);
 963         emlxs_sli4_enable_intr(hba);
 964 
 965         /* Reuse mbq from previous mbox */
 966         bzero(mbq, sizeof (MAILBOXQ));
 967 
 968         /*
  969          * Set up and issue the mailbox INITIALIZE LINK command.
  970          * At this point, the interrupt will be generated by the hardware.
  971          * Do this only if persist-linkdown is not set.
 972          */
 973         if (cfg[CFG_PERSIST_LINKDOWN].current == 0) {
 974                 emlxs_mb_init_link(hba, mbq,
 975                     cfg[CFG_TOPOLOGY].current, cfg[CFG_LINK_SPEED].current);
 976 
 977                 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0)
 978                     != MBX_SUCCESS) {
 979                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
 980                             "Unable to initialize link. "
 981                             "Mailbox cmd=%x status=%x",
 982                             mb->mbxCommand, mb->mbxStatus);
 983 
 984                         rval = EIO;
 985                         goto failed3;
 986                 }
 987 
 988                 /* Wait for link to come up */
 989                 i = cfg[CFG_LINKUP_DELAY].current;
 990                 while (i && (hba->state < FC_LINK_UP)) {
 991                         /* Check for hardware error */
 992                         if (hba->state == FC_ERROR) {
 993                                 EMLXS_MSGF(EMLXS_CONTEXT,
 994                                     &emlxs_init_failed_msg,
  995                                     "Adapter error. cmd=%x status=%x",
  996                                     mb->mbxCommand, mb->mbxStatus);
 997 
 998                                 rval = EIO;
 999                                 goto failed3;
1000                         }
1001 
1002                         DELAYMS(1000);
1003                         i--;
1004                 }
1005         } else {
1006                 EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN_PERSIST);
1007         }
1008 
1009         /*
 1010          * The Leadville driver will now handle the FLOGI at the driver level
1011          */
1012 
1013         if (mbq) {
1014                 (void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1015                 mbq = NULL;
1016                 mb = NULL;
1017         }
1018         return (0);
1019 
1020 failed3:
1021         EMLXS_STATE_CHANGE(hba, FC_ERROR);
1022 
1023         if (mp) {
1024                 emlxs_mem_put(hba, MEM_BUF, (void *)mp);
1025                 mp = NULL;
1026         }
1027 
1028 
1029         if (hba->intr_flags & EMLXS_MSI_ADDED) {
1030                 (void) EMLXS_INTR_REMOVE(hba);
1031         }
1032 
1033         emlxs_sli4_resource_free(hba);
1034 
1035 failed2:
1036         (void) emlxs_mem_free_buffer(hba);
1037 
1038 failed1:
1039         if (mbq) {
1040                 (void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1041                 mbq = NULL;
1042                 mb = NULL;
1043         }
1044 
1045         if (hba->sli.sli4.dump_region.virt) {
1046                 (void) emlxs_mem_free(hba, &hba->sli.sli4.dump_region);
1047         }
1048 
1049         if (rval == 0) {
1050                 rval = EIO;
1051         }
1052 
1053         return (rval);
1054 
1055 } /* emlxs_sli4_online() */
1056 
1057 
1058 static void
1059 emlxs_sli4_offline(emlxs_hba_t *hba)
1060 {
1061         emlxs_port_t            *port = &PPORT;
1062         MAILBOXQ mboxq;
1063 
1064         /* Reverse emlxs_sli4_online */
1065 
1066         mutex_enter(&EMLXS_PORT_LOCK);
1067         if (!(hba->flag & FC_INTERLOCKED)) {
1068                 mutex_exit(&EMLXS_PORT_LOCK);
1069 
 1070                 /* This is the only way to disable interrupts */
1071                 bzero((void *)&mboxq, sizeof (MAILBOXQ));
1072                 emlxs_mb_resetport(hba, &mboxq);
1073                 if (emlxs_sli4_issue_mbox_cmd(hba, &mboxq,
1074                     MBX_WAIT, 0) != MBX_SUCCESS) {
1075                         /* Timeout occurred */
1076                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1077                             "Timeout: Offline RESET");
1078                 }
1079                 (void) emlxs_check_hdw_ready(hba);
1080         } else {
1081                 mutex_exit(&EMLXS_PORT_LOCK);
1082         }
1083 
1084         /* Shutdown the adapter interface */
1085         emlxs_sli4_hba_kill(hba);
1086 
1087         /* Free SLI shared memory */
1088         emlxs_sli4_resource_free(hba);
1089 
1090         /* Free driver shared memory */
1091         (void) emlxs_mem_free_buffer(hba);
1092 
1093         /* Free the host dump region buffer */
1094         (void) emlxs_mem_free(hba, &hba->sli.sli4.dump_region);
1095 
1096 } /* emlxs_sli4_offline() */
1097 
1098 
1099 /*ARGSUSED*/
1100 static int
1101 emlxs_sli4_map_hdw(emlxs_hba_t *hba)
1102 {
1103         emlxs_port_t            *port = &PPORT;
1104         dev_info_t              *dip;
1105         ddi_device_acc_attr_t   dev_attr;
1106         int                     status;
1107 
1108         dip = (dev_info_t *)hba->dip;
1109         dev_attr = emlxs_dev_acc_attr;
1110 
1111         /*
1112          * Map in Hardware BAR pages that will be used for
1113          * communication with HBA.
1114          */
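              /*
               * BAR1 provides control/status registers (including the MPU EP
               * semaphore); BAR2 provides the doorbell registers used below.
               */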
1115         if (hba->sli.sli4.bar1_acc_handle == 0) {
1116                 status = ddi_regs_map_setup(dip, PCI_BAR1_RINDEX,
1117                     (caddr_t *)&hba->sli.sli4.bar1_addr,
1118                     0, 0, &dev_attr, &hba->sli.sli4.bar1_acc_handle);
1119                 if (status != DDI_SUCCESS) {
1120                         EMLXS_MSGF(EMLXS_CONTEXT,
1121                             &emlxs_attach_failed_msg,
1122                             "(PCI) ddi_regs_map_setup BAR1 failed. "
1123                             "stat=%d mem=%p attr=%p hdl=%p",
1124                             status, &hba->sli.sli4.bar1_addr, &dev_attr,
1125                             &hba->sli.sli4.bar1_acc_handle);
1126                         goto failed;
1127                 }
1128         }
1129 
1130         if (hba->sli.sli4.bar2_acc_handle == 0) {
1131                 status = ddi_regs_map_setup(dip, PCI_BAR2_RINDEX,
1132                     (caddr_t *)&hba->sli.sli4.bar2_addr,
1133                     0, 0, &dev_attr, &hba->sli.sli4.bar2_acc_handle);
1134                 if (status != DDI_SUCCESS) {
1135                         EMLXS_MSGF(EMLXS_CONTEXT,
1136                             &emlxs_attach_failed_msg,
1137                             "ddi_regs_map_setup BAR2 failed. status=%x",
1138                             status);
1139                         goto failed;
1140                 }
1141         }
1142 
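              /*
               * Allocate the bootstrap mailbox (BMBX) DMA buffer. Mailbox
               * commands are issued through this buffer via the BAR2 mailbox
               * doorbell; see emlxs_issue_bootstrap_mb().
               */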
1143         if (hba->sli.sli4.bootstrapmb.virt == 0) {
1144                 MBUF_INFO       *buf_info;
1145                 MBUF_INFO       bufinfo;
1146 
1147                 buf_info = &bufinfo;
1148 
1149                 bzero(buf_info, sizeof (MBUF_INFO));
1150                 buf_info->size = EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE;
1151                 buf_info->flags =
1152                     FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
1153                 buf_info->align = ddi_ptob(dip, 1L);
1154 
1155                 (void) emlxs_mem_alloc(hba, buf_info);
1156 
1157                 if (buf_info->virt == NULL) {
1158                         goto failed;
1159                 }
1160 
1161                 hba->sli.sli4.bootstrapmb.virt = buf_info->virt;
1162                 hba->sli.sli4.bootstrapmb.phys = buf_info->phys;
1163                 hba->sli.sli4.bootstrapmb.size = EMLXS_BOOTSTRAP_MB_SIZE +
1164                     MBOX_EXTENSION_SIZE;
1165                 hba->sli.sli4.bootstrapmb.data_handle = buf_info->data_handle;
1166                 hba->sli.sli4.bootstrapmb.dma_handle = buf_info->dma_handle;
1167                 bzero((char *)hba->sli.sli4.bootstrapmb.virt,
1168                     EMLXS_BOOTSTRAP_MB_SIZE);
1169         }
1170 
 1171         /* Register addresses, offset from the start of their BARs */
1172         hba->sli.sli4.MPUEPSemaphore_reg_addr =
1173             (uint32_t *)(hba->sli.sli4.bar1_addr + CSR_MPU_EP_SEMAPHORE_OFFSET);
1174         hba->sli.sli4.MBDB_reg_addr =
1175             (uint32_t *)(hba->sli.sli4.bar2_addr + PD_MB_DB_OFFSET);
1176         hba->sli.sli4.CQDB_reg_addr =
1177             (uint32_t *)(hba->sli.sli4.bar2_addr + PD_CQ_DB_OFFSET);
1178         hba->sli.sli4.MQDB_reg_addr =
1179             (uint32_t *)(hba->sli.sli4.bar2_addr + PD_MQ_DB_OFFSET);
1180         hba->sli.sli4.WQDB_reg_addr =
1181             (uint32_t *)(hba->sli.sli4.bar2_addr + PD_WQ_DB_OFFSET);
1182         hba->sli.sli4.RQDB_reg_addr =
1183             (uint32_t *)(hba->sli.sli4.bar2_addr + PD_RQ_DB_OFFSET);
1184         hba->chan_count = MAX_CHANNEL;
1185 
1186         return (0);
1187 
1188 failed:
1189 
1190         emlxs_sli4_unmap_hdw(hba);
1191         return (ENOMEM);
1192 
1193 
1194 } /* emlxs_sli4_map_hdw() */
1195 
1196 
1197 /*ARGSUSED*/
1198 static void
1199 emlxs_sli4_unmap_hdw(emlxs_hba_t *hba)
1200 {
1201         MBUF_INFO       bufinfo;
1202         MBUF_INFO       *buf_info = &bufinfo;
1203 
1204         /*
1205          * Free map for Hardware BAR pages that were used for
1206          * communication with HBA.
1207          */
1208         if (hba->sli.sli4.bar1_acc_handle) {
1209                 ddi_regs_map_free(&hba->sli.sli4.bar1_acc_handle);
1210                 hba->sli.sli4.bar1_acc_handle = 0;
1211         }
1212 
1213         if (hba->sli.sli4.bar2_acc_handle) {
1214                 ddi_regs_map_free(&hba->sli.sli4.bar2_acc_handle);
1215                 hba->sli.sli4.bar2_acc_handle = 0;
1216         }
1217         if (hba->sli.sli4.bootstrapmb.virt) {
1218                 bzero(buf_info, sizeof (MBUF_INFO));
1219 
1220                 if (hba->sli.sli4.bootstrapmb.phys) {
1221                         buf_info->phys = hba->sli.sli4.bootstrapmb.phys;
1222                         buf_info->data_handle =
1223                             hba->sli.sli4.bootstrapmb.data_handle;
1224                         buf_info->dma_handle =
1225                             hba->sli.sli4.bootstrapmb.dma_handle;
1226                         buf_info->flags = FC_MBUF_DMA;
1227                 }
1228 
1229                 buf_info->virt = hba->sli.sli4.bootstrapmb.virt;
1230                 buf_info->size = hba->sli.sli4.bootstrapmb.size;
1231                 emlxs_mem_free(hba, buf_info);
1232 
1233                 hba->sli.sli4.bootstrapmb.virt = NULL;
1234         }
1235 
1236         return;
1237 
1238 } /* emlxs_sli4_unmap_hdw() */
1239 
1240 
1241 static int
1242 emlxs_check_hdw_ready(emlxs_hba_t *hba)
1243 {
1244         emlxs_port_t *port = &PPORT;
1245         uint32_t status;
1246         uint32_t i = 0;
1247 
 1248         /* Wait up to 30 seconds for POST/reset completion */
1249         while (i < 30) {
1250                 /* Check Semaphore register to see what the ARM state is */
1251                 status = READ_BAR1_REG(hba, FC_SEMA_REG(hba));
1252 
1253                 /* Check to see if any errors occurred during init */
1254                 if (status & ARM_POST_FATAL) {
1255                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1256                             "SEMA Error: status=0x%x", status);
1257 
1258                         EMLXS_STATE_CHANGE(hba, FC_ERROR);
1259 #ifdef FMA_SUPPORT
1260                         /* Access handle validation */
1261                         EMLXS_CHK_ACC_HANDLE(hba,
1262                             hba->sli.sli4.bar1_acc_handle);
1263 #endif  /* FMA_SUPPORT */
1264                         return (1);
1265                 }
1266                 if ((status & ARM_POST_MASK) == ARM_POST_READY) {
1267                         /* ARM Ready !! */
1268                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1269                             "ARM Ready: status=0x%x", status);
1270 #ifdef FMA_SUPPORT
1271                         /* Access handle validation */
1272                         EMLXS_CHK_ACC_HANDLE(hba,
1273                             hba->sli.sli4.bar1_acc_handle);
1274 #endif  /* FMA_SUPPORT */
1275                         return (0);
1276                 }
1277 
1278                 DELAYMS(1000);
1279                 i++;
1280         }
1281 
1282         /* Timeout occurred */
1283         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1284             "Timeout waiting for READY: status=0x%x", status);
1285 
1286         EMLXS_STATE_CHANGE(hba, FC_ERROR);
1287 
1288 #ifdef FMA_SUPPORT
1289         /* Access handle validation */
1290         EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar1_acc_handle);
1291 #endif  /* FMA_SUPPORT */
1292 
1293         /* Log a dump event - not supported */
1294 
1295         return (2);
1296 
1297 } /* emlxs_check_hdw_ready() */
1298 
1299 
1300 static uint32_t
1301 emlxs_check_bootstrap_ready(emlxs_hba_t *hba, uint32_t tmo)
1302 {
1303         emlxs_port_t *port = &PPORT;
1304         uint32_t status;
1305 
1306         /* Wait for reset completion, tmo is in 10ms ticks */
1307         while (tmo) {
 1308                 /* Check the bootstrap mailbox (BMBX) doorbell register */
1309                 status = READ_BAR2_REG(hba, FC_MBDB_REG(hba));
1310 
 1311                 /* Check to see if the bootstrap mailbox is ready */
1312                 if (status & BMBX_READY) {
1313                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1314                             "BMBX Ready: status=0x%x", status);
1315 #ifdef FMA_SUPPORT
1316                         /* Access handle validation */
1317                         EMLXS_CHK_ACC_HANDLE(hba,
1318                             hba->sli.sli4.bar2_acc_handle);
1319 #endif  /* FMA_SUPPORT */
1320                         return (tmo);
1321                 }
1322 
1323                 DELAYMS(10);
1324                 tmo--;
1325         }
1326 
1327         /* Timeout occurred */
1328         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1329             "Timeout waiting for BMailbox: status=0x%x", status);
1330 
1331         EMLXS_STATE_CHANGE(hba, FC_ERROR);
1332 
1333 #ifdef FMA_SUPPORT
1334         /* Access handle validation */
1335         EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar2_acc_handle);
1336 #endif  /* FMA_SUPPORT */
1337 
1338         /* Log a dump event - not supported */
1339 
1340         return (0);
1341 
1342 } /* emlxs_check_bootstrap_ready() */
1343 
1344 
1345 static uint32_t
1346 emlxs_issue_bootstrap_mb(emlxs_hba_t *hba, uint32_t tmo)
1347 {
1348         emlxs_port_t *port = &PPORT;
1349         uint32_t *iptr;
1350         uint32_t addr30;
1351 
1352         /*
1353          * This routine assumes the bootstrap mbox is loaded
1354          * with the mailbox command to be executed.
1355          *
1356          * First, load the high 30 bits of bootstrap mailbox
1357          */
1358         addr30 = (uint32_t)((hba->sli.sli4.bootstrapmb.phys>>32) & 0xfffffffc);
1359         addr30 |= BMBX_ADDR_HI;
1360         WRITE_BAR2_REG(hba, FC_MBDB_REG(hba), addr30);
1361 
1362         tmo = emlxs_check_bootstrap_ready(hba, tmo);
1363         if (tmo == 0) {
1364                 return (0);
1365         }
1366 
1367         /* Load the low 30 bits of bootstrap mailbox */
1368         addr30 = (uint32_t)((hba->sli.sli4.bootstrapmb.phys>>2) & 0xfffffffc);
1369         WRITE_BAR2_REG(hba, FC_MBDB_REG(hba), addr30);
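              /* This second (low address) write posts the command to the port */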
1370 
1371         tmo = emlxs_check_bootstrap_ready(hba, tmo);
1372         if (tmo == 0) {
1373                 return (0);
1374         }
1375 
1376         iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
1377 
1378         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1379             "BootstrapMB: %p Completed %08x %08x %08x",
1380             hba->sli.sli4.bootstrapmb.virt,
1381             *iptr, *(iptr+1), *(iptr+2));
1382 
1383         return (tmo);
1384 
1385 } /* emlxs_issue_bootstrap_mb() */
1386 
1387 
1388 static int
1389 emlxs_init_bootstrap_mb(emlxs_hba_t *hba)
1390 {
1391 #ifdef FMA_SUPPORT
1392         emlxs_port_t *port = &PPORT;
1393 #endif /* FMA_SUPPORT */
1394         uint32_t *iptr;
1395         uint32_t tmo;
1396 
1397         if (emlxs_check_hdw_ready(hba)) {
1398                 return (1);
1399         }
1400 
1401         if (hba->flag & FC_BOOTSTRAPMB_INIT) {
1402                 return (0);  /* Already initialized */
1403         }
1404 
1405         /* NOTE: tmo is in 10ms ticks; 3000 ticks = 30 seconds */
1406         tmo = emlxs_check_bootstrap_ready(hba, 3000);
1407         if (tmo == 0) {
1408                 return (1);
1409         }
1410 
1411         /* Special words to initialize bootstrap mbox MUST be little endian */
1412         iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
1413         *iptr++ = LE_SWAP32(MQE_SPECIAL_WORD0);
1414         *iptr = LE_SWAP32(MQE_SPECIAL_WORD1);
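             /*
              * The bootstrap mailbox contents are dumped before and after
              * the command below (EndianIN/EndianOUT), presumably so that
              * correct endian handling of these words can be verified
              * before any real mailbox traffic is issued.
              */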
1415 
1416         EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
1417             MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV);
1418 
1419         emlxs_data_dump(port, "EndianIN", (uint32_t *)iptr, 6, 0);
1420         if (!emlxs_issue_bootstrap_mb(hba, tmo)) {
1421                 return (1);
1422         }
1423         EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
1424             MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORKERNEL);
1425         emlxs_data_dump(port, "EndianOUT", (uint32_t *)iptr, 6, 0);
1426 
1427 #ifdef FMA_SUPPORT
1428         if (emlxs_fm_check_dma_handle(hba, hba->sli.sli4.bootstrapmb.dma_handle)
1429             != DDI_FM_OK) {
1430                 EMLXS_MSGF(EMLXS_CONTEXT,
1431                     &emlxs_invalid_dma_handle_msg,
1432                     "emlxs_init_bootstrap_mb: hdl=%p",
1433                     hba->sli.sli4.bootstrapmb.dma_handle);
1434                 return (1);
1435         }
1436 #endif
1437         hba->flag |= FC_BOOTSTRAPMB_INIT;
1438         return (0);
1439 
1440 } /* emlxs_init_bootstrap_mb() */
1441 
1442 
1443 static uint32_t
1444 emlxs_sli4_hba_init(emlxs_hba_t *hba)
1445 {
1446         int rc;
1447         uint16_t i;
1448         emlxs_port_t *vport;
1449         emlxs_config_t *cfg = &CFG;
1450         CHANNEL *cp;
1451 
1452         /* Restart the adapter */
1453         if (emlxs_sli4_hba_reset(hba, 1, 0, 0)) {
1454                 return (1);
1455         }
1456 
1457         for (i = 0; i < hba->chan_count; i++) {
1458                 cp = &hba->chan[i];
1459                 cp->iopath = (void *)&hba->sli.sli4.wq[i];
1460         }
1461 
1462         /* Initialize all the port objects */
1463         hba->vpi_base = 0;
1464         hba->vpi_max  = 0;
1465         for (i = 0; i < MAX_VPORTS; i++) {
1466                 vport = &VPORT(i);
1467                 vport->hba = hba;
1468                 vport->vpi = i;
1469 
1470                 vport->VPIobj.index = i;
1471                 vport->VPIobj.VPI = i;
1472                 vport->VPIobj.port = vport;
1473         }
1474 
1475         /* Set the max node count */
1476         if (hba->max_nodes == 0) {
1477                 if (cfg[CFG_NUM_NODES].current > 0) {
1478                         hba->max_nodes = cfg[CFG_NUM_NODES].current;
1479                 } else {
1480                         hba->max_nodes = 4096;
1481                 }
1482         }
1483 
1484         rc = emlxs_init_bootstrap_mb(hba);
1485         if (rc) {
1486                 return (rc);
1487         }
1488 
1489         hba->sli.sli4.cfgFCOE.FCMap[0] = FCOE_FCF_MAP0;
1490         hba->sli.sli4.cfgFCOE.FCMap[1] = FCOE_FCF_MAP1;
1491         hba->sli.sli4.cfgFCOE.FCMap[2] = FCOE_FCF_MAP2;
1492 
1493         /* Cache the UE MASK register values for UE error detection */
1494         hba->sli.sli4.ue_mask_lo = ddi_get32(hba->pci_acc_handle,
1495             (uint32_t *)(hba->pci_addr + PCICFG_UE_MASK_LO_OFFSET));
1496         hba->sli.sli4.ue_mask_hi = ddi_get32(hba->pci_acc_handle,
1497             (uint32_t *)(hba->pci_addr + PCICFG_UE_MASK_HI_OFFSET));
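             /*
              * These mask values are read once from PCI config space here;
              * later UE status checks can presumably ignore any error bits
              * that are masked off in these cached values.
              */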
1498 
1499         return (0);
1500 
1501 } /* emlxs_sli4_hba_init() */
1502 
1503 
1504 /*ARGSUSED*/
1505 static uint32_t
1506 emlxs_sli4_hba_reset(emlxs_hba_t *hba, uint32_t restart, uint32_t skip_post,
1507                 uint32_t quiesce)
1508 {
1509         emlxs_port_t *port = &PPORT;
1510         emlxs_port_t *vport;
1511         CHANNEL *cp;
1512         emlxs_config_t *cfg = &CFG;
1513         MAILBOXQ mboxq;
1514         uint32_t i;
1515         uint32_t rc;
1516         uint16_t channelno;
1517 
1518         if (!cfg[CFG_RESET_ENABLE].current) {
1519                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1520                     "Adapter reset disabled.");
1521                 EMLXS_STATE_CHANGE(hba, FC_ERROR);
1522 
1523                 return (1);
1524         }
1525 
1526         if (quiesce == 0) {
1527                 emlxs_sli4_hba_kill(hba);
1528 
1529                 /*
1530                  * Initialize the hardware that will be used to bring
1531                  * the SLI4 port online.
1532                  */
1533                 rc = emlxs_init_bootstrap_mb(hba);
1534                 if (rc) {
1535                         return (rc);
1536                 }
1537         }
1538 
1539         bzero((void *)&mboxq, sizeof (MAILBOXQ));
1540         emlxs_mb_resetport(hba, &mboxq);
1541 
1542         if (quiesce == 0) {
1543                 if (emlxs_sli4_issue_mbox_cmd(hba, &mboxq,
1544                     MBX_POLL, 0) != MBX_SUCCESS) {
1545                         /* Timeout occurred */
1546                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1547                             "Timeout: RESET");
1548                         EMLXS_STATE_CHANGE(hba, FC_ERROR);
1549                         /* Log a dump event - not supported */
1550                         return (1);
1551                 }
1552         } else {
1553                 if (emlxs_sli4_issue_mbox_cmd4quiesce(hba, &mboxq,
1554                     MBX_POLL, 0) != MBX_SUCCESS) {
1555                         EMLXS_STATE_CHANGE(hba, FC_ERROR);
1556                         /* Log a dump event - not supported */
1557                         return (1);
1558                 }
1559         }
1560         emlxs_data_dump(port, "resetPort", (uint32_t *)&mboxq, 12, 0);
1561 
1562         /* Reset the hba structure */
1563         hba->flag &= FC_RESET_MASK;
1564 
1565         for (channelno = 0; channelno < hba->chan_count; channelno++) {
1566                 cp = &hba->chan[channelno];
1567                 cp->hba = hba;
1568                 cp->channelno = channelno;
1569         }
1570 
1571         hba->channel_tx_count = 0;
1572         hba->io_count = 0;
1573         hba->iodone_count = 0;
1574         hba->topology = 0;
1575         hba->linkspeed = 0;
1576         hba->heartbeat_active = 0;
1577         hba->discovery_timer = 0;
1578         hba->linkup_timer = 0;
1579         hba->loopback_tics = 0;
1580 
1581         /* Reset the port objects */
1582         for (i = 0; i < MAX_VPORTS; i++) {
1583                 vport = &VPORT(i);
1584 
1585                 vport->flag &= EMLXS_PORT_RESET_MASK;
1586                 vport->did = 0;
1587                 vport->prev_did = 0;
1588                 vport->lip_type = 0;
1589                 bzero(&vport->fabric_sparam, sizeof (SERV_PARM));
1590                 bzero(&vport->prev_fabric_sparam, sizeof (SERV_PARM));
1591 
1592                 bzero((caddr_t)&vport->node_base, sizeof (NODELIST));
1593                 vport->node_base.nlp_Rpi = 0;
1594                 vport->node_base.nlp_DID = 0xffffff;
1595                 vport->node_base.nlp_list_next = NULL;
1596                 vport->node_base.nlp_list_prev = NULL;
1597                 vport->node_base.nlp_active = 1;
1598                 vport->node_count = 0;
1599 
1600                 if (vport->ub_count < EMLXS_UB_TOKEN_OFFSET) {
1601                         vport->ub_count = EMLXS_UB_TOKEN_OFFSET;
1602                 }
1603         }
1604 
1605         if (emlxs_check_hdw_ready(hba)) {
1606                 return (1);
1607         }
1608 
1609         return (0);
1610 
1611 } /* emlxs_sli4_hba_reset */
1612 
1613 
1614 #define SGL_CMD         0
1615 #define SGL_RESP        1
1616 #define SGL_DATA        2
1617 #define SGL_LAST        0x80
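
     /*
      * sgl_type encoding for emlxs_pkt_to_sgl(): the low bits select which
      * packet cookie list is walked (command, response or data), and the
      * SGL_LAST flag marks the final SGE of the list being built.
      */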
1618 
1619 /*ARGSUSED*/
1620 ULP_SGE64 *
1621 emlxs_pkt_to_sgl(emlxs_port_t *port, ULP_SGE64 *sge, fc_packet_t *pkt,
1622     uint32_t sgl_type, uint32_t *pcnt)
1623 {
1624 #ifdef DEBUG_SGE
1625         emlxs_hba_t *hba = HBA;
1626 #endif
1627         ddi_dma_cookie_t *cp;
1628         uint_t i;
1629         uint_t last;
1630         int32_t size;
1631         int32_t sge_size;
1632         uint64_t sge_addr;
1633         int32_t len;
1634         uint32_t cnt;
1635         uint_t cookie_cnt;
1636         ULP_SGE64 stage_sge;
1637 
1638         last = sgl_type & SGL_LAST;
1639         sgl_type &= ~SGL_LAST;
1640 
1641 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1642         switch (sgl_type) {
1643         case SGL_CMD:
1644                 cp = pkt->pkt_cmd_cookie;
1645                 cookie_cnt = pkt->pkt_cmd_cookie_cnt;
1646                 size = (int32_t)pkt->pkt_cmdlen;
1647                 break;
1648 
1649         case SGL_RESP:
1650                 cp = pkt->pkt_resp_cookie;
1651                 cookie_cnt = pkt->pkt_resp_cookie_cnt;
1652                 size = (int32_t)pkt->pkt_rsplen;
1653                 break;
1654 
1655 
1656         case SGL_DATA:
1657                 cp = pkt->pkt_data_cookie;
1658                 cookie_cnt = pkt->pkt_data_cookie_cnt;
1659                 size = (int32_t)pkt->pkt_datalen;
1660                 break;
1661         }
1662 
1663 #else
1664         switch (sgl_type) {
1665         case SGL_CMD:
1666                 cp = &pkt->pkt_cmd_cookie;
1667                 cookie_cnt = 1;
1668                 size = (int32_t)pkt->pkt_cmdlen;
1669                 break;
1670 
1671         case SGL_RESP:
1672                 cp = &pkt->pkt_resp_cookie;
1673                 cookie_cnt = 1;
1674                 size = (int32_t)pkt->pkt_rsplen;
1675                 break;
1676 
1677 
1678         case SGL_DATA:
1679                 cp = &pkt->pkt_data_cookie;
1680                 cookie_cnt = 1;
1681                 size = (int32_t)pkt->pkt_datalen;
1682                 break;
1683         }
1684 #endif  /* >= EMLXS_MODREV3 */
1685 
1686         stage_sge.offset = 0;
1687         stage_sge.reserved = 0;
1688         stage_sge.last = 0;
1689         cnt = 0;
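             /*
              * Each SGE is built in stage_sge and only copied out to the
              * hardware SGL (byte swapped) when the next one is started;
              * this leaves the final SGE in the staging area so its "last"
              * bit can be set before the copy that follows the loop.
              */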
1690         for (i = 0; i < cookie_cnt && size > 0; i++, cp++) {
1691 
1692                 sge_size = cp->dmac_size;
1693                 sge_addr = cp->dmac_laddress;
1694                 while (sge_size && size) {
1695                         if (cnt) {
1696                                 /* Copy staged SGE before we build next one */
1697                                 BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
1698                                     (uint8_t *)sge, sizeof (ULP_SGE64));
1699                                 sge++;
1700                         }
1701                         len = MIN(EMLXS_MAX_SGE_SIZE, sge_size);
1702                         len = MIN(size, len);
1703 
1704                         stage_sge.addrHigh =
1705                             PADDR_HI(sge_addr);
1706                         stage_sge.addrLow =
1707                             PADDR_LO(sge_addr);
1708                         stage_sge.length = len;
1709                         if (sgl_type == SGL_DATA) {
1710                                 stage_sge.offset = cnt;
1711                         }
1712 #ifdef DEBUG_SGE
1713                         emlxs_data_dump(port, "SGE", (uint32_t *)&stage_sge,
1714                             4, 0);
1715 #endif
1716                         sge_addr += len;
1717                         sge_size -= len;
1718 
1719                         cnt += len;
1720                         size -= len;
1721                 }
1722         }
1723 
1724         if (last) {
1725                 stage_sge.last = 1;
1726         }
1727         BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
1728             sizeof (ULP_SGE64));
1729 
1730         sge++;
1731 
1732         *pcnt = cnt;
1733         return (sge);
1734 
1735 } /* emlxs_pkt_to_sgl */
1736 
1737 
1738 /*ARGSUSED*/
1739 uint32_t
1740 emlxs_sli4_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
1741 {
1742         fc_packet_t *pkt;
1743         XRIobj_t *xrip;
1744         ULP_SGE64 *sge;
1745         emlxs_wqe_t *wqe;
1746         IOCBQ *iocbq;
1747         ddi_dma_cookie_t *cp_cmd;
1748         uint32_t cmd_cnt;
1749         uint32_t resp_cnt;
1750         uint32_t cnt;
1751 
1752         iocbq = (IOCBQ *) &sbp->iocbq;
1753         wqe = &iocbq->wqe;
1754         pkt = PRIV2PKT(sbp);
1755         xrip = sbp->xrip;
1756         sge = xrip->SGList.virt;
1757 
1758 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1759         cp_cmd = pkt->pkt_cmd_cookie;
1760 #else
1761         cp_cmd  = &pkt->pkt_cmd_cookie;
1762 #endif  /* >= EMLXS_MODREV3 */
1763 
1764         iocbq = &sbp->iocbq;
1765         if (iocbq->flag & IOCB_FCP_CMD) {
1766 
1767                 if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
1768                         return (1);
1769                 }
1770 
1771                 /* CMD payload */
1772                 sge = emlxs_pkt_to_sgl(port, sge, pkt, SGL_CMD, &cmd_cnt);
1773 
1774                 /* RSP payload, followed by DATA payload if present */
1775                 if (pkt->pkt_datalen != 0) {
1776                         /* RSP payload */
1777                         sge = emlxs_pkt_to_sgl(port, sge, pkt,
1778                             SGL_RESP, &resp_cnt);
1779 
1780                         /* Data portion */
1781                         sge = emlxs_pkt_to_sgl(port, sge, pkt,
1782                             SGL_DATA | SGL_LAST, &cnt);
1783                 } else {
1784                         /* RSP payload */
1785                         sge = emlxs_pkt_to_sgl(port, sge, pkt,
1786                             SGL_RESP | SGL_LAST, &resp_cnt);
1787                 }
1788 
1789                 wqe->un.FcpCmd.Payload.addrHigh =
1790                     PADDR_HI(cp_cmd->dmac_laddress);
1791                 wqe->un.FcpCmd.Payload.addrLow =
1792                     PADDR_LO(cp_cmd->dmac_laddress);
1793                 wqe->un.FcpCmd.Payload.tus.f.bdeSize = cmd_cnt;
1794                 wqe->un.FcpCmd.PayloadLength = cmd_cnt + resp_cnt;
1795 
1796         } else {
1797 
1798                 if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
1799                         /* CMD payload */
1800                         sge = emlxs_pkt_to_sgl(port, sge, pkt,
1801                             SGL_CMD | SGL_LAST, &cmd_cnt);
1802                 } else {
1803                         /* CMD payload */
1804                         sge = emlxs_pkt_to_sgl(port, sge, pkt,
1805                             SGL_CMD, &cmd_cnt);
1806 
1807                         /* RSP payload */
1808                         sge = emlxs_pkt_to_sgl(port, sge, pkt,
1809                             SGL_RESP | SGL_LAST, &resp_cnt);
1810                         wqe->un.GenReq.PayloadLength = cmd_cnt;
1811                 }
1812 
1813                 wqe->un.GenReq.Payload.addrHigh =
1814                     PADDR_HI(cp_cmd->dmac_laddress);
1815                 wqe->un.GenReq.Payload.addrLow =
1816                     PADDR_LO(cp_cmd->dmac_laddress);
1817                 wqe->un.GenReq.Payload.tus.f.bdeSize = cmd_cnt;
1818         }
1819         return (0);
1820 } /* emlxs_sli4_bde_setup */
1821 
1822 
1823 
1824 
1825 static void
1826 emlxs_sli4_issue_iocb_cmd(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
1827 {
1828         emlxs_port_t *port = &PPORT;
1829         emlxs_buf_t *sbp;
1830         uint32_t channelno;
1831         int32_t throttle;
1832         emlxs_wqe_t *wqe;
1833         emlxs_wqe_t *wqeslot;
1834         WQ_DESC_t *wq;
1835         uint32_t flag;
1836         uint32_t wqdb;
1837         uint16_t next_wqe;
1838         off_t offset;
1839 
1840 
1841         channelno = cp->channelno;
1842         wq = (WQ_DESC_t *)cp->iopath;
1843 
1844 #ifdef SLI4_FASTPATH_DEBUG
1845         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
1846             "ISSUE WQE channel: %x  %p", channelno, wq);
1847 #endif
1848 
1849         throttle = 0;
1850 
1851         /* Check if this is an FCP command while the adapter is not ready */
1852         /* Any channel may be used for FCP_CMD */
1853         if (iocbq && (iocbq->flag & IOCB_FCP_CMD) && (hba->state != FC_READY)) {
1854                 if (!(iocbq->flag & IOCB_SPECIAL) || !iocbq->port ||
1855                     !(((emlxs_port_t *)iocbq->port)->tgt_mode)) {
1856                         emlxs_tx_put(iocbq, 1);
1857                         return;
1858                 }
1859         }
1860 
1861         /* Attempt to acquire CMD_RING lock */
1862         if (mutex_tryenter(&EMLXS_QUE_LOCK(channelno)) == 0) {
1863                 /* Queue it for later */
1864                 if (iocbq) {
1865                         if ((hba->io_count -
1866                             hba->channel_tx_count) > 10) {
1867                                 emlxs_tx_put(iocbq, 1);
1868                                 return;
1869                         } else {
1870 
1871                                 mutex_enter(&EMLXS_QUE_LOCK(channelno));
1872                         }
1873                 } else {
1874                         return;
1875                 }
1876         }
1877         /* EMLXS_QUE_LOCK acquired */
1878 
1879         /* The throttle check only applies to non-special iocbs */
1880         if (iocbq && (!(iocbq->flag & IOCB_SPECIAL))) {
1881                 /* Check if HBA is full */
1882                 throttle = hba->io_throttle - hba->io_active;
1883                 if (throttle <= 0) {
1884                         /* Hitting adapter throttle limit */
1885                         /* Queue it for later */
1886                         if (iocbq) {
1887                                 emlxs_tx_put(iocbq, 1);
1888                         }
1889 
1890                         goto busy;
1891                 }
1892         }
1893 
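             /*
              * The WQ is managed as a circular queue; one slot is
              * effectively left unused so that host_index + 1 == port_index
              * distinguishes a full queue from an empty one.
              */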
1894         /* Check to see if we have room for this WQE */
1895         next_wqe = wq->host_index + 1;
1896         if (next_wqe >= wq->max_index) {
1897                 next_wqe = 0;
1898         }
1899 
1900         if (next_wqe == wq->port_index) {
1901                 /* Queue it for later */
1902                 if (iocbq) {
1903                         emlxs_tx_put(iocbq, 1);
1904                 }
1905                 goto busy;
1906         }
1907 
1908         /*
1909          * We have a command ring slot available
1910          * Make sure we have an iocb to send
1911          */
1912         if (iocbq) {
1913                 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
1914 
1915                 /* Check if the ring already has iocbs waiting */
1916                 if (cp->nodeq.q_first != NULL) {
1917                         /* Put the current iocbq on the tx queue */
1918                         emlxs_tx_put(iocbq, 0);
1919 
1920                         /*
1921                          * Attempt to replace it with the next iocbq
1922                          * in the tx queue
1923                          */
1924                         iocbq = emlxs_tx_get(cp, 0);
1925                 }
1926 
1927                 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
1928         } else {
1929                 iocbq = emlxs_tx_get(cp, 1);
1930         }
1931 
1932 sendit:
1933         /* Process each iocbq */
1934         while (iocbq) {
1935 
1936                 wqe = &iocbq->wqe;
1937 #ifdef SLI4_FASTPATH_DEBUG
1938                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
1939                     "ISSUE QID %d WQE iotag: %x xri: %x", wq->qid,
1940                     wqe->RequestTag, wqe->XRITag);
1941 #endif
1942 
1943                 sbp = iocbq->sbp;
1944                 if (sbp) {
1945                         /* If exchange removed after wqe was prep'ed, drop it */
1946                         if (!(sbp->xrip)) {
1947                                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
1948                                     "Xmit WQE iotag: %x xri: %x aborted",
1949                                     wqe->RequestTag, wqe->XRITag);
1950 
1951                                 /* Get next iocb from the tx queue */
1952                                 iocbq = emlxs_tx_get(cp, 1);
1953                                 continue;
1954                         }
1955 
1956                         if (sbp->pkt_flags & PACKET_DELAY_REQUIRED) {
1957 
1958                                 /* Perform delay */
1959                                 if ((channelno == hba->channel_els) &&
1960                                     !(iocbq->flag & IOCB_FCP_CMD)) {
1961                                         drv_usecwait(100000);
1962                                 } else {
1963                                         drv_usecwait(20000);
1964                                 }
1965                         }
1966                 }
1967 
1968                 /*
1969                  * At this point, we have a command ring slot available
1970                  * and an iocb to send
1971                  */
1972                 wq->release_depth--;
1973                 if (wq->release_depth == 0) {
1974                         wq->release_depth = WQE_RELEASE_DEPTH;
1975                         wqe->WQEC = 1;
1976                 }
1977 
1978 
1979                 HBASTATS.IocbIssued[channelno]++;
1980 
1981                 /* Check for ULP pkt request */
1982                 if (sbp) {
1983                         mutex_enter(&sbp->mtx);
1984 
1985                         if (sbp->node == NULL) {
1986                                 /* Set node to base node by default */
1987                                 iocbq->node = (void *)&port->node_base;
1988                                 sbp->node = (void *)&port->node_base;
1989                         }
1990 
1991                         sbp->pkt_flags |= PACKET_IN_CHIPQ;
1992                         mutex_exit(&sbp->mtx);
1993 
1994                         atomic_add_32(&hba->io_active, 1);
1995                         sbp->xrip->flag |= EMLXS_XRI_PENDING_IO;
1996                 }
1997 
1998 
1999                 /* Update the channel command statistics */
2000                 if (sbp) {
2001 #ifdef SFCT_SUPPORT
2002 #ifdef FCT_IO_TRACE
2003                         if (sbp->fct_cmd) {
2004                                 emlxs_fct_io_trace(port, sbp->fct_cmd,
2005                                     EMLXS_FCT_IOCB_ISSUED);
2006                                 emlxs_fct_io_trace(port, sbp->fct_cmd,
2007                                     icmd->ULPCOMMAND);
2008                         }
2009 #endif /* FCT_IO_TRACE */
2010 #endif /* SFCT_SUPPORT */
2011                         cp->hbaSendCmd_sbp++;
2012                         iocbq->channel = cp;
2013                 } else {
2014                         cp->hbaSendCmd++;
2015                 }
2016 
2017                 flag = iocbq->flag;
2018 
2019                 /* Send the iocb */
2020                 wqeslot = (emlxs_wqe_t *)wq->addr.virt;
2021                 wqeslot += wq->host_index;
2022 
2023                 wqe->CQId = wq->cqid;
2024                 BE_SWAP32_BCOPY((uint8_t *)wqe, (uint8_t *)wqeslot,
2025                     sizeof (emlxs_wqe_t));
2026 #ifdef DEBUG_WQE
2027                 emlxs_data_dump(port, "WQE", (uint32_t *)wqe, 18, 0);
2028 #endif
2029                 offset = (off_t)((uint64_t)((unsigned long)
2030                     wq->addr.virt) -
2031                     (uint64_t)((unsigned long)
2032                     hba->sli.sli4.slim2.virt));
2033 
2034                 EMLXS_MPDATA_SYNC(wq->addr.dma_handle, offset,
2035                     4096, DDI_DMA_SYNC_FORDEV);
2036 
2037                 /* Ring the WQ Doorbell */
2038                 wqdb = wq->qid;
2039                 wqdb |= ((1 << 24) | (wq->host_index << 16));
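                     /*
                      * Doorbell value as composed above: the WQ id in the
                      * low bits, the host index in bits 16-23, and a count
                      * of one newly posted WQE in the field at bit 24.
                      */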
2040 
2041 
2042                 WRITE_BAR2_REG(hba, FC_WQDB_REG(hba), wqdb);
2043                 wq->host_index = next_wqe;
2044 
2045 #ifdef SLI4_FASTPATH_DEBUG
2046                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2047                     "WQ RING: %08x", wqdb);
2048 #endif
2049 
2050                 /*
2051                  * After this, the sbp / iocb / wqe should not be
2052                  * accessed in the xmit path.
2053                  */
2054 
2055                 if (!sbp) {
2056                         emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
2057                 }
2058 
2059                 if (iocbq && (!(flag & IOCB_SPECIAL))) {
2060                         /* Check if HBA is full */
2061                         throttle = hba->io_throttle - hba->io_active;
2062                         if (throttle <= 0) {
2063                                 goto busy;
2064                         }
2065                 }
2066 
2067                 /* Check to see if we have room for another WQE */
2068                 next_wqe++;
2069                 if (next_wqe >= wq->max_index) {
2070                         next_wqe = 0;
2071                 }
2072 
2073                 if (next_wqe == wq->port_index) {
2074                         /* Queue it for later */
2075                         goto busy;
2076                 }
2077 
2078 
2079                 /* Get the next iocb from the tx queue if there is one */
2080                 iocbq = emlxs_tx_get(cp, 1);
2081         }
2082 
2083         mutex_exit(&EMLXS_QUE_LOCK(channelno));
2084 
2085         return;
2086 
2087 busy:
2088         if (throttle <= 0) {
2089                 HBASTATS.IocbThrottled++;
2090         } else {
2091                 HBASTATS.IocbRingFull[channelno]++;
2092         }
2093 
2094         mutex_exit(&EMLXS_QUE_LOCK(channelno));
2095 
2096         return;
2097 
2098 } /* emlxs_sli4_issue_iocb_cmd() */
2099 
2100 
2101 /*ARGSUSED*/
2102 static uint32_t
2103 emlxs_sli4_issue_mq(emlxs_port_t *port, MAILBOX4 *mqe, MAILBOX *mb,
2104     uint32_t tmo)
2105 {
2106         emlxs_hba_t *hba = HBA;
2107         MAILBOXQ        *mbq;
2108         MAILBOX4        *mb4;
2109         MATCHMAP        *mp;
2110         uint32_t        *iptr;
2111         uint32_t        mqdb;
2112         off_t           offset;
2113 
2114         mbq = (MAILBOXQ *)mb;
2115         mb4 = (MAILBOX4 *)mb;
2116         mp = (MATCHMAP *) mbq->nonembed;
2117         hba->mbox_mqe = (void *)mqe;
2118 
2119         if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
2120             (mb4->un.varSLIConfig.be.embedded)) {
2121                 /*
2122                  * If this is an embedded mbox, everything should fit
2123                  * into the mailbox area.
2124                  */
2125                 BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mqe,
2126                     MAILBOX_CMD_SLI4_BSIZE);
2127 
2128                 EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, 0,
2129                     4096, DDI_DMA_SYNC_FORDEV);
2130 
2131                 if (mb->mbxCommand != MBX_HEARTBEAT) {
2132                         emlxs_data_dump(port, "MBOX CMD", (uint32_t *)mqe,
2133                             18, 0);
2134                 }
2135         } else {
2136                 /* SLI_CONFIG and non-embedded */
2137 
2138                 /*
2139                  * If this is not embedded, the MQ area
2140                  * MUST contain a SGE pointer to a larger area for the
2141                  * non-embedded mailbox command.
2142                  * mp will point to the actual mailbox command which
2143                  * should be copied into the non-embedded area.
2144                  */
2145                 mb4->un.varSLIConfig.be.sge_cnt = 1;
2146                 mb4->un.varSLIConfig.be.payload_length = mp->size;
2147                 iptr = (uint32_t *)&mb4->un.varSLIConfig.be.un_hdr.hdr_req;
2148                 *iptr++ = (uint32_t)PADDR_LO(mp->phys);
2149                 *iptr++ = (uint32_t)PADDR_HI(mp->phys);
2150                 *iptr = mp->size;
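                     /*
                      * The header request area now holds a single inline
                      * SGE: low address, high address, then the length of
                      * the external payload buffer (mp).
                      */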
2151 
2152                 BE_SWAP32_BUFFER(mp->virt, mp->size);
2153 
2154                 EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
2155                     DDI_DMA_SYNC_FORDEV);
2156 
2157                 BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mqe,
2158                     MAILBOX_CMD_SLI4_BSIZE);
2159 
2160                 offset = (off_t)((uint64_t)((unsigned long)
2161                     hba->sli.sli4.mq.addr.virt) -
2162                     (uint64_t)((unsigned long)
2163                     hba->sli.sli4.slim2.virt));
2164 
2165                 EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, offset,
2166                     4096, DDI_DMA_SYNC_FORDEV);
2167 
2168                 emlxs_data_dump(port, "MBOX EXT", (uint32_t *)mqe, 12, 0);
2169                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2170                     "Extension Addr %p %p", mp->phys, (uint32_t *)(mp->virt));
2171                 emlxs_data_dump(port, "EXT AREA", (uint32_t *)mp->virt, 24, 0);
2172         }
2173 
2174         /* Ring the MQ Doorbell */
2175         mqdb = hba->sli.sli4.mq.qid;
2176         mqdb |= ((1 << MQ_DB_POP_SHIFT) & MQ_DB_POP_MASK);
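             /*
              * Doorbell value as composed above: the MQ id in the low bits
              * and a "number of entries posted" count of one in the
              * MQ_DB_POP field.
              */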
2177 
2178         if (mb->mbxCommand != MBX_HEARTBEAT) {
2179                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2180                     "MQ RING: %08x", mqdb);
2181         }
2182 
2183         WRITE_BAR2_REG(hba, FC_MQDB_REG(hba), mqdb);
2184         return (MBX_SUCCESS);
2185 
2186 } /* emlxs_sli4_issue_mq() */
2187 
2188 
2189 /*ARGSUSED*/
2190 static uint32_t
2191 emlxs_sli4_issue_bootstrap(emlxs_hba_t *hba, MAILBOX *mb, uint32_t tmo)
2192 {
2193         emlxs_port_t    *port = &PPORT;
2194         MAILBOXQ        *mbq;
2195         MAILBOX4        *mb4;
2196         MATCHMAP        *mp = NULL;
2197         uint32_t        *iptr;
2198         int             nonembed = 0;
2199 
2200         mbq = (MAILBOXQ *)mb;
2201         mb4 = (MAILBOX4 *)mb;
2202         mp = (MATCHMAP *) mbq->nonembed;
2203         hba->mbox_mqe = hba->sli.sli4.bootstrapmb.virt;
2204 
2205         if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
2206             (mb4->un.varSLIConfig.be.embedded)) {
2207                 /*
2208                  * If this is an embedded mbox, everything should fit
2209                  * into the bootstrap mailbox area.
2210                  */
2211                 iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
2212                 BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)iptr,
2213                     MAILBOX_CMD_SLI4_BSIZE);
2214 
2215                 EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
2216                     MAILBOX_CMD_SLI4_BSIZE, DDI_DMA_SYNC_FORDEV);
2217                 emlxs_data_dump(port, "MBOX CMD", iptr, 18, 0);
2218         } else {
2219                 /*
2220                  * If this is not embedded, the bootstrap mailbox area
2221                  * MUST contain a SGE pointer to a larger area for the
2222                  * non-embedded mailbox command.
2223                  * mp will point to the actual mailbox command which
2224                  * should be copied into the non-embedded area.
2225                  */
2226                 nonembed = 1;
2227                 mb4->un.varSLIConfig.be.sge_cnt = 1;
2228                 mb4->un.varSLIConfig.be.payload_length = mp->size;
2229                 iptr = (uint32_t *)&mb4->un.varSLIConfig.be.un_hdr.hdr_req;
2230                 *iptr++ = (uint32_t)PADDR_LO(mp->phys);
2231                 *iptr++ = (uint32_t)PADDR_HI(mp->phys);
2232                 *iptr = mp->size;
2233 
2234                 BE_SWAP32_BUFFER(mp->virt, mp->size);
2235 
2236                 EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
2237                     DDI_DMA_SYNC_FORDEV);
2238 
2239                 iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
2240                 BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)iptr,
2241                     MAILBOX_CMD_SLI4_BSIZE);
2242 
2243                 EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
2244                     EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE,
2245                     DDI_DMA_SYNC_FORDEV);
2246 
2247                 emlxs_data_dump(port, "MBOX EXT", iptr, 12, 0);
2248                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2249                     "Extension Addr %p %p", mp->phys,
2250                     (uint32_t *)((uint8_t *)mp->virt));
2251                 iptr = (uint32_t *)((uint8_t *)mp->virt);
2252                 emlxs_data_dump(port, "EXT AREA", (uint32_t *)mp->virt, 24, 0);
2253         }
2254 
2255 
2256         /* NOTE: tmo is in 10ms ticks */
2257         if (!emlxs_issue_bootstrap_mb(hba, tmo)) {
2258                 return (MBX_TIMEOUT);
2259         }
2260 
2261         if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
2262             (mb4->un.varSLIConfig.be.embedded)) {
2263                 EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
2264                     MAILBOX_CMD_SLI4_BSIZE, DDI_DMA_SYNC_FORKERNEL);
2265 
2266                 iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
2267                 BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)mb,
2268                     MAILBOX_CMD_SLI4_BSIZE);
2269 
2270                 emlxs_data_dump(port, "MBOX CMP", iptr, 18, 0);
2271 
2272         } else {
2273                 EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
2274                     EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE,
2275                     DDI_DMA_SYNC_FORKERNEL);
2276 
2277                 EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
2278                     DDI_DMA_SYNC_FORKERNEL);
2279 
2280                 BE_SWAP32_BUFFER(mp->virt, mp->size);
2281 
2282                 iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
2283                 BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)mb,
2284                     MAILBOX_CMD_SLI4_BSIZE);
2285 
2286                 emlxs_data_dump(port, "MBOX CMP", iptr, 12, 0);
2287                 iptr = (uint32_t *)((uint8_t *)mp->virt);
2288                 emlxs_data_dump(port, "EXT AREA", (uint32_t *)iptr, 24, 0);
2289         }
2290 
2291 #ifdef FMA_SUPPORT
2292         if (nonembed && mp) {
2293                 if (emlxs_fm_check_dma_handle(hba, mp->dma_handle)
2294                     != DDI_FM_OK) {
2295                         EMLXS_MSGF(EMLXS_CONTEXT,
2296                             &emlxs_invalid_dma_handle_msg,
2297                             "emlxs_sli4_issue_bootstrap: mp_hdl=%p",
2298                             mp->dma_handle);
2299                         return (MBXERR_DMA_ERROR);
2300                 }
2301         }
2302 
2303         if (emlxs_fm_check_dma_handle(hba,
2304             hba->sli.sli4.bootstrapmb.dma_handle)
2305             != DDI_FM_OK) {
2306                 EMLXS_MSGF(EMLXS_CONTEXT,
2307                     &emlxs_invalid_dma_handle_msg,
2308                     "emlxs_sli4_issue_bootstrap: hdl=%p",
2309                     hba->sli.sli4.bootstrapmb.dma_handle);
2310                 return (MBXERR_DMA_ERROR);
2311         }
2312 #endif
2313 
2314         return (MBX_SUCCESS);
2315 
2316 } /* emlxs_sli4_issue_bootstrap() */
2317 
2318 
2319 /*ARGSUSED*/
2320 static uint32_t
2321 emlxs_sli4_issue_mbox_cmd(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
2322     uint32_t tmo)
2323 {
2324         emlxs_port_t    *port;
2325         MAILBOX4        *mb4;
2326         MAILBOX         *mb;
2327         mbox_rsp_hdr_t  *hdr_rsp;
2328         MATCHMAP        *mp;
2329         uint32_t        *iptr;
2330         uint32_t        rc;
2331         uint32_t        i;
2332         uint32_t        tmo_local;
2333 
2334         if (!mbq->port) {
2335                 mbq->port = &PPORT;
2336         }
2337 
2338         port = (emlxs_port_t *)mbq->port;
2339 
2340         mb4 = (MAILBOX4 *)mbq;
2341         mb = (MAILBOX *)mbq;
2342 
2343         mb->mbxStatus = MBX_SUCCESS;
2344         rc = MBX_SUCCESS;
2345 
2346         /* Check for minimum timeouts */
2347         switch (mb->mbxCommand) {
2348         /* Mailbox commands that erase/write flash */
2349         case MBX_DOWN_LOAD:
2350         case MBX_UPDATE_CFG:
2351         case MBX_LOAD_AREA:
2352         case MBX_LOAD_EXP_ROM:
2353         case MBX_WRITE_NV:
2354         case MBX_FLASH_WR_ULA:
2355         case MBX_DEL_LD_ENTRY:
2356         case MBX_LOAD_SM:
2357                 if (tmo < 300) {
2358                         tmo = 300;
2359                 }
2360                 break;
2361 
2362         default:
2363                 if (tmo < 30) {
2364                         tmo = 30;
2365                 }
2366                 break;
2367         }
2368 
2369         /* Convert tmo seconds to 10 millisecond ticks */
2370         tmo_local = tmo * 100;
2371 
2372         mutex_enter(&EMLXS_PORT_LOCK);
2373 
2374         /* Adjust wait flag */
2375         if (flag != MBX_NOWAIT) {
2376                 if (hba->sli.sli4.flag & EMLXS_SLI4_INTR_ENABLED) {
2377                         flag = MBX_SLEEP;
2378                 } else {
2379                         flag = MBX_POLL;
2380                 }
2381         } else {
2382                 /* Must have interrupts enabled to perform MBX_NOWAIT */
2383                 if (!(hba->sli.sli4.flag & EMLXS_SLI4_INTR_ENABLED)) {
2384 
2385                         mb->mbxStatus = MBX_HARDWARE_ERROR;
2386                         mutex_exit(&EMLXS_PORT_LOCK);
2387 
2388                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2389                             "Interrupts disabled. %s failed.",
2390                             emlxs_mb_cmd_xlate(mb->mbxCommand));
2391 
2392                         return (MBX_HARDWARE_ERROR);
2393                 }
2394         }
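             /*
              * At this point flag is MBX_NOWAIT only if interrupts are
              * enabled; otherwise a waiting request has been converted to
              * MBX_SLEEP (interrupts enabled) or MBX_POLL (polled mode).
              */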
2395 
2396         /* Check for hardware error; special case the SLI_CONFIG reset */
2397         if ((hba->flag & FC_HARDWARE_ERROR) &&
2398             ! ((mb4->mbxCommand == MBX_SLI_CONFIG) &&
2399             (mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode ==
2400             COMMON_OPCODE_RESET))) {
2401                 mb->mbxStatus = MBX_HARDWARE_ERROR;
2402 
2403                 mutex_exit(&EMLXS_PORT_LOCK);
2404 
2405                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2406                     "Hardware error reported. %s failed. status=%x mb=%p",
2407                     emlxs_mb_cmd_xlate(mb->mbxCommand),  mb->mbxStatus, mb);
2408 
2409                 return (MBX_HARDWARE_ERROR);
2410         }
2411 
2412         if (hba->mbox_queue_flag) {
2413                 /* If we are not polling, then queue it for later */
2414                 if (flag == MBX_NOWAIT) {
2415                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2416                             "Busy.      %s: mb=%p NoWait.",
2417                             emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
2418 
2419                         emlxs_mb_put(hba, mbq);
2420 
2421                         HBASTATS.MboxBusy++;
2422 
2423                         mutex_exit(&EMLXS_PORT_LOCK);
2424 
2425                         return (MBX_BUSY);
2426                 }
2427 
2428                 while (hba->mbox_queue_flag) {
2429                         mutex_exit(&EMLXS_PORT_LOCK);
2430 
2431                         if (tmo_local-- == 0) {
2432                                 EMLXS_MSGF(EMLXS_CONTEXT,
2433                                     &emlxs_mbox_event_msg,
2434                                     "Timeout.   %s: mb=%p tmo=%d Waiting.",
2435                                     emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
2436                                     tmo);
2437 
2438                                 /* Non-lethal status: a mailbox timeout */
2439                                 /* does not indicate a hardware error */
2440                                 mb->mbxStatus = MBX_TIMEOUT;
2441                                 return (MBX_TIMEOUT);
2442                         }
2443 
2444                         DELAYMS(10);
2445                         mutex_enter(&EMLXS_PORT_LOCK);
2446                 }
2447         }
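             /*
              * Any previously active mailbox has completed (a timeout
              * would have returned above), so the mailbox can now be
              * claimed for this command.
              */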
2448 
2449         /* Initialize mailbox area */
2450         emlxs_mb_init(hba, mbq, flag, tmo);
2451 
2452         if (mb->mbxCommand == MBX_DOWN_LINK) {
2453                 hba->sli.sli4.flag |= EMLXS_SLI4_DOWN_LINK;
2454         }
2455 
2456         mutex_exit(&EMLXS_PORT_LOCK);
2457         switch (flag) {
2458 
2459         case MBX_NOWAIT:
2460                 if (mb->mbxCommand != MBX_HEARTBEAT) {
2461                         if (mb->mbxCommand != MBX_DOWN_LOAD
2462                             /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
2463                                 EMLXS_MSGF(EMLXS_CONTEXT,
2464                                     &emlxs_mbox_detail_msg,
2465                                     "Sending.   %s: mb=%p NoWait. embedded %d",
2466                                     emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
2467                                     ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2468                                     (mb4->un.varSLIConfig.be.embedded)));
2469                         }
2470                 }
2471 
2472                 iptr = hba->sli.sli4.mq.addr.virt;
2473                 iptr += (hba->sli.sli4.mq.host_index * MAILBOX_CMD_SLI4_WSIZE);
2474                 hba->sli.sli4.mq.host_index++;
2475                 if (hba->sli.sli4.mq.host_index >= hba->sli.sli4.mq.max_index) {
2476                         hba->sli.sli4.mq.host_index = 0;
2477                 }
2478 
2479                 if (mbq->bp) {
2480                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2481                             "BDE virt %p phys %p size x%x",
2482                             ((MATCHMAP *)mbq->bp)->virt,
2483                             ((MATCHMAP *)mbq->bp)->phys,
2484                             ((MATCHMAP *)mbq->bp)->size);
2485                         emlxs_data_dump(port, "DATA",
2486                             (uint32_t *)(((MATCHMAP *)mbq->bp)->virt), 30, 0);
2487                 }
2488                 rc = emlxs_sli4_issue_mq(port, (MAILBOX4 *)iptr, mb, tmo_local);
2489                 break;
2490 
2491         case MBX_POLL:
2492                 if (mb->mbxCommand != MBX_DOWN_LOAD
2493                     /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
2494                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2495                             "Sending.   %s: mb=%p Poll. embedded %d",
2496                             emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
2497                             ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2498                             (mb4->un.varSLIConfig.be.embedded)));
2499                 }
2500 
2501                 rc = emlxs_sli4_issue_bootstrap(hba, mb, tmo_local);
2502 
2503                 /* Clean up the mailbox area */
2504                 if (rc == MBX_TIMEOUT) {
2505                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2506                             "Timeout.   %s: mb=%p tmo=%x Poll. embedded %d",
2507                             emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo,
2508                             ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2509                             (mb4->un.varSLIConfig.be.embedded)));
2510 
2511                         hba->flag |= FC_MBOX_TIMEOUT;
2512                         EMLXS_STATE_CHANGE(hba, FC_ERROR);
2513                         emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
2514 
2515                 } else {
2516                         if (mb->mbxCommand != MBX_DOWN_LOAD
2517                             /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
2518                                 EMLXS_MSGF(EMLXS_CONTEXT,
2519                                     &emlxs_mbox_detail_msg,
2520                                     "Completed.   %s: mb=%p status=%x Poll. "
2521                                     "embedded %d",
2522                                     emlxs_mb_cmd_xlate(mb->mbxCommand), mb, rc,
2523                                     ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2524                                     (mb4->un.varSLIConfig.be.embedded)));
2525                         }
2526 
2527                         /* Process the result */
2528                         if (!(mbq->flag & MBQ_PASSTHRU)) {
2529                                 if (mbq->mbox_cmpl) {
2530                                         (void) (mbq->mbox_cmpl)(hba, mbq);
2531                                 }
2532                         }
2533 
2534                         emlxs_mb_fini(hba, NULL, mb->mbxStatus);
2535                 }
2536 
2537                 mp = (MATCHMAP *)mbq->nonembed;
2538                 if (mp) {
2539                         hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
2540                         if (hdr_rsp->status) {
2541                                 EMLXS_MSGF(EMLXS_CONTEXT,
2542                                     &emlxs_mbox_detail_msg,
2543                                     "%s: MBX_NONEMBED_ERROR: 0x%x, 0x%x",
2544                                     emlxs_mb_cmd_xlate(mb->mbxCommand),
2545                                     hdr_rsp->status, hdr_rsp->extra_status);
2546 
2547                                 mb->mbxStatus = MBX_NONEMBED_ERROR;
2548                         }
2549                 }
2550                 rc = mb->mbxStatus;
2551 
2552                 /* Attempt to send pending mailboxes */
2553                 mbq = (MAILBOXQ *)emlxs_mb_get(hba);
2554                 if (mbq) {
2556                         i =  emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
2557                         if ((i != MBX_BUSY) && (i != MBX_SUCCESS)) {
2558                                 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
2559                         }
2560                 }
2561                 break;
2562 
2563         case MBX_SLEEP:
2564                 if (mb->mbxCommand != MBX_DOWN_LOAD
2565                     /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
2566                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2567                             "Sending.   %s: mb=%p Sleep. embedded %d",
2568                             emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
2569                             ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2570                             (mb4->un.varSLIConfig.be.embedded)));
2571                 }
2572 
2573                 iptr = hba->sli.sli4.mq.addr.virt;
2574                 iptr += (hba->sli.sli4.mq.host_index * MAILBOX_CMD_SLI4_WSIZE);
2575                 hba->sli.sli4.mq.host_index++;
2576                 if (hba->sli.sli4.mq.host_index >= hba->sli.sli4.mq.max_index) {
2577                         hba->sli.sli4.mq.host_index = 0;
2578                 }
2579 
2580                 rc = emlxs_sli4_issue_mq(port, (MAILBOX4 *)iptr, mb, tmo_local);
2581 
2582                 if (rc != MBX_SUCCESS) {
2583                         break;
2584                 }
2585 
2586                 /* Wait for completion */
2587                 /* The driver clock is timing the mailbox. */
2588 
2589                 mutex_enter(&EMLXS_MBOX_LOCK);
2590                 while (!(mbq->flag & MBQ_COMPLETED)) {
2591                         cv_wait(&EMLXS_MBOX_CV, &EMLXS_MBOX_LOCK);
2592                 }
2593                 mutex_exit(&EMLXS_MBOX_LOCK);
2594 
2595                 mp = (MATCHMAP *)mbq->nonembed;
2596                 if (mp) {
2597                         hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
2598                         if (hdr_rsp->status) {
2599                                 EMLXS_MSGF(EMLXS_CONTEXT,
2600                                     &emlxs_mbox_detail_msg,
2601                                     "%s: MBX_NONEMBED_ERROR: 0x%x, 0x%x",
2602                                     emlxs_mb_cmd_xlate(mb->mbxCommand),
2603                                     hdr_rsp->status, hdr_rsp->extra_status);
2604 
2605                                 mb->mbxStatus = MBX_NONEMBED_ERROR;
2606                         }
2607                 }
2608                 rc = mb->mbxStatus;
2609 
2610                 if (rc == MBX_TIMEOUT) {
2611                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2612                             "Timeout.   %s: mb=%p tmo=%x Sleep. embedded %d",
2613                             emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo,
2614                             ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2615                             (mb4->un.varSLIConfig.be.embedded)));
2616                 } else {
2617                         if (mb->mbxCommand != MBX_DOWN_LOAD
2618                             /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
2619                                 EMLXS_MSGF(EMLXS_CONTEXT,
2620                                     &emlxs_mbox_detail_msg,
2621                                     "Completed.   %s: mb=%p status=%x Sleep. "
2622                                     "embedded %d",
2623                                     emlxs_mb_cmd_xlate(mb->mbxCommand), mb, rc,
2624                                     ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2625                                     (mb4->un.varSLIConfig.be.embedded)));
2626                         }
2627                 }
2628                 break;
2629         }
2630 
2631         return (rc);
2632 
2633 } /* emlxs_sli4_issue_mbox_cmd() */
2634 
2635 
2636 
2637 /*ARGSUSED*/
2638 static uint32_t
2639 emlxs_sli4_issue_mbox_cmd4quiesce(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
2640     uint32_t tmo)
2641 {
2642         emlxs_port_t    *port = &PPORT;
2643         MAILBOX         *mb;
2644         mbox_rsp_hdr_t  *hdr_rsp;
2645         MATCHMAP        *mp;
2646         uint32_t        rc;
2647         uint32_t        tmo_local;
2648 
2649         mb = (MAILBOX *)mbq;
2650 
2651         mb->mbxStatus = MBX_SUCCESS;
2652         rc = MBX_SUCCESS;
2653 
2654         if (tmo < 30) {
2655                 tmo = 30;
2656         }
2657 
2658         /* Convert tmo seconds to 10 millisecond ticks */
2659         tmo_local = tmo * 100;
2660 
2661         flag = MBX_POLL;
2662 
2663         /* Check for hardware error */
2664         if (hba->flag & FC_HARDWARE_ERROR) {
2665                 mb->mbxStatus = MBX_HARDWARE_ERROR;
2666                 return (MBX_HARDWARE_ERROR);
2667         }
2668 
2669         /* Initialize mailbox area */
2670         emlxs_mb_init(hba, mbq, flag, tmo);
2671 
2672         switch (flag) {
2673 
2674         case MBX_POLL:
2675 
2676                 rc = emlxs_sli4_issue_bootstrap(hba, mb, tmo_local);
2677 
2678                 /* Clean up the mailbox area */
2679                 if (rc == MBX_TIMEOUT) {
2680                         hba->flag |= FC_MBOX_TIMEOUT;
2681                         EMLXS_STATE_CHANGE(hba, FC_ERROR);
2682                         emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
2683 
2684                 } else {
2685                         /* Process the result */
2686                         if (!(mbq->flag & MBQ_PASSTHRU)) {
2687                                 if (mbq->mbox_cmpl) {
2688                                         (void) (mbq->mbox_cmpl)(hba, mbq);
2689                                 }
2690                         }
2691 
2692                         emlxs_mb_fini(hba, NULL, mb->mbxStatus);
2693                 }
2694 
2695                 mp = (MATCHMAP *)mbq->nonembed;
2696                 if (mp) {
2697                         hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
2698                         if (hdr_rsp->status) {
2699                                 EMLXS_MSGF(EMLXS_CONTEXT,
2700                                     &emlxs_mbox_detail_msg,
2701                                     "%s: MBX_NONEMBED_ERROR: 0x%x, 0x%x",
2702                                     emlxs_mb_cmd_xlate(mb->mbxCommand),
2703                                     hdr_rsp->status, hdr_rsp->extra_status);
2704 
2705                                 mb->mbxStatus = MBX_NONEMBED_ERROR;
2706                         }
2707                 }
2708                 rc = mb->mbxStatus;
2709 
2710                 break;
2711         }
2712 
2713         return (rc);
2714 
2715 } /* emlxs_sli4_issue_mbox_cmd4quiesce() */
2716 
2717 
2718 
2719 #ifdef SFCT_SUPPORT
2720 /*ARGSUSED*/
2721 static uint32_t
2722 emlxs_sli4_prep_fct_iocb(emlxs_port_t *port, emlxs_buf_t *cmd_sbp, int channel)
2723 {
2724         return (IOERR_NO_RESOURCES);
2725 
2726 } /* emlxs_sli4_prep_fct_iocb() */
2727 #endif /* SFCT_SUPPORT */
2728 
2729 
2730 /*ARGSUSED*/
2731 extern uint32_t
2732 emlxs_sli4_prep_fcp_iocb(emlxs_port_t *port, emlxs_buf_t *sbp, int channel)
2733 {
2734         emlxs_hba_t *hba = HBA;
2735         fc_packet_t *pkt;
2736         CHANNEL *cp;
2737         RPIobj_t *rpip;
2738         XRIobj_t *xrip;
2739         emlxs_wqe_t *wqe;
2740         IOCBQ *iocbq;
2741         NODELIST *node;
2742         uint16_t iotag;
2743         uint32_t did;
2744         off_t offset;
2745 
2746         pkt = PRIV2PKT(sbp);
2747         did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
2748         cp = &hba->chan[channel];
2749 
2750         iocbq = &sbp->iocbq;
2751         iocbq->channel = (void *) cp;
2752         iocbq->port = (void *) port;
2753 
2754         wqe = &iocbq->wqe;
2755         bzero((void *)wqe, sizeof (emlxs_wqe_t));
2756 
2757         /* Find target node object */
2758         node = (NODELIST *)iocbq->node;
2759         rpip = EMLXS_NODE_TO_RPI(port, node);
2760 
2761         if (!rpip) {
2762                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2763                     "Unable to find rpi. did=0x%x", did);
2764 
2765                 emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
2766                     IOERR_INVALID_RPI, 0);
2767                 return (0xff);
2768         }
2769 
2770         sbp->channel = cp;
2771         /* Next allocate an Exchange for this command */
2772         xrip = emlxs_sli4_alloc_xri(hba, sbp, rpip);
2773 
2774         if (!xrip) {
2775                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2776                     "Adapter Busy. Unable to allocate exchange. did=0x%x", did);
2777 
2778                 return (FC_TRAN_BUSY);
2779         }
2780         sbp->bmp = NULL;
2781         iotag = sbp->iotag;
2782 
2783 #ifdef SLI4_FASTPATH_DEBUG
2784         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,  /* DEBUG */
2785             "Prep FCP iotag: %x xri: %x", iotag, xrip->XRI);
2786 #endif
2787 
2788         /* Indicate this is a FCP cmd */
2789         iocbq->flag |= IOCB_FCP_CMD;
2790 
2791         if (emlxs_sli4_bde_setup(port, sbp)) {
2792                 emlxs_sli4_free_xri(hba, sbp, xrip, 1);
2793                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2794                     "Adapter Busy. Unable to setup SGE. did=0x%x", did);
2795 
2796                 return (FC_TRAN_BUSY);
2797         }
2798 
2799 
2800         /* DEBUG */
2801 #ifdef DEBUG_FCP
2802         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2803             "SGLaddr virt %p phys %p size %d", xrip->SGList.virt,
2804             xrip->SGList.phys, pkt->pkt_datalen);
2805         emlxs_data_dump(port, "SGL", (uint32_t *)xrip->SGList.virt, 20, 0);
2806         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2807             "CMD virt %p len %d:%d:%d",
2808             pkt->pkt_cmd, pkt->pkt_cmdlen, pkt->pkt_rsplen, pkt->pkt_datalen);
2809         emlxs_data_dump(port, "FCP CMD", (uint32_t *)pkt->pkt_cmd, 10, 0);
2810 #endif
2811 
2812         offset = (off_t)((uint64_t)((unsigned long)
2813             xrip->SGList.virt) -
2814             (uint64_t)((unsigned long)
2815             hba->sli.sli4.slim2.virt));
2816 
2817         EMLXS_MPDATA_SYNC(xrip->SGList.dma_handle, offset,
2818             xrip->SGList.size, DDI_DMA_SYNC_FORDEV);
2819 
2820         /* If the device is an FCP-2 device, set the bit that */
2821         /* enables the FC-TAPE error recovery protocol */
2822         if (node->nlp_fcp_info & NLP_FCP_2_DEVICE) {
2823                 wqe->ERP = 1;
2824         }
2825 
2826         if (pkt->pkt_datalen == 0) {
2827                 wqe->Command = CMD_FCP_ICMND64_CR;
2828                 wqe->CmdType = WQE_TYPE_FCP_DATA_IN;
2829         } else if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
2830                 wqe->Command = CMD_FCP_IREAD64_CR;
2831                 wqe->CmdType = WQE_TYPE_FCP_DATA_IN;
2832                 wqe->PU = PARM_READ_CHECK;
2833         } else {
2834                 wqe->Command = CMD_FCP_IWRITE64_CR;
2835                 wqe->CmdType = WQE_TYPE_FCP_DATA_OUT;
2836         }
2837         wqe->un.FcpCmd.TotalTransferCount = pkt->pkt_datalen;
2838 
2839         wqe->ContextTag = rpip->RPI;
2840         wqe->ContextType = WQE_RPI_CONTEXT;
2841         wqe->XRITag = xrip->XRI;
2842         wqe->Timer =
2843             ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
2844 
2845         if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
2846                 wqe->CCPE = 1;
2847                 wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
2848         }
2849 
2850         switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
2851         case FC_TRAN_CLASS2:
2852                 wqe->Class = CLASS2;
2853                 break;
2854         case FC_TRAN_CLASS3:
2855         default:
2856                 wqe->Class = CLASS3;
2857                 break;
2858         }
2859         sbp->class = wqe->Class;
2860         wqe->RequestTag = iotag;
2861         wqe->CQId = 0x3ff;  /* default CQ for response */
2862         return (FC_SUCCESS);
2863 } /* emlxs_sli4_prep_fcp_iocb() */
2864 
2865 
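     /* IP over FC is not implemented for SLI4 ports; the request is */
     /* always returned with FC_TRAN_BUSY. */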
2866 /*ARGSUSED*/
2867 static uint32_t
2868 emlxs_sli4_prep_ip_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
2869 {
2870         return (FC_TRAN_BUSY);
2871 
2872 } /* emlxs_sli4_prep_ip_iocb() */
2873 
2874 
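     /* Prepare an ELS WQE.  FC_PKT_OUTBOUND packets are ELS responses */
     /* sent on the exchange of the original unsolicited request; all */
     /* other packets are new ELS requests on a newly allocated XRI. */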
2875 /*ARGSUSED*/
2876 static uint32_t
2877 emlxs_sli4_prep_els_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
2878 {
2879         emlxs_hba_t *hba = HBA;
2880         fc_packet_t *pkt;
2881         IOCBQ *iocbq;
2882         IOCB *iocb;
2883         emlxs_wqe_t *wqe;
2884         FCFIobj_t *fcfp;
2885         RPIobj_t *rpip = NULL;
2886         XRIobj_t *xrip;
2887         CHANNEL *cp;
2888         uint32_t did;
2889         uint32_t cmd;
2890         ULP_SGE64 stage_sge;
2891         ULP_SGE64 *sge;
2892         ddi_dma_cookie_t *cp_cmd;
2893         ddi_dma_cookie_t *cp_resp;
2894         emlxs_node_t *node;
2895         off_t offset;
2896 
2897         pkt = PRIV2PKT(sbp);
2898         did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
2899 
2900         iocbq = &sbp->iocbq;
2901         wqe = &iocbq->wqe;
2902         iocb = &iocbq->iocb;
2903         bzero((void *)wqe, sizeof (emlxs_wqe_t));
2904         bzero((void *)iocb, sizeof (IOCB));
2905         cp = &hba->chan[hba->channel_els];
2906 
2907         /* Initialize iocbq */
2908         iocbq->port = (void *) port;
2909         iocbq->channel = (void *) cp;
2910 
2911         sbp->channel = cp;
2912         sbp->bmp = NULL;
2913 
2914 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2915         cp_cmd = pkt->pkt_cmd_cookie;
2916         cp_resp = pkt->pkt_resp_cookie;
2917 #else
2918         cp_cmd  = &pkt->pkt_cmd_cookie;
2919         cp_resp = &pkt->pkt_resp_cookie;
2920 #endif  /* >= EMLXS_MODREV3 */
2921 
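             /* Each SGE is staged on the stack and then byte-swap */
             /* copied into the XRI's SGL once it is fully built. */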
2922         /* CMD payload */
2923         sge = &stage_sge;
2924         sge->addrHigh = PADDR_HI(cp_cmd->dmac_laddress);
2925         sge->addrLow = PADDR_LO(cp_cmd->dmac_laddress);
2926         sge->length = pkt->pkt_cmdlen;
2927         sge->offset = 0;
2928         sge->reserved = 0;
2929 
2930         /* Initialize iocb */
2931         if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
2932                 /* ELS Response */
2933 
2934                 xrip = emlxs_sli4_register_xri(hba, sbp,
2935                     pkt->pkt_cmd_fhdr.rx_id);
2936 
2937                 if (!xrip) {
2938                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
2939                             "Unable to find XRI. rxid=%x",
2940                             pkt->pkt_cmd_fhdr.rx_id);
2941 
2942                         emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
2943                             IOERR_NO_XRI, 0);
2944                         return (0xff);
2945                 }
2946 
2947                 rpip = xrip->rpip;
2948 
2949                 if (!rpip) {
2950                         /* This means that we had a node registered */
2951                         /* when the unsol request came in but the node */
2952                         /* has since been unregistered. */
2953                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
2954                             "Unable to find RPI. rxid=%x",
2955                             pkt->pkt_cmd_fhdr.rx_id);
2956 
2957                         emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
2958                             IOERR_INVALID_RPI, 0);
2959                         return (0xff);
2960                 }
2961 
2962                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2963                     "Prep ELS XRI: xri=%x iotag=%x oxid=%x rpi=%x",
2964                     xrip->XRI, xrip->iotag, xrip->rx_id, rpip->RPI);
2965 
2966                 wqe->Command = CMD_XMIT_ELS_RSP64_CX;
2967                 wqe->CmdType = WQE_TYPE_GEN;
2968 
2969                 wqe->un.ElsRsp.Payload.addrHigh = sge->addrHigh;
2970                 wqe->un.ElsRsp.Payload.addrLow = sge->addrLow;
2971                 wqe->un.ElsRsp.Payload.tus.f.bdeSize = pkt->pkt_cmdlen;
2972 
2973                 wqe->un.ElsRsp.RemoteId = did;
2974                 wqe->PU = 0x3;
2975 
2976                 sge->last = 1;
2977                 /* Now sge is fully staged */
2978 
2979                 sge = xrip->SGList.virt;
2980                 BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
2981                     sizeof (ULP_SGE64));
2982 
2983                 wqe->ContextTag = port->VPIobj.VPI;
2984                 wqe->ContextType = WQE_VPI_CONTEXT;
2985                 wqe->OXId = xrip->rx_id;
2986 
2987         } else {
2988                 /* ELS Request */
2989 
2990                 node = (emlxs_node_t *)iocbq->node;
2991                 rpip = EMLXS_NODE_TO_RPI(port, node);
2992                 fcfp = port->VPIobj.vfip->fcfp;
2993 
2994                 if (!rpip) {
2995                         rpip = port->VPIobj.rpip;
2996                 }
2997 
2998                 /* Next allocate an Exchange for this command */
2999                 xrip = emlxs_sli4_alloc_xri(hba, sbp, rpip);
3000 
3001                 if (!xrip) {
3002                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3003                             "Adapter Busy. Unable to allocate exchange. "
3004                             "did=0x%x", did);
3005 
3006                         return (FC_TRAN_BUSY);
3007                 }
3008 
3009                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3010                     "Prep ELS XRI: xri=%x iotag=%x rpi=%x", xrip->XRI,
3011                     xrip->iotag, rpip->RPI);
3012 
3013                 wqe->Command = CMD_ELS_REQUEST64_CR;
3014                 wqe->CmdType = WQE_TYPE_ELS;
3015 
3016                 wqe->un.ElsCmd.Payload.addrHigh = sge->addrHigh;
3017                 wqe->un.ElsCmd.Payload.addrLow = sge->addrLow;
3018                 wqe->un.ElsCmd.Payload.tus.f.bdeSize = pkt->pkt_cmdlen;
3019 
3020                 /* setup for rsp */
3021                 iocb->un.elsreq64.remoteID = (did == BCAST_DID) ? 0 : did;
3022                 iocb->ULPPU = 1;     /* Wd4 is relative offset */
3023 
3024                 sge->last = 0;
3025 
3026                 sge = xrip->SGList.virt;
3027                 BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
3028                     sizeof (ULP_SGE64));
3029 
3030                 wqe->un.ElsCmd.PayloadLength =
3031                     pkt->pkt_cmdlen; /* Byte offset of rsp data */
3032 
3033                 /* RSP payload */
3034                 sge = &stage_sge;
3035                 sge->addrHigh = PADDR_HI(cp_resp->dmac_laddress);
3036                 sge->addrLow = PADDR_LO(cp_resp->dmac_laddress);
3037                 sge->length = pkt->pkt_rsplen;
3038                 sge->offset = 0;
3039                 sge->last = 1;
3040                 /* Now sge is fully staged */
3041 
3042                 sge = xrip->SGList.virt;
3043                 sge++;
3044                 BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
3045                     sizeof (ULP_SGE64));
3046 #ifdef DEBUG_ELS
3047                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3048                     "SGLaddr virt %p phys %p",
3049                     xrip->SGList.virt, xrip->SGList.phys);
3050                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3051                     "PAYLOAD virt %p phys %p",
3052                     pkt->pkt_cmd, cp_cmd->dmac_laddress);
3053                 emlxs_data_dump(port, "SGL", (uint32_t *)xrip->SGList.virt,
3054                     12, 0);
3055 #endif
3056 
3057                 cmd = *((uint32_t *)pkt->pkt_cmd);
3058                 cmd &= ELS_CMD_MASK;
3059 
3060                 switch (cmd) {
3061                 case ELS_CMD_FLOGI:
3062                         wqe->un.ElsCmd.SP = 1;
3063                         wqe->ContextTag = fcfp->FCFI;
3064                         wqe->ContextType = WQE_FCFI_CONTEXT;
3065                         if (hba->flag & FC_FIP_SUPPORTED) {
3066                                 wqe->CmdType |= WQE_TYPE_MASK_FIP;
3067                                 wqe->ELSId |= WQE_ELSID_FLOGI;
3068                         }
3069                         break;
3070                 case ELS_CMD_FDISC:
3071                         wqe->un.ElsCmd.SP = 1;
3072                         wqe->ContextTag = port->VPIobj.VPI;
3073                         wqe->ContextType = WQE_VPI_CONTEXT;
3074                         if (hba->flag & FC_FIP_SUPPORTED) {
3075                                 wqe->CmdType |= WQE_TYPE_MASK_FIP;
3076                                 wqe->ELSId |= WQE_ELSID_FDISC;
3077                         }
3078                         break;
3079                 case ELS_CMD_LOGO:
3080                         if (did == FABRIC_DID) {
3081                                 wqe->ContextTag = fcfp->FCFI;
3082                                 wqe->ContextType = WQE_FCFI_CONTEXT;
3083                                 if (hba->flag & FC_FIP_SUPPORTED) {
3084                                         wqe->CmdType |= WQE_TYPE_MASK_FIP;
3085                                         wqe->ELSId |= WQE_ELSID_LOGO;
3086                                 }
3087                         } else {
3088                                 wqe->ContextTag = port->VPIobj.VPI;
3089                                 wqe->ContextType = WQE_VPI_CONTEXT;
3090                         }
3091                         break;
3092 
3093                 case ELS_CMD_SCR:
3094                 case ELS_CMD_PLOGI:
3095                 case ELS_CMD_PRLI:
3096                 default:
3097                         wqe->ContextTag = port->VPIobj.VPI;
3098                         wqe->ContextType = WQE_VPI_CONTEXT;
3099                         break;
3100                 }
3101                 wqe->un.ElsCmd.RemoteId = did;
3102                 wqe->Timer = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3103         }
3104 
3105         offset = (off_t)((uint64_t)((unsigned long)
3106             xrip->SGList.virt) -
3107             (uint64_t)((unsigned long)
3108             hba->sli.sli4.slim2.virt));
3109 
3110         EMLXS_MPDATA_SYNC(xrip->SGList.dma_handle, offset,
3111             xrip->SGList.size, DDI_DMA_SYNC_FORDEV);
3112 
3113         if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
3114                 wqe->CCPE = 1;
3115                 wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
3116         }
3117 
3118         switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3119         case FC_TRAN_CLASS2:
3120                 wqe->Class = CLASS2;
3121                 break;
3122         case FC_TRAN_CLASS3:
3123         default:
3124                 wqe->Class = CLASS3;
3125                 break;
3126         }
3127         sbp->class = wqe->Class;
3128         wqe->XRITag = xrip->XRI;
3129         wqe->RequestTag = xrip->iotag;
3130         wqe->CQId = 0x3ff;
3131         return (FC_SUCCESS);
3132 
3133 } /* emlxs_sli4_prep_els_iocb() */
3134 
3135 
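     /* Prepare a CT WQE.  FC_PKT_OUTBOUND packets are CT responses sent */
     /* with XMIT_SEQUENCE64 on the existing exchange; all other packets */
     /* are new CT requests sent with GEN_REQUEST64. */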
3136 /*ARGSUSED*/
3137 static uint32_t
3138 emlxs_sli4_prep_ct_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
3139 {
3140         emlxs_hba_t *hba = HBA;
3141         fc_packet_t *pkt;
3142         IOCBQ *iocbq;
3143         IOCB *iocb;
3144         emlxs_wqe_t *wqe;
3145         NODELIST *node = NULL;
3146         CHANNEL *cp;
3147         RPIobj_t *rpip;
3148         XRIobj_t *xrip;
3149         uint32_t did;
3150         off_t offset;
3151 
3152         pkt = PRIV2PKT(sbp);
3153         did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3154 
3155         iocbq = &sbp->iocbq;
3156         wqe = &iocbq->wqe;
3157         iocb = &iocbq->iocb;
3158         bzero((void *)wqe, sizeof (emlxs_wqe_t));
3159         bzero((void *)iocb, sizeof (IOCB));
3160 
3161         cp = &hba->chan[hba->channel_ct];
3162 
3163         iocbq->port = (void *) port;
3164         iocbq->channel = (void *) cp;
3165 
3166         sbp->bmp = NULL;
3167         sbp->channel = cp;
3168 
3169         /* Initialize wqe */
3170         if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
3171                 /* CT Response */
3172 
3173                 xrip = emlxs_sli4_register_xri(hba, sbp,
3174                     pkt->pkt_cmd_fhdr.rx_id);
3175 
3176                 if (!xrip) {
3177                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
3178                             "Unable to find XRI. rxid=%x",
3179                             pkt->pkt_cmd_fhdr.rx_id);
3180 
3181                         emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
3182                             IOERR_NO_XRI, 0);
3183                         return (0xff);
3184                 }
3185 
3186                 rpip = xrip->rpip;
3187 
3188                 if (!rpip) {
3189                         /* This means that we had a node registered */
3190                         /* when the unsol request came in but the node */
3191                         /* has since been unregistered. */
3192                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
3193                             "Unable to find RPI. rxid=%x",
3194                             pkt->pkt_cmd_fhdr.rx_id);
3195 
3196                         emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
3197                             IOERR_INVALID_RPI, 0);
3198                         return (0xff);
3199                 }
3200 
3201                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3202                     "Prep CT XRI: xri=%x iotag=%x oxid=%x", xrip->XRI,
3203                     xrip->iotag, xrip->rx_id);
3204 
3205                 if (emlxs_sli4_bde_setup(port, sbp)) {
3206                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3207                             "Adapter Busy. Unable to setup SGE. did=0x%x", did);
3208 
3209                         return (FC_TRAN_BUSY);
3210                 }
3211 
3212                 wqe->CmdType = WQE_TYPE_GEN;
3213                 wqe->Command = CMD_XMIT_SEQUENCE64_CR;
3214                 wqe->un.XmitSeq.la = 1;
3215 
3216                 if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
3217                         wqe->un.XmitSeq.ls = 1;
3218                 }
3219 
3220                 if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
3221                         wqe->un.XmitSeq.si = 1;
3222                 }
3223 
3224                 wqe->un.XmitSeq.DFctl  = pkt->pkt_cmd_fhdr.df_ctl;
3225                 wqe->un.XmitSeq.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
3226                 wqe->un.XmitSeq.Type  = pkt->pkt_cmd_fhdr.type;
3227                 wqe->OXId = xrip->rx_id;
3228                 wqe->XC = 0; /* xri_tag is a new exchange */
3229                 wqe->CmdSpecific[0] = wqe->un.GenReq.Payload.tus.f.bdeSize;
3230 
3231         } else {
3232                 /* CT Request */
3233 
3234                 node = (emlxs_node_t *)iocbq->node;
3235                 rpip = EMLXS_NODE_TO_RPI(port, node);
3236 
3237                 if (!rpip) {
3238                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
3239                             "Unable to find rpi. did=0x%x rpi=%x",
3240                             did, node->nlp_Rpi);
3241 
3242                         emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
3243                             IOERR_INVALID_RPI, 0);
3244                         return (0xff);
3245                 }
3246 
3247                 /* Next allocate an Exchange for this command */
3248                 xrip = emlxs_sli4_alloc_xri(hba, sbp, rpip);
3249 
3250                 if (!xrip) {
3251                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3252                             "Adapter Busy. Unable to allocate exchange. "
3253                             "did=0x%x", did);
3254 
3255                         return (FC_TRAN_BUSY);
3256                 }
3257 
3258                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3259                     "Prep CT XRI: %x iotag %x", xrip->XRI, xrip->iotag);
3260 
3261                 if (emlxs_sli4_bde_setup(port, sbp)) {
3262                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3263                             "Adapter Busy. Unable to setup SGE. did=0x%x", did);
3264 
3265                         emlxs_sli4_free_xri(hba, sbp, xrip, 1);
3266                         return (FC_TRAN_BUSY);
3267                 }
3268 
3269                 wqe->CmdType = WQE_TYPE_GEN;
3270                 wqe->Command = CMD_GEN_REQUEST64_CR;
3271                 wqe->un.GenReq.la = 1;
3272                 wqe->un.GenReq.DFctl  = pkt->pkt_cmd_fhdr.df_ctl;
3273                 wqe->un.GenReq.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
3274                 wqe->un.GenReq.Type  = pkt->pkt_cmd_fhdr.type;
3275                 wqe->Timer = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3276 
3277 #ifdef DEBUG_CT
3278                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3279                     "SGLaddr virt %p phys %p", xrip->SGList.virt,
3280                     xrip->SGList.phys);
3281                 emlxs_data_dump(port, "SGL", (uint32_t *)xrip->SGList.virt,
3282                     12, 0);
3283                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3284                     "CMD virt %p len %d:%d",
3285                     pkt->pkt_cmd, pkt->pkt_cmdlen, pkt->pkt_rsplen);
3286                 emlxs_data_dump(port, "DATA", (uint32_t *)pkt->pkt_cmd, 20, 0);
3287 #endif /* DEBUG_CT */
3288         }
3289 
3290         /* Setup for rsp */
3291         iocb->un.genreq64.w5.hcsw.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
3292         iocb->un.genreq64.w5.hcsw.Type  = pkt->pkt_cmd_fhdr.type;
3293         iocb->un.genreq64.w5.hcsw.Dfctl  = pkt->pkt_cmd_fhdr.df_ctl;
3294         iocb->ULPPU = 1;     /* Wd4 is relative offset */
3295 
3296         offset = (off_t)((uint64_t)((unsigned long)
3297             xrip->SGList.virt) -
3298             (uint64_t)((unsigned long)
3299             hba->sli.sli4.slim2.virt));
3300 
3301         EMLXS_MPDATA_SYNC(xrip->SGList.dma_handle, offset,
3302             xrip->SGList.size, DDI_DMA_SYNC_FORDEV);
3303 
3304         wqe->ContextTag = rpip->RPI;
3305         wqe->ContextType = WQE_RPI_CONTEXT;
3306         wqe->XRITag = xrip->XRI;
3307 
3308         if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
3309                 wqe->CCPE = 1;
3310                 wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
3311         }
3312 
3313         switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3314         case FC_TRAN_CLASS2:
3315                 wqe->Class = CLASS2;
3316                 break;
3317         case FC_TRAN_CLASS3:
3318         default:
3319                 wqe->Class = CLASS3;
3320                 break;
3321         }
3322         sbp->class = wqe->Class;
3323         wqe->RequestTag = xrip->iotag;
3324         wqe->CQId = 0x3ff;
3325         return (FC_SUCCESS);
3326 
3327 } /* emlxs_sli4_prep_ct_iocb() */
3328 
3329 
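     /* Peek at the current entry of the given event queue and report */
     /* whether its valid bit is set.  The entry is not consumed here. */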
3330 /*ARGSUSED*/
3331 static int
3332 emlxs_sli4_read_eq(emlxs_hba_t *hba, EQ_DESC_t *eq)
3333 {
3334         uint32_t *ptr;
3335         EQE_u eqe;
3336         int rc = 0;
3337         off_t offset;
3338 
3339         /* EMLXS_PORT_LOCK is acquired below while the EQE is examined */
3340         ptr = eq->addr.virt;
3341         ptr += eq->host_index;
3342 
3343         offset = (off_t)((uint64_t)((unsigned long)
3344             eq->addr.virt) -
3345             (uint64_t)((unsigned long)
3346             hba->sli.sli4.slim2.virt));
3347 
3348         EMLXS_MPDATA_SYNC(eq->addr.dma_handle, offset,
3349             4096, DDI_DMA_SYNC_FORKERNEL);
3350 
3351         mutex_enter(&EMLXS_PORT_LOCK);
3352 
3353         eqe.word = *ptr;
3354         eqe.word = BE_SWAP32(eqe.word);
3355 
3356         if (eqe.word & EQE_VALID) {
3357                 rc = 1;
3358         }
3359 
3360         mutex_exit(&EMLXS_PORT_LOCK);
3361 
3362         return (rc);
3363 
3364 } /* emlxs_sli4_read_eq */
3365 
3366 
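     /* Poll the event queues until one contains a valid entry, then */
     /* invoke the MSI interrupt handler for that vector.  With fixed */
     /* interrupts only EQ 0 is polled. */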
3367 /*ARGSUSED*/
3368 static void
3369 emlxs_sli4_poll_intr(emlxs_hba_t *hba, uint32_t att_bit)
3370 {
3371         int rc = 0;
3372         int i;
3373         char arg[] = {0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7};
3374         char arg2;
3375 
3376         /*
3377          * Poll the eqe to see if the valid bit is set or not
3378          */
3379 
3380         for (;;) {
3381                 if (hba->intr_type == DDI_INTR_TYPE_FIXED) {
3382                         /* only poll eqe0 */
3383                         rc = emlxs_sli4_read_eq(hba,
3384                             &hba->sli.sli4.eq[0]);
3385                         if (rc == 1) {
3386                                 (void) bcopy((char *)&arg[0],
3387                                     (char *)&arg2, sizeof (char));
3388                                 break;
3389                         }
3390                 } else {
3391                         /* poll every msi vector */
3392                         for (i = 0; i < hba->intr_count; i++) {
3393                                 rc = emlxs_sli4_read_eq(hba,
3394                                     &hba->sli.sli4.eq[i]);
3395 
3396                                 if (rc == 1) {
3397                                         break;
3398                                 }
3399                         }
3400                         if ((i != hba->intr_count) && (rc == 1)) {
3401                                 (void) bcopy((char *)&arg[i],
3402                                     (char *)&arg2, sizeof (char));
3403                                 break;
3404                         }
3405                 }
3406         }
3407 
3408         /* Process the vector that reported a valid entry */
3409         rc = emlxs_sli4_msi_intr((char *)hba, (char *)&arg2);
3410 
3411         return;
3412 
3413 } /* emlxs_sli4_poll_intr() */
3414 
3415 
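     /* Decode an asynchronous event CQE (link state, FCoE/FIP, DCBX, */
     /* group 5) and notify the FCF state machine where applicable. */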
3416 /*ARGSUSED*/
3417 static void
3418 emlxs_sli4_process_async_event(emlxs_hba_t *hba, CQE_ASYNC_t *cqe)
3419 {
3420         emlxs_port_t *port = &PPORT;
3421 
3422         /* Save the event tag */
3423         hba->link_event_tag = cqe->un.link.event_tag;
3424 
3425         switch (cqe->event_code) {
3426         case ASYNC_EVENT_CODE_LINK_STATE:
3427                 switch (cqe->un.link.link_status) {
3428                 case ASYNC_EVENT_PHYS_LINK_UP:
3429                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3430                             "Link Async Event: PHYS_LINK_UP. val=%d type=%x",
3431                             cqe->valid, cqe->event_type);
3432                         break;
3433 
3434                 case ASYNC_EVENT_PHYS_LINK_DOWN:
3435                 case ASYNC_EVENT_LOGICAL_LINK_DOWN:
3436                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3437                             "Link Async Event: LINK_DOWN. val=%d type=%x",
3438                             cqe->valid, cqe->event_type);
3439 
3440                         (void) emlxs_fcf_linkdown_notify(port);
3441 
3442                         mutex_enter(&EMLXS_PORT_LOCK);
3443                         hba->sli.sli4.flag &= ~EMLXS_SLI4_DOWN_LINK;
3444                         mutex_exit(&EMLXS_PORT_LOCK);
3445                         break;
3446 
3447                 case ASYNC_EVENT_LOGICAL_LINK_UP:
3448                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3449                             "Link Async Event: LOGICAL_LINK_UP. val=%d type=%x",
3450                             cqe->valid, cqe->event_type);
3451 
3452                         if (cqe->un.link.port_speed == PHY_1GHZ_LINK) {
3453                                 hba->linkspeed = LA_1GHZ_LINK;
3454                         } else {
3455                                 hba->linkspeed = LA_10GHZ_LINK;
3456                         }
3457                         hba->topology = TOPOLOGY_PT_PT;
3458                         hba->qos_linkspeed = cqe->un.link.qos_link_speed;
3459 
3460                         (void) emlxs_fcf_linkup_notify(port);
3461                         break;
3462                 }
3463                 break;
3464         case ASYNC_EVENT_CODE_FCOE_FIP:
3465                 switch (cqe->un.fcoe.evt_type) {
3466                 case ASYNC_EVENT_NEW_FCF_DISC:
3467                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3468                             "FCOE Async Event: FCF_FOUND %d:%d",
3469                             cqe->un.fcoe.ref_index, cqe->un.fcoe.fcf_count);
3470 
3471                         (void) emlxs_fcf_found_notify(port,
3472                             cqe->un.fcoe.ref_index);
3473                         break;
3474                 case ASYNC_EVENT_FCF_TABLE_FULL:
3475                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3476                             "FCOE Async Event: FCFTAB_FULL %d:%d",
3477                             cqe->un.fcoe.ref_index, cqe->un.fcoe.fcf_count);
3478 
3479                         (void) emlxs_fcf_full_notify(port);
3480                         break;
3481                 case ASYNC_EVENT_FCF_DEAD:
3482                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3483                             "FCOE Async Event: FCF_LOST %d:%d",
3484                             cqe->un.fcoe.ref_index, cqe->un.fcoe.fcf_count);
3485 
3486                         (void) emlxs_fcf_lost_notify(port,
3487                             cqe->un.fcoe.ref_index);
3488                         break;
3489                 case ASYNC_EVENT_VIRT_LINK_CLEAR:
3490                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3491                             "FCOE Async Event: CVL %d",
3492                             cqe->un.fcoe.ref_index);
3493 
3494                         (void) emlxs_fcf_cvl_notify(port,
3495                             (cqe->un.fcoe.ref_index - hba->vpi_base));
3496                         break;
3497 
3498                 case ASYNC_EVENT_FCF_MODIFIED:
3499                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3500                             "FCOE Async Event: FCF_CHANGED %d",
3501                             cqe->un.fcoe.ref_index);
3502 
3503                         (void) emlxs_fcf_changed_notify(port,
3504                             cqe->un.fcoe.ref_index);
3505                         break;
3506                 }
3507                 break;
3508         case ASYNC_EVENT_CODE_DCBX:
3509                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3510                     "DCBX Async Event Code %d: Not supported",
3511                     cqe->event_code);
3512                 break;
3513         case ASYNC_EVENT_CODE_GRP_5:
3514                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3515                     "Group 5 Async Event type %d", cqe->event_type);
3516                 if (cqe->event_type == ASYNC_EVENT_QOS_SPEED) {
3517                         hba->qos_linkspeed = cqe->un.qos.qos_link_speed;
3518                 }
3519                 break;
3520         default:
3521                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3522                     "Unknown Async Event Code %d", cqe->event_code);
3523                 break;
3524         }
3525 
3526 } /* emlxs_sli4_process_async_event() */
3527 
3528 
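     /* Handle a mailbox completion CQE: locate the active MAILBOXQ, */
     /* copy the completed MQE into it, sync any external buffers and */
     /* run the command's completion handler. */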
3529 /*ARGSUSED*/
3530 static void
3531 emlxs_sli4_process_mbox_event(emlxs_hba_t *hba, CQE_MBOX_t *cqe)
3532 {
3533         emlxs_port_t *port = &PPORT;
3534         MAILBOX4 *mb;
3535         MATCHMAP *mbox_bp;
3536         MATCHMAP *mbox_nonembed;
3537         MAILBOXQ *mbq = NULL;
3538         uint32_t size;
3539         uint32_t *iptr;
3540         int rc;
3541         off_t offset;
3542 
3543         if (cqe->consumed && !cqe->completed) {
3544                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3545                     "CQ ENTRY: Mbox event. Entry consumed but not completed");
3546                 return;
3547         }
3548 
3549         mutex_enter(&EMLXS_PORT_LOCK);
3550         switch (hba->mbox_queue_flag) {
3551         case 0:
3552                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
3553                     "CQ ENTRY: Mbox event. No mailbox active.");
3554 
3555                 mutex_exit(&EMLXS_PORT_LOCK);
3556                 return;
3557 
3558         case MBX_POLL:
3559 
3560                 /* Mark mailbox complete, this should wake up any polling */
3561                 /* threads. This can happen if interrupts are enabled while */
3562                 /* a polled mailbox command is outstanding. If we don't set */
3563                 /* MBQ_COMPLETED here, the polling thread may wait until */
3564                 /* timeout error occurs */
3565 
3566                 mutex_enter(&EMLXS_MBOX_LOCK);
3567                 mbq = (MAILBOXQ *)hba->mbox_mbq;
3568                 if (mbq) {
3569                         port = (emlxs_port_t *)mbq->port;
3570                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3571                             "CQ ENTRY: Mbox event. Completing Polled command.");
3572                         mbq->flag |= MBQ_COMPLETED;
3573                 }
3574                 mutex_exit(&EMLXS_MBOX_LOCK);
3575 
3576                 mutex_exit(&EMLXS_PORT_LOCK);
3577                 return;
3578 
3579         case MBX_SLEEP:
3580         case MBX_NOWAIT:
3581                 /* Check mbox_timer, it acts as a service flag too */
3582                 /* The first to service the mbox queue will clear the timer */
3583                 if (hba->mbox_timer) {
3584                         hba->mbox_timer = 0;
3585 
3586                         mutex_enter(&EMLXS_MBOX_LOCK);
3587                         mbq = (MAILBOXQ *)hba->mbox_mbq;
3588                         mutex_exit(&EMLXS_MBOX_LOCK);
3589                 }
3590 
3591                 if (!mbq) {
3592                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3593                             "Mailbox event. No service required.");
3594                         mutex_exit(&EMLXS_PORT_LOCK);
3595                         return;
3596                 }
3597 
3598                 mb = (MAILBOX4 *)mbq;
3599                 mutex_exit(&EMLXS_PORT_LOCK);
3600                 break;
3601 
3602         default:
3603                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_completion_error_msg,
3604                     "CQ ENTRY: Mbox event. Invalid Mailbox flag (%x).",
3605                     hba->mbox_queue_flag);
3606 
3607                 mutex_exit(&EMLXS_PORT_LOCK);
3608                 return;
3609         }
3610 
3611         /* Set port context */
3612         port = (emlxs_port_t *)mbq->port;
3613 
3614         offset = (off_t)((uint64_t)((unsigned long)
3615             hba->sli.sli4.mq.addr.virt) -
3616             (uint64_t)((unsigned long)
3617             hba->sli.sli4.slim2.virt));
3618 
3619         /* Now that we are the owner, DMA Sync entire MQ if needed */
3620         EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, offset,
3621             4096, DDI_DMA_SYNC_FORDEV);
3622 
3623         BE_SWAP32_BCOPY((uint8_t *)hba->mbox_mqe, (uint8_t *)mb,
3624             MAILBOX_CMD_SLI4_BSIZE);
3625 
3626         if (mb->mbxCommand != MBX_HEARTBEAT) {
3627                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3628                     "CQ ENTRY: Mbox event. Mbox complete. status=%x cmd=%x",
3629                     mb->mbxStatus, mb->mbxCommand);
3630 
3631                 emlxs_data_dump(port, "MBOX CMP", (uint32_t *)hba->mbox_mqe,
3632                     12, 0);
3633         }
3634 
3635         if (mb->mbxCommand == MBX_SLI_CONFIG) {
3636                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3637                     "Mbox sge_cnt: %d length: %d embed: %d",
3638                     mb->un.varSLIConfig.be.sge_cnt,
3639                     mb->un.varSLIConfig.be.payload_length,
3640                     mb->un.varSLIConfig.be.embedded);
3641         }
3642 
3643         /* Now sync the memory buffer if one was used */
3644         if (mbq->bp) {
3645                 mbox_bp = (MATCHMAP *)mbq->bp;
3646                 EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0, mbox_bp->size,
3647                     DDI_DMA_SYNC_FORKERNEL);
3648 #ifdef FMA_SUPPORT
3649                 if (emlxs_fm_check_dma_handle(hba, mbox_bp->dma_handle)
3650                     != DDI_FM_OK) {
3651                         EMLXS_MSGF(EMLXS_CONTEXT,
3652                             &emlxs_invalid_dma_handle_msg,
3653                             "emlxs_sli4_process_mbox_event: hdl=%p",
3654                             mbox_bp->dma_handle);
3655 
3656                         mb->mbxStatus = MBXERR_DMA_ERROR;
3657                 }
3658 #endif
3659         }
3660 
3661         /* Now sync the non-embedded mailbox buffer if one was used */
3662         if (mbq->nonembed) {
3663                 mbox_nonembed = (MATCHMAP *)mbq->nonembed;
3664                 size = mbox_nonembed->size;
3665                 EMLXS_MPDATA_SYNC(mbox_nonembed->dma_handle, 0, size,
3666                     DDI_DMA_SYNC_FORKERNEL);
3667                 iptr = (uint32_t *)((uint8_t *)mbox_nonembed->virt);
3668                 BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)iptr, size);
3669 
3670 #ifdef FMA_SUPPORT
3671                 if (emlxs_fm_check_dma_handle(hba,
3672                     mbox_nonembed->dma_handle) != DDI_FM_OK) {
3673                         EMLXS_MSGF(EMLXS_CONTEXT,
3674                             &emlxs_invalid_dma_handle_msg,
3675                             "emlxs_sli4_process_mbox_event: hdl=%p",
3676                             mbox_nonembed->dma_handle);
3677 
3678                         mb->mbxStatus = MBXERR_DMA_ERROR;
3679                 }
3680 #endif
3681                 emlxs_data_dump(port, "EXT AREA", (uint32_t *)iptr, 24, 0);
3682         }
3683 
3684         /* Mailbox has been completely received at this point */
3685 
3686         if (mb->mbxCommand == MBX_HEARTBEAT) {
3687                 hba->heartbeat_active = 0;
3688                 goto done;
3689         }
3690 
3691         if (hba->mbox_queue_flag == MBX_SLEEP) {
3692                 if (mb->mbxCommand != MBX_DOWN_LOAD
3693                     /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
3694                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3695                             "Received.  %s: status=%x Sleep.",
3696                             emlxs_mb_cmd_xlate(mb->mbxCommand),
3697                             mb->mbxStatus);
3698                 }
3699         } else {
3700                 if (mb->mbxCommand != MBX_DOWN_LOAD
3701                     /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
3702                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3703                             "Completed. %s: status=%x",
3704                             emlxs_mb_cmd_xlate(mb->mbxCommand),
3705                             mb->mbxStatus);
3706                 }
3707         }
3708 
3709         /* Filter out passthru mailbox */
3710         if (mbq->flag & MBQ_PASSTHRU) {
3711                 goto done;
3712         }
3713 
3714         if (mb->mbxStatus) {
3715                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3716                     "%s: status=0x%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
3717                     (uint32_t)mb->mbxStatus);
3718         }
3719 
3720         if (mbq->mbox_cmpl) {
3721                 rc = (mbq->mbox_cmpl)(hba, mbq);
3722 
3723                 /* If mbox was retried, return immediately */
3724                 if (rc) {
3725                         return;
3726                 }
3727         }
3728 
3729 done:
3730 
3731         /* Clean up the mailbox area */
3732         emlxs_mb_fini(hba, (MAILBOX *)mb, mb->mbxStatus);
3733 
3734         /* Check for pending mailbox commands */
3735         mbq = (MAILBOXQ *)emlxs_mb_get(hba);
3736         if (mbq) {
3737                 /* Attempt to send pending mailboxes */
3738                 rc =  emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
3739                 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
3740                         emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
3741                 }
3742         }
3743         return;
3744 
3745 } /* emlxs_sli4_process_mbox_event() */
3746 
3747 
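     /* Translate a WQE completion CQE back into the legacy IOCB format */
     /* expected by the common completion code. */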
3748 /*ARGSUSED*/
3749 static void
3750 emlxs_CQE_to_IOCB(emlxs_hba_t *hba, CQE_CmplWQ_t *cqe, emlxs_buf_t *sbp)
3751 {
3752 #ifdef SLI4_FASTPATH_DEBUG
3753         emlxs_port_t *port = &PPORT;
3754 #endif
3755         IOCBQ *iocbq;
3756         IOCB *iocb;
3757         uint32_t *iptr;
3758         fc_packet_t *pkt;
3759         emlxs_wqe_t *wqe;
3760 
3761         iocbq = &sbp->iocbq;
3762         wqe = &iocbq->wqe;
3763         iocb = &iocbq->iocb;
3764 
3765 #ifdef SLI4_FASTPATH_DEBUG
3766         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3767             "CQE to IOCB: cmd:x%x tag:x%x xri:x%x", wqe->Command,
3768             wqe->RequestTag, wqe->XRITag);
3769 #endif
3770 
3771         iocb->ULPSTATUS = cqe->Status;
3772         iocb->un.ulpWord[4] = cqe->Parameter;
3773         iocb->ULPIOTAG = cqe->RequestTag;
3774         iocb->ULPCONTEXT = wqe->XRITag;
3775 
3776         switch (wqe->Command) {
3777 
3778         case CMD_FCP_ICMND64_CR:
3779                 iocb->ULPCOMMAND = CMD_FCP_ICMND64_CX;
3780                 break;
3781 
3782         case CMD_FCP_IREAD64_CR:
3783                 iocb->ULPCOMMAND = CMD_FCP_IREAD64_CX;
3784                 iocb->ULPPU = PARM_READ_CHECK;
3785                 if (iocb->ULPSTATUS ==  IOSTAT_FCP_RSP_ERROR) {
3786                         iocb->un.fcpi64.fcpi_parm =
3787                             wqe->un.FcpCmd.TotalTransferCount -
3788                             cqe->CmdSpecific;
3789                 }
3790                 break;
3791 
3792         case CMD_FCP_IWRITE64_CR:
3793                 iocb->ULPCOMMAND = CMD_FCP_IWRITE64_CX;
3794                 break;
3795 
3796         case CMD_ELS_REQUEST64_CR:
3797                 iocb->ULPCOMMAND = CMD_ELS_REQUEST64_CX;
3798                 iocb->un.elsreq64.bdl.bdeSize = cqe->CmdSpecific;
3799                 if (iocb->ULPSTATUS == 0) {
3800                         iocb->unsli3.ext_iocb.rsplen = cqe->CmdSpecific;
3801                 }
3802                 if (iocb->ULPSTATUS == IOSTAT_LS_RJT) {
3803                         /* For LS_RJT, the driver populates the rsp buffer */
3804                         pkt = PRIV2PKT(sbp);
3805                         iptr = (uint32_t *)pkt->pkt_resp;
3806                         *iptr++ = ELS_CMD_LS_RJT;
3807                         *iptr = cqe->Parameter;
3808                 }
3809                 break;
3810 
3811         case CMD_GEN_REQUEST64_CR:
3812                 iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CX;
3813                 iocb->unsli3.ext_iocb.rsplen = cqe->CmdSpecific;
3814                 break;
3815 
3816         case CMD_XMIT_SEQUENCE64_CR:
3817                 iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
3818                 break;
3819 
3820         default:
3821                 iocb->ULPCOMMAND = wqe->Command;
3822 
3823         }
3824 
3825 } /* emlxs_CQE_to_IOCB() */
3826 
3827 
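     /* Flush the I/O table: complete every outstanding command with */
     /* LOCAL_REJECT/IOERR_SEQUENCE_TIMEOUT and free its exchange so no */
     /* packet is left waiting on the hardware queues. */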
3828 /*ARGSUSED*/
3829 static void
3830 emlxs_sli4_hba_flush_chipq(emlxs_hba_t *hba)
3831 {
3832 #ifdef SFCT_SUPPORT
3833 #ifdef FCT_IO_TRACE
3834         emlxs_port_t *port = &PPORT;
3835 #endif /* FCT_IO_TRACE */
3836 #endif /* SFCT_SUPPORT */
3837         CHANNEL *cp;
3838         emlxs_buf_t *sbp;
3839         IOCBQ *iocbq;
3840         uint16_t i;
3841         uint32_t trigger = 0;
3842         CQE_CmplWQ_t cqe;
3843 
3844         mutex_enter(&EMLXS_FCTAB_LOCK);
3845         for (i = 0; i < hba->max_iotag; i++) {
3846                 sbp = hba->fc_table[i];
3847                 if (sbp == NULL || sbp == STALE_PACKET) {
3848                         continue;
3849                 }
3850                 hba->fc_table[i] = STALE_PACKET;
3851                 hba->io_count--;
3852                 sbp->iotag = 0;
3853                 mutex_exit(&EMLXS_FCTAB_LOCK);
3854 
3855                 cp = sbp->channel;
3856                 bzero(&cqe, sizeof (CQE_CmplWQ_t));
3857                 cqe.RequestTag = i;
3858                 cqe.Status = IOSTAT_LOCAL_REJECT;
3859                 cqe.Parameter = IOERR_SEQUENCE_TIMEOUT;
3860 
3861                 cp->hbaCmplCmd_sbp++;
3862 
3863 #ifdef SFCT_SUPPORT
3864 #ifdef FCT_IO_TRACE
3865                 if (sbp->fct_cmd) {
3866                         emlxs_fct_io_trace(port, sbp->fct_cmd,
3867                             EMLXS_FCT_IOCB_COMPLETE);
3868                 }
3869 #endif /* FCT_IO_TRACE */
3870 #endif /* SFCT_SUPPORT */
3871 
3872                 atomic_add_32(&hba->io_active, -1);
3873 
3874                 /* Copy entry to sbp's iocbq */
3875                 iocbq = &sbp->iocbq;
3876                 emlxs_CQE_to_IOCB(hba, &cqe, sbp);
3877 
3878                 iocbq->next = NULL;
3879 
3880                 /* Exchange is no longer busy on-chip, free it */
3881                 emlxs_sli4_free_xri(hba, sbp, sbp->xrip, 1);
3882 
3883                 if (!(sbp->pkt_flags &
3884                     (PACKET_POLLED | PACKET_ALLOCATED))) {
3885                         /* Add the IOCB to the channel list */
3886                         mutex_enter(&cp->rsp_lock);
3887                         if (cp->rsp_head == NULL) {
3888                                 cp->rsp_head = iocbq;
3889                                 cp->rsp_tail = iocbq;
3890                         } else {
3891                                 cp->rsp_tail->next = iocbq;
3892                                 cp->rsp_tail = iocbq;
3893                         }
3894                         mutex_exit(&cp->rsp_lock);
3895                         trigger = 1;
3896                 } else {
3897                         emlxs_proc_channel_event(hba, cp, iocbq);
3898                 }
3899                 mutex_enter(&EMLXS_FCTAB_LOCK);
3900         }
3901         mutex_exit(&EMLXS_FCTAB_LOCK);
3902 
3903         if (trigger) {
3904                 for (i = 0; i < hba->chan_count; i++) {
3905                         cp = &hba->chan[i];
3906                         if (cp->rsp_head != NULL) {
3907                                 emlxs_thread_trigger2(&cp->intr_thread,
3908                                     emlxs_proc_channel, cp);
3909                         }
3910                 }
3911         }
3912 
3913 } /* emlxs_sli4_hba_flush_chipq() */
3914 
3915 
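     /* Log a WQE completion whose request tag could not be matched to */
     /* an outstanding command (out-of-range tag); the entry is only */
     /* logged and then discarded. */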
3916 /*ARGSUSED*/
3917 static void
3918 emlxs_sli4_process_oor_wqe_cmpl(emlxs_hba_t *hba,
3919     CQ_DESC_t *cq, CQE_CmplWQ_t *cqe)
3920 {
3921         emlxs_port_t *port = &PPORT;
3922         CHANNEL *cp;
3923         uint16_t request_tag;
3924         CQE_u   *cq_entry;
3925 
3926         request_tag = cqe->RequestTag;
3927 
3928         cq_entry = (CQE_u *)cqe;
3929 
3930         /* 1 to 1 mapping between CQ and channel */
3931         cp = cq->channelp;
3932 
3933         cp->hbaCmplCmd++;
3934 
3935         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3936             "CQ ENTRY: OOR Cmpl: tag=%x", request_tag);
3937 
3938         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3939             "CQ ENTRY: %08x %08x %08x %08x", cq_entry->word[0],
3940             cq_entry->word[1], cq_entry->word[2], cq_entry->word[3]);
3941 
3942 } /* emlxs_sli4_process_oor_wqe_cmpl() */
3943 
3944 
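     /* Match a WQE completion to its command by request tag, convert */
     /* the CQE to an IOCB, then either queue the completion for the */
     /* channel thread or process it inline for polled packets. */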
3945 /*ARGSUSED*/
3946 static void
3947 emlxs_sli4_process_wqe_cmpl(emlxs_hba_t *hba, CQ_DESC_t *cq, CQE_CmplWQ_t *cqe)
3948 {
3949         emlxs_port_t *port = &PPORT;
3950         CHANNEL *cp;
3951         emlxs_buf_t *sbp;
3952         IOCBQ *iocbq;
3953         uint16_t request_tag;
3954 #ifdef SFCT_SUPPORT
3955         fct_cmd_t *fct_cmd;
3956         emlxs_buf_t *cmd_sbp;
3957 #endif /* SFCT_SUPPORT */
3958 
3959         request_tag = cqe->RequestTag;
3960 
3961         /* 1 to 1 mapping between CQ and channel */
3962         cp = cq->channelp;
3963 
3964         mutex_enter(&EMLXS_FCTAB_LOCK);
3965         sbp = hba->fc_table[request_tag];
3966         atomic_add_32(&hba->io_active, -1);
3967 
3968         if (sbp == STALE_PACKET) {
3969                 cp->hbaCmplCmd_sbp++;
3970                 mutex_exit(&EMLXS_FCTAB_LOCK);
3971                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3972                     "CQ ENTRY: Stale sbp. tag=%x. Dropping...", request_tag);
3973                 return;
3974         }
3975 
3976         if (!sbp || !(sbp->xrip)) {
3977                 cp->hbaCmplCmd++;
3978                 mutex_exit(&EMLXS_FCTAB_LOCK);
3979                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3980                     "CQ ENTRY: NULL sbp %p. tag=%x. Dropping...",
3981                     sbp, request_tag);
3982                 return;
3983         }
3984 
3985 #ifdef SLI4_FASTPATH_DEBUG
3986         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3987             "CQ ENTRY: process wqe compl");
3988 #endif
3989 
3990         cp->hbaCmplCmd_sbp++;
3991 
3992         /* Copy entry to sbp's iocbq */
3993         iocbq = &sbp->iocbq;
3994         emlxs_CQE_to_IOCB(hba, cqe, sbp);
3995 
3996         iocbq->next = NULL;
3997 
3998         if (cqe->XB) {
3999                 /* Mark exchange as ABORT in progress */
4000                 sbp->xrip->flag &= ~EMLXS_XRI_PENDING_IO;
4001                 sbp->xrip->flag |= EMLXS_XRI_ABORT_INP;
4002 
4003                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4004                     "CQ ENTRY: ABORT INP: tag=%x xri=%x", request_tag,
4005                     sbp->xrip->XRI);
4006 
4007                 emlxs_sli4_free_xri(hba, sbp, 0, 0);
4008         } else {
4009                 /* Exchange is no longer busy on-chip, free it */
4010                 emlxs_sli4_free_xri(hba, sbp, sbp->xrip, 0);
4011         }
4012 
4013         mutex_exit(&EMLXS_FCTAB_LOCK);
4014 
4015 #ifdef SFCT_SUPPORT
4016         fct_cmd = sbp->fct_cmd;
4017         if (fct_cmd) {
4018                 cmd_sbp = (emlxs_buf_t *)fct_cmd->cmd_fca_private;
4019                 mutex_enter(&cmd_sbp->fct_mtx);
4020                 EMLXS_FCT_STATE_CHG(fct_cmd, cmd_sbp, EMLXS_FCT_IOCB_COMPLETE);
4021                 mutex_exit(&cmd_sbp->fct_mtx);
4022         }
4023 #endif /* SFCT_SUPPORT */
4024 
4025         /*
4026          * If this is NOT a polled command completion
4027          * or a driver allocated pkt, then defer pkt
4028          * completion.
4029          */
4030         if (!(sbp->pkt_flags &
4031             (PACKET_POLLED | PACKET_ALLOCATED))) {
4032                 /* Add the IOCB to the channel list */
4033                 mutex_enter(&cp->rsp_lock);
4034                 if (cp->rsp_head == NULL) {
4035                         cp->rsp_head = iocbq;
4036                         cp->rsp_tail = iocbq;
4037                 } else {
4038                         cp->rsp_tail->next = iocbq;
4039                         cp->rsp_tail = iocbq;
4040                 }
4041                 mutex_exit(&cp->rsp_lock);
4042 
4043                 /* Delay triggering thread till end of ISR */
4044                 cp->chan_flag |= EMLXS_NEEDS_TRIGGER;
4045         } else {
4046                 emlxs_proc_channel_event(hba, cp, iocbq);
4047         }
4048 
4049 } /* emlxs_sli4_process_wqe_cmpl() */
4050 
4051 
4052 /*ARGSUSED*/
4053 static void
4054 emlxs_sli4_process_release_wqe(emlxs_hba_t *hba, CQ_DESC_t *cq,
4055     CQE_RelWQ_t *cqe)
4056 {
4057 #ifdef SLI4_FASTPATH_DEBUG
4058         emlxs_port_t *port = &PPORT;
4059 #endif
4060         WQ_DESC_t *wq;
4061         CHANNEL *cp;
4062         uint32_t i;
4063 
4064         i = cqe->WQid;
4065         wq = &hba->sli.sli4.wq[hba->sli.sli4.wq_map[i]];
4066 
4067 #ifdef SLI4_FASTPATH_DEBUG
4068         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4069             "CQ ENTRY: process release wqe: old %d new %d", wq->port_index,
4070             cqe->WQindex);
4071 #endif
4072 
4073         wq->port_index = cqe->WQindex;
4074 
4075         /* Cmd ring may be available. Try sending more iocbs */
4076         for (i = 0; i < hba->chan_count; i++) {
4077                 cp = &hba->chan[i];
4078                 if (wq == (WQ_DESC_t *)cp->iopath) {
4079                         emlxs_sli4_issue_iocb_cmd(hba, cp, 0);
4080                 }
4081         }
4082 
4083 } /* emlxs_sli4_process_release_wqe() */
4084 
4085 
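     /* Search the ELS or CT receive queue for an active sequence that */
     /* matches the frame's s_id/ox_id/seq_id and unlink it if found. */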
4086 /*ARGSUSED*/
4087 emlxs_iocbq_t *
4088 emlxs_sli4_rxq_get(emlxs_hba_t *hba, fc_frame_hdr_t *fchdr)
4089 {
4090         emlxs_queue_t *q;
4091         emlxs_iocbq_t *iocbq;
4092         emlxs_iocbq_t *prev;
4093         fc_frame_hdr_t *fchdr2;
4094         RXQ_DESC_t *rxq;
4095 
4096         switch (fchdr->type) {
4097         case 1: /* ELS */
4098                 rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_ELS];
4099                 break;
4100         case 0x20: /* CT */
4101                 rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_CT];
4102                 break;
4103         default:
4104                 return (NULL);
4105         }
4106 
4107         mutex_enter(&rxq->lock);
4108 
4109         q = &rxq->active;
4110         iocbq  = (emlxs_iocbq_t *)q->q_first;
4111         prev = NULL;
4112 
4113         while (iocbq) {
4114 
4115                 fchdr2 = (fc_frame_hdr_t *)iocbq->iocb.un.ulpWord;
4116 
4117                 if ((fchdr2->s_id == fchdr->s_id) &&
4118                     (fchdr2->ox_id == fchdr->ox_id) &&
4119                     (fchdr2->seq_id == fchdr->seq_id)) {
4120                         /* Remove iocbq */
4121                         if (prev) {
4122                                 prev->next = iocbq->next;
4123                         }
4124                         if (q->q_first == (uint8_t *)iocbq) {
4125                                 q->q_first = (uint8_t *)iocbq->next;
4126                         }
4127                         if (q->q_last == (uint8_t *)iocbq) {
4128                                 q->q_last = (uint8_t *)prev;
4129                         }
4130                         q->q_cnt--;
4131 
4132                         break;
4133                 }
4134 
4135                 prev  = iocbq;
4136                 iocbq = iocbq->next;
4137         }
4138 
4139         mutex_exit(&rxq->lock);
4140 
4141         return (iocbq);
4142 
4143 } /* emlxs_sli4_rxq_get() */
4144 
4145 
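     /* Append a partially received sequence to the ELS or CT receive */
     /* queue so later frames of the same sequence can be matched. */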
4146 /*ARGSUSED*/
4147 void
4148 emlxs_sli4_rxq_put(emlxs_hba_t *hba, emlxs_iocbq_t *iocbq)
4149 {
4150         emlxs_queue_t *q;
4151         fc_frame_hdr_t *fchdr;
4152         RXQ_DESC_t *rxq;
4153 
4154         fchdr = (fc_frame_hdr_t *)iocbq->iocb.RXFCHDR;
4155 
4156         switch (fchdr->type) {
4157         case 1: /* ELS */
4158                 rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_ELS];
4159                 break;
4160         case 0x20: /* CT */
4161                 rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_CT];
4162                 break;
4163         default:
4164                 return;
4165         }
4166 
4167         mutex_enter(&rxq->lock);
4168 
4169         q = &rxq->active;
4170 
4171         if (q->q_last) {
4172                 ((emlxs_iocbq_t *)q->q_last)->next = iocbq;
4173                 q->q_cnt++;
4174         } else {
4175                 q->q_first = (uint8_t *)iocbq;
4176                 q->q_cnt = 1;
4177         }
4178 
4179         q->q_last = (uint8_t *)iocbq;
4180         iocbq->next = NULL;
4181 
4182         mutex_exit(&rxq->lock);
4183 
4184         return;
4185 
4186 } /* emlxs_sli4_rxq_put() */
4187 
4188 
4189 static void
4190 emlxs_sli4_rq_post(emlxs_port_t *port, uint16_t rqid)
4191 {
4192         emlxs_hba_t *hba = HBA;
4193         emlxs_rqdbu_t rqdb;
4194 
4195         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4196             "RQ POST: rqid=%d count=1", rqid);
4197 
4198         /* Ring the RQ doorbell once to repost the RQ buffer */
4199         rqdb.word = 0;
4200         rqdb.db.Qid = rqid;
4201         rqdb.db.NumPosted = 1;
4202 
4203         WRITE_BAR2_REG(hba, FC_RQDB_REG(hba), rqdb.word);
4204 
4205 } /* emlxs_sli4_rq_post() */
4206 
4207 
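     /* Handle an unsolicited receive CQE: validate the status, read */
     /* the frame header from the header RQ and route the frame by FC */
     /* type (BLS/ABTS, ELS or CT). */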
4208 /*ARGSUSED*/
4209 static void
4210 emlxs_sli4_process_unsol_rcv(emlxs_hba_t *hba, CQ_DESC_t *cq,
4211     CQE_UnsolRcv_t *cqe)
4212 {
4213         emlxs_port_t *port = &PPORT;
4214         emlxs_port_t *vport;
4215         RQ_DESC_t *hdr_rq;
4216         RQ_DESC_t *data_rq;
4217         MBUF_INFO *hdr_mp;
4218         MBUF_INFO *data_mp;
4219         MATCHMAP *seq_mp;
4220         uint32_t *data;
4221         fc_frame_hdr_t fchdr;
4222         uint32_t hdr_rqi;
4223         uint32_t host_index;
4224         emlxs_iocbq_t *iocbq = NULL;
4225         emlxs_iocb_t *iocb;
4226         emlxs_node_t *node;
4227         uint32_t i;
4228         uint32_t seq_len;
4229         uint32_t seq_cnt;
4230         uint32_t buf_type;
4231         char label[32];
4232         emlxs_wqe_t *wqe;
4233         CHANNEL *cp;
4234         uint16_t iotag;
4235         XRIobj_t *xrip;
4236         RPIobj_t *rpip = NULL;
4237         uint32_t        cmd;
4238         uint32_t posted = 0;
4239         uint32_t abort = 1;
4240         off_t offset;
4241 
4242         hdr_rqi = hba->sli.sli4.rq_map[cqe->RQid];
4243         hdr_rq  = &hba->sli.sli4.rq[hdr_rqi];
4244         data_rq = &hba->sli.sli4.rq[hdr_rqi + 1];
4245 
4246         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4247             "CQ ENTRY: Unsol Rcv: RQid=%d,%d index=%d status=%x "
4248             "hdr_size=%d data_size=%d",
4249             cqe->RQid, hdr_rqi, hdr_rq->host_index, cqe->Status, cqe->hdr_size,
4250             cqe->data_size);
4251 
4252         /* Validate the CQE */
4253 
4254         /* Check status */
4255         switch (cqe->Status) {
4256         case RQ_STATUS_SUCCESS: /* 0x10 */
4257                 break;
4258 
4259         case RQ_STATUS_BUFLEN_EXCEEDED:  /* 0x11 */
4260                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4261                     "CQ ENTRY: Unsol Rcv: Payload truncated.");
4262                 break;
4263 
4264         case RQ_STATUS_NEED_BUFFER: /* 0x12 */
4265                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4266                     "CQ ENTRY: Unsol Rcv: Payload buffer needed.");
4267                 return;
4268 
4269         case RQ_STATUS_FRAME_DISCARDED:  /* 0x13 */
4270                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4271                     "CQ ENTRY: Unsol Rcv: Payload buffer discarded.");
4272                 return;
4273 
4274         default:
4275                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4276                     "CQ ENTRY: Unsol Rcv: Unknown status=%x.",
4277                     cqe->Status);
4278                 break;
4279         }
4280 
4281         /* Make sure there is a frame header */
4282         if (cqe->hdr_size < sizeof (fc_frame_hdr_t)) {
4283                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4284                     "CQ ENTRY: Unsol Rcv: FC header too small. Dropping...");
4285                 return;
4286         }
4287 
4288         /* Update host index */
4289         mutex_enter(&hba->sli.sli4.rq[hdr_rqi].lock);
4290         host_index = hdr_rq->host_index;
4291         hdr_rq->host_index++;
4292         if (hdr_rq->host_index >= hdr_rq->max_index) {
4293                 hdr_rq->host_index = 0;
4294         }
4295         data_rq->host_index = hdr_rq->host_index;
4296         mutex_exit(&hba->sli.sli4.rq[hdr_rqi].lock);
4297 
4298         /* Get the next header rqb */
4299         hdr_mp  = &hdr_rq->rqb[host_index];
4300 
4301         offset = (off_t)((uint64_t)((unsigned long)hdr_mp->virt) -
4302             (uint64_t)((unsigned long)hba->sli.sli4.slim2.virt));
4303 
4304         EMLXS_MPDATA_SYNC(hdr_mp->dma_handle, offset,
4305             sizeof (fc_frame_hdr_t), DDI_DMA_SYNC_FORKERNEL);
4306 
4307         LE_SWAP32_BCOPY(hdr_mp->virt, (uint8_t *)&fchdr,
4308             sizeof (fc_frame_hdr_t));
4309 
4310         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4311             "RQ HDR[%d]: rctl:%x type:%x "
4312             "sid:%x did:%x oxid:%x rxid:%x",
4313             host_index, fchdr.r_ctl, fchdr.type,
4314             fchdr.s_id,  fchdr.d_id, fchdr.ox_id, fchdr.rx_id);
4315 
4316         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4317             "RQ HDR[%d]: fctl:%x seq_id:%x seq_cnt:%x df_ctl:%x ro:%x",
4318             host_index, fchdr.f_ctl, fchdr.seq_id, fchdr.seq_cnt,
4319             fchdr.df_ctl, fchdr.ro);
4320 
4321         /* Verify fc header type */
4322         switch (fchdr.type) {
4323         case 0: /* BLS */
4324                 if (fchdr.r_ctl != 0x81) {
4325                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4326                             "RQ ENTRY: Unexpected FC rctl (0x%x) "
4327                             "received. Dropping...",
4328                             fchdr.r_ctl);
4329 
4330                         goto done;
4331                 }
4332 
4333                 /* Make sure there is no payload */
4334                 if (cqe->data_size != 0) {
4335                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4336                             "RQ ENTRY: ABTS payload provided. Dropping...");
4337 
4338                         goto done;
4339                 }
4340 
4341                 buf_type = 0xFFFFFFFF;
4342                 (void) strcpy(label, "ABTS");
4343                 cp = &hba->chan[hba->channel_els];
4344                 break;
4345 
4346         case 0x01: /* ELS */
4347                 /* Make sure there is a payload */
4348                 if (cqe->data_size == 0) {
4349                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4350                             "RQ ENTRY: Unsol Rcv: No ELS payload provided. "
4351                             "Dropping...");
4352 
4353                         goto done;
4354                 }
4355 
4356                 buf_type = MEM_ELSBUF;
4357                 (void) strcpy(label, "Unsol ELS");
4358                 cp = &hba->chan[hba->channel_els];
4359                 break;
4360 
4361         case 0x20: /* CT */
4362                 /* Make sure there is a payload */
4363                 if (cqe->data_size == 0) {
4364                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4365                             "RQ ENTRY: Unsol Rcv: No CT payload provided. "
4366                             "Dropping...");
4367 
4368                         goto done;
4369                 }
4370 
4371                 buf_type = MEM_CTBUF;
4372                 (void) strcpy(label, "Unsol CT");
4373                 cp = &hba->chan[hba->channel_ct];
4374                 break;
4375 
4376         default:
4377                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4378                     "RQ ENTRY: Unexpected FC type (0x%x) received. Dropping...",
4379                     fchdr.type);
4380 
4381                 goto done;
4382         }
4383         /* Fc Header is valid */
4384 
4385         /* Check if this is an active sequence */
4386         iocbq = emlxs_sli4_rxq_get(hba, &fchdr);
4387 
4388         if (!iocbq) {
4389                 if (fchdr.type != 0) {
4390                         if (!(fchdr.f_ctl & F_CTL_FIRST_SEQ)) {
4391                                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4392                                     "RQ ENTRY: %s: First of sequence not"
4393                                     " set.  Dropping...",
4394                                     label);
4395 
4396                                 goto done;
4397                         }
4398                 }
4399 
4400                 if (fchdr.seq_cnt != 0) {
4401                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4402                             "RQ ENTRY: %s: Sequence count not zero (%d).  "
4403                             "Dropping...",
4404                             label, fchdr.seq_cnt);
4405 
4406                         goto done;
4407                 }
4408 
4409                 /* Find vport (defaults to physical port) */
4410                 for (i = 0; i < MAX_VPORTS; i++) {
4411                         vport = &VPORT(i);
4412 
4413                         if (vport->did == fchdr.d_id) {
4414                                 port = vport;
4415                                 break;
4416                         }
4417                 }
4418 
4419                 /* Allocate an IOCBQ */
4420                 iocbq = (emlxs_iocbq_t *)emlxs_mem_get(hba,
4421                     MEM_IOCB, 1);
4422 
4423                 if (!iocbq) {
4424                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4425                             "RQ ENTRY: %s: Out of IOCB "
4426                             "resources.  Dropping...",
4427                             label);
4428 
4429                         goto done;
4430                 }
4431 
4432                 seq_mp = NULL;
4433                 if (fchdr.type != 0) {
4434                         /* Allocate a buffer */
4435                         seq_mp = (MATCHMAP *)emlxs_mem_get(hba, buf_type, 1);
4436 
4437                         if (!seq_mp) {
4438                                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4439                                     "RQ ENTRY: %s: Out of buffer "
4440                                     "resources.  Dropping...",
4441                                     label);
4442 
4443                                 goto done;
4444                         }
4445 
4446                         iocbq->bp = (uint8_t *)seq_mp;
4447                 }
4448 
4449                 node = (void *)emlxs_node_find_did(port, fchdr.s_id);
4450                 if (node == NULL) {
4451                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4452                             "RQ ENTRY: %s: Node not found. sid=%x",
4453                             label, fchdr.s_id);
4454                 }
4455 
4456                 /* Initialize the iocbq */
4457                 iocbq->port = port;
4458                 iocbq->channel = cp;
4459                 iocbq->node = node;
4460 
4461                 iocb = &iocbq->iocb;
4462                 iocb->RXSEQCNT = 0;
4463                 iocb->RXSEQLEN = 0;
4464 
4465                 seq_len = 0;
4466                 seq_cnt = 0;
4467 
4468         } else {
4469 
4470                 iocb = &iocbq->iocb;
4471                 port = iocbq->port;
4472                 node = (emlxs_node_t *)iocbq->node;
4473 
4474                 seq_mp = (MATCHMAP *)iocbq->bp;
4475                 seq_len = iocb->RXSEQLEN;
4476                 seq_cnt = iocb->RXSEQCNT;
4477 
4478                 /* Check sequence order */
4479                 if (fchdr.seq_cnt != seq_cnt) {
4480                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4481                             "RQ ENTRY: %s: Out of order frame received "
4482                             "(%d != %d).  Dropping...",
4483                             label, fchdr.seq_cnt, seq_cnt);
4484 
4485                         goto done;
4486                 }
4487         }
4488 
4489         /* We now have an iocbq */
4490 
4491         if (!port->VPIobj.vfip) {
4492                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4493                     "RQ ENTRY: %s: No fabric connection. "
4494                     "Dropping...",
4495                     label);
4496 
4497                 goto done;
4498         }
4499 
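             /*
              * Frames of a multi-frame sequence accumulate in one
              * contiguous buffer (seq_mp); seq_len tracks how much has
              * been copied so far and is checked against the buffer size
              * before each copy.
              */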
4500         /* Save the frame data to our seq buffer */
4501         if (cqe->data_size && seq_mp) {
4502                 /* Get the next data rqb */
4503                 data_mp = &data_rq->rqb[host_index];
4504 
4505                 offset = (off_t)((uint64_t)((unsigned long)
4506                     data_mp->virt) -
4507                     (uint64_t)((unsigned long)
4508                     hba->sli.sli4.slim2.virt));
4509 
4510                 EMLXS_MPDATA_SYNC(data_mp->dma_handle, offset,
4511                     cqe->data_size, DDI_DMA_SYNC_FORKERNEL);
4512 
4513                 data = (uint32_t *)data_mp->virt;
4514 
4515                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4516                     "RQ DAT[%d]: %08x %08x %08x %08x %08x %08x ...",
4517                     host_index, data[0], data[1], data[2], data[3],
4518                     data[4], data[5]);
4519 
4520                 /* Check sequence length */
4521                 if ((seq_len + cqe->data_size) > seq_mp->size) {
4522                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4523                             "RQ ENTRY: %s: Sequence buffer overflow. "
4524                             "(%d > %d). Dropping...",
4525                             label, (seq_len + cqe->data_size), seq_mp->size);
4526 
4527                         goto done;
4528                 }
4529 
4530                 /* Copy data to local receive buffer */
4531                 bcopy((uint8_t *)data, ((uint8_t *)seq_mp->virt +
4532                     seq_len), cqe->data_size);
4533 
4534                 seq_len += cqe->data_size;
4535         }
4536 
4537         /* If this is not the last frame of sequence, queue it. */
4538         if (!(fchdr.f_ctl & F_CTL_END_SEQ)) {
4539                 /* Save sequence header */
4540                 if (seq_cnt == 0) {
4541                         bcopy((uint8_t *)&fchdr, (uint8_t *)iocb->RXFCHDR,
4542                             sizeof (fc_frame_hdr_t));
4543                 }
4544 
4545                 /* Update sequence info in iocb */
4546                 iocb->RXSEQCNT = seq_cnt + 1;
4547                 iocb->RXSEQLEN = seq_len;
4548 
4549                 /* Queue iocbq for next frame */
4550                 emlxs_sli4_rxq_put(hba, iocbq);
4551 
4552                 /* Don't free resources */
4553                 iocbq = NULL;
4554 
4555                 /* No need to abort */
4556                 abort = 0;
4557 
4558                 goto done;
4559         }
4560 
4561         emlxs_sli4_rq_post(port, hdr_rq->qid);
4562         posted = 1;
4563 
4564         /* End of sequence found. Process request now. */
4565 
4566         if (seq_cnt > 0) {
4567                 /* Retrieve first frame of sequence */
4568                 bcopy((uint8_t *)iocb->RXFCHDR, (uint8_t *)&fchdr,
4569                     sizeof (fc_frame_hdr_t));
4570 
4571                 bzero((uint8_t *)iocb, sizeof (emlxs_iocb_t));
4572         }
4573 
4574         /* Build rcv iocb and process it */
4575         switch (fchdr.type) {
4576         case 0: /* BLS */
4577 
4578                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4579                     "RQ ENTRY: %s: xid:%x sid:%x. Sending BLS ACC...",
4580                     label, fchdr.ox_id, fchdr.s_id);
4581 
4582                 iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
4583 
4584                 /* Set up an iotag using special Abort iotags */
4585                 mutex_enter(&EMLXS_FCTAB_LOCK);
4586                 if (hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG) {
4587                         hba->fc_oor_iotag = hba->max_iotag;
4588                 }
4589                 iotag = hba->fc_oor_iotag++;
4590                 mutex_exit(&EMLXS_FCTAB_LOCK);
4591 
4592                 /* BLS ACC Response */
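                     /*
                      * Respond to the unsolicited ABTS with a BLS ACC
                      * built directly as an XMIT_BLS_RSP64 WQE.  No XRI
                      * is bound to this exchange (XRITag 0xffff), and
                      * SeqCntLow/SeqCntHigh of 0/0xFFFF follow the usual
                      * BA_ACC convention of covering the full SEQ_CNT
                      * range.
                      */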
4593                 wqe = &iocbq->wqe;
4594                 bzero((void *)wqe, sizeof (emlxs_wqe_t));
4595 
4596                 wqe->Command = CMD_XMIT_BLS_RSP64_CX;
4597                 wqe->CmdType = WQE_TYPE_GEN;
4598 
4599                 wqe->un.BlsRsp.Payload0 = 0x80;
4600                 wqe->un.BlsRsp.Payload1 = fchdr.seq_id;
4601 
4602                 wqe->un.BlsRsp.OXId = fchdr.ox_id;
4603                 wqe->un.BlsRsp.RXId = fchdr.rx_id;
4604 
4605                 wqe->un.BlsRsp.SeqCntLow = 0;
4606                 wqe->un.BlsRsp.SeqCntHigh = 0xFFFF;
4607 
4608                 wqe->un.BlsRsp.XO = 0;
4609                 wqe->un.BlsRsp.AR = 0;
4610                 wqe->un.BlsRsp.PT = 1;
4611                 wqe->un.BlsRsp.RemoteId = fchdr.s_id;
4612 
4613                 wqe->PU = 0x3;
4614                 wqe->ContextTag = port->VPIobj.VPI;
4615                 wqe->ContextType = WQE_VPI_CONTEXT;
4616                 wqe->OXId = (volatile uint16_t) fchdr.ox_id;
4617                 wqe->XRITag = 0xffff;
4618 
4619                 if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
4620                         wqe->CCPE = 1;
4621                         wqe->CCP = fchdr.rsvd;
4622                 }
4623 
4624                 wqe->Class = CLASS3;
4625                 wqe->RequestTag = iotag;
4626                 wqe->CQId = 0x3ff;
4627 
4628                 emlxs_sli4_issue_iocb_cmd(hba, iocbq->channel, iocbq);
4629 
4630                 break;
4631 
4632         case 0x01: /* ELS */
4633                 if (!(port->VPIobj.flag & EMLXS_VPI_PORT_ENABLED)) {
4634                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4635                             "RQ ENTRY: %s: Port not yet enabled. "
4636                             "Dropping...",
4637                             label);
4638 
4639                         goto done;
4640                 }
4641 
4642                 cmd = *((uint32_t *)seq_mp->virt);
4643                 cmd &= ELS_CMD_MASK;
4644                 rpip = NULL;
4645 
4646                 if (cmd != ELS_CMD_LOGO) {
4647                         rpip = EMLXS_NODE_TO_RPI(port, node);
4648                 }
4649 
4650                 if (!rpip) {
4651                         rpip = port->VPIobj.rpip;
4652                 }
4653 
4654                 xrip = emlxs_sli4_reserve_xri(hba, rpip);
4655 
4656                 if (!xrip) {
4657                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4658                             "RQ ENTRY: %s: Out of exchange "
4659                             "resources.  Dropping...",
4660                             label);
4661 
4662                         goto done;
4663                 }
4664 
4665                 xrip->rx_id = fchdr.ox_id;
4666 
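                     /*
                      * The assembled sequence is handed to the common
                      * unsolicited ELS path, which still expects an
                      * SLI3-style receive IOCB.
                      */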
4667                 /* Build CMD_RCV_ELS64_CX */
4668                 iocb->un.rcvels64.elsReq.tus.f.bdeFlags = 0;
4669                 iocb->un.rcvels64.elsReq.tus.f.bdeSize  = seq_len;
4670                 iocb->un.rcvels64.elsReq.addrLow  = PADDR_LO(seq_mp->phys);
4671                 iocb->un.rcvels64.elsReq.addrHigh = PADDR_HI(seq_mp->phys);
4672                 iocb->ULPBDECOUNT = 1;
4673 
4674                 iocb->un.rcvels64.remoteID = fchdr.s_id;
4675                 iocb->un.rcvels64.parmRo = fchdr.d_id;
4676 
4677                 iocb->ULPPU = 0x3;
4678                 iocb->ULPCONTEXT = xrip->XRI;
4679                 iocb->ULPIOTAG = ((node)? node->nlp_Rpi:0);
4680                 iocb->ULPCLASS = CLASS3;
4681                 iocb->ULPCOMMAND = CMD_RCV_ELS64_CX;
4682 
4683                 iocb->unsli3.ext_rcv.seq_len = seq_len;
4684                 iocb->unsli3.ext_rcv.vpi = port->VPIobj.VPI;
4685 
4686                 if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
4687                         iocb->unsli3.ext_rcv.ccpe = 1;
4688                         iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
4689                 }
4690 
4691                 (void) emlxs_els_handle_unsol_req(port, iocbq->channel,
4692                     iocbq, seq_mp, seq_len);
4693 
4694                 break;
4695 
4696         case 0x20: /* CT */
4697                 if (!(port->VPIobj.flag & EMLXS_VPI_PORT_ENABLED)) {
4698                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4699                             "RQ ENTRY: %s: Port not yet enabled. "
4700                             "Dropping...",
4701                             label);
4702 
4703                         goto done;
4704                 }
4705 
4706                 if (!node) {
4707                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4708                             "RQ ENTRY: %s: Node not found (did=%x).  "
4709                             "Dropping...",
4710                             label, fchdr.s_id);
4711 
4712                         goto done;
4713                 }
4714 
4715                 rpip = EMLXS_NODE_TO_RPI(port, node);
4716 
4717                 if (!rpip) {
4718                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4719                             "RQ ENTRY: %s: RPI not found (did=%x rpi=%x).  "
4720                             "Dropping...",
4721                             label, fchdr.s_id, node->nlp_Rpi);
4722 
4723                         goto done;
4724                 }
4725 
4726                 xrip = emlxs_sli4_reserve_xri(hba, rpip);
4727 
4728                 if (!xrip) {
4729                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4730                             "RQ ENTRY: %s: Out of exchange "
4731                             "resources.  Dropping...",
4732                             label);
4733 
4734                         goto done;
4735                 }
4736 
4737                 xrip->rx_id = fchdr.ox_id;
4738 
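                     /*
                      * As with ELS above, hand the sequence to the
                      * common unsolicited CT path by way of an
                      * SLI3-style receive IOCB.
                      */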
4739                 /* Build CMD_RCV_SEQ64_CX */
4740                 iocb->un.rcvseq64.rcvBde.tus.f.bdeFlags = 0;
4741                 iocb->un.rcvseq64.rcvBde.tus.f.bdeSize  = seq_len;
4742                 iocb->un.rcvseq64.rcvBde.addrLow  = PADDR_LO(seq_mp->phys);
4743                 iocb->un.rcvseq64.rcvBde.addrHigh = PADDR_HI(seq_mp->phys);
4744                 iocb->ULPBDECOUNT = 1;
4745 
4746                 iocb->un.rcvseq64.xrsqRo = 0;
4747                 iocb->un.rcvseq64.w5.hcsw.Rctl = fchdr.r_ctl;
4748                 iocb->un.rcvseq64.w5.hcsw.Type = fchdr.type;
4749                 iocb->un.rcvseq64.w5.hcsw.Dfctl = fchdr.df_ctl;
4750                 iocb->un.rcvseq64.w5.hcsw.Fctl = fchdr.f_ctl;
4751 
4752                 iocb->ULPPU = 0x3;
4753                 iocb->ULPCONTEXT = xrip->XRI;
4754                 iocb->ULPIOTAG = rpip->RPI;
4755                 iocb->ULPCLASS = CLASS3;
4756                 iocb->ULPCOMMAND = CMD_RCV_SEQ64_CX;
4757 
4758                 iocb->unsli3.ext_rcv.seq_len = seq_len;
4759                 iocb->unsli3.ext_rcv.vpi = port->VPIobj.VPI;
4760 
4761                 if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
4762                         iocb->unsli3.ext_rcv.ccpe = 1;
4763                         iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
4764                 }
4765 
4766                 (void) emlxs_ct_handle_unsol_req(port, iocbq->channel,
4767                     iocbq, seq_mp, seq_len);
4768 
4769                 break;
4770         }
4771 
4772         /* Sequence handled, no need to abort */
4773         abort = 0;
4774 
4775 done:
4776 
4777         if (!posted) {
4778                 emlxs_sli4_rq_post(port, hdr_rq->qid);
4779         }
4780 
4781         if (abort) {
4782                 /* Send ABTS for this exchange */
4783                 /* !!! Currently, we have no implementation for this !!! */
4784                 abort = 0;
4785         }
4786 
4787         /* Return memory resources to pools */
4788         if (iocbq) {
4789                 if (iocbq->bp) {
4790                         emlxs_mem_put(hba, buf_type, (void *)iocbq->bp);
4791                 }
4792 
4793                 emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
4794         }
4795 
4796 #ifdef FMA_SUPPORT
4797         if (emlxs_fm_check_dma_handle(hba,
4798             hba->sli.sli4.slim2.dma_handle)
4799             != DDI_FM_OK) {
4800                 EMLXS_MSGF(EMLXS_CONTEXT,
4801                     &emlxs_invalid_dma_handle_msg,
4802                     "emlxs_sli4_process_unsol_rcv: hdl=%p",
4803                     hba->sli.sli4.slim2.dma_handle);
4804 
4805                 emlxs_thread_spawn(hba, emlxs_restart_thread,
4806                     0, 0);
4807         }
4808 #endif
4809         return;
4810 
4811 } /* emlxs_sli4_process_unsol_rcv() */
4812 
4813 
4814 /*ARGSUSED*/
4815 static void
4816 emlxs_sli4_process_xri_aborted(emlxs_hba_t *hba, CQ_DESC_t *cq,
4817     CQE_XRI_Abort_t *cqe)
4818 {
4819         emlxs_port_t *port = &PPORT;
4820         XRIobj_t *xrip;
4821 
4822         mutex_enter(&EMLXS_FCTAB_LOCK);
4823 
4824         xrip = emlxs_sli4_find_xri(hba, cqe->XRI);
4825         if (xrip == NULL) {
4826                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4827                     "CQ ENTRY: process xri aborted ignored");
4828 
4829                 mutex_exit(&EMLXS_FCTAB_LOCK);
4830                 return;
4831         }
4832 
4833         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4834             "CQ ENTRY: process xri x%x aborted: IA %d EO %d BR %d",
4835             cqe->XRI, cqe->IA, cqe->EO, cqe->BR);
4836 
4837         if (!(xrip->flag & EMLXS_XRI_ABORT_INP)) {
4838                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4839                     "XRI Aborted: Bad state: x%x xri x%x",
4840                     xrip->flag, xrip->XRI);
4841 
4842                 mutex_exit(&EMLXS_FCTAB_LOCK);
4843                 return;
4844         }
4845 
4846         /* Exchange is no longer busy on-chip, free it */
4847         emlxs_sli4_free_xri(hba, 0, xrip, 0);
4848 
4849         mutex_exit(&EMLXS_FCTAB_LOCK);
4850 
4851         return;
4852 
4853 } /* emlxs_sli4_process_xri_aborted() */
4854 
4855 
4856 /*ARGSUSED*/
4857 static void
4858 emlxs_sli4_process_cq(emlxs_hba_t *hba, CQ_DESC_t *cq)
4859 {
4860         emlxs_port_t *port = &PPORT;
4861         CQE_u *cqe;
4862         CQE_u cq_entry;
4863         uint32_t cqdb;
4864         int num_entries = 0;
4865         off_t offset;
4866 
4867         /* EMLXS_PORT_LOCK must be held when entering this routine */
4868 
4869         cqe = (CQE_u *)cq->addr.virt;
4870         cqe += cq->host_index;
4871 
4872         offset = (off_t)((uint64_t)((unsigned long)
4873             cq->addr.virt) -
4874             (uint64_t)((unsigned long)
4875             hba->sli.sli4.slim2.virt));
4876 
4877         EMLXS_MPDATA_SYNC(cq->addr.dma_handle, offset,
4878             4096, DDI_DMA_SYNC_FORKERNEL);
4879 
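             /*
              * Consume CQEs while the valid bit is set.  Word 3 (which
              * carries the valid bit) is read first, each consumed entry
              * is zeroed, and EMLXS_PORT_LOCK is dropped while the entry
              * is dispatched to its handler.
              */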
4880         for (;;) {
4881                 cq_entry.word[3] = BE_SWAP32(cqe->word[3]);
4882                 if (!(cq_entry.word[3] & CQE_VALID))
4883                         break;
4884 
4885                 cq_entry.word[2] = BE_SWAP32(cqe->word[2]);
4886                 cq_entry.word[1] = BE_SWAP32(cqe->word[1]);
4887                 cq_entry.word[0] = BE_SWAP32(cqe->word[0]);
4888 
4889 #ifdef SLI4_FASTPATH_DEBUG
4890                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4891                     "CQ ENTRY: %08x %08x %08x %08x", cq_entry.word[0],
4892                     cq_entry.word[1], cq_entry.word[2], cq_entry.word[3]);
4893 #endif
4894 
4895                 num_entries++;
4896                 cqe->word[3] = 0;
4897 
4898                 cq->host_index++;
4899                 if (cq->host_index >= cq->max_index) {
4900                         cq->host_index = 0;
4901                         cqe = (CQE_u *)cq->addr.virt;
4902                 } else {
4903                         cqe++;
4904                 }
4905                 mutex_exit(&EMLXS_PORT_LOCK);
4906 
4907                 /* Now handle specific cq type */
4908                 if (cq->type == EMLXS_CQ_TYPE_GROUP1) {
4909                         if (cq_entry.cqAsyncEntry.async_evt) {
4910                                 emlxs_sli4_process_async_event(hba,
4911                                     (CQE_ASYNC_t *)&cq_entry);
4912                         } else {
4913                                 emlxs_sli4_process_mbox_event(hba,
4914                                     (CQE_MBOX_t *)&cq_entry);
4915                         }
4916                 } else { /* EMLXS_CQ_TYPE_GROUP2 */
4917                         switch (cq_entry.cqCmplEntry.Code) {
4918                         case CQE_TYPE_WQ_COMPLETION:
4919                                 if (cq_entry.cqCmplEntry.RequestTag <
4920                                     hba->max_iotag) {
4921                                         emlxs_sli4_process_wqe_cmpl(hba, cq,
4922                                             (CQE_CmplWQ_t *)&cq_entry);
4923                                 } else {
4924                                         emlxs_sli4_process_oor_wqe_cmpl(hba, cq,
4925                                             (CQE_CmplWQ_t *)&cq_entry);
4926                                 }
4927                                 break;
4928                         case CQE_TYPE_RELEASE_WQE:
4929                                 emlxs_sli4_process_release_wqe(hba, cq,
4930                                     (CQE_RelWQ_t *)&cq_entry);
4931                                 break;
4932                         case CQE_TYPE_UNSOL_RCV:
4933                                 emlxs_sli4_process_unsol_rcv(hba, cq,
4934                                     (CQE_UnsolRcv_t *)&cq_entry);
4935                                 break;
4936                         case CQE_TYPE_XRI_ABORTED:
4937                                 emlxs_sli4_process_xri_aborted(hba, cq,
4938                                     (CQE_XRI_Abort_t *)&cq_entry);
4939                                 break;
4940                         default:
4941                                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4942                                     "Invalid CQ entry %d: %08x %08x %08x %08x",
4943                                     cq_entry.cqCmplEntry.Code, cq_entry.word[0],
4944                                     cq_entry.word[1], cq_entry.word[2],
4945                                     cq_entry.word[3]);
4946                                 break;
4947                         }
4948                 }
4949 
4950                 mutex_enter(&EMLXS_PORT_LOCK);
4951         }
4952 
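             /*
              * Write the CQ doorbell once for the whole batch: queue id,
              * number of entries popped, and the rearm bit so the queue
              * can raise another event.
              */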
4953         cqdb = cq->qid;
4954         cqdb |= CQ_DB_REARM;
4955         if (num_entries != 0) {
4956                 cqdb |= ((num_entries << CQ_DB_POP_SHIFT) & CQ_DB_POP_MASK);
4957         }
4958 
4959 #ifdef SLI4_FASTPATH_DEBUG
4960         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4961             "CQ CLEAR: %08x: pops:x%x", cqdb, num_entries);
4962 #endif
4963 
4964         WRITE_BAR2_REG(hba, FC_CQDB_REG(hba), cqdb);
4965 
4966         /* EMLXS_PORT_LOCK must be held when exiting this routine */
4967 
4968 } /* emlxs_sli4_process_cq() */
4969 
4970 
4971 /*ARGSUSED*/
4972 static void
4973 emlxs_sli4_process_eq(emlxs_hba_t *hba, EQ_DESC_t *eq)
4974 {
4975 #ifdef SLI4_FASTPATH_DEBUG
4976         emlxs_port_t *port = &PPORT;
4977 #endif
4978         uint32_t eqdb;
4979         uint32_t *ptr;
4980         CHANNEL *cp;
4981         EQE_u eqe;
4982         uint32_t i;
4983         uint32_t value;
4984         int num_entries = 0;
4985         off_t offset;
4986 
4987         /* EMLXS_PORT_LOCK must be held when entering this routine */
4988 
4989         ptr = eq->addr.virt;
4990         ptr += eq->host_index;
4991 
4992         offset = (off_t)((uint64_t)((unsigned long)
4993             eq->addr.virt) -
4994             (uint64_t)((unsigned long)
4995             hba->sli.sli4.slim2.virt));
4996 
4997         EMLXS_MPDATA_SYNC(eq->addr.dma_handle, offset,
4998             4096, DDI_DMA_SYNC_FORKERNEL);
4999 
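             /*
              * Each valid EQE names a completion queue by hardware CQId;
              * cq_map[] translates that id to the driver's cq[] ordinal
              * before the CQ is serviced.
              */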
5000         for (;;) {
5001                 eqe.word = *ptr;
5002                 eqe.word = BE_SWAP32(eqe.word);
5003 
5004                 if (!(eqe.word & EQE_VALID))
5005                         break;
5006 
5007 #ifdef SLI4_FASTPATH_DEBUG
5008                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5009                     "EQ ENTRY: %08x", eqe.word);
5010 #endif
5011 
5012                 *ptr = 0;
5013                 num_entries++;
5014                 eq->host_index++;
5015                 if (eq->host_index >= eq->max_index) {
5016                         eq->host_index = 0;
5017                         ptr = eq->addr.virt;
5018                 } else {
5019                         ptr++;
5020                 }
5021 
5022                 value = hba->sli.sli4.cq_map[eqe.entry.CQId];
5023 
5024 #ifdef SLI4_FASTPATH_DEBUG
5025                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5026                     "EQ ENTRY:  CQIndex:x%x: cqid:x%x", value, eqe.entry.CQId);
5027 #endif
5028 
5029                 emlxs_sli4_process_cq(hba, &hba->sli.sli4.cq[value]);
5030         }
5031 
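             /*
              * Clear and rearm the EQ.  The EQ doorbell is written through
              * the same BAR2 doorbell register as the CQs (FC_CQDB_REG),
              * with EQ_DB_EVENT marking it as an EQ doorbell write.
              */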
5032         eqdb = eq->qid;
5033         eqdb |= (EQ_DB_CLEAR | EQ_DB_EVENT | EQ_DB_REARM);
5034 
5035 #ifdef SLI4_FASTPATH_DEBUG
5036         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5037             "EQ CLEAR: %08x: pops:x%x", eqdb, num_entries);
5038 #endif
5039 
5040         if (num_entries != 0) {
5041                 eqdb |= ((num_entries << EQ_DB_POP_SHIFT) & EQ_DB_POP_MASK);
5042                 for (i = 0; i < hba->chan_count; i++) {
5043                         cp = &hba->chan[i];
5044                         if (cp->chan_flag & EMLXS_NEEDS_TRIGGER) {
5045                                 cp->chan_flag &= ~EMLXS_NEEDS_TRIGGER;
5046                                 emlxs_thread_trigger2(&cp->intr_thread,
5047                                     emlxs_proc_channel, cp);
5048                         }
5049                 }
5050         }
5051 
5052         WRITE_BAR2_REG(hba, FC_CQDB_REG(hba), eqdb);
5053 
5054         /* EMLXS_PORT_LOCK must be held when exiting this routine */
5055 
5056 } /* emlxs_sli4_process_eq() */
5057 
5058 
5059 #ifdef MSI_SUPPORT
5060 /*ARGSUSED*/
5061 static uint32_t
5062 emlxs_sli4_msi_intr(char *arg1, char *arg2)
5063 {
5064         emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
5065 #ifdef SLI4_FASTPATH_DEBUG
5066         emlxs_port_t *port = &PPORT;
5067 #endif
5068         uint16_t msgid;
5069         int rc;
5070 
5071 #ifdef SLI4_FASTPATH_DEBUG
5072         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5073             "msiINTR arg1:%p arg2:%p", arg1, arg2);
5074 #endif
5075 
5076         /* Check for legacy interrupt handling */
5077         if (hba->intr_type == DDI_INTR_TYPE_FIXED) {
5078                 rc = emlxs_sli4_intx_intr(arg1);
5079                 return (rc);
5080         }
5081 
5082         /* Get MSI message id */
5083         msgid = (uint16_t)((unsigned long)arg2);
5084 
5085         /* Validate the message id */
5086         if (msgid >= hba->intr_count) {
5087                 msgid = 0;
5088         }
5089         mutex_enter(&EMLXS_PORT_LOCK);
5090 
5091         if ((hba->state == FC_KILLED) || (hba->flag & FC_OFFLINE_MODE)) {
5092                 mutex_exit(&EMLXS_PORT_LOCK);
5093                 return (DDI_INTR_UNCLAIMED);
5094         }
5095 
5096         /* The eq[] index == the MSI vector number */
5097         emlxs_sli4_process_eq(hba, &hba->sli.sli4.eq[msgid]);
5098 
5099         mutex_exit(&EMLXS_PORT_LOCK);
5100         return (DDI_INTR_CLAIMED);
5101 
5102 } /* emlxs_sli4_msi_intr() */
5103 #endif /* MSI_SUPPORT */
5104 
5105 
5106 /*ARGSUSED*/
5107 static int
5108 emlxs_sli4_intx_intr(char *arg)
5109 {
5110         emlxs_hba_t *hba = (emlxs_hba_t *)arg;
5111 #ifdef SLI4_FASTPATH_DEBUG
5112         emlxs_port_t *port = &PPORT;
5113 #endif
5114 
5115 #ifdef SLI4_FASTPATH_DEBUG
5116         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5117             "intxINTR arg:%p", arg);
5118 #endif
5119 
5120         mutex_enter(&EMLXS_PORT_LOCK);
5121 
5122         if ((hba->state == FC_KILLED) || (hba->flag & FC_OFFLINE_MODE)) {
5123                 mutex_exit(&EMLXS_PORT_LOCK);
5124                 return (DDI_INTR_UNCLAIMED);
5125         }
5126 
5127         emlxs_sli4_process_eq(hba, &hba->sli.sli4.eq[0]);
5128 
5129         mutex_exit(&EMLXS_PORT_LOCK);
5130         return (DDI_INTR_CLAIMED);
5131 } /* emlxs_sli4_intx_intr() */
5132 
5133 
5134 static void
5135 emlxs_sli4_hba_kill(emlxs_hba_t *hba)
5136 {
5137         emlxs_port_t *port = &PPORT;
5138         uint32_t j;
5139 
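             /*
              * Wait up to ~1 second (10000 x 100us polls) for any active
              * mailbox command to complete before interlocking the
              * adapter and marking it killed.
              */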
5140         mutex_enter(&EMLXS_PORT_LOCK);
5141         if (hba->flag & FC_INTERLOCKED) {
5142                 EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);
5143 
5144                 mutex_exit(&EMLXS_PORT_LOCK);
5145 
5146                 return;
5147         }
5148 
5149         j = 0;
5150         while (j++ < 10000) {
5151                 if (hba->mbox_queue_flag == 0) {
5152                         break;
5153                 }
5154 
5155                 mutex_exit(&EMLXS_PORT_LOCK);
5156                 DELAYUS(100);
5157                 mutex_enter(&EMLXS_PORT_LOCK);
5158         }
5159 
5160         if (hba->mbox_queue_flag != 0) {
5161                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5162                     "Board kill failed. Mailbox busy.");
5163                 mutex_exit(&EMLXS_PORT_LOCK);
5164                 return;
5165         }
5166 
5167         hba->flag |= FC_INTERLOCKED;
5168 
5169         EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);
5170 
5171         mutex_exit(&EMLXS_PORT_LOCK);
5172 
5173 } /* emlxs_sli4_hba_kill() */
5174 
5175 
5176 static void
5177 emlxs_sli4_enable_intr(emlxs_hba_t *hba)
5178 {
5179         emlxs_config_t *cfg = &CFG;
5180         int i;
5181         int num_cq;
5182         uint32_t data;
5183 
5184         hba->sli.sli4.flag |= EMLXS_SLI4_INTR_ENABLED;
5185 
5186         num_cq = (hba->intr_count * cfg[CFG_NUM_WQ].current) +
5187             EMLXS_CQ_OFFSET_WQ;
5188 
5189         /* ARM EQ / CQs */
5190         for (i = 0; i < num_cq; i++) {
5191                 data = hba->sli.sli4.cq[i].qid;
5192                 data |= CQ_DB_REARM;
5193                 WRITE_BAR2_REG(hba, FC_CQDB_REG(hba), data);
5194         }
5195         for (i = 0; i < hba->intr_count; i++) {
5196                 data = hba->sli.sli4.eq[i].qid;
5197                 data |= (EQ_DB_REARM | EQ_DB_EVENT);
5198                 WRITE_BAR2_REG(hba, FC_CQDB_REG(hba), data);
5199         }
5200 } /* emlxs_sli4_enable_intr() */
5201 
5202 
5203 static void
5204 emlxs_sli4_disable_intr(emlxs_hba_t *hba, uint32_t att)
5205 {
5206         if (att) {
5207                 return;
5208         }
5209 
5210         hba->sli.sli4.flag &= ~EMLXS_SLI4_INTR_ENABLED;
5211 
5212         /* Short of reset, we cannot disable interrupts */
5213 } /* emlxs_sli4_disable_intr() */
5214 
5215 
5216 static void
5217 emlxs_sli4_resource_free(emlxs_hba_t *hba)
5218 {
5219         emlxs_port_t    *port = &PPORT;
5220         MBUF_INFO       *buf_info;
5221         uint32_t        i;
5222 
5223         emlxs_fcf_fini(hba);
5224 
5225         buf_info = &hba->sli.sli4.HeaderTmplate;
5226         if (buf_info->virt) {
5227                 bzero(buf_info, sizeof (MBUF_INFO));
5228         }
5229 
5230         if (hba->sli.sli4.XRIp) {
5231                 if ((hba->sli.sli4.XRIinuse_f !=
5232                     (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) ||
5233                     (hba->sli.sli4.XRIinuse_b !=
5234                     (XRIobj_t *)&hba->sli.sli4.XRIinuse_f)) {
5235                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
5236                             "XRIs inuse during free!: %p %p != %p",
5237                             hba->sli.sli4.XRIinuse_f,
5238                             hba->sli.sli4.XRIinuse_b,
5239                             &hba->sli.sli4.XRIinuse_f);
5240                 }
5241                 kmem_free(hba->sli.sli4.XRIp,
5242                     (sizeof (XRIobj_t) * hba->sli.sli4.XRICount));
5243                 hba->sli.sli4.XRIp = NULL;
5244 
5245                 hba->sli.sli4.XRIfree_f =
5246                     (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
5247                 hba->sli.sli4.XRIfree_b =
5248                     (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
5249                 hba->sli.sli4.xrif_count = 0;
5250         }
5251 
5252         for (i = 0; i < EMLXS_MAX_EQS; i++) {
5253                 mutex_destroy(&hba->sli.sli4.eq[i].lastwq_lock);
5254                 bzero(&hba->sli.sli4.eq[i], sizeof (EQ_DESC_t));
5255         }
5256         for (i = 0; i < EMLXS_MAX_CQS; i++) {
5257                 bzero(&hba->sli.sli4.cq[i], sizeof (CQ_DESC_t));
5258         }
5259         for (i = 0; i < EMLXS_MAX_WQS; i++) {
5260                 bzero(&hba->sli.sli4.wq[i], sizeof (WQ_DESC_t));
5261         }
5262         for (i = 0; i < EMLXS_MAX_RQS; i++) {
5263                 mutex_destroy(&hba->sli.sli4.rq[i].lock);
5264                 mutex_destroy(&hba->sli.sli4.rxq[i].lock);
5265                 bzero(&hba->sli.sli4.rxq[i], sizeof (RXQ_DESC_t));
5266                 bzero(&hba->sli.sli4.rq[i], sizeof (RQ_DESC_t));
5267         }
5268 
5269         /* Free the MQ */
5270         bzero(&hba->sli.sli4.mq, sizeof (MQ_DESC_t));
5271 
5272         buf_info = &hba->sli.sli4.slim2;
5273         if (buf_info->virt) {
5274                 buf_info->flags = FC_MBUF_DMA;
5275                 emlxs_mem_free(hba, buf_info);
5276                 bzero(buf_info, sizeof (MBUF_INFO));
5277         }
5278 
5279         /* Cleanup queue ordinal mapping */
5280         for (i = 0; i < EMLXS_MAX_EQ_IDS; i++) {
5281                 hba->sli.sli4.eq_map[i] = 0xffff;
5282         }
5283         for (i = 0; i < EMLXS_MAX_CQ_IDS; i++) {
5284                 hba->sli.sli4.cq_map[i] = 0xffff;
5285         }
5286         for (i = 0; i < EMLXS_MAX_WQ_IDS; i++) {
5287                 hba->sli.sli4.wq_map[i] = 0xffff;
5288         }
5289 
5290 } /* emlxs_sli4_resource_free() */
5291 
5292 
5293 static int
5294 emlxs_sli4_resource_alloc(emlxs_hba_t *hba)
5295 {
5296         emlxs_port_t    *port = &PPORT;
5297         emlxs_config_t  *cfg = &CFG;
5298         MBUF_INFO       *buf_info;
5299         uint16_t        index;
5300         int             num_eq;
5301         int             num_wq;
5302         uint16_t        i;
5303         uint32_t        j;
5304         uint32_t        k;
5305         uint32_t        word;
5306         XRIobj_t        *xrip;
5307         char            buf[64];
5308         RQE_t           *rqe;
5309         MBUF_INFO       *rqb;
5310         uint64_t        phys;
5311         uint64_t        tmp_phys;
5312         char            *virt;
5313         char            *tmp_virt;
5314         void            *data_handle;
5315         void            *dma_handle;
5316         int32_t         size;
5317         off_t           offset;
5318         uint32_t        count = 0;
5319 
5320         emlxs_fcf_init(hba);
5321 
5322         /* EQs - 1 per Interrupt vector */
5323         num_eq = hba->intr_count;
5324         /* WQs - NUM_WQ config parameter * number of EQs */
5325         num_wq = cfg[CFG_NUM_WQ].current * num_eq;
5326 
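             /*
              * All EQ/CQ/WQ/MQ/RQ rings, the RQ buffer pool, the XRI SGLs
              * and the RPI header templates are carved out of one large
              * DMA allocation (slim2); the sections below compute its
              * total size.
              */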
5327         /* Calculate total dmable memory we need */
5328         /* EQ */
5329         count += num_eq * 4096;
5330         /* CQ - one per WQ, plus 1 for the RQs and 1 for mbox/async events */
5331         count += (num_wq + EMLXS_CQ_OFFSET_WQ) * 4096;
5332         /* WQ */
5333         count += num_wq * (4096 * EMLXS_NUM_WQ_PAGES);
5334         /* MQ */
5335         count +=  EMLXS_MAX_MQS * 4096;
5336         /* RQ */
5337         count +=  EMLXS_MAX_RQS * 4096;
5338         /* RQB/E */
5339         count += RQB_COUNT * (RQB_DATA_SIZE + RQB_HEADER_SIZE);
5340         /* SGL */
5341         count += hba->sli.sli4.XRICount * hba->sli.sli4.mem_sgl_size;
5342         /* RPI Head Template */
5343         count += hba->sli.sli4.RPICount * sizeof (RPIHdrTmplate_t);
5344 
5345         /* Allocate slim2 for SLI4 */
5346         buf_info = &hba->sli.sli4.slim2;
5347         buf_info->size = count;
5348         buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5349         buf_info->align = ddi_ptob(hba->dip, 1L);
5350 
5351         (void) emlxs_mem_alloc(hba, buf_info);
5352 
5353         if (buf_info->virt == NULL) {
5354                 EMLXS_MSGF(EMLXS_CONTEXT,
5355                     &emlxs_init_failed_msg,
5356                     "Unable to allocate internal memory for SLI4: %d",
5357                     count);
5358                 goto failed;
5359         }
5360         bzero(buf_info->virt, buf_info->size);
5361         EMLXS_MPDATA_SYNC(buf_info->dma_handle, 0,
5362             buf_info->size, DDI_DMA_SYNC_FORDEV);
5363 
5364         /* Assign memory to SGL, Head Template, EQ, CQ, WQ, RQ and MQ */
5365         data_handle = buf_info->data_handle;
5366         dma_handle = buf_info->dma_handle;
5367         phys = buf_info->phys;
5368         virt = (char *)buf_info->virt;
5369 
5370         /* Allocate space for queues */
5371         size = 4096;
5372         for (i = 0; i < num_eq; i++) {
5373                 buf_info = &hba->sli.sli4.eq[i].addr;
5374                 if (buf_info->virt == NULL) {
5375                         bzero(&hba->sli.sli4.eq[i], sizeof (EQ_DESC_t));
5376                         buf_info->size = size;
5377                         buf_info->flags =
5378                             FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5379                         buf_info->align = ddi_ptob(hba->dip, 1L);
5380                         buf_info->phys = phys;
5381                         buf_info->virt = (void *)virt;
5382                         buf_info->data_handle = data_handle;
5383                         buf_info->dma_handle = dma_handle;
5384 
5385                         phys += size;
5386                         virt += size;
5387 
5388                         hba->sli.sli4.eq[i].max_index = EQ_DEPTH;
5389                 }
5390 
5391                 (void) sprintf(buf, "%s_eq%d_lastwq_lock mutex",
5392                     DRIVER_NAME, i);
5393                 mutex_init(&hba->sli.sli4.eq[i].lastwq_lock, buf,
5394                     MUTEX_DRIVER, NULL);
5395         }
5396 
5397         size = 4096;
5398         for (i = 0; i < (num_wq + EMLXS_CQ_OFFSET_WQ); i++) {
5399                 buf_info = &hba->sli.sli4.cq[i].addr;
5400                 if (buf_info->virt == NULL) {
5401                         bzero(&hba->sli.sli4.cq[i], sizeof (CQ_DESC_t));
5402                         buf_info->size = size;
5403                         buf_info->flags =
5404                             FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5405                         buf_info->align = ddi_ptob(hba->dip, 1L);
5406                         buf_info->phys = phys;
5407                         buf_info->virt = (void *)virt;
5408                         buf_info->data_handle = data_handle;
5409                         buf_info->dma_handle = dma_handle;
5410 
5411                         phys += size;
5412                         virt += size;
5413 
5414                         hba->sli.sli4.cq[i].max_index = CQ_DEPTH;
5415                 }
5416         }
5417 
5418         /* WQs - NUM_WQ config parameter * number of EQs */
5419         size = 4096 * EMLXS_NUM_WQ_PAGES;
5420         for (i = 0; i < num_wq; i++) {
5421                 buf_info = &hba->sli.sli4.wq[i].addr;
5422                 if (buf_info->virt == NULL) {
5423                         bzero(&hba->sli.sli4.wq[i], sizeof (WQ_DESC_t));
5424                         buf_info->size = size;
5425                         buf_info->flags =
5426                             FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5427                         buf_info->align = ddi_ptob(hba->dip, 1L);
5428                         buf_info->phys = phys;
5429                         buf_info->virt = (void *)virt;
5430                         buf_info->data_handle = data_handle;
5431                         buf_info->dma_handle = dma_handle;
5432 
5433                         phys += size;
5434                         virt += size;
5435 
5436                         hba->sli.sli4.wq[i].max_index = WQ_DEPTH;
5437                         hba->sli.sli4.wq[i].release_depth = WQE_RELEASE_DEPTH;
5438                 }
5439         }
5440 
5441         /* MQ */
5442         size = 4096;
5443         buf_info = &hba->sli.sli4.mq.addr;
5444         if (!buf_info->virt) {
5445                 bzero(&hba->sli.sli4.mq, sizeof (MQ_DESC_t));
5446                 buf_info->size = size;
5447                 buf_info->flags =
5448                     FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5449                 buf_info->align = ddi_ptob(hba->dip, 1L);
5450                 buf_info->phys = phys;
5451                 buf_info->virt = (void *)virt;
5452                 buf_info->data_handle = data_handle;
5453                 buf_info->dma_handle = dma_handle;
5454 
5455                 phys += size;
5456                 virt += size;
5457 
5458                 hba->sli.sli4.mq.max_index = MQ_DEPTH;
5459         }
5460 
5461         /* RXQs */
5462         for (i = 0; i < EMLXS_MAX_RXQS; i++) {
5463                 bzero(&hba->sli.sli4.rxq[i], sizeof (RXQ_DESC_t));
5464 
5465                 (void) sprintf(buf, "%s_rxq%d_lock mutex", DRIVER_NAME, i);
5466                 mutex_init(&hba->sli.sli4.rxq[i].lock, buf, MUTEX_DRIVER, NULL);
5467         }
5468 
5469         /* RQs */
5470         size = 4096;
5471         for (i = 0; i < EMLXS_MAX_RQS; i++) {
5472                 buf_info = &hba->sli.sli4.rq[i].addr;
5473                 if (buf_info->virt) {
5474                         continue;
5475                 }
5476 
5477                 bzero(&hba->sli.sli4.rq[i], sizeof (RQ_DESC_t));
5478                 buf_info->size = size;
5479                 buf_info->flags =
5480                     FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5481                 buf_info->align = ddi_ptob(hba->dip, 1L);
5482                 buf_info->phys = phys;
5483                 buf_info->virt = (void *)virt;
5484                 buf_info->data_handle = data_handle;
5485                 buf_info->dma_handle = dma_handle;
5486 
5487                 phys += size;
5488                 virt += size;
5489 
5490                 hba->sli.sli4.rq[i].max_index = RQ_DEPTH;
5491 
5492                 (void) sprintf(buf, "%s_rq%d_lock mutex", DRIVER_NAME, i);
5493                 mutex_init(&hba->sli.sli4.rq[i].lock, buf, MUTEX_DRIVER, NULL);
5494         }
5495 
5496         /* Setup RQE */
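             /*
              * Even-numbered RQs are header rings (RQB_HEADER_SIZE
              * buffers) and odd-numbered RQs are data rings
              * (RQB_DATA_SIZE buffers).  Each RQE is loaded with the
              * big-endian physical address of its buffer, and the same
              * RQB_COUNT buffers are reused as the ring wraps.
              */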
5497         for (i = 0; i < EMLXS_MAX_RQS; i++) {
5498                 size = (i & 0x1) ? RQB_DATA_SIZE : RQB_HEADER_SIZE;
5499                 tmp_phys = phys;
5500                 tmp_virt = virt;
5501 
5502                 /* Initialize the RQEs */
5503                 rqe = (RQE_t *)hba->sli.sli4.rq[i].addr.virt;
5504                 for (j = 0; j < (RQ_DEPTH/RQB_COUNT); j++) {
5505                         phys = tmp_phys;
5506                         virt = tmp_virt;
5507                         for (k = 0; k < RQB_COUNT; k++) {
5508                                 word = PADDR_HI(phys);
5509                                 rqe->AddrHi = BE_SWAP32(word);
5510 
5511                                 word = PADDR_LO(phys);
5512                                 rqe->AddrLo = BE_SWAP32(word);
5513 
5514                                 rqb = &hba->sli.sli4.rq[i].
5515                                     rqb[k + (j * RQB_COUNT)];
5516                                 rqb->size = size;
5517                                 rqb->flags = FC_MBUF_DMA |
5518                                     FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5519                                 rqb->align = ddi_ptob(hba->dip, 1L);
5520                                 rqb->phys = phys;
5521                                 rqb->virt = (void *)virt;
5522                                 rqb->data_handle = data_handle;
5523                                 rqb->dma_handle = dma_handle;
5524 
5525                                 phys += size;
5526                                 virt += size;
5527 #ifdef RQ_DEBUG
5528                                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5529                                     "RQ_ALLOC: rq[%d] rqb[%d,%d]=%p size=%d",
5530                                     i, j, k, rqb, rqb->size);
5531 #endif
5532 
5533                                 rqe++;
5534                         }
5535                 }
5536 
5537                 offset = (off_t)((uint64_t)((unsigned long)
5538                     hba->sli.sli4.rq[i].addr.virt) -
5539                     (uint64_t)((unsigned long)
5540                     hba->sli.sli4.slim2.virt));
5541 
5542                 /* Sync the RQ buffer list */
5543                 EMLXS_MPDATA_SYNC(hba->sli.sli4.rq[i].addr.dma_handle, offset,
5544                     hba->sli.sli4.rq[i].addr.size, DDI_DMA_SYNC_FORDEV);
5545         }
5546 
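             /*
              * XRI objects live on two sentinel-headed doubly linked
              * lists (XRIfree and XRIinuse).  XRI 0 and iotag 0 are never
              * placed on the free list, and each XRI carries its own
              * pre-carved SGL from the slim2 area.
              */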
5547         if ((!hba->sli.sli4.XRIp) && (hba->sli.sli4.XRICount)) {
5548                 /* Initialize double linked lists */
5549                 hba->sli.sli4.XRIinuse_f =
5550                     (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
5551                 hba->sli.sli4.XRIinuse_b =
5552                     (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
5553                 hba->sli.sli4.xria_count = 0;
5554 
5555                 hba->sli.sli4.XRIfree_f =
5556                     (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
5557                 hba->sli.sli4.XRIfree_b =
5558                     (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
5559                 hba->sli.sli4.xrif_count = 0;
5560 
5561                 hba->sli.sli4.XRIp = (XRIobj_t *)kmem_zalloc(
5562                     (sizeof (XRIobj_t) * hba->sli.sli4.XRICount), KM_SLEEP);
5563 
5564                 xrip = hba->sli.sli4.XRIp;
5565                 index = hba->sli.sli4.XRIBase;
5566                 size = hba->sli.sli4.mem_sgl_size;
5567                 for (i = 0; i < hba->sli.sli4.XRICount; i++) {
5568                         xrip->sge_count =
5569                             (hba->sli.sli4.mem_sgl_size / sizeof (ULP_SGE64));
5570                         xrip->XRI = index;
5571                         xrip->iotag = i;
5572                         if ((xrip->XRI == 0) || (xrip->iotag == 0)) {
5573                                 index++; /* Skip XRI 0 or IOTag 0 */
5574                                 xrip++;
5575                                 continue;
5576                         }
5577                         /* Add xrip to end of free list */
5578                         xrip->_b = hba->sli.sli4.XRIfree_b;
5579                         hba->sli.sli4.XRIfree_b->_f = xrip;
5580                         xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
5581                         hba->sli.sli4.XRIfree_b = xrip;
5582                         hba->sli.sli4.xrif_count++;
5583 
5584                         /* Allocate SGL for this xrip */
5585                         buf_info = &xrip->SGList;
5586                         buf_info->size = size;
5587                         buf_info->flags =
5588                             FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5589                         buf_info->align = size;
5590                         buf_info->phys = phys;
5591                         buf_info->virt = (void *)virt;
5592                         buf_info->data_handle = data_handle;
5593                         buf_info->dma_handle = dma_handle;
5594 
5595                         phys += size;
5596                         virt += size;
5597 
5598                         xrip++;
5599                         index++;
5600                 }
5601         }
5602 
5603         size = sizeof (RPIHdrTmplate_t) * hba->sli.sli4.RPICount;
5604         buf_info = &hba->sli.sli4.HeaderTmplate;
5605         if ((buf_info->virt == NULL) && (hba->sli.sli4.RPICount)) {
5606                 bzero(buf_info, sizeof (MBUF_INFO));
5607                 buf_info->size = size;
5608                 buf_info->flags = FC_MBUF_DMA | FC_MBUF_DMA32;
5609                 buf_info->align = ddi_ptob(hba->dip, 1L);
5610                 buf_info->phys = phys;
5611                 buf_info->virt = (void *)virt;
5612                 buf_info->data_handle = data_handle;
5613                 buf_info->dma_handle = dma_handle;
5614         }
5615 
5616 #ifdef FMA_SUPPORT
5617         if (hba->sli.sli4.slim2.dma_handle) {
5618                 if (emlxs_fm_check_dma_handle(hba,
5619                     hba->sli.sli4.slim2.dma_handle)
5620                     != DDI_FM_OK) {
5621                         EMLXS_MSGF(EMLXS_CONTEXT,
5622                             &emlxs_invalid_dma_handle_msg,
5623                             "emlxs_sli4_resource_alloc: hdl=%p",
5624                             hba->sli.sli4.slim2.dma_handle);
5625                         goto failed;
5626                 }
5627         }
5628 #endif
5629 
5630         return (0);
5631 
5632 failed:
5633 
5634         emlxs_sli4_resource_free(hba);
5635         return (ENOMEM);
5636 
5637 } /* emlxs_sli4_resource_alloc() */
5638 
5639 
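     /*
      * Reserves a free XRI for a new exchange.  The fc_table iotag binding
      * is completed later by emlxs_sli4_register_xri().
      */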
5640 static XRIobj_t *
5641 emlxs_sli4_reserve_xri(emlxs_hba_t *hba,  RPIobj_t *rpip)
5642 {
5643         emlxs_port_t    *port = &PPORT;
5644         XRIobj_t        *xrip;
5645         uint16_t        iotag;
5646 
5647         mutex_enter(&EMLXS_FCTAB_LOCK);
5648 
5649         xrip = hba->sli.sli4.XRIfree_f;
5650 
5651         if (xrip == (XRIobj_t *)&hba->sli.sli4.XRIfree_f) {
5652                 mutex_exit(&EMLXS_FCTAB_LOCK);
5653 
5654                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5655                     "Unable to reserve XRI");
5656 
5657                 return (NULL);
5658         }
5659 
5660         iotag = xrip->iotag;
5661 
5662         if ((!iotag) ||
5663             ((hba->fc_table[iotag] != NULL) &&
5664             (hba->fc_table[iotag] != STALE_PACKET))) {
5665                 /*
5666                  * No more command slots available, retry later
5667                  */
5668                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
5669                     "Adapter Busy. Unable to reserve iotag");
5670 
5671                 mutex_exit(&EMLXS_FCTAB_LOCK);
5672                 return (NULL);
5673         }
5674 
5675         xrip->state = XRI_STATE_ALLOCATED;
5676         xrip->flag = EMLXS_XRI_RESERVED;
5677         xrip->rpip = rpip;
5678         xrip->sbp = NULL;
5679 
5680         if (rpip) {
5681                 rpip->xri_count++;
5682         }
5683 
5684         /* Take it off free list */
5685         (xrip->_b)->_f = xrip->_f;
5686         (xrip->_f)->_b = xrip->_b;
5687         xrip->_f = NULL;
5688         xrip->_b = NULL;
5689         hba->sli.sli4.xrif_count--;
5690 
5691         /* Add it to end of inuse list */
5692         xrip->_b = hba->sli.sli4.XRIinuse_b;
5693         hba->sli.sli4.XRIinuse_b->_f = xrip;
5694         xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
5695         hba->sli.sli4.XRIinuse_b = xrip;
5696         hba->sli.sli4.xria_count++;
5697 
5698         mutex_exit(&EMLXS_FCTAB_LOCK);
5699         return (xrip);
5700 
5701 } /* emlxs_sli4_reserve_xri() */
5702 
5703 
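     /*
      * Returns a reserved (but not yet registered) XRI to the free list.
      */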
5704 extern uint32_t
5705 emlxs_sli4_unreserve_xri(emlxs_hba_t *hba, uint16_t xri, uint32_t lock)
5706 {
5707         emlxs_port_t    *port = &PPORT;
5708         XRIobj_t *xrip;
5709 
5710         if (lock) {
5711                 mutex_enter(&EMLXS_FCTAB_LOCK);
5712         }
5713 
5714         xrip = emlxs_sli4_find_xri(hba, xri);
5715 
5716         if (!xrip || xrip->state == XRI_STATE_FREE) {
5717                 if (lock) {
5718                         mutex_exit(&EMLXS_FCTAB_LOCK);
5719                 }
5720 
5721                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5722                     "emlxs_sli4_unreserve_xri: xri=%x already freed.",
5723                     xri);
5724                 return (0);
5725         }
5726 
5727         if (!(xrip->flag & EMLXS_XRI_RESERVED)) {
5728                 if (lock) {
5729                         mutex_exit(&EMLXS_FCTAB_LOCK);
5730                 }
5731 
5732                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5733                     "emlxs_sli4_unreserve_xri: xri=%x in use.", xrip->XRI);
5734                 return (1);
5735         }
5736 
5737         if (xrip->iotag &&
5738             (hba->fc_table[xrip->iotag] != NULL) &&
5739             (hba->fc_table[xrip->iotag] != STALE_PACKET)) {
5740                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5741                     "emlxs_sli4_unreserve_xri:%x  sbp dropped:%p",
5742                     xrip->XRI, hba->fc_table[xrip->iotag]);
5743 
5744                 hba->fc_table[xrip->iotag] = NULL;
5745                 hba->io_count--;
5746         }
5747 
5748         xrip->state = XRI_STATE_FREE;
5749 
5750         if (xrip->rpip) {
5751                 xrip->rpip->xri_count--;
5752                 xrip->rpip = NULL;
5753         }
5754 
5755         /* Take it off inuse list */
5756         (xrip->_b)->_f = xrip->_f;
5757         (xrip->_f)->_b = xrip->_b;
5758         xrip->_f = NULL;
5759         xrip->_b = NULL;
5760         hba->sli.sli4.xria_count--;
5761 
5762         /* Add it to end of free list */
5763         xrip->_b = hba->sli.sli4.XRIfree_b;
5764         hba->sli.sli4.XRIfree_b->_f = xrip;
5765         xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
5766         hba->sli.sli4.XRIfree_b = xrip;
5767         hba->sli.sli4.xrif_count++;
5768 
5769         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5770             "emlxs_sli4_unreserve_xri: xri=%x unreserved.", xrip->XRI);
5771 
5772         if (lock) {
5773                 mutex_exit(&EMLXS_FCTAB_LOCK);
5774         }
5775 
5776         return (0);
5777 
5778 } /* emlxs_sli4_unreserve_xri() */
5779 
5780 
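     /*
      * XRI setup has two paths: emlxs_sli4_reserve_xri() claims a free
      * XRI and its iotag without binding a packet, and
      * emlxs_sli4_register_xri() then attaches the sbp to the reserved
      * XRI.  emlxs_sli4_alloc_xri() below performs both steps under a
      * single lock acquisition.
      *
      * Illustrative sequence (arguments elided):
      *     xrip = emlxs_sli4_reserve_xri(...);
      *     ...
      *     xrip = emlxs_sli4_register_xri(...);
      */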
5781 static XRIobj_t *
5782 emlxs_sli4_register_xri(emlxs_hba_t *hba, emlxs_buf_t *sbp, uint16_t xri)
5783 {
5784         emlxs_port_t    *port = &PPORT;
5785         uint16_t        iotag;
5786         XRIobj_t        *xrip;
5787 
5788         mutex_enter(&EMLXS_FCTAB_LOCK);
5789 
5790         xrip = emlxs_sli4_find_xri(hba, xri);
5791 
5792         if (!xrip) {
5793                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
5794                     "emlxs_sli4_register_xri: XRI not found.");
5795 
5797                 mutex_exit(&EMLXS_FCTAB_LOCK);
5798                 return (NULL);
5799         }
5800 
5801         if ((xrip->state == XRI_STATE_FREE) ||
5802             !(xrip->flag & EMLXS_XRI_RESERVED)) {
5803 
5804                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
5805                     "emlxs_sli4_register_xri: Invalid XRI. xrip=%p "
5806                     "state=%x flag=%x",
5807                     xrip, xrip->state, xrip->flag);
5808 
5809                 mutex_exit(&EMLXS_FCTAB_LOCK);
5810                 return (NULL);
5811         }
5812 
5813         iotag = xrip->iotag;
5814 
5815         if ((!iotag) ||
5816             ((hba->fc_table[iotag] != NULL) &&
5817             (hba->fc_table[iotag] != STALE_PACKET))) {
5818 
5819                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
5820                     "emlxs_sli4_register_xri: Invalid fc_table entry. "
5821                     "iotag=%x entry=%p",
5822                     iotag, hba->fc_table[iotag]);
5823 
5824                 mutex_exit(&EMLXS_FCTAB_LOCK);
5825                 return (NULL);
5826         }
5827 
5828         hba->fc_table[iotag] = sbp;
5829         hba->io_count++;
5830 
5831         sbp->iotag = iotag;
5832         sbp->xrip = xrip;
5833 
5834         xrip->flag &= ~EMLXS_XRI_RESERVED;
5835         xrip->sbp = sbp;
5836 
5837         mutex_exit(&EMLXS_FCTAB_LOCK);
5838 
5839         return (xrip);
5840 
5841 } /* emlxs_sli4_register_xri() */
5842 
5843 
5844 /* Performs both reserve and register functions for XRI */
5845 static XRIobj_t *
5846 emlxs_sli4_alloc_xri(emlxs_hba_t *hba, emlxs_buf_t *sbp, RPIobj_t *rpip)
5847 {
5848         emlxs_port_t    *port = &PPORT;
5849         XRIobj_t        *xrip;
5850         uint16_t        iotag;
5851 
5852         mutex_enter(&EMLXS_FCTAB_LOCK);
5853 
5854         xrip = hba->sli.sli4.XRIfree_f;
5855 
5856         if (xrip == (XRIobj_t *)&hba->sli.sli4.XRIfree_f) {
5857                 mutex_exit(&EMLXS_FCTAB_LOCK);
5858 
5859                 return (NULL);
5860         }
5861 
5862         /* Get the iotag by registering the packet */
5863         iotag = xrip->iotag;
5864 
5865         if ((!iotag) ||
5866             ((hba->fc_table[iotag] != NULL) &&
5867             (hba->fc_table[iotag] != STALE_PACKET))) {
5868                 /*
5869                  * No more command slots available, retry later
5870                  */
5871                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
5872                     "Adapter Busy. Unable to alloc iotag:(0x%x)(%p)",
5873                     iotag, hba->fc_table[iotag]);
5874 
5875                 mutex_exit(&EMLXS_FCTAB_LOCK);
5876                 return (NULL);
5877         }
5878 
5879         hba->fc_table[iotag] = sbp;
5880         hba->io_count++;
5881 
5882         sbp->iotag = iotag;
5883         sbp->xrip = xrip;
5884 
5885         xrip->state = XRI_STATE_ALLOCATED;
5886         xrip->flag = 0;
5887         xrip->rpip = rpip;
5888         xrip->sbp = sbp;
5889 
5890         if (rpip) {
5891                 rpip->xri_count++;
5892         }
5893 
5894         /* Take it off free list */
5895         (xrip->_b)->_f = xrip->_f;
5896         (xrip->_f)->_b = xrip->_b;
5897         xrip->_f = NULL;
5898         xrip->_b = NULL;
5899         hba->sli.sli4.xrif_count--;
5900 
5901         /* Add it to end of inuse list */
5902         xrip->_b = hba->sli.sli4.XRIinuse_b;
5903         hba->sli.sli4.XRIinuse_b->_f = xrip;
5904         xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
5905         hba->sli.sli4.XRIinuse_b = xrip;
5906         hba->sli.sli4.xria_count++;
5907 
5908         mutex_exit(&EMLXS_FCTAB_LOCK);
5909 
5910         return (xrip);
5911 
5912 } /* emlxs_sli4_alloc_xri() */
5913 
5914 
5915 /* EMLXS_FCTAB_LOCK must be held to enter */
5916 extern XRIobj_t *
5917 emlxs_sli4_find_xri(emlxs_hba_t *hba, uint16_t xri)
5918 {
5919         emlxs_port_t    *port = &PPORT;
5920         XRIobj_t        *xrip;
5921 
5922         xrip = (XRIobj_t *)hba->sli.sli4.XRIinuse_f;
5923         while (xrip != (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) {
5924                 if ((xrip->state >= XRI_STATE_ALLOCATED) &&
5925                     (xrip->XRI == xri)) {
5926                         return (xrip);
5927                 }
5928                 xrip = xrip->_f;
5929         }
5930 
5931         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5932             "Unable to find XRI x%x", xri);
5933 
5934         return (NULL);
5935 
5936 } /* emlxs_sli4_find_xri() */
5937 
5940 
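     /*
      * Return an XRI to the free list and/or release its associated sbp.
      * Either xrip or sbp may be NULL; each is cleaned up independently.
      */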
5941 extern void
5942 emlxs_sli4_free_xri(emlxs_hba_t *hba, emlxs_buf_t *sbp, XRIobj_t *xrip,
5943     uint8_t lock)
5944 {
5945         emlxs_port_t    *port = &PPORT;
5946 
5947         if (lock) {
5948                 mutex_enter(&EMLXS_FCTAB_LOCK);
5949         }
5950 
5951         if (xrip) {
5952                 if (xrip->state == XRI_STATE_FREE) {
5953                         if (lock) {
5954                                 mutex_exit(&EMLXS_FCTAB_LOCK);
5955                         }
5956                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5957                             "Free XRI:%x, Already freed", xrip->XRI);
5958                         return;
5959                 }
5960 
5961                 if (xrip->iotag &&
5962                     (hba->fc_table[xrip->iotag] != NULL) &&
5963                     (hba->fc_table[xrip->iotag] != STALE_PACKET)) {
5964                         hba->fc_table[xrip->iotag] = NULL;
5965                         hba->io_count--;
5966                 }
5967 
5968                 xrip->state = XRI_STATE_FREE;
5969                 xrip->flag  = 0;
5970 
5971                 if (xrip->rpip) {
5972                         xrip->rpip->xri_count--;
5973                         xrip->rpip = NULL;
5974                 }
5975 
5976                 /* Take it off inuse list */
5977                 (xrip->_b)->_f = xrip->_f;
5978                 (xrip->_f)->_b = xrip->_b;
5979                 xrip->_f = NULL;
5980                 xrip->_b = NULL;
5981                 hba->sli.sli4.xria_count--;
5982 
5983                 /* Add it to end of free list */
5984                 xrip->_b = hba->sli.sli4.XRIfree_b;
5985                 hba->sli.sli4.XRIfree_b->_f = xrip;
5986                 xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
5987                 hba->sli.sli4.XRIfree_b = xrip;
5988                 hba->sli.sli4.xrif_count++;
5989         }
5990 
5991         if (sbp) {
5992                 if (!(sbp->pkt_flags & PACKET_VALID) ||
5993                     (sbp->pkt_flags &
5994                     (PACKET_ULP_OWNED|PACKET_COMPLETED|PACKET_IN_COMPLETION))) {
5995                         if (lock) {
5996                                 mutex_exit(&EMLXS_FCTAB_LOCK);
5997                         }
5998                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5999                             "Free XRI: sbp invalid. sbp=%p flags=%x xri=%x",
6000                             sbp, sbp->pkt_flags, ((xrip)? xrip->XRI:0));
6001                         return;
6002                 }
6003 
6004                 sbp->xrip = NULL;
6005 
6006                 if (xrip && (xrip->iotag != sbp->iotag)) {
6007                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
6008                             "sbp / iotag mismatch %p iotag:%d %d", sbp,
6009                             sbp->iotag, xrip->iotag);
6010                 }
6011 
6012                 if (sbp->iotag) {
6013                         if (sbp == hba->fc_table[sbp->iotag]) {
6014                                 hba->fc_table[sbp->iotag] = NULL;
6015                                 hba->io_count--;
6016                         }
6017                         sbp->iotag = 0;
6018                 }
6019 
6020                 if (lock) {
6021                         mutex_exit(&EMLXS_FCTAB_LOCK);
6022                 }
6023 
6024                 /* Clean up the sbp */
6025                 mutex_enter(&sbp->mtx);
6026 
6027                 if (sbp->pkt_flags & PACKET_IN_TXQ) {
6028                         sbp->pkt_flags &= ~PACKET_IN_TXQ;
6029                         hba->channel_tx_count--;
6030                 }
6031 
6032                 if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
6033                         sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
6034                 }
6035 
6036                 mutex_exit(&sbp->mtx);
6037         } else {
6038                 if (lock) {
6039                         mutex_exit(&EMLXS_FCTAB_LOCK);
6040                 }
6041         }
6042 
6043 } /* emlxs_sli4_free_xri() */
6044 
6045 
6046 static int
6047 emlxs_sli4_post_sgl_pages(emlxs_hba_t *hba, MAILBOXQ *mbq)
6048 {
6049         MAILBOX4        *mb = (MAILBOX4 *)mbq;
6050         emlxs_port_t    *port = &PPORT;
6051         XRIobj_t        *xrip;
6052         MATCHMAP        *mp;
6053         mbox_req_hdr_t  *hdr_req;
6054         uint32_t        i, cnt, xri_cnt;
6055         uint32_t        size;
6056         IOCTL_FCOE_CFG_POST_SGL_PAGES *post_sgl;
6057 
6058         bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
6059         mbq->bp = NULL;
6060         mbq->mbox_cmpl = NULL;
6061 
6062         if ((mp = emlxs_mem_buf_alloc(hba, EMLXS_MAX_NONEMBED_SIZE)) == 0) {
6063                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6064                     "Unable to POST_SGL. Mailbox cmd=%x  ",
6065                     mb->mbxCommand);
6066                 return (EIO);
6067         }
6068         mbq->nonembed = (void *)mp;
6069 
6070         /*
6071          * Signifies a non embedded command
6072          */
6073         mb->un.varSLIConfig.be.embedded = 0;
6074         mb->mbxCommand = MBX_SLI_CONFIG;
6075         mb->mbxOwner = OWN_HOST;
6076 
6077         hdr_req = (mbox_req_hdr_t *)mp->virt;
6078         post_sgl =
6079             (IOCTL_FCOE_CFG_POST_SGL_PAGES *)(hdr_req + 1);
6080 
6082         xrip = hba->sli.sli4.XRIp;
6083         cnt = hba->sli.sli4.XRICount;
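             /*
              * Post the SGL pages for every XRI in batches.  Each pass
              * fills one non-embedded mailbox with as many page entries
              * as the payload can hold (xri_cnt below) and repeats until
              * all XRICount entries have been posted.
              */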
6084         while (cnt) {
6085                 bzero((void *) hdr_req, mp->size);
6086                 size = mp->size - IOCTL_HEADER_SZ;
6087 
6088                 mb->un.varSLIConfig.be.payload_length =
6089                     mp->size;
6090                 mb->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
6091                     IOCTL_SUBSYSTEM_FCOE;
6092                 mb->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
6093                     FCOE_OPCODE_CFG_POST_SGL_PAGES;
6094                 mb->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
6095                 mb->un.varSLIConfig.be.un_hdr.hdr_req.req_length = size;
6096 
6097                 hdr_req->subsystem = IOCTL_SUBSYSTEM_FCOE;
6098                 hdr_req->opcode = FCOE_OPCODE_CFG_POST_SGL_PAGES;
6099                 hdr_req->timeout = 0;
6100                 hdr_req->req_length = size;
6101 
6102                 post_sgl->params.request.xri_count = 0;
6103                 post_sgl->params.request.xri_start = xrip->XRI;
6104                 xri_cnt = (size - sizeof (IOCTL_FCOE_CFG_POST_SGL_PAGES)) /
6105                     sizeof (FCOE_SGL_PAGES);
6106                 for (i = 0; i < xri_cnt; i++) {
6107 
6108                         post_sgl->params.request.xri_count++;
6109                         post_sgl->params.request.pages[i].sgl_page0.addrLow =
6110                             PADDR_LO(xrip->SGList.phys);
6111                         post_sgl->params.request.pages[i].sgl_page0.addrHigh =
6112                             PADDR_HI(xrip->SGList.phys);
6113                         cnt--;
6114                         xrip++;
6115                         if (cnt == 0) {
6116                                 break;
6117                         }
6118                 }
6119                 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6120                     MBX_SUCCESS) {
6121                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6122                             "Unable to POST_SGL. Mailbox cmd=%x status=%x "
6123                             "XRI cnt:%d start:%d",
6124                             mb->mbxCommand, mb->mbxStatus,
6125                             post_sgl->params.request.xri_count,
6126                             post_sgl->params.request.xri_start);
6127                         emlxs_mem_buf_free(hba, mp);
6128                         mbq->nonembed = NULL;
6129                         return (EIO);
6130                 }
6131         }
6132         emlxs_mem_buf_free(hba, mp);
6133         mbq->nonembed = NULL;
6134         return (0);
6135 
6136 } /* emlxs_sli4_post_sgl_pages() */
6137 
6138 
6139 static int
6140 emlxs_sli4_post_hdr_tmplates(emlxs_hba_t *hba, MAILBOXQ *mbq)
6141 {
6142         MAILBOX4        *mb = (MAILBOX4 *)mbq;
6143         emlxs_port_t    *port = &PPORT;
6144         int             i, cnt;
6145         uint64_t        addr;
6146         IOCTL_FCOE_POST_HDR_TEMPLATES *post_hdr;
6147 
6148         bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
6149         mbq->bp = NULL;
6150         mbq->mbox_cmpl = NULL;
6151 
6152         /*
6153          * Signifies an embedded command
6154          */
6155         mb->un.varSLIConfig.be.embedded = 1;
6156 
6157         mb->mbxCommand = MBX_SLI_CONFIG;
6158         mb->mbxOwner = OWN_HOST;
6159         mb->un.varSLIConfig.be.payload_length =
6160             sizeof (IOCTL_FCOE_POST_HDR_TEMPLATES) + IOCTL_HEADER_SZ;
6161         mb->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
6162             IOCTL_SUBSYSTEM_FCOE;
6163         mb->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
6164             FCOE_OPCODE_POST_HDR_TEMPLATES;
6165         mb->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
6166         mb->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
6167             sizeof (IOCTL_FCOE_POST_HDR_TEMPLATES);
6168         post_hdr =
6169             (IOCTL_FCOE_POST_HDR_TEMPLATES *)&mb->un.varSLIConfig.payload;
6170         addr = hba->sli.sli4.HeaderTmplate.phys;
6171         post_hdr->params.request.num_pages = 0;
6172         i = 0;
6173         cnt = hba->sli.sli4.HeaderTmplate.size;
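             /*
              * Describe the RPI header template region to the adapter one
              * 4KB page at a time.
              */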
6174         while (cnt > 0) {
6175                 post_hdr->params.request.num_pages++;
6176                 post_hdr->params.request.pages[i].addrLow = PADDR_LO(addr);
6177                 post_hdr->params.request.pages[i].addrHigh = PADDR_HI(addr);
6178                 i++;
6179                 addr += 4096;
6180                 cnt -= 4096;
6181         }
6182         post_hdr->params.request.starting_rpi_index = hba->sli.sli4.RPIBase;
6183 
6184         if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6185             MBX_SUCCESS) {
6186                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6187                     "Unable to POST_HDR_TEMPLATES. Mailbox cmd=%x status=%x ",
6188                     mb->mbxCommand, mb->mbxStatus);
6189                 return (EIO);
6190         }
6191 emlxs_data_dump(port, "POST_HDR", (uint32_t *)mb, 18, 0);
6192         return (0);
6193 
6194 } /* emlxs_sli4_post_hdr_tmplates() */
6195 
6196 
6197 static int
6198 emlxs_sli4_create_queues(emlxs_hba_t *hba, MAILBOXQ *mbq)
6199 {
6200         MAILBOX4        *mb = (MAILBOX4 *)mbq;
6201         emlxs_port_t    *port = &PPORT;
6202         emlxs_config_t  *cfg = &CFG;
6203         IOCTL_COMMON_EQ_CREATE *eq;
6204         IOCTL_COMMON_CQ_CREATE *cq;
6205         IOCTL_FCOE_WQ_CREATE *wq;
6206         IOCTL_FCOE_RQ_CREATE *rq;
6207         IOCTL_COMMON_MQ_CREATE *mq;
6208         IOCTL_COMMON_MCC_CREATE_EXT *mcc_ext;
6209         emlxs_rqdbu_t   rqdb;
6210         uint16_t i, j;
6211         uint16_t num_cq, total_cq;
6212         uint16_t num_wq, total_wq;
6213 
6214         /*
6215          * The first CQ is reserved for ASYNC events,
6216          * the second is reserved for unsol rcv, the rest
6217          * correspond to WQs. (WQ0 -> CQ2, WQ1 -> CQ3, ...)
6218          */
6219 
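             /*
              * One EQ is created per interrupt vector, and each EQ is
              * given its own group of CQs and WQs.  The eq/cq/wq/rq_map
              * arrays translate hardware queue ids back to driver
              * ordinals; 0xffff marks an unassigned id.
              */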
6220         /* First initialize queue ordinal mapping */
6221         for (i = 0; i < EMLXS_MAX_EQ_IDS; i++) {
6222                 hba->sli.sli4.eq_map[i] = 0xffff;
6223         }
6224         for (i = 0; i < EMLXS_MAX_CQ_IDS; i++) {
6225                 hba->sli.sli4.cq_map[i] = 0xffff;
6226         }
6227         for (i = 0; i < EMLXS_MAX_WQ_IDS; i++) {
6228                 hba->sli.sli4.wq_map[i] = 0xffff;
6229         }
6230         for (i = 0; i < EMLXS_MAX_RQ_IDS; i++) {
6231                 hba->sli.sli4.rq_map[i] = 0xffff;
6232         }
6233 
6234         total_cq = 0;
6235         total_wq = 0;
6236 
6237         /* Create EQ's */
6238         for (i = 0; i < hba->intr_count; i++) {
6239                 emlxs_mb_eq_create(hba, mbq, i);
6240                 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6241                     MBX_SUCCESS) {
6242                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
6243                             "Unable to Create EQ %d: Mailbox cmd=%x status=%x ",
6244                             i, mb->mbxCommand, mb->mbxStatus);
6245                         return (EIO);
6246                 }
6247                 eq = (IOCTL_COMMON_EQ_CREATE *)&mb->un.varSLIConfig.payload;
6248                 hba->sli.sli4.eq[i].qid = eq->params.response.EQId;
6249                 hba->sli.sli4.eq_map[eq->params.response.EQId] = i;
6250                 hba->sli.sli4.eq[i].lastwq = total_wq;
6251 
6252 emlxs_data_dump(port, "EQ0_CREATE", (uint32_t *)mb, 18, 0);
6253                 num_wq = cfg[CFG_NUM_WQ].current;
6254                 num_cq = num_wq;
6255                 if (i == 0) {
6256                         /* One for RQ handling, one for mbox/event handling */
6257                         num_cq += EMLXS_CQ_OFFSET_WQ;
6258                 }
6259 
6260                 for (j = 0; j < num_cq; j++) {
6261                         /* Reuse mbq from previous mbox */
6262                         bzero(mbq, sizeof (MAILBOXQ));
6263 
6264                         hba->sli.sli4.cq[total_cq].eqid =
6265                             hba->sli.sli4.eq[i].qid;
6266 
6267                         emlxs_mb_cq_create(hba, mbq, total_cq);
6268                         if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6269                             MBX_SUCCESS) {
6270                                 EMLXS_MSGF(EMLXS_CONTEXT,
6271                                     &emlxs_init_failed_msg, "Unable to Create "
6272                                     "CQ %d: Mailbox cmd=%x status=%x ",
6273                                     total_cq, mb->mbxCommand, mb->mbxStatus);
6274                                 return (EIO);
6275                         }
6276                         cq = (IOCTL_COMMON_CQ_CREATE *)
6277                             &mb->un.varSLIConfig.payload;
6278                         hba->sli.sli4.cq[total_cq].qid =
6279                             cq->params.response.CQId;
6280                         hba->sli.sli4.cq_map[cq->params.response.CQId] =
6281                             total_cq;
6282 
6283                         switch (total_cq) {
6284                         case EMLXS_CQ_MBOX:
6285                                 /* First CQ is for async event handling */
6286                                 hba->sli.sli4.cq[total_cq].type =
6287                                     EMLXS_CQ_TYPE_GROUP1;
6288                                 break;
6289 
6290                         case EMLXS_CQ_RCV:
6291                                 /* Second CQ is for unsol receive handling */
6292                                 hba->sli.sli4.cq[total_cq].type =
6293                                     EMLXS_CQ_TYPE_GROUP2;
6294                                 break;
6295 
6296                         default:
6297                                 /* Setup CQ to channel mapping */
6298                                 hba->sli.sli4.cq[total_cq].type =
6299                                     EMLXS_CQ_TYPE_GROUP2;
6300                                 hba->sli.sli4.cq[total_cq].channelp =
6301                                     &hba->chan[total_cq - EMLXS_CQ_OFFSET_WQ];
6302                                 break;
6303                         }
6304 emlxs_data_dump(port, "CQX_CREATE", (uint32_t *)mb, 18, 0);
6305                         total_cq++;
6306                 }
6307 
6308                 for (j = 0; j < num_wq; j++) {
6309                         /* Reuse mbq from previous mbox */
6310                         bzero(mbq, sizeof (MAILBOXQ));
6311 
6312                         hba->sli.sli4.wq[total_wq].cqid =
6313                             hba->sli.sli4.cq[total_wq + EMLXS_CQ_OFFSET_WQ].qid;
6314 
6315                         emlxs_mb_wq_create(hba, mbq, total_wq);
6316                         if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6317                             MBX_SUCCESS) {
6318                                 EMLXS_MSGF(EMLXS_CONTEXT,
6319                                     &emlxs_init_failed_msg, "Unable to Create "
6320                                     "WQ %d: Mailbox cmd=%x status=%x ",
6321                                     total_wq, mb->mbxCommand, mb->mbxStatus);
6322                                 return (EIO);
6323                         }
6324                         wq = (IOCTL_FCOE_WQ_CREATE *)
6325                             &mb->un.varSLIConfig.payload;
6326                         hba->sli.sli4.wq[total_wq].qid =
6327                             wq->params.response.WQId;
6328                         hba->sli.sli4.wq_map[wq->params.response.WQId] =
6329                             total_wq;
6330 
6331                         hba->sli.sli4.wq[total_wq].cqid =
6332                             hba->sli.sli4.cq[total_wq+EMLXS_CQ_OFFSET_WQ].qid;
6333 emlxs_data_dump(port, "WQ_CREATE", (uint32_t *)mb, 18, 0);
6334                         total_wq++;
6335                 }
6336                 hba->last_msiid = i;
6337         }
6338 
6339         /* We assume 1 RQ pair will handle ALL incoming data */
6340         /* Create RQs */
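             /*
              * RQs are created in header/data pairs: the even ordinal
              * receives frame headers and the odd ordinal receives frame
              * payloads.  Both complete on the EMLXS_CQ_RCV completion
              * queue.
              */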
6341         for (i = 0; i < EMLXS_MAX_RQS; i++) {
6342                 /* Personalize the RQ */
6343                 switch (i) {
6344                 case 0:
6345                         hba->sli.sli4.rq[i].cqid =
6346                             hba->sli.sli4.cq[EMLXS_CQ_RCV].qid;
6347                         break;
6348                 case 1:
6349                         hba->sli.sli4.rq[i].cqid =
6350                             hba->sli.sli4.cq[EMLXS_CQ_RCV].qid;
6351                         break;
6352                 default:
6353                         hba->sli.sli4.rq[i].cqid = 0xffff;
6354                 }
6355 
6356                 /* Reuse mbq from previous mbox */
6357                 bzero(mbq, sizeof (MAILBOXQ));
6358 
6359                 emlxs_mb_rq_create(hba, mbq, i);
6360                 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6361                     MBX_SUCCESS) {
6362                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
6363                             "Unable to Create RQ %d: Mailbox cmd=%x status=%x ",
6364                             i, mb->mbxCommand, mb->mbxStatus);
6365                         return (EIO);
6366                 }
6367                 rq = (IOCTL_FCOE_RQ_CREATE *)&mb->un.varSLIConfig.payload;
6368                 hba->sli.sli4.rq[i].qid = rq->params.response.RQId;
6369                 hba->sli.sli4.rq_map[rq->params.response.RQId] = i;
6370 emlxs_data_dump(port, "RQ CREATE", (uint32_t *)mb, 18, 0);
6371 
6372                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6373                     "RQ CREATE: rq[%d].qid=%d cqid=%d",
6374                     i, hba->sli.sli4.rq[i].qid, hba->sli.sli4.rq[i].cqid);
6375 
6376                 /* Initialize the host_index */
6377                 hba->sli.sli4.rq[i].host_index = 0;
6378 
6379                 /* If Data queue was just created, */
6380                 /* then post buffers using the header qid */
6381                 if ((i & 0x1)) {
6382                         /* Ring the RQ doorbell to post buffers */
6383                         rqdb.word = 0;
6384                         rqdb.db.Qid = hba->sli.sli4.rq[i-1].qid;
6385                         rqdb.db.NumPosted = RQB_COUNT;
6386 
6387                         WRITE_BAR2_REG(hba, FC_RQDB_REG(hba), rqdb.word);
6388 
6389                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6390                             "RQ CREATE: Doorbell rang: qid=%d count=%d",
6391                             hba->sli.sli4.rq[i-1].qid, RQB_COUNT);
6392                 }
6393         }
6394 
6395         /* Create MQ */
6396 
6397         /* Personalize the MQ */
6398         hba->sli.sli4.mq.cqid = hba->sli.sli4.cq[EMLXS_CQ_MBOX].qid;
6399 
6400         /* Reuse mbq from previous mbox */
6401         bzero(mbq, sizeof (MAILBOXQ));
6402 
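             /*
              * Try the extended MCC create first; if that mailbox command
              * fails, fall back to creating a basic MQ.
              */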
6403         emlxs_mb_mcc_create_ext(hba, mbq);
6404         if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6405             MBX_SUCCESS) {
6406                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
6407                     "Unable to Create MCC_EXT: Mailbox cmd=%x status=%x ",
6408                     mb->mbxCommand, mb->mbxStatus);
6409 
6410                 /* Reuse mbq from previous mbox */
6411                 bzero(mbq, sizeof (MAILBOXQ));
6412 
6413                 emlxs_mb_mq_create(hba, mbq);
6414                 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6415                     MBX_SUCCESS) {
6416                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
6417                             "Unable to Create MQ: Mailbox cmd=%x status=%x ",
6418                             mb->mbxCommand, mb->mbxStatus);
6419                         return (EIO);
6420                 }
6421 
6422                 mq = (IOCTL_COMMON_MQ_CREATE *)&mb->un.varSLIConfig.payload;
6423                 hba->sli.sli4.mq.qid = mq->params.response.MQId;
6424                 return (0);
6425         }
6426 
6427         mcc_ext = (IOCTL_COMMON_MCC_CREATE_EXT *)&mb->un.varSLIConfig.payload;
6428         hba->sli.sli4.mq.qid = mcc_ext->params.response.id;
6429         return (0);
6430 
6431 } /* emlxs_sli4_create_queues() */
6432 
6433 
6434 /*ARGSUSED*/
6435 extern int
6436 emlxs_sli4_check_fcf_config(emlxs_hba_t *hba, FCF_RECORD_t *fcfrec)
6437 {
6438         int i;
6439 
6440         if (!(hba->flag & FC_FIP_SUPPORTED)) {
6441                 if (!hba->sli.sli4.cfgFCOE.length) {
6442                         /* Nothing specified, so everything matches. */
6443                         /* For non-FIP mode, only index 0 is used. */
6444                         if (fcfrec->fcf_index == 0) {
6445                                 return (1);  /* success */
6446                         }
6447                         return (0);
6448                 }
6449 
6450                 /* Just check FCMap for now */
6451                 if (bcmp((char *)fcfrec->fc_map,
6452                     hba->sli.sli4.cfgFCOE.FCMap, 3) == 0) {
6453                         return (1);  /* success */
6454                 }
6455                 return (0);
6456         }
6457 
6458         /* For FIP mode, the FCF record must match Config Region 23 */
6459 
6460         if (!hba->sli.sli4.cfgFCF.length) {
6461                 /* Nothing specified, so everything matches */
6462                 return (1);  /* success */
6463         }
6464 
6465         /* Just check FabricName for now */
6466         for (i = 0; i < MAX_FCFCONNECTLIST_ENTRIES; i++) {
6467                 if ((hba->sli.sli4.cfgFCF.entry[i].FabricNameValid) &&
6468                     (bcmp((char *)fcfrec->fabric_name_identifier,
6469                     hba->sli.sli4.cfgFCF.entry[i].FabricName, 8) == 0)) {
6470                         return (1);  /* success */
6471                 }
6472         }
6473         return (0);
6474 
6475 } /* emlxs_sli4_check_fcf_config() */
6476 
6477 
6478 extern void
6479 emlxs_sli4_timer(emlxs_hba_t *hba)
6480 {
6481         /* Perform SLI4 level timer checks */
6482 
6483         emlxs_fcf_timer_notify(hba);
6484 
6485         emlxs_sli4_timer_check_mbox(hba);
6486 
6487         return;
6488 
6489 } /* emlxs_sli4_timer() */
6490 
6491 
6492 static void
6493 emlxs_sli4_timer_check_mbox(emlxs_hba_t *hba)
6494 {
6495         emlxs_port_t *port = &PPORT;
6496         emlxs_config_t *cfg = &CFG;
6497         MAILBOX *mb = NULL;
6498 
6499         if (!cfg[CFG_TIMEOUT_ENABLE].current) {
6500                 return;
6501         }
6502 
6503         mutex_enter(&EMLXS_PORT_LOCK);
6504 
6505         /* Return if timer hasn't expired */
6506         if (!hba->mbox_timer || (hba->timer_tics < hba->mbox_timer)) {
6507                 mutex_exit(&EMLXS_PORT_LOCK);
6508                 return;
6509         }
6510 
6511         /* The first to service the mbox queue will clear the timer */
6512         hba->mbox_timer = 0;
6513 
6514         if (hba->mbox_queue_flag) {
6515                 if (hba->mbox_mbq) {
6516                         mb = (MAILBOX *)hba->mbox_mbq;
6517                 }
6518         }
6519 
6520         if (mb) {
6521                 switch (hba->mbox_queue_flag) {
6522                 case MBX_NOWAIT:
6523                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
6524                             "%s: Nowait.",
6525                             emlxs_mb_cmd_xlate(mb->mbxCommand));
6526                         break;
6527 
6528                 case MBX_SLEEP:
6529                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
6530                             "%s: mb=%p Sleep.",
6531                             emlxs_mb_cmd_xlate(mb->mbxCommand),
6532                             mb);
6533                         break;
6534 
6535                 case MBX_POLL:
6536                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
6537                             "%s: mb=%p Polled.",
6538                             emlxs_mb_cmd_xlate(mb->mbxCommand),
6539                             mb);
6540                         break;
6541 
6542                 default:
6543                         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
6544                             "%s: mb=%p (%d).",
6545                             emlxs_mb_cmd_xlate(mb->mbxCommand),
6546                             mb, hba->mbox_queue_flag);
6547                         break;
6548                 }
6549         } else {
6550                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg, NULL);
6551         }
6552 
6553         hba->flag |= FC_MBOX_TIMEOUT;
6554         EMLXS_STATE_CHANGE_LOCKED(hba, FC_ERROR);
6555 
6556         mutex_exit(&EMLXS_PORT_LOCK);
6557 
6558         /* Perform mailbox cleanup */
6559         /* This will wake any sleeping or polling threads */
6560         emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
6561 
6562         /* Trigger adapter shutdown */
6563         emlxs_thread_spawn(hba, emlxs_shutdown_thread, 0, 0);
6564 
6565         return;
6566 
6567 } /* emlxs_sli4_timer_check_mbox() */
6568 
6569 
6570 extern void
6571 emlxs_data_dump(emlxs_port_t *port, char *str, uint32_t *iptr, int cnt, int err)
6572 {
6573         void *msg;
6574 
6575         if (err) {
6576                 msg = &emlxs_sli_err_msg;
6577         } else {
6578                 msg = &emlxs_sli_detail_msg;
6579         }
6580 
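             /* Dump up to 42 words of data, six 32-bit words per line */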
6581         if (cnt) {
6582                 EMLXS_MSGF(EMLXS_CONTEXT, msg,
6583                     "%s00: %08x %08x %08x %08x %08x %08x", str, *iptr,
6584                     *(iptr+1), *(iptr+2), *(iptr+3), *(iptr+4), *(iptr+5));
6585         }
6586         if (cnt > 6) {
6587                 EMLXS_MSGF(EMLXS_CONTEXT, msg,
6588                     "%s06: %08x %08x %08x %08x %08x %08x", str, *(iptr+6),
6589                     *(iptr+7), *(iptr+8), *(iptr+9), *(iptr+10), *(iptr+11));
6590         }
6591         if (cnt > 12) {
6592                 EMLXS_MSGF(EMLXS_CONTEXT, msg,
6593                     "%s12: %08x %08x %08x %08x %08x %08x", str, *(iptr+12),
6594                     *(iptr+13), *(iptr+14), *(iptr+15), *(iptr+16), *(iptr+17));
6595         }
6596         if (cnt > 18) {
6597                 EMLXS_MSGF(EMLXS_CONTEXT, msg,
6598                     "%s18: %08x %08x %08x %08x %08x %08x", str, *(iptr+18),
6599                     *(iptr+19), *(iptr+20), *(iptr+21), *(iptr+22), *(iptr+23));
6600         }
6601         if (cnt > 24) {
6602                 EMLXS_MSGF(EMLXS_CONTEXT, msg,
6603                     "%s24: %08x %08x %08x %08x %08x %08x", str, *(iptr+24),
6604                     *(iptr+25), *(iptr+26), *(iptr+27), *(iptr+28), *(iptr+29));
6605         }
6606         if (cnt > 30) {
6607                 EMLXS_MSGF(EMLXS_CONTEXT, msg,
6608                     "%s30: %08x %08x %08x %08x %08x %08x", str, *(iptr+30),
6609                     *(iptr+31), *(iptr+32), *(iptr+33), *(iptr+34), *(iptr+35));
6610         }
6611         if (cnt > 36) {
6612                 EMLXS_MSGF(EMLXS_CONTEXT, msg,
6613                     "%s36: %08x %08x %08x %08x %08x %08x", str, *(iptr+36),
6614                     *(iptr+37), *(iptr+38), *(iptr+39), *(iptr+40), *(iptr+41));
6615         }
6616 
6617 } /* emlxs_data_dump() */
6618 
6619 
6620 extern void
6621 emlxs_ue_dump(emlxs_hba_t *hba, char *str)
6622 {
6623         emlxs_port_t *port = &PPORT;
6624         uint32_t ue_h;
6625         uint32_t ue_l;
6626         uint32_t on1;
6627         uint32_t on2;
6628 
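             /*
              * Read the unrecoverable-error (UE) status and online
              * registers directly from PCI config space.
              */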
6629         ue_l = ddi_get32(hba->pci_acc_handle,
6630             (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_LO_OFFSET));
6631         ue_h = ddi_get32(hba->pci_acc_handle,
6632             (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_HI_OFFSET));
6633         on1 = ddi_get32(hba->pci_acc_handle,
6634             (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_ONLINE1));
6635         on2 = ddi_get32(hba->pci_acc_handle,
6636             (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_ONLINE2));
6637 
6638         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6639             "%s: ueLow:%08x ueHigh:%08x on1:%08x on2:%08x", str,
6640             ue_l, ue_h, on1, on2);
6641 
6642 #ifdef FMA_SUPPORT
6643         /* Access handle validation */
6644         EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
6645 #endif  /* FMA_SUPPORT */
6646 
6647 } /* emlxs_ue_dump() */
6648 
6649 
6650 static void
6651 emlxs_sli4_poll_erratt(emlxs_hba_t *hba)
6652 {
6653         emlxs_port_t *port = &PPORT;
6654         uint32_t ue_h;
6655         uint32_t ue_l;
6656 
6657         if (hba->flag & FC_HARDWARE_ERROR) {
6658                 return;
6659         }
6660 
6661         ue_l = ddi_get32(hba->pci_acc_handle,
6662             (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_LO_OFFSET));
6663         ue_h = ddi_get32(hba->pci_acc_handle,
6664             (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_HI_OFFSET));
6665 
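             /*
              * Any UE status bit not covered by its mask register
              * indicates an unrecoverable adapter error.
              */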
6666         if ((~hba->sli.sli4.ue_mask_lo & ue_l) ||
6667             (~hba->sli.sli4.ue_mask_hi & ue_h) ||
6668             (hba->sli.sli4.flag & EMLXS_SLI4_HW_ERROR)) {
6669                 /* Unrecoverable error detected */
6670                 /* Shut the HBA down */
6671                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
6672                     "Host Error: ueLow:%08x ueHigh:%08x maskLow:%08x "
6673                     "maskHigh:%08x",
6674                     ue_l, ue_h, hba->sli.sli4.ue_mask_lo,
6675                     hba->sli.sli4.ue_mask_hi);
6676 
6677                 EMLXS_STATE_CHANGE(hba, FC_ERROR);
6678 
6679                 emlxs_sli4_hba_flush_chipq(hba);
6680 
6681                 emlxs_thread_spawn(hba, emlxs_shutdown_thread, 0, 0);
6682         }
6683 
6684 } /* emlxs_sli4_poll_erratt() */
6685 
6686 
6687 extern uint32_t
6688 emlxs_sli4_reg_did(emlxs_port_t *port, uint32_t did, SERV_PARM *param,
6689     emlxs_buf_t *sbp, fc_unsol_buf_t *ubp, IOCBQ *iocbq)
6690 {
6691         emlxs_hba_t     *hba = HBA;
6692         NODELIST        *node;
6693         RPIobj_t        *rpip;
6694         uint32_t        rval;
6695 
6696         /* Check for invalid node ids to register */
6697         if ((did == 0) && (!(hba->flag & FC_LOOPBACK_MODE))) {
6698                 return (1);
6699         }
6700 
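             /* FC port ids are 24 bits; reject dids with upper bits set */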
6701         if (did & 0xff000000) {
6702                 return (1);
6703         }
6704 
6705         if ((rval = emlxs_mb_check_sparm(hba, param))) {
6706                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
6707                     "Invalid service parameters. did=%06x rval=%d", did,
6708                     rval);
6709 
6710                 return (1);
6711         }
6712 
6713         /* Check if the node limit has been reached */
6714         if (port->node_count >= hba->max_nodes) {
6715                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
6716                     "Limit reached. did=%06x count=%d", did,
6717                     port->node_count);
6718 
6719                 return (1);
6720         }
6721 
6722         node = emlxs_node_find_did(port, did);
6723         rpip = EMLXS_NODE_TO_RPI(port, node);
6724 
6725         rval = emlxs_rpi_online_notify(port, rpip, did, param, (void *)sbp,
6726             (void *)ubp, (void *)iocbq);
6727 
6728         return (rval);
6729 
6730 } /* emlxs_sli4_reg_did() */
6731 
6732 
6733 extern uint32_t
6734 emlxs_sli4_unreg_node(emlxs_port_t *port, emlxs_node_t *node,
6735     emlxs_buf_t *sbp, fc_unsol_buf_t *ubp, IOCBQ *iocbq)
6736 {
6737         RPIobj_t        *rpip;
6738         uint32_t        rval;
6739 
6740         if (!node) {
6741                 /* Unreg all nodes */
6742                 (void) emlxs_sli4_unreg_all_nodes(port);
6743                 return (1);
6744         }
6745 
6746         /* Check for base node */
6747         if (node == &port->node_base) {
6748                 /* Just flush base node */
6749                 (void) emlxs_tx_node_flush(port, &port->node_base,
6750                     0, 0, 0);
6751 
6752                 (void) emlxs_chipq_node_flush(port, 0,
6753                     &port->node_base, 0);
6754 
6755                 port->did = 0;
6756 
6757                 /* Return now */
6758                 return (1);
6759         }
6760 
6761         EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6762             "unreg_node:%p did=%x rpi=%d",
6763             node, node->nlp_DID, node->nlp_Rpi);
6764 
6765         rpip = EMLXS_NODE_TO_RPI(port, node);
6766 
6767         if (!rpip) {
6768                 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6769                     "unreg_node:%p did=%x rpi=%d. RPI not found.",
6770                     node, node->nlp_DID, node->nlp_Rpi);
6771 
6772                 emlxs_node_rm(port, node);
6773                 return (1);
6774         }
6775 
6776         rval = emlxs_rpi_offline_notify(port, rpip, (void *)sbp, (void *)ubp,
6777             (void *)iocbq);
6778 
6779         return (rval);
6780 
6781 } /* emlxs_sli4_unreg_node() */
6782 
6783 
6784 extern uint32_t
6785 emlxs_sli4_unreg_all_nodes(emlxs_port_t *port)
6786 {
6787         NODELIST        *nlp;
6788         int             i;
6789         uint32_t        found;
6790 
6791         /* Set the node tags */
6792         /* We will process all nodes with this tag */
6793         rw_enter(&port->node_rwlock, RW_READER);
6794         found = 0;
6795         for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
6796                 nlp = port->node_table[i];
6797                 while (nlp != NULL) {
6798                         found = 1;
6799                         nlp->nlp_tag = 1;
6800                         nlp = nlp->nlp_list_next;
6801                 }
6802         }
6803         rw_exit(&port->node_rwlock);
6804 
6805         if (!found) {
6806                 return (0);
6807         }
6808 
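             /*
              * Unregister the tagged nodes one at a time.  The table is
              * re-scanned after each pass because the node list can change
              * once node_rwlock is dropped for the unreg call.
              */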
6809         for (;;) {
6810                 rw_enter(&port->node_rwlock, RW_READER);
6811                 found = 0;
6812                 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
6813                         nlp = port->node_table[i];
6814                         while (nlp != NULL) {
6815                                 if (!nlp->nlp_tag) {
6816                                         nlp = nlp->nlp_list_next;
6817                                         continue;
6818                                 }
6819                                 nlp->nlp_tag = 0;
6820                                 found = 1;
6821                                 break;
6822                         }
6823 
6824                         if (found) {
6825                                 break;
6826                         }
6827                 }
6828                 rw_exit(&port->node_rwlock);
6829 
6830                 if (!found) {
6831                         break;
6832                 }
6833 
6834                 (void) emlxs_sli4_unreg_node(port, nlp, 0, 0, 0);
6835         }
6836 
6837         return (0);
6838 
6839 } /* emlxs_sli4_unreg_all_nodes() */