1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* Copyright 2010 QLogic Corporation */ 23 24 /* 25 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. 26 */ 27 28 #pragma ident "Copyright 2010 QLogic Corporation; ql_ioctl.c" 29 30 /* 31 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file. 32 * Fibre Channel Adapter (FCA) driver IOCTL source file. 33 * 34 * *********************************************************************** 35 * * ** 36 * * NOTICE ** 37 * * COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION ** 38 * * ALL RIGHTS RESERVED ** 39 * * ** 40 * *********************************************************************** 41 * 42 */ 43 44 #include <ql_apps.h> 45 #include <ql_api.h> 46 #include <ql_debug.h> 47 #include <ql_init.h> 48 #include <ql_ioctl.h> 49 #include <ql_mbx.h> 50 #include <ql_xioctl.h> 51 52 /* 53 * Local Function Prototypes. 
54 */ 55 static int ql_busy_notification(ql_adapter_state_t *); 56 static int ql_idle_notification(ql_adapter_state_t *); 57 static int ql_get_feature_bits(ql_adapter_state_t *ha, uint16_t *features); 58 static int ql_set_feature_bits(ql_adapter_state_t *ha, uint16_t features); 59 static int ql_set_nvram_adapter_defaults(ql_adapter_state_t *ha); 60 static void ql_load_nvram(ql_adapter_state_t *ha, uint8_t addr, 61 uint16_t value); 62 static int ql_24xx_load_nvram(ql_adapter_state_t *, uint32_t, uint32_t); 63 static int ql_adm_op(ql_adapter_state_t *, void *, int); 64 static int ql_adm_adapter_info(ql_adapter_state_t *, ql_adm_op_t *, int); 65 static int ql_adm_extended_logging(ql_adapter_state_t *, ql_adm_op_t *); 66 static int ql_adm_device_list(ql_adapter_state_t *, ql_adm_op_t *, int); 67 static int ql_adm_update_properties(ql_adapter_state_t *); 68 static int ql_adm_prop_update_int(ql_adapter_state_t *, ql_adm_op_t *, int); 69 static int ql_adm_loop_reset(ql_adapter_state_t *); 70 static int ql_adm_fw_dump(ql_adapter_state_t *, ql_adm_op_t *, void *, int); 71 static int ql_adm_nvram_dump(ql_adapter_state_t *, ql_adm_op_t *, int); 72 static int ql_adm_nvram_load(ql_adapter_state_t *, ql_adm_op_t *, int); 73 static int ql_adm_flash_load(ql_adapter_state_t *, ql_adm_op_t *, int); 74 static int ql_adm_vpd_dump(ql_adapter_state_t *, ql_adm_op_t *, int); 75 static int ql_adm_vpd_load(ql_adapter_state_t *, ql_adm_op_t *, int); 76 static int ql_adm_vpd_gettag(ql_adapter_state_t *, ql_adm_op_t *, int); 77 static int ql_adm_updfwmodule(ql_adapter_state_t *, ql_adm_op_t *, int); 78 static uint8_t *ql_vpd_findtag(ql_adapter_state_t *, uint8_t *, int8_t *); 79 80 /* ************************************************************************ */ 81 /* cb_ops functions */ 82 /* ************************************************************************ */ 83 84 /* 85 * ql_open 86 * opens device 87 * 88 * Input: 89 * dev_p = device pointer 90 * flags = open flags 91 * otype = open 
type
 *	cred_p = credentials pointer
 *
 * Returns:
 *	0 = success
 *
 * Context:
 *	Kernel context.
 */
/* ARGSUSED */
int
ql_open(dev_t *dev_p, int flags, int otyp, cred_t *cred_p)
{
	ql_adapter_state_t	*ha;
	int			rval = 0;

	/* Minor number selects the adapter instance. */
	ha = ddi_get_soft_state(ql_state, (int32_t)getminor(*dev_p));
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter\n");
		return (ENXIO);
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Allow only character opens */
	if (otyp != OTYP_CHR) {
		QL_PRINT_2(CE_CONT, "(%d): failed, open type\n",
		    ha->instance);
		return (EINVAL);
	}

	/*
	 * An exclusive (FEXCL) open fails if the adapter is already
	 * open; any other open just marks the adapter opened.  The
	 * flag test and set are done under the adapter state lock.
	 */
	ADAPTER_STATE_LOCK(ha);
	if (flags & FEXCL && ha->flags & QL_OPENED) {
		ADAPTER_STATE_UNLOCK(ha);
		rval = EBUSY;
	} else {
		ha->flags |= QL_OPENED;
		ADAPTER_STATE_UNLOCK(ha);
	}

	if (rval != 0) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}

/*
 * ql_close
 *	closes device
 *
 * Input:
 *	dev = device number
 *	flags = open flags
 *	otyp = open type
 *	cred_p = credentials pointer
 *
 * Returns:
 *	0 = success
 *
 * Context:
 *	Kernel context.
 */
/* ARGSUSED */
int
ql_close(dev_t dev, int flags, int otyp, cred_t *cred_p)
{
	ql_adapter_state_t	*ha;
	int			rval = 0;

	ha = ddi_get_soft_state(ql_state, (int32_t)getminor(dev));
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter\n");
		return (ENXIO);
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Only character opens are ever granted, so only those close. */
	if (otyp != OTYP_CHR) {
		QL_PRINT_2(CE_CONT, "(%d): failed, open type\n",
		    ha->instance);
		return (EINVAL);
	}

	/* Clear the opened flag; rval stays 0 (close cannot fail here). */
	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~QL_OPENED;
	ADAPTER_STATE_UNLOCK(ha);

	if (rval != 0) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}

/*
 * ql_ioctl
 *	control a character device
 *
 * Input:
 *	dev = device number
 *	cmd = function to perform
 *	arg = data type varies with request
 *	mode = flags
 *	cred_p = credentials pointer
 *	rval_p = pointer to result value
 *
 * Returns:
 *	0 = success
 *
 * Context:
 *	Kernel context.
 */
/* ARGSUSED */
int
ql_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred_p,
    int *rval_p)
{
	ql_adapter_state_t	*ha;
	int			rval = 0;

	/* No ioctl service while the system is panicking. */
	if (ddi_in_panic()) {
		QL_PRINT_2(CE_CONT, "ql_ioctl: ddi_in_panic exit\n");
		return (ENOPROTOOPT);
	}

	ha = ddi_get_soft_state(ql_state, (int32_t)getminor(dev));
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter\n");
		return (ENXIO);
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Quick clean exit for qla2x00 foapi calls which are
	 * not supported in qlc.
	 */
	if (cmd >= QL_FOAPI_START && cmd <= QL_FOAPI_END) {
		QL_PRINT_9(CE_CONT, "failed, fo api not supported\n");
		return (ENOTTY);
	}

	/*
	 * PWR management busy.  Marks the adapter busy and raises it to
	 * full power for the duration of the ioctl; the matching idle
	 * notification below undoes this on every exit path that follows.
	 */
	rval = ql_busy_notification(ha);
	if (rval != FC_SUCCESS) {
		EL(ha, "failed, ql_busy_notification\n");
		return (ENXIO);
	}

	/*
	 * Try the extended ioctl dispatcher first; fall back to the
	 * locally-handled commands only when it reports the command
	 * as unrecognized (ENOPROTOOPT/EINVAL).
	 */
	rval = ql_xioctl(ha, cmd, arg, mode, cred_p, rval_p);
	if (rval == ENOPROTOOPT || rval == EINVAL) {
		switch (cmd) {
		case QL_GET_ADAPTER_FEATURE_BITS: {
			uint16_t bits;

			rval = ql_get_feature_bits(ha, &bits);

			if (!rval && ddi_copyout((void *)&bits, (void *)arg,
			    sizeof (bits), mode)) {
				rval = EFAULT;
			}
			break;
		}

		case QL_SET_ADAPTER_FEATURE_BITS: {
			uint16_t bits;

			if (ddi_copyin((void *)arg, (void *)&bits,
			    sizeof (bits), mode)) {
				rval = EFAULT;
				break;
			}

			rval = ql_set_feature_bits(ha, bits);
			break;
		}

		case QL_SET_ADAPTER_NVRAM_DEFAULTS:
			rval = ql_set_nvram_adapter_defaults(ha);
			break;

		case QL_UTIL_LOAD:
			rval = ql_nv_util_load(ha, (void *)arg, mode);
			break;

		case QL_UTIL_DUMP:
			rval = ql_nv_util_dump(ha, (void *)arg, mode);
			break;

		case QL_ADM_OP:
			rval = ql_adm_op(ha, (void *)arg, mode);
			break;

		default:
			EL(ha, "unknown command = %d\n", cmd);
			rval = ENOTTY;
			break;
		}
	}

	/* PWR management idle. */
	(void) ql_idle_notification(ha);

	if (rval != 0) {
		/*
		 * Don't show failures caused by pps polling for
		 * non-existent virtual ports.
		 */
		if (cmd != EXT_CC_VPORT_CMD) {
			EL(ha, "failed, cmd=%d rval=%d\n", cmd, rval);
		}
	} else {
		/*EMPTY*/
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}

/*
 * ql_busy_notification
 *	Adapter busy notification.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *	FC_SUCCESS
 *	FC_FAILURE
 *
 * Context:
 *	Kernel context.
 */
static int
ql_busy_notification(ql_adapter_state_t *ha)
{
	/* Power management not supported on this adapter: nothing to do. */
	if (!ha->pm_capable) {
		return (FC_SUCCESS);
	}

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Bump the driver-side busy count before telling the PM
	 * framework; each failure path below must undo this increment.
	 */
	QL_PM_LOCK(ha);
	ha->busy++;
	QL_PM_UNLOCK(ha);

	if (pm_busy_component(ha->dip, 0) != DDI_SUCCESS) {
		QL_PM_LOCK(ha);
		ha->busy--;
		QL_PM_UNLOCK(ha);

		EL(ha, "pm_busy_component failed = %xh\n", FC_FAILURE);
		return (FC_FAILURE);
	}

	/*
	 * If the adapter is not already at full power (D0), raise it.
	 * The lock is dropped before pm_raise_power() since that call
	 * can block; power_level is only sampled under the lock.
	 */
	QL_PM_LOCK(ha);
	if (ha->power_level != PM_LEVEL_D0) {
		QL_PM_UNLOCK(ha);
		if (pm_raise_power(ha->dip, 0, 1) != DDI_SUCCESS) {
			QL_PM_LOCK(ha);
			ha->busy--;
			QL_PM_UNLOCK(ha);
			return (FC_FAILURE);
		}
	} else {
		QL_PM_UNLOCK(ha);
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}

/*
 * ql_idle_notification
 *	Adapter idle notification.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *	FC_SUCCESS
 *	FC_FAILURE
 *
 * Context:
 *	Kernel context.
 */
static int
ql_idle_notification(ql_adapter_state_t *ha)
{
	/* Power management not supported on this adapter: nothing to do. */
	if (!ha->pm_capable) {
		return (FC_SUCCESS);
	}

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Tell the PM framework first; the driver-side busy count is
	 * only decremented when the framework accepted the idle call.
	 */
	if (pm_idle_component(ha->dip, 0) != DDI_SUCCESS) {
		EL(ha, "pm_idle_component failed = %xh\n", FC_FAILURE);
		return (FC_FAILURE);
	}

	QL_PM_LOCK(ha);
	ha->busy--;
	QL_PM_UNLOCK(ha);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}

/*
 * Get adapter feature bits from NVRAM
 *	Bit-bangs a single 16-bit word (the adapter_features field) out
 *	of the serial NVRAM via the nvram I/O register.  Pre-24xx
 *	adapters only.
 */
static int
ql_get_feature_bits(ql_adapter_state_t *ha, uint16_t *features)
{
	int			count;
	volatile uint16_t	data;
	uint32_t		nv_cmd;
	uint32_t		start_addr;
	int			rval;
	uint32_t		offset = offsetof(nvram_t, adapter_features);

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_24258081)) {
		EL(ha, "Not supported for 24xx\n");
		return (EINVAL);
	}

	/*
	 * The offset can't be greater than max of 8 bits and
	 * the following code breaks if the offset isn't at
	 * 2 byte boundary.
	 */
	rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA);
	if (rval != QL_SUCCESS) {
		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
		return (EIO);
	}

	/*
	 * Have the most significant 3 bits represent the read operation
	 * followed by the 8 bits representing the offset at which we
	 * are going to perform the read operation
	 */
	offset >>= 1;		/* byte offset -> word address */
	offset += start_addr;
	nv_cmd = (offset << 16) | NV_READ_OP;
	nv_cmd <<= 5;		/* left-justify the 11 command bits */

	/*
	 * Select the chip and feed the command and address,
	 * one bit per clock, MSB first (11 bits total).
	 */
	for (count = 0; count < 11; count++) {
		if (nv_cmd & BIT_31) {
			ql_nv_write(ha, NV_DATA_OUT);
		} else {
			ql_nv_write(ha, 0);
		}
		nv_cmd <<= 1;
	}

	/* Clock the 16 data bits back in, MSB first. */
	*features = 0;
	for (count = 0; count < 16; count++) {
		WRT16_IO_REG(ha, nvram, NV_SELECT | NV_CLOCK);
		ql_nv_delay();

		data = RD16_IO_REG(ha, nvram);
		*features <<= 1;
		if (data & NV_DATA_IN) {
			*features = (uint16_t)(*features | 0x1);
		}

		WRT16_IO_REG(ha, nvram, NV_SELECT);
		ql_nv_delay();
	}

	/*
	 * Deselect the chip
	 */
	WRT16_IO_REG(ha, nvram, NV_DESELECT);

	ql_release_nvram(ha);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (0);
}

/*
 * Set adapter feature bits in NVRAM
 *	Reads the whole NVRAM image, verifies its checksum, patches the
 *	adapter_features field, recomputes the checksum, writes the image
 *	back and verifies it by re-reading.  Pre-24xx adapters only.
 */
static int
ql_set_feature_bits(ql_adapter_state_t *ha, uint16_t features)
{
	int		rval;
	uint32_t	count;
	nvram_t		*nv;
	uint16_t	*wptr;
	uint8_t		*bptr;
	uint8_t		csum;
	uint32_t	start_addr;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_24258081)) {
		EL(ha, "Not supported for 24xx\n");
		return (EINVAL);
	}

	nv = kmem_zalloc(sizeof (*nv), KM_SLEEP);

	rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA);
	if (rval != QL_SUCCESS) {
		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
		kmem_free(nv, sizeof (*nv));
		return (EIO);
	}
	rval = 0;

	/*
	 * Read off the whole NVRAM, byte-summing as we go.  A valid
	 * image (including its checksum byte) sums to zero.
	 */
	wptr = (uint16_t *)nv;
	csum = 0;
	for (count = 0; count < sizeof (nvram_t) / 2; count++) {
		*wptr = (uint16_t)ql_get_nvram_word(ha, count + start_addr);
		csum = (uint8_t)(csum + (uint8_t)*wptr);
		csum = (uint8_t)(csum + (uint8_t)(*wptr >> 8));
		wptr++;
	}

	/*
	 * If the checksum is BAD then fail it right here.
	 */
	if (csum) {
		kmem_free(nv, sizeof (*nv));
		ql_release_nvram(ha);
		return (EBADF);
	}

	/* Patch the feature bits: [0] = high byte, [1] = low byte. */
	nv->adapter_features[0] = (uint8_t)((features & 0xFF00) >> 8);
	nv->adapter_features[1] = (uint8_t)(features & 0xFF);

	/*
	 * Recompute the checksum now (csum is 0 here, so the loop
	 * produces the byte sum of everything but the checksum byte).
	 */
	bptr = (uint8_t *)nv;
	for (count = 0; count < sizeof (nvram_t) - 1; count++) {
		csum = (uint8_t)(csum + *bptr++);
	}
	csum = (uint8_t)(~csum + 1);	/* two's complement */
	nv->checksum = csum;

	/*
	 * Now load the NVRAM
	 */
	wptr = (uint16_t *)nv;
	for (count = 0; count < sizeof (nvram_t) / 2; count++) {
		ql_load_nvram(ha, (uint8_t)(count + start_addr), *wptr++);
	}

	/*
	 * Read NVRAM and verify the contents word-for-word.
	 */
	wptr = (uint16_t *)nv;
	csum = 0;
	for (count = 0; count < sizeof (nvram_t) / 2; count++) {
		if (ql_get_nvram_word(ha, count + start_addr) != *wptr) {
			rval = EIO;
			break;
		}
		csum = (uint8_t)(csum + (uint8_t)*wptr);
		csum = (uint8_t)(csum + (uint8_t)(*wptr >> 8));
		wptr++;
	}

	if (csum) {
		rval = EINVAL;
	}

	kmem_free(nv, sizeof (*nv));
	ql_release_nvram(ha);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}

/*
 * ql_set_nvram_adapter_defaults
 *	Writes a factory-default NVRAM image (ICB defaults, host adapter
 *	parameters and checksum) and verifies it by re-reading.
 *
 * Fix this function to update just feature bits and checksum in NVRAM
 */
static int
ql_set_nvram_adapter_defaults(ql_adapter_state_t *ha)
{
	int		rval;
	uint32_t	count;
	uint32_t	start_addr;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA);
	if (rval != QL_SUCCESS) {
		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
		return (EIO);
	}
	rval = 0;

	if (CFG_IST(ha, CFG_CTRL_24258081)) {
		/* ISP24xx and later: 32-bit flash-backed NVRAM. */
		nvram_24xx_t	*nv;
		uint32_t	*longptr;
		uint32_t	csum = 0;

		nv = kmem_zalloc(sizeof (*nv), KM_SLEEP);

		nv->nvram_version[0] = LSB(ICB_24XX_VERSION);
		nv->nvram_version[1] = MSB(ICB_24XX_VERSION);

		nv->version[0] = 1;
		nv->max_frame_length[1] = 8;
		nv->execution_throttle[0] = 16;
		nv->login_retry_count[0] = 8;

		nv->firmware_options_1[0] = BIT_2 | BIT_1;
		nv->firmware_options_1[1] = BIT_5;
		nv->firmware_options_2[0] = BIT_5;
		nv->firmware_options_2[1] = BIT_4;
		nv->firmware_options_3[1] = BIT_6;

		/*
		 * Set default host adapter parameters
		 */
		nv->host_p[0] = BIT_4 | BIT_1;
		nv->host_p[1] = BIT_3 | BIT_2;
		nv->reset_delay = 5;
		nv->max_luns_per_target[0] = 128;
		nv->port_down_retry_count[0] = 30;
		nv->link_down_timeout[0] = 30;

		/*
		 * Compute the checksum: 32-bit two's-complement sum over
		 * all words except the last, which receives the checksum.
		 */
		longptr = (uint32_t *)nv;
		csum = 0;
		for (count = 0; count < (sizeof (nvram_24xx_t)/4)-1; count++) {
			csum += *longptr;
			longptr++;
		}
		csum = (uint32_t)(~csum + 1);
		/*
		 * NOTE(review): other call sites pass a pointer to this
		 * macro (e.g. LITTLE_ENDIAN_32(&data32)); passing the
		 * value cast to (long) here looks suspicious -- verify
		 * against the macro definition in the headers.
		 */
		LITTLE_ENDIAN_32((long)csum);
		*longptr = csum;	/* longptr is at the last word */

		/*
		 * Now load the NVRAM
		 */
		longptr = (uint32_t *)nv;
		for (count = 0; count < sizeof (nvram_24xx_t) / 4; count++) {
			(void) ql_24xx_load_nvram(ha,
			    (uint32_t)(count + start_addr), *longptr++);
		}

		/*
		 * Read NVRAM and verify the contents: each word is read
		 * into the same scratch location and summed; a valid
		 * image sums to zero.
		 */
		csum = 0;
		longptr = (uint32_t *)nv;
		for (count = 0; count < sizeof (nvram_24xx_t) / 4; count++) {
			rval = ql_24xx_read_flash(ha, count + start_addr,
			    longptr);
			if (rval != QL_SUCCESS) {
				EL(ha, "24xx_read_flash failed=%xh\n", rval);
				break;
			}
			csum += *longptr;
		}

		if (csum) {
			rval = EINVAL;
		}
		kmem_free(nv, sizeof (nvram_24xx_t));
	} else {
		/* Pre-24xx: byte-checksummed serial NVRAM. */
		nvram_t		*nv;
		uint16_t	*wptr;
		uint8_t		*bptr;
		uint8_t		csum;

		nv = kmem_zalloc(sizeof (*nv), KM_SLEEP);
		/*
		 * Set default initialization control block.
		 */
		nv->parameter_block_version = ICB_VERSION;
		nv->firmware_options[0] = BIT_4 | BIT_3 | BIT_2 | BIT_1;
		nv->firmware_options[1] = BIT_7 | BIT_5 | BIT_2;

		nv->max_frame_length[1] = 4;
		nv->max_iocb_allocation[1] = 1;
		nv->execution_throttle[0] = 16;
		nv->login_retry_count = 8;
		nv->port_name[0] = 33;
		nv->port_name[3] = 224;
		nv->port_name[4] = 139;
		nv->login_timeout = 4;

		/*
		 * Set default host adapter parameters
		 */
		nv->host_p[0] = BIT_1;
		nv->host_p[1] = BIT_2;
		nv->reset_delay = 5;
		nv->port_down_retry_count = 8;
		nv->maximum_luns_per_target[0] = 8;

		/*
		 * Compute the checksum: 8-bit two's-complement sum over
		 * every byte except the trailing checksum byte.
		 */
		bptr = (uint8_t *)nv;
		csum = 0;
		for (count = 0; count < sizeof (nvram_t) - 1; count++) {
			csum = (uint8_t)(csum + *bptr++);
		}
		csum = (uint8_t)(~csum + 1);
		nv->checksum = csum;

		/*
		 * Now load the NVRAM
		 */
		wptr = (uint16_t *)nv;
		for (count = 0; count < sizeof (nvram_t) / 2; count++) {
			ql_load_nvram(ha, (uint8_t)(count + start_addr),
			    *wptr++);
		}

		/*
		 * Read NVRAM and verify the contents word-for-word.
		 */
		wptr = (uint16_t *)nv;
		csum = 0;
		for (count = 0; count < sizeof (nvram_t) / 2; count++) {
			if (ql_get_nvram_word(ha, count + start_addr) !=
			    *wptr) {
				rval = EIO;
				break;
			}
			csum = (uint8_t)(csum + (uint8_t)*wptr);
			csum = (uint8_t)(csum + (uint8_t)(*wptr >> 8));
			wptr++;
		}
		if (csum) {
			rval = EINVAL;
		}
		kmem_free(nv, sizeof (*nv));
	}
	ql_release_nvram(ha);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}

/*
 * ql_load_nvram
 *	Writes one 16-bit word to the serial (Microwire-style) NVRAM at
 *	the given word address: write-enable, erase, program, poll for
 *	ready, then write-disable -- all bit-banged through the nvram
 *	I/O register.  Pre-24xx adapters only.
 */
static void
ql_load_nvram(ql_adapter_state_t *ha, uint8_t addr, uint16_t value)
{
	int			count;
	volatile uint16_t	word;
	volatile uint32_t	nv_cmd;

	/* Write-enable sequence: 1-0-0 followed by eight 1 bits. */
	ql_nv_write(ha, NV_DATA_OUT);
	ql_nv_write(ha, 0);
	ql_nv_write(ha, 0);

	for (word = 0; word < 8; word++) {
		ql_nv_write(ha, NV_DATA_OUT);
	}

	/*
	 * Deselect the chip
	 */
	WRT16_IO_REG(ha, nvram, NV_DESELECT);
	ql_nv_delay();

	/*
	 * Erase Location: clock out the 11-bit erase command, MSB first.
	 */
	nv_cmd = (addr << 16) | NV_ERASE_OP;
	nv_cmd <<= 5;
	for (count = 0; count < 11; count++) {
		if (nv_cmd & BIT_31) {
			ql_nv_write(ha, NV_DATA_OUT);
		} else {
			ql_nv_write(ha, 0);
		}
		nv_cmd <<= 1;
	}

	/*
	 * Wait for Erase to Finish: the part drives data-in high when
	 * ready.  Note: this polls without a timeout.
	 */
	WRT16_IO_REG(ha, nvram, NV_DESELECT);
	ql_nv_delay();
	WRT16_IO_REG(ha, nvram, NV_SELECT);
	word = 0;
	while ((word & NV_DATA_IN) == 0) {
		ql_nv_delay();
		word = RD16_IO_REG(ha, nvram);
	}
	WRT16_IO_REG(ha, nvram, NV_DESELECT);
	ql_nv_delay();

	/*
	 * Write data now: 11 command/address bits + 16 data bits = 27.
	 */
	nv_cmd = (addr << 16) | NV_WRITE_OP;
	nv_cmd |= value;
	nv_cmd <<= 5;
	for (count = 0; count < 27; count++) {
		if (nv_cmd & BIT_31) {
			ql_nv_write(ha, NV_DATA_OUT);
		} else {
			ql_nv_write(ha, 0);
		}
		nv_cmd <<= 1;
	}

	/*
	 * Wait for NVRAM to become ready (same ready-poll as above).
	 */
	WRT16_IO_REG(ha, nvram, NV_DESELECT);
	ql_nv_delay();
	WRT16_IO_REG(ha, nvram, NV_SELECT);
	word = 0;
	while ((word & NV_DATA_IN) == 0) {
		ql_nv_delay();
		word = RD16_IO_REG(ha, nvram);
	}
	WRT16_IO_REG(ha, nvram, NV_DESELECT);
	ql_nv_delay();

	/*
	 * Disable writes: 1 followed by ten 0 bits.
	 */
	ql_nv_write(ha, NV_DATA_OUT);
	for (count = 0; count < 10; count++) {
		ql_nv_write(ha, 0);
	}

	/*
	 * Deselect the chip now
	 */
	WRT16_IO_REG(ha, nvram, NV_DESELECT);
}

/*
 * ql_24xx_load_nvram
 *	Enable NVRAM and writes a 32bit word to ISP24xx NVRAM.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	addr:	NVRAM address.
 *	value:	data.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_24xx_load_nvram(ql_adapter_state_t *ha, uint32_t addr, uint32_t value)
{
	int	rval;

	/* Enable flash write. */
	if (!(CFG_IST(ha, CFG_CTRL_8081))) {
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	/* Disable NVRAM write-protection. */
	if (CFG_IST(ha, CFG_CTRL_2422)) {
		(void) ql_24xx_write_flash(ha, NVRAM_CONF_ADDR | 0x101, 0);
	} else {
		/*
		 * NOTE(review): on failure this returns with
		 * ISP_FLASH_ENABLE still set in ctrl_status (the
		 * "Disable flash write" step below is skipped) --
		 * confirm whether that is intended.
		 */
		if ((rval = ql_24xx_unprotect_flash(ha)) != QL_SUCCESS) {
			EL(ha, "unprotect_flash failed, rval=%xh\n", rval);
			return (rval);
		}
	}

	/* Write to flash. */
	rval = ql_24xx_write_flash(ha, addr, value);

	/* Enable NVRAM write-protection. */
	if (CFG_IST(ha, CFG_CTRL_2422)) {
		/* TODO: Check if 0x8c is correct -- sb: 0x9c ? */
		(void) ql_24xx_write_flash(ha, NVRAM_CONF_ADDR | 0x101, 0x8c);
	} else {
		ql_24xx_protect_flash(ha);
	}

	/*
	 * Disable flash write.
	 * NOTE(review): the enable above is gated on !CFG_CTRL_8081 but
	 * this disable is gated on !CFG_CTRL_81XX -- the asymmetry means
	 * some controller families may take one branch but not the
	 * other; verify against the CFG_CTRL_* definitions.
	 */
	if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) & ~ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	return (rval);
}

/*
 * ql_nv_util_load
 *	Loads NVRAM from application.
 *
 * Input:
 *	ha = adapter state pointer.
 *	bp = user buffer address.
 *
 * Returns:
 *
 * Context:
 *	Kernel context.
917 */ 918 int 919 ql_nv_util_load(ql_adapter_state_t *ha, void *bp, int mode) 920 { 921 uint8_t cnt; 922 void *nv; 923 uint16_t *wptr; 924 uint16_t data; 925 uint32_t start_addr, *lptr, data32; 926 nvram_t *nptr; 927 int rval; 928 929 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 930 931 nv = kmem_zalloc(ha->nvram_cache->size, KM_SLEEP); 932 933 if (ddi_copyin(bp, nv, ha->nvram_cache->size, mode) != 0) { 934 EL(ha, "Buffer copy failed\n"); 935 kmem_free(nv, ha->nvram_cache->size); 936 return (EFAULT); 937 } 938 939 /* See if the buffer passed to us looks sane */ 940 nptr = (nvram_t *)nv; 941 if (nptr->id[0] != 'I' || nptr->id[1] != 'S' || nptr->id[2] != 'P' || 942 nptr->id[3] != ' ') { 943 EL(ha, "failed, buffer sanity check\n"); 944 kmem_free(nv, ha->nvram_cache->size); 945 return (EINVAL); 946 } 947 948 /* Quiesce I/O */ 949 if (ql_stall_driver(ha, 0) != QL_SUCCESS) { 950 EL(ha, "ql_stall_driver failed\n"); 951 kmem_free(nv, ha->nvram_cache->size); 952 return (EBUSY); 953 } 954 955 rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA); 956 if (rval != QL_SUCCESS) { 957 EL(ha, "failed, ql_lock_nvram=%xh\n", rval); 958 kmem_free(nv, ha->nvram_cache->size); 959 ql_restart_driver(ha); 960 return (EIO); 961 } 962 963 /* Load NVRAM. 
*/ 964 if (CFG_IST(ha, CFG_CTRL_258081)) { 965 GLOBAL_HW_UNLOCK(); 966 start_addr &= ~ha->flash_data_addr; 967 start_addr <<= 2; 968 if ((rval = ql_r_m_w_flash(ha, bp, ha->nvram_cache->size, 969 start_addr, mode)) != QL_SUCCESS) { 970 EL(ha, "nvram load failed, rval = %0xh\n", rval); 971 } 972 GLOBAL_HW_LOCK(); 973 } else if (CFG_IST(ha, CFG_CTRL_2422)) { 974 lptr = (uint32_t *)nv; 975 for (cnt = 0; cnt < ha->nvram_cache->size / 4; cnt++) { 976 data32 = *lptr++; 977 LITTLE_ENDIAN_32(&data32); 978 rval = ql_24xx_load_nvram(ha, cnt + start_addr, 979 data32); 980 if (rval != QL_SUCCESS) { 981 EL(ha, "failed, 24xx_load_nvram=%xh\n", rval); 982 break; 983 } 984 } 985 } else { 986 wptr = (uint16_t *)nv; 987 for (cnt = 0; cnt < ha->nvram_cache->size / 2; cnt++) { 988 data = *wptr++; 989 LITTLE_ENDIAN_16(&data); 990 ql_load_nvram(ha, (uint8_t)(cnt + start_addr), data); 991 } 992 } 993 /* switch to the new one */ 994 NVRAM_CACHE_LOCK(ha); 995 996 kmem_free(ha->nvram_cache->cache, ha->nvram_cache->size); 997 ha->nvram_cache->cache = (void *)nptr; 998 999 NVRAM_CACHE_UNLOCK(ha); 1000 1001 ql_release_nvram(ha); 1002 ql_restart_driver(ha); 1003 1004 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 1005 1006 if (rval == QL_SUCCESS) { 1007 return (0); 1008 } 1009 1010 return (EFAULT); 1011 } 1012 1013 /* 1014 * ql_nv_util_dump 1015 * Dumps NVRAM to application. 1016 * 1017 * Input: 1018 * ha = adapter state pointer. 1019 * bp = user buffer address. 1020 * 1021 * Returns: 1022 * 1023 * Context: 1024 * Kernel context. 
1025 */ 1026 int 1027 ql_nv_util_dump(ql_adapter_state_t *ha, void *bp, int mode) 1028 { 1029 uint32_t start_addr; 1030 int rval2, rval = 0; 1031 1032 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 1033 1034 if (ha->nvram_cache == NULL || 1035 ha->nvram_cache->size == NULL || 1036 ha->nvram_cache->cache == NULL) { 1037 EL(ha, "failed, kmem_zalloc\n"); 1038 return (ENOMEM); 1039 } else if (ha->nvram_cache->valid != 1) { 1040 1041 /* Quiesce I/O */ 1042 if (ql_stall_driver(ha, 0) != QL_SUCCESS) { 1043 EL(ha, "ql_stall_driver failed\n"); 1044 return (EBUSY); 1045 } 1046 1047 rval2 = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA); 1048 if (rval2 != QL_SUCCESS) { 1049 EL(ha, "failed, ql_lock_nvram=%xh\n", rval2); 1050 ql_restart_driver(ha); 1051 return (EIO); 1052 } 1053 NVRAM_CACHE_LOCK(ha); 1054 1055 rval2 = ql_get_nvram(ha, ha->nvram_cache->cache, 1056 start_addr, ha->nvram_cache->size); 1057 if (rval2 != QL_SUCCESS) { 1058 rval = rval2; 1059 } else { 1060 ha->nvram_cache->valid = 1; 1061 EL(ha, "nvram cache now valid."); 1062 } 1063 1064 NVRAM_CACHE_UNLOCK(ha); 1065 1066 ql_release_nvram(ha); 1067 ql_restart_driver(ha); 1068 1069 if (rval != 0) { 1070 EL(ha, "failed to dump nvram, rval=%x\n", rval); 1071 return (rval); 1072 } 1073 } 1074 1075 if (ddi_copyout(ha->nvram_cache->cache, bp, 1076 ha->nvram_cache->size, mode) != 0) { 1077 EL(ha, "Buffer copy failed\n"); 1078 return (EFAULT); 1079 } 1080 1081 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 1082 1083 return (0); 1084 } 1085 1086 int 1087 ql_get_nvram(ql_adapter_state_t *ha, void *dest_addr, uint32_t src_addr, 1088 uint32_t size) 1089 { 1090 int rval = QL_SUCCESS; 1091 int cnt; 1092 /* Dump NVRAM. 
*/ 1093 if (CFG_IST(ha, CFG_CTRL_24258081)) { 1094 uint32_t *lptr = (uint32_t *)dest_addr; 1095 1096 for (cnt = 0; cnt < size / 4; cnt++) { 1097 rval = ql_24xx_read_flash(ha, src_addr++, lptr); 1098 if (rval != QL_SUCCESS) { 1099 EL(ha, "read_flash failed=%xh\n", rval); 1100 rval = EAGAIN; 1101 break; 1102 } 1103 LITTLE_ENDIAN_32(lptr); 1104 lptr++; 1105 } 1106 } else { 1107 uint16_t data; 1108 uint16_t *wptr = (uint16_t *)dest_addr; 1109 1110 for (cnt = 0; cnt < size / 2; cnt++) { 1111 data = (uint16_t)ql_get_nvram_word(ha, cnt + 1112 src_addr); 1113 LITTLE_ENDIAN_16(&data); 1114 *wptr++ = data; 1115 } 1116 } 1117 return (rval); 1118 } 1119 1120 /* 1121 * ql_vpd_load 1122 * Loads VPD from application. 1123 * 1124 * Input: 1125 * ha = adapter state pointer. 1126 * bp = user buffer address. 1127 * 1128 * Returns: 1129 * 1130 * Context: 1131 * Kernel context. 1132 */ 1133 int 1134 ql_vpd_load(ql_adapter_state_t *ha, void *bp, int mode) 1135 { 1136 uint8_t cnt; 1137 uint8_t *vpd, *vpdptr, *vbuf; 1138 uint32_t start_addr, vpd_size, *lptr, data32; 1139 int rval; 1140 1141 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 1142 1143 if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) { 1144 EL(ha, "unsupported adapter feature\n"); 1145 return (ENOTSUP); 1146 } 1147 1148 vpd_size = QL_24XX_VPD_SIZE; 1149 1150 vpd = kmem_zalloc(vpd_size, KM_SLEEP); 1151 1152 if (ddi_copyin(bp, vpd, vpd_size, mode) != 0) { 1153 EL(ha, "Buffer copy failed\n"); 1154 kmem_free(vpd, vpd_size); 1155 return (EFAULT); 1156 } 1157 1158 /* Sanity check the user supplied data via checksum */ 1159 if ((vpdptr = ql_vpd_findtag(ha, vpd, "RV")) == NULL) { 1160 EL(ha, "vpd RV tag missing\n"); 1161 kmem_free(vpd, vpd_size); 1162 return (EINVAL); 1163 } 1164 1165 vpdptr += 3; 1166 cnt = 0; 1167 vbuf = vpd; 1168 while (vbuf <= vpdptr) { 1169 cnt += *vbuf++; 1170 } 1171 if (cnt != 0) { 1172 EL(ha, "mismatched checksum, cal=%xh, passed=%xh\n", 1173 (uint8_t)cnt, (uintptr_t)vpdptr); 1174 kmem_free(vpd, vpd_size); 
1175 return (EINVAL); 1176 } 1177 1178 /* Quiesce I/O */ 1179 if (ql_stall_driver(ha, 0) != QL_SUCCESS) { 1180 EL(ha, "ql_stall_driver failed\n"); 1181 kmem_free(vpd, vpd_size); 1182 return (EBUSY); 1183 } 1184 1185 rval = ql_lock_nvram(ha, &start_addr, LNF_VPD_DATA); 1186 if (rval != QL_SUCCESS) { 1187 EL(ha, "failed, ql_lock_nvram=%xh\n", rval); 1188 kmem_free(vpd, vpd_size); 1189 ql_restart_driver(ha); 1190 return (EIO); 1191 } 1192 1193 /* Load VPD. */ 1194 if (CFG_IST(ha, CFG_CTRL_258081)) { 1195 GLOBAL_HW_UNLOCK(); 1196 start_addr &= ~ha->flash_data_addr; 1197 start_addr <<= 2; 1198 if ((rval = ql_r_m_w_flash(ha, bp, vpd_size, start_addr, 1199 mode)) != QL_SUCCESS) { 1200 EL(ha, "vpd load error: %xh\n", rval); 1201 } 1202 GLOBAL_HW_LOCK(); 1203 } else { 1204 lptr = (uint32_t *)vpd; 1205 for (cnt = 0; cnt < vpd_size / 4; cnt++) { 1206 data32 = *lptr++; 1207 LITTLE_ENDIAN_32(&data32); 1208 rval = ql_24xx_load_nvram(ha, cnt + start_addr, 1209 data32); 1210 if (rval != QL_SUCCESS) { 1211 EL(ha, "failed, 24xx_load_nvram=%xh\n", rval); 1212 break; 1213 } 1214 } 1215 } 1216 1217 kmem_free(vpd, vpd_size); 1218 1219 /* Update the vcache */ 1220 CACHE_LOCK(ha); 1221 1222 if (rval != QL_SUCCESS) { 1223 EL(ha, "failed, load\n"); 1224 } else if ((ha->vcache == NULL) && ((ha->vcache = 1225 kmem_zalloc(vpd_size, KM_SLEEP)) == NULL)) { 1226 EL(ha, "failed, kmem_zalloc2\n"); 1227 } else if (ddi_copyin(bp, ha->vcache, vpd_size, mode) != 0) { 1228 EL(ha, "Buffer copy2 failed\n"); 1229 kmem_free(ha->vcache, vpd_size); 1230 ha->vcache = NULL; 1231 } 1232 1233 CACHE_UNLOCK(ha); 1234 1235 ql_release_nvram(ha); 1236 ql_restart_driver(ha); 1237 1238 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 1239 1240 if (rval == QL_SUCCESS) { 1241 return (0); 1242 } 1243 1244 return (EFAULT); 1245 } 1246 1247 /* 1248 * ql_vpd_dump 1249 * Dumps VPD to application buffer. 1250 * 1251 * Input: 1252 * ha = adapter state pointer. 1253 * bp = user buffer address. 
 *
 * Returns:
 *
 * Context:
 *	Kernel context.
 */
int
ql_vpd_dump(ql_adapter_state_t *ha, void *bp, int mode)
{
	uint8_t		cnt;
	void		*vpd;
	uint32_t	start_addr, vpd_size, *lptr;
	int		rval = 0;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* VPD exists only on ISP24xx-class adapters. */
	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
		EL(ha, "unsupported adapter feature\n");
		return (EACCES);
	}

	vpd_size = QL_24XX_VPD_SIZE;

	/*
	 * CACHE_LOCK is held across the whole dump (including the flash
	 * reads and copyout) so only one thread populates ha->vcache.
	 */
	CACHE_LOCK(ha);

	if (ha->vcache != NULL) {
		/* copy back the vpd cache data */
		if (ddi_copyout(ha->vcache, bp, vpd_size, mode) != 0) {
			EL(ha, "Buffer copy failed\n");
			rval = EFAULT;
		}
		CACHE_UNLOCK(ha);
		return (rval);
	}

	vpd = kmem_zalloc(vpd_size, KM_SLEEP);

	/* Quiesce I/O */
	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
		CACHE_UNLOCK(ha);
		EL(ha, "ql_stall_driver failed\n");
		kmem_free(vpd, vpd_size);
		return (EBUSY);
	}

	rval = ql_lock_nvram(ha, &start_addr, LNF_VPD_DATA);
	if (rval != QL_SUCCESS) {
		CACHE_UNLOCK(ha);
		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
		kmem_free(vpd, vpd_size);
		ql_restart_driver(ha);
		return (EIO);
	}

	/* Dump VPD: one 32-bit flash word at a time. */
	lptr = (uint32_t *)vpd;

	for (cnt = 0; cnt < vpd_size / 4; cnt++) {
		rval = ql_24xx_read_flash(ha, start_addr++, lptr);
		if (rval != QL_SUCCESS) {
			EL(ha, "read_flash failed=%xh\n", rval);
			rval = EAGAIN;
			break;
		}
		LITTLE_ENDIAN_32(lptr);
		lptr++;
	}

	ql_release_nvram(ha);
	ql_restart_driver(ha);

	if (ddi_copyout(vpd, bp, vpd_size, mode) != 0) {
		CACHE_UNLOCK(ha);
		EL(ha, "Buffer copy failed\n");
		kmem_free(vpd, vpd_size);
		return (EFAULT);
	}

	/*
	 * Cache the buffer; ownership of vpd transfers to ha->vcache.
	 * NOTE(review): this happens even when the read loop above broke
	 * out with rval == EAGAIN, so a partially-read buffer can be
	 * cached while EFAULT is returned -- confirm this is intended.
	 */
	ha->vcache = vpd;

	CACHE_UNLOCK(ha);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	if (rval != QL_SUCCESS) {
		return (EFAULT);
	} else {
		return (0);
	}
}

/*
 * ql_vpd_findtag
 *	Search the passed vpd buffer for the requested VPD tag type.
 *
 * Input:
 *	ha = adapter state pointer.
 *	vpdbuf = Pointer to start of the buffer to search
 *	opcode = VPD opcode to find (must be NULL terminated).
 *
 * Returns:
 *	Pointer to the opcode in the buffer if opcode found.
 *	NULL if opcode is not found.
 *
 * Context:
 *	Kernel context.
1360 */ 1361 static uint8_t * 1362 ql_vpd_findtag(ql_adapter_state_t *ha, uint8_t *vpdbuf, int8_t *opcode) 1363 { 1364 uint8_t *vpd = vpdbuf; 1365 uint8_t *end = vpdbuf + QL_24XX_VPD_SIZE; 1366 uint32_t found = 0; 1367 1368 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 1369 1370 if (vpdbuf == NULL || opcode == NULL) { 1371 EL(ha, "null parameter passed!\n"); 1372 return (NULL); 1373 } 1374 1375 while (vpd < end) { 1376 1377 /* check for end of vpd */ 1378 if (vpd[0] == VPD_TAG_END) { 1379 if (opcode[0] == VPD_TAG_END) { 1380 found = 1; 1381 } else { 1382 found = 0; 1383 } 1384 break; 1385 } 1386 1387 /* check opcode */ 1388 if (bcmp(opcode, vpd, strlen(opcode)) == 0) { 1389 /* found opcode requested */ 1390 found = 1; 1391 break; 1392 } 1393 1394 /* 1395 * Didn't find the opcode, so calculate start of 1396 * next tag. Depending on the current tag type, 1397 * the length field can be 1 or 2 bytes 1398 */ 1399 if (!(strncmp((char *)vpd, (char *)VPD_TAG_PRODID, 1))) { 1400 vpd += (vpd[2] << 8) + vpd[1] + 3; 1401 } else if (*vpd == VPD_TAG_LRT || *vpd == VPD_TAG_LRTC) { 1402 vpd += 3; 1403 } else { 1404 vpd += vpd[2] +3; 1405 } 1406 } 1407 1408 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 1409 1410 return (found == 1 ? vpd : NULL); 1411 } 1412 1413 /* 1414 * ql_vpd_lookup 1415 * Return the VPD data for the request VPD tag 1416 * 1417 * Input: 1418 * ha = adapter state pointer. 1419 * opcode = VPD opcode to find (must be NULL terminated). 1420 * bp = Pointer to returned data buffer. 1421 * bplen = Length of returned data buffer. 1422 * 1423 * Returns: 1424 * Length of data copied into returned data buffer. 1425 * >0 = VPD data field (NULL terminated) 1426 * 0 = no data. 1427 * -1 = Could not find opcode in vpd buffer / error. 1428 * 1429 * Context: 1430 * Kernel context. 1431 * 1432 * NB: The opcode buffer and the bp buffer *could* be the same buffer! 
1433 * 1434 */ 1435 int32_t 1436 ql_vpd_lookup(ql_adapter_state_t *ha, uint8_t *opcode, uint8_t *bp, 1437 int32_t bplen) 1438 { 1439 uint8_t *vpd; 1440 uint8_t *vpdbuf; 1441 int32_t len = -1; 1442 1443 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 1444 1445 if (opcode == NULL || bp == NULL || bplen < 1) { 1446 EL(ha, "invalid parameter passed: opcode=%ph, " 1447 "bp=%ph, bplen=%xh\n", opcode, bp, bplen); 1448 return (len); 1449 } 1450 1451 if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) { 1452 return (len); 1453 } 1454 1455 if ((vpdbuf = (uint8_t *)kmem_zalloc(QL_24XX_VPD_SIZE, 1456 KM_SLEEP)) == NULL) { 1457 EL(ha, "unable to allocate vpd memory\n"); 1458 return (len); 1459 } 1460 1461 if ((ql_vpd_dump(ha, vpdbuf, (int)FKIOCTL)) != 0) { 1462 kmem_free(vpdbuf, QL_24XX_VPD_SIZE); 1463 EL(ha, "unable to retrieve VPD data\n"); 1464 return (len); 1465 } 1466 1467 if ((vpd = ql_vpd_findtag(ha, vpdbuf, (int8_t *)opcode)) != NULL) { 1468 /* 1469 * Found the tag 1470 */ 1471 if (*opcode == VPD_TAG_END || *opcode == VPD_TAG_LRT || 1472 *opcode == VPD_TAG_LRTC) { 1473 /* 1474 * we found it, but the tag doesn't have a data 1475 * field. 
1476 */ 1477 len = 0; 1478 } else if (!(strncmp((char *)vpd, (char *) 1479 VPD_TAG_PRODID, 1))) { 1480 len = vpd[2] << 8; 1481 len += vpd[1]; 1482 } else { 1483 len = vpd[2]; 1484 } 1485 1486 /* 1487 * make sure that the vpd len doesn't exceed the 1488 * vpd end 1489 */ 1490 if (vpd+len > vpdbuf + QL_24XX_VPD_SIZE) { 1491 EL(ha, "vpd tag len (%xh) exceeds vpd buffer " 1492 "length\n", len); 1493 len = -1; 1494 } 1495 } 1496 1497 if (len >= 0) { 1498 /* 1499 * make sure we don't exceed callers buffer space len 1500 */ 1501 if (len > bplen) { 1502 len = bplen-1; 1503 } 1504 1505 /* copy the data back */ 1506 (void) strncpy((int8_t *)bp, (int8_t *)(vpd+3), (int64_t)len); 1507 bp[len] = NULL; 1508 } else { 1509 /* error -- couldn't find tag */ 1510 bp[0] = NULL; 1511 if (opcode[1] != NULL) { 1512 EL(ha, "unable to find tag '%s'\n", opcode); 1513 } else { 1514 EL(ha, "unable to find tag '%xh'\n", opcode[0]); 1515 } 1516 } 1517 1518 kmem_free(vpdbuf, QL_24XX_VPD_SIZE); 1519 1520 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 1521 1522 return (len); 1523 } 1524 1525 /* 1526 * ql_r_m_w_flash 1527 * Read modify write from user space to flash. 1528 * 1529 * Input: 1530 * ha: adapter state pointer. 1531 * dp: source byte pointer. 1532 * bc: byte count. 1533 * faddr: flash byte address. 1534 * mode: flags. 1535 * 1536 * Returns: 1537 * ql local function return status code. 1538 * 1539 * Context: 1540 * Kernel context. 1541 */ 1542 int 1543 ql_r_m_w_flash(ql_adapter_state_t *ha, caddr_t dp, uint32_t bc, uint32_t faddr, 1544 int mode) 1545 { 1546 uint8_t *bp; 1547 uint32_t xfer, bsize, saddr, ofst; 1548 int rval = 0; 1549 1550 QL_PRINT_9(CE_CONT, "(%d): started, dp=%ph, faddr=%xh, bc=%xh\n", 1551 ha->instance, (void *)dp, faddr, bc); 1552 1553 bsize = ha->xioctl->fdesc.block_size; 1554 saddr = faddr & ~(bsize - 1); 1555 ofst = faddr & (bsize - 1); 1556 1557 bp = kmem_zalloc(bsize, KM_SLEEP); 1558 1559 while (bc) { 1560 xfer = bc > bsize ? 
bsize : bc; 1561 if (ofst + xfer > bsize) { 1562 xfer = bsize - ofst; 1563 } 1564 QL_PRINT_9(CE_CONT, "(%d): dp=%ph, saddr=%xh, bc=%xh, " 1565 "ofst=%xh, xfer=%xh\n", ha->instance, (void *)dp, saddr, 1566 bc, ofst, xfer); 1567 1568 if (ofst || xfer < bsize) { 1569 /* Dump Flash sector. */ 1570 if ((rval = ql_dump_fcode(ha, bp, bsize, saddr)) != 1571 QL_SUCCESS) { 1572 EL(ha, "dump_flash status=%x\n", rval); 1573 break; 1574 } 1575 } 1576 1577 /* Set new data. */ 1578 if ((rval = ddi_copyin(dp, (caddr_t)(bp + ofst), xfer, 1579 mode)) != 0) { 1580 EL(ha, "ddi_copyin status=%xh, dp=%ph, ofst=%xh, " 1581 "xfer=%xh\n", rval, (void *)dp, ofst, xfer); 1582 rval = QL_FUNCTION_FAILED; 1583 break; 1584 } 1585 1586 /* Write to flash. */ 1587 if ((rval = ql_load_fcode(ha, bp, bsize, saddr)) != 1588 QL_SUCCESS) { 1589 EL(ha, "load_flash status=%x\n", rval); 1590 break; 1591 } 1592 bc -= xfer; 1593 dp += xfer; 1594 saddr += bsize; 1595 ofst = 0; 1596 } 1597 1598 kmem_free(bp, bsize); 1599 1600 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 1601 1602 return (rval); 1603 } 1604 1605 /* 1606 * ql_adm_op 1607 * Performs qladm utility operations 1608 * 1609 * Input: 1610 * ha: adapter state pointer. 1611 * arg: driver_op_t structure pointer. 1612 * mode: flags. 1613 * 1614 * Returns: 1615 * 1616 * Context: 1617 * Kernel context. 
1618 */ 1619 static int 1620 ql_adm_op(ql_adapter_state_t *ha, void *arg, int mode) 1621 { 1622 ql_adm_op_t dop; 1623 int rval = 0; 1624 1625 if (ddi_copyin(arg, &dop, sizeof (ql_adm_op_t), mode) != 0) { 1626 EL(ha, "failed, driver_op_t ddi_copyin\n"); 1627 return (EFAULT); 1628 } 1629 1630 QL_PRINT_9(CE_CONT, "(%d): started, cmd=%xh, buffer=%llx," 1631 " length=%xh, option=%xh\n", ha->instance, dop.cmd, dop.buffer, 1632 dop.length, dop.option); 1633 1634 switch (dop.cmd) { 1635 case QL_ADAPTER_INFO: 1636 rval = ql_adm_adapter_info(ha, &dop, mode); 1637 break; 1638 1639 case QL_EXTENDED_LOGGING: 1640 rval = ql_adm_extended_logging(ha, &dop); 1641 break; 1642 1643 case QL_LOOP_RESET: 1644 rval = ql_adm_loop_reset(ha); 1645 break; 1646 1647 case QL_DEVICE_LIST: 1648 rval = ql_adm_device_list(ha, &dop, mode); 1649 break; 1650 1651 case QL_PROP_UPDATE_INT: 1652 rval = ql_adm_prop_update_int(ha, &dop, mode); 1653 break; 1654 1655 case QL_UPDATE_PROPERTIES: 1656 rval = ql_adm_update_properties(ha); 1657 break; 1658 1659 case QL_FW_DUMP: 1660 rval = ql_adm_fw_dump(ha, &dop, arg, mode); 1661 break; 1662 1663 case QL_NVRAM_LOAD: 1664 rval = ql_adm_nvram_load(ha, &dop, mode); 1665 break; 1666 1667 case QL_NVRAM_DUMP: 1668 rval = ql_adm_nvram_dump(ha, &dop, mode); 1669 break; 1670 1671 case QL_FLASH_LOAD: 1672 rval = ql_adm_flash_load(ha, &dop, mode); 1673 break; 1674 1675 case QL_VPD_LOAD: 1676 rval = ql_adm_vpd_load(ha, &dop, mode); 1677 break; 1678 1679 case QL_VPD_DUMP: 1680 rval = ql_adm_vpd_dump(ha, &dop, mode); 1681 break; 1682 1683 case QL_VPD_GETTAG: 1684 rval = ql_adm_vpd_gettag(ha, &dop, mode); 1685 break; 1686 1687 case QL_UPD_FWMODULE: 1688 rval = ql_adm_updfwmodule(ha, &dop, mode); 1689 break; 1690 1691 default: 1692 EL(ha, "unsupported driver op cmd: %x\n", dop.cmd); 1693 return (EINVAL); 1694 } 1695 1696 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 1697 1698 return (rval); 1699 } 1700 1701 /* 1702 * ql_adm_adapter_info 1703 * Performs qladm 
QL_ADAPTER_INFO command 1704 * 1705 * Input: 1706 * ha: adapter state pointer. 1707 * dop: ql_adm_op_t structure pointer. 1708 * mode: flags. 1709 * 1710 * Returns: 1711 * 1712 * Context: 1713 * Kernel context. 1714 */ 1715 static int 1716 ql_adm_adapter_info(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode) 1717 { 1718 ql_adapter_info_t hba; 1719 uint8_t *dp; 1720 uint32_t length; 1721 int rval, i; 1722 1723 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 1724 1725 hba.device_id = ha->device_id; 1726 1727 dp = CFG_IST(ha, CFG_CTRL_24258081) ? 1728 &ha->init_ctrl_blk.cb24.port_name[0] : 1729 &ha->init_ctrl_blk.cb.port_name[0]; 1730 bcopy(dp, hba.wwpn, 8); 1731 1732 hba.d_id = ha->d_id.b24; 1733 1734 if (ha->xioctl->fdesc.flash_size == 0 && 1735 !(CFG_IST(ha, CFG_CTRL_2200) && !ha->subven_id)) { 1736 if (ql_stall_driver(ha, 0) != QL_SUCCESS) { 1737 EL(ha, "ql_stall_driver failed\n"); 1738 return (EBUSY); 1739 } 1740 1741 if ((rval = ql_setup_fcache(ha)) != QL_SUCCESS) { 1742 EL(ha, "ql_setup_flash failed=%xh\n", rval); 1743 if (rval == QL_FUNCTION_TIMEOUT) { 1744 return (EBUSY); 1745 } 1746 return (EIO); 1747 } 1748 1749 /* Resume I/O */ 1750 if (CFG_IST(ha, CFG_CTRL_24258081)) { 1751 ql_restart_driver(ha); 1752 } else { 1753 EL(ha, "isp_abort_needed for restart\n"); 1754 ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, 1755 DRIVER_STALL); 1756 } 1757 } 1758 hba.flash_size = ha->xioctl->fdesc.flash_size; 1759 1760 (void) strcpy(hba.driver_ver, QL_VERSION); 1761 1762 (void) sprintf(hba.fw_ver, "%d.%d.%d", ha->fw_major_version, 1763 ha->fw_minor_version, ha->fw_subminor_version); 1764 1765 bzero(hba.fcode_ver, sizeof (hba.fcode_ver)); 1766 1767 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/ 1768 rval = ddi_getlongprop(DDI_DEV_T_ANY, ha->dip, 1769 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&dp, &i); 1770 length = i; 1771 if (rval != DDI_PROP_SUCCESS) { 1772 EL(ha, "failed, ddi_getlongprop=%xh\n", rval); 1773 } else { 1774 if (length > 
(uint32_t)sizeof (hba.fcode_ver)) { 1775 length = sizeof (hba.fcode_ver) - 1; 1776 } 1777 bcopy((void *)dp, (void *)hba.fcode_ver, length); 1778 kmem_free(dp, length); 1779 } 1780 1781 if (ddi_copyout((void *)&hba, (void *)(uintptr_t)dop->buffer, 1782 dop->length, mode) != 0) { 1783 EL(ha, "failed, ddi_copyout\n"); 1784 return (EFAULT); 1785 } 1786 1787 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 1788 1789 return (0); 1790 } 1791 1792 /* 1793 * ql_adm_extended_logging 1794 * Performs qladm QL_EXTENDED_LOGGING command 1795 * 1796 * Input: 1797 * ha: adapter state pointer. 1798 * dop: ql_adm_op_t structure pointer. 1799 * 1800 * Returns: 1801 * 1802 * Context: 1803 * Kernel context. 1804 */ 1805 static int 1806 ql_adm_extended_logging(ql_adapter_state_t *ha, ql_adm_op_t *dop) 1807 { 1808 char prop_name[MAX_PROP_LENGTH]; 1809 int rval; 1810 1811 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 1812 1813 (void) sprintf(prop_name, "hba%d-extended-logging", ha->instance); 1814 1815 /*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/ 1816 rval = ddi_prop_update_int(DDI_DEV_T_NONE, ha->dip, prop_name, 1817 (int)dop->option); 1818 if (rval != DDI_PROP_SUCCESS) { 1819 EL(ha, "failed, prop_update = %xh\n", rval); 1820 return (EINVAL); 1821 } else { 1822 dop->option ? 1823 (ha->cfg_flags |= CFG_ENABLE_EXTENDED_LOGGING) : 1824 (ha->cfg_flags &= ~CFG_ENABLE_EXTENDED_LOGGING); 1825 } 1826 1827 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 1828 1829 return (0); 1830 } 1831 1832 /* 1833 * ql_adm_loop_reset 1834 * Performs qladm QL_LOOP_RESET command 1835 * 1836 * Input: 1837 * ha: adapter state pointer. 1838 * 1839 * Returns: 1840 * 1841 * Context: 1842 * Kernel context. 
1843 */ 1844 static int 1845 ql_adm_loop_reset(ql_adapter_state_t *ha) 1846 { 1847 int rval; 1848 1849 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 1850 1851 if (ha->task_daemon_flags & LOOP_DOWN) { 1852 (void) ql_full_login_lip(ha); 1853 } else if ((rval = ql_full_login_lip(ha)) != QL_SUCCESS) { 1854 EL(ha, "failed, ql_initiate_lip=%xh\n", rval); 1855 return (EIO); 1856 } 1857 1858 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 1859 1860 return (0); 1861 } 1862 1863 /* 1864 * ql_adm_device_list 1865 * Performs qladm QL_DEVICE_LIST command 1866 * 1867 * Input: 1868 * ha: adapter state pointer. 1869 * dop: ql_adm_op_t structure pointer. 1870 * mode: flags. 1871 * 1872 * Returns: 1873 * 1874 * Context: 1875 * Kernel context. 1876 */ 1877 static int 1878 ql_adm_device_list(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode) 1879 { 1880 ql_device_info_t dev; 1881 ql_link_t *link; 1882 ql_tgt_t *tq; 1883 uint32_t index, cnt; 1884 1885 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 1886 1887 cnt = 0; 1888 dev.address = 0xffffffff; 1889 1890 /* Scan port list for requested target and fill in the values */ 1891 for (link = NULL, index = 0; 1892 index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) { 1893 for (link = ha->dev[index].first; link != NULL; 1894 link = link->next) { 1895 tq = link->base_address; 1896 1897 if (!VALID_TARGET_ID(ha, tq->loop_id)) { 1898 continue; 1899 } 1900 if (cnt != dop->option) { 1901 cnt++; 1902 continue; 1903 } 1904 /* fill in the values */ 1905 bcopy(tq->port_name, dev.wwpn, 8); 1906 dev.address = tq->d_id.b24; 1907 dev.loop_id = tq->loop_id; 1908 if (tq->flags & TQF_TAPE_DEVICE) { 1909 dev.type = FCT_TAPE; 1910 } else if (tq->flags & TQF_INITIATOR_DEVICE) { 1911 dev.type = FCT_INITIATOR; 1912 } else { 1913 dev.type = FCT_TARGET; 1914 } 1915 break; 1916 } 1917 } 1918 1919 if (ddi_copyout((void *)&dev, (void *)(uintptr_t)dop->buffer, 1920 dop->length, mode) != 0) { 1921 EL(ha, "failed, ddi_copyout\n"); 1922 return 
(EFAULT); 1923 } 1924 1925 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 1926 1927 return (0); 1928 } 1929 1930 /* 1931 * ql_adm_update_properties 1932 * Performs qladm QL_UPDATE_PROPERTIES command 1933 * 1934 * Input: 1935 * ha: adapter state pointer. 1936 * 1937 * Returns: 1938 * 1939 * Context: 1940 * Kernel context. 1941 */ 1942 static int 1943 ql_adm_update_properties(ql_adapter_state_t *ha) 1944 { 1945 ql_comb_init_cb_t init_ctrl_blk; 1946 ql_comb_ip_init_cb_t ip_init_ctrl_blk; 1947 1948 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 1949 1950 /* Stall driver instance. */ 1951 (void) ql_stall_driver(ha, 0); 1952 1953 /* Save init control blocks. */ 1954 bcopy(&ha->init_ctrl_blk, &init_ctrl_blk, sizeof (ql_comb_init_cb_t)); 1955 bcopy(&ha->ip_init_ctrl_blk, &ip_init_ctrl_blk, 1956 sizeof (ql_comb_ip_init_cb_t)); 1957 1958 /* Update PCI configration. */ 1959 (void) ql_pci_sbus_config(ha); 1960 1961 /* Get configuration properties. */ 1962 (void) ql_nvram_config(ha); 1963 1964 /* Check for init firmware required. */ 1965 if (bcmp(&ha->init_ctrl_blk, &init_ctrl_blk, 1966 sizeof (ql_comb_init_cb_t)) != 0 || 1967 bcmp(&ha->ip_init_ctrl_blk, &ip_init_ctrl_blk, 1968 sizeof (ql_comb_ip_init_cb_t)) != 0) { 1969 1970 EL(ha, "isp_abort_needed\n"); 1971 ha->loop_down_timer = LOOP_DOWN_TIMER_START; 1972 TASK_DAEMON_LOCK(ha); 1973 ha->task_daemon_flags |= LOOP_DOWN | ISP_ABORT_NEEDED; 1974 TASK_DAEMON_UNLOCK(ha); 1975 } 1976 1977 /* Update AEN queue. */ 1978 if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) { 1979 ql_enqueue_aen(ha, MBA_PORT_UPDATE, NULL); 1980 } 1981 1982 /* Restart driver instance. */ 1983 ql_restart_driver(ha); 1984 1985 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 1986 1987 return (0); 1988 } 1989 1990 /* 1991 * ql_adm_prop_update_int 1992 * Performs qladm QL_PROP_UPDATE_INT command 1993 * 1994 * Input: 1995 * ha: adapter state pointer. 1996 * dop: ql_adm_op_t structure pointer. 1997 * mode: flags. 
1998 * 1999 * Returns: 2000 * 2001 * Context: 2002 * Kernel context. 2003 */ 2004 static int 2005 ql_adm_prop_update_int(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode) 2006 { 2007 char *prop_name; 2008 int rval; 2009 2010 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 2011 2012 prop_name = kmem_zalloc(dop->length, KM_SLEEP); 2013 2014 if (ddi_copyin((void *)(uintptr_t)dop->buffer, prop_name, dop->length, 2015 mode) != 0) { 2016 EL(ha, "failed, prop_name ddi_copyin\n"); 2017 kmem_free(prop_name, dop->length); 2018 return (EFAULT); 2019 } 2020 2021 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/ 2022 if ((rval = ddi_prop_update_int(DDI_DEV_T_NONE, ha->dip, prop_name, 2023 (int)dop->option)) != DDI_PROP_SUCCESS) { 2024 EL(ha, "failed, prop_update=%xh\n", rval); 2025 kmem_free(prop_name, dop->length); 2026 return (EINVAL); 2027 } 2028 2029 kmem_free(prop_name, dop->length); 2030 2031 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 2032 2033 return (0); 2034 } 2035 2036 /* 2037 * ql_adm_fw_dump 2038 * Performs qladm QL_FW_DUMP command 2039 * 2040 * Input: 2041 * ha: adapter state pointer. 2042 * dop: ql_adm_op_t structure pointer. 2043 * udop: user space ql_adm_op_t structure pointer. 2044 * mode: flags. 2045 * 2046 * Returns: 2047 * 2048 * Context: 2049 * Kernel context. 
2050 */ 2051 static int 2052 ql_adm_fw_dump(ql_adapter_state_t *ha, ql_adm_op_t *dop, void *udop, int mode) 2053 { 2054 caddr_t dmp; 2055 2056 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 2057 2058 if (dop->length < ha->risc_dump_size) { 2059 EL(ha, "failed, incorrect length=%xh, size=%xh\n", 2060 dop->length, ha->risc_dump_size); 2061 return (EINVAL); 2062 } 2063 2064 if (ha->ql_dump_state & QL_DUMP_VALID) { 2065 dmp = kmem_zalloc(ha->risc_dump_size, KM_SLEEP); 2066 2067 dop->length = (uint32_t)ql_ascii_fw_dump(ha, dmp); 2068 if (ddi_copyout((void *)dmp, (void *)(uintptr_t)dop->buffer, 2069 dop->length, mode) != 0) { 2070 EL(ha, "failed, ddi_copyout\n"); 2071 kmem_free(dmp, ha->risc_dump_size); 2072 return (EFAULT); 2073 } 2074 2075 kmem_free(dmp, ha->risc_dump_size); 2076 ha->ql_dump_state |= QL_DUMP_UPLOADED; 2077 2078 } else { 2079 EL(ha, "failed, no dump file\n"); 2080 dop->length = 0; 2081 } 2082 2083 if (ddi_copyout(dop, udop, sizeof (ql_adm_op_t), mode) != 0) { 2084 EL(ha, "failed, driver_op_t ddi_copyout\n"); 2085 return (EFAULT); 2086 } 2087 2088 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 2089 2090 return (0); 2091 } 2092 2093 /* 2094 * ql_adm_nvram_dump 2095 * Performs qladm QL_NVRAM_DUMP command 2096 * 2097 * Input: 2098 * ha: adapter state pointer. 2099 * dop: ql_adm_op_t structure pointer. 2100 * mode: flags. 2101 * 2102 * Returns: 2103 * 2104 * Context: 2105 * Kernel context. 
2106 */ 2107 static int 2108 ql_adm_nvram_dump(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode) 2109 { 2110 int rval; 2111 2112 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 2113 2114 if (dop->length < ha->nvram_cache->size) { 2115 EL(ha, "failed, length=%xh, size=%xh\n", dop->length, 2116 ha->nvram_cache->size); 2117 return (EINVAL); 2118 } 2119 2120 if ((rval = ql_nv_util_dump(ha, (void *)(uintptr_t)dop->buffer, 2121 mode)) != 0) { 2122 EL(ha, "failed, ql_nv_util_dump\n"); 2123 } else { 2124 /*EMPTY*/ 2125 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 2126 } 2127 2128 return (rval); 2129 } 2130 2131 /* 2132 * ql_adm_nvram_load 2133 * Performs qladm QL_NVRAM_LOAD command 2134 * 2135 * Input: 2136 * ha: adapter state pointer. 2137 * dop: ql_adm_op_t structure pointer. 2138 * mode: flags. 2139 * 2140 * Returns: 2141 * 2142 * Context: 2143 * Kernel context. 2144 */ 2145 static int 2146 ql_adm_nvram_load(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode) 2147 { 2148 int rval; 2149 2150 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 2151 2152 if (dop->length < ha->nvram_cache->size) { 2153 EL(ha, "failed, length=%xh, size=%xh\n", dop->length, 2154 ha->nvram_cache->size); 2155 return (EINVAL); 2156 } 2157 2158 if ((rval = ql_nv_util_load(ha, (void *)(uintptr_t)dop->buffer, 2159 mode)) != 0) { 2160 EL(ha, "failed, ql_nv_util_dump\n"); 2161 } else { 2162 /*EMPTY*/ 2163 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 2164 } 2165 2166 return (rval); 2167 } 2168 2169 /* 2170 * ql_adm_flash_load 2171 * Performs qladm QL_FLASH_LOAD command 2172 * 2173 * Input: 2174 * ha: adapter state pointer. 2175 * dop: ql_adm_op_t structure pointer. 2176 * mode: flags. 2177 * 2178 * Returns: 2179 * 2180 * Context: 2181 * Kernel context. 
2182 */ 2183 static int 2184 ql_adm_flash_load(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode) 2185 { 2186 uint8_t *dp; 2187 int rval; 2188 2189 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 2190 2191 dp = kmem_zalloc(dop->length, KM_SLEEP); 2192 2193 if (ddi_copyin((void *)(uintptr_t)dop->buffer, dp, dop->length, 2194 mode) != 0) { 2195 EL(ha, "ddi_copyin failed\n"); 2196 kmem_free(dp, dop->length); 2197 return (EFAULT); 2198 } 2199 2200 if (ql_stall_driver(ha, 0) != QL_SUCCESS) { 2201 EL(ha, "ql_stall_driver failed\n"); 2202 kmem_free(dp, dop->length); 2203 return (EBUSY); 2204 } 2205 2206 rval = (CFG_IST(ha, CFG_CTRL_24258081) ? 2207 ql_24xx_load_flash(ha, dp, dop->length, dop->option) : 2208 ql_load_flash(ha, dp, dop->length)); 2209 2210 ql_restart_driver(ha); 2211 2212 kmem_free(dp, dop->length); 2213 2214 if (rval != QL_SUCCESS) { 2215 EL(ha, "failed\n"); 2216 return (EIO); 2217 } 2218 2219 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 2220 2221 return (0); 2222 } 2223 2224 /* 2225 * ql_adm_vpd_dump 2226 * Performs qladm QL_VPD_DUMP command 2227 * 2228 * Input: 2229 * ha: adapter state pointer. 2230 * dop: ql_adm_op_t structure pointer. 2231 * mode: flags. 2232 * 2233 * Returns: 2234 * 2235 * Context: 2236 * Kernel context. 
2237 */ 2238 static int 2239 ql_adm_vpd_dump(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode) 2240 { 2241 int rval; 2242 2243 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 2244 2245 if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) { 2246 EL(ha, "hba does not support VPD\n"); 2247 return (EINVAL); 2248 } 2249 2250 if (dop->length < QL_24XX_VPD_SIZE) { 2251 EL(ha, "failed, length=%xh, size=%xh\n", dop->length, 2252 QL_24XX_VPD_SIZE); 2253 return (EINVAL); 2254 } 2255 2256 if ((rval = ql_vpd_dump(ha, (void *)(uintptr_t)dop->buffer, mode)) 2257 != 0) { 2258 EL(ha, "failed, ql_vpd_dump\n"); 2259 } else { 2260 /*EMPTY*/ 2261 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 2262 } 2263 2264 return (rval); 2265 } 2266 2267 /* 2268 * ql_adm_vpd_load 2269 * Performs qladm QL_VPD_LOAD command 2270 * 2271 * Input: 2272 * ha: adapter state pointer. 2273 * dop: ql_adm_op_t structure pointer. 2274 * mode: flags. 2275 * 2276 * Returns: 2277 * 2278 * Context: 2279 * Kernel context. 2280 */ 2281 static int 2282 ql_adm_vpd_load(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode) 2283 { 2284 int rval; 2285 2286 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 2287 2288 if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) { 2289 EL(ha, "hba does not support VPD\n"); 2290 return (EINVAL); 2291 } 2292 2293 if (dop->length < QL_24XX_VPD_SIZE) { 2294 EL(ha, "failed, length=%xh, size=%xh\n", dop->length, 2295 QL_24XX_VPD_SIZE); 2296 return (EINVAL); 2297 } 2298 2299 if ((rval = ql_vpd_load(ha, (void *)(uintptr_t)dop->buffer, mode)) 2300 != 0) { 2301 EL(ha, "failed, ql_vpd_dump\n"); 2302 } else { 2303 /*EMPTY*/ 2304 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 2305 } 2306 2307 return (rval); 2308 } 2309 2310 /* 2311 * ql_adm_vpd_gettag 2312 * Performs qladm QL_VPD_GETTAG command 2313 * 2314 * Input: 2315 * ha: adapter state pointer. 2316 * dop: ql_adm_op_t structure pointer. 2317 * mode: flags. 2318 * 2319 * Returns: 2320 * 2321 * Context: 2322 * Kernel context. 
2323 */ 2324 static int 2325 ql_adm_vpd_gettag(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode) 2326 { 2327 int rval = 0; 2328 uint8_t *lbuf; 2329 2330 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 2331 2332 if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) { 2333 EL(ha, "hba does not support VPD\n"); 2334 return (EINVAL); 2335 } 2336 2337 if ((lbuf = (uint8_t *)kmem_zalloc(dop->length, KM_SLEEP)) == NULL) { 2338 EL(ha, "mem alloc failure of %xh bytes\n", dop->length); 2339 rval = EFAULT; 2340 } else { 2341 if (ddi_copyin((void *)(uintptr_t)dop->buffer, lbuf, 2342 dop->length, mode) != 0) { 2343 EL(ha, "ddi_copyin failed\n"); 2344 kmem_free(lbuf, dop->length); 2345 return (EFAULT); 2346 } 2347 2348 if ((rval = ql_vpd_lookup(ha, lbuf, lbuf, (int32_t) 2349 dop->length)) < 0) { 2350 EL(ha, "failed vpd_lookup\n"); 2351 } else { 2352 if (ddi_copyout(lbuf, (void *)(uintptr_t)dop->buffer, 2353 strlen((int8_t *)lbuf)+1, mode) != 0) { 2354 EL(ha, "failed, ddi_copyout\n"); 2355 rval = EFAULT; 2356 } else { 2357 rval = 0; 2358 } 2359 } 2360 kmem_free(lbuf, dop->length); 2361 } 2362 2363 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 2364 2365 return (rval); 2366 } 2367 2368 /* 2369 * ql_adm_updfwmodule 2370 * Performs qladm QL_UPD_FWMODULE command 2371 * 2372 * Input: 2373 * ha: adapter state pointer. 2374 * dop: ql_adm_op_t structure pointer. 2375 * mode: flags. 2376 * 2377 * Returns: 2378 * 2379 * Context: 2380 * Kernel context. 
2381 */ 2382 /* ARGSUSED */ 2383 static int 2384 ql_adm_updfwmodule(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode) 2385 { 2386 int rval = DDI_SUCCESS; 2387 ql_link_t *link; 2388 ql_adapter_state_t *ha2 = NULL; 2389 uint16_t fw_class = (uint16_t)dop->option; 2390 2391 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 2392 2393 /* zero the firmware module reference count */ 2394 for (link = ql_hba.first; link != NULL; link = link->next) { 2395 ha2 = link->base_address; 2396 if (fw_class == ha2->fw_class) { 2397 if ((rval = ddi_modclose(ha2->fw_module)) != 2398 DDI_SUCCESS) { 2399 EL(ha2, "modclose rval=%xh\n", rval); 2400 break; 2401 } 2402 ha2->fw_module = NULL; 2403 } 2404 } 2405 2406 /* reload the f/w modules */ 2407 for (link = ql_hba.first; link != NULL; link = link->next) { 2408 ha2 = link->base_address; 2409 2410 if ((fw_class == ha2->fw_class) && (ha2->fw_class == NULL)) { 2411 if ((rval = (int32_t)ql_fwmodule_resolve(ha2)) != 2412 QL_SUCCESS) { 2413 EL(ha2, "unable to load f/w module: '%x' " 2414 "(rval=%xh)\n", ha2->fw_class, rval); 2415 rval = EFAULT; 2416 } else { 2417 EL(ha2, "f/w module updated: '%x'\n", 2418 ha2->fw_class); 2419 } 2420 2421 EL(ha2, "isp abort needed (%d)\n", ha->instance); 2422 2423 ql_awaken_task_daemon(ha2, NULL, ISP_ABORT_NEEDED, 0); 2424 2425 rval = 0; 2426 } 2427 } 2428 2429 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 2430 2431 return (rval); 2432 }