1 /* 2 * Copyright 2010 Sun Microsystems, Inc. All rights reserved. 3 * Use is subject to license terms. 4 */ 5 6 /* 7 * Copyright (c) 2009, Intel Corporation 8 * All rights reserved. 9 */ 10 11 /* 12 * Copyright (c) 2006 13 * Copyright (c) 2007 14 * Damien Bergamini <damien.bergamini@free.fr> 15 * 16 * Permission to use, copy, modify, and distribute this software for any 17 * purpose with or without fee is hereby granted, provided that the above 18 * copyright notice and this permission notice appear in all copies. 19 * 20 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 21 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 22 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 23 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 24 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 25 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 26 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
 */

/*
 * Intel(R) WiFi Link 6000 Driver
 */

#include <sys/types.h>
#include <sys/byteorder.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/strsubr.h>
#include <sys/ethernet.h>
#include <inet/common.h>
#include <inet/nd.h>
#include <inet/mi.h>
#include <sys/note.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/modctl.h>
#include <sys/devops.h>
#include <sys/dlpi.h>
#include <sys/mac_provider.h>
#include <sys/mac_wifi.h>
#include <sys/net80211.h>
#include <sys/net80211_proto.h>
#include <sys/varargs.h>
#include <sys/policy.h>
#include <sys/pci.h>

#include "iwp_calibration.h"
#include "iwp_hw.h"
#include "iwp_eeprom.h"
#include "iwp_var.h"
#include <inet/wifi_ioctl.h>

#ifdef DEBUG
/*
 * Per-subsystem debug message categories; OR any combination of these
 * into iwp_dbg_flags to enable the corresponding IWP_DBG() output.
 */
#define	IWP_DEBUG_80211		(1 << 0)
#define	IWP_DEBUG_CMD		(1 << 1)
#define	IWP_DEBUG_DMA		(1 << 2)
#define	IWP_DEBUG_EEPROM	(1 << 3)
#define	IWP_DEBUG_FW		(1 << 4)
#define	IWP_DEBUG_HW		(1 << 5)
#define	IWP_DEBUG_INTR		(1 << 6)
#define	IWP_DEBUG_MRR		(1 << 7)
#define	IWP_DEBUG_PIO		(1 << 8)
#define	IWP_DEBUG_RX		(1 << 9)
#define	IWP_DEBUG_SCAN		(1 << 10)
#define	IWP_DEBUG_TX		(1 << 11)
#define	IWP_DEBUG_RATECTL	(1 << 12)
#define	IWP_DEBUG_RADIO		(1 << 13)
#define	IWP_DEBUG_RESUME	(1 << 14)
#define	IWP_DEBUG_CALIBRATION	(1 << 15)
/*
 * if want to see debug message of a given section,
 * please set this flag to one of above values
 */
uint32_t iwp_dbg_flags = 0;
/*
 * IWP_DBG takes a double-parenthesized argument list, e.g.
 * IWP_DBG((IWP_DEBUG_TX, "fmt", ...)), so the whole call can be
 * compiled out to nothing in non-DEBUG builds.
 */
#define	IWP_DBG(x) \
	iwp_dbg x
#else
#define	IWP_DBG(x)
#endif

/* Opaque soft-state anchor for ddi_soft_state_*() (one iwp_sc_t per instance) */
static void *iwp_soft_state_p = NULL;

/*
 * ucode will be compiled into driver image
 * (the microcode byte array is textually included by the preprocessor)
 */
static uint8_t iwp_fw_bin [] = {
#include "fw-iw/iwp.ucode"
};

/*
 * DMA attributes for a shared page
 */
static ddi_dma_attr_t sh_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	0x1000,		/* alignment in bytes */
	0x1000,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/*
 * DMA attributes for a keep warm DRAM descriptor
 */
static ddi_dma_attr_t kw_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	0x1000,		/* alignment in bytes */
	0x1000,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/*
 * DMA attributes for a ring descriptor
 */
static ddi_dma_attr_t ring_desc_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	0x100,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/*
 * DMA attributes for a cmd
 */
static ddi_dma_attr_t cmd_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	4,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/*
 * DMA attributes for a rx buffer
 */
static ddi_dma_attr_t rx_buffer_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	0x100,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/*
 * DMA attributes for a tx buffer.
 * the maximum number of segments is 4 for the hardware.
 * now all the wifi drivers put the whole frame in a single
 * descriptor, so we define the maximum number of segments 1,
 * just the same as the rx_buffer. we consider leverage the HW
 * ability in the future, that is why we don't define rx and tx
 * buffer_dma_attr as the same.
 */
static ddi_dma_attr_t tx_buffer_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	4,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/*
 * DMA attributes for text and data part in the firmware
 */
static ddi_dma_attr_t fw_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0x7fffffff,	/* maximum DMAable byte count */
	0x10,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/*
 * regs access attributes
 */
static ddi_device_acc_attr_t iwp_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * DMA access attributes for descriptor
 */
static ddi_device_acc_attr_t iwp_dma_descattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * DMA access attributes
 * (no byte swapping -- used for raw buffers such as firmware images)
 */
static ddi_device_acc_attr_t iwp_dma_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * Forward declarations of all static driver-internal functions.
 */
static int	iwp_ring_init(iwp_sc_t *);
static void	iwp_ring_free(iwp_sc_t *);
static int	iwp_alloc_shared(iwp_sc_t *);
static void	iwp_free_shared(iwp_sc_t *);
static int	iwp_alloc_kw(iwp_sc_t *);
static void	iwp_free_kw(iwp_sc_t *);
static int	iwp_alloc_fw_dma(iwp_sc_t *);
static void	iwp_free_fw_dma(iwp_sc_t *);
static int	iwp_alloc_rx_ring(iwp_sc_t *);
static void	iwp_reset_rx_ring(iwp_sc_t *);
static void	iwp_free_rx_ring(iwp_sc_t *);
static int	iwp_alloc_tx_ring(iwp_sc_t *, iwp_tx_ring_t *,
    int, int);
static void	iwp_reset_tx_ring(iwp_sc_t *, iwp_tx_ring_t *);
static void	iwp_free_tx_ring(iwp_tx_ring_t *);
static ieee80211_node_t *iwp_node_alloc(ieee80211com_t *);
static void	iwp_node_free(ieee80211_node_t *);
static int	iwp_newstate(ieee80211com_t *, enum ieee80211_state, int);
static void	iwp_mac_access_enter(iwp_sc_t *);
static void	iwp_mac_access_exit(iwp_sc_t *);
static uint32_t	iwp_reg_read(iwp_sc_t *, uint32_t);
static void	iwp_reg_write(iwp_sc_t *, uint32_t, uint32_t);
static int	iwp_load_init_firmware(iwp_sc_t *);
static int	iwp_load_run_firmware(iwp_sc_t *);
static void	iwp_tx_intr(iwp_sc_t *, iwp_rx_desc_t *);
static void	iwp_cmd_intr(iwp_sc_t *, iwp_rx_desc_t *);
static uint_t	iwp_intr(caddr_t, caddr_t);
static int	iwp_eep_load(iwp_sc_t *);
static void	iwp_get_mac_from_eep(iwp_sc_t *);
static int	iwp_eep_sem_down(iwp_sc_t *);
static void	iwp_eep_sem_up(iwp_sc_t *);
static uint_t	iwp_rx_softintr(caddr_t, caddr_t);
static uint8_t	iwp_rate_to_plcp(int);
static int	iwp_cmd(iwp_sc_t *, int, const void *, int, int);
static void	iwp_set_led(iwp_sc_t *, uint8_t, uint8_t, uint8_t);
static int	iwp_hw_set_before_auth(iwp_sc_t *);
static int	iwp_scan(iwp_sc_t *);
static int	iwp_config(iwp_sc_t *);
static void	iwp_stop_master(iwp_sc_t *);
static int	iwp_power_up(iwp_sc_t *);
static int	iwp_preinit(iwp_sc_t *);
static int	iwp_init(iwp_sc_t *);
static void	iwp_stop(iwp_sc_t *);
static int	iwp_quiesce(dev_info_t *t);
static void	iwp_amrr_init(iwp_amrr_t *);
static void	iwp_amrr_timeout(iwp_sc_t *);
static void	iwp_amrr_ratectl(void *, ieee80211_node_t *);
static void	iwp_ucode_alive(iwp_sc_t *, iwp_rx_desc_t *);
static void	iwp_rx_phy_intr(iwp_sc_t *, iwp_rx_desc_t *);
static void	iwp_rx_mpdu_intr(iwp_sc_t *, iwp_rx_desc_t *);
static void	iwp_release_calib_buffer(iwp_sc_t *);
static int	iwp_init_common(iwp_sc_t *);
static uint8_t	*iwp_eep_addr_trans(iwp_sc_t *, uint32_t);
static int	iwp_put_seg_fw(iwp_sc_t *, uint32_t, uint32_t, uint32_t);
static int	iwp_alive_common(iwp_sc_t *);
static void	iwp_save_calib_result(iwp_sc_t *, iwp_rx_desc_t *);
static int	iwp_attach(dev_info_t *, ddi_attach_cmd_t);
static int	iwp_detach(dev_info_t *, ddi_detach_cmd_t);
static void	iwp_destroy_locks(iwp_sc_t *);
static int	iwp_send(ieee80211com_t *, mblk_t *, uint8_t);
static void	iwp_thread(iwp_sc_t *);
static int	iwp_run_state_config(iwp_sc_t *);
static int	iwp_fast_recover(iwp_sc_t *);
static void	iwp_overwrite_ic_default(iwp_sc_t *);
static int	iwp_add_ap_sta(iwp_sc_t *);
static int	iwp_alloc_dma_mem(iwp_sc_t *, size_t,
    ddi_dma_attr_t *, ddi_device_acc_attr_t *,
    uint_t, iwp_dma_t *);
static void	iwp_free_dma_mem(iwp_dma_t *);
static int	iwp_eep_ver_chk(iwp_sc_t *);
static void	iwp_set_chip_param(iwp_sc_t *);

/*
 * GLD specific operations
 */
static int	iwp_m_stat(void *, uint_t, uint64_t *);
static int	iwp_m_start(void *);
static void	iwp_m_stop(void *);
static int	iwp_m_unicst(void *, const uint8_t *);
static int	iwp_m_multicst(void *, boolean_t, const uint8_t *);
static int	iwp_m_promisc(void *, boolean_t);
static mblk_t	*iwp_m_tx(void *, mblk_t *);
static void	iwp_m_ioctl(void *, queue_t *, mblk_t *);
static int	iwp_m_setprop(void *arg, const char *pr_name,
    mac_prop_id_t wldp_pr_num, uint_t wldp_length, const void *wldp_buf);
static int	iwp_m_getprop(void *arg, const char *pr_name,
    mac_prop_id_t wldp_pr_num, uint_t wldp_length, void *wldp_buf);
static void	iwp_m_propinfo(void *, const char *, mac_prop_id_t,
    mac_prop_info_handle_t);

/*
 * Supported rates for 802.11b/g modes (in 500Kbps unit).
 */
static const struct ieee80211_rateset iwp_rateset_11b =
	{ 4, { 2, 4, 11, 22 } };

static const struct ieee80211_rateset iwp_rateset_11g =
	{ 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };

/*
 * For mfthread only
 */
extern pri_t minclsyspri;

#define	DRV_NAME_SP	"iwp"

/*
 * Module Loading Data & Entry Points
 */
DDI_DEFINE_STREAM_OPS(iwp_devops, nulldev, nulldev, iwp_attach,
    iwp_detach, nodev, NULL, D_MP, NULL, iwp_quiesce);

static struct modldrv iwp_modldrv = {
	&mod_driverops,
	"Intel(R) PumaPeak driver(N)",
	&iwp_devops
};

static struct modlinkage iwp_modlinkage = {
	MODREV_1,
	&iwp_modldrv,
	NULL
};

/*
 * Module entry point: initialize soft-state anchor, register the
 * MAC ops and install the module.  All setup is undone on failure.
 */
int
_init(void)
{
	int status;

	status = ddi_soft_state_init(&iwp_soft_state_p,
	    sizeof (iwp_sc_t), 1);
	if (status != DDI_SUCCESS) {
		return (status);
	}

	mac_init_ops(&iwp_devops, DRV_NAME_SP);
	status = mod_install(&iwp_modlinkage);
	if (status != DDI_SUCCESS) {
		mac_fini_ops(&iwp_devops);
		ddi_soft_state_fini(&iwp_soft_state_p);
	}

	return (status);
}

/*
 * Module entry point: unload; only tear down MAC ops and soft state
 * if mod_remove() succeeded.
 */
int
_fini(void)
{
	int status;

	status = mod_remove(&iwp_modlinkage);
	if (DDI_SUCCESS == status) {
		mac_fini_ops(&iwp_devops);
		ddi_soft_state_fini(&iwp_soft_state_p);
	}

	return (status);
}

int
_info(struct modinfo *mip)
{
	return (mod_info(&iwp_modlinkage, mip));
}

/*
 * Mac Call Back entries
 */
mac_callbacks_t	iwp_m_callbacks = {
	MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
	iwp_m_stat,
	iwp_m_start,
	iwp_m_stop,
	iwp_m_promisc,
	iwp_m_multicst,
	iwp_m_unicst,
	iwp_m_tx,
	NULL,
	iwp_m_ioctl,
	NULL,
	NULL,
	NULL,
	iwp_m_setprop,
	iwp_m_getprop,
	iwp_m_propinfo
};

#ifdef DEBUG
/*
 * Emit a console note if any of 'flags' is enabled in iwp_dbg_flags.
 * Called only through the IWP_DBG() macro.
 */
void
iwp_dbg(uint32_t flags, const char *fmt, ...)
{
	va_list ap;

	if (flags & iwp_dbg_flags) {
		va_start(ap, fmt);
		vcmn_err(CE_NOTE, fmt, ap);
		va_end(ap);
	}
}
#endif	/* DEBUG */

/*
 * device operations
 *
 * attach(9E) entry point.  Handles DDI_ATTACH (full bring-up) and
 * DDI_RESUME (re-init after suspend).  On failure each attach_failN
 * label unwinds everything acquired before that point, in reverse
 * order, by falling through the labels below it.
 */
int
iwp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	iwp_sc_t *sc;
	ieee80211com_t *ic;
	int instance, i;
	char strbuf[32];
	wifi_data_t wd = { 0 };
	mac_register_t *macp;
	int intr_type;
	int intr_count;
	int intr_actual;
	int err = DDI_FAILURE;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		instance = ddi_get_instance(dip);
		sc = ddi_get_soft_state(iwp_soft_state_p,
		    instance);
		ASSERT(sc != NULL);

		/* restart hardware only if it was running before suspend */
		if (sc->sc_flags & IWP_F_RUNNING) {
			(void) iwp_init(sc);
		}

		atomic_and_32(&sc->sc_flags, ~IWP_F_SUSPEND);

		IWP_DBG((IWP_DEBUG_RESUME, "iwp_attach(): "
		    "resume\n"));
		return (DDI_SUCCESS);
	default:
		goto attach_fail1;
	}

	instance = ddi_get_instance(dip);
	err = ddi_soft_state_zalloc(iwp_soft_state_p, instance);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to allocate soft state\n");
		goto attach_fail1;
	}

	sc = ddi_get_soft_state(iwp_soft_state_p, instance);
	ASSERT(sc != NULL);

	sc->sc_dip = dip;

	/*
	 * map configure space
	 */
	err = ddi_regs_map_setup(dip, 0, &sc->sc_cfg_base, 0, 0,
	    &iwp_reg_accattr, &sc->sc_cfg_handle);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to map config spaces regs\n");
		goto attach_fail2;
	}

	/* reject any PCI device ID not in the supported 6000-series list */
	sc->sc_dev_id = ddi_get16(sc->sc_cfg_handle,
	    (uint16_t *)(sc->sc_cfg_base + PCI_CONF_DEVID));
	if ((sc->sc_dev_id != 0x422B) &&
	    (sc->sc_dev_id != 0x422C) &&
	    (sc->sc_dev_id != 0x4238) &&
	    (sc->sc_dev_id != 0x4239) &&
	    (sc->sc_dev_id != 0x008d) &&
	    (sc->sc_dev_id != 0x008e)) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "Do not support this device\n");
		goto attach_fail3;
	}

	iwp_set_chip_param(sc);

	sc->sc_rev = ddi_get8(sc->sc_cfg_handle,
	    (uint8_t *)(sc->sc_cfg_base + PCI_CONF_REVID));

	/*
	 * keep from disturbing C3 state of CPU
	 */
	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base +
	    PCI_CFG_RETRY_TIMEOUT), 0);

	/*
	 * determine the size of buffer for frame and command to ucode
	 * (PCI cache line size register is presumably in 32-bit words;
	 * << 2 converts to bytes -- TODO confirm against PCI spec)
	 */
	sc->sc_clsz = ddi_get16(sc->sc_cfg_handle,
	    (uint16_t *)(sc->sc_cfg_base + PCI_CONF_CACHE_LINESZ));
	if (!sc->sc_clsz) {
		sc->sc_clsz = 16;
	}
	sc->sc_clsz = (sc->sc_clsz << 2);

	sc->sc_dmabuf_sz = roundup(0x1000 + sizeof (struct ieee80211_frame) +
	    IEEE80211_MTU + IEEE80211_CRC_LEN +
	    (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
	    IEEE80211_WEP_CRCLEN), sc->sc_clsz);

	/*
	 * Map operating registers
	 */
	err = ddi_regs_map_setup(dip, 1, &sc->sc_base,
	    0, 0, &iwp_reg_accattr, &sc->sc_handle);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to map device regs\n");
		goto attach_fail3;
	}

	/*
	 * this is used to differentiate type of hardware
	 */
	sc->sc_hw_rev = IWP_READ(sc, CSR_HW_REV);

	err = ddi_intr_get_supported_types(dip, &intr_type);
	if ((err != DDI_SUCCESS) || (!(intr_type & DDI_INTR_TYPE_FIXED))) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "fixed type interrupt is not supported\n");
		goto attach_fail4;
	}

	err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_FIXED, &intr_count);
	if ((err != DDI_SUCCESS) || (intr_count != 1)) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "no fixed interrupts\n");
		goto attach_fail4;
	}

	/* single fixed interrupt -> table of exactly one handle */
	sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);

	err = ddi_intr_alloc(dip, sc->sc_intr_htable, DDI_INTR_TYPE_FIXED, 0,
	    intr_count, &intr_actual, 0);
	if ((err != DDI_SUCCESS) || (intr_actual != 1)) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "ddi_intr_alloc() failed 0x%x\n", err);
		goto attach_fail5;
	}

	err = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_pri);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "ddi_intr_get_pri() failed 0x%x\n", err);
		goto attach_fail6;
	}

	/* all driver mutexes run at the device interrupt priority */
	mutex_init(&sc->sc_glock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(sc->sc_intr_pri));
	mutex_init(&sc->sc_tx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(sc->sc_intr_pri));
	mutex_init(&sc->sc_mt_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(sc->sc_intr_pri));

	cv_init(&sc->sc_cmd_cv, NULL, CV_DRIVER, NULL);
	cv_init(&sc->sc_put_seg_cv, NULL, CV_DRIVER, NULL);
	cv_init(&sc->sc_ucode_cv, NULL, CV_DRIVER, NULL);

	/*
	 * initialize the mfthread
	 */
	cv_init(&sc->sc_mt_cv, NULL, CV_DRIVER, NULL);
	sc->sc_mf_thread = NULL;
	sc->sc_mf_thread_switch = 0;

	/*
	 * Allocate shared buffer for communication between driver and ucode.
	 */
	err = iwp_alloc_shared(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to allocate shared page\n");
		goto attach_fail7;
	}

	(void) memset(sc->sc_shared, 0, sizeof (iwp_shared_t));

	/*
	 * Allocate keep warm page.
	 */
	err = iwp_alloc_kw(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to allocate keep warm page\n");
		goto attach_fail8;
	}

	/*
	 * Do some necessary hardware initializations.
	 */
	err = iwp_preinit(sc);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to initialize hardware\n");
		goto attach_fail9;
	}

	/*
	 * get hardware configurations from eeprom
	 */
	err = iwp_eep_load(sc);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to load eeprom\n");
		goto attach_fail9;
	}

	/*
	 * calibration information from EEPROM
	 */
	sc->sc_eep_calib = (struct iwp_eep_calibration *)
	    iwp_eep_addr_trans(sc, EEP_CALIBRATION);

	err = iwp_eep_ver_chk(sc);
	if (err != IWP_SUCCESS) {
		goto attach_fail9;
	}

	/*
	 * get MAC address of this chipset
	 */
	iwp_get_mac_from_eep(sc);


	/*
	 * initialize TX and RX ring buffers
	 */
	err = iwp_ring_init(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to allocate and initialize ring\n");
		goto attach_fail9;
	}

	sc->sc_hdr = (iwp_firmware_hdr_t *)iwp_fw_bin;

	/*
	 * copy ucode to dma buffer
	 *
	 * NOTE(review): this failure path jumps to attach_fail10, which
	 * skips iwp_free_fw_dma() (label attach_fail11 is never targeted
	 * by any goto).  If iwp_alloc_fw_dma() fails after allocating
	 * some of its buffers, those appear to leak -- looks like this
	 * should goto attach_fail11; verify against iwp_alloc_fw_dma().
	 */
	err = iwp_alloc_fw_dma(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to allocate firmware dma\n");
		goto attach_fail10;
	}

	/*
	 * Initialize the wifi part, which will be used by
	 * 802.11 module
	 */
	ic = &sc->sc_ic;
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
	ic->ic_state = IEEE80211_S_INIT;
	ic->ic_maxrssi = 100; /* experimental number */
	ic->ic_caps = IEEE80211_C_SHPREAMBLE | IEEE80211_C_TXPMGT |
	    IEEE80211_C_PMGT | IEEE80211_C_SHSLOT;

	/*
	 * Support WPA/WPA2
	 */
	ic->ic_caps |= IEEE80211_C_WPA;

	/*
	 * set supported .11b and .11g rates
	 */
	ic->ic_sup_rates[IEEE80211_MODE_11B] = iwp_rateset_11b;
	ic->ic_sup_rates[IEEE80211_MODE_11G] = iwp_rateset_11g;

	/*
	 * set supported .11b and .11g channels (1 through 11)
	 */
	for (i = 1; i <= 11; i++) {
		ic->ic_sup_channels[i].ich_freq =
		    ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
		ic->ic_sup_channels[i].ich_flags =
		    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
		    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ |
		    IEEE80211_CHAN_PASSIVE;
	}

	ic->ic_ibss_chan = &ic->ic_sup_channels[0];
	ic->ic_xmit = iwp_send;

	/*
	 * attach to 802.11 module
	 */
	ieee80211_attach(ic);

	/*
	 * different instance has different WPA door
	 */
	(void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
	    ddi_driver_name(dip),
	    ddi_get_instance(dip));

	/*
	 * Overwrite 80211 default configurations.
	 */
	iwp_overwrite_ic_default(sc);

	/*
	 * initialize 802.11 module
	 */
	ieee80211_media_init(ic);

	/*
	 * initialize default tx key
	 */
	ic->ic_def_txkey = 0;

	err = ddi_intr_add_softint(dip, &sc->sc_soft_hdl, DDI_INTR_SOFTPRI_MAX,
	    iwp_rx_softintr, (caddr_t)sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "add soft interrupt failed\n");
		goto attach_fail12;
	}

	err = ddi_intr_add_handler(sc->sc_intr_htable[0], iwp_intr,
	    (caddr_t)sc, NULL);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "ddi_intr_add_handle() failed\n");
		goto attach_fail13;
	}

	err = ddi_intr_enable(sc->sc_intr_htable[0]);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "ddi_intr_enable() failed\n");
		goto attach_fail14;
	}

	/*
	 * Initialize pointer to device specific functions
	 */
	wd.wd_secalloc = WIFI_SEC_NONE;
	wd.wd_opmode = ic->ic_opmode;
	IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_macaddr);

	/*
	 * create relation to GLD
	 */
	macp = mac_alloc(MAC_VERSION);
	if (NULL == macp) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to do mac_alloc()\n");
		goto attach_fail15;
	}

	macp->m_type_ident = MAC_PLUGIN_IDENT_WIFI;
	macp->m_driver = sc;
	macp->m_dip = dip;
	macp->m_src_addr = ic->ic_macaddr;
	macp->m_callbacks = &iwp_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = IEEE80211_MTU;
	macp->m_pdata = &wd;
	macp->m_pdata_size = sizeof (wd);

	/*
	 * Register the macp to mac
	 */
	err = mac_register(macp, &ic->ic_mach);
	mac_free(macp);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to do mac_register()\n");
		goto attach_fail15;
	}

	/*
	 * Create minor node of type DDI_NT_NET_WIFI
	 * (failure is only warned about, not treated as fatal)
	 */
	(void) snprintf(strbuf, sizeof (strbuf), DRV_NAME_SP"%d", instance);
	err = ddi_create_minor_node(dip, strbuf, S_IFCHR,
	    instance + 1, DDI_NT_NET_WIFI, 0);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to do ddi_create_minor_node()\n");
	}

	/*
	 * Notify link is down now
	 */
	mac_link_update(ic->ic_mach, LINK_STATE_DOWN);

	/*
	 * create the mf thread to handle the link status,
	 * recovery fatal error, etc.
	 */
	sc->sc_mf_thread_switch = 1;
	if (NULL == sc->sc_mf_thread) {
		sc->sc_mf_thread = thread_create((caddr_t)NULL, 0,
		    iwp_thread, sc, 0, &p0, TS_RUN, minclsyspri);
	}

	atomic_or_32(&sc->sc_flags, IWP_F_ATTACHED);

	return (DDI_SUCCESS);

	/*
	 * Unwind chain: each label releases one resource and falls
	 * through to the next.
	 */
attach_fail15:
	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
attach_fail14:
	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
attach_fail13:
	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
	sc->sc_soft_hdl = NULL;
attach_fail12:
	ieee80211_detach(ic);
attach_fail11:
	iwp_free_fw_dma(sc);
attach_fail10:
	iwp_ring_free(sc);
attach_fail9:
	iwp_free_kw(sc);
attach_fail8:
	iwp_free_shared(sc);
attach_fail7:
	iwp_destroy_locks(sc);
attach_fail6:
	(void) ddi_intr_free(sc->sc_intr_htable[0]);
attach_fail5:
	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
attach_fail4:
	ddi_regs_map_free(&sc->sc_handle);
attach_fail3:
	ddi_regs_map_free(&sc->sc_cfg_handle);
attach_fail2:
	ddi_soft_state_free(iwp_soft_state_p, instance);
attach_fail1:
	return (DDI_FAILURE);
}

/*
 * detach(9E) entry point.  Handles DDI_DETACH (full teardown, reverse
 * of attach) and DDI_SUSPEND (stop hardware, keep resources).
 */
int
iwp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	iwp_sc_t *sc;
	ieee80211com_t *ic;
	int err;

	sc = ddi_get_soft_state(iwp_soft_state_p, ddi_get_instance(dip));
	ASSERT(sc != NULL);
	ic = &sc->sc_ic;

	switch (cmd) {
	case DDI_DETACH:
		break;
	case DDI_SUSPEND:
		atomic_and_32(&sc->sc_flags, ~IWP_F_HW_ERR_RECOVER);
		atomic_and_32(&sc->sc_flags, ~IWP_F_RATE_AUTO_CTL);

		atomic_or_32(&sc->sc_flags, IWP_F_SUSPEND);

		if (sc->sc_flags & IWP_F_RUNNING) {
			iwp_stop(sc);
			ieee80211_new_state(ic, IEEE80211_S_INIT, -1);

		}

		IWP_DBG((IWP_DEBUG_RESUME, "iwp_detach(): "
		    "suspend\n"));
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	if (!(sc->sc_flags & IWP_F_ATTACHED)) {
		return (DDI_FAILURE);
	}

	/*
	 * Destroy the mf_thread
	 * (clear the run switch, then wait for the thread to exit and
	 * NULL out sc_mf_thread; an interrupted wait breaks out early)
	 */
	sc->sc_mf_thread_switch = 0;

	mutex_enter(&sc->sc_mt_lock);
	while (sc->sc_mf_thread != NULL) {
		if (cv_wait_sig(&sc->sc_mt_cv, &sc->sc_mt_lock) == 0) {
			break;
		}
	}
	mutex_exit(&sc->sc_mt_lock);

	/* refuse detach if GLD cannot quiesce the mac first */
	err = mac_disable(sc->sc_ic.ic_mach);
	if (err != DDI_SUCCESS) {
		return (err);
	}

	/*
	 * stop chipset
	 */
	iwp_stop(sc);

	DELAY(500000);

	/*
	 * release buffer for calibration
	 */
	iwp_release_calib_buffer(sc);

	/*
	 * Unregister from GLD
	 */
	(void) mac_unregister(sc->sc_ic.ic_mach);

	mutex_enter(&sc->sc_glock);
	iwp_free_fw_dma(sc);
	iwp_ring_free(sc);
	iwp_free_kw(sc);
	iwp_free_shared(sc);
	mutex_exit(&sc->sc_glock);

	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
	(void) ddi_intr_free(sc->sc_intr_htable[0]);
	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));

	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
	sc->sc_soft_hdl = NULL;

	/*
	 * detach from 80211 module
	 */
	ieee80211_detach(&sc->sc_ic);

	iwp_destroy_locks(sc);

	ddi_regs_map_free(&sc->sc_handle);
	ddi_regs_map_free(&sc->sc_cfg_handle);
	ddi_remove_minor_node(dip, NULL);
	ddi_soft_state_free(iwp_soft_state_p, ddi_get_instance(dip));

	return (DDI_SUCCESS);
}

/*
 * destroy all locks
 * (reverse of the cv_init/mutex_init sequence in iwp_attach())
 */
static void
iwp_destroy_locks(iwp_sc_t *sc)
{
	cv_destroy(&sc->sc_mt_cv);
	cv_destroy(&sc->sc_cmd_cv);
	cv_destroy(&sc->sc_put_seg_cv);
	cv_destroy(&sc->sc_ucode_cv);
	mutex_destroy(&sc->sc_mt_lock);
	mutex_destroy(&sc->sc_tx_lock);
	mutex_destroy(&sc->sc_glock);
}

/*
 * Allocate an area of memory and a DMA handle for accessing it
 *
 * On success, dma_p holds the handle, access handle, kernel virtual
 * address, bound length and the (single) DMA cookie.  On any failure
 * everything acquired so far is released and the handles are NULLed
 * so that iwp_free_dma_mem() is safe to call regardless.
 */
static int
iwp_alloc_dma_mem(iwp_sc_t *sc, size_t memsize,
    ddi_dma_attr_t *dma_attr_p, ddi_device_acc_attr_t *acc_attr_p,
    uint_t dma_flags, iwp_dma_t *dma_p)
{
	caddr_t vaddr;
	int err = DDI_FAILURE;

	/*
	 * Allocate handle
	 */
	err = ddi_dma_alloc_handle(sc->sc_dip, dma_attr_p,
	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
	if (err != DDI_SUCCESS) {
		dma_p->dma_hdl = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Allocate memory
	 */
	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
	    dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
	    DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
	if (err != DDI_SUCCESS) {
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
		dma_p->acc_hdl = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Bind the two together
	 */
	dma_p->mem_va = vaddr;
	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
	    vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
	    &dma_p->cookie, &dma_p->ncookies);
	if (err != DDI_DMA_MAPPED) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->acc_hdl = NULL;
		dma_p->dma_hdl = NULL;
		return (DDI_FAILURE);
	}

	/* bookkeeping fields unused by this allocator; mark as invalid */
	dma_p->nslots = ~0U;
	dma_p->size = ~0U;
	dma_p->token = ~0U;
	dma_p->offset = 0;
	return (DDI_SUCCESS);
}

/*
 * Free one allocated area of DMAable memory
 * (idempotent: NULL handles are skipped, so partially constructed
 * iwp_dma_t objects may be passed in safely)
 */
static void
iwp_free_dma_mem(iwp_dma_t *dma_p)
{
	if (dma_p->dma_hdl != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
	}

	if (dma_p->acc_hdl != NULL) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		dma_p->acc_hdl = NULL;
	}
}

/*
 * copy ucode into dma buffers
 *
 * Validates each firmware section size against the chip limits, then
 * allocates one DMA area per section and copies the section in.
 * Returns the status of the last (failed or final) allocation; on
 * failure, buffers already allocated are left for the caller to free
 * via iwp_free_fw_dma().
 */
static int
iwp_alloc_fw_dma(iwp_sc_t *sc)
{
	int err = DDI_FAILURE;
	iwp_dma_t *dma_p;
	char *t;

	/*
	 * firmware image layout:
	 * |HDR|<-TEXT->|<-DATA->|<-INIT_TEXT->|<-INIT_DATA->|<-BOOT->|
	 */

	/*
	 * Check firmware image size.
	 */
	if (LE_32(sc->sc_hdr->init_textsz) > RTC_INST_SIZE) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "firmware init text size 0x%x is too large\n",
		    LE_32(sc->sc_hdr->init_textsz));

		goto fail;
	}

	if (LE_32(sc->sc_hdr->init_datasz) > RTC_DATA_SIZE) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "firmware init data size 0x%x is too large\n",
		    LE_32(sc->sc_hdr->init_datasz));

		goto fail;
	}

	if (LE_32(sc->sc_hdr->textsz) > RTC_INST_SIZE) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "firmware text size 0x%x is too large\n",
		    LE_32(sc->sc_hdr->textsz));

		goto fail;
	}

	if (LE_32(sc->sc_hdr->datasz) > RTC_DATA_SIZE) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "firmware data size 0x%x is too large\n",
		    LE_32(sc->sc_hdr->datasz));

		goto fail;
	}

	/*
	 * copy text of runtime ucode
	 * ('t' walks the image starting just past the header)
	 */
	t = (char *)(sc->sc_hdr + 1);
	err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->textsz),
	    &fw_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_text);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "failed to allocate text dma memory.\n");
		goto fail;
	}

	dma_p = &sc->sc_dma_fw_text;

	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
	    "text[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->textsz));

	/*
	 * copy data and bak-data of runtime ucode
	 */
	t += LE_32(sc->sc_hdr->textsz);
	err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
	    &fw_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_data);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "failed to allocate data dma memory\n");
		goto fail;
	}

	dma_p = &sc->sc_dma_fw_data;

	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
	    "data[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));

	/*
	 * 't' is deliberately NOT advanced here: data_bak is a second
	 * copy of the same runtime data section.
	 */
	err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
	    &fw_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_data_bak);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "failed to allocate data bakup dma memory\n");
		goto fail;
	}

	dma_p = &sc->sc_dma_fw_data_bak;

	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
	    "data_bak[ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));

	/*
	 * copy text of init ucode
	 */
	t += LE_32(sc->sc_hdr->datasz);
	err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_textsz),
	    &fw_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_init_text);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "failed to allocate init text dma memory\n");
		goto fail;
	}

	dma_p = &sc->sc_dma_fw_init_text;

	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
	    "init_text[ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_textsz));

	/*
	 * copy data of init ucode
	 */
	t += LE_32(sc->sc_hdr->init_textsz);
	err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_datasz),
	    &fw_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_init_data);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "failed to allocate init data dma memory\n");
		goto fail;
	}

	dma_p = &sc->sc_dma_fw_init_data;

	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
	    "init_data[ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_datasz));

	/* boot section follows init data; keep a pointer, no copy here */
	sc->sc_boot = t + LE_32(sc->sc_hdr->init_datasz);
fail:
	return (err);
}

/*
 * Release all five firmware DMA areas; safe on partially
 * allocated state since iwp_free_dma_mem() skips NULL handles.
 */
static void
iwp_free_fw_dma(iwp_sc_t *sc)
{
	iwp_free_dma_mem(&sc->sc_dma_fw_text);
	iwp_free_dma_mem(&sc->sc_dma_fw_data);
	iwp_free_dma_mem(&sc->sc_dma_fw_data_bak);
	iwp_free_dma_mem(&sc->sc_dma_fw_init_text);
	iwp_free_dma_mem(&sc->sc_dma_fw_init_data);
}

/*
 * Allocate a shared buffer between host and NIC.
 */
static int
iwp_alloc_shared(iwp_sc_t *sc)
{
#ifdef DEBUG
	iwp_dma_t *dma_p;
#endif
	int err = DDI_FAILURE;

	/*
	 * must be aligned on a 4K-page boundary
	 * (alignment is enforced by sh_dma_attr / iwp_dma_descattr)
	 */
	err = iwp_alloc_dma_mem(sc, sizeof (iwp_shared_t),
	    &sh_dma_attr, &iwp_dma_descattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_sh);
	if (err != DDI_SUCCESS) {
		goto fail;
	}

	/* CPU-side view of the page shared with the device */
	sc->sc_shared = (iwp_shared_t *)sc->sc_dma_sh.mem_va;

#ifdef DEBUG
	dma_p = &sc->sc_dma_sh;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_shared(): "
	    "sh[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	return (err);
fail:
	iwp_free_shared(sc);
	return (err);
}

static void
iwp_free_shared(iwp_sc_t *sc)
{
	iwp_free_dma_mem(&sc->sc_dma_sh);
}

/*
 * Allocate a keep warm page.
 */
static int
iwp_alloc_kw(iwp_sc_t *sc)
{
#ifdef DEBUG
	iwp_dma_t *dma_p;
#endif
	int err = DDI_FAILURE;

	/*
	 * must be aligned on a 4K-page boundary
	 */
	err = iwp_alloc_dma_mem(sc, IWP_KW_SIZE,
	    &kw_dma_attr, &iwp_dma_descattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_kw);
	if (err != DDI_SUCCESS) {
		goto fail;
	}

#ifdef DEBUG
	dma_p = &sc->sc_dma_kw;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_kw(): "
	    "kw[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	return (err);
fail:
	iwp_free_kw(sc);
	return (err);
}

static void
iwp_free_kw(iwp_sc_t *sc)
{
	iwp_free_dma_mem(&sc->sc_dma_kw);
}

/*
 * initialize RX ring buffers
 *
 * Allocates the RX descriptor ring (one 32-bit device address per slot)
 * plus RX_QUEUE_SIZE frame buffers, and points each descriptor at its
 * buffer. On failure everything allocated so far is released via
 * iwp_free_rx_ring().
 */
static int
iwp_alloc_rx_ring(iwp_sc_t *sc)
{
	iwp_rx_ring_t *ring;
	iwp_rx_data_t *data;
#ifdef DEBUG
	iwp_dma_t *dma_p;
#endif
	int i, err = DDI_FAILURE;

	ring = &sc->sc_rxq;
	ring->cur = 0;

	/*
	 * allocate RX description ring buffer
	 */
	err = iwp_alloc_dma_mem(sc, RX_QUEUE_SIZE * sizeof (uint32_t),
	    &ring_desc_dma_attr, &iwp_dma_descattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &ring->dma_desc);
	if (err != DDI_SUCCESS) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
		    "dma alloc rx ring desc "
		    "failed\n"));
		goto fail;
	}

	ring->desc = (uint32_t *)ring->dma_desc.mem_va;
#ifdef DEBUG
	dma_p = &ring->dma_desc;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
	    "rx bd[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	/*
	 * Allocate Rx frame buffers.
	 */
	for (i = 0; i < RX_QUEUE_SIZE; i++) {
		data = &ring->data[i];
		err = iwp_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
		    &rx_buffer_dma_attr, &iwp_dma_accattr,
		    DDI_DMA_READ | DDI_DMA_STREAMING,
		    &data->dma_data);
		if (err != DDI_SUCCESS) {
			IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
			    "dma alloc rx ring "
			    "buf[%d] failed\n", i));
			goto fail;
		}
		/*
		 * the physical address bit [8-36] are used,
		 * instead of bit [0-31] in 3945.
		 */
		ring->desc[i] = (uint32_t)
		    (data->dma_data.cookie.dmac_address >> 8);
	}

#ifdef DEBUG
	dma_p = &ring->data[0].dma_data;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
	    "rx buffer[0][ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	/* flush the descriptor addresses out to the device */
	IWP_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);

	return (err);

fail:
	iwp_free_rx_ring(sc);
	return (err);
}

/*
 * disable RX ring
 *
 * Writes 0 to the channel-0 RX config register and polls (up to ~2s)
 * for the idle bit (bit 24) in the RX status register.
 */
static void
iwp_reset_rx_ring(iwp_sc_t *sc)
{
	int n;

	iwp_mac_access_enter(sc);
	IWP_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	for (n = 0; n < 2000; n++) {
		if (IWP_READ(sc, FH_MEM_RSSR_RX_STATUS_REG) & (1 << 24)) {
			break;
		}
		DELAY(1000);
	}
#ifdef DEBUG
	if (2000 == n) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_reset_rx_ring(): "
		    "timeout resetting Rx ring\n"));
	}
#endif
	iwp_mac_access_exit(sc);

	sc->sc_rxq.cur = 0;
}

static void
iwp_free_rx_ring(iwp_sc_t *sc)
{
	int i;

	for (i = 0; i < RX_QUEUE_SIZE; i++) {
		if (sc->sc_rxq.data[i].dma_data.dma_hdl) {
			IWP_DMA_SYNC(sc->sc_rxq.data[i].dma_data,
			    DDI_DMA_SYNC_FORCPU);
		}

		iwp_free_dma_mem(&sc->sc_rxq.data[i].dma_data);
	}

	if (sc->sc_rxq.dma_desc.dma_hdl) {
		IWP_DMA_SYNC(sc->sc_rxq.dma_desc, DDI_DMA_SYNC_FORDEV);
	}

	iwp_free_dma_mem(&sc->sc_rxq.dma_desc);
}

/*
 * initialize TX ring buffers
 *
 * Allocates the TX descriptor ring, the per-slot ucode command buffers
 * and TFD_QUEUE_SIZE_MAX frame buffers for queue `qid'. `slots' only
 * sets ring->window; the ring itself always has TFD_QUEUE_SIZE_MAX
 * entries.
 */
static int
iwp_alloc_tx_ring(iwp_sc_t *sc, iwp_tx_ring_t *ring,
    int slots, int qid)
{
	iwp_tx_data_t *data;
	iwp_tx_desc_t *desc_h;
	uint32_t paddr_desc_h;
	iwp_cmd_t *cmd_h;
	uint32_t paddr_cmd_h;
#ifdef DEBUG
	iwp_dma_t *dma_p;
#endif
	int i, err = DDI_FAILURE;
	ring->qid = qid;
	ring->count =
	    TFD_QUEUE_SIZE_MAX;
	ring->window = slots;
	ring->queued = 0;
	ring->cur = 0;
	ring->desc_cur = 0;

	/*
	 * allocate buffer for TX descriptor ring
	 */
	err = iwp_alloc_dma_mem(sc,
	    TFD_QUEUE_SIZE_MAX * sizeof (iwp_tx_desc_t),
	    &ring_desc_dma_attr, &iwp_dma_descattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &ring->dma_desc);
	if (err != DDI_SUCCESS) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
		    "dma alloc tx ring desc[%d] "
		    "failed\n", qid));
		goto fail;
	}

#ifdef DEBUG
	dma_p = &ring->dma_desc;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
	    "tx bd[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	desc_h = (iwp_tx_desc_t *)ring->dma_desc.mem_va;
	paddr_desc_h = ring->dma_desc.cookie.dmac_address;

	/*
	 * allocate buffer for ucode command
	 */
	err = iwp_alloc_dma_mem(sc,
	    TFD_QUEUE_SIZE_MAX * sizeof (iwp_cmd_t),
	    &cmd_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &ring->dma_cmd);
	if (err != DDI_SUCCESS) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
		    "dma alloc tx ring cmd[%d]"
		    " failed\n", qid));
		goto fail;
	}

#ifdef DEBUG
	dma_p = &ring->dma_cmd;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
	    "tx cmd[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	cmd_h = (iwp_cmd_t *)ring->dma_cmd.mem_va;
	paddr_cmd_h = ring->dma_cmd.cookie.dmac_address;

	/*
	 * Allocate Tx frame buffers.
	 * KM_NOSLEEP: this can fail, hence the NULL check below.
	 */
	ring->data = kmem_zalloc(sizeof (iwp_tx_data_t) * TFD_QUEUE_SIZE_MAX,
	    KM_NOSLEEP);
	if (NULL == ring->data) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
		    "could not allocate "
		    "tx data slots\n"));
		goto fail;
	}

	for (i = 0; i < TFD_QUEUE_SIZE_MAX; i++) {
		data = &ring->data[i];
		err = iwp_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
		    &tx_buffer_dma_attr, &iwp_dma_accattr,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
		    &data->dma_data);
		if (err != DDI_SUCCESS) {
			IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
			    "dma alloc tx "
			    "ring buf[%d] failed\n", i));
			goto fail;
		}

		/*
		 * give each slot its own CPU pointer and matching device
		 * address into the descriptor and command arrays
		 */
		data->desc = desc_h + i;
		data->paddr_desc = paddr_desc_h +
		    _PTRDIFF(data->desc, desc_h);
		data->cmd = cmd_h + i;
		data->paddr_cmd = paddr_cmd_h +
		    _PTRDIFF(data->cmd, cmd_h);
	}
#ifdef DEBUG
	dma_p = &ring->data[0].dma_data;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
	    "tx buffer[0][ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	return (err);

fail:
	iwp_free_tx_ring(ring);

	return (err);
}

/*
 * disable TX ring
 *
 * Stops the channel's TX config, polls (up to ~2ms) for the channel-idle
 * status bit, then resets the ring's software indices.
 */
static void
iwp_reset_tx_ring(iwp_sc_t *sc, iwp_tx_ring_t *ring)
{
	iwp_tx_data_t *data;
	int i, n;

	iwp_mac_access_enter(sc);

	IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(ring->qid), 0);
	for (n = 0; n < 200; n++) {
		if (IWP_READ(sc, IWP_FH_TSSR_TX_STATUS_REG) &
		    IWP_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ring->qid)) {
			break;
		}
		DELAY(10);
	}

#ifdef DEBUG
	if (200 == n) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_reset_tx_ring(): "
		    "timeout reset tx ring %d\n",
		    ring->qid));
	}
#endif

	iwp_mac_access_exit(sc);

	/* by pass, if it's quiesce */
	if (!(sc->sc_flags & IWP_F_QUIESCED)) {
		for (i = 0; i < ring->count; i++) {
			data = &ring->data[i];
			IWP_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
		}
	}

	ring->queued = 0;
	ring->cur = 0;
	ring->desc_cur = 0;
}

/*
 * Release everything iwp_alloc_tx_ring() allocated for one ring; safe
 * on a partially-constructed ring.
 */
static void
iwp_free_tx_ring(iwp_tx_ring_t *ring)
{
	int i;

	if (ring->dma_desc.dma_hdl != NULL) {
		IWP_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
	}
	iwp_free_dma_mem(&ring->dma_desc);

	if (ring->dma_cmd.dma_hdl != NULL) {
		IWP_DMA_SYNC(ring->dma_cmd, DDI_DMA_SYNC_FORDEV);
	}
	iwp_free_dma_mem(&ring->dma_cmd);

	if (ring->data != NULL) {
		for (i = 0; i < ring->count; i++) {
			if (ring->data[i].dma_data.dma_hdl) {
				IWP_DMA_SYNC(ring->data[i].dma_data,
				    DDI_DMA_SYNC_FORDEV);
			}
			iwp_free_dma_mem(&ring->data[i].dma_data);
		}
		kmem_free(ring->data, ring->count * sizeof (iwp_tx_data_t));
	}
}

/*
 * initialize TX and RX ring
 *
 * Data queues get TFD_TX_CMD_SLOTS; the command queue
 * (IWP_CMD_QUEUE_NUM) gets TFD_CMD_SLOTS. On failure the caller is
 * expected to tear down with iwp_ring_free().
 */
static int
iwp_ring_init(iwp_sc_t *sc)
{
	int i, err = DDI_FAILURE;

	for (i = 0; i < IWP_NUM_QUEUES; i++) {
		/* the command queue is set up separately below */
		if (IWP_CMD_QUEUE_NUM == i) {
			continue;
		}

		err = iwp_alloc_tx_ring(sc, &sc->sc_txq[i], TFD_TX_CMD_SLOTS,
		    i);
		if (err != DDI_SUCCESS) {
			goto fail;
		}
	}

	/*
	 * initialize command queue
	 */
	err = iwp_alloc_tx_ring(sc, &sc->sc_txq[IWP_CMD_QUEUE_NUM],
	    TFD_CMD_SLOTS, IWP_CMD_QUEUE_NUM);
	if (err != DDI_SUCCESS) {
		goto fail;
	}

	err = iwp_alloc_rx_ring(sc);
	if (err != DDI_SUCCESS) {
		goto fail;
	}

fail:
	return (err);
}

static void
iwp_ring_free(iwp_sc_t *sc)
{
	int i = IWP_NUM_QUEUES;

	iwp_free_rx_ring(sc);
	while (--i >= 0) {
		iwp_free_tx_ring(&sc->sc_txq[i]);
	}
}

/*
 * net80211 node_alloc callback: the driver embeds the generic node in
 * its rate-control context (iwp_amrr_t) and returns the embedded node.
 */
/* ARGSUSED */
static ieee80211_node_t *
iwp_node_alloc(ieee80211com_t *ic)
{
	iwp_amrr_t *amrr;

	amrr = kmem_zalloc(sizeof (iwp_amrr_t), KM_SLEEP);

	iwp_amrr_init(amrr);

	/* return the embedded net80211 node, not the wrapper itself */
	return (&amrr->in);
}

/*
 * net80211 node_free callback: runs the generic cleanup, releases the
 * optional IEs, then frees the enclosing iwp_amrr_t allocated by
 * iwp_node_alloc().
 */
static void
iwp_node_free(ieee80211_node_t *in)
{
	ieee80211com_t *ic;

	if ((NULL == in) ||
	    (NULL == in->in_ic)) {
		cmn_err(CE_WARN, "iwp_node_free() "
		    "Got a NULL point from Net80211 module\n");
		return;
	}
	ic = in->in_ic;

	if (ic->ic_node_cleanup != NULL) {
		ic->ic_node_cleanup(in);
	}

	if (in->in_wpa_ie != NULL) {
		ieee80211_free(in->in_wpa_ie);
	}

	if (in->in_wme_ie != NULL) {
		ieee80211_free(in->in_wme_ie);
	}

	if (in->in_htcap_ie != NULL) {
		ieee80211_free(in->in_htcap_ie);
	}

	/* the node is embedded in an iwp_amrr_t; free the whole wrapper */
	kmem_free(in, sizeof (iwp_amrr_t));
}


/*
 * change station's state. this function will be invoked by 80211 module
 * when need to change staton's state.
 *
 * Runs under sc_glock; note the SCAN->SCAN path drops and reacquires
 * the lock around the net80211 callback, and most paths chain to the
 * saved sc_newstate handler at the end.
 */
static int
iwp_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
{
	iwp_sc_t *sc;
	ieee80211_node_t *in;
	enum ieee80211_state ostate;
	iwp_add_sta_t node;
	int i, err = IWP_FAIL;

	if (NULL == ic) {
		return (err);
	}
	sc = (iwp_sc_t *)ic;
	in = ic->ic_bss;
	ostate = ic->ic_state;

	mutex_enter(&sc->sc_glock);

	switch (nstate) {
	case IEEE80211_S_SCAN:
		switch (ostate) {
		case IEEE80211_S_INIT:
			atomic_or_32(&sc->sc_flags, IWP_F_SCANNING);
			iwp_set_led(sc, 2, 10, 2);

			/*
			 * clear association to receive beacons from
			 * all BSS'es
			 */
			sc->sc_config.assoc_id = 0;
			sc->sc_config.filter_flags &=
			    ~LE_32(RXON_FILTER_ASSOC_MSK);

			IWP_DBG((IWP_DEBUG_80211, "iwp_newstate(): "
			    "config chan %d "
			    "flags %x filter_flags %x\n",
			    LE_16(sc->sc_config.chan),
			    LE_32(sc->sc_config.flags),
			    LE_32(sc->sc_config.filter_flags)));

			err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config,
			    sizeof (iwp_rxon_cmd_t), 1);
			if (err != IWP_SUCCESS) {
				cmn_err(CE_WARN, "iwp_newstate(): "
				    "could not clear association\n");
				atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
				mutex_exit(&sc->sc_glock);
				return (err);
			}

			/* add broadcast node to send probe request */
			(void) memset(&node, 0, sizeof (node));
			(void) memset(&node.sta.addr, 0xff, IEEE80211_ADDR_LEN);
			node.sta.sta_id = IWP_BROADCAST_ID;
			err = iwp_cmd(sc, REPLY_ADD_STA, &node,
			    sizeof (node), 1);
			if (err != IWP_SUCCESS) {
				cmn_err(CE_WARN, "iwp_newstate(): "
				    "could not add broadcast node\n");
				atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
				mutex_exit(&sc->sc_glock);
				return (err);
			}
			break;
		case IEEE80211_S_SCAN:
			/*
			 * net80211 callback may sleep / call back into the
			 * driver, so drop sc_glock around it
			 */
			mutex_exit(&sc->sc_glock);
			/* step to next channel before actual FW scan */
			err = sc->sc_newstate(ic, nstate, arg);
			mutex_enter(&sc->sc_glock);
			if ((err != 0) || ((err = iwp_scan(sc)) != 0)) {
				cmn_err(CE_WARN, "iwp_newstate(): "
				    "could not initiate scan\n");
				atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
				ieee80211_cancel_scan(ic);
			}
			mutex_exit(&sc->sc_glock);
			return (err);
		default:
			break;
		}
		sc->sc_clk = 0;
		break;

	case IEEE80211_S_AUTH:
		if (ostate == IEEE80211_S_SCAN) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
		}

		/*
		 * reset state to handle reassociations correctly
		 */
		sc->sc_config.assoc_id = 0;
		sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);

		/*
		 * before sending authentication and association request frame,
		 * we need do something in the hardware, such as setting the
		 * channel same to the target AP...
		 */
		if ((err = iwp_hw_set_before_auth(sc)) != 0) {
			IWP_DBG((IWP_DEBUG_80211, "iwp_newstate(): "
			    "could not send authentication request\n"));
			mutex_exit(&sc->sc_glock);
			return (err);
		}
		break;

	case IEEE80211_S_RUN:
		if (ostate == IEEE80211_S_SCAN) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
		}

		if (IEEE80211_M_MONITOR == ic->ic_opmode) {
			/* let LED blink when monitoring */
			iwp_set_led(sc, 2, 10, 10);
			break;
		}

		IWP_DBG((IWP_DEBUG_80211, "iwp_newstate(): "
		    "associated.\n"));

		err = iwp_run_state_config(sc);
		if (err != IWP_SUCCESS) {
			cmn_err(CE_WARN, "iwp_newstate(): "
			    "failed to set up association\n");
			mutex_exit(&sc->sc_glock);
			return (err);
		}

		/*
		 * start automatic rate control
		 */
		if (IEEE80211_FIXED_RATE_NONE == ic->ic_fixed_rate) {
			atomic_or_32(&sc->sc_flags, IWP_F_RATE_AUTO_CTL);

			/*
			 * set rate to some reasonable initial value
			 * (highest rate not exceeding 72 == 36 Mb/s in
			 * half-Mb/s units — TODO confirm units)
			 */
			i = in->in_rates.ir_nrates - 1;
			while (i > 0 && IEEE80211_RATE(i) > 72) {
				i--;
			}
			in->in_txrate = i;

		} else {
			atomic_and_32(&sc->sc_flags, ~IWP_F_RATE_AUTO_CTL);
		}

		/*
		 * set LED on after associated
		 */
		iwp_set_led(sc, 2, 0, 1);
		break;

	case IEEE80211_S_INIT:
		if (ostate == IEEE80211_S_SCAN) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
		}
		/*
		 * set LED off after init
		 */
		iwp_set_led(sc, 2, 1, 0);
		break;

	case IEEE80211_S_ASSOC:
		if (ostate == IEEE80211_S_SCAN) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
		}
		break;
	}

	mutex_exit(&sc->sc_glock);

	/* chain to net80211's original state handler */
	return (sc->sc_newstate(ic, nstate, arg));
}

/*
 * exclusive access to mac begin.
 */
static void
iwp_mac_access_enter(iwp_sc_t *sc)
{
	uint32_t tmp;
	int n;

	/* request MAC access, then poll until the clock is ready */
	tmp = IWP_READ(sc, CSR_GP_CNTRL);
	IWP_WRITE(sc, CSR_GP_CNTRL,
	    tmp | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* wait until we succeed */
	for (n = 0; n < 1000; n++) {
		if ((IWP_READ(sc, CSR_GP_CNTRL) &
		    (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
		    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)) ==
		    CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN) {
			break;
		}
		DELAY(10);
	}

#ifdef DEBUG
	if (1000 == n) {
		IWP_DBG((IWP_DEBUG_PIO, "iwp_mac_access_enter(): "
		    "could not lock memory\n"));
	}
#endif
}

/*
 * exclusive access to mac end.
 */
static void
iwp_mac_access_exit(iwp_sc_t *sc)
{
	uint32_t tmp = IWP_READ(sc, CSR_GP_CNTRL);
	IWP_WRITE(sc, CSR_GP_CNTRL,
	    tmp & ~CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}

/*
 * this function defined here for future use.
 * static uint32_t
 * iwp_mem_read(iwp_sc_t *sc, uint32_t addr)
 * {
 *	IWP_WRITE(sc, HBUS_TARG_MEM_RADDR, addr);
 *	return (IWP_READ(sc, HBUS_TARG_MEM_RDAT));
 * }
 */

/*
 * write mac memory
 */
static void
iwp_mem_write(iwp_sc_t *sc, uint32_t addr, uint32_t data)
{
	IWP_WRITE(sc, HBUS_TARG_MEM_WADDR, addr);
	IWP_WRITE(sc, HBUS_TARG_MEM_WDAT, data);
}

/*
 * read mac register
 * (the (3 << 24) bits select the access width — TODO confirm against
 * hardware documentation)
 */
static uint32_t
iwp_reg_read(iwp_sc_t *sc, uint32_t addr)
{
	IWP_WRITE(sc, HBUS_TARG_PRPH_RADDR, addr | (3 << 24));
	return (IWP_READ(sc, HBUS_TARG_PRPH_RDAT));
}

/*
 * write mac register
 */
static void
iwp_reg_write(iwp_sc_t *sc, uint32_t addr, uint32_t data)
{
	IWP_WRITE(sc, HBUS_TARG_PRPH_WADDR, addr | (3 << 24));
	IWP_WRITE(sc, HBUS_TARG_PRPH_WDAT, data);
}


/*
 * steps of loading ucode:
 * load init ucode=>init alive=>calibrate=>
 * receive calibration result=>reinitialize NIC=>
 * load runtime ucode=>runtime alive=>
 * send calibration result=>running.
 */
static int
iwp_load_init_firmware(iwp_sc_t *sc)
{
	int err = IWP_FAIL;
	clock_t clk;

	/* caller holds sc_glock (required by the cv_timedwait below) */
	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	/*
	 * load init_text section of uCode to hardware
	 */
	err = iwp_put_seg_fw(sc, sc->sc_dma_fw_init_text.cookie.dmac_address,
	    RTC_INST_LOWER_BOUND, sc->sc_dma_fw_init_text.cookie.dmac_size);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_load_init_firmware(): "
		    "failed to write init uCode.\n");
		return (err);
	}

	/* one-second deadline for the DMA-complete interrupt */
	clk = ddi_get_lbolt() + drv_usectohz(1000000);

	/* wait loading init_text until completed or timeout */
	while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		cmn_err(CE_WARN, "iwp_load_init_firmware(): "
		    "timeout waiting for init uCode load.\n");
		return (IWP_FAIL);
	}

	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	/*
	 * load init_data section of uCode to hardware
	 */
	err = iwp_put_seg_fw(sc, sc->sc_dma_fw_init_data.cookie.dmac_address,
	    RTC_DATA_LOWER_BOUND, sc->sc_dma_fw_init_data.cookie.dmac_size);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_load_init_firmware(): "
		    "failed to write init_data uCode.\n");
		return (err);
	}

	clk = ddi_get_lbolt() + drv_usectohz(1000000);

	/*
	 * wait loading init_data until completed or timeout
	 */
	while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		cmn_err(CE_WARN, "iwp_load_init_firmware(): "
		    "timeout waiting for init_data uCode load.\n");
		return (IWP_FAIL);
	}

	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	return (err);
}

/*
 * Load the runtime (post-calibration) ucode text and data sections;
 * same handshake as iwp_load_init_firmware(). The data is taken from
 * the backup copy (sc_dma_fw_data_bak) made at firmware-alloc time.
 * Caller holds sc_glock.
 */
static int
iwp_load_run_firmware(iwp_sc_t *sc)
{
	int err = IWP_FAIL;
	clock_t clk;

	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	/*
	 * load init_text section of uCode to hardware
	 */
	err = iwp_put_seg_fw(sc, sc->sc_dma_fw_text.cookie.dmac_address,
	    RTC_INST_LOWER_BOUND, sc->sc_dma_fw_text.cookie.dmac_size);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_load_run_firmware(): "
		    "failed to write run uCode.\n");
		return (err);
	}

	clk = ddi_get_lbolt() + drv_usectohz(1000000);

	/* wait loading run_text until completed or timeout */
	while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		cmn_err(CE_WARN, "iwp_load_run_firmware(): "
		    "timeout waiting for run uCode load.\n");
		return (IWP_FAIL);
	}

	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	/*
	 * load run_data section of uCode to hardware
	 * (address from the _bak copy; size from the primary copy — both
	 * buffers were allocated with the same datasz)
	 */
	err = iwp_put_seg_fw(sc, sc->sc_dma_fw_data_bak.cookie.dmac_address,
	    RTC_DATA_LOWER_BOUND, sc->sc_dma_fw_data.cookie.dmac_size);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_load_run_firmware(): "
		    "failed to write run_data uCode.\n");
		return (err);
	}

	clk = ddi_get_lbolt() + drv_usectohz(1000000);

	/*
	 * wait loading run_data until completed or timeout
	 */
	while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		cmn_err(CE_WARN, "iwp_load_run_firmware(): "
		    "timeout waiting for run_data uCode load.\n");
		return (IWP_FAIL);
	}

	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	return (err);
}

/*
 * this function will be invoked to receive phy information
 * when a frame is
received.
 */
static void
iwp_rx_phy_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
{
	/* stash the phy payload; consumed later by iwp_rx_mpdu_intr() */
	sc->sc_rx_phy_res.flag = 1;

	(void) memcpy(sc->sc_rx_phy_res.buf, (uint8_t *)(desc + 1),
	    sizeof (iwp_rx_phy_res_t));
}

/*
 * this function will be invoked to receive body of frame when
 * a frame is received.
 */
static void
iwp_rx_mpdu_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
{
	ieee80211com_t *ic = &sc->sc_ic;
#ifdef DEBUG
	iwp_rx_ring_t *ring = &sc->sc_rxq;
#endif
	struct ieee80211_frame *wh;
	struct iwp_rx_non_cfg_phy *phyinfo;
	struct iwp_rx_mpdu_body_size *mpdu_size;

	mblk_t *mp;
	int16_t t;
	uint16_t len, rssi, agc;
	uint32_t temp, crc, *tail;
	uint32_t arssi, brssi, crssi, mrssi;
	iwp_rx_phy_res_t *stat;
	ieee80211_node_t *in;

	/*
	 * assuming not 11n here. cope with 11n in phase-II
	 */
	mpdu_size = (struct iwp_rx_mpdu_body_size *)(desc + 1);
	stat = (iwp_rx_phy_res_t *)sc->sc_rx_phy_res.buf;
	if (stat->cfg_phy_cnt > 20) {
		return;
	}

	/* extract AGC and per-chain RSSI from the raw phy words */
	phyinfo = (struct iwp_rx_non_cfg_phy *)stat->non_cfg_phy;
	temp = LE_32(phyinfo->non_cfg_phy[IWP_RX_RES_AGC_IDX]);
	agc = (temp & IWP_OFDM_AGC_MSK) >> IWP_OFDM_AGC_BIT_POS;

	temp = LE_32(phyinfo->non_cfg_phy[IWP_RX_RES_RSSI_AB_IDX]);
	arssi = (temp & IWP_OFDM_RSSI_A_MSK) >> IWP_OFDM_RSSI_A_BIT_POS;
	brssi = (temp & IWP_OFDM_RSSI_B_MSK) >> IWP_OFDM_RSSI_B_BIT_POS;

	temp = LE_32(phyinfo->non_cfg_phy[IWP_RX_RES_RSSI_C_IDX]);
	crssi = (temp & IWP_OFDM_RSSI_C_MSK) >> IWP_OFDM_RSSI_C_BIT_POS;

	/* use the strongest chain */
	mrssi = MAX(arssi, brssi);
	mrssi = MAX(mrssi, crssi);

	t = mrssi - agc - IWP_RSSI_OFFSET;
	/*
	 * convert dBm to percentage
	 */
	rssi = (100 * 75 * 75 - (-20 - t) * (15 * 75 + 62 * (-20 - t)))
	    / (75 * 75);
	if (rssi > 100) {
		rssi = 100;
	}
	if (rssi < 1) {
		rssi = 1;
	}

	/*
	 * size of frame, not include FCS
	 */
	len = LE_16(mpdu_size->byte_count);
	/* the 4-byte status word follows the frame body */
	tail = (uint32_t *)((uint8_t *)(desc + 1) +
	    sizeof (struct iwp_rx_mpdu_body_size) + len);
	bcopy(tail, &crc, 4);

	IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
	    "rx intr: idx=%d phy_len=%x len=%d "
	    "rate=%x chan=%d tstamp=%x non_cfg_phy_count=%x "
	    "cfg_phy_count=%x tail=%x", ring->cur, sizeof (*stat),
	    len, stat->rate.r.s.rate, stat->channel,
	    LE_32(stat->timestampl), stat->non_cfg_phy_cnt,
	    stat->cfg_phy_cnt, LE_32(crc)));

	if ((len < 16) || (len > sc->sc_dmabuf_sz)) {
		IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
		    "rx frame oversize\n"));
		return;
	}

	/*
	 * discard Rx frames with bad CRC
	 */
	if ((LE_32(crc) &
	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) !=
	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) {
		IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
		    "rx crc error tail: %x\n",
		    LE_32(crc)));
		sc->sc_rx_err++;
		return;
	}

	wh = (struct ieee80211_frame *)
	    ((uint8_t *)(desc + 1)+ sizeof (struct iwp_rx_mpdu_body_size));

	/* snoop the association ID out of association response frames */
	if (IEEE80211_FC0_SUBTYPE_ASSOC_RESP == *(uint8_t *)wh) {
		sc->sc_assoc_id = *((uint16_t *)(wh + 1) + 2);
		IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
		    "rx : association id = %x\n",
		    sc->sc_assoc_id));
	}

#ifdef DEBUG
	if (iwp_dbg_flags & IWP_DEBUG_RX) {
		ieee80211_dump_pkt((uint8_t *)wh, len, 0, 0);
	}
#endif

	in = ieee80211_find_rxnode(ic, wh);
	mp = allocb(len, BPRI_MED);
	if (mp) {
		(void) memcpy(mp->b_wptr, wh, len);
		mp->b_wptr += len;

		/*
		 * send the frame to the 802.11 layer
		 */
		(void) ieee80211_input(ic, mp, in, rssi, 0);
	} else {
		sc->sc_rx_nobuf++;
		IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
		    "alloc rx buf failed\n"));
	}

	/*
	 * release node reference
	 */
	ieee80211_free_node(in);
}

/*
 * process correlative affairs after a frame is sent.
 */
static void
iwp_tx_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
{
	ieee80211com_t *ic = &sc->sc_ic;
	iwp_tx_ring_t *ring = &sc->sc_txq[desc->hdr.qid & 0x3];
	iwp_tx_stat_t *stat = (iwp_tx_stat_t *)(desc + 1);
	iwp_amrr_t *amrr;

	if (NULL == ic->ic_bss) {
		return;
	}

	/* the bss node is embedded in an iwp_amrr_t (see iwp_node_alloc) */
	amrr = (iwp_amrr_t *)ic->ic_bss;

	amrr->txcnt++;
	IWP_DBG((IWP_DEBUG_RATECTL, "iwp_tx_intr(): "
	    "tx: %d cnt\n", amrr->txcnt));

	if (stat->ntries > 0) {
		amrr->retrycnt++;
		sc->sc_tx_retries++;
		IWP_DBG((IWP_DEBUG_TX, "iwp_tx_intr(): "
		    "tx: %d retries\n",
		    sc->sc_tx_retries));
	}

	mutex_enter(&sc->sc_mt_lock);
	sc->sc_tx_timer = 0;
	mutex_exit(&sc->sc_mt_lock);

	mutex_enter(&sc->sc_tx_lock);

	ring->queued--;
	if (ring->queued < 0) {
		ring->queued = 0;
	}

	/* kick MAC layer when the ring has drained enough */
	if ((sc->sc_need_reschedule) && (ring->queued <= (ring->count >> 3))) {
		sc->sc_need_reschedule = 0;
		mutex_exit(&sc->sc_tx_lock);
		mac_tx_update(ic->ic_mach);
		mutex_enter(&sc->sc_tx_lock);
	}

	mutex_exit(&sc->sc_tx_lock);
}

/*
 * inform a given command has been executed
 */
static void
iwp_cmd_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
{
	/* only responses from the command queue (qid 4) are of interest */
	if ((desc->hdr.qid & 7) != 4) {
		return;
	}

	/* swallow responses for commands issued without a waiter */
	if (sc->sc_cmd_accum > 0) {
		sc->sc_cmd_accum--;
		return;
	}

	mutex_enter(&sc->sc_glock);

	sc->sc_cmd_flag = SC_CMD_FLG_DONE;

	cv_signal(&sc->sc_cmd_cv);

	mutex_exit(&sc->sc_glock);

	IWP_DBG((IWP_DEBUG_CMD, "iwp_cmd_intr(): "
	    "qid=%x idx=%d flags=%x type=0x%x\n",
	    desc->hdr.qid, desc->hdr.idx, desc->hdr.flags,
	    desc->hdr.type));
}

/*
 * this function will be invoked when alive notification
occur. 2414 */ 2415 static void 2416 iwp_ucode_alive(iwp_sc_t *sc, iwp_rx_desc_t *desc) 2417 { 2418 uint32_t rv; 2419 struct iwp_calib_cfg_cmd cmd; 2420 struct iwp_alive_resp *ar = 2421 (struct iwp_alive_resp *)(desc + 1); 2422 struct iwp_calib_results *res_p = &sc->sc_calib_results; 2423 2424 /* 2425 * the microcontroller is ready 2426 */ 2427 IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): " 2428 "microcode alive notification minor: %x major: %x type: " 2429 "%x subtype: %x\n", 2430 ar->ucode_minor, ar->ucode_minor, ar->ver_type, ar->ver_subtype)); 2431 2432 #ifdef DEBUG 2433 if (LE_32(ar->is_valid) != UCODE_VALID_OK) { 2434 IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): " 2435 "microcontroller initialization failed\n")); 2436 } 2437 #endif 2438 2439 /* 2440 * determine if init alive or runtime alive. 2441 */ 2442 if (INITIALIZE_SUBTYPE == ar->ver_subtype) { 2443 IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): " 2444 "initialization alive received.\n")); 2445 2446 (void) memcpy(&sc->sc_card_alive_init, ar, 2447 sizeof (struct iwp_init_alive_resp)); 2448 2449 /* 2450 * necessary configuration to NIC 2451 */ 2452 mutex_enter(&sc->sc_glock); 2453 2454 rv = iwp_alive_common(sc); 2455 if (rv != IWP_SUCCESS) { 2456 cmn_err(CE_WARN, "iwp_ucode_alive(): " 2457 "common alive process failed in init alive.\n"); 2458 mutex_exit(&sc->sc_glock); 2459 return; 2460 } 2461 2462 (void) memset(&cmd, 0, sizeof (cmd)); 2463 2464 cmd.ucd_calib_cfg.once.is_enable = IWP_CALIB_INIT_CFG_ALL; 2465 cmd.ucd_calib_cfg.once.start = IWP_CALIB_INIT_CFG_ALL; 2466 cmd.ucd_calib_cfg.once.send_res = IWP_CALIB_INIT_CFG_ALL; 2467 cmd.ucd_calib_cfg.flags = IWP_CALIB_INIT_CFG_ALL; 2468 2469 /* 2470 * require ucode execute calibration 2471 */ 2472 rv = iwp_cmd(sc, CALIBRATION_CFG_CMD, &cmd, sizeof (cmd), 1); 2473 if (rv != IWP_SUCCESS) { 2474 cmn_err(CE_WARN, "iwp_ucode_alive(): " 2475 "failed to send calibration configure command.\n"); 2476 mutex_exit(&sc->sc_glock); 2477 return; 2478 } 2479 2480 
mutex_exit(&sc->sc_glock); 2481 2482 } else { /* runtime alive */ 2483 2484 IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): " 2485 "runtime alive received.\n")); 2486 2487 (void) memcpy(&sc->sc_card_alive_run, ar, 2488 sizeof (struct iwp_alive_resp)); 2489 2490 mutex_enter(&sc->sc_glock); 2491 2492 /* 2493 * necessary configuration to NIC 2494 */ 2495 rv = iwp_alive_common(sc); 2496 if (rv != IWP_SUCCESS) { 2497 cmn_err(CE_WARN, "iwp_ucode_alive(): " 2498 "common alive process failed in run alive.\n"); 2499 mutex_exit(&sc->sc_glock); 2500 return; 2501 } 2502 2503 /* 2504 * send the result of local oscilator calibration to uCode. 2505 */ 2506 if (res_p->lo_res != NULL) { 2507 rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD, 2508 res_p->lo_res, res_p->lo_res_len, 1); 2509 if (rv != IWP_SUCCESS) { 2510 cmn_err(CE_WARN, "iwp_ucode_alive(): " 2511 "failed to send local" 2512 "oscilator calibration command.\n"); 2513 mutex_exit(&sc->sc_glock); 2514 return; 2515 } 2516 2517 DELAY(1000); 2518 } 2519 2520 /* 2521 * send the result of TX IQ calibration to uCode. 2522 */ 2523 if (res_p->tx_iq_res != NULL) { 2524 rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD, 2525 res_p->tx_iq_res, res_p->tx_iq_res_len, 1); 2526 if (rv != IWP_SUCCESS) { 2527 cmn_err(CE_WARN, "iwp_ucode_alive(): " 2528 "failed to send TX IQ" 2529 "calibration command.\n"); 2530 mutex_exit(&sc->sc_glock); 2531 return; 2532 } 2533 2534 DELAY(1000); 2535 } 2536 2537 /* 2538 * send the result of TX IQ perd calibration to uCode. 2539 */ 2540 if (res_p->tx_iq_perd_res != NULL) { 2541 rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD, 2542 res_p->tx_iq_perd_res, 2543 res_p->tx_iq_perd_res_len, 1); 2544 if (rv != IWP_SUCCESS) { 2545 cmn_err(CE_WARN, "iwp_ucode_alive(): " 2546 "failed to send TX IQ perd" 2547 "calibration command.\n"); 2548 mutex_exit(&sc->sc_glock); 2549 return; 2550 } 2551 2552 DELAY(1000); 2553 } 2554 2555 /* 2556 * send the result of Base Band calibration to uCode. 
2557 */ 2558 if (res_p->base_band_res != NULL) { 2559 rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD, 2560 res_p->base_band_res, 2561 res_p->base_band_res_len, 1); 2562 if (rv != IWP_SUCCESS) { 2563 cmn_err(CE_WARN, "iwp_ucode_alive(): " 2564 "failed to send Base Band" 2565 "calibration command.\n"); 2566 mutex_exit(&sc->sc_glock); 2567 return; 2568 } 2569 2570 DELAY(1000); 2571 } 2572 2573 atomic_or_32(&sc->sc_flags, IWP_F_FW_INIT); 2574 cv_signal(&sc->sc_ucode_cv); 2575 2576 mutex_exit(&sc->sc_glock); 2577 } 2578 2579 } 2580 2581 /* 2582 * deal with receiving frames, command response 2583 * and all notifications from ucode. 2584 */ 2585 /* ARGSUSED */ 2586 static uint_t 2587 iwp_rx_softintr(caddr_t arg, caddr_t unused) 2588 { 2589 iwp_sc_t *sc; 2590 ieee80211com_t *ic; 2591 iwp_rx_desc_t *desc; 2592 iwp_rx_data_t *data; 2593 uint32_t index; 2594 2595 if (NULL == arg) { 2596 return (DDI_INTR_UNCLAIMED); 2597 } 2598 sc = (iwp_sc_t *)arg; 2599 ic = &sc->sc_ic; 2600 2601 /* 2602 * firmware has moved the index of the rx queue, driver get it, 2603 * and deal with it. 
	 */
	index = (sc->sc_shared->val0) & 0xfff;

	/* process every descriptor the firmware has completed so far */
	while (sc->sc_rxq.cur != index) {
		data = &sc->sc_rxq.data[sc->sc_rxq.cur];
		desc = (iwp_rx_desc_t *)data->dma_data.mem_va;

		IWP_DBG((IWP_DEBUG_INTR, "iwp_rx_softintr(): "
		    "rx notification index = %d"
		    " cur = %d qid=%x idx=%d flags=%x type=%x len=%d\n",
		    index, sc->sc_rxq.cur, desc->hdr.qid, desc->hdr.idx,
		    desc->hdr.flags, desc->hdr.type, LE_32(desc->len)));

		/*
		 * a command other than a tx need to be replied
		 */
		if (!(desc->hdr.qid & 0x80) &&
		    (desc->hdr.type != REPLY_SCAN_CMD) &&
		    (desc->hdr.type != REPLY_TX)) {
			iwp_cmd_intr(sc, desc);
		}

		/* dispatch on the firmware notification type */
		switch (desc->hdr.type) {
		case REPLY_RX_PHY_CMD:
			iwp_rx_phy_intr(sc, desc);
			break;

		case REPLY_RX_MPDU_CMD:
			iwp_rx_mpdu_intr(sc, desc);
			break;

		case REPLY_TX:
			iwp_tx_intr(sc, desc);
			break;

		case REPLY_ALIVE:
			iwp_ucode_alive(sc, desc);
			break;

		case CARD_STATE_NOTIFICATION:
		{
			uint32_t *status = (uint32_t *)(desc + 1);

			IWP_DBG((IWP_DEBUG_RADIO, "iwp_rx_softintr(): "
			    "state changed to %x\n",
			    LE_32(*status)));

			if (LE_32(*status) & 1) {
				/*
				 * the radio button has to be pushed(OFF). It
				 * is considered as a hw error, the
				 * iwp_thread() tries to recover it after the
				 * button is pushed again(ON)
				 */
				cmn_err(CE_NOTE, "iwp_rx_softintr(): "
				    "radio transmitter is off\n");
				sc->sc_ostate = sc->sc_ic.ic_state;
				ieee80211_new_state(&sc->sc_ic,
				    IEEE80211_S_INIT, -1);
				atomic_or_32(&sc->sc_flags,
				    IWP_F_HW_ERR_RECOVER | IWP_F_RADIO_OFF);
			}

			break;
		}

		case SCAN_START_NOTIFICATION:
		{
			iwp_start_scan_t *scan =
			    (iwp_start_scan_t *)(desc + 1);

			IWP_DBG((IWP_DEBUG_SCAN, "iwp_rx_softintr(): "
			    "scanning channel %d status %x\n",
			    scan->chan, LE_32(scan->status)));

			/* track the channel currently being scanned */
			ic->ic_curchan = &ic->ic_sup_channels[scan->chan];
			break;
		}

		case SCAN_COMPLETE_NOTIFICATION:
		{
#ifdef DEBUG
			iwp_stop_scan_t *scan =
			    (iwp_stop_scan_t *)(desc + 1);

			IWP_DBG((IWP_DEBUG_SCAN, "iwp_rx_softintr(): "
			    "completed channel %d (burst of %d) status %02x\n",
			    scan->chan, scan->nchan, scan->status));
#endif

			/* iwp_thread() advances the scan on this counter */
			sc->sc_scan_pending++;
			break;
		}

		case STATISTICS_NOTIFICATION:
		{
			/*
			 * handle statistics notification
			 */
			break;
		}

		case CALIBRATION_RES_NOTIFICATION:
			iwp_save_calib_result(sc, desc);
			break;

		case CALIBRATION_COMPLETE_NOTIFICATION:
			mutex_enter(&sc->sc_glock);
			atomic_or_32(&sc->sc_flags, IWP_F_FW_INIT);
			cv_signal(&sc->sc_ucode_cv);
			mutex_exit(&sc->sc_glock);
			break;

		case MISSED_BEACONS_NOTIFICATION:
		{
			struct iwp_beacon_missed *miss =
			    (struct iwp_beacon_missed *)(desc + 1);

			/*
			 * too many consecutive beacons lost while
			 * associated: drop back to INIT state.
			 */
			if ((ic->ic_state == IEEE80211_S_RUN) &&
			    (LE_32(miss->consecutive) > 50)) {
				cmn_err(CE_NOTE, "iwp: iwp_rx_softintr(): "
				    "beacon missed %d/%d\n",
				    LE_32(miss->consecutive),
				    LE_32(miss->total));
				(void) ieee80211_new_state(ic,
				    IEEE80211_S_INIT, -1);
			}
			break;
		}

		}

		sc->sc_rxq.cur = (sc->sc_rxq.cur + 1) % RX_QUEUE_SIZE;
	}

	/*
	 * driver dealt with what received in rx queue and tell the information
	 * to the firmware.
	 */
	index = (0 == index) ? RX_QUEUE_SIZE - 1 : index - 1;
	IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, index & (~7));

	/*
	 * re-enable interrupts
	 */
	IWP_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);

	return (DDI_INTR_CLAIMED);
}

/*
 * the handle of interrupt
 */
/* ARGSUSED */
static uint_t
iwp_intr(caddr_t arg, caddr_t unused)
{
	iwp_sc_t *sc;
	uint32_t r, rfh;

	if (NULL == arg) {
		return (DDI_INTR_UNCLAIMED);
	}
	sc = (iwp_sc_t *)arg;

	r = IWP_READ(sc, CSR_INT);
	/* 0xffffffff reads back when the device has gone away */
	if (0 == r || 0xffffffff == r) {
		return (DDI_INTR_UNCLAIMED);
	}

	IWP_DBG((IWP_DEBUG_INTR, "iwp_intr(): "
	    "interrupt reg %x\n", r));

	rfh = IWP_READ(sc, CSR_FH_INT_STATUS);

	IWP_DBG((IWP_DEBUG_INTR, "iwp_intr(): "
	    "FH interrupt reg %x\n", rfh));

	/*
	 * disable interrupts
	 */
	IWP_WRITE(sc, CSR_INT_MASK, 0);

	/*
	 * ack interrupts
	 */
	IWP_WRITE(sc, CSR_INT, r);
	IWP_WRITE(sc, CSR_FH_INT_STATUS, rfh);

	if (r & (BIT_INT_SWERROR | BIT_INT_ERR)) {
		IWP_DBG((IWP_DEBUG_FW, "iwp_intr(): "
		    "fatal firmware error\n"));
		iwp_stop(sc);
		sc->sc_ostate = sc->sc_ic.ic_state;

		/* notify upper layer */
		if (!IWP_CHK_FAST_RECOVER(sc)) {
			ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
		}

		/* iwp_thread() performs the actual recovery */
		atomic_or_32(&sc->sc_flags, IWP_F_HW_ERR_RECOVER);
		return (DDI_INTR_CLAIMED);
	}

	if (r & BIT_INT_RF_KILL) {
		uint32_t tmp = IWP_READ(sc, CSR_GP_CNTRL);
		/* bit 27 set reads as "radio enabled" here — see message */
		if (tmp & (1 << 27)) {
			cmn_err(CE_NOTE, "RF switch: radio on\n");
		}
	}

	/* RX work is deferred to the soft interrupt handler */
	if ((r & (BIT_INT_FH_RX | BIT_INT_SW_RX)) ||
	    (rfh & FH_INT_RX_MASK)) {
		(void)
ddi_intr_trigger_softint(sc->sc_soft_hdl, NULL); 2817 return (DDI_INTR_CLAIMED); 2818 } 2819 2820 if (r & BIT_INT_FH_TX) { 2821 mutex_enter(&sc->sc_glock); 2822 atomic_or_32(&sc->sc_flags, IWP_F_PUT_SEG); 2823 cv_signal(&sc->sc_put_seg_cv); 2824 mutex_exit(&sc->sc_glock); 2825 } 2826 2827 #ifdef DEBUG 2828 if (r & BIT_INT_ALIVE) { 2829 IWP_DBG((IWP_DEBUG_FW, "iwp_intr(): " 2830 "firmware initialized.\n")); 2831 } 2832 #endif 2833 2834 /* 2835 * re-enable interrupts 2836 */ 2837 IWP_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK); 2838 2839 return (DDI_INTR_CLAIMED); 2840 } 2841 2842 static uint8_t 2843 iwp_rate_to_plcp(int rate) 2844 { 2845 uint8_t ret; 2846 2847 switch (rate) { 2848 /* 2849 * CCK rates 2850 */ 2851 case 2: 2852 ret = 0xa; 2853 break; 2854 2855 case 4: 2856 ret = 0x14; 2857 break; 2858 2859 case 11: 2860 ret = 0x37; 2861 break; 2862 2863 case 22: 2864 ret = 0x6e; 2865 break; 2866 2867 /* 2868 * OFDM rates 2869 */ 2870 case 12: 2871 ret = 0xd; 2872 break; 2873 2874 case 18: 2875 ret = 0xf; 2876 break; 2877 2878 case 24: 2879 ret = 0x5; 2880 break; 2881 2882 case 36: 2883 ret = 0x7; 2884 break; 2885 2886 case 48: 2887 ret = 0x9; 2888 break; 2889 2890 case 72: 2891 ret = 0xb; 2892 break; 2893 2894 case 96: 2895 ret = 0x1; 2896 break; 2897 2898 case 108: 2899 ret = 0x3; 2900 break; 2901 2902 default: 2903 ret = 0; 2904 break; 2905 } 2906 2907 return (ret); 2908 } 2909 2910 /* 2911 * invoked by GLD send frames 2912 */ 2913 static mblk_t * 2914 iwp_m_tx(void *arg, mblk_t *mp) 2915 { 2916 iwp_sc_t *sc; 2917 ieee80211com_t *ic; 2918 mblk_t *next; 2919 2920 if (NULL == arg) { 2921 return (NULL); 2922 } 2923 sc = (iwp_sc_t *)arg; 2924 ic = &sc->sc_ic; 2925 2926 if (sc->sc_flags & IWP_F_SUSPEND) { 2927 freemsgchain(mp); 2928 return (NULL); 2929 } 2930 2931 if (ic->ic_state != IEEE80211_S_RUN) { 2932 freemsgchain(mp); 2933 return (NULL); 2934 } 2935 2936 if ((sc->sc_flags & IWP_F_HW_ERR_RECOVER) && 2937 IWP_CHK_FAST_RECOVER(sc)) { 2938 IWP_DBG((IWP_DEBUG_FW, 
"iwp_m_tx(): "
		    "hold queue\n"));
		/* hold frames while a fast recovery is in progress */
		return (mp);
	}


	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;
		if (iwp_send(ic, mp, IEEE80211_FC0_TYPE_DATA) != 0) {
			/* ring is full: re-link and hand back the rest */
			mp->b_next = next;
			break;
		}
		mp = next;
	}

	return (mp);
}

/*
 * send frames
 */
static int
iwp_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
{
	iwp_sc_t *sc;
	iwp_tx_ring_t *ring;
	iwp_tx_desc_t *desc;
	iwp_tx_data_t *data;
	iwp_tx_data_t *desc_data;
	iwp_cmd_t *cmd;
	iwp_tx_cmd_t *tx;
	ieee80211_node_t *in;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	mblk_t *m, *m0;
	int hdrlen, len, len0, mblen, off, err = IWP_SUCCESS;
	uint16_t masks = 0;
	uint32_t rate, s_id = 0;

	if (NULL == ic) {
		return (IWP_FAIL);
	}
	sc = (iwp_sc_t *)ic;

	/* while suspended, consume non-data frames and fail data frames */
	if (sc->sc_flags & IWP_F_SUSPEND) {
		if ((type & IEEE80211_FC0_TYPE_MASK) !=
		    IEEE80211_FC0_TYPE_DATA) {
			freemsg(mp);
		}
		err = IWP_FAIL;
		goto exit;
	}

	mutex_enter(&sc->sc_tx_lock);
	ring = &sc->sc_txq[0];
	data = &ring->data[ring->cur];
	cmd = data->cmd;
	bzero(cmd, sizeof (*cmd));

	ring->cur = (ring->cur + 1) % ring->count;

	/*
	 * Need reschedule TX if TX buffer is full.
	 */
	if (ring->queued > ring->count - IWP_MAX_WIN_SIZE) {
		IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
		    "no txbuf\n"));

		/* ask GLD to hold frames until the ring drains */
		sc->sc_need_reschedule = 1;
		mutex_exit(&sc->sc_tx_lock);

		if ((type & IEEE80211_FC0_TYPE_MASK) !=
		    IEEE80211_FC0_TYPE_DATA) {
			freemsg(mp);
		}
		sc->sc_tx_nobuf++;
		err = IWP_FAIL;
		goto exit;
	}

	ring->queued++;

	mutex_exit(&sc->sc_tx_lock);

	hdrlen = ieee80211_hdrspace(ic, mp->b_rptr);

	/* contiguous copy of the frame, with headroom for encapsulation */
	m = allocb(msgdsize(mp) + 32, BPRI_MED);
	if (NULL == m) { /* can not alloc buf, drop this package */
		cmn_err(CE_WARN, "iwp_send(): "
		    "failed to allocate msgbuf\n");
		freemsg(mp);

		mutex_enter(&sc->sc_tx_lock);
		ring->queued--;
		if ((sc->sc_need_reschedule) && (ring->queued <= 0)) {
			sc->sc_need_reschedule = 0;
			mutex_exit(&sc->sc_tx_lock);
			mac_tx_update(ic->ic_mach);
			mutex_enter(&sc->sc_tx_lock);
		}
		mutex_exit(&sc->sc_tx_lock);

		/* frame was consumed: report success so GLD does not retry */
		err = IWP_SUCCESS;
		goto exit;
	}

	/* linearize the message chain into the new buffer */
	for (off = 0, m0 = mp; m0 != NULL; m0 = m0->b_cont) {
		mblen = MBLKL(m0);
		(void) memcpy(m->b_rptr + off, m0->b_rptr, mblen);
		off += mblen;
	}

	m->b_wptr += off;

	wh = (struct ieee80211_frame *)m->b_rptr;

	/*
	 * determine send which AP or station in IBSS
	 */
	in = ieee80211_find_txnode(ic, wh->i_addr1);
	if (NULL == in) {
		cmn_err(CE_WARN, "iwp_send(): "
		    "failed to find tx node\n");
		freemsg(mp);
		freemsg(m);
		sc->sc_tx_err++;

		mutex_enter(&sc->sc_tx_lock);
		ring->queued--;
		if ((sc->sc_need_reschedule) && (ring->queued <= 0)) {
			sc->sc_need_reschedule = 0;
			mutex_exit(&sc->sc_tx_lock);
			mac_tx_update(ic->ic_mach);
			mutex_enter(&sc->sc_tx_lock);
		}
		mutex_exit(&sc->sc_tx_lock);

		err = IWP_SUCCESS;
		goto exit;
	}

	/*
	 * Net80211 module encapsulate outbound data frames.
	 * Add some fields of 80211 frame.
	 */
	if ((type & IEEE80211_FC0_TYPE_MASK) ==
	    IEEE80211_FC0_TYPE_DATA) {
		(void) ieee80211_encap(ic, m, in);
	}

	freemsg(mp);

	cmd->hdr.type = REPLY_TX;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;

	tx = (iwp_tx_cmd_t *)cmd->data;
	tx->tx_flags = 0;

	/* no ACK is expected for multicast/broadcast frames */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		tx->tx_flags &= ~(LE_32(TX_CMD_FLG_ACK_MSK));
	} else {
		tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
	}

	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
		k = ieee80211_crypto_encap(ic, m);
		if (NULL == k) {
			freemsg(m);
			sc->sc_tx_err++;

			mutex_enter(&sc->sc_tx_lock);
			ring->queued--;
			if ((sc->sc_need_reschedule) && (ring->queued <= 0)) {
				sc->sc_need_reschedule = 0;
				mutex_exit(&sc->sc_tx_lock);
				mac_tx_update(ic->ic_mach);
				mutex_enter(&sc->sc_tx_lock);
			}
			mutex_exit(&sc->sc_tx_lock);

			/* frame consumed: report success to avoid a retry */
			err = IWP_SUCCESS;
			goto exit;
		}

		/* packet header may have moved, reset our local pointer */
		wh = (struct ieee80211_frame *)m->b_rptr;
	}

	len = msgdsize(m);

#ifdef DEBUG
	if (iwp_dbg_flags & IWP_DEBUG_TX) {
		ieee80211_dump_pkt((uint8_t *)wh, hdrlen, 0, 0);
	}
#endif

	tx->rts_retry_limit = IWP_TX_RTS_RETRY_LIMIT;
	tx->data_retry_limit = IWP_TX_DATA_RETRY_LIMIT;

	/*
	 * specific TX parameters for management frames
	 */
	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
	    IEEE80211_FC0_TYPE_MGT) {
		/*
		 * mgmt frames are sent at 1M
		 */
		if ((in->in_rates.ir_rates[0] &
		    IEEE80211_RATE_VAL) != 0) {
			rate = in->in_rates.ir_rates[0] & IEEE80211_RATE_VAL;
		} else {
			rate = 2;
		}

		tx->tx_flags |= LE_32(TX_CMD_FLG_SEQ_CTL_MSK);

		/*
		 * tell h/w to set timestamp in probe responses
		 */
		if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
IEEE80211_FC0_SUBTYPE_PROBE_RESP) { 3161 tx->tx_flags |= LE_32(TX_CMD_FLG_TSF_MSK); 3162 3163 tx->data_retry_limit = 3; 3164 if (tx->data_retry_limit < tx->rts_retry_limit) { 3165 tx->rts_retry_limit = tx->data_retry_limit; 3166 } 3167 } 3168 3169 if (((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == 3170 IEEE80211_FC0_SUBTYPE_ASSOC_REQ) || 3171 ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == 3172 IEEE80211_FC0_SUBTYPE_REASSOC_REQ)) { 3173 tx->timeout.pm_frame_timeout = LE_16(3); 3174 } else { 3175 tx->timeout.pm_frame_timeout = LE_16(2); 3176 } 3177 3178 } else { 3179 /* 3180 * do it here for the software way rate scaling. 3181 * later for rate scaling in hardware. 3182 * 3183 * now the txrate is determined in tx cmd flags, set to the 3184 * max value 54M for 11g and 11M for 11b originally. 3185 */ 3186 if (ic->ic_fixed_rate != IEEE80211_FIXED_RATE_NONE) { 3187 rate = ic->ic_fixed_rate; 3188 } else { 3189 if ((in->in_rates.ir_rates[in->in_txrate] & 3190 IEEE80211_RATE_VAL) != 0) { 3191 rate = in->in_rates. 
3192 ir_rates[in->in_txrate] & 3193 IEEE80211_RATE_VAL; 3194 } 3195 } 3196 3197 tx->tx_flags |= LE_32(TX_CMD_FLG_SEQ_CTL_MSK); 3198 3199 tx->timeout.pm_frame_timeout = 0; 3200 } 3201 3202 IWP_DBG((IWP_DEBUG_TX, "iwp_send(): " 3203 "tx rate[%d of %d] = %x", 3204 in->in_txrate, in->in_rates.ir_nrates, rate)); 3205 3206 len0 = roundup(4 + sizeof (iwp_tx_cmd_t) + hdrlen, 4); 3207 if (len0 != (4 + sizeof (iwp_tx_cmd_t) + hdrlen)) { 3208 tx->tx_flags |= LE_32(TX_CMD_FLG_MH_PAD_MSK); 3209 } 3210 3211 /* 3212 * retrieve destination node's id 3213 */ 3214 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 3215 tx->sta_id = IWP_BROADCAST_ID; 3216 } else { 3217 tx->sta_id = IWP_AP_ID; 3218 } 3219 3220 if (2 == rate || 4 == rate || 11 == rate || 22 == rate) { 3221 masks |= RATE_MCS_CCK_MSK; 3222 } 3223 3224 masks |= RATE_MCS_ANT_B_MSK; 3225 tx->rate.r.rate_n_flags = LE_32(iwp_rate_to_plcp(rate) | masks); 3226 3227 IWP_DBG((IWP_DEBUG_TX, "iwp_send(): " 3228 "tx flag = %x", 3229 tx->tx_flags)); 3230 3231 tx->stop_time.life_time = LE_32(0xffffffff); 3232 3233 tx->len = LE_16(len); 3234 3235 tx->dram_lsb_ptr = 3236 LE_32(data->paddr_cmd + 4 + offsetof(iwp_tx_cmd_t, scratch)); 3237 tx->dram_msb_ptr = 0; 3238 tx->driver_txop = 0; 3239 tx->next_frame_len = 0; 3240 3241 (void) memcpy(tx + 1, m->b_rptr, hdrlen); 3242 m->b_rptr += hdrlen; 3243 (void) memcpy(data->dma_data.mem_va, m->b_rptr, len - hdrlen); 3244 3245 IWP_DBG((IWP_DEBUG_TX, "iwp_send(): " 3246 "sending data: qid=%d idx=%d len=%d", 3247 ring->qid, ring->cur, len)); 3248 3249 /* 3250 * first segment includes the tx cmd plus the 802.11 header, 3251 * the second includes the remaining of the 802.11 frame. 
	 */
	mutex_enter(&sc->sc_tx_lock);

	cmd->hdr.idx = ring->desc_cur;

	desc_data = &ring->data[ring->desc_cur];
	desc = desc_data->desc;
	bzero(desc, sizeof (*desc));
	/* two transfer-buffer entries in this descriptor */
	desc->val0 = 2 << 24;
	desc->pa[0].tb1_addr = data->paddr_cmd;
	desc->pa[0].val1 = ((len0 << 4) & 0xfff0) |
	    ((data->dma_data.cookie.dmac_address & 0xffff) << 16);
	desc->pa[0].val2 =
	    ((data->dma_data.cookie.dmac_address & 0xffff0000) >> 16) |
	    ((len - hdrlen) << 20);
	IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
	    "phy addr1 = 0x%x phy addr2 = 0x%x "
	    "len1 = 0x%x, len2 = 0x%x val1 = 0x%x val2 = 0x%x",
	    data->paddr_cmd, data->dma_data.cookie.dmac_address,
	    len0, len - hdrlen, desc->pa[0].val1, desc->pa[0].val2));

	/*
	 * kick ring
	 */
	s_id = tx->sta_id;

	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
	    tfd_offset[ring->desc_cur].val =
	    (8 + len) | (s_id << 12);
	/* mirror low entries into the wrap-around shadow area */
	if (ring->desc_cur < IWP_MAX_WIN_SIZE) {
		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
		    tfd_offset[IWP_QUEUE_SIZE + ring->desc_cur].val =
		    (8 + len) | (s_id << 12);
	}

	IWP_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
	IWP_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);

	ring->desc_cur = (ring->desc_cur + 1) % ring->count;
	IWP_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->desc_cur);

	mutex_exit(&sc->sc_tx_lock);
	freemsg(m);

	/*
	 * release node reference
	 */
	ieee80211_free_node(in);

	ic->ic_stats.is_tx_bytes += len;
	ic->ic_stats.is_tx_frags++;

	/* arm the TX watchdog if it is not already running */
	mutex_enter(&sc->sc_mt_lock);
	if (0 == sc->sc_tx_timer) {
		sc->sc_tx_timer = 4;
	}
	mutex_exit(&sc->sc_mt_lock);

exit:
	return (err);
}

/*
 * invoked by GLD to deal with IOCTL affaires
 */
static void
iwp_m_ioctl(void* arg, queue_t *wq, mblk_t *mp)
{
	iwp_sc_t *sc;
	ieee80211com_t *ic;
	int err = EINVAL;

	if (NULL == arg) {
		return;
	}
	sc = (iwp_sc_t *)arg;
	ic = &sc->sc_ic;

	err = ieee80211_ioctl(ic, wq, mp);
	if (ENETRESET == err) {
		/*
		 * This is special for the hidden AP connection.
		 * In any case, we should make sure only one 'scan'
		 * in the driver for a 'connect' CLI command. So
		 * when connecting to a hidden AP, the scan is just
		 * sent out to the air when we know the desired
		 * essid of the AP we want to connect.
3339 */ 3340 if (ic->ic_des_esslen) { 3341 if (sc->sc_flags & IWP_F_RUNNING) { 3342 iwp_m_stop(sc); 3343 (void) iwp_m_start(sc); 3344 (void) ieee80211_new_state(ic, 3345 IEEE80211_S_SCAN, -1); 3346 } 3347 } 3348 } 3349 } 3350 3351 /* 3352 * Call back functions for get/set proporty 3353 */ 3354 static int 3355 iwp_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num, 3356 uint_t wldp_length, void *wldp_buf) 3357 { 3358 iwp_sc_t *sc; 3359 int err = EINVAL; 3360 3361 if (NULL == arg) { 3362 return (EINVAL); 3363 } 3364 sc = (iwp_sc_t *)arg; 3365 3366 err = ieee80211_getprop(&sc->sc_ic, pr_name, wldp_pr_num, 3367 wldp_length, wldp_buf); 3368 3369 return (err); 3370 } 3371 3372 static void 3373 iwp_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num, 3374 mac_prop_info_handle_t prh) 3375 { 3376 iwp_sc_t *sc; 3377 3378 sc = (iwp_sc_t *)arg; 3379 ieee80211_propinfo(&sc->sc_ic, pr_name, wldp_pr_num, prh); 3380 } 3381 3382 static int 3383 iwp_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num, 3384 uint_t wldp_length, const void *wldp_buf) 3385 { 3386 iwp_sc_t *sc; 3387 ieee80211com_t *ic; 3388 int err = EINVAL; 3389 3390 if (NULL == arg) { 3391 return (EINVAL); 3392 } 3393 sc = (iwp_sc_t *)arg; 3394 ic = &sc->sc_ic; 3395 3396 err = ieee80211_setprop(ic, pr_name, wldp_pr_num, wldp_length, 3397 wldp_buf); 3398 3399 if (err == ENETRESET) { 3400 if (ic->ic_des_esslen) { 3401 if (sc->sc_flags & IWP_F_RUNNING) { 3402 iwp_m_stop(sc); 3403 (void) iwp_m_start(sc); 3404 (void) ieee80211_new_state(ic, 3405 IEEE80211_S_SCAN, -1); 3406 } 3407 } 3408 err = 0; 3409 } 3410 return (err); 3411 } 3412 3413 /* 3414 * invoked by GLD supply statistics NIC and driver 3415 */ 3416 static int 3417 iwp_m_stat(void *arg, uint_t stat, uint64_t *val) 3418 { 3419 iwp_sc_t *sc; 3420 ieee80211com_t *ic; 3421 ieee80211_node_t *in; 3422 3423 if (NULL == arg) { 3424 return (EINVAL); 3425 } 3426 sc = (iwp_sc_t *)arg; 3427 ic = &sc->sc_ic; 3428 3429 
	mutex_enter(&sc->sc_glock);

	switch (stat) {
	case MAC_STAT_IFSPEED:
		in = ic->ic_bss;
		/* ir_rates values are in 500kb/s units; convert to bits/s */
		*val = ((IEEE80211_FIXED_RATE_NONE == ic->ic_fixed_rate) ?
		    IEEE80211_RATE(in->in_txrate) :
		    ic->ic_fixed_rate) / 2 * 1000000;
		break;
	case MAC_STAT_NOXMTBUF:
		*val = sc->sc_tx_nobuf;
		break;
	case MAC_STAT_NORCVBUF:
		*val = sc->sc_rx_nobuf;
		break;
	case MAC_STAT_IERRORS:
		*val = sc->sc_rx_err;
		break;
	case MAC_STAT_RBYTES:
		*val = ic->ic_stats.is_rx_bytes;
		break;
	case MAC_STAT_IPACKETS:
		*val = ic->ic_stats.is_rx_frags;
		break;
	case MAC_STAT_OBYTES:
		*val = ic->ic_stats.is_tx_bytes;
		break;
	case MAC_STAT_OPACKETS:
		*val = ic->ic_stats.is_tx_frags;
		break;
	case MAC_STAT_OERRORS:
	case WIFI_STAT_TX_FAILED:
		*val = sc->sc_tx_err;
		break;
	case WIFI_STAT_TX_RETRANS:
		*val = sc->sc_tx_retries;
		break;
	case WIFI_STAT_FCS_ERRORS:
	case WIFI_STAT_WEP_ERRORS:
	case WIFI_STAT_TX_FRAGS:
	case WIFI_STAT_MCAST_TX:
	case WIFI_STAT_RTS_SUCCESS:
	case WIFI_STAT_RTS_FAILURE:
	case WIFI_STAT_ACK_FAILURE:
	case WIFI_STAT_RX_FRAGS:
	case WIFI_STAT_MCAST_RX:
	case WIFI_STAT_RX_DUPS:
		/* these counters are maintained by net80211 */
		mutex_exit(&sc->sc_glock);
		return (ieee80211_stat(ic, stat, val));
	default:
		mutex_exit(&sc->sc_glock);
		return (ENOTSUP);
	}

	mutex_exit(&sc->sc_glock);

	return (IWP_SUCCESS);

}

/*
 * invoked by GLD to start or open NIC
 */
static int
iwp_m_start(void *arg)
{
	iwp_sc_t *sc;
	ieee80211com_t *ic;
	int err = IWP_FAIL;

	if (NULL == arg) {
		return (EINVAL);
	}
	sc = (iwp_sc_t *)arg;
	ic = &sc->sc_ic;

	err = iwp_init(sc);
	if (err != IWP_SUCCESS) {
		/*
		 * The hw init err(eg. RF is OFF). Return Success to make
		 * the 'plumb' succeed. The iwp_thread() tries to re-init
		 * background.
3511 */ 3512 atomic_or_32(&sc->sc_flags, IWP_F_HW_ERR_RECOVER); 3513 return (IWP_SUCCESS); 3514 } 3515 3516 ieee80211_new_state(ic, IEEE80211_S_INIT, -1); 3517 3518 atomic_or_32(&sc->sc_flags, IWP_F_RUNNING); 3519 3520 return (IWP_SUCCESS); 3521 } 3522 3523 /* 3524 * invoked by GLD to stop or down NIC 3525 */ 3526 static void 3527 iwp_m_stop(void *arg) 3528 { 3529 iwp_sc_t *sc; 3530 ieee80211com_t *ic; 3531 3532 if (NULL == arg) { 3533 return; 3534 } 3535 sc = (iwp_sc_t *)arg; 3536 ic = &sc->sc_ic; 3537 3538 iwp_stop(sc); 3539 3540 /* 3541 * release buffer for calibration 3542 */ 3543 iwp_release_calib_buffer(sc); 3544 3545 ieee80211_new_state(ic, IEEE80211_S_INIT, -1); 3546 3547 atomic_and_32(&sc->sc_flags, ~IWP_F_HW_ERR_RECOVER); 3548 atomic_and_32(&sc->sc_flags, ~IWP_F_RATE_AUTO_CTL); 3549 3550 atomic_and_32(&sc->sc_flags, ~IWP_F_RUNNING); 3551 atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING); 3552 } 3553 3554 /* 3555 * invoked by GLD to configure NIC 3556 */ 3557 static int 3558 iwp_m_unicst(void *arg, const uint8_t *macaddr) 3559 { 3560 iwp_sc_t *sc; 3561 ieee80211com_t *ic; 3562 int err = IWP_SUCCESS; 3563 3564 if (NULL == arg) { 3565 return (EINVAL); 3566 } 3567 sc = (iwp_sc_t *)arg; 3568 ic = &sc->sc_ic; 3569 3570 if (!IEEE80211_ADDR_EQ(ic->ic_macaddr, macaddr)) { 3571 IEEE80211_ADDR_COPY(ic->ic_macaddr, macaddr); 3572 mutex_enter(&sc->sc_glock); 3573 err = iwp_config(sc); 3574 mutex_exit(&sc->sc_glock); 3575 if (err != IWP_SUCCESS) { 3576 cmn_err(CE_WARN, "iwp_m_unicst(): " 3577 "failed to configure device\n"); 3578 goto fail; 3579 } 3580 } 3581 3582 return (err); 3583 3584 fail: 3585 return (err); 3586 } 3587 3588 /* ARGSUSED */ 3589 static int 3590 iwp_m_multicst(void *arg, boolean_t add, const uint8_t *m) 3591 { 3592 return (IWP_SUCCESS); 3593 } 3594 3595 /* ARGSUSED */ 3596 static int 3597 iwp_m_promisc(void *arg, boolean_t on) 3598 { 3599 return (IWP_SUCCESS); 3600 } 3601 3602 /* 3603 * kernel thread to deal with exceptional situation 3604 */ 3605 static 
void
iwp_thread(iwp_sc_t *sc)
{
	ieee80211com_t *ic = &sc->sc_ic;
	clock_t clk;
	int err, n = 0, timeout = 0;
	uint32_t tmp;
#ifdef DEBUG
	int times = 0;	/* number of recovery attempts, for debug trace only */
#endif

	/*
	 * Driver worker thread.  Runs until sc_mf_thread_switch is
	 * cleared.  Each pass: track the RF-kill switch, drive fatal
	 * hardware-error recovery, pace active scanning, run periodic
	 * AMRR rate control, and count down the TX watchdog.
	 */
	while (sc->sc_mf_thread_switch) {
		/*
		 * mirror the hardware RF-kill switch into sc_flags
		 */
		tmp = IWP_READ(sc, CSR_GP_CNTRL);
		if (tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_RADIO_OFF);
		} else {
			atomic_or_32(&sc->sc_flags, IWP_F_RADIO_OFF);
		}

		/*
		 * If in SUSPEND or the RF is OFF, do nothing.
		 */
		if (sc->sc_flags & IWP_F_RADIO_OFF) {
			delay(drv_usectohz(100000));
			continue;
		}

		/*
		 * recovery fatal error
		 */
		if (ic->ic_mach &&
		    (sc->sc_flags & IWP_F_HW_ERR_RECOVER)) {

			IWP_DBG((IWP_DEBUG_FW, "iwp_thread(): "
			    "try to recover fatal hw error: %d\n", times++));

			iwp_stop(sc);

			if (IWP_CHK_FAST_RECOVER(sc)) {
				/* save runtime configuration */
				bcopy(&sc->sc_config, &sc->sc_config_save,
				    sizeof (sc->sc_config));
			} else {
				ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
				/* back off longer after each failed attempt */
				delay(drv_usectohz(2000000 + n*500000));
			}

			err = iwp_init(sc);
			if (err != IWP_SUCCESS) {
				n++;
				/* retry up to 20 times before giving up */
				if (n < 20) {
					continue;
				}
			}

			n = 0;
			if (!err) {
				atomic_or_32(&sc->sc_flags, IWP_F_RUNNING);
			}


			if (!IWP_CHK_FAST_RECOVER(sc) ||
			    iwp_fast_recover(sc) != IWP_SUCCESS) {
				atomic_and_32(&sc->sc_flags,
				    ~IWP_F_HW_ERR_RECOVER);

				delay(drv_usectohz(2000000));
				if (sc->sc_ostate != IEEE80211_S_INIT) {
					ieee80211_new_state(ic,
					    IEEE80211_S_SCAN, 0);
				}
			}
		}

		/*
		 * pace pending scans: give each channel time for
		 * probe responses before stepping to the next one
		 */
		if (ic->ic_mach &&
		    (sc->sc_flags & IWP_F_SCANNING) && sc->sc_scan_pending) {
			IWP_DBG((IWP_DEBUG_SCAN, "iwp_thread(): "
			    "wait for probe response\n"));

			sc->sc_scan_pending--;
			delay(drv_usectohz(200000));
			ieee80211_next_scan(ic);
		}

		/*
		 * rate ctl: run AMRR about once per second
		 */
		if (ic->ic_mach &&
		    (sc->sc_flags & IWP_F_RATE_AUTO_CTL)) {
			clk = ddi_get_lbolt();
			if (clk > sc->sc_clk + drv_usectohz(1000000)) {
				iwp_amrr_timeout(sc);
			}
		}

		delay(drv_usectohz(100000));

		/*
		 * TX watchdog: sc_tx_timer ticks down once per second
		 * (10 passes of 100ms); when it hits zero, request a
		 * hardware error recovery.
		 */
		mutex_enter(&sc->sc_mt_lock);
		if (sc->sc_tx_timer) {
			timeout++;
			if (10 == timeout) {
				sc->sc_tx_timer--;
				if (0 == sc->sc_tx_timer) {
					atomic_or_32(&sc->sc_flags,
					    IWP_F_HW_ERR_RECOVER);
					sc->sc_ostate = IEEE80211_S_RUN;
					IWP_DBG((IWP_DEBUG_FW, "iwp_thread(): "
					    "try to recover from "
					    "send fail\n"));
				}
				timeout = 0;
			}
		}
		mutex_exit(&sc->sc_mt_lock);
	}

	/*
	 * thread is exiting: clear the handle and wake any waiter on
	 * sc_mt_cv (presumably the stop/detach path - confirm caller)
	 */
	mutex_enter(&sc->sc_mt_lock);
	sc->sc_mf_thread = NULL;
	cv_signal(&sc->sc_mt_cv);
	mutex_exit(&sc->sc_mt_lock);
}


/*
 * Send a command to the ucode.
 */
static int
iwp_cmd(iwp_sc_t *sc, int code, const void *buf, int size, int async)
{
	iwp_tx_ring_t *ring = &sc->sc_txq[IWP_CMD_QUEUE_NUM];
	iwp_tx_desc_t *desc;
	iwp_cmd_t *cmd;

	ASSERT(size <= sizeof (cmd->data));
	ASSERT(mutex_owned(&sc->sc_glock));

	IWP_DBG((IWP_DEBUG_CMD, "iwp_cmd() "
	    "code[%d]", code));
	desc = ring->data[ring->cur].desc;
	cmd = ring->data[ring->cur].cmd;

	/* build the command header and copy in the payload */
	cmd->hdr.type = (uint8_t)code;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;
	(void) memcpy(cmd->data, buf, size);
	(void) memset(desc, 0, sizeof (*desc));

	desc->val0 = 1 << 24;
	desc->pa[0].tb1_addr =
	    (uint32_t)(ring->data[ring->cur].paddr_cmd & 0xffffffff);
	desc->pa[0].val1 = ((4 + size) << 4) & 0xfff0;

	if (async) {
		sc->sc_cmd_accum++;
	}

	/*
	 * kick cmd ring XXX
	 */
	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
		    tfd_offset[IWP_QUEUE_SIZE + ring->cur].val = 8;
	}
	ring->cur = (ring->cur + 1) % ring->count;
	IWP_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	if (async) {
		return (IWP_SUCCESS);
	} else {
		clock_t clk;

		/*
		 * synchronous command: wait up to 2 seconds for the
		 * completion flag (set elsewhere, presumably by the
		 * interrupt path - confirm against the ISR).
		 */
		clk = ddi_get_lbolt() + drv_usectohz(2000000);
		while (sc->sc_cmd_flag != SC_CMD_FLG_DONE) {
			if (cv_timedwait(&sc->sc_cmd_cv,
			    &sc->sc_glock, clk) < 0) {
				break;
			}
		}

		if (SC_CMD_FLG_DONE == sc->sc_cmd_flag) {
			sc->sc_cmd_flag = SC_CMD_FLG_NONE;
			return (IWP_SUCCESS);
		} else {
			sc->sc_cmd_flag = SC_CMD_FLG_NONE;
			return (IWP_FAIL);
		}
	}
}

/*
 * require ucode setting led of NIC
 */
static void
iwp_set_led(iwp_sc_t *sc, uint8_t id, uint8_t off, uint8_t on)
{
	iwp_led_cmd_t led;

	led.interval = LE_32(100000); /* unit: 100ms */
	led.id = id;
	led.off = off;
	led.on = on;

	/* asynchronous command; result is not checked */
	(void) iwp_cmd(sc, REPLY_LEDS_CMD, &led, sizeof (led), 1);
}

/*
 * necessary setting to NIC before authentication
 */
static int
iwp_hw_set_before_auth(iwp_sc_t *sc)
{
	ieee80211com_t *ic = &sc->sc_ic;
	ieee80211_node_t *in = ic->ic_bss;
	int err = IWP_FAIL;

	/*
	 * update adapter's configuration according
	 * the info of target AP
	 */
	IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid);
	sc->sc_config.chan = LE_16(ieee80211_chan2ieee(ic, in->in_chan));

	sc->sc_config.ofdm_ht_triple_stream_basic_rates = 0;
	sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0;
	sc->sc_config.ofdm_ht_single_stream_basic_rates = 0;

	/* pick basic-rate bitmaps by band/mode */
	if (IEEE80211_MODE_11B == ic->ic_curmode) {
		sc->sc_config.cck_basic_rates = 0x03;
		sc->sc_config.ofdm_basic_rates = 0;
	} else if ((in->in_chan != IEEE80211_CHAN_ANYC) &&
	    (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) {
		sc->sc_config.cck_basic_rates = 0;
		sc->sc_config.ofdm_basic_rates = 0x15;
	} else { /* assume 802.11b/g */
		sc->sc_config.cck_basic_rates = 0x0f;
		sc->sc_config.ofdm_basic_rates = 0xff;
	}

	sc->sc_config.flags &= ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
	    RXON_FLG_SHORT_SLOT_MSK);

	if (ic->ic_flags & IEEE80211_F_SHSLOT) {
		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_SLOT_MSK);
	} else {
		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_SLOT_MSK);
	}

	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) {
		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
	} else {
		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_PREAMBLE_MSK);
	}

	IWP_DBG((IWP_DEBUG_80211, "iwp_hw_set_before_auth(): "
	    "config chan %d flags %x "
	    "filter_flags %x cck %x ofdm %x"
	    " bssid:%02x:%02x:%02x:%02x:%02x:%2x\n",
	    LE_16(sc->sc_config.chan), LE_32(sc->sc_config.flags),
	    LE_32(sc->sc_config.filter_flags),
	    sc->sc_config.cck_basic_rates, sc->sc_config.ofdm_basic_rates,
	    sc->sc_config.bssid[0], sc->sc_config.bssid[1],
	    sc->sc_config.bssid[2], sc->sc_config.bssid[3],
	    sc->sc_config.bssid[4], sc->sc_config.bssid[5]));

	/* push the updated RXON configuration to the firmware (async) */
	err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config,
	    sizeof (iwp_rxon_cmd_t), 1);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_hw_set_before_auth(): "
		    "failed to config chan%d\n", sc->sc_config.chan);
		return (err);
	}

	/*
	 * add default AP node
	 */
	err = iwp_add_ap_sta(sc);
	if (err != IWP_SUCCESS) {
		return (err);
	}


	return (err);
}

/*
 * Send a scan request(assembly scan cmd) to the firmware.
 */
static int
iwp_scan(iwp_sc_t *sc)
{
	ieee80211com_t *ic = &sc->sc_ic;
	iwp_tx_ring_t *ring = &sc->sc_txq[IWP_CMD_QUEUE_NUM];
	iwp_tx_desc_t *desc;
	iwp_tx_data_t *data;
	iwp_cmd_t *cmd;
	iwp_scan_hdr_t *hdr;
	iwp_scan_chan_t chan;
	struct ieee80211_frame *wh;
	ieee80211_node_t *in = ic->ic_bss;
	uint8_t essid[IEEE80211_NWID_LEN+1];
	struct ieee80211_rateset *rs;
	enum ieee80211_phymode mode;
	uint8_t *frm;
	int i, pktlen, nrates;

	data = &ring->data[ring->cur];
	desc = data->desc;
	cmd = (iwp_cmd_t *)data->dma_data.mem_va;

	cmd->hdr.type = REPLY_SCAN_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur | 0x40;

	/* scan header: one 2.4GHz channel per command */
	hdr = (iwp_scan_hdr_t *)cmd->data;
	(void) memset(hdr, 0, sizeof (iwp_scan_hdr_t));
	hdr->nchan = 1;
	hdr->quiet_time = LE_16(50);
	hdr->quiet_plcp_th = LE_16(1);

	hdr->flags = LE_32(RXON_FLG_BAND_24G_MSK);
	hdr->rx_chain = LE_16(RXON_RX_CHAIN_DRIVER_FORCE_MSK |
	    (0x7 << RXON_RX_CHAIN_VALID_POS) |
	    (0x2 << RXON_RX_CHAIN_FORCE_SEL_POS) |
	    (0x2 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));

	hdr->tx_cmd.tx_flags = LE_32(TX_CMD_FLG_SEQ_CTL_MSK);
	hdr->tx_cmd.sta_id = IWP_BROADCAST_ID;
	hdr->tx_cmd.stop_time.life_time = LE_32(0xffffffff);
	hdr->tx_cmd.rate.r.rate_n_flags = LE_32(iwp_rate_to_plcp(2));
	hdr->tx_cmd.rate.r.rate_n_flags |=
	    LE_32(RATE_MCS_ANT_B_MSK |RATE_MCS_CCK_MSK);
	hdr->direct_scan[0].len = ic->ic_des_esslen;
	hdr->direct_scan[0].id = IEEE80211_ELEMID_SSID;

	hdr->filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
	    RXON_FILTER_BCON_AWARE_MSK);

	/* directed scan when a desired ESSID has been configured */
	if (ic->ic_des_esslen) {
		bcopy(ic->ic_des_essid, essid, ic->ic_des_esslen);
		essid[ic->ic_des_esslen] = '\0';
		IWP_DBG((IWP_DEBUG_SCAN, "iwp_scan(): "
		    "directed scan %s\n", essid));

		bcopy(ic->ic_des_essid, hdr->direct_scan[0].ssid,
		    ic->ic_des_esslen);
	} else {
		bzero(hdr->direct_scan[0].ssid,
		    sizeof (hdr->direct_scan[0].ssid));
	}

	/*
	 * a probe request frame is required after the REPLY_SCAN_CMD
	 */
	wh = (struct ieee80211_frame *)(hdr + 1);
	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	(void) memset(wh->i_addr1, 0xff, 6);	/* broadcast DA */
	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_macaddr);
	(void) memset(wh->i_addr3, 0xff, 6);	/* broadcast BSSID */
	*(uint16_t *)&wh->i_dur[0] = 0;
	*(uint16_t *)&wh->i_seq[0] = 0;

	frm = (uint8_t *)(wh + 1);

	/*
	 * essid IE
	 */
	if (in->in_esslen) {
		bcopy(in->in_essid, essid, in->in_esslen);
		essid[in->in_esslen] = '\0';
		IWP_DBG((IWP_DEBUG_SCAN, "iwp_scan(): "
		    "probe with ESSID %s\n",
		    essid));
	}
	*frm++ = IEEE80211_ELEMID_SSID;
	*frm++ = in->in_esslen;
	(void) memcpy(frm, in->in_essid, in->in_esslen);
	frm += in->in_esslen;

	mode = ieee80211_chan2mode(ic, ic->ic_curchan);
	rs = &ic->ic_sup_rates[mode];

	/*
	 * supported rates IE (at most IEEE80211_RATE_SIZE entries;
	 * the rest go into the extended rates IE below)
	 */
	*frm++ = IEEE80211_ELEMID_RATES;
	nrates = rs->ir_nrates;
	if (nrates > IEEE80211_RATE_SIZE) {
		nrates = IEEE80211_RATE_SIZE;
	}

	*frm++ = (uint8_t)nrates;
	(void) memcpy(frm, rs->ir_rates, nrates);
	frm += nrates;

	/*
	 * supported xrates IE
	 */
	if (rs->ir_nrates > IEEE80211_RATE_SIZE) {
		nrates = rs->ir_nrates - IEEE80211_RATE_SIZE;
		*frm++ = IEEE80211_ELEMID_XRATES;
		*frm++ = (uint8_t)nrates;
		(void) memcpy(frm, rs->ir_rates + IEEE80211_RATE_SIZE, nrates);
		frm += nrates;
	}

	/*
	 * optional IE (usually for wpa)
	 */
	if (ic->ic_opt_ie != NULL) {
		(void) memcpy(frm, ic->ic_opt_ie, ic->ic_opt_ie_len);
		frm += ic->ic_opt_ie_len;
	}

	/* setup length of probe request */
hdr->tx_cmd.len = LE_16(_PTRDIFF(frm, wh)); 4026 hdr->len = LE_16(hdr->nchan * sizeof (iwp_scan_chan_t) + 4027 LE_16(hdr->tx_cmd.len) + sizeof (iwp_scan_hdr_t)); 4028 4029 /* 4030 * the attribute of the scan channels are required after the probe 4031 * request frame. 4032 */ 4033 for (i = 1; i <= hdr->nchan; i++) { 4034 if (ic->ic_des_esslen) { 4035 chan.type = LE_32(3); 4036 } else { 4037 chan.type = LE_32(1); 4038 } 4039 4040 chan.chan = LE_16(ieee80211_chan2ieee(ic, ic->ic_curchan)); 4041 chan.tpc.tx_gain = 0x28; 4042 chan.tpc.dsp_atten = 110; 4043 chan.active_dwell = LE_16(50); 4044 chan.passive_dwell = LE_16(120); 4045 4046 bcopy(&chan, frm, sizeof (iwp_scan_chan_t)); 4047 frm += sizeof (iwp_scan_chan_t); 4048 } 4049 4050 pktlen = _PTRDIFF(frm, cmd); 4051 4052 (void) memset(desc, 0, sizeof (*desc)); 4053 desc->val0 = 1 << 24; 4054 desc->pa[0].tb1_addr = 4055 (uint32_t)(data->dma_data.cookie.dmac_address & 0xffffffff); 4056 desc->pa[0].val1 = (pktlen << 4) & 0xfff0; 4057 4058 /* 4059 * maybe for cmd, filling the byte cnt table is not necessary. 4060 * anyway, we fill it here. 4061 */ 4062 sc->sc_shared->queues_byte_cnt_tbls[ring->qid] 4063 .tfd_offset[ring->cur].val = 8; 4064 if (ring->cur < IWP_MAX_WIN_SIZE) { 4065 sc->sc_shared->queues_byte_cnt_tbls[ring->qid]. 4066 tfd_offset[IWP_QUEUE_SIZE + ring->cur].val = 8; 4067 } 4068 4069 /* 4070 * kick cmd ring 4071 */ 4072 ring->cur = (ring->cur + 1) % ring->count; 4073 IWP_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 4074 4075 return (IWP_SUCCESS); 4076 } 4077 4078 /* 4079 * configure NIC by using ucode commands after loading ucode. 4080 */ 4081 static int 4082 iwp_config(iwp_sc_t *sc) 4083 { 4084 ieee80211com_t *ic = &sc->sc_ic; 4085 iwp_powertable_cmd_t powertable; 4086 iwp_bt_cmd_t bt; 4087 iwp_add_sta_t node; 4088 iwp_rem_sta_t rm_sta; 4089 const uint8_t bcast[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; 4090 int err = IWP_FAIL; 4091 4092 /* 4093 * set power mode. 
Disable power management at present, do it later 4094 */ 4095 (void) memset(&powertable, 0, sizeof (powertable)); 4096 powertable.flags = LE_16(0x8); 4097 err = iwp_cmd(sc, POWER_TABLE_CMD, &powertable, 4098 sizeof (powertable), 0); 4099 if (err != IWP_SUCCESS) { 4100 cmn_err(CE_WARN, "iwp_config(): " 4101 "failed to set power mode\n"); 4102 return (err); 4103 } 4104 4105 /* 4106 * configure bt coexistence 4107 */ 4108 (void) memset(&bt, 0, sizeof (bt)); 4109 bt.flags = 3; 4110 bt.lead_time = 0xaa; 4111 bt.max_kill = 1; 4112 err = iwp_cmd(sc, REPLY_BT_CONFIG, &bt, 4113 sizeof (bt), 0); 4114 if (err != IWP_SUCCESS) { 4115 cmn_err(CE_WARN, "iwp_config(): " 4116 "failed to configurate bt coexistence\n"); 4117 return (err); 4118 } 4119 4120 /* 4121 * configure rxon 4122 */ 4123 (void) memset(&sc->sc_config, 0, sizeof (iwp_rxon_cmd_t)); 4124 IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr); 4125 IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr); 4126 sc->sc_config.chan = LE_16(ieee80211_chan2ieee(ic, ic->ic_curchan)); 4127 sc->sc_config.flags = LE_32(RXON_FLG_BAND_24G_MSK); 4128 sc->sc_config.flags &= LE_32(~(RXON_FLG_CHANNEL_MODE_MIXED_MSK | 4129 RXON_FLG_CHANNEL_MODE_PURE_40_MSK)); 4130 4131 switch (ic->ic_opmode) { 4132 case IEEE80211_M_STA: 4133 sc->sc_config.dev_type = RXON_DEV_TYPE_ESS; 4134 sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK | 4135 RXON_FILTER_DIS_DECRYPT_MSK | 4136 RXON_FILTER_DIS_GRP_DECRYPT_MSK); 4137 break; 4138 case IEEE80211_M_IBSS: 4139 case IEEE80211_M_AHDEMO: 4140 sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS; 4141 4142 sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK); 4143 sc->sc_config.filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK | 4144 RXON_FILTER_DIS_DECRYPT_MSK | 4145 RXON_FILTER_DIS_GRP_DECRYPT_MSK); 4146 break; 4147 case IEEE80211_M_HOSTAP: 4148 sc->sc_config.dev_type = RXON_DEV_TYPE_AP; 4149 break; 4150 case IEEE80211_M_MONITOR: 4151 sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER; 4152 
sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK | 4153 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK); 4154 break; 4155 } 4156 4157 /* 4158 * Support all CCK rates. 4159 */ 4160 sc->sc_config.cck_basic_rates = 0x0f; 4161 4162 /* 4163 * Support all OFDM rates. 4164 */ 4165 sc->sc_config.ofdm_basic_rates = 0xff; 4166 4167 sc->sc_config.rx_chain = LE_16(RXON_RX_CHAIN_DRIVER_FORCE_MSK | 4168 (0x7 << RXON_RX_CHAIN_VALID_POS) | 4169 (0x2 << RXON_RX_CHAIN_FORCE_SEL_POS) | 4170 (0x2 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS)); 4171 4172 err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config, 4173 sizeof (iwp_rxon_cmd_t), 0); 4174 if (err != IWP_SUCCESS) { 4175 cmn_err(CE_WARN, "iwp_config(): " 4176 "failed to set configure command\n"); 4177 return (err); 4178 } 4179 4180 /* 4181 * remove all nodes in NIC 4182 */ 4183 (void) memset(&rm_sta, 0, sizeof (rm_sta)); 4184 rm_sta.num_sta = 1; 4185 (void) memcpy(rm_sta.addr, bcast, 6); 4186 4187 err = iwp_cmd(sc, REPLY_REMOVE_STA, &rm_sta, sizeof (iwp_rem_sta_t), 0); 4188 if (err != IWP_SUCCESS) { 4189 cmn_err(CE_WARN, "iwp_config(): " 4190 "failed to remove broadcast node in hardware.\n"); 4191 return (err); 4192 } 4193 4194 /* 4195 * add broadcast node so that we can send broadcast frame 4196 */ 4197 (void) memset(&node, 0, sizeof (node)); 4198 (void) memset(node.sta.addr, 0xff, 6); 4199 node.mode = 0; 4200 node.sta.sta_id = IWP_BROADCAST_ID; 4201 node.station_flags = 0; 4202 4203 err = iwp_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 0); 4204 if (err != IWP_SUCCESS) { 4205 cmn_err(CE_WARN, "iwp_config(): " 4206 "failed to add broadcast node\n"); 4207 return (err); 4208 } 4209 4210 return (err); 4211 } 4212 4213 /* 4214 * quiesce(9E) entry point. 4215 * This function is called when the system is single-threaded at high 4216 * PIL with preemption disabled. Therefore, this function must not be 4217 * blocked. 4218 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
iwp_quiesce(dev_info_t *dip)
{
	iwp_sc_t *sc;

	sc = ddi_get_soft_state(iwp_soft_state_p, ddi_get_instance(dip));
	if (NULL == sc) {
		return (DDI_FAILURE);
	}

#ifdef DEBUG
	/* by pass any messages, if it's quiesce */
	iwp_dbg_flags = 0;
#endif

	/*
	 * No more blocking is allowed while we are in the
	 * quiesce(9E) entry point.
	 */
	atomic_or_32(&sc->sc_flags, IWP_F_QUIESCED);

	/*
	 * Disable and mask all interrupts.
	 */
	iwp_stop(sc);

	return (DDI_SUCCESS);
}

/*
 * request the DMA master to stop and poll (up to 2000ms) until the
 * hardware acknowledges, unless the MAC is in power-save mode.
 */
static void
iwp_stop_master(iwp_sc_t *sc)
{
	uint32_t tmp;
	int n;

	tmp = IWP_READ(sc, CSR_RESET);
	IWP_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_STOP_MASTER);

	tmp = IWP_READ(sc, CSR_GP_CNTRL);
	if ((tmp & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE) ==
	    CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE) {
		return;
	}

	for (n = 0; n < 2000; n++) {
		if (IWP_READ(sc, CSR_RESET) &
		    CSR_RESET_REG_FLAG_MASTER_DISABLED) {
			break;
		}
		DELAY(1000);
	}

#ifdef DEBUG
	if (2000 == n) {
		IWP_DBG((IWP_DEBUG_HW, "iwp_stop_master(): "
		    "timeout waiting for master stop\n"));
	}
#endif
}

/*
 * switch the adapter power source to VMAIN; always succeeds.
 */
static int
iwp_power_up(iwp_sc_t *sc)
{
	uint32_t tmp;

	iwp_mac_access_enter(sc);
	tmp = iwp_reg_read(sc, ALM_APMG_PS_CTL);
	tmp &= ~APMG_PS_CTRL_REG_MSK_POWER_SRC;
	tmp |= APMG_PS_CTRL_REG_VAL_POWER_SRC_VMAIN;
	iwp_reg_write(sc, ALM_APMG_PS_CTL, tmp);
	iwp_mac_access_exit(sc);

	DELAY(5000);
	return (IWP_SUCCESS);
}

/*
 * hardware initialization
 */
static int
iwp_preinit(iwp_sc_t *sc)
{
	int n;
	uint8_t vlink;
	uint16_t radio_cfg;
	uint32_t tmp;

	/*
	 * clear any pending interrupts
	 */
	IWP_WRITE(sc, CSR_INT, 0xffffffff);

	tmp = IWP_READ(sc, CSR_GIO_CHICKEN_BITS);
	IWP_WRITE(sc, CSR_GIO_CHICKEN_BITS,
	    tmp | CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	tmp = IWP_READ(sc, CSR_GP_CNTRL);
	IWP_WRITE(sc, CSR_GP_CNTRL, tmp | CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * wait for clock ready
	 */
	for (n = 0; n < 1000; n++) {
		if (IWP_READ(sc, CSR_GP_CNTRL) &
		    CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) {
			break;
		}
		DELAY(10);
	}

	if (1000 == n) {
		return (ETIMEDOUT);
	}

	iwp_mac_access_enter(sc);

	iwp_reg_write(sc, ALM_APMG_CLK_EN, APMG_CLK_REG_VAL_DMA_CLK_RQT);

	DELAY(20);
	/* disable L1-Active to avoid PCIe power-state issues */
	tmp = iwp_reg_read(sc, ALM_APMG_PCIDEV_STT);
	iwp_reg_write(sc, ALM_APMG_PCIDEV_STT, tmp |
	    APMG_DEV_STATE_REG_VAL_L1_ACTIVE_DISABLE);
	iwp_mac_access_exit(sc);

	/* program radio type/step/dash from the EEPROM into the HW config */
	radio_cfg = IWP_READ_EEP_SHORT(sc, EEP_SP_RADIO_CONFIGURATION);
	if (SP_RADIO_TYPE_MSK(radio_cfg) < SP_RADIO_TYPE_MAX) {
		tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
		IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG,
		    tmp | SP_RADIO_TYPE_MSK(radio_cfg) |
		    SP_RADIO_STEP_MSK(radio_cfg) |
		    SP_RADIO_DASH_MSK(radio_cfg));
	} else {
		cmn_err(CE_WARN, "iwp_preinit(): "
		    "radio configuration information in eeprom is wrong\n");
		return (IWP_FAIL);
	}


	IWP_WRITE(sc, CSR_INT_COALESCING, 512 / 32);

	(void) iwp_power_up(sc);

	/* hardware-revision specific PCI config-space workaround */
	if ((sc->sc_rev & 0x80) == 0x80 && (sc->sc_rev & 0x7f) < 8) {
		tmp = ddi_get32(sc->sc_cfg_handle,
		    (uint32_t *)(sc->sc_cfg_base + 0xe8));
		ddi_put32(sc->sc_cfg_handle,
		    (uint32_t *)(sc->sc_cfg_base + 0xe8),
		    tmp & ~(1 << 11));
	}

	vlink = ddi_get8(sc->sc_cfg_handle,
	    (uint8_t *)(sc->sc_cfg_base + 0xf0));
	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0xf0),
	    vlink & ~2);

	tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
	tmp |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
	    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
	IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG, tmp);

	/*
	 * make sure power supply on each part of the hardware
	 */
	iwp_mac_access_enter(sc);
	tmp = iwp_reg_read(sc, ALM_APMG_PS_CTL);
	tmp |= APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
	iwp_reg_write(sc, ALM_APMG_PS_CTL, tmp);
	DELAY(5);

	tmp = iwp_reg_read(sc, ALM_APMG_PS_CTL);
	tmp &= ~APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
	iwp_reg_write(sc, ALM_APMG_PS_CTL, tmp);
	iwp_mac_access_exit(sc);

	/* select radio SKU according to the power-amplifier type */
	if (PA_TYPE_MIX == sc->sc_chip_param.pa_type) {
		IWP_WRITE(sc, CSR_GP_DRIVER_REG,
		    CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_MIX);
	}

	if (PA_TYPE_INTER == sc->sc_chip_param.pa_type) {

		IWP_WRITE(sc, CSR_GP_DRIVER_REG,
		    CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
	}

	return (IWP_SUCCESS);
}

/*
 * set up semaphore flag to own EEPROM
 */
static int
iwp_eep_sem_down(iwp_sc_t *sc)
{
	int count1, count2;
	uint32_t tmp;

	for (count1 = 0; count1 < 1000; count1++) {
		tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
		IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG,
		    tmp | CSR_HW_IF_CONFIG_REG_EEP_SEM);

		/* the semaphore bit reads back as set once owned */
		for (count2 = 0; count2 < 2; count2++) {
			if (IWP_READ(sc, CSR_HW_IF_CONFIG_REG) &
			    CSR_HW_IF_CONFIG_REG_EEP_SEM) {
				return (IWP_SUCCESS);
			}
			DELAY(10000);
		}
	}
	return (IWP_FAIL);
}

/*
 * reset semaphore flag to release EEPROM
 */
static void
iwp_eep_sem_up(iwp_sc_t *sc)
{
	uint32_t tmp;

	tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
	IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG,
	    tmp & (~CSR_HW_IF_CONFIG_REG_EEP_SEM));
}

/*
 * This function read all information from eeprom
 */
static int
iwp_eep_load(iwp_sc_t *sc)
{
	int i, rr;
	uint32_t rv, tmp, eep_gp;
	uint16_t addr, eep_sz = sizeof
	    (sc->sc_eep_map);
	uint16_t *eep_p = (uint16_t *)&sc->sc_eep_map;

	/*
	 * read eeprom gp register in CSR
	 */
	eep_gp = IWP_READ(sc, CSR_EEPROM_GP);
	if ((eep_gp & CSR_EEPROM_GP_VALID_MSK) ==
	    CSR_EEPROM_GP_BAD_SIGNATURE) {
		IWP_DBG((IWP_DEBUG_EEPROM, "iwp_eep_load(): "
		    "not find eeprom\n"));
		return (IWP_FAIL);
	}

	rr = iwp_eep_sem_down(sc);
	if (rr != 0) {
		IWP_DBG((IWP_DEBUG_EEPROM, "iwp_eep_load(): "
		    "driver failed to own EEPROM\n"));
		return (IWP_FAIL);
	}

	/* read the whole EEPROM map one 16-bit word at a time */
	for (addr = 0; addr < eep_sz; addr += 2) {
		IWP_WRITE(sc, CSR_EEPROM_REG, addr<<1);
		tmp = IWP_READ(sc, CSR_EEPROM_REG);
		IWP_WRITE(sc, CSR_EEPROM_REG, tmp & ~(0x2));

		/* poll the read-valid bit (bit 0) for up to 100us */
		for (i = 0; i < 10; i++) {
			rv = IWP_READ(sc, CSR_EEPROM_REG);
			if (rv & 1) {
				break;
			}
			DELAY(10);
		}

		if (!(rv & 1)) {
			IWP_DBG((IWP_DEBUG_EEPROM, "iwp_eep_load(): "
			    "time out when read eeprome\n"));
			iwp_eep_sem_up(sc);
			return (IWP_FAIL);
		}

		/* data is returned in the upper 16 bits of the register */
		eep_p[addr/2] = LE_16(rv >> 16);
	}

	iwp_eep_sem_up(sc);
	return (IWP_SUCCESS);
}

/*
 * initialize mac address in ieee80211com_t struct
 */
static void
iwp_get_mac_from_eep(iwp_sc_t *sc)
{
	ieee80211com_t *ic = &sc->sc_ic;

	IEEE80211_ADDR_COPY(ic->ic_macaddr, &sc->sc_eep_map[EEP_MAC_ADDRESS]);

	IWP_DBG((IWP_DEBUG_EEPROM, "iwp_get_mac_from_eep(): "
	    "mac:%2x:%2x:%2x:%2x:%2x:%2x\n",
	    ic->ic_macaddr[0], ic->ic_macaddr[1], ic->ic_macaddr[2],
	    ic->ic_macaddr[3], ic->ic_macaddr[4], ic->ic_macaddr[5]));
}

/*
 * main initialization function: runs the firmware "init" segment,
 * resets the chipset, then loads and starts the "run" segment and
 * finally configures the hardware through the ucode API.
 */
static int
iwp_init(iwp_sc_t *sc)
{
	int err = IWP_FAIL;
	clock_t clk;

	/*
	 * release buffer for calibration
	 */
	iwp_release_calib_buffer(sc);

	mutex_enter(&sc->sc_glock);
	atomic_and_32(&sc->sc_flags, ~IWP_F_FW_INIT);

	err = iwp_init_common(sc);
	if (err != IWP_SUCCESS) {
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	/*
	 * backup ucode data part for future use.
	 */
	(void) memcpy(sc->sc_dma_fw_data_bak.mem_va,
	    sc->sc_dma_fw_data.mem_va,
	    sc->sc_dma_fw_data.alength);

	/* load firmware init segment into NIC */
	err = iwp_load_init_firmware(sc);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_init(): "
		    "failed to setup init firmware\n");
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	/*
	 * now press "execute" start running
	 */
	IWP_WRITE(sc, CSR_RESET, 0);

	/* wait up to 1 second for the firmware "alive" notification */
	clk = ddi_get_lbolt() + drv_usectohz(1000000);
	while (!(sc->sc_flags & IWP_F_FW_INIT)) {
		if (cv_timedwait(&sc->sc_ucode_cv,
		    &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_FW_INIT)) {
		cmn_err(CE_WARN, "iwp_init(): "
		    "failed to process init alive.\n");
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	mutex_exit(&sc->sc_glock);

	/*
	 * stop chipset for initializing chipset again
	 */
	iwp_stop(sc);

	mutex_enter(&sc->sc_glock);
	atomic_and_32(&sc->sc_flags, ~IWP_F_FW_INIT);

	err = iwp_init_common(sc);
	if (err != IWP_SUCCESS) {
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	/*
	 * load firmware run segment into NIC
	 */
	err = iwp_load_run_firmware(sc);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_init(): "
		    "failed to setup run firmware\n");
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	/*
	 * now press "execute" start running
	 */
	IWP_WRITE(sc, CSR_RESET, 0);

	/* wait up to 1 second for the runtime "alive" notification */
	clk = ddi_get_lbolt() + drv_usectohz(1000000);
	while (!(sc->sc_flags & IWP_F_FW_INIT)) {
		if (cv_timedwait(&sc->sc_ucode_cv,
		    &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags &
	    IWP_F_FW_INIT)) {
		cmn_err(CE_WARN, "iwp_init(): "
		    "failed to process runtime alive.\n");
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	mutex_exit(&sc->sc_glock);

	DELAY(1000);

	mutex_enter(&sc->sc_glock);
	atomic_and_32(&sc->sc_flags, ~IWP_F_FW_INIT);

	/*
	 * at this point, the firmware is loaded OK, then config the hardware
	 * with the ucode API, including rxon, txpower, etc.
	 */
	err = iwp_config(sc);
	if (err) {
		cmn_err(CE_WARN, "iwp_init(): "
		    "failed to configure device\n");
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	/*
	 * at this point, hardware may receive beacons :)
	 */
	mutex_exit(&sc->sc_glock);
	return (IWP_SUCCESS);
}

/*
 * stop or disable NIC
 */
static void
iwp_stop(iwp_sc_t *sc)
{
	uint32_t tmp;
	int i;

	/* by pass if it's quiesced (no blocking allowed at quiesce time) */
	if (!(sc->sc_flags & IWP_F_QUIESCED)) {
		mutex_enter(&sc->sc_glock);
	}

	IWP_WRITE(sc, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
	/*
	 * disable interrupts
	 */
	IWP_WRITE(sc, CSR_INT_MASK, 0);
	IWP_WRITE(sc, CSR_INT, CSR_INI_SET_MASK);
	IWP_WRITE(sc, CSR_FH_INT_STATUS, 0xffffffff);

	/*
	 * reset all Tx rings
	 */
	for (i = 0; i < IWP_NUM_QUEUES; i++) {
		iwp_reset_tx_ring(sc, &sc->sc_txq[i]);
	}

	/*
	 * reset Rx ring
	 */
	iwp_reset_rx_ring(sc);

	iwp_mac_access_enter(sc);
	iwp_reg_write(sc, ALM_APMG_CLK_DIS, APMG_CLK_REG_VAL_DMA_CLK_RQT);
	iwp_mac_access_exit(sc);

	DELAY(5);

	iwp_stop_master(sc);

	/* cancel the TX watchdog */
	mutex_enter(&sc->sc_mt_lock);
	sc->sc_tx_timer = 0;
	mutex_exit(&sc->sc_mt_lock);

	tmp = IWP_READ(sc, CSR_RESET);
	IWP_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_SW_RESET);

	/* by pass if it's quiesced */
	if (!(sc->sc_flags & IWP_F_QUIESCED)) {
		mutex_exit(&sc->sc_glock);
	}
}

/*
 * Naive implementation of the Adaptive Multi Rate Retry algorithm:
 * "IEEE 802.11 Rate Adaptation: A Practical Approach"
 * Mathieu Lacage, Hossein Manshaei, Thierry Turletti
 * INRIA Sophia - Projet Planete
 * http://www-sop.inria.fr/rapports/sophia/RR-5208.html
 */
#define	is_success(amrr) \
	((amrr)->retrycnt < (amrr)->txcnt / 10)
#define	is_failure(amrr) \
	((amrr)->retrycnt > (amrr)->txcnt / 3)
#define	is_enough(amrr) \
	((amrr)->txcnt > 200)
#define	not_very_few(amrr) \
	((amrr)->txcnt > 40)
#define	is_min_rate(in) \
	(0 == (in)->in_txrate)
#define	is_max_rate(in) \
	((in)->in_rates.ir_nrates - 1 == (in)->in_txrate)
#define	increase_rate(in) \
	((in)->in_txrate++)
#define	decrease_rate(in) \
	((in)->in_txrate--)
#define	reset_cnt(amrr) \
	{ (amrr)->txcnt = (amrr)->retrycnt = 0; }

#define	IWP_AMRR_MIN_SUCCESS_THRESHOLD	1
#define	IWP_AMRR_MAX_SUCCESS_THRESHOLD	15

/*
 * reset AMRR state to its defaults for a node
 */
static void
iwp_amrr_init(iwp_amrr_t *amrr)
{
	amrr->success = 0;
	amrr->recovery = 0;
	amrr->txcnt = amrr->retrycnt = 0;
	amrr->success_threshold = IWP_AMRR_MIN_SUCCESS_THRESHOLD;
}

/*
 * periodic AMRR tick, driven from iwp_thread(): re-evaluate the TX
 * rate of the BSS node (STA mode) or every known node otherwise.
 */
static void
iwp_amrr_timeout(iwp_sc_t *sc)
{
	ieee80211com_t *ic = &sc->sc_ic;

	IWP_DBG((IWP_DEBUG_RATECTL, "iwp_amrr_timeout(): "
	    "enter\n"));

	if (IEEE80211_M_STA == ic->ic_opmode) {
		iwp_amrr_ratectl(NULL, ic->ic_bss);
	} else {
		ieee80211_iterate_nodes(&ic->ic_sta, iwp_amrr_ratectl, NULL);
	}

	sc->sc_clk = ddi_get_lbolt();
}

/* ARGSUSED */
static void
iwp_amrr_ratectl(void *arg, ieee80211_node_t *in)
{
	/* AMRR state is overlaid on the node structure */
	iwp_amrr_t *amrr = (iwp_amrr_t *)in;
	int need_change = 0;

	if (is_success(amrr) && is_enough(amrr)) {
		amrr->success++;
		if (amrr->success >= amrr->success_threshold &&
!is_max_rate(in)) { 4772 amrr->recovery = 1; 4773 amrr->success = 0; 4774 increase_rate(in); 4775 IWP_DBG((IWP_DEBUG_RATECTL, "iwp_amrr_ratectl(): " 4776 "AMRR increasing rate %d " 4777 "(txcnt=%d retrycnt=%d)\n", 4778 in->in_txrate, amrr->txcnt, 4779 amrr->retrycnt)); 4780 need_change = 1; 4781 } else { 4782 amrr->recovery = 0; 4783 } 4784 } else if (not_very_few(amrr) && is_failure(amrr)) { 4785 amrr->success = 0; 4786 if (!is_min_rate(in)) { 4787 if (amrr->recovery) { 4788 amrr->success_threshold++; 4789 if (amrr->success_threshold > 4790 IWP_AMRR_MAX_SUCCESS_THRESHOLD) { 4791 amrr->success_threshold = 4792 IWP_AMRR_MAX_SUCCESS_THRESHOLD; 4793 } 4794 } else { 4795 amrr->success_threshold = 4796 IWP_AMRR_MIN_SUCCESS_THRESHOLD; 4797 } 4798 decrease_rate(in); 4799 IWP_DBG((IWP_DEBUG_RATECTL, "iwp_amrr_ratectl(): " 4800 "AMRR decreasing rate %d " 4801 "(txcnt=%d retrycnt=%d)\n", 4802 in->in_txrate, amrr->txcnt, 4803 amrr->retrycnt)); 4804 need_change = 1; 4805 } 4806 amrr->recovery = 0; /* paper is incorrect */ 4807 } 4808 4809 if (is_enough(amrr) || need_change) { 4810 reset_cnt(amrr); 4811 } 4812 } 4813 4814 /* 4815 * translate indirect address in eeprom to direct address 4816 * in eeprom and return address of entry whos indirect address 4817 * is indi_addr 4818 */ 4819 static uint8_t * 4820 iwp_eep_addr_trans(iwp_sc_t *sc, uint32_t indi_addr) 4821 { 4822 uint32_t di_addr; 4823 uint16_t temp; 4824 4825 if (!(indi_addr & INDIRECT_ADDRESS)) { 4826 di_addr = indi_addr; 4827 return (&sc->sc_eep_map[di_addr]); 4828 } 4829 4830 switch (indi_addr & INDIRECT_TYPE_MSK) { 4831 case INDIRECT_GENERAL: 4832 temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_GENERAL); 4833 break; 4834 case INDIRECT_HOST: 4835 temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_HOST); 4836 break; 4837 case INDIRECT_REGULATORY: 4838 temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_REGULATORY); 4839 break; 4840 case INDIRECT_CALIBRATION: 4841 temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_CALIBRATION); 4842 break; 4843 case 
INDIRECT_PROCESS_ADJST:
		temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_PROCESS_ADJST);
		break;
	case INDIRECT_OTHERS:
		temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_OTHERS);
		break;
	default:
		temp = 0;
		cmn_err(CE_WARN, "iwp_eep_addr_trans(): "
		    "incorrect indirect eeprom address.\n");
		break;
	}

	/* link word is in 16-bit units, hence the << 1 byte conversion */
	di_addr = (indi_addr & ADDRESS_MSK) + (temp << 1);

	return (&sc->sc_eep_map[di_addr]);
}

/*
 * load a section of ucode into NIC
 *
 * addr_s is the DMA (source) address of the segment, addr_d the
 * destination SRAM address, len the segment length in bytes.  The
 * transfer is done through the service DMA channel; always returns
 * IWP_SUCCESS (completion is signalled elsewhere via interrupt).
 */
static int
iwp_put_seg_fw(iwp_sc_t *sc, uint32_t addr_s, uint32_t addr_d, uint32_t len)
{

	iwp_mac_access_enter(sc);

	/* pause the service channel before reprogramming it */
	IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(IWP_FH_SRVC_CHNL),
	    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	IWP_WRITE(sc, IWP_FH_SRVC_CHNL_SRAM_ADDR_REG(IWP_FH_SRVC_CHNL), addr_d);

	IWP_WRITE(sc, IWP_FH_TFDIB_CTRL0_REG(IWP_FH_SRVC_CHNL),
	    (addr_s & FH_MEM_TFDIB_DRAM_ADDR_LSB_MASK));

	IWP_WRITE(sc, IWP_FH_TFDIB_CTRL1_REG(IWP_FH_SRVC_CHNL), len);

	/* one TFD buffer, index 1, marked valid */
	IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_BUF_STS_REG(IWP_FH_SRVC_CHNL),
	    (1 << IWP_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
	    (1 << IWP_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
	    IWP_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	/* kick the DMA engine */
	IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(IWP_FH_SRVC_CHNL),
	    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
	    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL |
	    IWP_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwp_mac_access_exit(sc);

	return (IWP_SUCCESS);
}

/*
 * necessary setting during alive notification
 *
 * Programs the scheduler (SCD) registers so TX queues work, then
 * sends the WiMAX coexistence and crystal frequency calibration
 * commands to the firmware.  Returns IWP_SUCCESS or the failing
 * command's error code.
 */
static int
iwp_alive_common(iwp_sc_t *sc)
{
	uint32_t base;
	uint32_t i;
	iwp_wimax_coex_cmd_t w_cmd;
	iwp_calibration_crystal_cmd_t c_cmd;
	uint32_t rv = IWP_FAIL;

	/*
	 * initialize SCD related registers to make TX work.
*/
	iwp_mac_access_enter(sc);

	/*
	 * read sram address of data base.
	 */
	sc->sc_scd_base = iwp_reg_read(sc, IWP_SCD_SRAM_BASE_ADDR);

	/* zero the SCD context area, word by word */
	for (base = sc->sc_scd_base + IWP_SCD_CONTEXT_DATA_OFFSET;
	    base < sc->sc_scd_base + IWP_SCD_TX_STTS_BITMAP_OFFSET;
	    base += 4) {
		iwp_mem_write(sc, base, 0);
	}

	/* zero the TX status bitmap area */
	for (; base < sc->sc_scd_base + IWP_SCD_TRANSLATE_TBL_OFFSET;
	    base += 4) {
		iwp_mem_write(sc, base, 0);
	}

	/* zero the translate table (one uint16_t per queue) */
	for (i = 0; i < sizeof (uint16_t) * IWP_NUM_QUEUES; i += 4) {
		iwp_mem_write(sc, base + i, 0);
	}

	/* DRAM scheduler base, in 1KB units (hence >> 10) */
	iwp_reg_write(sc, IWP_SCD_DRAM_BASE_ADDR,
	    sc->sc_dma_sh.cookie.dmac_address >> 10);

	iwp_reg_write(sc, IWP_SCD_QUEUECHAIN_SEL,
	    IWP_SCD_QUEUECHAIN_SEL_ALL(IWP_NUM_QUEUES));

	iwp_reg_write(sc, IWP_SCD_AGGR_SEL, 0);

	/* reset read/write pointers and per-queue window/frame limits */
	for (i = 0; i < IWP_NUM_QUEUES; i++) {
		iwp_reg_write(sc, IWP_SCD_QUEUE_RDPTR(i), 0);
		IWP_WRITE(sc, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwp_mem_write(sc, sc->sc_scd_base +
		    IWP_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwp_mem_write(sc, sc->sc_scd_base +
		    IWP_SCD_CONTEXT_QUEUE_OFFSET(i) +
		    sizeof (uint32_t),
		    ((SCD_WIN_SIZE << IWP_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWP_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((SCD_FRAME_LIMIT <<
		    IWP_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWP_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	/* enable scheduler interrupts for all queues */
	iwp_reg_write(sc, IWP_SCD_INTERRUPT_MASK, (1 << IWP_NUM_QUEUES) - 1);

	iwp_reg_write(sc, (IWP_SCD_BASE + 0x10),
	    SCD_TXFACT_REG_TXFIFO_MASK(0, 7));

	IWP_WRITE(sc, HBUS_TARG_WRPTR, (IWP_CMD_QUEUE_NUM << 8));
	iwp_reg_write(sc, IWP_SCD_QUEUE_RDPTR(IWP_CMD_QUEUE_NUM), 0);

	/*
	 * queue 0-7 map to FIFO 0-7 and
	 * all queues work under FIFO mode(none-scheduler_ack)
	 */
	for (i = 0; i < 4; i++) {
		iwp_reg_write(sc, IWP_SCD_QUEUE_STATUS_BITS(i),
		    (1 << IWP_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    ((3-i) <<
IWP_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWP_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWP_SCD_QUEUE_STTS_REG_MSK);
	}

	/* the command queue gets its dedicated FIFO */
	iwp_reg_write(sc, IWP_SCD_QUEUE_STATUS_BITS(IWP_CMD_QUEUE_NUM),
	    (1 << IWP_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
	    (IWP_CMD_FIFO_NUM << IWP_SCD_QUEUE_STTS_REG_POS_TXF) |
	    (1 << IWP_SCD_QUEUE_STTS_REG_POS_WSL) |
	    IWP_SCD_QUEUE_STTS_REG_MSK);

	/* remaining queues map straight onto their FIFO number */
	for (i = 5; i < 7; i++) {
		iwp_reg_write(sc, IWP_SCD_QUEUE_STATUS_BITS(i),
		    (1 << IWP_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (i << IWP_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWP_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWP_SCD_QUEUE_STTS_REG_MSK);
	}

	iwp_mac_access_exit(sc);

	/* tell firmware how to prioritize WiFi vs. WiMAX traffic */
	(void) memset(&w_cmd, 0, sizeof (w_cmd));

	rv = iwp_cmd(sc, COEX_PRIORITY_TABLE_CMD, &w_cmd, sizeof (w_cmd), 1);
	if (rv != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alive_common(): "
		    "failed to send wimax coexist command.\n");
		return (rv);
	}

	/* crystal frequency calibration, from the EEPROM xtal values */
	(void) memset(&c_cmd, 0, sizeof (c_cmd));

	c_cmd.opCode = PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
	c_cmd.data.cap_pin1 = LE_16(sc->sc_eep_calib->xtal_calib[0]);
	c_cmd.data.cap_pin2 = LE_16(sc->sc_eep_calib->xtal_calib[1]);

	rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD, &c_cmd, sizeof (c_cmd), 1);
	if (rv != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alive_common(): "
		    "failed to send crystal frq calibration command.\n");
		return (rv);
	}

	/*
	 * make sure crystal frequency calibration ready
	 * before next operations.
5016 */ 5017 DELAY(1000); 5018 5019 return (IWP_SUCCESS); 5020 } 5021 5022 /* 5023 * save results of calibration from ucode 5024 */ 5025 static void 5026 iwp_save_calib_result(iwp_sc_t *sc, iwp_rx_desc_t *desc) 5027 { 5028 struct iwp_calib_results *res_p = &sc->sc_calib_results; 5029 struct iwp_calib_hdr *calib_hdr = (struct iwp_calib_hdr *)(desc + 1); 5030 int len = LE_32(desc->len); 5031 5032 /* 5033 * ensure the size of buffer is not too big 5034 */ 5035 len = (len & FH_RSCSR_FRAME_SIZE_MASK) - 4; 5036 5037 switch (calib_hdr->op_code) { 5038 case PHY_CALIBRATE_LO_CMD: 5039 if (NULL == res_p->lo_res) { 5040 res_p->lo_res = kmem_alloc(len, KM_NOSLEEP); 5041 } 5042 5043 if (NULL == res_p->lo_res) { 5044 cmn_err(CE_WARN, "iwp_save_calib_result(): " 5045 "failed to allocate memory.\n"); 5046 return; 5047 } 5048 5049 res_p->lo_res_len = len; 5050 (void) memcpy(res_p->lo_res, calib_hdr, len); 5051 break; 5052 case PHY_CALIBRATE_TX_IQ_CMD: 5053 if (NULL == res_p->tx_iq_res) { 5054 res_p->tx_iq_res = kmem_alloc(len, KM_NOSLEEP); 5055 } 5056 5057 if (NULL == res_p->tx_iq_res) { 5058 cmn_err(CE_WARN, "iwp_save_calib_result(): " 5059 "failed to allocate memory.\n"); 5060 return; 5061 } 5062 5063 res_p->tx_iq_res_len = len; 5064 (void) memcpy(res_p->tx_iq_res, calib_hdr, len); 5065 break; 5066 case PHY_CALIBRATE_TX_IQ_PERD_CMD: 5067 if (NULL == res_p->tx_iq_perd_res) { 5068 res_p->tx_iq_perd_res = kmem_alloc(len, KM_NOSLEEP); 5069 } 5070 5071 if (NULL == res_p->tx_iq_perd_res) { 5072 cmn_err(CE_WARN, "iwp_save_calib_result(): " 5073 "failed to allocate memory.\n"); 5074 } 5075 5076 res_p->tx_iq_perd_res_len = len; 5077 (void) memcpy(res_p->tx_iq_perd_res, calib_hdr, len); 5078 break; 5079 case PHY_CALIBRATE_BASE_BAND_CMD: 5080 if (NULL == res_p->base_band_res) { 5081 res_p->base_band_res = kmem_alloc(len, KM_NOSLEEP); 5082 } 5083 5084 if (NULL == res_p->base_band_res) { 5085 cmn_err(CE_WARN, "iwp_save_calib_result(): " 5086 "failed to allocate memory.\n"); 5087 } 5088 5089 
res_p->base_band_res_len = len;
		(void) memcpy(res_p->base_band_res, calib_hdr, len);
		break;
	default:
		cmn_err(CE_WARN, "iwp_save_calib_result(): "
		    "incorrect calibration type(%d).\n", calib_hdr->op_code);
		break;
	}

}

/*
 * Free every calibration result buffer saved by
 * iwp_save_calib_result() and clear the stale pointers so the
 * buffers can be re-allocated on the next calibration pass.
 */
static void
iwp_release_calib_buffer(iwp_sc_t *sc)
{
	if (sc->sc_calib_results.lo_res != NULL) {
		kmem_free(sc->sc_calib_results.lo_res,
		    sc->sc_calib_results.lo_res_len);
		sc->sc_calib_results.lo_res = NULL;
	}

	if (sc->sc_calib_results.tx_iq_res != NULL) {
		kmem_free(sc->sc_calib_results.tx_iq_res,
		    sc->sc_calib_results.tx_iq_res_len);
		sc->sc_calib_results.tx_iq_res = NULL;
	}

	if (sc->sc_calib_results.tx_iq_perd_res != NULL) {
		kmem_free(sc->sc_calib_results.tx_iq_perd_res,
		    sc->sc_calib_results.tx_iq_perd_res_len);
		sc->sc_calib_results.tx_iq_perd_res = NULL;
	}

	if (sc->sc_calib_results.base_band_res != NULL) {
		kmem_free(sc->sc_calib_results.base_band_res,
		    sc->sc_calib_results.base_band_res_len);
		sc->sc_calib_results.base_band_res = NULL;
	}

}

/*
 * common section of initialization
 *
 * Checks the hardware RF-kill switch, then programs the Rx ring,
 * Tx rings and interrupt registers.  Returns IWP_FAIL when the
 * radio is switched off, IWP_SUCCESS otherwise.
 */
static int
iwp_init_common(iwp_sc_t *sc)
{
	int32_t qid;
	uint32_t tmp;

	(void) iwp_preinit(sc);

	/* hardware RF-kill: refuse to initialize with the radio off */
	tmp = IWP_READ(sc, CSR_GP_CNTRL);
	if (!(tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) {
		cmn_err(CE_NOTE, "iwp_init_common(): "
		    "radio transmitter is off\n");
		return (IWP_FAIL);
	}

	/*
	 * init Rx ring
	 */
	iwp_mac_access_enter(sc);
	IWP_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
	IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
	    sc->sc_rxq.dma_desc.cookie.dmac_address >> 8);

	/* status write-back area lives in the shared page */
	IWP_WRITE(sc, FH_RSCSR_CHNL0_STTS_WPTR_REG,
	    ((uint32_t)(sc->sc_dma_sh.cookie.dmac_address +
	    offsetof(struct iwp_shared, val0)) >> 4));

IWP_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	    IWP_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
	    (RX_QUEUE_SIZE_LOG <<
	    FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
	iwp_mac_access_exit(sc);
	/* write pointer must stay 8-aligned, hence the & ~0x7 */
	IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG,
	    (RX_QUEUE_SIZE - 1) & ~0x7);

	/*
	 * init Tx rings
	 */
	iwp_mac_access_enter(sc);
	iwp_reg_write(sc, IWP_SCD_TXFACT, 0);

	/*
	 * keep warm page
	 */
	IWP_WRITE(sc, IWP_FH_KW_MEM_ADDR_REG,
	    sc->sc_dma_kw.cookie.dmac_address >> 4);

	/* point each DMA channel at its descriptor ring and enable it */
	for (qid = 0; qid < IWP_NUM_QUEUES; qid++) {
		IWP_WRITE(sc, FH_MEM_CBBC_QUEUE(qid),
		    sc->sc_txq[qid].dma_desc.cookie.dmac_address >> 8);
		IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(qid),
		    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
	}

	iwp_mac_access_exit(sc);

	/*
	 * clear "radio off" and "disable command" bits
	 */
	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR,
	    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/*
	 * clear any pending interrupts
	 */
	IWP_WRITE(sc, CSR_INT, 0xffffffff);

	/*
	 * enable interrupts
	 */
	IWP_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);

	/*
	 * NOTE(review): the SW RFKILL bit was already cleared above and is
	 * cleared twice more here -- looks redundant; confirm against the
	 * Intel reference driver before removing either write.
	 */
	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	return (IWP_SUCCESS);
}

/*
 * Attempt a lightweight recovery after a hardware error: restore
 * the saved RXON configuration, redo the pre-auth setup and the RUN
 * state configuration, then resume transmission.  Returns
 * IWP_SUCCESS or the error of the failing step.
 */
static int
iwp_fast_recover(iwp_sc_t *sc)
{
	ieee80211com_t *ic = &sc->sc_ic;
	int err = IWP_FAIL;

	mutex_enter(&sc->sc_glock);

	/* restore runtime configuration */
	bcopy(&sc->sc_config_save, &sc->sc_config,
	    sizeof (sc->sc_config));

	/* drop association state before re-authenticating */
	sc->sc_config.assoc_id = 0;
	sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);

	if
((err = iwp_hw_set_before_auth(sc)) != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_fast_recover(): "
		    "could not setup authentication\n");
		mutex_exit(&sc->sc_glock);
		return (err);
	}

	/* re-restore the full saved configuration before RUN setup */
	bcopy(&sc->sc_config_save, &sc->sc_config,
	    sizeof (sc->sc_config));

	/* update adapter's configuration */
	err = iwp_run_state_config(sc);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_fast_recover(): "
		    "failed to setup association\n");
		mutex_exit(&sc->sc_glock);
		return (err);
	}
	/* set LED on */
	iwp_set_led(sc, 2, 0, 1);

	mutex_exit(&sc->sc_glock);

	atomic_and_32(&sc->sc_flags, ~IWP_F_HW_ERR_RECOVER);

	/* start queue */
	IWP_DBG((IWP_DEBUG_FW, "iwp_fast_recover(): "
	    "resume xmit\n"));
	mac_tx_update(ic->ic_mach);

	return (IWP_SUCCESS);
}

/*
 * Build and send the RXON configuration for the RUN state:
 * association id, short preamble/slot flags negotiated during
 * association, and the association filter flags.
 */
static int
iwp_run_state_config(iwp_sc_t *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	ieee80211_node_t *in = ic->ic_bss;
	int err = IWP_FAIL;

	/*
	 * update adapter's configuration
	 */
	sc->sc_config.assoc_id = in->in_associd & 0x3fff;

	/*
	 * short preamble/slot time are
	 * negotiated when associating
	 */
	sc->sc_config.flags &=
	    ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
	    RXON_FLG_SHORT_SLOT_MSK);

	if (ic->ic_flags & IEEE80211_F_SHSLOT) {
		sc->sc_config.flags |=
		    LE_32(RXON_FLG_SHORT_SLOT_MSK);
	}

	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) {
		sc->sc_config.flags |=
		    LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
	}

	sc->sc_config.filter_flags |=
	    LE_32(RXON_FILTER_ASSOC_MSK);

	/* non-STA modes must also watch beacons */
	if (ic->ic_opmode != IEEE80211_M_STA) {
		sc->sc_config.filter_flags |=
		    LE_32(RXON_FILTER_BCON_AWARE_MSK);
	}

	IWP_DBG((IWP_DEBUG_80211, "iwp_run_state_config(): "
	    "config chan %d flags %x"
	    " filter_flags %x\n",
	    sc->sc_config.chan, sc->sc_config.flags,
sc->sc_config.filter_flags));

	err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config,
	    sizeof (iwp_rxon_cmd_t), 1);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_run_state_config(): "
		    "could not update configuration\n");
		return (err);
	}

	return (err);
}

/*
 * This function overwrites default configurations of
 * ieee80211com structure in Net80211 module.
 */
static void
iwp_overwrite_ic_default(iwp_sc_t *sc)
{
	ieee80211com_t *ic = &sc->sc_ic;

	/* save net80211's handler so iwp_newstate() can chain to it */
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = iwp_newstate;
	ic->ic_node_alloc = iwp_node_alloc;
	ic->ic_node_free = iwp_node_free;
}


/*
 * This function adds AP station into hardware.
 */
static int
iwp_add_ap_sta(iwp_sc_t *sc)
{
	ieee80211com_t *ic = &sc->sc_ic;
	ieee80211_node_t *in = ic->ic_bss;
	iwp_add_sta_t node;
	int err = IWP_FAIL;

	/*
	 * Add AP node into hardware.
	 */
	(void) memset(&node, 0, sizeof (node));
	IEEE80211_ADDR_COPY(node.sta.addr, in->in_bssid);
	node.mode = STA_MODE_ADD_MSK;
	node.sta.sta_id = IWP_AP_ID;

	err = iwp_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_add_ap_sta(): "
		    "failed to add AP node\n");
		return (err);
	}

	return (err);
}

/*
 * Check EEPROM version and Calibration version.
 *
 * Returns IWP_FAIL for EEPROM images older than 0x011a or tx power
 * calibration headers older than version 4.
 */
static int
iwp_eep_ver_chk(iwp_sc_t *sc)
{
	if ((IWP_READ_EEP_SHORT(sc, EEP_VERSION) < 0x011a) ||
	    (sc->sc_eep_calib->tx_pow_calib_hdr.calib_version < 4)) {
		cmn_err(CE_WARN, "iwp_eep_ver_chk(): "
		    "unsupported eeprom detected\n");
		return (IWP_FAIL);
	}

	return (IWP_SUCCESS);
}

/*
 * Determine parameters for all supported chips.
5382 */ 5383 static void 5384 iwp_set_chip_param(iwp_sc_t *sc) 5385 { 5386 if ((0x008d == sc->sc_dev_id) || 5387 (0x008e == sc->sc_dev_id)) { 5388 sc->sc_chip_param.phy_mode = PHY_MODE_G | 5389 PHY_MODE_A | PHY_MODE_N; 5390 5391 sc->sc_chip_param.tx_ant = ANT_A | ANT_B; 5392 sc->sc_chip_param.rx_ant = ANT_A | ANT_B; 5393 5394 sc->sc_chip_param.pa_type = PA_TYPE_MIX; 5395 } 5396 5397 if ((0x422c == sc->sc_dev_id) || 5398 (0x4239 == sc->sc_dev_id)) { 5399 sc->sc_chip_param.phy_mode = PHY_MODE_G | 5400 PHY_MODE_A | PHY_MODE_N; 5401 5402 sc->sc_chip_param.tx_ant = ANT_B | ANT_C; 5403 sc->sc_chip_param.rx_ant = ANT_B | ANT_C; 5404 5405 sc->sc_chip_param.pa_type = PA_TYPE_INTER; 5406 } 5407 5408 if ((0x422b == sc->sc_dev_id) || 5409 (0x4238 == sc->sc_dev_id)) { 5410 sc->sc_chip_param.phy_mode = PHY_MODE_G | 5411 PHY_MODE_A | PHY_MODE_N; 5412 5413 sc->sc_chip_param.tx_ant = ANT_A | ANT_B | ANT_C; 5414 sc->sc_chip_param.rx_ant = ANT_A | ANT_B | ANT_C; 5415 5416 sc->sc_chip_param.pa_type = PA_TYPE_SYSTEM; 5417 } 5418 }