Print this page
XXXX introduce drv_sectohz
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/iwp/iwp.c
+++ new/usr/src/uts/common/io/iwp/iwp.c
1 1 /*
2 2 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
3 3 * Use is subject to license terms.
4 4 */
5 5
6 6 /*
7 7 * Copyright (c) 2009, Intel Corporation
8 8 * All rights reserved.
9 9 */
10 10
11 11 /*
12 12 * Copyright (c) 2006
13 13 * Copyright (c) 2007
14 14 * Damien Bergamini <damien.bergamini@free.fr>
15 15 *
16 16 * Permission to use, copy, modify, and distribute this software for any
17 17 * purpose with or without fee is hereby granted, provided that the above
18 18 * copyright notice and this permission notice appear in all copies.
19 19 *
20 20 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
21 21 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
22 22 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
23 23 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
24 24 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
25 25 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
26 26 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
27 27 */
28 28
29 29 /*
30 30 * Intel(R) WiFi Link 6000 Driver
31 31 */
32 32
33 33 #include <sys/types.h>
34 34 #include <sys/byteorder.h>
35 35 #include <sys/conf.h>
36 36 #include <sys/cmn_err.h>
37 37 #include <sys/stat.h>
38 38 #include <sys/ddi.h>
39 39 #include <sys/sunddi.h>
40 40 #include <sys/strsubr.h>
41 41 #include <sys/ethernet.h>
42 42 #include <inet/common.h>
43 43 #include <inet/nd.h>
44 44 #include <inet/mi.h>
45 45 #include <sys/note.h>
46 46 #include <sys/stream.h>
47 47 #include <sys/strsun.h>
48 48 #include <sys/modctl.h>
49 49 #include <sys/devops.h>
50 50 #include <sys/dlpi.h>
51 51 #include <sys/mac_provider.h>
52 52 #include <sys/mac_wifi.h>
53 53 #include <sys/net80211.h>
54 54 #include <sys/net80211_proto.h>
55 55 #include <sys/varargs.h>
56 56 #include <sys/policy.h>
57 57 #include <sys/pci.h>
58 58
59 59 #include "iwp_calibration.h"
60 60 #include "iwp_hw.h"
61 61 #include "iwp_eeprom.h"
62 62 #include "iwp_var.h"
63 63 #include <inet/wifi_ioctl.h>
64 64
65 65 #ifdef DEBUG
66 66 #define IWP_DEBUG_80211 (1 << 0)
67 67 #define IWP_DEBUG_CMD (1 << 1)
68 68 #define IWP_DEBUG_DMA (1 << 2)
69 69 #define IWP_DEBUG_EEPROM (1 << 3)
70 70 #define IWP_DEBUG_FW (1 << 4)
71 71 #define IWP_DEBUG_HW (1 << 5)
72 72 #define IWP_DEBUG_INTR (1 << 6)
73 73 #define IWP_DEBUG_MRR (1 << 7)
74 74 #define IWP_DEBUG_PIO (1 << 8)
75 75 #define IWP_DEBUG_RX (1 << 9)
76 76 #define IWP_DEBUG_SCAN (1 << 10)
77 77 #define IWP_DEBUG_TX (1 << 11)
78 78 #define IWP_DEBUG_RATECTL (1 << 12)
79 79 #define IWP_DEBUG_RADIO (1 << 13)
80 80 #define IWP_DEBUG_RESUME (1 << 14)
81 81 #define IWP_DEBUG_CALIBRATION (1 << 15)
82 82 /*
83 83 * if want to see debug message of a given section,
84 84 * please set this flag to one of above values
85 85 */
86 86 uint32_t iwp_dbg_flags = 0;
87 87 #define IWP_DBG(x) \
88 88 iwp_dbg x
89 89 #else
90 90 #define IWP_DBG(x)
91 91 #endif
92 92
93 93 static void *iwp_soft_state_p = NULL;
94 94
95 95 /*
96 96 * ucode will be compiled into driver image
97 97 */
98 98 static uint8_t iwp_fw_bin [] = {
99 99 #include "fw-iw/iwp.ucode"
100 100 };
101 101
102 102 /*
103 103 * DMA attributes for a shared page
104 104 */
/*
 * Constraints: 32-bit DMA addressing only, one 4 KB-aligned segment.
 */
static ddi_dma_attr_t sh_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	0x1000,		/* alignment in bytes */
	0x1000,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};
119 119
120 120 /*
121 121 * DMA attributes for a keep warm DRAM descriptor
122 122 */
/*
 * Same constraints as sh_dma_attr: 32-bit addresses, single 4 KB-aligned
 * segment.
 */
static ddi_dma_attr_t kw_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	0x1000,		/* alignment in bytes */
	0x1000,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};
137 137
138 138 /*
139 139 * DMA attributes for a ring descriptor
140 140 */
/*
 * 32-bit addressing, single segment, 256-byte alignment.
 */
static ddi_dma_attr_t ring_desc_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	0x100,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};
155 155
156 156 /*
157 157 * DMA attributes for a cmd
158 158 */
/*
 * 32-bit addressing, single segment; only 4-byte alignment required.
 */
static ddi_dma_attr_t cmd_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	4,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};
173 173
174 174 /*
175 175 * DMA attributes for a rx buffer
176 176 */
/*
 * 32-bit addressing, one 256-byte-aligned segment per rx buffer.
 */
static ddi_dma_attr_t rx_buffer_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	0x100,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};
191 191
192 192 /*
193 193 * DMA attributes for a tx buffer.
194 194 * the maximum number of segments is 4 for the hardware.
195 195 * now all the wifi drivers put the whole frame in a single
196 196 * descriptor, so we define the maximum number of segments 1,
197 197 * just the same as the rx_buffer. we consider leverage the HW
198 198 * ability in the future, that is why we don't define rx and tx
199 199 * buffer_dma_attr as the same.
200 200 */
static ddi_dma_attr_t tx_buffer_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	4,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* max segments: 1 for now; HW supports 4 (see above) */
	1,		/* granularity */
	0,		/* flags (reserved) */
};
215 215
216 216 /*
217 217 * DMA attributes for text and data part in the firmware
218 218 */
/*
 * 32-bit addressing, single 16-byte-aligned segment; byte count is capped
 * at 0x7fffffff (2 GB - 1) rather than the full 32-bit range.
 */
static ddi_dma_attr_t fw_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0x7fffffff,	/* maximum DMAable byte count */
	0x10,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};
233 233
234 234 /*
235 235 * regs access attributes
236 236 */
static ddi_device_acc_attr_t iwp_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,	/* registers are little-endian */
	DDI_STRICTORDER_ACC,	/* no reordering of register accesses */
	DDI_DEFAULT_ACC
};
243 243
244 244 /*
245 245 * DMA access attributes for descriptor
246 246 */
static ddi_device_acc_attr_t iwp_dma_descattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,	/* descriptors are little-endian */
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};
253 253
254 254 /*
255 255 * DMA access attributes
256 256 */
static ddi_device_acc_attr_t iwp_dma_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,	/* raw byte buffers: no endian conversion */
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};
263 263
264 264 static int iwp_ring_init(iwp_sc_t *);
265 265 static void iwp_ring_free(iwp_sc_t *);
266 266 static int iwp_alloc_shared(iwp_sc_t *);
267 267 static void iwp_free_shared(iwp_sc_t *);
268 268 static int iwp_alloc_kw(iwp_sc_t *);
269 269 static void iwp_free_kw(iwp_sc_t *);
270 270 static int iwp_alloc_fw_dma(iwp_sc_t *);
271 271 static void iwp_free_fw_dma(iwp_sc_t *);
272 272 static int iwp_alloc_rx_ring(iwp_sc_t *);
273 273 static void iwp_reset_rx_ring(iwp_sc_t *);
274 274 static void iwp_free_rx_ring(iwp_sc_t *);
275 275 static int iwp_alloc_tx_ring(iwp_sc_t *, iwp_tx_ring_t *,
276 276 int, int);
277 277 static void iwp_reset_tx_ring(iwp_sc_t *, iwp_tx_ring_t *);
278 278 static void iwp_free_tx_ring(iwp_tx_ring_t *);
279 279 static ieee80211_node_t *iwp_node_alloc(ieee80211com_t *);
280 280 static void iwp_node_free(ieee80211_node_t *);
281 281 static int iwp_newstate(ieee80211com_t *, enum ieee80211_state, int);
282 282 static void iwp_mac_access_enter(iwp_sc_t *);
283 283 static void iwp_mac_access_exit(iwp_sc_t *);
284 284 static uint32_t iwp_reg_read(iwp_sc_t *, uint32_t);
285 285 static void iwp_reg_write(iwp_sc_t *, uint32_t, uint32_t);
286 286 static int iwp_load_init_firmware(iwp_sc_t *);
287 287 static int iwp_load_run_firmware(iwp_sc_t *);
288 288 static void iwp_tx_intr(iwp_sc_t *, iwp_rx_desc_t *);
289 289 static void iwp_cmd_intr(iwp_sc_t *, iwp_rx_desc_t *);
290 290 static uint_t iwp_intr(caddr_t, caddr_t);
291 291 static int iwp_eep_load(iwp_sc_t *);
292 292 static void iwp_get_mac_from_eep(iwp_sc_t *);
293 293 static int iwp_eep_sem_down(iwp_sc_t *);
294 294 static void iwp_eep_sem_up(iwp_sc_t *);
295 295 static uint_t iwp_rx_softintr(caddr_t, caddr_t);
296 296 static uint8_t iwp_rate_to_plcp(int);
297 297 static int iwp_cmd(iwp_sc_t *, int, const void *, int, int);
298 298 static void iwp_set_led(iwp_sc_t *, uint8_t, uint8_t, uint8_t);
299 299 static int iwp_hw_set_before_auth(iwp_sc_t *);
300 300 static int iwp_scan(iwp_sc_t *);
301 301 static int iwp_config(iwp_sc_t *);
302 302 static void iwp_stop_master(iwp_sc_t *);
303 303 static int iwp_power_up(iwp_sc_t *);
304 304 static int iwp_preinit(iwp_sc_t *);
305 305 static int iwp_init(iwp_sc_t *);
306 306 static void iwp_stop(iwp_sc_t *);
307 307 static int iwp_quiesce(dev_info_t *t);
308 308 static void iwp_amrr_init(iwp_amrr_t *);
309 309 static void iwp_amrr_timeout(iwp_sc_t *);
310 310 static void iwp_amrr_ratectl(void *, ieee80211_node_t *);
311 311 static void iwp_ucode_alive(iwp_sc_t *, iwp_rx_desc_t *);
312 312 static void iwp_rx_phy_intr(iwp_sc_t *, iwp_rx_desc_t *);
313 313 static void iwp_rx_mpdu_intr(iwp_sc_t *, iwp_rx_desc_t *);
314 314 static void iwp_release_calib_buffer(iwp_sc_t *);
315 315 static int iwp_init_common(iwp_sc_t *);
316 316 static uint8_t *iwp_eep_addr_trans(iwp_sc_t *, uint32_t);
317 317 static int iwp_put_seg_fw(iwp_sc_t *, uint32_t, uint32_t, uint32_t);
318 318 static int iwp_alive_common(iwp_sc_t *);
319 319 static void iwp_save_calib_result(iwp_sc_t *, iwp_rx_desc_t *);
320 320 static int iwp_attach(dev_info_t *, ddi_attach_cmd_t);
321 321 static int iwp_detach(dev_info_t *, ddi_detach_cmd_t);
322 322 static void iwp_destroy_locks(iwp_sc_t *);
323 323 static int iwp_send(ieee80211com_t *, mblk_t *, uint8_t);
324 324 static void iwp_thread(iwp_sc_t *);
325 325 static int iwp_run_state_config(iwp_sc_t *);
326 326 static int iwp_fast_recover(iwp_sc_t *);
327 327 static void iwp_overwrite_ic_default(iwp_sc_t *);
328 328 static int iwp_add_ap_sta(iwp_sc_t *);
329 329 static int iwp_alloc_dma_mem(iwp_sc_t *, size_t,
330 330 ddi_dma_attr_t *, ddi_device_acc_attr_t *,
331 331 uint_t, iwp_dma_t *);
332 332 static void iwp_free_dma_mem(iwp_dma_t *);
333 333 static int iwp_eep_ver_chk(iwp_sc_t *);
334 334 static void iwp_set_chip_param(iwp_sc_t *);
335 335
336 336 /*
337 337 * GLD specific operations
338 338 */
339 339 static int iwp_m_stat(void *, uint_t, uint64_t *);
340 340 static int iwp_m_start(void *);
341 341 static void iwp_m_stop(void *);
342 342 static int iwp_m_unicst(void *, const uint8_t *);
343 343 static int iwp_m_multicst(void *, boolean_t, const uint8_t *);
344 344 static int iwp_m_promisc(void *, boolean_t);
345 345 static mblk_t *iwp_m_tx(void *, mblk_t *);
346 346 static void iwp_m_ioctl(void *, queue_t *, mblk_t *);
347 347 static int iwp_m_setprop(void *arg, const char *pr_name,
348 348 mac_prop_id_t wldp_pr_num, uint_t wldp_length, const void *wldp_buf);
349 349 static int iwp_m_getprop(void *arg, const char *pr_name,
350 350 mac_prop_id_t wldp_pr_num, uint_t wldp_length, void *wldp_buf);
351 351 static void iwp_m_propinfo(void *, const char *, mac_prop_id_t,
352 352 mac_prop_info_handle_t);
353 353
354 354 /*
355 355 * Supported rates for 802.11b/g modes (in 500Kbps unit).
356 356 */
/* 4 rates: 1, 2, 5.5, 11 Mbps (in 500Kbps units) */
static const struct ieee80211_rateset iwp_rateset_11b =
	{ 4, { 2, 4, 11, 22 } };
359 359
/* 12 rates: 1 through 54 Mbps (in 500Kbps units) */
static const struct ieee80211_rateset iwp_rateset_11g =
	{ 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
362 362
363 363 /*
364 364 * For mfthread only
365 365 */
366 366 extern pri_t minclsyspri;
367 367
368 368 #define DRV_NAME_SP "iwp"
369 369
370 370 /*
371 371 * Module Loading Data & Entry Points
372 372 */
373 373 DDI_DEFINE_STREAM_OPS(iwp_devops, nulldev, nulldev, iwp_attach,
374 374 iwp_detach, nodev, NULL, D_MP, NULL, iwp_quiesce);
375 375
static struct modldrv iwp_modldrv = {
	&mod_driverops,			/* this is a device driver */
	"Intel(R) PumaPeak driver(N)",	/* module description */
	&iwp_devops			/* dev_ops of this driver */
};
381 381
static struct modlinkage iwp_modlinkage = {
	MODREV_1,	/* modlinkage revision */
	&iwp_modldrv,	/* single driver linkage element */
	NULL		/* list terminator */
};
387 387
388 388 int
389 389 _init(void)
390 390 {
391 391 int status;
392 392
393 393 status = ddi_soft_state_init(&iwp_soft_state_p,
394 394 sizeof (iwp_sc_t), 1);
395 395 if (status != DDI_SUCCESS) {
396 396 return (status);
397 397 }
398 398
399 399 mac_init_ops(&iwp_devops, DRV_NAME_SP);
400 400 status = mod_install(&iwp_modlinkage);
401 401 if (status != DDI_SUCCESS) {
402 402 mac_fini_ops(&iwp_devops);
403 403 ddi_soft_state_fini(&iwp_soft_state_p);
404 404 }
405 405
406 406 return (status);
407 407 }
408 408
409 409 int
410 410 _fini(void)
411 411 {
412 412 int status;
413 413
414 414 status = mod_remove(&iwp_modlinkage);
415 415 if (DDI_SUCCESS == status) {
416 416 mac_fini_ops(&iwp_devops);
417 417 ddi_soft_state_fini(&iwp_soft_state_p);
418 418 }
419 419
420 420 return (status);
421 421 }
422 422
423 423 int
424 424 _info(struct modinfo *mip)
425 425 {
426 426 return (mod_info(&iwp_modlinkage, mip));
427 427 }
428 428
429 429 /*
430 430 * Mac Call Back entries
431 431 */
mac_callbacks_t iwp_m_callbacks = {
	/* flags advertising the optional entry points provided below */
	MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
	iwp_m_stat,	/* get statistics */
	iwp_m_start,	/* start the device */
	iwp_m_stop,	/* stop the device */
	iwp_m_promisc,	/* set promiscuous mode */
	iwp_m_multicst,	/* add/remove multicast address */
	iwp_m_unicst,	/* set unicast address */
	iwp_m_tx,	/* transmit a chain of packets */
	NULL,		/* reserved */
	iwp_m_ioctl,	/* wifi ioctl handling */
	NULL,		/* capability query: not provided */
	NULL,		/* open: not provided */
	NULL,		/* close: not provided */
	iwp_m_setprop,	/* set a mac property */
	iwp_m_getprop,	/* get a mac property */
	iwp_m_propinfo	/* describe a mac property */
};
450 450
451 451 #ifdef DEBUG
452 452 void
453 453 iwp_dbg(uint32_t flags, const char *fmt, ...)
454 454 {
455 455 va_list ap;
456 456
457 457 if (flags & iwp_dbg_flags) {
458 458 va_start(ap, fmt);
459 459 vcmn_err(CE_NOTE, fmt, ap);
460 460 va_end(ap);
461 461 }
462 462 }
463 463 #endif /* DEBUG */
464 464
465 465 /*
466 466 * device operations
467 467 */
/*
 * attach(9E) entry point.
 *
 * DDI_ATTACH: maps PCI config and device register spaces, validates the
 * device ID, allocates the fixed interrupt, locks/CVs, shared page,
 * keep-warm page, TX/RX rings and firmware DMA buffers, attaches to the
 * net80211 module, registers with the GLD (mac) layer and starts the
 * monitor (mf) thread.  Failures unwind through the attach_failN label
 * ladder, which tears down strictly in reverse order of setup.
 *
 * DDI_RESUME: re-initializes the chip if it was running at suspend time
 * and clears the suspend flag.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
iwp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	iwp_sc_t *sc;
	ieee80211com_t *ic;
	int instance, i;
	char strbuf[32];
	wifi_data_t wd = { 0 };
	mac_register_t *macp;
	int intr_type;
	int intr_count;
	int intr_actual;
	int err = DDI_FAILURE;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		instance = ddi_get_instance(dip);
		sc = ddi_get_soft_state(iwp_soft_state_p,
		    instance);
		ASSERT(sc != NULL);

		/* restart the chip only if it was running at suspend */
		if (sc->sc_flags & IWP_F_RUNNING) {
			(void) iwp_init(sc);
		}

		atomic_and_32(&sc->sc_flags, ~IWP_F_SUSPEND);

		IWP_DBG((IWP_DEBUG_RESUME, "iwp_attach(): "
		    "resume\n"));
		return (DDI_SUCCESS);
	default:
		goto attach_fail1;
	}

	instance = ddi_get_instance(dip);
	err = ddi_soft_state_zalloc(iwp_soft_state_p, instance);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to allocate soft state\n");
		goto attach_fail1;
	}

	sc = ddi_get_soft_state(iwp_soft_state_p, instance);
	ASSERT(sc != NULL);

	sc->sc_dip = dip;

	/*
	 * map configure space
	 */
	err = ddi_regs_map_setup(dip, 0, &sc->sc_cfg_base, 0, 0,
	    &iwp_reg_accattr, &sc->sc_cfg_handle);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to map config spaces regs\n");
		goto attach_fail2;
	}

	/*
	 * reject PCI device IDs this driver does not recognize
	 */
	sc->sc_dev_id = ddi_get16(sc->sc_cfg_handle,
	    (uint16_t *)(sc->sc_cfg_base + PCI_CONF_DEVID));
	if ((sc->sc_dev_id != 0x422B) &&
	    (sc->sc_dev_id != 0x422C) &&
	    (sc->sc_dev_id != 0x4238) &&
	    (sc->sc_dev_id != 0x4239) &&
	    (sc->sc_dev_id != 0x008d) &&
	    (sc->sc_dev_id != 0x008e)) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "Do not support this device\n");
		goto attach_fail3;
	}

	iwp_set_chip_param(sc);

	sc->sc_rev = ddi_get8(sc->sc_cfg_handle,
	    (uint8_t *)(sc->sc_cfg_base + PCI_CONF_REVID));

	/*
	 * keep from disturbing C3 state of CPU
	 */
	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base +
	    PCI_CFG_RETRY_TIMEOUT), 0);

	/*
	 * determine the size of buffer for frame and command to ucode
	 * (cache line size register is in 4-byte units; default to 64)
	 */
	sc->sc_clsz = ddi_get16(sc->sc_cfg_handle,
	    (uint16_t *)(sc->sc_cfg_base + PCI_CONF_CACHE_LINESZ));
	if (!sc->sc_clsz) {
		sc->sc_clsz = 16;
	}
	sc->sc_clsz = (sc->sc_clsz << 2);

	sc->sc_dmabuf_sz = roundup(0x1000 + sizeof (struct ieee80211_frame) +
	    IEEE80211_MTU + IEEE80211_CRC_LEN +
	    (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
	    IEEE80211_WEP_CRCLEN), sc->sc_clsz);

	/*
	 * Map operating registers
	 */
	err = ddi_regs_map_setup(dip, 1, &sc->sc_base,
	    0, 0, &iwp_reg_accattr, &sc->sc_handle);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to map device regs\n");
		goto attach_fail3;
	}

	/*
	 * this is used to differentiate type of hardware
	 */
	sc->sc_hw_rev = IWP_READ(sc, CSR_HW_REV);

	/*
	 * this driver requires exactly one fixed (legacy) interrupt
	 */
	err = ddi_intr_get_supported_types(dip, &intr_type);
	if ((err != DDI_SUCCESS) || (!(intr_type & DDI_INTR_TYPE_FIXED))) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "fixed type interrupt is not supported\n");
		goto attach_fail4;
	}

	err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_FIXED, &intr_count);
	if ((err != DDI_SUCCESS) || (intr_count != 1)) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "no fixed interrupts\n");
		goto attach_fail4;
	}

	sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);

	err = ddi_intr_alloc(dip, sc->sc_intr_htable, DDI_INTR_TYPE_FIXED, 0,
	    intr_count, &intr_actual, 0);
	if ((err != DDI_SUCCESS) || (intr_actual != 1)) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "ddi_intr_alloc() failed 0x%x\n", err);
		goto attach_fail5;
	}

	err = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_pri);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "ddi_intr_get_pri() failed 0x%x\n", err);
		goto attach_fail6;
	}

	/*
	 * mutexes must be initialized at the interrupt priority so the
	 * interrupt handler may acquire them
	 */
	mutex_init(&sc->sc_glock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(sc->sc_intr_pri));
	mutex_init(&sc->sc_tx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(sc->sc_intr_pri));
	mutex_init(&sc->sc_mt_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(sc->sc_intr_pri));

	cv_init(&sc->sc_cmd_cv, NULL, CV_DRIVER, NULL);
	cv_init(&sc->sc_put_seg_cv, NULL, CV_DRIVER, NULL);
	cv_init(&sc->sc_ucode_cv, NULL, CV_DRIVER, NULL);

	/*
	 * initialize the mfthread
	 */
	cv_init(&sc->sc_mt_cv, NULL, CV_DRIVER, NULL);
	sc->sc_mf_thread = NULL;
	sc->sc_mf_thread_switch = 0;

	/*
	 * Allocate shared buffer for communication between driver and ucode.
	 */
	err = iwp_alloc_shared(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to allocate shared page\n");
		goto attach_fail7;
	}

	(void) memset(sc->sc_shared, 0, sizeof (iwp_shared_t));

	/*
	 * Allocate keep warm page.
	 */
	err = iwp_alloc_kw(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to allocate keep warm page\n");
		goto attach_fail8;
	}

	/*
	 * Do some necessary hardware initializations.
	 */
	err = iwp_preinit(sc);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to initialize hardware\n");
		goto attach_fail9;
	}

	/*
	 * get hardware configurations from eeprom
	 */
	err = iwp_eep_load(sc);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to load eeprom\n");
		goto attach_fail9;
	}

	/*
	 * calibration information from EEPROM
	 */
	sc->sc_eep_calib = (struct iwp_eep_calibration *)
	    iwp_eep_addr_trans(sc, EEP_CALIBRATION);

	err = iwp_eep_ver_chk(sc);
	if (err != IWP_SUCCESS) {
		goto attach_fail9;
	}

	/*
	 * get MAC address of this chipset
	 */
	iwp_get_mac_from_eep(sc);


	/*
	 * initialize TX and RX ring buffers
	 */
	err = iwp_ring_init(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to allocate and initialize ring\n");
		goto attach_fail9;
	}

	sc->sc_hdr = (iwp_firmware_hdr_t *)iwp_fw_bin;

	/*
	 * copy ucode to dma buffer
	 */
	err = iwp_alloc_fw_dma(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to allocate firmware dma\n");
		goto attach_fail10;
	}

	/*
	 * Initialize the wifi part, which will be used by
	 * 802.11 module
	 */
	ic = &sc->sc_ic;
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
	ic->ic_state = IEEE80211_S_INIT;
	ic->ic_maxrssi = 100; /* experimental number */
	ic->ic_caps = IEEE80211_C_SHPREAMBLE | IEEE80211_C_TXPMGT |
	    IEEE80211_C_PMGT | IEEE80211_C_SHSLOT;

	/*
	 * Support WPA/WPA2
	 */
	ic->ic_caps |= IEEE80211_C_WPA;

	/*
	 * set supported .11b and .11g rates
	 */
	ic->ic_sup_rates[IEEE80211_MODE_11B] = iwp_rateset_11b;
	ic->ic_sup_rates[IEEE80211_MODE_11G] = iwp_rateset_11g;

	/*
	 * set supported .11b and .11g channels (1 through 11)
	 */
	for (i = 1; i <= 11; i++) {
		ic->ic_sup_channels[i].ich_freq =
		    ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
		ic->ic_sup_channels[i].ich_flags =
		    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
		    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ |
		    IEEE80211_CHAN_PASSIVE;
	}

	ic->ic_ibss_chan = &ic->ic_sup_channels[0];
	ic->ic_xmit = iwp_send;

	/*
	 * attach to 802.11 module
	 */
	ieee80211_attach(ic);

	/*
	 * different instance has different WPA door
	 */
	(void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
	    ddi_driver_name(dip),
	    ddi_get_instance(dip));

	/*
	 * Overwrite 80211 default configurations.
	 */
	iwp_overwrite_ic_default(sc);

	/*
	 * initialize 802.11 module
	 */
	ieee80211_media_init(ic);

	/*
	 * initialize default tx key
	 */
	ic->ic_def_txkey = 0;

	err = ddi_intr_add_softint(dip, &sc->sc_soft_hdl, DDI_INTR_SOFTPRI_MAX,
	    iwp_rx_softintr, (caddr_t)sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "add soft interrupt failed\n");
		goto attach_fail12;
	}

	err = ddi_intr_add_handler(sc->sc_intr_htable[0], iwp_intr,
	    (caddr_t)sc, NULL);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "ddi_intr_add_handle() failed\n");
		goto attach_fail13;
	}

	err = ddi_intr_enable(sc->sc_intr_htable[0]);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "ddi_intr_enable() failed\n");
		goto attach_fail14;
	}

	/*
	 * Initialize pointer to device specific functions
	 */
	wd.wd_secalloc = WIFI_SEC_NONE;
	wd.wd_opmode = ic->ic_opmode;
	IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_macaddr);

	/*
	 * create relation to GLD
	 */
	macp = mac_alloc(MAC_VERSION);
	if (NULL == macp) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to do mac_alloc()\n");
		goto attach_fail15;
	}

	macp->m_type_ident = MAC_PLUGIN_IDENT_WIFI;
	macp->m_driver = sc;
	macp->m_dip = dip;
	macp->m_src_addr = ic->ic_macaddr;
	macp->m_callbacks = &iwp_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = IEEE80211_MTU;
	macp->m_pdata = &wd;
	macp->m_pdata_size = sizeof (wd);

	/*
	 * Register the macp to mac
	 */
	err = mac_register(macp, &ic->ic_mach);
	mac_free(macp);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to do mac_register()\n");
		goto attach_fail15;
	}

	/*
	 * Create minor node of type DDI_NT_NET_WIFI
	 * (minor node creation failure is warned about but not fatal)
	 */
	(void) snprintf(strbuf, sizeof (strbuf), DRV_NAME_SP"%d", instance);
	err = ddi_create_minor_node(dip, strbuf, S_IFCHR,
	    instance + 1, DDI_NT_NET_WIFI, 0);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to do ddi_create_minor_node()\n");
	}

	/*
	 * Notify link is down now
	 */
	mac_link_update(ic->ic_mach, LINK_STATE_DOWN);

	/*
	 * create the mf thread to handle the link status,
	 * recovery fatal error, etc.
	 */
	sc->sc_mf_thread_switch = 1;
	if (NULL == sc->sc_mf_thread) {
		sc->sc_mf_thread = thread_create((caddr_t)NULL, 0,
		    iwp_thread, sc, 0, &p0, TS_RUN, minclsyspri);
	}

	atomic_or_32(&sc->sc_flags, IWP_F_ATTACHED);

	return (DDI_SUCCESS);

	/*
	 * error unwind: each label releases the resource acquired just
	 * before the jump to it, falling through to undo earlier steps
	 * in reverse order of setup
	 */
attach_fail15:
	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
attach_fail14:
	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
attach_fail13:
	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
	sc->sc_soft_hdl = NULL;
attach_fail12:
	ieee80211_detach(ic);
attach_fail11:
	iwp_free_fw_dma(sc);
attach_fail10:
	iwp_ring_free(sc);
attach_fail9:
	iwp_free_kw(sc);
attach_fail8:
	iwp_free_shared(sc);
attach_fail7:
	iwp_destroy_locks(sc);
attach_fail6:
	(void) ddi_intr_free(sc->sc_intr_htable[0]);
attach_fail5:
	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
attach_fail4:
	ddi_regs_map_free(&sc->sc_handle);
attach_fail3:
	ddi_regs_map_free(&sc->sc_cfg_handle);
attach_fail2:
	ddi_soft_state_free(iwp_soft_state_p, instance);
attach_fail1:
	return (DDI_FAILURE);
}
901 901
/*
 * detach(9E) entry point.
 *
 * DDI_DETACH: terminates the monitor (mf) thread, disables the GLD
 * instance, stops the chip, releases calibration buffers, unregisters
 * from mac, frees DMA resources and interrupts, detaches from the
 * net80211 module and destroys all locks, in reverse order of attach.
 *
 * DDI_SUSPEND: stops the chip if running and marks the softc suspended.
 *
 * Returns DDI_SUCCESS, DDI_FAILURE, or the mac_disable() error.
 */
int
iwp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	iwp_sc_t *sc;
	ieee80211com_t *ic;
	int err;

	sc = ddi_get_soft_state(iwp_soft_state_p, ddi_get_instance(dip));
	ASSERT(sc != NULL);
	ic = &sc->sc_ic;

	switch (cmd) {
	case DDI_DETACH:
		break;
	case DDI_SUSPEND:
		/* cancel recovery/rate-control work before suspending */
		atomic_and_32(&sc->sc_flags, ~IWP_F_HW_ERR_RECOVER);
		atomic_and_32(&sc->sc_flags, ~IWP_F_RATE_AUTO_CTL);

		atomic_or_32(&sc->sc_flags, IWP_F_SUSPEND);

		if (sc->sc_flags & IWP_F_RUNNING) {
			iwp_stop(sc);
			ieee80211_new_state(ic, IEEE80211_S_INIT, -1);

		}

		IWP_DBG((IWP_DEBUG_RESUME, "iwp_detach(): "
		    "suspend\n"));
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	if (!(sc->sc_flags & IWP_F_ATTACHED)) {
		return (DDI_FAILURE);
	}

	/*
	 * Destroy the mf_thread: clear its run switch, then wait for it
	 * to signal sc_mt_cv and clear sc_mf_thread on exit.
	 */
	sc->sc_mf_thread_switch = 0;

	mutex_enter(&sc->sc_mt_lock);
	while (sc->sc_mf_thread != NULL) {
		if (cv_wait_sig(&sc->sc_mt_cv, &sc->sc_mt_lock) == 0) {
			break;
		}
	}
	mutex_exit(&sc->sc_mt_lock);

	/*
	 * mac_disable() can refuse (e.g. client still open); in that case
	 * detach fails and nothing has been torn down yet
	 */
	err = mac_disable(sc->sc_ic.ic_mach);
	if (err != DDI_SUCCESS) {
		return (err);
	}

	/*
	 * stop chipset
	 */
	iwp_stop(sc);

	/* 500 ms settle delay — rationale not shown here; do not shorten */
	DELAY(500000);

	/*
	 * release buffer for calibration
	 */
	iwp_release_calib_buffer(sc);

	/*
	 * Unregister from GLD
	 */
	(void) mac_unregister(sc->sc_ic.ic_mach);

	mutex_enter(&sc->sc_glock);
	iwp_free_fw_dma(sc);
	iwp_ring_free(sc);
	iwp_free_kw(sc);
	iwp_free_shared(sc);
	mutex_exit(&sc->sc_glock);

	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
	(void) ddi_intr_free(sc->sc_intr_htable[0]);
	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));

	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
	sc->sc_soft_hdl = NULL;

	/*
	 * detach from 80211 module
	 */
	ieee80211_detach(&sc->sc_ic);

	iwp_destroy_locks(sc);

	ddi_regs_map_free(&sc->sc_handle);
	ddi_regs_map_free(&sc->sc_cfg_handle);
	ddi_remove_minor_node(dip, NULL);
	ddi_soft_state_free(iwp_soft_state_p, ddi_get_instance(dip));

	return (DDI_SUCCESS);
}
1003 1003
1004 1004 /*
1005 1005 * destroy all locks
1006 1006 */
1007 1007 static void
1008 1008 iwp_destroy_locks(iwp_sc_t *sc)
1009 1009 {
1010 1010 cv_destroy(&sc->sc_mt_cv);
1011 1011 cv_destroy(&sc->sc_cmd_cv);
1012 1012 cv_destroy(&sc->sc_put_seg_cv);
1013 1013 cv_destroy(&sc->sc_ucode_cv);
1014 1014 mutex_destroy(&sc->sc_mt_lock);
1015 1015 mutex_destroy(&sc->sc_tx_lock);
1016 1016 mutex_destroy(&sc->sc_glock);
1017 1017 }
1018 1018
1019 1019 /*
1020 1020 * Allocate an area of memory and a DMA handle for accessing it
1021 1021 */
1022 1022 static int
1023 1023 iwp_alloc_dma_mem(iwp_sc_t *sc, size_t memsize,
1024 1024 ddi_dma_attr_t *dma_attr_p, ddi_device_acc_attr_t *acc_attr_p,
1025 1025 uint_t dma_flags, iwp_dma_t *dma_p)
1026 1026 {
1027 1027 caddr_t vaddr;
1028 1028 int err = DDI_FAILURE;
1029 1029
1030 1030 /*
1031 1031 * Allocate handle
1032 1032 */
1033 1033 err = ddi_dma_alloc_handle(sc->sc_dip, dma_attr_p,
1034 1034 DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
1035 1035 if (err != DDI_SUCCESS) {
1036 1036 dma_p->dma_hdl = NULL;
1037 1037 return (DDI_FAILURE);
1038 1038 }
1039 1039
1040 1040 /*
1041 1041 * Allocate memory
1042 1042 */
1043 1043 err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
1044 1044 dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
1045 1045 DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
1046 1046 if (err != DDI_SUCCESS) {
1047 1047 ddi_dma_free_handle(&dma_p->dma_hdl);
1048 1048 dma_p->dma_hdl = NULL;
1049 1049 dma_p->acc_hdl = NULL;
1050 1050 return (DDI_FAILURE);
1051 1051 }
1052 1052
1053 1053 /*
1054 1054 * Bind the two together
1055 1055 */
1056 1056 dma_p->mem_va = vaddr;
1057 1057 err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
1058 1058 vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
1059 1059 &dma_p->cookie, &dma_p->ncookies);
1060 1060 if (err != DDI_DMA_MAPPED) {
1061 1061 ddi_dma_mem_free(&dma_p->acc_hdl);
1062 1062 ddi_dma_free_handle(&dma_p->dma_hdl);
1063 1063 dma_p->acc_hdl = NULL;
1064 1064 dma_p->dma_hdl = NULL;
1065 1065 return (DDI_FAILURE);
1066 1066 }
1067 1067
1068 1068 dma_p->nslots = ~0U;
1069 1069 dma_p->size = ~0U;
1070 1070 dma_p->token = ~0U;
1071 1071 dma_p->offset = 0;
1072 1072 return (DDI_SUCCESS);
1073 1073 }
1074 1074
1075 1075 /*
1076 1076 * Free one allocated area of DMAable memory
1077 1077 */
1078 1078 static void
1079 1079 iwp_free_dma_mem(iwp_dma_t *dma_p)
1080 1080 {
1081 1081 if (dma_p->dma_hdl != NULL) {
1082 1082 if (dma_p->ncookies) {
1083 1083 (void) ddi_dma_unbind_handle(dma_p->dma_hdl);
1084 1084 dma_p->ncookies = 0;
1085 1085 }
1086 1086 ddi_dma_free_handle(&dma_p->dma_hdl);
1087 1087 dma_p->dma_hdl = NULL;
1088 1088 }
1089 1089
1090 1090 if (dma_p->acc_hdl != NULL) {
1091 1091 ddi_dma_mem_free(&dma_p->acc_hdl);
1092 1092 dma_p->acc_hdl = NULL;
1093 1093 }
1094 1094 }
1095 1095
1096 1096 /*
1097 1097 * copy ucode into dma buffers
1098 1098 */
static int
iwp_alloc_fw_dma(iwp_sc_t *sc)
{
	int err = DDI_FAILURE;
	iwp_dma_t *dma_p;
	char *t;

	/*
	 * firmware image layout:
	 * |HDR|<-TEXT->|<-DATA->|<-INIT_TEXT->|<-INIT_DATA->|<-BOOT->|
	 *
	 * 't' walks through the image in exactly that order; each section
	 * gets its own DMA buffer.  On failure, buffers allocated so far
	 * are left for the caller to release (detach calls
	 * iwp_free_fw_dma(), which tolerates unallocated members).
	 */

	/*
	 * Check firmware image size.
	 */
	if (LE_32(sc->sc_hdr->init_textsz) > RTC_INST_SIZE) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "firmware init text size 0x%x is too large\n",
		    LE_32(sc->sc_hdr->init_textsz));

		goto fail;
	}

	if (LE_32(sc->sc_hdr->init_datasz) > RTC_DATA_SIZE) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "firmware init data size 0x%x is too large\n",
		    LE_32(sc->sc_hdr->init_datasz));

		goto fail;
	}

	if (LE_32(sc->sc_hdr->textsz) > RTC_INST_SIZE) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "firmware text size 0x%x is too large\n",
		    LE_32(sc->sc_hdr->textsz));

		goto fail;
	}

	if (LE_32(sc->sc_hdr->datasz) > RTC_DATA_SIZE) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "firmware data size 0x%x is too large\n",
		    LE_32(sc->sc_hdr->datasz));

		goto fail;
	}

	/*
	 * copy text of runtime ucode
	 */
	t = (char *)(sc->sc_hdr + 1);	/* TEXT starts right after the header */
	err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->textsz),
	    &fw_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_text);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "failed to allocate text dma memory.\n");
		goto fail;
	}

	dma_p = &sc->sc_dma_fw_text;

	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
	    "text[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->textsz));

	/*
	 * copy data and bak-data of runtime ucode
	 */
	t += LE_32(sc->sc_hdr->textsz);
	err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
	    &fw_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_data);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "failed to allocate data dma memory\n");
		goto fail;
	}

	dma_p = &sc->sc_dma_fw_data;

	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
	    "data[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));

	/*
	 * the backup buffer holds a second copy of the same DATA section:
	 * 't' is deliberately NOT advanced before this block
	 */
	err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
	    &fw_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_data_bak);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "failed to allocate data bakup dma memory\n");
		goto fail;
	}

	dma_p = &sc->sc_dma_fw_data_bak;

	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
	    "data_bak[ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));

	/*
	 * copy text of init ucode
	 */
	t += LE_32(sc->sc_hdr->datasz);
	err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_textsz),
	    &fw_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_init_text);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "failed to allocate init text dma memory\n");
		goto fail;
	}

	dma_p = &sc->sc_dma_fw_init_text;

	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
	    "init_text[ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_textsz));

	/*
	 * copy data of init ucode
	 */
	t += LE_32(sc->sc_hdr->init_textsz);
	err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_datasz),
	    &fw_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_init_data);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "failed to allocate init data dma memory\n");
		goto fail;
	}

	dma_p = &sc->sc_dma_fw_init_data;

	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
	    "init_data[ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_datasz));

	/* sc_boot points at the BOOT section inside the raw image itself */
	sc->sc_boot = t + LE_32(sc->sc_hdr->init_datasz);
fail:
	return (err);
}
1264 1264
1265 1265 static void
1266 1266 iwp_free_fw_dma(iwp_sc_t *sc)
1267 1267 {
1268 1268 iwp_free_dma_mem(&sc->sc_dma_fw_text);
1269 1269 iwp_free_dma_mem(&sc->sc_dma_fw_data);
1270 1270 iwp_free_dma_mem(&sc->sc_dma_fw_data_bak);
1271 1271 iwp_free_dma_mem(&sc->sc_dma_fw_init_text);
1272 1272 iwp_free_dma_mem(&sc->sc_dma_fw_init_data);
1273 1273 }
1274 1274
1275 1275 /*
1276 1276 * Allocate a shared buffer between host and NIC.
1277 1277 */
1278 1278 static int
1279 1279 iwp_alloc_shared(iwp_sc_t *sc)
1280 1280 {
1281 1281 #ifdef DEBUG
1282 1282 iwp_dma_t *dma_p;
1283 1283 #endif
1284 1284 int err = DDI_FAILURE;
1285 1285
1286 1286 /*
1287 1287 * must be aligned on a 4K-page boundary
1288 1288 */
1289 1289 err = iwp_alloc_dma_mem(sc, sizeof (iwp_shared_t),
1290 1290 &sh_dma_attr, &iwp_dma_descattr,
1291 1291 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1292 1292 &sc->sc_dma_sh);
1293 1293 if (err != DDI_SUCCESS) {
1294 1294 goto fail;
1295 1295 }
1296 1296
1297 1297 sc->sc_shared = (iwp_shared_t *)sc->sc_dma_sh.mem_va;
1298 1298
1299 1299 #ifdef DEBUG
1300 1300 dma_p = &sc->sc_dma_sh;
1301 1301 #endif
1302 1302 IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_shared(): "
1303 1303 "sh[ncookies:%d addr:%lx size:%lx]\n",
1304 1304 dma_p->ncookies, dma_p->cookie.dmac_address,
1305 1305 dma_p->cookie.dmac_size));
1306 1306
1307 1307 return (err);
1308 1308 fail:
1309 1309 iwp_free_shared(sc);
1310 1310 return (err);
1311 1311 }
1312 1312
1313 1313 static void
1314 1314 iwp_free_shared(iwp_sc_t *sc)
1315 1315 {
1316 1316 iwp_free_dma_mem(&sc->sc_dma_sh);
1317 1317 }
1318 1318
1319 1319 /*
1320 1320 * Allocate a keep warm page.
1321 1321 */
1322 1322 static int
1323 1323 iwp_alloc_kw(iwp_sc_t *sc)
1324 1324 {
1325 1325 #ifdef DEBUG
1326 1326 iwp_dma_t *dma_p;
1327 1327 #endif
1328 1328 int err = DDI_FAILURE;
1329 1329
1330 1330 /*
1331 1331 * must be aligned on a 4K-page boundary
1332 1332 */
1333 1333 err = iwp_alloc_dma_mem(sc, IWP_KW_SIZE,
1334 1334 &kw_dma_attr, &iwp_dma_descattr,
1335 1335 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1336 1336 &sc->sc_dma_kw);
1337 1337 if (err != DDI_SUCCESS) {
1338 1338 goto fail;
1339 1339 }
1340 1340
1341 1341 #ifdef DEBUG
1342 1342 dma_p = &sc->sc_dma_kw;
1343 1343 #endif
1344 1344 IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_kw(): "
1345 1345 "kw[ncookies:%d addr:%lx size:%lx]\n",
1346 1346 dma_p->ncookies, dma_p->cookie.dmac_address,
1347 1347 dma_p->cookie.dmac_size));
1348 1348
1349 1349 return (err);
1350 1350 fail:
1351 1351 iwp_free_kw(sc);
1352 1352 return (err);
1353 1353 }
1354 1354
1355 1355 static void
1356 1356 iwp_free_kw(iwp_sc_t *sc)
1357 1357 {
1358 1358 iwp_free_dma_mem(&sc->sc_dma_kw);
1359 1359 }
1360 1360
1361 1361 /*
1362 1362 * initialize RX ring buffers
1363 1363 */
static int
iwp_alloc_rx_ring(iwp_sc_t *sc)
{
	iwp_rx_ring_t *ring;
	iwp_rx_data_t *data;
#ifdef DEBUG
	iwp_dma_t *dma_p;
#endif
	int i, err = DDI_FAILURE;

	ring = &sc->sc_rxq;
	ring->cur = 0;

	/*
	 * allocate RX description ring buffer
	 * (one 32-bit descriptor word per queue entry)
	 */
	err = iwp_alloc_dma_mem(sc, RX_QUEUE_SIZE * sizeof (uint32_t),
	    &ring_desc_dma_attr, &iwp_dma_descattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &ring->dma_desc);
	if (err != DDI_SUCCESS) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
		    "dma alloc rx ring desc "
		    "failed\n"));
		goto fail;
	}

	ring->desc = (uint32_t *)ring->dma_desc.mem_va;
#ifdef DEBUG
	dma_p = &ring->dma_desc;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
	    "rx bd[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	/*
	 * Allocate Rx frame buffers.
	 */
	for (i = 0; i < RX_QUEUE_SIZE; i++) {
		data = &ring->data[i];
		err = iwp_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
		    &rx_buffer_dma_attr, &iwp_dma_accattr,
		    DDI_DMA_READ | DDI_DMA_STREAMING,
		    &data->dma_data);
		if (err != DDI_SUCCESS) {
			IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
			    "dma alloc rx ring "
			    "buf[%d] failed\n", i));
			goto fail;
		}
		/*
		 * the physical address bit [8-36] are used,
		 * instead of bit [0-31] in 3945.
		 */
		ring->desc[i] = (uint32_t)
		    (data->dma_data.cookie.dmac_address >> 8);
	}

#ifdef DEBUG
	dma_p = &ring->data[0].dma_data;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
	    "rx buffer[0][ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	/* flush the filled-in descriptor table toward the device */
	IWP_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);

	return (err);

fail:
	iwp_free_rx_ring(sc);
	return (err);
}
1440 1440
1441 1441 /*
1442 1442 * disable RX ring
1443 1443 */
1444 1444 static void
1445 1445 iwp_reset_rx_ring(iwp_sc_t *sc)
1446 1446 {
1447 1447 int n;
1448 1448
1449 1449 iwp_mac_access_enter(sc);
1450 1450 IWP_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1451 1451 for (n = 0; n < 2000; n++) {
1452 1452 if (IWP_READ(sc, FH_MEM_RSSR_RX_STATUS_REG) & (1 << 24)) {
1453 1453 break;
1454 1454 }
1455 1455 DELAY(1000);
1456 1456 }
1457 1457 #ifdef DEBUG
1458 1458 if (2000 == n) {
1459 1459 IWP_DBG((IWP_DEBUG_DMA, "iwp_reset_rx_ring(): "
1460 1460 "timeout resetting Rx ring\n"));
1461 1461 }
1462 1462 #endif
1463 1463 iwp_mac_access_exit(sc);
1464 1464
1465 1465 sc->sc_rxq.cur = 0;
1466 1466 }
1467 1467
1468 1468 static void
1469 1469 iwp_free_rx_ring(iwp_sc_t *sc)
1470 1470 {
1471 1471 int i;
1472 1472
1473 1473 for (i = 0; i < RX_QUEUE_SIZE; i++) {
1474 1474 if (sc->sc_rxq.data[i].dma_data.dma_hdl) {
1475 1475 IWP_DMA_SYNC(sc->sc_rxq.data[i].dma_data,
1476 1476 DDI_DMA_SYNC_FORCPU);
1477 1477 }
1478 1478
1479 1479 iwp_free_dma_mem(&sc->sc_rxq.data[i].dma_data);
1480 1480 }
1481 1481
1482 1482 if (sc->sc_rxq.dma_desc.dma_hdl) {
1483 1483 IWP_DMA_SYNC(sc->sc_rxq.dma_desc, DDI_DMA_SYNC_FORDEV);
1484 1484 }
1485 1485
1486 1486 iwp_free_dma_mem(&sc->sc_rxq.dma_desc);
1487 1487 }
1488 1488
1489 1489 /*
1490 1490 * initialize TX ring buffers
1491 1491 */
1492 1492 static int
1493 1493 iwp_alloc_tx_ring(iwp_sc_t *sc, iwp_tx_ring_t *ring,
1494 1494 int slots, int qid)
1495 1495 {
1496 1496 iwp_tx_data_t *data;
1497 1497 iwp_tx_desc_t *desc_h;
1498 1498 uint32_t paddr_desc_h;
1499 1499 iwp_cmd_t *cmd_h;
1500 1500 uint32_t paddr_cmd_h;
1501 1501 #ifdef DEBUG
1502 1502 iwp_dma_t *dma_p;
1503 1503 #endif
1504 1504 int i, err = DDI_FAILURE;
1505 1505 ring->qid = qid;
1506 1506 ring->count = TFD_QUEUE_SIZE_MAX;
1507 1507 ring->window = slots;
1508 1508 ring->queued = 0;
1509 1509 ring->cur = 0;
1510 1510 ring->desc_cur = 0;
1511 1511
1512 1512 /*
1513 1513 * allocate buffer for TX descriptor ring
1514 1514 */
1515 1515 err = iwp_alloc_dma_mem(sc,
1516 1516 TFD_QUEUE_SIZE_MAX * sizeof (iwp_tx_desc_t),
1517 1517 &ring_desc_dma_attr, &iwp_dma_descattr,
1518 1518 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1519 1519 &ring->dma_desc);
1520 1520 if (err != DDI_SUCCESS) {
1521 1521 IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
1522 1522 "dma alloc tx ring desc[%d] "
1523 1523 "failed\n", qid));
1524 1524 goto fail;
1525 1525 }
1526 1526
1527 1527 #ifdef DEBUG
1528 1528 dma_p = &ring->dma_desc;
1529 1529 #endif
1530 1530 IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
1531 1531 "tx bd[ncookies:%d addr:%lx size:%lx]\n",
1532 1532 dma_p->ncookies, dma_p->cookie.dmac_address,
1533 1533 dma_p->cookie.dmac_size));
1534 1534
1535 1535 desc_h = (iwp_tx_desc_t *)ring->dma_desc.mem_va;
1536 1536 paddr_desc_h = ring->dma_desc.cookie.dmac_address;
1537 1537
1538 1538 /*
1539 1539 * allocate buffer for ucode command
1540 1540 */
1541 1541 err = iwp_alloc_dma_mem(sc,
1542 1542 TFD_QUEUE_SIZE_MAX * sizeof (iwp_cmd_t),
1543 1543 &cmd_dma_attr, &iwp_dma_accattr,
1544 1544 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1545 1545 &ring->dma_cmd);
1546 1546 if (err != DDI_SUCCESS) {
1547 1547 IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
1548 1548 "dma alloc tx ring cmd[%d]"
1549 1549 " failed\n", qid));
1550 1550 goto fail;
1551 1551 }
1552 1552
1553 1553 #ifdef DEBUG
1554 1554 dma_p = &ring->dma_cmd;
1555 1555 #endif
1556 1556 IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
1557 1557 "tx cmd[ncookies:%d addr:%lx size:%lx]\n",
1558 1558 dma_p->ncookies, dma_p->cookie.dmac_address,
1559 1559 dma_p->cookie.dmac_size));
1560 1560
1561 1561 cmd_h = (iwp_cmd_t *)ring->dma_cmd.mem_va;
1562 1562 paddr_cmd_h = ring->dma_cmd.cookie.dmac_address;
1563 1563
1564 1564 /*
1565 1565 * Allocate Tx frame buffers.
1566 1566 */
1567 1567 ring->data = kmem_zalloc(sizeof (iwp_tx_data_t) * TFD_QUEUE_SIZE_MAX,
1568 1568 KM_NOSLEEP);
1569 1569 if (NULL == ring->data) {
1570 1570 IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
1571 1571 "could not allocate "
1572 1572 "tx data slots\n"));
1573 1573 goto fail;
1574 1574 }
1575 1575
1576 1576 for (i = 0; i < TFD_QUEUE_SIZE_MAX; i++) {
1577 1577 data = &ring->data[i];
1578 1578 err = iwp_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
1579 1579 &tx_buffer_dma_attr, &iwp_dma_accattr,
1580 1580 DDI_DMA_WRITE | DDI_DMA_STREAMING,
1581 1581 &data->dma_data);
1582 1582 if (err != DDI_SUCCESS) {
1583 1583 IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
1584 1584 "dma alloc tx "
1585 1585 "ring buf[%d] failed\n", i));
1586 1586 goto fail;
1587 1587 }
1588 1588
1589 1589 data->desc = desc_h + i;
1590 1590 data->paddr_desc = paddr_desc_h +
1591 1591 _PTRDIFF(data->desc, desc_h);
1592 1592 data->cmd = cmd_h + i;
1593 1593 data->paddr_cmd = paddr_cmd_h +
1594 1594 _PTRDIFF(data->cmd, cmd_h);
1595 1595 }
1596 1596 #ifdef DEBUG
1597 1597 dma_p = &ring->data[0].dma_data;
1598 1598 #endif
1599 1599 IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
1600 1600 "tx buffer[0][ncookies:%d addr:%lx "
1601 1601 "size:%lx]\n",
1602 1602 dma_p->ncookies, dma_p->cookie.dmac_address,
1603 1603 dma_p->cookie.dmac_size));
1604 1604
1605 1605 return (err);
1606 1606
1607 1607 fail:
1608 1608 iwp_free_tx_ring(ring);
1609 1609
1610 1610 return (err);
1611 1611 }
1612 1612
1613 1613 /*
1614 1614 * disable TX ring
1615 1615 */
1616 1616 static void
1617 1617 iwp_reset_tx_ring(iwp_sc_t *sc, iwp_tx_ring_t *ring)
1618 1618 {
1619 1619 iwp_tx_data_t *data;
1620 1620 int i, n;
1621 1621
1622 1622 iwp_mac_access_enter(sc);
1623 1623
1624 1624 IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(ring->qid), 0);
1625 1625 for (n = 0; n < 200; n++) {
1626 1626 if (IWP_READ(sc, IWP_FH_TSSR_TX_STATUS_REG) &
1627 1627 IWP_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ring->qid)) {
1628 1628 break;
1629 1629 }
1630 1630 DELAY(10);
1631 1631 }
1632 1632
1633 1633 #ifdef DEBUG
1634 1634 if (200 == n) {
1635 1635 IWP_DBG((IWP_DEBUG_DMA, "iwp_reset_tx_ring(): "
1636 1636 "timeout reset tx ring %d\n",
1637 1637 ring->qid));
1638 1638 }
1639 1639 #endif
1640 1640
1641 1641 iwp_mac_access_exit(sc);
1642 1642
1643 1643 /* by pass, if it's quiesce */
1644 1644 if (!(sc->sc_flags & IWP_F_QUIESCED)) {
1645 1645 for (i = 0; i < ring->count; i++) {
1646 1646 data = &ring->data[i];
1647 1647 IWP_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
1648 1648 }
1649 1649 }
1650 1650
1651 1651 ring->queued = 0;
1652 1652 ring->cur = 0;
1653 1653 ring->desc_cur = 0;
1654 1654 }
1655 1655
1656 1656 static void
1657 1657 iwp_free_tx_ring(iwp_tx_ring_t *ring)
1658 1658 {
1659 1659 int i;
1660 1660
1661 1661 if (ring->dma_desc.dma_hdl != NULL) {
1662 1662 IWP_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
1663 1663 }
1664 1664 iwp_free_dma_mem(&ring->dma_desc);
1665 1665
1666 1666 if (ring->dma_cmd.dma_hdl != NULL) {
1667 1667 IWP_DMA_SYNC(ring->dma_cmd, DDI_DMA_SYNC_FORDEV);
1668 1668 }
1669 1669 iwp_free_dma_mem(&ring->dma_cmd);
1670 1670
1671 1671 if (ring->data != NULL) {
1672 1672 for (i = 0; i < ring->count; i++) {
1673 1673 if (ring->data[i].dma_data.dma_hdl) {
1674 1674 IWP_DMA_SYNC(ring->data[i].dma_data,
1675 1675 DDI_DMA_SYNC_FORDEV);
1676 1676 }
1677 1677 iwp_free_dma_mem(&ring->data[i].dma_data);
1678 1678 }
1679 1679 kmem_free(ring->data, ring->count * sizeof (iwp_tx_data_t));
1680 1680 }
1681 1681 }
1682 1682
1683 1683 /*
1684 1684 * initialize TX and RX ring
1685 1685 */
1686 1686 static int
1687 1687 iwp_ring_init(iwp_sc_t *sc)
1688 1688 {
1689 1689 int i, err = DDI_FAILURE;
1690 1690
1691 1691 for (i = 0; i < IWP_NUM_QUEUES; i++) {
1692 1692 if (IWP_CMD_QUEUE_NUM == i) {
1693 1693 continue;
1694 1694 }
1695 1695
1696 1696 err = iwp_alloc_tx_ring(sc, &sc->sc_txq[i], TFD_TX_CMD_SLOTS,
1697 1697 i);
1698 1698 if (err != DDI_SUCCESS) {
1699 1699 goto fail;
1700 1700 }
1701 1701 }
1702 1702
1703 1703 /*
1704 1704 * initialize command queue
1705 1705 */
1706 1706 err = iwp_alloc_tx_ring(sc, &sc->sc_txq[IWP_CMD_QUEUE_NUM],
1707 1707 TFD_CMD_SLOTS, IWP_CMD_QUEUE_NUM);
1708 1708 if (err != DDI_SUCCESS) {
1709 1709 goto fail;
1710 1710 }
1711 1711
1712 1712 err = iwp_alloc_rx_ring(sc);
1713 1713 if (err != DDI_SUCCESS) {
1714 1714 goto fail;
1715 1715 }
1716 1716
1717 1717 fail:
1718 1718 return (err);
1719 1719 }
1720 1720
1721 1721 static void
1722 1722 iwp_ring_free(iwp_sc_t *sc)
1723 1723 {
1724 1724 int i = IWP_NUM_QUEUES;
1725 1725
1726 1726 iwp_free_rx_ring(sc);
1727 1727 while (--i >= 0) {
1728 1728 iwp_free_tx_ring(&sc->sc_txq[i]);
1729 1729 }
1730 1730 }
1731 1731
1732 1732 /* ARGSUSED */
1733 1733 static ieee80211_node_t *
1734 1734 iwp_node_alloc(ieee80211com_t *ic)
1735 1735 {
1736 1736 iwp_amrr_t *amrr;
1737 1737
1738 1738 amrr = kmem_zalloc(sizeof (iwp_amrr_t), KM_SLEEP);
1739 1739 if (NULL == amrr) {
1740 1740 cmn_err(CE_WARN, "iwp_node_alloc(): "
1741 1741 "failed to allocate memory for amrr structure\n");
1742 1742 return (NULL);
1743 1743 }
1744 1744
1745 1745 iwp_amrr_init(amrr);
1746 1746
1747 1747 return (&amrr->in);
1748 1748 }
1749 1749
1750 1750 static void
1751 1751 iwp_node_free(ieee80211_node_t *in)
1752 1752 {
1753 1753 ieee80211com_t *ic;
1754 1754
1755 1755 if ((NULL == in) ||
1756 1756 (NULL == in->in_ic)) {
1757 1757 cmn_err(CE_WARN, "iwp_node_free() "
1758 1758 "Got a NULL point from Net80211 module\n");
1759 1759 return;
1760 1760 }
1761 1761 ic = in->in_ic;
1762 1762
1763 1763 if (ic->ic_node_cleanup != NULL) {
1764 1764 ic->ic_node_cleanup(in);
1765 1765 }
1766 1766
1767 1767 if (in->in_wpa_ie != NULL) {
1768 1768 ieee80211_free(in->in_wpa_ie);
1769 1769 }
1770 1770
1771 1771 if (in->in_wme_ie != NULL) {
1772 1772 ieee80211_free(in->in_wme_ie);
1773 1773 }
1774 1774
1775 1775 if (in->in_htcap_ie != NULL) {
1776 1776 ieee80211_free(in->in_htcap_ie);
1777 1777 }
1778 1778
1779 1779 kmem_free(in, sizeof (iwp_amrr_t));
1780 1780 }
1781 1781
1782 1782
/*
 * Change a station's state. This function will be invoked by the
 * net80211 module when a station's state needs to change.
 */
static int
iwp_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
{
	iwp_sc_t *sc;
	ieee80211_node_t *in;
	enum ieee80211_state ostate;
	iwp_add_sta_t node;
	int i, err = IWP_FAIL;

	/* defensive: refuse a NULL handle rather than panic */
	if (NULL == ic) {
		return (err);
	}
	sc = (iwp_sc_t *)ic;
	in = ic->ic_bss;
	ostate = ic->ic_state;

	/*
	 * sc_glock serializes all of the hardware programming below;
	 * every early-return path must drop it first.
	 */
	mutex_enter(&sc->sc_glock);

	switch (nstate) {
	case IEEE80211_S_SCAN:
		switch (ostate) {
		case IEEE80211_S_INIT:
			atomic_or_32(&sc->sc_flags, IWP_F_SCANNING);
			iwp_set_led(sc, 2, 10, 2);

			/*
			 * clear association to receive beacons from
			 * all BSS'es
			 */
			sc->sc_config.assoc_id = 0;
			sc->sc_config.filter_flags &=
			    ~LE_32(RXON_FILTER_ASSOC_MSK);

			IWP_DBG((IWP_DEBUG_80211, "iwp_newstate(): "
			    "config chan %d "
			    "flags %x filter_flags %x\n",
			    LE_16(sc->sc_config.chan),
			    LE_32(sc->sc_config.flags),
			    LE_32(sc->sc_config.filter_flags)));

			/* push the cleared RXON config to the firmware */
			err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config,
			    sizeof (iwp_rxon_cmd_t), 1);
			if (err != IWP_SUCCESS) {
				cmn_err(CE_WARN, "iwp_newstate(): "
				    "could not clear association\n");
				atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
				mutex_exit(&sc->sc_glock);
				return (err);
			}

			/* add broadcast node to send probe request */
			(void) memset(&node, 0, sizeof (node));
			(void) memset(&node.sta.addr, 0xff, IEEE80211_ADDR_LEN);
			node.sta.sta_id = IWP_BROADCAST_ID;
			err = iwp_cmd(sc, REPLY_ADD_STA, &node,
			    sizeof (node), 1);
			if (err != IWP_SUCCESS) {
				cmn_err(CE_WARN, "iwp_newstate(): "
				    "could not add broadcast node\n");
				atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
				mutex_exit(&sc->sc_glock);
				return (err);
			}
			break;
		case IEEE80211_S_SCAN:
			/*
			 * sc_glock is dropped around the net80211 call
			 * to avoid holding it across the framework
			 */
			mutex_exit(&sc->sc_glock);
			/* step to next channel before actual FW scan */
			err = sc->sc_newstate(ic, nstate, arg);
			mutex_enter(&sc->sc_glock);
			if ((err != 0) || ((err = iwp_scan(sc)) != 0)) {
				cmn_err(CE_WARN, "iwp_newstate(): "
				    "could not initiate scan\n");
				atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
				ieee80211_cancel_scan(ic);
			}
			/*
			 * NB: this path returns directly; sc_newstate has
			 * already been invoked above
			 */
			mutex_exit(&sc->sc_glock);
			return (err);
		default:
			break;
		}
		sc->sc_clk = 0;
		break;

	case IEEE80211_S_AUTH:
		if (ostate == IEEE80211_S_SCAN) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
		}

		/*
		 * reset state to handle reassociations correctly
		 */
		sc->sc_config.assoc_id = 0;
		sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);

		/*
		 * before sending authentication and association request frame,
		 * we need do something in the hardware, such as setting the
		 * channel same to the target AP...
		 */
		if ((err = iwp_hw_set_before_auth(sc)) != 0) {
			IWP_DBG((IWP_DEBUG_80211, "iwp_newstate(): "
			    "could not send authentication request\n"));
			mutex_exit(&sc->sc_glock);
			return (err);
		}
		break;

	case IEEE80211_S_RUN:
		if (ostate == IEEE80211_S_SCAN) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
		}

		if (IEEE80211_M_MONITOR == ic->ic_opmode) {
			/* let LED blink when monitoring */
			iwp_set_led(sc, 2, 10, 10);
			break;
		}

		IWP_DBG((IWP_DEBUG_80211, "iwp_newstate(): "
		    "associated.\n"));

		err = iwp_run_state_config(sc);
		if (err != IWP_SUCCESS) {
			cmn_err(CE_WARN, "iwp_newstate(): "
			    "failed to set up association\n");
			mutex_exit(&sc->sc_glock);
			return (err);
		}

		/*
		 * start automatic rate control
		 */
		if (IEEE80211_FIXED_RATE_NONE == ic->ic_fixed_rate) {
			atomic_or_32(&sc->sc_flags, IWP_F_RATE_AUTO_CTL);

			/*
			 * set rate to some reasonable initial value
			 * (72 is in 500kbps units, i.e. 36 Mbps --
			 * NOTE(review): confirm unit against net80211)
			 */
			i = in->in_rates.ir_nrates - 1;
			while (i > 0 && IEEE80211_RATE(i) > 72) {
				i--;
			}
			in->in_txrate = i;

		} else {
			atomic_and_32(&sc->sc_flags, ~IWP_F_RATE_AUTO_CTL);
		}

		/*
		 * set LED on after associated
		 */
		iwp_set_led(sc, 2, 0, 1);
		break;

	case IEEE80211_S_INIT:
		if (ostate == IEEE80211_S_SCAN) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
		}
		/*
		 * set LED off after init
		 */
		iwp_set_led(sc, 2, 1, 0);
		break;

	case IEEE80211_S_ASSOC:
		if (ostate == IEEE80211_S_SCAN) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
		}
		break;
	}

	mutex_exit(&sc->sc_glock);

	/* hand the transition on to the generic net80211 handler */
	return (sc->sc_newstate(ic, nstate, arg));
}
1962 1962
1963 1963 /*
1964 1964 * exclusive access to mac begin.
1965 1965 */
1966 1966 static void
1967 1967 iwp_mac_access_enter(iwp_sc_t *sc)
1968 1968 {
1969 1969 uint32_t tmp;
1970 1970 int n;
1971 1971
1972 1972 tmp = IWP_READ(sc, CSR_GP_CNTRL);
1973 1973 IWP_WRITE(sc, CSR_GP_CNTRL,
1974 1974 tmp | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1975 1975
1976 1976 /* wait until we succeed */
1977 1977 for (n = 0; n < 1000; n++) {
1978 1978 if ((IWP_READ(sc, CSR_GP_CNTRL) &
1979 1979 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
1980 1980 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)) ==
1981 1981 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN) {
1982 1982 break;
1983 1983 }
1984 1984 DELAY(10);
1985 1985 }
1986 1986
1987 1987 #ifdef DEBUG
1988 1988 if (1000 == n) {
1989 1989 IWP_DBG((IWP_DEBUG_PIO, "iwp_mac_access_enter(): "
1990 1990 "could not lock memory\n"));
1991 1991 }
1992 1992 #endif
1993 1993 }
1994 1994
1995 1995 /*
1996 1996 * exclusive access to mac end.
1997 1997 */
1998 1998 static void
1999 1999 iwp_mac_access_exit(iwp_sc_t *sc)
2000 2000 {
2001 2001 uint32_t tmp = IWP_READ(sc, CSR_GP_CNTRL);
2002 2002 IWP_WRITE(sc, CSR_GP_CNTRL,
2003 2003 tmp & ~CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2004 2004 }
2005 2005
2006 2006 /*
2007 2007 * this function defined here for future use.
2008 2008 * static uint32_t
2009 2009 * iwp_mem_read(iwp_sc_t *sc, uint32_t addr)
2010 2010 * {
2011 2011 * IWP_WRITE(sc, HBUS_TARG_MEM_RADDR, addr);
2012 2012 * return (IWP_READ(sc, HBUS_TARG_MEM_RDAT));
2013 2013 * }
2014 2014 */
2015 2015
2016 2016 /*
2017 2017 * write mac memory
2018 2018 */
static void
iwp_mem_write(iwp_sc_t *sc, uint32_t addr, uint32_t data)
{
	/* indirect write: latch the target address, then the data word */
	IWP_WRITE(sc, HBUS_TARG_MEM_WADDR, addr);
	IWP_WRITE(sc, HBUS_TARG_MEM_WDAT, data);
}
2025 2025
2026 2026 /*
2027 2027 * read mac register
2028 2028 */
static uint32_t
iwp_reg_read(iwp_sc_t *sc, uint32_t addr)
{
	/*
	 * indirect periphery read: the address is written with the
	 * (3 << 24) bits set in its upper byte -- NOTE(review):
	 * presumably an access-size/enable field of the HBUS target
	 * interface; confirm against the hardware documentation.
	 */
	IWP_WRITE(sc, HBUS_TARG_PRPH_RADDR, addr | (3 << 24));
	return (IWP_READ(sc, HBUS_TARG_PRPH_RDAT));
}
2035 2035
2036 2036 /*
2037 2037 * write mac register
2038 2038 */
static void
iwp_reg_write(iwp_sc_t *sc, uint32_t addr, uint32_t data)
{
	/*
	 * indirect periphery write: address (with the same (3 << 24)
	 * upper bits as iwp_reg_read()) first, then the data word
	 */
	IWP_WRITE(sc, HBUS_TARG_PRPH_WADDR, addr | (3 << 24));
	IWP_WRITE(sc, HBUS_TARG_PRPH_WDAT, data);
}
2045 2045
2046 2046
2047 2047 /*
2048 2048 * steps of loading ucode:
2049 2049 * load init ucode=>init alive=>calibrate=>
2050 2050 * receive calibration result=>reinitialize NIC=>
2051 2051 * load runtime ucode=>runtime alive=>
2052 2052 * send calibration result=>running.
2053 2053 */
2054 2054 static int
2055 2055 iwp_load_init_firmware(iwp_sc_t *sc)
2056 2056 {
2057 2057 int err = IWP_FAIL;
2058 2058 clock_t clk;
2059 2059
2060 2060 atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);
2061 2061
2062 2062 /*
↓ open down ↓ |
2062 lines elided |
↑ open up ↑ |
2063 2063 * load init_text section of uCode to hardware
2064 2064 */
2065 2065 err = iwp_put_seg_fw(sc, sc->sc_dma_fw_init_text.cookie.dmac_address,
2066 2066 RTC_INST_LOWER_BOUND, sc->sc_dma_fw_init_text.cookie.dmac_size);
2067 2067 if (err != IWP_SUCCESS) {
2068 2068 cmn_err(CE_WARN, "iwp_load_init_firmware(): "
2069 2069 "failed to write init uCode.\n");
2070 2070 return (err);
2071 2071 }
2072 2072
2073 - clk = ddi_get_lbolt() + drv_usectohz(1000000);
2073 + clk = ddi_get_lbolt() + drv_sectohz(1);
2074 2074
2075 2075 /* wait loading init_text until completed or timeout */
2076 2076 while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
2077 2077 if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
2078 2078 break;
2079 2079 }
2080 2080 }
2081 2081
2082 2082 if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
2083 2083 cmn_err(CE_WARN, "iwp_load_init_firmware(): "
2084 2084 "timeout waiting for init uCode load.\n");
2085 2085 return (IWP_FAIL);
2086 2086 }
2087 2087
2088 2088 atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);
2089 2089
2090 2090 /*
↓ open down ↓ |
7 lines elided |
↑ open up ↑ |
2091 2091 * load init_data section of uCode to hardware
2092 2092 */
2093 2093 err = iwp_put_seg_fw(sc, sc->sc_dma_fw_init_data.cookie.dmac_address,
2094 2094 RTC_DATA_LOWER_BOUND, sc->sc_dma_fw_init_data.cookie.dmac_size);
2095 2095 if (err != IWP_SUCCESS) {
2096 2096 cmn_err(CE_WARN, "iwp_load_init_firmware(): "
2097 2097 "failed to write init_data uCode.\n");
2098 2098 return (err);
2099 2099 }
2100 2100
2101 - clk = ddi_get_lbolt() + drv_usectohz(1000000);
2101 + clk = ddi_get_lbolt() + drv_sectohz(1);
2102 2102
2103 2103 /*
2104 2104 * wait loading init_data until completed or timeout
2105 2105 */
2106 2106 while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
2107 2107 if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
2108 2108 break;
2109 2109 }
2110 2110 }
2111 2111
2112 2112 if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
2113 2113 cmn_err(CE_WARN, "iwp_load_init_firmware(): "
2114 2114 "timeout waiting for init_data uCode load.\n");
2115 2115 return (IWP_FAIL);
2116 2116 }
2117 2117
2118 2118 atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);
2119 2119
2120 2120 return (err);
2121 2121 }
2122 2122
2123 2123 static int
2124 2124 iwp_load_run_firmware(iwp_sc_t *sc)
2125 2125 {
2126 2126 int err = IWP_FAIL;
2127 2127 clock_t clk;
2128 2128
2129 2129 atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);
2130 2130
2131 2131 /*
↓ open down ↓ |
20 lines elided |
↑ open up ↑ |
2132 2132 * load init_text section of uCode to hardware
2133 2133 */
2134 2134 err = iwp_put_seg_fw(sc, sc->sc_dma_fw_text.cookie.dmac_address,
2135 2135 RTC_INST_LOWER_BOUND, sc->sc_dma_fw_text.cookie.dmac_size);
2136 2136 if (err != IWP_SUCCESS) {
2137 2137 cmn_err(CE_WARN, "iwp_load_run_firmware(): "
2138 2138 "failed to write run uCode.\n");
2139 2139 return (err);
2140 2140 }
2141 2141
2142 - clk = ddi_get_lbolt() + drv_usectohz(1000000);
2142 + clk = ddi_get_lbolt() + drv_sectohz(1);
2143 2143
2144 2144 /* wait loading run_text until completed or timeout */
2145 2145 while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
2146 2146 if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
2147 2147 break;
2148 2148 }
2149 2149 }
2150 2150
2151 2151 if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
2152 2152 cmn_err(CE_WARN, "iwp_load_run_firmware(): "
2153 2153 "timeout waiting for run uCode load.\n");
2154 2154 return (IWP_FAIL);
2155 2155 }
2156 2156
2157 2157 atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);
2158 2158
2159 2159 /*
↓ open down ↓ |
7 lines elided |
↑ open up ↑ |
2160 2160 * load run_data section of uCode to hardware
2161 2161 */
2162 2162 err = iwp_put_seg_fw(sc, sc->sc_dma_fw_data_bak.cookie.dmac_address,
2163 2163 RTC_DATA_LOWER_BOUND, sc->sc_dma_fw_data.cookie.dmac_size);
2164 2164 if (err != IWP_SUCCESS) {
2165 2165 cmn_err(CE_WARN, "iwp_load_run_firmware(): "
2166 2166 "failed to write run_data uCode.\n");
2167 2167 return (err);
2168 2168 }
2169 2169
2170 - clk = ddi_get_lbolt() + drv_usectohz(1000000);
2170 + clk = ddi_get_lbolt() + drv_sectohz(1);
2171 2171
2172 2172 /*
2173 2173 * wait loading run_data until completed or timeout
2174 2174 */
2175 2175 while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
2176 2176 if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
2177 2177 break;
2178 2178 }
2179 2179 }
2180 2180
2181 2181 if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
2182 2182 cmn_err(CE_WARN, "iwp_load_run_firmware(): "
2183 2183 "timeout waiting for run_data uCode load.\n");
2184 2184 return (IWP_FAIL);
2185 2185 }
2186 2186
2187 2187 atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);
2188 2188
2189 2189 return (err);
2190 2190 }
2191 2191
2192 2192 /*
2193 2193 * this function will be invoked to receive phy information
2194 2194 * when a frame is received.
2195 2195 */
2196 2196 static void
2197 2197 iwp_rx_phy_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
2198 2198 {
2199 2199
2200 2200 sc->sc_rx_phy_res.flag = 1;
2201 2201
2202 2202 (void) memcpy(sc->sc_rx_phy_res.buf, (uint8_t *)(desc + 1),
2203 2203 sizeof (iwp_rx_phy_res_t));
2204 2204 }
2205 2205
2206 2206 /*
2207 2207 * this function will be invoked to receive body of frame when
2208 2208 * a frame is received.
2209 2209 */
2210 2210 static void
2211 2211 iwp_rx_mpdu_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
2212 2212 {
2213 2213 ieee80211com_t *ic = &sc->sc_ic;
2214 2214 #ifdef DEBUG
2215 2215 iwp_rx_ring_t *ring = &sc->sc_rxq;
2216 2216 #endif
2217 2217 struct ieee80211_frame *wh;
2218 2218 struct iwp_rx_non_cfg_phy *phyinfo;
2219 2219 struct iwp_rx_mpdu_body_size *mpdu_size;
2220 2220
2221 2221 mblk_t *mp;
2222 2222 int16_t t;
2223 2223 uint16_t len, rssi, agc;
2224 2224 uint32_t temp, crc, *tail;
2225 2225 uint32_t arssi, brssi, crssi, mrssi;
2226 2226 iwp_rx_phy_res_t *stat;
2227 2227 ieee80211_node_t *in;
2228 2228
2229 2229 /*
2230 2230 * assuming not 11n here. cope with 11n in phase-II
2231 2231 */
2232 2232 mpdu_size = (struct iwp_rx_mpdu_body_size *)(desc + 1);
2233 2233 stat = (iwp_rx_phy_res_t *)sc->sc_rx_phy_res.buf;
2234 2234 if (stat->cfg_phy_cnt > 20) {
2235 2235 return;
2236 2236 }
2237 2237
2238 2238 phyinfo = (struct iwp_rx_non_cfg_phy *)stat->non_cfg_phy;
2239 2239 temp = LE_32(phyinfo->non_cfg_phy[IWP_RX_RES_AGC_IDX]);
2240 2240 agc = (temp & IWP_OFDM_AGC_MSK) >> IWP_OFDM_AGC_BIT_POS;
2241 2241
2242 2242 temp = LE_32(phyinfo->non_cfg_phy[IWP_RX_RES_RSSI_AB_IDX]);
2243 2243 arssi = (temp & IWP_OFDM_RSSI_A_MSK) >> IWP_OFDM_RSSI_A_BIT_POS;
2244 2244 brssi = (temp & IWP_OFDM_RSSI_B_MSK) >> IWP_OFDM_RSSI_B_BIT_POS;
2245 2245
2246 2246 temp = LE_32(phyinfo->non_cfg_phy[IWP_RX_RES_RSSI_C_IDX]);
2247 2247 crssi = (temp & IWP_OFDM_RSSI_C_MSK) >> IWP_OFDM_RSSI_C_BIT_POS;
2248 2248
2249 2249 mrssi = MAX(arssi, brssi);
2250 2250 mrssi = MAX(mrssi, crssi);
2251 2251
2252 2252 t = mrssi - agc - IWP_RSSI_OFFSET;
2253 2253 /*
2254 2254 * convert dBm to percentage
2255 2255 */
2256 2256 rssi = (100 * 75 * 75 - (-20 - t) * (15 * 75 + 62 * (-20 - t)))
2257 2257 / (75 * 75);
2258 2258 if (rssi > 100) {
2259 2259 rssi = 100;
2260 2260 }
2261 2261 if (rssi < 1) {
2262 2262 rssi = 1;
2263 2263 }
2264 2264
2265 2265 /*
2266 2266 * size of frame, not include FCS
2267 2267 */
2268 2268 len = LE_16(mpdu_size->byte_count);
2269 2269 tail = (uint32_t *)((uint8_t *)(desc + 1) +
2270 2270 sizeof (struct iwp_rx_mpdu_body_size) + len);
2271 2271 bcopy(tail, &crc, 4);
2272 2272
2273 2273 IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
2274 2274 "rx intr: idx=%d phy_len=%x len=%d "
2275 2275 "rate=%x chan=%d tstamp=%x non_cfg_phy_count=%x "
2276 2276 "cfg_phy_count=%x tail=%x", ring->cur, sizeof (*stat),
2277 2277 len, stat->rate.r.s.rate, stat->channel,
2278 2278 LE_32(stat->timestampl), stat->non_cfg_phy_cnt,
2279 2279 stat->cfg_phy_cnt, LE_32(crc)));
2280 2280
2281 2281 if ((len < 16) || (len > sc->sc_dmabuf_sz)) {
2282 2282 IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
2283 2283 "rx frame oversize\n"));
2284 2284 return;
2285 2285 }
2286 2286
2287 2287 /*
2288 2288 * discard Rx frames with bad CRC
2289 2289 */
2290 2290 if ((LE_32(crc) &
2291 2291 (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) !=
2292 2292 (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) {
2293 2293 IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
2294 2294 "rx crc error tail: %x\n",
2295 2295 LE_32(crc)));
2296 2296 sc->sc_rx_err++;
2297 2297 return;
2298 2298 }
2299 2299
2300 2300 wh = (struct ieee80211_frame *)
2301 2301 ((uint8_t *)(desc + 1)+ sizeof (struct iwp_rx_mpdu_body_size));
2302 2302
2303 2303 if (IEEE80211_FC0_SUBTYPE_ASSOC_RESP == *(uint8_t *)wh) {
2304 2304 sc->sc_assoc_id = *((uint16_t *)(wh + 1) + 2);
2305 2305 IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
2306 2306 "rx : association id = %x\n",
2307 2307 sc->sc_assoc_id));
2308 2308 }
2309 2309
2310 2310 #ifdef DEBUG
2311 2311 if (iwp_dbg_flags & IWP_DEBUG_RX) {
2312 2312 ieee80211_dump_pkt((uint8_t *)wh, len, 0, 0);
2313 2313 }
2314 2314 #endif
2315 2315
2316 2316 in = ieee80211_find_rxnode(ic, wh);
2317 2317 mp = allocb(len, BPRI_MED);
2318 2318 if (mp) {
2319 2319 (void) memcpy(mp->b_wptr, wh, len);
2320 2320 mp->b_wptr += len;
2321 2321
2322 2322 /*
2323 2323 * send the frame to the 802.11 layer
2324 2324 */
2325 2325 (void) ieee80211_input(ic, mp, in, rssi, 0);
2326 2326 } else {
2327 2327 sc->sc_rx_nobuf++;
2328 2328 IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
2329 2329 "alloc rx buf failed\n"));
2330 2330 }
2331 2331
2332 2332 /*
2333 2333 * release node reference
2334 2334 */
2335 2335 ieee80211_free_node(in);
2336 2336 }
2337 2337
2338 2338 /*
2339 2339 * process correlative affairs after a frame is sent.
2340 2340 */
2341 2341 static void
2342 2342 iwp_tx_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
2343 2343 {
2344 2344 ieee80211com_t *ic = &sc->sc_ic;
2345 2345 iwp_tx_ring_t *ring = &sc->sc_txq[desc->hdr.qid & 0x3];
2346 2346 iwp_tx_stat_t *stat = (iwp_tx_stat_t *)(desc + 1);
2347 2347 iwp_amrr_t *amrr;
2348 2348
2349 2349 if (NULL == ic->ic_bss) {
2350 2350 return;
2351 2351 }
2352 2352
2353 2353 amrr = (iwp_amrr_t *)ic->ic_bss;
2354 2354
2355 2355 amrr->txcnt++;
2356 2356 IWP_DBG((IWP_DEBUG_RATECTL, "iwp_tx_intr(): "
2357 2357 "tx: %d cnt\n", amrr->txcnt));
2358 2358
2359 2359 if (stat->ntries > 0) {
2360 2360 amrr->retrycnt++;
2361 2361 sc->sc_tx_retries++;
2362 2362 IWP_DBG((IWP_DEBUG_TX, "iwp_tx_intr(): "
2363 2363 "tx: %d retries\n",
2364 2364 sc->sc_tx_retries));
2365 2365 }
2366 2366
2367 2367 mutex_enter(&sc->sc_mt_lock);
2368 2368 sc->sc_tx_timer = 0;
2369 2369 mutex_exit(&sc->sc_mt_lock);
2370 2370
2371 2371 mutex_enter(&sc->sc_tx_lock);
2372 2372
2373 2373 ring->queued--;
2374 2374 if (ring->queued < 0) {
2375 2375 ring->queued = 0;
2376 2376 }
2377 2377
2378 2378 if ((sc->sc_need_reschedule) && (ring->queued <= (ring->count >> 3))) {
2379 2379 sc->sc_need_reschedule = 0;
2380 2380 mutex_exit(&sc->sc_tx_lock);
2381 2381 mac_tx_update(ic->ic_mach);
2382 2382 mutex_enter(&sc->sc_tx_lock);
2383 2383 }
2384 2384
2385 2385 mutex_exit(&sc->sc_tx_lock);
2386 2386 }
2387 2387
2388 2388 /*
2389 2389 * inform a given command has been executed
2390 2390 */
2391 2391 static void
2392 2392 iwp_cmd_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
2393 2393 {
2394 2394 if ((desc->hdr.qid & 7) != 4) {
2395 2395 return;
2396 2396 }
2397 2397
2398 2398 if (sc->sc_cmd_accum > 0) {
2399 2399 sc->sc_cmd_accum--;
2400 2400 return;
2401 2401 }
2402 2402
2403 2403 mutex_enter(&sc->sc_glock);
2404 2404
2405 2405 sc->sc_cmd_flag = SC_CMD_FLG_DONE;
2406 2406
2407 2407 cv_signal(&sc->sc_cmd_cv);
2408 2408
2409 2409 mutex_exit(&sc->sc_glock);
2410 2410
2411 2411 IWP_DBG((IWP_DEBUG_CMD, "iwp_cmd_intr(): "
2412 2412 "qid=%x idx=%d flags=%x type=0x%x\n",
2413 2413 desc->hdr.qid, desc->hdr.idx, desc->hdr.flags,
2414 2414 desc->hdr.type));
2415 2415 }
2416 2416
2417 2417 /*
2418 2418 * this function will be invoked when alive notification occur.
2419 2419 */
2420 2420 static void
2421 2421 iwp_ucode_alive(iwp_sc_t *sc, iwp_rx_desc_t *desc)
2422 2422 {
2423 2423 uint32_t rv;
2424 2424 struct iwp_calib_cfg_cmd cmd;
2425 2425 struct iwp_alive_resp *ar =
2426 2426 (struct iwp_alive_resp *)(desc + 1);
2427 2427 struct iwp_calib_results *res_p = &sc->sc_calib_results;
2428 2428
2429 2429 /*
2430 2430 * the microcontroller is ready
2431 2431 */
2432 2432 IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): "
2433 2433 "microcode alive notification minor: %x major: %x type: "
2434 2434 "%x subtype: %x\n",
2435 2435 ar->ucode_minor, ar->ucode_minor, ar->ver_type, ar->ver_subtype));
2436 2436
2437 2437 #ifdef DEBUG
2438 2438 if (LE_32(ar->is_valid) != UCODE_VALID_OK) {
2439 2439 IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): "
2440 2440 "microcontroller initialization failed\n"));
2441 2441 }
2442 2442 #endif
2443 2443
2444 2444 /*
2445 2445 * determine if init alive or runtime alive.
2446 2446 */
2447 2447 if (INITIALIZE_SUBTYPE == ar->ver_subtype) {
2448 2448 IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): "
2449 2449 "initialization alive received.\n"));
2450 2450
2451 2451 (void) memcpy(&sc->sc_card_alive_init, ar,
2452 2452 sizeof (struct iwp_init_alive_resp));
2453 2453
2454 2454 /*
2455 2455 * necessary configuration to NIC
2456 2456 */
2457 2457 mutex_enter(&sc->sc_glock);
2458 2458
2459 2459 rv = iwp_alive_common(sc);
2460 2460 if (rv != IWP_SUCCESS) {
2461 2461 cmn_err(CE_WARN, "iwp_ucode_alive(): "
2462 2462 "common alive process failed in init alive.\n");
2463 2463 mutex_exit(&sc->sc_glock);
2464 2464 return;
2465 2465 }
2466 2466
2467 2467 (void) memset(&cmd, 0, sizeof (cmd));
2468 2468
2469 2469 cmd.ucd_calib_cfg.once.is_enable = IWP_CALIB_INIT_CFG_ALL;
2470 2470 cmd.ucd_calib_cfg.once.start = IWP_CALIB_INIT_CFG_ALL;
2471 2471 cmd.ucd_calib_cfg.once.send_res = IWP_CALIB_INIT_CFG_ALL;
2472 2472 cmd.ucd_calib_cfg.flags = IWP_CALIB_INIT_CFG_ALL;
2473 2473
2474 2474 /*
2475 2475 * require ucode execute calibration
2476 2476 */
2477 2477 rv = iwp_cmd(sc, CALIBRATION_CFG_CMD, &cmd, sizeof (cmd), 1);
2478 2478 if (rv != IWP_SUCCESS) {
2479 2479 cmn_err(CE_WARN, "iwp_ucode_alive(): "
2480 2480 "failed to send calibration configure command.\n");
2481 2481 mutex_exit(&sc->sc_glock);
2482 2482 return;
2483 2483 }
2484 2484
2485 2485 mutex_exit(&sc->sc_glock);
2486 2486
2487 2487 } else { /* runtime alive */
2488 2488
2489 2489 IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): "
2490 2490 "runtime alive received.\n"));
2491 2491
2492 2492 (void) memcpy(&sc->sc_card_alive_run, ar,
2493 2493 sizeof (struct iwp_alive_resp));
2494 2494
2495 2495 mutex_enter(&sc->sc_glock);
2496 2496
2497 2497 /*
2498 2498 * necessary configuration to NIC
2499 2499 */
2500 2500 rv = iwp_alive_common(sc);
2501 2501 if (rv != IWP_SUCCESS) {
2502 2502 cmn_err(CE_WARN, "iwp_ucode_alive(): "
2503 2503 "common alive process failed in run alive.\n");
2504 2504 mutex_exit(&sc->sc_glock);
2505 2505 return;
2506 2506 }
2507 2507
2508 2508 /*
2509 2509 * send the result of local oscilator calibration to uCode.
2510 2510 */
2511 2511 if (res_p->lo_res != NULL) {
2512 2512 rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2513 2513 res_p->lo_res, res_p->lo_res_len, 1);
2514 2514 if (rv != IWP_SUCCESS) {
2515 2515 cmn_err(CE_WARN, "iwp_ucode_alive(): "
2516 2516 "failed to send local"
2517 2517 "oscilator calibration command.\n");
2518 2518 mutex_exit(&sc->sc_glock);
2519 2519 return;
2520 2520 }
2521 2521
2522 2522 DELAY(1000);
2523 2523 }
2524 2524
2525 2525 /*
2526 2526 * send the result of TX IQ calibration to uCode.
2527 2527 */
2528 2528 if (res_p->tx_iq_res != NULL) {
2529 2529 rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2530 2530 res_p->tx_iq_res, res_p->tx_iq_res_len, 1);
2531 2531 if (rv != IWP_SUCCESS) {
2532 2532 cmn_err(CE_WARN, "iwp_ucode_alive(): "
2533 2533 "failed to send TX IQ"
2534 2534 "calibration command.\n");
2535 2535 mutex_exit(&sc->sc_glock);
2536 2536 return;
2537 2537 }
2538 2538
2539 2539 DELAY(1000);
2540 2540 }
2541 2541
2542 2542 /*
2543 2543 * send the result of TX IQ perd calibration to uCode.
2544 2544 */
2545 2545 if (res_p->tx_iq_perd_res != NULL) {
2546 2546 rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2547 2547 res_p->tx_iq_perd_res,
2548 2548 res_p->tx_iq_perd_res_len, 1);
2549 2549 if (rv != IWP_SUCCESS) {
2550 2550 cmn_err(CE_WARN, "iwp_ucode_alive(): "
2551 2551 "failed to send TX IQ perd"
2552 2552 "calibration command.\n");
2553 2553 mutex_exit(&sc->sc_glock);
2554 2554 return;
2555 2555 }
2556 2556
2557 2557 DELAY(1000);
2558 2558 }
2559 2559
2560 2560 /*
2561 2561 * send the result of Base Band calibration to uCode.
2562 2562 */
2563 2563 if (res_p->base_band_res != NULL) {
2564 2564 rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2565 2565 res_p->base_band_res,
2566 2566 res_p->base_band_res_len, 1);
2567 2567 if (rv != IWP_SUCCESS) {
2568 2568 cmn_err(CE_WARN, "iwp_ucode_alive(): "
2569 2569 "failed to send Base Band"
2570 2570 "calibration command.\n");
2571 2571 mutex_exit(&sc->sc_glock);
2572 2572 return;
2573 2573 }
2574 2574
2575 2575 DELAY(1000);
2576 2576 }
2577 2577
2578 2578 atomic_or_32(&sc->sc_flags, IWP_F_FW_INIT);
2579 2579 cv_signal(&sc->sc_ucode_cv);
2580 2580
2581 2581 mutex_exit(&sc->sc_glock);
2582 2582 }
2583 2583
2584 2584 }
2585 2585
/*
 * deal with receiving frames, command response
 * and all notifications from ucode.
 *
 * Soft interrupt handler: drains the rx ring from the driver's
 * current position up to the index the firmware advertised in shared
 * memory, dispatching each descriptor by notification type, then
 * returns the free-buffer write pointer to the firmware and re-enables
 * interrupts (iwp_intr() disabled them before triggering us).
 */
/* ARGSUSED */
static uint_t
iwp_rx_softintr(caddr_t arg, caddr_t unused)
{
	iwp_sc_t *sc;
	ieee80211com_t *ic;
	iwp_rx_desc_t *desc;
	iwp_rx_data_t *data;
	uint32_t index;

	if (NULL == arg) {
		return (DDI_INTR_UNCLAIMED);
	}
	sc = (iwp_sc_t *)arg;
	ic = &sc->sc_ic;

	/*
	 * firmware has moved the index of the rx queue, driver get it,
	 * and deal with it.
	 * (low 12 bits of the shared-memory word -- presumably the
	 * closed rx index field; confirm against the firmware spec)
	 */
	index = (sc->sc_shared->val0) & 0xfff;

	while (sc->sc_rxq.cur != index) {
		data = &sc->sc_rxq.data[sc->sc_rxq.cur];
		desc = (iwp_rx_desc_t *)data->dma_data.mem_va;

		IWP_DBG((IWP_DEBUG_INTR, "iwp_rx_softintr(): "
		    "rx notification index = %d"
		    " cur = %d qid=%x idx=%d flags=%x type=%x len=%d\n",
		    index, sc->sc_rxq.cur, desc->hdr.qid, desc->hdr.idx,
		    desc->hdr.flags, desc->hdr.type, LE_32(desc->len)));

		/*
		 * a command other than a tx need to be replied
		 */
		if (!(desc->hdr.qid & 0x80) &&
		    (desc->hdr.type != REPLY_SCAN_CMD) &&
		    (desc->hdr.type != REPLY_TX)) {
			iwp_cmd_intr(sc, desc);
		}

		switch (desc->hdr.type) {
		case REPLY_RX_PHY_CMD:
			/* PHY stats, saved for the following MPDU */
			iwp_rx_phy_intr(sc, desc);
			break;

		case REPLY_RX_MPDU_CMD:
			/* the frame body itself */
			iwp_rx_mpdu_intr(sc, desc);
			break;

		case REPLY_TX:
			/* tx completion accounting */
			iwp_tx_intr(sc, desc);
			break;

		case REPLY_ALIVE:
			/* firmware image came alive */
			iwp_ucode_alive(sc, desc);
			break;

		case CARD_STATE_NOTIFICATION:
		{
			uint32_t *status = (uint32_t *)(desc + 1);

			IWP_DBG((IWP_DEBUG_RADIO, "iwp_rx_softintr(): "
			    "state changed to %x\n",
			    LE_32(*status)));

			if (LE_32(*status) & 1) {
				/*
				 * the radio button has to be pushed(OFF). It
				 * is considered as a hw error, the
				 * iwp_thread() tries to recover it after the
				 * button is pushed again(ON)
				 */
				cmn_err(CE_NOTE, "iwp_rx_softintr(): "
				    "radio transmitter is off\n");
				sc->sc_ostate = sc->sc_ic.ic_state;
				ieee80211_new_state(&sc->sc_ic,
				    IEEE80211_S_INIT, -1);
				atomic_or_32(&sc->sc_flags,
				    IWP_F_HW_ERR_RECOVER | IWP_F_RADIO_OFF);
			}

			break;
		}

		case SCAN_START_NOTIFICATION:
		{
			iwp_start_scan_t *scan =
			    (iwp_start_scan_t *)(desc + 1);

			IWP_DBG((IWP_DEBUG_SCAN, "iwp_rx_softintr(): "
			    "scanning channel %d status %x\n",
			    scan->chan, LE_32(scan->status)));

			/* track the channel being scanned for net80211 */
			ic->ic_curchan = &ic->ic_sup_channels[scan->chan];
			break;
		}

		case SCAN_COMPLETE_NOTIFICATION:
		{
#ifdef DEBUG
			iwp_stop_scan_t *scan =
			    (iwp_stop_scan_t *)(desc + 1);

			IWP_DBG((IWP_DEBUG_SCAN, "iwp_rx_softintr(): "
			    "completed channel %d (burst of %d) status %02x\n",
			    scan->chan, scan->nchan, scan->status));
#endif

			/*
			 * presumably signals iwp_thread() to advance the
			 * scan state machine -- confirm against iwp_thread
			 */
			sc->sc_scan_pending++;
			break;
		}

		case STATISTICS_NOTIFICATION:
		{
			/*
			 * handle statistics notification
			 */
			break;
		}

		case CALIBRATION_RES_NOTIFICATION:
			/* stash a calibration result for later replay */
			iwp_save_calib_result(sc, desc);
			break;

		case CALIBRATION_COMPLETE_NOTIFICATION:
			/* init image finished calibrating; wake the loader */
			mutex_enter(&sc->sc_glock);
			atomic_or_32(&sc->sc_flags, IWP_F_FW_INIT);
			cv_signal(&sc->sc_ucode_cv);
			mutex_exit(&sc->sc_glock);
			break;

		case MISSED_BEACONS_NOTIFICATION:
		{
			struct iwp_beacon_missed *miss =
			    (struct iwp_beacon_missed *)(desc + 1);

			/* drop the association after 50 missed beacons */
			if ((ic->ic_state == IEEE80211_S_RUN) &&
			    (LE_32(miss->consecutive) > 50)) {
				cmn_err(CE_NOTE, "iwp: iwp_rx_softintr(): "
				    "beacon missed %d/%d\n",
				    LE_32(miss->consecutive),
				    LE_32(miss->total));
				(void) ieee80211_new_state(ic,
				    IEEE80211_S_INIT, -1);
			}
			break;
		}
		}

		sc->sc_rxq.cur = (sc->sc_rxq.cur + 1) % RX_QUEUE_SIZE;
	}

	/*
	 * driver dealt with what received in rx queue and tell the information
	 * to the firmware.
	 * (write pointer is handed back 8-aligned -- presumably a firmware
	 * alignment requirement; confirm against the interface spec)
	 */
	index = (0 == index) ? RX_QUEUE_SIZE - 1 : index - 1;
	IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, index & (~7));

	/*
	 * re-enable interrupts
	 */
	IWP_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);

	return (DDI_INTR_CLAIMED);
}
2757 2757
/*
 * the handle of interrupt
 *
 * Hard interrupt handler: reads and acknowledges the interrupt cause
 * registers with further interrupts disabled, handles fatal firmware
 * errors and firmware-load completions inline, and defers rx work to
 * the soft interrupt (which re-enables interrupts itself).
 */
/* ARGSUSED */
static uint_t
iwp_intr(caddr_t arg, caddr_t unused)
{
	iwp_sc_t *sc;
	uint32_t r, rfh;

	if (NULL == arg) {
		return (DDI_INTR_UNCLAIMED);
	}
	sc = (iwp_sc_t *)arg;

	/* all-zero or all-ones means the interrupt is not ours */
	r = IWP_READ(sc, CSR_INT);
	if (0 == r || 0xffffffff == r) {
		return (DDI_INTR_UNCLAIMED);
	}

	IWP_DBG((IWP_DEBUG_INTR, "iwp_intr(): "
	    "interrupt reg %x\n", r));

	rfh = IWP_READ(sc, CSR_FH_INT_STATUS);

	IWP_DBG((IWP_DEBUG_INTR, "iwp_intr(): "
	    "FH interrupt reg %x\n", rfh));

	/*
	 * disable interrupts
	 * (re-enabled at the end, or by iwp_rx_softintr() when rx
	 * work was deferred)
	 */
	IWP_WRITE(sc, CSR_INT_MASK, 0);

	/*
	 * ack interrupts
	 */
	IWP_WRITE(sc, CSR_INT, r);
	IWP_WRITE(sc, CSR_FH_INT_STATUS, rfh);

	if (r & (BIT_INT_SWERROR | BIT_INT_ERR)) {
		IWP_DBG((IWP_DEBUG_FW, "iwp_intr(): "
		    "fatal firmware error\n"));
		/* stop the hardware and let iwp_thread() recover it */
		iwp_stop(sc);
		sc->sc_ostate = sc->sc_ic.ic_state;

		/* notify upper layer */
		if (!IWP_CHK_FAST_RECOVER(sc)) {
			ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
		}

		atomic_or_32(&sc->sc_flags, IWP_F_HW_ERR_RECOVER);
		/* NB: interrupts stay masked until recovery */
		return (DDI_INTR_CLAIMED);
	}

	if (r & BIT_INT_RF_KILL) {
		uint32_t tmp = IWP_READ(sc, CSR_GP_CNTRL);
		/* bit 27 -- presumably the HW RF-kill switch state */
		if (tmp & (1 << 27)) {
			cmn_err(CE_NOTE, "RF switch: radio on\n");
		}
	}

	/*
	 * rx traffic is handled in soft interrupt context, which also
	 * re-enables interrupts when it is done
	 */
	if ((r & (BIT_INT_FH_RX | BIT_INT_SW_RX)) ||
	    (rfh & FH_INT_RX_MASK)) {
		(void) ddi_intr_trigger_softint(sc->sc_soft_hdl, NULL);
		return (DDI_INTR_CLAIMED);
	}

	if (r & BIT_INT_FH_TX) {
		/* a firmware segment DMA finished: wake the loader */
		mutex_enter(&sc->sc_glock);
		atomic_or_32(&sc->sc_flags, IWP_F_PUT_SEG);
		cv_signal(&sc->sc_put_seg_cv);
		mutex_exit(&sc->sc_glock);
	}

#ifdef DEBUG
	if (r & BIT_INT_ALIVE) {
		IWP_DBG((IWP_DEBUG_FW, "iwp_intr(): "
		    "firmware initialized.\n"));
	}
#endif

	/*
	 * re-enable interrupts
	 */
	IWP_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);

	return (DDI_INTR_CLAIMED);
}
2846 2846
/*
 * Map an ieee80211 rate (units of 500 kbps) to the hardware PLCP
 * signal value.  Returns 0 for rates the hardware does not support.
 */
static uint8_t
iwp_rate_to_plcp(int rate)
{
	static const struct {
		int	r;
		uint8_t	plcp;
	} rate_map[] = {
		/*
		 * CCK rates
		 */
		{ 2,	0xa },
		{ 4,	0x14 },
		{ 11,	0x37 },
		{ 22,	0x6e },

		/*
		 * OFDM rates
		 */
		{ 12,	0xd },
		{ 18,	0xf },
		{ 24,	0x5 },
		{ 36,	0x7 },
		{ 48,	0x9 },
		{ 72,	0xb },
		{ 96,	0x1 },
		{ 108,	0x3 }
	};
	size_t i;

	for (i = 0; i < sizeof (rate_map) / sizeof (rate_map[0]); i++) {
		if (rate_map[i].r == rate) {
			return (rate_map[i].plcp);
		}
	}

	return (0);
}
2914 2914
2915 2915 /*
2916 2916 * invoked by GLD send frames
2917 2917 */
2918 2918 static mblk_t *
2919 2919 iwp_m_tx(void *arg, mblk_t *mp)
2920 2920 {
2921 2921 iwp_sc_t *sc;
2922 2922 ieee80211com_t *ic;
2923 2923 mblk_t *next;
2924 2924
2925 2925 if (NULL == arg) {
2926 2926 return (NULL);
2927 2927 }
2928 2928 sc = (iwp_sc_t *)arg;
2929 2929 ic = &sc->sc_ic;
2930 2930
2931 2931 if (sc->sc_flags & IWP_F_SUSPEND) {
2932 2932 freemsgchain(mp);
2933 2933 return (NULL);
2934 2934 }
2935 2935
2936 2936 if (ic->ic_state != IEEE80211_S_RUN) {
2937 2937 freemsgchain(mp);
2938 2938 return (NULL);
2939 2939 }
2940 2940
2941 2941 if ((sc->sc_flags & IWP_F_HW_ERR_RECOVER) &&
2942 2942 IWP_CHK_FAST_RECOVER(sc)) {
2943 2943 IWP_DBG((IWP_DEBUG_FW, "iwp_m_tx(): "
2944 2944 "hold queue\n"));
2945 2945 return (mp);
2946 2946 }
2947 2947
2948 2948
2949 2949 while (mp != NULL) {
2950 2950 next = mp->b_next;
2951 2951 mp->b_next = NULL;
2952 2952 if (iwp_send(ic, mp, IEEE80211_FC0_TYPE_DATA) != 0) {
2953 2953 mp->b_next = next;
2954 2954 break;
2955 2955 }
2956 2956 mp = next;
2957 2957 }
2958 2958
2959 2959 return (mp);
2960 2960 }
2961 2961
2962 2962 /*
2963 2963 * send frames
2964 2964 */
2965 2965 static int
2966 2966 iwp_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
2967 2967 {
2968 2968 iwp_sc_t *sc;
2969 2969 iwp_tx_ring_t *ring;
2970 2970 iwp_tx_desc_t *desc;
2971 2971 iwp_tx_data_t *data;
2972 2972 iwp_tx_data_t *desc_data;
2973 2973 iwp_cmd_t *cmd;
2974 2974 iwp_tx_cmd_t *tx;
2975 2975 ieee80211_node_t *in;
2976 2976 struct ieee80211_frame *wh;
2977 2977 struct ieee80211_key *k = NULL;
2978 2978 mblk_t *m, *m0;
2979 2979 int hdrlen, len, len0, mblen, off, err = IWP_SUCCESS;
2980 2980 uint16_t masks = 0;
2981 2981 uint32_t rate, s_id = 0;
2982 2982
2983 2983 if (NULL == ic) {
2984 2984 return (IWP_FAIL);
2985 2985 }
2986 2986 sc = (iwp_sc_t *)ic;
2987 2987
2988 2988 if (sc->sc_flags & IWP_F_SUSPEND) {
2989 2989 if ((type & IEEE80211_FC0_TYPE_MASK) !=
2990 2990 IEEE80211_FC0_TYPE_DATA) {
2991 2991 freemsg(mp);
2992 2992 }
2993 2993 err = IWP_FAIL;
2994 2994 goto exit;
2995 2995 }
2996 2996
2997 2997 mutex_enter(&sc->sc_tx_lock);
2998 2998 ring = &sc->sc_txq[0];
2999 2999 data = &ring->data[ring->cur];
3000 3000 cmd = data->cmd;
3001 3001 bzero(cmd, sizeof (*cmd));
3002 3002
3003 3003 ring->cur = (ring->cur + 1) % ring->count;
3004 3004
3005 3005 /*
3006 3006 * Need reschedule TX if TX buffer is full.
3007 3007 */
3008 3008 if (ring->queued > ring->count - IWP_MAX_WIN_SIZE) {
3009 3009 IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
3010 3010 "no txbuf\n"));
3011 3011
3012 3012 sc->sc_need_reschedule = 1;
3013 3013 mutex_exit(&sc->sc_tx_lock);
3014 3014
3015 3015 if ((type & IEEE80211_FC0_TYPE_MASK) !=
3016 3016 IEEE80211_FC0_TYPE_DATA) {
3017 3017 freemsg(mp);
3018 3018 }
3019 3019 sc->sc_tx_nobuf++;
3020 3020 err = IWP_FAIL;
3021 3021 goto exit;
3022 3022 }
3023 3023
3024 3024 ring->queued++;
3025 3025
3026 3026 mutex_exit(&sc->sc_tx_lock);
3027 3027
3028 3028 hdrlen = ieee80211_hdrspace(ic, mp->b_rptr);
3029 3029
3030 3030 m = allocb(msgdsize(mp) + 32, BPRI_MED);
3031 3031 if (NULL == m) { /* can not alloc buf, drop this package */
3032 3032 cmn_err(CE_WARN, "iwp_send(): "
3033 3033 "failed to allocate msgbuf\n");
3034 3034 freemsg(mp);
3035 3035
3036 3036 mutex_enter(&sc->sc_tx_lock);
3037 3037 ring->queued--;
3038 3038 if ((sc->sc_need_reschedule) && (ring->queued <= 0)) {
3039 3039 sc->sc_need_reschedule = 0;
3040 3040 mutex_exit(&sc->sc_tx_lock);
3041 3041 mac_tx_update(ic->ic_mach);
3042 3042 mutex_enter(&sc->sc_tx_lock);
3043 3043 }
3044 3044 mutex_exit(&sc->sc_tx_lock);
3045 3045
3046 3046 err = IWP_SUCCESS;
3047 3047 goto exit;
3048 3048 }
3049 3049
3050 3050 for (off = 0, m0 = mp; m0 != NULL; m0 = m0->b_cont) {
3051 3051 mblen = MBLKL(m0);
3052 3052 (void) memcpy(m->b_rptr + off, m0->b_rptr, mblen);
3053 3053 off += mblen;
3054 3054 }
3055 3055
3056 3056 m->b_wptr += off;
3057 3057
3058 3058 wh = (struct ieee80211_frame *)m->b_rptr;
3059 3059
3060 3060 /*
3061 3061 * determine send which AP or station in IBSS
3062 3062 */
3063 3063 in = ieee80211_find_txnode(ic, wh->i_addr1);
3064 3064 if (NULL == in) {
3065 3065 cmn_err(CE_WARN, "iwp_send(): "
3066 3066 "failed to find tx node\n");
3067 3067 freemsg(mp);
3068 3068 freemsg(m);
3069 3069 sc->sc_tx_err++;
3070 3070
3071 3071 mutex_enter(&sc->sc_tx_lock);
3072 3072 ring->queued--;
3073 3073 if ((sc->sc_need_reschedule) && (ring->queued <= 0)) {
3074 3074 sc->sc_need_reschedule = 0;
3075 3075 mutex_exit(&sc->sc_tx_lock);
3076 3076 mac_tx_update(ic->ic_mach);
3077 3077 mutex_enter(&sc->sc_tx_lock);
3078 3078 }
3079 3079 mutex_exit(&sc->sc_tx_lock);
3080 3080
3081 3081 err = IWP_SUCCESS;
3082 3082 goto exit;
3083 3083 }
3084 3084
3085 3085 /*
3086 3086 * Net80211 module encapsulate outbound data frames.
3087 3087 * Add some feilds of 80211 frame.
3088 3088 */
3089 3089 if ((type & IEEE80211_FC0_TYPE_MASK) ==
3090 3090 IEEE80211_FC0_TYPE_DATA) {
3091 3091 (void) ieee80211_encap(ic, m, in);
3092 3092 }
3093 3093
3094 3094 freemsg(mp);
3095 3095
3096 3096 cmd->hdr.type = REPLY_TX;
3097 3097 cmd->hdr.flags = 0;
3098 3098 cmd->hdr.qid = ring->qid;
3099 3099
3100 3100 tx = (iwp_tx_cmd_t *)cmd->data;
3101 3101 tx->tx_flags = 0;
3102 3102
3103 3103 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3104 3104 tx->tx_flags &= ~(LE_32(TX_CMD_FLG_ACK_MSK));
3105 3105 } else {
3106 3106 tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
3107 3107 }
3108 3108
3109 3109 if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
3110 3110 k = ieee80211_crypto_encap(ic, m);
3111 3111 if (NULL == k) {
3112 3112 freemsg(m);
3113 3113 sc->sc_tx_err++;
3114 3114
3115 3115 mutex_enter(&sc->sc_tx_lock);
3116 3116 ring->queued--;
3117 3117 if ((sc->sc_need_reschedule) && (ring->queued <= 0)) {
3118 3118 sc->sc_need_reschedule = 0;
3119 3119 mutex_exit(&sc->sc_tx_lock);
3120 3120 mac_tx_update(ic->ic_mach);
3121 3121 mutex_enter(&sc->sc_tx_lock);
3122 3122 }
3123 3123 mutex_exit(&sc->sc_tx_lock);
3124 3124
3125 3125 err = IWP_SUCCESS;
3126 3126 goto exit;
3127 3127 }
3128 3128
3129 3129 /* packet header may have moved, reset our local pointer */
3130 3130 wh = (struct ieee80211_frame *)m->b_rptr;
3131 3131 }
3132 3132
3133 3133 len = msgdsize(m);
3134 3134
3135 3135 #ifdef DEBUG
3136 3136 if (iwp_dbg_flags & IWP_DEBUG_TX) {
3137 3137 ieee80211_dump_pkt((uint8_t *)wh, hdrlen, 0, 0);
3138 3138 }
3139 3139 #endif
3140 3140
3141 3141 tx->rts_retry_limit = IWP_TX_RTS_RETRY_LIMIT;
3142 3142 tx->data_retry_limit = IWP_TX_DATA_RETRY_LIMIT;
3143 3143
3144 3144 /*
3145 3145 * specific TX parameters for management frames
3146 3146 */
3147 3147 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
3148 3148 IEEE80211_FC0_TYPE_MGT) {
3149 3149 /*
3150 3150 * mgmt frames are sent at 1M
3151 3151 */
3152 3152 if ((in->in_rates.ir_rates[0] &
3153 3153 IEEE80211_RATE_VAL) != 0) {
3154 3154 rate = in->in_rates.ir_rates[0] & IEEE80211_RATE_VAL;
3155 3155 } else {
3156 3156 rate = 2;
3157 3157 }
3158 3158
3159 3159 tx->tx_flags |= LE_32(TX_CMD_FLG_SEQ_CTL_MSK);
3160 3160
3161 3161 /*
3162 3162 * tell h/w to set timestamp in probe responses
3163 3163 */
3164 3164 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
3165 3165 IEEE80211_FC0_SUBTYPE_PROBE_RESP) {
3166 3166 tx->tx_flags |= LE_32(TX_CMD_FLG_TSF_MSK);
3167 3167
3168 3168 tx->data_retry_limit = 3;
3169 3169 if (tx->data_retry_limit < tx->rts_retry_limit) {
3170 3170 tx->rts_retry_limit = tx->data_retry_limit;
3171 3171 }
3172 3172 }
3173 3173
3174 3174 if (((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
3175 3175 IEEE80211_FC0_SUBTYPE_ASSOC_REQ) ||
3176 3176 ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
3177 3177 IEEE80211_FC0_SUBTYPE_REASSOC_REQ)) {
3178 3178 tx->timeout.pm_frame_timeout = LE_16(3);
3179 3179 } else {
3180 3180 tx->timeout.pm_frame_timeout = LE_16(2);
3181 3181 }
3182 3182
3183 3183 } else {
3184 3184 /*
3185 3185 * do it here for the software way rate scaling.
3186 3186 * later for rate scaling in hardware.
3187 3187 *
3188 3188 * now the txrate is determined in tx cmd flags, set to the
3189 3189 * max value 54M for 11g and 11M for 11b originally.
3190 3190 */
3191 3191 if (ic->ic_fixed_rate != IEEE80211_FIXED_RATE_NONE) {
3192 3192 rate = ic->ic_fixed_rate;
3193 3193 } else {
3194 3194 if ((in->in_rates.ir_rates[in->in_txrate] &
3195 3195 IEEE80211_RATE_VAL) != 0) {
3196 3196 rate = in->in_rates.
3197 3197 ir_rates[in->in_txrate] &
3198 3198 IEEE80211_RATE_VAL;
3199 3199 }
3200 3200 }
3201 3201
3202 3202 tx->tx_flags |= LE_32(TX_CMD_FLG_SEQ_CTL_MSK);
3203 3203
3204 3204 tx->timeout.pm_frame_timeout = 0;
3205 3205 }
3206 3206
3207 3207 IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
3208 3208 "tx rate[%d of %d] = %x",
3209 3209 in->in_txrate, in->in_rates.ir_nrates, rate));
3210 3210
3211 3211 len0 = roundup(4 + sizeof (iwp_tx_cmd_t) + hdrlen, 4);
3212 3212 if (len0 != (4 + sizeof (iwp_tx_cmd_t) + hdrlen)) {
3213 3213 tx->tx_flags |= LE_32(TX_CMD_FLG_MH_PAD_MSK);
3214 3214 }
3215 3215
3216 3216 /*
3217 3217 * retrieve destination node's id
3218 3218 */
3219 3219 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3220 3220 tx->sta_id = IWP_BROADCAST_ID;
3221 3221 } else {
3222 3222 tx->sta_id = IWP_AP_ID;
3223 3223 }
3224 3224
3225 3225 if (2 == rate || 4 == rate || 11 == rate || 22 == rate) {
3226 3226 masks |= RATE_MCS_CCK_MSK;
3227 3227 }
3228 3228
3229 3229 masks |= RATE_MCS_ANT_B_MSK;
3230 3230 tx->rate.r.rate_n_flags = LE_32(iwp_rate_to_plcp(rate) | masks);
3231 3231
3232 3232 IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
3233 3233 "tx flag = %x",
3234 3234 tx->tx_flags));
3235 3235
3236 3236 tx->stop_time.life_time = LE_32(0xffffffff);
3237 3237
3238 3238 tx->len = LE_16(len);
3239 3239
3240 3240 tx->dram_lsb_ptr =
3241 3241 LE_32(data->paddr_cmd + 4 + offsetof(iwp_tx_cmd_t, scratch));
3242 3242 tx->dram_msb_ptr = 0;
3243 3243 tx->driver_txop = 0;
3244 3244 tx->next_frame_len = 0;
3245 3245
3246 3246 (void) memcpy(tx + 1, m->b_rptr, hdrlen);
3247 3247 m->b_rptr += hdrlen;
3248 3248 (void) memcpy(data->dma_data.mem_va, m->b_rptr, len - hdrlen);
3249 3249
3250 3250 IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
3251 3251 "sending data: qid=%d idx=%d len=%d",
3252 3252 ring->qid, ring->cur, len));
3253 3253
3254 3254 /*
3255 3255 * first segment includes the tx cmd plus the 802.11 header,
3256 3256 * the second includes the remaining of the 802.11 frame.
3257 3257 */
3258 3258 mutex_enter(&sc->sc_tx_lock);
3259 3259
3260 3260 cmd->hdr.idx = ring->desc_cur;
3261 3261
3262 3262 desc_data = &ring->data[ring->desc_cur];
3263 3263 desc = desc_data->desc;
3264 3264 bzero(desc, sizeof (*desc));
3265 3265 desc->val0 = 2 << 24;
3266 3266 desc->pa[0].tb1_addr = data->paddr_cmd;
3267 3267 desc->pa[0].val1 = ((len0 << 4) & 0xfff0) |
3268 3268 ((data->dma_data.cookie.dmac_address & 0xffff) << 16);
3269 3269 desc->pa[0].val2 =
3270 3270 ((data->dma_data.cookie.dmac_address & 0xffff0000) >> 16) |
3271 3271 ((len - hdrlen) << 20);
3272 3272 IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
3273 3273 "phy addr1 = 0x%x phy addr2 = 0x%x "
3274 3274 "len1 = 0x%x, len2 = 0x%x val1 = 0x%x val2 = 0x%x",
3275 3275 data->paddr_cmd, data->dma_data.cookie.dmac_address,
3276 3276 len0, len - hdrlen, desc->pa[0].val1, desc->pa[0].val2));
3277 3277
3278 3278 /*
3279 3279 * kick ring
3280 3280 */
3281 3281 s_id = tx->sta_id;
3282 3282
3283 3283 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3284 3284 tfd_offset[ring->desc_cur].val =
3285 3285 (8 + len) | (s_id << 12);
3286 3286 if (ring->desc_cur < IWP_MAX_WIN_SIZE) {
3287 3287 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3288 3288 tfd_offset[IWP_QUEUE_SIZE + ring->desc_cur].val =
3289 3289 (8 + len) | (s_id << 12);
3290 3290 }
3291 3291
3292 3292 IWP_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
3293 3293 IWP_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
3294 3294
3295 3295 ring->desc_cur = (ring->desc_cur + 1) % ring->count;
3296 3296 IWP_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->desc_cur);
3297 3297
3298 3298 mutex_exit(&sc->sc_tx_lock);
3299 3299 freemsg(m);
3300 3300
3301 3301 /*
3302 3302 * release node reference
3303 3303 */
3304 3304 ieee80211_free_node(in);
3305 3305
3306 3306 ic->ic_stats.is_tx_bytes += len;
3307 3307 ic->ic_stats.is_tx_frags++;
3308 3308
3309 3309 mutex_enter(&sc->sc_mt_lock);
3310 3310 if (0 == sc->sc_tx_timer) {
3311 3311 sc->sc_tx_timer = 4;
3312 3312 }
3313 3313 mutex_exit(&sc->sc_mt_lock);
3314 3314
3315 3315 exit:
3316 3316 return (err);
3317 3317 }
3318 3318
3319 3319 /*
3320 3320 * invoked by GLD to deal with IOCTL affaires
3321 3321 */
3322 3322 static void
3323 3323 iwp_m_ioctl(void* arg, queue_t *wq, mblk_t *mp)
3324 3324 {
3325 3325 iwp_sc_t *sc;
3326 3326 ieee80211com_t *ic;
3327 3327 int err = EINVAL;
3328 3328
3329 3329 if (NULL == arg) {
3330 3330 return;
3331 3331 }
3332 3332 sc = (iwp_sc_t *)arg;
3333 3333 ic = &sc->sc_ic;
3334 3334
3335 3335 err = ieee80211_ioctl(ic, wq, mp);
3336 3336 if (ENETRESET == err) {
3337 3337 /*
3338 3338 * This is special for the hidden AP connection.
3339 3339 * In any case, we should make sure only one 'scan'
3340 3340 * in the driver for a 'connect' CLI command. So
3341 3341 * when connecting to a hidden AP, the scan is just
3342 3342 * sent out to the air when we know the desired
3343 3343 * essid of the AP we want to connect.
3344 3344 */
3345 3345 if (ic->ic_des_esslen) {
3346 3346 if (sc->sc_flags & IWP_F_RUNNING) {
3347 3347 iwp_m_stop(sc);
3348 3348 (void) iwp_m_start(sc);
3349 3349 (void) ieee80211_new_state(ic,
3350 3350 IEEE80211_S_SCAN, -1);
3351 3351 }
3352 3352 }
3353 3353 }
3354 3354 }
3355 3355
3356 3356 /*
3357 3357 * Call back functions for get/set proporty
3358 3358 */
3359 3359 static int
3360 3360 iwp_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3361 3361 uint_t wldp_length, void *wldp_buf)
3362 3362 {
3363 3363 iwp_sc_t *sc;
3364 3364 int err = EINVAL;
3365 3365
3366 3366 if (NULL == arg) {
3367 3367 return (EINVAL);
3368 3368 }
3369 3369 sc = (iwp_sc_t *)arg;
3370 3370
3371 3371 err = ieee80211_getprop(&sc->sc_ic, pr_name, wldp_pr_num,
3372 3372 wldp_length, wldp_buf);
3373 3373
3374 3374 return (err);
3375 3375 }
3376 3376
3377 3377 static void
3378 3378 iwp_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3379 3379 mac_prop_info_handle_t prh)
3380 3380 {
3381 3381 iwp_sc_t *sc;
3382 3382
3383 3383 sc = (iwp_sc_t *)arg;
3384 3384 ieee80211_propinfo(&sc->sc_ic, pr_name, wldp_pr_num, prh);
3385 3385 }
3386 3386
3387 3387 static int
3388 3388 iwp_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3389 3389 uint_t wldp_length, const void *wldp_buf)
3390 3390 {
3391 3391 iwp_sc_t *sc;
3392 3392 ieee80211com_t *ic;
3393 3393 int err = EINVAL;
3394 3394
3395 3395 if (NULL == arg) {
3396 3396 return (EINVAL);
3397 3397 }
3398 3398 sc = (iwp_sc_t *)arg;
3399 3399 ic = &sc->sc_ic;
3400 3400
3401 3401 err = ieee80211_setprop(ic, pr_name, wldp_pr_num, wldp_length,
3402 3402 wldp_buf);
3403 3403
3404 3404 if (err == ENETRESET) {
3405 3405 if (ic->ic_des_esslen) {
3406 3406 if (sc->sc_flags & IWP_F_RUNNING) {
3407 3407 iwp_m_stop(sc);
3408 3408 (void) iwp_m_start(sc);
3409 3409 (void) ieee80211_new_state(ic,
3410 3410 IEEE80211_S_SCAN, -1);
3411 3411 }
3412 3412 }
3413 3413 err = 0;
3414 3414 }
3415 3415 return (err);
3416 3416 }
3417 3417
/*
 * invoked by GLD to supply NIC and driver statistics
 */
static int
iwp_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	iwp_sc_t *sc;
	ieee80211com_t *ic;
	ieee80211_node_t *in;

	if (NULL == arg) {
		return (EINVAL);
	}
	sc = (iwp_sc_t *)arg;
	ic = &sc->sc_ic;

	mutex_enter(&sc->sc_glock);

	switch (stat) {
	case MAC_STAT_IFSPEED:
		/*
		 * report the current tx rate in bits per second;
		 * rates are stored in units of 500kb/s, hence /2 * 1000000
		 */
		in = ic->ic_bss;
		*val = ((IEEE80211_FIXED_RATE_NONE == ic->ic_fixed_rate) ?
		    IEEE80211_RATE(in->in_txrate) :
		    ic->ic_fixed_rate) / 2 * 1000000;
		break;
	case MAC_STAT_NOXMTBUF:
		*val = sc->sc_tx_nobuf;
		break;
	case MAC_STAT_NORCVBUF:
		*val = sc->sc_rx_nobuf;
		break;
	case MAC_STAT_IERRORS:
		*val = sc->sc_rx_err;
		break;
	case MAC_STAT_RBYTES:
		*val = ic->ic_stats.is_rx_bytes;
		break;
	case MAC_STAT_IPACKETS:
		*val = ic->ic_stats.is_rx_frags;
		break;
	case MAC_STAT_OBYTES:
		*val = ic->ic_stats.is_tx_bytes;
		break;
	case MAC_STAT_OPACKETS:
		*val = ic->ic_stats.is_tx_frags;
		break;
	case MAC_STAT_OERRORS:
	case WIFI_STAT_TX_FAILED:
		*val = sc->sc_tx_err;
		break;
	case WIFI_STAT_TX_RETRANS:
		*val = sc->sc_tx_retries;
		break;
	case WIFI_STAT_FCS_ERRORS:
	case WIFI_STAT_WEP_ERRORS:
	case WIFI_STAT_TX_FRAGS:
	case WIFI_STAT_MCAST_TX:
	case WIFI_STAT_RTS_SUCCESS:
	case WIFI_STAT_RTS_FAILURE:
	case WIFI_STAT_ACK_FAILURE:
	case WIFI_STAT_RX_FRAGS:
	case WIFI_STAT_MCAST_RX:
	case WIFI_STAT_RX_DUPS:
		/* these statistics are maintained by the net80211 module */
		mutex_exit(&sc->sc_glock);
		return (ieee80211_stat(ic, stat, val));
	default:
		mutex_exit(&sc->sc_glock);
		return (ENOTSUP);
	}

	mutex_exit(&sc->sc_glock);

	return (IWP_SUCCESS);

}
3493 3493
3494 3494 /*
3495 3495 * invoked by GLD to start or open NIC
3496 3496 */
3497 3497 static int
3498 3498 iwp_m_start(void *arg)
3499 3499 {
3500 3500 iwp_sc_t *sc;
3501 3501 ieee80211com_t *ic;
3502 3502 int err = IWP_FAIL;
3503 3503
3504 3504 if (NULL == arg) {
3505 3505 return (EINVAL);
3506 3506 }
3507 3507 sc = (iwp_sc_t *)arg;
3508 3508 ic = &sc->sc_ic;
3509 3509
3510 3510 err = iwp_init(sc);
3511 3511 if (err != IWP_SUCCESS) {
3512 3512 /*
3513 3513 * The hw init err(eg. RF is OFF). Return Success to make
3514 3514 * the 'plumb' succeed. The iwp_thread() tries to re-init
3515 3515 * background.
3516 3516 */
3517 3517 atomic_or_32(&sc->sc_flags, IWP_F_HW_ERR_RECOVER);
3518 3518 return (IWP_SUCCESS);
3519 3519 }
3520 3520
3521 3521 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3522 3522
3523 3523 atomic_or_32(&sc->sc_flags, IWP_F_RUNNING);
3524 3524
3525 3525 return (IWP_SUCCESS);
3526 3526 }
3527 3527
3528 3528 /*
3529 3529 * invoked by GLD to stop or down NIC
3530 3530 */
3531 3531 static void
3532 3532 iwp_m_stop(void *arg)
3533 3533 {
3534 3534 iwp_sc_t *sc;
3535 3535 ieee80211com_t *ic;
3536 3536
3537 3537 if (NULL == arg) {
3538 3538 return;
3539 3539 }
3540 3540 sc = (iwp_sc_t *)arg;
3541 3541 ic = &sc->sc_ic;
3542 3542
3543 3543 iwp_stop(sc);
3544 3544
3545 3545 /*
3546 3546 * release buffer for calibration
3547 3547 */
3548 3548 iwp_release_calib_buffer(sc);
3549 3549
3550 3550 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3551 3551
3552 3552 atomic_and_32(&sc->sc_flags, ~IWP_F_HW_ERR_RECOVER);
3553 3553 atomic_and_32(&sc->sc_flags, ~IWP_F_RATE_AUTO_CTL);
3554 3554
3555 3555 atomic_and_32(&sc->sc_flags, ~IWP_F_RUNNING);
3556 3556 atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
3557 3557 }
3558 3558
3559 3559 /*
3560 3560 * invoked by GLD to configure NIC
3561 3561 */
3562 3562 static int
3563 3563 iwp_m_unicst(void *arg, const uint8_t *macaddr)
3564 3564 {
3565 3565 iwp_sc_t *sc;
3566 3566 ieee80211com_t *ic;
3567 3567 int err = IWP_SUCCESS;
3568 3568
3569 3569 if (NULL == arg) {
3570 3570 return (EINVAL);
3571 3571 }
3572 3572 sc = (iwp_sc_t *)arg;
3573 3573 ic = &sc->sc_ic;
3574 3574
3575 3575 if (!IEEE80211_ADDR_EQ(ic->ic_macaddr, macaddr)) {
3576 3576 IEEE80211_ADDR_COPY(ic->ic_macaddr, macaddr);
3577 3577 mutex_enter(&sc->sc_glock);
3578 3578 err = iwp_config(sc);
3579 3579 mutex_exit(&sc->sc_glock);
3580 3580 if (err != IWP_SUCCESS) {
3581 3581 cmn_err(CE_WARN, "iwp_m_unicst(): "
3582 3582 "failed to configure device\n");
3583 3583 goto fail;
3584 3584 }
3585 3585 }
3586 3586
3587 3587 return (err);
3588 3588
3589 3589 fail:
3590 3590 return (err);
3591 3591 }
3592 3592
/*
 * invoked by GLD to add or remove a multicast address; multicast
 * filtering is not implemented here, so this always reports success.
 */
/* ARGSUSED */
static int
iwp_m_multicst(void *arg, boolean_t add, const uint8_t *m)
{
	return (IWP_SUCCESS);
}
3599 3599
/*
 * invoked by GLD to toggle promiscuous mode; not implemented here,
 * so this always reports success.
 */
/* ARGSUSED */
static int
iwp_m_promisc(void *arg, boolean_t on)
{
	return (IWP_SUCCESS);
}
3606 3606
/*
 * kernel thread that deals with exceptional situations: RF-kill
 * state tracking, fatal hardware error recovery, pending scans,
 * software rate scaling and the tx watchdog.
 */
static void
iwp_thread(iwp_sc_t *sc)
{
	ieee80211com_t *ic = &sc->sc_ic;
	clock_t clk;
	int err, n = 0, timeout = 0;
	uint32_t tmp;
#ifdef DEBUG
	int times = 0;	/* count of recovery attempts (debug only) */
#endif

	while (sc->sc_mf_thread_switch) {
		/*
		 * track the current state of the hardware RF kill switch
		 */
		tmp = IWP_READ(sc, CSR_GP_CNTRL);
		if (tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_RADIO_OFF);
		} else {
			atomic_or_32(&sc->sc_flags, IWP_F_RADIO_OFF);
		}

		/*
		 * If in SUSPEND or the RF is OFF, do nothing.
		 */
		if (sc->sc_flags & IWP_F_RADIO_OFF) {
			delay(drv_usectohz(100000));
			continue;
		}

		/*
		 * recover from a fatal hardware error
		 */
		if (ic->ic_mach &&
		    (sc->sc_flags & IWP_F_HW_ERR_RECOVER)) {

			IWP_DBG((IWP_DEBUG_FW, "iwp_thread(): "
			    "try to recover fatal hw error: %d\n", times++));

			iwp_stop(sc);

			if (IWP_CHK_FAST_RECOVER(sc)) {
				/* save runtime configuration */
				bcopy(&sc->sc_config, &sc->sc_config_save,
				    sizeof (sc->sc_config));
			} else {
				ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
				/* back off longer after each failed attempt */
				delay(drv_usectohz(2000000 + n*500000));
			}

			err = iwp_init(sc);
			if (err != IWP_SUCCESS) {
				n++;
				/* retry up to 20 times before proceeding */
				if (n < 20) {
					continue;
				}
			}

			n = 0;
			if (!err) {
				atomic_or_32(&sc->sc_flags, IWP_F_RUNNING);
			}


			if (!IWP_CHK_FAST_RECOVER(sc) ||
			    iwp_fast_recover(sc) != IWP_SUCCESS) {
				atomic_and_32(&sc->sc_flags,
				    ~IWP_F_HW_ERR_RECOVER);

				delay(drv_usectohz(2000000));
				/* restart scanning unless we were idle */
				if (sc->sc_ostate != IEEE80211_S_INIT) {
					ieee80211_new_state(ic,
					    IEEE80211_S_SCAN, 0);
				}
			}
		}

		/*
		 * advance to the next channel while a scan is pending
		 */
		if (ic->ic_mach &&
		    (sc->sc_flags & IWP_F_SCANNING) && sc->sc_scan_pending) {
			IWP_DBG((IWP_DEBUG_SCAN, "iwp_thread(): "
			    "wait for probe response\n"));

			sc->sc_scan_pending--;
			delay(drv_usectohz(200000));
			ieee80211_next_scan(ic);
		}

		/*
		 * rate ctl: run the AMRR algorithm roughly once per second
		 */
		if (ic->ic_mach &&
		    (sc->sc_flags & IWP_F_RATE_AUTO_CTL)) {
			clk = ddi_get_lbolt();
			if (clk > sc->sc_clk + drv_sectohz(1)) {
				iwp_amrr_timeout(sc);
			}
		}

		delay(drv_usectohz(100000));

		/*
		 * tx watchdog: sc_tx_timer is armed in iwp_send(); it is
		 * decremented here about once per second (10 iterations
		 * of the ~100ms loop above) and triggers hardware error
		 * recovery when it reaches zero.
		 */
		mutex_enter(&sc->sc_mt_lock);
		if (sc->sc_tx_timer) {
			timeout++;
			if (10 == timeout) {
				sc->sc_tx_timer--;
				if (0 == sc->sc_tx_timer) {
					atomic_or_32(&sc->sc_flags,
					    IWP_F_HW_ERR_RECOVER);
					sc->sc_ostate = IEEE80211_S_RUN;
					IWP_DBG((IWP_DEBUG_FW, "iwp_thread(): "
					    "try to recover from "
					    "send fail\n"));
				}
				timeout = 0;
			}
		}
		mutex_exit(&sc->sc_mt_lock);
	}

	/* notify the waiter (detach path) that this thread has exited */
	mutex_enter(&sc->sc_mt_lock);
	sc->sc_mf_thread = NULL;
	cv_signal(&sc->sc_mt_cv);
	mutex_exit(&sc->sc_mt_lock);
}
3731 3731
3732 3732
/*
 * Send a command to the ucode.  For synchronous commands (async == 0)
 * this blocks, waiting on sc_cmd_cv, until completion is signalled or
 * a 2-second timeout expires.  The caller must hold sc_glock.
 */
static int
iwp_cmd(iwp_sc_t *sc, int code, const void *buf, int size, int async)
{
	iwp_tx_ring_t *ring = &sc->sc_txq[IWP_CMD_QUEUE_NUM];
	iwp_tx_desc_t *desc;
	iwp_cmd_t *cmd;

	ASSERT(size <= sizeof (cmd->data));
	ASSERT(mutex_owned(&sc->sc_glock));

	IWP_DBG((IWP_DEBUG_CMD, "iwp_cmd() "
	    "code[%d]", code));
	desc = ring->data[ring->cur].desc;
	cmd = ring->data[ring->cur].cmd;

	/* build the command header and copy in the payload */
	cmd->hdr.type = (uint8_t)code;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;
	(void) memcpy(cmd->data, buf, size);
	(void) memset(desc, 0, sizeof (*desc));

	/* one buffer segment: the 4-byte header plus the payload */
	desc->val0 = 1 << 24;
	desc->pa[0].tb1_addr =
	    (uint32_t)(ring->data[ring->cur].paddr_cmd & 0xffffffff);
	desc->pa[0].val1 = ((4 + size) << 4) & 0xfff0;

	if (async) {
		/* track the number of outstanding async commands */
		sc->sc_cmd_accum++;
	}

	/*
	 * kick cmd ring XXX
	 */
	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
	    tfd_offset[ring->cur].val = 8;
	if (ring->cur < IWP_MAX_WIN_SIZE) {
		/* mirror entry required by hardware for low indices */
		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
		    tfd_offset[IWP_QUEUE_SIZE + ring->cur].val = 8;
	}
	ring->cur = (ring->cur + 1) % ring->count;
	IWP_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	if (async) {
		return (IWP_SUCCESS);
	} else {
		clock_t clk;

		/*
		 * synchronous command: wait up to 2 seconds for
		 * sc_cmd_flag to be set to SC_CMD_FLG_DONE
		 */
		clk = ddi_get_lbolt() + drv_usectohz(2000000);
		while (sc->sc_cmd_flag != SC_CMD_FLG_DONE) {
			if (cv_timedwait(&sc->sc_cmd_cv,
			    &sc->sc_glock, clk) < 0) {
				break;	/* timed out */
			}
		}

		if (SC_CMD_FLG_DONE == sc->sc_cmd_flag) {
			sc->sc_cmd_flag = SC_CMD_FLG_NONE;
			return (IWP_SUCCESS);
		} else {
			sc->sc_cmd_flag = SC_CMD_FLG_NONE;
			return (IWP_FAIL);
		}
	}
}
3801 3801
3802 3802 /*
3803 3803 * require ucode seting led of NIC
3804 3804 */
3805 3805 static void
3806 3806 iwp_set_led(iwp_sc_t *sc, uint8_t id, uint8_t off, uint8_t on)
3807 3807 {
3808 3808 iwp_led_cmd_t led;
3809 3809
3810 3810 led.interval = LE_32(100000); /* unit: 100ms */
3811 3811 led.id = id;
3812 3812 led.off = off;
3813 3813 led.on = on;
3814 3814
3815 3815 (void) iwp_cmd(sc, REPLY_LEDS_CMD, &led, sizeof (led), 1);
3816 3816 }
3817 3817
/*
 * necessary settings to the NIC before authentication
 */
static int
iwp_hw_set_before_auth(iwp_sc_t *sc)
{
	ieee80211com_t *ic = &sc->sc_ic;
	ieee80211_node_t *in = ic->ic_bss;
	int err = IWP_FAIL;

	/*
	 * update the adapter's configuration according
	 * to the info of the target AP
	 */
	IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid);
	sc->sc_config.chan = LE_16(ieee80211_chan2ieee(ic, in->in_chan));

	sc->sc_config.ofdm_ht_triple_stream_basic_rates = 0;
	sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0;
	sc->sc_config.ofdm_ht_single_stream_basic_rates = 0;

	/*
	 * select the basic rate bitmaps for the operating band/mode
	 */
	if (IEEE80211_MODE_11B == ic->ic_curmode) {
		sc->sc_config.cck_basic_rates = 0x03;
		sc->sc_config.ofdm_basic_rates = 0;
	} else if ((in->in_chan != IEEE80211_CHAN_ANYC) &&
	    (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) {
		sc->sc_config.cck_basic_rates = 0;
		sc->sc_config.ofdm_basic_rates = 0x15;
	} else { /* assume 802.11b/g */
		sc->sc_config.cck_basic_rates = 0x0f;
		sc->sc_config.ofdm_basic_rates = 0xff;
	}

	/* clear, then re-apply, the short preamble/slot flags */
	sc->sc_config.flags &= ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
	    RXON_FLG_SHORT_SLOT_MSK);

	if (ic->ic_flags & IEEE80211_F_SHSLOT) {
		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_SLOT_MSK);
	} else {
		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_SLOT_MSK);
	}

	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) {
		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
	} else {
		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_PREAMBLE_MSK);
	}

	IWP_DBG((IWP_DEBUG_80211, "iwp_hw_set_before_auth(): "
	    "config chan %d flags %x "
	    "filter_flags %x cck %x ofdm %x"
	    " bssid:%02x:%02x:%02x:%02x:%02x:%2x\n",
	    LE_16(sc->sc_config.chan), LE_32(sc->sc_config.flags),
	    LE_32(sc->sc_config.filter_flags),
	    sc->sc_config.cck_basic_rates, sc->sc_config.ofdm_basic_rates,
	    sc->sc_config.bssid[0], sc->sc_config.bssid[1],
	    sc->sc_config.bssid[2], sc->sc_config.bssid[3],
	    sc->sc_config.bssid[4], sc->sc_config.bssid[5]));

	/*
	 * push the updated RXON configuration to the firmware
	 * (asynchronous command)
	 */
	err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config,
	    sizeof (iwp_rxon_cmd_t), 1);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_hw_set_before_auth(): "
		    "failed to config chan%d\n", sc->sc_config.chan);
		return (err);
	}

	/*
	 * add default AP node
	 */
	err = iwp_add_ap_sta(sc);
	if (err != IWP_SUCCESS) {
		return (err);
	}


	return (err);
}
3896 3896
/*
 * Send a scan request (assemble a scan cmd) to the firmware.  The
 * command consists of a scan header, an embedded probe request frame
 * and per-channel attributes, all packed into one command buffer.
 */
static int
iwp_scan(iwp_sc_t *sc)
{
	ieee80211com_t *ic = &sc->sc_ic;
	iwp_tx_ring_t *ring = &sc->sc_txq[IWP_CMD_QUEUE_NUM];
	iwp_tx_desc_t *desc;
	iwp_tx_data_t *data;
	iwp_cmd_t *cmd;
	iwp_scan_hdr_t *hdr;
	iwp_scan_chan_t chan;
	struct ieee80211_frame *wh;
	ieee80211_node_t *in = ic->ic_bss;
	uint8_t essid[IEEE80211_NWID_LEN+1];
	struct ieee80211_rateset *rs;
	enum ieee80211_phymode mode;
	uint8_t *frm;
	int i, pktlen, nrates;

	data = &ring->data[ring->cur];
	desc = data->desc;
	cmd = (iwp_cmd_t *)data->dma_data.mem_va;

	cmd->hdr.type = REPLY_SCAN_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	/* NOTE(review): 0x40 appears to mark a large command — confirm */
	cmd->hdr.idx = ring->cur | 0x40;

	/* scan header: one channel, 2.4GHz band, forced rx chain */
	hdr = (iwp_scan_hdr_t *)cmd->data;
	(void) memset(hdr, 0, sizeof (iwp_scan_hdr_t));
	hdr->nchan = 1;
	hdr->quiet_time = LE_16(50);
	hdr->quiet_plcp_th = LE_16(1);

	hdr->flags = LE_32(RXON_FLG_BAND_24G_MSK);
	hdr->rx_chain = LE_16(RXON_RX_CHAIN_DRIVER_FORCE_MSK |
	    (0x7 << RXON_RX_CHAIN_VALID_POS) |
	    (0x2 << RXON_RX_CHAIN_FORCE_SEL_POS) |
	    (0x2 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));

	/* the probe is broadcast at the lowest CCK rate (1M) */
	hdr->tx_cmd.tx_flags = LE_32(TX_CMD_FLG_SEQ_CTL_MSK);
	hdr->tx_cmd.sta_id = IWP_BROADCAST_ID;
	hdr->tx_cmd.stop_time.life_time = LE_32(0xffffffff);
	hdr->tx_cmd.rate.r.rate_n_flags = LE_32(iwp_rate_to_plcp(2));
	hdr->tx_cmd.rate.r.rate_n_flags |=
	    LE_32(RATE_MCS_ANT_B_MSK |RATE_MCS_CCK_MSK);
	hdr->direct_scan[0].len = ic->ic_des_esslen;
	hdr->direct_scan[0].id = IEEE80211_ELEMID_SSID;

	hdr->filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
	    RXON_FILTER_BCON_AWARE_MSK);

	/* use a directed scan when a desired ESSID is configured */
	if (ic->ic_des_esslen) {
		bcopy(ic->ic_des_essid, essid, ic->ic_des_esslen);
		essid[ic->ic_des_esslen] = '\0';
		IWP_DBG((IWP_DEBUG_SCAN, "iwp_scan(): "
		    "directed scan %s\n", essid));

		bcopy(ic->ic_des_essid, hdr->direct_scan[0].ssid,
		    ic->ic_des_esslen);
	} else {
		bzero(hdr->direct_scan[0].ssid,
		    sizeof (hdr->direct_scan[0].ssid));
	}

	/*
	 * a probe request frame is required after the REPLY_SCAN_CMD
	 */
	wh = (struct ieee80211_frame *)(hdr + 1);
	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	(void) memset(wh->i_addr1, 0xff, 6);	/* broadcast DA */
	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_macaddr);
	(void) memset(wh->i_addr3, 0xff, 6);	/* broadcast BSSID */
	*(uint16_t *)&wh->i_dur[0] = 0;
	*(uint16_t *)&wh->i_seq[0] = 0;

	frm = (uint8_t *)(wh + 1);

	/*
	 * essid IE
	 */
	if (in->in_esslen) {
		bcopy(in->in_essid, essid, in->in_esslen);
		essid[in->in_esslen] = '\0';
		IWP_DBG((IWP_DEBUG_SCAN, "iwp_scan(): "
		    "probe with ESSID %s\n",
		    essid));
	}
	*frm++ = IEEE80211_ELEMID_SSID;
	*frm++ = in->in_esslen;
	(void) memcpy(frm, in->in_essid, in->in_esslen);
	frm += in->in_esslen;

	mode = ieee80211_chan2mode(ic, ic->ic_curchan);
	rs = &ic->ic_sup_rates[mode];

	/*
	 * supported rates IE (at most IEEE80211_RATE_SIZE rates)
	 */
	*frm++ = IEEE80211_ELEMID_RATES;
	nrates = rs->ir_nrates;
	if (nrates > IEEE80211_RATE_SIZE) {
		nrates = IEEE80211_RATE_SIZE;
	}

	*frm++ = (uint8_t)nrates;
	(void) memcpy(frm, rs->ir_rates, nrates);
	frm += nrates;

	/*
	 * extended supported rates IE, for the overflow rates
	 */
	if (rs->ir_nrates > IEEE80211_RATE_SIZE) {
		nrates = rs->ir_nrates - IEEE80211_RATE_SIZE;
		*frm++ = IEEE80211_ELEMID_XRATES;
		*frm++ = (uint8_t)nrates;
		(void) memcpy(frm, rs->ir_rates + IEEE80211_RATE_SIZE, nrates);
		frm += nrates;
	}

	/*
	 * optional IE (usually for WPA)
	 */
	if (ic->ic_opt_ie != NULL) {
		(void) memcpy(frm, ic->ic_opt_ie, ic->ic_opt_ie_len);
		frm += ic->ic_opt_ie_len;
	}

	/* setup length of probe request */
	hdr->tx_cmd.len = LE_16(_PTRDIFF(frm, wh));
	hdr->len = LE_16(hdr->nchan * sizeof (iwp_scan_chan_t) +
	    LE_16(hdr->tx_cmd.len) + sizeof (iwp_scan_hdr_t));

	/*
	 * the attributes of the scan channels are required after the
	 * probe request frame.
	 * NOTE(review): 'chan' is initialized field-by-field below; if
	 * iwp_scan_chan_t has fields not assigned here they carry stack
	 * garbage into the command — confirm against the struct layout.
	 */
	for (i = 1; i <= hdr->nchan; i++) {
		/* type 3 = active (directed) scan, 1 = passive */
		if (ic->ic_des_esslen) {
			chan.type = LE_32(3);
		} else {
			chan.type = LE_32(1);
		}

		chan.chan = LE_16(ieee80211_chan2ieee(ic, ic->ic_curchan));
		chan.tpc.tx_gain = 0x28;
		chan.tpc.dsp_atten = 110;
		chan.active_dwell = LE_16(50);
		chan.passive_dwell = LE_16(120);

		bcopy(&chan, frm, sizeof (iwp_scan_chan_t));
		frm += sizeof (iwp_scan_chan_t);
	}

	pktlen = _PTRDIFF(frm, cmd);

	/* one buffer segment covering the entire assembled command */
	(void) memset(desc, 0, sizeof (*desc));
	desc->val0 = 1 << 24;
	desc->pa[0].tb1_addr =
	    (uint32_t)(data->dma_data.cookie.dmac_address & 0xffffffff);
	desc->pa[0].val1 = (pktlen << 4) & 0xfff0;

	/*
	 * maybe for cmd, filling the byte cnt table is not necessary.
	 * anyway, we fill it here.
	 */
	sc->sc_shared->queues_byte_cnt_tbls[ring->qid]
	    .tfd_offset[ring->cur].val = 8;
	if (ring->cur < IWP_MAX_WIN_SIZE) {
		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
		    tfd_offset[IWP_QUEUE_SIZE + ring->cur].val = 8;
	}

	/*
	 * kick cmd ring
	 */
	ring->cur = (ring->cur + 1) % ring->count;
	IWP_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	return (IWP_SUCCESS);
}
4082 4082
4083 4083 /*
4084 4084 * configure NIC by using ucode commands after loading ucode.
4085 4085 */
4086 4086 static int
4087 4087 iwp_config(iwp_sc_t *sc)
4088 4088 {
4089 4089 ieee80211com_t *ic = &sc->sc_ic;
4090 4090 iwp_powertable_cmd_t powertable;
4091 4091 iwp_bt_cmd_t bt;
4092 4092 iwp_add_sta_t node;
4093 4093 iwp_rem_sta_t rm_sta;
4094 4094 const uint8_t bcast[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
4095 4095 int err = IWP_FAIL;
4096 4096
4097 4097 /*
4098 4098 * set power mode. Disable power management at present, do it later
4099 4099 */
4100 4100 (void) memset(&powertable, 0, sizeof (powertable));
4101 4101 powertable.flags = LE_16(0x8);
4102 4102 err = iwp_cmd(sc, POWER_TABLE_CMD, &powertable,
4103 4103 sizeof (powertable), 0);
4104 4104 if (err != IWP_SUCCESS) {
4105 4105 cmn_err(CE_WARN, "iwp_config(): "
4106 4106 "failed to set power mode\n");
4107 4107 return (err);
4108 4108 }
4109 4109
4110 4110 /*
4111 4111 * configure bt coexistence
4112 4112 */
4113 4113 (void) memset(&bt, 0, sizeof (bt));
4114 4114 bt.flags = 3;
4115 4115 bt.lead_time = 0xaa;
4116 4116 bt.max_kill = 1;
4117 4117 err = iwp_cmd(sc, REPLY_BT_CONFIG, &bt,
4118 4118 sizeof (bt), 0);
4119 4119 if (err != IWP_SUCCESS) {
4120 4120 cmn_err(CE_WARN, "iwp_config(): "
4121 4121 "failed to configurate bt coexistence\n");
4122 4122 return (err);
4123 4123 }
4124 4124
4125 4125 /*
4126 4126 * configure rxon
4127 4127 */
4128 4128 (void) memset(&sc->sc_config, 0, sizeof (iwp_rxon_cmd_t));
4129 4129 IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr);
4130 4130 IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr);
4131 4131 sc->sc_config.chan = LE_16(ieee80211_chan2ieee(ic, ic->ic_curchan));
4132 4132 sc->sc_config.flags = LE_32(RXON_FLG_BAND_24G_MSK);
4133 4133 sc->sc_config.flags &= LE_32(~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
4134 4134 RXON_FLG_CHANNEL_MODE_PURE_40_MSK));
4135 4135
4136 4136 switch (ic->ic_opmode) {
4137 4137 case IEEE80211_M_STA:
4138 4138 sc->sc_config.dev_type = RXON_DEV_TYPE_ESS;
4139 4139 sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
4140 4140 RXON_FILTER_DIS_DECRYPT_MSK |
4141 4141 RXON_FILTER_DIS_GRP_DECRYPT_MSK);
4142 4142 break;
4143 4143 case IEEE80211_M_IBSS:
4144 4144 case IEEE80211_M_AHDEMO:
4145 4145 sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;
4146 4146
4147 4147 sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
4148 4148 sc->sc_config.filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
4149 4149 RXON_FILTER_DIS_DECRYPT_MSK |
4150 4150 RXON_FILTER_DIS_GRP_DECRYPT_MSK);
4151 4151 break;
4152 4152 case IEEE80211_M_HOSTAP:
4153 4153 sc->sc_config.dev_type = RXON_DEV_TYPE_AP;
4154 4154 break;
4155 4155 case IEEE80211_M_MONITOR:
4156 4156 sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER;
4157 4157 sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
4158 4158 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
4159 4159 break;
4160 4160 }
4161 4161
4162 4162 /*
4163 4163 * Support all CCK rates.
4164 4164 */
4165 4165 sc->sc_config.cck_basic_rates = 0x0f;
4166 4166
4167 4167 /*
4168 4168 * Support all OFDM rates.
4169 4169 */
4170 4170 sc->sc_config.ofdm_basic_rates = 0xff;
4171 4171
4172 4172 sc->sc_config.rx_chain = LE_16(RXON_RX_CHAIN_DRIVER_FORCE_MSK |
4173 4173 (0x7 << RXON_RX_CHAIN_VALID_POS) |
4174 4174 (0x2 << RXON_RX_CHAIN_FORCE_SEL_POS) |
4175 4175 (0x2 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
4176 4176
4177 4177 err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config,
4178 4178 sizeof (iwp_rxon_cmd_t), 0);
4179 4179 if (err != IWP_SUCCESS) {
4180 4180 cmn_err(CE_WARN, "iwp_config(): "
4181 4181 "failed to set configure command\n");
4182 4182 return (err);
4183 4183 }
4184 4184
4185 4185 /*
4186 4186 * remove all nodes in NIC
4187 4187 */
4188 4188 (void) memset(&rm_sta, 0, sizeof (rm_sta));
4189 4189 rm_sta.num_sta = 1;
4190 4190 (void) memcpy(rm_sta.addr, bcast, 6);
4191 4191
4192 4192 err = iwp_cmd(sc, REPLY_REMOVE_STA, &rm_sta, sizeof (iwp_rem_sta_t), 0);
4193 4193 if (err != IWP_SUCCESS) {
4194 4194 cmn_err(CE_WARN, "iwp_config(): "
4195 4195 "failed to remove broadcast node in hardware.\n");
4196 4196 return (err);
4197 4197 }
4198 4198
4199 4199 /*
4200 4200 * add broadcast node so that we can send broadcast frame
4201 4201 */
4202 4202 (void) memset(&node, 0, sizeof (node));
4203 4203 (void) memset(node.sta.addr, 0xff, 6);
4204 4204 node.mode = 0;
4205 4205 node.sta.sta_id = IWP_BROADCAST_ID;
4206 4206 node.station_flags = 0;
4207 4207
4208 4208 err = iwp_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 0);
4209 4209 if (err != IWP_SUCCESS) {
4210 4210 cmn_err(CE_WARN, "iwp_config(): "
4211 4211 "failed to add broadcast node\n");
4212 4212 return (err);
4213 4213 }
4214 4214
4215 4215 return (err);
4216 4216 }
4217 4217
4218 4218 /*
4219 4219 * quiesce(9E) entry point.
4220 4220 * This function is called when the system is single-threaded at high
4221 4221 * PIL with preemption disabled. Therefore, this function must not be
4222 4222 * blocked.
4223 4223 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
4224 4224 * DDI_FAILURE indicates an error condition and should almost never happen.
4225 4225 */
4226 4226 static int
4227 4227 iwp_quiesce(dev_info_t *dip)
4228 4228 {
4229 4229 iwp_sc_t *sc;
4230 4230
4231 4231 sc = ddi_get_soft_state(iwp_soft_state_p, ddi_get_instance(dip));
4232 4232 if (NULL == sc) {
4233 4233 return (DDI_FAILURE);
4234 4234 }
4235 4235
4236 4236 #ifdef DEBUG
4237 4237 /* by pass any messages, if it's quiesce */
4238 4238 iwp_dbg_flags = 0;
4239 4239 #endif
4240 4240
4241 4241 /*
4242 4242 * No more blocking is allowed while we are in the
4243 4243 * quiesce(9E) entry point.
4244 4244 */
4245 4245 atomic_or_32(&sc->sc_flags, IWP_F_QUIESCED);
4246 4246
4247 4247 /*
4248 4248 * Disable and mask all interrupts.
4249 4249 */
4250 4250 iwp_stop(sc);
4251 4251
4252 4252 return (DDI_SUCCESS);
4253 4253 }
4254 4254
4255 4255 static void
4256 4256 iwp_stop_master(iwp_sc_t *sc)
4257 4257 {
4258 4258 uint32_t tmp;
4259 4259 int n;
4260 4260
4261 4261 tmp = IWP_READ(sc, CSR_RESET);
4262 4262 IWP_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_STOP_MASTER);
4263 4263
4264 4264 tmp = IWP_READ(sc, CSR_GP_CNTRL);
4265 4265 if ((tmp & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE) ==
4266 4266 CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE) {
4267 4267 return;
4268 4268 }
4269 4269
4270 4270 for (n = 0; n < 2000; n++) {
4271 4271 if (IWP_READ(sc, CSR_RESET) &
4272 4272 CSR_RESET_REG_FLAG_MASTER_DISABLED) {
4273 4273 break;
4274 4274 }
4275 4275 DELAY(1000);
4276 4276 }
4277 4277
4278 4278 #ifdef DEBUG
4279 4279 if (2000 == n) {
4280 4280 IWP_DBG((IWP_DEBUG_HW, "iwp_stop_master(): "
4281 4281 "timeout waiting for master stop\n"));
4282 4282 }
4283 4283 #endif
4284 4284 }
4285 4285
4286 4286 static int
4287 4287 iwp_power_up(iwp_sc_t *sc)
4288 4288 {
4289 4289 uint32_t tmp;
4290 4290
4291 4291 iwp_mac_access_enter(sc);
4292 4292 tmp = iwp_reg_read(sc, ALM_APMG_PS_CTL);
4293 4293 tmp &= ~APMG_PS_CTRL_REG_MSK_POWER_SRC;
4294 4294 tmp |= APMG_PS_CTRL_REG_VAL_POWER_SRC_VMAIN;
4295 4295 iwp_reg_write(sc, ALM_APMG_PS_CTL, tmp);
4296 4296 iwp_mac_access_exit(sc);
4297 4297
4298 4298 DELAY(5000);
4299 4299 return (IWP_SUCCESS);
4300 4300 }
4301 4301
4302 4302 /*
4303 4303 * hardware initialization
4304 4304 */
4305 4305 static int
4306 4306 iwp_preinit(iwp_sc_t *sc)
4307 4307 {
4308 4308 int n;
4309 4309 uint8_t vlink;
4310 4310 uint16_t radio_cfg;
4311 4311 uint32_t tmp;
4312 4312
4313 4313 /*
4314 4314 * clear any pending interrupts
4315 4315 */
4316 4316 IWP_WRITE(sc, CSR_INT, 0xffffffff);
4317 4317
4318 4318 tmp = IWP_READ(sc, CSR_GIO_CHICKEN_BITS);
4319 4319 IWP_WRITE(sc, CSR_GIO_CHICKEN_BITS,
4320 4320 tmp | CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
4321 4321
4322 4322 tmp = IWP_READ(sc, CSR_GP_CNTRL);
4323 4323 IWP_WRITE(sc, CSR_GP_CNTRL, tmp | CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
4324 4324
4325 4325 /*
4326 4326 * wait for clock ready
4327 4327 */
4328 4328 for (n = 0; n < 1000; n++) {
4329 4329 if (IWP_READ(sc, CSR_GP_CNTRL) &
4330 4330 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) {
4331 4331 break;
4332 4332 }
4333 4333 DELAY(10);
4334 4334 }
4335 4335
4336 4336 if (1000 == n) {
4337 4337 return (ETIMEDOUT);
4338 4338 }
4339 4339
4340 4340 iwp_mac_access_enter(sc);
4341 4341
4342 4342 iwp_reg_write(sc, ALM_APMG_CLK_EN, APMG_CLK_REG_VAL_DMA_CLK_RQT);
4343 4343
4344 4344 DELAY(20);
4345 4345 tmp = iwp_reg_read(sc, ALM_APMG_PCIDEV_STT);
4346 4346 iwp_reg_write(sc, ALM_APMG_PCIDEV_STT, tmp |
4347 4347 APMG_DEV_STATE_REG_VAL_L1_ACTIVE_DISABLE);
4348 4348 iwp_mac_access_exit(sc);
4349 4349
4350 4350 radio_cfg = IWP_READ_EEP_SHORT(sc, EEP_SP_RADIO_CONFIGURATION);
4351 4351 if (SP_RADIO_TYPE_MSK(radio_cfg) < SP_RADIO_TYPE_MAX) {
4352 4352 tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
4353 4353 IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4354 4354 tmp | SP_RADIO_TYPE_MSK(radio_cfg) |
4355 4355 SP_RADIO_STEP_MSK(radio_cfg) |
4356 4356 SP_RADIO_DASH_MSK(radio_cfg));
4357 4357 } else {
4358 4358 cmn_err(CE_WARN, "iwp_preinit(): "
4359 4359 "radio configuration information in eeprom is wrong\n");
4360 4360 return (IWP_FAIL);
4361 4361 }
4362 4362
4363 4363
4364 4364 IWP_WRITE(sc, CSR_INT_COALESCING, 512 / 32);
4365 4365
4366 4366 (void) iwp_power_up(sc);
4367 4367
4368 4368 if ((sc->sc_rev & 0x80) == 0x80 && (sc->sc_rev & 0x7f) < 8) {
4369 4369 tmp = ddi_get32(sc->sc_cfg_handle,
4370 4370 (uint32_t *)(sc->sc_cfg_base + 0xe8));
4371 4371 ddi_put32(sc->sc_cfg_handle,
4372 4372 (uint32_t *)(sc->sc_cfg_base + 0xe8),
4373 4373 tmp & ~(1 << 11));
4374 4374 }
4375 4375
4376 4376 vlink = ddi_get8(sc->sc_cfg_handle,
4377 4377 (uint8_t *)(sc->sc_cfg_base + 0xf0));
4378 4378 ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0xf0),
4379 4379 vlink & ~2);
4380 4380
4381 4381 tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
4382 4382 tmp |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
4383 4383 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
4384 4384 IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG, tmp);
4385 4385
4386 4386 /*
4387 4387 * make sure power supply on each part of the hardware
4388 4388 */
4389 4389 iwp_mac_access_enter(sc);
4390 4390 tmp = iwp_reg_read(sc, ALM_APMG_PS_CTL);
4391 4391 tmp |= APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
4392 4392 iwp_reg_write(sc, ALM_APMG_PS_CTL, tmp);
4393 4393 DELAY(5);
4394 4394
4395 4395 tmp = iwp_reg_read(sc, ALM_APMG_PS_CTL);
4396 4396 tmp &= ~APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
4397 4397 iwp_reg_write(sc, ALM_APMG_PS_CTL, tmp);
4398 4398 iwp_mac_access_exit(sc);
4399 4399
4400 4400 if (PA_TYPE_MIX == sc->sc_chip_param.pa_type) {
4401 4401 IWP_WRITE(sc, CSR_GP_DRIVER_REG,
4402 4402 CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_MIX);
4403 4403 }
4404 4404
4405 4405 if (PA_TYPE_INTER == sc->sc_chip_param.pa_type) {
4406 4406
4407 4407 IWP_WRITE(sc, CSR_GP_DRIVER_REG,
4408 4408 CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
4409 4409 }
4410 4410
4411 4411 return (IWP_SUCCESS);
4412 4412 }
4413 4413
4414 4414 /*
4415 4415 * set up semphore flag to own EEPROM
4416 4416 */
4417 4417 static int
4418 4418 iwp_eep_sem_down(iwp_sc_t *sc)
4419 4419 {
4420 4420 int count1, count2;
4421 4421 uint32_t tmp;
4422 4422
4423 4423 for (count1 = 0; count1 < 1000; count1++) {
4424 4424 tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
4425 4425 IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4426 4426 tmp | CSR_HW_IF_CONFIG_REG_EEP_SEM);
4427 4427
4428 4428 for (count2 = 0; count2 < 2; count2++) {
4429 4429 if (IWP_READ(sc, CSR_HW_IF_CONFIG_REG) &
4430 4430 CSR_HW_IF_CONFIG_REG_EEP_SEM) {
4431 4431 return (IWP_SUCCESS);
4432 4432 }
4433 4433 DELAY(10000);
4434 4434 }
4435 4435 }
4436 4436 return (IWP_FAIL);
4437 4437 }
4438 4438
4439 4439 /*
4440 4440 * reset semphore flag to release EEPROM
4441 4441 */
4442 4442 static void
4443 4443 iwp_eep_sem_up(iwp_sc_t *sc)
4444 4444 {
4445 4445 uint32_t tmp;
4446 4446
4447 4447 tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
4448 4448 IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4449 4449 tmp & (~CSR_HW_IF_CONFIG_REG_EEP_SEM));
4450 4450 }
4451 4451
4452 4452 /*
4453 4453 * This function read all infomation from eeprom
4454 4454 */
4455 4455 static int
4456 4456 iwp_eep_load(iwp_sc_t *sc)
4457 4457 {
4458 4458 int i, rr;
4459 4459 uint32_t rv, tmp, eep_gp;
4460 4460 uint16_t addr, eep_sz = sizeof (sc->sc_eep_map);
4461 4461 uint16_t *eep_p = (uint16_t *)&sc->sc_eep_map;
4462 4462
4463 4463 /*
4464 4464 * read eeprom gp register in CSR
4465 4465 */
4466 4466 eep_gp = IWP_READ(sc, CSR_EEPROM_GP);
4467 4467 if ((eep_gp & CSR_EEPROM_GP_VALID_MSK) ==
4468 4468 CSR_EEPROM_GP_BAD_SIGNATURE) {
4469 4469 IWP_DBG((IWP_DEBUG_EEPROM, "iwp_eep_load(): "
4470 4470 "not find eeprom\n"));
4471 4471 return (IWP_FAIL);
4472 4472 }
4473 4473
4474 4474 rr = iwp_eep_sem_down(sc);
4475 4475 if (rr != 0) {
4476 4476 IWP_DBG((IWP_DEBUG_EEPROM, "iwp_eep_load(): "
4477 4477 "driver failed to own EEPROM\n"));
4478 4478 return (IWP_FAIL);
4479 4479 }
4480 4480
4481 4481 for (addr = 0; addr < eep_sz; addr += 2) {
4482 4482 IWP_WRITE(sc, CSR_EEPROM_REG, addr<<1);
4483 4483 tmp = IWP_READ(sc, CSR_EEPROM_REG);
4484 4484 IWP_WRITE(sc, CSR_EEPROM_REG, tmp & ~(0x2));
4485 4485
4486 4486 for (i = 0; i < 10; i++) {
4487 4487 rv = IWP_READ(sc, CSR_EEPROM_REG);
4488 4488 if (rv & 1) {
4489 4489 break;
4490 4490 }
4491 4491 DELAY(10);
4492 4492 }
4493 4493
4494 4494 if (!(rv & 1)) {
4495 4495 IWP_DBG((IWP_DEBUG_EEPROM, "iwp_eep_load(): "
4496 4496 "time out when read eeprome\n"));
4497 4497 iwp_eep_sem_up(sc);
4498 4498 return (IWP_FAIL);
4499 4499 }
4500 4500
4501 4501 eep_p[addr/2] = LE_16(rv >> 16);
4502 4502 }
4503 4503
4504 4504 iwp_eep_sem_up(sc);
4505 4505 return (IWP_SUCCESS);
4506 4506 }
4507 4507
4508 4508 /*
4509 4509 * initialize mac address in ieee80211com_t struct
4510 4510 */
4511 4511 static void
4512 4512 iwp_get_mac_from_eep(iwp_sc_t *sc)
4513 4513 {
4514 4514 ieee80211com_t *ic = &sc->sc_ic;
4515 4515
4516 4516 IEEE80211_ADDR_COPY(ic->ic_macaddr, &sc->sc_eep_map[EEP_MAC_ADDRESS]);
4517 4517
4518 4518 IWP_DBG((IWP_DEBUG_EEPROM, "iwp_get_mac_from_eep(): "
4519 4519 "mac:%2x:%2x:%2x:%2x:%2x:%2x\n",
4520 4520 ic->ic_macaddr[0], ic->ic_macaddr[1], ic->ic_macaddr[2],
4521 4521 ic->ic_macaddr[3], ic->ic_macaddr[4], ic->ic_macaddr[5]));
4522 4522 }
4523 4523
4524 4524 /*
4525 4525 * main initialization function
4526 4526 */
4527 4527 static int
4528 4528 iwp_init(iwp_sc_t *sc)
4529 4529 {
4530 4530 int err = IWP_FAIL;
4531 4531 clock_t clk;
4532 4532
4533 4533 /*
4534 4534 * release buffer for calibration
4535 4535 */
4536 4536 iwp_release_calib_buffer(sc);
4537 4537
4538 4538 mutex_enter(&sc->sc_glock);
4539 4539 atomic_and_32(&sc->sc_flags, ~IWP_F_FW_INIT);
4540 4540
4541 4541 err = iwp_init_common(sc);
4542 4542 if (err != IWP_SUCCESS) {
4543 4543 mutex_exit(&sc->sc_glock);
4544 4544 return (IWP_FAIL);
4545 4545 }
4546 4546
4547 4547 /*
4548 4548 * backup ucode data part for future use.
4549 4549 */
4550 4550 (void) memcpy(sc->sc_dma_fw_data_bak.mem_va,
4551 4551 sc->sc_dma_fw_data.mem_va,
4552 4552 sc->sc_dma_fw_data.alength);
4553 4553
4554 4554 /* load firmware init segment into NIC */
4555 4555 err = iwp_load_init_firmware(sc);
4556 4556 if (err != IWP_SUCCESS) {
4557 4557 cmn_err(CE_WARN, "iwp_init(): "
↓ open down ↓ |
847 lines elided |
↑ open up ↑ |
4558 4558 "failed to setup init firmware\n");
4559 4559 mutex_exit(&sc->sc_glock);
4560 4560 return (IWP_FAIL);
4561 4561 }
4562 4562
4563 4563 /*
4564 4564 * now press "execute" start running
4565 4565 */
4566 4566 IWP_WRITE(sc, CSR_RESET, 0);
4567 4567
4568 - clk = ddi_get_lbolt() + drv_usectohz(1000000);
4568 + clk = ddi_get_lbolt() + drv_sectohz(1);
4569 4569 while (!(sc->sc_flags & IWP_F_FW_INIT)) {
4570 4570 if (cv_timedwait(&sc->sc_ucode_cv,
4571 4571 &sc->sc_glock, clk) < 0) {
4572 4572 break;
4573 4573 }
4574 4574 }
4575 4575
4576 4576 if (!(sc->sc_flags & IWP_F_FW_INIT)) {
4577 4577 cmn_err(CE_WARN, "iwp_init(): "
4578 4578 "failed to process init alive.\n");
4579 4579 mutex_exit(&sc->sc_glock);
4580 4580 return (IWP_FAIL);
4581 4581 }
4582 4582
4583 4583 mutex_exit(&sc->sc_glock);
4584 4584
4585 4585 /*
4586 4586 * stop chipset for initializing chipset again
4587 4587 */
4588 4588 iwp_stop(sc);
4589 4589
4590 4590 mutex_enter(&sc->sc_glock);
4591 4591 atomic_and_32(&sc->sc_flags, ~IWP_F_FW_INIT);
4592 4592
4593 4593 err = iwp_init_common(sc);
4594 4594 if (err != IWP_SUCCESS) {
4595 4595 mutex_exit(&sc->sc_glock);
4596 4596 return (IWP_FAIL);
4597 4597 }
4598 4598
4599 4599 /*
4600 4600 * load firmware run segment into NIC
4601 4601 */
4602 4602 err = iwp_load_run_firmware(sc);
4603 4603 if (err != IWP_SUCCESS) {
4604 4604 cmn_err(CE_WARN, "iwp_init(): "
↓ open down ↓ |
26 lines elided |
↑ open up ↑ |
4605 4605 "failed to setup run firmware\n");
4606 4606 mutex_exit(&sc->sc_glock);
4607 4607 return (IWP_FAIL);
4608 4608 }
4609 4609
4610 4610 /*
4611 4611 * now press "execute" start running
4612 4612 */
4613 4613 IWP_WRITE(sc, CSR_RESET, 0);
4614 4614
4615 - clk = ddi_get_lbolt() + drv_usectohz(1000000);
4615 + clk = ddi_get_lbolt() + drv_sectohz(1);
4616 4616 while (!(sc->sc_flags & IWP_F_FW_INIT)) {
4617 4617 if (cv_timedwait(&sc->sc_ucode_cv,
4618 4618 &sc->sc_glock, clk) < 0) {
4619 4619 break;
4620 4620 }
4621 4621 }
4622 4622
4623 4623 if (!(sc->sc_flags & IWP_F_FW_INIT)) {
4624 4624 cmn_err(CE_WARN, "iwp_init(): "
4625 4625 "failed to process runtime alive.\n");
4626 4626 mutex_exit(&sc->sc_glock);
4627 4627 return (IWP_FAIL);
4628 4628 }
4629 4629
4630 4630 mutex_exit(&sc->sc_glock);
4631 4631
4632 4632 DELAY(1000);
4633 4633
4634 4634 mutex_enter(&sc->sc_glock);
4635 4635 atomic_and_32(&sc->sc_flags, ~IWP_F_FW_INIT);
4636 4636
4637 4637 /*
4638 4638 * at this point, the firmware is loaded OK, then config the hardware
4639 4639 * with the ucode API, including rxon, txpower, etc.
4640 4640 */
4641 4641 err = iwp_config(sc);
4642 4642 if (err) {
4643 4643 cmn_err(CE_WARN, "iwp_init(): "
4644 4644 "failed to configure device\n");
4645 4645 mutex_exit(&sc->sc_glock);
4646 4646 return (IWP_FAIL);
4647 4647 }
4648 4648
4649 4649 /*
4650 4650 * at this point, hardware may receive beacons :)
4651 4651 */
4652 4652 mutex_exit(&sc->sc_glock);
4653 4653 return (IWP_SUCCESS);
4654 4654 }
4655 4655
4656 4656 /*
4657 4657 * stop or disable NIC
4658 4658 */
4659 4659 static void
4660 4660 iwp_stop(iwp_sc_t *sc)
4661 4661 {
4662 4662 uint32_t tmp;
4663 4663 int i;
4664 4664
4665 4665 /* by pass if it's quiesced */
4666 4666 if (!(sc->sc_flags & IWP_F_QUIESCED)) {
4667 4667 mutex_enter(&sc->sc_glock);
4668 4668 }
4669 4669
4670 4670 IWP_WRITE(sc, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
4671 4671 /*
4672 4672 * disable interrupts
4673 4673 */
4674 4674 IWP_WRITE(sc, CSR_INT_MASK, 0);
4675 4675 IWP_WRITE(sc, CSR_INT, CSR_INI_SET_MASK);
4676 4676 IWP_WRITE(sc, CSR_FH_INT_STATUS, 0xffffffff);
4677 4677
4678 4678 /*
4679 4679 * reset all Tx rings
4680 4680 */
4681 4681 for (i = 0; i < IWP_NUM_QUEUES; i++) {
4682 4682 iwp_reset_tx_ring(sc, &sc->sc_txq[i]);
4683 4683 }
4684 4684
4685 4685 /*
4686 4686 * reset Rx ring
4687 4687 */
4688 4688 iwp_reset_rx_ring(sc);
4689 4689
4690 4690 iwp_mac_access_enter(sc);
4691 4691 iwp_reg_write(sc, ALM_APMG_CLK_DIS, APMG_CLK_REG_VAL_DMA_CLK_RQT);
4692 4692 iwp_mac_access_exit(sc);
4693 4693
4694 4694 DELAY(5);
4695 4695
4696 4696 iwp_stop_master(sc);
4697 4697
4698 4698 mutex_enter(&sc->sc_mt_lock);
4699 4699 sc->sc_tx_timer = 0;
4700 4700 mutex_exit(&sc->sc_mt_lock);
4701 4701
4702 4702 tmp = IWP_READ(sc, CSR_RESET);
4703 4703 IWP_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_SW_RESET);
4704 4704
4705 4705 /* by pass if it's quiesced */
4706 4706 if (!(sc->sc_flags & IWP_F_QUIESCED)) {
4707 4707 mutex_exit(&sc->sc_glock);
4708 4708 }
4709 4709 }
4710 4710
4711 4711 /*
4712 4712 * Naive implementation of the Adaptive Multi Rate Retry algorithm:
4713 4713 * "IEEE 802.11 Rate Adaptation: A Practical Approach"
4714 4714 * Mathieu Lacage, Hossein Manshaei, Thierry Turletti
4715 4715 * INRIA Sophia - Projet Planete
4716 4716 * http://www-sop.inria.fr/rapports/sophia/RR-5208.html
4717 4717 */
4718 4718 #define is_success(amrr) \
4719 4719 ((amrr)->retrycnt < (amrr)->txcnt / 10)
4720 4720 #define is_failure(amrr) \
4721 4721 ((amrr)->retrycnt > (amrr)->txcnt / 3)
4722 4722 #define is_enough(amrr) \
4723 4723 ((amrr)->txcnt > 200)
4724 4724 #define not_very_few(amrr) \
4725 4725 ((amrr)->txcnt > 40)
4726 4726 #define is_min_rate(in) \
4727 4727 (0 == (in)->in_txrate)
4728 4728 #define is_max_rate(in) \
4729 4729 ((in)->in_rates.ir_nrates - 1 == (in)->in_txrate)
4730 4730 #define increase_rate(in) \
4731 4731 ((in)->in_txrate++)
4732 4732 #define decrease_rate(in) \
4733 4733 ((in)->in_txrate--)
4734 4734 #define reset_cnt(amrr) \
4735 4735 { (amrr)->txcnt = (amrr)->retrycnt = 0; }
4736 4736
4737 4737 #define IWP_AMRR_MIN_SUCCESS_THRESHOLD 1
4738 4738 #define IWP_AMRR_MAX_SUCCESS_THRESHOLD 15
4739 4739
4740 4740 static void
4741 4741 iwp_amrr_init(iwp_amrr_t *amrr)
4742 4742 {
4743 4743 amrr->success = 0;
4744 4744 amrr->recovery = 0;
4745 4745 amrr->txcnt = amrr->retrycnt = 0;
4746 4746 amrr->success_threshold = IWP_AMRR_MIN_SUCCESS_THRESHOLD;
4747 4747 }
4748 4748
4749 4749 static void
4750 4750 iwp_amrr_timeout(iwp_sc_t *sc)
4751 4751 {
4752 4752 ieee80211com_t *ic = &sc->sc_ic;
4753 4753
4754 4754 IWP_DBG((IWP_DEBUG_RATECTL, "iwp_amrr_timeout(): "
4755 4755 "enter\n"));
4756 4756
4757 4757 if (IEEE80211_M_STA == ic->ic_opmode) {
4758 4758 iwp_amrr_ratectl(NULL, ic->ic_bss);
4759 4759 } else {
4760 4760 ieee80211_iterate_nodes(&ic->ic_sta, iwp_amrr_ratectl, NULL);
4761 4761 }
4762 4762
4763 4763 sc->sc_clk = ddi_get_lbolt();
4764 4764 }
4765 4765
4766 4766 /* ARGSUSED */
4767 4767 static void
4768 4768 iwp_amrr_ratectl(void *arg, ieee80211_node_t *in)
4769 4769 {
4770 4770 iwp_amrr_t *amrr = (iwp_amrr_t *)in;
4771 4771 int need_change = 0;
4772 4772
4773 4773 if (is_success(amrr) && is_enough(amrr)) {
4774 4774 amrr->success++;
4775 4775 if (amrr->success >= amrr->success_threshold &&
4776 4776 !is_max_rate(in)) {
4777 4777 amrr->recovery = 1;
4778 4778 amrr->success = 0;
4779 4779 increase_rate(in);
4780 4780 IWP_DBG((IWP_DEBUG_RATECTL, "iwp_amrr_ratectl(): "
4781 4781 "AMRR increasing rate %d "
4782 4782 "(txcnt=%d retrycnt=%d)\n",
4783 4783 in->in_txrate, amrr->txcnt,
4784 4784 amrr->retrycnt));
4785 4785 need_change = 1;
4786 4786 } else {
4787 4787 amrr->recovery = 0;
4788 4788 }
4789 4789 } else if (not_very_few(amrr) && is_failure(amrr)) {
4790 4790 amrr->success = 0;
4791 4791 if (!is_min_rate(in)) {
4792 4792 if (amrr->recovery) {
4793 4793 amrr->success_threshold++;
4794 4794 if (amrr->success_threshold >
4795 4795 IWP_AMRR_MAX_SUCCESS_THRESHOLD) {
4796 4796 amrr->success_threshold =
4797 4797 IWP_AMRR_MAX_SUCCESS_THRESHOLD;
4798 4798 }
4799 4799 } else {
4800 4800 amrr->success_threshold =
4801 4801 IWP_AMRR_MIN_SUCCESS_THRESHOLD;
4802 4802 }
4803 4803 decrease_rate(in);
4804 4804 IWP_DBG((IWP_DEBUG_RATECTL, "iwp_amrr_ratectl(): "
4805 4805 "AMRR decreasing rate %d "
4806 4806 "(txcnt=%d retrycnt=%d)\n",
4807 4807 in->in_txrate, amrr->txcnt,
4808 4808 amrr->retrycnt));
4809 4809 need_change = 1;
4810 4810 }
4811 4811 amrr->recovery = 0; /* paper is incorrect */
4812 4812 }
4813 4813
4814 4814 if (is_enough(amrr) || need_change) {
4815 4815 reset_cnt(amrr);
4816 4816 }
4817 4817 }
4818 4818
4819 4819 /*
4820 4820 * translate indirect address in eeprom to direct address
4821 4821 * in eeprom and return address of entry whos indirect address
4822 4822 * is indi_addr
4823 4823 */
4824 4824 static uint8_t *
4825 4825 iwp_eep_addr_trans(iwp_sc_t *sc, uint32_t indi_addr)
4826 4826 {
4827 4827 uint32_t di_addr;
4828 4828 uint16_t temp;
4829 4829
4830 4830 if (!(indi_addr & INDIRECT_ADDRESS)) {
4831 4831 di_addr = indi_addr;
4832 4832 return (&sc->sc_eep_map[di_addr]);
4833 4833 }
4834 4834
4835 4835 switch (indi_addr & INDIRECT_TYPE_MSK) {
4836 4836 case INDIRECT_GENERAL:
4837 4837 temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_GENERAL);
4838 4838 break;
4839 4839 case INDIRECT_HOST:
4840 4840 temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_HOST);
4841 4841 break;
4842 4842 case INDIRECT_REGULATORY:
4843 4843 temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_REGULATORY);
4844 4844 break;
4845 4845 case INDIRECT_CALIBRATION:
4846 4846 temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_CALIBRATION);
4847 4847 break;
4848 4848 case INDIRECT_PROCESS_ADJST:
4849 4849 temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_PROCESS_ADJST);
4850 4850 break;
4851 4851 case INDIRECT_OTHERS:
4852 4852 temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_OTHERS);
4853 4853 break;
4854 4854 default:
4855 4855 temp = 0;
4856 4856 cmn_err(CE_WARN, "iwp_eep_addr_trans(): "
4857 4857 "incorrect indirect eeprom address.\n");
4858 4858 break;
4859 4859 }
4860 4860
4861 4861 di_addr = (indi_addr & ADDRESS_MSK) + (temp << 1);
4862 4862
4863 4863 return (&sc->sc_eep_map[di_addr]);
4864 4864 }
4865 4865
4866 4866 /*
4867 4867 * loade a section of ucode into NIC
4868 4868 */
4869 4869 static int
4870 4870 iwp_put_seg_fw(iwp_sc_t *sc, uint32_t addr_s, uint32_t addr_d, uint32_t len)
4871 4871 {
4872 4872
4873 4873 iwp_mac_access_enter(sc);
4874 4874
4875 4875 IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(IWP_FH_SRVC_CHNL),
4876 4876 IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
4877 4877
4878 4878 IWP_WRITE(sc, IWP_FH_SRVC_CHNL_SRAM_ADDR_REG(IWP_FH_SRVC_CHNL), addr_d);
4879 4879
4880 4880 IWP_WRITE(sc, IWP_FH_TFDIB_CTRL0_REG(IWP_FH_SRVC_CHNL),
4881 4881 (addr_s & FH_MEM_TFDIB_DRAM_ADDR_LSB_MASK));
4882 4882
4883 4883 IWP_WRITE(sc, IWP_FH_TFDIB_CTRL1_REG(IWP_FH_SRVC_CHNL), len);
4884 4884
4885 4885 IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_BUF_STS_REG(IWP_FH_SRVC_CHNL),
4886 4886 (1 << IWP_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
4887 4887 (1 << IWP_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
4888 4888 IWP_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
4889 4889
4890 4890 IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(IWP_FH_SRVC_CHNL),
4891 4891 IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
4892 4892 IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL |
4893 4893 IWP_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
4894 4894
4895 4895 iwp_mac_access_exit(sc);
4896 4896
4897 4897 return (IWP_SUCCESS);
4898 4898 }
4899 4899
4900 4900 /*
4901 4901 * necessary setting during alive notification
4902 4902 */
4903 4903 static int
4904 4904 iwp_alive_common(iwp_sc_t *sc)
4905 4905 {
4906 4906 uint32_t base;
4907 4907 uint32_t i;
4908 4908 iwp_wimax_coex_cmd_t w_cmd;
4909 4909 iwp_calibration_crystal_cmd_t c_cmd;
4910 4910 uint32_t rv = IWP_FAIL;
4911 4911
4912 4912 /*
4913 4913 * initialize SCD related registers to make TX work.
4914 4914 */
4915 4915 iwp_mac_access_enter(sc);
4916 4916
4917 4917 /*
4918 4918 * read sram address of data base.
4919 4919 */
4920 4920 sc->sc_scd_base = iwp_reg_read(sc, IWP_SCD_SRAM_BASE_ADDR);
4921 4921
4922 4922 for (base = sc->sc_scd_base + IWP_SCD_CONTEXT_DATA_OFFSET;
4923 4923 base < sc->sc_scd_base + IWP_SCD_TX_STTS_BITMAP_OFFSET;
4924 4924 base += 4) {
4925 4925 iwp_mem_write(sc, base, 0);
4926 4926 }
4927 4927
4928 4928 for (; base < sc->sc_scd_base + IWP_SCD_TRANSLATE_TBL_OFFSET;
4929 4929 base += 4) {
4930 4930 iwp_mem_write(sc, base, 0);
4931 4931 }
4932 4932
4933 4933 for (i = 0; i < sizeof (uint16_t) * IWP_NUM_QUEUES; i += 4) {
4934 4934 iwp_mem_write(sc, base + i, 0);
4935 4935 }
4936 4936
4937 4937 iwp_reg_write(sc, IWP_SCD_DRAM_BASE_ADDR,
4938 4938 sc->sc_dma_sh.cookie.dmac_address >> 10);
4939 4939
4940 4940 iwp_reg_write(sc, IWP_SCD_QUEUECHAIN_SEL,
4941 4941 IWP_SCD_QUEUECHAIN_SEL_ALL(IWP_NUM_QUEUES));
4942 4942
4943 4943 iwp_reg_write(sc, IWP_SCD_AGGR_SEL, 0);
4944 4944
4945 4945 for (i = 0; i < IWP_NUM_QUEUES; i++) {
4946 4946 iwp_reg_write(sc, IWP_SCD_QUEUE_RDPTR(i), 0);
4947 4947 IWP_WRITE(sc, HBUS_TARG_WRPTR, 0 | (i << 8));
4948 4948 iwp_mem_write(sc, sc->sc_scd_base +
4949 4949 IWP_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
4950 4950 iwp_mem_write(sc, sc->sc_scd_base +
4951 4951 IWP_SCD_CONTEXT_QUEUE_OFFSET(i) +
4952 4952 sizeof (uint32_t),
4953 4953 ((SCD_WIN_SIZE << IWP_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
4954 4954 IWP_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
4955 4955 ((SCD_FRAME_LIMIT <<
4956 4956 IWP_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
4957 4957 IWP_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
4958 4958 }
4959 4959
4960 4960 iwp_reg_write(sc, IWP_SCD_INTERRUPT_MASK, (1 << IWP_NUM_QUEUES) - 1);
4961 4961
4962 4962 iwp_reg_write(sc, (IWP_SCD_BASE + 0x10),
4963 4963 SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
4964 4964
4965 4965 IWP_WRITE(sc, HBUS_TARG_WRPTR, (IWP_CMD_QUEUE_NUM << 8));
4966 4966 iwp_reg_write(sc, IWP_SCD_QUEUE_RDPTR(IWP_CMD_QUEUE_NUM), 0);
4967 4967
4968 4968 /*
4969 4969 * queue 0-7 map to FIFO 0-7 and
4970 4970 * all queues work under FIFO mode(none-scheduler_ack)
4971 4971 */
4972 4972 for (i = 0; i < 4; i++) {
4973 4973 iwp_reg_write(sc, IWP_SCD_QUEUE_STATUS_BITS(i),
4974 4974 (1 << IWP_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
4975 4975 ((3-i) << IWP_SCD_QUEUE_STTS_REG_POS_TXF) |
4976 4976 (1 << IWP_SCD_QUEUE_STTS_REG_POS_WSL) |
4977 4977 IWP_SCD_QUEUE_STTS_REG_MSK);
4978 4978 }
4979 4979
4980 4980 iwp_reg_write(sc, IWP_SCD_QUEUE_STATUS_BITS(IWP_CMD_QUEUE_NUM),
4981 4981 (1 << IWP_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
4982 4982 (IWP_CMD_FIFO_NUM << IWP_SCD_QUEUE_STTS_REG_POS_TXF) |
4983 4983 (1 << IWP_SCD_QUEUE_STTS_REG_POS_WSL) |
4984 4984 IWP_SCD_QUEUE_STTS_REG_MSK);
4985 4985
4986 4986 for (i = 5; i < 7; i++) {
4987 4987 iwp_reg_write(sc, IWP_SCD_QUEUE_STATUS_BITS(i),
4988 4988 (1 << IWP_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
4989 4989 (i << IWP_SCD_QUEUE_STTS_REG_POS_TXF) |
4990 4990 (1 << IWP_SCD_QUEUE_STTS_REG_POS_WSL) |
4991 4991 IWP_SCD_QUEUE_STTS_REG_MSK);
4992 4992 }
4993 4993
4994 4994 iwp_mac_access_exit(sc);
4995 4995
4996 4996 (void) memset(&w_cmd, 0, sizeof (w_cmd));
4997 4997
4998 4998 rv = iwp_cmd(sc, COEX_PRIORITY_TABLE_CMD, &w_cmd, sizeof (w_cmd), 1);
4999 4999 if (rv != IWP_SUCCESS) {
5000 5000 cmn_err(CE_WARN, "iwp_alive_common(): "
5001 5001 "failed to send wimax coexist command.\n");
5002 5002 return (rv);
5003 5003 }
5004 5004
5005 5005 (void) memset(&c_cmd, 0, sizeof (c_cmd));
5006 5006
5007 5007 c_cmd.opCode = PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
5008 5008 c_cmd.data.cap_pin1 = LE_16(sc->sc_eep_calib->xtal_calib[0]);
5009 5009 c_cmd.data.cap_pin2 = LE_16(sc->sc_eep_calib->xtal_calib[1]);
5010 5010
5011 5011 rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD, &c_cmd, sizeof (c_cmd), 1);
5012 5012 if (rv != IWP_SUCCESS) {
5013 5013 cmn_err(CE_WARN, "iwp_alive_common(): "
5014 5014 "failed to send crystal frq calibration command.\n");
5015 5015 return (rv);
5016 5016 }
5017 5017
5018 5018 /*
5019 5019 * make sure crystal frequency calibration ready
5020 5020 * before next operations.
5021 5021 */
5022 5022 DELAY(1000);
5023 5023
5024 5024 return (IWP_SUCCESS);
5025 5025 }
5026 5026
5027 5027 /*
5028 5028 * save results of calibration from ucode
5029 5029 */
5030 5030 static void
5031 5031 iwp_save_calib_result(iwp_sc_t *sc, iwp_rx_desc_t *desc)
5032 5032 {
5033 5033 struct iwp_calib_results *res_p = &sc->sc_calib_results;
5034 5034 struct iwp_calib_hdr *calib_hdr = (struct iwp_calib_hdr *)(desc + 1);
5035 5035 int len = LE_32(desc->len);
5036 5036
5037 5037 /*
5038 5038 * ensure the size of buffer is not too big
5039 5039 */
5040 5040 len = (len & FH_RSCSR_FRAME_SIZE_MASK) - 4;
5041 5041
5042 5042 switch (calib_hdr->op_code) {
5043 5043 case PHY_CALIBRATE_LO_CMD:
5044 5044 if (NULL == res_p->lo_res) {
5045 5045 res_p->lo_res = kmem_alloc(len, KM_NOSLEEP);
5046 5046 }
5047 5047
5048 5048 if (NULL == res_p->lo_res) {
5049 5049 cmn_err(CE_WARN, "iwp_save_calib_result(): "
5050 5050 "failed to allocate memory.\n");
5051 5051 return;
5052 5052 }
5053 5053
5054 5054 res_p->lo_res_len = len;
5055 5055 (void) memcpy(res_p->lo_res, calib_hdr, len);
5056 5056 break;
5057 5057 case PHY_CALIBRATE_TX_IQ_CMD:
5058 5058 if (NULL == res_p->tx_iq_res) {
5059 5059 res_p->tx_iq_res = kmem_alloc(len, KM_NOSLEEP);
5060 5060 }
5061 5061
5062 5062 if (NULL == res_p->tx_iq_res) {
5063 5063 cmn_err(CE_WARN, "iwp_save_calib_result(): "
5064 5064 "failed to allocate memory.\n");
5065 5065 return;
5066 5066 }
5067 5067
5068 5068 res_p->tx_iq_res_len = len;
5069 5069 (void) memcpy(res_p->tx_iq_res, calib_hdr, len);
5070 5070 break;
5071 5071 case PHY_CALIBRATE_TX_IQ_PERD_CMD:
5072 5072 if (NULL == res_p->tx_iq_perd_res) {
5073 5073 res_p->tx_iq_perd_res = kmem_alloc(len, KM_NOSLEEP);
5074 5074 }
5075 5075
5076 5076 if (NULL == res_p->tx_iq_perd_res) {
5077 5077 cmn_err(CE_WARN, "iwp_save_calib_result(): "
5078 5078 "failed to allocate memory.\n");
5079 5079 }
5080 5080
5081 5081 res_p->tx_iq_perd_res_len = len;
5082 5082 (void) memcpy(res_p->tx_iq_perd_res, calib_hdr, len);
5083 5083 break;
5084 5084 case PHY_CALIBRATE_BASE_BAND_CMD:
5085 5085 if (NULL == res_p->base_band_res) {
5086 5086 res_p->base_band_res = kmem_alloc(len, KM_NOSLEEP);
5087 5087 }
5088 5088
5089 5089 if (NULL == res_p->base_band_res) {
5090 5090 cmn_err(CE_WARN, "iwp_save_calib_result(): "
5091 5091 "failed to allocate memory.\n");
5092 5092 }
5093 5093
5094 5094 res_p->base_band_res_len = len;
5095 5095 (void) memcpy(res_p->base_band_res, calib_hdr, len);
5096 5096 break;
5097 5097 default:
5098 5098 cmn_err(CE_WARN, "iwp_save_calib_result(): "
5099 5099 "incorrect calibration type(%d).\n", calib_hdr->op_code);
5100 5100 break;
5101 5101 }
5102 5102
5103 5103 }
5104 5104
5105 5105 static void
5106 5106 iwp_release_calib_buffer(iwp_sc_t *sc)
5107 5107 {
5108 5108 if (sc->sc_calib_results.lo_res != NULL) {
5109 5109 kmem_free(sc->sc_calib_results.lo_res,
5110 5110 sc->sc_calib_results.lo_res_len);
5111 5111 sc->sc_calib_results.lo_res = NULL;
5112 5112 }
5113 5113
5114 5114 if (sc->sc_calib_results.tx_iq_res != NULL) {
5115 5115 kmem_free(sc->sc_calib_results.tx_iq_res,
5116 5116 sc->sc_calib_results.tx_iq_res_len);
5117 5117 sc->sc_calib_results.tx_iq_res = NULL;
5118 5118 }
5119 5119
5120 5120 if (sc->sc_calib_results.tx_iq_perd_res != NULL) {
5121 5121 kmem_free(sc->sc_calib_results.tx_iq_perd_res,
5122 5122 sc->sc_calib_results.tx_iq_perd_res_len);
5123 5123 sc->sc_calib_results.tx_iq_perd_res = NULL;
5124 5124 }
5125 5125
5126 5126 if (sc->sc_calib_results.base_band_res != NULL) {
5127 5127 kmem_free(sc->sc_calib_results.base_band_res,
5128 5128 sc->sc_calib_results.base_band_res_len);
5129 5129 sc->sc_calib_results.base_band_res = NULL;
5130 5130 }
5131 5131
5132 5132 }
5133 5133
5134 5134 /*
5135 5135 * common section of intialization
5136 5136 */
static int
iwp_init_common(iwp_sc_t *sc)
{
	int32_t qid;
	uint32_t tmp;

	/* bring the adapter to a known pre-init state; result ignored */
	(void) iwp_preinit(sc);

	/*
	 * Bail out early if the hardware RF kill switch reports the
	 * radio disabled; nothing below can usefully proceed.
	 */
	tmp = IWP_READ(sc, CSR_GP_CNTRL);
	if (!(tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) {
		cmn_err(CE_NOTE, "iwp_init_common(): "
		    "radio transmitter is off\n");
		return (IWP_FAIL);
	}

	/*
	 * init Rx ring
	 */
	iwp_mac_access_enter(sc);
	/* disable the Rx DMA channel while it is being reprogrammed */
	IWP_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
	/* Rx descriptor ring base, 256-byte aligned (address >> 8) */
	IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
	    sc->sc_rxq.dma_desc.cookie.dmac_address >> 8);

	/* status write-back area inside the shared page (address >> 4) */
	IWP_WRITE(sc, FH_RSCSR_CHNL0_STTS_WPTR_REG,
	    ((uint32_t)(sc->sc_dma_sh.cookie.dmac_address +
	    offsetof(struct iwp_shared, val0)) >> 4));

	/* re-enable the channel: 4K buffers, interrupt host on Rx */
	IWP_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	    IWP_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
	    (RX_QUEUE_SIZE_LOG <<
	    FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
	iwp_mac_access_exit(sc);
	/* write pointer must stay 8-aligned, hence the & ~0x7 */
	IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG,
	    (RX_QUEUE_SIZE - 1) & ~0x7);

	/*
	 * init Tx rings
	 */
	iwp_mac_access_enter(sc);
	iwp_reg_write(sc, IWP_SCD_TXFACT, 0);

	/*
	 * keep warm page
	 */
	IWP_WRITE(sc, IWP_FH_KW_MEM_ADDR_REG,
	    sc->sc_dma_kw.cookie.dmac_address >> 4);

	/* program every Tx queue's descriptor ring base and enable DMA */
	for (qid = 0; qid < IWP_NUM_QUEUES; qid++) {
		IWP_WRITE(sc, FH_MEM_CBBC_QUEUE(qid),
		    sc->sc_txq[qid].dma_desc.cookie.dmac_address >> 8);
		IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(qid),
		    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
	}

	iwp_mac_access_exit(sc);

	/*
	 * clear "radio off" and "disable command" bits
	 */
	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR,
	    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/*
	 * clear any pending interrupts
	 */
	IWP_WRITE(sc, CSR_INT, 0xffffffff);

	/*
	 * enable interrupts
	 */
	IWP_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);

	/*
	 * NOTE(review): the software RFKILL bit was already cleared
	 * above, and the same write is issued twice here back to back.
	 * Possibly a deliberate hardware workaround, possibly a
	 * copy/paste slip -- confirm against the vendor reference
	 * driver before changing.
	 */
	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	return (IWP_SUCCESS);
}
5220 5220
5221 5221 static int
5222 5222 iwp_fast_recover(iwp_sc_t *sc)
5223 5223 {
5224 5224 ieee80211com_t *ic = &sc->sc_ic;
5225 5225 int err = IWP_FAIL;
5226 5226
5227 5227 mutex_enter(&sc->sc_glock);
5228 5228
5229 5229 /* restore runtime configuration */
5230 5230 bcopy(&sc->sc_config_save, &sc->sc_config,
5231 5231 sizeof (sc->sc_config));
5232 5232
5233 5233 sc->sc_config.assoc_id = 0;
5234 5234 sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);
5235 5235
5236 5236 if ((err = iwp_hw_set_before_auth(sc)) != IWP_SUCCESS) {
5237 5237 cmn_err(CE_WARN, "iwp_fast_recover(): "
5238 5238 "could not setup authentication\n");
5239 5239 mutex_exit(&sc->sc_glock);
5240 5240 return (err);
5241 5241 }
5242 5242
5243 5243 bcopy(&sc->sc_config_save, &sc->sc_config,
5244 5244 sizeof (sc->sc_config));
5245 5245
5246 5246 /* update adapter's configuration */
5247 5247 err = iwp_run_state_config(sc);
5248 5248 if (err != IWP_SUCCESS) {
5249 5249 cmn_err(CE_WARN, "iwp_fast_recover(): "
5250 5250 "failed to setup association\n");
5251 5251 mutex_exit(&sc->sc_glock);
5252 5252 return (err);
5253 5253 }
5254 5254 /* set LED on */
5255 5255 iwp_set_led(sc, 2, 0, 1);
5256 5256
5257 5257 mutex_exit(&sc->sc_glock);
5258 5258
5259 5259 atomic_and_32(&sc->sc_flags, ~IWP_F_HW_ERR_RECOVER);
5260 5260
5261 5261 /* start queue */
5262 5262 IWP_DBG((IWP_DEBUG_FW, "iwp_fast_recover(): "
5263 5263 "resume xmit\n"));
5264 5264 mac_tx_update(ic->ic_mach);
5265 5265
5266 5266 return (IWP_SUCCESS);
5267 5267 }
5268 5268
5269 5269 static int
5270 5270 iwp_run_state_config(iwp_sc_t *sc)
5271 5271 {
5272 5272 struct ieee80211com *ic = &sc->sc_ic;
5273 5273 ieee80211_node_t *in = ic->ic_bss;
5274 5274 int err = IWP_FAIL;
5275 5275
5276 5276 /*
5277 5277 * update adapter's configuration
5278 5278 */
5279 5279 sc->sc_config.assoc_id = in->in_associd & 0x3fff;
5280 5280
5281 5281 /*
5282 5282 * short preamble/slot time are
5283 5283 * negotiated when associating
5284 5284 */
5285 5285 sc->sc_config.flags &=
5286 5286 ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
5287 5287 RXON_FLG_SHORT_SLOT_MSK);
5288 5288
5289 5289 if (ic->ic_flags & IEEE80211_F_SHSLOT) {
5290 5290 sc->sc_config.flags |=
5291 5291 LE_32(RXON_FLG_SHORT_SLOT_MSK);
5292 5292 }
5293 5293
5294 5294 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) {
5295 5295 sc->sc_config.flags |=
5296 5296 LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
5297 5297 }
5298 5298
5299 5299 sc->sc_config.filter_flags |=
5300 5300 LE_32(RXON_FILTER_ASSOC_MSK);
5301 5301
5302 5302 if (ic->ic_opmode != IEEE80211_M_STA) {
5303 5303 sc->sc_config.filter_flags |=
5304 5304 LE_32(RXON_FILTER_BCON_AWARE_MSK);
5305 5305 }
5306 5306
5307 5307 IWP_DBG((IWP_DEBUG_80211, "iwp_run_state_config(): "
5308 5308 "config chan %d flags %x"
5309 5309 " filter_flags %x\n",
5310 5310 sc->sc_config.chan, sc->sc_config.flags,
5311 5311 sc->sc_config.filter_flags));
5312 5312
5313 5313 err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config,
5314 5314 sizeof (iwp_rxon_cmd_t), 1);
5315 5315 if (err != IWP_SUCCESS) {
5316 5316 cmn_err(CE_WARN, "iwp_run_state_config(): "
5317 5317 "could not update configuration\n");
5318 5318 return (err);
5319 5319 }
5320 5320
5321 5321 return (err);
5322 5322 }
5323 5323
5324 5324 /*
5325 5325 * This function overwrites default configurations of
5326 5326 * ieee80211com structure in Net80211 module.
5327 5327 */
5328 5328 static void
5329 5329 iwp_overwrite_ic_default(iwp_sc_t *sc)
5330 5330 {
5331 5331 ieee80211com_t *ic = &sc->sc_ic;
5332 5332
5333 5333 sc->sc_newstate = ic->ic_newstate;
5334 5334 ic->ic_newstate = iwp_newstate;
5335 5335 ic->ic_node_alloc = iwp_node_alloc;
5336 5336 ic->ic_node_free = iwp_node_free;
5337 5337 }
5338 5338
5339 5339
5340 5340 /*
5341 5341 * This function adds AP station into hardware.
5342 5342 */
5343 5343 static int
5344 5344 iwp_add_ap_sta(iwp_sc_t *sc)
5345 5345 {
5346 5346 ieee80211com_t *ic = &sc->sc_ic;
5347 5347 ieee80211_node_t *in = ic->ic_bss;
5348 5348 iwp_add_sta_t node;
5349 5349 int err = IWP_FAIL;
5350 5350
5351 5351 /*
5352 5352 * Add AP node into hardware.
5353 5353 */
5354 5354 (void) memset(&node, 0, sizeof (node));
5355 5355 IEEE80211_ADDR_COPY(node.sta.addr, in->in_bssid);
5356 5356 node.mode = STA_MODE_ADD_MSK;
5357 5357 node.sta.sta_id = IWP_AP_ID;
5358 5358
5359 5359 err = iwp_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
5360 5360 if (err != IWP_SUCCESS) {
5361 5361 cmn_err(CE_WARN, "iwp_add_ap_sta(): "
5362 5362 "failed to add AP node\n");
5363 5363 return (err);
5364 5364 }
5365 5365
5366 5366 return (err);
5367 5367 }
5368 5368
5369 5369 /*
5370 5370 * Check EEPROM version and Calibration version.
5371 5371 */
5372 5372 static int
5373 5373 iwp_eep_ver_chk(iwp_sc_t *sc)
5374 5374 {
5375 5375 if ((IWP_READ_EEP_SHORT(sc, EEP_VERSION) < 0x011a) ||
5376 5376 (sc->sc_eep_calib->tx_pow_calib_hdr.calib_version < 4)) {
5377 5377 cmn_err(CE_WARN, "iwp_eep_ver_chk(): "
5378 5378 "unsupported eeprom detected\n");
5379 5379 return (IWP_FAIL);
5380 5380 }
5381 5381
5382 5382 return (IWP_SUCCESS);
5383 5383 }
5384 5384
5385 5385 /*
5386 5386 * Determine parameters for all supported chips.
5387 5387 */
5388 5388 static void
5389 5389 iwp_set_chip_param(iwp_sc_t *sc)
5390 5390 {
5391 5391 if ((0x008d == sc->sc_dev_id) ||
5392 5392 (0x008e == sc->sc_dev_id)) {
5393 5393 sc->sc_chip_param.phy_mode = PHY_MODE_G |
5394 5394 PHY_MODE_A | PHY_MODE_N;
5395 5395
5396 5396 sc->sc_chip_param.tx_ant = ANT_A | ANT_B;
5397 5397 sc->sc_chip_param.rx_ant = ANT_A | ANT_B;
5398 5398
5399 5399 sc->sc_chip_param.pa_type = PA_TYPE_MIX;
5400 5400 }
5401 5401
5402 5402 if ((0x422c == sc->sc_dev_id) ||
5403 5403 (0x4239 == sc->sc_dev_id)) {
5404 5404 sc->sc_chip_param.phy_mode = PHY_MODE_G |
5405 5405 PHY_MODE_A | PHY_MODE_N;
5406 5406
5407 5407 sc->sc_chip_param.tx_ant = ANT_B | ANT_C;
5408 5408 sc->sc_chip_param.rx_ant = ANT_B | ANT_C;
5409 5409
5410 5410 sc->sc_chip_param.pa_type = PA_TYPE_INTER;
5411 5411 }
5412 5412
5413 5413 if ((0x422b == sc->sc_dev_id) ||
5414 5414 (0x4238 == sc->sc_dev_id)) {
5415 5415 sc->sc_chip_param.phy_mode = PHY_MODE_G |
5416 5416 PHY_MODE_A | PHY_MODE_N;
5417 5417
5418 5418 sc->sc_chip_param.tx_ant = ANT_A | ANT_B | ANT_C;
5419 5419 sc->sc_chip_param.rx_ant = ANT_A | ANT_B | ANT_C;
5420 5420
5421 5421 sc->sc_chip_param.pa_type = PA_TYPE_SYSTEM;
5422 5422 }
5423 5423 }
↓ open down ↓ |
798 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX