Print this page
5253 kmem_alloc/kmem_zalloc won't fail with KM_SLEEP
5254 getrbuf won't fail with KM_SLEEP
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/iwp/iwp.c
+++ new/usr/src/uts/common/io/iwp/iwp.c
1 1 /*
2 2 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
3 3 * Use is subject to license terms.
4 4 */
5 5
6 6 /*
7 7 * Copyright (c) 2009, Intel Corporation
8 8 * All rights reserved.
9 9 */
10 10
11 11 /*
12 12 * Copyright (c) 2006
13 13 * Copyright (c) 2007
14 14 * Damien Bergamini <damien.bergamini@free.fr>
15 15 *
16 16 * Permission to use, copy, modify, and distribute this software for any
17 17 * purpose with or without fee is hereby granted, provided that the above
18 18 * copyright notice and this permission notice appear in all copies.
19 19 *
20 20 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
21 21 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
22 22 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
23 23 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
24 24 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
25 25 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
26 26 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
27 27 */
28 28
29 29 /*
30 30 * Intel(R) WiFi Link 6000 Driver
31 31 */
32 32
33 33 #include <sys/types.h>
34 34 #include <sys/byteorder.h>
35 35 #include <sys/conf.h>
36 36 #include <sys/cmn_err.h>
37 37 #include <sys/stat.h>
38 38 #include <sys/ddi.h>
39 39 #include <sys/sunddi.h>
40 40 #include <sys/strsubr.h>
41 41 #include <sys/ethernet.h>
42 42 #include <inet/common.h>
43 43 #include <inet/nd.h>
44 44 #include <inet/mi.h>
45 45 #include <sys/note.h>
46 46 #include <sys/stream.h>
47 47 #include <sys/strsun.h>
48 48 #include <sys/modctl.h>
49 49 #include <sys/devops.h>
50 50 #include <sys/dlpi.h>
51 51 #include <sys/mac_provider.h>
52 52 #include <sys/mac_wifi.h>
53 53 #include <sys/net80211.h>
54 54 #include <sys/net80211_proto.h>
55 55 #include <sys/varargs.h>
56 56 #include <sys/policy.h>
57 57 #include <sys/pci.h>
58 58
59 59 #include "iwp_calibration.h"
60 60 #include "iwp_hw.h"
61 61 #include "iwp_eeprom.h"
62 62 #include "iwp_var.h"
63 63 #include <inet/wifi_ioctl.h>
64 64
65 65 #ifdef DEBUG
66 66 #define IWP_DEBUG_80211 (1 << 0)
67 67 #define IWP_DEBUG_CMD (1 << 1)
68 68 #define IWP_DEBUG_DMA (1 << 2)
69 69 #define IWP_DEBUG_EEPROM (1 << 3)
70 70 #define IWP_DEBUG_FW (1 << 4)
71 71 #define IWP_DEBUG_HW (1 << 5)
72 72 #define IWP_DEBUG_INTR (1 << 6)
73 73 #define IWP_DEBUG_MRR (1 << 7)
74 74 #define IWP_DEBUG_PIO (1 << 8)
75 75 #define IWP_DEBUG_RX (1 << 9)
76 76 #define IWP_DEBUG_SCAN (1 << 10)
77 77 #define IWP_DEBUG_TX (1 << 11)
78 78 #define IWP_DEBUG_RATECTL (1 << 12)
79 79 #define IWP_DEBUG_RADIO (1 << 13)
80 80 #define IWP_DEBUG_RESUME (1 << 14)
81 81 #define IWP_DEBUG_CALIBRATION (1 << 15)
82 82 /*
83 83 * if want to see debug message of a given section,
84 84 * please set this flag to one of above values
85 85 */
86 86 uint32_t iwp_dbg_flags = 0;
87 87 #define IWP_DBG(x) \
88 88 iwp_dbg x
89 89 #else
90 90 #define IWP_DBG(x)
91 91 #endif
92 92
93 93 static void *iwp_soft_state_p = NULL;
94 94
95 95 /*
96 96 * ucode will be compiled into driver image
97 97 */
98 98 static uint8_t iwp_fw_bin [] = {
99 99 #include "fw-iw/iwp.ucode"
100 100 };
101 101
102 102 /*
103 103 * DMA attributes for a shared page
104 104 */
105 105 static ddi_dma_attr_t sh_dma_attr = {
106 106 DMA_ATTR_V0, /* version of this structure */
107 107 0, /* lowest usable address */
108 108 0xffffffffU, /* highest usable address */
109 109 0xffffffffU, /* maximum DMAable byte count */
110 110 0x1000, /* alignment in bytes */
111 111 0x1000, /* burst sizes (any?) */
112 112 1, /* minimum transfer */
113 113 0xffffffffU, /* maximum transfer */
114 114 0xffffffffU, /* maximum segment length */
115 115 1, /* maximum number of segments */
116 116 1, /* granularity */
117 117 0, /* flags (reserved) */
118 118 };
119 119
120 120 /*
121 121 * DMA attributes for a keep warm DRAM descriptor
122 122 */
123 123 static ddi_dma_attr_t kw_dma_attr = {
124 124 DMA_ATTR_V0, /* version of this structure */
125 125 0, /* lowest usable address */
126 126 0xffffffffU, /* highest usable address */
127 127 0xffffffffU, /* maximum DMAable byte count */
128 128 0x1000, /* alignment in bytes */
129 129 0x1000, /* burst sizes (any?) */
130 130 1, /* minimum transfer */
131 131 0xffffffffU, /* maximum transfer */
132 132 0xffffffffU, /* maximum segment length */
133 133 1, /* maximum number of segments */
134 134 1, /* granularity */
135 135 0, /* flags (reserved) */
136 136 };
137 137
138 138 /*
139 139 * DMA attributes for a ring descriptor
140 140 */
141 141 static ddi_dma_attr_t ring_desc_dma_attr = {
142 142 DMA_ATTR_V0, /* version of this structure */
143 143 0, /* lowest usable address */
144 144 0xffffffffU, /* highest usable address */
145 145 0xffffffffU, /* maximum DMAable byte count */
146 146 0x100, /* alignment in bytes */
147 147 0x100, /* burst sizes (any?) */
148 148 1, /* minimum transfer */
149 149 0xffffffffU, /* maximum transfer */
150 150 0xffffffffU, /* maximum segment length */
151 151 1, /* maximum number of segments */
152 152 1, /* granularity */
153 153 0, /* flags (reserved) */
154 154 };
155 155
156 156 /*
157 157 * DMA attributes for a cmd
158 158 */
159 159 static ddi_dma_attr_t cmd_dma_attr = {
160 160 DMA_ATTR_V0, /* version of this structure */
161 161 0, /* lowest usable address */
162 162 0xffffffffU, /* highest usable address */
163 163 0xffffffffU, /* maximum DMAable byte count */
164 164 4, /* alignment in bytes */
165 165 0x100, /* burst sizes (any?) */
166 166 1, /* minimum transfer */
167 167 0xffffffffU, /* maximum transfer */
168 168 0xffffffffU, /* maximum segment length */
169 169 1, /* maximum number of segments */
170 170 1, /* granularity */
171 171 0, /* flags (reserved) */
172 172 };
173 173
174 174 /*
175 175 * DMA attributes for a rx buffer
176 176 */
177 177 static ddi_dma_attr_t rx_buffer_dma_attr = {
178 178 DMA_ATTR_V0, /* version of this structure */
179 179 0, /* lowest usable address */
180 180 0xffffffffU, /* highest usable address */
181 181 0xffffffffU, /* maximum DMAable byte count */
182 182 0x100, /* alignment in bytes */
183 183 0x100, /* burst sizes (any?) */
184 184 1, /* minimum transfer */
185 185 0xffffffffU, /* maximum transfer */
186 186 0xffffffffU, /* maximum segment length */
187 187 1, /* maximum number of segments */
188 188 1, /* granularity */
189 189 0, /* flags (reserved) */
190 190 };
191 191
192 192 /*
193 193 * DMA attributes for a tx buffer.
194 194 * the maximum number of segments is 4 for the hardware.
195 195 * now all the wifi drivers put the whole frame in a single
196 196 * descriptor, so we define the maximum number of segments 1,
197 197 * just the same as the rx_buffer. we consider leverage the HW
198 198 * ability in the future, that is why we don't define rx and tx
199 199 * buffer_dma_attr as the same.
200 200 */
201 201 static ddi_dma_attr_t tx_buffer_dma_attr = {
202 202 DMA_ATTR_V0, /* version of this structure */
203 203 0, /* lowest usable address */
204 204 0xffffffffU, /* highest usable address */
205 205 0xffffffffU, /* maximum DMAable byte count */
206 206 4, /* alignment in bytes */
207 207 0x100, /* burst sizes (any?) */
208 208 1, /* minimum transfer */
209 209 0xffffffffU, /* maximum transfer */
210 210 0xffffffffU, /* maximum segment length */
211 211 1, /* maximum number of segments */
212 212 1, /* granularity */
213 213 0, /* flags (reserved) */
214 214 };
215 215
216 216 /*
217 217 * DMA attributes for text and data part in the firmware
218 218 */
219 219 static ddi_dma_attr_t fw_dma_attr = {
220 220 DMA_ATTR_V0, /* version of this structure */
221 221 0, /* lowest usable address */
222 222 0xffffffffU, /* highest usable address */
223 223 0x7fffffff, /* maximum DMAable byte count */
224 224 0x10, /* alignment in bytes */
225 225 0x100, /* burst sizes (any?) */
226 226 1, /* minimum transfer */
227 227 0xffffffffU, /* maximum transfer */
228 228 0xffffffffU, /* maximum segment length */
229 229 1, /* maximum number of segments */
230 230 1, /* granularity */
231 231 0, /* flags (reserved) */
232 232 };
233 233
234 234 /*
235 235 * regs access attributes
236 236 */
237 237 static ddi_device_acc_attr_t iwp_reg_accattr = {
238 238 DDI_DEVICE_ATTR_V0,
239 239 DDI_STRUCTURE_LE_ACC,
240 240 DDI_STRICTORDER_ACC,
241 241 DDI_DEFAULT_ACC
242 242 };
243 243
244 244 /*
245 245 * DMA access attributes for descriptor
246 246 */
247 247 static ddi_device_acc_attr_t iwp_dma_descattr = {
248 248 DDI_DEVICE_ATTR_V0,
249 249 DDI_STRUCTURE_LE_ACC,
250 250 DDI_STRICTORDER_ACC,
251 251 DDI_DEFAULT_ACC
252 252 };
253 253
254 254 /*
255 255 * DMA access attributes
256 256 */
257 257 static ddi_device_acc_attr_t iwp_dma_accattr = {
258 258 DDI_DEVICE_ATTR_V0,
259 259 DDI_NEVERSWAP_ACC,
260 260 DDI_STRICTORDER_ACC,
261 261 DDI_DEFAULT_ACC
262 262 };
263 263
264 264 static int iwp_ring_init(iwp_sc_t *);
265 265 static void iwp_ring_free(iwp_sc_t *);
266 266 static int iwp_alloc_shared(iwp_sc_t *);
267 267 static void iwp_free_shared(iwp_sc_t *);
268 268 static int iwp_alloc_kw(iwp_sc_t *);
269 269 static void iwp_free_kw(iwp_sc_t *);
270 270 static int iwp_alloc_fw_dma(iwp_sc_t *);
271 271 static void iwp_free_fw_dma(iwp_sc_t *);
272 272 static int iwp_alloc_rx_ring(iwp_sc_t *);
273 273 static void iwp_reset_rx_ring(iwp_sc_t *);
274 274 static void iwp_free_rx_ring(iwp_sc_t *);
275 275 static int iwp_alloc_tx_ring(iwp_sc_t *, iwp_tx_ring_t *,
276 276 int, int);
277 277 static void iwp_reset_tx_ring(iwp_sc_t *, iwp_tx_ring_t *);
278 278 static void iwp_free_tx_ring(iwp_tx_ring_t *);
279 279 static ieee80211_node_t *iwp_node_alloc(ieee80211com_t *);
280 280 static void iwp_node_free(ieee80211_node_t *);
281 281 static int iwp_newstate(ieee80211com_t *, enum ieee80211_state, int);
282 282 static void iwp_mac_access_enter(iwp_sc_t *);
283 283 static void iwp_mac_access_exit(iwp_sc_t *);
284 284 static uint32_t iwp_reg_read(iwp_sc_t *, uint32_t);
285 285 static void iwp_reg_write(iwp_sc_t *, uint32_t, uint32_t);
286 286 static int iwp_load_init_firmware(iwp_sc_t *);
287 287 static int iwp_load_run_firmware(iwp_sc_t *);
288 288 static void iwp_tx_intr(iwp_sc_t *, iwp_rx_desc_t *);
289 289 static void iwp_cmd_intr(iwp_sc_t *, iwp_rx_desc_t *);
290 290 static uint_t iwp_intr(caddr_t, caddr_t);
291 291 static int iwp_eep_load(iwp_sc_t *);
292 292 static void iwp_get_mac_from_eep(iwp_sc_t *);
293 293 static int iwp_eep_sem_down(iwp_sc_t *);
294 294 static void iwp_eep_sem_up(iwp_sc_t *);
295 295 static uint_t iwp_rx_softintr(caddr_t, caddr_t);
296 296 static uint8_t iwp_rate_to_plcp(int);
297 297 static int iwp_cmd(iwp_sc_t *, int, const void *, int, int);
298 298 static void iwp_set_led(iwp_sc_t *, uint8_t, uint8_t, uint8_t);
299 299 static int iwp_hw_set_before_auth(iwp_sc_t *);
300 300 static int iwp_scan(iwp_sc_t *);
301 301 static int iwp_config(iwp_sc_t *);
302 302 static void iwp_stop_master(iwp_sc_t *);
303 303 static int iwp_power_up(iwp_sc_t *);
304 304 static int iwp_preinit(iwp_sc_t *);
305 305 static int iwp_init(iwp_sc_t *);
306 306 static void iwp_stop(iwp_sc_t *);
307 307 static int iwp_quiesce(dev_info_t *t);
308 308 static void iwp_amrr_init(iwp_amrr_t *);
309 309 static void iwp_amrr_timeout(iwp_sc_t *);
310 310 static void iwp_amrr_ratectl(void *, ieee80211_node_t *);
311 311 static void iwp_ucode_alive(iwp_sc_t *, iwp_rx_desc_t *);
312 312 static void iwp_rx_phy_intr(iwp_sc_t *, iwp_rx_desc_t *);
313 313 static void iwp_rx_mpdu_intr(iwp_sc_t *, iwp_rx_desc_t *);
314 314 static void iwp_release_calib_buffer(iwp_sc_t *);
315 315 static int iwp_init_common(iwp_sc_t *);
316 316 static uint8_t *iwp_eep_addr_trans(iwp_sc_t *, uint32_t);
317 317 static int iwp_put_seg_fw(iwp_sc_t *, uint32_t, uint32_t, uint32_t);
318 318 static int iwp_alive_common(iwp_sc_t *);
319 319 static void iwp_save_calib_result(iwp_sc_t *, iwp_rx_desc_t *);
320 320 static int iwp_attach(dev_info_t *, ddi_attach_cmd_t);
321 321 static int iwp_detach(dev_info_t *, ddi_detach_cmd_t);
322 322 static void iwp_destroy_locks(iwp_sc_t *);
323 323 static int iwp_send(ieee80211com_t *, mblk_t *, uint8_t);
324 324 static void iwp_thread(iwp_sc_t *);
325 325 static int iwp_run_state_config(iwp_sc_t *);
326 326 static int iwp_fast_recover(iwp_sc_t *);
327 327 static void iwp_overwrite_ic_default(iwp_sc_t *);
328 328 static int iwp_add_ap_sta(iwp_sc_t *);
329 329 static int iwp_alloc_dma_mem(iwp_sc_t *, size_t,
330 330 ddi_dma_attr_t *, ddi_device_acc_attr_t *,
331 331 uint_t, iwp_dma_t *);
332 332 static void iwp_free_dma_mem(iwp_dma_t *);
333 333 static int iwp_eep_ver_chk(iwp_sc_t *);
334 334 static void iwp_set_chip_param(iwp_sc_t *);
335 335
336 336 /*
337 337 * GLD specific operations
338 338 */
339 339 static int iwp_m_stat(void *, uint_t, uint64_t *);
340 340 static int iwp_m_start(void *);
341 341 static void iwp_m_stop(void *);
342 342 static int iwp_m_unicst(void *, const uint8_t *);
343 343 static int iwp_m_multicst(void *, boolean_t, const uint8_t *);
344 344 static int iwp_m_promisc(void *, boolean_t);
345 345 static mblk_t *iwp_m_tx(void *, mblk_t *);
346 346 static void iwp_m_ioctl(void *, queue_t *, mblk_t *);
347 347 static int iwp_m_setprop(void *arg, const char *pr_name,
348 348 mac_prop_id_t wldp_pr_num, uint_t wldp_length, const void *wldp_buf);
349 349 static int iwp_m_getprop(void *arg, const char *pr_name,
350 350 mac_prop_id_t wldp_pr_num, uint_t wldp_length, void *wldp_buf);
351 351 static void iwp_m_propinfo(void *, const char *, mac_prop_id_t,
352 352 mac_prop_info_handle_t);
353 353
354 354 /*
355 355 * Supported rates for 802.11b/g modes (in 500Kbps unit).
356 356 */
357 357 static const struct ieee80211_rateset iwp_rateset_11b =
358 358 { 4, { 2, 4, 11, 22 } };
359 359
360 360 static const struct ieee80211_rateset iwp_rateset_11g =
361 361 { 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
362 362
363 363 /*
364 364 * For mfthread only
365 365 */
366 366 extern pri_t minclsyspri;
367 367
368 368 #define DRV_NAME_SP "iwp"
369 369
370 370 /*
371 371 * Module Loading Data & Entry Points
372 372 */
373 373 DDI_DEFINE_STREAM_OPS(iwp_devops, nulldev, nulldev, iwp_attach,
374 374 iwp_detach, nodev, NULL, D_MP, NULL, iwp_quiesce);
375 375
376 376 static struct modldrv iwp_modldrv = {
377 377 &mod_driverops,
378 378 "Intel(R) PumaPeak driver(N)",
379 379 &iwp_devops
380 380 };
381 381
382 382 static struct modlinkage iwp_modlinkage = {
383 383 MODREV_1,
384 384 &iwp_modldrv,
385 385 NULL
386 386 };
387 387
388 388 int
389 389 _init(void)
390 390 {
391 391 int status;
392 392
393 393 status = ddi_soft_state_init(&iwp_soft_state_p,
394 394 sizeof (iwp_sc_t), 1);
395 395 if (status != DDI_SUCCESS) {
396 396 return (status);
397 397 }
398 398
399 399 mac_init_ops(&iwp_devops, DRV_NAME_SP);
400 400 status = mod_install(&iwp_modlinkage);
401 401 if (status != DDI_SUCCESS) {
402 402 mac_fini_ops(&iwp_devops);
403 403 ddi_soft_state_fini(&iwp_soft_state_p);
404 404 }
405 405
406 406 return (status);
407 407 }
408 408
409 409 int
410 410 _fini(void)
411 411 {
412 412 int status;
413 413
414 414 status = mod_remove(&iwp_modlinkage);
415 415 if (DDI_SUCCESS == status) {
416 416 mac_fini_ops(&iwp_devops);
417 417 ddi_soft_state_fini(&iwp_soft_state_p);
418 418 }
419 419
420 420 return (status);
421 421 }
422 422
/*
 * Report module information through the standard mod_info() helper.
 */
int
_info(struct modinfo *mip)
{
	return (mod_info(&iwp_modlinkage, mip));
}
428 428
429 429 /*
430 430 * Mac Call Back entries
431 431 */
/*
 * Mac Call Back entries
 *
 * The MC_* flags advertise which optional callbacks below are valid.
 * NOTE(review): slot-name comments below follow the mac_callbacks_t
 * field order in mac_provider.h -- confirm against that header.
 */
mac_callbacks_t iwp_m_callbacks = {
	MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
	iwp_m_stat,
	iwp_m_start,
	iwp_m_stop,
	iwp_m_promisc,
	iwp_m_multicst,
	iwp_m_unicst,
	iwp_m_tx,
	NULL,			/* reserved */
	iwp_m_ioctl,
	NULL,			/* mc_getcapab: none */
	NULL,			/* mc_open: none */
	NULL,			/* mc_close: none */
	iwp_m_setprop,
	iwp_m_getprop,
	iwp_m_propinfo
};
450 450
#ifdef DEBUG
/*
 * Emit a debug message when the caller's section flag is enabled in
 * the global iwp_dbg_flags mask; otherwise return silently.
 */
void
iwp_dbg(uint32_t flags, const char *fmt, ...)
{
	va_list ap;

	if (!(flags & iwp_dbg_flags)) {
		return;
	}

	va_start(ap, fmt);
	vcmn_err(CE_NOTE, fmt, ap);
	va_end(ap);
}
#endif /* DEBUG */
464 464
465 465 /*
466 466 * device operations
467 467 */
/*
 * Attach/resume entry point for an iwp driver instance.
 *
 * DDI_ATTACH performs the full bring-up, in order: soft state, PCI
 * config-space mapping, chip identification, register mapping,
 * interrupt discovery and allocation, locks/condvars, shared and
 * keep-warm DMA pages, hardware pre-init, EEPROM load, rings,
 * firmware DMA, net80211 attach, interrupt handlers, and GLDv3 (mac)
 * registration.  Each step has a matching attach_failN: label so a
 * failure unwinds exactly what was set up so far, in reverse order.
 *
 * DDI_RESUME restarts the hardware if it was running before the
 * suspend and clears the suspend flag.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
iwp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	iwp_sc_t *sc;
	ieee80211com_t *ic;
	int instance, i;
	char strbuf[32];
	wifi_data_t wd = { 0 };
	mac_register_t *macp;
	int intr_type;
	int intr_count;
	int intr_actual;
	int err = DDI_FAILURE;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		instance = ddi_get_instance(dip);
		sc = ddi_get_soft_state(iwp_soft_state_p,
		    instance);
		ASSERT(sc != NULL);

		/*
		 * restart the chip only if it was up before suspend;
		 * iwp_init()'s return value is deliberately ignored --
		 * resume completes either way
		 */
		if (sc->sc_flags & IWP_F_RUNNING) {
			(void) iwp_init(sc);
		}

		atomic_and_32(&sc->sc_flags, ~IWP_F_SUSPEND);

		IWP_DBG((IWP_DEBUG_RESUME, "iwp_attach(): "
		    "resume\n"));
		return (DDI_SUCCESS);
	default:
		goto attach_fail1;
	}

	instance = ddi_get_instance(dip);
	err = ddi_soft_state_zalloc(iwp_soft_state_p, instance);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to allocate soft state\n");
		goto attach_fail1;
	}

	sc = ddi_get_soft_state(iwp_soft_state_p, instance);
	ASSERT(sc != NULL);

	sc->sc_dip = dip;

	/*
	 * map configure space
	 */
	err = ddi_regs_map_setup(dip, 0, &sc->sc_cfg_base, 0, 0,
	    &iwp_reg_accattr, &sc->sc_cfg_handle);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to map config spaces regs\n");
		goto attach_fail2;
	}

	/*
	 * reject hardware whose PCI device ID is not one of the
	 * supported chipsets (list of IDs handled by this driver)
	 */
	sc->sc_dev_id = ddi_get16(sc->sc_cfg_handle,
	    (uint16_t *)(sc->sc_cfg_base + PCI_CONF_DEVID));
	if ((sc->sc_dev_id != 0x422B) &&
	    (sc->sc_dev_id != 0x422C) &&
	    (sc->sc_dev_id != 0x4238) &&
	    (sc->sc_dev_id != 0x4239) &&
	    (sc->sc_dev_id != 0x008d) &&
	    (sc->sc_dev_id != 0x008e)) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "Do not support this device\n");
		goto attach_fail3;
	}

	iwp_set_chip_param(sc);

	sc->sc_rev = ddi_get8(sc->sc_cfg_handle,
	    (uint8_t *)(sc->sc_cfg_base + PCI_CONF_REVID));

	/*
	 * keep from disturbing C3 state of CPU
	 */
	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base +
	    PCI_CFG_RETRY_TIMEOUT), 0);

	/*
	 * determine the size of buffer for frame and command to ucode
	 */
	sc->sc_clsz = ddi_get16(sc->sc_cfg_handle,
	    (uint16_t *)(sc->sc_cfg_base + PCI_CONF_CACHE_LINESZ));
	if (!sc->sc_clsz) {
		sc->sc_clsz = 16;
	}
	/* register value is in 32-bit words; convert to bytes */
	sc->sc_clsz = (sc->sc_clsz << 2);

	sc->sc_dmabuf_sz = roundup(0x1000 + sizeof (struct ieee80211_frame) +
	    IEEE80211_MTU + IEEE80211_CRC_LEN +
	    (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
	    IEEE80211_WEP_CRCLEN), sc->sc_clsz);

	/*
	 * Map operating registers
	 */
	err = ddi_regs_map_setup(dip, 1, &sc->sc_base,
	    0, 0, &iwp_reg_accattr, &sc->sc_handle);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to map device regs\n");
		goto attach_fail3;
	}

	/*
	 * this is used to differentiate type of hardware
	 */
	sc->sc_hw_rev = IWP_READ(sc, CSR_HW_REV);

	/* the driver supports only legacy fixed interrupts */
	err = ddi_intr_get_supported_types(dip, &intr_type);
	if ((err != DDI_SUCCESS) || (!(intr_type & DDI_INTR_TYPE_FIXED))) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "fixed type interrupt is not supported\n");
		goto attach_fail4;
	}

	err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_FIXED, &intr_count);
	if ((err != DDI_SUCCESS) || (intr_count != 1)) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "no fixed interrupts\n");
		goto attach_fail4;
	}

	/* KM_SLEEP allocation cannot fail, so no NULL check is needed */
	sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);

	err = ddi_intr_alloc(dip, sc->sc_intr_htable, DDI_INTR_TYPE_FIXED, 0,
	    intr_count, &intr_actual, 0);
	if ((err != DDI_SUCCESS) || (intr_actual != 1)) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "ddi_intr_alloc() failed 0x%x\n", err);
		goto attach_fail5;
	}

	err = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_pri);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "ddi_intr_get_pri() failed 0x%x\n", err);
		goto attach_fail6;
	}

	/*
	 * all driver mutexes are initialized at the interrupt priority
	 * so they can be taken from the interrupt handler
	 */
	mutex_init(&sc->sc_glock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(sc->sc_intr_pri));
	mutex_init(&sc->sc_tx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(sc->sc_intr_pri));
	mutex_init(&sc->sc_mt_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(sc->sc_intr_pri));

	cv_init(&sc->sc_cmd_cv, NULL, CV_DRIVER, NULL);
	cv_init(&sc->sc_put_seg_cv, NULL, CV_DRIVER, NULL);
	cv_init(&sc->sc_ucode_cv, NULL, CV_DRIVER, NULL);

	/*
	 * initialize the mfthread
	 */
	cv_init(&sc->sc_mt_cv, NULL, CV_DRIVER, NULL);
	sc->sc_mf_thread = NULL;
	sc->sc_mf_thread_switch = 0;

	/*
	 * Allocate shared buffer for communication between driver and ucode.
	 */
	err = iwp_alloc_shared(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to allocate shared page\n");
		goto attach_fail7;
	}

	(void) memset(sc->sc_shared, 0, sizeof (iwp_shared_t));

	/*
	 * Allocate keep warm page.
	 */
	err = iwp_alloc_kw(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to allocate keep warm page\n");
		goto attach_fail8;
	}

	/*
	 * Do some necessary hardware initializations.
	 */
	err = iwp_preinit(sc);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to initialize hardware\n");
		goto attach_fail9;
	}

	/*
	 * get hardware configurations from eeprom
	 */
	err = iwp_eep_load(sc);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to load eeprom\n");
		goto attach_fail9;
	}

	/*
	 * calibration information from EEPROM
	 */
	sc->sc_eep_calib = (struct iwp_eep_calibration *)
	    iwp_eep_addr_trans(sc, EEP_CALIBRATION);

	err = iwp_eep_ver_chk(sc);
	if (err != IWP_SUCCESS) {
		goto attach_fail9;
	}

	/*
	 * get MAC address of this chipset
	 */
	iwp_get_mac_from_eep(sc);


	/*
	 * initialize TX and RX ring buffers
	 */
	err = iwp_ring_init(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to allocate and initialize ring\n");
		goto attach_fail9;
	}

	/* firmware image is linked into the driver (see iwp_fw_bin) */
	sc->sc_hdr = (iwp_firmware_hdr_t *)iwp_fw_bin;

	/*
	 * copy ucode to dma buffer
	 */
	err = iwp_alloc_fw_dma(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to allocate firmware dma\n");
		goto attach_fail10;
	}

	/*
	 * Initialize the wifi part, which will be used by
	 * 802.11 module
	 */
	ic = &sc->sc_ic;
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
	ic->ic_state = IEEE80211_S_INIT;
	ic->ic_maxrssi = 100; /* experimental number */
	ic->ic_caps = IEEE80211_C_SHPREAMBLE | IEEE80211_C_TXPMGT |
	    IEEE80211_C_PMGT | IEEE80211_C_SHSLOT;

	/*
	 * Support WPA/WPA2
	 */
	ic->ic_caps |= IEEE80211_C_WPA;

	/*
	 * set supported .11b and .11g rates
	 */
	ic->ic_sup_rates[IEEE80211_MODE_11B] = iwp_rateset_11b;
	ic->ic_sup_rates[IEEE80211_MODE_11G] = iwp_rateset_11g;

	/*
	 * set supported .11b and .11g channels (1 through 11)
	 */
	for (i = 1; i <= 11; i++) {
		ic->ic_sup_channels[i].ich_freq =
		    ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
		ic->ic_sup_channels[i].ich_flags =
		    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
		    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ |
		    IEEE80211_CHAN_PASSIVE;
	}

	ic->ic_ibss_chan = &ic->ic_sup_channels[0];
	ic->ic_xmit = iwp_send;

	/*
	 * attach to 802.11 module
	 */
	ieee80211_attach(ic);

	/*
	 * different instance has different WPA door
	 */
	(void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
	    ddi_driver_name(dip),
	    ddi_get_instance(dip));

	/*
	 * Overwrite 80211 default configurations.
	 */
	iwp_overwrite_ic_default(sc);

	/*
	 * initialize 802.11 module
	 */
	ieee80211_media_init(ic);

	/*
	 * initialize default tx key
	 */
	ic->ic_def_txkey = 0;

	err = ddi_intr_add_softint(dip, &sc->sc_soft_hdl, DDI_INTR_SOFTPRI_MAX,
	    iwp_rx_softintr, (caddr_t)sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "add soft interrupt failed\n");
		goto attach_fail12;
	}

	err = ddi_intr_add_handler(sc->sc_intr_htable[0], iwp_intr,
	    (caddr_t)sc, NULL);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "ddi_intr_add_handle() failed\n");
		goto attach_fail13;
	}

	err = ddi_intr_enable(sc->sc_intr_htable[0]);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "ddi_intr_enable() failed\n");
		goto attach_fail14;
	}

	/*
	 * Initialize pointer to device specific functions
	 */
	wd.wd_secalloc = WIFI_SEC_NONE;
	wd.wd_opmode = ic->ic_opmode;
	IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_macaddr);

	/*
	 * create relation to GLD
	 */
	macp = mac_alloc(MAC_VERSION);
	if (NULL == macp) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to do mac_alloc()\n");
		goto attach_fail15;
	}

	macp->m_type_ident = MAC_PLUGIN_IDENT_WIFI;
	macp->m_driver = sc;
	macp->m_dip = dip;
	macp->m_src_addr = ic->ic_macaddr;
	macp->m_callbacks = &iwp_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = IEEE80211_MTU;
	macp->m_pdata = &wd;
	macp->m_pdata_size = sizeof (wd);

	/*
	 * Register the macp to mac
	 */
	err = mac_register(macp, &ic->ic_mach);
	mac_free(macp);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to do mac_register()\n");
		goto attach_fail15;
	}

	/*
	 * Create minor node of type DDI_NT_NET_WIFI
	 * (failure here is only warned about, not fatal)
	 */
	(void) snprintf(strbuf, sizeof (strbuf), DRV_NAME_SP"%d", instance);
	err = ddi_create_minor_node(dip, strbuf, S_IFCHR,
	    instance + 1, DDI_NT_NET_WIFI, 0);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to do ddi_create_minor_node()\n");
	}

	/*
	 * Notify link is down now
	 */
	mac_link_update(ic->ic_mach, LINK_STATE_DOWN);

	/*
	 * create the mf thread to handle the link status,
	 * recovery fatal error, etc.
	 */
	sc->sc_mf_thread_switch = 1;
	if (NULL == sc->sc_mf_thread) {
		sc->sc_mf_thread = thread_create((caddr_t)NULL, 0,
		    iwp_thread, sc, 0, &p0, TS_RUN, minclsyspri);
	}

	atomic_or_32(&sc->sc_flags, IWP_F_ATTACHED);

	return (DDI_SUCCESS);

	/*
	 * failure unwind: each label releases one step, then falls
	 * through to the next.  attach_fail11 has no direct goto; it
	 * exists only as a fall-through point from attach_fail12.
	 */
attach_fail15:
	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
attach_fail14:
	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
attach_fail13:
	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
	sc->sc_soft_hdl = NULL;
attach_fail12:
	ieee80211_detach(ic);
attach_fail11:
	iwp_free_fw_dma(sc);
attach_fail10:
	iwp_ring_free(sc);
attach_fail9:
	iwp_free_kw(sc);
attach_fail8:
	iwp_free_shared(sc);
attach_fail7:
	iwp_destroy_locks(sc);
attach_fail6:
	(void) ddi_intr_free(sc->sc_intr_htable[0]);
attach_fail5:
	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
attach_fail4:
	ddi_regs_map_free(&sc->sc_handle);
attach_fail3:
	ddi_regs_map_free(&sc->sc_cfg_handle);
attach_fail2:
	ddi_soft_state_free(iwp_soft_state_p, instance);
attach_fail1:
	return (DDI_FAILURE);
}
901 901
/*
 * Detach/suspend entry point.
 *
 * DDI_SUSPEND stops rate control and error recovery, marks the
 * instance suspended, and halts the chip if it was running.
 *
 * DDI_DETACH reverses iwp_attach(): stop the monitor thread, disable
 * the mac, stop the chip, release DMA resources, interrupts, the
 * net80211 state, locks, register mappings, and the soft state.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
iwp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	iwp_sc_t *sc;
	ieee80211com_t *ic;
	int err;

	sc = ddi_get_soft_state(iwp_soft_state_p, ddi_get_instance(dip));
	ASSERT(sc != NULL);
	ic = &sc->sc_ic;

	switch (cmd) {
	case DDI_DETACH:
		break;
	case DDI_SUSPEND:
		atomic_and_32(&sc->sc_flags, ~IWP_F_HW_ERR_RECOVER);
		atomic_and_32(&sc->sc_flags, ~IWP_F_RATE_AUTO_CTL);

		atomic_or_32(&sc->sc_flags, IWP_F_SUSPEND);

		if (sc->sc_flags & IWP_F_RUNNING) {
			iwp_stop(sc);
			ieee80211_new_state(ic, IEEE80211_S_INIT, -1);

		}

		IWP_DBG((IWP_DEBUG_RESUME, "iwp_detach(): "
		    "suspend\n"));
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	/* refuse to detach an instance that never fully attached */
	if (!(sc->sc_flags & IWP_F_ATTACHED)) {
		return (DDI_FAILURE);
	}

	/*
	 * Destroy the mf_thread: clear its run switch, then wait for
	 * the thread to signal its own exit.  cv_wait_sig() returns 0
	 * when interrupted by a signal, in which case we stop waiting.
	 */
	sc->sc_mf_thread_switch = 0;

	mutex_enter(&sc->sc_mt_lock);
	while (sc->sc_mf_thread != NULL) {
		if (cv_wait_sig(&sc->sc_mt_cv, &sc->sc_mt_lock) == 0) {
			break;
		}
	}
	mutex_exit(&sc->sc_mt_lock);

	/* mac_disable() fails if upper layers still hold the device */
	err = mac_disable(sc->sc_ic.ic_mach);
	if (err != DDI_SUCCESS) {
		return (err);
	}

	/*
	 * stop chipset
	 */
	iwp_stop(sc);

	/* give the hardware time to settle after the stop */
	DELAY(500000);

	/*
	 * release buffer for calibration
	 */
	iwp_release_calib_buffer(sc);

	/*
	 * Unregister from GLD
	 */
	(void) mac_unregister(sc->sc_ic.ic_mach);

	mutex_enter(&sc->sc_glock);
	iwp_free_fw_dma(sc);
	iwp_ring_free(sc);
	iwp_free_kw(sc);
	iwp_free_shared(sc);
	mutex_exit(&sc->sc_glock);

	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
	(void) ddi_intr_free(sc->sc_intr_htable[0]);
	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));

	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
	sc->sc_soft_hdl = NULL;

	/*
	 * detach from 80211 module
	 */
	ieee80211_detach(&sc->sc_ic);

	iwp_destroy_locks(sc);

	ddi_regs_map_free(&sc->sc_handle);
	ddi_regs_map_free(&sc->sc_cfg_handle);
	ddi_remove_minor_node(dip, NULL);
	ddi_soft_state_free(iwp_soft_state_p, ddi_get_instance(dip));

	return (DDI_SUCCESS);
}
1003 1003
/*
 * destroy all locks
 *
 * Tears down every mutex and condition variable created at attach
 * time.  Called on the detach path after the monitor thread has been
 * joined (see the sc_mf_thread wait above), so no thread can still be
 * blocked on any of these objects.
 */
static void
iwp_destroy_locks(iwp_sc_t *sc)
{
	/* condition variables first ... */
	cv_destroy(&sc->sc_mt_cv);
	cv_destroy(&sc->sc_cmd_cv);
	cv_destroy(&sc->sc_put_seg_cv);
	cv_destroy(&sc->sc_ucode_cv);
	/* ... then the mutexes */
	mutex_destroy(&sc->sc_mt_lock);
	mutex_destroy(&sc->sc_tx_lock);
	mutex_destroy(&sc->sc_glock);
}
1018 1018
1019 1019 /*
1020 1020 * Allocate an area of memory and a DMA handle for accessing it
1021 1021 */
1022 1022 static int
1023 1023 iwp_alloc_dma_mem(iwp_sc_t *sc, size_t memsize,
1024 1024 ddi_dma_attr_t *dma_attr_p, ddi_device_acc_attr_t *acc_attr_p,
1025 1025 uint_t dma_flags, iwp_dma_t *dma_p)
1026 1026 {
1027 1027 caddr_t vaddr;
1028 1028 int err = DDI_FAILURE;
1029 1029
1030 1030 /*
1031 1031 * Allocate handle
1032 1032 */
1033 1033 err = ddi_dma_alloc_handle(sc->sc_dip, dma_attr_p,
1034 1034 DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
1035 1035 if (err != DDI_SUCCESS) {
1036 1036 dma_p->dma_hdl = NULL;
1037 1037 return (DDI_FAILURE);
1038 1038 }
1039 1039
1040 1040 /*
1041 1041 * Allocate memory
1042 1042 */
1043 1043 err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
1044 1044 dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
1045 1045 DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
1046 1046 if (err != DDI_SUCCESS) {
1047 1047 ddi_dma_free_handle(&dma_p->dma_hdl);
1048 1048 dma_p->dma_hdl = NULL;
1049 1049 dma_p->acc_hdl = NULL;
1050 1050 return (DDI_FAILURE);
1051 1051 }
1052 1052
1053 1053 /*
1054 1054 * Bind the two together
1055 1055 */
1056 1056 dma_p->mem_va = vaddr;
1057 1057 err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
1058 1058 vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
1059 1059 &dma_p->cookie, &dma_p->ncookies);
1060 1060 if (err != DDI_DMA_MAPPED) {
1061 1061 ddi_dma_mem_free(&dma_p->acc_hdl);
1062 1062 ddi_dma_free_handle(&dma_p->dma_hdl);
1063 1063 dma_p->acc_hdl = NULL;
1064 1064 dma_p->dma_hdl = NULL;
1065 1065 return (DDI_FAILURE);
1066 1066 }
1067 1067
1068 1068 dma_p->nslots = ~0U;
1069 1069 dma_p->size = ~0U;
1070 1070 dma_p->token = ~0U;
1071 1071 dma_p->offset = 0;
1072 1072 return (DDI_SUCCESS);
1073 1073 }
1074 1074
/*
 * Free one allocated area of DMAable memory
 *
 * Undoes the three steps of iwp_alloc_dma_mem() in reverse order:
 * unbind, release the DMA handle, release the memory.  Safe to call
 * on a partially set-up or already-freed iwp_dma_t: every released
 * resource has its handle/NULLed count cleared, making the function
 * idempotent.
 */
static void
iwp_free_dma_mem(iwp_dma_t *dma_p)
{
	if (dma_p->dma_hdl != NULL) {
		/* undo the bind before releasing the handle */
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
	}

	if (dma_p->acc_hdl != NULL) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		dma_p->acc_hdl = NULL;
	}
}
1095 1095
1096 1096 /*
1097 1097 * copy ucode into dma buffers
1098 1098 */
1099 1099 static int
1100 1100 iwp_alloc_fw_dma(iwp_sc_t *sc)
1101 1101 {
1102 1102 int err = DDI_FAILURE;
1103 1103 iwp_dma_t *dma_p;
1104 1104 char *t;
1105 1105
1106 1106 /*
1107 1107 * firmware image layout:
1108 1108 * |HDR|<-TEXT->|<-DATA->|<-INIT_TEXT->|<-INIT_DATA->|<-BOOT->|
1109 1109 */
1110 1110
1111 1111 /*
1112 1112 * Check firmware image size.
1113 1113 */
1114 1114 if (LE_32(sc->sc_hdr->init_textsz) > RTC_INST_SIZE) {
1115 1115 cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
1116 1116 "firmware init text size 0x%x is too large\n",
1117 1117 LE_32(sc->sc_hdr->init_textsz));
1118 1118
1119 1119 goto fail;
1120 1120 }
1121 1121
1122 1122 if (LE_32(sc->sc_hdr->init_datasz) > RTC_DATA_SIZE) {
1123 1123 cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
1124 1124 "firmware init data size 0x%x is too large\n",
1125 1125 LE_32(sc->sc_hdr->init_datasz));
1126 1126
1127 1127 goto fail;
1128 1128 }
1129 1129
1130 1130 if (LE_32(sc->sc_hdr->textsz) > RTC_INST_SIZE) {
1131 1131 cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
1132 1132 "firmware text size 0x%x is too large\n",
1133 1133 LE_32(sc->sc_hdr->textsz));
1134 1134
1135 1135 goto fail;
1136 1136 }
1137 1137
1138 1138 if (LE_32(sc->sc_hdr->datasz) > RTC_DATA_SIZE) {
1139 1139 cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
1140 1140 "firmware data size 0x%x is too large\n",
1141 1141 LE_32(sc->sc_hdr->datasz));
1142 1142
1143 1143 goto fail;
1144 1144 }
1145 1145
1146 1146 /*
1147 1147 * copy text of runtime ucode
1148 1148 */
1149 1149 t = (char *)(sc->sc_hdr + 1);
1150 1150 err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->textsz),
1151 1151 &fw_dma_attr, &iwp_dma_accattr,
1152 1152 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1153 1153 &sc->sc_dma_fw_text);
1154 1154 if (err != DDI_SUCCESS) {
1155 1155 cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
1156 1156 "failed to allocate text dma memory.\n");
1157 1157 goto fail;
1158 1158 }
1159 1159
1160 1160 dma_p = &sc->sc_dma_fw_text;
1161 1161
1162 1162 IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
1163 1163 "text[ncookies:%d addr:%lx size:%lx]\n",
1164 1164 dma_p->ncookies, dma_p->cookie.dmac_address,
1165 1165 dma_p->cookie.dmac_size));
1166 1166
1167 1167 (void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->textsz));
1168 1168
1169 1169 /*
1170 1170 * copy data and bak-data of runtime ucode
1171 1171 */
1172 1172 t += LE_32(sc->sc_hdr->textsz);
1173 1173 err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
1174 1174 &fw_dma_attr, &iwp_dma_accattr,
1175 1175 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1176 1176 &sc->sc_dma_fw_data);
1177 1177 if (err != DDI_SUCCESS) {
1178 1178 cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
1179 1179 "failed to allocate data dma memory\n");
1180 1180 goto fail;
1181 1181 }
1182 1182
1183 1183 dma_p = &sc->sc_dma_fw_data;
1184 1184
1185 1185 IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
1186 1186 "data[ncookies:%d addr:%lx size:%lx]\n",
1187 1187 dma_p->ncookies, dma_p->cookie.dmac_address,
1188 1188 dma_p->cookie.dmac_size));
1189 1189
1190 1190 (void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));
1191 1191
1192 1192 err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
1193 1193 &fw_dma_attr, &iwp_dma_accattr,
1194 1194 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1195 1195 &sc->sc_dma_fw_data_bak);
1196 1196 if (err != DDI_SUCCESS) {
1197 1197 cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
1198 1198 "failed to allocate data bakup dma memory\n");
1199 1199 goto fail;
1200 1200 }
1201 1201
1202 1202 dma_p = &sc->sc_dma_fw_data_bak;
1203 1203
1204 1204 IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
1205 1205 "data_bak[ncookies:%d addr:%lx "
1206 1206 "size:%lx]\n",
1207 1207 dma_p->ncookies, dma_p->cookie.dmac_address,
1208 1208 dma_p->cookie.dmac_size));
1209 1209
1210 1210 (void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));
1211 1211
1212 1212 /*
1213 1213 * copy text of init ucode
1214 1214 */
1215 1215 t += LE_32(sc->sc_hdr->datasz);
1216 1216 err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_textsz),
1217 1217 &fw_dma_attr, &iwp_dma_accattr,
1218 1218 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1219 1219 &sc->sc_dma_fw_init_text);
1220 1220 if (err != DDI_SUCCESS) {
1221 1221 cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
1222 1222 "failed to allocate init text dma memory\n");
1223 1223 goto fail;
1224 1224 }
1225 1225
1226 1226 dma_p = &sc->sc_dma_fw_init_text;
1227 1227
1228 1228 IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
1229 1229 "init_text[ncookies:%d addr:%lx "
1230 1230 "size:%lx]\n",
1231 1231 dma_p->ncookies, dma_p->cookie.dmac_address,
1232 1232 dma_p->cookie.dmac_size));
1233 1233
1234 1234 (void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_textsz));
1235 1235
1236 1236 /*
1237 1237 * copy data of init ucode
1238 1238 */
1239 1239 t += LE_32(sc->sc_hdr->init_textsz);
1240 1240 err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_datasz),
1241 1241 &fw_dma_attr, &iwp_dma_accattr,
1242 1242 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1243 1243 &sc->sc_dma_fw_init_data);
1244 1244 if (err != DDI_SUCCESS) {
1245 1245 cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
1246 1246 "failed to allocate init data dma memory\n");
1247 1247 goto fail;
1248 1248 }
1249 1249
1250 1250 dma_p = &sc->sc_dma_fw_init_data;
1251 1251
1252 1252 IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
1253 1253 "init_data[ncookies:%d addr:%lx "
1254 1254 "size:%lx]\n",
1255 1255 dma_p->ncookies, dma_p->cookie.dmac_address,
1256 1256 dma_p->cookie.dmac_size));
1257 1257
1258 1258 (void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_datasz));
1259 1259
1260 1260 sc->sc_boot = t + LE_32(sc->sc_hdr->init_datasz);
1261 1261 fail:
1262 1262 return (err);
1263 1263 }
1264 1264
1265 1265 static void
1266 1266 iwp_free_fw_dma(iwp_sc_t *sc)
1267 1267 {
1268 1268 iwp_free_dma_mem(&sc->sc_dma_fw_text);
1269 1269 iwp_free_dma_mem(&sc->sc_dma_fw_data);
1270 1270 iwp_free_dma_mem(&sc->sc_dma_fw_data_bak);
1271 1271 iwp_free_dma_mem(&sc->sc_dma_fw_init_text);
1272 1272 iwp_free_dma_mem(&sc->sc_dma_fw_init_data);
1273 1273 }
1274 1274
1275 1275 /*
1276 1276 * Allocate a shared buffer between host and NIC.
1277 1277 */
1278 1278 static int
1279 1279 iwp_alloc_shared(iwp_sc_t *sc)
1280 1280 {
1281 1281 #ifdef DEBUG
1282 1282 iwp_dma_t *dma_p;
1283 1283 #endif
1284 1284 int err = DDI_FAILURE;
1285 1285
1286 1286 /*
1287 1287 * must be aligned on a 4K-page boundary
1288 1288 */
1289 1289 err = iwp_alloc_dma_mem(sc, sizeof (iwp_shared_t),
1290 1290 &sh_dma_attr, &iwp_dma_descattr,
1291 1291 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1292 1292 &sc->sc_dma_sh);
1293 1293 if (err != DDI_SUCCESS) {
1294 1294 goto fail;
1295 1295 }
1296 1296
1297 1297 sc->sc_shared = (iwp_shared_t *)sc->sc_dma_sh.mem_va;
1298 1298
1299 1299 #ifdef DEBUG
1300 1300 dma_p = &sc->sc_dma_sh;
1301 1301 #endif
1302 1302 IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_shared(): "
1303 1303 "sh[ncookies:%d addr:%lx size:%lx]\n",
1304 1304 dma_p->ncookies, dma_p->cookie.dmac_address,
1305 1305 dma_p->cookie.dmac_size));
1306 1306
1307 1307 return (err);
1308 1308 fail:
1309 1309 iwp_free_shared(sc);
1310 1310 return (err);
1311 1311 }
1312 1312
/*
 * Release the host/NIC shared area set up by iwp_alloc_shared().
 * Safe to call even if the allocation never happened or failed.
 */
static void
iwp_free_shared(iwp_sc_t *sc)
{
	iwp_free_dma_mem(&sc->sc_dma_sh);
}
1318 1318
1319 1319 /*
1320 1320 * Allocate a keep warm page.
1321 1321 */
1322 1322 static int
1323 1323 iwp_alloc_kw(iwp_sc_t *sc)
1324 1324 {
1325 1325 #ifdef DEBUG
1326 1326 iwp_dma_t *dma_p;
1327 1327 #endif
1328 1328 int err = DDI_FAILURE;
1329 1329
1330 1330 /*
1331 1331 * must be aligned on a 4K-page boundary
1332 1332 */
1333 1333 err = iwp_alloc_dma_mem(sc, IWP_KW_SIZE,
1334 1334 &kw_dma_attr, &iwp_dma_descattr,
1335 1335 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1336 1336 &sc->sc_dma_kw);
1337 1337 if (err != DDI_SUCCESS) {
1338 1338 goto fail;
1339 1339 }
1340 1340
1341 1341 #ifdef DEBUG
1342 1342 dma_p = &sc->sc_dma_kw;
1343 1343 #endif
1344 1344 IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_kw(): "
1345 1345 "kw[ncookies:%d addr:%lx size:%lx]\n",
1346 1346 dma_p->ncookies, dma_p->cookie.dmac_address,
1347 1347 dma_p->cookie.dmac_size));
1348 1348
1349 1349 return (err);
1350 1350 fail:
1351 1351 iwp_free_kw(sc);
1352 1352 return (err);
1353 1353 }
1354 1354
/*
 * Release the keep-warm page set up by iwp_alloc_kw().
 * Safe to call even if the allocation never happened or failed.
 */
static void
iwp_free_kw(iwp_sc_t *sc)
{
	iwp_free_dma_mem(&sc->sc_dma_kw);
}
1360 1360
/*
 * initialize RX ring buffers
 *
 * Allocates the RX descriptor ring (one 32-bit descriptor per slot)
 * and an RX frame buffer for every slot, then programs each
 * descriptor with the frame buffer's DMA address.  On any failure
 * everything allocated so far is released via iwp_free_rx_ring().
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
iwp_alloc_rx_ring(iwp_sc_t *sc)
{
	iwp_rx_ring_t *ring;
	iwp_rx_data_t *data;
#ifdef DEBUG
	iwp_dma_t *dma_p;
#endif
	int i, err = DDI_FAILURE;

	ring = &sc->sc_rxq;
	ring->cur = 0;

	/*
	 * allocate RX description ring buffer
	 */
	err = iwp_alloc_dma_mem(sc, RX_QUEUE_SIZE * sizeof (uint32_t),
	    &ring_desc_dma_attr, &iwp_dma_descattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &ring->dma_desc);
	if (err != DDI_SUCCESS) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
		    "dma alloc rx ring desc "
		    "failed\n"));
		goto fail;
	}

	ring->desc = (uint32_t *)ring->dma_desc.mem_va;
#ifdef DEBUG
	dma_p = &ring->dma_desc;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
	    "rx bd[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	/*
	 * Allocate Rx frame buffers.
	 */
	for (i = 0; i < RX_QUEUE_SIZE; i++) {
		data = &ring->data[i];
		err = iwp_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
		    &rx_buffer_dma_attr, &iwp_dma_accattr,
		    DDI_DMA_READ | DDI_DMA_STREAMING,
		    &data->dma_data);
		if (err != DDI_SUCCESS) {
			IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
			    "dma alloc rx ring "
			    "buf[%d] failed\n", i));
			goto fail;
		}
		/*
		 * the physical address bit [8-36] are used,
		 * instead of bit [0-31] in 3945.
		 */
		ring->desc[i] = (uint32_t)
		    (data->dma_data.cookie.dmac_address >> 8);
	}

#ifdef DEBUG
	dma_p = &ring->data[0].dma_data;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
	    "rx buffer[0][ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	/* make the programmed descriptors visible to the device */
	IWP_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);

	return (err);

fail:
	iwp_free_rx_ring(sc);
	return (err);
}
1440 1440
/*
 * disable RX ring
 *
 * Writes 0 to the RX channel configuration register and polls (up to
 * ~2 seconds) for the hardware to report the channel idle before
 * resetting the software ring index.  Must be called with MAC access
 * held, which this function acquires itself.
 */
static void
iwp_reset_rx_ring(iwp_sc_t *sc)
{
	int n;

	iwp_mac_access_enter(sc);
	IWP_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	for (n = 0; n < 2000; n++) {
		/*
		 * bit 24 presumably is the channel-0 idle flag in the
		 * RX status register -- confirm against FH_MEM_RSSR
		 * documentation.
		 */
		if (IWP_READ(sc, FH_MEM_RSSR_RX_STATUS_REG) & (1 << 24)) {
			break;
		}
		DELAY(1000);
	}
#ifdef DEBUG
	if (2000 == n) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_reset_rx_ring(): "
		    "timeout resetting Rx ring\n"));
	}
#endif
	iwp_mac_access_exit(sc);

	sc->sc_rxq.cur = 0;
}
1467 1467
1468 1468 static void
1469 1469 iwp_free_rx_ring(iwp_sc_t *sc)
1470 1470 {
1471 1471 int i;
1472 1472
1473 1473 for (i = 0; i < RX_QUEUE_SIZE; i++) {
1474 1474 if (sc->sc_rxq.data[i].dma_data.dma_hdl) {
1475 1475 IWP_DMA_SYNC(sc->sc_rxq.data[i].dma_data,
1476 1476 DDI_DMA_SYNC_FORCPU);
1477 1477 }
1478 1478
1479 1479 iwp_free_dma_mem(&sc->sc_rxq.data[i].dma_data);
1480 1480 }
1481 1481
1482 1482 if (sc->sc_rxq.dma_desc.dma_hdl) {
1483 1483 IWP_DMA_SYNC(sc->sc_rxq.dma_desc, DDI_DMA_SYNC_FORDEV);
1484 1484 }
1485 1485
1486 1486 iwp_free_dma_mem(&sc->sc_rxq.dma_desc);
1487 1487 }
1488 1488
/*
 * initialize TX ring buffers
 *
 * Sets up one TX queue: the hardware descriptor ring, the ucode
 * command buffers, and a frame buffer per slot, wiring each slot's
 * desc/cmd pointers and DMA addresses together.
 *
 * slots: number of usable windows in the queue (ring->window)
 * qid:   hardware queue id
 *
 * Returns DDI_SUCCESS or DDI_FAILURE; on failure everything
 * allocated so far is released via iwp_free_tx_ring().
 */
static int
iwp_alloc_tx_ring(iwp_sc_t *sc, iwp_tx_ring_t *ring,
    int slots, int qid)
{
	iwp_tx_data_t *data;
	iwp_tx_desc_t *desc_h;
	uint32_t paddr_desc_h;
	iwp_cmd_t *cmd_h;
	uint32_t paddr_cmd_h;
#ifdef DEBUG
	iwp_dma_t *dma_p;
#endif
	int i, err = DDI_FAILURE;
	ring->qid = qid;
	ring->count = TFD_QUEUE_SIZE_MAX;
	ring->window = slots;
	ring->queued = 0;
	ring->cur = 0;
	ring->desc_cur = 0;

	/*
	 * allocate buffer for TX descriptor ring
	 */
	err = iwp_alloc_dma_mem(sc,
	    TFD_QUEUE_SIZE_MAX * sizeof (iwp_tx_desc_t),
	    &ring_desc_dma_attr, &iwp_dma_descattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &ring->dma_desc);
	if (err != DDI_SUCCESS) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
		    "dma alloc tx ring desc[%d] "
		    "failed\n", qid));
		goto fail;
	}

#ifdef DEBUG
	dma_p = &ring->dma_desc;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
	    "tx bd[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	desc_h = (iwp_tx_desc_t *)ring->dma_desc.mem_va;
	paddr_desc_h = ring->dma_desc.cookie.dmac_address;

	/*
	 * allocate buffer for ucode command
	 */
	err = iwp_alloc_dma_mem(sc,
	    TFD_QUEUE_SIZE_MAX * sizeof (iwp_cmd_t),
	    &cmd_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &ring->dma_cmd);
	if (err != DDI_SUCCESS) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
		    "dma alloc tx ring cmd[%d]"
		    " failed\n", qid));
		goto fail;
	}

#ifdef DEBUG
	dma_p = &ring->dma_cmd;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
	    "tx cmd[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	cmd_h = (iwp_cmd_t *)ring->dma_cmd.mem_va;
	paddr_cmd_h = ring->dma_cmd.cookie.dmac_address;

	/*
	 * Allocate Tx frame buffers.
	 * KM_NOSLEEP can legitimately fail, hence the NULL check
	 * (unlike KM_SLEEP allocations, which cannot fail).
	 */
	ring->data = kmem_zalloc(sizeof (iwp_tx_data_t) * TFD_QUEUE_SIZE_MAX,
	    KM_NOSLEEP);
	if (NULL == ring->data) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
		    "could not allocate "
		    "tx data slots\n"));
		goto fail;
	}

	for (i = 0; i < TFD_QUEUE_SIZE_MAX; i++) {
		data = &ring->data[i];
		err = iwp_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
		    &tx_buffer_dma_attr, &iwp_dma_accattr,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
		    &data->dma_data);
		if (err != DDI_SUCCESS) {
			IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
			    "dma alloc tx "
			    "ring buf[%d] failed\n", i));
			goto fail;
		}

		/* point each slot at its descriptor and command entry */
		data->desc = desc_h + i;
		data->paddr_desc = paddr_desc_h +
		    _PTRDIFF(data->desc, desc_h);
		data->cmd = cmd_h + i;
		data->paddr_cmd = paddr_cmd_h +
		    _PTRDIFF(data->cmd, cmd_h);
	}
#ifdef DEBUG
	dma_p = &ring->data[0].dma_data;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
	    "tx buffer[0][ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	return (err);

fail:
	iwp_free_tx_ring(ring);

	return (err);
}
1612 1612
/*
 * disable TX ring
 *
 * Writes 0 to the queue's TX channel configuration register, polls
 * (up to ~2ms) for the channel-idle status, syncs the frame buffers
 * back to the device, and resets the software ring indices.
 */
static void
iwp_reset_tx_ring(iwp_sc_t *sc, iwp_tx_ring_t *ring)
{
	iwp_tx_data_t *data;
	int i, n;

	iwp_mac_access_enter(sc);

	IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(ring->qid), 0);
	for (n = 0; n < 200; n++) {
		if (IWP_READ(sc, IWP_FH_TSSR_TX_STATUS_REG) &
		    IWP_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ring->qid)) {
			break;
		}
		DELAY(10);
	}

#ifdef DEBUG
	if (200 == n) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_reset_tx_ring(): "
		    "timeout reset tx ring %d\n",
		    ring->qid));
	}
#endif

	iwp_mac_access_exit(sc);

	/* bypass the DMA sync when the device has been quiesced */
	if (!(sc->sc_flags & IWP_F_QUIESCED)) {
		for (i = 0; i < ring->count; i++) {
			data = &ring->data[i];
			IWP_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
		}
	}

	ring->queued = 0;
	ring->cur = 0;
	ring->desc_cur = 0;
}
1655 1655
1656 1656 static void
1657 1657 iwp_free_tx_ring(iwp_tx_ring_t *ring)
1658 1658 {
1659 1659 int i;
1660 1660
1661 1661 if (ring->dma_desc.dma_hdl != NULL) {
1662 1662 IWP_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
1663 1663 }
1664 1664 iwp_free_dma_mem(&ring->dma_desc);
1665 1665
1666 1666 if (ring->dma_cmd.dma_hdl != NULL) {
1667 1667 IWP_DMA_SYNC(ring->dma_cmd, DDI_DMA_SYNC_FORDEV);
1668 1668 }
1669 1669 iwp_free_dma_mem(&ring->dma_cmd);
1670 1670
1671 1671 if (ring->data != NULL) {
1672 1672 for (i = 0; i < ring->count; i++) {
1673 1673 if (ring->data[i].dma_data.dma_hdl) {
1674 1674 IWP_DMA_SYNC(ring->data[i].dma_data,
1675 1675 DDI_DMA_SYNC_FORDEV);
1676 1676 }
1677 1677 iwp_free_dma_mem(&ring->data[i].dma_data);
1678 1678 }
1679 1679 kmem_free(ring->data, ring->count * sizeof (iwp_tx_data_t));
1680 1680 }
1681 1681 }
1682 1682
1683 1683 /*
1684 1684 * initialize TX and RX ring
1685 1685 */
1686 1686 static int
1687 1687 iwp_ring_init(iwp_sc_t *sc)
1688 1688 {
1689 1689 int i, err = DDI_FAILURE;
1690 1690
1691 1691 for (i = 0; i < IWP_NUM_QUEUES; i++) {
1692 1692 if (IWP_CMD_QUEUE_NUM == i) {
1693 1693 continue;
1694 1694 }
1695 1695
1696 1696 err = iwp_alloc_tx_ring(sc, &sc->sc_txq[i], TFD_TX_CMD_SLOTS,
1697 1697 i);
1698 1698 if (err != DDI_SUCCESS) {
1699 1699 goto fail;
1700 1700 }
1701 1701 }
1702 1702
1703 1703 /*
1704 1704 * initialize command queue
1705 1705 */
1706 1706 err = iwp_alloc_tx_ring(sc, &sc->sc_txq[IWP_CMD_QUEUE_NUM],
1707 1707 TFD_CMD_SLOTS, IWP_CMD_QUEUE_NUM);
1708 1708 if (err != DDI_SUCCESS) {
1709 1709 goto fail;
1710 1710 }
1711 1711
1712 1712 err = iwp_alloc_rx_ring(sc);
1713 1713 if (err != DDI_SUCCESS) {
1714 1714 goto fail;
1715 1715 }
1716 1716
1717 1717 fail:
1718 1718 return (err);
1719 1719 }
1720 1720
1721 1721 static void
1722 1722 iwp_ring_free(iwp_sc_t *sc)
1723 1723 {
1724 1724 int i = IWP_NUM_QUEUES;
1725 1725
1726 1726 iwp_free_rx_ring(sc);
1727 1727 while (--i >= 0) {
1728 1728 iwp_free_tx_ring(&sc->sc_txq[i]);
↓ open down ↓ |
1728 lines elided |
↑ open up ↑ |
1729 1729 }
1730 1730 }
1731 1731
1732 1732 /* ARGSUSED */
1733 1733 static ieee80211_node_t *
1734 1734 iwp_node_alloc(ieee80211com_t *ic)
1735 1735 {
1736 1736 iwp_amrr_t *amrr;
1737 1737
1738 1738 amrr = kmem_zalloc(sizeof (iwp_amrr_t), KM_SLEEP);
1739 - if (NULL == amrr) {
1740 - cmn_err(CE_WARN, "iwp_node_alloc(): "
1741 - "failed to allocate memory for amrr structure\n");
1742 - return (NULL);
1743 - }
1744 1739
1745 1740 iwp_amrr_init(amrr);
1746 1741
1747 1742 return (&amrr->in);
1748 1743 }
1749 1744
1750 1745 static void
1751 1746 iwp_node_free(ieee80211_node_t *in)
1752 1747 {
1753 1748 ieee80211com_t *ic;
1754 1749
1755 1750 if ((NULL == in) ||
1756 1751 (NULL == in->in_ic)) {
1757 1752 cmn_err(CE_WARN, "iwp_node_free() "
1758 1753 "Got a NULL point from Net80211 module\n");
1759 1754 return;
1760 1755 }
1761 1756 ic = in->in_ic;
1762 1757
1763 1758 if (ic->ic_node_cleanup != NULL) {
1764 1759 ic->ic_node_cleanup(in);
1765 1760 }
1766 1761
1767 1762 if (in->in_wpa_ie != NULL) {
1768 1763 ieee80211_free(in->in_wpa_ie);
1769 1764 }
1770 1765
1771 1766 if (in->in_wme_ie != NULL) {
1772 1767 ieee80211_free(in->in_wme_ie);
1773 1768 }
1774 1769
1775 1770 if (in->in_htcap_ie != NULL) {
1776 1771 ieee80211_free(in->in_htcap_ie);
1777 1772 }
1778 1773
1779 1774 kmem_free(in, sizeof (iwp_amrr_t));
1780 1775 }
1781 1776
1782 1777
/*
 * change station's state. this function will be invoked by 80211 module
 * when need to change staton's state.
 *
 * Runs the driver-side work for each target state (nstate) under
 * sc_glock, then chains to the saved net80211 handler sc_newstate.
 * Note the SCAN->SCAN path temporarily drops sc_glock around the
 * net80211 callback and returns early without re-invoking it at the
 * bottom.
 */
static int
iwp_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
{
	iwp_sc_t *sc;
	ieee80211_node_t *in;
	enum ieee80211_state ostate;
	iwp_add_sta_t node;
	int i, err = IWP_FAIL;

	if (NULL == ic) {
		return (err);
	}
	sc = (iwp_sc_t *)ic;
	in = ic->ic_bss;
	ostate = ic->ic_state;

	mutex_enter(&sc->sc_glock);

	switch (nstate) {
	case IEEE80211_S_SCAN:
		switch (ostate) {
		case IEEE80211_S_INIT:
			atomic_or_32(&sc->sc_flags, IWP_F_SCANNING);
			iwp_set_led(sc, 2, 10, 2);

			/*
			 * clear association to receive beacons from
			 * all BSS'es
			 */
			sc->sc_config.assoc_id = 0;
			sc->sc_config.filter_flags &=
			    ~LE_32(RXON_FILTER_ASSOC_MSK);

			IWP_DBG((IWP_DEBUG_80211, "iwp_newstate(): "
			    "config chan %d "
			    "flags %x filter_flags %x\n",
			    LE_16(sc->sc_config.chan),
			    LE_32(sc->sc_config.flags),
			    LE_32(sc->sc_config.filter_flags)));

			err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config,
			    sizeof (iwp_rxon_cmd_t), 1);
			if (err != IWP_SUCCESS) {
				cmn_err(CE_WARN, "iwp_newstate(): "
				    "could not clear association\n");
				atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
				mutex_exit(&sc->sc_glock);
				return (err);
			}

			/* add broadcast node to send probe request */
			(void) memset(&node, 0, sizeof (node));
			(void) memset(&node.sta.addr, 0xff, IEEE80211_ADDR_LEN);
			node.sta.sta_id = IWP_BROADCAST_ID;
			err = iwp_cmd(sc, REPLY_ADD_STA, &node,
			    sizeof (node), 1);
			if (err != IWP_SUCCESS) {
				cmn_err(CE_WARN, "iwp_newstate(): "
				    "could not add broadcast node\n");
				atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
				mutex_exit(&sc->sc_glock);
				return (err);
			}
			break;
		case IEEE80211_S_SCAN:
			/*
			 * drop the lock around the net80211 callback;
			 * it may call back into the driver
			 */
			mutex_exit(&sc->sc_glock);
			/* step to next channel before actual FW scan */
			err = sc->sc_newstate(ic, nstate, arg);
			mutex_enter(&sc->sc_glock);
			if ((err != 0) || ((err = iwp_scan(sc)) != 0)) {
				cmn_err(CE_WARN, "iwp_newstate(): "
				    "could not initiate scan\n");
				atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
				ieee80211_cancel_scan(ic);
			}
			mutex_exit(&sc->sc_glock);
			/* sc_newstate was already invoked above */
			return (err);
		default:
			break;
		}
		sc->sc_clk = 0;
		break;

	case IEEE80211_S_AUTH:
		if (ostate == IEEE80211_S_SCAN) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
		}

		/*
		 * reset state to handle reassociations correctly
		 */
		sc->sc_config.assoc_id = 0;
		sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);

		/*
		 * before sending authentication and association request frame,
		 * we need do something in the hardware, such as setting the
		 * channel same to the target AP...
		 */
		if ((err = iwp_hw_set_before_auth(sc)) != 0) {
			IWP_DBG((IWP_DEBUG_80211, "iwp_newstate(): "
			    "could not send authentication request\n"));
			mutex_exit(&sc->sc_glock);
			return (err);
		}
		break;

	case IEEE80211_S_RUN:
		if (ostate == IEEE80211_S_SCAN) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
		}

		if (IEEE80211_M_MONITOR == ic->ic_opmode) {
			/* let LED blink when monitoring */
			iwp_set_led(sc, 2, 10, 10);
			break;
		}

		IWP_DBG((IWP_DEBUG_80211, "iwp_newstate(): "
		    "associated.\n"));

		err = iwp_run_state_config(sc);
		if (err != IWP_SUCCESS) {
			cmn_err(CE_WARN, "iwp_newstate(): "
			    "failed to set up association\n");
			mutex_exit(&sc->sc_glock);
			return (err);
		}

		/*
		 * start automatic rate control
		 */
		if (IEEE80211_FIXED_RATE_NONE == ic->ic_fixed_rate) {
			atomic_or_32(&sc->sc_flags, IWP_F_RATE_AUTO_CTL);

			/*
			 * set rate to some reasonable initial value
			 */
			i = in->in_rates.ir_nrates - 1;
			while (i > 0 && IEEE80211_RATE(i) > 72) {
				i--;
			}
			in->in_txrate = i;

		} else {
			atomic_and_32(&sc->sc_flags, ~IWP_F_RATE_AUTO_CTL);
		}

		/*
		 * set LED on after associated
		 */
		iwp_set_led(sc, 2, 0, 1);
		break;

	case IEEE80211_S_INIT:
		if (ostate == IEEE80211_S_SCAN) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
		}
		/*
		 * set LED off after init
		 */
		iwp_set_led(sc, 2, 1, 0);
		break;

	case IEEE80211_S_ASSOC:
		if (ostate == IEEE80211_S_SCAN) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
		}
		break;
	}

	mutex_exit(&sc->sc_glock);

	return (sc->sc_newstate(ic, nstate, arg));
}
1962 1957
/*
 * exclusive access to mac begin.
 *
 * Sets the MAC access request bit in CSR_GP_CNTRL and busy-waits (up
 * to ~10ms) for the clock-ready handshake.  Pair every call with
 * iwp_mac_access_exit().  A timeout is only logged under DEBUG;
 * callers proceed regardless.
 */
static void
iwp_mac_access_enter(iwp_sc_t *sc)
{
	uint32_t tmp;
	int n;

	tmp = IWP_READ(sc, CSR_GP_CNTRL);
	IWP_WRITE(sc, CSR_GP_CNTRL,
	    tmp | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* wait until we succeed */
	for (n = 0; n < 1000; n++) {
		if ((IWP_READ(sc, CSR_GP_CNTRL) &
		    (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
		    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)) ==
		    CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN) {
			break;
		}
		DELAY(10);
	}

#ifdef DEBUG
	if (1000 == n) {
		IWP_DBG((IWP_DEBUG_PIO, "iwp_mac_access_enter(): "
		    "could not lock memory\n"));
	}
#endif
}
1994 1989
/*
 * exclusive access to mac end.
 *
 * Clears the MAC access request bit set by iwp_mac_access_enter().
 */
static void
iwp_mac_access_exit(iwp_sc_t *sc)
{
	uint32_t tmp = IWP_READ(sc, CSR_GP_CNTRL);
	IWP_WRITE(sc, CSR_GP_CNTRL,
	    tmp & ~CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
2005 2000
2006 2001 /*
2007 2002 * this function defined here for future use.
2008 2003 * static uint32_t
2009 2004 * iwp_mem_read(iwp_sc_t *sc, uint32_t addr)
2010 2005 * {
2011 2006 * IWP_WRITE(sc, HBUS_TARG_MEM_RADDR, addr);
2012 2007 * return (IWP_READ(sc, HBUS_TARG_MEM_RDAT));
2013 2008 * }
2014 2009 */
2015 2010
/*
 * write mac memory
 *
 * Indirect write: latch the target address into HBUS_TARG_MEM_WADDR,
 * then write the 32-bit value through HBUS_TARG_MEM_WDAT.  Callers
 * are expected to hold MAC access (iwp_mac_access_enter()).
 */
static void
iwp_mem_write(iwp_sc_t *sc, uint32_t addr, uint32_t data)
{
	IWP_WRITE(sc, HBUS_TARG_MEM_WADDR, addr);
	IWP_WRITE(sc, HBUS_TARG_MEM_WDAT, data);
}
2025 2020
/*
 * read mac register
 *
 * Indirect peripheral-register read through the HBUS target window.
 * The (3 << 24) bits OR'ed into the address presumably select the
 * access width/ownership per the hardware's register-access
 * convention -- confirm against the chipset documentation.
 */
static uint32_t
iwp_reg_read(iwp_sc_t *sc, uint32_t addr)
{
	IWP_WRITE(sc, HBUS_TARG_PRPH_RADDR, addr | (3 << 24));
	return (IWP_READ(sc, HBUS_TARG_PRPH_RDAT));
}
2035 2030
/*
 * write mac register
 *
 * Indirect peripheral-register write; counterpart of iwp_reg_read()
 * (same (3 << 24) address modifier -- see note there).
 */
static void
iwp_reg_write(iwp_sc_t *sc, uint32_t addr, uint32_t data)
{
	IWP_WRITE(sc, HBUS_TARG_PRPH_WADDR, addr | (3 << 24));
	IWP_WRITE(sc, HBUS_TARG_PRPH_WDAT, data);
}
2045 2040
2046 2041
2047 2042 /*
2048 2043 * steps of loading ucode:
2049 2044 * load init ucode=>init alive=>calibrate=>
2050 2045 * receive calibration result=>reinitialize NIC=>
2051 2046 * load runtime ucode=>runtime alive=>
2052 2047 * send calibration result=>running.
2053 2048 */
static int
iwp_load_init_firmware(iwp_sc_t *sc)
{
	int err = IWP_FAIL;
	clock_t clk;

	/*
	 * Caller must hold sc_glock: the cv_timedwait() calls below
	 * drop and reacquire it.  IWP_F_PUT_SEG is set by iwp_intr()
	 * (BIT_INT_FH_TX) when a DMA segment transfer completes.
	 */
	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	/*
	 * load init_text section of uCode to hardware
	 */
	err = iwp_put_seg_fw(sc, sc->sc_dma_fw_init_text.cookie.dmac_address,
	    RTC_INST_LOWER_BOUND, sc->sc_dma_fw_init_text.cookie.dmac_size);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_load_init_firmware(): "
		    "failed to write init uCode.\n");
		return (err);
	}

	/* one second timeout for the segment DMA to finish */
	clk = ddi_get_lbolt() + drv_usectohz(1000000);

	/* wait loading init_text until completed or timeout */
	while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		cmn_err(CE_WARN, "iwp_load_init_firmware(): "
		    "timeout waiting for init uCode load.\n");
		return (IWP_FAIL);
	}

	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	/*
	 * load init_data section of uCode to hardware
	 */
	err = iwp_put_seg_fw(sc, sc->sc_dma_fw_init_data.cookie.dmac_address,
	    RTC_DATA_LOWER_BOUND, sc->sc_dma_fw_init_data.cookie.dmac_size);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_load_init_firmware(): "
		    "failed to write init_data uCode.\n");
		return (err);
	}

	clk = ddi_get_lbolt() + drv_usectohz(1000000);

	/*
	 * wait loading init_data until completed or timeout
	 */
	while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		cmn_err(CE_WARN, "iwp_load_init_firmware(): "
		    "timeout waiting for init_data uCode load.\n");
		return (IWP_FAIL);
	}

	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	/* err is IWP_SUCCESS here (both iwp_put_seg_fw() calls succeeded) */
	return (err);
}
2122 2117
/*
 * Load the runtime text and data uCode sections into the hardware.
 * Same structure as iwp_load_init_firmware(); caller must hold
 * sc_glock (the cv_timedwait() calls drop and reacquire it).
 */
static int
iwp_load_run_firmware(iwp_sc_t *sc)
{
	int err = IWP_FAIL;
	clock_t clk;

	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	/*
	 * load init_text section of uCode to hardware
	 */
	err = iwp_put_seg_fw(sc, sc->sc_dma_fw_text.cookie.dmac_address,
	    RTC_INST_LOWER_BOUND, sc->sc_dma_fw_text.cookie.dmac_size);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_load_run_firmware(): "
		    "failed to write run uCode.\n");
		return (err);
	}

	/* one second timeout for the segment DMA to finish */
	clk = ddi_get_lbolt() + drv_usectohz(1000000);

	/* wait loading run_text until completed or timeout */
	while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		cmn_err(CE_WARN, "iwp_load_run_firmware(): "
		    "timeout waiting for run uCode load.\n");
		return (IWP_FAIL);
	}

	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	/*
	 * load run_data section of uCode to hardware
	 *
	 * NOTE(review): the DMA address comes from sc_dma_fw_data_bak
	 * but the size from sc_dma_fw_data -- presumably the backup
	 * copy has the same length; confirm this is intentional.
	 */
	err = iwp_put_seg_fw(sc, sc->sc_dma_fw_data_bak.cookie.dmac_address,
	    RTC_DATA_LOWER_BOUND, sc->sc_dma_fw_data.cookie.dmac_size);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_load_run_firmware(): "
		    "failed to write run_data uCode.\n");
		return (err);
	}

	clk = ddi_get_lbolt() + drv_usectohz(1000000);

	/*
	 * wait loading run_data until completed or timeout
	 */
	while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		cmn_err(CE_WARN, "iwp_load_run_firmware(): "
		    "timeout waiting for run_data uCode load.\n");
		return (IWP_FAIL);
	}

	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	return (err);
}
2191 2186
2192 2187 /*
2193 2188 * this function will be invoked to receive phy information
2194 2189 * when a frame is received.
2195 2190 */
2196 2191 static void
2197 2192 iwp_rx_phy_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
2198 2193 {
2199 2194
2200 2195 sc->sc_rx_phy_res.flag = 1;
2201 2196
2202 2197 (void) memcpy(sc->sc_rx_phy_res.buf, (uint8_t *)(desc + 1),
2203 2198 sizeof (iwp_rx_phy_res_t));
2204 2199 }
2205 2200
2206 2201 /*
2207 2202 * this function will be invoked to receive body of frame when
2208 2203 * a frame is received.
2209 2204 */
/*
 * Process a REPLY_RX_MPDU_CMD notification: extract signal quality
 * from the previously saved PHY result, validate the frame, and pass
 * it up to the net80211 layer.
 */
static void
iwp_rx_mpdu_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
{
	ieee80211com_t *ic = &sc->sc_ic;
#ifdef DEBUG
	iwp_rx_ring_t *ring = &sc->sc_rxq;
#endif
	struct ieee80211_frame *wh;
	struct iwp_rx_non_cfg_phy *phyinfo;
	struct iwp_rx_mpdu_body_size *mpdu_size;

	mblk_t *mp;
	int16_t t;
	uint16_t len, rssi, agc;
	uint32_t temp, crc, *tail;
	uint32_t arssi, brssi, crssi, mrssi;
	iwp_rx_phy_res_t *stat;
	ieee80211_node_t *in;

	/*
	 * assuming not 11n here. cope with 11n in phase-II
	 */
	mpdu_size = (struct iwp_rx_mpdu_body_size *)(desc + 1);
	stat = (iwp_rx_phy_res_t *)sc->sc_rx_phy_res.buf;
	/* sanity check on the saved PHY result */
	if (stat->cfg_phy_cnt > 20) {
		return;
	}

	/*
	 * pull AGC and the per-antenna RSSI values out of the
	 * non-configuration PHY words.
	 */
	phyinfo = (struct iwp_rx_non_cfg_phy *)stat->non_cfg_phy;
	temp = LE_32(phyinfo->non_cfg_phy[IWP_RX_RES_AGC_IDX]);
	agc = (temp & IWP_OFDM_AGC_MSK) >> IWP_OFDM_AGC_BIT_POS;

	temp = LE_32(phyinfo->non_cfg_phy[IWP_RX_RES_RSSI_AB_IDX]);
	arssi = (temp & IWP_OFDM_RSSI_A_MSK) >> IWP_OFDM_RSSI_A_BIT_POS;
	brssi = (temp & IWP_OFDM_RSSI_B_MSK) >> IWP_OFDM_RSSI_B_BIT_POS;

	temp = LE_32(phyinfo->non_cfg_phy[IWP_RX_RES_RSSI_C_IDX]);
	crssi = (temp & IWP_OFDM_RSSI_C_MSK) >> IWP_OFDM_RSSI_C_BIT_POS;

	/* strongest antenna wins */
	mrssi = MAX(arssi, brssi);
	mrssi = MAX(mrssi, crssi);

	t = mrssi - agc - IWP_RSSI_OFFSET;
	/*
	 * convert dBm to percentage
	 */
	rssi = (100 * 75 * 75 - (-20 - t) * (15 * 75 + 62 * (-20 - t)))
	    / (75 * 75);
	/* clamp to [1, 100] */
	if (rssi > 100) {
		rssi = 100;
	}
	if (rssi < 1) {
		rssi = 1;
	}

	/*
	 * size of frame, not include FCS
	 */
	len = LE_16(mpdu_size->byte_count);
	/* the 32-bit rx status word immediately follows the frame body */
	tail = (uint32_t *)((uint8_t *)(desc + 1) +
	    sizeof (struct iwp_rx_mpdu_body_size) + len);
	bcopy(tail, &crc, 4);

	IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
	    "rx intr: idx=%d phy_len=%x len=%d "
	    "rate=%x chan=%d tstamp=%x non_cfg_phy_count=%x "
	    "cfg_phy_count=%x tail=%x", ring->cur, sizeof (*stat),
	    len, stat->rate.r.s.rate, stat->channel,
	    LE_32(stat->timestampl), stat->non_cfg_phy_cnt,
	    stat->cfg_phy_cnt, LE_32(crc)));

	/*
	 * NOTE(review): this also rejects runt frames (< 16 bytes),
	 * though the debug message only mentions oversize.
	 */
	if ((len < 16) || (len > sc->sc_dmabuf_sz)) {
		IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
		    "rx frame oversize\n"));
		return;
	}

	/*
	 * discard Rx frames with bad CRC
	 */
	if ((LE_32(crc) &
	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) !=
	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) {
		IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
		    "rx crc error tail: %x\n",
		    LE_32(crc)));
		sc->sc_rx_err++;
		return;
	}

	wh = (struct ieee80211_frame *)
	    ((uint8_t *)(desc + 1)+ sizeof (struct iwp_rx_mpdu_body_size));

	/*
	 * capture the association ID from association response frames;
	 * the AID sits two uint16_t past the fixed 802.11 header.
	 */
	if (IEEE80211_FC0_SUBTYPE_ASSOC_RESP == *(uint8_t *)wh) {
		sc->sc_assoc_id = *((uint16_t *)(wh + 1) + 2);
		IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
		    "rx : association id = %x\n",
		    sc->sc_assoc_id));
	}

#ifdef DEBUG
	if (iwp_dbg_flags & IWP_DEBUG_RX) {
		ieee80211_dump_pkt((uint8_t *)wh, len, 0, 0);
	}
#endif

	in = ieee80211_find_rxnode(ic, wh);
	mp = allocb(len, BPRI_MED);
	if (mp) {
		(void) memcpy(mp->b_wptr, wh, len);
		mp->b_wptr += len;

		/*
		 * send the frame to the 802.11 layer
		 */
		(void) ieee80211_input(ic, mp, in, rssi, 0);
	} else {
		/* out of message blocks: count the drop and move on */
		sc->sc_rx_nobuf++;
		IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
		    "alloc rx buf failed\n"));
	}

	/*
	 * release node reference
	 */
	ieee80211_free_node(in);
}
2337 2332
2338 2333 /*
2339 2334 * process correlative affairs after a frame is sent.
2340 2335 */
static void
iwp_tx_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
{
	ieee80211com_t *ic = &sc->sc_ic;
	iwp_tx_ring_t *ring = &sc->sc_txq[desc->hdr.qid & 0x3];
	iwp_tx_stat_t *stat = (iwp_tx_stat_t *)(desc + 1);
	iwp_amrr_t *amrr;

	/* no BSS node yet, nothing to account against */
	if (NULL == ic->ic_bss) {
		return;
	}

	/* update AMRR rate-control statistics for the BSS node */
	amrr = (iwp_amrr_t *)ic->ic_bss;

	amrr->txcnt++;
	IWP_DBG((IWP_DEBUG_RATECTL, "iwp_tx_intr(): "
	    "tx: %d cnt\n", amrr->txcnt));

	if (stat->ntries > 0) {
		amrr->retrycnt++;
		sc->sc_tx_retries++;
		IWP_DBG((IWP_DEBUG_TX, "iwp_tx_intr(): "
		    "tx: %d retries\n",
		    sc->sc_tx_retries));
	}

	/* a frame completed, so the tx watchdog can be reset */
	mutex_enter(&sc->sc_mt_lock);
	sc->sc_tx_timer = 0;
	mutex_exit(&sc->sc_mt_lock);

	mutex_enter(&sc->sc_tx_lock);

	/* clamp at zero in case of spurious completions */
	ring->queued--;
	if (ring->queued < 0) {
		ring->queued = 0;
	}

	/*
	 * if tx was previously throttled, restart it once the ring
	 * has drained to 1/8 of its capacity.
	 */
	if ((sc->sc_need_reschedule) && (ring->queued <= (ring->count >> 3))) {
		sc->sc_need_reschedule = 0;
		mutex_exit(&sc->sc_tx_lock);
		mac_tx_update(ic->ic_mach);
		mutex_enter(&sc->sc_tx_lock);
	}

	mutex_exit(&sc->sc_tx_lock);
}
2387 2382
2388 2383 /*
2389 2384 * inform a given command has been executed
2390 2385 */
static void
iwp_cmd_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
{
	/* only responses on the command queue (qid 4) are of interest */
	if ((desc->hdr.qid & 7) != 4) {
		return;
	}

	/*
	 * swallow responses for commands the sender did not wait on.
	 * NOTE(review): sc_cmd_accum is read and decremented outside
	 * sc_glock here -- verify all updaters run in this softintr
	 * context or otherwise serialize.
	 */
	if (sc->sc_cmd_accum > 0) {
		sc->sc_cmd_accum--;
		return;
	}

	mutex_enter(&sc->sc_glock);

	/* wake the thread blocked in iwp_cmd() waiting for completion */
	sc->sc_cmd_flag = SC_CMD_FLG_DONE;

	cv_signal(&sc->sc_cmd_cv);

	mutex_exit(&sc->sc_glock);

	IWP_DBG((IWP_DEBUG_CMD, "iwp_cmd_intr(): "
	    "qid=%x idx=%d flags=%x type=0x%x\n",
	    desc->hdr.qid, desc->hdr.idx, desc->hdr.flags,
	    desc->hdr.type));
}
2416 2411
2417 2412 /*
2418 2413 * this function will be invoked when alive notification occur.
2419 2414 */
2420 2415 static void
2421 2416 iwp_ucode_alive(iwp_sc_t *sc, iwp_rx_desc_t *desc)
2422 2417 {
2423 2418 uint32_t rv;
2424 2419 struct iwp_calib_cfg_cmd cmd;
2425 2420 struct iwp_alive_resp *ar =
2426 2421 (struct iwp_alive_resp *)(desc + 1);
2427 2422 struct iwp_calib_results *res_p = &sc->sc_calib_results;
2428 2423
2429 2424 /*
2430 2425 * the microcontroller is ready
2431 2426 */
2432 2427 IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): "
2433 2428 "microcode alive notification minor: %x major: %x type: "
2434 2429 "%x subtype: %x\n",
2435 2430 ar->ucode_minor, ar->ucode_minor, ar->ver_type, ar->ver_subtype));
2436 2431
2437 2432 #ifdef DEBUG
2438 2433 if (LE_32(ar->is_valid) != UCODE_VALID_OK) {
2439 2434 IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): "
2440 2435 "microcontroller initialization failed\n"));
2441 2436 }
2442 2437 #endif
2443 2438
2444 2439 /*
2445 2440 * determine if init alive or runtime alive.
2446 2441 */
2447 2442 if (INITIALIZE_SUBTYPE == ar->ver_subtype) {
2448 2443 IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): "
2449 2444 "initialization alive received.\n"));
2450 2445
2451 2446 (void) memcpy(&sc->sc_card_alive_init, ar,
2452 2447 sizeof (struct iwp_init_alive_resp));
2453 2448
2454 2449 /*
2455 2450 * necessary configuration to NIC
2456 2451 */
2457 2452 mutex_enter(&sc->sc_glock);
2458 2453
2459 2454 rv = iwp_alive_common(sc);
2460 2455 if (rv != IWP_SUCCESS) {
2461 2456 cmn_err(CE_WARN, "iwp_ucode_alive(): "
2462 2457 "common alive process failed in init alive.\n");
2463 2458 mutex_exit(&sc->sc_glock);
2464 2459 return;
2465 2460 }
2466 2461
2467 2462 (void) memset(&cmd, 0, sizeof (cmd));
2468 2463
2469 2464 cmd.ucd_calib_cfg.once.is_enable = IWP_CALIB_INIT_CFG_ALL;
2470 2465 cmd.ucd_calib_cfg.once.start = IWP_CALIB_INIT_CFG_ALL;
2471 2466 cmd.ucd_calib_cfg.once.send_res = IWP_CALIB_INIT_CFG_ALL;
2472 2467 cmd.ucd_calib_cfg.flags = IWP_CALIB_INIT_CFG_ALL;
2473 2468
2474 2469 /*
2475 2470 * require ucode execute calibration
2476 2471 */
2477 2472 rv = iwp_cmd(sc, CALIBRATION_CFG_CMD, &cmd, sizeof (cmd), 1);
2478 2473 if (rv != IWP_SUCCESS) {
2479 2474 cmn_err(CE_WARN, "iwp_ucode_alive(): "
2480 2475 "failed to send calibration configure command.\n");
2481 2476 mutex_exit(&sc->sc_glock);
2482 2477 return;
2483 2478 }
2484 2479
2485 2480 mutex_exit(&sc->sc_glock);
2486 2481
2487 2482 } else { /* runtime alive */
2488 2483
2489 2484 IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): "
2490 2485 "runtime alive received.\n"));
2491 2486
2492 2487 (void) memcpy(&sc->sc_card_alive_run, ar,
2493 2488 sizeof (struct iwp_alive_resp));
2494 2489
2495 2490 mutex_enter(&sc->sc_glock);
2496 2491
2497 2492 /*
2498 2493 * necessary configuration to NIC
2499 2494 */
2500 2495 rv = iwp_alive_common(sc);
2501 2496 if (rv != IWP_SUCCESS) {
2502 2497 cmn_err(CE_WARN, "iwp_ucode_alive(): "
2503 2498 "common alive process failed in run alive.\n");
2504 2499 mutex_exit(&sc->sc_glock);
2505 2500 return;
2506 2501 }
2507 2502
2508 2503 /*
2509 2504 * send the result of local oscilator calibration to uCode.
2510 2505 */
2511 2506 if (res_p->lo_res != NULL) {
2512 2507 rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2513 2508 res_p->lo_res, res_p->lo_res_len, 1);
2514 2509 if (rv != IWP_SUCCESS) {
2515 2510 cmn_err(CE_WARN, "iwp_ucode_alive(): "
2516 2511 "failed to send local"
2517 2512 "oscilator calibration command.\n");
2518 2513 mutex_exit(&sc->sc_glock);
2519 2514 return;
2520 2515 }
2521 2516
2522 2517 DELAY(1000);
2523 2518 }
2524 2519
2525 2520 /*
2526 2521 * send the result of TX IQ calibration to uCode.
2527 2522 */
2528 2523 if (res_p->tx_iq_res != NULL) {
2529 2524 rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2530 2525 res_p->tx_iq_res, res_p->tx_iq_res_len, 1);
2531 2526 if (rv != IWP_SUCCESS) {
2532 2527 cmn_err(CE_WARN, "iwp_ucode_alive(): "
2533 2528 "failed to send TX IQ"
2534 2529 "calibration command.\n");
2535 2530 mutex_exit(&sc->sc_glock);
2536 2531 return;
2537 2532 }
2538 2533
2539 2534 DELAY(1000);
2540 2535 }
2541 2536
2542 2537 /*
2543 2538 * send the result of TX IQ perd calibration to uCode.
2544 2539 */
2545 2540 if (res_p->tx_iq_perd_res != NULL) {
2546 2541 rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2547 2542 res_p->tx_iq_perd_res,
2548 2543 res_p->tx_iq_perd_res_len, 1);
2549 2544 if (rv != IWP_SUCCESS) {
2550 2545 cmn_err(CE_WARN, "iwp_ucode_alive(): "
2551 2546 "failed to send TX IQ perd"
2552 2547 "calibration command.\n");
2553 2548 mutex_exit(&sc->sc_glock);
2554 2549 return;
2555 2550 }
2556 2551
2557 2552 DELAY(1000);
2558 2553 }
2559 2554
2560 2555 /*
2561 2556 * send the result of Base Band calibration to uCode.
2562 2557 */
2563 2558 if (res_p->base_band_res != NULL) {
2564 2559 rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2565 2560 res_p->base_band_res,
2566 2561 res_p->base_band_res_len, 1);
2567 2562 if (rv != IWP_SUCCESS) {
2568 2563 cmn_err(CE_WARN, "iwp_ucode_alive(): "
2569 2564 "failed to send Base Band"
2570 2565 "calibration command.\n");
2571 2566 mutex_exit(&sc->sc_glock);
2572 2567 return;
2573 2568 }
2574 2569
2575 2570 DELAY(1000);
2576 2571 }
2577 2572
2578 2573 atomic_or_32(&sc->sc_flags, IWP_F_FW_INIT);
2579 2574 cv_signal(&sc->sc_ucode_cv);
2580 2575
2581 2576 mutex_exit(&sc->sc_glock);
2582 2577 }
2583 2578
2584 2579 }
2585 2580
2586 2581 /*
2587 2582 * deal with receiving frames, command response
2588 2583 * and all notifications from ucode.
2589 2584 */
/* ARGSUSED */
static uint_t
iwp_rx_softintr(caddr_t arg, caddr_t unused)
{
	iwp_sc_t *sc;
	ieee80211com_t *ic;
	iwp_rx_desc_t *desc;
	iwp_rx_data_t *data;
	uint32_t index;

	if (NULL == arg) {
		return (DDI_INTR_UNCLAIMED);
	}
	sc = (iwp_sc_t *)arg;
	ic = &sc->sc_ic;

	/*
	 * firmware has moved the index of the rx queue, driver get it,
	 * and deal with it.
	 */
	index = (sc->sc_shared->val0) & 0xfff;

	/* drain every descriptor between our cursor and the fw index */
	while (sc->sc_rxq.cur != index) {
		data = &sc->sc_rxq.data[sc->sc_rxq.cur];
		desc = (iwp_rx_desc_t *)data->dma_data.mem_va;

		IWP_DBG((IWP_DEBUG_INTR, "iwp_rx_softintr(): "
		    "rx notification index = %d"
		    " cur = %d qid=%x idx=%d flags=%x type=%x len=%d\n",
		    index, sc->sc_rxq.cur, desc->hdr.qid, desc->hdr.idx,
		    desc->hdr.flags, desc->hdr.type, LE_32(desc->len)));

		/*
		 * a command other than a tx need to be replied
		 */
		if (!(desc->hdr.qid & 0x80) &&
		    (desc->hdr.type != REPLY_SCAN_CMD) &&
		    (desc->hdr.type != REPLY_TX)) {
			iwp_cmd_intr(sc, desc);
		}

		/* dispatch on notification type */
		switch (desc->hdr.type) {
		case REPLY_RX_PHY_CMD:
			iwp_rx_phy_intr(sc, desc);
			break;

		case REPLY_RX_MPDU_CMD:
			iwp_rx_mpdu_intr(sc, desc);
			break;

		case REPLY_TX:
			iwp_tx_intr(sc, desc);
			break;

		case REPLY_ALIVE:
			iwp_ucode_alive(sc, desc);
			break;

		case CARD_STATE_NOTIFICATION:
		{
			uint32_t *status = (uint32_t *)(desc + 1);

			IWP_DBG((IWP_DEBUG_RADIO, "iwp_rx_softintr(): "
			    "state changed to %x\n",
			    LE_32(*status)));

			if (LE_32(*status) & 1) {
				/*
				 * the radio button has to be pushed(OFF). It
				 * is considered as a hw error, the
				 * iwp_thread() tries to recover it after the
				 * button is pushed again(ON)
				 */
				cmn_err(CE_NOTE, "iwp_rx_softintr(): "
				    "radio transmitter is off\n");
				sc->sc_ostate = sc->sc_ic.ic_state;
				ieee80211_new_state(&sc->sc_ic,
				    IEEE80211_S_INIT, -1);
				atomic_or_32(&sc->sc_flags,
				    IWP_F_HW_ERR_RECOVER | IWP_F_RADIO_OFF);
			}

			break;
		}

		case SCAN_START_NOTIFICATION:
		{
			iwp_start_scan_t *scan =
			    (iwp_start_scan_t *)(desc + 1);

			IWP_DBG((IWP_DEBUG_SCAN, "iwp_rx_softintr(): "
			    "scanning channel %d status %x\n",
			    scan->chan, LE_32(scan->status)));

			/* track the channel currently being scanned */
			ic->ic_curchan = &ic->ic_sup_channels[scan->chan];
			break;
		}

		case SCAN_COMPLETE_NOTIFICATION:
		{
#ifdef DEBUG
			iwp_stop_scan_t *scan =
			    (iwp_stop_scan_t *)(desc + 1);

			IWP_DBG((IWP_DEBUG_SCAN, "iwp_rx_softintr(): "
			    "completed channel %d (burst of %d) status %02x\n",
			    scan->chan, scan->nchan, scan->status));
#endif

			sc->sc_scan_pending++;
			break;
		}

		case STATISTICS_NOTIFICATION:
		{
			/*
			 * handle statistics notification
			 */
			break;
		}

		case CALIBRATION_RES_NOTIFICATION:
			/* stash a calibration result for the runtime alive */
			iwp_save_calib_result(sc, desc);
			break;

		case CALIBRATION_COMPLETE_NOTIFICATION:
			/* init firmware calibration done; wake waiters */
			mutex_enter(&sc->sc_glock);
			atomic_or_32(&sc->sc_flags, IWP_F_FW_INIT);
			cv_signal(&sc->sc_ucode_cv);
			mutex_exit(&sc->sc_glock);
			break;

		case MISSED_BEACONS_NOTIFICATION:
		{
			struct iwp_beacon_missed *miss =
			    (struct iwp_beacon_missed *)(desc + 1);

			/* too many missed beacons: assume link is lost */
			if ((ic->ic_state == IEEE80211_S_RUN) &&
			    (LE_32(miss->consecutive) > 50)) {
				cmn_err(CE_NOTE, "iwp: iwp_rx_softintr(): "
				    "beacon missed %d/%d\n",
				    LE_32(miss->consecutive),
				    LE_32(miss->total));
				(void) ieee80211_new_state(ic,
				    IEEE80211_S_INIT, -1);
			}
			break;
		}
		}

		sc->sc_rxq.cur = (sc->sc_rxq.cur + 1) % RX_QUEUE_SIZE;
	}

	/*
	 * driver dealt with what received in rx queue and tell the information
	 * to the firmware.
	 */
	index = (0 == index) ? RX_QUEUE_SIZE - 1 : index - 1;
	IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, index & (~7));

	/*
	 * re-enable interrupts
	 */
	IWP_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);

	return (DDI_INTR_CLAIMED);
}
2757 2752
2758 2753 /*
2759 2754 * the handle of interrupt
2760 2755 */
/* ARGSUSED */
static uint_t
iwp_intr(caddr_t arg, caddr_t unused)
{
	iwp_sc_t *sc;
	uint32_t r, rfh;

	if (NULL == arg) {
		return (DDI_INTR_UNCLAIMED);
	}
	sc = (iwp_sc_t *)arg;

	/*
	 * 0 means not ours; all-ones presumably means the device is
	 * gone (reads return 0xffffffff) -- TODO confirm.
	 */
	r = IWP_READ(sc, CSR_INT);
	if (0 == r || 0xffffffff == r) {
		return (DDI_INTR_UNCLAIMED);
	}

	IWP_DBG((IWP_DEBUG_INTR, "iwp_intr(): "
	    "interrupt reg %x\n", r));

	rfh = IWP_READ(sc, CSR_FH_INT_STATUS);

	IWP_DBG((IWP_DEBUG_INTR, "iwp_intr(): "
	    "FH interrupt reg %x\n", rfh));

	/*
	 * disable interrupts
	 */
	IWP_WRITE(sc, CSR_INT_MASK, 0);

	/*
	 * ack interrupts
	 */
	IWP_WRITE(sc, CSR_INT, r);
	IWP_WRITE(sc, CSR_FH_INT_STATUS, rfh);

	if (r & (BIT_INT_SWERROR | BIT_INT_ERR)) {
		IWP_DBG((IWP_DEBUG_FW, "iwp_intr(): "
		    "fatal firmware error\n"));
		iwp_stop(sc);
		sc->sc_ostate = sc->sc_ic.ic_state;

		/* notify upper layer */
		if (!IWP_CHK_FAST_RECOVER(sc)) {
			ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
		}

		/*
		 * return with interrupts still masked; recovery is
		 * left to the driver's watchdog thread.
		 */
		atomic_or_32(&sc->sc_flags, IWP_F_HW_ERR_RECOVER);
		return (DDI_INTR_CLAIMED);
	}

	if (r & BIT_INT_RF_KILL) {
		uint32_t tmp = IWP_READ(sc, CSR_GP_CNTRL);
		if (tmp & (1 << 27)) {
			cmn_err(CE_NOTE, "RF switch: radio on\n");
		}
	}

	/*
	 * rx work is deferred to the softint; it re-enables
	 * interrupts itself when done, so return here without
	 * unmasking.
	 */
	if ((r & (BIT_INT_FH_RX | BIT_INT_SW_RX)) ||
	    (rfh & FH_INT_RX_MASK)) {
		(void) ddi_intr_trigger_softint(sc->sc_soft_hdl, NULL);
		return (DDI_INTR_CLAIMED);
	}

	/* a firmware segment DMA finished: wake the loader */
	if (r & BIT_INT_FH_TX) {
		mutex_enter(&sc->sc_glock);
		atomic_or_32(&sc->sc_flags, IWP_F_PUT_SEG);
		cv_signal(&sc->sc_put_seg_cv);
		mutex_exit(&sc->sc_glock);
	}

#ifdef DEBUG
	if (r & BIT_INT_ALIVE) {
		IWP_DBG((IWP_DEBUG_FW, "iwp_intr(): "
		    "firmware initialized.\n"));
	}
#endif

	/*
	 * re-enable interrupts
	 */
	IWP_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);

	return (DDI_INTR_CLAIMED);
}
2846 2841
2847 2842 static uint8_t
2848 2843 iwp_rate_to_plcp(int rate)
2849 2844 {
2850 2845 uint8_t ret;
2851 2846
2852 2847 switch (rate) {
2853 2848 /*
2854 2849 * CCK rates
2855 2850 */
2856 2851 case 2:
2857 2852 ret = 0xa;
2858 2853 break;
2859 2854
2860 2855 case 4:
2861 2856 ret = 0x14;
2862 2857 break;
2863 2858
2864 2859 case 11:
2865 2860 ret = 0x37;
2866 2861 break;
2867 2862
2868 2863 case 22:
2869 2864 ret = 0x6e;
2870 2865 break;
2871 2866
2872 2867 /*
2873 2868 * OFDM rates
2874 2869 */
2875 2870 case 12:
2876 2871 ret = 0xd;
2877 2872 break;
2878 2873
2879 2874 case 18:
2880 2875 ret = 0xf;
2881 2876 break;
2882 2877
2883 2878 case 24:
2884 2879 ret = 0x5;
2885 2880 break;
2886 2881
2887 2882 case 36:
2888 2883 ret = 0x7;
2889 2884 break;
2890 2885
2891 2886 case 48:
2892 2887 ret = 0x9;
2893 2888 break;
2894 2889
2895 2890 case 72:
2896 2891 ret = 0xb;
2897 2892 break;
2898 2893
2899 2894 case 96:
2900 2895 ret = 0x1;
2901 2896 break;
2902 2897
2903 2898 case 108:
2904 2899 ret = 0x3;
2905 2900 break;
2906 2901
2907 2902 default:
2908 2903 ret = 0;
2909 2904 break;
2910 2905 }
2911 2906
2912 2907 return (ret);
2913 2908 }
2914 2909
2915 2910 /*
2916 2911 * invoked by GLD send frames
2917 2912 */
2918 2913 static mblk_t *
2919 2914 iwp_m_tx(void *arg, mblk_t *mp)
2920 2915 {
2921 2916 iwp_sc_t *sc;
2922 2917 ieee80211com_t *ic;
2923 2918 mblk_t *next;
2924 2919
2925 2920 if (NULL == arg) {
2926 2921 return (NULL);
2927 2922 }
2928 2923 sc = (iwp_sc_t *)arg;
2929 2924 ic = &sc->sc_ic;
2930 2925
2931 2926 if (sc->sc_flags & IWP_F_SUSPEND) {
2932 2927 freemsgchain(mp);
2933 2928 return (NULL);
2934 2929 }
2935 2930
2936 2931 if (ic->ic_state != IEEE80211_S_RUN) {
2937 2932 freemsgchain(mp);
2938 2933 return (NULL);
2939 2934 }
2940 2935
2941 2936 if ((sc->sc_flags & IWP_F_HW_ERR_RECOVER) &&
2942 2937 IWP_CHK_FAST_RECOVER(sc)) {
2943 2938 IWP_DBG((IWP_DEBUG_FW, "iwp_m_tx(): "
2944 2939 "hold queue\n"));
2945 2940 return (mp);
2946 2941 }
2947 2942
2948 2943
2949 2944 while (mp != NULL) {
2950 2945 next = mp->b_next;
2951 2946 mp->b_next = NULL;
2952 2947 if (iwp_send(ic, mp, IEEE80211_FC0_TYPE_DATA) != 0) {
2953 2948 mp->b_next = next;
2954 2949 break;
2955 2950 }
2956 2951 mp = next;
2957 2952 }
2958 2953
2959 2954 return (mp);
2960 2955 }
2961 2956
2962 2957 /*
2963 2958 * send frames
2964 2959 */
2965 2960 static int
2966 2961 iwp_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
2967 2962 {
2968 2963 iwp_sc_t *sc;
2969 2964 iwp_tx_ring_t *ring;
2970 2965 iwp_tx_desc_t *desc;
2971 2966 iwp_tx_data_t *data;
2972 2967 iwp_tx_data_t *desc_data;
2973 2968 iwp_cmd_t *cmd;
2974 2969 iwp_tx_cmd_t *tx;
2975 2970 ieee80211_node_t *in;
2976 2971 struct ieee80211_frame *wh;
2977 2972 struct ieee80211_key *k = NULL;
2978 2973 mblk_t *m, *m0;
2979 2974 int hdrlen, len, len0, mblen, off, err = IWP_SUCCESS;
2980 2975 uint16_t masks = 0;
2981 2976 uint32_t rate, s_id = 0;
2982 2977
2983 2978 if (NULL == ic) {
2984 2979 return (IWP_FAIL);
2985 2980 }
2986 2981 sc = (iwp_sc_t *)ic;
2987 2982
2988 2983 if (sc->sc_flags & IWP_F_SUSPEND) {
2989 2984 if ((type & IEEE80211_FC0_TYPE_MASK) !=
2990 2985 IEEE80211_FC0_TYPE_DATA) {
2991 2986 freemsg(mp);
2992 2987 }
2993 2988 err = IWP_FAIL;
2994 2989 goto exit;
2995 2990 }
2996 2991
2997 2992 mutex_enter(&sc->sc_tx_lock);
2998 2993 ring = &sc->sc_txq[0];
2999 2994 data = &ring->data[ring->cur];
3000 2995 cmd = data->cmd;
3001 2996 bzero(cmd, sizeof (*cmd));
3002 2997
3003 2998 ring->cur = (ring->cur + 1) % ring->count;
3004 2999
3005 3000 /*
3006 3001 * Need reschedule TX if TX buffer is full.
3007 3002 */
3008 3003 if (ring->queued > ring->count - IWP_MAX_WIN_SIZE) {
3009 3004 IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
3010 3005 "no txbuf\n"));
3011 3006
3012 3007 sc->sc_need_reschedule = 1;
3013 3008 mutex_exit(&sc->sc_tx_lock);
3014 3009
3015 3010 if ((type & IEEE80211_FC0_TYPE_MASK) !=
3016 3011 IEEE80211_FC0_TYPE_DATA) {
3017 3012 freemsg(mp);
3018 3013 }
3019 3014 sc->sc_tx_nobuf++;
3020 3015 err = IWP_FAIL;
3021 3016 goto exit;
3022 3017 }
3023 3018
3024 3019 ring->queued++;
3025 3020
3026 3021 mutex_exit(&sc->sc_tx_lock);
3027 3022
3028 3023 hdrlen = ieee80211_hdrspace(ic, mp->b_rptr);
3029 3024
3030 3025 m = allocb(msgdsize(mp) + 32, BPRI_MED);
3031 3026 if (NULL == m) { /* can not alloc buf, drop this package */
3032 3027 cmn_err(CE_WARN, "iwp_send(): "
3033 3028 "failed to allocate msgbuf\n");
3034 3029 freemsg(mp);
3035 3030
3036 3031 mutex_enter(&sc->sc_tx_lock);
3037 3032 ring->queued--;
3038 3033 if ((sc->sc_need_reschedule) && (ring->queued <= 0)) {
3039 3034 sc->sc_need_reschedule = 0;
3040 3035 mutex_exit(&sc->sc_tx_lock);
3041 3036 mac_tx_update(ic->ic_mach);
3042 3037 mutex_enter(&sc->sc_tx_lock);
3043 3038 }
3044 3039 mutex_exit(&sc->sc_tx_lock);
3045 3040
3046 3041 err = IWP_SUCCESS;
3047 3042 goto exit;
3048 3043 }
3049 3044
3050 3045 for (off = 0, m0 = mp; m0 != NULL; m0 = m0->b_cont) {
3051 3046 mblen = MBLKL(m0);
3052 3047 (void) memcpy(m->b_rptr + off, m0->b_rptr, mblen);
3053 3048 off += mblen;
3054 3049 }
3055 3050
3056 3051 m->b_wptr += off;
3057 3052
3058 3053 wh = (struct ieee80211_frame *)m->b_rptr;
3059 3054
3060 3055 /*
3061 3056 * determine send which AP or station in IBSS
3062 3057 */
3063 3058 in = ieee80211_find_txnode(ic, wh->i_addr1);
3064 3059 if (NULL == in) {
3065 3060 cmn_err(CE_WARN, "iwp_send(): "
3066 3061 "failed to find tx node\n");
3067 3062 freemsg(mp);
3068 3063 freemsg(m);
3069 3064 sc->sc_tx_err++;
3070 3065
3071 3066 mutex_enter(&sc->sc_tx_lock);
3072 3067 ring->queued--;
3073 3068 if ((sc->sc_need_reschedule) && (ring->queued <= 0)) {
3074 3069 sc->sc_need_reschedule = 0;
3075 3070 mutex_exit(&sc->sc_tx_lock);
3076 3071 mac_tx_update(ic->ic_mach);
3077 3072 mutex_enter(&sc->sc_tx_lock);
3078 3073 }
3079 3074 mutex_exit(&sc->sc_tx_lock);
3080 3075
3081 3076 err = IWP_SUCCESS;
3082 3077 goto exit;
3083 3078 }
3084 3079
3085 3080 /*
3086 3081 * Net80211 module encapsulate outbound data frames.
3087 3082 * Add some feilds of 80211 frame.
3088 3083 */
3089 3084 if ((type & IEEE80211_FC0_TYPE_MASK) ==
3090 3085 IEEE80211_FC0_TYPE_DATA) {
3091 3086 (void) ieee80211_encap(ic, m, in);
3092 3087 }
3093 3088
3094 3089 freemsg(mp);
3095 3090
3096 3091 cmd->hdr.type = REPLY_TX;
3097 3092 cmd->hdr.flags = 0;
3098 3093 cmd->hdr.qid = ring->qid;
3099 3094
3100 3095 tx = (iwp_tx_cmd_t *)cmd->data;
3101 3096 tx->tx_flags = 0;
3102 3097
3103 3098 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3104 3099 tx->tx_flags &= ~(LE_32(TX_CMD_FLG_ACK_MSK));
3105 3100 } else {
3106 3101 tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
3107 3102 }
3108 3103
3109 3104 if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
3110 3105 k = ieee80211_crypto_encap(ic, m);
3111 3106 if (NULL == k) {
3112 3107 freemsg(m);
3113 3108 sc->sc_tx_err++;
3114 3109
3115 3110 mutex_enter(&sc->sc_tx_lock);
3116 3111 ring->queued--;
3117 3112 if ((sc->sc_need_reschedule) && (ring->queued <= 0)) {
3118 3113 sc->sc_need_reschedule = 0;
3119 3114 mutex_exit(&sc->sc_tx_lock);
3120 3115 mac_tx_update(ic->ic_mach);
3121 3116 mutex_enter(&sc->sc_tx_lock);
3122 3117 }
3123 3118 mutex_exit(&sc->sc_tx_lock);
3124 3119
3125 3120 err = IWP_SUCCESS;
3126 3121 goto exit;
3127 3122 }
3128 3123
3129 3124 /* packet header may have moved, reset our local pointer */
3130 3125 wh = (struct ieee80211_frame *)m->b_rptr;
3131 3126 }
3132 3127
3133 3128 len = msgdsize(m);
3134 3129
3135 3130 #ifdef DEBUG
3136 3131 if (iwp_dbg_flags & IWP_DEBUG_TX) {
3137 3132 ieee80211_dump_pkt((uint8_t *)wh, hdrlen, 0, 0);
3138 3133 }
3139 3134 #endif
3140 3135
3141 3136 tx->rts_retry_limit = IWP_TX_RTS_RETRY_LIMIT;
3142 3137 tx->data_retry_limit = IWP_TX_DATA_RETRY_LIMIT;
3143 3138
3144 3139 /*
3145 3140 * specific TX parameters for management frames
3146 3141 */
3147 3142 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
3148 3143 IEEE80211_FC0_TYPE_MGT) {
3149 3144 /*
3150 3145 * mgmt frames are sent at 1M
3151 3146 */
3152 3147 if ((in->in_rates.ir_rates[0] &
3153 3148 IEEE80211_RATE_VAL) != 0) {
3154 3149 rate = in->in_rates.ir_rates[0] & IEEE80211_RATE_VAL;
3155 3150 } else {
3156 3151 rate = 2;
3157 3152 }
3158 3153
3159 3154 tx->tx_flags |= LE_32(TX_CMD_FLG_SEQ_CTL_MSK);
3160 3155
3161 3156 /*
3162 3157 * tell h/w to set timestamp in probe responses
3163 3158 */
3164 3159 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
3165 3160 IEEE80211_FC0_SUBTYPE_PROBE_RESP) {
3166 3161 tx->tx_flags |= LE_32(TX_CMD_FLG_TSF_MSK);
3167 3162
3168 3163 tx->data_retry_limit = 3;
3169 3164 if (tx->data_retry_limit < tx->rts_retry_limit) {
3170 3165 tx->rts_retry_limit = tx->data_retry_limit;
3171 3166 }
3172 3167 }
3173 3168
3174 3169 if (((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
3175 3170 IEEE80211_FC0_SUBTYPE_ASSOC_REQ) ||
3176 3171 ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
3177 3172 IEEE80211_FC0_SUBTYPE_REASSOC_REQ)) {
3178 3173 tx->timeout.pm_frame_timeout = LE_16(3);
3179 3174 } else {
3180 3175 tx->timeout.pm_frame_timeout = LE_16(2);
3181 3176 }
3182 3177
3183 3178 } else {
3184 3179 /*
3185 3180 * do it here for the software way rate scaling.
3186 3181 * later for rate scaling in hardware.
3187 3182 *
3188 3183 * now the txrate is determined in tx cmd flags, set to the
3189 3184 * max value 54M for 11g and 11M for 11b originally.
3190 3185 */
3191 3186 if (ic->ic_fixed_rate != IEEE80211_FIXED_RATE_NONE) {
3192 3187 rate = ic->ic_fixed_rate;
3193 3188 } else {
3194 3189 if ((in->in_rates.ir_rates[in->in_txrate] &
3195 3190 IEEE80211_RATE_VAL) != 0) {
3196 3191 rate = in->in_rates.
3197 3192 ir_rates[in->in_txrate] &
3198 3193 IEEE80211_RATE_VAL;
3199 3194 }
3200 3195 }
3201 3196
3202 3197 tx->tx_flags |= LE_32(TX_CMD_FLG_SEQ_CTL_MSK);
3203 3198
3204 3199 tx->timeout.pm_frame_timeout = 0;
3205 3200 }
3206 3201
3207 3202 IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
3208 3203 "tx rate[%d of %d] = %x",
3209 3204 in->in_txrate, in->in_rates.ir_nrates, rate));
3210 3205
3211 3206 len0 = roundup(4 + sizeof (iwp_tx_cmd_t) + hdrlen, 4);
3212 3207 if (len0 != (4 + sizeof (iwp_tx_cmd_t) + hdrlen)) {
3213 3208 tx->tx_flags |= LE_32(TX_CMD_FLG_MH_PAD_MSK);
3214 3209 }
3215 3210
3216 3211 /*
3217 3212 * retrieve destination node's id
3218 3213 */
3219 3214 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3220 3215 tx->sta_id = IWP_BROADCAST_ID;
3221 3216 } else {
3222 3217 tx->sta_id = IWP_AP_ID;
3223 3218 }
3224 3219
3225 3220 if (2 == rate || 4 == rate || 11 == rate || 22 == rate) {
3226 3221 masks |= RATE_MCS_CCK_MSK;
3227 3222 }
3228 3223
3229 3224 masks |= RATE_MCS_ANT_B_MSK;
3230 3225 tx->rate.r.rate_n_flags = LE_32(iwp_rate_to_plcp(rate) | masks);
3231 3226
3232 3227 IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
3233 3228 "tx flag = %x",
3234 3229 tx->tx_flags));
3235 3230
3236 3231 tx->stop_time.life_time = LE_32(0xffffffff);
3237 3232
3238 3233 tx->len = LE_16(len);
3239 3234
3240 3235 tx->dram_lsb_ptr =
3241 3236 LE_32(data->paddr_cmd + 4 + offsetof(iwp_tx_cmd_t, scratch));
3242 3237 tx->dram_msb_ptr = 0;
3243 3238 tx->driver_txop = 0;
3244 3239 tx->next_frame_len = 0;
3245 3240
3246 3241 (void) memcpy(tx + 1, m->b_rptr, hdrlen);
3247 3242 m->b_rptr += hdrlen;
3248 3243 (void) memcpy(data->dma_data.mem_va, m->b_rptr, len - hdrlen);
3249 3244
3250 3245 IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
3251 3246 "sending data: qid=%d idx=%d len=%d",
3252 3247 ring->qid, ring->cur, len));
3253 3248
3254 3249 /*
3255 3250 * first segment includes the tx cmd plus the 802.11 header,
3256 3251 * the second includes the remaining of the 802.11 frame.
3257 3252 */
3258 3253 mutex_enter(&sc->sc_tx_lock);
3259 3254
3260 3255 cmd->hdr.idx = ring->desc_cur;
3261 3256
3262 3257 desc_data = &ring->data[ring->desc_cur];
3263 3258 desc = desc_data->desc;
3264 3259 bzero(desc, sizeof (*desc));
3265 3260 desc->val0 = 2 << 24;
3266 3261 desc->pa[0].tb1_addr = data->paddr_cmd;
3267 3262 desc->pa[0].val1 = ((len0 << 4) & 0xfff0) |
3268 3263 ((data->dma_data.cookie.dmac_address & 0xffff) << 16);
3269 3264 desc->pa[0].val2 =
3270 3265 ((data->dma_data.cookie.dmac_address & 0xffff0000) >> 16) |
3271 3266 ((len - hdrlen) << 20);
3272 3267 IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
3273 3268 "phy addr1 = 0x%x phy addr2 = 0x%x "
3274 3269 "len1 = 0x%x, len2 = 0x%x val1 = 0x%x val2 = 0x%x",
3275 3270 data->paddr_cmd, data->dma_data.cookie.dmac_address,
3276 3271 len0, len - hdrlen, desc->pa[0].val1, desc->pa[0].val2));
3277 3272
3278 3273 /*
3279 3274 * kick ring
3280 3275 */
3281 3276 s_id = tx->sta_id;
3282 3277
3283 3278 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3284 3279 tfd_offset[ring->desc_cur].val =
3285 3280 (8 + len) | (s_id << 12);
3286 3281 if (ring->desc_cur < IWP_MAX_WIN_SIZE) {
3287 3282 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3288 3283 tfd_offset[IWP_QUEUE_SIZE + ring->desc_cur].val =
3289 3284 (8 + len) | (s_id << 12);
3290 3285 }
3291 3286
3292 3287 IWP_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
3293 3288 IWP_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
3294 3289
3295 3290 ring->desc_cur = (ring->desc_cur + 1) % ring->count;
3296 3291 IWP_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->desc_cur);
3297 3292
3298 3293 mutex_exit(&sc->sc_tx_lock);
3299 3294 freemsg(m);
3300 3295
3301 3296 /*
3302 3297 * release node reference
3303 3298 */
3304 3299 ieee80211_free_node(in);
3305 3300
3306 3301 ic->ic_stats.is_tx_bytes += len;
3307 3302 ic->ic_stats.is_tx_frags++;
3308 3303
3309 3304 mutex_enter(&sc->sc_mt_lock);
3310 3305 if (0 == sc->sc_tx_timer) {
3311 3306 sc->sc_tx_timer = 4;
3312 3307 }
3313 3308 mutex_exit(&sc->sc_mt_lock);
3314 3309
3315 3310 exit:
3316 3311 return (err);
3317 3312 }
3318 3313
3319 3314 /*
3320 3315  * invoked by GLD to deal with IOCTL affairs
3321 3316 */
3322 3317 static void
3323 3318 iwp_m_ioctl(void* arg, queue_t *wq, mblk_t *mp)
3324 3319 {
3325 3320 iwp_sc_t *sc;
3326 3321 ieee80211com_t *ic;
3327 3322 int err = EINVAL;
3328 3323
3329 3324 if (NULL == arg) {
3330 3325 return;
3331 3326 }
3332 3327 sc = (iwp_sc_t *)arg;
3333 3328 ic = &sc->sc_ic;
3334 3329
3335 3330 err = ieee80211_ioctl(ic, wq, mp);
3336 3331 if (ENETRESET == err) {
3337 3332 /*
3338 3333 * This is special for the hidden AP connection.
3339 3334 * In any case, we should make sure only one 'scan'
3340 3335 * in the driver for a 'connect' CLI command. So
3341 3336 * when connecting to a hidden AP, the scan is just
3342 3337 * sent out to the air when we know the desired
3343 3338 * essid of the AP we want to connect.
3344 3339 */
3345 3340 if (ic->ic_des_esslen) {
3346 3341 if (sc->sc_flags & IWP_F_RUNNING) {
3347 3342 iwp_m_stop(sc);
3348 3343 (void) iwp_m_start(sc);
3349 3344 (void) ieee80211_new_state(ic,
3350 3345 IEEE80211_S_SCAN, -1);
3351 3346 }
3352 3347 }
3353 3348 }
3354 3349 }
3355 3350
3356 3351 /*
3357 3352  * Callback functions for get/set property
3358 3353 */
3359 3354 static int
3360 3355 iwp_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3361 3356 uint_t wldp_length, void *wldp_buf)
3362 3357 {
3363 3358 iwp_sc_t *sc;
3364 3359 int err = EINVAL;
3365 3360
3366 3361 if (NULL == arg) {
3367 3362 return (EINVAL);
3368 3363 }
3369 3364 sc = (iwp_sc_t *)arg;
3370 3365
3371 3366 err = ieee80211_getprop(&sc->sc_ic, pr_name, wldp_pr_num,
3372 3367 wldp_length, wldp_buf);
3373 3368
3374 3369 return (err);
3375 3370 }
3376 3371
3377 3372 static void
3378 3373 iwp_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3379 3374 mac_prop_info_handle_t prh)
3380 3375 {
3381 3376 iwp_sc_t *sc;
3382 3377
3383 3378 sc = (iwp_sc_t *)arg;
3384 3379 ieee80211_propinfo(&sc->sc_ic, pr_name, wldp_pr_num, prh);
3385 3380 }
3386 3381
3387 3382 static int
3388 3383 iwp_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3389 3384 uint_t wldp_length, const void *wldp_buf)
3390 3385 {
3391 3386 iwp_sc_t *sc;
3392 3387 ieee80211com_t *ic;
3393 3388 int err = EINVAL;
3394 3389
3395 3390 if (NULL == arg) {
3396 3391 return (EINVAL);
3397 3392 }
3398 3393 sc = (iwp_sc_t *)arg;
3399 3394 ic = &sc->sc_ic;
3400 3395
3401 3396 err = ieee80211_setprop(ic, pr_name, wldp_pr_num, wldp_length,
3402 3397 wldp_buf);
3403 3398
3404 3399 if (err == ENETRESET) {
3405 3400 if (ic->ic_des_esslen) {
3406 3401 if (sc->sc_flags & IWP_F_RUNNING) {
3407 3402 iwp_m_stop(sc);
3408 3403 (void) iwp_m_start(sc);
3409 3404 (void) ieee80211_new_state(ic,
3410 3405 IEEE80211_S_SCAN, -1);
3411 3406 }
3412 3407 }
3413 3408 err = 0;
3414 3409 }
3415 3410 return (err);
3416 3411 }
3417 3412
3418 3413 /*
3419 3414 * invoked by GLD supply statistics NIC and driver
3420 3415 */
/*
 * Invoked by GLD to supply NIC and driver statistics.
 *
 * Looks up the requested statistic under sc_glock; generic wifi
 * statistics not tracked by the driver are delegated to net80211.
 * Returns EINVAL for a NULL softc, ENOTSUP for unknown statistics,
 * IWP_SUCCESS otherwise.
 */
static int
iwp_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	iwp_sc_t *sc;
	ieee80211com_t *ic;
	ieee80211_node_t *in;

	if (NULL == arg) {
		return (EINVAL);
	}
	sc = (iwp_sc_t *)arg;
	ic = &sc->sc_ic;

	mutex_enter(&sc->sc_glock);

	switch (stat) {
	case MAC_STAT_IFSPEED:
		in = ic->ic_bss;
		/*
		 * Rates are stored in units of 500kb/s; /2 * 1000000
		 * converts to bits per second.
		 */
		*val = ((IEEE80211_FIXED_RATE_NONE == ic->ic_fixed_rate) ?
		    IEEE80211_RATE(in->in_txrate) :
		    ic->ic_fixed_rate) / 2 * 1000000;
		break;
	case MAC_STAT_NOXMTBUF:
		*val = sc->sc_tx_nobuf;
		break;
	case MAC_STAT_NORCVBUF:
		*val = sc->sc_rx_nobuf;
		break;
	case MAC_STAT_IERRORS:
		*val = sc->sc_rx_err;
		break;
	case MAC_STAT_RBYTES:
		*val = ic->ic_stats.is_rx_bytes;
		break;
	case MAC_STAT_IPACKETS:
		*val = ic->ic_stats.is_rx_frags;
		break;
	case MAC_STAT_OBYTES:
		*val = ic->ic_stats.is_tx_bytes;
		break;
	case MAC_STAT_OPACKETS:
		*val = ic->ic_stats.is_tx_frags;
		break;
	case MAC_STAT_OERRORS:
	case WIFI_STAT_TX_FAILED:
		*val = sc->sc_tx_err;
		break;
	case WIFI_STAT_TX_RETRANS:
		*val = sc->sc_tx_retries;
		break;
	case WIFI_STAT_FCS_ERRORS:
	case WIFI_STAT_WEP_ERRORS:
	case WIFI_STAT_TX_FRAGS:
	case WIFI_STAT_MCAST_TX:
	case WIFI_STAT_RTS_SUCCESS:
	case WIFI_STAT_RTS_FAILURE:
	case WIFI_STAT_ACK_FAILURE:
	case WIFI_STAT_RX_FRAGS:
	case WIFI_STAT_MCAST_RX:
	case WIFI_STAT_RX_DUPS:
		/* not tracked locally; let net80211 answer */
		mutex_exit(&sc->sc_glock);
		return (ieee80211_stat(ic, stat, val));
	default:
		mutex_exit(&sc->sc_glock);
		return (ENOTSUP);
	}

	mutex_exit(&sc->sc_glock);

	return (IWP_SUCCESS);

}
3493 3488
3494 3489 /*
3495 3490 * invoked by GLD to start or open NIC
3496 3491 */
3497 3492 static int
3498 3493 iwp_m_start(void *arg)
3499 3494 {
3500 3495 iwp_sc_t *sc;
3501 3496 ieee80211com_t *ic;
3502 3497 int err = IWP_FAIL;
3503 3498
3504 3499 if (NULL == arg) {
3505 3500 return (EINVAL);
3506 3501 }
3507 3502 sc = (iwp_sc_t *)arg;
3508 3503 ic = &sc->sc_ic;
3509 3504
3510 3505 err = iwp_init(sc);
3511 3506 if (err != IWP_SUCCESS) {
3512 3507 /*
3513 3508 * The hw init err(eg. RF is OFF). Return Success to make
3514 3509 * the 'plumb' succeed. The iwp_thread() tries to re-init
3515 3510 * background.
3516 3511 */
3517 3512 atomic_or_32(&sc->sc_flags, IWP_F_HW_ERR_RECOVER);
3518 3513 return (IWP_SUCCESS);
3519 3514 }
3520 3515
3521 3516 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3522 3517
3523 3518 atomic_or_32(&sc->sc_flags, IWP_F_RUNNING);
3524 3519
3525 3520 return (IWP_SUCCESS);
3526 3521 }
3527 3522
3528 3523 /*
3529 3524 * invoked by GLD to stop or down NIC
3530 3525 */
3531 3526 static void
3532 3527 iwp_m_stop(void *arg)
3533 3528 {
3534 3529 iwp_sc_t *sc;
3535 3530 ieee80211com_t *ic;
3536 3531
3537 3532 if (NULL == arg) {
3538 3533 return;
3539 3534 }
3540 3535 sc = (iwp_sc_t *)arg;
3541 3536 ic = &sc->sc_ic;
3542 3537
3543 3538 iwp_stop(sc);
3544 3539
3545 3540 /*
3546 3541 * release buffer for calibration
3547 3542 */
3548 3543 iwp_release_calib_buffer(sc);
3549 3544
3550 3545 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3551 3546
3552 3547 atomic_and_32(&sc->sc_flags, ~IWP_F_HW_ERR_RECOVER);
3553 3548 atomic_and_32(&sc->sc_flags, ~IWP_F_RATE_AUTO_CTL);
3554 3549
3555 3550 atomic_and_32(&sc->sc_flags, ~IWP_F_RUNNING);
3556 3551 atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
3557 3552 }
3558 3553
3559 3554 /*
3560 3555 * invoked by GLD to configure NIC
3561 3556 */
3562 3557 static int
3563 3558 iwp_m_unicst(void *arg, const uint8_t *macaddr)
3564 3559 {
3565 3560 iwp_sc_t *sc;
3566 3561 ieee80211com_t *ic;
3567 3562 int err = IWP_SUCCESS;
3568 3563
3569 3564 if (NULL == arg) {
3570 3565 return (EINVAL);
3571 3566 }
3572 3567 sc = (iwp_sc_t *)arg;
3573 3568 ic = &sc->sc_ic;
3574 3569
3575 3570 if (!IEEE80211_ADDR_EQ(ic->ic_macaddr, macaddr)) {
3576 3571 IEEE80211_ADDR_COPY(ic->ic_macaddr, macaddr);
3577 3572 mutex_enter(&sc->sc_glock);
3578 3573 err = iwp_config(sc);
3579 3574 mutex_exit(&sc->sc_glock);
3580 3575 if (err != IWP_SUCCESS) {
3581 3576 cmn_err(CE_WARN, "iwp_m_unicst(): "
3582 3577 "failed to configure device\n");
3583 3578 goto fail;
3584 3579 }
3585 3580 }
3586 3581
3587 3582 return (err);
3588 3583
3589 3584 fail:
3590 3585 return (err);
3591 3586 }
3592 3587
3593 3588 /* ARGSUSED */
3594 3589 static int
3595 3590 iwp_m_multicst(void *arg, boolean_t add, const uint8_t *m)
3596 3591 {
3597 3592 return (IWP_SUCCESS);
3598 3593 }
3599 3594
3600 3595 /* ARGSUSED */
3601 3596 static int
3602 3597 iwp_m_promisc(void *arg, boolean_t on)
3603 3598 {
3604 3599 return (IWP_SUCCESS);
3605 3600 }
3606 3601
3607 3602 /*
3608 3603 * kernel thread to deal with exceptional situation
3609 3604 */
/*
 * Kernel thread dealing with exceptional situations.
 *
 * Runs while sc_mf_thread_switch is set, polling roughly every 100ms.
 * Per iteration it: tracks the RF-kill switch, recovers from fatal
 * hardware errors (fast recovery or full re-init), paces pending
 * scans, drives software rate control, and watches the TX watchdog
 * timer.  On exit it clears sc_mf_thread and signals sc_mt_cv so the
 * detach path can join.
 */
static void
iwp_thread(iwp_sc_t *sc)
{
	ieee80211com_t *ic = &sc->sc_ic;
	clock_t clk;
	int err, n = 0, timeout = 0;
	uint32_t tmp;
#ifdef DEBUG
	/* counts recovery attempts; only referenced via IWP_DBG */
	int times = 0;
#endif

	while (sc->sc_mf_thread_switch) {
		/* mirror the hardware RF-kill state into sc_flags */
		tmp = IWP_READ(sc, CSR_GP_CNTRL);
		if (tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_RADIO_OFF);
		} else {
			atomic_or_32(&sc->sc_flags, IWP_F_RADIO_OFF);
		}

		/*
		 * If in SUSPEND or the RF is OFF, do nothing.
		 */
		if (sc->sc_flags & IWP_F_RADIO_OFF) {
			delay(drv_usectohz(100000));
			continue;
		}

		/*
		 * recover from a fatal hardware error
		 */
		if (ic->ic_mach &&
		    (sc->sc_flags & IWP_F_HW_ERR_RECOVER)) {

			IWP_DBG((IWP_DEBUG_FW, "iwp_thread(): "
			    "try to recover fatal hw error: %d\n", times++));

			iwp_stop(sc);

			if (IWP_CHK_FAST_RECOVER(sc)) {
				/* save runtime configuration */
				bcopy(&sc->sc_config, &sc->sc_config_save,
				    sizeof (sc->sc_config));
			} else {
				/* full recovery: back to INIT, then back off */
				ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
				delay(drv_usectohz(2000000 + n*500000));
			}

			/*
			 * Retry init up to 20 times (n counts failures)
			 * before giving up on this round.
			 */
			err = iwp_init(sc);
			if (err != IWP_SUCCESS) {
				n++;
				if (n < 20) {
					continue;
				}
			}

			n = 0;
			if (!err) {
				atomic_or_32(&sc->sc_flags, IWP_F_RUNNING);
			}


			/*
			 * If fast recovery is unavailable or fails, fall
			 * back to clearing the recovery flag and rescanning
			 * (unless the previous state was INIT).
			 */
			if (!IWP_CHK_FAST_RECOVER(sc) ||
			    iwp_fast_recover(sc) != IWP_SUCCESS) {
				atomic_and_32(&sc->sc_flags,
				    ~IWP_F_HW_ERR_RECOVER);

				delay(drv_usectohz(2000000));
				if (sc->sc_ostate != IEEE80211_S_INIT) {
					ieee80211_new_state(ic,
					    IEEE80211_S_SCAN, 0);
				}
			}
		}

		/*
		 * While scanning, give each channel ~200ms for probe
		 * responses before stepping net80211 to the next one.
		 */
		if (ic->ic_mach &&
		    (sc->sc_flags & IWP_F_SCANNING) && sc->sc_scan_pending) {
			IWP_DBG((IWP_DEBUG_SCAN, "iwp_thread(): "
			    "wait for probe response\n"));

			sc->sc_scan_pending--;
			delay(drv_usectohz(200000));
			ieee80211_next_scan(ic);
		}

		/*
		 * software rate control: run AMRR at most once a second
		 */
		if (ic->ic_mach &&
		    (sc->sc_flags & IWP_F_RATE_AUTO_CTL)) {
			clk = ddi_get_lbolt();
			if (clk > sc->sc_clk + drv_usectohz(1000000)) {
				iwp_amrr_timeout(sc);
			}
		}

		delay(drv_usectohz(100000));

		/*
		 * TX watchdog: sc_tx_timer is decremented once per ten
		 * loop iterations (~1s); when it hits zero a hardware
		 * recovery is scheduled.
		 */
		mutex_enter(&sc->sc_mt_lock);
		if (sc->sc_tx_timer) {
			timeout++;
			if (10 == timeout) {
				sc->sc_tx_timer--;
				if (0 == sc->sc_tx_timer) {
					atomic_or_32(&sc->sc_flags,
					    IWP_F_HW_ERR_RECOVER);
					sc->sc_ostate = IEEE80211_S_RUN;
					IWP_DBG((IWP_DEBUG_FW, "iwp_thread(): "
					    "try to recover from "
					    "send fail\n"));
				}
				timeout = 0;
			}
		}
		mutex_exit(&sc->sc_mt_lock);
	}

	/* thread exit: let waiters (detach) know we are gone */
	mutex_enter(&sc->sc_mt_lock);
	sc->sc_mf_thread = NULL;
	cv_signal(&sc->sc_mt_cv);
	mutex_exit(&sc->sc_mt_lock);
}
3731 3726
3732 3727
3733 3728 /*
3734 3729 * Send a command to the ucode.
3735 3730 */
/*
 * Send a command to the ucode.
 *
 * 'code' selects the command type, 'buf'/'size' is the payload (must
 * fit in cmd->data), and 'async' chooses fire-and-forget versus
 * waiting (up to 2s) for completion signalled on sc_cmd_cv.
 * Caller must hold sc_glock.  Returns IWP_SUCCESS or IWP_FAIL.
 */
static int
iwp_cmd(iwp_sc_t *sc, int code, const void *buf, int size, int async)
{
	iwp_tx_ring_t *ring = &sc->sc_txq[IWP_CMD_QUEUE_NUM];
	iwp_tx_desc_t *desc;
	iwp_cmd_t *cmd;

	ASSERT(size <= sizeof (cmd->data));
	ASSERT(mutex_owned(&sc->sc_glock));

	IWP_DBG((IWP_DEBUG_CMD, "iwp_cmd() "
	    "code[%d]", code));
	desc = ring->data[ring->cur].desc;
	cmd = ring->data[ring->cur].cmd;

	/* fill in the command header and copy the payload */
	cmd->hdr.type = (uint8_t)code;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;
	(void) memcpy(cmd->data, buf, size);
	(void) memset(desc, 0, sizeof (*desc));

	/* one buffer descriptor covering header (4 bytes) plus payload */
	desc->val0 = 1 << 24;
	desc->pa[0].tb1_addr =
	    (uint32_t)(ring->data[ring->cur].paddr_cmd & 0xffffffff);
	desc->pa[0].val1 = ((4 + size) << 4) & 0xfff0;

	if (async) {
		/* track outstanding asynchronous commands */
		sc->sc_cmd_accum++;
	}

	/*
	 * kick cmd ring XXX
	 */
	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
	    tfd_offset[ring->cur].val = 8;
	if (ring->cur < IWP_MAX_WIN_SIZE) {
		/* mirror entry for the wrap-around window */
		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
		    tfd_offset[IWP_QUEUE_SIZE + ring->cur].val = 8;
	}
	ring->cur = (ring->cur + 1) % ring->count;
	IWP_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	if (async) {
		return (IWP_SUCCESS);
	} else {
		clock_t clk;

		/* wait up to 2 seconds for the completion interrupt */
		clk = ddi_get_lbolt() + drv_usectohz(2000000);
		while (sc->sc_cmd_flag != SC_CMD_FLG_DONE) {
			if (cv_timedwait(&sc->sc_cmd_cv,
			    &sc->sc_glock, clk) < 0) {
				break;	/* timed out */
			}
		}

		if (SC_CMD_FLG_DONE == sc->sc_cmd_flag) {
			sc->sc_cmd_flag = SC_CMD_FLG_NONE;
			return (IWP_SUCCESS);
		} else {
			sc->sc_cmd_flag = SC_CMD_FLG_NONE;
			return (IWP_FAIL);
		}
	}
}
3801 3796
3802 3797 /*
3803 3798  * request that the ucode set the LED of the NIC
3804 3799 */
3805 3800 static void
3806 3801 iwp_set_led(iwp_sc_t *sc, uint8_t id, uint8_t off, uint8_t on)
3807 3802 {
3808 3803 iwp_led_cmd_t led;
3809 3804
3810 3805 led.interval = LE_32(100000); /* unit: 100ms */
3811 3806 led.id = id;
3812 3807 led.off = off;
3813 3808 led.on = on;
3814 3809
3815 3810 (void) iwp_cmd(sc, REPLY_LEDS_CMD, &led, sizeof (led), 1);
3816 3811 }
3817 3812
3818 3813 /*
3819 3814 * necessary setting to NIC before authentication
3820 3815 */
/*
 * Necessary NIC setup before authentication: program the RXON
 * configuration for the target AP's BSSID/channel/rates and add the
 * default AP station entry.  Returns IWP_SUCCESS or an iwp_cmd()
 * error code.
 */
static int
iwp_hw_set_before_auth(iwp_sc_t *sc)
{
	ieee80211com_t *ic = &sc->sc_ic;
	ieee80211_node_t *in = ic->ic_bss;
	int err = IWP_FAIL;

	/*
	 * update adapter's configuration according
	 * the info of target AP
	 */
	IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid);
	sc->sc_config.chan = LE_16(ieee80211_chan2ieee(ic, in->in_chan));

	sc->sc_config.ofdm_ht_triple_stream_basic_rates = 0;
	sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0;
	sc->sc_config.ofdm_ht_single_stream_basic_rates = 0;

	/* pick basic rate sets per band/mode */
	if (IEEE80211_MODE_11B == ic->ic_curmode) {
		sc->sc_config.cck_basic_rates = 0x03;
		sc->sc_config.ofdm_basic_rates = 0;
	} else if ((in->in_chan != IEEE80211_CHAN_ANYC) &&
	    (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) {
		sc->sc_config.cck_basic_rates = 0;
		sc->sc_config.ofdm_basic_rates = 0x15;
	} else { /* assume 802.11b/g */
		sc->sc_config.cck_basic_rates = 0x0f;
		sc->sc_config.ofdm_basic_rates = 0xff;
	}

	/* clear both flags, then re-set them from the current ic flags */
	sc->sc_config.flags &= ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
	    RXON_FLG_SHORT_SLOT_MSK);

	if (ic->ic_flags & IEEE80211_F_SHSLOT) {
		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_SLOT_MSK);
	} else {
		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_SLOT_MSK);
	}

	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) {
		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
	} else {
		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_PREAMBLE_MSK);
	}

	IWP_DBG((IWP_DEBUG_80211, "iwp_hw_set_before_auth(): "
	    "config chan %d flags %x "
	    "filter_flags %x cck %x ofdm %x"
	    " bssid:%02x:%02x:%02x:%02x:%02x:%2x\n",
	    LE_16(sc->sc_config.chan), LE_32(sc->sc_config.flags),
	    LE_32(sc->sc_config.filter_flags),
	    sc->sc_config.cck_basic_rates, sc->sc_config.ofdm_basic_rates,
	    sc->sc_config.bssid[0], sc->sc_config.bssid[1],
	    sc->sc_config.bssid[2], sc->sc_config.bssid[3],
	    sc->sc_config.bssid[4], sc->sc_config.bssid[5]));

	/* push the new RXON configuration to the firmware (async) */
	err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config,
	    sizeof (iwp_rxon_cmd_t), 1);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_hw_set_before_auth(): "
		    "failed to config chan%d\n", sc->sc_config.chan);
		return (err);
	}

	/*
	 * add default AP node
	 */
	err = iwp_add_ap_sta(sc);
	if (err != IWP_SUCCESS) {
		return (err);
	}


	return (err);
}
3896 3891
3897 3892 /*
3898 3893 * Send a scan request(assembly scan cmd) to the firmware.
3899 3894 */
/*
 * Send a scan request (assembled scan command) to the firmware.
 *
 * Builds, in the command queue slot, a REPLY_SCAN_CMD consisting of:
 * the scan header, a probe-request 802.11 frame (SSID, rates, xrates
 * and optional WPA IEs), followed by one iwp_scan_chan_t per channel
 * (here a single channel: the current one), then kicks the cmd ring.
 */
static int
iwp_scan(iwp_sc_t *sc)
{
	ieee80211com_t *ic = &sc->sc_ic;
	iwp_tx_ring_t *ring = &sc->sc_txq[IWP_CMD_QUEUE_NUM];
	iwp_tx_desc_t *desc;
	iwp_tx_data_t *data;
	iwp_cmd_t *cmd;
	iwp_scan_hdr_t *hdr;
	iwp_scan_chan_t chan;
	struct ieee80211_frame *wh;
	ieee80211_node_t *in = ic->ic_bss;
	uint8_t essid[IEEE80211_NWID_LEN+1];
	struct ieee80211_rateset *rs;
	enum ieee80211_phymode mode;
	uint8_t *frm;
	int i, pktlen, nrates;

	data = &ring->data[ring->cur];
	desc = data->desc;
	cmd = (iwp_cmd_t *)data->dma_data.mem_va;

	cmd->hdr.type = REPLY_SCAN_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	/* 0x40 flag: presumably marks a huge/extended command — verify */
	cmd->hdr.idx = ring->cur | 0x40;

	/* scan header: one channel, 2.4GHz band, quiet-time thresholds */
	hdr = (iwp_scan_hdr_t *)cmd->data;
	(void) memset(hdr, 0, sizeof (iwp_scan_hdr_t));
	hdr->nchan = 1;
	hdr->quiet_time = LE_16(50);
	hdr->quiet_plcp_th = LE_16(1);

	hdr->flags = LE_32(RXON_FLG_BAND_24G_MSK);
	hdr->rx_chain = LE_16(RXON_RX_CHAIN_DRIVER_FORCE_MSK |
	    (0x7 << RXON_RX_CHAIN_VALID_POS) |
	    (0x2 << RXON_RX_CHAIN_FORCE_SEL_POS) |
	    (0x2 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));

	/* probe requests go out as broadcast at the lowest CCK rate */
	hdr->tx_cmd.tx_flags = LE_32(TX_CMD_FLG_SEQ_CTL_MSK);
	hdr->tx_cmd.sta_id = IWP_BROADCAST_ID;
	hdr->tx_cmd.stop_time.life_time = LE_32(0xffffffff);
	hdr->tx_cmd.rate.r.rate_n_flags = LE_32(iwp_rate_to_plcp(2));
	hdr->tx_cmd.rate.r.rate_n_flags |=
	    LE_32(RATE_MCS_ANT_B_MSK |RATE_MCS_CCK_MSK);
	hdr->direct_scan[0].len = ic->ic_des_esslen;
	hdr->direct_scan[0].id = IEEE80211_ELEMID_SSID;

	hdr->filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
	    RXON_FILTER_BCON_AWARE_MSK);

	/* directed scan when a desired ESSID is configured */
	if (ic->ic_des_esslen) {
		bcopy(ic->ic_des_essid, essid, ic->ic_des_esslen);
		essid[ic->ic_des_esslen] = '\0';
		IWP_DBG((IWP_DEBUG_SCAN, "iwp_scan(): "
		    "directed scan %s\n", essid));

		bcopy(ic->ic_des_essid, hdr->direct_scan[0].ssid,
		    ic->ic_des_esslen);
	} else {
		bzero(hdr->direct_scan[0].ssid,
		    sizeof (hdr->direct_scan[0].ssid));
	}

	/*
	 * a probe request frame is required after the REPLY_SCAN_CMD
	 */
	wh = (struct ieee80211_frame *)(hdr + 1);
	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	(void) memset(wh->i_addr1, 0xff, 6);	/* broadcast DA */
	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_macaddr);
	(void) memset(wh->i_addr3, 0xff, 6);	/* broadcast BSSID */
	*(uint16_t *)&wh->i_dur[0] = 0;
	*(uint16_t *)&wh->i_seq[0] = 0;

	frm = (uint8_t *)(wh + 1);

	/*
	 * essid IE
	 */
	if (in->in_esslen) {
		bcopy(in->in_essid, essid, in->in_esslen);
		essid[in->in_esslen] = '\0';
		IWP_DBG((IWP_DEBUG_SCAN, "iwp_scan(): "
		    "probe with ESSID %s\n",
		    essid));
	}
	*frm++ = IEEE80211_ELEMID_SSID;
	*frm++ = in->in_esslen;
	(void) memcpy(frm, in->in_essid, in->in_esslen);
	frm += in->in_esslen;

	mode = ieee80211_chan2mode(ic, ic->ic_curchan);
	rs = &ic->ic_sup_rates[mode];

	/*
	 * supported rates IE (at most IEEE80211_RATE_SIZE entries)
	 */
	*frm++ = IEEE80211_ELEMID_RATES;
	nrates = rs->ir_nrates;
	if (nrates > IEEE80211_RATE_SIZE) {
		nrates = IEEE80211_RATE_SIZE;
	}

	*frm++ = (uint8_t)nrates;
	(void) memcpy(frm, rs->ir_rates, nrates);
	frm += nrates;

	/*
	 * supported xrates IE (overflow beyond IEEE80211_RATE_SIZE)
	 */
	if (rs->ir_nrates > IEEE80211_RATE_SIZE) {
		nrates = rs->ir_nrates - IEEE80211_RATE_SIZE;
		*frm++ = IEEE80211_ELEMID_XRATES;
		*frm++ = (uint8_t)nrates;
		(void) memcpy(frm, rs->ir_rates + IEEE80211_RATE_SIZE, nrates);
		frm += nrates;
	}

	/*
	 * optional IE (usually for WPA)
	 */
	if (ic->ic_opt_ie != NULL) {
		(void) memcpy(frm, ic->ic_opt_ie, ic->ic_opt_ie_len);
		frm += ic->ic_opt_ie_len;
	}

	/* setup length of probe request */
	hdr->tx_cmd.len = LE_16(_PTRDIFF(frm, wh));
	hdr->len = LE_16(hdr->nchan * sizeof (iwp_scan_chan_t) +
	    LE_16(hdr->tx_cmd.len) + sizeof (iwp_scan_hdr_t));

	/*
	 * the attribute of the scan channels are required after the probe
	 * request frame.
	 */
	for (i = 1; i <= hdr->nchan; i++) {
		/* type 3 = active (directed) scan, 1 = passive */
		if (ic->ic_des_esslen) {
			chan.type = LE_32(3);
		} else {
			chan.type = LE_32(1);
		}

		chan.chan = LE_16(ieee80211_chan2ieee(ic, ic->ic_curchan));
		/* fixed TX power settings — hardware-specific constants */
		chan.tpc.tx_gain = 0x28;
		chan.tpc.dsp_atten = 110;
		chan.active_dwell = LE_16(50);
		chan.passive_dwell = LE_16(120);

		bcopy(&chan, frm, sizeof (iwp_scan_chan_t));
		frm += sizeof (iwp_scan_chan_t);
	}

	pktlen = _PTRDIFF(frm, cmd);

	/* single buffer descriptor covering the whole command */
	(void) memset(desc, 0, sizeof (*desc));
	desc->val0 = 1 << 24;
	desc->pa[0].tb1_addr =
	    (uint32_t)(data->dma_data.cookie.dmac_address & 0xffffffff);
	desc->pa[0].val1 = (pktlen << 4) & 0xfff0;

	/*
	 * maybe for cmd, filling the byte cnt table is not necessary.
	 * anyway, we fill it here.
	 */
	sc->sc_shared->queues_byte_cnt_tbls[ring->qid]
	    .tfd_offset[ring->cur].val = 8;
	if (ring->cur < IWP_MAX_WIN_SIZE) {
		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
		    tfd_offset[IWP_QUEUE_SIZE + ring->cur].val = 8;
	}

	/*
	 * kick cmd ring
	 */
	ring->cur = (ring->cur + 1) % ring->count;
	IWP_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	return (IWP_SUCCESS);
}
4082 4077
/*
 * Configure the NIC through ucode commands after the runtime firmware
 * has been loaded: power management, bluetooth coexistence, the RXON
 * (receiver operating state) command, and the station table.
 * Returns IWP_SUCCESS, or the status of the first failing iwp_cmd().
 * Caller must hold sc_glock (iwp_cmd() is issued synchronously).
 */
static int
iwp_config(iwp_sc_t *sc)
{
	ieee80211com_t *ic = &sc->sc_ic;
	iwp_powertable_cmd_t powertable;
	iwp_bt_cmd_t bt;
	iwp_add_sta_t node;
	iwp_rem_sta_t rm_sta;
	const uint8_t bcast[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	int err = IWP_FAIL;

	/*
	 * set power mode. Disable power management at present, do it later
	 */
	(void) memset(&powertable, 0, sizeof (powertable));
	powertable.flags = LE_16(0x8);
	err = iwp_cmd(sc, POWER_TABLE_CMD, &powertable,
	    sizeof (powertable), 0);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_config(): "
		    "failed to set power mode\n");
		return (err);
	}

	/*
	 * configure bt coexistence
	 * (flags/lead_time/max_kill values are the ucode API defaults)
	 */
	(void) memset(&bt, 0, sizeof (bt));
	bt.flags = 3;
	bt.lead_time = 0xaa;
	bt.max_kill = 1;
	err = iwp_cmd(sc, REPLY_BT_CONFIG, &bt,
	    sizeof (bt), 0);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_config(): "
		    "failed to configurate bt coexistence\n");
		return (err);
	}

	/*
	 * configure rxon: channel, our MAC, operating mode and the
	 * receive filter flags for that mode.
	 */
	(void) memset(&sc->sc_config, 0, sizeof (iwp_rxon_cmd_t));
	IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr);
	IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr);
	sc->sc_config.chan = LE_16(ieee80211_chan2ieee(ic, ic->ic_curchan));
	sc->sc_config.flags = LE_32(RXON_FLG_BAND_24G_MSK);
	/* start in legacy channel mode, not 40MHz mixed/pure */
	sc->sc_config.flags &= LE_32(~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
	    RXON_FLG_CHANNEL_MODE_PURE_40_MSK));

	switch (ic->ic_opmode) {
	case IEEE80211_M_STA:
		sc->sc_config.dev_type = RXON_DEV_TYPE_ESS;
		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
		    RXON_FILTER_DIS_DECRYPT_MSK |
		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
		break;
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
		sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;

		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
		sc->sc_config.filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
		    RXON_FILTER_DIS_DECRYPT_MSK |
		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
		break;
	case IEEE80211_M_HOSTAP:
		sc->sc_config.dev_type = RXON_DEV_TYPE_AP;
		break;
	case IEEE80211_M_MONITOR:
		sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER;
		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
		    RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
		break;
	}

	/*
	 * Support all CCK rates.
	 */
	sc->sc_config.cck_basic_rates = 0x0f;

	/*
	 * Support all OFDM rates.
	 */
	sc->sc_config.ofdm_basic_rates = 0xff;

	/*
	 * rx chain selection: mark all 3 chains valid and let the driver
	 * force the selection (values per the Intel ucode API).
	 */
	sc->sc_config.rx_chain = LE_16(RXON_RX_CHAIN_DRIVER_FORCE_MSK |
	    (0x7 << RXON_RX_CHAIN_VALID_POS) |
	    (0x2 << RXON_RX_CHAIN_FORCE_SEL_POS) |
	    (0x2 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));

	err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config,
	    sizeof (iwp_rxon_cmd_t), 0);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_config(): "
		    "failed to set configure command\n");
		return (err);
	}

	/*
	 * remove all nodes in NIC
	 * (num_sta == 1 with broadcast address removes every station)
	 */
	(void) memset(&rm_sta, 0, sizeof (rm_sta));
	rm_sta.num_sta = 1;
	(void) memcpy(rm_sta.addr, bcast, 6);

	err = iwp_cmd(sc, REPLY_REMOVE_STA, &rm_sta, sizeof (iwp_rem_sta_t), 0);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_config(): "
		    "failed to remove broadcast node in hardware.\n");
		return (err);
	}

	/*
	 * add broadcast node so that we can send broadcast frame
	 */
	(void) memset(&node, 0, sizeof (node));
	(void) memset(node.sta.addr, 0xff, 6);
	node.mode = 0;
	node.sta.sta_id = IWP_BROADCAST_ID;
	node.station_flags = 0;

	err = iwp_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 0);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_config(): "
		    "failed to add broadcast node\n");
		return (err);
	}

	return (err);
}
4217 4212
4218 4213 /*
4219 4214 * quiesce(9E) entry point.
4220 4215 * This function is called when the system is single-threaded at high
4221 4216 * PIL with preemption disabled. Therefore, this function must not be
4222 4217 * blocked.
4223 4218 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
4224 4219 * DDI_FAILURE indicates an error condition and should almost never happen.
4225 4220 */
4226 4221 static int
4227 4222 iwp_quiesce(dev_info_t *dip)
4228 4223 {
4229 4224 iwp_sc_t *sc;
4230 4225
4231 4226 sc = ddi_get_soft_state(iwp_soft_state_p, ddi_get_instance(dip));
4232 4227 if (NULL == sc) {
4233 4228 return (DDI_FAILURE);
4234 4229 }
4235 4230
4236 4231 #ifdef DEBUG
4237 4232 /* by pass any messages, if it's quiesce */
4238 4233 iwp_dbg_flags = 0;
4239 4234 #endif
4240 4235
4241 4236 /*
4242 4237 * No more blocking is allowed while we are in the
4243 4238 * quiesce(9E) entry point.
4244 4239 */
4245 4240 atomic_or_32(&sc->sc_flags, IWP_F_QUIESCED);
4246 4241
4247 4242 /*
4248 4243 * Disable and mask all interrupts.
4249 4244 */
4250 4245 iwp_stop(sc);
4251 4246
4252 4247 return (DDI_SUCCESS);
4253 4248 }
4254 4249
4255 4250 static void
4256 4251 iwp_stop_master(iwp_sc_t *sc)
4257 4252 {
4258 4253 uint32_t tmp;
4259 4254 int n;
4260 4255
4261 4256 tmp = IWP_READ(sc, CSR_RESET);
4262 4257 IWP_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_STOP_MASTER);
4263 4258
4264 4259 tmp = IWP_READ(sc, CSR_GP_CNTRL);
4265 4260 if ((tmp & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE) ==
4266 4261 CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE) {
4267 4262 return;
4268 4263 }
4269 4264
4270 4265 for (n = 0; n < 2000; n++) {
4271 4266 if (IWP_READ(sc, CSR_RESET) &
4272 4267 CSR_RESET_REG_FLAG_MASTER_DISABLED) {
4273 4268 break;
4274 4269 }
4275 4270 DELAY(1000);
4276 4271 }
4277 4272
4278 4273 #ifdef DEBUG
4279 4274 if (2000 == n) {
4280 4275 IWP_DBG((IWP_DEBUG_HW, "iwp_stop_master(): "
4281 4276 "timeout waiting for master stop\n"));
4282 4277 }
4283 4278 #endif
4284 4279 }
4285 4280
4286 4281 static int
4287 4282 iwp_power_up(iwp_sc_t *sc)
4288 4283 {
4289 4284 uint32_t tmp;
4290 4285
4291 4286 iwp_mac_access_enter(sc);
4292 4287 tmp = iwp_reg_read(sc, ALM_APMG_PS_CTL);
4293 4288 tmp &= ~APMG_PS_CTRL_REG_MSK_POWER_SRC;
4294 4289 tmp |= APMG_PS_CTRL_REG_VAL_POWER_SRC_VMAIN;
4295 4290 iwp_reg_write(sc, ALM_APMG_PS_CTL, tmp);
4296 4291 iwp_mac_access_exit(sc);
4297 4292
4298 4293 DELAY(5000);
4299 4294 return (IWP_SUCCESS);
4300 4295 }
4301 4296
/*
 * hardware initialization: bring the clocks up, program the radio
 * configuration from EEPROM, power the device from VMAIN and apply
 * PCI/platform workarounds.  Register write ordering here follows the
 * chip bring-up sequence and must not be rearranged.
 * Returns IWP_SUCCESS, ETIMEDOUT if the MAC clock never becomes
 * ready, or IWP_FAIL on a bad EEPROM radio configuration.
 */
static int
iwp_preinit(iwp_sc_t *sc)
{
	int n;
	uint8_t vlink;
	uint16_t radio_cfg;
	uint32_t tmp;

	/*
	 * clear any pending interrupts
	 */
	IWP_WRITE(sc, CSR_INT, 0xffffffff);

	/* keep the L0s exit timer disabled while we initialize */
	tmp = IWP_READ(sc, CSR_GIO_CHICKEN_BITS);
	IWP_WRITE(sc, CSR_GIO_CHICKEN_BITS,
	    tmp | CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	tmp = IWP_READ(sc, CSR_GP_CNTRL);
	IWP_WRITE(sc, CSR_GP_CNTRL, tmp | CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * wait for clock ready (up to 1000 * 10us)
	 */
	for (n = 0; n < 1000; n++) {
		if (IWP_READ(sc, CSR_GP_CNTRL) &
		    CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) {
			break;
		}
		DELAY(10);
	}

	if (1000 == n) {
		return (ETIMEDOUT);
	}

	iwp_mac_access_enter(sc);

	iwp_reg_write(sc, ALM_APMG_CLK_EN, APMG_CLK_REG_VAL_DMA_CLK_RQT);

	DELAY(20);
	/* disable L1-Active while the NIC is owned by the driver */
	tmp = iwp_reg_read(sc, ALM_APMG_PCIDEV_STT);
	iwp_reg_write(sc, ALM_APMG_PCIDEV_STT, tmp |
	    APMG_DEV_STATE_REG_VAL_L1_ACTIVE_DISABLE);
	iwp_mac_access_exit(sc);

	/* program radio type/step/dash bits read from EEPROM */
	radio_cfg = IWP_READ_EEP_SHORT(sc, EEP_SP_RADIO_CONFIGURATION);
	if (SP_RADIO_TYPE_MSK(radio_cfg) < SP_RADIO_TYPE_MAX) {
		tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
		IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG,
		    tmp | SP_RADIO_TYPE_MSK(radio_cfg) |
		    SP_RADIO_STEP_MSK(radio_cfg) |
		    SP_RADIO_DASH_MSK(radio_cfg));
	} else {
		cmn_err(CE_WARN, "iwp_preinit(): "
		    "radio configuration information in eeprom is wrong\n");
		return (IWP_FAIL);
	}


	IWP_WRITE(sc, CSR_INT_COALESCING, 512 / 32);

	(void) iwp_power_up(sc);

	/*
	 * PCI config-space workaround for early hardware revisions;
	 * clears bit 11 at offset 0xe8 (assumed errata fix -- the
	 * register semantics are not documented here).
	 */
	if ((sc->sc_rev & 0x80) == 0x80 && (sc->sc_rev & 0x7f) < 8) {
		tmp = ddi_get32(sc->sc_cfg_handle,
		    (uint32_t *)(sc->sc_cfg_base + 0xe8));
		ddi_put32(sc->sc_cfg_handle,
		    (uint32_t *)(sc->sc_cfg_base + 0xe8),
		    tmp & ~(1 << 11));
	}

	/* clear bit 1 of PCI config offset 0xf0 (link-related tweak) */
	vlink = ddi_get8(sc->sc_cfg_handle,
	    (uint8_t *)(sc->sc_cfg_base + 0xf0));
	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0xf0),
	    vlink & ~2);

	tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
	tmp |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
	    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
	IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG, tmp);

	/*
	 * make sure power supply on each part of the hardware
	 * (pulse the ALM reset-request bit)
	 */
	iwp_mac_access_enter(sc);
	tmp = iwp_reg_read(sc, ALM_APMG_PS_CTL);
	tmp |= APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
	iwp_reg_write(sc, ALM_APMG_PS_CTL, tmp);
	DELAY(5);

	tmp = iwp_reg_read(sc, ALM_APMG_PS_CTL);
	tmp &= ~APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
	iwp_reg_write(sc, ALM_APMG_PS_CTL, tmp);
	iwp_mac_access_exit(sc);

	/* tell the ucode which 2x2 radio SKU is fitted */
	if (PA_TYPE_MIX == sc->sc_chip_param.pa_type) {
		IWP_WRITE(sc, CSR_GP_DRIVER_REG,
		    CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_MIX);
	}

	if (PA_TYPE_INTER == sc->sc_chip_param.pa_type) {

		IWP_WRITE(sc, CSR_GP_DRIVER_REG,
		    CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
	}

	return (IWP_SUCCESS);
}
4413 4408
4414 4409 /*
4415 4410 * set up semphore flag to own EEPROM
4416 4411 */
4417 4412 static int
4418 4413 iwp_eep_sem_down(iwp_sc_t *sc)
4419 4414 {
4420 4415 int count1, count2;
4421 4416 uint32_t tmp;
4422 4417
4423 4418 for (count1 = 0; count1 < 1000; count1++) {
4424 4419 tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
4425 4420 IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4426 4421 tmp | CSR_HW_IF_CONFIG_REG_EEP_SEM);
4427 4422
4428 4423 for (count2 = 0; count2 < 2; count2++) {
4429 4424 if (IWP_READ(sc, CSR_HW_IF_CONFIG_REG) &
4430 4425 CSR_HW_IF_CONFIG_REG_EEP_SEM) {
4431 4426 return (IWP_SUCCESS);
4432 4427 }
4433 4428 DELAY(10000);
4434 4429 }
4435 4430 }
4436 4431 return (IWP_FAIL);
4437 4432 }
4438 4433
4439 4434 /*
4440 4435 * reset semphore flag to release EEPROM
4441 4436 */
4442 4437 static void
4443 4438 iwp_eep_sem_up(iwp_sc_t *sc)
4444 4439 {
4445 4440 uint32_t tmp;
4446 4441
4447 4442 tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
4448 4443 IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4449 4444 tmp & (~CSR_HW_IF_CONFIG_REG_EEP_SEM));
4450 4445 }
4451 4446
/*
 * This function read all infomation from eeprom into sc_eep_map,
 * one 16-bit word at a time through CSR_EEPROM_REG.
 * Returns IWP_SUCCESS, or IWP_FAIL if the EEPROM signature is bad,
 * the semaphore cannot be acquired, or a word read times out.
 */
static int
iwp_eep_load(iwp_sc_t *sc)
{
	int i, rr;
	uint32_t rv, tmp, eep_gp;
	uint16_t addr, eep_sz = sizeof (sc->sc_eep_map);
	uint16_t *eep_p = (uint16_t *)&sc->sc_eep_map;

	/*
	 * read eeprom gp register in CSR to verify the EEPROM signature
	 */
	eep_gp = IWP_READ(sc, CSR_EEPROM_GP);
	if ((eep_gp & CSR_EEPROM_GP_VALID_MSK) ==
	    CSR_EEPROM_GP_BAD_SIGNATURE) {
		IWP_DBG((IWP_DEBUG_EEPROM, "iwp_eep_load(): "
		    "not find eeprom\n"));
		return (IWP_FAIL);
	}

	rr = iwp_eep_sem_down(sc);
	if (rr != 0) {
		IWP_DBG((IWP_DEBUG_EEPROM, "iwp_eep_load(): "
		    "driver failed to own EEPROM\n"));
		return (IWP_FAIL);
	}

	for (addr = 0; addr < eep_sz; addr += 2) {
		/* request the word: byte address goes in bits [1..] */
		IWP_WRITE(sc, CSR_EEPROM_REG, addr<<1);
		tmp = IWP_READ(sc, CSR_EEPROM_REG);
		IWP_WRITE(sc, CSR_EEPROM_REG, tmp & ~(0x2));

		/* poll the ready bit (bit 0) for up to 10 * 10us */
		for (i = 0; i < 10; i++) {
			rv = IWP_READ(sc, CSR_EEPROM_REG);
			if (rv & 1) {
				break;
			}
			DELAY(10);
		}

		if (!(rv & 1)) {
			IWP_DBG((IWP_DEBUG_EEPROM, "iwp_eep_load(): "
			    "time out when read eeprome\n"));
			iwp_eep_sem_up(sc);
			return (IWP_FAIL);
		}

		/* data word lives in the upper 16 bits of the register */
		eep_p[addr/2] = LE_16(rv >> 16);
	}

	iwp_eep_sem_up(sc);
	return (IWP_SUCCESS);
}
4507 4502
4508 4503 /*
4509 4504 * initialize mac address in ieee80211com_t struct
4510 4505 */
4511 4506 static void
4512 4507 iwp_get_mac_from_eep(iwp_sc_t *sc)
4513 4508 {
4514 4509 ieee80211com_t *ic = &sc->sc_ic;
4515 4510
4516 4511 IEEE80211_ADDR_COPY(ic->ic_macaddr, &sc->sc_eep_map[EEP_MAC_ADDRESS]);
4517 4512
4518 4513 IWP_DBG((IWP_DEBUG_EEPROM, "iwp_get_mac_from_eep(): "
4519 4514 "mac:%2x:%2x:%2x:%2x:%2x:%2x\n",
4520 4515 ic->ic_macaddr[0], ic->ic_macaddr[1], ic->ic_macaddr[2],
4521 4516 ic->ic_macaddr[3], ic->ic_macaddr[4], ic->ic_macaddr[5]));
4522 4517 }
4523 4518
/*
 * main initialization function: runs the two-stage firmware boot.
 * First the init ucode is loaded and executed (to produce calibration
 * results), the chip is stopped, then the runtime ucode is loaded and
 * the device configured via iwp_config().  Each stage waits up to 1s
 * on sc_ucode_cv for the firmware's "alive" notification to set
 * IWP_F_FW_INIT.  Returns IWP_SUCCESS or IWP_FAIL.
 */
static int
iwp_init(iwp_sc_t *sc)
{
	int err = IWP_FAIL;
	clock_t clk;

	/*
	 * release buffer for calibration
	 */
	iwp_release_calib_buffer(sc);

	mutex_enter(&sc->sc_glock);
	atomic_and_32(&sc->sc_flags, ~IWP_F_FW_INIT);

	err = iwp_init_common(sc);
	if (err != IWP_SUCCESS) {
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	/*
	 * backup ucode data part for future use.
	 */
	(void) memcpy(sc->sc_dma_fw_data_bak.mem_va,
	    sc->sc_dma_fw_data.mem_va,
	    sc->sc_dma_fw_data.alength);

	/* load firmware init segment into NIC */
	err = iwp_load_init_firmware(sc);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_init(): "
		    "failed to setup init firmware\n");
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	/*
	 * now press "execute" start running
	 */
	IWP_WRITE(sc, CSR_RESET, 0);

	/* wait up to 1s for the init firmware's alive notification */
	clk = ddi_get_lbolt() + drv_usectohz(1000000);
	while (!(sc->sc_flags & IWP_F_FW_INIT)) {
		if (cv_timedwait(&sc->sc_ucode_cv,
		    &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_FW_INIT)) {
		cmn_err(CE_WARN, "iwp_init(): "
		    "failed to process init alive.\n");
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	mutex_exit(&sc->sc_glock);

	/*
	 * stop chipset for initializing chipset again
	 */
	iwp_stop(sc);

	mutex_enter(&sc->sc_glock);
	atomic_and_32(&sc->sc_flags, ~IWP_F_FW_INIT);

	err = iwp_init_common(sc);
	if (err != IWP_SUCCESS) {
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	/*
	 * load firmware run segment into NIC
	 */
	err = iwp_load_run_firmware(sc);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_init(): "
		    "failed to setup run firmware\n");
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	/*
	 * now press "execute" start running
	 */
	IWP_WRITE(sc, CSR_RESET, 0);

	/* wait up to 1s for the runtime firmware's alive notification */
	clk = ddi_get_lbolt() + drv_usectohz(1000000);
	while (!(sc->sc_flags & IWP_F_FW_INIT)) {
		if (cv_timedwait(&sc->sc_ucode_cv,
		    &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_FW_INIT)) {
		cmn_err(CE_WARN, "iwp_init(): "
		    "failed to process runtime alive.\n");
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	mutex_exit(&sc->sc_glock);

	DELAY(1000);

	mutex_enter(&sc->sc_glock);
	atomic_and_32(&sc->sc_flags, ~IWP_F_FW_INIT);

	/*
	 * at this point, the firmware is loaded OK, then config the hardware
	 * with the ucode API, including rxon, txpower, etc.
	 */
	err = iwp_config(sc);
	if (err) {
		cmn_err(CE_WARN, "iwp_init(): "
		    "failed to configure device\n");
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	/*
	 * at this point, hardware may receive beacons :)
	 */
	mutex_exit(&sc->sc_glock);
	return (IWP_SUCCESS);
}
4655 4650
/*
 * stop or disable NIC: mask interrupts, drain the TX/RX rings, stop
 * the DMA clock and bus master, and software-reset the chip.  When
 * IWP_F_QUIESCED is set we are in the quiesce(9E) path and must not
 * take sc_glock (no blocking allowed there).
 */
static void
iwp_stop(iwp_sc_t *sc)
{
	uint32_t tmp;
	int i;

	/* by pass if it's quiesced */
	if (!(sc->sc_flags & IWP_F_QUIESCED)) {
		mutex_enter(&sc->sc_glock);
	}

	IWP_WRITE(sc, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
	/*
	 * disable interrupts
	 */
	IWP_WRITE(sc, CSR_INT_MASK, 0);
	IWP_WRITE(sc, CSR_INT, CSR_INI_SET_MASK);
	IWP_WRITE(sc, CSR_FH_INT_STATUS, 0xffffffff);

	/*
	 * reset all Tx rings
	 */
	for (i = 0; i < IWP_NUM_QUEUES; i++) {
		iwp_reset_tx_ring(sc, &sc->sc_txq[i]);
	}

	/*
	 * reset Rx ring
	 */
	iwp_reset_rx_ring(sc);

	/* stop the DMA clock before halting the master */
	iwp_mac_access_enter(sc);
	iwp_reg_write(sc, ALM_APMG_CLK_DIS, APMG_CLK_REG_VAL_DMA_CLK_RQT);
	iwp_mac_access_exit(sc);

	DELAY(5);

	iwp_stop_master(sc);

	/* cancel any pending TX watchdog */
	mutex_enter(&sc->sc_mt_lock);
	sc->sc_tx_timer = 0;
	mutex_exit(&sc->sc_mt_lock);

	tmp = IWP_READ(sc, CSR_RESET);
	IWP_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_SW_RESET);

	/* by pass if it's quiesced */
	if (!(sc->sc_flags & IWP_F_QUIESCED)) {
		mutex_exit(&sc->sc_glock);
	}
}
4710 4705
/*
 * Naive implementation of the Adaptive Multi Rate Retry algorithm:
 * "IEEE 802.11 Rate Adaptation: A Practical Approach"
 * Mathieu Lacage, Hossein Manshaei, Thierry Turletti
 * INRIA Sophia - Projet Planete
 * http://www-sop.inria.fr/rapports/sophia/RR-5208.html
 */
/* fewer than 10% of transmits in the window needed a retry */
#define	is_success(amrr) \
	((amrr)->retrycnt < (amrr)->txcnt / 10)
/* more than a third of transmits in the window needed a retry */
#define	is_failure(amrr) \
	((amrr)->retrycnt > (amrr)->txcnt / 3)
/* enough samples to justify trying a higher rate */
#define	is_enough(amrr) \
	((amrr)->txcnt > 200)
/* minimum sample count before deciding to back the rate off */
#define	not_very_few(amrr) \
	((amrr)->txcnt > 40)
/* node already at the lowest/highest index of its rate set */
#define	is_min_rate(in) \
	(0 == (in)->in_txrate)
#define	is_max_rate(in) \
	((in)->in_rates.ir_nrates - 1 == (in)->in_txrate)
/* step one entry up/down in the node's rate set */
#define	increase_rate(in) \
	((in)->in_txrate++)
#define	decrease_rate(in) \
	((in)->in_txrate--)
/* restart the tx/retry sampling window */
#define	reset_cnt(amrr) \
	{ (amrr)->txcnt = (amrr)->retrycnt = 0; }

/* bounds for the consecutive-success count required before a rate-up */
#define	IWP_AMRR_MIN_SUCCESS_THRESHOLD	 1
#define	IWP_AMRR_MAX_SUCCESS_THRESHOLD	15
4739 4734
4740 4735 static void
4741 4736 iwp_amrr_init(iwp_amrr_t *amrr)
4742 4737 {
4743 4738 amrr->success = 0;
4744 4739 amrr->recovery = 0;
4745 4740 amrr->txcnt = amrr->retrycnt = 0;
4746 4741 amrr->success_threshold = IWP_AMRR_MIN_SUCCESS_THRESHOLD;
4747 4742 }
4748 4743
4749 4744 static void
4750 4745 iwp_amrr_timeout(iwp_sc_t *sc)
4751 4746 {
4752 4747 ieee80211com_t *ic = &sc->sc_ic;
4753 4748
4754 4749 IWP_DBG((IWP_DEBUG_RATECTL, "iwp_amrr_timeout(): "
4755 4750 "enter\n"));
4756 4751
4757 4752 if (IEEE80211_M_STA == ic->ic_opmode) {
4758 4753 iwp_amrr_ratectl(NULL, ic->ic_bss);
4759 4754 } else {
4760 4755 ieee80211_iterate_nodes(&ic->ic_sta, iwp_amrr_ratectl, NULL);
4761 4756 }
4762 4757
4763 4758 sc->sc_clk = ddi_get_lbolt();
4764 4759 }
4765 4760
4766 4761 /* ARGSUSED */
4767 4762 static void
4768 4763 iwp_amrr_ratectl(void *arg, ieee80211_node_t *in)
4769 4764 {
4770 4765 iwp_amrr_t *amrr = (iwp_amrr_t *)in;
4771 4766 int need_change = 0;
4772 4767
4773 4768 if (is_success(amrr) && is_enough(amrr)) {
4774 4769 amrr->success++;
4775 4770 if (amrr->success >= amrr->success_threshold &&
4776 4771 !is_max_rate(in)) {
4777 4772 amrr->recovery = 1;
4778 4773 amrr->success = 0;
4779 4774 increase_rate(in);
4780 4775 IWP_DBG((IWP_DEBUG_RATECTL, "iwp_amrr_ratectl(): "
4781 4776 "AMRR increasing rate %d "
4782 4777 "(txcnt=%d retrycnt=%d)\n",
4783 4778 in->in_txrate, amrr->txcnt,
4784 4779 amrr->retrycnt));
4785 4780 need_change = 1;
4786 4781 } else {
4787 4782 amrr->recovery = 0;
4788 4783 }
4789 4784 } else if (not_very_few(amrr) && is_failure(amrr)) {
4790 4785 amrr->success = 0;
4791 4786 if (!is_min_rate(in)) {
4792 4787 if (amrr->recovery) {
4793 4788 amrr->success_threshold++;
4794 4789 if (amrr->success_threshold >
4795 4790 IWP_AMRR_MAX_SUCCESS_THRESHOLD) {
4796 4791 amrr->success_threshold =
4797 4792 IWP_AMRR_MAX_SUCCESS_THRESHOLD;
4798 4793 }
4799 4794 } else {
4800 4795 amrr->success_threshold =
4801 4796 IWP_AMRR_MIN_SUCCESS_THRESHOLD;
4802 4797 }
4803 4798 decrease_rate(in);
4804 4799 IWP_DBG((IWP_DEBUG_RATECTL, "iwp_amrr_ratectl(): "
4805 4800 "AMRR decreasing rate %d "
4806 4801 "(txcnt=%d retrycnt=%d)\n",
4807 4802 in->in_txrate, amrr->txcnt,
4808 4803 amrr->retrycnt));
4809 4804 need_change = 1;
4810 4805 }
4811 4806 amrr->recovery = 0; /* paper is incorrect */
4812 4807 }
4813 4808
4814 4809 if (is_enough(amrr) || need_change) {
4815 4810 reset_cnt(amrr);
4816 4811 }
4817 4812 }
4818 4813
4819 4814 /*
4820 4815 * translate indirect address in eeprom to direct address
4821 4816 * in eeprom and return address of entry whos indirect address
4822 4817 * is indi_addr
4823 4818 */
4824 4819 static uint8_t *
4825 4820 iwp_eep_addr_trans(iwp_sc_t *sc, uint32_t indi_addr)
4826 4821 {
4827 4822 uint32_t di_addr;
4828 4823 uint16_t temp;
4829 4824
4830 4825 if (!(indi_addr & INDIRECT_ADDRESS)) {
4831 4826 di_addr = indi_addr;
4832 4827 return (&sc->sc_eep_map[di_addr]);
4833 4828 }
4834 4829
4835 4830 switch (indi_addr & INDIRECT_TYPE_MSK) {
4836 4831 case INDIRECT_GENERAL:
4837 4832 temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_GENERAL);
4838 4833 break;
4839 4834 case INDIRECT_HOST:
4840 4835 temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_HOST);
4841 4836 break;
4842 4837 case INDIRECT_REGULATORY:
4843 4838 temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_REGULATORY);
4844 4839 break;
4845 4840 case INDIRECT_CALIBRATION:
4846 4841 temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_CALIBRATION);
4847 4842 break;
4848 4843 case INDIRECT_PROCESS_ADJST:
4849 4844 temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_PROCESS_ADJST);
4850 4845 break;
4851 4846 case INDIRECT_OTHERS:
4852 4847 temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_OTHERS);
4853 4848 break;
4854 4849 default:
4855 4850 temp = 0;
4856 4851 cmn_err(CE_WARN, "iwp_eep_addr_trans(): "
4857 4852 "incorrect indirect eeprom address.\n");
4858 4853 break;
4859 4854 }
4860 4855
4861 4856 di_addr = (indi_addr & ADDRESS_MSK) + (temp << 1);
4862 4857
4863 4858 return (&sc->sc_eep_map[di_addr]);
4864 4859 }
4865 4860
/*
 * loade a section of ucode into NIC through the service DMA channel:
 * pause the channel, program source (host DRAM) and destination (NIC
 * SRAM) addresses plus length, mark the buffer valid and re-enable the
 * channel.  Register write order follows the FH DMA programming model.
 * Always returns IWP_SUCCESS; completion is signalled by interrupt.
 */
static int
iwp_put_seg_fw(iwp_sc_t *sc, uint32_t addr_s, uint32_t addr_d, uint32_t len)
{

	iwp_mac_access_enter(sc);

	IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(IWP_FH_SRVC_CHNL),
	    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	IWP_WRITE(sc, IWP_FH_SRVC_CHNL_SRAM_ADDR_REG(IWP_FH_SRVC_CHNL), addr_d);

	IWP_WRITE(sc, IWP_FH_TFDIB_CTRL0_REG(IWP_FH_SRVC_CHNL),
	    (addr_s & FH_MEM_TFDIB_DRAM_ADDR_LSB_MASK));

	IWP_WRITE(sc, IWP_FH_TFDIB_CTRL1_REG(IWP_FH_SRVC_CHNL), len);

	IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_BUF_STS_REG(IWP_FH_SRVC_CHNL),
	    (1 << IWP_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
	    (1 << IWP_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
	    IWP_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(IWP_FH_SRVC_CHNL),
	    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
	    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL |
	    IWP_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwp_mac_access_exit(sc);

	return (IWP_SUCCESS);
}
4899 4894
/*
 * necessary setting during alive notification: initialize the SCD
 * (scheduler) context in NIC SRAM, map TX queues to FIFOs, then issue
 * the wimax-coexistence and crystal-calibration ucode commands.
 * Returns IWP_SUCCESS, or the failing iwp_cmd() status.
 */
static int
iwp_alive_common(iwp_sc_t *sc)
{
	uint32_t base;
	uint32_t i;
	iwp_wimax_coex_cmd_t w_cmd;
	iwp_calibration_crystal_cmd_t c_cmd;
	uint32_t rv = IWP_FAIL;

	/*
	 * initialize SCD related registers to make TX work.
	 */
	iwp_mac_access_enter(sc);

	/*
	 * read sram address of data base.
	 */
	sc->sc_scd_base = iwp_reg_read(sc, IWP_SCD_SRAM_BASE_ADDR);

	/* zero the SCD context area ... */
	for (base = sc->sc_scd_base + IWP_SCD_CONTEXT_DATA_OFFSET;
	    base < sc->sc_scd_base + IWP_SCD_TX_STTS_BITMAP_OFFSET;
	    base += 4) {
		iwp_mem_write(sc, base, 0);
	}

	/* ... the TX status bitmap area ... */
	for (; base < sc->sc_scd_base + IWP_SCD_TRANSLATE_TBL_OFFSET;
	    base += 4) {
		iwp_mem_write(sc, base, 0);
	}

	/* ... and the per-queue translate table */
	for (i = 0; i < sizeof (uint16_t) * IWP_NUM_QUEUES; i += 4) {
		iwp_mem_write(sc, base + i, 0);
	}

	/* point the scheduler at the shared DRAM area (1KB aligned) */
	iwp_reg_write(sc, IWP_SCD_DRAM_BASE_ADDR,
	    sc->sc_dma_sh.cookie.dmac_address >> 10);

	iwp_reg_write(sc, IWP_SCD_QUEUECHAIN_SEL,
	    IWP_SCD_QUEUECHAIN_SEL_ALL(IWP_NUM_QUEUES));

	iwp_reg_write(sc, IWP_SCD_AGGR_SEL, 0);

	/* reset read/write pointers and window/frame limits per queue */
	for (i = 0; i < IWP_NUM_QUEUES; i++) {
		iwp_reg_write(sc, IWP_SCD_QUEUE_RDPTR(i), 0);
		IWP_WRITE(sc, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwp_mem_write(sc, sc->sc_scd_base +
		    IWP_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwp_mem_write(sc, sc->sc_scd_base +
		    IWP_SCD_CONTEXT_QUEUE_OFFSET(i) +
		    sizeof (uint32_t),
		    ((SCD_WIN_SIZE << IWP_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWP_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((SCD_FRAME_LIMIT <<
		    IWP_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWP_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwp_reg_write(sc, IWP_SCD_INTERRUPT_MASK, (1 << IWP_NUM_QUEUES) - 1);

	iwp_reg_write(sc, (IWP_SCD_BASE + 0x10),
	    SCD_TXFACT_REG_TXFIFO_MASK(0, 7));

	IWP_WRITE(sc, HBUS_TARG_WRPTR, (IWP_CMD_QUEUE_NUM << 8));
	iwp_reg_write(sc, IWP_SCD_QUEUE_RDPTR(IWP_CMD_QUEUE_NUM), 0);

	/*
	 * queue 0-7 map to FIFO 0-7 and
	 * all queues work under FIFO mode(none-scheduler_ack)
	 */
	for (i = 0; i < 4; i++) {
		iwp_reg_write(sc, IWP_SCD_QUEUE_STATUS_BITS(i),
		    (1 << IWP_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    ((3-i) << IWP_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWP_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWP_SCD_QUEUE_STTS_REG_MSK);
	}

	/* the command queue gets its dedicated FIFO */
	iwp_reg_write(sc, IWP_SCD_QUEUE_STATUS_BITS(IWP_CMD_QUEUE_NUM),
	    (1 << IWP_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
	    (IWP_CMD_FIFO_NUM << IWP_SCD_QUEUE_STTS_REG_POS_TXF) |
	    (1 << IWP_SCD_QUEUE_STTS_REG_POS_WSL) |
	    IWP_SCD_QUEUE_STTS_REG_MSK);

	for (i = 5; i < 7; i++) {
		iwp_reg_write(sc, IWP_SCD_QUEUE_STATUS_BITS(i),
		    (1 << IWP_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (i << IWP_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWP_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWP_SCD_QUEUE_STTS_REG_MSK);
	}

	iwp_mac_access_exit(sc);

	(void) memset(&w_cmd, 0, sizeof (w_cmd));

	/* an all-zero table disables wimax coexistence */
	rv = iwp_cmd(sc, COEX_PRIORITY_TABLE_CMD, &w_cmd, sizeof (w_cmd), 1);
	if (rv != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alive_common(): "
		    "failed to send wimax coexist command.\n");
		return (rv);
	}

	(void) memset(&c_cmd, 0, sizeof (c_cmd));

	/* crystal frequency capacitor values come from EEPROM */
	c_cmd.opCode = PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
	c_cmd.data.cap_pin1 = LE_16(sc->sc_eep_calib->xtal_calib[0]);
	c_cmd.data.cap_pin2 = LE_16(sc->sc_eep_calib->xtal_calib[1]);

	rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD, &c_cmd, sizeof (c_cmd), 1);
	if (rv != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alive_common(): "
		    "failed to send crystal frq calibration command.\n");
		return (rv);
	}

	/*
	 * make sure crystal frequency calibration ready
	 * before next operations.
	 */
	DELAY(1000);

	return (IWP_SUCCESS);
}
5026 5021
5027 5022 /*
5028 5023 * save results of calibration from ucode
5029 5024 */
5030 5025 static void
5031 5026 iwp_save_calib_result(iwp_sc_t *sc, iwp_rx_desc_t *desc)
5032 5027 {
5033 5028 struct iwp_calib_results *res_p = &sc->sc_calib_results;
5034 5029 struct iwp_calib_hdr *calib_hdr = (struct iwp_calib_hdr *)(desc + 1);
5035 5030 int len = LE_32(desc->len);
5036 5031
5037 5032 /*
5038 5033 * ensure the size of buffer is not too big
5039 5034 */
5040 5035 len = (len & FH_RSCSR_FRAME_SIZE_MASK) - 4;
5041 5036
5042 5037 switch (calib_hdr->op_code) {
5043 5038 case PHY_CALIBRATE_LO_CMD:
5044 5039 if (NULL == res_p->lo_res) {
5045 5040 res_p->lo_res = kmem_alloc(len, KM_NOSLEEP);
5046 5041 }
5047 5042
5048 5043 if (NULL == res_p->lo_res) {
5049 5044 cmn_err(CE_WARN, "iwp_save_calib_result(): "
5050 5045 "failed to allocate memory.\n");
5051 5046 return;
5052 5047 }
5053 5048
5054 5049 res_p->lo_res_len = len;
5055 5050 (void) memcpy(res_p->lo_res, calib_hdr, len);
5056 5051 break;
5057 5052 case PHY_CALIBRATE_TX_IQ_CMD:
5058 5053 if (NULL == res_p->tx_iq_res) {
5059 5054 res_p->tx_iq_res = kmem_alloc(len, KM_NOSLEEP);
5060 5055 }
5061 5056
5062 5057 if (NULL == res_p->tx_iq_res) {
5063 5058 cmn_err(CE_WARN, "iwp_save_calib_result(): "
5064 5059 "failed to allocate memory.\n");
5065 5060 return;
5066 5061 }
5067 5062
5068 5063 res_p->tx_iq_res_len = len;
5069 5064 (void) memcpy(res_p->tx_iq_res, calib_hdr, len);
5070 5065 break;
5071 5066 case PHY_CALIBRATE_TX_IQ_PERD_CMD:
5072 5067 if (NULL == res_p->tx_iq_perd_res) {
5073 5068 res_p->tx_iq_perd_res = kmem_alloc(len, KM_NOSLEEP);
5074 5069 }
5075 5070
5076 5071 if (NULL == res_p->tx_iq_perd_res) {
5077 5072 cmn_err(CE_WARN, "iwp_save_calib_result(): "
5078 5073 "failed to allocate memory.\n");
5079 5074 }
5080 5075
5081 5076 res_p->tx_iq_perd_res_len = len;
5082 5077 (void) memcpy(res_p->tx_iq_perd_res, calib_hdr, len);
5083 5078 break;
5084 5079 case PHY_CALIBRATE_BASE_BAND_CMD:
5085 5080 if (NULL == res_p->base_band_res) {
5086 5081 res_p->base_band_res = kmem_alloc(len, KM_NOSLEEP);
5087 5082 }
5088 5083
5089 5084 if (NULL == res_p->base_band_res) {
5090 5085 cmn_err(CE_WARN, "iwp_save_calib_result(): "
5091 5086 "failed to allocate memory.\n");
5092 5087 }
5093 5088
5094 5089 res_p->base_band_res_len = len;
5095 5090 (void) memcpy(res_p->base_band_res, calib_hdr, len);
5096 5091 break;
5097 5092 default:
5098 5093 cmn_err(CE_WARN, "iwp_save_calib_result(): "
5099 5094 "incorrect calibration type(%d).\n", calib_hdr->op_code);
5100 5095 break;
5101 5096 }
5102 5097
5103 5098 }
5104 5099
5105 5100 static void
5106 5101 iwp_release_calib_buffer(iwp_sc_t *sc)
5107 5102 {
5108 5103 if (sc->sc_calib_results.lo_res != NULL) {
5109 5104 kmem_free(sc->sc_calib_results.lo_res,
5110 5105 sc->sc_calib_results.lo_res_len);
5111 5106 sc->sc_calib_results.lo_res = NULL;
5112 5107 }
5113 5108
5114 5109 if (sc->sc_calib_results.tx_iq_res != NULL) {
5115 5110 kmem_free(sc->sc_calib_results.tx_iq_res,
5116 5111 sc->sc_calib_results.tx_iq_res_len);
5117 5112 sc->sc_calib_results.tx_iq_res = NULL;
5118 5113 }
5119 5114
5120 5115 if (sc->sc_calib_results.tx_iq_perd_res != NULL) {
5121 5116 kmem_free(sc->sc_calib_results.tx_iq_perd_res,
5122 5117 sc->sc_calib_results.tx_iq_perd_res_len);
5123 5118 sc->sc_calib_results.tx_iq_perd_res = NULL;
5124 5119 }
5125 5120
5126 5121 if (sc->sc_calib_results.base_band_res != NULL) {
5127 5122 kmem_free(sc->sc_calib_results.base_band_res,
5128 5123 sc->sc_calib_results.base_band_res_len);
5129 5124 sc->sc_calib_results.base_band_res = NULL;
5130 5125 }
5131 5126
5132 5127 }
5133 5128
/*
 * Common section of initialization: program the RX ring, the TX
 * rings and the keep-warm page into the flow handler, then clear
 * pending state and enable interrupts.  Fails up front if the
 * hardware RF-kill switch is engaged.
 */
static int
iwp_init_common(iwp_sc_t *sc)
{
	int32_t qid;
	uint32_t tmp;

	(void) iwp_preinit(sc);

	/* honor the hardware RF-kill switch before touching the NIC */
	tmp = IWP_READ(sc, CSR_GP_CNTRL);
	if (!(tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) {
		cmn_err(CE_NOTE, "iwp_init_common(): "
		    "radio transmitter is off\n");
		return (IWP_FAIL);
	}

	/*
	 * init Rx ring: disable DMA channel 0, program the descriptor
	 * and status base addresses, then re-enable with 4K buffers.
	 */
	iwp_mac_access_enter(sc);
	IWP_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
	IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
	    sc->sc_rxq.dma_desc.cookie.dmac_address >> 8);

	IWP_WRITE(sc, FH_RSCSR_CHNL0_STTS_WPTR_REG,
	    ((uint32_t)(sc->sc_dma_sh.cookie.dmac_address +
	    offsetof(struct iwp_shared, val0)) >> 4));

	IWP_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	    IWP_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
	    (RX_QUEUE_SIZE_LOG <<
	    FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
	iwp_mac_access_exit(sc);
	/* write pointer must stay 8-aligned, hence the ~0x7 mask */
	IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG,
	    (RX_QUEUE_SIZE - 1) & ~0x7);

	/*
	 * init Tx rings: stop the scheduler, then hand each queue its
	 * descriptor ring base and enable its DMA channel.
	 */
	iwp_mac_access_enter(sc);
	iwp_reg_write(sc, IWP_SCD_TXFACT, 0);

	/*
	 * keep warm page
	 */
	IWP_WRITE(sc, IWP_FH_KW_MEM_ADDR_REG,
	    sc->sc_dma_kw.cookie.dmac_address >> 4);

	for (qid = 0; qid < IWP_NUM_QUEUES; qid++) {
		IWP_WRITE(sc, FH_MEM_CBBC_QUEUE(qid),
		    sc->sc_txq[qid].dma_desc.cookie.dmac_address >> 8);
		IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(qid),
		    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
	}

	iwp_mac_access_exit(sc);

	/*
	 * clear "radio off" and "disable command" bits
	 */
	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR,
	    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/*
	 * clear any pending interrupts
	 */
	IWP_WRITE(sc, CSR_INT, 0xffffffff);

	/*
	 * enable interrupts
	 */
	IWP_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);

	/*
	 * NOTE(review): the software RFKILL bit was already cleared
	 * above and these two writes are byte-identical duplicates --
	 * confirm whether repeating the write is an intentional
	 * hardware workaround or simply redundant.
	 */
	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	return (IWP_SUCCESS);
}
5220 5215
5221 5216 static int
5222 5217 iwp_fast_recover(iwp_sc_t *sc)
5223 5218 {
5224 5219 ieee80211com_t *ic = &sc->sc_ic;
5225 5220 int err = IWP_FAIL;
5226 5221
5227 5222 mutex_enter(&sc->sc_glock);
5228 5223
5229 5224 /* restore runtime configuration */
5230 5225 bcopy(&sc->sc_config_save, &sc->sc_config,
5231 5226 sizeof (sc->sc_config));
5232 5227
5233 5228 sc->sc_config.assoc_id = 0;
5234 5229 sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);
5235 5230
5236 5231 if ((err = iwp_hw_set_before_auth(sc)) != IWP_SUCCESS) {
5237 5232 cmn_err(CE_WARN, "iwp_fast_recover(): "
5238 5233 "could not setup authentication\n");
5239 5234 mutex_exit(&sc->sc_glock);
5240 5235 return (err);
5241 5236 }
5242 5237
5243 5238 bcopy(&sc->sc_config_save, &sc->sc_config,
5244 5239 sizeof (sc->sc_config));
5245 5240
5246 5241 /* update adapter's configuration */
5247 5242 err = iwp_run_state_config(sc);
5248 5243 if (err != IWP_SUCCESS) {
5249 5244 cmn_err(CE_WARN, "iwp_fast_recover(): "
5250 5245 "failed to setup association\n");
5251 5246 mutex_exit(&sc->sc_glock);
5252 5247 return (err);
5253 5248 }
5254 5249 /* set LED on */
5255 5250 iwp_set_led(sc, 2, 0, 1);
5256 5251
5257 5252 mutex_exit(&sc->sc_glock);
5258 5253
5259 5254 atomic_and_32(&sc->sc_flags, ~IWP_F_HW_ERR_RECOVER);
5260 5255
5261 5256 /* start queue */
5262 5257 IWP_DBG((IWP_DEBUG_FW, "iwp_fast_recover(): "
5263 5258 "resume xmit\n"));
5264 5259 mac_tx_update(ic->ic_mach);
5265 5260
5266 5261 return (IWP_SUCCESS);
5267 5262 }
5268 5263
5269 5264 static int
5270 5265 iwp_run_state_config(iwp_sc_t *sc)
5271 5266 {
5272 5267 struct ieee80211com *ic = &sc->sc_ic;
5273 5268 ieee80211_node_t *in = ic->ic_bss;
5274 5269 int err = IWP_FAIL;
5275 5270
5276 5271 /*
5277 5272 * update adapter's configuration
5278 5273 */
5279 5274 sc->sc_config.assoc_id = in->in_associd & 0x3fff;
5280 5275
5281 5276 /*
5282 5277 * short preamble/slot time are
5283 5278 * negotiated when associating
5284 5279 */
5285 5280 sc->sc_config.flags &=
5286 5281 ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
5287 5282 RXON_FLG_SHORT_SLOT_MSK);
5288 5283
5289 5284 if (ic->ic_flags & IEEE80211_F_SHSLOT) {
5290 5285 sc->sc_config.flags |=
5291 5286 LE_32(RXON_FLG_SHORT_SLOT_MSK);
5292 5287 }
5293 5288
5294 5289 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) {
5295 5290 sc->sc_config.flags |=
5296 5291 LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
5297 5292 }
5298 5293
5299 5294 sc->sc_config.filter_flags |=
5300 5295 LE_32(RXON_FILTER_ASSOC_MSK);
5301 5296
5302 5297 if (ic->ic_opmode != IEEE80211_M_STA) {
5303 5298 sc->sc_config.filter_flags |=
5304 5299 LE_32(RXON_FILTER_BCON_AWARE_MSK);
5305 5300 }
5306 5301
5307 5302 IWP_DBG((IWP_DEBUG_80211, "iwp_run_state_config(): "
5308 5303 "config chan %d flags %x"
5309 5304 " filter_flags %x\n",
5310 5305 sc->sc_config.chan, sc->sc_config.flags,
5311 5306 sc->sc_config.filter_flags));
5312 5307
5313 5308 err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config,
5314 5309 sizeof (iwp_rxon_cmd_t), 1);
5315 5310 if (err != IWP_SUCCESS) {
5316 5311 cmn_err(CE_WARN, "iwp_run_state_config(): "
5317 5312 "could not update configuration\n");
5318 5313 return (err);
5319 5314 }
5320 5315
5321 5316 return (err);
5322 5317 }
5323 5318
5324 5319 /*
5325 5320 * This function overwrites default configurations of
5326 5321 * ieee80211com structure in Net80211 module.
5327 5322 */
5328 5323 static void
5329 5324 iwp_overwrite_ic_default(iwp_sc_t *sc)
5330 5325 {
5331 5326 ieee80211com_t *ic = &sc->sc_ic;
5332 5327
5333 5328 sc->sc_newstate = ic->ic_newstate;
5334 5329 ic->ic_newstate = iwp_newstate;
5335 5330 ic->ic_node_alloc = iwp_node_alloc;
5336 5331 ic->ic_node_free = iwp_node_free;
5337 5332 }
5338 5333
5339 5334
5340 5335 /*
5341 5336 * This function adds AP station into hardware.
5342 5337 */
5343 5338 static int
5344 5339 iwp_add_ap_sta(iwp_sc_t *sc)
5345 5340 {
5346 5341 ieee80211com_t *ic = &sc->sc_ic;
5347 5342 ieee80211_node_t *in = ic->ic_bss;
5348 5343 iwp_add_sta_t node;
5349 5344 int err = IWP_FAIL;
5350 5345
5351 5346 /*
5352 5347 * Add AP node into hardware.
5353 5348 */
5354 5349 (void) memset(&node, 0, sizeof (node));
5355 5350 IEEE80211_ADDR_COPY(node.sta.addr, in->in_bssid);
5356 5351 node.mode = STA_MODE_ADD_MSK;
5357 5352 node.sta.sta_id = IWP_AP_ID;
5358 5353
5359 5354 err = iwp_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
5360 5355 if (err != IWP_SUCCESS) {
5361 5356 cmn_err(CE_WARN, "iwp_add_ap_sta(): "
5362 5357 "failed to add AP node\n");
5363 5358 return (err);
5364 5359 }
5365 5360
5366 5361 return (err);
5367 5362 }
5368 5363
5369 5364 /*
5370 5365 * Check EEPROM version and Calibration version.
5371 5366 */
5372 5367 static int
5373 5368 iwp_eep_ver_chk(iwp_sc_t *sc)
5374 5369 {
5375 5370 if ((IWP_READ_EEP_SHORT(sc, EEP_VERSION) < 0x011a) ||
5376 5371 (sc->sc_eep_calib->tx_pow_calib_hdr.calib_version < 4)) {
5377 5372 cmn_err(CE_WARN, "iwp_eep_ver_chk(): "
5378 5373 "unsupported eeprom detected\n");
5379 5374 return (IWP_FAIL);
5380 5375 }
5381 5376
5382 5377 return (IWP_SUCCESS);
5383 5378 }
5384 5379
5385 5380 /*
5386 5381 * Determine parameters for all supported chips.
5387 5382 */
5388 5383 static void
5389 5384 iwp_set_chip_param(iwp_sc_t *sc)
5390 5385 {
5391 5386 if ((0x008d == sc->sc_dev_id) ||
5392 5387 (0x008e == sc->sc_dev_id)) {
5393 5388 sc->sc_chip_param.phy_mode = PHY_MODE_G |
5394 5389 PHY_MODE_A | PHY_MODE_N;
5395 5390
5396 5391 sc->sc_chip_param.tx_ant = ANT_A | ANT_B;
5397 5392 sc->sc_chip_param.rx_ant = ANT_A | ANT_B;
5398 5393
5399 5394 sc->sc_chip_param.pa_type = PA_TYPE_MIX;
5400 5395 }
5401 5396
5402 5397 if ((0x422c == sc->sc_dev_id) ||
5403 5398 (0x4239 == sc->sc_dev_id)) {
5404 5399 sc->sc_chip_param.phy_mode = PHY_MODE_G |
5405 5400 PHY_MODE_A | PHY_MODE_N;
5406 5401
5407 5402 sc->sc_chip_param.tx_ant = ANT_B | ANT_C;
5408 5403 sc->sc_chip_param.rx_ant = ANT_B | ANT_C;
5409 5404
5410 5405 sc->sc_chip_param.pa_type = PA_TYPE_INTER;
5411 5406 }
5412 5407
5413 5408 if ((0x422b == sc->sc_dev_id) ||
5414 5409 (0x4238 == sc->sc_dev_id)) {
5415 5410 sc->sc_chip_param.phy_mode = PHY_MODE_G |
5416 5411 PHY_MODE_A | PHY_MODE_N;
5417 5412
5418 5413 sc->sc_chip_param.tx_ant = ANT_A | ANT_B | ANT_C;
5419 5414 sc->sc_chip_param.rx_ant = ANT_A | ANT_B | ANT_C;
5420 5415
5421 5416 sc->sc_chip_param.pa_type = PA_TYPE_SYSTEM;
5422 5417 }
5423 5418 }
↓ open down ↓ |
3670 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX