XXXX introduce drv_sectohz
--- old/usr/src/uts/common/io/ntxn/unm_nic_main.c
+++ new/usr/src/uts/common/io/ntxn/unm_nic_main.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2008 NetXen, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25 /*
26 26 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
27 27 * Use is subject to license terms.
28 28 */
29 29 #include <sys/types.h>
30 30 #include <sys/conf.h>
31 31 #include <sys/debug.h>
32 32 #include <sys/stropts.h>
33 33 #include <sys/stream.h>
34 34 #include <sys/strlog.h>
35 35 #include <sys/kmem.h>
36 36 #include <sys/stat.h>
37 37 #include <sys/kstat.h>
38 38 #include <sys/vtrace.h>
39 39 #include <sys/dlpi.h>
40 40 #include <sys/strsun.h>
41 41 #include <sys/ethernet.h>
42 42 #include <sys/modctl.h>
43 43 #include <sys/errno.h>
44 44 #include <sys/dditypes.h>
45 45 #include <sys/ddi.h>
46 46 #include <sys/sunddi.h>
47 47 #include <sys/sysmacros.h>
48 48 #include <sys/pci.h>
49 49
50 50 #include <sys/gld.h>
51 51 #include <netinet/in.h>
52 52 #include <inet/ip.h>
53 53 #include <inet/tcp.h>
54 54
55 55 #include <sys/rwlock.h>
56 56 #include <sys/mutex.h>
57 57 #include <sys/pattr.h>
58 58 #include <sys/strsubr.h>
59 59 #include <sys/ddi_impldefs.h>
60 60 #include <sys/task.h>
61 61
62 62 #include "unm_nic_hw.h"
63 63 #include "unm_nic.h"
64 64
65 65 #include "nic_phan_reg.h"
66 66 #include "unm_nic_ioctl.h"
67 67 #include "nic_cmn.h"
68 68 #include "unm_version.h"
69 69 #include "unm_brdcfg.h"
70 70
71 71 #if defined(lint)
72 72 #undef MBLKL
73 73 #define MBLKL(_mp_) ((uintptr_t)(_mp_)->b_wptr - (uintptr_t)(_mp_)->b_rptr)
74 74 #endif /* lint */
75 75
76 76 #undef UNM_LOOPBACK
77 77 #undef SINGLE_DMA_BUF
78 78
79 79 #define UNM_ADAPTER_UP_MAGIC 777
80 80 #define VLAN_TAGSZ 0x4
81 81
82 82 #define index2rxbuf(_rdp_, _idx_) ((_rdp_)->rx_buf_pool + (_idx_))
83 83 #define rxbuf2index(_rdp_, _bufp_) ((_bufp_) - (_rdp_)->rx_buf_pool)
84 84
85 85 /*
86 86 * Receive ISR processes NX_RX_MAXBUFS incoming packets at most, then posts
87 87 * as many buffers as packets processed. This loop repeats as required to
88 88 * process all incoming packets delivered in a single interrupt. A higher
89 89 * value of NX_RX_MAXBUFS improves performance by posting rx buffers less
90 90 * frequently, but at the cost of not posting them quickly enough when the
91 91 * card is running out of rx buffers.
92 92 */
93 93 #define NX_RX_THRESHOLD 32
94 94 #define NX_RX_MAXBUFS 128
95 95 #define NX_MAX_TXCOMPS 256
96 96
97 97 extern int create_rxtx_rings(unm_adapter *adapter);
98 98 extern void destroy_rxtx_rings(unm_adapter *adapter);
99 99
100 100 static void unm_post_rx_buffers_nodb(struct unm_adapter_s *adapter,
101 101 uint32_t ringid);
102 102 static mblk_t *unm_process_rcv(unm_adapter *adapter, statusDesc_t *desc);
103 103 static int unm_process_rcv_ring(unm_adapter *, int);
104 104 static int unm_process_cmd_ring(struct unm_adapter_s *adapter);
105 105
106 106 static int unm_nic_do_ioctl(unm_adapter *adapter, queue_t *q, mblk_t *mp);
107 107 static void unm_nic_ioctl(struct unm_adapter_s *adapter, int cmd, queue_t *q,
108 108 mblk_t *mp);
109 109
110 110 /* GLDv3 interface functions */
111 111 static int ntxn_m_start(void *);
112 112 static void ntxn_m_stop(void *);
113 113 static int ntxn_m_multicst(void *, boolean_t, const uint8_t *);
114 114 static int ntxn_m_promisc(void *, boolean_t);
115 115 static int ntxn_m_stat(void *arg, uint_t stat, uint64_t *val);
116 116 static mblk_t *ntxn_m_tx(void *, mblk_t *);
117 117 static void ntxn_m_ioctl(void *arg, queue_t *wq, mblk_t *mp);
118 118 static boolean_t ntxn_m_getcapab(void *arg, mac_capab_t cap, void *cap_data);
119 119
120 120 /*
121 121 * Allocates a DMA handle and backing memory and binds them;
122 122 * returns the bound virtual address and the physical address cookie.
123 123 */
124 124 int
125 125 unm_pci_alloc_consistent(unm_adapter *adapter,
126 126 int size, caddr_t *address, ddi_dma_cookie_t *cookie,
127 127 ddi_dma_handle_t *dma_handle, ddi_acc_handle_t *handlep)
128 128 {
129 129 int err;
130 130 uint32_t ncookies;
131 131 size_t ring_len;
132 132 uint_t dma_flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
133 133
134 134 *dma_handle = NULL;
135 135
136 136 if (size <= 0)
137 137 return (DDI_ENOMEM);
138 138
139 139 err = ddi_dma_alloc_handle(adapter->dip,
140 140 &adapter->gc_dma_attr_desc,
141 141 DDI_DMA_DONTWAIT, NULL, dma_handle);
142 142 if (err != DDI_SUCCESS) {
143 143 cmn_err(CE_WARN, "!%s: %s: ddi_dma_alloc_handle FAILED:"
144 144 " %d", unm_nic_driver_name, __func__, err);
145 145 return (DDI_ENOMEM);
146 146 }
147 147
148 148 err = ddi_dma_mem_alloc(*dma_handle,
149 149 size, &adapter->gc_attr_desc,
150 150 dma_flags & (DDI_DMA_STREAMING | DDI_DMA_CONSISTENT),
151 151 DDI_DMA_DONTWAIT, NULL, address, &ring_len,
152 152 handlep);
153 153 if (err != DDI_SUCCESS) {
154 154 cmn_err(CE_WARN, "!%s: %s: ddi_dma_mem_alloc failed:"
155 155 "ret %d, request size: %d",
156 156 unm_nic_driver_name, __func__, err, size);
157 157 ddi_dma_free_handle(dma_handle);
158 158 return (DDI_ENOMEM);
159 159 }
160 160
161 161 if (ring_len < size) {
162 162 cmn_err(CE_WARN, "%s: %s: could not allocate required "
163 163 "memory: %d\n", unm_nic_driver_name,
164 164 __func__, size);
165 165 ddi_dma_mem_free(handlep);
166 166 ddi_dma_free_handle(dma_handle);
167 167 return (DDI_FAILURE);
168 168 }
169 169
170 170 (void) memset(*address, 0, size);
171 171
172 172 if (((err = ddi_dma_addr_bind_handle(*dma_handle,
173 173 NULL, *address, ring_len,
174 174 dma_flags,
175 175 DDI_DMA_DONTWAIT, NULL,
176 176 cookie, &ncookies)) != DDI_DMA_MAPPED) ||
177 177 (ncookies != 1)) {
178 178 cmn_err(CE_WARN,
179 179 "!%s: %s: ddi_dma_addr_bind_handle FAILED: %d",
180 180 unm_nic_driver_name, __func__, err);
181 181 ddi_dma_mem_free(handlep);
182 182 ddi_dma_free_handle(dma_handle);
183 183 return (DDI_FAILURE);
184 184 }
185 185
186 186 return (DDI_SUCCESS);
187 187 }
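
A minimal usage sketch of this helper, mirroring the call made later in unm_nic_hw_resources() (the local variable names here are illustrative only):

	caddr_t va;
	ddi_dma_cookie_t cookie;
	ddi_dma_handle_t dma_hdl;
	ddi_acc_handle_t acc_hdl;

	if (unm_pci_alloc_consistent(adapter, size, &va, &cookie,
	    &dma_hdl, &acc_hdl) != DDI_SUCCESS)
		return (DDI_ENOMEM);

	/* the ring's physical address is now in cookie.dmac_laddress */

	/* use the ring, then release it with the matching helper: */
	unm_pci_free_consistent(&dma_hdl, &acc_hdl);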
188 188
189 189 /*
190 190 * Unbinds the memory, frees the memory and, at the end, frees the DMA handle
191 191 */
192 192 void
193 193 unm_pci_free_consistent(ddi_dma_handle_t *dma_handle,
194 194 ddi_acc_handle_t *acc_handle)
195 195 {
196 196 int err;
197 197
198 198 err = ddi_dma_unbind_handle(*dma_handle);
199 199 if (err != DDI_SUCCESS) {
200 200 cmn_err(CE_WARN, "%s: Error unbinding memory\n", __func__);
201 201 return;
202 202 }
203 203
204 204 ddi_dma_mem_free(acc_handle);
205 205 ddi_dma_free_handle(dma_handle);
206 206 }
207 207
208 208 static uint32_t msi_tgt_status[] = {
209 209 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
210 210 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
211 211 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
212 212 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
213 213 };
214 214
215 215 static void
216 216 unm_nic_disable_int(unm_adapter *adapter)
217 217 {
218 218 __uint32_t temp = 0;
219 219
220 220 adapter->unm_nic_hw_write_wx(adapter, adapter->interrupt_crb,
221 221 &temp, 4);
222 222 }
223 223
224 224 static inline int
225 225 unm_nic_clear_int(unm_adapter *adapter)
226 226 {
227 227 uint32_t mask, temp, our_int, status;
228 228
229 229 UNM_READ_LOCK(&adapter->adapter_lock);
230 230
231 231 /* check whether it's our interrupt */
232 232 if (!UNM_IS_MSI_FAMILY(adapter)) {
233 233
234 234 /* Legacy Interrupt case */
235 235 adapter->unm_nic_pci_read_immediate(adapter, ISR_INT_VECTOR,
236 236 &status);
237 237
238 238 if (!(status & adapter->legacy_intr.int_vec_bit)) {
239 239 UNM_READ_UNLOCK(&adapter->adapter_lock);
240 240 return (-1);
241 241 }
242 242
243 243 if (adapter->ahw.revision_id >= NX_P3_B1) {
244 244 adapter->unm_nic_pci_read_immediate(adapter,
245 245 ISR_INT_STATE_REG, &temp);
246 246 if (!ISR_IS_LEGACY_INTR_TRIGGERED(temp)) {
247 247 UNM_READ_UNLOCK(&adapter->adapter_lock);
248 248 return (-1);
249 249 }
250 250 } else if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
251 251 our_int = adapter->unm_nic_pci_read_normalize(adapter,
252 252 CRB_INT_VECTOR);
253 253
254 254 /* FIXME: Assumes pci_func is same as ctx */
255 255 if ((our_int & (0x80 << adapter->portnum)) == 0) {
256 256 if (our_int != 0) {
257 257 /* not our interrupt */
258 258 UNM_READ_UNLOCK(&adapter->adapter_lock);
259 259 return (-1);
260 260 }
261 261 }
262 262 temp = our_int & ~((u32)(0x80 << adapter->portnum));
263 263 adapter->unm_nic_pci_write_normalize(adapter,
264 264 CRB_INT_VECTOR, temp);
265 265 }
266 266
267 267 if (adapter->fw_major < 4)
268 268 unm_nic_disable_int(adapter);
269 269
270 270 /* claim interrupt */
271 271 temp = 0xffffffff;
272 272 adapter->unm_nic_pci_write_immediate(adapter,
273 273 adapter->legacy_intr.tgt_status_reg, &temp);
274 274
275 275 adapter->unm_nic_pci_read_immediate(adapter, ISR_INT_VECTOR,
276 276 &mask);
277 277
278 278 /*
279 279 * Read again to make sure the legacy interrupt message got
280 280 * flushed out
281 281 */
282 282 adapter->unm_nic_pci_read_immediate(adapter, ISR_INT_VECTOR,
283 283 &mask);
284 284 } else if (adapter->flags & UNM_NIC_MSI_ENABLED) {
285 285 /* clear interrupt */
286 286 temp = 0xffffffff;
287 287 adapter->unm_nic_pci_write_immediate(adapter,
288 288 msi_tgt_status[adapter->ahw.pci_func], &temp);
289 289 }
290 290
291 291 UNM_READ_UNLOCK(&adapter->adapter_lock);
292 292
293 293 return (0);
294 294 }
295 295
296 296 static void
297 297 unm_nic_enable_int(unm_adapter *adapter)
298 298 {
299 299 u32 temp = 1;
300 300
301 301 adapter->unm_nic_hw_write_wx(adapter, adapter->interrupt_crb,
302 302 &temp, 4);
303 303
304 304 if (!UNM_IS_MSI_FAMILY(adapter)) {
305 305 u32 mask = 0xfbff;
306 306
307 307 adapter->unm_nic_pci_write_immediate(adapter,
308 308 adapter->legacy_intr.tgt_mask_reg, &mask);
309 309 }
310 310 }
311 311
312 312 static void
313 313 unm_free_hw_resources(unm_adapter *adapter)
314 314 {
315 315 unm_recv_context_t *recv_ctx;
316 316 unm_rcv_desc_ctx_t *rcv_desc;
317 317 int ctx, ring;
318 318
319 319 if (adapter->context_alloced == 1) {
320 320 netxen_destroy_rxtx(adapter);
321 321 adapter->context_alloced = 0;
322 322 }
323 323
324 324 if (adapter->ctxDesc != NULL) {
325 325 unm_pci_free_consistent(&adapter->ctxDesc_dma_handle,
326 326 &adapter->ctxDesc_acc_handle);
327 327 adapter->ctxDesc = NULL;
328 328 }
329 329
330 330 if (adapter->ahw.cmdDescHead != NULL) {
331 331 unm_pci_free_consistent(&adapter->ahw.cmd_desc_dma_handle,
332 332 &adapter->ahw.cmd_desc_acc_handle);
333 333 adapter->ahw.cmdDesc_physAddr = NULL;
334 334 adapter->ahw.cmdDescHead = NULL;
335 335 }
336 336
337 337 for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
338 338 recv_ctx = &adapter->recv_ctx[ctx];
339 339 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
340 340 rcv_desc = &recv_ctx->rcv_desc[ring];
341 341
342 342 if (rcv_desc->desc_head != NULL) {
343 343 unm_pci_free_consistent(
344 344 &rcv_desc->rx_desc_dma_handle,
345 345 &rcv_desc->rx_desc_acc_handle);
346 346 rcv_desc->desc_head = NULL;
347 347 rcv_desc->phys_addr = NULL;
348 348 }
349 349 }
350 350
351 351 if (recv_ctx->rcvStatusDescHead != NULL) {
352 352 unm_pci_free_consistent(
353 353 &recv_ctx->status_desc_dma_handle,
354 354 &recv_ctx->status_desc_acc_handle);
355 355 recv_ctx->rcvStatusDesc_physAddr = NULL;
356 356 recv_ctx->rcvStatusDescHead = NULL;
357 357 }
358 358 }
359 359 }
360 360
361 361 static void
362 362 cleanup_adapter(struct unm_adapter_s *adapter)
363 363 {
364 364 ddi_regs_map_free(&(adapter->regs_handle));
365 365 ddi_regs_map_free(&(adapter->db_handle));
366 366 kmem_free(adapter, sizeof (unm_adapter));
367 367 }
368 368
369 369 void
370 370 unm_nic_remove(unm_adapter *adapter)
371 371 {
372 372 mac_link_update(adapter->mach, LINK_STATE_DOWN);
373 373 unm_nic_stop_port(adapter);
374 374
375 375 if (adapter->interrupt_crb) {
376 376 UNM_READ_LOCK(&adapter->adapter_lock);
377 377 unm_nic_disable_int(adapter);
378 378 UNM_READ_UNLOCK(&adapter->adapter_lock);
379 379 }
380 380 (void) untimeout(adapter->watchdog_timer);
381 381
382 382 unm_free_hw_resources(adapter);
383 383
384 384 if (adapter->is_up == UNM_ADAPTER_UP_MAGIC)
385 385 destroy_rxtx_rings(adapter);
386 386
387 387 if (adapter->portnum == 0)
388 388 unm_free_dummy_dma(adapter);
389 389
390 390 unm_destroy_intr(adapter);
391 391
392 392 ddi_set_driver_private(adapter->dip, NULL);
393 393 cleanup_adapter(adapter);
394 394 }
395 395
396 396 static int
397 397 init_firmware(unm_adapter *adapter)
398 398 {
399 399 uint32_t state = 0, loops = 0, tempout;
400 400
401 401 /* Window 1 call */
402 402 UNM_READ_LOCK(&adapter->adapter_lock);
403 403 state = adapter->unm_nic_pci_read_normalize(adapter, CRB_CMDPEG_STATE);
404 404 UNM_READ_UNLOCK(&adapter->adapter_lock);
405 405
406 406 if (state == PHAN_INITIALIZE_ACK)
407 407 return (0);
408 408
409 409 while (state != PHAN_INITIALIZE_COMPLETE && loops < 200000) {
410 410 drv_usecwait(100);
411 411 /* Window 1 call */
412 412 UNM_READ_LOCK(&adapter->adapter_lock);
413 413 state = adapter->unm_nic_pci_read_normalize(adapter,
414 414 CRB_CMDPEG_STATE);
415 415 UNM_READ_UNLOCK(&adapter->adapter_lock);
416 416 loops++;
417 417 }
418 418
419 419 if (loops >= 200000) {
420 420 cmn_err(CE_WARN, "%s%d: CmdPeg init incomplete:%x\n",
421 421 adapter->name, adapter->instance, state);
422 422 return (-EIO);
423 423 }
424 424
425 425 /* Window 1 call */
426 426 UNM_READ_LOCK(&adapter->adapter_lock);
427 427 tempout = INTR_SCHEME_PERPORT;
428 428 adapter->unm_nic_hw_write_wx(adapter, CRB_NIC_CAPABILITIES_HOST,
429 429 &tempout, 4);
430 430 tempout = MSI_MODE_MULTIFUNC;
431 431 adapter->unm_nic_hw_write_wx(adapter, CRB_NIC_MSI_MODE_HOST,
432 432 &tempout, 4);
433 433 tempout = MPORT_MULTI_FUNCTION_MODE;
434 434 adapter->unm_nic_hw_write_wx(adapter, CRB_MPORT_MODE, &tempout, 4);
435 435 tempout = PHAN_INITIALIZE_ACK;
436 436 adapter->unm_nic_hw_write_wx(adapter, CRB_CMDPEG_STATE, &tempout, 4);
437 437 UNM_READ_UNLOCK(&adapter->adapter_lock);
438 438
439 439 return (0);
440 440 }
441 441
442 442 /*
443 443 * Utility to synchronize with receive peg.
444 444 * Returns 0 on success
445 445 * -EIO on error
446 446 */
447 447 int
448 448 receive_peg_ready(struct unm_adapter_s *adapter)
449 449 {
450 450 uint32_t state = 0;
451 451 int loops = 0, err = 0;
452 452
453 453 /* Window 1 call */
454 454 UNM_READ_LOCK(&adapter->adapter_lock);
455 455 state = adapter->unm_nic_pci_read_normalize(adapter, CRB_RCVPEG_STATE);
456 456 UNM_READ_UNLOCK(&adapter->adapter_lock);
457 457
458 458 while ((state != PHAN_PEG_RCV_INITIALIZED) && (loops < 20000)) {
459 459 drv_usecwait(100);
460 460 /* Window 1 call */
461 461
462 462 UNM_READ_LOCK(&adapter->adapter_lock);
463 463 state = adapter->unm_nic_pci_read_normalize(adapter,
464 464 CRB_RCVPEG_STATE);
465 465 UNM_READ_UNLOCK(&adapter->adapter_lock);
466 466
467 467 loops++;
468 468 }
469 469
470 470 if (loops >= 20000) {
471 471 cmn_err(CE_WARN, "Receive Peg initialization incomplete 0x%x\n",
472 472 state);
473 473 err = -EIO;
474 474 }
475 475
476 476 return (err);
477 477 }
478 478
479 479 /*
480 480 * check if the firmware has been downloaded and ready to run and
481 481 * setup the address for the descriptors in the adapter
482 482 */
483 483 static int
484 484 unm_nic_hw_resources(unm_adapter *adapter)
485 485 {
486 486 hardware_context *hw = &adapter->ahw;
487 487 void *addr;
488 488 int err;
489 489 int ctx, ring;
490 490 unm_recv_context_t *recv_ctx;
491 491 unm_rcv_desc_ctx_t *rcv_desc;
492 492 ddi_dma_cookie_t cookie;
493 493 int size;
494 494
495 495 if (err = receive_peg_ready(adapter))
496 496 return (err);
497 497
498 498 size = (sizeof (RingContext) + sizeof (uint32_t));
499 499
500 500 err = unm_pci_alloc_consistent(adapter,
501 501 size, (caddr_t *)&addr, &cookie,
502 502 &adapter->ctxDesc_dma_handle,
503 503 &adapter->ctxDesc_acc_handle);
504 504 if (err != DDI_SUCCESS) {
505 505 cmn_err(CE_WARN, "Failed to allocate HW context\n");
506 506 return (err);
507 507 }
508 508
509 509 adapter->ctxDesc_physAddr = cookie.dmac_laddress;
510 510
511 511 (void) memset(addr, 0, sizeof (RingContext));
512 512
513 513 adapter->ctxDesc = (RingContext *) addr;
514 514 adapter->ctxDesc->CtxId = adapter->portnum;
515 515 adapter->ctxDesc->CMD_CONSUMER_OFFSET =
516 516 adapter->ctxDesc_physAddr + sizeof (RingContext);
517 517 adapter->cmdConsumer =
518 518 (uint32_t *)(uintptr_t)(((char *)addr) + sizeof (RingContext));
519 519
520 520 ASSERT(!((unsigned long)adapter->ctxDesc_physAddr & 0x3f));
521 521
522 522 /*
523 523 * Allocate command descriptor ring.
524 524 */
525 525 size = (sizeof (cmdDescType0_t) * adapter->MaxTxDescCount);
526 526 err = unm_pci_alloc_consistent(adapter,
527 527 size, (caddr_t *)&addr, &cookie,
528 528 &hw->cmd_desc_dma_handle,
529 529 &hw->cmd_desc_acc_handle);
530 530 if (err != DDI_SUCCESS) {
531 531 cmn_err(CE_WARN, "Failed to allocate cmd desc ring\n");
532 532 return (err);
533 533 }
534 534
535 535 hw->cmdDesc_physAddr = cookie.dmac_laddress;
536 536 hw->cmdDescHead = (cmdDescType0_t *)addr;
537 537
538 538 for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
539 539 recv_ctx = &adapter->recv_ctx[ctx];
540 540
541 541 size = (sizeof (statusDesc_t) * adapter->MaxRxDescCount);
542 542 err = unm_pci_alloc_consistent(adapter,
543 543 size, (caddr_t *)&addr,
544 544 &recv_ctx->status_desc_dma_cookie,
545 545 &recv_ctx->status_desc_dma_handle,
546 546 &recv_ctx->status_desc_acc_handle);
547 547 if (err != DDI_SUCCESS) {
548 548 cmn_err(CE_WARN, "Failed to allocate sts desc ring\n");
549 549 goto free_cmd_desc;
550 550 }
551 551
552 552 (void) memset(addr, 0, size);
553 553 recv_ctx->rcvStatusDesc_physAddr =
554 554 recv_ctx->status_desc_dma_cookie.dmac_laddress;
555 555 recv_ctx->rcvStatusDescHead = (statusDesc_t *)addr;
556 556
557 557 /* rds rings */
558 558 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
559 559 rcv_desc = &recv_ctx->rcv_desc[ring];
560 560
561 561 size = (sizeof (rcvDesc_t) * adapter->MaxRxDescCount);
562 562 err = unm_pci_alloc_consistent(adapter,
563 563 size, (caddr_t *)&addr,
564 564 &rcv_desc->rx_desc_dma_cookie,
565 565 &rcv_desc->rx_desc_dma_handle,
566 566 &rcv_desc->rx_desc_acc_handle);
567 567 if (err != DDI_SUCCESS) {
568 568 cmn_err(CE_WARN, "Failed to allocate "
569 569 "rx desc ring %d\n", ring);
570 570 goto free_status_desc;
571 571 }
572 572
573 573 rcv_desc->phys_addr =
574 574 rcv_desc->rx_desc_dma_cookie.dmac_laddress;
575 575 rcv_desc->desc_head = (rcvDesc_t *)addr;
576 576 }
577 577 }
578 578
579 579 if (err = netxen_create_rxtx(adapter))
580 580 goto free_statusrx_desc;
581 581 adapter->context_alloced = 1;
582 582
583 583 return (DDI_SUCCESS);
584 584
585 585 free_statusrx_desc:
586 586 free_status_desc:
587 587 free_cmd_desc:
588 588 unm_free_hw_resources(adapter);
589 589
590 590 return (err);
591 591 }
592 592
593 593 void unm_desc_dma_sync(ddi_dma_handle_t handle, uint_t start, uint_t count,
594 594 uint_t range, uint_t unit_size, uint_t direction)
595 595 {
596 596 if ((start + count) < range) {
597 597 (void) ddi_dma_sync(handle, start * unit_size,
598 598 count * unit_size, direction);
599 599 } else {
600 600 (void) ddi_dma_sync(handle, start * unit_size, 0, direction);
601 601 (void) ddi_dma_sync(handle, 0,
602 602 (start + count - range) * unit_size, direction);
603 603 }
604 604 }
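
A worked example of the wrap-around branch: with a ring of, say, range = 1024 descriptors, syncing start = 1000 with count = 60 runs past the end of the ring, so two calls are issued: one covering the tail from descriptor 1000 onward, and one covering the wrapped head, descriptors 0 through 35 ((1000 + 60) - 1024 = 36 entries).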
605 605
606 606 static uint32_t crb_cmd_producer[4] = { CRB_CMD_PRODUCER_OFFSET,
607 607 CRB_CMD_PRODUCER_OFFSET_1, CRB_CMD_PRODUCER_OFFSET_2,
608 608 CRB_CMD_PRODUCER_OFFSET_3 };
609 609
610 610 static uint32_t crb_cmd_consumer[4] = { CRB_CMD_CONSUMER_OFFSET,
611 611 CRB_CMD_CONSUMER_OFFSET_1, CRB_CMD_CONSUMER_OFFSET_2,
612 612 CRB_CMD_CONSUMER_OFFSET_3 };
613 613
614 614 void
615 615 unm_nic_update_cmd_producer(struct unm_adapter_s *adapter,
616 616 uint32_t crb_producer)
617 617 {
618 618 int data = crb_producer;
619 619
620 620 if (adapter->crb_addr_cmd_producer) {
621 621 UNM_READ_LOCK(&adapter->adapter_lock);
622 622 adapter->unm_nic_hw_write_wx(adapter,
623 623 adapter->crb_addr_cmd_producer, &data, 4);
624 624 UNM_READ_UNLOCK(&adapter->adapter_lock);
625 625 }
626 626 }
627 627
628 628 static void
629 629 unm_nic_update_cmd_consumer(struct unm_adapter_s *adapter,
630 630 uint32_t crb_producer)
631 631 {
632 632 int data = crb_producer;
633 633
634 634 if (adapter->crb_addr_cmd_consumer)
635 635 adapter->unm_nic_hw_write_wx(adapter,
636 636 adapter->crb_addr_cmd_consumer, &data, 4);
637 637 }
638 638
639 639 /*
640 640 * Looks for type of packet and sets opcode accordingly
641 641 * so that checksum offload can be used.
642 642 */
643 643 static void
644 644 unm_tx_csum(cmdDescType0_t *desc, mblk_t *mp, pktinfo_t *pktinfo)
645 645 {
646 646 if (pktinfo->mac_hlen == sizeof (struct ether_vlan_header))
647 647 desc->u1.s1.flags = FLAGS_VLAN_TAGGED;
648 648
649 649 if (pktinfo->etype == htons(ETHERTYPE_IP)) {
650 650 uint32_t start, flags;
651 651
652 652 mac_hcksum_get(mp, &start, NULL, NULL, NULL, &flags);
653 653 if ((flags & (HCK_FULLCKSUM | HCK_IPV4_HDRCKSUM)) == 0)
654 654 return;
655 655
656 656 /*
657 657 * For TCP/UDP, ask hardware to do both IP header and
658 658 * full checksum, even if stack has already done one or
659 659 * the other. Hardware will always get it correct even
660 660 * if stack has already done it.
661 661 */
662 662 switch (pktinfo->l4_proto) {
663 663 case IPPROTO_TCP:
664 664 desc->u1.s1.opcode = TX_TCP_PKT;
665 665 break;
666 666 case IPPROTO_UDP:
667 667 desc->u1.s1.opcode = TX_UDP_PKT;
668 668 break;
669 669 default:
670 670 /* Must be here with HCK_IPV4_HDRCKSUM */
671 671 desc->u1.s1.opcode = TX_IP_PKT;
672 672 return;
673 673 }
674 674
675 675 desc->u1.s1.ipHdrOffset = pktinfo->mac_hlen;
676 676 desc->u1.s1.tcpHdrOffset = pktinfo->mac_hlen + pktinfo->ip_hlen;
677 677 }
678 678 }
679 679
680 680 /*
681 681 * For IP/UDP/TCP checksum offload, this checks for MAC+IP header in one
682 682 * contiguous block ending at 8 byte aligned address as required by hardware.
683 683 * Caller assumes pktinfo->total_len will be updated by this function and
684 684 * if pktinfo->etype is set to 0, it will need to linearize the mblk and
685 685 * invoke unm_update_pkt_info() to determine ethertype, IP header len and
686 686 * protocol.
687 687 */
688 688 static boolean_t
689 689 unm_get_pkt_info(mblk_t *mp, pktinfo_t *pktinfo)
690 690 {
691 691 mblk_t *bp;
692 692 ushort_t type;
693 693
694 694 (void) memset(pktinfo, 0, sizeof (pktinfo_t));
695 695
696 696 for (bp = mp; bp != NULL; bp = bp->b_cont) {
697 697 if (MBLKL(bp) == 0)
698 698 continue;
699 699 pktinfo->mblk_no++;
700 700 pktinfo->total_len += MBLKL(bp);
701 701 }
702 702
703 703 if (MBLKL(mp) < (sizeof (struct ether_header) + sizeof (ipha_t)))
704 704 return (B_FALSE);
705 705
706 706 /*
707 707 * We just need a 2-byte aligned (i.e. not odd) address, since
708 708 * ether_type is a ushort.
709 709 */
710 710 if ((uintptr_t)mp->b_rptr & 1)
711 711 return (B_FALSE);
712 712
713 713 type = ((struct ether_header *)(uintptr_t)mp->b_rptr)->ether_type;
714 714 if (type == htons(ETHERTYPE_VLAN)) {
715 715 if (MBLKL(mp) < (sizeof (struct ether_vlan_header) +
716 716 sizeof (ipha_t)))
717 717 return (B_FALSE);
718 718 type = ((struct ether_vlan_header *) \
719 719 (uintptr_t)mp->b_rptr)->ether_type;
720 720 pktinfo->mac_hlen = sizeof (struct ether_vlan_header);
721 721 } else {
722 722 pktinfo->mac_hlen = sizeof (struct ether_header);
723 723 }
724 724 pktinfo->etype = type;
725 725
726 726 if (pktinfo->etype == htons(ETHERTYPE_IP)) {
727 727 uchar_t *ip_off = mp->b_rptr + pktinfo->mac_hlen;
728 728
729 729 pktinfo->ip_hlen = IPH_HDR_LENGTH((uintptr_t)ip_off);
730 730 pktinfo->l4_proto =
731 731 ((ipha_t *)(uintptr_t)ip_off)->ipha_protocol;
732 732
733 733 /* IP header not aligned to a quadword boundary? */
734 734 if ((unsigned long)(ip_off + pktinfo->ip_hlen) % 8 != 0)
735 735 return (B_FALSE);
736 736 }
737 737
738 738 return (B_TRUE);
739 739 }
740 740
741 741 static void
742 742 unm_update_pkt_info(char *ptr, pktinfo_t *pktinfo)
743 743 {
744 744 ushort_t type;
745 745
746 746 type = ((struct ether_header *)(uintptr_t)ptr)->ether_type;
747 747 if (type == htons(ETHERTYPE_VLAN)) {
748 748 type = ((struct ether_vlan_header *)(uintptr_t)ptr)->ether_type;
749 749 pktinfo->mac_hlen = sizeof (struct ether_vlan_header);
750 750 } else {
751 751 pktinfo->mac_hlen = sizeof (struct ether_header);
752 752 }
753 753 pktinfo->etype = type;
754 754
755 755 if (pktinfo->etype == htons(ETHERTYPE_IP)) {
756 756 char *ipp = ptr + pktinfo->mac_hlen;
757 757
758 758 pktinfo->ip_hlen = IPH_HDR_LENGTH((uintptr_t)ipp);
759 759 pktinfo->l4_proto = ((ipha_t *)(uintptr_t)ipp)->ipha_protocol;
760 760 }
761 761 }
762 762
763 763 static boolean_t
764 764 unm_send_copy(struct unm_adapter_s *adapter, mblk_t *mp, pktinfo_t *pktinfo)
765 765 {
766 766 hardware_context *hw;
767 767 u32 producer = 0;
768 768 cmdDescType0_t *hwdesc;
769 769 struct unm_cmd_buffer *pbuf = NULL;
770 770 u32 mblen;
771 771 int no_of_desc = 1;
772 772 int MaxTxDescCount;
773 773 mblk_t *bp;
774 774 char *txb;
775 775
776 776 hw = &adapter->ahw;
777 777 MaxTxDescCount = adapter->MaxTxDescCount;
778 778
779 779 UNM_SPIN_LOCK(&adapter->tx_lock);
780 780 membar_enter();
781 781
782 782 if (find_diff_among(adapter->cmdProducer, adapter->lastCmdConsumer,
783 783 MaxTxDescCount) <= 2) {
784 784 adapter->stats.outofcmddesc++;
785 785 adapter->resched_needed = 1;
786 786 membar_exit();
787 787 UNM_SPIN_UNLOCK(&adapter->tx_lock);
788 788 return (B_FALSE);
789 789 }
790 790 adapter->freecmds -= no_of_desc;
791 791
792 792 producer = adapter->cmdProducer;
793 793
794 794 adapter->cmdProducer = get_index_range(adapter->cmdProducer,
795 795 MaxTxDescCount, no_of_desc);
796 796
797 797 hwdesc = &hw->cmdDescHead[producer];
798 798 (void) memset(hwdesc, 0, sizeof (cmdDescType0_t));
799 799 pbuf = &adapter->cmd_buf_arr[producer];
800 800
801 801 pbuf->msg = NULL;
802 802 pbuf->head = NULL;
803 803 pbuf->tail = NULL;
804 804
805 805 txb = pbuf->dma_area.vaddr;
806 806
807 807 for (bp = mp; bp != NULL; bp = bp->b_cont) {
808 808 if ((mblen = MBLKL(bp)) == 0)
809 809 continue;
810 810 bcopy(bp->b_rptr, txb, mblen);
811 811 txb += mblen;
812 812 }
813 813
814 814 /*
815 815 * Determine metadata if not previously done due to fragmented mblk.
816 816 */
817 817 if (pktinfo->etype == 0)
818 818 unm_update_pkt_info(pbuf->dma_area.vaddr, pktinfo);
819 819
820 820 (void) ddi_dma_sync(pbuf->dma_area.dma_hdl,
821 821 0, pktinfo->total_len, DDI_DMA_SYNC_FORDEV);
822 822
823 823 /* hwdesc->u1.s1.tcpHdrOffset = 0; */
824 824 /* hwdesc->mss = 0; */
825 825 hwdesc->u1.s1.opcode = TX_ETHER_PKT;
826 826 hwdesc->u3.s1.port = adapter->portnum;
827 827 hwdesc->u3.s1.ctx_id = adapter->portnum;
828 828
829 829 hwdesc->u6.s1.buffer1Length = pktinfo->total_len;
830 830 hwdesc->u5.AddrBuffer1 = pbuf->dma_area.dma_addr;
831 831 hwdesc->u1.s1.numOfBuffers = 1;
832 832 hwdesc->u1.s1.totalLength = pktinfo->total_len;
833 833
834 834 unm_tx_csum(hwdesc, mp, pktinfo);
835 835
836 836 unm_desc_dma_sync(hw->cmd_desc_dma_handle,
837 837 producer,
838 838 no_of_desc,
839 839 MaxTxDescCount,
840 840 sizeof (cmdDescType0_t),
841 841 DDI_DMA_SYNC_FORDEV);
842 842
843 843 hw->cmdProducer = adapter->cmdProducer;
844 844 unm_nic_update_cmd_producer(adapter, adapter->cmdProducer);
845 845
846 846 adapter->stats.txbytes += pktinfo->total_len;
847 847 adapter->stats.xmitfinished++;
848 848 adapter->stats.txcopyed++;
849 849 UNM_SPIN_UNLOCK(&adapter->tx_lock);
850 850
851 851 freemsg(mp);
852 852 return (B_TRUE);
853 853 }
854 854
855 855 /* Should be called with adapter->tx_lock held. */
856 856 static void
857 857 unm_return_dma_handle(unm_adapter *adapter, unm_dmah_node_t *head,
858 858 unm_dmah_node_t *tail, uint32_t num)
859 859 {
860 860 ASSERT(tail != NULL);
861 861 tail->next = adapter->dmahdl_pool;
862 862 adapter->dmahdl_pool = head;
863 863 adapter->freehdls += num;
864 864 }
865 865
866 866 static unm_dmah_node_t *
867 867 unm_reserve_dma_handle(unm_adapter* adapter)
868 868 {
869 869 unm_dmah_node_t *dmah = NULL;
870 870
871 871 dmah = adapter->dmahdl_pool;
872 872 if (dmah != NULL) {
873 873 adapter->dmahdl_pool = dmah->next;
874 874 dmah->next = NULL;
875 875 adapter->freehdls--;
876 876 membar_exit();
877 877 }
878 878
879 879 return (dmah);
880 880 }
881 881
882 882 static boolean_t
883 883 unm_send_mapped(struct unm_adapter_s *adapter, mblk_t *mp, pktinfo_t *pktinfo)
884 884 {
885 885 hardware_context *hw;
886 886 u32 producer = 0;
887 887 u32 saved_producer = 0;
888 888 cmdDescType0_t *hwdesc;
889 889 struct unm_cmd_buffer *pbuf = NULL;
890 890 int no_of_desc;
891 891 int k;
892 892 int MaxTxDescCount;
893 893 mblk_t *bp;
894 894
895 895 unm_dmah_node_t *dmah, *head = NULL, *tail = NULL, *hdlp;
896 896 ddi_dma_cookie_t cookie[MAX_COOKIES_PER_CMD + 1];
897 897 int ret, i;
898 898 uint32_t hdl_reserved = 0;
899 899 uint32_t mblen;
900 900 uint32_t ncookies, index = 0, total_cookies = 0;
901 901
902 902 MaxTxDescCount = adapter->MaxTxDescCount;
903 903
904 904 UNM_SPIN_LOCK(&adapter->tx_lock);
905 905
906 906 /* bind all the mblks of the packet first */
907 907 for (bp = mp; bp != NULL; bp = bp->b_cont) {
908 908 mblen = MBLKL(bp);
909 909 if (mblen == 0)
910 910 continue;
911 911
912 912 dmah = unm_reserve_dma_handle(adapter);
913 913 if (dmah == NULL) {
914 914 adapter->stats.outoftxdmahdl++;
915 915 goto err_map;
916 916 }
917 917
918 918 ret = ddi_dma_addr_bind_handle(dmah->dmahdl,
919 919 NULL, (caddr_t)bp->b_rptr, mblen,
920 920 DDI_DMA_STREAMING | DDI_DMA_WRITE,
921 921 DDI_DMA_DONTWAIT, NULL, &cookie[index], &ncookies);
922 922
923 923 if (ret != DDI_DMA_MAPPED)
924 924 goto err_map;
925 925
926 926 if (tail == NULL) {
927 927 head = tail = dmah;
928 928 } else {
929 929 tail->next = dmah;
930 930 tail = dmah;
931 931 }
932 932 hdl_reserved++;
933 933
934 934 total_cookies += ncookies;
935 935 if (total_cookies > MAX_COOKIES_PER_CMD) {
936 936 dmah = NULL;
937 937 goto err_map;
938 938 }
939 939
940 940 if (index == 0) {
941 941 size_t hsize = cookie[0].dmac_size;
942 942
943 943 /*
944 944 * For TCP/UDP packets with checksum offload,
945 945 * MAC/IP headers need to be contiguous. Otherwise,
946 946 * there must be at least 16 bytes in the first
947 947 * descriptor.
948 948 */
949 949 if ((pktinfo->l4_proto == IPPROTO_TCP) ||
950 950 (pktinfo->l4_proto == IPPROTO_UDP)) {
951 951 if (hsize < (pktinfo->mac_hlen +
952 952 pktinfo->ip_hlen)) {
953 953 dmah = NULL;
954 954 goto err_map;
955 955 }
956 956 } else {
957 957 if (hsize < 16) {
958 958 dmah = NULL;
959 959 goto err_map;
960 960 }
961 961 }
962 962 }
963 963
964 964 index++;
965 965 ncookies--;
966 966 for (i = 0; i < ncookies; i++, index++)
967 967 ddi_dma_nextcookie(dmah->dmahdl, &cookie[index]);
968 968 }
969 969
970 970 dmah = NULL;
971 971 hw = &adapter->ahw;
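/*
* Each command descriptor addresses up to four buffers (see the
* switch below), so round the cookie count up to a multiple of four.
*/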
972 972 no_of_desc = (total_cookies + 3) >> 2;
973 973
974 974 membar_enter();
975 975 if (find_diff_among(adapter->cmdProducer, adapter->lastCmdConsumer,
976 976 MaxTxDescCount) < no_of_desc+2) {
977 977 /*
978 978 * If we are going to be trying the copy path, no point
979 979 * scheduling an upcall when Tx resources are freed.
980 980 */
981 981 if (pktinfo->total_len > adapter->maxmtu) {
982 982 adapter->stats.outofcmddesc++;
983 983 adapter->resched_needed = 1;
984 984 }
985 985 membar_exit();
986 986 goto err_alloc_desc;
987 987 }
988 988 adapter->freecmds -= no_of_desc;
989 989
990 990 /* Copy the descriptors into the hardware */
991 991 producer = adapter->cmdProducer;
992 992 saved_producer = producer;
993 993 hwdesc = &hw->cmdDescHead[producer];
994 994 (void) memset(hwdesc, 0, sizeof (cmdDescType0_t));
995 995 pbuf = &adapter->cmd_buf_arr[producer];
996 996
997 997 pbuf->msg = mp;
998 998 pbuf->head = head;
999 999 pbuf->tail = tail;
1000 1000
1001 1001 hwdesc->u1.s1.numOfBuffers = total_cookies;
1002 1002 hwdesc->u1.s1.opcode = TX_ETHER_PKT;
1003 1003 hwdesc->u3.s1.port = adapter->portnum;
1004 1004 /* hwdesc->u1.s1.tcpHdrOffset = 0; */
1005 1005 /* hwdesc->mss = 0; */
1006 1006 hwdesc->u3.s1.ctx_id = adapter->portnum;
1007 1007 hwdesc->u1.s1.totalLength = pktinfo->total_len;
1008 1008 unm_tx_csum(hwdesc, mp, pktinfo);
1009 1009
1010 1010 for (i = k = 0; i < total_cookies; i++) {
1011 1011 if (k == 4) {
1012 1012 /* Move to the next descriptor */
1013 1013 k = 0;
1014 1014 producer = get_next_index(producer, MaxTxDescCount);
1015 1015 hwdesc = &hw->cmdDescHead[producer];
1016 1016 (void) memset(hwdesc, 0, sizeof (cmdDescType0_t));
1017 1017 }
1018 1018
1019 1019 switch (k) {
1020 1020 case 0:
1021 1021 hwdesc->u6.s1.buffer1Length = cookie[i].dmac_size;
1022 1022 hwdesc->u5.AddrBuffer1 = cookie[i].dmac_laddress;
1023 1023 break;
1024 1024 case 1:
1025 1025 hwdesc->u6.s1.buffer2Length = cookie[i].dmac_size;
1026 1026 hwdesc->u2.AddrBuffer2 = cookie[i].dmac_laddress;
1027 1027 break;
1028 1028 case 2:
1029 1029 hwdesc->u6.s1.buffer3Length = cookie[i].dmac_size;
1030 1030 hwdesc->u4.AddrBuffer3 = cookie[i].dmac_laddress;
1031 1031 break;
1032 1032 case 3:
1033 1033 hwdesc->u6.s1.buffer4Length = cookie[i].dmac_size;
1034 1034 hwdesc->u7.AddrBuffer4 = cookie[i].dmac_laddress;
1035 1035 break;
1036 1036 }
1037 1037 k++;
1038 1038 }
1039 1039
1040 1040 unm_desc_dma_sync(hw->cmd_desc_dma_handle, saved_producer, no_of_desc,
1041 1041 MaxTxDescCount, sizeof (cmdDescType0_t), DDI_DMA_SYNC_FORDEV);
1042 1042
1043 1043 adapter->cmdProducer = get_next_index(producer, MaxTxDescCount);
1044 1044 hw->cmdProducer = adapter->cmdProducer;
1045 1045 unm_nic_update_cmd_producer(adapter, adapter->cmdProducer);
1046 1046
1047 1047 adapter->stats.txbytes += pktinfo->total_len;
1048 1048 adapter->stats.xmitfinished++;
1049 1049 adapter->stats.txmapped++;
1050 1050 UNM_SPIN_UNLOCK(&adapter->tx_lock);
1051 1051 return (B_TRUE);
1052 1052
1053 1053 err_alloc_desc:
1054 1054 err_map:
1055 1055
1056 1056 hdlp = head;
1057 1057 while (hdlp != NULL) {
1058 1058 (void) ddi_dma_unbind_handle(hdlp->dmahdl);
1059 1059 hdlp = hdlp->next;
1060 1060 }
1061 1061
1062 1062 /*
1063 1063 * Add the handle that was reserved but whose bind failed to the list being returned
1064 1064 */
1065 1065 if (dmah != NULL) {
1066 1066 if (tail == NULL)
1067 1067 head = tail = dmah;
1068 1068 else {
1069 1069 tail->next = dmah;
1070 1070 tail = dmah;
1071 1071 }
1072 1072 hdl_reserved++;
1073 1073 }
1074 1074
1075 1075 if (head != NULL)
1076 1076 unm_return_dma_handle(adapter, head, tail, hdl_reserved);
1077 1077
1078 1078 UNM_SPIN_UNLOCK(&adapter->tx_lock);
1079 1079 return (B_FALSE);
1080 1080 }
1081 1081
1082 1082 static boolean_t
1083 1083 unm_nic_xmit_frame(unm_adapter *adapter, mblk_t *mp)
1084 1084 {
1085 1085 pktinfo_t pktinfo;
1086 1086 boolean_t status = B_FALSE, send_mapped;
1087 1087
1088 1088 adapter->stats.xmitcalled++;
1089 1089
1090 1090 send_mapped = unm_get_pkt_info(mp, &pktinfo);
1091 1091
1092 1092 if (pktinfo.total_len <= adapter->tx_bcopy_threshold ||
1093 1093 pktinfo.mblk_no >= MAX_COOKIES_PER_CMD)
1094 1094 send_mapped = B_FALSE;
1095 1095
1096 1096 if (send_mapped == B_TRUE)
1097 1097 status = unm_send_mapped(adapter, mp, &pktinfo);
1098 1098
1099 1099 if (status != B_TRUE) {
1100 1100 if (pktinfo.total_len <= adapter->maxmtu)
1101 1101 return (unm_send_copy(adapter, mp, &pktinfo));
1102 1102
1103 1103 /* message too large */
1104 1104 freemsg(mp);
1105 1105 adapter->stats.txdropped++;
1106 1106 status = B_TRUE;
1107 1107 }
1108 1108
1109 1109 return (status);
1110 1110 }
1111 1111
1112 1112 static int
1113 1113 unm_nic_check_temp(struct unm_adapter_s *adapter)
1114 1114 {
1115 1115 uint32_t temp, temp_state, temp_val;
1116 1116 int rv = 0;
1117 1117
1118 1118 if ((adapter->ahw.revision_id == NX_P3_A2) ||
1119 1119 (adapter->ahw.revision_id == NX_P3_A0))
1120 1120 return (0);
1121 1121
1122 1122 temp = adapter->unm_nic_pci_read_normalize(adapter, CRB_TEMP_STATE);
1123 1123
1124 1124 temp_state = nx_get_temp_state(temp);
1125 1125 temp_val = nx_get_temp_val(temp);
1126 1126
1127 1127 if (temp_state == NX_TEMP_PANIC) {
1128 1128 cmn_err(CE_WARN, "%s: Device temperature %d C exceeds "
1129 1129 "maximum allowed, device has been shut down\n",
1130 1130 unm_nic_driver_name, temp_val);
1131 1131 rv = 1;
1132 1132 } else if (temp_state == NX_TEMP_WARN) {
1133 1133 if (adapter->temp == NX_TEMP_NORMAL) {
1134 1134 cmn_err(CE_WARN, "%s: Device temperature %d C exceeds "
1135 1135 "operating range. Immediate action needed.\n",
1136 1136 unm_nic_driver_name, temp_val);
1137 1137 }
1138 1138 } else {
1139 1139 if (adapter->temp == NX_TEMP_WARN) {
1140 1140 cmn_err(CE_WARN, "%s: Device temperature is now %d "
1141 1141 "degrees C, back in the normal range.\n",
1142 1142 unm_nic_driver_name, temp_val);
1143 1143 }
1144 1144 }
1145 1145
1146 1146 adapter->temp = temp_state;
1147 1147 return (rv);
1148 1148 }
1149 1149
1150 1150 static void
1151 1151 unm_watchdog(unsigned long v)
1152 1152 {
1153 1153 unm_adapter *adapter = (unm_adapter *)v;
1154 1154
1155 1155 if ((adapter->portnum == 0) && unm_nic_check_temp(adapter)) {
1156 1156 /*
1157 1157 * We return without turning on the netdev queue as there
1158 1158 * was an overheated device
1159 1159 */
1160 1160 return;
1161 1161 }
1162 1162
1163 1163 unm_nic_handle_phy_intr(adapter);
1164 1164
1165 1165 /*
1166 1166 * This function schedules a call for itself.
1167 1167 */
1168 1168 adapter->watchdog_timer = timeout((void (*)(void *))&unm_watchdog,
1169 - (void *)adapter, 2 * drv_usectohz(1000000));
1169 + (void *)adapter, drv_sectohz(2));
1170 1170
1171 1171 }
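
The one-line change above is the point of this webrev: the watchdog rescheduling interval is now written directly in seconds via drv_sectohz() instead of the hand-rolled 2 * drv_usectohz(1000000). A minimal sketch of the intended equivalence, assuming drv_sectohz() is expressed in terms of drv_usectohz() and MICROSEC (1000000), as the name suggests:

	/* Both expressions yield the tick count for a 2-second timeout(9F). */
	clock_t old_way = 2 * drv_usectohz(1000000);
	clock_t new_way = drv_sectohz(2);	/* ~ drv_usectohz(2 * MICROSEC) */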
1172 1172
1173 1173 static void unm_nic_clear_stats(unm_adapter *adapter)
1174 1174 {
1175 1175 (void) memset(&adapter->stats, 0, sizeof (adapter->stats));
1176 1176 }
1177 1177
1178 1178 static void
1179 1179 unm_nic_poll(unm_adapter *adapter)
1180 1180 {
1181 1181 int work_done, tx_complete;
1182 1182
1183 1183 adapter->stats.polled++;
1184 1184
1185 1185 loop:
1186 1186 tx_complete = unm_process_cmd_ring(adapter);
1187 1187 work_done = unm_process_rcv_ring(adapter, NX_RX_MAXBUFS);
1188 1188 if ((!tx_complete) || (!(work_done < NX_RX_MAXBUFS)))
1189 1189 goto loop;
1190 1190
1191 1191 UNM_READ_LOCK(&adapter->adapter_lock);
1192 1192 unm_nic_enable_int(adapter);
1193 1193 UNM_READ_UNLOCK(&adapter->adapter_lock);
1194 1194 }
1195 1195
1196 1196 /* ARGSUSED */
1197 1197 uint_t
1198 1198 unm_intr(caddr_t data, caddr_t arg)
1199 1199 {
1200 1200 unm_adapter *adapter = (unm_adapter *)(uintptr_t)data;
1201 1201
1202 1202 if (unm_nic_clear_int(adapter))
1203 1203 return (DDI_INTR_UNCLAIMED);
1204 1204
1205 1205 unm_nic_poll(adapter);
1206 1206 return (DDI_INTR_CLAIMED);
1207 1207 }
1208 1208
1209 1209 /*
1210 1210 * This is invoked from receive isr. Due to the single threaded nature
1211 1211 * of the invocation, pool_lock acquisition is not necessary to protect
1212 1212 * pool_list.
1213 1213 */
1214 1214 static void
1215 1215 unm_free_rx_buffer(unm_rcv_desc_ctx_t *rcv_desc, unm_rx_buffer_t *rx_buffer)
1216 1216 {
1217 1217 /* mutex_enter(rcv_desc->pool_lock); */
1218 1218 rx_buffer->next = rcv_desc->pool_list;
1219 1219 rcv_desc->pool_list = rx_buffer;
1220 1220 rcv_desc->rx_buf_free++;
1221 1221 /* mutex_exit(rcv_desc->pool_lock); */
1222 1222 }
1223 1223
1224 1224 /*
1225 1225 * unm_process_rcv() send the received packet to the protocol stack.
1226 1226 */
1227 1227 static mblk_t *
1228 1228 unm_process_rcv(unm_adapter *adapter, statusDesc_t *desc)
1229 1229 {
1230 1230 unm_recv_context_t *recv_ctx = &(adapter->recv_ctx[0]);
1231 1231 unm_rx_buffer_t *rx_buffer;
1232 1232 mblk_t *mp;
1233 1233 u32 desc_ctx = desc->u1.s1.type;
1234 1234 unm_rcv_desc_ctx_t *rcv_desc = &recv_ctx->rcv_desc[desc_ctx];
1235 1235 u32 pkt_length = desc->u1.s1.totalLength;
1236 1236 int poff = desc->u1.s1.pkt_offset;
1237 1237 int index, cksum_flags, docopy;
1238 1238 int index_lo = desc->u1.s1.referenceHandle_lo;
1239 1239 char *vaddr;
1240 1240
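/*
* The card returns the rx buffer index split across the high and low
* reference-handle fields; reassemble it before the pool lookup.
*/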
1241 1241 index = ((desc->u1.s1.referenceHandle_hi << 4) | index_lo);
1242 1242
1243 1243 rx_buffer = index2rxbuf(rcv_desc, index);
1244 1244
1245 1245 if (rx_buffer == NULL) {
1246 1246 cmn_err(CE_WARN, "\r\nNULL rx_buffer idx=%d", index);
1247 1247 return (NULL);
1248 1248 }
1249 1249 vaddr = (char *)rx_buffer->dma_info.vaddr;
1250 1250 if (vaddr == NULL) {
1251 1251 cmn_err(CE_WARN, "\r\nNULL vaddr");
1252 1252 return (NULL);
1253 1253 }
1254 1254 rcv_desc->rx_desc_handled++;
1255 1255 rcv_desc->rx_buf_card--;
1256 1256
1257 1257 (void) ddi_dma_sync(rx_buffer->dma_info.dma_hdl, 0,
1258 1258 pkt_length + poff + (adapter->ahw.cut_through ? 0 :
1259 1259 IP_ALIGNMENT_BYTES), DDI_DMA_SYNC_FORCPU);
1260 1260
1261 1261 /*
1262 1262 * Copy packet into new allocated message buffer, if pkt_length
1263 1263 * is below copy threshold.
1264 1264 */
1265 1265 docopy = (pkt_length <= adapter->rx_bcopy_threshold) ? 1 : 0;
1266 1266
1267 1267 /*
1268 1268 * If card is running out of rx buffers, then attempt to allocate
1269 1269 * new mblk so we can feed this rx buffer back to card (we
1270 1270 * _could_ look at what's pending on free and recycle lists).
1271 1271 */
1272 1272 if (rcv_desc->rx_buf_card < NX_RX_THRESHOLD) {
1273 1273 docopy = 1;
1274 1274 adapter->stats.rxbufshort++;
1275 1275 }
1276 1276
1277 1277 if (docopy == 1) {
1278 1278 if ((mp = allocb(pkt_length + IP_ALIGNMENT_BYTES, 0)) == NULL) {
1279 1279 adapter->stats.allocbfailed++;
1280 1280 goto freebuf;
1281 1281 }
1282 1282
1283 1283 mp->b_rptr += IP_ALIGNMENT_BYTES;
1284 1284 vaddr += poff;
1285 1285 bcopy(vaddr, mp->b_rptr, pkt_length);
1286 1286 adapter->stats.rxcopyed++;
1287 1287 unm_free_rx_buffer(rcv_desc, rx_buffer);
1288 1288 } else {
1289 1289 mp = (mblk_t *)rx_buffer->mp;
1290 1290 if (mp == NULL) {
1291 1291 mp = desballoc(rx_buffer->dma_info.vaddr,
1292 1292 rcv_desc->dma_size, 0, &rx_buffer->rx_recycle);
1293 1293 if (mp == NULL) {
1294 1294 adapter->stats.desballocfailed++;
1295 1295 goto freebuf;
1296 1296 }
1297 1297 rx_buffer->mp = mp;
1298 1298 }
1299 1299 mp->b_rptr += poff;
1300 1300 adapter->stats.rxmapped++;
1301 1301 }
1302 1302
1303 1303 mp->b_wptr = (uchar_t *)((unsigned long)mp->b_rptr + pkt_length);
1304 1304
1305 1305 if (desc->u1.s1.status == STATUS_CKSUM_OK) {
1306 1306 adapter->stats.csummed++;
1307 1307 cksum_flags =
1308 1308 HCK_FULLCKSUM_OK | HCK_IPV4_HDRCKSUM_OK;
1309 1309 } else {
1310 1310 cksum_flags = 0;
1311 1311 }
1312 1312 mac_hcksum_set(mp, 0, 0, 0, 0, cksum_flags);
1313 1313
1314 1314 adapter->stats.no_rcv++;
1315 1315 adapter->stats.rxbytes += pkt_length;
1316 1316 adapter->stats.uphappy++;
1317 1317
1318 1318 return (mp);
1319 1319
1320 1320 freebuf:
1321 1321 unm_free_rx_buffer(rcv_desc, rx_buffer);
1322 1322 return (NULL);
1323 1323 }
1324 1324
1325 1325 /* Process Receive status ring */
1326 1326 static int
1327 1327 unm_process_rcv_ring(unm_adapter *adapter, int max)
1328 1328 {
1329 1329 unm_recv_context_t *recv_ctx = &(adapter->recv_ctx[0]);
1330 1330 statusDesc_t *desc_head = recv_ctx->rcvStatusDescHead;
1331 1331 statusDesc_t *desc = NULL;
1332 1332 uint32_t consumer, start;
1333 1333 int count = 0, ring;
1334 1334 mblk_t *mp;
1335 1335
1336 1336 start = consumer = recv_ctx->statusRxConsumer;
1337 1337
1338 1338 unm_desc_dma_sync(recv_ctx->status_desc_dma_handle, start, max,
1339 1339 adapter->MaxRxDescCount, sizeof (statusDesc_t),
1340 1340 DDI_DMA_SYNC_FORCPU);
1341 1341
1342 1342 while (count < max) {
1343 1343 desc = &desc_head[consumer];
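/* Stop at the first descriptor that the card still owns. */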
1344 1344 if (!(desc->u1.s1.owner & STATUS_OWNER_HOST))
1345 1345 break;
1346 1346
1347 1347 mp = unm_process_rcv(adapter, desc);
1348 1348 desc->u1.s1.owner = STATUS_OWNER_PHANTOM;
1349 1349
1350 1350 consumer = (consumer + 1) % adapter->MaxRxDescCount;
1351 1351 count++;
1352 1352 if (mp != NULL)
1353 1353 mac_rx(adapter->mach, NULL, mp);
1354 1354 }
1355 1355
1356 1356 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1357 1357 if (recv_ctx->rcv_desc[ring].rx_desc_handled > 0)
1358 1358 unm_post_rx_buffers_nodb(adapter, ring);
1359 1359 }
1360 1360
1361 1361 if (count) {
1362 1362 unm_desc_dma_sync(recv_ctx->status_desc_dma_handle, start,
1363 1363 count, adapter->MaxRxDescCount, sizeof (statusDesc_t),
1364 1364 DDI_DMA_SYNC_FORDEV);
1365 1365
1366 1366 /* update the consumer index in phantom */
1367 1367 recv_ctx->statusRxConsumer = consumer;
1368 1368
1369 1369 UNM_READ_LOCK(&adapter->adapter_lock);
1370 1370 adapter->unm_nic_hw_write_wx(adapter,
1371 1371 recv_ctx->host_sds_consumer, &consumer, 4);
1372 1372 UNM_READ_UNLOCK(&adapter->adapter_lock);
1373 1373 }
1374 1374
1375 1375 return (count);
1376 1376 }
1377 1377
1378 1378 /* Process Command status ring */
1379 1379 static int
1380 1380 unm_process_cmd_ring(struct unm_adapter_s *adapter)
1381 1381 {
1382 1382 u32 last_consumer;
1383 1383 u32 consumer;
1384 1384 int count = 0;
1385 1385 struct unm_cmd_buffer *buffer;
1386 1386 int done;
1387 1387 unm_dmah_node_t *dmah, *head = NULL, *tail = NULL;
1388 1388 uint32_t free_hdls = 0;
1389 1389
1390 1390 (void) ddi_dma_sync(adapter->ctxDesc_dma_handle, sizeof (RingContext),
1391 1391 sizeof (uint32_t), DDI_DMA_SYNC_FORCPU);
1392 1392
1393 1393 last_consumer = adapter->lastCmdConsumer;
1394 1394 consumer = *(adapter->cmdConsumer);
1395 1395
1396 1396 while (last_consumer != consumer) {
1397 1397 buffer = &adapter->cmd_buf_arr[last_consumer];
1398 1398 if (buffer->head != NULL) {
1399 1399 dmah = buffer->head;
1400 1400 while (dmah != NULL) {
1401 1401 (void) ddi_dma_unbind_handle(dmah->dmahdl);
1402 1402 dmah = dmah->next;
1403 1403 free_hdls++;
1404 1404 }
1405 1405
1406 1406 if (head == NULL) {
1407 1407 head = buffer->head;
1408 1408 tail = buffer->tail;
1409 1409 } else {
1410 1410 tail->next = buffer->head;
1411 1411 tail = buffer->tail;
1412 1412 }
1413 1413
1414 1414 buffer->head = NULL;
1415 1415 buffer->tail = NULL;
1416 1416
1417 1417 if (buffer->msg != NULL) {
1418 1418 freemsg(buffer->msg);
1419 1419 buffer->msg = NULL;
1420 1420 }
1421 1421 }
1422 1422
1423 1423 last_consumer = get_next_index(last_consumer,
1424 1424 adapter->MaxTxDescCount);
1425 1425 if (++count > NX_MAX_TXCOMPS)
1426 1426 break;
1427 1427 }
1428 1428
1429 1429 if (count) {
1430 1430 int doresched;
1431 1431
1432 1432 UNM_SPIN_LOCK(&adapter->tx_lock);
1433 1433 adapter->lastCmdConsumer = last_consumer;
1434 1434 adapter->freecmds += count;
1435 1435 membar_exit();
1436 1436
1437 1437 doresched = adapter->resched_needed;
1438 1438 if (doresched)
1439 1439 adapter->resched_needed = 0;
1440 1440
1441 1441 if (head != NULL)
1442 1442 unm_return_dma_handle(adapter, head, tail, free_hdls);
1443 1443
1444 1444 UNM_SPIN_UNLOCK(&adapter->tx_lock);
1445 1445
1446 1446 if (doresched)
1447 1447 mac_tx_update(adapter->mach);
1448 1448 }
1449 1449
1450 1450 (void) ddi_dma_sync(adapter->ctxDesc_dma_handle, sizeof (RingContext),
1451 1451 sizeof (uint32_t), DDI_DMA_SYNC_FORCPU);
1452 1452
1453 1453 consumer = *(adapter->cmdConsumer);
1454 1454 done = (adapter->lastCmdConsumer == consumer);
1455 1455
1456 1456 return (done);
1457 1457 }
1458 1458
1459 1459 /*
1460 1460 * This is invoked from receive isr, and at initialization time when no
1461 1461 * rx buffers have been posted to card. Due to the single threaded nature
1462 1462 * of the invocation, pool_lock acquisition is not necessary to protect
1463 1463 * pool_list.
1464 1464 */
1465 1465 static unm_rx_buffer_t *
1466 1466 unm_reserve_rx_buffer(unm_rcv_desc_ctx_t *rcv_desc)
1467 1467 {
1468 1468 unm_rx_buffer_t *rx_buffer = NULL;
1469 1469
1470 1470 /* mutex_enter(rcv_desc->pool_lock); */
1471 1471 if (rcv_desc->rx_buf_free) {
1472 1472 rx_buffer = rcv_desc->pool_list;
1473 1473 rcv_desc->pool_list = rx_buffer->next;
1474 1474 rx_buffer->next = NULL;
1475 1475 rcv_desc->rx_buf_free--;
1476 1476 } else {
1477 1477 mutex_enter(rcv_desc->recycle_lock);
1478 1478
1479 1479 if (rcv_desc->rx_buf_recycle) {
1480 1480 rcv_desc->pool_list = rcv_desc->recycle_list;
1481 1481 rcv_desc->recycle_list = NULL;
1482 1482 rcv_desc->rx_buf_free += rcv_desc->rx_buf_recycle;
1483 1483 rcv_desc->rx_buf_recycle = 0;
1484 1484
1485 1485 rx_buffer = rcv_desc->pool_list;
1486 1486 rcv_desc->pool_list = rx_buffer->next;
1487 1487 rx_buffer->next = NULL;
1488 1488 rcv_desc->rx_buf_free--;
1489 1489 }
1490 1490
1491 1491 mutex_exit(rcv_desc->recycle_lock);
1492 1492 }
1493 1493
1494 1494 /* mutex_exit(rcv_desc->pool_lock); */
1495 1495 return (rx_buffer);
1496 1496 }
1497 1497
1498 1498 static void
1499 1499 post_rx_doorbell(struct unm_adapter_s *adapter, uint32_t ringid, int count)
1500 1500 {
1501 1501 #define UNM_RCV_PEG_DB_ID 2
1502 1502 #define UNM_RCV_PRODUCER_OFFSET 0
1503 1503 ctx_msg msg = {0};
1504 1504
1505 1505 /*
1506 1506 * Write a doorbell msg to tell phanmon of change in
1507 1507 * receive ring producer
1508 1508 */
1509 1509 msg.PegId = UNM_RCV_PEG_DB_ID;
1510 1510 msg.privId = 1;
1511 1511 msg.Count = count;
1512 1512 msg.CtxId = adapter->portnum;
1513 1513 msg.Opcode = UNM_RCV_PRODUCER(ringid);
1514 1514 dbwritel(*((__uint32_t *)&msg),
1515 1515 (void *)(DB_NORMALIZE(adapter, UNM_RCV_PRODUCER_OFFSET)));
1516 1516 }
1517 1517
1518 1518 static int
1519 1519 unm_post_rx_buffers(struct unm_adapter_s *adapter, uint32_t ringid)
1520 1520 {
1521 1521 unm_recv_context_t *recv_ctx = &(adapter->recv_ctx[0]);
1522 1522 unm_rcv_desc_ctx_t *rcv_desc = &recv_ctx->rcv_desc[ringid];
1523 1523 unm_rx_buffer_t *rx_buffer;
1524 1524 rcvDesc_t *pdesc;
1525 1525 int count;
1526 1526
1527 1527 for (count = 0; count < rcv_desc->MaxRxDescCount; count++) {
1528 1528 rx_buffer = unm_reserve_rx_buffer(rcv_desc);
1529 1529 if (rx_buffer != NULL) {
1530 1530 pdesc = &rcv_desc->desc_head[count];
1531 1531 pdesc->referenceHandle = rxbuf2index(rcv_desc,
1532 1532 rx_buffer);
1533 1533 pdesc->flags = ringid;
1534 1534 pdesc->bufferLength = rcv_desc->dma_size;
1535 1535 pdesc->AddrBuffer = rx_buffer->dma_info.dma_addr;
1536 1536 }
1537 1537 else
1538 1538 return (DDI_FAILURE);
1539 1539 }
1540 1540
1541 1541 rcv_desc->producer = count % rcv_desc->MaxRxDescCount;
1542 1542 count--;
1543 1543 unm_desc_dma_sync(rcv_desc->rx_desc_dma_handle,
1544 1544 0, /* start */
1545 1545 count, /* count */
1546 1546 count, /* range */
1547 1547 sizeof (rcvDesc_t), /* unit_size */
1548 1548 DDI_DMA_SYNC_FORDEV); /* direction */
1549 1549
1550 1550 rcv_desc->rx_buf_card = rcv_desc->MaxRxDescCount;
1551 1551 UNM_READ_LOCK(&adapter->adapter_lock);
1552 1552 adapter->unm_nic_hw_write_wx(adapter, rcv_desc->host_rx_producer,
1553 1553 &count, 4);
1554 1554 if (adapter->fw_major < 4)
1555 1555 post_rx_doorbell(adapter, ringid, count);
1556 1556 UNM_READ_UNLOCK(&adapter->adapter_lock);
1557 1557
1558 1558 return (DDI_SUCCESS);
1559 1559 }
1560 1560
1561 1561 static void
1562 1562 unm_post_rx_buffers_nodb(struct unm_adapter_s *adapter,
1563 1563 uint32_t ringid)
1564 1564 {
1565 1565 unm_recv_context_t *recv_ctx = &(adapter->recv_ctx[0]);
1566 1566 unm_rcv_desc_ctx_t *rcv_desc = &recv_ctx->rcv_desc[ringid];
1567 1567 struct unm_rx_buffer *rx_buffer;
1568 1568 rcvDesc_t *pdesc;
1569 1569 int count, producer = rcv_desc->producer;
1570 1570 int last_producer = producer;
1571 1571
1572 1572 for (count = 0; count < rcv_desc->rx_desc_handled; count++) {
1573 1573 rx_buffer = unm_reserve_rx_buffer(rcv_desc);
1574 1574 if (rx_buffer != NULL) {
1575 1575 pdesc = &rcv_desc->desc_head[producer];
1576 1576 pdesc->referenceHandle = rxbuf2index(rcv_desc,
1577 1577 rx_buffer);
1578 1578 pdesc->flags = ringid;
1579 1579 pdesc->bufferLength = rcv_desc->dma_size;
1580 1580 pdesc->AddrBuffer = rx_buffer->dma_info.dma_addr;
1581 1581 } else {
1582 1582 adapter->stats.outofrxbuf++;
1583 1583 break;
1584 1584 }
1585 1585 producer = get_next_index(producer, rcv_desc->MaxRxDescCount);
1586 1586 }
1587 1587
1588 1588 /* if we did allocate buffers, then write the count to Phantom */
1589 1589 if (count) {
1590 1590 /* Sync rx ring, considering case for wrap around */
1591 1591 unm_desc_dma_sync(rcv_desc->rx_desc_dma_handle, last_producer,
1592 1592 count, rcv_desc->MaxRxDescCount, sizeof (rcvDesc_t),
1593 1593 DDI_DMA_SYNC_FORDEV);
1594 1594
1595 1595 rcv_desc->producer = producer;
1596 1596 rcv_desc->rx_desc_handled -= count;
1597 1597 rcv_desc->rx_buf_card += count;
1598 1598
1599 1599 producer = (producer - 1) % rcv_desc->MaxRxDescCount;
1600 1600 UNM_READ_LOCK(&adapter->adapter_lock);
1601 1601 adapter->unm_nic_hw_write_wx(adapter,
1602 1602 rcv_desc->host_rx_producer, &producer, 4);
1603 1603 UNM_READ_UNLOCK(&adapter->adapter_lock);
1604 1604 }
1605 1605 }
1606 1606
1607 1607 int
1608 1608 unm_nic_fill_statistics_128M(struct unm_adapter_s *adapter,
1609 1609 struct unm_statistics *unm_stats)
1610 1610 {
1611 1611 void *addr;
1612 1612 if (adapter->ahw.board_type == UNM_NIC_XGBE) {
1613 1613 UNM_WRITE_LOCK(&adapter->adapter_lock);
1614 1614 unm_nic_pci_change_crbwindow_128M(adapter, 0);
1615 1615
1616 1616 /* LINTED: E_FALSE_LOGICAL_EXPR */
1617 1617 UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_TX_BYTE_CNT,
1618 1618 &(unm_stats->tx_bytes));
1619 1619 /* LINTED: E_FALSE_LOGICAL_EXPR */
1620 1620 UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_TX_FRAME_CNT,
1621 1621 &(unm_stats->tx_packets));
1622 1622 /* LINTED: E_FALSE_LOGICAL_EXPR */
1623 1623 UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_RX_BYTE_CNT,
1624 1624 &(unm_stats->rx_bytes));
1625 1625 /* LINTED: E_FALSE_LOGICAL_EXPR */
1626 1626 UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_RX_FRAME_CNT,
1627 1627 &(unm_stats->rx_packets));
1628 1628 /* LINTED: E_FALSE_LOGICAL_EXPR */
1629 1629 UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_AGGR_ERROR_CNT,
1630 1630 &(unm_stats->rx_errors));
1631 1631 /* LINTED: E_FALSE_LOGICAL_EXPR */
1632 1632 UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_CRC_ERROR_CNT,
1633 1633 &(unm_stats->rx_CRC_errors));
1634 1634 /* LINTED: E_FALSE_LOGICAL_EXPR */
1635 1635 UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_OVERSIZE_FRAME_ERR,
1636 1636 &(unm_stats->rx_long_length_error));
1637 1637 /* LINTED: E_FALSE_LOGICAL_EXPR */
1638 1638 UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_UNDERSIZE_FRAME_ERR,
1639 1639 &(unm_stats->rx_short_length_error));
1640 1640
1641 1641 /*
1642 1642 * For reading rx_MAC_error bit different procedure
1643 1643 * UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_TEST_MUX_CTL, 0x15);
1644 1644 * UNM_NIC_LOCKED_READ_REG((UNM_CRB_NIU + 0xC0), &temp);
1645 1645 * unm_stats->rx_MAC_errors = temp & 0xff;
1646 1646 */
1647 1647
1648 1648 unm_nic_pci_change_crbwindow_128M(adapter, 1);
1649 1649 UNM_WRITE_UNLOCK(&adapter->adapter_lock);
1650 1650 } else {
1651 1651 UNM_SPIN_LOCK_ISR(&adapter->tx_lock);
1652 1652 unm_stats->tx_bytes = adapter->stats.txbytes;
1653 1653 unm_stats->tx_packets = adapter->stats.xmitedframes +
1654 1654 adapter->stats.xmitfinished;
1655 1655 unm_stats->rx_bytes = adapter->stats.rxbytes;
1656 1656 unm_stats->rx_packets = adapter->stats.no_rcv;
1657 1657 unm_stats->rx_errors = adapter->stats.rcvdbadmsg;
1658 1658 unm_stats->tx_errors = adapter->stats.nocmddescriptor;
1659 1659 unm_stats->rx_short_length_error = adapter->stats.uplcong;
1660 1660 unm_stats->rx_long_length_error = adapter->stats.uphcong;
1661 1661 unm_stats->rx_CRC_errors = 0;
1662 1662 unm_stats->rx_MAC_errors = 0;
1663 1663 UNM_SPIN_UNLOCK_ISR(&adapter->tx_lock);
1664 1664 }
1665 1665 return (0);
1666 1666 }
1667 1667
1668 1668 int
1669 1669 unm_nic_fill_statistics_2M(struct unm_adapter_s *adapter,
1670 1670 struct unm_statistics *unm_stats)
1671 1671 {
1672 1672 if (adapter->ahw.board_type == UNM_NIC_XGBE) {
1673 1673 (void) unm_nic_hw_read_wx_2M(adapter, UNM_NIU_XGE_TX_BYTE_CNT,
1674 1674 &(unm_stats->tx_bytes), 4);
1675 1675 (void) unm_nic_hw_read_wx_2M(adapter, UNM_NIU_XGE_TX_FRAME_CNT,
1676 1676 &(unm_stats->tx_packets), 4);
1677 1677 (void) unm_nic_hw_read_wx_2M(adapter, UNM_NIU_XGE_RX_BYTE_CNT,
1678 1678 &(unm_stats->rx_bytes), 4);
1679 1679 (void) unm_nic_hw_read_wx_2M(adapter, UNM_NIU_XGE_RX_FRAME_CNT,
1680 1680 &(unm_stats->rx_packets), 4);
1681 1681 (void) unm_nic_hw_read_wx_2M(adapter,
1682 1682 UNM_NIU_XGE_AGGR_ERROR_CNT, &(unm_stats->rx_errors), 4);
1683 1683 (void) unm_nic_hw_read_wx_2M(adapter, UNM_NIU_XGE_CRC_ERROR_CNT,
1684 1684 &(unm_stats->rx_CRC_errors), 4);
1685 1685 (void) unm_nic_hw_read_wx_2M(adapter,
1686 1686 UNM_NIU_XGE_OVERSIZE_FRAME_ERR,
1687 1687 &(unm_stats->rx_long_length_error), 4);
1688 1688 (void) unm_nic_hw_read_wx_2M(adapter,
1689 1689 UNM_NIU_XGE_UNDERSIZE_FRAME_ERR,
1690 1690 &(unm_stats->rx_short_length_error), 4);
1691 1691 } else {
1692 1692 UNM_SPIN_LOCK_ISR(&adapter->tx_lock);
1693 1693 unm_stats->tx_bytes = adapter->stats.txbytes;
1694 1694 unm_stats->tx_packets = adapter->stats.xmitedframes +
1695 1695 adapter->stats.xmitfinished;
1696 1696 unm_stats->rx_bytes = adapter->stats.rxbytes;
1697 1697 unm_stats->rx_packets = adapter->stats.no_rcv;
1698 1698 unm_stats->rx_errors = adapter->stats.rcvdbadmsg;
1699 1699 unm_stats->tx_errors = adapter->stats.nocmddescriptor;
1700 1700 unm_stats->rx_short_length_error = adapter->stats.uplcong;
1701 1701 unm_stats->rx_long_length_error = adapter->stats.uphcong;
1702 1702 unm_stats->rx_CRC_errors = 0;
1703 1703 unm_stats->rx_MAC_errors = 0;
1704 1704 UNM_SPIN_UNLOCK_ISR(&adapter->tx_lock);
1705 1705 }
1706 1706 return (0);
1707 1707 }
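/*
 * Unlike the _128M variant above, which must grab adapter_lock and
 * switch the CRB window around locked register accesses, the _2M flavor
 * goes through unm_nic_hw_read_wx_2M(), which appears to handle its own
 * addressing and locking.
 */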
1708 1708
1709 1709 int
1710 1710 unm_nic_clear_statistics_128M(struct unm_adapter_s *adapter)
1711 1711 {
1712 1712 void *addr;
1713 1713 int data = 0;
1714 1714
1715 1715 UNM_WRITE_LOCK(&adapter->adapter_lock);
1716 1716 unm_nic_pci_change_crbwindow_128M(adapter, 0);
1717 1717
1718 1718 /* LINTED: E_FALSE_LOGICAL_EXPR */
1719 1719 UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_TX_BYTE_CNT, &data);
1720 1720 /* LINTED: E_FALSE_LOGICAL_EXPR */
1721 1721 UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_TX_FRAME_CNT, &data);
1722 1722 /* LINTED: E_FALSE_LOGICAL_EXPR */
1723 1723 UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_RX_BYTE_CNT, &data);
1724 1724 /* LINTED: E_FALSE_LOGICAL_EXPR */
1725 1725 UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_RX_FRAME_CNT, &data);
1726 1726 /* LINTED: E_FALSE_LOGICAL_EXPR */
1727 1727 UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_AGGR_ERROR_CNT, &data);
1728 1728 /* LINTED: E_FALSE_LOGICAL_EXPR */
1729 1729 UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_CRC_ERROR_CNT, &data);
1730 1730 /* LINTED: E_FALSE_LOGICAL_EXPR */
1731 1731 UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_OVERSIZE_FRAME_ERR, &data);
1732 1732 /* LINTED: E_FALSE_LOGICAL_EXPR */
1733 1733 UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_UNDERSIZE_FRAME_ERR, &data);
1734 1734
1735 1735 unm_nic_pci_change_crbwindow_128M(adapter, 1);
1736 1736 UNM_WRITE_UNLOCK(&adapter->adapter_lock);
1737 1737 unm_nic_clear_stats(adapter);
1738 1738 return (0);
1739 1739 }
1740 1740
1741 1741 int
1742 1742 unm_nic_clear_statistics_2M(struct unm_adapter_s *adapter)
1743 1743 {
1744 1744 int data = 0;
1745 1745
1746 1746 (void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_TX_BYTE_CNT,
1747 1747 &data, 4);
1748 1748 (void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_TX_FRAME_CNT,
1749 1749 &data, 4);
1750 1750 (void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_RX_BYTE_CNT,
1751 1751 &data, 4);
1752 1752 (void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_RX_FRAME_CNT,
1753 1753 &data, 4);
1754 1754 (void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_AGGR_ERROR_CNT,
1755 1755 &data, 4);
1756 1756 (void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_CRC_ERROR_CNT,
1757 1757 &data, 4);
1758 1758 (void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_OVERSIZE_FRAME_ERR,
1759 1759 &data, 4);
1760 1760 (void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_UNDERSIZE_FRAME_ERR,
1761 1761 &data, 4);
1762 1762 unm_nic_clear_stats(adapter);
1763 1763 return (0);
1764 1764 }
1765 1765
1766 1766 /*
1767 1767  * unm_nic_ioctl() provides the tcl/phanmon support
1768 1768  * through these ioctls.
1769 1769 */
1770 1770 static void
1771 1771 unm_nic_ioctl(struct unm_adapter_s *adapter, int cmd, queue_t *q, mblk_t *mp)
1772 1772 {
1773 1773 void *ptr;
1774 1774
1775 1775 switch (cmd) {
1776 1776 case UNM_NIC_CMD:
1777 1777 (void) unm_nic_do_ioctl(adapter, q, mp);
1778 1778 break;
1779 1779
1780 1780 case UNM_NIC_NAME:
1781 1781 ptr = (void *) mp->b_cont->b_rptr;
1782 1782
1783 1783 /*
1784 1784 		 * Phanmon checks for the "UNM-UNM" string.
1785 1785 		 * TODO: replace the hardcoded value with an appropriate macro.
1786 1786 */
1787 1787 DPRINTF(-1, (CE_CONT, "UNM_NIC_NAME ioctl executed %d %d\n",
1788 1788 cmd, __LINE__));
1789 1789 		(void) memcpy(ptr, "UNM-UNM", sizeof ("UNM-UNM"));
1790 1790 		miocack(q, mp, sizeof ("UNM-UNM"), 0);
1791 1791 break;
1792 1792
1793 1793 default:
1794 1794 cmn_err(CE_WARN, "Netxen ioctl cmd %x not supported\n", cmd);
1795 1795
1796 1796 miocnak(q, mp, 0, EINVAL);
1797 1797 break;
1798 1798 }
1799 1799 }
1800 1800
1801 1801 int
1802 1802 unm_nic_resume(unm_adapter *adapter)
1803 1803 {
1804 1804
1805 1805 adapter->watchdog_timer = timeout((void (*)(void *))&unm_watchdog,
1806 1806 (void *) adapter, 50000);
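	/*
	 * The 50000 above is a raw tick count. With the drv_sectohz()
	 * interface this change introduces (by analogy with
	 * drv_usectohz(9F), which converts microseconds to clock ticks),
	 * such constants could be expressed in units of time instead,
	 * e.g.:
	 *
	 *	timeout((void (*)(void *))&unm_watchdog, (void *)adapter,
	 *	    drv_sectohz(interval));
	 *
	 * where "interval" is a hypothetical seconds value; the tick
	 * equivalent depends on hz, so this is only a sketch, not the
	 * change actually made by this webrev.
	 */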
1807 1807
1808 1808 if (adapter->intr_type == DDI_INTR_TYPE_MSI)
1809 1809 (void) ddi_intr_block_enable(&adapter->intr_handle, 1);
1810 1810 else
1811 1811 (void) ddi_intr_enable(adapter->intr_handle);
1812 1812 UNM_READ_LOCK(&adapter->adapter_lock);
1813 1813 unm_nic_enable_int(adapter);
1814 1814 UNM_READ_UNLOCK(&adapter->adapter_lock);
1815 1815
1816 1816 mac_link_update(adapter->mach, LINK_STATE_UP);
1817 1817
1818 1818 return (DDI_SUCCESS);
1819 1819 }
1820 1820
1821 1821 int
1822 1822 unm_nic_suspend(unm_adapter *adapter)
1823 1823 {
1824 1824 mac_link_update(adapter->mach, LINK_STATE_DOWN);
1825 1825
1826 1826 (void) untimeout(adapter->watchdog_timer);
1827 1827
1828 1828 UNM_READ_LOCK(&adapter->adapter_lock);
1829 1829 unm_nic_disable_int(adapter);
1830 1830 UNM_READ_UNLOCK(&adapter->adapter_lock);
1831 1831 if (adapter->intr_type == DDI_INTR_TYPE_MSI)
1832 1832 (void) ddi_intr_block_disable(&adapter->intr_handle, 1);
1833 1833 else
1834 1834 (void) ddi_intr_disable(adapter->intr_handle);
1835 1835
1836 1836 return (DDI_SUCCESS);
1837 1837 }
1838 1838
1839 1839 static int
1840 1840 unm_nic_do_ioctl(unm_adapter *adapter, queue_t *wq, mblk_t *mp)
1841 1841 {
1842 1842 unm_nic_ioctl_data_t data;
1843 1843 struct unm_nic_ioctl_data *up_data;
1844 1844 ddi_acc_handle_t conf_handle;
1845 1845 int retval = 0;
1846 1846 uint64_t efuse_chip_id = 0;
1847 1847 char *ptr1;
1848 1848 short *ptr2;
1849 1849 int *ptr4;
1850 1850
1851 1851 up_data = (struct unm_nic_ioctl_data *)(mp->b_cont->b_rptr);
1852 1852 (void) memcpy(&data, (void **)(uintptr_t)(mp->b_cont->b_rptr),
1853 1853 sizeof (data));
1854 1854
1855 1855 	/* Must not access beyond the legal limits of the "char uabc[64];" member */
1856 1856 if (data.size > sizeof (data.uabc)) {
1857 1857 /* evil user tried to crash the kernel */
1858 1858 cmn_err(CE_WARN, "bad size: %d\n", data.size);
1859 1859 retval = GLD_BADARG;
1860 1860 goto error_out;
1861 1861 }
1862 1862
1863 1863 switch (data.cmd) {
1864 1864 case unm_nic_cmd_pci_read:
1865 1865
1866 1866 if ((retval = adapter->unm_nic_hw_read_ioctl(adapter,
1867 1867 data.off, up_data, data.size))) {
1868 1868 DPRINTF(-1, (CE_WARN, "%s(%d) unm_nic_hw_read_wx "
1869 1869 "returned %d\n", __FUNCTION__, __LINE__, retval));
1870 1870
1871 1871 			data.rv = retval;
1872 1872 goto error_out;
1873 1873 }
1874 1874
1875 1875 data.rv = 0;
1876 1876 break;
1877 1877
1878 1878 case unm_nic_cmd_pci_write:
1879 1879 if ((data.rv = adapter->unm_nic_hw_write_ioctl(adapter,
1880 1880 data.off, &(data.uabc), data.size))) {
1881 1881 DPRINTF(-1, (CE_WARN, "%s(%d) unm_nic_hw_write_wx "
1882 1882 "returned %d\n", __FUNCTION__,
1883 1883 __LINE__, data.rv));
1884 1884 retval = data.rv;
1885 1885 goto error_out;
1886 1886 }
1887 1887 data.size = 0;
1888 1888 break;
1889 1889
1890 1890 case unm_nic_cmd_pci_mem_read:
1891 1891 if ((data.rv = adapter->unm_nic_pci_mem_read(adapter,
1892 1892 data.off, up_data, data.size))) {
1893 1893 DPRINTF(-1, (CE_WARN, "%s(%d) unm_nic_pci_mem_read "
1894 1894 "returned %d\n", __FUNCTION__,
1895 1895 __LINE__, data.rv));
1896 1896 retval = data.rv;
1897 1897 goto error_out;
1898 1898 }
1899 1899 data.rv = 0;
1900 1900 break;
1901 1901
1902 1902 case unm_nic_cmd_pci_mem_write:
1903 1903 if ((data.rv = adapter->unm_nic_pci_mem_write(adapter,
1904 1904 data.off, &(data.uabc), data.size))) {
1905 1905 DPRINTF(-1, (CE_WARN,
1906 1906 "%s(%d) unm_nic_cmd_pci_mem_write "
1907 1907 "returned %d\n",
1908 1908 __FUNCTION__, __LINE__, data.rv));
1909 1909 retval = data.rv;
1910 1910 goto error_out;
1911 1911 }
1912 1912
1913 1913 data.size = 0;
1914 1914 data.rv = 0;
1915 1915 break;
1916 1916
1917 1917 case unm_nic_cmd_pci_config_read:
1918 1918
1919 1919 if (adapter->pci_cfg_handle != NULL) {
1920 1920 conf_handle = adapter->pci_cfg_handle;
1921 1921
1922 1922 } else if ((retval = pci_config_setup(adapter->dip,
1923 1923 &conf_handle)) != DDI_SUCCESS) {
1924 1924 DPRINTF(-1, (CE_WARN, "!%s: pci_config_setup failed"
1925 1925 " error:%d\n", unm_nic_driver_name, retval));
1926 1926 goto error_out;
1927 1927
1928 1928 } else
1929 1929 adapter->pci_cfg_handle = conf_handle;
1930 1930
1931 1931 switch (data.size) {
1932 1932 case 1:
1933 1933 ptr1 = (char *)up_data;
1934 1934 *ptr1 = (char)pci_config_get8(conf_handle, data.off);
1935 1935 break;
1936 1936 case 2:
1937 1937 ptr2 = (short *)up_data;
1938 1938 *ptr2 = (short)pci_config_get16(conf_handle, data.off);
1939 1939 break;
1940 1940 case 4:
1941 1941 ptr4 = (int *)up_data;
1942 1942 *ptr4 = (int)pci_config_get32(conf_handle, data.off);
1943 1943 break;
1944 1944 }
1945 1945
1946 1946 break;
1947 1947
1948 1948 case unm_nic_cmd_pci_config_write:
1949 1949
1950 1950 if (adapter->pci_cfg_handle != NULL) {
1951 1951 conf_handle = adapter->pci_cfg_handle;
1952 1952 } else if ((retval = pci_config_setup(adapter->dip,
1953 1953 &conf_handle)) != DDI_SUCCESS) {
1954 1954 DPRINTF(-1, (CE_WARN, "!%s: pci_config_setup failed"
1955 1955 " error:%d\n", unm_nic_driver_name, retval));
1956 1956 goto error_out;
1957 1957 } else {
1958 1958 adapter->pci_cfg_handle = conf_handle;
1959 1959 }
1960 1960
1961 1961 switch (data.size) {
1962 1962 case 1:
1963 1963 pci_config_put8(conf_handle,
1964 1964 data.off, *(char *)&(data.uabc));
1965 1965 break;
1966 1966 case 2:
1967 1967 pci_config_put16(conf_handle,
1968 1968 data.off, *(short *)(uintptr_t)&(data.uabc));
1969 1969 break;
1970 1970 case 4:
1971 1971 pci_config_put32(conf_handle,
1972 1972 data.off, *(u32 *)(uintptr_t)&(data.uabc));
1973 1973 break;
1974 1974 }
1975 1975 data.size = 0;
1976 1976 break;
1977 1977
1978 1978 case unm_nic_cmd_get_stats:
1979 1979 data.rv = adapter->unm_nic_fill_statistics(adapter,
1980 1980 (struct unm_statistics *)up_data);
1981 1981 data.size = sizeof (struct unm_statistics);
1982 1982
1983 1983 break;
1984 1984
1985 1985 case unm_nic_cmd_clear_stats:
1986 1986 data.rv = adapter->unm_nic_clear_statistics(adapter);
1987 1987 break;
1988 1988
1989 1989 case unm_nic_cmd_get_version:
1990 1990 (void) memcpy(up_data, UNM_NIC_VERSIONID,
1991 1991 sizeof (UNM_NIC_VERSIONID));
1992 1992 data.size = sizeof (UNM_NIC_VERSIONID);
1993 1993
1994 1994 break;
1995 1995
1996 1996 case unm_nic_cmd_get_phy_type:
1997 1997 cmn_err(CE_WARN, "unm_nic_cmd_get_phy_type unimplemented\n");
1998 1998 break;
1999 1999
2000 2000 case unm_nic_cmd_efuse_chip_id:
2001 2001 efuse_chip_id = adapter->unm_nic_pci_read_normalize(adapter,
2002 2002 UNM_EFUSE_CHIP_ID_HIGH);
2003 2003 efuse_chip_id <<= 32;
2004 2004 efuse_chip_id |= adapter->unm_nic_pci_read_normalize(adapter,
2005 2005 UNM_EFUSE_CHIP_ID_LOW);
2006 2006 (void) memcpy(up_data, &efuse_chip_id, sizeof (uint64_t));
2007 2007 data.rv = 0;
2008 2008 break;
2009 2009
2010 2010 default:
2011 2011 cmn_err(CE_WARN, "%s%d: bad command %d\n", adapter->name,
2012 2012 adapter->instance, data.cmd);
2013 2013 data.rv = GLD_NOTSUPPORTED;
2014 2014 data.size = 0;
2015 2015 goto error_out;
2016 2016 }
2017 2017
2018 2018 work_done:
2019 2019 miocack(wq, mp, data.size, data.rv);
2020 2020 return (DDI_SUCCESS);
2021 2021
2022 2022 error_out:
2023 2023 cmn_err(CE_WARN, "%s(%d) ioctl error\n", __FUNCTION__, data.cmd);
2024 2024 miocnak(wq, mp, 0, EINVAL);
2025 2025 return (retval);
2026 2026 }
2027 2027
2028 2028 /*
2029 2029 * Local datatype for defining tables of (Offset, Name) pairs
2030 2030 */
2031 2031 typedef struct {
2032 2032 offset_t index;
2033 2033 char *name;
2034 2034 } unm_ksindex_t;
2035 2035
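/*
 * The leading character of each name selects the kstat data type in
 * unm_setup_named_kstat() below: '%' means UINT32, '$' means STRING,
 * '&' means CHAR, and any other first character means UINT64. None of
 * the entries here carry a prefix, so all are created as UINT64.
 */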
2036 2036 static const unm_ksindex_t unm_kstat[] = {
2037 2037 { 0, "freehdls" },
2038 2038 { 1, "freecmds" },
2039 2039 { 2, "tx_bcopy_threshold" },
2040 2040 { 3, "rx_bcopy_threshold" },
2041 2041 { 4, "xmitcalled" },
2042 2042 { 5, "xmitedframes" },
2043 2043 { 6, "xmitfinished" },
2044 2044 { 7, "txbytes" },
2045 2045 { 8, "txcopyed" },
2046 2046 { 9, "txmapped" },
2047 2047 { 10, "outoftxdmahdl" },
2048 2048 { 11, "outofcmddesc" },
2049 2049 { 12, "txdropped" },
2050 2050 { 13, "polled" },
2051 2051 { 14, "uphappy" },
2052 2052 { 15, "updropped" },
2053 2053 { 16, "csummed" },
2054 2054 { 17, "no_rcv" },
2055 2055 { 18, "rxbytes" },
2056 2056 { 19, "rxcopyed" },
2057 2057 { 20, "rxmapped" },
2058 2058 { 21, "desballocfailed" },
2059 2059 { 22, "outofrxbuf" },
2060 2060 { 23, "promiscmode" },
2061 2061 { 24, "rxbufshort" },
2062 2062 { 25, "allocbfailed" },
2063 2063 { -1, NULL }
2064 2064 };
2065 2065
2066 2066 static int
2067 2067 unm_kstat_update(kstat_t *ksp, int flag)
2068 2068 {
2069 2069 unm_adapter *adapter;
2070 2070 kstat_named_t *knp;
2071 2071
2072 2072 if (flag != KSTAT_READ)
2073 2073 return (EACCES);
2074 2074
2075 2075 adapter = ksp->ks_private;
2076 2076 knp = ksp->ks_data;
2077 2077
2078 2078 	(knp++)->value.ui64 = adapter->freehdls;
2079 2079 (knp++)->value.ui64 = adapter->freecmds;
2080 2080 (knp++)->value.ui64 = adapter->tx_bcopy_threshold;
2081 2081 (knp++)->value.ui64 = adapter->rx_bcopy_threshold;
2082 2082
2083 2083 (knp++)->value.ui64 = adapter->stats.xmitcalled;
2084 2084 (knp++)->value.ui64 = adapter->stats.xmitedframes;
2085 2085 (knp++)->value.ui64 = adapter->stats.xmitfinished;
2086 2086 (knp++)->value.ui64 = adapter->stats.txbytes;
2087 2087 (knp++)->value.ui64 = adapter->stats.txcopyed;
2088 2088 (knp++)->value.ui64 = adapter->stats.txmapped;
2089 2089 (knp++)->value.ui64 = adapter->stats.outoftxdmahdl;
2090 2090 (knp++)->value.ui64 = adapter->stats.outofcmddesc;
2091 2091 (knp++)->value.ui64 = adapter->stats.txdropped;
2092 2092 (knp++)->value.ui64 = adapter->stats.polled;
2093 2093 (knp++)->value.ui64 = adapter->stats.uphappy;
2094 2094 (knp++)->value.ui64 = adapter->stats.updropped;
2095 2095 (knp++)->value.ui64 = adapter->stats.csummed;
2096 2096 (knp++)->value.ui64 = adapter->stats.no_rcv;
2097 2097 (knp++)->value.ui64 = adapter->stats.rxbytes;
2098 2098 (knp++)->value.ui64 = adapter->stats.rxcopyed;
2099 2099 (knp++)->value.ui64 = adapter->stats.rxmapped;
2100 2100 (knp++)->value.ui64 = adapter->stats.desballocfailed;
2101 2101 (knp++)->value.ui64 = adapter->stats.outofrxbuf;
2102 2102 (knp++)->value.ui64 = adapter->stats.promiscmode;
2103 2103 (knp++)->value.ui64 = adapter->stats.rxbufshort;
2104 2104 (knp++)->value.ui64 = adapter->stats.allocbfailed;
2105 2105
2106 2106 return (0);
2107 2107 }
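/*
 * The (knp++) stores above rely on ks_data having been initialized in
 * exactly the order of the unm_kstat[] table; the two must be kept in
 * sync when statistics are added or removed.
 */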
2108 2108
2109 2109 static kstat_t *
2110 2110 unm_setup_named_kstat(unm_adapter *adapter, int instance, char *name,
2111 2111 const unm_ksindex_t *ksip, size_t size, int (*update)(kstat_t *, int))
2112 2112 {
2113 2113 kstat_t *ksp;
2114 2114 kstat_named_t *knp;
2115 2115 char *np;
2116 2116 int type;
2117 2117 int count = 0;
2118 2118
2119 2119 size /= sizeof (unm_ksindex_t);
2120 2120 ksp = kstat_create(unm_nic_driver_name, instance, name, "net",
2121 2121 KSTAT_TYPE_NAMED, size-1, KSTAT_FLAG_PERSISTENT);
2122 2122 if (ksp == NULL)
2123 2123 return (NULL);
2124 2124
2125 2125 ksp->ks_private = adapter;
2126 2126 ksp->ks_update = update;
2127 2127 for (knp = ksp->ks_data; (np = ksip->name) != NULL; ++knp, ++ksip) {
2128 2128 count++;
2129 2129 switch (*np) {
2130 2130 default:
2131 2131 type = KSTAT_DATA_UINT64;
2132 2132 break;
2133 2133 case '%':
2134 2134 np += 1;
2135 2135 type = KSTAT_DATA_UINT32;
2136 2136 break;
2137 2137 case '$':
2138 2138 np += 1;
2139 2139 type = KSTAT_DATA_STRING;
2140 2140 break;
2141 2141 case '&':
2142 2142 np += 1;
2143 2143 type = KSTAT_DATA_CHAR;
2144 2144 break;
2145 2145 }
2146 2146 kstat_named_init(knp, np, type);
2147 2147 }
2148 2148 kstat_install(ksp);
2149 2149
2150 2150 return (ksp);
2151 2151 }
2152 2152
2153 2153 void
2154 2154 unm_init_kstats(unm_adapter* adapter, int instance)
2155 2155 {
2156 2156 adapter->kstats[0] = unm_setup_named_kstat(adapter,
2157 2157 instance, "kstatinfo", unm_kstat,
2158 2158 sizeof (unm_kstat), unm_kstat_update);
2159 2159 }
2160 2160
2161 2161 void
2162 2162 unm_fini_kstats(unm_adapter* adapter)
2163 2163 {
2164 2164
2165 2165 if (adapter->kstats[0] != NULL) {
2166 2166 kstat_delete(adapter->kstats[0]);
2167 2167 adapter->kstats[0] = NULL;
2168 2168 }
2169 2169 }
2170 2170
2171 2171 static int
2172 2172 unm_nic_set_pauseparam(unm_adapter *adapter, unm_pauseparam_t *pause)
2173 2173 {
2174 2174 int ret = 0;
2175 2175
2176 2176 if (adapter->ahw.board_type == UNM_NIC_GBE) {
2177 2177 if (unm_niu_gbe_set_rx_flow_ctl(adapter, pause->rx_pause))
2178 2178 ret = -EIO;
2179 2179
2180 2180 if (unm_niu_gbe_set_tx_flow_ctl(adapter, pause->tx_pause))
2181 2181 ret = -EIO;
2182 2182
2183 2183 } else if (adapter->ahw.board_type == UNM_NIC_XGBE) {
2184 2184 if (unm_niu_xg_set_tx_flow_ctl(adapter, pause->tx_pause))
2185 2185 ret = -EIO;
2186 2186 } else
2187 2187 ret = -EIO;
2188 2188
2189 2189 return (ret);
2190 2190 }
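/*
 * Note the asymmetry above: GBE boards accept both rx and tx pause
 * settings, XGBE boards expose only tx flow control, and any other
 * board type is rejected with -EIO.
 */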
2191 2191
2192 2192 /*
2193 2193 * GLD/MAC interfaces
2194 2194 */
2195 2195 static int
2196 2196 ntxn_m_start(void *arg)
2197 2197 {
2198 2198 unm_adapter *adapter = arg;
2199 2199 int ring;
2200 2200
2201 2201 UNM_SPIN_LOCK(&adapter->lock);
2202 2202 if (adapter->is_up == UNM_ADAPTER_UP_MAGIC) {
2203 2203 UNM_SPIN_UNLOCK(&adapter->lock);
2204 2204 return (DDI_SUCCESS);
2205 2205 }
2206 2206
2207 2207 if (create_rxtx_rings(adapter) != DDI_SUCCESS) {
2208 2208 UNM_SPIN_UNLOCK(&adapter->lock);
2209 2209 return (DDI_FAILURE);
2210 2210 }
2211 2211
2212 2212 if (init_firmware(adapter) != DDI_SUCCESS) {
2213 2213 UNM_SPIN_UNLOCK(&adapter->lock);
2214 2214 cmn_err(CE_WARN, "%s%d: Failed to init firmware\n",
2215 2215 adapter->name, adapter->instance);
2216 2216 goto dest_rings;
2217 2217 }
2218 2218
2219 2219 unm_nic_clear_stats(adapter);
2220 2220
2221 2221 if (unm_nic_hw_resources(adapter) != 0) {
2222 2222 UNM_SPIN_UNLOCK(&adapter->lock);
2223 2223 cmn_err(CE_WARN, "%s%d: Error setting hw resources\n",
2224 2224 adapter->name, adapter->instance);
2225 2225 goto dest_rings;
2226 2226 }
2227 2227
2228 2228 if (adapter->fw_major < 4) {
2229 2229 adapter->crb_addr_cmd_producer =
2230 2230 crb_cmd_producer[adapter->portnum];
2231 2231 adapter->crb_addr_cmd_consumer =
2232 2232 crb_cmd_consumer[adapter->portnum];
2233 2233 unm_nic_update_cmd_producer(adapter, 0);
2234 2234 unm_nic_update_cmd_consumer(adapter, 0);
2235 2235 }
2236 2236
2237 2237 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
2238 2238 if (unm_post_rx_buffers(adapter, ring) != DDI_SUCCESS) {
2239 2239 UNM_SPIN_UNLOCK(&adapter->lock);
2240 2240 goto free_hw_res;
2241 2241 }
2242 2242 }
2243 2243
2244 2244 if (unm_nic_macaddr_set(adapter, adapter->mac_addr) != 0) {
2245 2245 UNM_SPIN_UNLOCK(&adapter->lock);
2246 2246 cmn_err(CE_WARN, "%s%d: Could not set mac address\n",
2247 2247 adapter->name, adapter->instance);
2248 2248 goto free_hw_res;
2249 2249 }
2250 2250
2251 2251 if (unm_nic_init_port(adapter) != 0) {
2252 2252 UNM_SPIN_UNLOCK(&adapter->lock);
2253 2253 cmn_err(CE_WARN, "%s%d: Could not initialize port\n",
2254 2254 adapter->name, adapter->instance);
2255 2255 goto free_hw_res;
2256 2256 }
2257 2257
2258 2258 unm_nic_set_link_parameters(adapter);
2259 2259
2260 2260 /*
2261 2261 * P2 and P3 should be handled similarly.
2262 2262 */
2263 2263 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
2264 2264 if (unm_nic_set_promisc_mode(adapter) != 0) {
2265 2265 UNM_SPIN_UNLOCK(&adapter->lock);
2266 2266 cmn_err(CE_WARN, "%s%d: Could not set promisc mode\n",
2267 2267 adapter->name, adapter->instance);
2268 2268 goto stop_and_free;
2269 2269 }
2270 2270 } else {
2271 2271 nx_p3_nic_set_multi(adapter);
2272 2272 }
2273 2273 adapter->stats.promiscmode = 1;
2274 2274
2275 2275 if (unm_nic_set_mtu(adapter, adapter->mtu) != 0) {
2276 2276 UNM_SPIN_UNLOCK(&adapter->lock);
2277 2277 cmn_err(CE_WARN, "%s%d: Could not set mtu\n",
2278 2278 adapter->name, adapter->instance);
2279 2279 goto stop_and_free;
2280 2280 }
2281 2281
2282 2282 adapter->watchdog_timer = timeout((void (*)(void *))&unm_watchdog,
2283 2283 (void *)adapter, 0);
2284 2284
2285 2285 adapter->is_up = UNM_ADAPTER_UP_MAGIC;
2286 2286
2287 2287 if (adapter->intr_type == DDI_INTR_TYPE_MSI)
2288 2288 (void) ddi_intr_block_enable(&adapter->intr_handle, 1);
2289 2289 else
2290 2290 (void) ddi_intr_enable(adapter->intr_handle);
2291 2291 unm_nic_enable_int(adapter);
2292 2292
2293 2293 UNM_SPIN_UNLOCK(&adapter->lock);
2294 2294 return (GLD_SUCCESS);
2295 2295
2296 2296 stop_and_free:
2297 2297 unm_nic_stop_port(adapter);
2298 2298 free_hw_res:
2299 2299 unm_free_hw_resources(adapter);
2300 2300 dest_rings:
2301 2301 destroy_rxtx_rings(adapter);
2302 2302 return (DDI_FAILURE);
2303 2303 }
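/*
 * The error labels in ntxn_m_start() unwind in reverse order of setup:
 * stop_and_free undoes unm_nic_init_port(), free_hw_res undoes
 * unm_nic_hw_resources(), and dest_rings undoes create_rxtx_rings().
 */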
2304 2304
2305 2305
2306 2306 /*
2307 2307  * This stub is kept here for reference, in case something
2308 2308  * different turns out to be required in GLDv3.
2309 2309  * It will be deleted later.
2310 2310 */
2311 2311 /* ARGSUSED */
2312 2312 static void
2313 2313 ntxn_m_stop(void *arg)
2314 2314 {
2315 2315 }
2316 2316
2317 2317 /*ARGSUSED*/
2318 2318 static int
2319 2319 ntxn_m_multicst(void *arg, boolean_t add, const uint8_t *ep)
2320 2320 {
2321 2321 /*
2322 2322 * When we correctly implement this, invoke nx_p3_nic_set_multi()
2323 2323 * or nx_p2_nic_set_multi() here.
2324 2324 */
2325 2325 return (GLD_SUCCESS);
2326 2326 }
2327 2327
2328 2328 /*ARGSUSED*/
2329 2329 static int
2330 2330 ntxn_m_promisc(void *arg, boolean_t on)
2331 2331 {
2332 2332 #if 0
2333 2333 int err = 0;
2334 2334 struct unm_adapter_s *adapter = arg;
2335 2335
2336 2336 err = on ? unm_nic_set_promisc_mode(adapter) :
2337 2337 unm_nic_unset_promisc_mode(adapter);
2338 2338
2339 2339 if (err)
2340 2340 return (GLD_FAILURE);
2341 2341 #endif
2342 2342
2343 2343 return (GLD_SUCCESS);
2344 2344 }
2345 2345
2346 2346 static int
2347 2347 ntxn_m_stat(void *arg, uint_t stat, uint64_t *val)
2348 2348 {
2349 2349 struct unm_adapter_s *adapter = arg;
2350 2350 struct unm_adapter_stats *portstat = &adapter->stats;
2351 2351
2352 2352 switch (stat) {
2353 2353 case MAC_STAT_IFSPEED:
2354 2354 if (adapter->ahw.board_type == UNM_NIC_XGBE) {
2355 2355 /* 10 Gigs */
2356 2356 *val = 10000000000ULL;
2357 2357 } else {
2358 2358 /* 1 Gig */
2359 2359 *val = 1000000000;
2360 2360 }
2361 2361 break;
2362 2362
2363 2363 case MAC_STAT_MULTIRCV:
2364 2364 *val = 0;
2365 2365 break;
2366 2366
2367 2367 case MAC_STAT_BRDCSTRCV:
2368 2368 case MAC_STAT_BRDCSTXMT:
2369 2369 *val = 0;
2370 2370 break;
2371 2371
2372 2372 case MAC_STAT_NORCVBUF:
2373 2373 *val = portstat->updropped;
2374 2374 break;
2375 2375
2376 2376 case MAC_STAT_NOXMTBUF:
2377 2377 *val = portstat->txdropped;
2378 2378 break;
2379 2379
2380 2380 case MAC_STAT_RBYTES:
2381 2381 *val = portstat->rxbytes;
2382 2382 break;
2383 2383
2384 2384 case MAC_STAT_OBYTES:
2385 2385 *val = portstat->txbytes;
2386 2386 break;
2387 2387
2388 2388 case MAC_STAT_OPACKETS:
2389 2389 *val = portstat->xmitedframes;
2390 2390 break;
2391 2391
2392 2392 case MAC_STAT_IPACKETS:
2393 2393 *val = portstat->uphappy;
2394 2394 break;
2395 2395
2396 2396 case MAC_STAT_OERRORS:
2397 2397 *val = portstat->xmitcalled - portstat->xmitedframes;
2398 2398 break;
2399 2399
2400 2400 case ETHER_STAT_LINK_DUPLEX:
2401 2401 *val = LINK_DUPLEX_FULL;
2402 2402 break;
2403 2403
2404 2404 default:
2405 2405 /*
2406 2406 * Shouldn't reach here...
2407 2407 */
2408 2408 *val = 0;
2409 2409 DPRINTF(0, (CE_WARN, ": unrecognized parameter = %d, value "
2410 2410 		    "returned 0\n", stat));
2411 2411
2412 2412 }
2413 2413
2414 2414 return (0);
2415 2415 }
2416 2416
2417 2417 static int
2418 2418 ntxn_m_unicst(void *arg, const uint8_t *mac)
2419 2419 {
2420 2420 struct unm_adapter_s *adapter = arg;
2421 2421
2422 2422 DPRINTF(-1, (CE_CONT, "%s: called\n", __func__));
2423 2423
2424 2424 if (unm_nic_macaddr_set(adapter, (uint8_t *)mac))
2425 2425 return (EAGAIN);
2426 2426 bcopy(mac, adapter->mac_addr, ETHERADDRL);
2427 2427
2428 2428 return (0);
2429 2429 }
2430 2430
2431 2431 static mblk_t *
2432 2432 ntxn_m_tx(void *arg, mblk_t *mp)
2433 2433 {
2434 2434 unm_adapter *adapter = arg;
2435 2435 mblk_t *next;
2436 2436
2437 2437 while (mp != NULL) {
2438 2438 next = mp->b_next;
2439 2439 mp->b_next = NULL;
2440 2440
2441 2441 if (unm_nic_xmit_frame(adapter, mp) != B_TRUE) {
2442 2442 mp->b_next = next;
2443 2443 break;
2444 2444 }
2445 2445 mp = next;
2446 2446 adapter->stats.xmitedframes++;
2447 2447 }
2448 2448
2449 2449 return (mp);
2450 2450 }
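/*
 * Per the GLDv3 mc_tx contract, returning a non-NULL chain tells the
 * MAC layer that the driver is out of tx resources; the framework holds
 * the unsent packets until the driver signals progress again, typically
 * via mac_tx_update() from its tx-completion path.
 */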
2451 2451
2452 2452 static void
2453 2453 ntxn_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2454 2454 {
2455 2455 int cmd;
2456 2456 struct iocblk *iocp = (struct iocblk *)(uintptr_t)mp->b_rptr;
2457 2457 struct unm_adapter_s *adapter = (struct unm_adapter_s *)arg;
2458 2458 enum ioc_reply status = IOC_DONE;
2459 2459
2460 2460 iocp->ioc_error = 0;
2461 2461 cmd = iocp->ioc_cmd;
2462 2462
2463 2463 if (cmd == ND_GET || cmd == ND_SET) {
2464 2464 status = unm_nd_ioctl(adapter, wq, mp, iocp);
2465 2465 switch (status) {
2466 2466 default:
2467 2467 case IOC_INVAL:
2468 2468 miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
2469 2469 EINVAL : iocp->ioc_error);
2470 2470 break;
2471 2471
2472 2472 case IOC_DONE:
2473 2473 break;
2474 2474
2475 2475 case IOC_RESTART_ACK:
2476 2476 case IOC_ACK:
2477 2477 miocack(wq, mp, 0, 0);
2478 2478 break;
2479 2479
2480 2480 case IOC_RESTART_REPLY:
2481 2481 case IOC_REPLY:
2482 2482 mp->b_datap->db_type = iocp->ioc_error == 0 ?
2483 2483 M_IOCACK : M_IOCNAK;
2484 2484 qreply(wq, mp);
2485 2485 break;
2486 2486 }
2487 2487 } else if (cmd <= UNM_NIC_NAME && cmd >= UNM_CMD_START) {
2488 2488 unm_nic_ioctl(adapter, cmd, wq, mp);
2489 2489 return;
2490 2490 } else {
2491 2491 miocnak(wq, mp, 0, EINVAL);
2492 2492 return;
2493 2493 }
2494 2494 }
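/*
 * Dispatch summary for ntxn_m_ioctl(): ND_GET/ND_SET go to the NDD
 * parameter handler, driver-private commands in the range
 * [UNM_CMD_START, UNM_NIC_NAME] go to unm_nic_ioctl(), and anything
 * else is nak'd with EINVAL.
 */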
2495 2495
2496 2496 /* ARGSUSED */
2497 2497 static boolean_t
2498 2498 ntxn_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
2499 2499 {
2500 2500 switch (cap) {
2501 2501 case MAC_CAPAB_HCKSUM:
2502 2502 {
2503 2503 uint32_t *txflags = cap_data;
2504 2504
2505 2505 *txflags = (HCKSUM_ENABLE |
2506 2506 HCKSUM_INET_FULL_V4 | HCKSUM_IPHDRCKSUM);
2507 2507 }
2508 2508 break;
2509 2509
2510 2510 #ifdef SOLARIS11
2511 2511 case MAC_CAPAB_ANCHOR_VNIC:
2512 2512 case MAC_CAPAB_MULTIFACTADDR:
2513 2513 #else
2514 2514 case MAC_CAPAB_POLL:
2515 2515 case MAC_CAPAB_MULTIADDRESS:
2516 2516 #endif
2517 2517 default:
2518 2518 return (B_FALSE);
2519 2519 }
2520 2520
2521 2521 return (B_TRUE);
2522 2522 }
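/*
 * The HCKSUM flags above advertise IPv4 header checksumming and full
 * TCP/UDP checksum offload for IPv4; every other capability (including
 * LSO) falls through to the default case and is reported unsupported.
 */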
2523 2523
2524 2524 #define NETXEN_M_CALLBACK_FLAGS (MC_IOCTL | MC_GETCAPAB)
2525 2525
2526 2526 static mac_callbacks_t ntxn_m_callbacks = {
2527 2527 NETXEN_M_CALLBACK_FLAGS,
2528 2528 ntxn_m_stat,
2529 2529 ntxn_m_start,
2530 2530 ntxn_m_stop,
2531 2531 ntxn_m_promisc,
2532 2532 ntxn_m_multicst,
2533 2533 ntxn_m_unicst,
2534 2534 ntxn_m_tx,
2535 2535 NULL, /* mc_reserved */
2536 2536 ntxn_m_ioctl,
2537 2537 ntxn_m_getcapab,
2538 2538 NULL, /* mc_open */
2539 2539 NULL, /* mc_close */
2540 2540 NULL, /* mc_setprop */
2541 2541 NULL /* mc_getprop */
2542 2542 };
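/*
 * Only MC_IOCTL and MC_GETCAPAB are set in NETXEN_M_CALLBACK_FLAGS, so
 * the MAC layer treats just the mc_ioctl and mc_getcapab slots above as
 * valid optional callbacks; the NULL entries are ignored.
 */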
2543 2543
2544 2544 int
2545 2545 unm_register_mac(unm_adapter *adapter)
2546 2546 {
2547 2547 int ret;
2548 2548 mac_register_t *macp;
2549 2549 unm_pauseparam_t pause;
2550 2550
2551 2551 dev_info_t *dip = adapter->dip;
2552 2552
2553 2553 if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
2554 2554 cmn_err(CE_WARN, "Memory not available\n");
2555 2555 return (DDI_FAILURE);
2556 2556 }
2557 2557
2558 2558 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
2559 2559 macp->m_driver = adapter;
2560 2560 macp->m_dip = dip;
2561 2561 macp->m_instance = adapter->instance;
2562 2562 macp->m_src_addr = adapter->mac_addr;
2563 2563 macp->m_callbacks = &ntxn_m_callbacks;
2564 2564 macp->m_min_sdu = 0;
2565 2565 macp->m_max_sdu = adapter->mtu;
2566 2566 #ifdef SOLARIS11
2567 2567 macp->m_margin = VLAN_TAGSZ;
2568 2568 #endif /* SOLARIS11 */
2569 2569
2570 2570 ret = mac_register(macp, &adapter->mach);
2571 2571 mac_free(macp);
2572 2572 if (ret != 0) {
2573 2573 cmn_err(CE_WARN, "mac_register failed for port %d\n",
2574 2574 adapter->portnum);
2575 2575 return (DDI_FAILURE);
2576 2576 }
2577 2577
2578 2578 unm_init_kstats(adapter, adapter->instance);
2579 2579
2580 2580 /* Register NDD-tweakable parameters */
2581 2581 if (unm_nd_init(adapter)) {
2582 2582 cmn_err(CE_WARN, "unm_nd_init() failed");
2583 2583 return (DDI_FAILURE);
2584 2584 }
2585 2585
2586 2586 pause.rx_pause = adapter->nd_params[PARAM_ADV_PAUSE_CAP].ndp_val;
2587 2587 pause.tx_pause = adapter->nd_params[PARAM_ADV_ASYM_PAUSE_CAP].ndp_val;
2588 2588
2589 2589 if (unm_nic_set_pauseparam(adapter, &pause)) {
2590 2590 		cmn_err(CE_WARN, "Bad pause settings rx %d, tx %d",
2591 2591 pause.rx_pause, pause.tx_pause);
2592 2592 }
2593 2593 adapter->nd_params[PARAM_PAUSE_CAP].ndp_val = pause.rx_pause;
2594 2594 adapter->nd_params[PARAM_ASYM_PAUSE_CAP].ndp_val = pause.tx_pause;
2595 2595
2596 2596 return (DDI_SUCCESS);
2597 2597 }
1418 lines elided