Print this page
XXXX introduce drv_sectohz
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/myri10ge/drv/myri10ge.c
+++ new/usr/src/uts/common/io/myri10ge/drv/myri10ge.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /*
28 28 * Copyright 2007-2009 Myricom, Inc. All rights reserved.
29 29 * Use is subject to license terms.
30 30 */
31 31
32 32 /*
33 33 * Copyright (c) 2014, Joyent, Inc.
34 34 */
35 35
36 36 #ifndef lint
37 37 static const char __idstring[] =
38 38 "@(#)$Id: myri10ge.c,v 1.186 2009-06-29 13:47:22 gallatin Exp $";
39 39 #endif
40 40
41 41 #define MXGEFW_NDIS
42 42 #include "myri10ge_var.h"
43 43 #include "rss_eth_z8e.h"
44 44 #include "rss_ethp_z8e.h"
45 45 #include "mcp_gen_header.h"
46 46
47 47 #define MYRI10GE_MAX_ETHER_MTU 9014
48 48 #define MYRI10GE_MAX_GLD_MTU 9000
49 49 #define MYRI10GE_MIN_GLD_MTU 1500
50 50
51 51 #define MYRI10GE_ETH_STOPPED 0
52 52 #define MYRI10GE_ETH_STOPPING 1
53 53 #define MYRI10GE_ETH_STARTING 2
54 54 #define MYRI10GE_ETH_RUNNING 3
55 55 #define MYRI10GE_ETH_OPEN_FAILED 4
56 56 #define MYRI10GE_ETH_SUSPENDED_RUNNING 5
57 57
58 58 static int myri10ge_small_bytes = 510;
59 59 static int myri10ge_intr_coal_delay = 125;
60 60 static int myri10ge_flow_control = 1;
61 61 #if defined __i386 || defined i386 || defined __i386__ || defined __x86_64__
62 62 static int myri10ge_nvidia_ecrc_enable = 1;
63 63 #endif
64 64 static int myri10ge_mtu_override = 0;
65 65 static int myri10ge_tx_copylen = 512;
66 66 static int myri10ge_deassert_wait = 1;
67 67 static int myri10ge_verbose = 0;
68 68 static int myri10ge_watchdog_reset = 0;
69 69 static int myri10ge_use_msix = 1;
70 70 static int myri10ge_max_slices = -1;
71 71 static int myri10ge_use_msi = 1;
72 72 int myri10ge_force_firmware = 0;
73 73 static boolean_t myri10ge_use_lso = B_TRUE;
74 74 static int myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT;
75 75 static int myri10ge_tx_hash = 1;
76 76 static int myri10ge_lro = 0;
77 77 static int myri10ge_lro_cnt = 8;
78 78 int myri10ge_lro_max_aggr = 2;
79 79 static int myri10ge_lso_copy = 0;
80 80 static mblk_t *myri10ge_send_wrapper(void *arg, mblk_t *mp);
81 81 int myri10ge_tx_handles_initial = 128;
82 82
83 83 static kmutex_t myri10ge_param_lock;
84 84 static void* myri10ge_db_lastfree;
85 85
86 86 static int myri10ge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
87 87 static int myri10ge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
88 88 static int myri10ge_quiesce(dev_info_t *dip);
89 89
90 90 DDI_DEFINE_STREAM_OPS(myri10ge_ops, nulldev, nulldev, myri10ge_attach,
91 91 myri10ge_detach, nodev, NULL, D_MP, NULL, myri10ge_quiesce);
92 92
93 93
94 94 static struct modldrv modldrv = {
95 95 &mod_driverops,
96 96 "Myricom 10G driver (10GbE)",
97 97 &myri10ge_ops,
98 98 };
99 99
100 100
101 101 static struct modlinkage modlinkage = {
102 102 MODREV_1,
103 103 {&modldrv, NULL},
104 104 };
105 105
106 106 unsigned char myri10ge_broadcastaddr[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
107 107
108 108 static ddi_dma_attr_t myri10ge_misc_dma_attr = {
109 109 DMA_ATTR_V0, /* version number. */
110 110 (uint64_t)0, /* low address */
111 111 (uint64_t)0xffffffffffffffffULL, /* high address */
112 112 (uint64_t)0x7ffffff, /* address counter max */
113 113 (uint64_t)4096, /* alignment */
114 114 (uint_t)0x7f, /* burstsizes for 32b and 64b xfers */
115 115 (uint32_t)0x1, /* minimum transfer size */
116 116 (uint64_t)0x7fffffff, /* maximum transfer size */
117 117 (uint64_t)0x7fffffff, /* maximum segment size */
118 118 1, /* scatter/gather list length */
119 119 1, /* granularity */
120 120 0 /* attribute flags */
121 121 };
122 122
123 123 /*
124 124 * The Myri10GE NIC has the following constraints on receive buffers:
125 125 * 1) Buffers which cross a 4KB boundary must be aligned to 4KB
126 126 * 2) Buffers which are not aligned to 4KB must not cross a 4KB boundary
127 127 */
128 128
129 129 static ddi_dma_attr_t myri10ge_rx_jumbo_dma_attr = {
130 130 DMA_ATTR_V0, /* version number. */
131 131 (uint64_t)0, /* low address */
132 132 (uint64_t)0xffffffffffffffffULL, /* high address */
133 133 (uint64_t)0x7ffffff, /* address counter max */
134 134 (uint64_t)4096, /* alignment */
135 135 (uint_t)0x7f, /* burstsizes for 32b and 64b xfers */
136 136 (uint32_t)0x1, /* minimum transfer size */
137 137 (uint64_t)0x7fffffff, /* maximum transfer size */
138 138 UINT64_MAX, /* maximum segment size */
139 139 1, /* scatter/gather list length */
140 140 1, /* granularity */
141 141 0 /* attribute flags */
142 142 };
143 143
144 144 static ddi_dma_attr_t myri10ge_rx_std_dma_attr = {
145 145 DMA_ATTR_V0, /* version number. */
146 146 (uint64_t)0, /* low address */
147 147 (uint64_t)0xffffffffffffffffULL, /* high address */
148 148 (uint64_t)0x7ffffff, /* address counter max */
149 149 #if defined sparc64 || defined __sparcv9
150 150 (uint64_t)4096, /* alignment */
151 151 #else
152 152 (uint64_t)0x80, /* alignment */
153 153 #endif
154 154 (uint_t)0x7f, /* burstsizes for 32b and 64b xfers */
155 155 (uint32_t)0x1, /* minimum transfer size */
156 156 (uint64_t)0x7fffffff, /* maximum transfer size */
157 157 #if defined sparc64 || defined __sparcv9
158 158 UINT64_MAX, /* maximum segment size */
159 159 #else
160 160 (uint64_t)0xfff, /* maximum segment size */
161 161 #endif
162 162 1, /* scatter/gather list length */
163 163 1, /* granularity */
164 164 0 /* attribute flags */
165 165 };
166 166
167 167 static ddi_dma_attr_t myri10ge_tx_dma_attr = {
168 168 DMA_ATTR_V0, /* version number. */
169 169 (uint64_t)0, /* low address */
170 170 (uint64_t)0xffffffffffffffffULL, /* high address */
171 171 (uint64_t)0x7ffffff, /* address counter max */
172 172 (uint64_t)1, /* alignment */
173 173 (uint_t)0x7f, /* burstsizes for 32b and 64b xfers */
174 174 (uint32_t)0x1, /* minimum transfer size */
175 175 (uint64_t)0x7fffffff, /* maximum transfer size */
176 176 UINT64_MAX, /* maximum segment size */
177 177 INT32_MAX, /* scatter/gather list length */
178 178 1, /* granularity */
179 179 0 /* attribute flags */
180 180 };
181 181
182 182 #if defined sparc64 || defined __sparcv9
183 183 #define WC 0
184 184 #else
185 185 #define WC 1
186 186 #endif
187 187
188 188 struct ddi_device_acc_attr myri10ge_dev_access_attr = {
189 189 DDI_DEVICE_ATTR_V0, /* version */
190 190 DDI_NEVERSWAP_ACC, /* endian flash */
191 191 #if WC
192 192 DDI_MERGING_OK_ACC /* data order */
193 193 #else
194 194 DDI_STRICTORDER_ACC
195 195 #endif
196 196 };
197 197
198 198 static void myri10ge_watchdog(void *arg);
199 199
200 200 #ifdef MYRICOM_PRIV
201 201 int myri10ge_mtu = MYRI10GE_MAX_ETHER_MTU + MXGEFW_PAD + VLAN_TAGSZ;
202 202 #define MYRI10GE_DEFAULT_GLD_MTU MYRI10GE_MAX_GLD_MTU
203 203 #else
204 204 int myri10ge_mtu = ETHERMAX + MXGEFW_PAD + VLAN_TAGSZ;
205 205 #define MYRI10GE_DEFAULT_GLD_MTU MYRI10GE_MIN_GLD_MTU
206 206 #endif
207 207 int myri10ge_bigbufs_initial = 1024;
208 208 int myri10ge_bigbufs_max = 4096;
209 209
210 210
/*
 * Allocate "len" bytes of DMA-able memory: acquire a DMA handle,
 * allocate the backing memory, and bind it, requiring the binding to
 * resolve to a single cookie (the firmware takes one 64-bit address).
 * On success the bus address is stored, split into network-order
 * halves, in dma->low/dma->high and the kernel virtual address is
 * returned.  On failure, partially acquired resources are released via
 * goto-unwind and NULL is returned.  "warn" gates diagnostics; "wait"
 * is the DDI callback (DDI_DMA_SLEEP/DDI_DMA_DONTWAIT) used for all
 * three DDI calls.
 */
caddr_t
myri10ge_dma_alloc(dev_info_t *dip, size_t len,
    ddi_dma_attr_t *attr, ddi_device_acc_attr_t *accattr,
    uint_t alloc_flags, int bind_flags, struct myri10ge_dma_stuff *dma,
    int warn, int (*wait)(caddr_t))
{
	caddr_t kaddr;
	size_t real_length;
	ddi_dma_cookie_t cookie;
	uint_t count;
	int err;

	err = ddi_dma_alloc_handle(dip, attr, wait,
	    NULL, &dma->handle);
	if (err != DDI_SUCCESS) {
		if (warn)
			cmn_err(CE_WARN,
			    "myri10ge: ddi_dma_alloc_handle failed\n");
		goto abort_with_nothing;
	}

	err = ddi_dma_mem_alloc(dma->handle, len, accattr, alloc_flags,
	    wait, NULL, &kaddr, &real_length,
	    &dma->acc_handle);
	if (err != DDI_SUCCESS) {
		if (warn)
			cmn_err(CE_WARN,
			    "myri10ge: ddi_dma_mem_alloc failed\n");
		goto abort_with_handle;
	}

	err = ddi_dma_addr_bind_handle(dma->handle, NULL, kaddr, len,
	    bind_flags, wait, NULL, &cookie, &count);

	if (err != DDI_SUCCESS) {
		if (warn)
			cmn_err(CE_WARN,
			    "myri10ge: ddi_dma_addr_bind_handle failed\n");
		goto abort_with_mem;
	}

	/* a multi-cookie binding cannot be handed to the NIC */
	if (count != 1) {
		if (warn)
			cmn_err(CE_WARN,
			    "myri10ge: got too many dma segments ");
		goto abort_with_bind;
	}
	/* publish the bus address in the big-endian halves the mcp expects */
	dma->low = htonl(MYRI10GE_LOWPART_TO_U32(cookie.dmac_laddress));
	dma->high = htonl(MYRI10GE_HIGHPART_TO_U32(cookie.dmac_laddress));
	return (kaddr);

abort_with_bind:
	(void) ddi_dma_unbind_handle(dma->handle);

abort_with_mem:
	ddi_dma_mem_free(&dma->acc_handle);

abort_with_handle:
	ddi_dma_free_handle(&dma->handle);
abort_with_nothing:
	if (warn) {
		cmn_err(CE_WARN, "myri10ge: myri10ge_dma_alloc failed.\n ");
		cmn_err(CE_WARN, "args: dip=%p len=0x%lx ddi_dma_attr=%p\n",
		    (void*) dip, len, (void*) attr);
		cmn_err(CE_WARN,
		    "args: ddi_device_acc_attr=%p alloc_flags=0x%x\n",
		    (void*) accattr, alloc_flags);
		cmn_err(CE_WARN, "args: bind_flags=0x%x dmastuff=%p",
		    bind_flags, (void*) dma);
	}
	return (NULL);

}
284 284
/*
 * Release a region obtained from myri10ge_dma_alloc(): unbind the
 * mapping, free the memory, then free the handle — in that order.
 */
void
myri10ge_dma_free(struct myri10ge_dma_stuff *dma)
{
	(void) ddi_dma_unbind_handle(dma->handle);
	ddi_dma_mem_free(&dma->acc_handle);
	ddi_dma_free_handle(&dma->handle);
}
292 292
/*
 * Copy "size" bytes (assumed to be a multiple of 4) from "from32" to
 * the destination "to" as a sequence of 32-bit volatile stores.
 */
static inline void
myri10ge_pio_copy32(void *to, uint32_t *from32, size_t size)
{
	volatile uint32_t *dst = (volatile uint32_t *)to;
	size_t words = size / 4;

	while (words-- != 0)
		*dst++ = *from32++;
}
306 306
#if defined(_LP64)
/*
 * Copy "size" bytes (assumed to be a multiple of 8) from "from64" to
 * the destination "to" as a sequence of 64-bit volatile stores.
 * Only built on 64-bit kernels.
 */
static inline void
myri10ge_pio_copy64(void *to, uint64_t *from64, size_t size)
{
	volatile uint64_t *dst = (volatile uint64_t *)to;
	size_t words = size / 8;

	while (words-- != 0)
		*dst++ = *from64++;
}
#endif
322 322
/*
 * This routine copies memory from the host to the NIC.
 * The "size" argument must always be a multiple of
 * the size of long (4 or 8 bytes), and to/from must also
 * be naturally aligned.  The store width is chosen at compile
 * time to match the kernel's pointer size.
 */
static inline void
myri10ge_pio_copy(void *to, void *from, size_t size)
{
#if !defined(_LP64)
	ASSERT((size % 4) == 0);
	myri10ge_pio_copy32(to, (uint32_t *)from, size);
#else
	ASSERT((size % 8) == 0);
	myri10ge_pio_copy64(to, (uint64_t *)from, size);
#endif
}
340 340
341 341
342 342 /*
343 343 * Due to various bugs in Solaris (especially bug 6186772 where the
344 344 * TCP/UDP checksum is calculated incorrectly on mblk chains with more
345 345 * than two elements), and the design bug where hardware checksums are
346 346 * ignored on mblk chains with more than 2 elements, we need to
347 347 * allocate private pool of physically contiguous receive buffers.
348 348 */
349 349
/*
 * Initialize a slice's jumbo buffer pool: zero all pool state
 * (including the per-CPU free caches) and set up the pool mutex with
 * the interrupt block cookie.
 */
static void
myri10ge_jpool_init(struct myri10ge_slice_state *ss)
{
	struct myri10ge_jpool_stuff *jpool = &ss->jpool;

	bzero(jpool, sizeof (*jpool));
	mutex_init(&jpool->mtx, NULL, MUTEX_DRIVER,
	    ss->mgp->icookie);
	/* redundant after bzero(), but makes the empty state explicit */
	jpool->head = NULL;
}
360 360
/*
 * Tear down a slice's jumbo pool mutex.  The freelist must already be
 * empty at this point; a non-empty list indicates leaked buffers, so
 * complain loudly before destroying the lock anyway.
 */
static void
myri10ge_jpool_fini(struct myri10ge_slice_state *ss)
{
	struct myri10ge_jpool_stuff *jpool = &ss->jpool;

	if (jpool->head != NULL) {
		cmn_err(CE_WARN,
		    "%s: BUG! myri10ge_jpool_fini called on non-empty pool\n",
		    ss->mgp->name);
	}
	mutex_destroy(&jpool->mtx);
}
373 373
374 374
/*
 * copy an array of mcp_kreq_ether_recv_t's to the mcp.  Copy
 * at most 32 bytes at a time, so as to avoid involving the software
 * pio handler in the nic.  We re-write the first segment's low
 * DMA address to mark it valid only after we write the entire chunk
 * in a burst
 */
static inline void
myri10ge_submit_8rx(mcp_kreq_ether_recv_t *dst, mcp_kreq_ether_recv_t *src)
{
	/* set the low bit so the firmware ignores the chunk while copying */
	src->addr_low |= BE_32(1);
	myri10ge_pio_copy(dst, src, 4 * sizeof (*src));
	mb();
	myri10ge_pio_copy(dst + 4, src + 4, 4 * sizeof (*src));
	mb();
	src->addr_low &= ~(BE_32(1));
	/* re-write the first low address with the bit clear: chunk now valid */
	dst->addr_low = src->addr_low;
	mb();
}
394 394
/*
 * Drain every per-CPU free cache into the main jumbo pool freelist.
 * The caller holds the jpool mutex; each per-CPU head is detached with
 * an atomic swap so concurrent lock-free frees (myri10ge_jfree_rtn)
 * remain safe.
 */
static void
myri10ge_pull_jpool(struct myri10ge_slice_state *ss)
{
	struct myri10ge_jpool_stuff *jpool = &ss->jpool;
	struct myri10ge_jpool_entry *jtail, *j, *jfree;
	volatile uintptr_t *putp;
	uintptr_t put;
	int i;

	/* find tail */
	jtail = NULL;
	if (jpool->head != NULL) {
		j = jpool->head;
		while (j->next != NULL)
			j = j->next;
		jtail = j;
	}

	/*
	 * iterate over all per-CPU caches, and add contents into
	 * jpool
	 */
	for (i = 0; i < MYRI10GE_MAX_CPUS; i++) {
		/* take per-CPU free list */
		putp = (void *)&jpool->cpu[i & MYRI10GE_MAX_CPU_MASK].head;
		if (*putp == NULL)
			continue;
		/* atomically detach the whole per-CPU chain */
		put = atomic_swap_ulong(putp, 0);
		jfree = (struct myri10ge_jpool_entry *)put;

		/* append to pool */
		if (jtail == NULL) {
			jpool->head = jfree;
		} else {
			jtail->next = jfree;
		}
		/* walk to the new tail for the next iteration */
		j = jfree;
		while (j->next != NULL)
			j = j->next;
		jtail = j;
	}
}
437 437
/*
 * Transfers buffers from the free pool to the nic
 * Must be called holding the jpool mutex.
 */

static inline void
myri10ge_restock_jumbos(struct myri10ge_slice_state *ss)
{
	struct myri10ge_jpool_stuff *jpool = &ss->jpool;
	struct myri10ge_jpool_entry *j;
	myri10ge_rx_ring_t *rx;
	int i, idx, limit;

	rx = &ss->rx_big;
	/* never post more buffers than the ring has free slots for */
	limit = ss->j_rx_cnt + (rx->mask + 1);

	for (i = rx->cnt; i != limit; i++) {
		idx = i & (rx->mask);
		j = jpool->head;
		if (j == NULL) {
			/* freelist dry; try refilling from per-CPU caches */
			myri10ge_pull_jpool(ss);
			j = jpool->head;
			if (j == NULL) {
				break;
			}
		}
		jpool->head = j->next;
		rx->info[idx].j = j;
		rx->shadow[idx].addr_low = j->dma.low;
		rx->shadow[idx].addr_high = j->dma.high;
		/* push a full 8-descriptor chunk to the mcp at a time */
		if ((idx & 7) == 7) {
			myri10ge_submit_8rx(&rx->lanai[idx - 7],
			    &rx->shadow[idx - 7]);
		}
	}
	rx->cnt = i;
}
476 476
/*
 * Transfer buffers from the nic to the free pool.
 * Takes the jpool mutex itself (callers must not already hold it).
 */

static inline void
myri10ge_unstock_jumbos(struct myri10ge_slice_state *ss)
{
	struct myri10ge_jpool_stuff *jpool = &ss->jpool;
	struct myri10ge_jpool_entry *j;
	myri10ge_rx_ring_t *rx;
	int i;

	mutex_enter(&jpool->mtx);
	rx = &ss->rx_big;

	/* reclaim every buffer currently posted in the big rx ring */
	for (i = 0; i < rx->mask + 1; i++) {
		j = rx->info[i].j;
		rx->info[i].j = NULL;
		if (j == NULL)
			continue;
		/* push back onto the head of the freelist */
		j->next = jpool->head;
		jpool->head = j;
	}
	mutex_exit(&jpool->mtx);

}
504 504
505 505
/*
 * Free routine which is called when the mblk allocated via
 * esballoc() is freed.  Here we return the jumbo buffer
 * to this CPU's lock-free cache; the cache is drained back into
 * the main pool later by myri10ge_pull_jpool().
 */

static void
myri10ge_jfree_rtn(void *arg)
{
	struct myri10ge_jpool_entry *j = (struct myri10ge_jpool_entry *)arg;
	struct myri10ge_jpool_stuff *jpool;
	volatile uintptr_t *putp;
	uintptr_t old, new;

	jpool = &j->ss->jpool;

	/* prepend buffer locklessly to per-CPU freelist */
	putp = (void *)&jpool->cpu[CPU->cpu_seqid & MYRI10GE_MAX_CPU_MASK].head;
	new = (uintptr_t)j;
	/* classic CAS push: retry until the head we read is still current */
	do {
		old = *putp;
		j->next = (void *)old;
	} while (atomic_cas_ulong(putp, old, new) != old);
}
531 531
/*
 * Release one jumbo buffer entry: unbind its DMA mapping, free the
 * backing memory and handle, then free the entry structure itself.
 */
static void
myri10ge_remove_jbuf(struct myri10ge_jpool_entry *j)
{
	(void) ddi_dma_unbind_handle(j->dma_handle);
	ddi_dma_mem_free(&j->acc_handle);
	ddi_dma_free_handle(&j->dma_handle);
	kmem_free(j, sizeof (*j));
}
540 540
541 541
/*
 * Allocates one physically contiguous descriptor
 * and add it to the jumbo buffer pool.  Returns 0 on success or a
 * DDI error code.  May retry once with stricter (4KB) alignment if
 * the first allocation violates the NIC's 4KB-boundary rule or fails
 * outright.
 */

static int
myri10ge_add_jbuf(struct myri10ge_slice_state *ss)
{
	struct myri10ge_jpool_entry *j;
	struct myri10ge_jpool_stuff *jpool = &ss->jpool;
	ddi_dma_attr_t *rx_dma_attr;
	size_t real_length;
	ddi_dma_cookie_t cookie;
	uint_t count;
	int err;

	/* std-MTU and jumbo buffers use different DMA constraints */
	if (myri10ge_mtu < 2048)
		rx_dma_attr = &myri10ge_rx_std_dma_attr;
	else
		rx_dma_attr = &myri10ge_rx_jumbo_dma_attr;

again:
	j = (struct myri10ge_jpool_entry *)
	    kmem_alloc(sizeof (*j), KM_SLEEP);
	err = ddi_dma_alloc_handle(ss->mgp->dip, rx_dma_attr,
	    DDI_DMA_DONTWAIT, NULL, &j->dma_handle);
	if (err != DDI_SUCCESS)
		goto abort_with_j;

	err = ddi_dma_mem_alloc(j->dma_handle, myri10ge_mtu,
	    &myri10ge_dev_access_attr, DDI_DMA_STREAMING, DDI_DMA_DONTWAIT,
	    NULL, &j->buf, &real_length, &j->acc_handle);
	if (err != DDI_SUCCESS)
		goto abort_with_handle;

	err = ddi_dma_addr_bind_handle(j->dma_handle, NULL, j->buf,
	    real_length, DDI_DMA_READ|DDI_DMA_STREAMING, DDI_DMA_DONTWAIT,
	    NULL, &cookie, &count);
	if (err != DDI_SUCCESS)
		goto abort_with_mem;

	/*
	 * Make certain std MTU buffers do not cross a 4KB boundary:
	 *
	 * Setting dma_attr_align=4096 will do this, but the system
	 * will only allocate 1 RX buffer per 4KB page, rather than 2.
	 * Setting dma_attr_granular=4096 *seems* to work around this,
	 * but I'm paranoid about future systems no longer honoring
	 * this, so fall back to the safe, but memory wasting way if a
	 * buffer crosses a 4KB boundary.
	 */

	if (rx_dma_attr == &myri10ge_rx_std_dma_attr &&
	    rx_dma_attr->dma_attr_align != 4096) {
		uint32_t start, end;

		start = MYRI10GE_LOWPART_TO_U32(cookie.dmac_laddress);
		end = start + myri10ge_mtu;
		if (((end >> 12) != (start >> 12)) && (start & 4095U)) {
			printf("std buffer crossed a 4KB boundary!\n");
			myri10ge_remove_jbuf(j);
			/* tighten alignment and retry the allocation */
			rx_dma_attr->dma_attr_align = 4096;
			rx_dma_attr->dma_attr_seg = UINT64_MAX;
			goto again;
		}
	}

	/* record the bus address in the big-endian wire format */
	j->dma.low =
	    htonl(MYRI10GE_LOWPART_TO_U32(cookie.dmac_laddress));
	j->dma.high =
	    htonl(MYRI10GE_HIGHPART_TO_U32(cookie.dmac_laddress));
	j->ss = ss;


	j->free_func.free_func = myri10ge_jfree_rtn;
	j->free_func.free_arg = (char *)j;
	mutex_enter(&jpool->mtx);
	j->next = jpool->head;
	jpool->head = j;
	jpool->num_alloc++;
	mutex_exit(&jpool->mtx);
	return (0);

abort_with_mem:
	ddi_dma_mem_free(&j->acc_handle);

abort_with_handle:
	ddi_dma_free_handle(&j->dma_handle);

abort_with_j:
	kmem_free(j, sizeof (*j));

	/*
	 * If an allocation failed, perhaps it failed because it could
	 * not satisfy granularity requirement.  Disable that, and
	 * try again.
	 */
	if (rx_dma_attr == &myri10ge_rx_std_dma_attr &&
	    rx_dma_attr->dma_attr_align != 4096) {
		cmn_err(CE_NOTE,
		    "!alloc failed, reverting to gran=1\n");
		rx_dma_attr->dma_attr_align = 4096;
		rx_dma_attr->dma_attr_seg = UINT64_MAX;
		goto again;
	}
	return (err);
}
649 649
650 650 static int
651 651 myri10ge_jfree_cnt(struct myri10ge_jpool_stuff *jpool)
652 652 {
653 653 int i;
654 654 struct myri10ge_jpool_entry *j;
655 655
656 656 mutex_enter(&jpool->mtx);
657 657 j = jpool->head;
658 658 i = 0;
659 659 while (j != NULL) {
660 660 i++;
661 661 j = j->next;
662 662 }
663 663 mutex_exit(&jpool->mtx);
664 664 return (i);
665 665 }
666 666
667 667 static int
668 668 myri10ge_add_jbufs(struct myri10ge_slice_state *ss, int num, int total)
669 669 {
670 670 struct myri10ge_jpool_stuff *jpool = &ss->jpool;
671 671 int allocated = 0;
672 672 int err;
673 673 int needed;
674 674
675 675 /*
676 676 * if total is set, user wants "num" jbufs in the pool,
677 677 * otherwise the user wants to "num" additional jbufs
678 678 * added to the pool
679 679 */
680 680 if (total && jpool->num_alloc) {
681 681 allocated = myri10ge_jfree_cnt(jpool);
682 682 needed = num - allocated;
683 683 } else {
684 684 needed = num;
685 685 }
686 686
687 687 while (needed > 0) {
688 688 needed--;
689 689 err = myri10ge_add_jbuf(ss);
690 690 if (err == 0) {
691 691 allocated++;
692 692 }
693 693 }
694 694 return (allocated);
695 695 }
696 696
/*
 * Free every buffer on the jumbo pool freelist, after first draining
 * the per-CPU caches into it.  Buffers not reachable from the list
 * (e.g. still posted to the NIC) are not touched here.
 */
static void
myri10ge_remove_jbufs(struct myri10ge_slice_state *ss)
{
	struct myri10ge_jpool_stuff *jpool = &ss->jpool;
	struct myri10ge_jpool_entry *j;

	mutex_enter(&jpool->mtx);
	myri10ge_pull_jpool(ss);
	while (jpool->head != NULL) {
		jpool->num_alloc--;
		j = jpool->head;
		jpool->head = j->next;
		myri10ge_remove_jbuf(j);
	}
	mutex_exit(&jpool->mtx);
}
713 713
/*
 * Populate the small rx ring by carving jumbo buffers into
 * (myri10ge_small_bytes + MXGEFW_PAD)-sized slices.  Each jumbo used
 * is moved from the jpool freelist onto ss->small_jpool so it can be
 * returned later by myri10ge_release_small_jbufs().
 *
 * NOTE(review): jpool->head is popped without a NULL check — this
 * assumes the caller stocked enough jumbos to cover the whole small
 * ring; confirm against callers.
 */
static void
myri10ge_carve_up_jbufs_into_small_ring(struct myri10ge_slice_state *ss)
{
	struct myri10ge_jpool_stuff *jpool = &ss->jpool;
	struct myri10ge_jpool_entry *j = NULL;
	caddr_t ptr;
	uint32_t dma_low, dma_high;
	int idx, len;
	unsigned int alloc_size;

	dma_low = dma_high = len = 0;
	alloc_size = myri10ge_small_bytes + MXGEFW_PAD;
	ptr = NULL;
	for (idx = 0; idx < ss->rx_small.mask + 1; idx++) {
		/* Allocate a jumbo frame and carve it into small frames */
		if (len < alloc_size) {
			mutex_enter(&jpool->mtx);
			/* remove jumbo from freelist */
			j = jpool->head;
			jpool->head = j->next;
			/* place it onto small list */
			j->next = ss->small_jpool;
			ss->small_jpool = j;
			mutex_exit(&jpool->mtx);
			len = myri10ge_mtu;
			dma_low = ntohl(j->dma.low);
			dma_high = ntohl(j->dma.high);
			ptr = j->buf;
		}
		/* hand out the next alloc_size-byte slice of the jumbo */
		ss->rx_small.info[idx].ptr = ptr;
		ss->rx_small.shadow[idx].addr_low = htonl(dma_low);
		ss->rx_small.shadow[idx].addr_high = htonl(dma_high);
		len -= alloc_size;
		ptr += alloc_size;
		dma_low += alloc_size;
	}
}
751 751
752 752 /*
753 753 * Return the jumbo bufs we carved up for small to the jumbo pool
754 754 */
755 755
756 756 static void
757 757 myri10ge_release_small_jbufs(struct myri10ge_slice_state *ss)
758 758 {
759 759 struct myri10ge_jpool_stuff *jpool = &ss->jpool;
760 760 struct myri10ge_jpool_entry *j = NULL;
761 761
762 762 mutex_enter(&jpool->mtx);
763 763 while (ss->small_jpool != NULL) {
764 764 j = ss->small_jpool;
765 765 ss->small_jpool = j->next;
766 766 j->next = jpool->head;
767 767 jpool->head = j;
768 768 }
769 769 mutex_exit(&jpool->mtx);
770 770 ss->jbufs_for_smalls = 0;
771 771 }
772 772
773 773 static int
774 774 myri10ge_add_tx_handle(struct myri10ge_slice_state *ss)
775 775 {
776 776 myri10ge_tx_ring_t *tx = &ss->tx;
777 777 struct myri10ge_priv *mgp = ss->mgp;
778 778 struct myri10ge_tx_dma_handle *handle;
779 779 int err;
780 780
781 781 handle = kmem_zalloc(sizeof (*handle), KM_SLEEP);
782 782 err = ddi_dma_alloc_handle(mgp->dip,
783 783 &myri10ge_tx_dma_attr,
784 784 DDI_DMA_SLEEP, NULL,
785 785 &handle->h);
786 786 if (err) {
787 787 static int limit = 0;
788 788 if (limit == 0)
789 789 cmn_err(CE_WARN, "%s: Falled to alloc tx dma handle\n",
790 790 mgp->name);
791 791 limit++;
792 792 kmem_free(handle, sizeof (*handle));
793 793 return (err);
794 794 }
795 795 mutex_enter(&tx->handle_lock);
796 796 MYRI10GE_SLICE_STAT_INC(tx_handles_alloced);
797 797 handle->next = tx->free_tx_handles;
798 798 tx->free_tx_handles = handle;
799 799 mutex_exit(&tx->handle_lock);
800 800 return (DDI_SUCCESS);
801 801 }
802 802
/*
 * Free every handle on the tx free list at close time, and warn if
 * the allocation counter shows handles still outstanding elsewhere.
 */
static void
myri10ge_remove_tx_handles(struct myri10ge_slice_state *ss)
{
	myri10ge_tx_ring_t *tx = &ss->tx;
	struct myri10ge_tx_dma_handle *handle;
	mutex_enter(&tx->handle_lock);

	handle = tx->free_tx_handles;
	while (handle != NULL) {
		tx->free_tx_handles = handle->next;
		ddi_dma_free_handle(&handle->h);
		kmem_free(handle, sizeof (*handle));
		handle = tx->free_tx_handles;
		MYRI10GE_SLICE_STAT_DEC(tx_handles_alloced);
	}
	mutex_exit(&tx->handle_lock);
	/* anything still counted here was never returned to the free list */
	if (MYRI10GE_SLICE_STAT(tx_handles_alloced) != 0) {
		cmn_err(CE_WARN, "%s: %d tx dma handles allocated at close\n",
		    ss->mgp->name,
		    (int)MYRI10GE_SLICE_STAT(tx_handles_alloced));
	}
}
825 825
/*
 * Splice a batch of handles (list->head .. list->tail) back onto the
 * ring's free list under the handle lock.
 * NOTE(review): assumes list->tail is non-NULL (non-empty batch);
 * callers build the list before calling — confirm.
 */
static void
myri10ge_free_tx_handles(myri10ge_tx_ring_t *tx,
    struct myri10ge_tx_dma_handle_head *list)
{
	mutex_enter(&tx->handle_lock);
	list->tail->next = tx->free_tx_handles;
	tx->free_tx_handles = list->head;
	mutex_exit(&tx->handle_lock);
}
835 835
836 836 static void
837 837 myri10ge_free_tx_handle_slist(myri10ge_tx_ring_t *tx,
838 838 struct myri10ge_tx_dma_handle *handle)
839 839 {
840 840 struct myri10ge_tx_dma_handle_head list;
841 841
842 842 if (handle == NULL)
843 843 return;
844 844 list.head = handle;
845 845 list.tail = handle;
846 846 while (handle != NULL) {
847 847 list.tail = handle;
848 848 handle = handle->next;
849 849 }
850 850 myri10ge_free_tx_handles(tx, &list);
851 851 }
852 852
/*
 * Take "count" handles from the ring's free list, growing the list
 * via myri10ge_add_tx_handle() whenever it runs dry, and prepend them
 * onto *ret.  Returns DDI_SUCCESS; on allocation failure, returns the
 * error after giving any partially collected handles back to the free
 * list.
 */
static int
myri10ge_alloc_tx_handles(struct myri10ge_slice_state *ss, int count,
    struct myri10ge_tx_dma_handle **ret)
{
	myri10ge_tx_ring_t *tx = &ss->tx;
	struct myri10ge_tx_dma_handle *handle;
	int err, i;

	mutex_enter(&tx->handle_lock);
	for (i = 0; i < count; i++) {
		handle = tx->free_tx_handles;
		while (handle == NULL) {
			/* drop the lock while allocating a new handle */
			mutex_exit(&tx->handle_lock);
			err = myri10ge_add_tx_handle(ss);
			if (err != DDI_SUCCESS) {
				goto abort_with_handles;
			}
			mutex_enter(&tx->handle_lock);
			handle = tx->free_tx_handles;
		}
		tx->free_tx_handles = handle->next;
		handle->next = *ret;
		*ret = handle;
	}
	mutex_exit(&tx->handle_lock);
	return (DDI_SUCCESS);

abort_with_handles:
	myri10ge_free_tx_handle_slist(tx, *ret);
	return (err);
}
884 884
885 885
/*
 * Frees DMA resources associated with the send ring: any mblks still
 * attached to ring slots (with their DMA handles), every per-slot
 * copyblock, the copyblock array itself, and finally the tx handle
 * free list.
 */
static void
myri10ge_unprepare_tx_ring(struct myri10ge_slice_state *ss)
{
	myri10ge_tx_ring_t *tx;
	struct myri10ge_tx_dma_handle_head handles;
	size_t bytes;
	int idx;

	tx = &ss->tx;
	handles.head = NULL;
	handles.tail = NULL;
	for (idx = 0; idx < ss->tx.mask + 1; idx++) {
		if (tx->info[idx].m) {
			(void) ddi_dma_unbind_handle(tx->info[idx].handle->h);
			/*
			 * NOTE(review): head is overwritten on each hit
			 * without re-linking the previous head; this appears
			 * to rely on the handles' existing ->next chaining —
			 * confirm against the send path.
			 */
			handles.head = tx->info[idx].handle;
			if (handles.tail == NULL)
				handles.tail = tx->info[idx].handle;
			freeb(tx->info[idx].m);
			tx->info[idx].m = 0;
			tx->info[idx].handle = 0;
		}
		tx->cp[idx].va = NULL;
		myri10ge_dma_free(&tx->cp[idx].dma);
	}
	/* release the copyblock array itself */
	bytes = sizeof (*tx->cp) * (tx->mask + 1);
	kmem_free(tx->cp, bytes);
	tx->cp = NULL;
	if (handles.head != NULL)
		myri10ge_free_tx_handles(tx, &handles);
	myri10ge_remove_tx_handles(ss);
}
920 920
/*
 * Allocates DMA handles associated with the send ring: one copyblock
 * per ring slot plus an initial batch of tx DMA handles.  Returns
 * DDI_SUCCESS, or DDI_FAILURE after unwinding any copyblocks already
 * allocated.
 */
static inline int
myri10ge_prepare_tx_ring(struct myri10ge_slice_state *ss)
{
	struct myri10ge_tx_dma_handle *handles;
	int h;
	size_t bytes;

	bytes = sizeof (*ss->tx.cp) * (ss->tx.mask + 1);
	ss->tx.cp = kmem_zalloc(bytes, KM_SLEEP);
	/* defensive: kmem_zalloc(KM_SLEEP) does not return NULL */
	if (ss->tx.cp == NULL) {
		cmn_err(CE_WARN,
		    "%s: Failed to allocate tx copyblock storage\n",
		    ss->mgp->name);
		return (DDI_FAILURE);
	}


	/* allocate the TX copyblocks */
	for (h = 0; h < ss->tx.mask + 1; h++) {
		ss->tx.cp[h].va = myri10ge_dma_alloc(ss->mgp->dip,
		    4096, &myri10ge_rx_jumbo_dma_attr,
		    &myri10ge_dev_access_attr, DDI_DMA_STREAMING,
		    DDI_DMA_WRITE|DDI_DMA_STREAMING, &ss->tx.cp[h].dma, 1,
		    DDI_DMA_DONTWAIT);
		if (ss->tx.cp[h].va == NULL) {
			cmn_err(CE_WARN, "%s: Failed to allocate tx "
			    "copyblock %d\n", ss->mgp->name, h);
			goto abort_with_copyblocks;
		}
	}
	/* pre-allocate transmit handles */
	handles = NULL;
	(void) myri10ge_alloc_tx_handles(ss, myri10ge_tx_handles_initial,
	    &handles);
	/* put the pre-allocated handles straight onto the free list */
	if (handles != NULL)
		myri10ge_free_tx_handle_slist(&ss->tx, handles);

	return (DDI_SUCCESS);

abort_with_copyblocks:
	/* unwind the copyblocks allocated so far */
	while (h > 0) {
		h--;
		myri10ge_dma_free(&ss->tx.cp[h].dma);
	}

	bytes = sizeof (*ss->tx.cp) * (ss->tx.mask + 1);
	kmem_free(ss->tx.cp, bytes);
	ss->tx.cp = NULL;
	return (DDI_FAILURE);
}
974 974
975 975 /*
976 976 * The eeprom strings on the lanaiX have the format
977 977 * SN=x\0
978 978 * MAC=x:x:x:x:x:x\0
979 979 * PT:ddd mmm xx xx:xx:xx xx\0
980 980 * PV:ddd mmm xx xx:xx:xx xx\0
981 981 */
982 982 static int
983 983 myri10ge_read_mac_addr(struct myri10ge_priv *mgp)
984 984 {
985 985 #define MYRI10GE_NEXT_STRING(p) while (ptr < limit && *ptr++)
986 986 #define myri10ge_digit(c) (((c) >= '0' && (c) <= '9') ? ((c) - '0') : \
987 987 (((c) >= 'A' && (c) <= 'F') ? (10 + (c) - 'A') : \
988 988 (((c) >= 'a' && (c) <= 'f') ? (10 + (c) - 'a') : -1)))
989 989
990 990 char *ptr, *limit;
991 991 int i, hv, lv;
992 992
993 993 ptr = mgp->eeprom_strings;
994 994 limit = mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE;
995 995
996 996 while (*ptr != '\0' && ptr < limit) {
997 997 if (memcmp(ptr, "MAC=", 4) == 0) {
998 998 ptr += 4;
999 999 if (myri10ge_verbose)
1000 1000 printf("%s: mac address = %s\n", mgp->name,
1001 1001 ptr);
1002 1002 mgp->mac_addr_string = ptr;
1003 1003 for (i = 0; i < 6; i++) {
1004 1004 if ((ptr + 2) > limit)
1005 1005 goto abort;
1006 1006
1007 1007 if (*(ptr+1) == ':') {
1008 1008 hv = 0;
1009 1009 lv = myri10ge_digit(*ptr); ptr++;
1010 1010 } else {
1011 1011 hv = myri10ge_digit(*ptr); ptr++;
1012 1012 lv = myri10ge_digit(*ptr); ptr++;
1013 1013 }
1014 1014 mgp->mac_addr[i] = (hv << 4) | lv;
1015 1015 ptr++;
1016 1016 }
1017 1017 }
1018 1018 if (memcmp((const void *)ptr, "SN=", 3) == 0) {
1019 1019 ptr += 3;
1020 1020 mgp->sn_str = (char *)ptr;
1021 1021 }
1022 1022 if (memcmp((const void *)ptr, "PC=", 3) == 0) {
1023 1023 ptr += 3;
1024 1024 mgp->pc_str = (char *)ptr;
1025 1025 }
1026 1026 MYRI10GE_NEXT_STRING(ptr);
1027 1027 }
1028 1028
1029 1029 return (0);
1030 1030
1031 1031 abort:
1032 1032 cmn_err(CE_WARN, "%s: failed to parse eeprom_strings", mgp->name);
1033 1033 return (ENXIO);
1034 1034 }
1035 1035
1036 1036
1037 1037 /*
1038 1038 * Determine the register set containing the PCI resource we
1039 1039 * want to map: the memory-mappable part of the interface. We do
1040 1040 * this by scanning the DDI "reg" property of the interface,
1041 1041 * which is an array of mx_ddi_reg_set structures.
1042 1042 */
/*
 * Walk the DDI "reg" property (an array of 5-int entries per the
 * IEEE 1275 PCI binding) looking for the first memory-mappable
 * register set.  On success *reg_set holds its index and *span its
 * size; the PCI bus/device/function numbers from entry 0 are
 * returned through *busno/*devno/*funcno.  Returns 0 on success,
 * ENXIO if the property cannot be read, ENODEV if it is empty, or
 * EIO if no memory-space entry is found.
 */
static int
myri10ge_reg_set(dev_info_t *dip, int *reg_set, int *span,
    unsigned long *busno, unsigned long *devno,
    unsigned long *funcno)
{

	/*
	 * Field accessors for one 5-int "reg" entry: ip[0] packs the
	 * register/function/device/bus numbers and the address-space
	 * type; ip[1..2] are the PCI address and ip[3..4] the span
	 * (high word first).
	 */
#define	REGISTER_NUMBER(ip)	(ip[0] >> 0 & 0xff)
#define	FUNCTION_NUMBER(ip)	(ip[0] >> 8 & 0x07)
#define	DEVICE_NUMBER(ip)	(ip[0] >> 11 & 0x1f)
#define	BUS_NUMBER(ip)		(ip[0] >> 16 & 0xff)
#define	ADDRESS_SPACE(ip)	(ip[0] >> 24 & 0x03)
#define	PCI_ADDR_HIGH(ip)	(ip[1])
#define	PCI_ADDR_LOW(ip)	(ip[2])
#define	PCI_SPAN_HIGH(ip)	(ip[3])
#define	PCI_SPAN_LOW(ip)	(ip[4])

#define	MX_DDI_REG_SET_32_BIT_MEMORY_SPACE 2
#define	MX_DDI_REG_SET_64_BIT_MEMORY_SPACE 3

	int *data, i, *rs;
	uint32_t nelementsp;

#ifdef MYRI10GE_REGSET_VERBOSE
	char *address_space_name[] = { "Configuration Space",
					"I/O Space",
					"32-bit Memory Space",
					"64-bit Memory Space"
				};
#endif

	/* fetch the "reg" property for this node from the DDI */
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "reg", &data, &nelementsp) != DDI_SUCCESS) {
		printf("Could not determine register set.\n");
		return (ENXIO);
	}

#ifdef MYRI10GE_REGSET_VERBOSE
	printf("There are %d register sets.\n", nelementsp / 5);
#endif
	if (!nelementsp) {
		printf("Didn't find any \"reg\" properties.\n");
		ddi_prop_free(data);
		return (ENODEV);
	}

	/* Scan for the register number. */
	rs = &data[0];
	/* bus/dev/func are taken from the first (config-space) entry */
	*busno = BUS_NUMBER(rs);
	*devno = DEVICE_NUMBER(rs);
	*funcno = FUNCTION_NUMBER(rs);

#ifdef MYRI10GE_REGSET_VERBOSE
	printf("*** Scanning for register number.\n");
#endif
	for (i = 0; i < nelementsp / 5; i++) {
		rs = &data[5 * i];
#ifdef MYRI10GE_REGSET_VERBOSE
		printf("Examining register set %d:\n", i);
		printf(" Register number = %d.\n", REGISTER_NUMBER(rs));
		printf(" Function number = %d.\n", FUNCTION_NUMBER(rs));
		printf(" Device number = %d.\n", DEVICE_NUMBER(rs));
		printf(" Bus number = %d.\n", BUS_NUMBER(rs));
		printf(" Address space = %d (%s ).\n", ADDRESS_SPACE(rs),
		    address_space_name[ADDRESS_SPACE(rs)]);
		printf(" pci address 0x%08x %08x\n", PCI_ADDR_HIGH(rs),
		    PCI_ADDR_LOW(rs));
		printf(" pci span 0x%08x %08x\n", PCI_SPAN_HIGH(rs),
		    PCI_SPAN_LOW(rs));
#endif
		/* We are looking for a memory property. */

		if (ADDRESS_SPACE(rs) == MX_DDI_REG_SET_64_BIT_MEMORY_SPACE ||
		    ADDRESS_SPACE(rs) == MX_DDI_REG_SET_32_BIT_MEMORY_SPACE) {
			*reg_set = i;

#ifdef MYRI10GE_REGSET_VERBOSE
			printf("%s uses register set %d.\n",
			    address_space_name[ADDRESS_SPACE(rs)], *reg_set);
#endif

			*span = (PCI_SPAN_LOW(rs));
#ifdef MYRI10GE_REGSET_VERBOSE
			printf("Board span is 0x%x\n", *span);
#endif
			break;
		}
	}

	ddi_prop_free(data);

	/* If no match, fail. */
	if (i >= nelementsp / 5) {
		return (EIO);
	}

	return (0);
}
1140 1140
1141 1141
1142 1142 static int
1143 1143 myri10ge_load_firmware_from_zlib(struct myri10ge_priv *mgp, uint32_t *limit)
1144 1144 {
1145 1145 void *inflate_buffer;
1146 1146 int rv, status;
1147 1147 size_t sram_size = mgp->sram_size - MYRI10GE_EEPROM_STRINGS_SIZE;
1148 1148 size_t destlen;
1149 1149 mcp_gen_header_t *hdr;
1150 1150 unsigned hdr_offset, i;
1151 1151
1152 1152
1153 1153 *limit = 0; /* -Wuninitialized */
1154 1154 status = 0;
1155 1155
1156 1156 inflate_buffer = kmem_zalloc(sram_size, KM_NOSLEEP);
1157 1157 if (!inflate_buffer) {
1158 1158 cmn_err(CE_WARN,
1159 1159 "%s: Could not allocate buffer to inflate mcp\n",
1160 1160 mgp->name);
1161 1161 return (ENOMEM);
1162 1162 }
1163 1163
1164 1164 destlen = sram_size;
1165 1165 rv = z_uncompress(inflate_buffer, &destlen, mgp->eth_z8e,
1166 1166 mgp->eth_z8e_length);
1167 1167
1168 1168 if (rv != Z_OK) {
1169 1169 cmn_err(CE_WARN, "%s: Could not inflate mcp: %s\n",
1170 1170 mgp->name, z_strerror(rv));
1171 1171 status = ENXIO;
1172 1172 goto abort;
1173 1173 }
1174 1174
1175 1175 *limit = (uint32_t)destlen;
1176 1176
1177 1177 hdr_offset = htonl(*(uint32_t *)(void *)((char *)inflate_buffer +
1178 1178 MCP_HEADER_PTR_OFFSET));
1179 1179 hdr = (void *)((char *)inflate_buffer + hdr_offset);
1180 1180 if (ntohl(hdr->mcp_type) != MCP_TYPE_ETH) {
1181 1181 cmn_err(CE_WARN, "%s: Bad firmware type: 0x%x\n", mgp->name,
1182 1182 ntohl(hdr->mcp_type));
1183 1183 status = EIO;
1184 1184 goto abort;
1185 1185 }
1186 1186
1187 1187 /* save firmware version for kstat */
1188 1188 (void) strncpy(mgp->fw_version, hdr->version, sizeof (mgp->fw_version));
1189 1189 if (myri10ge_verbose)
1190 1190 printf("%s: firmware id: %s\n", mgp->name, hdr->version);
1191 1191
1192 1192 /* Copy the inflated firmware to NIC SRAM. */
1193 1193 for (i = 0; i < *limit; i += 256) {
1194 1194 myri10ge_pio_copy((char *)mgp->sram + MYRI10GE_FW_OFFSET + i,
1195 1195 (char *)inflate_buffer + i,
1196 1196 min(256U, (unsigned)(*limit - i)));
1197 1197 mb();
1198 1198 (void) *(int *)(void *)mgp->sram;
1199 1199 mb();
1200 1200 }
1201 1201
1202 1202 abort:
1203 1203 kmem_free(inflate_buffer, sram_size);
1204 1204
1205 1205 return (status);
1206 1206
1207 1207 }
1208 1208
1209 1209
/*
 * Issue one command to the running firmware and wait for completion.
 * The command word and arguments from |data| are converted to network
 * order and PIO-copied to the NIC's command mailbox; completion is
 * detected by polling the DMA-mapped response block (mgp->cmd), which
 * the firmware overwrites.  Serialized by mgp->cmd_lock.
 *
 * Returns 0 on success (result value, if any, in data->data0),
 * ENOSYS if the firmware does not implement the command, E2BIG for
 * MXGEFW_CMD_ERROR_UNALIGNED, ENXIO for any other firmware error,
 * and EAGAIN if the firmware does not respond within ~20ms.
 */
int
myri10ge_send_cmd(struct myri10ge_priv *mgp, uint32_t cmd,
    myri10ge_cmd_t *data)
{
	mcp_cmd_t *buf;
	char buf_bytes[sizeof (*buf) + 8];
	volatile mcp_cmd_response_t *response = mgp->cmd;
	volatile char *cmd_addr =
	    (volatile char *)mgp->sram + MXGEFW_ETH_CMD;
	int sleep_total = 0;

	/* ensure buf is aligned to 8 bytes */
	buf = (mcp_cmd_t *)((unsigned long)(buf_bytes + 7) & ~7UL);

	buf->data0 = htonl(data->data0);
	buf->data1 = htonl(data->data1);
	buf->data2 = htonl(data->data2);
	buf->cmd = htonl(cmd);
	buf->response_addr.low = mgp->cmd_dma.low;
	buf->response_addr.high = mgp->cmd_dma.high;
	mutex_enter(&mgp->cmd_lock);
	/* sentinel value; firmware replaces it when the command completes */
	response->result = 0xffffffff;
	mb();

	myri10ge_pio_copy((void *)cmd_addr, buf, sizeof (*buf));

	/* wait up to 20ms */
	for (sleep_total = 0; sleep_total < 20; sleep_total++) {
		mb();
		if (response->result != 0xffffffff) {
			if (response->result == 0) {
				data->data0 = ntohl(response->data);
				mutex_exit(&mgp->cmd_lock);
				return (0);
			} else if (ntohl(response->result)
			    == MXGEFW_CMD_UNKNOWN) {
				mutex_exit(&mgp->cmd_lock);
				return (ENOSYS);
			} else if (ntohl(response->result)
			    == MXGEFW_CMD_ERROR_UNALIGNED) {
				mutex_exit(&mgp->cmd_lock);
				return (E2BIG);
			} else {
				cmn_err(CE_WARN,
				    "%s: command %d failed, result = %d\n",
				    mgp->name, cmd, ntohl(response->result));
				mutex_exit(&mgp->cmd_lock);
				return (ENXIO);
			}
		}
		drv_usecwait(1000);
	}
	mutex_exit(&mgp->cmd_lock);
	cmn_err(CE_WARN, "%s: command %d timed out, result = %d\n",
	    mgp->name, cmd, ntohl(response->result));
	return (EAGAIN);
}
1267 1267
1268 1268 /*
1269 1269 * Enable or disable periodic RDMAs from the host to make certain
1270 1270 * chipsets resend dropped PCIe messages
1271 1271 */
1272 1272
/*
 * Tell the bootstrap firmware to start (enable != 0) or stop the
 * periodic dummy RDMAs.  Builds a six-word boot command, PIO-copies
 * it to the MXGEFW_BOOT_DUMMY_RDMA mailbox, and polls the DMA'ed
 * confirmation word for the firmware's 0xffffffff acknowledgement
 * (up to ~20ms).  Failure is only logged; there is no return value.
 */
static void
myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable)
{
	char buf_bytes[72];
	volatile uint32_t *confirm;
	volatile char *submit;
	uint32_t *buf;
	int i;

	/* align the command buffer to 8 bytes */
	buf = (uint32_t *)((unsigned long)(buf_bytes + 7) & ~7UL);

	/* clear confirmation addr */
	confirm = (volatile uint32_t *)mgp->cmd;
	*confirm = 0;
	mb();

	/*
	 * send an rdma command to the PCIe engine, and wait for the
	 * response in the confirmation address. The firmware should
	 * write a -1 there to indicate it is alive and well
	 */

	buf[0] = mgp->cmd_dma.high;		/* confirm addr MSW */
	buf[1] = mgp->cmd_dma.low;		/* confirm addr LSW */
	buf[2] = htonl(0xffffffff);		/* confirm data */
	buf[3] = htonl(mgp->cmd_dma.high);	/* dummy addr MSW */
	buf[4] = htonl(mgp->cmd_dma.low);	/* dummy addr LSW */
	buf[5] = htonl(enable);			/* enable? */


	submit = (volatile char *)(mgp->sram + MXGEFW_BOOT_DUMMY_RDMA);

	myri10ge_pio_copy((char *)submit, buf, 64);
	mb();
	drv_usecwait(1000);
	mb();
	i = 0;
	/* poll for the firmware's -1 acknowledgement, up to ~20ms */
	while (*confirm != 0xffffffff && i < 20) {
		drv_usecwait(1000);
		i++;
	}
	if (*confirm != 0xffffffff) {
		cmn_err(CE_WARN, "%s: dummy rdma %s failed (%p = 0x%x)",
		    mgp->name,
		    (enable ? "enable" : "disable"), (void*) confirm, *confirm);
	}
}
1320 1320
/*
 * Load and boot the ethernet MCP: inflate the firmware into NIC
 * SRAM, hand control from the bootstrap MCP to the new image via
 * the MXGEFW_BOOT_HANDOFF mailbox, and verify the handoff through
 * the DMA'ed confirmation word.  On success the receive ring size
 * is read back to size the interrupt queue, and dummy RDMAs are
 * enabled.  Returns 0 on success or an errno on failure.
 */
static int
myri10ge_load_firmware(struct myri10ge_priv *mgp)
{
	myri10ge_cmd_t cmd;
	volatile uint32_t *confirm;
	volatile char *submit;
	char buf_bytes[72];
	uint32_t *buf, size;
	int status, i;

	/* align the boot command buffer to 8 bytes */
	buf = (uint32_t *)((unsigned long)(buf_bytes + 7) & ~7UL);

	status = myri10ge_load_firmware_from_zlib(mgp, &size);
	if (status) {
		cmn_err(CE_WARN, "%s: firmware loading failed\n", mgp->name);
		return (status);
	}

	/* clear confirmation addr */
	confirm = (volatile uint32_t *)mgp->cmd;
	*confirm = 0;
	mb();

	/*
	 * send a reload command to the bootstrap MCP, and wait for the
	 * response in the confirmation address. The firmware should
	 * write a -1 there to indicate it is alive and well
	 */

	buf[0] = mgp->cmd_dma.high;	/* confirm addr MSW */
	buf[1] = mgp->cmd_dma.low;	/* confirm addr LSW */
	buf[2] = htonl(0xffffffff);	/* confirm data */

	/*
	 * FIX: All newest firmware should un-protect the bottom of
	 * the sram before handoff. However, the very first interfaces
	 * do not. Therefore the handoff copy must skip the first 8 bytes
	 */
	buf[3] = htonl(MYRI10GE_FW_OFFSET + 8); /* where the code starts */
	buf[4] = htonl(size - 8);	/* length of code */
	buf[5] = htonl(8);		/* where to copy to */
	buf[6] = htonl(0);		/* where to jump to */

	submit = (volatile char *)(mgp->sram + MXGEFW_BOOT_HANDOFF);

	myri10ge_pio_copy((char *)submit, buf, 64);
	mb();
	drv_usecwait(1000);
	mb();
	i = 0;
	/* poll up to ~1s for the new firmware's -1 acknowledgement */
	while (*confirm != 0xffffffff && i < 1000) {
		drv_usecwait(1000);
		i++;
	}
	if (*confirm != 0xffffffff) {
		cmn_err(CE_WARN, "%s: handoff failed (%p = 0x%x)",
		    mgp->name, (void *) confirm, *confirm);

		return (ENXIO);
	}
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd);
	if (status != 0) {
		cmn_err(CE_WARN, "%s: failed MXGEFW_CMD_GET_RX_RING_SIZE\n",
		    mgp->name);
		return (ENXIO);
	}

	/* each receive ring entry may produce up to two interrupt slots */
	mgp->max_intr_slots = 2 * (cmd.data0 / sizeof (mcp_dma_addr_t));
	myri10ge_dummy_rdma(mgp, 1);
	return (0);
}
1392 1392
1393 1393 static int
1394 1394 myri10ge_m_unicst(void *arg, const uint8_t *addr)
1395 1395 {
1396 1396 struct myri10ge_priv *mgp = arg;
1397 1397 myri10ge_cmd_t cmd;
1398 1398 int status;
1399 1399
1400 1400 cmd.data0 = ((addr[0] << 24) | (addr[1] << 16)
1401 1401 | (addr[2] << 8) | addr[3]);
1402 1402
1403 1403 cmd.data1 = ((addr[4] << 8) | (addr[5]));
1404 1404
1405 1405 status = myri10ge_send_cmd(mgp, MXGEFW_SET_MAC_ADDRESS, &cmd);
1406 1406 if (status == 0 && (addr != mgp->mac_addr))
1407 1407 (void) memcpy(mgp->mac_addr, addr, sizeof (mgp->mac_addr));
1408 1408
1409 1409 return (status);
1410 1410 }
1411 1411
1412 1412 static int
1413 1413 myri10ge_change_pause(struct myri10ge_priv *mgp, int pause)
1414 1414 {
1415 1415 myri10ge_cmd_t cmd;
1416 1416 int status;
1417 1417
1418 1418 if (pause)
1419 1419 status = myri10ge_send_cmd(mgp, MXGEFW_ENABLE_FLOW_CONTROL,
1420 1420 &cmd);
1421 1421 else
1422 1422 status = myri10ge_send_cmd(mgp, MXGEFW_DISABLE_FLOW_CONTROL,
1423 1423 &cmd);
1424 1424
1425 1425 if (status) {
1426 1426 cmn_err(CE_WARN, "%s: Failed to set flow control mode\n",
1427 1427 mgp->name);
1428 1428 return (ENXIO);
1429 1429 }
1430 1430 mgp->pause = pause;
1431 1431 return (0);
1432 1432 }
1433 1433
1434 1434 static void
1435 1435 myri10ge_change_promisc(struct myri10ge_priv *mgp, int promisc)
1436 1436 {
1437 1437 myri10ge_cmd_t cmd;
1438 1438 int status;
1439 1439
1440 1440 if (promisc)
1441 1441 status = myri10ge_send_cmd(mgp, MXGEFW_ENABLE_PROMISC, &cmd);
1442 1442 else
1443 1443 status = myri10ge_send_cmd(mgp, MXGEFW_DISABLE_PROMISC, &cmd);
1444 1444
1445 1445 if (status) {
1446 1446 cmn_err(CE_WARN, "%s: Failed to set promisc mode\n",
1447 1447 mgp->name);
1448 1448 }
1449 1449 }
1450 1450
1451 1451 static int
1452 1452 myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type)
1453 1453 {
1454 1454 myri10ge_cmd_t cmd;
1455 1455 int status;
1456 1456 uint32_t len;
1457 1457 void *dmabench;
1458 1458 struct myri10ge_dma_stuff dmabench_dma;
1459 1459 char *test = " ";
1460 1460
1461 1461 /*
1462 1462 * Run a small DMA test.
1463 1463 * The magic multipliers to the length tell the firmware
1464 1464 * tp do DMA read, write, or read+write tests. The
1465 1465 * results are returned in cmd.data0. The upper 16
1466 1466 * bits or the return is the number of transfers completed.
1467 1467 * The lower 16 bits is the time in 0.5us ticks that the
1468 1468 * transfers took to complete
1469 1469 */
1470 1470
1471 1471 len = mgp->tx_boundary;
1472 1472
1473 1473 dmabench = myri10ge_dma_alloc(mgp->dip, len,
1474 1474 &myri10ge_rx_jumbo_dma_attr, &myri10ge_dev_access_attr,
1475 1475 DDI_DMA_STREAMING, DDI_DMA_RDWR|DDI_DMA_STREAMING,
1476 1476 &dmabench_dma, 1, DDI_DMA_DONTWAIT);
1477 1477 mgp->read_dma = mgp->write_dma = mgp->read_write_dma = 0;
1478 1478 if (dmabench == NULL) {
1479 1479 cmn_err(CE_WARN, "%s dma benchmark aborted\n", mgp->name);
1480 1480 return (ENOMEM);
1481 1481 }
1482 1482
1483 1483 cmd.data0 = ntohl(dmabench_dma.low);
1484 1484 cmd.data1 = ntohl(dmabench_dma.high);
1485 1485 cmd.data2 = len * 0x10000;
1486 1486 status = myri10ge_send_cmd(mgp, test_type, &cmd);
1487 1487 if (status != 0) {
1488 1488 test = "read";
1489 1489 goto abort;
1490 1490 }
1491 1491 mgp->read_dma = ((cmd.data0>>16) * len * 2) / (cmd.data0 & 0xffff);
1492 1492
1493 1493 cmd.data0 = ntohl(dmabench_dma.low);
1494 1494 cmd.data1 = ntohl(dmabench_dma.high);
1495 1495 cmd.data2 = len * 0x1;
1496 1496 status = myri10ge_send_cmd(mgp, test_type, &cmd);
1497 1497 if (status != 0) {
1498 1498 test = "write";
1499 1499 goto abort;
1500 1500 }
1501 1501 mgp->write_dma = ((cmd.data0>>16) * len * 2) / (cmd.data0 & 0xffff);
1502 1502
1503 1503 cmd.data0 = ntohl(dmabench_dma.low);
1504 1504 cmd.data1 = ntohl(dmabench_dma.high);
1505 1505 cmd.data2 = len * 0x10001;
1506 1506 status = myri10ge_send_cmd(mgp, test_type, &cmd);
1507 1507 if (status != 0) {
1508 1508 test = "read/write";
1509 1509 goto abort;
1510 1510 }
1511 1511 mgp->read_write_dma = ((cmd.data0>>16) * len * 2 * 2) /
1512 1512 (cmd.data0 & 0xffff);
1513 1513
1514 1514
1515 1515 abort:
1516 1516 myri10ge_dma_free(&dmabench_dma);
1517 1517 if (status != 0 && test_type != MXGEFW_CMD_UNALIGNED_TEST)
1518 1518 cmn_err(CE_WARN, "%s %s dma benchmark failed\n", mgp->name,
1519 1519 test);
1520 1520 return (status);
1521 1521 }
1522 1522
/*
 * Reset the NIC and re-establish all host/firmware shared state:
 * interrupt queue size and per-slice DMA addresses, the RSS slice
 * configuration, the irq claim/deassert and interrupt-coalescing
 * SRAM pointers, and the per-slice ring counters.  Finally restore
 * the MAC address, promiscuous mode, and pause setting.  The
 * command ordering below is mandated by the firmware (see comments).
 * Returns 0 on success or an errno from a failed firmware command.
 */
static int
myri10ge_reset(struct myri10ge_priv *mgp)
{
	myri10ge_cmd_t cmd;
	struct myri10ge_nic_stat *ethstat;
	struct myri10ge_slice_state *ss;
	int i, status;
	size_t bytes;

	/* send a reset command to the card to see if it is alive */
	(void) memset(&cmd, 0, sizeof (cmd));
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd);
	if (status != 0) {
		cmn_err(CE_WARN, "%s: failed reset\n", mgp->name);
		return (ENXIO);
	}

	/* Now exchange information about interrupts */

	bytes = mgp->max_intr_slots * sizeof (*mgp->ss[0].rx_done.entry);
	cmd.data0 = (uint32_t)bytes;
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd);

	/*
	 * Even though we already know how many slices are supported
	 * via myri10ge_probe_slices() MXGEFW_CMD_GET_MAX_RSS_QUEUES
	 * has magic side effects, and must be called after a reset.
	 * It must be called prior to calling any RSS related cmds,
	 * including assigning an interrupt queue for anything but
	 * slice 0.  It must also be called *after*
	 * MXGEFW_CMD_SET_INTRQ_SIZE, since the intrq size is used by
	 * the firmware to compute offsets.
	 */

	if (mgp->num_slices > 1) {

		/* ask the maximum number of slices it supports */
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES,
		    &cmd);
		if (status != 0) {
			cmn_err(CE_WARN,
			    "%s: failed to get number of slices\n",
			    mgp->name);
			return (status);
		}

		/*
		 * MXGEFW_CMD_ENABLE_RSS_QUEUES must be called prior
		 * to setting up the interrupt queue DMA
		 */

		cmd.data0 = mgp->num_slices;
		cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE |
		    MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
		    &cmd);
		if (status != 0) {
			cmn_err(CE_WARN,
			    "%s: failed to set number of slices\n",
			    mgp->name);
			return (status);
		}
	}
	/* hand each slice's interrupt queue DMA address to the firmware */
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];
		cmd.data0 = ntohl(ss->rx_done.dma.low);
		cmd.data1 = ntohl(ss->rx_done.dma.high);
		cmd.data2 = i;
		status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA,
		    &cmd);
	};

	/* per-slice irq claim registers live 8 bytes apart in SRAM */
	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd);
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];
		ss->irq_claim = (volatile unsigned int *)
		    (void *)(mgp->sram + cmd.data0 + 8 * i);
	}

	/* legacy (fixed) interrupts additionally need a deassert register */
	if (mgp->ddi_intr_type == DDI_INTR_TYPE_FIXED) {
		status |= myri10ge_send_cmd(mgp,
		    MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET, &cmd);
		mgp->irq_deassert = (uint32_t *)(void *)(mgp->sram + cmd.data0);
	}

	status |= myri10ge_send_cmd(mgp,
	    MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, &cmd);
	mgp->intr_coal_delay_ptr = (uint32_t *)(void *)(mgp->sram + cmd.data0);

	if (status != 0) {
		cmn_err(CE_WARN, "%s: failed set interrupt parameters\n",
		    mgp->name);
		return (status);
	}

	*mgp->intr_coal_delay_ptr = htonl(mgp->intr_coal_delay);
	(void) myri10ge_dma_test(mgp, MXGEFW_DMA_TEST);

	/* reset mcp/driver shared state back to 0 */

	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];
		bytes = mgp->max_intr_slots *
		    sizeof (*mgp->ss[0].rx_done.entry);
		(void) memset(ss->rx_done.entry, 0, bytes);
		ss->tx.req = 0;
		ss->tx.done = 0;
		ss->tx.pkt_done = 0;
		ss->rx_big.cnt = 0;
		ss->rx_small.cnt = 0;
		ss->rx_done.idx = 0;
		ss->rx_done.cnt = 0;
		ss->rx_token = 0;
		ss->tx.watchdog_done = 0;
		ss->tx.watchdog_req = 0;
		ss->tx.active = 0;
		ss->tx.activate = 0;
	}
	mgp->watchdog_rx_pause = 0;
	if (mgp->ksp_stat != NULL) {
		ethstat = (struct myri10ge_nic_stat *)mgp->ksp_stat->ks_data;
		ethstat->link_changes.value.ul = 0;
	}
	/* restore MAC address and filtering state lost by the reset */
	status = myri10ge_m_unicst(mgp, mgp->mac_addr);
	myri10ge_change_promisc(mgp, 0);
	(void) myri10ge_change_pause(mgp, mgp->pause);
	return (status);
}
1651 1651
/*
 * Fetch the RSS Toeplitz key from NIC SRAM and precompute a
 * 12 x 256-entry lookup table (one 256-entry sub-table per input
 * byte position: 4 dst IP + 4 src IP + 2 dst port + 2 src port
 * bytes) so the send-side hash can be computed with table XORs
 * instead of per-bit work.  Returns 0 on success or EIO if the
 * key offset cannot be obtained from the firmware.
 */
static int
myri10ge_init_toeplitz(struct myri10ge_priv *mgp)
{
	myri10ge_cmd_t cmd;
	int i, b, s, t, j;
	int status;
	uint32_t k[8];
	uint32_t tmp;
	uint8_t *key;

	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RSS_KEY_OFFSET,
	    &cmd);
	if (status != 0) {
		cmn_err(CE_WARN, "%s: failed to get rss key\n",
		    mgp->name);
		return (EIO);
	}
	/* copy the key out of NIC SRAM using 32-bit PIO reads */
	myri10ge_pio_copy32(mgp->rss_key,
	    (uint32_t *)(void*)((char *)mgp->sram + cmd.data0),
	    sizeof (mgp->rss_key));

	mgp->toeplitz_hash_table = kmem_alloc(sizeof (uint32_t) * 12 * 256,
	    KM_SLEEP);
	key = (uint8_t *)mgp->rss_key;
	t = 0;
	for (b = 0; b < 12; b++) {
		for (s = 0; s < 8; s++) {
			/* Bits: b*8+s, ..., b*8+s+31 */
			k[s] = 0;
			for (j = 0; j < 32; j++) {
				int bit = b*8+s+j;
				bit = 0x1 & (key[bit / 8] >> (7 -(bit & 0x7)));
				k[s] |= bit << (31 - j);
			}
		}

		/*
		 * Table entry for byte value i at position b is the
		 * XOR of the key windows selected by i's set bits.
		 */
		for (i = 0; i <= 0xff; i++) {
			tmp = 0;
			if (i & (1 << 7)) { tmp ^= k[0]; }
			if (i & (1 << 6)) { tmp ^= k[1]; }
			if (i & (1 << 5)) { tmp ^= k[2]; }
			if (i & (1 << 4)) { tmp ^= k[3]; }
			if (i & (1 << 3)) { tmp ^= k[4]; }
			if (i & (1 << 2)) { tmp ^= k[5]; }
			if (i & (1 << 1)) { tmp ^= k[6]; }
			if (i & (1 << 0)) { tmp ^= k[7]; }
			mgp->toeplitz_hash_table[t++] = tmp;
		}
	}
	return (0);
}
1703 1703
1704 1704 static inline struct myri10ge_slice_state *
1705 1705 myri10ge_toeplitz_send_hash(struct myri10ge_priv *mgp, struct ip *ip)
1706 1706 {
1707 1707 struct tcphdr *hdr;
1708 1708 uint32_t saddr, daddr;
1709 1709 uint32_t hash, slice;
1710 1710 uint32_t *table = mgp->toeplitz_hash_table;
1711 1711 uint16_t src, dst;
1712 1712
1713 1713 /*
1714 1714 * Note hashing order is reversed from how it is done
1715 1715 * in the NIC, so as to generate the same hash value
1716 1716 * for the connection to try to keep connections CPU local
1717 1717 */
1718 1718
1719 1719 /* hash on IPv4 src/dst address */
1720 1720 saddr = ntohl(ip->ip_src.s_addr);
1721 1721 daddr = ntohl(ip->ip_dst.s_addr);
1722 1722 hash = table[(256 * 0) + ((daddr >> 24) & 0xff)];
1723 1723 hash ^= table[(256 * 1) + ((daddr >> 16) & 0xff)];
1724 1724 hash ^= table[(256 * 2) + ((daddr >> 8) & 0xff)];
1725 1725 hash ^= table[(256 * 3) + ((daddr) & 0xff)];
1726 1726 hash ^= table[(256 * 4) + ((saddr >> 24) & 0xff)];
1727 1727 hash ^= table[(256 * 5) + ((saddr >> 16) & 0xff)];
1728 1728 hash ^= table[(256 * 6) + ((saddr >> 8) & 0xff)];
1729 1729 hash ^= table[(256 * 7) + ((saddr) & 0xff)];
1730 1730 /* hash on TCP port, if required */
1731 1731 if ((myri10ge_rss_hash & MXGEFW_RSS_HASH_TYPE_TCP_IPV4) &&
1732 1732 ip->ip_p == IPPROTO_TCP) {
1733 1733 hdr = (struct tcphdr *)(void *)
1734 1734 (((uint8_t *)ip) + (ip->ip_hl << 2));
1735 1735 src = ntohs(hdr->th_sport);
1736 1736 dst = ntohs(hdr->th_dport);
1737 1737
1738 1738 hash ^= table[(256 * 8) + ((dst >> 8) & 0xff)];
1739 1739 hash ^= table[(256 * 9) + ((dst) & 0xff)];
1740 1740 hash ^= table[(256 * 10) + ((src >> 8) & 0xff)];
1741 1741 hash ^= table[(256 * 11) + ((src) & 0xff)];
1742 1742 }
1743 1743 slice = (mgp->num_slices - 1) & hash;
1744 1744 return (&mgp->ss[slice]);
1745 1745
1746 1746 }
1747 1747
1748 1748 static inline struct myri10ge_slice_state *
1749 1749 myri10ge_simple_send_hash(struct myri10ge_priv *mgp, struct ip *ip)
1750 1750 {
1751 1751 struct tcphdr *hdr;
1752 1752 uint32_t slice, hash_val;
1753 1753
1754 1754
1755 1755 if (ip->ip_p != IPPROTO_TCP && ip->ip_p != IPPROTO_UDP) {
1756 1756 return (&mgp->ss[0]);
1757 1757 }
1758 1758 hdr = (struct tcphdr *)(void *)(((uint8_t *)ip) + (ip->ip_hl << 2));
1759 1759
1760 1760 /*
1761 1761 * Use the second byte of the *destination* address for
1762 1762 * MXGEFW_RSS_HASH_TYPE_SRC_PORT, so as to match NIC's hashing
1763 1763 */
1764 1764 hash_val = ntohs(hdr->th_dport) & 0xff;
1765 1765 if (myri10ge_rss_hash == MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT)
1766 1766 hash_val += ntohs(hdr->th_sport) & 0xff;
1767 1767
1768 1768 slice = (mgp->num_slices - 1) & hash_val;
1769 1769 return (&mgp->ss[slice]);
1770 1770 }
1771 1771
1772 1772 static inline struct myri10ge_slice_state *
1773 1773 myri10ge_send_hash(struct myri10ge_priv *mgp, mblk_t *mp)
1774 1774 {
1775 1775 unsigned int slice = 0;
1776 1776 struct ether_header *eh;
1777 1777 struct ether_vlan_header *vh;
1778 1778 struct ip *ip;
1779 1779 int ehl, ihl;
1780 1780
1781 1781 if (mgp->num_slices == 1)
1782 1782 return (&mgp->ss[0]);
1783 1783
1784 1784 if (myri10ge_tx_hash == 0) {
1785 1785 slice = CPU->cpu_id & (mgp->num_slices - 1);
1786 1786 return (&mgp->ss[slice]);
1787 1787 }
1788 1788
1789 1789 /*
1790 1790 * ensure it is a TCP or UDP over IPv4 packet, and that the
1791 1791 * headers are in the 1st mblk. Otherwise, punt
1792 1792 */
1793 1793 ehl = sizeof (*eh);
1794 1794 ihl = sizeof (*ip);
1795 1795 if ((MBLKL(mp)) < (ehl + ihl + 8))
1796 1796 return (&mgp->ss[0]);
1797 1797 eh = (struct ether_header *)(void *)mp->b_rptr;
1798 1798 ip = (struct ip *)(void *)(eh + 1);
1799 1799 if (eh->ether_type != BE_16(ETHERTYPE_IP)) {
1800 1800 if (eh->ether_type != BE_16(ETHERTYPE_VLAN))
1801 1801 return (&mgp->ss[0]);
1802 1802 vh = (struct ether_vlan_header *)(void *)mp->b_rptr;
1803 1803 if (vh->ether_type != BE_16(ETHERTYPE_IP))
1804 1804 return (&mgp->ss[0]);
1805 1805 ehl += 4;
1806 1806 ip = (struct ip *)(void *)(vh + 1);
1807 1807 }
1808 1808 ihl = ip->ip_hl << 2;
1809 1809 if (MBLKL(mp) < (ehl + ihl + 8))
1810 1810 return (&mgp->ss[0]);
1811 1811 switch (myri10ge_rss_hash) {
1812 1812 case MXGEFW_RSS_HASH_TYPE_IPV4:
1813 1813 /* fallthru */
1814 1814 case MXGEFW_RSS_HASH_TYPE_TCP_IPV4:
1815 1815 /* fallthru */
1816 1816 case (MXGEFW_RSS_HASH_TYPE_IPV4|MXGEFW_RSS_HASH_TYPE_TCP_IPV4):
1817 1817 return (myri10ge_toeplitz_send_hash(mgp, ip));
1818 1818 case MXGEFW_RSS_HASH_TYPE_SRC_PORT:
1819 1819 /* fallthru */
1820 1820 case MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT:
1821 1821 return (myri10ge_simple_send_hash(mgp, ip));
1822 1822 default:
1823 1823 break;
1824 1824 }
1825 1825 return (&mgp->ss[0]);
1826 1826 }
1827 1827
1828 1828 static int
1829 1829 myri10ge_setup_slice(struct myri10ge_slice_state *ss)
1830 1830 {
1831 1831 struct myri10ge_priv *mgp = ss->mgp;
1832 1832 myri10ge_cmd_t cmd;
1833 1833 int tx_ring_size, rx_ring_size;
1834 1834 int tx_ring_entries, rx_ring_entries;
1835 1835 int slice, status;
1836 1836 int allocated, idx;
1837 1837 size_t bytes;
1838 1838
1839 1839 slice = ss - mgp->ss;
1840 1840 cmd.data0 = slice;
1841 1841 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd);
1842 1842 tx_ring_size = cmd.data0;
1843 1843 cmd.data0 = slice;
1844 1844 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd);
1845 1845 if (status != 0)
1846 1846 return (status);
1847 1847 rx_ring_size = cmd.data0;
1848 1848
1849 1849 tx_ring_entries = tx_ring_size / sizeof (struct mcp_kreq_ether_send);
1850 1850 rx_ring_entries = rx_ring_size / sizeof (struct mcp_dma_addr);
1851 1851 ss->tx.mask = tx_ring_entries - 1;
1852 1852 ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1;
1853 1853
1854 1854 /* get the lanai pointers to the send and receive rings */
1855 1855
1856 1856 cmd.data0 = slice;
1857 1857 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET, &cmd);
1858 1858 ss->tx.lanai = (mcp_kreq_ether_send_t *)(void *)(mgp->sram + cmd.data0);
1859 1859 if (mgp->num_slices > 1) {
1860 1860 ss->tx.go = (char *)mgp->sram + MXGEFW_ETH_SEND_GO + 64 * slice;
1861 1861 ss->tx.stop = (char *)mgp->sram + MXGEFW_ETH_SEND_STOP +
1862 1862 64 * slice;
1863 1863 } else {
1864 1864 ss->tx.go = NULL;
1865 1865 ss->tx.stop = NULL;
1866 1866 }
1867 1867
1868 1868 cmd.data0 = slice;
1869 1869 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET, &cmd);
1870 1870 ss->rx_small.lanai = (mcp_kreq_ether_recv_t *)
1871 1871 (void *)(mgp->sram + cmd.data0);
1872 1872
1873 1873 cmd.data0 = slice;
1874 1874 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd);
1875 1875 ss->rx_big.lanai = (mcp_kreq_ether_recv_t *)(void *)
1876 1876 (mgp->sram + cmd.data0);
1877 1877
1878 1878 if (status != 0) {
1879 1879 cmn_err(CE_WARN,
1880 1880 "%s: failed to get ring sizes or locations\n", mgp->name);
1881 1881 return (status);
1882 1882 }
1883 1883
1884 1884 status = ENOMEM;
1885 1885 bytes = rx_ring_entries * sizeof (*ss->rx_small.shadow);
1886 1886 ss->rx_small.shadow = kmem_zalloc(bytes, KM_SLEEP);
1887 1887 if (ss->rx_small.shadow == NULL)
1888 1888 goto abort;
1889 1889 (void) memset(ss->rx_small.shadow, 0, bytes);
1890 1890
1891 1891 bytes = rx_ring_entries * sizeof (*ss->rx_big.shadow);
1892 1892 ss->rx_big.shadow = kmem_zalloc(bytes, KM_SLEEP);
1893 1893 if (ss->rx_big.shadow == NULL)
1894 1894 goto abort_with_rx_small_shadow;
1895 1895 (void) memset(ss->rx_big.shadow, 0, bytes);
1896 1896
1897 1897 /* allocate the host info rings */
1898 1898
1899 1899 bytes = tx_ring_entries * sizeof (*ss->tx.info);
1900 1900 ss->tx.info = kmem_zalloc(bytes, KM_SLEEP);
1901 1901 if (ss->tx.info == NULL)
1902 1902 goto abort_with_rx_big_shadow;
1903 1903 (void) memset(ss->tx.info, 0, bytes);
1904 1904
1905 1905 bytes = rx_ring_entries * sizeof (*ss->rx_small.info);
1906 1906 ss->rx_small.info = kmem_zalloc(bytes, KM_SLEEP);
1907 1907 if (ss->rx_small.info == NULL)
1908 1908 goto abort_with_tx_info;
1909 1909 (void) memset(ss->rx_small.info, 0, bytes);
1910 1910
1911 1911 bytes = rx_ring_entries * sizeof (*ss->rx_big.info);
1912 1912 ss->rx_big.info = kmem_zalloc(bytes, KM_SLEEP);
1913 1913 if (ss->rx_big.info == NULL)
1914 1914 goto abort_with_rx_small_info;
1915 1915 (void) memset(ss->rx_big.info, 0, bytes);
1916 1916
1917 1917 ss->tx.stall = ss->tx.sched = 0;
1918 1918 ss->tx.stall_early = ss->tx.stall_late = 0;
1919 1919
1920 1920 ss->jbufs_for_smalls = 1 + (1 + ss->rx_small.mask) /
1921 1921 (myri10ge_mtu / (myri10ge_small_bytes + MXGEFW_PAD));
1922 1922
1923 1923 allocated = myri10ge_add_jbufs(ss,
1924 1924 myri10ge_bigbufs_initial + ss->jbufs_for_smalls, 1);
1925 1925 if (allocated < ss->jbufs_for_smalls + myri10ge_bigbufs_initial) {
1926 1926 cmn_err(CE_WARN,
1927 1927 "%s: Could not allocate enough receive buffers (%d/%d)\n",
1928 1928 mgp->name, allocated,
1929 1929 myri10ge_bigbufs_initial + ss->jbufs_for_smalls);
1930 1930 goto abort_with_jumbos;
1931 1931 }
1932 1932
1933 1933 myri10ge_carve_up_jbufs_into_small_ring(ss);
1934 1934 ss->j_rx_cnt = 0;
1935 1935
1936 1936 mutex_enter(&ss->jpool.mtx);
1937 1937 if (allocated < rx_ring_entries)
1938 1938 ss->jpool.low_water = allocated / 4;
1939 1939 else
1940 1940 ss->jpool.low_water = rx_ring_entries / 2;
1941 1941
1942 1942 /*
1943 1943 * invalidate the big receive ring in case we do not
1944 1944 * allocate sufficient jumbos to fill it
1945 1945 */
1946 1946 (void) memset(ss->rx_big.shadow, 1,
1947 1947 (ss->rx_big.mask + 1) * sizeof (ss->rx_big.shadow[0]));
1948 1948 for (idx = 7; idx <= ss->rx_big.mask; idx += 8) {
1949 1949 myri10ge_submit_8rx(&ss->rx_big.lanai[idx - 7],
1950 1950 &ss->rx_big.shadow[idx - 7]);
1951 1951 mb();
1952 1952 }
1953 1953
1954 1954
1955 1955 myri10ge_restock_jumbos(ss);
1956 1956
1957 1957 for (idx = 7; idx <= ss->rx_small.mask; idx += 8) {
1958 1958 myri10ge_submit_8rx(&ss->rx_small.lanai[idx - 7],
1959 1959 &ss->rx_small.shadow[idx - 7]);
1960 1960 mb();
1961 1961 }
1962 1962 ss->rx_small.cnt = ss->rx_small.mask + 1;
1963 1963
1964 1964 mutex_exit(&ss->jpool.mtx);
1965 1965
1966 1966 status = myri10ge_prepare_tx_ring(ss);
1967 1967
1968 1968 if (status != 0)
1969 1969 goto abort_with_small_jbufs;
1970 1970
1971 1971 cmd.data0 = ntohl(ss->fw_stats_dma.low);
1972 1972 cmd.data1 = ntohl(ss->fw_stats_dma.high);
1973 1973 cmd.data2 = sizeof (mcp_irq_data_t);
1974 1974 cmd.data2 |= (slice << 16);
1975 1975 bzero(ss->fw_stats, sizeof (*ss->fw_stats));
1976 1976 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd);
1977 1977 if (status == ENOSYS) {
1978 1978 cmd.data0 = ntohl(ss->fw_stats_dma.low) +
1979 1979 offsetof(mcp_irq_data_t, send_done_count);
1980 1980 cmd.data1 = ntohl(ss->fw_stats_dma.high);
1981 1981 status = myri10ge_send_cmd(mgp,
1982 1982 MXGEFW_CMD_SET_STATS_DMA_OBSOLETE, &cmd);
1983 1983 }
1984 1984 if (status) {
1985 1985 cmn_err(CE_WARN, "%s: Couldn't set stats DMA\n", mgp->name);
1986 1986 goto abort_with_tx;
1987 1987 }
1988 1988
1989 1989 return (0);
1990 1990
1991 1991 abort_with_tx:
1992 1992 myri10ge_unprepare_tx_ring(ss);
1993 1993
1994 1994 abort_with_small_jbufs:
1995 1995 myri10ge_release_small_jbufs(ss);
1996 1996
1997 1997 abort_with_jumbos:
1998 1998 if (allocated != 0) {
1999 1999 mutex_enter(&ss->jpool.mtx);
2000 2000 ss->jpool.low_water = 0;
2001 2001 mutex_exit(&ss->jpool.mtx);
2002 2002 myri10ge_unstock_jumbos(ss);
2003 2003 myri10ge_remove_jbufs(ss);
2004 2004 }
2005 2005
2006 2006 bytes = rx_ring_entries * sizeof (*ss->rx_big.info);
2007 2007 kmem_free(ss->rx_big.info, bytes);
2008 2008
2009 2009 abort_with_rx_small_info:
2010 2010 bytes = rx_ring_entries * sizeof (*ss->rx_small.info);
2011 2011 kmem_free(ss->rx_small.info, bytes);
2012 2012
2013 2013 abort_with_tx_info:
2014 2014 bytes = tx_ring_entries * sizeof (*ss->tx.info);
2015 2015 kmem_free(ss->tx.info, bytes);
2016 2016
2017 2017 abort_with_rx_big_shadow:
2018 2018 bytes = rx_ring_entries * sizeof (*ss->rx_big.shadow);
2019 2019 kmem_free(ss->rx_big.shadow, bytes);
2020 2020
2021 2021 abort_with_rx_small_shadow:
2022 2022 bytes = rx_ring_entries * sizeof (*ss->rx_small.shadow);
2023 2023 kmem_free(ss->rx_small.shadow, bytes);
2024 2024 abort:
2025 2025 return (status);
2026 2026
2027 2027 }
2028 2028
/*
 * Free all receive and transmit resources owned by one slice: the TX
 * copy buffers, the small/jumbo receive buffer pools, and the
 * host-side info/shadow arrays that mirror the NIC rings.  Safe to
 * call on a slice that was never fully set up (detected via tx.cp).
 */
static void
myri10ge_teardown_slice(struct myri10ge_slice_state *ss)
{
	int tx_ring_entries, rx_ring_entries;
	size_t bytes;

	/* ignore slices that have not been fully setup */
	if (ss->tx.cp == NULL)
		return;
	/* Free the TX copy buffers */
	myri10ge_unprepare_tx_ring(ss);

	/*
	 * stop passing returned buffers to firmware: with low_water
	 * forced to 0 the rx path no longer triggers a restock
	 */
	mutex_enter(&ss->jpool.mtx);
	ss->jpool.low_water = 0;
	mutex_exit(&ss->jpool.mtx);
	myri10ge_release_small_jbufs(ss);

	/* Release the free jumbo frame pool */
	myri10ge_unstock_jumbos(ss);
	myri10ge_remove_jbufs(ss);

	/* ring entry counts are recovered from the power-of-2 masks */
	rx_ring_entries = ss->rx_big.mask + 1;
	tx_ring_entries = ss->tx.mask + 1;

	bytes = rx_ring_entries * sizeof (*ss->rx_big.info);
	kmem_free(ss->rx_big.info, bytes);

	bytes = rx_ring_entries * sizeof (*ss->rx_small.info);
	kmem_free(ss->rx_small.info, bytes);

	bytes = tx_ring_entries * sizeof (*ss->tx.info);
	kmem_free(ss->tx.info, bytes);

	bytes = rx_ring_entries * sizeof (*ss->rx_big.shadow);
	kmem_free(ss->rx_big.shadow, bytes);

	bytes = rx_ring_entries * sizeof (*ss->rx_small.shadow);
	kmem_free(ss->rx_small.shadow, bytes);

}
/*
 * Bring the NIC up: reset it, configure RSS slices (if more than
 * one), allocate per-slice rings and buffers, program buffer sizes
 * and MTU into the firmware, enable TSO, and finally issue
 * MXGEFW_CMD_ETHERNET_UP.  Caller holds mgp->intrlock.  Returns
 * DDI_SUCCESS or DDI_FAILURE, unwinding partial state on failure.
 */
static int
myri10ge_start_locked(struct myri10ge_priv *mgp)
{
	myri10ge_cmd_t cmd;
	int status, big_pow2, i;
	volatile uint8_t *itable;

	status = DDI_SUCCESS;
	/* Allocate DMA resources and receive buffers */

	status = myri10ge_reset(mgp);
	if (status != 0) {
		cmn_err(CE_WARN, "%s: failed reset\n", mgp->name);
		return (DDI_FAILURE);
	}

	if (mgp->num_slices > 1) {
		cmd.data0 = mgp->num_slices;
		cmd.data1 = 1; /* use MSI-X */
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
		    &cmd);
		if (status != 0) {
			cmn_err(CE_WARN,
			    "%s: failed to set number of slices\n",
			    mgp->name);
			goto abort_with_nothing;
		}
		/* setup the indirection table */
		cmd.data0 = mgp->num_slices;
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_TABLE_SIZE,
		    &cmd);

		status |= myri10ge_send_cmd(mgp,
		    MXGEFW_CMD_GET_RSS_TABLE_OFFSET, &cmd);
		/*
		 * NOTE(review): on failure we only warn and fall through;
		 * cmd.data0 is still used below as the table offset —
		 * confirm the firmware leaves it sane in that case.
		 */
		if (status != 0) {
			cmn_err(CE_WARN,
			    "%s: failed to setup rss tables\n", mgp->name);
		}

		/* just enable an identity mapping */
		itable = mgp->sram + cmd.data0;
		for (i = 0; i < mgp->num_slices; i++)
			itable[i] = (uint8_t)i;

		if (myri10ge_rss_hash & MYRI10GE_TOEPLITZ_HASH) {
			status = myri10ge_init_toeplitz(mgp);
			if (status != 0) {
				cmn_err(CE_WARN, "%s: failed to setup "
				    "toeplitz tx hash table", mgp->name);
				goto abort_with_nothing;
			}
		}
		cmd.data0 = 1;
		cmd.data1 = myri10ge_rss_hash;
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_ENABLE,
		    &cmd);
		if (status != 0) {
			cmn_err(CE_WARN,
			    "%s: failed to enable slices\n", mgp->name);
			goto abort_with_toeplitz;
		}
	}

	for (i = 0; i < mgp->num_slices; i++) {
		status = myri10ge_setup_slice(&mgp->ss[i]);
		if (status != 0)
			goto abort_with_slices;
	}

	/*
	 * Tell the MCP how many buffers he has, and to
	 * bring the ethernet interface up
	 *
	 * Firmware needs the big buff size as a power of 2. Lie and
	 * tell him the buffer is larger, because we only use 1
	 * buffer/pkt, and the mtu will prevent overruns
	 */
	big_pow2 = myri10ge_mtu + MXGEFW_PAD;
	while (!ISP2(big_pow2))
		big_pow2++;

	/* now give firmware buffers sizes, and MTU */
	cmd.data0 = myri10ge_mtu;
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_MTU, &cmd);
	cmd.data0 = myri10ge_small_bytes;
	status |=
	    myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, &cmd);
	cmd.data0 = big_pow2;
	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_BIG_BUFFER_SIZE, &cmd);
	if (status) {
		cmn_err(CE_WARN, "%s: Couldn't set buffer sizes\n", mgp->name);
		goto abort_with_slices;
	}


	/* TSO failure is non-fatal: we just run without the feature */
	cmd.data0 = 1;
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_TSO_MODE, &cmd);
	if (status) {
		cmn_err(CE_WARN, "%s: unable to setup TSO (%d)\n",
		    mgp->name, status);
	} else {
		mgp->features |= MYRI10GE_TSO;
	}

	mgp->link_state = -1;
	mgp->rdma_tags_available = 15;
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd);
	if (status) {
		cmn_err(CE_WARN, "%s: unable to start ethernet\n", mgp->name);
		goto abort_with_slices;
	}
	mgp->running = MYRI10GE_ETH_RUNNING;
	return (DDI_SUCCESS);

abort_with_slices:
	for (i = 0; i < mgp->num_slices; i++)
		myri10ge_teardown_slice(&mgp->ss[i]);

	mgp->running = MYRI10GE_ETH_STOPPED;

abort_with_toeplitz:
	if (mgp->toeplitz_hash_table != NULL) {
		kmem_free(mgp->toeplitz_hash_table,
		    sizeof (uint32_t) * 12 * 256);
		mgp->toeplitz_hash_table = NULL;
	}

abort_with_nothing:
	return (DDI_FAILURE);
}
2201 2201
2202 2202 static void
2203 2203 myri10ge_stop_locked(struct myri10ge_priv *mgp)
2204 2204 {
2205 2205 int status, old_down_cnt;
2206 2206 myri10ge_cmd_t cmd;
2207 2207 int wait_time = 10;
↓ open down ↓ |
2207 lines elided |
↑ open up ↑ |
2208 2208 int i, polling;
2209 2209
2210 2210 old_down_cnt = mgp->down_cnt;
2211 2211 mb();
2212 2212 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_DOWN, &cmd);
2213 2213 if (status) {
2214 2214 cmn_err(CE_WARN, "%s: Couldn't bring down link\n", mgp->name);
2215 2215 }
2216 2216
2217 2217 while (old_down_cnt == *((volatile int *)&mgp->down_cnt)) {
2218 - delay(1 * drv_usectohz(1000000));
2218 + delay(drv_sectohz(1));
2219 2219 wait_time--;
2220 2220 if (wait_time == 0)
2221 2221 break;
2222 2222 }
2223 2223 again:
2224 2224 if (old_down_cnt == *((volatile int *)&mgp->down_cnt)) {
2225 2225 cmn_err(CE_WARN, "%s: didn't get down irq\n", mgp->name);
2226 2226 for (i = 0; i < mgp->num_slices; i++) {
2227 2227 /*
2228 2228 * take and release the rx lock to ensure
2229 2229 * that no interrupt thread is blocked
2230 2230 * elsewhere in the stack, preventing
2231 2231 * completion
2232 2232 */
2233 2233
2234 2234 mutex_enter(&mgp->ss[i].rx_lock);
2235 2235 printf("%s: slice %d rx irq idle\n",
↓ open down ↓ |
7 lines elided |
↑ open up ↑ |
2236 2236 mgp->name, i);
2237 2237 mutex_exit(&mgp->ss[i].rx_lock);
2238 2238
2239 2239 /* verify that the poll handler is inactive */
2240 2240 mutex_enter(&mgp->ss->poll_lock);
2241 2241 polling = mgp->ss->rx_polling;
2242 2242 mutex_exit(&mgp->ss->poll_lock);
2243 2243 if (polling) {
2244 2244 printf("%s: slice %d is polling\n",
2245 2245 mgp->name, i);
2246 - delay(1 * drv_usectohz(1000000));
2246 + delay(drv_sectohz(1));
2247 2247 goto again;
2248 2248 }
2249 2249 }
2250 - delay(1 * drv_usectohz(1000000));
2250 + delay(drv_sectohz(1));
2251 2251 if (old_down_cnt == *((volatile int *)&mgp->down_cnt)) {
2252 2252 cmn_err(CE_WARN, "%s: Never got down irq\n", mgp->name);
2253 2253 }
2254 2254 }
2255 2255
2256 2256 for (i = 0; i < mgp->num_slices; i++)
2257 2257 myri10ge_teardown_slice(&mgp->ss[i]);
2258 2258
2259 2259 if (mgp->toeplitz_hash_table != NULL) {
2260 2260 kmem_free(mgp->toeplitz_hash_table,
2261 2261 sizeof (uint32_t) * 12 * 256);
2262 2262 mgp->toeplitz_hash_table = NULL;
2263 2263 }
2264 2264 mgp->running = MYRI10GE_ETH_STOPPED;
2265 2265 }
2266 2266
/*
 * GLDv3 mc_start(9E) entry point: bring the NIC up under intrlock
 * and, on success, arm the watchdog timer.  Returns DDI_SUCCESS or
 * DDI_FAILURE.
 */
static int
myri10ge_m_start(void *arg)
{
	struct myri10ge_priv *mgp = arg;
	int status;

	mutex_enter(&mgp->intrlock);

	/* refuse to start unless we are fully stopped */
	if (mgp->running != MYRI10GE_ETH_STOPPED) {
		mutex_exit(&mgp->intrlock);
		return (DDI_FAILURE);
	}
	status = myri10ge_start_locked(mgp);
	mutex_exit(&mgp->intrlock);

	if (status != DDI_SUCCESS)
		return (status);

	/* start the watchdog timer */
	mgp->timer_id = timeout(myri10ge_watchdog, mgp,
	    mgp->timer_ticks);
	return (DDI_SUCCESS);

}
2291 2291
/*
 * GLDv3 mc_stop(9E) entry point: mark the device as stopping, cancel
 * the watchdog, then bring the NIC down.  untimeout() is called with
 * intrlock dropped — presumably so an in-flight myri10ge_watchdog()
 * that needs the lock cannot deadlock against us (confirm against the
 * watchdog implementation).
 */
static void
myri10ge_m_stop(void *arg)
{
	struct myri10ge_priv *mgp = arg;

	mutex_enter(&mgp->intrlock);
	/* if the device not running give up */
	if (mgp->running != MYRI10GE_ETH_RUNNING) {
		mutex_exit(&mgp->intrlock);
		return;
	}

	mgp->running = MYRI10GE_ETH_STOPPING;
	mutex_exit(&mgp->intrlock);
	(void) untimeout(mgp->timer_id);
	mutex_enter(&mgp->intrlock);
	myri10ge_stop_locked(mgp);
	mutex_exit(&mgp->intrlock);

}
2312 2312
/*
 * Convert the firmware-supplied 16-bit partial checksum for a
 * received frame into the form expected by mac_hcksum_set()
 * (HCK_PARTIALCKSUM), adjusting it for VLAN tags and for the IPv6
 * header.  Also bumps broadcast/multicast receive counters as a
 * side effect.  If the frame is not recognized (non-IP, unknown
 * L4 protocol, or padded) the function returns without setting
 * any hcksum info and the stack verifies the checksum itself.
 */
static inline void
myri10ge_rx_csum(mblk_t *mp, struct myri10ge_rx_ring_stats *s, uint32_t csum)
{
	struct ether_header *eh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	uint32_t start, stuff, end, partial, hdrlen;


	csum = ntohs((uint16_t)csum);
	eh = (struct ether_header *)(void *)mp->b_rptr;
	hdrlen = sizeof (*eh);
	/* group bit set: count as broadcast or multicast */
	if (eh->ether_dhost.ether_addr_octet[0] & 1) {
		if (0 == (bcmp(eh->ether_dhost.ether_addr_octet,
		    myri10ge_broadcastaddr, sizeof (eh->ether_dhost))))
			s->brdcstrcv++;
		else
			s->multircv++;
	}

	if (eh->ether_type == BE_16(ETHERTYPE_VLAN)) {
		/*
		 * fix checksum by subtracting 4 bytes after what the
		 * firmware thought was the end of the ether hdr.
		 * The two adds below perform a ones-complement
		 * subtraction with end-around carry, then the result
		 * is folded back into 16 bits.
		 *
		 * NOTE(review): after this fixup, eh->ether_type is
		 * still ETHERTYPE_VLAN, so neither the IPv4 nor the
		 * IPv6 branch below matches and the function returns
		 * without offloading — confirm whether VLAN tags are
		 * stripped elsewhere or this path is effectively dead.
		 */
		partial = *(uint32_t *)
		    (void *)(mp->b_rptr + ETHERNET_HEADER_SIZE);
		csum += ~partial;
		csum += (csum < ~partial);
		csum = (csum >> 16) + (csum & 0xFFFF);
		csum = (csum >> 16) + (csum & 0xFFFF);
		hdrlen += VLAN_TAGSZ;
	}

	if (eh->ether_type == BE_16(ETHERTYPE_IP)) {
		ip = (struct ip *)(void *)(mp->b_rptr + hdrlen);
		start = ip->ip_hl << 2;

		if (ip->ip_p == IPPROTO_TCP)
			stuff = start + offsetof(struct tcphdr, th_sum);
		else if (ip->ip_p == IPPROTO_UDP)
			stuff = start + offsetof(struct udphdr, uh_sum);
		else
			return;
		end = ntohs(ip->ip_len);
	} else if (eh->ether_type == BE_16(ETHERTYPE_IPV6)) {
		ip6 = (struct ip6_hdr *)(void *)(mp->b_rptr + hdrlen);
		start = sizeof (*ip6);
		if (ip6->ip6_nxt == IPPROTO_TCP) {
			stuff = start + offsetof(struct tcphdr, th_sum);
		} else if (ip6->ip6_nxt == IPPROTO_UDP)
			stuff = start + offsetof(struct udphdr, uh_sum);
		else
			return;
		end = start + ntohs(ip6->ip6_plen);
		/*
		 * IPv6 headers do not contain a checksum, and hence
		 * do not checksum to zero, so they don't "fall out"
		 * of the partial checksum calculation like IPv4
		 * headers do. We need to fix the partial checksum by
		 * subtracting the checksum of the IPv6 header.
		 */

		partial = myri10ge_csum_generic((uint16_t *)ip6, sizeof (*ip6));
		csum += ~partial;
		csum += (csum < ~partial);
		csum = (csum >> 16) + (csum & 0xFFFF);
		csum = (csum >> 16) + (csum & 0xFFFF);
	} else {
		return;
	}

	if (MBLKL(mp) > hdrlen + end) {
		/* padded frame, so hw csum may be invalid */
		return;
	}

	mac_hcksum_set(mp, start, stuff, end, csum, HCK_PARTIALCKSUM);
}
2392 2392
/*
 * Receive completion for a frame that arrived in a small buffer.
 * The payload is always copied into a freshly allocated mblk so the
 * small-buffer slot can be recycled in place; slots are handed back
 * to the NIC in batches of 8 (see myri10ge_submit_8rx).  Returns the
 * mblk to pass up the stack, or NULL on allocation failure.
 */
static mblk_t *
myri10ge_rx_done_small(struct myri10ge_slice_state *ss, uint32_t len,
    uint32_t csum)
{
	mblk_t *mp;
	myri10ge_rx_ring_t *rx;
	int idx;

	rx = &ss->rx_small;
	idx = rx->cnt & rx->mask;
	ss->rx_small.cnt++;

	/* allocate a new buffer to pass up the stack */
	mp = allocb(len + MXGEFW_PAD, 0);
	if (mp == NULL) {
		MYRI10GE_ATOMIC_SLICE_STAT_INC(rx_small_nobuf);
		goto abort;
	}
	/* copy out of the ring buffer, then skip the firmware pad */
	bcopy(ss->rx_small.info[idx].ptr,
	    (caddr_t)mp->b_wptr, len + MXGEFW_PAD);
	mp->b_wptr += len + MXGEFW_PAD;
	mp->b_rptr += MXGEFW_PAD;

	ss->rx_stats.ibytes += len;
	ss->rx_stats.ipackets += 1;
	myri10ge_rx_csum(mp, &ss->rx_stats, csum);

abort:
	/* even on failure, recycle the slot back to the NIC in 8s */
	if ((idx & 7) == 7) {
		myri10ge_submit_8rx(&rx->lanai[idx - 7],
		    &rx->shadow[idx - 7]);
	}

	return (mp);
}
2428 2428
2429 2429
/*
 * Receive completion for a frame that arrived in a jumbo buffer.
 * Normally the jumbo buffer itself is loaned up the stack via
 * desballoc() (zero-copy); when the pool of buffers owned by the
 * firmware runs low, the frame is copied instead so the buffer can
 * be returned to the NIC immediately.  Returns the mblk to pass up,
 * or NULL on failure.
 */
static mblk_t *
myri10ge_rx_done_big(struct myri10ge_slice_state *ss, uint32_t len,
    uint32_t csum)
{
	struct myri10ge_jpool_stuff *jpool;
	struct myri10ge_jpool_entry *j;
	mblk_t *mp;
	int idx, num_owned_by_mcp;

	jpool = &ss->jpool;
	idx = ss->j_rx_cnt & ss->rx_big.mask;
	j = ss->rx_big.info[idx].j;

	/* should not happen; indicates ring accounting is off */
	if (j == NULL) {
		printf("%s: null j at idx=%d, rx_big.cnt = %d, j_rx_cnt=%d\n",
		    ss->mgp->name, idx, ss->rx_big.cnt, ss->j_rx_cnt);
		return (NULL);
	}


	/* take ownership of the buffer away from the ring slot */
	ss->rx_big.info[idx].j = NULL;
	ss->j_rx_cnt++;


	/*
	 * Check to see if we are low on rx buffers.
	 * Note that we must leave at least 8 free so there are
	 * enough to free in a single 64-byte write.
	 */
	num_owned_by_mcp = ss->rx_big.cnt - ss->j_rx_cnt;
	if (num_owned_by_mcp < jpool->low_water) {
		mutex_enter(&jpool->mtx);
		myri10ge_restock_jumbos(ss);
		mutex_exit(&jpool->mtx);
		num_owned_by_mcp = ss->rx_big.cnt - ss->j_rx_cnt;
		/* if we are still low, then we have to copy */
		if (num_owned_by_mcp < 16) {
			MYRI10GE_ATOMIC_SLICE_STAT_INC(rx_copy);
			/* allocate a new buffer to pass up the stack */
			mp = allocb(len + MXGEFW_PAD, 0);
			if (mp == NULL) {
				goto abort;
			}
			bcopy(j->buf,
			    (caddr_t)mp->b_wptr, len + MXGEFW_PAD);
			myri10ge_jfree_rtn(j);
			/* push buffer back to NIC */
			mutex_enter(&jpool->mtx);
			myri10ge_restock_jumbos(ss);
			mutex_exit(&jpool->mtx);
			goto set_len;
		}
	}

	/* loan our buffer to the stack */
	mp = desballoc((unsigned char *)j->buf, myri10ge_mtu, 0, &j->free_func);
	if (mp == NULL) {
		goto abort;
	}

set_len:
	/* skip the firmware pad, then bound the mblk to the frame */
	mp->b_rptr += MXGEFW_PAD;
	mp->b_wptr = ((unsigned char *) mp->b_rptr + len);

	ss->rx_stats.ibytes += len;
	ss->rx_stats.ipackets += 1;
	myri10ge_rx_csum(mp, &ss->rx_stats, csum);

	return (mp);

abort:
	myri10ge_jfree_rtn(j);
	MYRI10GE_ATOMIC_SLICE_STAT_INC(rx_big_nobuf);
	return (NULL);
}
2505 2505
/*
 * Free all transmit buffers up until the specified index
 * (the firmware's send_done_count).  For each completed slot this
 * unbinds and batches the DMA handle, frees the mblk, folds the
 * per-packet stats into the ring totals, and wakes a stalled TX
 * queue once at least half the slots are free.
 */
static inline void
myri10ge_tx_done(struct myri10ge_slice_state *ss, uint32_t mcp_index)
{
	myri10ge_tx_ring_t *tx;
	struct myri10ge_tx_dma_handle_head handles;
	int idx;
	int limit = 0;

	tx = &ss->tx;
	handles.head = NULL;
	handles.tail = NULL;
	while (tx->pkt_done != (int)mcp_index) {
		idx = tx->done & tx->mask;

		/*
		 * mblk & DMA handle attached only to first slot
		 * per buffer in the packet
		 */

		if (tx->info[idx].m) {
			(void) ddi_dma_unbind_handle(tx->info[idx].handle->h);
			/* collect handles so they can be freed in one batch */
			tx->info[idx].handle->next = handles.head;
			handles.head = tx->info[idx].handle;
			if (handles.tail == NULL)
				handles.tail = tx->info[idx].handle;
			freeb(tx->info[idx].m);
			tx->info[idx].m = 0;
			tx->info[idx].handle = 0;
		}
		/* a nonzero opackets marks the last slot of a packet */
		if (tx->info[idx].ostat.opackets != 0) {
			tx->stats.multixmt += tx->info[idx].ostat.multixmt;
			tx->stats.brdcstxmt += tx->info[idx].ostat.brdcstxmt;
			tx->stats.obytes += tx->info[idx].ostat.obytes;
			tx->stats.opackets += tx->info[idx].ostat.opackets;
			tx->info[idx].stat.un.all = 0;
			tx->pkt_done++;
		}

		tx->done++;
		/*
		 * if we stalled the queue, wake it. But Wait until
		 * we have at least 1/2 our slots free.
		 */
		if ((tx->req - tx->done) < (tx->mask >> 1) &&
		    tx->stall != tx->sched) {
			mutex_enter(&ss->tx.lock);
			tx->sched = tx->stall;
			mutex_exit(&ss->tx.lock);
			mac_tx_ring_update(ss->mgp->mh, tx->rh);
		}

		/* limit potential for livelock */
		if (unlikely(++limit > 2 * tx->mask))
			break;
	}
	if (tx->req == tx->done && tx->stop != NULL) {
		/*
		 * Nic has sent all pending requests, allow him
		 * to stop polling this queue
		 */
		mutex_enter(&tx->lock);
		if (tx->req == tx->done && tx->active) {
			*(int *)(void *)tx->stop = 1;
			tx->active = 0;
			mb();
		}
		mutex_exit(&tx->lock);
	}
	if (handles.head != NULL)
		myri10ge_free_tx_handles(tx, &handles);
}
2580 2580
2581 2581 static void
2582 2582 myri10ge_mbl_init(struct myri10ge_mblk_list *mbl)
2583 2583 {
2584 2584 mbl->head = NULL;
2585 2585 mbl->tail = &mbl->head;
2586 2586 mbl->cnt = 0;
2587 2587 }
2588 2588
2589 2589 /*ARGSUSED*/
2590 2590 void
2591 2591 myri10ge_mbl_append(struct myri10ge_slice_state *ss,
2592 2592 struct myri10ge_mblk_list *mbl, mblk_t *mp)
2593 2593 {
2594 2594 *(mbl->tail) = mp;
2595 2595 mbl->tail = &mp->b_next;
2596 2596 mp->b_next = NULL;
2597 2597 mbl->cnt++;
2598 2598 }
2599 2599
2600 2600
/*
 * Drain the receive-completion ring, dispatching each entry to the
 * small- or big-buffer handler, optionally merging packets through
 * LRO, and appending results to the caller's mblk list.  Stops when
 * the ring is empty, the byte 'limit' is consumed, or *stop becomes
 * true (used to yield to the polling path).  Any active LRO flows
 * are flushed before returning.
 */
static inline void
myri10ge_clean_rx_done(struct myri10ge_slice_state *ss,
    struct myri10ge_mblk_list *mbl, int limit, boolean_t *stop)
{
	myri10ge_rx_done_t *rx_done = &ss->rx_done;
	struct myri10ge_priv *mgp = ss->mgp;
	mblk_t *mp;
	struct lro_entry *lro;
	uint16_t length;
	uint16_t checksum;


	/* a zero length marks an entry not yet written by the NIC */
	while (rx_done->entry[rx_done->idx].length != 0) {
		if (unlikely (*stop)) {
			break;
		}
		length = ntohs(rx_done->entry[rx_done->idx].length);
		/* the low bits of length carry the RSS hash type */
		length &= (~MXGEFW_RSS_HASH_MASK);

		/* limit potential for livelock */
		limit -= length;
		if (unlikely(limit < 0))
			break;

		/* clear the slot so the NIC can reuse it */
		rx_done->entry[rx_done->idx].length = 0;
		checksum = ntohs(rx_done->entry[rx_done->idx].checksum);
		if (length <= myri10ge_small_bytes)
			mp = myri10ge_rx_done_small(ss, length, checksum);
		else
			mp = myri10ge_rx_done_big(ss, length, checksum);
		if (mp != NULL) {
			/* LRO either consumes the mblk or rejects it */
			if (!myri10ge_lro ||
			    0 != myri10ge_lro_rx(ss, mp, checksum, mbl))
				myri10ge_mbl_append(ss, mbl, mp);
		}
		rx_done->cnt++;
		rx_done->idx = rx_done->cnt & (mgp->max_intr_slots - 1);
	}
	/* flush any LRO flows still being accumulated */
	while (ss->lro_active != NULL) {
		lro = ss->lro_active;
		ss->lro_active = lro->next;
		myri10ge_lro_flush(ss, lro, mbl);
	}
}
2645 2645
/*
 * Interrupt-path receive processing: drain the completion ring and
 * hand the resulting chain to MAC.  tryenter is used so that if the
 * polling path already holds rx_lock we simply back off and let it
 * do the work.
 */
static void
myri10ge_intr_rx(struct myri10ge_slice_state *ss)
{
	uint64_t gen;
	struct myri10ge_mblk_list mbl;

	myri10ge_mbl_init(&mbl);
	if (mutex_tryenter(&ss->rx_lock) == 0)
		return;
	gen = ss->rx_gen_num;
	myri10ge_clean_rx_done(ss, &mbl, MYRI10GE_POLL_NULL,
	    &ss->rx_polling);
	if (mbl.head != NULL)
		mac_rx_ring(ss->mgp->mh, ss->rx_rh, mbl.head, gen);
	mutex_exit(&ss->rx_lock);

}
2663 2663
/*
 * MAC polling entry point: drain up to 'bytes' worth of received
 * frames and return them as an mblk chain (NULL if none).  Only
 * runs when the slice is actually in polling mode; otherwise logs
 * the unexpected call and returns nothing.
 */
static mblk_t *
myri10ge_poll_rx(void *arg, int bytes)
{
	struct myri10ge_slice_state *ss = arg;
	struct myri10ge_mblk_list mbl;
	boolean_t dummy = B_FALSE;

	if (bytes == 0)
		return (NULL);

	myri10ge_mbl_init(&mbl);
	mutex_enter(&ss->rx_lock);
	if (ss->rx_polling)
		myri10ge_clean_rx_done(ss, &mbl, bytes, &dummy);
	else
		printf("%d: poll_rx: token=%d, polling=%d\n", (int)(ss -
		    ss->mgp->ss), ss->rx_token, ss->rx_polling);
	mutex_exit(&ss->rx_lock);
	return (mbl.head);
}
2684 2684
/*ARGSUSED*/
/*
 * Per-slice interrupt handler.  The firmware DMAs a mcp_irq_data_t
 * into host memory; its 'valid' byte indicates the DMA has finished
 * (low bit set means receives are pending).  Processes receives and
 * transmit completions, propagates link-state changes to MAC, and
 * finally hands the interrupt token back to the NIC via irq_claim —
 * unless the slice is in polling mode, in which case the rx token is
 * retained for the poll path.
 */
static uint_t
myri10ge_intr(caddr_t arg0, caddr_t arg1)
{
	struct myri10ge_slice_state *ss =
	    (struct myri10ge_slice_state *)(void *)arg0;
	struct myri10ge_priv *mgp = ss->mgp;
	mcp_irq_data_t *stats = ss->fw_stats;
	myri10ge_tx_ring_t *tx = &ss->tx;
	uint32_t send_done_count;
	uint8_t valid;


	/* make sure the DMA has finished */
	if (!stats->valid) {
		return (DDI_INTR_UNCLAIMED);
	}
	valid = stats->valid;

	/* low bit indicates receives are present */
	if (valid & 1)
		myri10ge_intr_rx(ss);

	if (mgp->ddi_intr_type == DDI_INTR_TYPE_FIXED) {
		/* lower legacy IRQ */
		*mgp->irq_deassert = 0;
		if (!myri10ge_deassert_wait)
			/* don't wait for conf. that irq is low */
			stats->valid = 0;
		mb();
	} else {
		/* no need to wait for conf. that irq is low */
		stats->valid = 0;
	}

	/*
	 * keep reaping TX completions until the firmware stops
	 * updating the stats block (valid goes to zero)
	 */
	do {
		/* check for transmit completes and receives */
		send_done_count = ntohl(stats->send_done_count);
		if (send_done_count != tx->pkt_done)
			myri10ge_tx_done(ss, (int)send_done_count);
	} while (*((volatile uint8_t *) &stats->valid));

	if (stats->stats_updated) {
		if (mgp->link_state != stats->link_up || stats->link_down) {
			mgp->link_state = stats->link_up;
			if (stats->link_down) {
				mgp->down_cnt += stats->link_down;
				mgp->link_state = 0;
			}
			if (mgp->link_state) {
				if (myri10ge_verbose)
					printf("%s: link up\n", mgp->name);
				mac_link_update(mgp->mh, LINK_STATE_UP);
			} else {
				if (myri10ge_verbose)
					printf("%s: link down\n", mgp->name);
				mac_link_update(mgp->mh, LINK_STATE_DOWN);
			}
			MYRI10GE_NIC_STAT_INC(link_changes);
		}
		if (mgp->rdma_tags_available !=
		    ntohl(ss->fw_stats->rdma_tags_available)) {
			mgp->rdma_tags_available =
			    ntohl(ss->fw_stats->rdma_tags_available);
			cmn_err(CE_NOTE, "%s: RDMA timed out! "
			    "%d tags left\n", mgp->name,
			    mgp->rdma_tags_available);
		}
	}

	mb();
	/* check to see if we have rx token to pass back */
	if (valid & 0x1) {
		mutex_enter(&ss->poll_lock);
		if (ss->rx_polling) {
			/* poll path owns the token until polling stops */
			ss->rx_token = 1;
		} else {
			*ss->irq_claim = BE_32(3);
			ss->rx_token = 0;
		}
		mutex_exit(&ss->poll_lock);
	}
	/* claim the tx half of the interrupt unconditionally */
	*(ss->irq_claim + 1) = BE_32(3);
	return (DDI_INTR_CLAIMED);
}
2770 2770
2771 2771 /*
2772 2772 * Add or remove a multicast address. This is called with our
2773 2773 * macinfo's lock held by GLD, so we do not need to worry about
2774 2774 * our own locking here.
2775 2775 */
2776 2776 static int
2777 2777 myri10ge_m_multicst(void *arg, boolean_t add, const uint8_t *multicastaddr)
2778 2778 {
2779 2779 myri10ge_cmd_t cmd;
2780 2780 struct myri10ge_priv *mgp = arg;
2781 2781 int status, join_leave;
2782 2782
2783 2783 if (add)
2784 2784 join_leave = MXGEFW_JOIN_MULTICAST_GROUP;
2785 2785 else
2786 2786 join_leave = MXGEFW_LEAVE_MULTICAST_GROUP;
2787 2787 (void) memcpy(&cmd.data0, multicastaddr, 4);
2788 2788 (void) memcpy(&cmd.data1, multicastaddr + 4, 2);
2789 2789 cmd.data0 = htonl(cmd.data0);
2790 2790 cmd.data1 = htonl(cmd.data1);
2791 2791 status = myri10ge_send_cmd(mgp, join_leave, &cmd);
2792 2792 if (status == 0)
2793 2793 return (0);
2794 2794
2795 2795 cmn_err(CE_WARN, "%s: failed to set multicast address\n",
2796 2796 mgp->name);
2797 2797 return (status);
2798 2798 }
2799 2799
2800 2800
2801 2801 static int
2802 2802 myri10ge_m_promisc(void *arg, boolean_t on)
2803 2803 {
2804 2804 struct myri10ge_priv *mgp = arg;
2805 2805
2806 2806 myri10ge_change_promisc(mgp, on);
2807 2807 return (0);
2808 2808 }
2809 2809
/*
 * copy an array of mcp_kreq_ether_send_t's to the mcp. Copy
 * backwards one at a time and handle ring wraps.  The first
 * request (cnt reaches 1) is deliberately NOT written here; the
 * caller writes it last so the NIC never sees a partially
 * submitted chain.
 */

static inline void
myri10ge_submit_req_backwards(myri10ge_tx_ring_t *tx,
    mcp_kreq_ether_send_t *src, int cnt)
{
	int idx, starting_slot;
	starting_slot = tx->req;
	while (cnt > 1) {
		cnt--;
		idx = (starting_slot + cnt) & tx->mask;
		myri10ge_pio_copy(&tx->lanai[idx],
		    &src[cnt], sizeof (*src));
		mb();	/* order each PIO copy before the next */
	}
}
2829 2829
/*
 * copy an array of mcp_kreq_ether_send_t's to the mcp. Copy
 * at most 32 bytes at a time, so as to avoid involving the software
 * pio handler in the nic. We re-write the first segment's flags
 * to mark them valid only after writing the entire chain.
 */

static inline void
myri10ge_submit_req(myri10ge_tx_ring_t *tx, mcp_kreq_ether_send_t *src,
    int cnt)
{
	int idx, i;
	uint32_t *src_ints, *dst_ints;
	mcp_kreq_ether_send_t *srcp, *dstp, *dst;
	uint8_t last_flags;

	idx = tx->req & tx->mask;

	/* strip the valid flags until the whole chain is written */
	last_flags = src->flags;
	src->flags = 0;
	mb();
	dst = dstp = &tx->lanai[idx];
	srcp = src;

	if ((idx + cnt) < tx->mask) {
		/* no ring wrap: copy forward, two requests at a time */
		for (i = 0; i < (cnt - 1); i += 2) {
			myri10ge_pio_copy(dstp, srcp, 2 * sizeof (*src));
			mb(); /* force write every 32 bytes */
			srcp += 2;
			dstp += 2;
		}
	} else {
		/*
		 * submit all but the first request, and ensure
		 * that it is submitted below
		 */
		myri10ge_submit_req_backwards(tx, src, cnt);
		i = 0;
	}
	if (i < cnt) {
		/* submit the first request */
		myri10ge_pio_copy(dstp, srcp, sizeof (*src));
		mb(); /* barrier before setting valid flag */
	}

	/* re-write the last 32-bits with the valid flags */
	src->flags |= last_flags;
	src_ints = (uint32_t *)src;
	src_ints += 3;
	dst_ints = (uint32_t *)dst;
	dst_ints += 3;
	*dst_ints = *src_ints;
	tx->req += cnt;
	mb();
	/* notify NIC to poll this tx ring */
	if (!tx->active && tx->go != NULL) {
		*(int *)(void *)tx->go = 1;
		tx->active = 1;
		tx->activate++;
		mb();
	}
}
2892 2892
2893 2893 /* ARGSUSED */
2894 2894 static inline void
2895 2895 myri10ge_lso_info_get(mblk_t *mp, uint32_t *mss, uint32_t *flags)
2896 2896 {
2897 2897 uint32_t lso_flag;
2898 2898 mac_lso_get(mp, mss, &lso_flag);
2899 2899 (*flags) |= lso_flag;
2900 2900 }
2901 2901
2902 2902
/* like pullupmsg, except preserve hcksum/LSO attributes */
static int
myri10ge_pullup(struct myri10ge_slice_state *ss, mblk_t *mp)
{
	uint32_t start, stuff, tx_offload_flags, mss;
	int ok;

	/*
	 * Save the checksum and LSO offload metadata first, since the
	 * pullup replaces the data blocks that carry it.
	 */
	mss = 0;
	mac_hcksum_get(mp, &start, &stuff, NULL, NULL, &tx_offload_flags);
	myri10ge_lso_info_get(mp, &mss, &tx_offload_flags);

	/* coalesce the entire chain into a single contiguous mblk */
	ok = pullupmsg(mp, -1);
	if (!ok) {
		printf("pullupmsg failed");
		return (DDI_FAILURE);
	}
	MYRI10GE_ATOMIC_SLICE_STAT_INC(xmit_pullup);
	/* restore the saved offload metadata on the new data block */
	mac_hcksum_set(mp, start, stuff, NULL, NULL, tx_offload_flags);
	if (tx_offload_flags & HW_LSO)
		DB_LSOMSS(mp) = (uint16_t)mss;
	lso_info_set(mp, mss, tx_offload_flags);
	return (DDI_SUCCESS);
}
2926 2926
2927 2927 static inline void
2928 2928 myri10ge_tx_stat(struct myri10ge_tx_pkt_stats *s, struct ether_header *eh,
2929 2929 int opackets, int obytes)
2930 2930 {
2931 2931 s->un.all = 0;
2932 2932 if (eh->ether_dhost.ether_addr_octet[0] & 1) {
2933 2933 if (0 == (bcmp(eh->ether_dhost.ether_addr_octet,
2934 2934 myri10ge_broadcastaddr, sizeof (eh->ether_dhost))))
2935 2935 s->un.s.brdcstxmt = 1;
2936 2936 else
2937 2937 s->un.s.multixmt = 1;
2938 2938 }
2939 2939 s->un.s.opackets = (uint16_t)opackets;
2940 2940 s->un.s.obytes = obytes;
2941 2941 }
2942 2942
/*
 * Transmit a small frame by copying it into a preallocated,
 * premapped per-slot copy buffer and posting a single send
 * descriptor.  Consumes (frees) mp on success.
 */
static int
myri10ge_tx_copy(struct myri10ge_slice_state *ss, mblk_t *mp,
    mcp_kreq_ether_send_t *req)
{
	myri10ge_tx_ring_t *tx = &ss->tx;
	caddr_t ptr;
	struct myri10ge_tx_copybuf *cp;
	mblk_t *bp;
	int idx, mblen, avail;
	uint16_t len;

	mutex_enter(&tx->lock);
	/* leave an extra slot to keep the ring from wrapping */
	avail = tx->mask - (tx->req - tx->done);
	if (avail <= 1) {
		mutex_exit(&tx->lock);
		return (EBUSY);
	}
	idx = tx->req & tx->mask;
	cp = &tx->cp[idx];
	ptr = cp->va;
	/* flatten the mblk chain into the copy buffer */
	for (len = 0, bp = mp; bp != NULL; bp = bp->b_cont) {
		mblen = MBLKL(bp);
		bcopy(bp->b_rptr, ptr, mblen);
		ptr += mblen;
		len += mblen;
	}
	/*
	 * ensure runts are padded to 60 bytes
	 * NOTE(review): the bzero clears out to 64 bytes, slightly
	 * past the 60-byte minimum -- presumably intentional extra
	 * padding; confirm before changing.
	 */
	if (len < 60) {
		bzero(ptr, 64 - len);
		len = 60;
	}
	req->addr_low = cp->dma.low;
	req->addr_high = cp->dma.high;
	req->length = htons(len);
	req->pad = 0;
	req->rdma_count = 1;
	myri10ge_tx_stat(&tx->info[idx].stat,
	    (struct ether_header *)(void *)cp->va, 1, len);
	/* make the copied frame visible to the device before sending */
	(void) ddi_dma_sync(cp->dma.handle, 0, len, DDI_DMA_SYNC_FORDEV);
	myri10ge_submit_req(&ss->tx, req, 1);
	mutex_exit(&tx->lock);
	freemsg(mp);
	return (DDI_SUCCESS);
}
2987 2987
2988 2988
2989 2989 static void
2990 2990 myri10ge_send_locked(myri10ge_tx_ring_t *tx, mcp_kreq_ether_send_t *req_list,
2991 2991 struct myri10ge_tx_buffer_state *tx_info,
2992 2992 int count)
2993 2993 {
2994 2994 int i, idx;
2995 2995
2996 2996 idx = 0; /* gcc -Wuninitialized */
2997 2997 /* store unmapping and bp info for tx irq handler */
2998 2998 for (i = 0; i < count; i++) {
2999 2999 idx = (tx->req + i) & tx->mask;
3000 3000 tx->info[idx].m = tx_info[i].m;
3001 3001 tx->info[idx].handle = tx_info[i].handle;
3002 3002 }
3003 3003 tx->info[idx].stat.un.all = tx_info[0].stat.un.all;
3004 3004
3005 3005 /* submit the frame to the nic */
3006 3006 myri10ge_submit_req(tx, req_list, count);
3007 3007
3008 3008
3009 3009 }
3010 3010
3011 3011
3012 3012
3013 3013 static void
3014 3014 myri10ge_copydata(mblk_t *mp, int off, int len, caddr_t buf)
3015 3015 {
3016 3016 mblk_t *bp;
3017 3017 int seglen;
3018 3018 uint_t count;
3019 3019
3020 3020 bp = mp;
3021 3021
3022 3022 while (off > 0) {
3023 3023 seglen = MBLKL(bp);
3024 3024 if (off < seglen)
3025 3025 break;
3026 3026 off -= seglen;
3027 3027 bp = bp->b_cont;
3028 3028 }
3029 3029 while (len > 0) {
3030 3030 seglen = MBLKL(bp);
3031 3031 count = min(seglen - off, len);
3032 3032 bcopy(bp->b_rptr + off, buf, count);
3033 3033 len -= count;
3034 3034 buf += count;
3035 3035 off = 0;
3036 3036 bp = bp->b_cont;
3037 3037 }
3038 3038 }
3039 3039
3040 3040 static int
3041 3041 myri10ge_ether_parse_header(mblk_t *mp)
3042 3042 {
3043 3043 struct ether_header eh_copy;
3044 3044 struct ether_header *eh;
3045 3045 int eth_hdr_len, seglen;
3046 3046
3047 3047 seglen = MBLKL(mp);
3048 3048 eth_hdr_len = sizeof (*eh);
3049 3049 if (seglen < eth_hdr_len) {
3050 3050 myri10ge_copydata(mp, 0, eth_hdr_len, (caddr_t)&eh_copy);
3051 3051 eh = &eh_copy;
3052 3052 } else {
3053 3053 eh = (struct ether_header *)(void *)mp->b_rptr;
3054 3054 }
3055 3055 if (eh->ether_type == BE_16(ETHERTYPE_VLAN)) {
3056 3056 eth_hdr_len += 4;
3057 3057 }
3058 3058
3059 3059 return (eth_hdr_len);
3060 3060 }
3061 3061
/*
 * Locate the IP header at byte offset off within the chain, zero its
 * checksum field (required by the firmware for LSO sends), and
 * return the total L2+L3+L4 header length in bytes.
 */
static int
myri10ge_lso_parse_header(mblk_t *mp, int off)
{
	char buf[128];
	int seglen, sum_off;
	struct ip *ip;
	struct tcphdr *tcp;

	seglen = MBLKL(mp);
	/* if the IP header is split across mblks, work on a flat copy */
	if (seglen < off + sizeof (*ip)) {
		myri10ge_copydata(mp, off, sizeof (*ip), buf);
		ip = (struct ip *)(void *)buf;
	} else {
		ip = (struct ip *)(void *)(mp->b_rptr + off);
	}
	/* re-copy if IP options push the TCP header past this mblk */
	if (seglen < off + (ip->ip_hl << 2) + sizeof (*tcp)) {
		myri10ge_copydata(mp, off,
		    (ip->ip_hl << 2) + sizeof (*tcp), buf);
		ip = (struct ip *)(void *)buf;
	}
	tcp = (struct tcphdr *)(void *)((char *)ip + (ip->ip_hl << 2));

	/*
	 * NIC expects ip_sum to be zero.  Recent changes to
	 * OpenSolaris leave the correct ip checksum there, rather
	 * than the required zero, so we need to zero it.  Otherwise,
	 * the NIC will produce bad checksums when sending LSO packets.
	 */
	if (ip->ip_sum != 0) {
		if (((char *)ip) != buf) {
			/* ip points into mblk, so just zero it */
			ip->ip_sum = 0;
		} else {
			/*
			 * ip points into a copy, so walk the chain
			 * to find the ip_csum, then zero it.  The
			 * 16-bit field may straddle an mblk boundary,
			 * so each of its two bytes is located and
			 * zeroed independently.
			 */
			sum_off = off + _PTRDIFF(&ip->ip_sum, buf);
			while (sum_off > (int)(MBLKL(mp) - 1)) {
				sum_off -= MBLKL(mp);
				mp = mp->b_cont;
			}
			mp->b_rptr[sum_off] = 0;
			sum_off++;
			/*
			 * NOTE(review): this second walk omits the
			 * (int) cast used above -- verify MBLKL's
			 * type makes the comparisons equivalent.
			 */
			while (sum_off > MBLKL(mp) - 1) {
				sum_off -= MBLKL(mp);
				mp = mp->b_cont;
			}
			mp->b_rptr[sum_off] = 0;
		}
	}
	return (off + ((ip->ip_hl + tcp->th_off) << 2));
}
3115 3115
/*
 * LSO transmit path for copy-based sends: copy the frame into the
 * ring's premapped copy buffers -- never letting an MSS-sized
 * segment straddle a tx_boundary -- then build and post the TSO
 * descriptor chain.  Consumes (frees) mp on success.  Caller has
 * verified mss < tx_boundary.
 */
static int
myri10ge_tx_tso_copy(struct myri10ge_slice_state *ss, mblk_t *mp,
    mcp_kreq_ether_send_t *req_list, int hdr_size, int pkt_size,
    uint16_t mss, uint8_t cksum_offset)
{
	myri10ge_tx_ring_t *tx = &ss->tx;
	struct myri10ge_priv *mgp = ss->mgp;
	mblk_t *bp;
	mcp_kreq_ether_send_t *req;
	struct myri10ge_tx_copybuf *cp;
	caddr_t rptr, ptr;
	int mblen, count, cum_len, mss_resid, tx_req, pkt_size_tmp;
	int resid, avail, idx, hdr_size_tmp, tx_boundary;
	int rdma_count;
	uint32_t seglen, len, boundary, low, high_swapped;
	uint16_t pseudo_hdr_offset = htons(mss);
	uint8_t flags;

	tx_boundary = mgp->tx_boundary;
	hdr_size_tmp = hdr_size;
	resid = tx_boundary;
	count = 1;
	mutex_enter(&tx->lock);

	/* check to see if the slots are really there */
	avail = tx->mask - (tx->req - tx->done);
	if (unlikely(avail <= MYRI10GE_MAX_SEND_DESC_TSO)) {
		atomic_inc_32(&tx->stall);
		mutex_exit(&tx->lock);
		return (EBUSY);
	}

	/* copy */
	cum_len = -hdr_size;
	count = 0;
	req = req_list;
	idx = tx->mask & tx->req;
	cp = &tx->cp[idx];
	low = ntohl(cp->dma.low);
	ptr = cp->va;
	cp->len = 0;
	if (mss) {
		/* precompute stats: one wire packet per MSS of payload */
		int payload = pkt_size - hdr_size;
		uint16_t opackets = (payload / mss) + ((payload % mss) != 0);
		tx->info[idx].ostat.opackets = opackets;
		tx->info[idx].ostat.obytes = (opackets - 1) * hdr_size
		    + pkt_size;
	}
	hdr_size_tmp = hdr_size;
	mss_resid = mss;
	flags = (MXGEFW_FLAGS_TSO_HDR | MXGEFW_FLAGS_FIRST);
	tx_req = tx->req;
	/* copy headers first, then payload, into the copy buffers */
	for (bp = mp; bp != NULL; bp = bp->b_cont) {
		mblen = MBLKL(bp);
		rptr = (caddr_t)bp->b_rptr;
		len = min(hdr_size_tmp, mblen);
		if (len) {
			bcopy(rptr, ptr, len);
			rptr += len;
			ptr += len;
			resid -= len;
			mblen -= len;
			hdr_size_tmp -= len;
			cp->len += len;
			if (hdr_size_tmp)
				continue;
			if (resid < mss) {
				/* no room for a full segment; next buffer */
				tx_req++;
				idx = tx->mask & tx_req;
				cp = &tx->cp[idx];
				low = ntohl(cp->dma.low);
				ptr = cp->va;
				resid = tx_boundary;
			}
		}
		while (mblen) {
			len = min(mss_resid, mblen);
			bcopy(rptr, ptr, len);
			mss_resid -= len;
			resid -= len;
			mblen -= len;
			rptr += len;
			ptr += len;
			cp->len += len;
			if (mss_resid == 0) {
				/* finished an MSS-sized segment */
				mss_resid = mss;
				if (resid < mss) {
					tx_req++;
					idx = tx->mask & tx_req;
					cp = &tx->cp[idx];
					cp->len = 0;
					low = ntohl(cp->dma.low);
					ptr = cp->va;
					resid = tx_boundary;
				}
			}
		}
	}

	/* build the descriptor chain over the filled copy buffers */
	req = req_list;
	pkt_size_tmp = pkt_size;
	count = 0;
	rdma_count = 0;
	tx_req = tx->req;
	while (pkt_size_tmp) {
		idx = tx->mask & tx_req;
		cp = &tx->cp[idx];
		high_swapped = cp->dma.high;
		low = ntohl(cp->dma.low);
		len = cp->len;
		if (len == 0) {
			/* should not happen: dump state and drop the frame */
			printf("len=0! pkt_size_tmp=%d, pkt_size=%d\n",
			    pkt_size_tmp, pkt_size);
			for (bp = mp; bp != NULL; bp = bp->b_cont) {
				mblen = MBLKL(bp);
				printf("mblen:%d\n", mblen);
			}
			pkt_size_tmp = pkt_size;
			tx_req = tx->req;
			while (pkt_size_tmp > 0) {
				idx = tx->mask & tx_req;
				cp = &tx->cp[idx];
				printf("cp->len = %d\n", cp->len);
				pkt_size_tmp -= cp->len;
				tx_req++;
			}
			printf("dropped\n");
			MYRI10GE_ATOMIC_SLICE_STAT_INC(xmit_err);
			goto done;
		}
		pkt_size_tmp -= len;
		while (len) {
			while (len) {
				uint8_t flags_next;
				int cum_len_next;

				/* never cross a tx_boundary in one request */
				boundary = (low + mgp->tx_boundary) &
				    ~(mgp->tx_boundary - 1);
				seglen = boundary - low;
				if (seglen > len)
					seglen = len;

				flags_next = flags & ~MXGEFW_FLAGS_FIRST;
				cum_len_next = cum_len + seglen;
				(req-rdma_count)->rdma_count = rdma_count + 1;
				if (likely(cum_len >= 0)) {
					/* payload */
					int next_is_first, chop;

					chop = (cum_len_next > mss);
					cum_len_next = cum_len_next % mss;
					next_is_first = (cum_len_next == 0);
					flags |= chop *
					    MXGEFW_FLAGS_TSO_CHOP;
					flags_next |= next_is_first *
					    MXGEFW_FLAGS_FIRST;
					rdma_count |= -(chop | next_is_first);
					rdma_count += chop & !next_is_first;
				} else if (likely(cum_len_next >= 0)) {
					/* header ends */
					int small;

					rdma_count = -1;
					cum_len_next = 0;
					seglen = -cum_len;
					small = (mss <= MXGEFW_SEND_SMALL_SIZE);
					flags_next = MXGEFW_FLAGS_TSO_PLD |
					    MXGEFW_FLAGS_FIRST |
					    (small * MXGEFW_FLAGS_SMALL);
				}
				req->addr_high = high_swapped;
				req->addr_low = htonl(low);
				req->pseudo_hdr_offset = pseudo_hdr_offset;
				req->pad = 0; /* complete solid 16-byte block */
				req->rdma_count = 1;
				req->cksum_offset = cksum_offset;
				req->length = htons(seglen);
				req->flags = flags | ((cum_len & 1) *
				    MXGEFW_FLAGS_ALIGN_ODD);
				if (cksum_offset > seglen)
					cksum_offset -= seglen;
				else
					cksum_offset = 0;
				low += seglen;
				len -= seglen;
				cum_len = cum_len_next;
				req++;
				req->flags = 0;
				flags = flags_next;
				count++;
				rdma_count++;
			}
		}
		tx_req++;
	}
	(req-rdma_count)->rdma_count = (uint8_t)rdma_count;
	/* mark the last descriptor of each emitted TSO packet */
	do {
		req--;
		req->flags |= MXGEFW_FLAGS_TSO_LAST;
	} while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP |
	    MXGEFW_FLAGS_FIRST)));

	myri10ge_submit_req(tx, req_list, count);
done:
	mutex_exit(&tx->lock);
	freemsg(mp);
	return (DDI_SUCCESS);
}
3324 3324
3325 3325 /*
3326 3326 * Try to send the chain of buffers described by the mp. We must not
3327 3327 * encapsulate more than eth->tx.req - eth->tx.done, or
3328 3328 * MXGEFW_MAX_SEND_DESC, whichever is more.
3329 3329 */
3330 3330
static int
myri10ge_send(struct myri10ge_slice_state *ss, mblk_t *mp,
    mcp_kreq_ether_send_t *req_list, struct myri10ge_tx_buffer_state *tx_info)
{
	struct myri10ge_priv *mgp = ss->mgp;
	myri10ge_tx_ring_t *tx = &ss->tx;
	mcp_kreq_ether_send_t *req;
	struct myri10ge_tx_dma_handle *handles, *dma_handle = NULL;
	mblk_t *bp;
	ddi_dma_cookie_t cookie;
	int err, rv, count, avail, mblen, try_pullup, i, max_segs, maclen,
	    rdma_count, cum_len, lso_hdr_size;
	uint32_t start, stuff, tx_offload_flags;
	uint32_t seglen, len, mss, boundary, low, high_swapped;
	uint_t ncookies;
	uint16_t pseudo_hdr_offset;
	uint8_t flags, cksum_offset, odd_flag;
	int pkt_size;
	int lso_copy = myri10ge_lso_copy;
	try_pullup = 1;

again:
	/* Setup checksum offloading, if needed */
	mac_hcksum_get(mp, &start, &stuff, NULL, NULL, &tx_offload_flags);
	myri10ge_lso_info_get(mp, &mss, &tx_offload_flags);
	if (tx_offload_flags & HW_LSO) {
		max_segs = MYRI10GE_MAX_SEND_DESC_TSO;
		/* LSO without partial checksum offload is not supported */
		if ((tx_offload_flags & HCK_PARTIALCKSUM) == 0) {
			MYRI10GE_ATOMIC_SLICE_STAT_INC(xmit_lsobadflags);
			freemsg(mp);
			return (DDI_SUCCESS);
		}
	} else {
		max_segs = MXGEFW_MAX_SEND_DESC;
		mss = 0;
	}
	req = req_list;
	cksum_offset = 0;
	pseudo_hdr_offset = 0;

	/* leave an extra slot to keep the ring from wrapping */
	avail = tx->mask - (tx->req - tx->done);

	/*
	 * If we have > MXGEFW_MAX_SEND_DESC, then any over-length
	 * message will need to be pulled up in order to fit.
	 * Otherwise, we are low on transmit descriptors, it is
	 * probably better to stall and try again rather than pullup a
	 * message to fit.
	 */

	if (avail < max_segs) {
		err = EBUSY;
		atomic_inc_32(&tx->stall_early);
		goto stall;
	}

	/* find out how long the frame is and how many segments it is */
	count = 0;
	odd_flag = 0;
	pkt_size = 0;
	flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST);
	for (bp = mp; bp != NULL; bp = bp->b_cont) {
		dblk_t *dbp;
		mblen = MBLKL(bp);
		if (mblen == 0) {
			/*
			 * we can't simply skip over 0-length mblks
			 * because the hardware can't deal with them,
			 * and we could leak them.
			 */
			MYRI10GE_ATOMIC_SLICE_STAT_INC(xmit_zero_len);
			err = EIO;
			goto pullup;
		}
		/*
		 * There's no advantage to copying most gesballoc
		 * attached blocks, so disable lso copy in that case
		 */
		if (mss && lso_copy == 1 && ((dbp = bp->b_datap) != NULL)) {
			if ((void *)dbp->db_lastfree != myri10ge_db_lastfree) {
				lso_copy = 0;
			}
		}
		pkt_size += mblen;
		count++;
	}

	/* Try to pull up excessively long chains */
	if (count >= max_segs) {
		err = myri10ge_pullup(ss, mp);
		if (likely(err == DDI_SUCCESS)) {
			count = 1;
		} else {
			if (count < MYRI10GE_MAX_SEND_DESC_TSO) {
				/*
				 * just let the h/w send it, it will be
				 * inefficient, but it is better than
				 * dropping
				 */
				max_segs = MYRI10GE_MAX_SEND_DESC_TSO;
			} else {
				/* drop it */
				MYRI10GE_ATOMIC_SLICE_STAT_INC(xmit_err);
				freemsg(mp);
				return (0);
			}
		}
	}

	cum_len = 0;
	maclen = myri10ge_ether_parse_header(mp);

	if (tx_offload_flags & HCK_PARTIALCKSUM) {

		cksum_offset = start + maclen;
		pseudo_hdr_offset = htons(stuff + maclen);
		odd_flag = MXGEFW_FLAGS_ALIGN_ODD;
		flags |= MXGEFW_FLAGS_CKSUM;
	}

	lso_hdr_size = 0; /* -Wuninitialized */
	if (mss) { /* LSO */
		/* this removes any CKSUM flag from before */
		flags = (MXGEFW_FLAGS_TSO_HDR | MXGEFW_FLAGS_FIRST);
		/*
		 * parse the headers and set cum_len to a negative
		 * value to reflect the offset of the TCP payload
		 */
		lso_hdr_size = myri10ge_lso_parse_header(mp, maclen);
		cum_len = -lso_hdr_size;
		if ((mss < mgp->tx_boundary) && lso_copy) {
			err = myri10ge_tx_tso_copy(ss, mp, req_list,
			    lso_hdr_size, pkt_size, mss, cksum_offset);
			return (err);
		}

		/*
		 * for TSO, pseudo_hdr_offset holds mss.  The firmware
		 * figures out where to put the checksum by parsing
		 * the header.
		 */

		pseudo_hdr_offset = htons(mss);
	} else if (pkt_size <= MXGEFW_SEND_SMALL_SIZE) {
		flags |= MXGEFW_FLAGS_SMALL;
		if (pkt_size < myri10ge_tx_copylen) {
			/* small frames are cheaper to copy than to map */
			req->cksum_offset = cksum_offset;
			req->pseudo_hdr_offset = pseudo_hdr_offset;
			req->flags = flags;
			err = myri10ge_tx_copy(ss, mp, req);
			return (err);
		}
		cum_len = 0;
	}

	/* pull one DMA handle for each bp from our freelist */
	handles = NULL;
	err = myri10ge_alloc_tx_handles(ss, count, &handles);
	if (err != DDI_SUCCESS) {
		err = DDI_FAILURE;
		goto stall;
	}
	count = 0;
	rdma_count = 0;
	for (bp = mp; bp != NULL; bp = bp->b_cont) {
		mblen = MBLKL(bp);
		dma_handle = handles;
		handles = handles->next;

		rv = ddi_dma_addr_bind_handle(dma_handle->h, NULL,
		    (caddr_t)bp->b_rptr, mblen,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
		    &cookie, &ncookies);
		if (unlikely(rv != DDI_DMA_MAPPED)) {
			err = EIO;
			try_pullup = 0;
			/* return the unused handle to the list */
			dma_handle->next = handles;
			handles = dma_handle;
			goto abort_with_handles;
		}

		/* reserve the slot */
		tx_info[count].m = bp;
		tx_info[count].handle = dma_handle;

		for (; ; ) {
			low = MYRI10GE_LOWPART_TO_U32(cookie.dmac_laddress);
			high_swapped =
			    htonl(MYRI10GE_HIGHPART_TO_U32(
			    cookie.dmac_laddress));
			len = (uint32_t)cookie.dmac_size;
			while (len) {
				uint8_t flags_next;
				int cum_len_next;

				/* never cross a tx_boundary in one request */
				boundary = (low + mgp->tx_boundary) &
				    ~(mgp->tx_boundary - 1);
				seglen = boundary - low;
				if (seglen > len)
					seglen = len;

				flags_next = flags & ~MXGEFW_FLAGS_FIRST;
				cum_len_next = cum_len + seglen;
				if (mss) {
					(req-rdma_count)->rdma_count =
					    rdma_count + 1;
					if (likely(cum_len >= 0)) {
						/* payload */
						int next_is_first, chop;

						chop = (cum_len_next > mss);
						cum_len_next =
						    cum_len_next % mss;
						next_is_first =
						    (cum_len_next == 0);
						flags |= chop *
						    MXGEFW_FLAGS_TSO_CHOP;
						flags_next |= next_is_first *
						    MXGEFW_FLAGS_FIRST;
						rdma_count |=
						    -(chop | next_is_first);
						rdma_count +=
						    chop & !next_is_first;
					} else if (likely(cum_len_next >= 0)) {
						/* header ends */
						int small;

						rdma_count = -1;
						cum_len_next = 0;
						seglen = -cum_len;
						small = (mss <=
						    MXGEFW_SEND_SMALL_SIZE);
						flags_next =
						    MXGEFW_FLAGS_TSO_PLD
						    | MXGEFW_FLAGS_FIRST
						    | (small *
						    MXGEFW_FLAGS_SMALL);
					}
				}
				req->addr_high = high_swapped;
				req->addr_low = htonl(low);
				req->pseudo_hdr_offset = pseudo_hdr_offset;
				req->pad = 0; /* complete solid 16-byte block */
				req->rdma_count = 1;
				req->cksum_offset = cksum_offset;
				req->length = htons(seglen);
				req->flags = flags | ((cum_len & 1) * odd_flag);
				if (cksum_offset > seglen)
					cksum_offset -= seglen;
				else
					cksum_offset = 0;
				low += seglen;
				len -= seglen;
				cum_len = cum_len_next;
				count++;
				rdma_count++;
				/* make sure all the segments will fit */
				if (unlikely(count >= max_segs)) {
					MYRI10GE_ATOMIC_SLICE_STAT_INC(
					    xmit_lowbuf);
					/* may try a pullup */
					err = EBUSY;
					if (try_pullup)
						try_pullup = 2;
					goto abort_with_handles;
				}
				req++;
				req->flags = 0;
				flags = flags_next;
				tx_info[count].m = 0;
			}
			ncookies--;
			if (ncookies == 0)
				break;
			ddi_dma_nextcookie(dma_handle->h, &cookie);
		}
	}
	(req-rdma_count)->rdma_count = (uint8_t)rdma_count;

	if (mss) {
		/* mark the last descriptor of each emitted TSO packet */
		do {
			req--;
			req->flags |= MXGEFW_FLAGS_TSO_LAST;
		} while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP |
		    MXGEFW_FLAGS_FIRST)));
	}

	/* calculate tx stats */
	if (mss) {
		uint16_t opackets;
		int payload;

		payload = pkt_size - lso_hdr_size;
		opackets = (payload / mss) + ((payload % mss) != 0);
		tx_info[0].stat.un.all = 0;
		tx_info[0].ostat.opackets = opackets;
		tx_info[0].ostat.obytes = (opackets - 1) * lso_hdr_size
		    + pkt_size;
	} else {
		myri10ge_tx_stat(&tx_info[0].stat,
		    (struct ether_header *)(void *)mp->b_rptr, 1, pkt_size);
	}
	mutex_enter(&tx->lock);

	/* check to see if the slots are really there */
	avail = tx->mask - (tx->req - tx->done);
	if (unlikely(avail <= count)) {
		mutex_exit(&tx->lock);
		err = 0;
		goto late_stall;
	}

	myri10ge_send_locked(tx, req_list, tx_info, count);
	mutex_exit(&tx->lock);
	return (DDI_SUCCESS);

late_stall:
	try_pullup = 0;
	atomic_inc_32(&tx->stall_late);

abort_with_handles:
	/* unbind and free handles from previous mblks */
	for (i = 0; i < count; i++) {
		bp = tx_info[i].m;
		tx_info[i].m = 0;
		if (bp) {
			dma_handle = tx_info[i].handle;
			(void) ddi_dma_unbind_handle(dma_handle->h);
			dma_handle->next = handles;
			handles = dma_handle;
			tx_info[i].handle = NULL;
			tx_info[i].m = NULL;
		}
	}
	myri10ge_free_tx_handle_slist(tx, handles);
pullup:
	if (try_pullup) {
		err = myri10ge_pullup(ss, mp);
		if (err != DDI_SUCCESS && try_pullup == 2) {
			/* drop */
			MYRI10GE_ATOMIC_SLICE_STAT_INC(xmit_err);
			freemsg(mp);
			return (0);
		}
		/* only attempt the pullup once */
		try_pullup = 0;
		goto again;
	}

stall:
	if (err != 0) {
		if (err == EBUSY) {
			atomic_inc_32(&tx->stall);
		} else {
			MYRI10GE_ATOMIC_SLICE_STAT_INC(xmit_err);
		}
	}
	return (err);
}
3689 3689
/*
 * mac(9E) transmit entry point for one slice: returns NULL when the
 * frame was consumed, or mp when the ring is busy so the MAC layer
 * can retry it later.
 */
static mblk_t *
myri10ge_send_wrapper(void *arg, mblk_t *mp)
{
	struct myri10ge_slice_state *ss = arg;
	int err = 0;
	mcp_kreq_ether_send_t *req_list;
#if defined(__i386)
	/*
	 * We need about 2.5KB of scratch space to handle transmits.
	 * i86pc has only 8KB of kernel stack space, so we malloc the
	 * scratch space there rather than keeping it on the stack.
	 */
	size_t req_size, tx_info_size;
	struct myri10ge_tx_buffer_state *tx_info;
	caddr_t req_bytes;

	req_size = sizeof (*req_list) * (MYRI10GE_MAX_SEND_DESC_TSO + 4)
	    + 8;
	req_bytes = kmem_alloc(req_size, KM_SLEEP);
	tx_info_size = sizeof (*tx_info) * (MYRI10GE_MAX_SEND_DESC_TSO + 1);
	tx_info = kmem_alloc(tx_info_size, KM_SLEEP);
#else
	char req_bytes[sizeof (*req_list) * (MYRI10GE_MAX_SEND_DESC_TSO + 4)
	    + 8];
	struct myri10ge_tx_buffer_state tx_info[MYRI10GE_MAX_SEND_DESC_TSO + 1];
#endif

	/* ensure req_list entries are aligned to 8 bytes */
	req_list = (struct mcp_kreq_ether_send *)
	    (((unsigned long)req_bytes + 7UL) & ~7UL);

	err = myri10ge_send(ss, mp, req_list, tx_info);

#if defined(__i386)
	kmem_free(tx_info, tx_info_size);
	kmem_free(req_bytes, req_size);
#endif
	if (err)
		return (mp);
	else
		return (NULL);
}
3732 3732
3733 3733 static int
3734 3734 myri10ge_addmac(void *arg, const uint8_t *mac_addr)
3735 3735 {
3736 3736 struct myri10ge_priv *mgp = arg;
3737 3737 int err;
3738 3738
3739 3739 if (mac_addr == NULL)
3740 3740 return (EINVAL);
3741 3741
3742 3742 mutex_enter(&mgp->intrlock);
3743 3743 if (mgp->macaddr_cnt) {
3744 3744 mutex_exit(&mgp->intrlock);
3745 3745 return (ENOSPC);
3746 3746 }
3747 3747 err = myri10ge_m_unicst(mgp, mac_addr);
3748 3748 if (!err)
3749 3749 mgp->macaddr_cnt++;
3750 3750
3751 3751 mutex_exit(&mgp->intrlock);
3752 3752 if (err)
3753 3753 return (err);
3754 3754
3755 3755 bcopy(mac_addr, mgp->mac_addr, sizeof (mgp->mac_addr));
3756 3756 return (0);
3757 3757 }
3758 3758
3759 3759 /*ARGSUSED*/
3760 3760 static int
3761 3761 myri10ge_remmac(void *arg, const uint8_t *mac_addr)
3762 3762 {
3763 3763 struct myri10ge_priv *mgp = arg;
3764 3764
3765 3765 mutex_enter(&mgp->intrlock);
3766 3766 mgp->macaddr_cnt--;
3767 3767 mutex_exit(&mgp->intrlock);
3768 3768
3769 3769 return (0);
3770 3770 }
3771 3771
3772 3772 /*ARGSUSED*/
3773 3773 static void
3774 3774 myri10ge_fill_group(void *arg, mac_ring_type_t rtype, const int index,
3775 3775 mac_group_info_t *infop, mac_group_handle_t gh)
3776 3776 {
3777 3777 struct myri10ge_priv *mgp = arg;
3778 3778
3779 3779 if (rtype != MAC_RING_TYPE_RX)
3780 3780 return;
3781 3781
3782 3782 infop->mgi_driver = (mac_group_driver_t)mgp;
3783 3783 infop->mgi_start = NULL;
3784 3784 infop->mgi_stop = NULL;
3785 3785 infop->mgi_addmac = myri10ge_addmac;
3786 3786 infop->mgi_remmac = myri10ge_remmac;
3787 3787 infop->mgi_count = mgp->num_slices;
3788 3788 }
3789 3789
3790 3790 static int
3791 3791 myri10ge_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
3792 3792 {
3793 3793 struct myri10ge_slice_state *ss;
3794 3794
3795 3795 ss = (struct myri10ge_slice_state *)rh;
3796 3796 mutex_enter(&ss->rx_lock);
3797 3797 ss->rx_gen_num = mr_gen_num;
3798 3798 mutex_exit(&ss->rx_lock);
3799 3799 return (0);
3800 3800 }
3801 3801
3802 3802 /*
3803 3803 * Retrieve a value for one of the statistics for a particular rx ring
3804 3804 */
3805 3805 int
3806 3806 myri10ge_rx_ring_stat(mac_ring_driver_t rh, uint_t stat, uint64_t *val)
3807 3807 {
3808 3808 struct myri10ge_slice_state *ss;
3809 3809
3810 3810 ss = (struct myri10ge_slice_state *)rh;
3811 3811 switch (stat) {
3812 3812 case MAC_STAT_RBYTES:
3813 3813 *val = ss->rx_stats.ibytes;
3814 3814 break;
3815 3815
3816 3816 case MAC_STAT_IPACKETS:
3817 3817 *val = ss->rx_stats.ipackets;
3818 3818 break;
3819 3819
3820 3820 default:
3821 3821 *val = 0;
3822 3822 return (ENOTSUP);
3823 3823 }
3824 3824
3825 3825 return (0);
3826 3826 }
3827 3827
3828 3828 /*
3829 3829 * Retrieve a value for one of the statistics for a particular tx ring
3830 3830 */
3831 3831 int
3832 3832 myri10ge_tx_ring_stat(mac_ring_driver_t rh, uint_t stat, uint64_t *val)
3833 3833 {
3834 3834 struct myri10ge_slice_state *ss;
3835 3835
3836 3836 ss = (struct myri10ge_slice_state *)rh;
3837 3837 switch (stat) {
3838 3838 case MAC_STAT_OBYTES:
3839 3839 *val = ss->tx.stats.obytes;
3840 3840 break;
3841 3841
3842 3842 case MAC_STAT_OPACKETS:
3843 3843 *val = ss->tx.stats.opackets;
3844 3844 break;
3845 3845
3846 3846 default:
3847 3847 *val = 0;
3848 3848 return (ENOTSUP);
3849 3849 }
3850 3850
3851 3851 return (0);
3852 3852 }
3853 3853
3854 3854 static int
3855 3855 myri10ge_rx_ring_intr_disable(mac_intr_handle_t intrh)
3856 3856 {
3857 3857 struct myri10ge_slice_state *ss;
3858 3858
3859 3859 ss = (struct myri10ge_slice_state *)intrh;
3860 3860 mutex_enter(&ss->poll_lock);
3861 3861 ss->rx_polling = B_TRUE;
3862 3862 mutex_exit(&ss->poll_lock);
3863 3863 return (0);
3864 3864 }
3865 3865
/*
 * mac entry point: re-enable rx interrupts for this slice, leaving
 * polled-receive mode.  If an interrupt token was taken while we were
 * polling (rx_token non-zero), write the NIC's irq claim register so
 * the firmware re-arms the interrupt; otherwise nothing is pending.
 */
static int
myri10ge_rx_ring_intr_enable(mac_intr_handle_t intrh)
{
	struct myri10ge_slice_state *ss;

	ss = (struct myri10ge_slice_state *)intrh;
	mutex_enter(&ss->poll_lock);
	ss->rx_polling = B_FALSE;
	if (ss->rx_token) {
		/* claim value 3 — NOTE(review): firmware-defined; confirm */
		*ss->irq_claim = BE_32(3);
		ss->rx_token = 0;
	}
	mutex_exit(&ss->poll_lock);
	return (0);
}
3881 3881
/*
 * mac ring-info entry point: describe one rx or tx ring (one slice)
 * to the mac layer by filling in the driver handles, entry points and
 * (for rx) the per-ring interrupt enable/disable callbacks used for
 * polling mode.  rg_index is unused: there is a single ring group.
 */
/*ARGSUSED*/
static void
myri10ge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
    const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
{
	struct myri10ge_priv *mgp = arg;
	struct myri10ge_slice_state *ss;
	mac_intr_t *mintr = &infop->mri_intr;

	ASSERT((unsigned int)ring_index < mgp->num_slices);

	/* rings map 1:1 onto slices */
	ss = &mgp->ss[ring_index];
	switch (rtype) {
	case MAC_RING_TYPE_RX:
		/* keep mac's handle so rx deliveries can name this ring */
		ss->rx_rh = rh;
		infop->mri_driver = (mac_ring_driver_t)ss;
		infop->mri_start = myri10ge_ring_start;
		infop->mri_stop = NULL;
		infop->mri_poll = myri10ge_poll_rx;
		infop->mri_stat = myri10ge_rx_ring_stat;
		mintr->mi_handle = (mac_intr_handle_t)ss;
		mintr->mi_enable = myri10ge_rx_ring_intr_enable;
		mintr->mi_disable = myri10ge_rx_ring_intr_disable;
		break;
	case MAC_RING_TYPE_TX:
		ss->tx.rh = rh;
		infop->mri_driver = (mac_ring_driver_t)ss;
		infop->mri_start = NULL;
		infop->mri_stop = NULL;
		infop->mri_tx = myri10ge_send_wrapper;
		infop->mri_stat = myri10ge_tx_ring_stat;
		break;
	default:
		break;
	}
}
3918 3918
3919 3919 static void
3920 3920 myri10ge_nic_stat_destroy(struct myri10ge_priv *mgp)
3921 3921 {
3922 3922 if (mgp->ksp_stat == NULL)
3923 3923 return;
3924 3924
3925 3925 kstat_delete(mgp->ksp_stat);
3926 3926 mgp->ksp_stat = NULL;
3927 3927 }
3928 3928
3929 3929 static void
3930 3930 myri10ge_slice_stat_destroy(struct myri10ge_slice_state *ss)
3931 3931 {
3932 3932 if (ss->ksp_stat == NULL)
3933 3933 return;
3934 3934
3935 3935 kstat_delete(ss->ksp_stat);
3936 3936 ss->ksp_stat = NULL;
3937 3937 }
3938 3938
3939 3939 static void
3940 3940 myri10ge_info_destroy(struct myri10ge_priv *mgp)
3941 3941 {
3942 3942 if (mgp->ksp_info == NULL)
3943 3943 return;
3944 3944
3945 3945 kstat_delete(mgp->ksp_info);
3946 3946 mgp->ksp_info = NULL;
3947 3947 }
3948 3948
/*
 * ks_update callback for the per-NIC kstat: refresh the snapshot from
 * driver state and from the firmware's irq-data block (shared with
 * slice 0).  Firmware counters are big-endian on the wire, hence the
 * ntohl() conversions.  Writes are refused.
 */
static int
myri10ge_nic_stat_kstat_update(kstat_t *ksp, int rw)
{
	struct myri10ge_nic_stat *ethstat;
	struct myri10ge_priv *mgp;
	mcp_irq_data_t *fw_stats;


	if (rw == KSTAT_WRITE)
		return (EACCES);

	ethstat = (struct myri10ge_nic_stat *)ksp->ks_data;
	mgp = (struct myri10ge_priv *)ksp->ks_private;
	/* firmware stats live in the DMA block attached to slice 0 */
	fw_stats = mgp->ss[0].fw_stats;

	ethstat->dma_read_bw_MBs.value.ul = mgp->read_dma;
	ethstat->dma_write_bw_MBs.value.ul = mgp->write_dma;
	ethstat->dma_read_write_bw_MBs.value.ul = mgp->read_write_dma;
	/* report whether the tx DMA attr won the FORCE_PHYSICAL probe */
	if (myri10ge_tx_dma_attr.dma_attr_flags & DDI_DMA_FORCE_PHYSICAL)
		ethstat->dma_force_physical.value.ul = 1;
	else
		ethstat->dma_force_physical.value.ul = 0;
	ethstat->lanes.value.ul = mgp->pcie_link_width;
	ethstat->dropped_bad_crc32.value.ul =
	    ntohl(fw_stats->dropped_bad_crc32);
	ethstat->dropped_bad_phy.value.ul =
	    ntohl(fw_stats->dropped_bad_phy);
	ethstat->dropped_link_error_or_filtered.value.ul =
	    ntohl(fw_stats->dropped_link_error_or_filtered);
	ethstat->dropped_link_overflow.value.ul =
	    ntohl(fw_stats->dropped_link_overflow);
	ethstat->dropped_multicast_filtered.value.ul =
	    ntohl(fw_stats->dropped_multicast_filtered);
	ethstat->dropped_no_big_buffer.value.ul =
	    ntohl(fw_stats->dropped_no_big_buffer);
	ethstat->dropped_no_small_buffer.value.ul =
	    ntohl(fw_stats->dropped_no_small_buffer);
	ethstat->dropped_overrun.value.ul =
	    ntohl(fw_stats->dropped_overrun);
	ethstat->dropped_pause.value.ul =
	    ntohl(fw_stats->dropped_pause);
	ethstat->dropped_runt.value.ul =
	    ntohl(fw_stats->dropped_runt);
	ethstat->link_up.value.ul =
	    ntohl(fw_stats->link_up);
	ethstat->dropped_unicast_filtered.value.ul =
	    ntohl(fw_stats->dropped_unicast_filtered);
	return (0);
}
3998 3998
/*
 * ks_update callback for a per-slice kstat: refresh the snapshot from
 * the slice's rx/tx bookkeeping.  Several values are derived rather
 * than stored directly (e.g. big buffers held by firmware = posted
 * minus those loaned up the stack).  Writes are refused.
 */
static int
myri10ge_slice_stat_kstat_update(kstat_t *ksp, int rw)
{
	struct myri10ge_slice_stat *ethstat;
	struct myri10ge_slice_state *ss;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	ethstat = (struct myri10ge_slice_stat *)ksp->ks_data;
	ss = (struct myri10ge_slice_state *)ksp->ks_private;

	/* jumbo buffers currently loaned to the stack */
	ethstat->rx_big.value.ul = ss->j_rx_cnt;
	/* posted to firmware but not yet received back */
	ethstat->rx_bigbuf_firmware.value.ul = ss->rx_big.cnt - ss->j_rx_cnt;
	ethstat->rx_bigbuf_pool.value.ul =
	    ss->jpool.num_alloc - ss->jbufs_for_smalls;
	ethstat->rx_bigbuf_smalls.value.ul = ss->jbufs_for_smalls;
	/* small buffers outstanding beyond the ring size */
	ethstat->rx_small.value.ul = ss->rx_small.cnt -
	    (ss->rx_small.mask + 1);
	ethstat->tx_done.value.ul = ss->tx.done;
	ethstat->tx_req.value.ul = ss->tx.req;
	ethstat->tx_activate.value.ul = ss->tx.activate;
	ethstat->xmit_sched.value.ul = ss->tx.sched;
	ethstat->xmit_stall.value.ul = ss->tx.stall;
	ethstat->xmit_stall_early.value.ul = ss->tx.stall_early;
	ethstat->xmit_stall_late.value.ul = ss->tx.stall_late;
	ethstat->xmit_err.value.ul = MYRI10GE_SLICE_STAT(xmit_err);
	return (0);
}
4028 4028
/*
 * ks_update callback for the "myri10ge_info" kstat: point the named
 * string entries at the driver's identification strings (version,
 * firmware, interrupt type, product code, serial number).
 * kstat_named_setstr() stores references, not copies; the strings
 * must outlive the kstat.  Writes are refused.
 */
static int
myri10ge_info_kstat_update(kstat_t *ksp, int rw)
{
	struct myri10ge_info *info;
	struct myri10ge_priv *mgp;


	if (rw == KSTAT_WRITE)
		return (EACCES);

	info = (struct myri10ge_info *)ksp->ks_data;
	mgp = (struct myri10ge_priv *)ksp->ks_private;
	kstat_named_setstr(&info->driver_version, MYRI10GE_VERSION_STR);
	kstat_named_setstr(&info->firmware_version, mgp->fw_version);
	kstat_named_setstr(&info->firmware_name, mgp->fw_name);
	kstat_named_setstr(&info->interrupt_type, mgp->intr_type);
	kstat_named_setstr(&info->product_code, mgp->pc_str);
	kstat_named_setstr(&info->serial_number, mgp->sn_str);
	return (0);
}
4049 4049
/*
 * Template for the virtual "myri10ge_info" kstat: all entries are
 * strings filled in by myri10ge_info_kstat_update().  Shared by all
 * instances, hence the accompanying lock used as ks_lock.
 */
static struct myri10ge_info myri10ge_info_template = {
	{ "driver_version",	KSTAT_DATA_STRING },
	{ "firmware_version",	KSTAT_DATA_STRING },
	{ "firmware_name",	KSTAT_DATA_STRING },
	{ "interrupt_type",	KSTAT_DATA_STRING },
	{ "product_code",	KSTAT_DATA_STRING },
	{ "serial_number",	KSTAT_DATA_STRING },
};
static kmutex_t myri10ge_info_template_lock;
4059 4059
4060 4060
/*
 * Create and install the virtual "myri10ge_info" kstat, which exposes
 * the driver/firmware identification strings.  Because the kstat is
 * KSTAT_FLAG_VIRTUAL and shares the template across instances,
 * ks_data_size must account for every string's storage so snapshots
 * allocate enough space.
 */
static int
myri10ge_info_init(struct myri10ge_priv *mgp)
{
	struct kstat *ksp;

	ksp = kstat_create("myri10ge", ddi_get_instance(mgp->dip),
	    "myri10ge_info", "net", KSTAT_TYPE_NAMED,
	    sizeof (myri10ge_info_template) /
	    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (ksp == NULL) {
		cmn_err(CE_WARN,
		    "%s: myri10ge_info_init: kstat_create failed", mgp->name);
		return (DDI_FAILURE);
	}
	mgp->ksp_info = ksp;
	ksp->ks_update = myri10ge_info_kstat_update;
	ksp->ks_private = (void *) mgp;
	ksp->ks_data = &myri10ge_info_template;
	ksp->ks_lock = &myri10ge_info_template_lock;
	/* account for each string so kstat snapshot buffers are big enough */
	if (MYRI10GE_VERSION_STR != NULL)
		ksp->ks_data_size += strlen(MYRI10GE_VERSION_STR) + 1;
	if (mgp->fw_version != NULL)
		ksp->ks_data_size += strlen(mgp->fw_version) + 1;
	ksp->ks_data_size += strlen(mgp->fw_name) + 1;
	ksp->ks_data_size += strlen(mgp->intr_type) + 1;
	if (mgp->pc_str != NULL)
		ksp->ks_data_size += strlen(mgp->pc_str) + 1;
	if (mgp->sn_str != NULL)
		ksp->ks_data_size += strlen(mgp->sn_str) + 1;

	kstat_install(ksp);
	return (DDI_SUCCESS);
}
4094 4094
4095 4095
4096 4096 static int
4097 4097 myri10ge_nic_stat_init(struct myri10ge_priv *mgp)
4098 4098 {
4099 4099 struct kstat *ksp;
4100 4100 struct myri10ge_nic_stat *ethstat;
4101 4101
4102 4102 ksp = kstat_create("myri10ge", ddi_get_instance(mgp->dip),
4103 4103 "myri10ge_nic_stats", "net", KSTAT_TYPE_NAMED,
4104 4104 sizeof (*ethstat) / sizeof (kstat_named_t), 0);
4105 4105 if (ksp == NULL) {
4106 4106 cmn_err(CE_WARN,
4107 4107 "%s: myri10ge_stat_init: kstat_create failed", mgp->name);
4108 4108 return (DDI_FAILURE);
4109 4109 }
4110 4110 mgp->ksp_stat = ksp;
4111 4111 ethstat = (struct myri10ge_nic_stat *)(ksp->ks_data);
4112 4112
4113 4113 kstat_named_init(ðstat->dma_read_bw_MBs,
4114 4114 "dma_read_bw_MBs", KSTAT_DATA_ULONG);
4115 4115 kstat_named_init(ðstat->dma_write_bw_MBs,
4116 4116 "dma_write_bw_MBs", KSTAT_DATA_ULONG);
4117 4117 kstat_named_init(ðstat->dma_read_write_bw_MBs,
4118 4118 "dma_read_write_bw_MBs", KSTAT_DATA_ULONG);
4119 4119 kstat_named_init(ðstat->dma_force_physical,
4120 4120 "dma_force_physical", KSTAT_DATA_ULONG);
4121 4121 kstat_named_init(ðstat->lanes,
4122 4122 "lanes", KSTAT_DATA_ULONG);
4123 4123 kstat_named_init(ðstat->dropped_bad_crc32,
4124 4124 "dropped_bad_crc32", KSTAT_DATA_ULONG);
4125 4125 kstat_named_init(ðstat->dropped_bad_phy,
4126 4126 "dropped_bad_phy", KSTAT_DATA_ULONG);
4127 4127 kstat_named_init(ðstat->dropped_link_error_or_filtered,
4128 4128 "dropped_link_error_or_filtered", KSTAT_DATA_ULONG);
4129 4129 kstat_named_init(ðstat->dropped_link_overflow,
4130 4130 "dropped_link_overflow", KSTAT_DATA_ULONG);
4131 4131 kstat_named_init(ðstat->dropped_multicast_filtered,
4132 4132 "dropped_multicast_filtered", KSTAT_DATA_ULONG);
4133 4133 kstat_named_init(ðstat->dropped_no_big_buffer,
4134 4134 "dropped_no_big_buffer", KSTAT_DATA_ULONG);
4135 4135 kstat_named_init(ðstat->dropped_no_small_buffer,
4136 4136 "dropped_no_small_buffer", KSTAT_DATA_ULONG);
4137 4137 kstat_named_init(ðstat->dropped_overrun,
4138 4138 "dropped_overrun", KSTAT_DATA_ULONG);
4139 4139 kstat_named_init(ðstat->dropped_pause,
4140 4140 "dropped_pause", KSTAT_DATA_ULONG);
4141 4141 kstat_named_init(ðstat->dropped_runt,
4142 4142 "dropped_runt", KSTAT_DATA_ULONG);
4143 4143 kstat_named_init(ðstat->dropped_unicast_filtered,
4144 4144 "dropped_unicast_filtered", KSTAT_DATA_ULONG);
4145 4145 kstat_named_init(ðstat->dropped_runt, "dropped_runt",
4146 4146 KSTAT_DATA_ULONG);
4147 4147 kstat_named_init(ðstat->link_up, "link_up", KSTAT_DATA_ULONG);
4148 4148 kstat_named_init(ðstat->link_changes, "link_changes",
4149 4149 KSTAT_DATA_ULONG);
4150 4150 ksp->ks_update = myri10ge_nic_stat_kstat_update;
4151 4151 ksp->ks_private = (void *) mgp;
4152 4152 kstat_install(ksp);
4153 4153 return (DDI_SUCCESS);
4154 4154 }
4155 4155
4156 4156 static int
4157 4157 myri10ge_slice_stat_init(struct myri10ge_slice_state *ss)
4158 4158 {
4159 4159 struct myri10ge_priv *mgp = ss->mgp;
4160 4160 struct kstat *ksp;
4161 4161 struct myri10ge_slice_stat *ethstat;
4162 4162 int instance;
4163 4163
4164 4164 /*
4165 4165 * fake an instance so that the same slice numbers from
4166 4166 * different instances do not collide
4167 4167 */
4168 4168 instance = (ddi_get_instance(mgp->dip) * 1000) + (int)(ss - mgp->ss);
4169 4169 ksp = kstat_create("myri10ge", instance,
4170 4170 "myri10ge_slice_stats", "net", KSTAT_TYPE_NAMED,
4171 4171 sizeof (*ethstat) / sizeof (kstat_named_t), 0);
4172 4172 if (ksp == NULL) {
4173 4173 cmn_err(CE_WARN,
4174 4174 "%s: myri10ge_stat_init: kstat_create failed", mgp->name);
4175 4175 return (DDI_FAILURE);
4176 4176 }
4177 4177 ss->ksp_stat = ksp;
4178 4178 ethstat = (struct myri10ge_slice_stat *)(ksp->ks_data);
4179 4179 kstat_named_init(ðstat->lro_bad_csum, "lro_bad_csum",
4180 4180 KSTAT_DATA_ULONG);
4181 4181 kstat_named_init(ðstat->lro_flushed, "lro_flushed",
4182 4182 KSTAT_DATA_ULONG);
4183 4183 kstat_named_init(ðstat->lro_queued, "lro_queued",
4184 4184 KSTAT_DATA_ULONG);
4185 4185 kstat_named_init(ðstat->rx_bigbuf_firmware, "rx_bigbuf_firmware",
4186 4186 KSTAT_DATA_ULONG);
4187 4187 kstat_named_init(ðstat->rx_bigbuf_pool, "rx_bigbuf_pool",
4188 4188 KSTAT_DATA_ULONG);
4189 4189 kstat_named_init(ðstat->rx_bigbuf_smalls, "rx_bigbuf_smalls",
4190 4190 KSTAT_DATA_ULONG);
4191 4191 kstat_named_init(ðstat->rx_copy, "rx_copy",
4192 4192 KSTAT_DATA_ULONG);
4193 4193 kstat_named_init(ðstat->rx_big_nobuf, "rx_big_nobuf",
4194 4194 KSTAT_DATA_ULONG);
4195 4195 kstat_named_init(ðstat->rx_small_nobuf, "rx_small_nobuf",
4196 4196 KSTAT_DATA_ULONG);
4197 4197 kstat_named_init(ðstat->xmit_zero_len, "xmit_zero_len",
4198 4198 KSTAT_DATA_ULONG);
4199 4199 kstat_named_init(ðstat->xmit_pullup, "xmit_pullup",
4200 4200 KSTAT_DATA_ULONG);
4201 4201 kstat_named_init(ðstat->xmit_pullup_first, "xmit_pullup_first",
4202 4202 KSTAT_DATA_ULONG);
4203 4203 kstat_named_init(ðstat->xmit_lowbuf, "xmit_lowbuf",
4204 4204 KSTAT_DATA_ULONG);
4205 4205 kstat_named_init(ðstat->xmit_lsobadflags, "xmit_lsobadflags",
4206 4206 KSTAT_DATA_ULONG);
4207 4207 kstat_named_init(ðstat->xmit_sched, "xmit_sched",
4208 4208 KSTAT_DATA_ULONG);
4209 4209 kstat_named_init(ðstat->xmit_stall, "xmit_stall",
4210 4210 KSTAT_DATA_ULONG);
4211 4211 kstat_named_init(ðstat->xmit_stall_early, "xmit_stall_early",
4212 4212 KSTAT_DATA_ULONG);
4213 4213 kstat_named_init(ðstat->xmit_stall_late, "xmit_stall_late",
4214 4214 KSTAT_DATA_ULONG);
4215 4215 kstat_named_init(ðstat->xmit_err, "xmit_err",
4216 4216 KSTAT_DATA_ULONG);
4217 4217 kstat_named_init(ðstat->tx_req, "tx_req",
4218 4218 KSTAT_DATA_ULONG);
4219 4219 kstat_named_init(ðstat->tx_activate, "tx_activate",
4220 4220 KSTAT_DATA_ULONG);
4221 4221 kstat_named_init(ðstat->tx_done, "tx_done",
4222 4222 KSTAT_DATA_ULONG);
4223 4223 kstat_named_init(ðstat->tx_handles_alloced, "tx_handles_alloced",
4224 4224 KSTAT_DATA_ULONG);
4225 4225 kstat_named_init(ðstat->rx_big, "rx_big",
4226 4226 KSTAT_DATA_ULONG);
4227 4227 kstat_named_init(ðstat->rx_small, "rx_small",
4228 4228 KSTAT_DATA_ULONG);
4229 4229 ksp->ks_update = myri10ge_slice_stat_kstat_update;
4230 4230 ksp->ks_private = (void *) ss;
4231 4231 kstat_install(ksp);
4232 4232 return (DDI_SUCCESS);
4233 4233 }
4234 4234
4235 4235
4236 4236
4237 4237 #if defined __i386 || defined i386 || defined __i386__ || defined __x86_64__
4238 4238
4239 4239 #include <vm/hat.h>
4240 4240 #include <sys/ddi_isa.h>
4241 4241 void *device_arena_alloc(size_t size, int vm_flag);
4242 4242 void device_arena_free(void *vaddr, size_t size);
4243 4243
4244 4244 static void
4245 4245 myri10ge_enable_nvidia_ecrc(struct myri10ge_priv *mgp)
4246 4246 {
4247 4247 dev_info_t *parent_dip;
4248 4248 ddi_acc_handle_t handle;
4249 4249 unsigned long bus_number, dev_number, func_number;
4250 4250 unsigned long cfg_pa, paddr, base, pgoffset;
4251 4251 char *cvaddr, *ptr;
4252 4252 uint32_t *ptr32;
4253 4253 int retval = DDI_FAILURE;
4254 4254 int dontcare;
4255 4255 uint16_t read_vid, read_did, vendor_id, device_id;
4256 4256
4257 4257 if (!myri10ge_nvidia_ecrc_enable)
4258 4258 return;
4259 4259
4260 4260 parent_dip = ddi_get_parent(mgp->dip);
4261 4261 if (parent_dip == NULL) {
4262 4262 cmn_err(CE_WARN, "%s: I'm an orphan?", mgp->name);
4263 4263 return;
4264 4264 }
4265 4265
4266 4266 if (pci_config_setup(parent_dip, &handle) != DDI_SUCCESS) {
4267 4267 cmn_err(CE_WARN,
4268 4268 "%s: Could not access my parent's registers", mgp->name);
4269 4269 return;
4270 4270 }
4271 4271
4272 4272 vendor_id = pci_config_get16(handle, PCI_CONF_VENID);
4273 4273 device_id = pci_config_get16(handle, PCI_CONF_DEVID);
4274 4274 pci_config_teardown(&handle);
4275 4275
4276 4276 if (myri10ge_verbose) {
4277 4277 unsigned long bus_number, dev_number, func_number;
4278 4278 int reg_set, span;
4279 4279 (void) myri10ge_reg_set(parent_dip, ®_set, &span,
4280 4280 &bus_number, &dev_number, &func_number);
4281 4281 if (myri10ge_verbose)
4282 4282 printf("%s: parent at %ld:%ld:%ld\n", mgp->name,
4283 4283 bus_number, dev_number, func_number);
4284 4284 }
4285 4285
4286 4286 if (vendor_id != 0x10de)
4287 4287 return;
4288 4288
4289 4289 if (device_id != 0x005d /* CK804 */ &&
4290 4290 (device_id < 0x374 || device_id > 0x378) /* MCP55 */) {
4291 4291 return;
4292 4292 }
4293 4293 (void) myri10ge_reg_set(parent_dip, &dontcare, &dontcare,
4294 4294 &bus_number, &dev_number, &func_number);
4295 4295
4296 4296 for (cfg_pa = 0xf0000000UL;
4297 4297 retval != DDI_SUCCESS && cfg_pa >= 0xe0000000UL;
4298 4298 cfg_pa -= 0x10000000UL) {
4299 4299 /* find the config space address for the nvidia bridge */
4300 4300 paddr = (cfg_pa + bus_number * 0x00100000UL +
4301 4301 (dev_number * 8 + func_number) * 0x00001000UL);
4302 4302
4303 4303 base = paddr & (~MMU_PAGEOFFSET);
4304 4304 pgoffset = paddr & MMU_PAGEOFFSET;
4305 4305
4306 4306 /* map it into the kernel */
4307 4307 cvaddr = device_arena_alloc(ptob(1), VM_NOSLEEP);
4308 4308 if (cvaddr == NULL)
4309 4309 cmn_err(CE_WARN, "%s: failed to map nf4: cvaddr\n",
4310 4310 mgp->name);
4311 4311
4312 4312 hat_devload(kas.a_hat, cvaddr, mmu_ptob(1),
4313 4313 i_ddi_paddr_to_pfn(base),
4314 4314 PROT_WRITE|HAT_STRICTORDER, HAT_LOAD_LOCK);
4315 4315
4316 4316 ptr = cvaddr + pgoffset;
4317 4317 read_vid = *(uint16_t *)(void *)(ptr + PCI_CONF_VENID);
4318 4318 read_did = *(uint16_t *)(void *)(ptr + PCI_CONF_DEVID);
4319 4319 if (vendor_id == read_did || device_id == read_did) {
4320 4320 ptr32 = (uint32_t *)(void *)(ptr + 0x178);
4321 4321 if (myri10ge_verbose)
4322 4322 printf("%s: Enabling ECRC on upstream "
4323 4323 "Nvidia bridge (0x%x:0x%x) "
4324 4324 "at %ld:%ld:%ld\n", mgp->name,
4325 4325 read_vid, read_did, bus_number,
4326 4326 dev_number, func_number);
4327 4327 *ptr32 |= 0x40;
4328 4328 retval = DDI_SUCCESS;
4329 4329 }
4330 4330 hat_unload(kas.a_hat, cvaddr, ptob(1), HAT_UNLOAD_UNLOCK);
4331 4331 device_arena_free(cvaddr, ptob(1));
4332 4332 }
4333 4333 }
4334 4334
4335 4335 #else
/* Non-x86 stub: ECRC enabling in the Nvidia bridge is x86-only. */
/*ARGSUSED*/
static void
myri10ge_enable_nvidia_ecrc(struct myri10ge_priv *mgp)
{
}
4341 4341 #endif /* i386 */
4342 4342
4343 4343
4344 4344 /*
4345 4345 * The Lanai Z8E PCI-E interface achieves higher Read-DMA throughput
4346 4346 * when the PCI-E Completion packets are aligned on an 8-byte
4347 4347 * boundary. Some PCI-E chip sets always align Completion packets; on
4348 4348 * the ones that do not, the alignment can be enforced by enabling
4349 4349 * ECRC generation (if supported).
4350 4350 *
4351 4351 * When PCI-E Completion packets are not aligned, it is actually more
4352 4352 * efficient to limit Read-DMA transactions to 2KB, rather than 4KB.
4353 4353 *
4354 4354 * If the driver can neither enable ECRC nor verify that it has
4355 4355 * already been enabled, then it must use a firmware image which works
4356 4356 * around unaligned completion packets (ethp_z8e.dat), and it should
4357 4357 * also ensure that it never gives the device a Read-DMA which is
4358 4358 * larger than 2KB by setting the tx.boundary to 2KB. If ECRC is
4359 4359 * enabled, then the driver should use the aligned (eth_z8e.dat)
4360 4360 * firmware image, and set tx.boundary to 4KB.
4361 4361 */
4362 4362
4363 4363
/*
 * Try the aligned-completion ("rss_eth_z8e") firmware: load it,
 * attempt to enable ECRC in an Nvidia upstream bridge, then run the
 * firmware's unaligned-completion DMA test.  Returns 0 if the aligned
 * firmware is safe to keep; non-zero means the caller must fall back
 * to the unaligned ("ethp") firmware.
 */
static int
myri10ge_firmware_probe(struct myri10ge_priv *mgp)
{
	int status;

	mgp->tx_boundary = 4096;
	/*
	 * Verify the max read request size was set to 4KB
	 * before trying the test with 4KB.
	 */
	if (mgp->max_read_request_4k == 0)
		mgp->tx_boundary = 2048;
	/*
	 * load the optimized firmware which assumes aligned PCIe
	 * completions in order to see if it works on this host.
	 */

	mgp->fw_name = "rss_eth_z8e";
	mgp->eth_z8e = (unsigned char *)rss_eth_z8e;
	mgp->eth_z8e_length = rss_eth_z8e_length;

	status = myri10ge_load_firmware(mgp);
	if (status != 0) {
		return (status);
	}
	/*
	 * Enable ECRC if possible
	 */
	myri10ge_enable_nvidia_ecrc(mgp);

	/*
	 * Run a DMA test which watches for unaligned completions and
	 * aborts on the first one seen.  E2BIG means the test saw an
	 * unaligned completion (expected on some chipsets); other
	 * errors are worth a warning.
	 */
	status = myri10ge_dma_test(mgp, MXGEFW_CMD_UNALIGNED_TEST);
	if (status == 0)
		return (0); /* keep the aligned firmware */

	if (status != E2BIG)
		cmn_err(CE_WARN, "%s: DMA test failed: %d\n",
		    mgp->name, status);
	if (status == ENOSYS)
		cmn_err(CE_WARN, "%s: Falling back to ethp! "
		    "Please install up to date fw\n", mgp->name);
	return (status);
}
4410 4410
/*
 * Decide between the aligned ("rss_eth_z8e", 4KB tx boundary) and
 * unaligned ("rss_ethp_z8e", 2KB tx boundary) firmware images, then
 * load the chosen image.  The myri10ge_force_firmware tunable can
 * force either choice; otherwise a narrow (<x8) PCIe link implies
 * aligned completions, and failing that the DMA probe decides.
 */
static int
myri10ge_select_firmware(struct myri10ge_priv *mgp)
{
	int aligned;

	aligned = 0;

	if (myri10ge_force_firmware == 1) {
		if (myri10ge_verbose)
			printf("%s: Assuming aligned completions (forced)\n",
			    mgp->name);
		aligned = 1;
		goto done;
	}

	if (myri10ge_force_firmware == 2) {
		if (myri10ge_verbose)
			printf("%s: Assuming unaligned completions (forced)\n",
			    mgp->name);
		aligned = 0;
		goto done;
	}

	/* If the width is less than 8, we may use the aligned firmware */
	if (mgp->pcie_link_width != 0 && mgp->pcie_link_width < 8) {
		cmn_err(CE_WARN, "!%s: PCIe link running at x%d\n",
		    mgp->name, mgp->pcie_link_width);
		aligned = 1;
		goto done;
	}

	/* probe loads and tests the aligned firmware itself */
	if (0 == myri10ge_firmware_probe(mgp))
		return (0); /* keep optimized firmware */

done:
	if (aligned) {
		mgp->fw_name = "rss_eth_z8e";
		mgp->eth_z8e = (unsigned char *)rss_eth_z8e;
		mgp->eth_z8e_length = rss_eth_z8e_length;
		mgp->tx_boundary = 4096;
	} else {
		mgp->fw_name = "rss_ethp_z8e";
		mgp->eth_z8e = (unsigned char *)rss_ethp_z8e;
		mgp->eth_z8e_length = rss_ethp_z8e_length;
		mgp->tx_boundary = 2048;
	}

	return (myri10ge_load_firmware(mgp));
}
4460 4460
4461 4461 static int
4462 4462 myri10ge_add_intrs(struct myri10ge_priv *mgp, int add_handler)
4463 4463 {
4464 4464 dev_info_t *devinfo = mgp->dip;
4465 4465 int count, avail, actual, intr_types;
4466 4466 int x, y, rc, inum = 0;
4467 4467
4468 4468
4469 4469 rc = ddi_intr_get_supported_types(devinfo, &intr_types);
4470 4470 if (rc != DDI_SUCCESS) {
4471 4471 cmn_err(CE_WARN,
4472 4472 "!%s: ddi_intr_get_nintrs() failure, rc = %d\n", mgp->name,
4473 4473 rc);
4474 4474 return (DDI_FAILURE);
4475 4475 }
4476 4476
4477 4477 if (!myri10ge_use_msi)
4478 4478 intr_types &= ~DDI_INTR_TYPE_MSI;
4479 4479 if (!myri10ge_use_msix)
4480 4480 intr_types &= ~DDI_INTR_TYPE_MSIX;
4481 4481
4482 4482 if (intr_types & DDI_INTR_TYPE_MSIX) {
4483 4483 mgp->ddi_intr_type = DDI_INTR_TYPE_MSIX;
4484 4484 mgp->intr_type = "MSI-X";
4485 4485 } else if (intr_types & DDI_INTR_TYPE_MSI) {
4486 4486 mgp->ddi_intr_type = DDI_INTR_TYPE_MSI;
4487 4487 mgp->intr_type = "MSI";
4488 4488 } else {
4489 4489 mgp->ddi_intr_type = DDI_INTR_TYPE_FIXED;
4490 4490 mgp->intr_type = "Legacy";
4491 4491 }
4492 4492 /* Get number of interrupts */
4493 4493 rc = ddi_intr_get_nintrs(devinfo, mgp->ddi_intr_type, &count);
4494 4494 if ((rc != DDI_SUCCESS) || (count == 0)) {
4495 4495 cmn_err(CE_WARN, "%s: ddi_intr_get_nintrs() failure, rc: %d, "
4496 4496 "count: %d", mgp->name, rc, count);
4497 4497
4498 4498 return (DDI_FAILURE);
4499 4499 }
4500 4500
4501 4501 /* Get number of available interrupts */
4502 4502 rc = ddi_intr_get_navail(devinfo, mgp->ddi_intr_type, &avail);
4503 4503 if ((rc != DDI_SUCCESS) || (avail == 0)) {
4504 4504 cmn_err(CE_WARN, "%s: ddi_intr_get_navail() failure, "
4505 4505 "rc: %d, avail: %d\n", mgp->name, rc, avail);
4506 4506 return (DDI_FAILURE);
4507 4507 }
4508 4508 if (avail < count) {
4509 4509 cmn_err(CE_NOTE,
4510 4510 "!%s: nintrs() returned %d, navail returned %d",
4511 4511 mgp->name, count, avail);
4512 4512 count = avail;
4513 4513 }
4514 4514
4515 4515 if (count < mgp->num_slices)
4516 4516 return (DDI_FAILURE);
4517 4517
4518 4518 if (count > mgp->num_slices)
4519 4519 count = mgp->num_slices;
4520 4520
4521 4521 /* Allocate memory for MSI interrupts */
4522 4522 mgp->intr_size = count * sizeof (ddi_intr_handle_t);
4523 4523 mgp->htable = kmem_alloc(mgp->intr_size, KM_SLEEP);
4524 4524
4525 4525 rc = ddi_intr_alloc(devinfo, mgp->htable, mgp->ddi_intr_type, inum,
4526 4526 count, &actual, DDI_INTR_ALLOC_NORMAL);
4527 4527
4528 4528 if ((rc != DDI_SUCCESS) || (actual == 0)) {
4529 4529 cmn_err(CE_WARN, "%s: ddi_intr_alloc() failed: %d",
4530 4530 mgp->name, rc);
4531 4531
4532 4532 kmem_free(mgp->htable, mgp->intr_size);
4533 4533 mgp->htable = NULL;
4534 4534 return (DDI_FAILURE);
4535 4535 }
4536 4536
4537 4537 if ((actual < count) && myri10ge_verbose) {
4538 4538 cmn_err(CE_NOTE, "%s: got %d/%d slices",
4539 4539 mgp->name, actual, count);
4540 4540 }
4541 4541
4542 4542 mgp->intr_cnt = actual;
4543 4543
4544 4544 /*
4545 4545 * Get priority for first irq, assume remaining are all the same
4546 4546 */
4547 4547 if (ddi_intr_get_pri(mgp->htable[0], &mgp->intr_pri)
4548 4548 != DDI_SUCCESS) {
4549 4549 cmn_err(CE_WARN, "%s: ddi_intr_get_pri() failed", mgp->name);
4550 4550
4551 4551 /* Free already allocated intr */
4552 4552 for (y = 0; y < actual; y++) {
4553 4553 (void) ddi_intr_free(mgp->htable[y]);
4554 4554 }
4555 4555
4556 4556 kmem_free(mgp->htable, mgp->intr_size);
4557 4557 mgp->htable = NULL;
4558 4558 return (DDI_FAILURE);
4559 4559 }
4560 4560
4561 4561 mgp->icookie = (void *)(uintptr_t)mgp->intr_pri;
4562 4562
4563 4563 if (!add_handler)
4564 4564 return (DDI_SUCCESS);
4565 4565
4566 4566 /* Call ddi_intr_add_handler() */
4567 4567 for (x = 0; x < actual; x++) {
4568 4568 if (ddi_intr_add_handler(mgp->htable[x], myri10ge_intr,
4569 4569 (caddr_t)&mgp->ss[x], NULL) != DDI_SUCCESS) {
4570 4570 cmn_err(CE_WARN, "%s: ddi_intr_add_handler() failed",
4571 4571 mgp->name);
4572 4572
4573 4573 /* Free already allocated intr */
4574 4574 for (y = 0; y < actual; y++) {
4575 4575 (void) ddi_intr_free(mgp->htable[y]);
4576 4576 }
4577 4577
4578 4578 kmem_free(mgp->htable, mgp->intr_size);
4579 4579 mgp->htable = NULL;
4580 4580 return (DDI_FAILURE);
4581 4581 }
4582 4582 }
4583 4583
4584 4584 (void) ddi_intr_get_cap(mgp->htable[0], &mgp->intr_cap);
4585 4585 if (mgp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4586 4586 /* Call ddi_intr_block_enable() for MSI */
4587 4587 (void) ddi_intr_block_enable(mgp->htable, mgp->intr_cnt);
4588 4588 } else {
4589 4589 /* Call ddi_intr_enable() for MSI non block enable */
4590 4590 for (x = 0; x < mgp->intr_cnt; x++) {
4591 4591 (void) ddi_intr_enable(mgp->htable[x]);
4592 4592 }
4593 4593 }
4594 4594
4595 4595 return (DDI_SUCCESS);
4596 4596 }
4597 4597
4598 4598 static void
4599 4599 myri10ge_rem_intrs(struct myri10ge_priv *mgp, int handler_installed)
4600 4600 {
4601 4601 int x, err;
4602 4602
4603 4603 /* Disable all interrupts */
4604 4604 if (handler_installed) {
4605 4605 if (mgp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4606 4606 /* Call ddi_intr_block_disable() */
4607 4607 (void) ddi_intr_block_disable(mgp->htable,
4608 4608 mgp->intr_cnt);
4609 4609 } else {
4610 4610 for (x = 0; x < mgp->intr_cnt; x++) {
4611 4611 (void) ddi_intr_disable(mgp->htable[x]);
4612 4612 }
4613 4613 }
4614 4614 }
4615 4615
4616 4616 for (x = 0; x < mgp->intr_cnt; x++) {
4617 4617 if (handler_installed) {
4618 4618 /* Call ddi_intr_remove_handler() */
4619 4619 err = ddi_intr_remove_handler(mgp->htable[x]);
4620 4620 if (err != DDI_SUCCESS) {
4621 4621 cmn_err(CE_WARN,
4622 4622 "%s: ddi_intr_remove_handler for"
4623 4623 "vec %d returned %d\n", mgp->name,
4624 4624 x, err);
4625 4625 }
4626 4626 }
4627 4627 err = ddi_intr_free(mgp->htable[x]);
4628 4628 if (err != DDI_SUCCESS) {
4629 4629 cmn_err(CE_WARN,
4630 4630 "%s: ddi_intr_free for vec %d returned %d\n",
4631 4631 mgp->name, x, err);
4632 4632 }
4633 4633 }
4634 4634 kmem_free(mgp->htable, mgp->intr_size);
4635 4635 mgp->htable = NULL;
4636 4636 }
4637 4637
/*
 * Probe whether this platform supports DDI_DMA_FORCE_PHYSICAL (bypass
 * the IOMMU) for tx DMA.  Two probes are needed: a handle-allocation
 * attempt (sufficient on older sparc) and an actual alloc+bind (the
 * bind is what fails on some platforms).  On failure the flag is
 * cleared from the global tx DMA attributes and the IOMMU is used.
 */
static void
myri10ge_test_physical(dev_info_t *dip)
{
	ddi_dma_handle_t	handle;
	struct myri10ge_dma_stuff dma;
	void *addr;
	int err;

	/* test #1, sufficient for older sparc systems */
	myri10ge_tx_dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL;
	err = ddi_dma_alloc_handle(dip, &myri10ge_tx_dma_attr,
	    DDI_DMA_DONTWAIT, NULL, &handle);
	if (err == DDI_DMA_BADATTR)
		goto fail;
	ddi_dma_free_handle(&handle);

	/* test #2, required on Olympis where the bind is what fails */
	addr = myri10ge_dma_alloc(dip, 128, &myri10ge_tx_dma_attr,
	    &myri10ge_dev_access_attr, DDI_DMA_STREAMING,
	    DDI_DMA_WRITE|DDI_DMA_STREAMING, &dma, 0, DDI_DMA_DONTWAIT);
	if (addr == NULL)
		goto fail;
	myri10ge_dma_free(&dma);
	return;

fail:
	if (myri10ge_verbose)
		printf("myri10ge%d: DDI_DMA_FORCE_PHYSICAL failed, "
		    "using IOMMU\n", ddi_get_instance(dip));

	myri10ge_tx_dma_attr.dma_attr_flags &= ~DDI_DMA_FORCE_PHYSICAL;
}
4670 4670
4671 4671 static void
4672 4672 myri10ge_get_props(dev_info_t *dip)
4673 4673 {
4674 4674
4675 4675 myri10ge_flow_control = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4676 4676 "myri10ge_flow_control", myri10ge_flow_control);
4677 4677
4678 4678 myri10ge_intr_coal_delay = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4679 4679 "myri10ge_intr_coal_delay", myri10ge_intr_coal_delay);
4680 4680
4681 4681 #if defined __i386 || defined i386 || defined __i386__ || defined __x86_64__
4682 4682 myri10ge_nvidia_ecrc_enable = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4683 4683 "myri10ge_nvidia_ecrc_enable", 1);
4684 4684 #endif
4685 4685
4686 4686
4687 4687 myri10ge_use_msi = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4688 4688 "myri10ge_use_msi", myri10ge_use_msi);
4689 4689
4690 4690 myri10ge_deassert_wait = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4691 4691 "myri10ge_deassert_wait", myri10ge_deassert_wait);
4692 4692
4693 4693 myri10ge_verbose = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4694 4694 "myri10ge_verbose", myri10ge_verbose);
4695 4695
4696 4696 myri10ge_tx_copylen = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4697 4697 "myri10ge_tx_copylen", myri10ge_tx_copylen);
4698 4698
4699 4699 if (myri10ge_tx_copylen < 60) {
4700 4700 cmn_err(CE_WARN,
4701 4701 "myri10ge_tx_copylen must be >= 60 bytes\n");
4702 4702 myri10ge_tx_copylen = 60;
4703 4703 }
4704 4704
4705 4705 myri10ge_mtu_override = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4706 4706 "myri10ge_mtu_override", myri10ge_mtu_override);
4707 4707
4708 4708 if (myri10ge_mtu_override >= MYRI10GE_MIN_GLD_MTU &&
4709 4709 myri10ge_mtu_override <= MYRI10GE_MAX_GLD_MTU)
4710 4710 myri10ge_mtu = myri10ge_mtu_override +
4711 4711 sizeof (struct ether_header) + MXGEFW_PAD + VLAN_TAGSZ;
4712 4712 else if (myri10ge_mtu_override != 0) {
4713 4713 cmn_err(CE_WARN,
4714 4714 "myri10ge_mtu_override must be between 1500 and "
4715 4715 "9000 bytes\n");
4716 4716 }
4717 4717
4718 4718 myri10ge_bigbufs_initial = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4719 4719 "myri10ge_bigbufs_initial", myri10ge_bigbufs_initial);
4720 4720 myri10ge_bigbufs_max = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4721 4721 "myri10ge_bigbufs_max", myri10ge_bigbufs_max);
4722 4722
4723 4723 myri10ge_watchdog_reset = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4724 4724 "myri10ge_watchdog_reset", myri10ge_watchdog_reset);
4725 4725
4726 4726 if (myri10ge_bigbufs_initial < 128) {
4727 4727 cmn_err(CE_WARN,
4728 4728 "myri10ge_bigbufs_initial be at least 128\n");
4729 4729 myri10ge_bigbufs_initial = 128;
4730 4730 }
4731 4731 if (myri10ge_bigbufs_max < 128) {
4732 4732 cmn_err(CE_WARN,
4733 4733 "myri10ge_bigbufs_max be at least 128\n");
4734 4734 myri10ge_bigbufs_max = 128;
4735 4735 }
4736 4736
4737 4737 if (myri10ge_bigbufs_max < myri10ge_bigbufs_initial) {
4738 4738 cmn_err(CE_WARN,
4739 4739 "myri10ge_bigbufs_max must be >= "
4740 4740 "myri10ge_bigbufs_initial\n");
4741 4741 myri10ge_bigbufs_max = myri10ge_bigbufs_initial;
4742 4742 }
4743 4743
4744 4744 myri10ge_force_firmware = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4745 4745 "myri10ge_force_firmware", myri10ge_force_firmware);
4746 4746
4747 4747 myri10ge_max_slices = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4748 4748 "myri10ge_max_slices", myri10ge_max_slices);
4749 4749
4750 4750 myri10ge_use_msix = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4751 4751 "myri10ge_use_msix", myri10ge_use_msix);
4752 4752
4753 4753 myri10ge_rss_hash = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4754 4754 "myri10ge_rss_hash", myri10ge_rss_hash);
4755 4755
4756 4756 if (myri10ge_rss_hash > MXGEFW_RSS_HASH_TYPE_MAX ||
4757 4757 myri10ge_rss_hash < MXGEFW_RSS_HASH_TYPE_IPV4) {
4758 4758 cmn_err(CE_WARN, "myri10ge: Illegal rssh hash type %d\n",
4759 4759 myri10ge_rss_hash);
4760 4760 myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT;
4761 4761 }
4762 4762 myri10ge_lro = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4763 4763 "myri10ge_lro", myri10ge_lro);
4764 4764 myri10ge_lro_cnt = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4765 4765 "myri10ge_lro_cnt", myri10ge_lro_cnt);
4766 4766 myri10ge_lro_max_aggr = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4767 4767 "myri10ge_lro_max_aggr", myri10ge_lro_max_aggr);
4768 4768 myri10ge_tx_hash = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4769 4769 "myri10ge_tx_hash", myri10ge_tx_hash);
4770 4770 myri10ge_use_lso = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4771 4771 "myri10ge_use_lso", myri10ge_use_lso);
4772 4772 myri10ge_lso_copy = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4773 4773 "myri10ge_lso_copy", myri10ge_lso_copy);
4774 4774 myri10ge_tx_handles_initial = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4775 4775 "myri10ge_tx_handles_initial", myri10ge_tx_handles_initial);
4776 4776 myri10ge_small_bytes = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4777 4777 "myri10ge_small_bytes", myri10ge_small_bytes);
4778 4778 if ((myri10ge_small_bytes + MXGEFW_PAD) & (128 -1)) {
4779 4779 cmn_err(CE_WARN, "myri10ge: myri10ge_small_bytes (%d)\n",
4780 4780 myri10ge_small_bytes);
4781 4781 cmn_err(CE_WARN, "must be aligned on 128b bndry -2\n");
4782 4782 myri10ge_small_bytes += 128;
4783 4783 myri10ge_small_bytes &= ~(128 -1);
4784 4784 myri10ge_small_bytes -= MXGEFW_PAD;
4785 4785 cmn_err(CE_WARN, "rounded up to %d\n",
4786 4786 myri10ge_small_bytes);
4787 4787
4788 4788 myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT;
4789 4789 }
4790 4790 }
4791 4791
4792 4792 #ifndef PCI_EXP_LNKSTA
4793 4793 #define PCI_EXP_LNKSTA 18
4794 4794 #endif
4795 4795
4796 4796 static int
4797 4797 myri10ge_find_cap(ddi_acc_handle_t handle, uint8_t *capptr, uint8_t capid)
4798 4798 {
4799 4799 uint16_t status;
4800 4800 uint8_t ptr;
4801 4801
4802 4802 /* check to see if we have capabilities */
4803 4803 status = pci_config_get16(handle, PCI_CONF_STAT);
4804 4804 if (!(status & PCI_STAT_CAP)) {
4805 4805 cmn_err(CE_WARN, "PCI_STAT_CAP not found\n");
4806 4806 return (ENXIO);
4807 4807 }
4808 4808
4809 4809 ptr = pci_config_get8(handle, PCI_CONF_CAP_PTR);
4810 4810
4811 4811 /* Walk the capabilities list, looking for a PCI Express cap */
4812 4812 while (ptr != PCI_CAP_NEXT_PTR_NULL) {
4813 4813 if (pci_config_get8(handle, ptr + PCI_CAP_ID) == capid)
4814 4814 break;
4815 4815 ptr = pci_config_get8(handle, ptr + PCI_CAP_NEXT_PTR);
4816 4816 }
4817 4817 if (ptr < 64) {
4818 4818 cmn_err(CE_WARN, "Bad capability offset %d\n", ptr);
4819 4819 return (ENXIO);
4820 4820 }
4821 4821 *capptr = ptr;
4822 4822 return (0);
4823 4823 }
4824 4824
4825 4825 static int
4826 4826 myri10ge_set_max_readreq(ddi_acc_handle_t handle)
4827 4827 {
4828 4828 int err;
4829 4829 uint16_t val;
4830 4830 uint8_t ptr;
4831 4831
4832 4832 err = myri10ge_find_cap(handle, &ptr, PCI_CAP_ID_PCI_E);
4833 4833 if (err != 0) {
4834 4834 cmn_err(CE_WARN, "could not find PCIe cap\n");
4835 4835 return (ENXIO);
4836 4836 }
4837 4837
4838 4838 /* set max read req to 4096 */
4839 4839 val = pci_config_get16(handle, ptr + PCIE_DEVCTL);
4840 4840 val = (val & ~PCIE_DEVCTL_MAX_READ_REQ_MASK) |
4841 4841 PCIE_DEVCTL_MAX_READ_REQ_4096;
4842 4842 pci_config_put16(handle, ptr + PCIE_DEVCTL, val);
4843 4843 val = pci_config_get16(handle, ptr + PCIE_DEVCTL);
4844 4844 if ((val & (PCIE_DEVCTL_MAX_READ_REQ_4096)) !=
4845 4845 PCIE_DEVCTL_MAX_READ_REQ_4096) {
4846 4846 cmn_err(CE_WARN, "could not set max read req (%x)\n", val);
4847 4847 return (EINVAL);
4848 4848 }
4849 4849 return (0);
4850 4850 }
4851 4851
4852 4852 static int
4853 4853 myri10ge_read_pcie_link_width(ddi_acc_handle_t handle, int *link)
4854 4854 {
4855 4855 int err;
4856 4856 uint16_t val;
4857 4857 uint8_t ptr;
4858 4858
4859 4859 err = myri10ge_find_cap(handle, &ptr, PCI_CAP_ID_PCI_E);
4860 4860 if (err != 0) {
4861 4861 cmn_err(CE_WARN, "could not set max read req\n");
4862 4862 return (ENXIO);
4863 4863 }
4864 4864
4865 4865 /* read link width */
4866 4866 val = pci_config_get16(handle, ptr + PCIE_LINKSTS);
4867 4867 val &= PCIE_LINKSTS_NEG_WIDTH_MASK;
4868 4868 *link = (val >> 4);
4869 4869 return (0);
4870 4870 }
4871 4871
/*
 * Attempt to recover a stalled NIC by tearing the interface down and
 * bringing it back up.  Returns 1 when the NIC is usable (recovered,
 * or recovery deliberately skipped), 0 when it is considered dead.
 * Caller is myri10ge_watchdog(), which holds mgp->intrlock.
 */
static int
myri10ge_reset_nic(struct myri10ge_priv *mgp)
{
	ddi_acc_handle_t handle = mgp->cfg_hdl;
	uint32_t reboot;
	uint16_t cmd;
	int err;

	cmd = pci_config_get16(handle, PCI_CONF_COMM);
	if ((cmd & PCI_COMM_ME) == 0) {
		/*
		 * Bus master DMA disabled? Check to see if the card
		 * rebooted due to a parity error For now, just report
		 * it
		 */

		/* enter read32 mode */
		pci_config_put8(handle, mgp->vso + 0x10, 0x3);
		/* read REBOOT_STATUS (0xfffffff0) */
		pci_config_put32(handle, mgp->vso + 0x18, 0xfffffff0);
		/*
		 * NOTE(review): only 16 bits of the reboot status are
		 * fetched here (pci_config_get16) even though reboot is
		 * a uint32_t and the comment above says "read32 mode" —
		 * confirm whether pci_config_get32 was intended.
		 */
		reboot = pci_config_get16(handle, mgp->vso + 0x14);
		cmn_err(CE_WARN, "%s NIC rebooted 0x%x\n", mgp->name, reboot);
		return (0);
	}
	if (!myri10ge_watchdog_reset) {
		/* resets disabled by the myri10ge_watchdog_reset tunable */
		cmn_err(CE_WARN, "%s: not resetting\n", mgp->name);
		return (1);
	}

	/* full stop/start cycle re-initializes the firmware and rings */
	myri10ge_stop_locked(mgp);
	err = myri10ge_start_locked(mgp);
	if (err == DDI_FAILURE) {
		return (0);
	}
	/* let the MAC layer resume any transmits it had blocked */
	mac_tx_update(mgp->mh);
	return (1);
}
4909 4909
4910 4910 static inline int
4911 4911 myri10ge_ring_stalled(myri10ge_tx_ring_t *tx)
4912 4912 {
4913 4913 if (tx->sched != tx->stall &&
4914 4914 tx->done == tx->watchdog_done &&
4915 4915 tx->watchdog_req != tx->watchdog_done)
4916 4916 return (1);
4917 4917 return (0);
4918 4918 }
4919 4919
/*
 * Periodic watchdog, rescheduled via timeout() at the end of each
 * pass while the interface is running.  Detects stalled transmit
 * rings (resetting the NIC unless the stall is explained by incoming
 * pause frames), and tops up the jumbo receive buffer pool when
 * copies indicate it is running low.
 */
static void
myri10ge_watchdog(void *arg)
{
	struct myri10ge_priv *mgp;
	struct myri10ge_slice_state *ss;
	myri10ge_tx_ring_t *tx;
	int nic_ok = 1;
	int slices_stalled, rx_pause, i;
	int add_rx;

	mgp = arg;
	mutex_enter(&mgp->intrlock);
	if (mgp->running != MYRI10GE_ETH_RUNNING) {
		/* interface stopped under us; do not rearm the timer */
		cmn_err(CE_WARN,
		    "%s not running, not rearming watchdog (%d)\n",
		    mgp->name, mgp->running);
		mutex_exit(&mgp->intrlock);
		return;
	}

	/* pause-frame counter, used below to explain apparent stalls */
	rx_pause = ntohl(mgp->ss[0].fw_stats->dropped_pause);

	/*
	 * make sure nic is stalled before we reset the nic, so as to
	 * ensure we don't rip the transmit data structures out from
	 * under a pending transmit
	 */

	for (slices_stalled = 0, i = 0; i < mgp->num_slices; i++) {
		tx = &mgp->ss[i].tx;
		slices_stalled = myri10ge_ring_stalled(tx);
		if (slices_stalled)
			break;
	}

	if (slices_stalled) {
		if (mgp->watchdog_rx_pause == rx_pause) {
			/* no new pause frames: a genuine stall, reset */
			cmn_err(CE_WARN,
			    "%s slice %d stalled:(%d, %d, %d, %d, %d %d %d\n)",
			    mgp->name, i, tx->sched, tx->stall,
			    tx->done, tx->watchdog_done, tx->req, tx->pkt_done,
			    (int)ntohl(mgp->ss[i].fw_stats->send_done_count));
			nic_ok = myri10ge_reset_nic(mgp);
		} else {
			/* stall explained by link-partner flow control */
			cmn_err(CE_WARN,
			    "%s Flow controlled, check link partner\n",
			    mgp->name);
		}
	}

	if (!nic_ok) {
		cmn_err(CE_WARN,
		    "%s Nic dead, not rearming watchdog\n", mgp->name);
		mutex_exit(&mgp->intrlock);
		return;
	}
	/* snapshot per-ring progress for the next pass, and grow jpool */
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];
		tx = &ss->tx;
		tx->watchdog_done = tx->done;
		tx->watchdog_req = tx->req;
		if (ss->watchdog_rx_copy != MYRI10GE_SLICE_STAT(rx_copy)) {
			/* rx copies happened: add jumbo buffers, capped */
			ss->watchdog_rx_copy = MYRI10GE_SLICE_STAT(rx_copy);
			add_rx =
			    min(ss->jpool.num_alloc,
			    myri10ge_bigbufs_max -
			    (ss->jpool.num_alloc -
			    ss->jbufs_for_smalls));
			if (add_rx != 0) {
				(void) myri10ge_add_jbufs(ss, add_rx, 0);
				/* now feed them to the firmware */
				mutex_enter(&ss->jpool.mtx);
				myri10ge_restock_jumbos(ss);
				mutex_exit(&ss->jpool.mtx);
			}
		}
	}
	mgp->watchdog_rx_pause = rx_pause;

	/* rearm ourselves for the next interval */
	mgp->timer_id = timeout(myri10ge_watchdog, mgp,
	    mgp->timer_ticks);
	mutex_exit(&mgp->intrlock);
}
5003 5003
5004 5004 /*ARGSUSED*/
5005 5005 static int
5006 5006 myri10ge_get_coalesce(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
5007 5007
5008 5008 {
5009 5009 struct myri10ge_priv *mgp = (struct myri10ge_priv *)(void *)cp;
5010 5010 (void) mi_mpprintf(mp, "%d", mgp->intr_coal_delay);
5011 5011 return (0);
5012 5012 }
5013 5013
5014 5014 /*ARGSUSED*/
5015 5015 static int
5016 5016 myri10ge_set_coalesce(queue_t *q, mblk_t *mp, char *value,
5017 5017 caddr_t cp, cred_t *credp)
5018 5018
5019 5019 {
5020 5020 struct myri10ge_priv *mgp = (struct myri10ge_priv *)(void *)cp;
5021 5021 char *end;
5022 5022 size_t new_value;
5023 5023
5024 5024 new_value = mi_strtol(value, &end, 10);
5025 5025 if (end == value)
5026 5026 return (EINVAL);
5027 5027
5028 5028 mutex_enter(&myri10ge_param_lock);
5029 5029 mgp->intr_coal_delay = (int)new_value;
5030 5030 *mgp->intr_coal_delay_ptr = htonl(mgp->intr_coal_delay);
5031 5031 mutex_exit(&myri10ge_param_lock);
5032 5032 return (0);
5033 5033 }
5034 5034
5035 5035 /*ARGSUSED*/
5036 5036 static int
5037 5037 myri10ge_get_pauseparam(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
5038 5038
5039 5039 {
5040 5040 struct myri10ge_priv *mgp = (struct myri10ge_priv *)(void *)cp;
5041 5041 (void) mi_mpprintf(mp, "%d", mgp->pause);
5042 5042 return (0);
5043 5043 }
5044 5044
5045 5045 /*ARGSUSED*/
5046 5046 static int
5047 5047 myri10ge_set_pauseparam(queue_t *q, mblk_t *mp, char *value,
5048 5048 caddr_t cp, cred_t *credp)
5049 5049
5050 5050 {
5051 5051 struct myri10ge_priv *mgp = (struct myri10ge_priv *)(void *)cp;
5052 5052 char *end;
5053 5053 size_t new_value;
5054 5054 int err = 0;
5055 5055
5056 5056 new_value = mi_strtol(value, &end, 10);
5057 5057 if (end == value)
5058 5058 return (EINVAL);
5059 5059 if (new_value != 0)
5060 5060 new_value = 1;
5061 5061
5062 5062 mutex_enter(&myri10ge_param_lock);
5063 5063 if (new_value != mgp->pause)
5064 5064 err = myri10ge_change_pause(mgp, new_value);
5065 5065 mutex_exit(&myri10ge_param_lock);
5066 5066 return (err);
5067 5067 }
5068 5068
5069 5069 /*ARGSUSED*/
5070 5070 static int
5071 5071 myri10ge_get_int(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
5072 5072
5073 5073 {
5074 5074 (void) mi_mpprintf(mp, "%d", *(int *)(void *)cp);
5075 5075 return (0);
5076 5076 }
5077 5077
5078 5078 /*ARGSUSED*/
5079 5079 static int
5080 5080 myri10ge_set_int(queue_t *q, mblk_t *mp, char *value,
5081 5081 caddr_t cp, cred_t *credp)
5082 5082
5083 5083 {
5084 5084 char *end;
5085 5085 size_t new_value;
5086 5086
5087 5087 new_value = mi_strtol(value, &end, 10);
5088 5088 if (end == value)
5089 5089 return (EINVAL);
5090 5090 *(int *)(void *)cp = new_value;
5091 5091
5092 5092 return (0);
5093 5093 }
5094 5094
5095 5095 static void
5096 5096 myri10ge_ndd_init(struct myri10ge_priv *mgp)
5097 5097 {
5098 5098 mgp->nd_head = NULL;
5099 5099
5100 5100 (void) nd_load(&mgp->nd_head, "myri10ge_intr_coal_delay",
5101 5101 myri10ge_get_coalesce, myri10ge_set_coalesce, (caddr_t)mgp);
5102 5102 (void) nd_load(&mgp->nd_head, "myri10ge_flow_control",
5103 5103 myri10ge_get_pauseparam, myri10ge_set_pauseparam, (caddr_t)mgp);
5104 5104 (void) nd_load(&mgp->nd_head, "myri10ge_verbose",
5105 5105 myri10ge_get_int, myri10ge_set_int, (caddr_t)&myri10ge_verbose);
5106 5106 (void) nd_load(&mgp->nd_head, "myri10ge_deassert_wait",
5107 5107 myri10ge_get_int, myri10ge_set_int,
5108 5108 (caddr_t)&myri10ge_deassert_wait);
5109 5109 (void) nd_load(&mgp->nd_head, "myri10ge_bigbufs_max",
5110 5110 myri10ge_get_int, myri10ge_set_int,
5111 5111 (caddr_t)&myri10ge_bigbufs_max);
5112 5112 (void) nd_load(&mgp->nd_head, "myri10ge_lro",
5113 5113 myri10ge_get_int, myri10ge_set_int,
5114 5114 (caddr_t)&myri10ge_lro);
5115 5115 (void) nd_load(&mgp->nd_head, "myri10ge_lro_max_aggr",
5116 5116 myri10ge_get_int, myri10ge_set_int,
5117 5117 (caddr_t)&myri10ge_lro_max_aggr);
5118 5118 (void) nd_load(&mgp->nd_head, "myri10ge_tx_hash",
5119 5119 myri10ge_get_int, myri10ge_set_int,
5120 5120 (caddr_t)&myri10ge_tx_hash);
5121 5121 (void) nd_load(&mgp->nd_head, "myri10ge_lso_copy",
5122 5122 myri10ge_get_int, myri10ge_set_int,
5123 5123 (caddr_t)&myri10ge_lso_copy);
5124 5124 }
5125 5125
/*
 * Free the ndd parameter list built by myri10ge_ndd_init().
 */
static void
myri10ge_ndd_fini(struct myri10ge_priv *mgp)
{
	nd_free(&mgp->nd_head);
}
5131 5131
5132 5132 static void
5133 5133 myri10ge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
5134 5134 {
5135 5135 struct iocblk *iocp;
5136 5136 struct myri10ge_priv *mgp = arg;
5137 5137 int cmd, ok, err;
5138 5138
5139 5139 iocp = (struct iocblk *)(void *)mp->b_rptr;
5140 5140 cmd = iocp->ioc_cmd;
5141 5141
5142 5142 ok = 0;
5143 5143 err = 0;
5144 5144
5145 5145 switch (cmd) {
5146 5146 case ND_GET:
5147 5147 case ND_SET:
5148 5148 ok = nd_getset(wq, mgp->nd_head, mp);
5149 5149 break;
5150 5150 default:
5151 5151 break;
5152 5152 }
5153 5153 if (!ok)
5154 5154 err = EINVAL;
5155 5155 else
5156 5156 err = iocp->ioc_error;
5157 5157
5158 5158 if (!err)
5159 5159 miocack(wq, mp, iocp->ioc_count, err);
5160 5160 else
5161 5161 miocnak(wq, mp, 0, err);
5162 5162 }
5163 5163
/* instance list; walked under myri10ge_param_lock (see get_instance) */
static struct myri10ge_priv *mgp_list;
5165 5165
5166 5166 struct myri10ge_priv *
5167 5167 myri10ge_get_instance(uint_t unit)
5168 5168 {
5169 5169 struct myri10ge_priv *mgp;
5170 5170
5171 5171 mutex_enter(&myri10ge_param_lock);
5172 5172 for (mgp = mgp_list; mgp != NULL; mgp = mgp->next) {
5173 5173 if (unit == ddi_get_instance(mgp->dip)) {
5174 5174 mgp->refcnt++;
5175 5175 break;
5176 5176 }
5177 5177 }
5178 5178 mutex_exit(&myri10ge_param_lock);
5179 5179 return (mgp);
5180 5180 }
5181 5181
/*
 * Drop a reference taken by myri10ge_get_instance().
 */
void
myri10ge_put_instance(struct myri10ge_priv *mgp)
{
	mutex_enter(&myri10ge_param_lock);
	mgp->refcnt--;
	mutex_exit(&myri10ge_param_lock);
}
5189 5189
5190 5190 static boolean_t
5191 5191 myri10ge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
5192 5192 {
5193 5193 struct myri10ge_priv *mgp = arg;
5194 5194 uint32_t *cap_hcksum;
5195 5195 mac_capab_lso_t *cap_lso;
5196 5196 mac_capab_rings_t *cap_rings;
5197 5197
5198 5198 switch (cap) {
5199 5199 case MAC_CAPAB_HCKSUM:
5200 5200 cap_hcksum = cap_data;
5201 5201 *cap_hcksum = HCKSUM_INET_PARTIAL;
5202 5202 break;
5203 5203 case MAC_CAPAB_RINGS:
5204 5204 cap_rings = cap_data;
5205 5205 switch (cap_rings->mr_type) {
5206 5206 case MAC_RING_TYPE_RX:
5207 5207 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
5208 5208 cap_rings->mr_rnum = mgp->num_slices;
5209 5209 cap_rings->mr_gnum = 1;
5210 5210 cap_rings->mr_rget = myri10ge_fill_ring;
5211 5211 cap_rings->mr_gget = myri10ge_fill_group;
5212 5212 break;
5213 5213 case MAC_RING_TYPE_TX:
5214 5214 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
5215 5215 cap_rings->mr_rnum = mgp->num_slices;
5216 5216 cap_rings->mr_gnum = 0;
5217 5217 cap_rings->mr_rget = myri10ge_fill_ring;
5218 5218 cap_rings->mr_gget = NULL;
5219 5219 break;
5220 5220 default:
5221 5221 return (B_FALSE);
5222 5222 }
5223 5223 break;
5224 5224 case MAC_CAPAB_LSO:
5225 5225 cap_lso = cap_data;
5226 5226 if (!myri10ge_use_lso)
5227 5227 return (B_FALSE);
5228 5228 if (!(mgp->features & MYRI10GE_TSO))
5229 5229 return (B_FALSE);
5230 5230 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
5231 5231 cap_lso->lso_basic_tcp_ipv4.lso_max = (uint16_t)-1;
5232 5232 break;
5233 5233
5234 5234 default:
5235 5235 return (B_FALSE);
5236 5236 }
5237 5237 return (B_TRUE);
5238 5238 }
5239 5239
5240 5240
/*
 * GLDv3 statistics entry point.  Software counters are summed across
 * all slices; firmware counters (kept big-endian by the NIC, hence
 * the ntohl() calls) come from slice 0's shared stats block.
 * Returns 0 with *val filled in, or ENOTSUP for statistics this
 * driver does not maintain.
 */
static int
myri10ge_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	struct myri10ge_priv *mgp = arg;
	struct myri10ge_rx_ring_stats *rstat;
	struct myri10ge_tx_ring_stats *tstat;
	mcp_irq_data_t *fw_stats = mgp->ss[0].fw_stats;
	struct myri10ge_slice_state *ss;
	uint64_t tmp = 0;
	int i;

	switch (stat) {
	case MAC_STAT_IFSPEED:
		/* fixed 10 Gb/s link */
		*val = 10ull * 1000ull * 1000000ull;
		break;

	case MAC_STAT_MULTIRCV:
		for (i = 0; i < mgp->num_slices; i++) {
			rstat = &mgp->ss[i].rx_stats;
			tmp += rstat->multircv;
		}
		*val = tmp;
		break;

	case MAC_STAT_BRDCSTRCV:
		for (i = 0; i < mgp->num_slices; i++) {
			rstat = &mgp->ss[i].rx_stats;
			tmp += rstat->brdcstrcv;
		}
		*val = tmp;
		break;

	case MAC_STAT_MULTIXMT:
		for (i = 0; i < mgp->num_slices; i++) {
			tstat = &mgp->ss[i].tx.stats;
			tmp += tstat->multixmt;
		}
		*val = tmp;
		break;

	case MAC_STAT_BRDCSTXMT:
		for (i = 0; i < mgp->num_slices; i++) {
			tstat = &mgp->ss[i].tx.stats;
			tmp += tstat->brdcstxmt;
		}
		*val = tmp;
		break;

	case MAC_STAT_NORCVBUF:
		/* firmware drop counters plus per-slice nobuf counters */
		tmp = ntohl(fw_stats->dropped_no_big_buffer);
		tmp += ntohl(fw_stats->dropped_no_small_buffer);
		tmp += ntohl(fw_stats->dropped_link_overflow);
		for (i = 0; i < mgp->num_slices; i++) {
			ss = &mgp->ss[i];
			tmp += MYRI10GE_SLICE_STAT(rx_big_nobuf);
			tmp += MYRI10GE_SLICE_STAT(rx_small_nobuf);
		}
		*val = tmp;
		break;

	case MAC_STAT_IERRORS:
		tmp += ntohl(fw_stats->dropped_bad_crc32);
		tmp += ntohl(fw_stats->dropped_bad_phy);
		tmp += ntohl(fw_stats->dropped_runt);
		tmp += ntohl(fw_stats->dropped_overrun);
		*val = tmp;
		break;

	case MAC_STAT_OERRORS:
		for (i = 0; i < mgp->num_slices; i++) {
			ss = &mgp->ss[i];
			tmp += MYRI10GE_SLICE_STAT(xmit_lsobadflags);
			tmp += MYRI10GE_SLICE_STAT(xmit_err);
		}
		*val = tmp;
		break;

	case MAC_STAT_RBYTES:
		for (i = 0; i < mgp->num_slices; i++) {
			rstat = &mgp->ss[i].rx_stats;
			tmp += rstat->ibytes;
		}
		*val = tmp;
		break;

	case MAC_STAT_IPACKETS:
		for (i = 0; i < mgp->num_slices; i++) {
			rstat = &mgp->ss[i].rx_stats;
			tmp += rstat->ipackets;
		}
		*val = tmp;
		break;

	case MAC_STAT_OBYTES:
		for (i = 0; i < mgp->num_slices; i++) {
			tstat = &mgp->ss[i].tx.stats;
			tmp += tstat->obytes;
		}
		*val = tmp;
		break;

	case MAC_STAT_OPACKETS:
		for (i = 0; i < mgp->num_slices; i++) {
			tstat = &mgp->ss[i].tx.stats;
			tmp += tstat->opackets;
		}
		*val = tmp;
		break;

	case ETHER_STAT_TOOLONG_ERRORS:
		*val = ntohl(fw_stats->dropped_overrun);
		break;

#ifdef SOLARIS_S11
	case ETHER_STAT_TOOSHORT_ERRORS:
		*val = ntohl(fw_stats->dropped_runt);
		break;
#endif

	case ETHER_STAT_LINK_PAUSE:
		*val = mgp->pause;
		break;

	case ETHER_STAT_LINK_AUTONEG:
		/* link parameters are fixed; report autoneg complete */
		*val = 1;
		break;

	case ETHER_STAT_LINK_DUPLEX:
		*val = LINK_DUPLEX_FULL;
		break;

	default:
		return (ENOTSUP);
	}

	return (0);
}
5378 5378
5379 5379 /* ARGSUSED */
5380 5380 static void
5381 5381 myri10ge_m_propinfo(void *arg, const char *pr_name,
5382 5382 mac_prop_id_t pr_num, mac_prop_info_handle_t prh)
5383 5383 {
5384 5384 switch (pr_num) {
5385 5385 case MAC_PROP_MTU:
5386 5386 mac_prop_info_set_default_uint32(prh, MYRI10GE_DEFAULT_GLD_MTU);
5387 5387 mac_prop_info_set_range_uint32(prh, MYRI10GE_MIN_GLD_MTU,
5388 5388 MYRI10GE_MAX_GLD_MTU);
5389 5389 break;
5390 5390 default:
5391 5391 break;
5392 5392 }
5393 5393 }
5394 5394
/*
 * GLDv3 property set callback.  Only MAC_PROP_MTU is supported; the
 * new MTU is range-checked and may only be changed while the
 * interface is stopped (EBUSY otherwise).  Returns 0, EINVAL, EBUSY
 * or ENOTSUP.
 */
/*ARGSUSED*/
static int
myri10ge_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
	int err = 0;
	struct myri10ge_priv *mgp = arg;

	switch (pr_num) {
	case MAC_PROP_MTU: {
		uint32_t mtu;
		if (pr_valsize < sizeof (mtu)) {
			err = EINVAL;
			break;
		}
		bcopy(pr_val, &mtu, sizeof (mtu));
		if (mtu > MYRI10GE_MAX_GLD_MTU ||
		    mtu < MYRI10GE_MIN_GLD_MTU) {
			err = EINVAL;
			break;
		}

		/* MTU may only change while the interface is down */
		mutex_enter(&mgp->intrlock);
		if (mgp->running != MYRI10GE_ETH_STOPPED) {
			err = EBUSY;
			mutex_exit(&mgp->intrlock);
			break;
		}

		/*
		 * Wire MTU includes the ether header, firmware pad and
		 * VLAN tag.  NOTE(review): this writes the module-global
		 * myri10ge_mtu under a per-instance lock, so with
		 * multiple NICs the value is shared across instances —
		 * confirm that is intended.
		 */
		myri10ge_mtu = mtu + sizeof (struct ether_header) +
		    MXGEFW_PAD + VLAN_TAGSZ;
		mutex_exit(&mgp->intrlock);
		break;
	}
	default:
		err = ENOTSUP;
		break;
	}

	return (err);
}
5436 5436
/*
 * GLDv3 callback vector for this driver.  The first word advertises
 * which optional entry points are supplied (ioctl, getcapab, setprop,
 * propinfo).  Several slots are NULL; rx/tx rings are exposed through
 * MAC_CAPAB_RINGS instead (see myri10ge_m_getcapab()).
 * NOTE(review): this is a positional initializer — the entry order
 * must match mac_callbacks_t exactly; verify against the mac(9E)
 * structure definition when updating.
 */
static mac_callbacks_t myri10ge_m_callbacks = {
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_PROPINFO),
	myri10ge_m_stat,
	myri10ge_m_start,
	myri10ge_m_stop,
	myri10ge_m_promisc,
	myri10ge_m_multicst,
	NULL,
	NULL,
	NULL,
	myri10ge_m_ioctl,
	myri10ge_m_getcapab,
	NULL,
	NULL,
	myri10ge_m_setprop,
	NULL,
	myri10ge_m_propinfo
};
5455 5455
5456 5456
/*
 * Determine how many slices (ring pairs) to use.  Defaults to one;
 * when MSI-X is enabled, queries the firmware for its maximum RSS
 * queue count, caps it by the myri10ge_max_slices tunable (or ncpus),
 * and then probes for a matching number of MSI-X vectors, halving on
 * failure.  The result is left in mgp->num_slices.  Returns 0 on
 * success, ENXIO when the firmware reset or intrq-size command fails.
 */
static int
myri10ge_probe_slices(struct myri10ge_priv *mgp)
{
	myri10ge_cmd_t cmd;
	int status;

	mgp->num_slices = 1;

	/* hit the board with a reset to ensure it is alive */
	(void) memset(&cmd, 0, sizeof (cmd));
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd);
	if (status != 0) {
		cmn_err(CE_WARN, "%s: failed reset\n", mgp->name);
		return (ENXIO);
	}

	/* without MSI-X there is nothing to probe; stay at one slice */
	if (myri10ge_use_msix == 0)
		return (0);

	/* tell it the size of the interrupt queues */
	cmd.data0 = mgp->max_intr_slots * sizeof (struct mcp_slot);
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd);
	if (status != 0) {
		cmn_err(CE_WARN, "%s: failed MXGEFW_CMD_SET_INTRQ_SIZE\n",
		    mgp->name);
		return (ENXIO);
	}

	/* ask the maximum number of slices it supports */
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES,
	    &cmd);
	if (status != 0)
		return (0);

	mgp->num_slices = cmd.data0;

	/*
	 * if the admin did not specify a limit to how many
	 * slices we should use, cap it automatically to the
	 * number of CPUs currently online
	 */
	if (myri10ge_max_slices == -1)
		myri10ge_max_slices = ncpus;

	if (mgp->num_slices > myri10ge_max_slices)
		mgp->num_slices = myri10ge_max_slices;


	/*
	 * Now try to allocate as many MSI-X vectors as we have
	 * slices. We give up on MSI-X if we can only get a single
	 * vector.
	 */
	while (mgp->num_slices > 1) {
		/* make sure it is a power of two */
		while (!ISP2(mgp->num_slices))
			mgp->num_slices--;
		if (mgp->num_slices == 1)
			return (0);

		/* probe-allocate the vectors, then release them */
		status = myri10ge_add_intrs(mgp, 0);
		if (status == 0) {
			myri10ge_rem_intrs(mgp, 0);
			if (mgp->intr_cnt == mgp->num_slices) {
				if (myri10ge_verbose)
					printf("Got %d slices!\n",
					    mgp->num_slices);
				return (0);
			}
			/* got fewer vectors than asked; shrink to fit */
			mgp->num_slices = mgp->intr_cnt;
		} else {
			/* allocation failed outright; halve and retry */
			mgp->num_slices = mgp->num_slices / 2;
		}
	}

	if (myri10ge_verbose)
		printf("Got %d slices\n", mgp->num_slices);
	return (0);
}
5536 5536
5537 5537 static void
5538 5538 myri10ge_lro_free(struct myri10ge_slice_state *ss)
5539 5539 {
5540 5540 struct lro_entry *lro;
5541 5541
5542 5542 while (ss->lro_free != NULL) {
5543 5543 lro = ss->lro_free;
5544 5544 ss->lro_free = lro->next;
5545 5545 kmem_free(lro, sizeof (*lro));
5546 5546 }
5547 5547 }
5548 5548
5549 5549 static void
5550 5550 myri10ge_lro_alloc(struct myri10ge_slice_state *ss)
5551 5551 {
5552 5552 struct lro_entry *lro;
5553 5553 int idx;
5554 5554
5555 5555 ss->lro_free = NULL;
5556 5556 ss->lro_active = NULL;
5557 5557
5558 5558 for (idx = 0; idx < myri10ge_lro_cnt; idx++) {
5559 5559 lro = kmem_zalloc(sizeof (*lro), KM_SLEEP);
5560 5560 if (lro == NULL)
5561 5561 continue;
5562 5562 lro->next = ss->lro_free;
5563 5563 ss->lro_free = lro;
5564 5564 }
5565 5565 }
5566 5566
5567 5567 static void
5568 5568 myri10ge_free_slices(struct myri10ge_priv *mgp)
5569 5569 {
5570 5570 struct myri10ge_slice_state *ss;
5571 5571 size_t bytes;
5572 5572 int i;
5573 5573
5574 5574 if (mgp->ss == NULL)
5575 5575 return;
5576 5576
5577 5577 for (i = 0; i < mgp->num_slices; i++) {
5578 5578 ss = &mgp->ss[i];
5579 5579 if (ss->rx_done.entry == NULL)
5580 5580 continue;
5581 5581 myri10ge_dma_free(&ss->rx_done.dma);
5582 5582 ss->rx_done.entry = NULL;
5583 5583 if (ss->fw_stats == NULL)
5584 5584 continue;
5585 5585 myri10ge_dma_free(&ss->fw_stats_dma);
5586 5586 ss->fw_stats = NULL;
5587 5587 mutex_destroy(&ss->rx_lock);
5588 5588 mutex_destroy(&ss->tx.lock);
5589 5589 mutex_destroy(&ss->tx.handle_lock);
5590 5590 mutex_destroy(&ss->poll_lock);
5591 5591 myri10ge_jpool_fini(ss);
5592 5592 myri10ge_slice_stat_destroy(ss);
5593 5593 myri10ge_lro_free(ss);
5594 5594 }
5595 5595 bytes = sizeof (*mgp->ss) * mgp->num_slices;
5596 5596 kmem_free(mgp->ss, bytes);
5597 5597 mgp->ss = NULL;
5598 5598 }
5599 5599
5600 5600
/*
 * Allocate the per-slice state array plus, for each slice, the
 * DMA-able firmware stats block and rx completion ("rx done") ring,
 * and initialize the slice's locks, jumbo pool, kstats and LRO list.
 * Returns 0 on success, ENOMEM on any DMA allocation failure (after
 * unwinding via myri10ge_free_slices()).
 */
static int
myri10ge_alloc_slices(struct myri10ge_priv *mgp)
{
	struct myri10ge_slice_state *ss;
	size_t bytes;
	int i;

	bytes = sizeof (*mgp->ss) * mgp->num_slices;
	mgp->ss = kmem_zalloc(bytes, KM_SLEEP);
	/* NOTE(review): KM_SLEEP cannot return NULL; this check is dead */
	if (mgp->ss == NULL)
		return (ENOMEM);
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];

		ss->mgp = mgp;

		/* allocate the per-slice firmware stats */
		bytes = sizeof (*ss->fw_stats);
		ss->fw_stats = (mcp_irq_data_t *)(void *)
		    myri10ge_dma_alloc(mgp->dip, bytes,
		    &myri10ge_misc_dma_attr, &myri10ge_dev_access_attr,
		    DDI_DMA_CONSISTENT, DDI_DMA_READ|DDI_DMA_CONSISTENT,
		    &ss->fw_stats_dma, 1, DDI_DMA_DONTWAIT);
		if (ss->fw_stats == NULL)
			goto abort;
		(void) memset(ss->fw_stats, 0, bytes);

		/* allocate rx done ring */
		bytes = mgp->max_intr_slots *
		    sizeof (*ss->rx_done.entry);
		ss->rx_done.entry = (mcp_slot_t *)(void *)
		    myri10ge_dma_alloc(mgp->dip, bytes,
		    &myri10ge_misc_dma_attr, &myri10ge_dev_access_attr,
		    DDI_DMA_CONSISTENT, DDI_DMA_READ|DDI_DMA_CONSISTENT,
		    &ss->rx_done.dma, 1, DDI_DMA_DONTWAIT);
		if (ss->rx_done.entry == NULL) {
			goto abort;
		}
		(void) memset(ss->rx_done.entry, 0, bytes);
		/* remaining per-slice state, set up only after both
		 * DMA regions exist (free_slices relies on this order) */
		mutex_init(&ss->rx_lock,   NULL, MUTEX_DEFAULT, mgp->icookie);
		mutex_init(&ss->tx.lock,   NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&ss->tx.handle_lock,   NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&ss->poll_lock,   NULL, MUTEX_DEFAULT, NULL);
		myri10ge_jpool_init(ss);
		(void) myri10ge_slice_stat_init(ss);
		myri10ge_lro_alloc(ss);
	}

	return (0);

abort:
	myri10ge_free_slices(mgp);
	return (ENOMEM);
}
5655 5655
/*
 * Save the device's MSI capability registers (control, 32/64-bit
 * address, and both data registers) into mgp->pci_saved_state so
 * they can be restored by myri10ge_restore_msi_state() after a chip
 * reset.  Returns DDI_SUCCESS, or DDI_FAILURE when the MSI
 * capability cannot be located.
 */
static int
myri10ge_save_msi_state(struct myri10ge_priv *mgp,
    ddi_acc_handle_t handle)
{
	uint8_t ptr;
	int err;

	err = myri10ge_find_cap(handle, &ptr, PCI_CAP_ID_MSI);
	if (err != 0) {
		cmn_err(CE_WARN, "%s: could not find MSI cap\n",
		    mgp->name);
		return (DDI_FAILURE);
	}
	mgp->pci_saved_state.msi_ctrl =
	    pci_config_get16(handle, ptr + PCI_MSI_CTRL);
	mgp->pci_saved_state.msi_addr_low =
	    pci_config_get32(handle, ptr + PCI_MSI_ADDR_OFFSET);
	mgp->pci_saved_state.msi_addr_high =
	    pci_config_get32(handle, ptr + PCI_MSI_ADDR_OFFSET + 4);
	mgp->pci_saved_state.msi_data_32 =
	    pci_config_get16(handle, ptr + PCI_MSI_32BIT_DATA);
	mgp->pci_saved_state.msi_data_64 =
	    pci_config_get16(handle, ptr + PCI_MSI_64BIT_DATA);
	return (DDI_SUCCESS);
}
5681 5681
/*
 * Write the MSI capability registers previously captured by
 * myri10ge_save_msi_state() back to the device.  Returns
 * DDI_SUCCESS, or DDI_FAILURE when the MSI capability cannot be
 * located.
 */
static int
myri10ge_restore_msi_state(struct myri10ge_priv *mgp,
    ddi_acc_handle_t handle)
{
	uint8_t ptr;
	int err;

	err = myri10ge_find_cap(handle, &ptr, PCI_CAP_ID_MSI);
	if (err != 0) {
		cmn_err(CE_WARN, "%s: could not find MSI cap\n",
		    mgp->name);
		return (DDI_FAILURE);
	}

	pci_config_put16(handle, ptr + PCI_MSI_CTRL,
	    mgp->pci_saved_state.msi_ctrl);
	pci_config_put32(handle, ptr + PCI_MSI_ADDR_OFFSET,
	    mgp->pci_saved_state.msi_addr_low);
	pci_config_put32(handle, ptr + PCI_MSI_ADDR_OFFSET + 4,
	    mgp->pci_saved_state.msi_addr_high);
	pci_config_put16(handle, ptr + PCI_MSI_32BIT_DATA,
	    mgp->pci_saved_state.msi_data_32);
	pci_config_put16(handle, ptr + PCI_MSI_64BIT_DATA,
	    mgp->pci_saved_state.msi_data_64);

	return (DDI_SUCCESS);
}
5709 5709
5710 5710 static int
5711 5711 myri10ge_save_pci_state(struct myri10ge_priv *mgp)
5712 5712 {
5713 5713 ddi_acc_handle_t handle = mgp->cfg_hdl;
5714 5714 int i;
5715 5715 int err = DDI_SUCCESS;
5716 5716
5717 5717
5718 5718 /* Save the non-extended PCI config space 32-bits at a time */
5719 5719 for (i = 0; i < 16; i++)
5720 5720 mgp->pci_saved_state.base[i] =
5721 5721 pci_config_get32(handle, i*4);
5722 5722
5723 5723 /* now save MSI interrupt state *, if needed */
5724 5724 if (mgp->ddi_intr_type == DDI_INTR_TYPE_MSI)
5725 5725 err = myri10ge_save_msi_state(mgp, handle);
5726 5726
5727 5727 return (err);
5728 5728 }
5729 5729
5730 5730 static int
5731 5731 myri10ge_restore_pci_state(struct myri10ge_priv *mgp)
5732 5732 {
5733 5733 ddi_acc_handle_t handle = mgp->cfg_hdl;
5734 5734 int i;
5735 5735 int err = DDI_SUCCESS;
5736 5736
5737 5737
5738 5738 /* Restore the non-extended PCI config space 32-bits at a time */
5739 5739 for (i = 15; i >= 0; i--)
5740 5740 pci_config_put32(handle, i*4, mgp->pci_saved_state.base[i]);
5741 5741
5742 5742 /* now restore MSI interrupt state *, if needed */
5743 5743 if (mgp->ddi_intr_type == DDI_INTR_TYPE_MSI)
5744 5744 err = myri10ge_restore_msi_state(mgp, handle);
5745 5745
5746 5746 if (mgp->max_read_request_4k)
5747 5747 (void) myri10ge_set_max_readreq(handle);
5748 5748 return (err);
5749 5749 }
5750 5750
5751 5751
/*
 * DDI_SUSPEND handler: if the interface is running, stop it and
 * remember that fact, then save PCI config space so that
 * myri10ge_resume() can restore it.  Returns a DDI status code.
 */
static int
myri10ge_suspend(dev_info_t *dip)
{
	struct myri10ge_priv *mgp = ddi_get_driver_private(dip);
	int status;

	if (mgp == NULL) {
		cmn_err(CE_WARN, "null dip in myri10ge_suspend\n");
		return (DDI_FAILURE);
	}
	if (mgp->dip != dip) {
		cmn_err(CE_WARN, "bad dip in myri10ge_suspend\n");
		return (DDI_FAILURE);
	}
	mutex_enter(&mgp->intrlock);
	if (mgp->running == MYRI10GE_ETH_RUNNING) {
		/*
		 * Mark the interface as stopping, then drop intrlock
		 * while cancelling the watchdog timeout.
		 * NOTE(review): presumably untimeout() must not be
		 * called with intrlock held because the watchdog can
		 * block on it — confirm against myri10ge_watchdog().
		 */
		mgp->running = MYRI10GE_ETH_STOPPING;
		mutex_exit(&mgp->intrlock);
		(void) untimeout(mgp->timer_id);
		mutex_enter(&mgp->intrlock);
		myri10ge_stop_locked(mgp);
		/* remember to restart the interface on resume */
		mgp->running = MYRI10GE_ETH_SUSPENDED_RUNNING;
	}
	status = myri10ge_save_pci_state(mgp);
	mutex_exit(&mgp->intrlock);
	return (status);
}
5779 5779
5780 5780 static int
5781 5781 myri10ge_resume(dev_info_t *dip)
5782 5782 {
5783 5783 struct myri10ge_priv *mgp = ddi_get_driver_private(dip);
5784 5784 int status = DDI_SUCCESS;
5785 5785
5786 5786 if (mgp == NULL) {
5787 5787 cmn_err(CE_WARN, "null dip in myri10ge_resume\n");
5788 5788 return (DDI_FAILURE);
5789 5789 }
5790 5790 if (mgp->dip != dip) {
5791 5791 cmn_err(CE_WARN, "bad dip in myri10ge_resume\n");
5792 5792 return (DDI_FAILURE);
5793 5793 }
5794 5794
5795 5795 mutex_enter(&mgp->intrlock);
5796 5796 status = myri10ge_restore_pci_state(mgp);
5797 5797 if (status == DDI_SUCCESS &&
5798 5798 mgp->running == MYRI10GE_ETH_SUSPENDED_RUNNING) {
5799 5799 status = myri10ge_start_locked(mgp);
5800 5800 }
5801 5801 mutex_exit(&mgp->intrlock);
5802 5802 if (status != DDI_SUCCESS)
5803 5803 return (status);
5804 5804
5805 5805 /* start the watchdog timer */
5806 5806 mgp->timer_id = timeout(myri10ge_watchdog, mgp,
5807 5807 mgp->timer_ticks);
5808 5808 return (DDI_SUCCESS);
5809 5809 }
5810 5810
/*
 * DDI attach entry point.  DDI_RESUME is delegated to
 * myri10ge_resume(); DDI_ATTACH maps the NIC, loads firmware,
 * allocates slices and interrupts, and registers with GLDv3.  On
 * failure the goto ladder at the bottom unwinds, in reverse order,
 * everything acquired so far.
 */
static int
myri10ge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{

	struct myri10ge_priv *mgp;
	mac_register_t *macp, *omacp;
	ddi_acc_handle_t handle;
	uint32_t csr, hdr_offset;
	int status, span, link_width, max_read_request_4k;
	unsigned long bus_number, dev_number, func_number;
	size_t bytes;
	offset_t ss_offset;
	uint8_t vso;

	if (cmd == DDI_RESUME) {
		return (myri10ge_resume(dip));
	}

	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);
	if (pci_config_setup(dip, &handle) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* enable busmaster and memory space access */
	csr = pci_config_get32(handle, PCI_CONF_COMM);
	pci_config_put32(handle, PCI_CONF_COMM,
	    (csr |PCI_COMM_ME|PCI_COMM_MAE));
	status = myri10ge_read_pcie_link_width(handle, &link_width);
	if (status != 0) {
		cmn_err(CE_WARN, "could not read link width!\n");
		link_width = 0;
	}
	/* remember whether a 4KB max read request could be set */
	max_read_request_4k = !myri10ge_set_max_readreq(handle);
	status = myri10ge_find_cap(handle, &vso, PCI_CAP_ID_VS);
	if (status != 0)
		goto abort_with_cfg_hdl;
	if ((omacp = mac_alloc(MAC_VERSION)) == NULL)
		goto abort_with_cfg_hdl;
	/*
	 * XXXX Hack: mac_register_t grows in newer kernels. To be
	 * able to write newer fields, such as m_margin, without
	 * writing outside allocated memory, we allocate our own macp
	 * and pass that to mac_register()
	 */
	macp = kmem_zalloc(sizeof (*macp) * 8, KM_SLEEP);
	macp->m_version = omacp->m_version;

	/*
	 * NOTE(review): kmem_zalloc(KM_SLEEP) never returns NULL on
	 * illumos, so this check is dead (though harmless).
	 */
	if ((mgp = (struct myri10ge_priv *)
	    kmem_zalloc(sizeof (*mgp), KM_SLEEP)) == NULL) {
		goto abort_with_macinfo;
	}
	ddi_set_driver_private(dip, mgp);

	/* setup device name for log messages */
	(void) sprintf(mgp->name, "myri10ge%d", ddi_get_instance(dip));

	/* snapshot the global tunables under the param lock */
	mutex_enter(&myri10ge_param_lock);
	myri10ge_get_props(dip);
	mgp->intr_coal_delay = myri10ge_intr_coal_delay;
	mgp->pause = myri10ge_flow_control;
	mutex_exit(&myri10ge_param_lock);

	mgp->max_read_request_4k = max_read_request_4k;
	mgp->pcie_link_width = link_width;
	mgp->running = MYRI10GE_ETH_STOPPED;
	mgp->vso = vso;
	mgp->dip = dip;
	mgp->cfg_hdl = handle;

	mgp->timer_ticks = drv_sectohz(5);	/* 5 second watchdog period */
	myri10ge_test_physical(dip);

	/* allocate command page */
	bytes = sizeof (*mgp->cmd);
	mgp->cmd = (mcp_cmd_response_t *)
	    (void *)myri10ge_dma_alloc(dip, bytes,
	    &myri10ge_misc_dma_attr, &myri10ge_dev_access_attr,
	    DDI_DMA_CONSISTENT, DDI_DMA_RDWR|DDI_DMA_CONSISTENT,
	    &mgp->cmd_dma, 1, DDI_DMA_DONTWAIT);
	if (mgp->cmd == NULL)
		goto abort_with_mgp;

	(void) myri10ge_reg_set(dip, &mgp->reg_set, &span, &bus_number,
	    &dev_number, &func_number);
	if (myri10ge_verbose)
		printf("%s at %ld:%ld:%ld attaching\n", mgp->name,
		    bus_number, dev_number, func_number);
	status = ddi_regs_map_setup(dip, mgp->reg_set, (caddr_t *)&mgp->sram,
	    (offset_t)0, (offset_t)span, &myri10ge_dev_access_attr,
	    &mgp->io_handle);
	if (status != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: couldn't map memory space", mgp->name);
		printf("%s: reg_set = %d, span = %d, status = %d",
		    mgp->name, mgp->reg_set, span, status);
		/*
		 * NOTE(review): jumping to abort_with_mgp here skips
		 * myri10ge_dma_free(&mgp->cmd_dma), which only runs
		 * on the abort_with_mapped path — possible leak of
		 * the command page; confirm.
		 */
		goto abort_with_mgp;
	}

	/*
	 * Locate the firmware header in SRAM, then copy out the
	 * EEPROM strings that sit at the end of usable SRAM.
	 */
	hdr_offset = *(uint32_t *)(void*)(mgp->sram + MCP_HEADER_PTR_OFFSET);
	hdr_offset = ntohl(hdr_offset) & 0xffffc;
	ss_offset = hdr_offset +
	    offsetof(struct mcp_gen_header, string_specs);
	mgp->sram_size = ntohl(*(uint32_t *)(void*)(mgp->sram + ss_offset));
	myri10ge_pio_copy32(mgp->eeprom_strings,
	    (uint32_t *)(void*)((char *)mgp->sram + mgp->sram_size),
	    MYRI10GE_EEPROM_STRINGS_SIZE);
	/* guarantee NUL termination of the string area */
	(void) memset(mgp->eeprom_strings +
	    MYRI10GE_EEPROM_STRINGS_SIZE - 2, 0, 2);

	status = myri10ge_read_mac_addr(mgp);
	if (status) {
		goto abort_with_mapped;
	}

	status = myri10ge_select_firmware(mgp);
	if (status != 0) {
		cmn_err(CE_WARN, "%s: failed to load firmware\n", mgp->name);
		goto abort_with_mapped;
	}

	status = myri10ge_probe_slices(mgp);
	if (status != 0) {
		cmn_err(CE_WARN, "%s: failed to probe slices\n", mgp->name);
		goto abort_with_dummy_rdma;
	}

	status = myri10ge_alloc_slices(mgp);
	if (status != 0) {
		cmn_err(CE_WARN, "%s: failed to alloc slices\n", mgp->name);
		goto abort_with_dummy_rdma;
	}

	/* add the interrupt handler */
	status = myri10ge_add_intrs(mgp, 1);
	if (status != 0) {
		cmn_err(CE_WARN, "%s: Failed to add interrupt\n",
		    mgp->name);
		goto abort_with_slices;
	}

	/* now that we have an iblock_cookie, init the mutexes */
	mutex_init(&mgp->cmd_lock, NULL, MUTEX_DRIVER, mgp->icookie);
	mutex_init(&mgp->intrlock, NULL, MUTEX_DRIVER, mgp->icookie);


	status = myri10ge_nic_stat_init(mgp);
	if (status != DDI_SUCCESS)
		goto abort_with_interrupts;
	status = myri10ge_info_init(mgp);
	if (status != DDI_SUCCESS)
		goto abort_with_stats;

	/*
	 * Initialize GLD state
	 */

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = mgp;
	macp->m_dip = dip;
	macp->m_src_addr = mgp->mac_addr;
	macp->m_callbacks = &myri10ge_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = myri10ge_mtu -
	    (sizeof (struct ether_header) + MXGEFW_PAD + VLAN_TAGSZ);
#ifdef SOLARIS_S11
	macp->m_margin = VLAN_TAGSZ;
#endif
	macp->m_v12n = MAC_VIRT_LEVEL1;
	status = mac_register(macp, &mgp->mh);
	if (status != 0) {
		cmn_err(CE_WARN, "%s: mac_register failed with %d\n",
		    mgp->name, status);
		goto abort_with_info;
	}
	myri10ge_ndd_init(mgp);
	if (myri10ge_verbose)
		printf("%s: %s, tx bndry %d, fw %s\n", mgp->name,
		    mgp->intr_type, mgp->tx_boundary, mgp->fw_name);
	/* link this instance onto the global list */
	mutex_enter(&myri10ge_param_lock);
	mgp->next = mgp_list;
	mgp_list = mgp;
	mutex_exit(&myri10ge_param_lock);
	kmem_free(macp, sizeof (*macp) * 8);
	mac_free(omacp);
	return (DDI_SUCCESS);

	/*
	 * Error unwind: each label releases what was acquired after
	 * the previous label's resource, in reverse order.
	 */
abort_with_info:
	myri10ge_info_destroy(mgp);

abort_with_stats:
	myri10ge_nic_stat_destroy(mgp);

abort_with_interrupts:
	mutex_destroy(&mgp->cmd_lock);
	mutex_destroy(&mgp->intrlock);
	myri10ge_rem_intrs(mgp, 1);

abort_with_slices:
	myri10ge_free_slices(mgp);

abort_with_dummy_rdma:
	myri10ge_dummy_rdma(mgp, 0);

abort_with_mapped:
	ddi_regs_map_free(&mgp->io_handle);

	myri10ge_dma_free(&mgp->cmd_dma);

abort_with_mgp:
	kmem_free(mgp, sizeof (*mgp));

abort_with_macinfo:
	kmem_free(macp, sizeof (*macp) * 8);
	mac_free(omacp);

abort_with_cfg_hdl:
	pci_config_teardown(&handle);
	return (DDI_FAILURE);

}
6030 6030
6031 6031
/*
 * DDI detach entry point.  DDI_SUSPEND is delegated to
 * myri10ge_suspend(); DDI_DETACH tears down everything attach set
 * up, but refuses to proceed while rx buffers are still loaned to
 * the stack or external references remain.
 */
static int
myri10ge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct myri10ge_priv *mgp, *tmp;
	int status, i, jbufs_alloced;

	if (cmd == DDI_SUSPEND) {
		status = myri10ge_suspend(dip);
		return (status);
	}

	if (cmd != DDI_DETACH) {
		return (DDI_FAILURE);
	}
	/* Get the driver private (gld_mac_info_t) structure */
	mgp = ddi_get_driver_private(dip);

	/*
	 * Reclaim jumbo buffers from every slice; any buffer still
	 * loaned upstream blocks the detach, since freeing driver
	 * state while it is outstanding would be unsafe.
	 */
	mutex_enter(&mgp->intrlock);
	jbufs_alloced = 0;
	for (i = 0; i < mgp->num_slices; i++) {
		myri10ge_remove_jbufs(&mgp->ss[i]);
		jbufs_alloced += mgp->ss[i].jpool.num_alloc;
	}
	mutex_exit(&mgp->intrlock);
	if (jbufs_alloced != 0) {
		cmn_err(CE_NOTE, "%s: %d loaned rx buffers remain\n",
		    mgp->name, jbufs_alloced);
		return (DDI_FAILURE);
	}

	mutex_enter(&myri10ge_param_lock);
	if (mgp->refcnt != 0) {
		mutex_exit(&myri10ge_param_lock);
		cmn_err(CE_NOTE, "%s: %d external refs remain\n",
		    mgp->name, mgp->refcnt);
		return (DDI_FAILURE);
	}
	mutex_exit(&myri10ge_param_lock);

	/* GLD must release us before any further teardown */
	status = mac_unregister(mgp->mh);
	if (status != DDI_SUCCESS)
		return (status);

	myri10ge_ndd_fini(mgp);
	myri10ge_dummy_rdma(mgp, 0);
	myri10ge_nic_stat_destroy(mgp);
	myri10ge_info_destroy(mgp);

	mutex_destroy(&mgp->cmd_lock);
	mutex_destroy(&mgp->intrlock);

	myri10ge_rem_intrs(mgp, 1);

	myri10ge_free_slices(mgp);
	ddi_regs_map_free(&mgp->io_handle);
	myri10ge_dma_free(&mgp->cmd_dma);
	pci_config_teardown(&mgp->cfg_hdl);

	/* unlink this instance from the global mgp_list, then free it */
	mutex_enter(&myri10ge_param_lock);
	if (mgp_list == mgp) {
		mgp_list = mgp->next;
	} else {
		tmp = mgp_list;
		while (tmp->next != mgp && tmp->next != NULL)
			tmp = tmp->next;
		if (tmp->next != NULL)
			tmp->next = tmp->next->next;
	}
	kmem_free(mgp, sizeof (*mgp));
	mutex_exit(&myri10ge_param_lock);
	return (DDI_SUCCESS);
}
6104 6104
/*
 * Helper for the quiesce entry point: interrupt threads are not being
 * scheduled, so we must poll for the confirmation DMA to arrive in
 * the firmware stats block for slice 0.  We're essentially running
 * the guts of the interrupt handler, and just cherry-picking the
 * confirmation that the NIC is quiesced (stats->link_down).
 */
6112 6112
/*
 * Poll slice 0's firmware stats block for confirmation that the
 * link is down.  Runs with interrupt threads disabled (quiesce
 * context), so we do the interrupt handler's work by hand.
 * Returns 1 once stats show both stats_updated and link_down,
 * 0 otherwise.
 */
static int
myri10ge_poll_down(struct myri10ge_priv *mgp)
{
	struct myri10ge_slice_state *ss = mgp->ss;
	mcp_irq_data_t *stats = ss->fw_stats;
	int valid;
	int found_down = 0;


	/* check for a pending IRQ */

	if (! *((volatile uint8_t *)& stats->valid))
		return (0);
	valid = stats->valid;

	/*
	 * Make sure to tell the NIC to lower a legacy IRQ, else
	 * it may have corrupt state after restarting
	 */

	if (mgp->ddi_intr_type == DDI_INTR_TYPE_FIXED) {
		/* lower legacy IRQ */
		*mgp->irq_deassert = 0;
		mb();
		/* wait for irq conf DMA (busy-wait, no timeout) */
		while (*((volatile uint8_t *)& stats->valid))
			;
	}
	if (stats->stats_updated && stats->link_down)
		found_down = 1;

	/*
	 * NOTE(review): the two irq_claim writes appear to mirror the
	 * interrupt handler's acknowledgement sequence — confirm
	 * against the real handler before changing them.
	 */
	if (valid & 0x1)
		*ss->irq_claim = BE_32(3);
	*(ss->irq_claim + 1) = BE_32(3);

	return (found_down);
}
6150 6150
6151 6151 static int
6152 6152 myri10ge_quiesce(dev_info_t *dip)
6153 6153 {
6154 6154 struct myri10ge_priv *mgp;
6155 6155 myri10ge_cmd_t cmd;
6156 6156 int status, down, i;
6157 6157
6158 6158 mgp = ddi_get_driver_private(dip);
6159 6159 if (mgp == NULL)
6160 6160 return (DDI_FAILURE);
6161 6161
6162 6162 /* if devices was unplumbed, it is guaranteed to be quiescent */
6163 6163 if (mgp->running == MYRI10GE_ETH_STOPPED)
6164 6164 return (DDI_SUCCESS);
6165 6165
6166 6166 /* send a down CMD to queuesce NIC */
6167 6167 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_DOWN, &cmd);
6168 6168 if (status) {
6169 6169 cmn_err(CE_WARN, "%s: Couldn't bring down link\n", mgp->name);
6170 6170 return (DDI_FAILURE);
6171 6171 }
6172 6172
6173 6173 for (i = 0; i < 20; i++) {
6174 6174 down = myri10ge_poll_down(mgp);
6175 6175 if (down)
6176 6176 break;
6177 6177 delay(drv_usectohz(100000));
6178 6178 mb();
6179 6179 }
6180 6180 if (down)
6181 6181 return (DDI_SUCCESS);
6182 6182 return (DDI_FAILURE);
6183 6183 }
6184 6184
/*
 * Distinguish between allocb'ed blocks, and gesballoc'ed attached
 * storage.
 */
static void
myri10ge_find_lastfree(void)
{
	/*
	 * Allocate a throw-away ordinary mblk and record its dblk's
	 * db_lastfree pointer in myri10ge_db_lastfree; comparing a
	 * block's db_lastfree against this value identifies plain
	 * allocb() blocks.  NOTE(review): mp is never freed here —
	 * presumably a deliberate one-time, driver-lifetime
	 * allocation; confirm.
	 */
	mblk_t *mp = allocb(1024, 0);
	dblk_t *dbp;

	if (mp == NULL) {
		cmn_err(CE_WARN, "myri10ge_find_lastfree failed\n");
		return;
	}
	dbp = mp->b_datap;
	myri10ge_db_lastfree = (void *)dbp->db_lastfree;
}
6202 6202
6203 6203 int
6204 6204 _init(void)
6205 6205 {
6206 6206 int i;
6207 6207
6208 6208 if (myri10ge_verbose)
6209 6209 cmn_err(CE_NOTE,
6210 6210 "Myricom 10G driver (10GbE) version %s loading\n",
6211 6211 MYRI10GE_VERSION_STR);
6212 6212 myri10ge_find_lastfree();
6213 6213 mac_init_ops(&myri10ge_ops, "myri10ge");
6214 6214 mutex_init(&myri10ge_param_lock, NULL, MUTEX_DEFAULT, NULL);
6215 6215 if ((i = mod_install(&modlinkage)) != 0) {
6216 6216 cmn_err(CE_WARN, "mod_install returned %d\n", i);
6217 6217 mac_fini_ops(&myri10ge_ops);
6218 6218 mutex_destroy(&myri10ge_param_lock);
6219 6219 }
6220 6220 return (i);
6221 6221 }
6222 6222
6223 6223 int
6224 6224 _fini(void)
6225 6225 {
6226 6226 int i;
6227 6227 i = mod_remove(&modlinkage);
6228 6228 if (i != 0) {
6229 6229 return (i);
6230 6230 }
6231 6231 mac_fini_ops(&myri10ge_ops);
6232 6232 mutex_destroy(&myri10ge_param_lock);
6233 6233 return (0);
6234 6234 }
6235 6235
/*
 * Loadable-module information entry point: report module details
 * from the static modlinkage.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
6241 6241
6242 6242
6243 6243 /*
6244 6244 * This file uses MyriGE driver indentation.
6245 6245 *
6246 6246 * Local Variables:
6247 6247 * c-file-style:"sun"
6248 6248 * tab-width:8
6249 6249 * End:
6250 6250 */
↓ open down ↓ |
360 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX