Print this page
XXXX introduce drv_sectohz
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/ixgbe/ixgbe_main.c
+++ new/usr/src/uts/common/io/ixgbe/ixgbe_main.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
24 24 */
25 25
26 26 /*
27 27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 28 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
29 29 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
30 30 * Copyright (c) 2013 Saso Kiselkov. All rights reserved.
31 31 * Copyright (c) 2013 OSN Online Service Nuernberg GmbH. All rights reserved.
32 32 */
33 33
34 34 #include "ixgbe_sw.h"
35 35
36 36 static char ixgbe_ident[] = "Intel 10Gb Ethernet";
37 37 static char ixgbe_version[] = "ixgbe 1.1.7";
38 38
39 39 /*
40 40 * Local function protoypes
41 41 */
42 42 static int ixgbe_register_mac(ixgbe_t *);
43 43 static int ixgbe_identify_hardware(ixgbe_t *);
44 44 static int ixgbe_regs_map(ixgbe_t *);
45 45 static void ixgbe_init_properties(ixgbe_t *);
46 46 static int ixgbe_init_driver_settings(ixgbe_t *);
47 47 static void ixgbe_init_locks(ixgbe_t *);
48 48 static void ixgbe_destroy_locks(ixgbe_t *);
49 49 static int ixgbe_init(ixgbe_t *);
50 50 static int ixgbe_chip_start(ixgbe_t *);
51 51 static void ixgbe_chip_stop(ixgbe_t *);
52 52 static int ixgbe_reset(ixgbe_t *);
53 53 static void ixgbe_tx_clean(ixgbe_t *);
54 54 static boolean_t ixgbe_tx_drain(ixgbe_t *);
55 55 static boolean_t ixgbe_rx_drain(ixgbe_t *);
56 56 static int ixgbe_alloc_rings(ixgbe_t *);
57 57 static void ixgbe_free_rings(ixgbe_t *);
58 58 static int ixgbe_alloc_rx_data(ixgbe_t *);
59 59 static void ixgbe_free_rx_data(ixgbe_t *);
60 60 static void ixgbe_setup_rings(ixgbe_t *);
61 61 static void ixgbe_setup_rx(ixgbe_t *);
62 62 static void ixgbe_setup_tx(ixgbe_t *);
63 63 static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
64 64 static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
65 65 static void ixgbe_setup_rss(ixgbe_t *);
66 66 static void ixgbe_setup_vmdq(ixgbe_t *);
67 67 static void ixgbe_setup_vmdq_rss(ixgbe_t *);
68 68 static void ixgbe_init_unicst(ixgbe_t *);
69 69 static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *);
70 70 static void ixgbe_setup_multicst(ixgbe_t *);
71 71 static void ixgbe_get_hw_state(ixgbe_t *);
72 72 static void ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe);
73 73 static void ixgbe_get_conf(ixgbe_t *);
74 74 static void ixgbe_init_params(ixgbe_t *);
75 75 static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
76 76 static void ixgbe_driver_link_check(ixgbe_t *);
77 77 static void ixgbe_sfp_check(void *);
78 78 static void ixgbe_overtemp_check(void *);
79 79 static void ixgbe_link_timer(void *);
80 80 static void ixgbe_local_timer(void *);
81 81 static void ixgbe_arm_watchdog_timer(ixgbe_t *);
82 82 static void ixgbe_restart_watchdog_timer(ixgbe_t *);
83 83 static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
84 84 static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
85 85 static boolean_t is_valid_mac_addr(uint8_t *);
86 86 static boolean_t ixgbe_stall_check(ixgbe_t *);
87 87 static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);
88 88 static void ixgbe_set_internal_mac_loopback(ixgbe_t *);
89 89 static boolean_t ixgbe_find_mac_address(ixgbe_t *);
90 90 static int ixgbe_alloc_intrs(ixgbe_t *);
91 91 static int ixgbe_alloc_intr_handles(ixgbe_t *, int);
92 92 static int ixgbe_add_intr_handlers(ixgbe_t *);
93 93 static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int);
94 94 static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
95 95 static void ixgbe_setup_ivar(ixgbe_t *, uint16_t, uint8_t, int8_t);
96 96 static void ixgbe_enable_ivar(ixgbe_t *, uint16_t, int8_t);
97 97 static void ixgbe_disable_ivar(ixgbe_t *, uint16_t, int8_t);
98 98 static uint32_t ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index);
99 99 static int ixgbe_map_intrs_to_vectors(ixgbe_t *);
100 100 static void ixgbe_setup_adapter_vector(ixgbe_t *);
101 101 static void ixgbe_rem_intr_handlers(ixgbe_t *);
102 102 static void ixgbe_rem_intrs(ixgbe_t *);
103 103 static int ixgbe_enable_intrs(ixgbe_t *);
104 104 static int ixgbe_disable_intrs(ixgbe_t *);
105 105 static uint_t ixgbe_intr_legacy(void *, void *);
106 106 static uint_t ixgbe_intr_msi(void *, void *);
107 107 static uint_t ixgbe_intr_msix(void *, void *);
108 108 static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
109 109 static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
110 110 static void ixgbe_intr_other_work(ixgbe_t *, uint32_t);
111 111 static void ixgbe_get_driver_control(struct ixgbe_hw *);
112 112 static int ixgbe_addmac(void *, const uint8_t *);
113 113 static int ixgbe_remmac(void *, const uint8_t *);
114 114 static void ixgbe_release_driver_control(struct ixgbe_hw *);
115 115
116 116 static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
117 117 static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
118 118 static int ixgbe_resume(dev_info_t *);
119 119 static int ixgbe_suspend(dev_info_t *);
120 120 static int ixgbe_quiesce(dev_info_t *);
121 121 static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
122 122 static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
123 123 static int ixgbe_cbfunc(dev_info_t *, ddi_cb_action_t, void *, void *, void *);
124 124 static int ixgbe_intr_cb_register(ixgbe_t *);
125 125 static int ixgbe_intr_adjust(ixgbe_t *, ddi_cb_action_t, int);
126 126
127 127 static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
128 128 const void *impl_data);
129 129 static void ixgbe_fm_init(ixgbe_t *);
130 130 static void ixgbe_fm_fini(ixgbe_t *);
131 131
/*
 * Private (driver-specific) property names advertised to the MAC layer.
 * The list is NULL-terminated; the names are matched in the
 * ixgbe_m_setprop()/ixgbe_m_getprop() callbacks.
 */
char *ixgbe_priv_props[] = {
	"_tx_copy_thresh",
	"_tx_recycle_thresh",
	"_tx_overload_thresh",
	"_tx_resched_thresh",
	"_rx_copy_thresh",
	"_rx_limit_per_intr",
	"_intr_throttling",
	"_adv_pause_cap",
	"_adv_asym_pause_cap",
	NULL
};

/*
 * Number of entries in ixgbe_priv_props[], including the terminating NULL.
 * The element count must be derived from the array's own element size;
 * the previous definition divided by sizeof (mac_priv_prop_t), which is
 * a stale remnant from when the array held mac_priv_prop_t entries and
 * yields a bogus count now that it is an array of char pointers.
 */
#define	IXGBE_MAX_PRIV_PROPS \
	(sizeof (ixgbe_priv_props) / sizeof (ixgbe_priv_props[0]))
147 147
/*
 * Character/block device entry points.  ixgbe is a pure GLDv3 NIC driver,
 * so all user-level I/O is funnelled through the MAC layer; every cb_ops
 * entry point is therefore a stub (nulldev/nodev).
 */
static struct cb_ops ixgbe_cb_ops = {
	nulldev,		/* cb_open */
	nulldev,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	nodev,			/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_stream */
	D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};

/*
 * Device operations.  attach/detach/quiesce are the real entry points;
 * mac_init_ops() in _init() fills in the GLDv3-specific hooks.
 */
static struct dev_ops ixgbe_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	NULL,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	ixgbe_attach,		/* devo_attach */
	ixgbe_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&ixgbe_cb_ops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	ddi_power,		/* devo_power */
	ixgbe_quiesce,		/* devo_quiesce */
};

/* Module linkage: this module is a device driver. */
static struct modldrv ixgbe_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	ixgbe_ident,		/* Description string */
	&ixgbe_dev_ops		/* driver ops */
};

static struct modlinkage ixgbe_modlinkage = {
	MODREV_1, &ixgbe_modldrv, NULL
};
193 193
194 194 /*
195 195 * Access attributes for register mapping
196 196 */
/*
 * Access attributes for register mapping: little-endian, strictly
 * ordered, with FM access-error protection (DDI_FLAGERR_ACC) so that
 * failed register accesses are reported through the FMA framework.
 */
ddi_device_acc_attr_t ixgbe_regs_acc_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * Loopback properties advertised through the LB_GET_INFO ioctl:
 * normal (no loopback), internal MAC loopback, and external loopback.
 */
static lb_property_t lb_normal = {
	normal,	"normal", IXGBE_LB_NONE
};

static lb_property_t lb_mac = {
	internal, "MAC", IXGBE_LB_INTERNAL_MAC
};

static lb_property_t lb_external = {
	external, "External", IXGBE_LB_EXTERNAL
};

#define	IXGBE_M_CALLBACK_FLAGS \
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)

/*
 * GLDv3 callback vector.  The NULL slots correspond to callbacks that
 * are unused because rx/tx are provided per-ring via the MAC_CAPAB_RINGS
 * capability (see ixgbe_m_getcapab()).
 */
static mac_callbacks_t ixgbe_m_callbacks = {
	IXGBE_M_CALLBACK_FLAGS,
	ixgbe_m_stat,
	ixgbe_m_start,
	ixgbe_m_stop,
	ixgbe_m_promisc,
	ixgbe_m_multicst,
	NULL,
	NULL,
	NULL,
	ixgbe_m_ioctl,
	ixgbe_m_getcapab,
	NULL,
	NULL,
	ixgbe_m_setprop,
	ixgbe_m_getprop,
	ixgbe_m_propinfo
};
240 240
241 241 /*
242 242 * Initialize capabilities of each supported adapter type
243 243 */
/*
 * Initialize capabilities of each supported adapter type.
 *
 * NOTE(review): these tables are shared statics, but
 * ixgbe_identify_hardware() ORs additional flags/interrupt bits into the
 * selected table for certain device ids (e.g. copper 82598, 82599 T3 LOM).
 * With multiple adapters of the same family but different device ids in
 * one system, those per-device adjustments would leak between instances —
 * verify whether a per-instance copy is needed.
 */
static adapter_info_t ixgbe_82598eb_cap = {
	64,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	64,		/* default number of rx queues */
	16,		/* maximum number of rx groups */
	1,		/* minimum number of rx groups */
	1,		/* default number of rx groups */
	32,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	16366,		/* maximum MTU size */
	0xFFFF,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	200,		/* default interrupt throttle rate */
	18,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	IXGBE_EICR_LSC,	/* "other" interrupt types handled */
	0,		/* "other" interrupt types enable mask */
	(IXGBE_FLAG_DCA_CAPABLE	/* capability flags */
	| IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE)
};

static adapter_info_t ixgbe_82599eb_cap = {
	128,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	128,		/* default number of rx queues */
	64,		/* maximum number of rx groups */
	1,		/* minimum number of rx groups */
	1,		/* default number of rx groups */
	128,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	15500,		/* maximum MTU size */
	0xFF8,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	200,		/* default interrupt throttle rate */
	64,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	(IXGBE_EICR_LSC
	| IXGBE_EICR_GPI_SDP1
	| IXGBE_EICR_GPI_SDP2), /* "other" interrupt types handled */

	(IXGBE_SDP1_GPIEN
	| IXGBE_SDP2_GPIEN), /* "other" interrupt types enable mask */

	(IXGBE_FLAG_DCA_CAPABLE
	| IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE
	| IXGBE_FLAG_RSC_CAPABLE
	| IXGBE_FLAG_SFP_PLUG_CAPABLE) /* capability flags */
};

static adapter_info_t ixgbe_X540_cap = {
	128,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	128,		/* default number of rx queues */
	64,		/* maximum number of rx groups */
	1,		/* minimum number of rx groups */
	1,		/* default number of rx groups */
	128,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	15500,		/* maximum MTU size */
	0xFF8,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	200,		/* default interrupt throttle rate */
	64,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	(IXGBE_EICR_LSC
	| IXGBE_EICR_GPI_SDP1
	| IXGBE_EICR_GPI_SDP2), /* "other" interrupt types handled */

	(IXGBE_SDP1_GPIEN
	| IXGBE_SDP2_GPIEN), /* "other" interrupt types enable mask */

	(IXGBE_FLAG_DCA_CAPABLE
	| IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE
	| IXGBE_FLAG_RSC_CAPABLE) /* capability flags */
};
328 328
329 329 /*
330 330 * Module Initialization Functions.
331 331 */
332 332
/*
 * _init - Loadable-module entry point.
 *
 * Registers the GLDv3 hooks in our dev_ops and installs the module.
 * On installation failure the mac ops are torn down again so the
 * module can be cleanly re-loaded.
 */
int
_init(void)
{
	int rc;

	mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);

	rc = mod_install(&ixgbe_modlinkage);
	if (rc != DDI_SUCCESS)
		mac_fini_ops(&ixgbe_dev_ops);

	return (rc);
}
348 348
/*
 * _fini - Loadable-module unload entry point.
 *
 * Removes the module; the mac ops are only unhooked once the removal
 * has actually succeeded (mod_remove fails while instances are attached).
 */
int
_fini(void)
{
	int rc;

	rc = mod_remove(&ixgbe_modlinkage);
	if (rc == DDI_SUCCESS)
		mac_fini_ops(&ixgbe_dev_ops);

	return (rc);
}
362 362
/*
 * _info - Report module information via the module linkage.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&ixgbe_modlinkage, modinfop));
}
372 372
373 373 /*
374 374 * ixgbe_attach - Driver attach.
375 375 *
376 376 * This function is the device specific initialization entry
377 377 * point. This entry point is required and must be written.
378 378 * The DDI_ATTACH command must be provided in the attach entry
379 379 * point. When attach() is called with cmd set to DDI_ATTACH,
380 380 * all normal kernel services (such as kmem_alloc(9F)) are
381 381 * available for use by the driver.
382 382 *
383 383 * The attach() function will be called once for each instance
384 384 * of the device on the system with cmd set to DDI_ATTACH.
385 385 * Until attach() succeeds, the only driver entry points which
386 386 * may be called are open(9E) and getinfo(9E).
387 387 */
static int
ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	ixgbe_t *ixgbe;
	struct ixgbe_osdep *osdep;
	struct ixgbe_hw *hw;
	int instance;
	char taskqname[32];

	/*
	 * Check the command and perform corresponding operations
	 */
	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		return (ixgbe_resume(devinfo));

	case DDI_ATTACH:
		break;
	}

	/* Get the device instance */
	instance = ddi_get_instance(devinfo);

	/*
	 * Allocate memory for the instance data structure.
	 * KM_SLEEP cannot fail; the structure is zeroed, so all
	 * attach_progress bits and handles start out clear.
	 */
	ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP);

	ixgbe->dip = devinfo;
	ixgbe->instance = instance;

	/* Cross-link the soft state, osdep and shared-code hw struct. */
	hw = &ixgbe->hw;
	osdep = &ixgbe->osdep;
	hw->back = osdep;
	osdep->ixgbe = ixgbe;

	/* Attach the instance pointer to the dev_info data structure */
	ddi_set_driver_private(devinfo, ixgbe);

	/*
	 * Initialize for fma support.
	 *
	 * From here on, each successful step sets a bit in
	 * attach_progress; on any failure we jump to attach_fail, and
	 * ixgbe_unconfigure() tears down exactly the steps recorded.
	 */
	ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE,
	    0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
	ixgbe_fm_init(ixgbe);
	ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT;

	/*
	 * Map PCI config space registers
	 */
	if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to map PCI configurations");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;

	/*
	 * Identify the chipset family
	 */
	if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to identify hardware");
		goto attach_fail;
	}

	/*
	 * Map device registers
	 */
	if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to map device registers");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP;

	/*
	 * Initialize driver parameters
	 */
	ixgbe_init_properties(ixgbe);
	ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS;

	/*
	 * Register interrupt callback.
	 * NOTE(review): no attach_progress flag is recorded for this
	 * step; ixgbe_unconfigure() relies on cb_hdl to know whether
	 * to unregister — verify that path handles a failed
	 * registration.
	 */
	if (ixgbe_intr_cb_register(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to register interrupt callback");
		goto attach_fail;
	}

	/*
	 * Allocate interrupts
	 */
	if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to allocate interrupts");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;

	/*
	 * Allocate rx/tx rings based on the ring numbers.
	 * The actual numbers of rx/tx rings are decided by the number of
	 * allocated interrupt vectors, so we should allocate the rings after
	 * interrupts are allocated.
	 */
	if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to allocate rx and tx rings");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;

	/*
	 * Map rings to interrupt vectors
	 */
	if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to map interrupts to vectors");
		goto attach_fail;
	}

	/*
	 * Add interrupt handlers
	 */
	if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to add interrupt handlers");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;

	/*
	 * Create a taskq for sfp-change
	 */
	(void) sprintf(taskqname, "ixgbe%d_sfp_taskq", instance);
	if ((ixgbe->sfp_taskq = ddi_taskq_create(devinfo, taskqname,
	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
		ixgbe_error(ixgbe, "sfp_taskq create failed");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_SFP_TASKQ;

	/*
	 * Create a taskq for over-temp
	 */
	(void) sprintf(taskqname, "ixgbe%d_overtemp_taskq", instance);
	if ((ixgbe->overtemp_taskq = ddi_taskq_create(devinfo, taskqname,
	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
		ixgbe_error(ixgbe, "overtemp_taskq create failed");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_OVERTEMP_TASKQ;

	/*
	 * Initialize driver parameters
	 */
	if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to initialize driver settings");
		goto attach_fail;
	}

	/*
	 * Initialize mutexes for this device.
	 * Do this before enabling the interrupt handler and
	 * register the softint to avoid the condition where
	 * interrupt handler can try using uninitialized mutex.
	 */
	ixgbe_init_locks(ixgbe);
	ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS;

	/*
	 * Initialize chipset hardware
	 */
	if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to initialize adapter");
		goto attach_fail;
	}
	/*
	 * Give the link up to IXGBE_LINK_UP_TIME tenths of a second
	 * (expressed here in nanoseconds) to come up before declaring
	 * it down; see ixgbe_link_timer()/ixgbe_driver_link_check().
	 */
	ixgbe->link_check_complete = B_FALSE;
	ixgbe->link_check_hrtime = gethrtime() +
	    (IXGBE_LINK_UP_TIME * 100000000ULL);
	ixgbe->attach_progress |= ATTACH_PROGRESS_INIT;

	if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
		goto attach_fail;
	}

	/*
	 * Initialize statistics
	 */
	if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to initialize statistics");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_STATS;

	/*
	 * Register the driver to the MAC
	 */
	if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to register MAC");
		goto attach_fail;
	}
	mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
	ixgbe->attach_progress |= ATTACH_PROGRESS_MAC;

	ixgbe->periodic_id = ddi_periodic_add(ixgbe_link_timer, ixgbe,
	    IXGBE_CYCLIC_PERIOD, DDI_IPL_0);
	if (ixgbe->periodic_id == 0) {
		ixgbe_error(ixgbe, "Failed to add the link check timer");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_LINK_TIMER;

	/*
	 * Now that mutex locks are initialized, and the chip is also
	 * initialized, enable interrupts.
	 */
	if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to enable DDI interrupts");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;

	ixgbe_log(ixgbe, "%s, %s", ixgbe_ident, ixgbe_version);
	atomic_or_32(&ixgbe->ixgbe_state, IXGBE_INITIALIZED);

	return (DDI_SUCCESS);

attach_fail:
	/* Tear down only the steps recorded in attach_progress. */
	ixgbe_unconfigure(devinfo, ixgbe);
	return (DDI_FAILURE);
}
617 617
618 618 /*
619 619 * ixgbe_detach - Driver detach.
620 620 *
621 621 * The detach() function is the complement of the attach routine.
622 622 * If cmd is set to DDI_DETACH, detach() is used to remove the
623 623 * state associated with a given instance of a device node
624 624 * prior to the removal of that instance from the system.
625 625 *
626 626 * The detach() function will be called once for each instance
627 627 * of the device for which there has been a successful attach()
628 628 * once there are no longer any opens on the device.
629 629 *
630 630 * Interrupts routine are disabled, All memory allocated by this
631 631 * driver are freed.
632 632 */
static int
ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	ixgbe_t *ixgbe;

	/*
	 * Check detach command
	 */
	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		return (ixgbe_suspend(devinfo));

	case DDI_DETACH:
		break;
	}

	/*
	 * Get the pointer to the driver private data structure
	 */
	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
	if (ixgbe == NULL)
		return (DDI_FAILURE);

	/*
	 * If the device is still running, it needs to be stopped first.
	 * This check is necessary because under some specific circumstances,
	 * the detach routine can be called without stopping the interface
	 * first.
	 *
	 * The STARTED bit is cleared before taking gen_lock so that
	 * concurrent paths observing the state see the device as
	 * stopping; the actual stop is done under gen_lock.
	 */
	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
		atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);
		mutex_enter(&ixgbe->gen_lock);
		ixgbe_stop(ixgbe, B_TRUE);
		mutex_exit(&ixgbe->gen_lock);
		/* Disable and stop the watchdog timer */
		ixgbe_disable_watchdog_timer(ixgbe);
	}

	/*
	 * Check if there are still rx buffers held by the upper layer.
	 * If so, fail the detach.  Note the device stays stopped in
	 * this case; a later detach attempt can still succeed once the
	 * buffers are returned.
	 */
	if (!ixgbe_rx_drain(ixgbe))
		return (DDI_FAILURE);

	/*
	 * Do the remaining unconfigure routines
	 */
	ixgbe_unconfigure(devinfo, ixgbe);

	return (DDI_SUCCESS);
}
688 688
689 689 /*
690 690 * quiesce(9E) entry point.
691 691 *
692 692 * This function is called when the system is single-threaded at high
693 693 * PIL with preemption disabled. Therefore, this function must not be
694 694 * blocked.
695 695 *
696 696 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
697 697 * DDI_FAILURE indicates an error condition and should almost never happen.
698 698 */
/*
 * quiesce(9E) entry point.
 *
 * Called when the system is single-threaded at high PIL with preemption
 * disabled, so nothing here may block.  Silences the device: interrupts
 * off, firmware back in control, MAC and PHY reset.
 *
 * Returns DDI_SUCCESS on success; DDI_FAILURE only if the instance has
 * no driver-private state.
 */
static int
ixgbe_quiesce(dev_info_t *devinfo)
{
	ixgbe_t *ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
	struct ixgbe_hw *hw;

	if (ixgbe == NULL)
		return (DDI_FAILURE);

	hw = &ixgbe->hw;

	/* Mask all adapter interrupt sources. */
	ixgbe_disable_adapter_interrupts(ixgbe);

	/* Tell the firmware the driver is no longer in control. */
	ixgbe_release_driver_control(hw);

	/* Reset the chipset, then the PHY; failures are not actionable here. */
	(void) ixgbe_reset_hw(hw);
	(void) ixgbe_reset_phy(hw);

	return (DDI_SUCCESS);
}
734 734
/*
 * ixgbe_unconfigure - Tear down an instance, undoing exactly the attach
 * steps recorded in ixgbe->attach_progress.  Called both from a failed
 * ixgbe_attach() (partial progress) and from ixgbe_detach() (full
 * progress).  Frees the soft state last.
 */
static void
ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe)
{
	/*
	 * Disable interrupt
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
		(void) ixgbe_disable_intrs(ixgbe);
	}

	/*
	 * remove the link check timer
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_LINK_TIMER) {
		if (ixgbe->periodic_id != NULL) {
			ddi_periodic_delete(ixgbe->periodic_id);
			ixgbe->periodic_id = NULL;
		}
	}

	/*
	 * Unregister MAC
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) {
		(void) mac_unregister(ixgbe->mac_hdl);
	}

	/*
	 * Free statistics
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) {
		kstat_delete((kstat_t *)ixgbe->ixgbe_ks);
	}

	/*
	 * Remove interrupt handlers
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
		ixgbe_rem_intr_handlers(ixgbe);
	}

	/*
	 * Remove taskq for sfp-status-change
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_SFP_TASKQ) {
		ddi_taskq_destroy(ixgbe->sfp_taskq);
	}

	/*
	 * Remove taskq for over-temp
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_OVERTEMP_TASKQ) {
		ddi_taskq_destroy(ixgbe->overtemp_taskq);
	}

	/*
	 * Remove interrupts
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
		ixgbe_rem_intrs(ixgbe);
	}

	/*
	 * Unregister interrupt callback handler, but only if the
	 * registration in ixgbe_attach() actually succeeded.  The soft
	 * state is kmem_zalloc'ed, so cb_hdl is NULL when
	 * ixgbe_intr_cb_register() failed or never ran; calling
	 * ddi_cb_unregister() with a NULL handle is invalid.
	 */
	if (ixgbe->cb_hdl != NULL) {
		(void) ddi_cb_unregister(ixgbe->cb_hdl);
	}

	/*
	 * Remove driver properties
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) {
		(void) ddi_prop_remove_all(devinfo);
	}

	/*
	 * Stop the chipset
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) {
		mutex_enter(&ixgbe->gen_lock);
		ixgbe_chip_stop(ixgbe);
		mutex_exit(&ixgbe->gen_lock);
	}

	/*
	 * Free register handle
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
		if (ixgbe->osdep.reg_handle != NULL)
			ddi_regs_map_free(&ixgbe->osdep.reg_handle);
	}

	/*
	 * Free PCI config handle
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
		if (ixgbe->osdep.cfg_handle != NULL)
			pci_config_teardown(&ixgbe->osdep.cfg_handle);
	}

	/*
	 * Free locks
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) {
		ixgbe_destroy_locks(ixgbe);
	}

	/*
	 * Free the rx/tx rings
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
		ixgbe_free_rings(ixgbe);
	}

	/*
	 * Unregister FMA capabilities
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) {
		ixgbe_fm_fini(ixgbe);
	}

	/*
	 * Free the driver data structure
	 */
	kmem_free(ixgbe, sizeof (ixgbe_t));

	ddi_set_driver_private(devinfo, NULL);
}
862 862
863 863 /*
864 864 * ixgbe_register_mac - Register the driver and its function pointers with
865 865 * the GLD interface.
866 866 */
/*
 * ixgbe_register_mac - Register the driver and its function pointers with
 * the GLD interface.
 *
 * Returns IXGBE_SUCCESS on success, IXGBE_FAILURE if the registration
 * structure cannot be allocated or mac_register() rejects it.
 */
static int
ixgbe_register_mac(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	mac_register_t *regp;
	int err;

	regp = mac_alloc(MAC_VERSION);
	if (regp == NULL)
		return (IXGBE_FAILURE);

	/* Describe this instance to the MAC layer. */
	regp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	regp->m_driver = ixgbe;
	regp->m_dip = ixgbe->dip;
	regp->m_src_addr = hw->mac.addr;
	regp->m_callbacks = &ixgbe_m_callbacks;
	regp->m_min_sdu = 0;
	regp->m_max_sdu = ixgbe->default_mtu;
	regp->m_margin = VLAN_TAGSZ;
	regp->m_priv_props = ixgbe_priv_props;
	regp->m_v12n = MAC_VIRT_LEVEL1;

	err = mac_register(regp, &ixgbe->mac_hdl);

	/* The registration structure is copied; free it either way. */
	mac_free(regp);

	return (err == 0 ? IXGBE_SUCCESS : IXGBE_FAILURE);
}
894 894
895 895 /*
896 896 * ixgbe_identify_hardware - Identify the type of the chipset.
897 897 */
static int
ixgbe_identify_hardware(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	struct ixgbe_osdep *osdep = &ixgbe->osdep;

	/*
	 * Get the device id (read the PCI identification registers
	 * from config space for the shared code).
	 */
	hw->vendor_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
	hw->device_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
	hw->revision_id =
	    pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
	hw->subsystem_device_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
	hw->subsystem_vendor_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);

	/*
	 * Set the mac type of the adapter based on the device id
	 */
	if (ixgbe_set_mac_type(hw) != IXGBE_SUCCESS) {
		return (IXGBE_FAILURE);
	}

	/*
	 * Install adapter capabilities.
	 *
	 * NOTE(review): the |= adjustments below mutate the shared
	 * static capability tables, not a per-instance copy; with
	 * mixed device ids of the same MAC family in one system the
	 * added bits would be seen by all instances — verify.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		IXGBE_DEBUGLOG_0(ixgbe, "identify 82598 adapter\n");
		ixgbe->capab = &ixgbe_82598eb_cap;

		/* Copper 82598 parts add fan-failure detection on SDP1. */
		if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) {
			ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
			ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1;
			ixgbe->capab->other_gpie |= IXGBE_SDP1_GPIEN;
		}
		break;

	case ixgbe_mac_82599EB:
		IXGBE_DEBUGLOG_0(ixgbe, "identify 82599 adapter\n");
		ixgbe->capab = &ixgbe_82599eb_cap;

		/* The T3 LOM variant adds a temperature sensor on SDP0. */
		if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) {
			ixgbe->capab->flags |= IXGBE_FLAG_TEMP_SENSOR_CAPABLE;
			ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP0;
			ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN;
		}
		break;

	case ixgbe_mac_X540:
		IXGBE_DEBUGLOG_0(ixgbe, "identify X540 adapter\n");
		ixgbe->capab = &ixgbe_X540_cap;
		/*
		 * For now, X540 is all set in its capab structure.
		 * As other X540 variants show up, things can change here.
		 */
		break;

	default:
		IXGBE_DEBUGLOG_1(ixgbe,
		    "adapter not supported in ixgbe_identify_hardware(): %d\n",
		    hw->mac.type);
		return (IXGBE_FAILURE);
	}

	return (IXGBE_SUCCESS);
}
969 969
/*
 * ixgbe_regs_map - Map the device registers.
 *
 * Determines the size of register set IXGBE_ADAPTER_REGSET and maps it
 * into kernel virtual address space, recording the base address in
 * hw->hw_addr and the access handle in osdep->reg_handle.
 */
static int
ixgbe_regs_map(ixgbe_t *ixgbe)
{
	dev_info_t *dip = ixgbe->dip;
	struct ixgbe_hw *hw = &ixgbe->hw;
	struct ixgbe_osdep *osdep = &ixgbe->osdep;
	off_t regs_size;
	int rv;

	/* How large is the register set we need to map? */
	rv = ddi_dev_regsize(dip, IXGBE_ADAPTER_REGSET, &regs_size);
	if (rv != DDI_SUCCESS)
		return (IXGBE_FAILURE);

	/* Map the entire register set. */
	rv = ddi_regs_map_setup(dip, IXGBE_ADAPTER_REGSET,
	    (caddr_t *)&hw->hw_addr, 0, regs_size, &ixgbe_regs_acc_attr,
	    &osdep->reg_handle);
	if (rv != DDI_SUCCESS)
		return (IXGBE_FAILURE);

	return (IXGBE_SUCCESS);
}
1002 1002
/*
 * ixgbe_init_properties - Initialize driver properties.
 *
 * Pulls tunables from the driver .conf file and then initializes the
 * remaining driver parameters (ixgbe_init_params).
 */
static void
ixgbe_init_properties(ixgbe_t *ixgbe)
{
	/*
	 * Get conf file properties, including link settings
	 * jumbo frames, ring number, descriptor number, etc.
	 */
	ixgbe_get_conf(ixgbe);

	/* Initialize additional driver parameters. */
	ixgbe_init_params(ixgbe);
}
1017 1017
/*
 * ixgbe_init_driver_settings - Initialize driver settings.
 *
 * The settings include hardware function pointers, bus information,
 * rx/tx rings settings, link state, and any other parameters that
 * need to be setup during driver initialization.
 */
static int
ixgbe_init_driver_settings(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	dev_info_t *devinfo = ixgbe->dip;
	uint32_t rx_size, tx_size, ring_per_group;
	int i;

	/*
	 * Install the chipset-specific hardware function pointers.
	 */
	if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS)
		return (IXGBE_FAILURE);

	/* Cache the system page size. */
	ixgbe->sys_page_size = ddi_ptob(devinfo, (ulong_t)1);

	/*
	 * Set rx buffer size: max frame plus IP header alignment room,
	 * rounded up to a multiple of 1K as required by the chipset.
	 */
	rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM;
	ixgbe->rx_buf_size = ((rx_size + ((1U << 10) - 1)) >> 10) << 10;

	/*
	 * Set tx buffer size, rounded up to a multiple of 1K the same way.
	 */
	tx_size = ixgbe->max_frame_size;
	ixgbe->tx_buf_size = ((tx_size + ((1U << 10) - 1)) >> 10) << 10;

	/*
	 * Initialize rx/tx rings/groups parameters
	 */
	ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		ixgbe_rx_ring_t *rxr = &ixgbe->rx_rings[i];

		rxr->index = i;
		rxr->ixgbe = ixgbe;
		rxr->group_index = i / ring_per_group;
		rxr->hw_index = ixgbe_get_hw_rx_index(ixgbe, i);
	}

	for (i = 0; i < ixgbe->num_rx_groups; i++) {
		ixgbe_rx_group_t *rxg = &ixgbe->rx_groups[i];

		rxg->index = i;
		rxg->ixgbe = ixgbe;
	}

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		ixgbe_tx_ring_t *txr = &ixgbe->tx_rings[i];

		txr->index = i;
		txr->ixgbe = ixgbe;
		/* Select the recycle routine matching head write-back mode */
		txr->tx_recycle = ixgbe->tx_head_wb_enable ?
		    ixgbe_tx_recycle_head_wb : ixgbe_tx_recycle_legacy;
		txr->ring_size = ixgbe->tx_ring_size;
		txr->free_list_size = ixgbe->tx_ring_size +
		    (ixgbe->tx_ring_size >> 1);
	}

	/*
	 * Propagate vector 0's interrupt throttling rate to all vectors.
	 */
	for (i = 1; i < MAX_INTR_VECTOR; i++)
		ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0];

	/*
	 * The initial link state should be "unknown"
	 */
	ixgbe->link_state = LINK_STATE_UNKNOWN;

	return (IXGBE_SUCCESS);
}
1113 1113
/*
 * ixgbe_init_locks - Initialize locks.
 *
 * All mutexes are created at the driver's interrupt priority so they
 * may be acquired from interrupt context.
 */
static void
ixgbe_init_locks(ixgbe_t *ixgbe)
{
	void *pri = DDI_INTR_PRI(ixgbe->intr_pri);
	int i;

	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		mutex_init(&ixgbe->rx_rings[i].rx_lock, NULL,
		    MUTEX_DRIVER, pri);
	}

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		ixgbe_tx_ring_t *txr = &ixgbe->tx_rings[i];

		mutex_init(&txr->tx_lock, NULL, MUTEX_DRIVER, pri);
		mutex_init(&txr->recycle_lock, NULL, MUTEX_DRIVER, pri);
		mutex_init(&txr->tcb_head_lock, NULL, MUTEX_DRIVER, pri);
		mutex_init(&txr->tcb_tail_lock, NULL, MUTEX_DRIVER, pri);
	}

	mutex_init(&ixgbe->gen_lock, NULL, MUTEX_DRIVER, pri);
	mutex_init(&ixgbe->watchdog_lock, NULL, MUTEX_DRIVER, pri);
}
1148 1148
/*
 * ixgbe_destroy_locks - Destroy locks.
 *
 * Mirror of ixgbe_init_locks(): tears down every mutex created there.
 */
static void
ixgbe_destroy_locks(ixgbe_t *ixgbe)
{
	int i;

	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mutex_destroy(&ixgbe->rx_rings[i].rx_lock);

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		ixgbe_tx_ring_t *txr = &ixgbe->tx_rings[i];

		mutex_destroy(&txr->tx_lock);
		mutex_destroy(&txr->recycle_lock);
		mutex_destroy(&txr->tcb_head_lock);
		mutex_destroy(&txr->tcb_tail_lock);
	}

	mutex_destroy(&ixgbe->gen_lock);
	mutex_destroy(&ixgbe->watchdog_lock);
}
1175 1175
/*
 * ixgbe_resume - DDI resume entry point.
 *
 * Restarts the chipset if it was running (IXGBE_STARTED) at suspend
 * time, clears the SUSPENDED flag, and kicks the MAC layer's tx rings
 * so queued transmits can resume.  All work is done under gen_lock.
 */
static int
ixgbe_resume(dev_info_t *devinfo)
{
	ixgbe_t *ixgbe;
	int i;

	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
	if (ixgbe == NULL)
		return (DDI_FAILURE);

	mutex_enter(&ixgbe->gen_lock);

	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
		/* B_FALSE: rx/tx DMA buffers were kept across suspend */
		if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
			mutex_exit(&ixgbe->gen_lock);
			return (DDI_FAILURE);
		}

		/*
		 * Enable and start the watchdog timer
		 */
		ixgbe_enable_watchdog_timer(ixgbe);
	}

	/* Only clear SUSPENDED once the chip is running again */
	atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_SUSPENDED);

	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
		/* Wake the MAC layer's tx rings */
		for (i = 0; i < ixgbe->num_tx_rings; i++) {
			mac_tx_ring_update(ixgbe->mac_hdl,
			    ixgbe->tx_rings[i].ring_handle);
		}
	}

	mutex_exit(&ixgbe->gen_lock);

	return (DDI_SUCCESS);
}
1213 1213
/*
 * ixgbe_suspend - DDI suspend entry point.
 *
 * Marks the driver SUSPENDED, and if the chipset was started, stops it
 * (keeping rx/tx buffers allocated) and disables the watchdog timer.
 */
static int
ixgbe_suspend(dev_info_t *devinfo)
{
	ixgbe_t *ixgbe;

	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
	if (ixgbe == NULL)
		return (DDI_FAILURE);

	mutex_enter(&ixgbe->gen_lock);

	/* Set SUSPENDED first so concurrent paths see the transition */
	atomic_or_32(&ixgbe->ixgbe_state, IXGBE_SUSPENDED);
	if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
		/* Not started: nothing to stop */
		mutex_exit(&ixgbe->gen_lock);
		return (DDI_SUCCESS);
	}
	/* B_FALSE: keep the rx/tx DMA buffers for resume */
	ixgbe_stop(ixgbe, B_FALSE);

	mutex_exit(&ixgbe->gen_lock);

	/*
	 * Disable and stop the watchdog timer
	 */
	ixgbe_disable_watchdog_timer(ixgbe);

	return (DDI_SUCCESS);
}
1241 1241
/*
 * ixgbe_init - Initialize the device.
 *
 * Resets the chipset, initializes and validates the EEPROM, programs
 * default flow control thresholds, sets up the link, and starts the
 * chipset hardware.  On any failure the PHY is reset and an FMA
 * service-lost impact is reported.
 *
 * Returns IXGBE_SUCCESS or IXGBE_FAILURE.
 */
static int
ixgbe_init(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	u8 pbanum[IXGBE_PBANUM_LENGTH];

	mutex_enter(&ixgbe->gen_lock);

	/*
	 * Reset chipset to put the hardware in a known state
	 * before we try to do anything with the eeprom.
	 */
	if (ixgbe_reset_hw(hw) != IXGBE_SUCCESS) {
		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
		goto init_fail;
	}

	/*
	 * Need to init eeprom before validating the checksum.
	 */
	if (ixgbe_init_eeprom_params(hw) < 0) {
		/* Fixed typo in user-visible message ("intitialize") */
		ixgbe_error(ixgbe,
		    "Unable to initialize the eeprom interface.");
		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
		goto init_fail;
	}

	/*
	 * NVM validation
	 */
	if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state. Call it again,
		 * if it fails a second time it's a real issue.
		 */
		if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
			ixgbe_error(ixgbe,
			    "Invalid NVM checksum. Please contact "
			    "the vendor to update the NVM.");
			ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
			goto init_fail;
		}
	}

	/*
	 * Setup default flow control thresholds - enable/disable
	 * & flow control type is controlled by ixgbe.conf
	 */
	hw->fc.high_water[0] = DEFAULT_FCRTH;
	hw->fc.low_water[0] = DEFAULT_FCRTL;
	hw->fc.pause_time = DEFAULT_FCPAUSE;
	hw->fc.send_xon = B_TRUE;

	/*
	 * Initialize link settings
	 */
	(void) ixgbe_driver_setup_link(ixgbe, B_FALSE);

	/*
	 * Initialize the chipset hardware
	 */
	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
		goto init_fail;
	}

	/*
	 * Read identifying information and place in devinfo.
	 */
	pbanum[0] = '\0';
	(void) ixgbe_read_pba_string(hw, pbanum, sizeof (pbanum));
	if (*pbanum != '\0') {
		(void) ddi_prop_update_string(DDI_DEV_T_NONE, ixgbe->dip,
		    "printed-board-assembly", (char *)pbanum);
	}

	/* Make sure register access was clean before declaring success */
	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		goto init_fail;
	}

	mutex_exit(&ixgbe->gen_lock);
	return (IXGBE_SUCCESS);

init_fail:
	/*
	 * Reset PHY
	 */
	(void) ixgbe_reset_phy(hw);

	mutex_exit(&ixgbe->gen_lock);
	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
	return (IXGBE_FAILURE);
}
1339 1339
/*
 * ixgbe_chip_start - Initialize and start the chipset hardware.
 *
 * Caller must hold gen_lock.  Obtains and validates the MAC address,
 * initializes the hardware, programs interrupt vectors, unicast and
 * multicast tables, and interrupt throttling, then takes firmware
 * control of the device.  Returns IXGBE_SUCCESS or IXGBE_FAILURE.
 */
static int
ixgbe_chip_start(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	int ret_val, i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Get the mac address
	 * This function should handle SPARC case correctly.
	 */
	if (!ixgbe_find_mac_address(ixgbe)) {
		ixgbe_error(ixgbe, "Failed to get the mac address");
		return (IXGBE_FAILURE);
	}

	/*
	 * Validate the mac address
	 */
	(void) ixgbe_init_rx_addrs(hw);
	if (!is_valid_mac_addr(hw->mac.addr)) {
		ixgbe_error(ixgbe, "Invalid mac address");
		return (IXGBE_FAILURE);
	}

	/*
	 * Configure/Initialize hardware
	 *
	 * Note: IXGBE_ERR_EEPROM_VERSION is deliberately a warn-and-continue
	 * case; only other errors cause the function to fail here.
	 */
	ret_val = ixgbe_init_hw(hw);
	if (ret_val != IXGBE_SUCCESS) {
		if (ret_val == IXGBE_ERR_EEPROM_VERSION) {
			ixgbe_error(ixgbe,
			    "This 82599 device is pre-release and contains"
			    " outdated firmware, please contact your hardware"
			    " vendor for a replacement.");
		} else {
			ixgbe_error(ixgbe, "Failed to initialize hardware");
			return (IXGBE_FAILURE);
		}
	}

	/*
	 * Re-enable relaxed ordering for performance. It is disabled
	 * by default in the hardware init.
	 */
	if (ixgbe->relax_order_enable == B_TRUE)
		ixgbe_enable_relaxed_ordering(hw);

	/*
	 * Setup adapter interrupt vectors
	 */
	ixgbe_setup_adapter_vector(ixgbe);

	/*
	 * Initialize unicast addresses.
	 */
	ixgbe_init_unicst(ixgbe);

	/*
	 * Setup and initialize the mctable structures.
	 */
	ixgbe_setup_multicst(ixgbe);

	/*
	 * Set interrupt throttling rate
	 */
	for (i = 0; i < ixgbe->intr_cnt; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]);
	}

	/*
	 * Save the state of the phy
	 */
	ixgbe_get_hw_state(ixgbe);

	/*
	 * Make sure driver has control
	 */
	ixgbe_get_driver_control(hw);

	return (IXGBE_SUCCESS);
}
1426 1426
/*
 * ixgbe_chip_stop - Stop the chipset hardware
 *
 * Caller must hold gen_lock.  Releases driver control to firmware,
 * then resets the chipset and the PHY.  The call order matters:
 * control is handed back before the hardware is reset.
 */
static void
ixgbe_chip_stop(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Tell firmware driver is no longer in control
	 */
	ixgbe_release_driver_control(hw);

	/*
	 * Reset the chipset
	 */
	(void) ixgbe_reset_hw(hw);

	/*
	 * Reset PHY
	 */
	(void) ixgbe_reset_phy(hw);
}
1452 1452
/*
 * ixgbe_reset - Reset the chipset and re-start the driver.
 *
 * It involves stopping and re-starting the chipset,
 * and re-configuring the rx/tx rings.
 *
 * Must only be called while IXGBE_STARTED is set; the flag is cleared
 * for the duration of the stop/start cycle and restored afterwards.
 */
static int
ixgbe_reset(ixgbe_t *ixgbe)
{
	int i;

	/*
	 * Disable and stop the watchdog timer
	 */
	ixgbe_disable_watchdog_timer(ixgbe);

	mutex_enter(&ixgbe->gen_lock);

	ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED);
	atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);

	/* B_FALSE: retain the rx/tx DMA buffers across the reset */
	ixgbe_stop(ixgbe, B_FALSE);

	if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
		mutex_exit(&ixgbe->gen_lock);
		return (IXGBE_FAILURE);
	}

	/*
	 * After resetting, need to recheck the link status.
	 * Deadline is IXGBE_LINK_UP_TIME units of 100ms from now.
	 */
	ixgbe->link_check_complete = B_FALSE;
	ixgbe->link_check_hrtime = gethrtime() +
	    (IXGBE_LINK_UP_TIME * 100000000ULL);

	atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STARTED);

	/* Wake the MAC layer's tx rings unless we are suspended */
	if (!(ixgbe->ixgbe_state & IXGBE_SUSPENDED)) {
		for (i = 0; i < ixgbe->num_tx_rings; i++) {
			mac_tx_ring_update(ixgbe->mac_hdl,
			    ixgbe->tx_rings[i].ring_handle);
		}
	}

	mutex_exit(&ixgbe->gen_lock);

	/*
	 * Enable and start the watchdog timer
	 */
	ixgbe_enable_watchdog_timer(ixgbe);

	return (IXGBE_SUCCESS);
}
1506 1506
/*
 * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources.
 *
 * For every tx ring, walks the work list under recycle_lock, releasing
 * each outstanding tx control block (tcb) and collecting it on a local
 * pending list that is then handed back to the ring's free list.
 * Descriptor counters and ring head/tail pointers are reset to empty.
 */
static void
ixgbe_tx_clean(ixgbe_t *ixgbe)
{
	ixgbe_tx_ring_t *tx_ring;
	tx_control_block_t *tcb;
	link_list_t pending_list;
	uint32_t desc_num;
	int i, j;

	LINK_LIST_INIT(&pending_list);

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];

		mutex_enter(&tx_ring->recycle_lock);

		/*
		 * Clean the pending tx data - the pending packets in the
		 * work_list that have no chances to be transmitted again.
		 *
		 * We must ensure the chipset is stopped or the link is down
		 * before cleaning the transmit packets.
		 */
		desc_num = 0;
		for (j = 0; j < tx_ring->ring_size; j++) {
			tcb = tx_ring->work_list[j];
			if (tcb != NULL) {
				desc_num += tcb->desc_num;

				tx_ring->work_list[j] = NULL;

				/*
				 * NOTE(review): ixgbe_free_tcb() presumably
				 * releases the tcb's packet resources only;
				 * the tcb structure itself is queued below
				 * and recycled via ixgbe_put_free_list().
				 */
				ixgbe_free_tcb(tcb);

				LIST_PUSH_TAIL(&pending_list, &tcb->link);
			}
		}

		if (desc_num > 0) {
			atomic_add_32(&tx_ring->tbd_free, desc_num);
			/* The ring must now be completely free */
			ASSERT(tx_ring->tbd_free == tx_ring->ring_size);

			/*
			 * Reset the head and tail pointers of the tbd ring;
			 * Reset the writeback head if it's enable.
			 */
			tx_ring->tbd_head = 0;
			tx_ring->tbd_tail = 0;
			if (ixgbe->tx_head_wb_enable)
				*tx_ring->tbd_head_wb = 0;

			IXGBE_WRITE_REG(&ixgbe->hw,
			    IXGBE_TDH(tx_ring->index), 0);
			IXGBE_WRITE_REG(&ixgbe->hw,
			    IXGBE_TDT(tx_ring->index), 0);
		}

		mutex_exit(&tx_ring->recycle_lock);

		/*
		 * Add the tx control blocks in the pending list to
		 * the free list.
		 */
		ixgbe_put_free_list(tx_ring, &pending_list);
	}
}
1575 1575
/*
 * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be
 * transmitted.
 *
 * Polls tbd_free on every tx ring, sleeping 1ms between passes, for at
 * most TX_DRAIN_TIME passes.  No locking is required; tbd_free is read
 * opportunistically.  Returns B_TRUE once every ring is fully free,
 * B_FALSE if the timeout expires first.
 */
static boolean_t
ixgbe_tx_drain(ixgbe_t *ixgbe)
{
	boolean_t drained;
	int pass = 0, r;

	do {
		drained = B_TRUE;
		for (r = 0; r < ixgbe->num_tx_rings; r++) {
			ixgbe_tx_ring_t *txr = &ixgbe->tx_rings[r];

			if (txr->tbd_free != txr->ring_size) {
				drained = B_FALSE;
				break;
			}
		}

		if (drained)
			break;

		msec_delay(1);
	} while (++pass < TX_DRAIN_TIME);

	return (drained);
}
1614 1614
/*
 * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer.
 *
 * Polls rcb_pending, sleeping 1ms between checks, for at most
 * RX_DRAIN_TIME passes.  No locking is required; the counter is read
 * opportunistically.  Returns B_TRUE once all held rx buffers have
 * been returned, B_FALSE if the timeout expires first.
 */
static boolean_t
ixgbe_rx_drain(ixgbe_t *ixgbe)
{
	boolean_t released;
	int pass = 0;

	do {
		released = (ixgbe->rcb_pending == 0);
		if (released)
			break;

		msec_delay(1);
	} while (++pass < RX_DRAIN_TIME);

	return (released);
}
1645 1645
/*
 * ixgbe_start - Start the driver/chipset.
 *
 * Caller must hold gen_lock.  When alloc_buffer is B_TRUE, the rx data
 * structures and rx/tx DMA buffers are (re)allocated first.  All ring
 * locks are acquired (rx rings first, then tx rings) around chipset
 * start and ring setup, and released in reverse order.
 *
 * Returns IXGBE_SUCCESS or IXGBE_FAILURE.
 */
int
ixgbe_start(ixgbe_t *ixgbe, boolean_t alloc_buffer)
{
	int i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	if (alloc_buffer) {
		if (ixgbe_alloc_rx_data(ixgbe) != IXGBE_SUCCESS) {
			ixgbe_error(ixgbe,
			    "Failed to allocate software receive rings");
			return (IXGBE_FAILURE);
		}

		/* Allocate buffers for all the rx/tx rings */
		if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS) {
			ixgbe_error(ixgbe, "Failed to allocate DMA resource");
			return (IXGBE_FAILURE);
		}

		ixgbe->tx_ring_init = B_TRUE;
	} else {
		ixgbe->tx_ring_init = B_FALSE;
	}

	/* Acquire all ring locks: rx rings first, then tx rings */
	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
	for (i = 0; i < ixgbe->num_tx_rings; i++)
		mutex_enter(&ixgbe->tx_rings[i].tx_lock);

	/*
	 * Start the chipset hardware
	 */
	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
		goto start_failure;
	}

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		goto start_failure;
	}

	/*
	 * Setup the rx/tx rings
	 */
	ixgbe_setup_rings(ixgbe);

	/*
	 * ixgbe_start() will be called when resetting, however if reset
	 * happens, we need to clear the ERROR, STALL and OVERTEMP flags
	 * before enabling the interrupts.
	 */
	atomic_and_32(&ixgbe->ixgbe_state, ~(IXGBE_ERROR
	    | IXGBE_STALL| IXGBE_OVERTEMP));

	/*
	 * Enable adapter interrupts
	 * The interrupts must be enabled after the driver state is START
	 */
	ixgbe_enable_adapter_interrupts(ixgbe);

	/* Release ring locks in reverse acquisition order */
	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	return (IXGBE_SUCCESS);

start_failure:
	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);

	return (IXGBE_FAILURE);
}
1727 1727
/*
 * ixgbe_stop - Stop the driver/chipset.
 *
 * Caller must hold gen_lock.  Disables interrupts, drains pending tx,
 * stops the chipset under all ring locks (rx first, then tx, released
 * in reverse), reclaims pending tx resources, reports link down, and
 * optionally (free_buffer) frees the rx/tx DMA resources.
 */
void
ixgbe_stop(ixgbe_t *ixgbe, boolean_t free_buffer)
{
	int i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Disable the adapter interrupts
	 */
	ixgbe_disable_adapter_interrupts(ixgbe);

	/*
	 * Drain the pending tx packets
	 */
	(void) ixgbe_tx_drain(ixgbe);

	/* Acquire all ring locks: rx rings first, then tx rings */
	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
	for (i = 0; i < ixgbe->num_tx_rings; i++)
		mutex_enter(&ixgbe->tx_rings[i].tx_lock);

	/*
	 * Stop the chipset hardware
	 */
	ixgbe_chip_stop(ixgbe);

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
	}

	/*
	 * Clean the pending tx data/resources
	 */
	ixgbe_tx_clean(ixgbe);

	/* Release ring locks in reverse acquisition order */
	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	/* Report link loss to the MAC layer */
	if (ixgbe->link_state == LINK_STATE_UP) {
		ixgbe->link_state = LINK_STATE_UNKNOWN;
		mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
	}

	if (free_buffer) {
		/*
		 * Release the DMA/memory resources of rx/tx rings
		 */
		ixgbe_free_dma(ixgbe);
		ixgbe_free_rx_data(ixgbe);
	}
}
1785 1785
/*
 * ixgbe_cbfunc - Driver interface for generic DDI callbacks
 *
 * Currently only Interrupt Resource Management (IRM) add/remove
 * callbacks are handled; cbarg carries the vector count delta.
 */
/* ARGSUSED */
static int
ixgbe_cbfunc(dev_info_t *dip, ddi_cb_action_t cbaction, void *cbarg,
    void *arg1, void *arg2)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
	int count;

	switch (cbaction) {
	case DDI_CB_INTR_ADD:
	case DDI_CB_INTR_REMOVE:
		/* IRM callback */
		count = (int)(uintptr_t)cbarg;
		ASSERT(ixgbe->intr_type == DDI_INTR_TYPE_MSIX);
		DTRACE_PROBE2(ixgbe__irm__callback, int, count,
		    int, ixgbe->intr_cnt);
		if (ixgbe_intr_adjust(ixgbe, cbaction, count) !=
		    DDI_SUCCESS) {
			ixgbe_error(ixgbe,
			    "IRM CB: Failed to adjust interrupts");
			return (DDI_FAILURE);
		}
		return (DDI_SUCCESS);

	default:
		IXGBE_DEBUGLOG_1(ixgbe, "DDI CB: action 0x%x NOT supported",
		    cbaction);
		return (DDI_ENOTSUP);
	}
}
1821 1821
/*
 * ixgbe_intr_adjust - Adjust interrupt to respond to IRM request.
 *
 * Stops the chipset, tears down the existing MSI-X handlers, then
 * allocates (DDI_CB_INTR_ADD) or frees (DDI_CB_INTR_REMOVE) 'count'
 * vectors, remaps rings to vectors, reinstalls handlers, and restarts
 * the chipset.  The driver is marked SUSPENDED/INTR_ADJUST for the
 * duration so other paths stay out of the way.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE (with FMA service-lost reported).
 */
static int
ixgbe_intr_adjust(ixgbe_t *ixgbe, ddi_cb_action_t cbaction, int count)
{
	/*
	 * Fix: 'actual' is only assigned by ddi_intr_alloc() in the ADD
	 * path but is logged in the REMOVE failure path as well; it must
	 * be initialized to avoid reading an uninitialized value.
	 */
	int i, rc, actual = 0;

	if (count == 0)
		return (DDI_SUCCESS);

	/* Refuse adjustments that would leave us outside [min, max] */
	if ((cbaction == DDI_CB_INTR_ADD &&
	    ixgbe->intr_cnt + count > ixgbe->intr_cnt_max) ||
	    (cbaction == DDI_CB_INTR_REMOVE &&
	    ixgbe->intr_cnt - count < ixgbe->intr_cnt_min))
		return (DDI_FAILURE);

	if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
		return (DDI_FAILURE);
	}

	/* Detach ring interrupts from the MAC layer before teardown */
	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle, NULL);
	for (i = 0; i < ixgbe->num_tx_rings; i++)
		mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle, NULL);

	mutex_enter(&ixgbe->gen_lock);
	ixgbe->ixgbe_state &= ~IXGBE_STARTED;
	ixgbe->ixgbe_state |= IXGBE_INTR_ADJUST;
	ixgbe->ixgbe_state |= IXGBE_SUSPENDED;
	mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);

	/* B_FALSE: keep rx/tx DMA buffers across the adjustment */
	ixgbe_stop(ixgbe, B_FALSE);
	/*
	 * Disable interrupts
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
		rc = ixgbe_disable_intrs(ixgbe);
		ASSERT(rc == IXGBE_SUCCESS);
	}
	ixgbe->attach_progress &= ~ATTACH_PROGRESS_ENABLE_INTR;

	/*
	 * Remove interrupt handlers
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
		ixgbe_rem_intr_handlers(ixgbe);
	}
	ixgbe->attach_progress &= ~ATTACH_PROGRESS_ADD_INTR;

	/*
	 * Clear vect_map
	 */
	bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
	switch (cbaction) {
	case DDI_CB_INTR_ADD:
		rc = ddi_intr_alloc(ixgbe->dip, ixgbe->htable,
		    DDI_INTR_TYPE_MSIX, ixgbe->intr_cnt, count, &actual,
		    DDI_INTR_ALLOC_NORMAL);
		if (rc != DDI_SUCCESS || actual != count) {
			ixgbe_log(ixgbe, "Adjust interrupts failed."
			    "return: %d, irm cb size: %d, actual: %d",
			    rc, count, actual);
			goto intr_adjust_fail;
		}
		ixgbe->intr_cnt += count;
		break;

	case DDI_CB_INTR_REMOVE:
		/* Free the highest-numbered 'count' vectors */
		for (i = ixgbe->intr_cnt - count;
		    i < ixgbe->intr_cnt; i++) {
			rc = ddi_intr_free(ixgbe->htable[i]);
			ixgbe->htable[i] = NULL;
			if (rc != DDI_SUCCESS) {
				ixgbe_log(ixgbe, "Adjust interrupts failed."
				    "return: %d, irm cb size: %d, actual: %d",
				    rc, count, actual);
				goto intr_adjust_fail;
			}
		}
		ixgbe->intr_cnt -= count;
		break;
	}

	/*
	 * Get priority for first vector, assume remaining are all the same
	 */
	rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
	if (rc != DDI_SUCCESS) {
		ixgbe_log(ixgbe,
		    "Get interrupt priority failed: %d", rc);
		goto intr_adjust_fail;
	}
	rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
	if (rc != DDI_SUCCESS) {
		ixgbe_log(ixgbe, "Get interrupt cap failed: %d", rc);
		goto intr_adjust_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;

	/*
	 * Map rings to interrupt vectors
	 */
	if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe,
		    "IRM CB: Failed to map interrupts to vectors");
		goto intr_adjust_fail;
	}

	/*
	 * Add interrupt handlers
	 */
	if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "IRM CB: Failed to add interrupt handlers");
		goto intr_adjust_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;

	/*
	 * Now that mutex locks are initialized, and the chip is also
	 * initialized, enable interrupts.
	 */
	if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "IRM CB: Failed to enable DDI interrupts");
		goto intr_adjust_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
	if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "IRM CB: Failed to start");
		goto intr_adjust_fail;
	}
	ixgbe->ixgbe_state &= ~IXGBE_INTR_ADJUST;
	ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED;
	ixgbe->ixgbe_state |= IXGBE_STARTED;
	mutex_exit(&ixgbe->gen_lock);

	/* Re-attach ring interrupts to the MAC layer */
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle,
		    ixgbe->htable[ixgbe->rx_rings[i].intr_vector]);
	}
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle,
		    ixgbe->htable[ixgbe->tx_rings[i].intr_vector]);
	}

	/* Wakeup all Tx rings */
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		mac_tx_ring_update(ixgbe->mac_hdl,
		    ixgbe->tx_rings[i].ring_handle);
	}

	IXGBE_DEBUGLOG_3(ixgbe,
	    "IRM CB: interrupts new value: 0x%x(0x%x:0x%x).",
	    ixgbe->intr_cnt, ixgbe->intr_cnt_min, ixgbe->intr_cnt_max);
	return (DDI_SUCCESS);

intr_adjust_fail:
	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
	mutex_exit(&ixgbe->gen_lock);
	return (DDI_FAILURE);
}
1983 1983
/*
 * ixgbe_intr_cb_register - Register interrupt callback function.
 *
 * Registers ixgbe_cbfunc with the DDI so the framework can notify the
 * driver of interrupt availability changes (IRM).  The callback handle
 * is stored in ixgbe->cb_hdl for later unregistration.
 */
static int
ixgbe_intr_cb_register(ixgbe_t *ixgbe)
{
	int ret;

	ret = ddi_cb_register(ixgbe->dip, DDI_CB_FLAG_INTR, ixgbe_cbfunc,
	    ixgbe, NULL, &ixgbe->cb_hdl);
	if (ret != DDI_SUCCESS)
		return (IXGBE_FAILURE);

	IXGBE_DEBUGLOG_0(ixgbe, "Interrupt callback function registered.");
	return (IXGBE_SUCCESS);
}
1997 1997
/*
 * ixgbe_alloc_rings - Allocate memory space for rx/tx rings.
 *
 * Allocates the rx ring, tx ring and rx group arrays, sized by the
 * currently configured ring/group counts.  Allocation is KM_NOSLEEP,
 * so each result must be checked.  On any failure every previously
 * allocated array is released (goto-based unwind replaces the three
 * duplicated hand-rolled cleanup paths), the pointers are reset to
 * NULL, and IXGBE_FAILURE is returned.
 */
static int
ixgbe_alloc_rings(ixgbe_t *ixgbe)
{
	/*
	 * Allocate memory space for rx rings
	 */
	ixgbe->rx_rings = kmem_zalloc(
	    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings,
	    KM_NOSLEEP);

	if (ixgbe->rx_rings == NULL)
		return (IXGBE_FAILURE);

	/*
	 * Allocate memory space for tx rings
	 */
	ixgbe->tx_rings = kmem_zalloc(
	    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings,
	    KM_NOSLEEP);

	if (ixgbe->tx_rings == NULL)
		goto free_rx_rings;

	/*
	 * Allocate memory space for rx ring groups
	 */
	ixgbe->rx_groups = kmem_zalloc(
	    sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups,
	    KM_NOSLEEP);

	if (ixgbe->rx_groups == NULL)
		goto free_tx_rings;

	return (IXGBE_SUCCESS);

free_tx_rings:
	kmem_free(ixgbe->tx_rings,
	    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
	ixgbe->tx_rings = NULL;
free_rx_rings:
	kmem_free(ixgbe->rx_rings,
	    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
	ixgbe->rx_rings = NULL;
	return (IXGBE_FAILURE);
}
2048 2048
/*
 * ixgbe_free_rings - Free the memory space of rx/tx rings.
 *
 * Releases whichever of the three arrays allocated by ixgbe_alloc_rings
 * are present and NULLs the pointers; safe to call on a partially
 * allocated or already-freed state.
 */
static void
ixgbe_free_rings(ixgbe_t *ixgbe)
{
	/* Rx ring group array */
	if (ixgbe->rx_groups != NULL) {
		kmem_free(ixgbe->rx_groups,
		    sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups);
		ixgbe->rx_groups = NULL;
	}

	/* Tx ring array */
	if (ixgbe->tx_rings != NULL) {
		kmem_free(ixgbe->tx_rings,
		    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
		ixgbe->tx_rings = NULL;
	}

	/* Rx ring array */
	if (ixgbe->rx_rings != NULL) {
		kmem_free(ixgbe->rx_rings,
		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
		ixgbe->rx_rings = NULL;
	}
}
2073 2073
/*
 * ixgbe_alloc_rx_data - Allocate receive software data for all rx rings.
 *
 * If any single ring's allocation fails, everything allocated so far is
 * released via ixgbe_free_rx_data() and IXGBE_FAILURE is returned.
 */
static int
ixgbe_alloc_rx_data(ixgbe_t *ixgbe)
{
	int ring;

	for (ring = 0; ring < ixgbe->num_rx_rings; ring++) {
		if (ixgbe_alloc_rx_ring_data(&ixgbe->rx_rings[ring]) !=
		    IXGBE_SUCCESS) {
			ixgbe_free_rx_data(ixgbe);
			return (IXGBE_FAILURE);
		}
	}

	return (IXGBE_SUCCESS);
}
2091 2091
/*
 * ixgbe_free_rx_data - Free (or schedule freeing of) per-ring rx data.
 *
 * For each rx ring, mark the data IXGBE_RX_STOPPED under rx_pending_lock.
 * The data is freed immediately only when rcb_pending is zero; otherwise
 * some control blocks are still outstanding (presumably loaned upstream
 * to the network stack -- confirm against the rcb recycle path), and the
 * final release of those rcbs is expected to perform the free, keyed off
 * the IXGBE_RX_STOPPED flag set here.
 */
static void
ixgbe_free_rx_data(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_rx_data_t *rx_data;
	int i;

	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];

		/* rx_pending_lock serializes against rcb release */
		mutex_enter(&ixgbe->rx_pending_lock);
		rx_data = rx_ring->rx_data;

		if (rx_data != NULL) {
			rx_data->flag |= IXGBE_RX_STOPPED;

			/* Free now only if no rcbs are still outstanding */
			if (rx_data->rcb_pending == 0) {
				ixgbe_free_rx_ring_data(rx_data);
				rx_ring->rx_data = NULL;
			}
		}

		mutex_exit(&ixgbe->rx_pending_lock);
	}
}
2117 2117
/*
 * ixgbe_setup_rings - Setup rx/tx rings.
 */
static void
ixgbe_setup_rings(ixgbe_t *ixgbe)
{
	/*
	 * Setup the rx/tx rings, including the following:
	 *
	 * 1. Setup the descriptor ring and the control block buffers;
	 * 2. Initialize necessary registers for receive/transmit;
	 * 3. Initialize software pointers/parameters for receive/transmit;
	 *
	 * Receive side is configured first, then transmit.
	 */
	ixgbe_setup_rx(ixgbe);

	ixgbe_setup_tx(ixgbe);
}
2135 2135
/*
 * ixgbe_setup_rx_ring - Program one rx ring into the hardware.
 *
 * Installs buffer DMA addresses into the descriptor ring, programs the
 * ring length, base address and head/tail registers, enables the queue
 * via RXDCTL, and configures SRRCTL for single-buffer advanced
 * descriptors.  Caller must hold both rx_lock and gen_lock.
 */
static void
ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring)
{
	ixgbe_t *ixgbe = rx_ring->ixgbe;
	ixgbe_rx_data_t *rx_data = rx_ring->rx_data;
	struct ixgbe_hw *hw = &ixgbe->hw;
	rx_control_block_t *rcb;
	union ixgbe_adv_rx_desc *rbd;
	uint32_t size;
	uint32_t buf_low;
	uint32_t buf_high;
	uint32_t reg_val;
	int i;

	ASSERT(mutex_owned(&rx_ring->rx_lock));
	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Point every descriptor at its control block's DMA buffer.
	 * hdr_addr is left unset (NULL) since the one-buffer descriptor
	 * type configured in SRRCTL below does no header splitting.
	 */
	for (i = 0; i < ixgbe->rx_ring_size; i++) {
		rcb = rx_data->work_list[i];
		rbd = &rx_data->rbd_ring[i];

		rbd->read.pkt_addr = rcb->rx_buf.dma_address;
		rbd->read.hdr_addr = NULL;
	}

	/*
	 * Initialize the length register
	 */
	size = rx_data->ring_size * sizeof (union ixgbe_adv_rx_desc);
	IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->hw_index), size);

	/*
	 * Initialize the base address registers
	 */
	buf_low = (uint32_t)rx_data->rbd_area.dma_address;
	buf_high = (uint32_t)(rx_data->rbd_area.dma_address >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->hw_index), buf_high);
	IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->hw_index), buf_low);

	/*
	 * Setup head & tail pointers
	 */
	IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->hw_index),
	    rx_data->ring_size - 1);
	IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->hw_index), 0);

	/* Reset the software ring cursors */
	rx_data->rbd_next = 0;
	rx_data->lro_first = 0;

	/*
	 * Setup the Receive Descriptor Control Register (RXDCTL)
	 * PTHRESH=32 descriptors (half the internal cache)
	 * HTHRESH=0 descriptors (to minimize latency on fetch)
	 * WTHRESH defaults to 1 (writeback each descriptor)
	 */
	reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index));
	reg_val |= IXGBE_RXDCTL_ENABLE;	/* enable queue */

	/* Not a valid value for 82599 or X540 */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		reg_val |= 0x0020;	/* pthresh */
	}
	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val);

	/* CRC stripping and no RSC aggregation on 82599/X540 */
	if (hw->mac.type == ixgbe_mac_82599EB ||
	    hw->mac.type == ixgbe_mac_X540) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
		reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
	}

	/*
	 * Setup the Split and Replication Receive Control Register.
	 * Set the rx buffer size and the advanced descriptor type.
	 * DROP_EN presumably drops packets when descriptors run out
	 * rather than backpressuring -- confirm against the datasheet.
	 */
	reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
	    IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
	reg_val |= IXGBE_SRRCTL_DROP_EN;
	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val);
}
2216 2216
/*
 * ixgbe_setup_rx - Global receive-side hardware configuration.
 *
 * Programs PSRTYPE, filter control, hardware checksum, the selected
 * classification mode (RSS / VMDq / VMDq+RSS), enables the receive
 * unit, then configures each individual rx ring, the per-ring
 * statistics mapping, max frame size, jumbo frames, and (optionally)
 * RSC/LRO.  Caller is expected to hold gen_lock (asserted in
 * ixgbe_setup_rx_ring).
 */
static void
ixgbe_setup_rx(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t reg_val;
	uint32_t ring_mapping;
	uint32_t i, index;
	uint32_t psrtype_rss_bit;

	/* PSRTYPE must be configured for 82599 */
	if (ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ &&
	    ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ_RSS) {
		reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
		    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
		reg_val |= IXGBE_PSRTYPE_L2HDR;
		/* 0x80000000: magic bit kept as-is; presumably the RSS
		 * queues-per-pool encoding -- confirm vs. 82599 datasheet */
		reg_val |= 0x80000000;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), reg_val);
	} else {
		/* Bit choice depends on pool count (>32 pools halves it) */
		if (ixgbe->num_rx_groups > 32) {
			psrtype_rss_bit = 0x20000000;
		} else {
			psrtype_rss_bit = 0x40000000;
		}
		for (i = 0; i < ixgbe->capab->max_rx_grp_num; i++) {
			reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
			    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
			reg_val |= IXGBE_PSRTYPE_L2HDR;
			reg_val |= psrtype_rss_bit;
			IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(i), reg_val);
		}
	}

	/*
	 * Set filter control in FCTRL to accept broadcast packets and do
	 * not pass pause frames to host. Flow control settings are already
	 * in this register, so preserve them.
	 */
	reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	reg_val |= IXGBE_FCTRL_BAM;	/* broadcast accept mode */
	reg_val |= IXGBE_FCTRL_DPF;	/* discard pause frames */
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val);

	/*
	 * Hardware checksum settings
	 */
	if (ixgbe->rx_hcksum_enable) {
		reg_val = IXGBE_RXCSUM_IPPCSE;	/* IP checksum */
		IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val);
	}

	/*
	 * Setup VMDq and RSS for multiple receive queues
	 */
	switch (ixgbe->classify_mode) {
	case IXGBE_CLASSIFY_RSS:
		/*
		 * One group, only RSS is needed when more than
		 * one ring enabled.
		 */
		ixgbe_setup_rss(ixgbe);
		break;

	case IXGBE_CLASSIFY_VMDQ:
		/*
		 * Multiple groups, each group has one ring,
		 * only VMDq is needed.
		 */
		ixgbe_setup_vmdq(ixgbe);
		break;

	case IXGBE_CLASSIFY_VMDQ_RSS:
		/*
		 * Multiple groups and multiple rings, both
		 * VMDq and RSS are needed.
		 */
		ixgbe_setup_vmdq_rss(ixgbe);
		break;

	default:
		break;
	}

	/*
	 * Enable the receive unit. This must be done after filter
	 * control is set in FCTRL.
	 */
	reg_val = (IXGBE_RXCTRL_RXEN	/* Enable Receive Unit */
	    | IXGBE_RXCTRL_DMBYPS);	/* descriptor monitor bypass */
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);

	/*
	 * ixgbe_setup_rx_ring must be called after configuring RXCTRL
	 */
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		ixgbe_setup_rx_ring(rx_ring);
	}

	/*
	 * Setup the per-ring statistics mapping.  Each RQSMR register
	 * holds four 8-bit map entries; read-modify-write preserves the
	 * other three entries sharing the register.
	 */
	ring_mapping = 0;
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		index = ixgbe->rx_rings[i].hw_index;
		ring_mapping = IXGBE_READ_REG(hw, IXGBE_RQSMR(index >> 2));
		ring_mapping |= (i & 0xF) << (8 * (index & 0x3));
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(index >> 2), ring_mapping);
	}

	/*
	 * The Max Frame Size in MHADD/MAXFRS will be internally increased
	 * by four bytes if the packet has a VLAN field, so includes MTU,
	 * ethernet header and frame check sequence.
	 * Register is MAXFRS in 82599.
	 */
	reg_val = (ixgbe->default_mtu + sizeof (struct ether_header)
	    + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val);

	/*
	 * Setup Jumbo Frame enable bit
	 */
	if (ixgbe->default_mtu > ETHERMTU) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg_val |= IXGBE_HLREG0_JUMBOEN;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
	}

	/*
	 * Setup RSC for multiple receive queues.
	 */
	if (ixgbe->lro_enable) {
		for (i = 0; i < ixgbe->num_rx_rings; i++) {
			/*
			 * Make sure rx_buf_size * MAXDESC not greater
			 * than 65535.
			 * Intel recommends 4 for MAXDESC field value.
			 */
			reg_val = IXGBE_READ_REG(hw, IXGBE_RSCCTL(i));
			reg_val |= IXGBE_RSCCTL_RSCEN;
			if (ixgbe->rx_buf_size == IXGBE_PKG_BUF_16k)
				reg_val |= IXGBE_RSCCTL_MAXDESC_1;
			else
				reg_val |= IXGBE_RSCCTL_MAXDESC_4;
			IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(i), reg_val);
		}

		reg_val = IXGBE_READ_REG(hw, IXGBE_RSCDBU);
		reg_val |= IXGBE_RSCDBU_RSCACKDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, reg_val);

		reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
		reg_val |= IXGBE_RDRXCTL_RSCACKC;
		reg_val |= IXGBE_RDRXCTL_FCOE_WRFIX;
		reg_val &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;

		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
	}
}
2377 2377
/*
 * ixgbe_setup_tx_ring - Program one tx ring into the hardware.
 *
 * Programs the ring length, base address and head/tail registers,
 * optionally configures head write-back, and resets the software
 * descriptor/control-block cursors and the tx context cache.
 * Caller must hold both tx_lock and gen_lock.
 */
static void
ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring)
{
	ixgbe_t *ixgbe = tx_ring->ixgbe;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t size;
	uint32_t buf_low;
	uint32_t buf_high;
	uint32_t reg_val;

	ASSERT(mutex_owned(&tx_ring->tx_lock));
	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Initialize the length register
	 */
	size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc);
	IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size);

	/*
	 * Initialize the base address registers
	 */
	buf_low = (uint32_t)tx_ring->tbd_area.dma_address;
	buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low);
	IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high);

	/*
	 * Setup head & tail pointers
	 */
	IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0);

	/*
	 * Setup head write-back
	 */
	if (ixgbe->tx_head_wb_enable) {
		/*
		 * The memory of the head write-back is allocated using
		 * the extra tbd beyond the tail of the tbd ring.
		 */
		tx_ring->tbd_head_wb = (uint32_t *)
		    ((uintptr_t)tx_ring->tbd_area.address + size);
		*tx_ring->tbd_head_wb = 0;

		buf_low = (uint32_t)
		    (tx_ring->tbd_area.dma_address + size);
		buf_high = (uint32_t)
		    ((tx_ring->tbd_area.dma_address + size) >> 32);

		/* Set the head write-back enable bit */
		buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;

		IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
		IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);

		/*
		 * Turn off relaxed ordering for head write back or it will
		 * cause problems with the tx recycling
		 * (register name differs between 82598 and 82599/X540).
		 */

		reg_val = (hw->mac.type == ixgbe_mac_82598EB) ?
		    IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(tx_ring->index)) :
		    IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(tx_ring->index));
		reg_val &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		if (hw->mac.type == ixgbe_mac_82598EB) {
			IXGBE_WRITE_REG(hw,
			    IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
		} else {
			IXGBE_WRITE_REG(hw,
			    IXGBE_DCA_TXCTRL_82599(tx_ring->index), reg_val);
		}
	} else {
		tx_ring->tbd_head_wb = NULL;
	}

	/* Reset software descriptor-ring state; the whole ring is free */
	tx_ring->tbd_head = 0;
	tx_ring->tbd_tail = 0;
	tx_ring->tbd_free = tx_ring->ring_size;

	/* Control-block free list is reset only on full (re)init */
	if (ixgbe->tx_ring_init == B_TRUE) {
		tx_ring->tcb_head = 0;
		tx_ring->tcb_tail = 0;
		tx_ring->tcb_free = tx_ring->free_list_size;
	}

	/*
	 * Initialize the s/w context structure
	 */
	bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t));
}
2469 2469
/*
 * ixgbe_setup_tx - Global transmit-side hardware configuration.
 *
 * Configures every tx ring, sets up the per-ring statistics mapping,
 * enables CRC/padding, enables transmit DMA on 82599/X540 (with the
 * arbiter temporarily disabled to program MTQC), and finally enables
 * each tx queue -- which on 82599 must happen after DMATXCTL.TE.
 */
static void
ixgbe_setup_tx(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_tx_ring_t *tx_ring;
	uint32_t reg_val;
	uint32_t ring_mapping;
	int i;

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		ixgbe_setup_tx_ring(tx_ring);
	}

	/*
	 * Setup the per-ring statistics mapping.  Four 8-bit entries are
	 * packed per register; the accumulated value is flushed every
	 * fourth ring, and any partial group is flushed after the loop.
	 */
	ring_mapping = 0;
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
		if ((i & 0x3) == 0x3) {
			switch (hw->mac.type) {
			case ixgbe_mac_82598EB:
				IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2),
				    ring_mapping);
				break;

			case ixgbe_mac_82599EB:
			case ixgbe_mac_X540:
				IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2),
				    ring_mapping);
				break;

			default:
				break;
			}

			ring_mapping = 0;
		}
	}
	/* Flush the final, partially filled mapping register */
	if (i & 0x3) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
			break;

		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping);
			break;

		default:
			break;
		}
	}

	/*
	 * Enable CRC appending and TX padding (for short tx frames)
	 */
	reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);

	/*
	 * enable DMA for 82599 and X540 parts
	 */
	if (hw->mac.type == ixgbe_mac_82599EB ||
	    hw->mac.type == ixgbe_mac_X540) {
		/* DMATXCTL.TE must be set after all Tx config is complete */
		reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		reg_val |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val);

		/* Disable arbiter to set MTQC */
		reg_val = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		reg_val |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
		reg_val &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
	}

	/*
	 * Enabling tx queues ..
	 * For 82599 must be done after DMATXCTL.TE is set
	 */
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index));
		reg_val |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);
	}
}
2563 2563
/*
 * ixgbe_setup_rss - Setup receive-side scaling feature.
 *
 * Fills the 128-entry redirection table (RETA) with rings cycled within
 * one group, seeds the ten RSS hash key registers (RSSRK) with random
 * data, enables RSS hashing in MRQC for IPv4/IPv6 TCP/UDP variants, and
 * disables Packet Checksum in RXCSUM (hardware requires it off for RSS).
 */
static void
ixgbe_setup_rss(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t i, mrqc, rxcsum;
	uint32_t random;
	uint32_t reta;
	uint32_t ring_per_group;

	/*
	 * Fill out redirection table; four 8-bit entries are packed into
	 * each 32-bit RETA register, written every fourth iteration.
	 */
	reta = 0;
	ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;

	for (i = 0; i < 128; i++) {
		reta = (reta << 8) | (i % ring_per_group) |
		    ((i % ring_per_group) << 4);
		if ((i & 3) == 3)
			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
	}

	/*
	 * Fill out hash function seeds with a random constant
	 */
	for (i = 0; i < 10; i++) {
		(void) random_get_pseudo_bytes((uint8_t *)&random,
		    sizeof (uint32_t));
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
	}

	/*
	 * Enable RSS & perform hash on these packet types
	 */
	mrqc = IXGBE_MRQC_RSSEN |
	    IXGBE_MRQC_RSS_FIELD_IPV4 |
	    IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
	    IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
	    IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
	    IXGBE_MRQC_RSS_FIELD_IPV6_EX |
	    IXGBE_MRQC_RSS_FIELD_IPV6 |
	    IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
	    IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
	    IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);

	/*
	 * Disable Packet Checksum to enable RSS for multiple receive queues.
	 * It is an adapter hardware limitation that Packet Checksum is
	 * mutually exclusive with RSS.
	 */
	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	rxcsum |= IXGBE_RXCSUM_PCSD;
	rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
}
2623 2623
/*
 * ixgbe_setup_vmdq - Setup MAC classification feature
 *
 * Enables VMDq (pool selection by destination MAC address).  The
 * register layout differs per MAC: 82598 uses VMD_CTL; 82599/X540 use
 * MRQC plus VT_CTL and additionally clear the MPSAR pool-select arrays
 * and enable receive for all VF pools.
 */
static void
ixgbe_setup_vmdq(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t vmdctl, i, vtctl;

	/*
	 * Setup the VMDq Control register, enable VMDq based on
	 * packet destination MAC address:
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/*
		 * VMDq Enable = 1;
		 * VMDq Filter = 0; MAC filtering
		 * Default VMDq output index = 0;
		 */
		vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
		break;

	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		/*
		 * Enable VMDq-only.
		 */
		vmdctl = IXGBE_MRQC_VMDQEN;
		IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl);

		/* Clear pool-select bitmaps for every RAR entry */
		for (i = 0; i < hw->mac.num_rar_entries; i++) {
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
		}

		/*
		 * Enable Virtualization and Replication.
		 */
		vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
		IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);

		/*
		 * Enable receiving packets to all VFs
		 */
		IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
		IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
		break;

	default:
		break;
	}
}
2678 2678
/*
 * ixgbe_setup_vmdq_rss - Setup both vmdq feature and rss feature.
 *
 * Combines the RETA/RSSRK programming of ixgbe_setup_rss() with the
 * VMDq pool configuration of ixgbe_setup_vmdq().  MRQC encoding is
 * MAC-specific: 82598 enables RSS in MRQC and VMDq via VMD_CTL;
 * 82599/X540 select a combined VMDQRSS mode in MRQC whose variant
 * depends on whether more than 32 rx groups are configured, and also
 * enable virtualization/replication and all-VF receive.
 */
static void
ixgbe_setup_vmdq_rss(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t i, mrqc, rxcsum;
	uint32_t random;
	uint32_t reta;
	uint32_t ring_per_group;
	uint32_t vmdctl, vtctl;

	/*
	 * Fill out redirection table; four 8-bit entries packed per
	 * 32-bit RETA register, written every fourth iteration.
	 */
	reta = 0;
	ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
	for (i = 0; i < 128; i++) {
		reta = (reta << 8) | (i % ring_per_group) |
		    ((i % ring_per_group) << 4);
		if ((i & 3) == 3)
			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
	}

	/*
	 * Fill out hash function seeds with a random constant
	 */
	for (i = 0; i < 10; i++) {
		(void) random_get_pseudo_bytes((uint8_t *)&random,
		    sizeof (uint32_t));
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
	}

	/*
	 * Enable and setup RSS and VMDq
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/*
		 * Enable RSS & Setup RSS Hash functions
		 */
		mrqc = IXGBE_MRQC_RSSEN |
		    IXGBE_MRQC_RSS_FIELD_IPV4 |
		    IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
		    IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
		    IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
		    IXGBE_MRQC_RSS_FIELD_IPV6_EX |
		    IXGBE_MRQC_RSS_FIELD_IPV6 |
		    IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
		    IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
		    IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);

		/*
		 * Enable and Setup VMDq
		 * VMDq Filter = 0; MAC filtering
		 * Default VMDq output index = 0;
		 */
		vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
		break;

	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		/*
		 * Enable RSS & Setup RSS Hash functions
		 */
		mrqc = IXGBE_MRQC_RSS_FIELD_IPV4 |
		    IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
		    IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
		    IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
		    IXGBE_MRQC_RSS_FIELD_IPV6_EX |
		    IXGBE_MRQC_RSS_FIELD_IPV6 |
		    IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
		    IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
		    IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;

		/*
		 * Enable VMDq+RSS.
		 */
		if (ixgbe->num_rx_groups > 32) {
			mrqc = mrqc | IXGBE_MRQC_VMDQRSS64EN;
		} else {
			mrqc = mrqc | IXGBE_MRQC_VMDQRSS32EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);

		/* Clear pool-select bitmaps for every RAR entry */
		for (i = 0; i < hw->mac.num_rar_entries; i++) {
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
		}
		break;

	default:
		break;

	}

	/*
	 * Disable Packet Checksum to enable RSS for multiple receive queues.
	 * It is an adapter hardware limitation that Packet Checksum is
	 * mutually exclusive with RSS.
	 */
	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	rxcsum |= IXGBE_RXCSUM_PCSD;
	rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	if (hw->mac.type == ixgbe_mac_82599EB ||
	    hw->mac.type == ixgbe_mac_X540) {
		/*
		 * Enable Virtualization and Replication.
		 */
		vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
		IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);

		/*
		 * Enable receiving packets to all VFs
		 */
		IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
		IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
	}
}
2804 2804
/*
 * ixgbe_init_unicst - Initialize the unicast addresses.
 */
static void
ixgbe_init_unicst(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint8_t *mac_addr;
	int slot;
	/*
	 * Here we should consider two situations:
	 *
	 * 1. Chipset is initialized at the first time,
	 *    Clear all the multiple unicast addresses.
	 *
	 * 2. Chipset is reset
	 *    Recover the multiple unicast addresses from the
	 *    software data structure to the RAR registers.
	 */
	if (!ixgbe->unicst_init) {
		/*
		 * Initialize the multiple unicast addresses: one software
		 * slot per hardware RAR entry, all zeroed and marked unset.
		 */
		ixgbe->unicst_total = hw->mac.num_rar_entries;
		ixgbe->unicst_avail = ixgbe->unicst_total;
		for (slot = 0; slot < ixgbe->unicst_total; slot++) {
			mac_addr = ixgbe->unicst_addr[slot].mac.addr;
			bzero(mac_addr, ETHERADDRL);
			/* NULL vmdq/enable args follow the file's existing
			 * NULL-as-zero style for ixgbe_set_rar */
			(void) ixgbe_set_rar(hw, slot, mac_addr, NULL, NULL);
			ixgbe->unicst_addr[slot].mac.set = 0;
		}
		ixgbe->unicst_init = B_TRUE;
	} else {
		/* Re-configure the RAR registers */
		for (slot = 0; slot < ixgbe->unicst_total; slot++) {
			mac_addr = ixgbe->unicst_addr[slot].mac.addr;
			if (ixgbe->unicst_addr[slot].mac.set == 1) {
				/* Restore a configured slot with its group */
				(void) ixgbe_set_rar(hw, slot, mac_addr,
				    ixgbe->unicst_addr[slot].mac.group_index,
				    IXGBE_RAH_AV);
			} else {
				/* Unused slot: clear it in hardware too */
				bzero(mac_addr, ETHERADDRL);
				(void) ixgbe_set_rar(hw, slot, mac_addr,
				    NULL, NULL);
			}
		}
	}
}
2853 2853
/*
 * ixgbe_unicst_find - Find the slot for the specified unicast address
 *
 * Returns the slot index holding mac_addr, or -1 when the address is
 * not present.  Caller must hold gen_lock.
 */
int
ixgbe_unicst_find(ixgbe_t *ixgbe, const uint8_t *mac_addr)
{
	int idx;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	for (idx = 0; idx < ixgbe->unicst_total; idx++) {
		if (bcmp(mac_addr, ixgbe->unicst_addr[idx].mac.addr,
		    ETHERADDRL) != 0)
			continue;
		return (idx);
	}

	return (-1);
}
2872 2872
/*
 * ixgbe_multicst_add - Add a multicst address.
 *
 * Appends multiaddr to the software multicast table and pushes the
 * table to the hardware.  Returns EINVAL for a non-multicast address,
 * ENOENT when the table is full, EIO on a register access fault, and
 * 0 on success.  Caller must hold gen_lock.
 */
int
ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr)
{
	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/* The group (multicast) bit of the address must be set */
	if ((multiaddr[0] & 01) == 0)
		return (EINVAL);

	/* No room left in the software multicast table */
	if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES)
		return (ENOENT);

	bcopy(multiaddr,
	    &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL);
	ixgbe->mcast_count++;

	/*
	 * Update the multicast table in the hardware
	 */
	ixgbe_setup_multicst(ixgbe);

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		return (EIO);
	}

	return (0);
}
2905 2905
/*
 * ixgbe_multicst_remove - Remove a multicst address.
 *
 * Deletes multiaddr from the software multicast table (if present) and
 * pushes the updated table to the hardware.  Returns EIO on a register
 * access fault, 0 otherwise.  Caller must hold gen_lock.
 */
int
ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr)
{
	int idx;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Locate the address; when found, close the gap by sliding the
	 * entries after it down one slot, then shrink the count.
	 */
	for (idx = 0; idx < ixgbe->mcast_count; idx++) {
		if (bcmp(multiaddr, &ixgbe->mcast_table[idx],
		    ETHERADDRL) != 0)
			continue;

		for (; idx < ixgbe->mcast_count - 1; idx++) {
			ixgbe->mcast_table[idx] =
			    ixgbe->mcast_table[idx + 1];
		}
		ixgbe->mcast_count--;
		break;
	}

	/*
	 * Update the multicast table in the hardware
	 */
	ixgbe_setup_multicst(ixgbe);

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		return (EIO);
	}

	return (0);
}
2940 2940
/*
 * ixgbe_setup_multicst - Setup multicast data structures.
 * (Comment previously said "ixgbe_setup_multicast"; corrected to match
 * the actual function name.)
 *
 * This routine initializes all of the multicast related structures
 * and save them in the hardware registers.
 */
static void
ixgbe_setup_multicst(ixgbe_t *ixgbe)
{
	uint8_t *mc_addr_list;
	uint32_t mc_addr_count;
	struct ixgbe_hw *hw = &ixgbe->hw;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);

	/* The software table is a flat array of Ethernet addresses */
	mc_addr_list = (uint8_t *)ixgbe->mcast_table;
	mc_addr_count = ixgbe->mcast_count;

	/*
	 * Update the multicast addresses to the MTA registers
	 */
	(void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
	    ixgbe_mc_table_itr, TRUE);
}
2967 2967
/*
 * ixgbe_setup_vmdq_rss_conf - Configure vmdq and rss (number and mode).
 *
 * Configure the rx classification mode (vmdq & rss) and vmdq & rss numbers.
 * Different chipsets may have different allowed configuration of vmdq and rss.
 *
 * On entry num_rx_rings/num_rx_groups hold the user-requested values;
 * this routine clamps num_rx_rings to a combination the chip supports
 * and then derives classify_mode from the final numbers.
 */
static void
ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t ring_per_group;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/*
		 * 82598 supports the following combination:
		 * vmdq no. x rss no.
		 * [5..16] x 1
		 * [1..4] x [1..16]
		 * However 8 rss queue per pool (vmdq) is sufficient for
		 * most cases.
		 */
		ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
		if (ixgbe->num_rx_groups > 4) {
			/* more than 4 pools: exactly one ring per pool */
			ixgbe->num_rx_rings = ixgbe->num_rx_groups;
		} else {
			/* cap RSS at 8 rings per pool */
			ixgbe->num_rx_rings = ixgbe->num_rx_groups *
			    min(8, ring_per_group);
		}

		break;

	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		/*
		 * 82599 supports the following combination:
		 * vmdq no. x rss no.
		 * [33..64] x [1..2]
		 * [2..32] x [1..4]
		 * 1 x [1..16]
		 * However 8 rss queue per pool (vmdq) is sufficient for
		 * most cases.
		 *
		 * For now, treat X540 like the 82599.
		 */
		ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
		if (ixgbe->num_rx_groups == 1) {
			ixgbe->num_rx_rings = min(8, ring_per_group);
		} else if (ixgbe->num_rx_groups <= 32) {
			ixgbe->num_rx_rings = ixgbe->num_rx_groups *
			    min(4, ring_per_group);
		} else if (ixgbe->num_rx_groups <= 64) {
			ixgbe->num_rx_rings = ixgbe->num_rx_groups *
			    min(2, ring_per_group);
		}
		break;

	default:
		break;
	}

	/* Recompute after the per-chip adjustment above */
	ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;

	/*
	 * Derive the rx classification mode: NONE (1 group, 1 ring),
	 * VMDQ (many groups, 1 ring each), VMDQ_RSS (many groups, many
	 * rings each), or RSS (1 group, many rings).
	 */
	if (ixgbe->num_rx_groups == 1 && ring_per_group == 1) {
		ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
	} else if (ixgbe->num_rx_groups != 1 && ring_per_group == 1) {
		ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ;
	} else if (ixgbe->num_rx_groups != 1 && ring_per_group != 1) {
		ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ_RSS;
	} else {
		ixgbe->classify_mode = IXGBE_CLASSIFY_RSS;
	}

	IXGBE_DEBUGLOG_2(ixgbe, "rx group number:%d, rx ring number:%d",
	    ixgbe->num_rx_groups, ixgbe->num_rx_rings);
}
3044 3044
/*
 * ixgbe_get_conf - Get driver configurations set in driver.conf.
 *
 * This routine gets user-configured values out of the configuration
 * file ixgbe.conf.
 *
 * For each configurable value, there is a minimum, a maximum, and a
 * default.
 * If user does not configure a value, use the default.
 * If user configures below the minimum, use the minumum.
 * If user configures above the maximum, use the maxumum.
 */
static void
ixgbe_get_conf(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t flow_control;

	/*
	 * ixgbe driver supports the following user configurations:
	 *
	 * Jumbo frame configuration:
	 *    default_mtu
	 *
	 * Ethernet flow control configuration:
	 *    flow_control
	 *
	 * Multiple rings configurations:
	 *    tx_queue_number
	 *    tx_ring_size
	 *    rx_queue_number
	 *    rx_ring_size
	 *
	 * Call ixgbe_get_prop() to get the value for a specific
	 * configuration parameter.
	 */

	/*
	 * Jumbo frame configuration - max_frame_size controls host buffer
	 * allocation, so includes MTU, ethernet header, vlan tag and
	 * frame check sequence.
	 */
	ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU,
	    MIN_MTU, ixgbe->capab->max_mtu, DEFAULT_MTU);

	ixgbe->max_frame_size = ixgbe->default_mtu +
	    sizeof (struct ether_vlan_header) + ETHERFCSL;

	/*
	 * Ethernet flow control configuration.
	 * The sentinel value 3 means "use hardware default".
	 */
	flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL,
	    ixgbe_fc_none, 3, ixgbe_fc_none);
	if (flow_control == 3)
		flow_control = ixgbe_fc_default;

	/*
	 * fc.requested mode is what the user requests.  After autoneg,
	 * fc.current_mode will be the flow_control mode that was negotiated.
	 */
	hw->fc.requested_mode = flow_control;

	/*
	 * Multiple rings configurations
	 */
	ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM,
	    ixgbe->capab->min_tx_que_num,
	    ixgbe->capab->max_tx_que_num,
	    ixgbe->capab->def_tx_que_num);
	ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE,
	    MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE);

	ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM,
	    ixgbe->capab->min_rx_que_num,
	    ixgbe->capab->max_rx_que_num,
	    ixgbe->capab->def_rx_que_num);
	ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE,
	    MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE);

	/*
	 * Multiple groups configuration
	 */
	ixgbe->num_rx_groups = ixgbe_get_prop(ixgbe, PROP_RX_GROUP_NUM,
	    ixgbe->capab->min_rx_grp_num, ixgbe->capab->max_rx_grp_num,
	    ixgbe->capab->def_rx_grp_num);

	ixgbe->mr_enable = ixgbe_get_prop(ixgbe, PROP_MR_ENABLE,
	    0, 1, DEFAULT_MR_ENABLE);

	if (ixgbe->mr_enable == B_FALSE) {
		/* multiple rings disabled: collapse to a single ring/group */
		ixgbe->num_tx_rings = 1;
		ixgbe->num_rx_rings = 1;
		ixgbe->num_rx_groups = 1;
		ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
	} else {
		/* make num_rx_rings an exact multiple of num_rx_groups */
		ixgbe->num_rx_rings = ixgbe->num_rx_groups *
		    max(ixgbe->num_rx_rings / ixgbe->num_rx_groups, 1);
		/*
		 * The combination of num_rx_rings and num_rx_groups
		 * may be not supported by h/w. We need to adjust
		 * them to appropriate values.
		 */
		ixgbe_setup_vmdq_rss_conf(ixgbe);
	}

	/*
	 * Tunable used to force an interrupt type. The only use is
	 * for testing of the lesser interrupt types.
	 * 0 = don't force interrupt type
	 * 1 = force interrupt type MSI-X
	 * 2 = force interrupt type MSI
	 * 3 = force interrupt type Legacy
	 */
	ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
	    IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);

	/* Offload enables (each a 0/1 boolean property) */
	ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
	    0, 1, DEFAULT_TX_HCKSUM_ENABLE);
	ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
	    0, 1, DEFAULT_RX_HCKSUM_ENABLE);
	ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
	    0, 1, DEFAULT_LSO_ENABLE);
	ixgbe->lro_enable = ixgbe_get_prop(ixgbe, PROP_LRO_ENABLE,
	    0, 1, DEFAULT_LRO_ENABLE);
	ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
	    0, 1, DEFAULT_TX_HEAD_WB_ENABLE);
	ixgbe->relax_order_enable = ixgbe_get_prop(ixgbe,
	    PROP_RELAX_ORDER_ENABLE, 0, 1, DEFAULT_RELAX_ORDER_ENABLE);

	/* Head Write Back not recommended for 82599 and X540 */
	if (hw->mac.type == ixgbe_mac_82599EB ||
	    hw->mac.type == ixgbe_mac_X540) {
		ixgbe->tx_head_wb_enable = B_FALSE;
	}

	/*
	 * ixgbe LSO needs the tx h/w checksum support.
	 * LSO will be disabled if tx h/w checksum is not
	 * enabled.
	 */
	if (ixgbe->tx_hcksum_enable == B_FALSE) {
		ixgbe->lso_enable = B_FALSE;
	}

	/*
	 * ixgbe LRO needs the rx h/w checksum support.
	 * LRO will be disabled if rx h/w checksum is not
	 * enabled.
	 */
	if (ixgbe->rx_hcksum_enable == B_FALSE) {
		ixgbe->lro_enable = B_FALSE;
	}

	/*
	 * ixgbe LRO only been supported by 82599 and X540 now
	 */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		ixgbe->lro_enable = B_FALSE;
	}

	/* Transmit copy/recycle/overload/reschedule thresholds */
	ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
	    MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
	    DEFAULT_TX_COPY_THRESHOLD);
	ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
	    PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
	    MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
	ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
	    PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
	    MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
	ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
	    PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
	    MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);

	/* Receive copy threshold and per-interrupt packet limit */
	ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
	    MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
	    DEFAULT_RX_COPY_THRESHOLD);
	ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
	    MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
	    DEFAULT_RX_LIMIT_PER_INTR);

	ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
	    ixgbe->capab->min_intr_throttle,
	    ixgbe->capab->max_intr_throttle,
	    ixgbe->capab->def_intr_throttle);
	/*
	 * 82599 and X540 require the interrupt throttling rate is
	 * a multiple of 8. This is enforced by the register
	 * definiton.
	 */
	if (hw->mac.type == ixgbe_mac_82599EB || hw->mac.type == ixgbe_mac_X540)
		ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8;

	hw->allow_unsupported_sfp = ixgbe_get_prop(ixgbe,
	    PROP_ALLOW_UNSUPPORTED_SFP, 0, 1, DEFAULT_ALLOW_UNSUPPORTED_SFP);
}
3239 3239
/*
 * ixgbe_init_params - Initialize the link parameter soft state.
 *
 * Enabled/advertised capabilities default to on; link-partner (lp)
 * capabilities default to off until negotiation reports them.
 */
static void
ixgbe_init_params(ixgbe_t *ixgbe)
{
	/* Locally enabled capabilities */
	ixgbe->param_en_10000fdx_cap = 1;
	ixgbe->param_en_1000fdx_cap = 1;
	ixgbe->param_en_100fdx_cap = 1;

	/* Capabilities we advertise to the peer */
	ixgbe->param_adv_10000fdx_cap = 1;
	ixgbe->param_adv_1000fdx_cap = 1;
	ixgbe->param_adv_100fdx_cap = 1;
	ixgbe->param_adv_autoneg_cap = 1;
	ixgbe->param_adv_pause_cap = 1;
	ixgbe->param_adv_asym_pause_cap = 1;
	ixgbe->param_adv_rem_fault = 0;

	/* Pause/fault capabilities */
	ixgbe->param_pause_cap = 1;
	ixgbe->param_asym_pause_cap = 1;
	ixgbe->param_rem_fault = 0;

	/* Link-partner capabilities, unknown until autonegotiation */
	ixgbe->param_lp_10000fdx_cap = 0;
	ixgbe->param_lp_1000fdx_cap = 0;
	ixgbe->param_lp_100fdx_cap = 0;
	ixgbe->param_lp_autoneg_cap = 0;
	ixgbe->param_lp_pause_cap = 0;
	ixgbe->param_lp_asym_pause_cap = 0;
	ixgbe->param_lp_rem_fault = 0;
}
3267 3267
/*
 * ixgbe_get_prop - Get a property value out of the configuration file
 * ixgbe.conf.
 *
 * Caller provides the name of the property, a default value, a minimum
 * value, and a maximum value.
 *
 * Return configured value of the property, with default, minimum and
 * maximum properly applied.
 */
static int
ixgbe_get_prop(ixgbe_t *ixgbe,
    char *propname,	/* name of the property */
    int minval,		/* minimum acceptable value */
    int maxval,		/* maximum acceptable value */
    int defval)		/* default value */
{
	int value;

	/*
	 * Read the setting from the .conf file (falling back to the
	 * supplied default), then clamp it into [minval, maxval].
	 */
	value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip,
	    DDI_PROP_DONTPASS, propname, defval);
	if (value > maxval)
		value = maxval;
	if (value < minval)
		value = minval;

	return (value);
}
3300 3300
/*
 * ixgbe_driver_setup_link - Using the link properties to setup the link.
 *
 * Builds the advertised-speed mask from the adv_* soft-state parameters
 * and, when setup_hw is set, pushes it to the shared-code link setup.
 */
int
ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw)
{
	u32 speeds = 0;

	/*
	 * No half duplex support with 10Gb parts
	 */
	if (ixgbe->param_adv_10000fdx_cap == 1)
		speeds |= IXGBE_LINK_SPEED_10GB_FULL;
	if (ixgbe->param_adv_1000fdx_cap == 1)
		speeds |= IXGBE_LINK_SPEED_1GB_FULL;
	if (ixgbe->param_adv_100fdx_cap == 1)
		speeds |= IXGBE_LINK_SPEED_100_FULL;

	/*
	 * Autonegotiation requested but nothing advertised: fall back
	 * to advertising every supported full-duplex speed.
	 */
	if (ixgbe->param_adv_autoneg_cap == 1 && speeds == 0) {
		ixgbe_notice(ixgbe, "Invalid link settings. Setup link "
		    "to autonegotiation with full link capabilities.");

		speeds = IXGBE_LINK_SPEED_10GB_FULL |
		    IXGBE_LINK_SPEED_1GB_FULL |
		    IXGBE_LINK_SPEED_100_FULL;
	}

	if (setup_hw) {
		if (ixgbe_setup_link(&ixgbe->hw, speeds,
		    ixgbe->param_adv_autoneg_cap, B_TRUE) != IXGBE_SUCCESS) {
			ixgbe_notice(ixgbe, "Setup link failed on this "
			    "device.");
			return (IXGBE_FAILURE);
		}
	}

	return (IXGBE_SUCCESS);
}
3341 3341
/*
 * ixgbe_driver_link_check - Link status processing.
 *
 * This function can be called in both kernel context and interrupt context
 */
static void
ixgbe_driver_link_check(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
	boolean_t link_up = B_FALSE;
	boolean_t link_changed = B_FALSE;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	(void) ixgbe_check_link(hw, &speed, &link_up, false);
	if (link_up) {
		ixgbe->link_check_complete = B_TRUE;

		/* Link is up, enable flow control settings */
		(void) ixgbe_fc_enable(hw);

		/*
		 * The Link is up, check whether it was marked as down earlier
		 */
		if (ixgbe->link_state != LINK_STATE_UP) {
			/*
			 * NOTE(review): no default case here — an
			 * unrecognized speed leaves link_speed stale while
			 * the state is still set to UP; confirm intended.
			 */
			switch (speed) {
			case IXGBE_LINK_SPEED_10GB_FULL:
				ixgbe->link_speed = SPEED_10GB;
				break;
			case IXGBE_LINK_SPEED_1GB_FULL:
				ixgbe->link_speed = SPEED_1GB;
				break;
			case IXGBE_LINK_SPEED_100_FULL:
				ixgbe->link_speed = SPEED_100;
			}
			ixgbe->link_duplex = LINK_DUPLEX_FULL;
			ixgbe->link_state = LINK_STATE_UP;
			link_changed = B_TRUE;
		}
	} else {
		/*
		 * Only report link-down once the initial link check grace
		 * period (link_check_hrtime) has expired or a prior check
		 * already completed.
		 */
		if (ixgbe->link_check_complete == B_TRUE ||
		    (ixgbe->link_check_complete == B_FALSE &&
		    gethrtime() >= ixgbe->link_check_hrtime)) {
			/*
			 * The link is really down
			 */
			ixgbe->link_check_complete = B_TRUE;

			if (ixgbe->link_state != LINK_STATE_DOWN) {
				ixgbe->link_speed = 0;
				ixgbe->link_duplex = LINK_DUPLEX_UNKNOWN;
				ixgbe->link_state = LINK_STATE_DOWN;
				link_changed = B_TRUE;
			}
		}
	}

	/*
	 * If we are in an interrupt context, need to re-enable the
	 * interrupt, which was automasked
	 */
	if (servicing_interrupt() != 0) {
		ixgbe->eims |= IXGBE_EICR_LSC;
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
	}

	/* Notify the MAC layer only on an actual state transition */
	if (link_changed) {
		mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
	}
}
3413 3413
/*
 * ixgbe_sfp_check - sfp module processing done in taskq only for 82599.
 *
 * Dispatched from the interrupt path; ixgbe->eicr holds the cause bits
 * captured by the ISR.  SDP1 signals a link event, SDP2 signals SFP
 * module insertion.
 */
static void
ixgbe_sfp_check(void *arg)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg;
	uint32_t eicr = ixgbe->eicr;
	struct ixgbe_hw *hw = &ixgbe->hw;

	mutex_enter(&ixgbe->gen_lock);
	if (eicr & IXGBE_EICR_GPI_SDP1) {
		/* clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);

		/* if link up, do multispeed fiber setup */
		(void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
		    B_TRUE, B_TRUE);
		ixgbe_driver_link_check(ixgbe);
		ixgbe_get_hw_state(ixgbe);
	} else if (eicr & IXGBE_EICR_GPI_SDP2) {
		/* clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);

		/* if link up, do sfp module setup */
		(void) hw->mac.ops.setup_sfp(hw);

		/* do multispeed fiber setup */
		(void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
		    B_TRUE, B_TRUE);
		ixgbe_driver_link_check(ixgbe);
		ixgbe_get_hw_state(ixgbe);
	}
	mutex_exit(&ixgbe->gen_lock);

	/*
	 * We need to fully re-check the link later: restart the grace
	 * period during which link-down is not yet reported.
	 */
	ixgbe->link_check_complete = B_FALSE;
	ixgbe->link_check_hrtime = gethrtime() +
	    (IXGBE_LINK_UP_TIME * 100000000ULL);
}
3456 3456
/*
 * ixgbe_overtemp_check - overtemp module processing done in taskq
 *
 * This routine will only be called on adapters with temperature sensor.
 * The indication of over-temperature can be either SDP0 interrupt or the link
 * status change interrupt.
 */
static void
ixgbe_overtemp_check(void *arg)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t eicr = ixgbe->eicr;	/* cause bits captured by the ISR */
	ixgbe_link_speed speed;
	boolean_t link_up;

	mutex_enter(&ixgbe->gen_lock);

	/* make sure we know current state of link */
	(void) ixgbe_check_link(hw, &speed, &link_up, false);

	/*
	 * check over-temp condition: SDP0 with the link down, or a
	 * link-status-change interrupt, may indicate overheating
	 */
	if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) ||
	    (eicr & IXGBE_EICR_LSC)) {
		if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP) {
			/* latch the state; the watchdog will not reset us */
			atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);

			/*
			 * Disable the adapter interrupts
			 */
			ixgbe_disable_adapter_interrupts(ixgbe);

			/*
			 * Disable Rx/Tx units
			 */
			(void) ixgbe_stop_adapter(hw);

			ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
			ixgbe_error(ixgbe,
			    "Problem: Network adapter has been stopped "
			    "because it has overheated");
			ixgbe_error(ixgbe,
			    "Action: Restart the computer. "
			    "If the problem persists, power off the system "
			    "and replace the adapter");
		}
	}

	/* write to clear the interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	mutex_exit(&ixgbe->gen_lock);
}
3510 3510
/*
 * ixgbe_link_timer - timer for link status detection
 */
static void
ixgbe_link_timer(void *arg)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg;

	/* Re-evaluate the link state under the general lock */
	mutex_enter(&ixgbe->gen_lock);
	ixgbe_driver_link_check(ixgbe);
	mutex_exit(&ixgbe->gen_lock);
}
3523 3523
/*
 * ixgbe_local_timer - Driver watchdog function.
 *
 * This function will handle the transmit stall check and other routines.
 */
static void
ixgbe_local_timer(void *arg)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg;

	/* An overheated adapter stays shut down; just re-arm the timer */
	if (!(ixgbe->ixgbe_state & IXGBE_OVERTEMP)) {
		if (ixgbe->ixgbe_state & IXGBE_ERROR) {
			/* A prior error was flagged: attempt recovery */
			ixgbe->reset_count++;
			if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
				ddi_fm_service_impact(ixgbe->dip,
				    DDI_SERVICE_RESTORED);
		} else if (ixgbe_stall_check(ixgbe)) {
			/* Transmit stall detected: degrade, then reset */
			atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STALL);
			ddi_fm_service_impact(ixgbe->dip,
			    DDI_SERVICE_DEGRADED);

			ixgbe->reset_count++;
			if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
				ddi_fm_service_impact(ixgbe->dip,
				    DDI_SERVICE_RESTORED);
		}
	}

	/* Always keep the watchdog running */
	ixgbe_restart_watchdog_timer(ixgbe);
}
3556 3556
/*
 * ixgbe_stall_check - Check for transmit stall.
 *
 * This function checks if the adapter is stalled (in transmit).
 *
 * It is called each time the watchdog timeout is invoked.
 * If the transmit descriptor reclaim continuously fails,
 * the watchdog value will increment by 1. If the watchdog
 * value exceeds the threshold, the ixgbe is assumed to
 * have stalled and need to be reset.
 */
static boolean_t
ixgbe_stall_check(ixgbe_t *ixgbe)
{
	ixgbe_tx_ring_t *tx_ring = NULL;
	boolean_t stalled = B_FALSE;
	int i;

	/* No stall detection while the link is down */
	if (ixgbe->link_state != LINK_STATE_UP)
		return (B_FALSE);

	/*
	 * A single stalled tx ring is enough to reset the chipset
	 */
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		if (tx_ring->tbd_free <= ixgbe->tx_recycle_thresh)
			tx_ring->tx_recycle(tx_ring);

		if (tx_ring->recycle_fail > 0)
			tx_ring->stall_watchdog++;
		else
			tx_ring->stall_watchdog = 0;

		if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) {
			stalled = B_TRUE;
			break;
		}
	}

	if (stalled) {
		/* Reset the counters on the ring that tripped the check */
		tx_ring->stall_watchdog = 0;
		tx_ring->recycle_fail = 0;
	}

	return (stalled);
}
3606 3606
3607 3607
/*
 * is_valid_mac_addr - Check if the mac address is valid.
 *
 * Rejects the all-zeros and broadcast addresses.
 */
static boolean_t
is_valid_mac_addr(uint8_t *mac_addr)
{
	const uint8_t zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
	const uint8_t bcast_addr[6] =
	    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };

	if (bcmp(zero_addr, mac_addr, ETHERADDRL) == 0)
		return (B_FALSE);
	if (bcmp(bcast_addr, mac_addr, ETHERADDRL) == 0)
		return (B_FALSE);

	return (B_TRUE);
}
3624 3624
/*
 * ixgbe_find_mac_address - Override the chip MAC address from firmware
 * properties (SPARC/OBP only).
 *
 * Precedence (lowest to highest): "local-mac-address" property, the
 * system address when "local-mac-address?" is "false", and finally a
 * "mac-address" property left by netbooting.  On non-SPARC platforms
 * this is a no-op.  Always returns B_TRUE.
 */
static boolean_t
ixgbe_find_mac_address(ixgbe_t *ixgbe)
{
#ifdef __sparc
	struct ixgbe_hw *hw = &ixgbe->hw;
	uchar_t *bytes;
	struct ether_addr sysaddr;
	uint_t nelts;
	int err;
	boolean_t found = B_FALSE;

	/*
	 * The "vendor's factory-set address" may already have
	 * been extracted from the chip, but if the property
	 * "local-mac-address" is set we use that instead.
	 *
	 * We check whether it looks like an array of 6
	 * bytes (which it should, if OBP set it). If we can't
	 * make sense of it this way, we'll ignore it.
	 */
	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
	    DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
	if (err == DDI_PROP_SUCCESS) {
		if (nelts == ETHERADDRL) {
			while (nelts--)
				hw->mac.addr[nelts] = bytes[nelts];
			found = B_TRUE;
		}
		ddi_prop_free(bytes);
	}

	/*
	 * Look up the OBP property "local-mac-address?". If the user has set
	 * 'local-mac-address? = false', use "the system address" instead.
	 */
	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0,
	    "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
		if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
			if (localetheraddr(NULL, &sysaddr) != 0) {
				bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
				found = B_TRUE;
			}
		}
		ddi_prop_free(bytes);
	}

	/*
	 * Finally(!), if there's a valid "mac-address" property (created
	 * if we netbooted from this interface), we must use this instead
	 * of any of the above to ensure that the NFS/install server doesn't
	 * get confused by the address changing as Solaris takes over!
	 */
	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
	    DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
	if (err == DDI_PROP_SUCCESS) {
		if (nelts == ETHERADDRL) {
			while (nelts--)
				hw->mac.addr[nelts] = bytes[nelts];
			found = B_TRUE;
		}
		ddi_prop_free(bytes);
	}

	if (found) {
		/* Mirror the override into the permanent address too */
		bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL);
		return (B_TRUE);
	}
#else
	_NOTE(ARGUNUSED(ixgbe));
#endif

	return (B_TRUE);
}
↓ open down ↓ |
3697 lines elided |
↑ open up ↑ |
3698 3698
#pragma inline(ixgbe_arm_watchdog_timer)
/*
 * ixgbe_arm_watchdog_timer - Schedule the next one-second watchdog tick.
 *
 * Caller holds watchdog_lock.  The stored timeout id is used by the
 * disable/stop routines to cancel the pending timeout.
 *
 * Fix: this block carried unresolved diff residue (both the old
 * `1 * drv_usectohz(1000000)` line and its replacement); resolved to
 * the intended drv_sectohz(1) form and dropped the stray blank line.
 */
static void
ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe)
{
	/*
	 * Fire a watchdog timer
	 */
	ixgbe->watchdog_tid =
	    timeout(ixgbe_local_timer,
	    (void *)ixgbe, drv_sectohz(1));
}
3711 3711
/*
 * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer.
 */
void
ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe)
{
	mutex_enter(&ixgbe->watchdog_lock);

	/* Arm the timer only on the transition from disabled to enabled */
	if (!ixgbe->watchdog_enable) {
		ixgbe->watchdog_enable = B_TRUE;
		ixgbe->watchdog_start = B_TRUE;
		ixgbe_arm_watchdog_timer(ixgbe);
	}

	mutex_exit(&ixgbe->watchdog_lock);
}
3728 3728
/*
 * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer.
 */
void
ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe)
{
	timeout_id_t tid;

	/* Clear the flags and grab the pending timeout id under the lock */
	mutex_enter(&ixgbe->watchdog_lock);
	ixgbe->watchdog_enable = B_FALSE;
	ixgbe->watchdog_start = B_FALSE;
	tid = ixgbe->watchdog_tid;
	ixgbe->watchdog_tid = 0;
	mutex_exit(&ixgbe->watchdog_lock);

	/* Cancel any pending timeout outside the lock */
	if (tid != 0)
		(void) untimeout(tid);
}
3749 3749
/*
 * ixgbe_start_watchdog_timer - Start the driver watchdog timer.
 */
void
ixgbe_start_watchdog_timer(ixgbe_t *ixgbe)
{
	mutex_enter(&ixgbe->watchdog_lock);

	/* Arm only if the watchdog is enabled and not already running */
	if (ixgbe->watchdog_enable && !ixgbe->watchdog_start) {
		ixgbe->watchdog_start = B_TRUE;
		ixgbe_arm_watchdog_timer(ixgbe);
	}

	mutex_exit(&ixgbe->watchdog_lock);
}
3767 3767
/*
 * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer.
 */
static void
ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe)
{
	mutex_enter(&ixgbe->watchdog_lock);

	/* Re-arm only while the watchdog is marked as running */
	if (ixgbe->watchdog_start)
		ixgbe_arm_watchdog_timer(ixgbe);

	mutex_exit(&ixgbe->watchdog_lock);
}
3781 3781
/*
 * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer.
 *
 * Unlike the disable routine, this leaves watchdog_enable untouched so
 * the timer can be started again later.
 */
void
ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe)
{
	timeout_id_t tid;

	/* Clear the running flag and take the pending id under the lock */
	mutex_enter(&ixgbe->watchdog_lock);
	ixgbe->watchdog_start = B_FALSE;
	tid = ixgbe->watchdog_tid;
	ixgbe->watchdog_tid = 0;
	mutex_exit(&ixgbe->watchdog_lock);

	/* Cancel any pending timeout outside the lock */
	if (tid != 0)
		(void) untimeout(tid);
}
3801 3801
/*
 * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts.
 *
 * Masks every interrupt cause via EIMC and, in MSI-X mode, also turns
 * off interrupt autoclear (EIAC).
 */
static void
ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;

	/*
	 * mask all interrupts off
	 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff);

	/*
	 * for MSI-X, also disable autoclear
	 */
	if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0);
	}

	/* flush the posted writes so the mask takes effect now */
	IXGBE_WRITE_FLUSH(hw);
}
3824 3824
/*
 * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts.
 *
 * Builds the interrupt mask (eims), autoclear (EIAC), automask (EIAM)
 * and general-purpose interrupt enable (GPIE) values appropriate for
 * the interrupt type in use and the MAC type, then writes them out.
 */
static void
ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t eiac, eiam;
	uint32_t gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);

	/* interrupt types to enable */
	ixgbe->eims = IXGBE_EIMS_ENABLE_MASK;	/* shared code default */
	ixgbe->eims &= ~IXGBE_EIMS_TCP_TIMER;	/* minus tcp timer */
	ixgbe->eims |= ixgbe->capab->other_intr; /* "other" interrupt types */

	/* enable automask on "other" causes that this adapter can generate */
	eiam = ixgbe->capab->other_intr;

	/*
	 * msi-x mode
	 */
	if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
		/* enable autoclear but not on bits 29:20 */
		eiac = (ixgbe->eims & ~IXGBE_OTHER_INTR);

		/* general purpose interrupt enable */
		gpie |= (IXGBE_GPIE_MSIX_MODE
		    | IXGBE_GPIE_PBA_SUPPORT
		    | IXGBE_GPIE_OCD
		    | IXGBE_GPIE_EIAME);
	/*
	 * non-msi-x mode
	 */
	} else {

		/* disable autoclear, leave gpie at default */
		eiac = 0;

		/*
		 * General purpose interrupt enable.
		 * For 82599 or X540, extended interrupt automask enable
		 * only in MSI or MSI-X mode
		 */
		if ((hw->mac.type == ixgbe_mac_82598EB) ||
		    (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) {
			gpie |= IXGBE_GPIE_EIAME;
		}
	}

	/* Enable specific "other" interrupt types */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		gpie |= ixgbe->capab->other_gpie;
		break;

	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		gpie |= ixgbe->capab->other_gpie;

		/* Enable RSC Delay 8us when LRO enabled */
		if (ixgbe->lro_enable) {
			gpie |= (1 << IXGBE_GPIE_RSC_DELAY_SHIFT);
		}
		break;

	default:
		break;
	}

	/* write to interrupt control registers */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
	IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
	IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
	IXGBE_WRITE_FLUSH(hw);
}
3901 3901
/*
 * ixgbe_loopback_ioctl - Loopback support.
 *
 * Services the LB_* ioctls: report the size of the loopback property
 * list, return the list itself, and get/set the current loopback mode.
 * Returns IOC_REPLY on success, IOC_INVAL on any malformed request.
 */
enum ioc_reply
ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp)
{
	lb_info_sz_t *sz_out;
	lb_property_t *props;
	uint32_t *mode_ptr;
	uint32_t reply_size;
	uint32_t total_sz;

	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	/* Size of the full loopback property list we can report. */
	total_sz = sizeof (lb_normal) + sizeof (lb_mac) + sizeof (lb_external);

	switch (iocp->ioc_cmd) {
	case LB_GET_INFO_SIZE:
		reply_size = sizeof (lb_info_sz_t);
		if (iocp->ioc_count != reply_size)
			return (IOC_INVAL);

		sz_out = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
		*sz_out = total_sz;
		break;

	case LB_GET_INFO:
		reply_size = total_sz;
		if (iocp->ioc_count != reply_size)
			return (IOC_INVAL);

		props = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;
		props[0] = lb_normal;
		props[1] = lb_mac;
		props[2] = lb_external;
		break;

	case LB_GET_MODE:
		reply_size = sizeof (uint32_t);
		if (iocp->ioc_count != reply_size)
			return (IOC_INVAL);

		mode_ptr = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
		*mode_ptr = ixgbe->loopback_mode;
		break;

	case LB_SET_MODE:
		reply_size = 0;
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);

		mode_ptr = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
		if (!ixgbe_set_loopback_mode(ixgbe, *mode_ptr))
			return (IOC_INVAL);
		break;

	default:
		return (IOC_INVAL);
	}

	iocp->ioc_count = reply_size;
	iocp->ioc_error = 0;

	/* Fail the ioctl if register access has gone bad underneath us. */
	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		return (IOC_INVAL);
	}

	return (IOC_REPLY);
}
3981 3981
/*
 * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode.
 *
 * Returns B_TRUE on success, B_FALSE for an unknown mode.
 *
 * Fix: previously the new mode was stored in ixgbe->loopback_mode before
 * it was validated, so a rejected (unknown) mode was left recorded and
 * LB_GET_MODE would report a mode that was never put into effect.  The
 * previous mode is now restored before failing.
 */
static boolean_t
ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode)
{
	uint32_t old_mode = ixgbe->loopback_mode;

	/* No change requested */
	if (mode == old_mode)
		return (B_TRUE);

	/*
	 * Record the new mode up front; ixgbe_reset() and the setup
	 * helpers below act on the stored value.
	 */
	ixgbe->loopback_mode = mode;

	if (mode == IXGBE_LB_NONE) {
		/*
		 * Reset the chip to leave loopback
		 */
		(void) ixgbe_reset(ixgbe);
		return (B_TRUE);
	}

	mutex_enter(&ixgbe->gen_lock);

	switch (mode) {
	case IXGBE_LB_EXTERNAL:
		/* nothing to program; external plug provides the loop */
		break;

	case IXGBE_LB_INTERNAL_MAC:
		ixgbe_set_internal_mac_loopback(ixgbe);
		break;

	default:
		/* Unknown mode: roll back so the stored mode stays valid */
		ixgbe->loopback_mode = old_mode;
		mutex_exit(&ixgbe->gen_lock);
		return (B_FALSE);
	}

	mutex_exit(&ixgbe->gen_lock);

	return (B_TRUE);
}
4020 4020
/*
 * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode.
 *
 * Turns on MAC-level loopback (HLREG0.LPBK), forces the link mode, and
 * then applies per-mac fixups so looped packets do not leak onto the
 * wire.  Caller holds gen_lock (called from ixgbe_set_loopback_mode).
 */
static void
ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw;
	uint32_t reg;
	uint8_t atlas;

	hw = &ixgbe->hw;

	/*
	 * Setup MAC loopback
	 */
	reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_HLREG0);
	reg |= IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_HLREG0, reg);

	/* clear the link mode select field in AUTOC */
	reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
	reg &= ~IXGBE_AUTOC_LMS_MASK;
	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);

	/*
	 * Disable Atlas Tx lanes to keep packets in loopback and not on wire
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* power down every Atlas analog tx lane group in turn */
		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
		    &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
		    atlas);

		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
		    &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
		    atlas);

		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
		    &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
		    atlas);

		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
		    &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
		    atlas);
		break;

	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		/* force link up at 10G KX4 so the MAC loop has a link */
		reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
		reg |= (IXGBE_AUTOC_FLU |
		    IXGBE_AUTOC_10G_KX4);
		IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);

		(void) ixgbe_setup_link(&ixgbe->hw, IXGBE_LINK_SPEED_10GB_FULL,
		    B_FALSE, B_TRUE);
		break;

	default:
		break;
	}
}
4089 4089
#pragma inline(ixgbe_intr_rx_work)
/*
 * ixgbe_intr_rx_work - RX processing of ISR.
 *
 * Drain the ring under rx_lock, then hand any received chain of
 * mblks up to the MAC layer outside the lock.
 */
static void
ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)
{
	mblk_t *chain;

	mutex_enter(&rx_ring->rx_lock);
	chain = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
	mutex_exit(&rx_ring->rx_lock);

	if (chain == NULL)
		return;

	mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, chain,
	    rx_ring->ring_gen_num);
}
4108 4108
#pragma inline(ixgbe_intr_tx_work)
/*
 * ixgbe_intr_tx_work - TX processing of ISR.
 *
 * Reclaim completed tx descriptors and, if transmission had been
 * blocked and enough descriptors are free again, ask the MAC layer
 * to restart this ring.
 */
static void
ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring)
{
	ixgbe_t *ixgbe = tx_ring->ixgbe;

	/* Recycle the tx descriptors first. */
	tx_ring->tx_recycle(tx_ring);

	/* Nothing more to do unless a restart was requested and is due. */
	if (!tx_ring->reschedule ||
	    tx_ring->tbd_free < ixgbe->tx_resched_thresh)
		return;

	tx_ring->reschedule = B_FALSE;
	mac_tx_ring_update(ixgbe->mac_hdl, tx_ring->ring_handle);
	IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
}
4134 4134
#pragma inline(ixgbe_intr_other_work)
/*
 * ixgbe_intr_other_work - Process interrupt types other than tx/rx
 *
 * Handles the "other" EICR causes: link status change, fan failure,
 * SFP hot-plug and over-temperature events.  Caller must hold gen_lock.
 */
static void
ixgbe_intr_other_work(ixgbe_t *ixgbe, uint32_t eicr)
{
	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * handle link status change
	 */
	if (eicr & IXGBE_EICR_LSC) {
		ixgbe_driver_link_check(ixgbe);
		ixgbe_get_hw_state(ixgbe);
	}

	/*
	 * check for fan failure on adapters with fans
	 * (GPI_SDP1 doubles as the fan-fail signal on those boards;
	 * the failure is treated like an overtemp fault and the
	 * adapter is shut down)
	 */
	if ((ixgbe->capab->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
	    (eicr & IXGBE_EICR_GPI_SDP1)) {
		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);

		/*
		 * Disable the adapter interrupts
		 */
		ixgbe_disable_adapter_interrupts(ixgbe);

		/*
		 * Disable Rx/Tx units
		 */
		(void) ixgbe_stop_adapter(&ixgbe->hw);

		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
		ixgbe_error(ixgbe,
		    "Problem: Network adapter has been stopped "
		    "because the fan has stopped.\n");
		ixgbe_error(ixgbe,
		    "Action: Replace the adapter.\n");

		/* re-enable the interrupt, which was automasked */
		ixgbe->eims |= IXGBE_EICR_GPI_SDP1;
	}

	/*
	 * Do SFP check for adapters with hot-plug capability.
	 * Done from a taskq because module identification can sleep.
	 */
	if ((ixgbe->capab->flags & IXGBE_FLAG_SFP_PLUG_CAPABLE) &&
	    ((eicr & IXGBE_EICR_GPI_SDP1) || (eicr & IXGBE_EICR_GPI_SDP2))) {
		ixgbe->eicr = eicr;
		if ((ddi_taskq_dispatch(ixgbe->sfp_taskq,
		    ixgbe_sfp_check, (void *)ixgbe,
		    DDI_NOSLEEP)) != DDI_SUCCESS) {
			ixgbe_log(ixgbe, "No memory available to dispatch "
			    "taskq for SFP check");
		}
	}

	/*
	 * Do over-temperature check for adapters with temp sensor.
	 * Also dispatched off-interrupt via a taskq.
	 */
	if ((ixgbe->capab->flags & IXGBE_FLAG_TEMP_SENSOR_CAPABLE) &&
	    ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
		ixgbe->eicr = eicr;
		if ((ddi_taskq_dispatch(ixgbe->overtemp_taskq,
		    ixgbe_overtemp_check, (void *)ixgbe,
		    DDI_NOSLEEP)) != DDI_SUCCESS) {
			ixgbe_log(ixgbe, "No memory available to dispatch "
			    "taskq for overtemp check");
		}
	}
}
4208 4208
/*
 * ixgbe_intr_legacy - Interrupt handler for legacy interrupts.
 *
 * With a fixed (shared) interrupt only one rx ring and one tx ring are
 * enabled.  The handler claims the interrupt if any EICR bit is set,
 * services rx/tx/other causes under gen_lock, and defers the upcalls
 * into the MAC layer (mac_rx_ring/mac_tx_ring_update) until the lock
 * has been dropped.
 */
static uint_t
ixgbe_intr_legacy(void *arg1, void *arg2)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_tx_ring_t *tx_ring;
	ixgbe_rx_ring_t *rx_ring;
	uint32_t eicr;
	mblk_t *mp;
	boolean_t tx_reschedule;
	uint_t result;

	_NOTE(ARGUNUSED(arg2));

	mutex_enter(&ixgbe->gen_lock);
	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
		mutex_exit(&ixgbe->gen_lock);
		return (DDI_INTR_UNCLAIMED);
	}

	mp = NULL;
	tx_reschedule = B_FALSE;

	/*
	 * Any bit set in eicr: claim this interrupt
	 * (reading EICR also clears the cause bits)
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		mutex_exit(&ixgbe->gen_lock);
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
		return (DDI_INTR_CLAIMED);
	}

	if (eicr) {
		/*
		 * For legacy interrupt, we have only one interrupt,
		 * so we have only one rx ring and one tx ring enabled.
		 */
		ASSERT(ixgbe->num_rx_rings == 1);
		ASSERT(ixgbe->num_tx_rings == 1);

		/*
		 * For legacy interrupt, rx rings[0] will use RTxQ[0].
		 */
		if (eicr & 0x1) {
			/* mask the queue cause while we service it */
			ixgbe->eimc |= IXGBE_EICR_RTX_QUEUE;
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
			ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
			/*
			 * Clean the rx descriptors
			 */
			rx_ring = &ixgbe->rx_rings[0];
			mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
		}

		/*
		 * For legacy interrupt, tx rings[0] will use RTxQ[1].
		 */
		if (eicr & 0x2) {
			/*
			 * Recycle the tx descriptors
			 */
			tx_ring = &ixgbe->tx_rings[0];
			tx_ring->tx_recycle(tx_ring);

			/*
			 * Schedule the re-transmit
			 */
			tx_reschedule = (tx_ring->reschedule &&
			    (tx_ring->tbd_free >= ixgbe->tx_resched_thresh));
		}

		/* any interrupt type other than tx/rx */
		if (eicr & ixgbe->capab->other_intr) {
			switch (hw->mac.type) {
			case ixgbe_mac_82598EB:
				ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
				break;

			case ixgbe_mac_82599EB:
			case ixgbe_mac_X540:
				/* 82599/X540 mask "other" causes via EIMC */
				ixgbe->eimc = IXGBE_82599_OTHER_INTR;
				IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
				break;

			default:
				break;
			}
			ixgbe_intr_other_work(ixgbe, eicr);
			ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
		}

		mutex_exit(&ixgbe->gen_lock);

		result = DDI_INTR_CLAIMED;
	} else {
		mutex_exit(&ixgbe->gen_lock);

		/*
		 * No interrupt cause bits set: don't claim this interrupt.
		 */
		result = DDI_INTR_UNCLAIMED;
	}

	/* re-enable the interrupts which were automasked */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);

	/*
	 * Do the following work outside of the gen_lock
	 * (upcalls into the MAC layer must not hold driver locks)
	 */
	if (mp != NULL) {
		mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
		    rx_ring->ring_gen_num);
	}

	if (tx_reschedule) {
		tx_ring->reschedule = B_FALSE;
		mac_tx_ring_update(ixgbe->mac_hdl, tx_ring->ring_handle);
		IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
	}

	return (result);
}
4337 4337
/*
 * ixgbe_intr_msi - Interrupt handler for MSI.
 *
 * MSI provides a single vector, so exactly one rx ring and one tx ring
 * are enabled; EICR bit 0 maps to the rx ring, bit 1 to the tx ring.
 * "Other" causes are serviced under gen_lock, then any automasked
 * interrupts are re-enabled.
 */
static uint_t
ixgbe_intr_msi(void *arg1, void *arg2)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t eicr;

	_NOTE(ARGUNUSED(arg2));

	/* reading EICR clears the pending cause bits */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
		return (DDI_INTR_CLAIMED);
	}

	/*
	 * For MSI interrupt, we have only one vector,
	 * so we have only one rx ring and one tx ring enabled.
	 */
	ASSERT(ixgbe->num_rx_rings == 1);
	ASSERT(ixgbe->num_tx_rings == 1);

	/*
	 * For MSI interrupt, rx rings[0] will use RTxQ[0].
	 */
	if (eicr & 0x1) {
		ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
	}

	/*
	 * For MSI interrupt, tx rings[0] will use RTxQ[1].
	 */
	if (eicr & 0x2) {
		ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
	}

	/* any interrupt type other than tx/rx */
	if (eicr & ixgbe->capab->other_intr) {
		mutex_enter(&ixgbe->gen_lock);
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
			break;

		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
			/* 82599/X540 mask "other" causes via EIMC */
			ixgbe->eimc = IXGBE_82599_OTHER_INTR;
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
			break;

		default:
			break;
		}
		ixgbe_intr_other_work(ixgbe, eicr);
		ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
		mutex_exit(&ixgbe->gen_lock);
	}

	/* re-enable the interrupts which were automasked */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);

	return (DDI_INTR_CLAIMED);
}
4406 4406
/*
 * ixgbe_intr_msix - Interrupt handler for MSI-X.
 *
 * arg1 is the per-vector ixgbe_intr_vector_t.  Each vector carries
 * bitmaps (rx_map/tx_map/other_map) describing which rings and causes
 * it services; walk them and do the corresponding rx/tx/other work.
 */
static uint_t
ixgbe_intr_msix(void *arg1, void *arg2)
{
	ixgbe_intr_vector_t *vect = (ixgbe_intr_vector_t *)arg1;
	ixgbe_t *ixgbe = vect->ixgbe;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t eicr;
	int r_idx = 0;

	_NOTE(ARGUNUSED(arg2));

	/*
	 * Clean each rx ring that has its bit set in the map
	 */
	r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1));
	while (r_idx >= 0) {
		ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]);
		r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
		    (ixgbe->num_rx_rings - 1));
	}

	/*
	 * Clean each tx ring that has its bit set in the map
	 */
	r_idx = bt_getlowbit(vect->tx_map, 0, (ixgbe->num_tx_rings - 1));
	while (r_idx >= 0) {
		ixgbe_intr_tx_work(&ixgbe->tx_rings[r_idx]);
		r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
		    (ixgbe->num_tx_rings - 1));
	}


	/*
	 * Clean other interrupt (link change) that has its bit set in the map
	 */
	if (BT_TEST(vect->other_map, 0) == 1) {
		/* reading EICR clears the pending cause bits */
		eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

		if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) !=
		    DDI_FM_OK) {
			ddi_fm_service_impact(ixgbe->dip,
			    DDI_SERVICE_DEGRADED);
			atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
			return (DDI_INTR_CLAIMED);
		}

		/*
		 * Check "other" cause bits: any interrupt type other than tx/rx
		 */
		if (eicr & ixgbe->capab->other_intr) {
			mutex_enter(&ixgbe->gen_lock);
			switch (hw->mac.type) {
			case ixgbe_mac_82598EB:
				ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
				ixgbe_intr_other_work(ixgbe, eicr);
				break;

			case ixgbe_mac_82599EB:
			case ixgbe_mac_X540:
				ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
				ixgbe_intr_other_work(ixgbe, eicr);
				break;

			default:
				break;
			}
			mutex_exit(&ixgbe->gen_lock);
		}

		/* re-enable the interrupts which were automasked */
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
	}

	return (DDI_INTR_CLAIMED);
}
4485 4485
/*
 * ixgbe_alloc_intrs - Allocate interrupts for the driver.
 *
 * Normal sequence is to try MSI-X; if not sucessful, try MSI;
 * if not successful, try Legacy.
 * ixgbe->intr_force can be used to force sequence to start with
 * any of the 3 types.
 * If MSI-X is not used, number of tx/rx rings is forced to 1.
 */
static int
ixgbe_alloc_intrs(ixgbe_t *ixgbe)
{
	int supported;
	int rc;

	/*
	 * Ask the framework which interrupt types this device supports.
	 */
	rc = ddi_intr_get_supported_types(ixgbe->dip, &supported);
	if (rc != DDI_SUCCESS) {
		ixgbe_log(ixgbe,
		    "Get supported interrupt types failed: %d", rc);
		return (IXGBE_FAILURE);
	}
	IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", supported);

	ixgbe->intr_type = 0;

	/*
	 * First preference: MSI-X (unless intr_force rules it out).
	 */
	if ((supported & DDI_INTR_TYPE_MSIX) &&
	    (ixgbe->intr_force <= IXGBE_INTR_MSIX)) {
		if (ixgbe_alloc_intr_handles(ixgbe,
		    DDI_INTR_TYPE_MSIX) == IXGBE_SUCCESS)
			return (IXGBE_SUCCESS);

		ixgbe_log(ixgbe,
		    "Allocate MSI-X failed, trying MSI interrupts...");
	}

	/*
	 * Without MSI-X, only a single ring and group can be serviced.
	 */
	ixgbe->num_rx_rings = 1;
	ixgbe->num_rx_groups = 1;
	ixgbe->num_tx_rings = 1;
	ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
	ixgbe_log(ixgbe,
	    "MSI-X not used, force rings and groups number to 1");

	/*
	 * Second preference: MSI.
	 */
	if ((supported & DDI_INTR_TYPE_MSI) &&
	    (ixgbe->intr_force <= IXGBE_INTR_MSI)) {
		if (ixgbe_alloc_intr_handles(ixgbe,
		    DDI_INTR_TYPE_MSI) == IXGBE_SUCCESS)
			return (IXGBE_SUCCESS);

		ixgbe_log(ixgbe,
		    "Allocate MSI failed, trying Legacy interrupts...");
	}

	/*
	 * Last resort: a fixed (legacy) interrupt.
	 */
	if (supported & DDI_INTR_TYPE_FIXED) {
		if (ixgbe_alloc_intr_handles(ixgbe,
		    DDI_INTR_TYPE_FIXED) == IXGBE_SUCCESS)
			return (IXGBE_SUCCESS);

		ixgbe_log(ixgbe,
		    "Allocate Legacy interrupts failed");
	}

	/*
	 * If none of the 3 types succeeded, return failure
	 */
	return (IXGBE_FAILURE);
}
4571 4571
/*
 * ixgbe_alloc_intr_handles - Allocate interrupt handles.
 *
 * For legacy and MSI, only 1 handle is needed. For MSI-X,
 * if fewer than 2 handles are available, return failure.
 * Upon success, this maps the vectors to rx and tx rings for
 * interrupts.
 *
 * On any failure after the handle array has been allocated, cleanup
 * goes through ixgbe_rem_intrs() via the alloc_handle_fail label.
 */
static int
ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type)
{
	dev_info_t *devinfo;
	int request, count, actual;
	int minimum;
	int rc;
	uint32_t ring_per_group;

	devinfo = ixgbe->dip;

	/* decide how many handles to request for this interrupt type */
	switch (intr_type) {
	case DDI_INTR_TYPE_FIXED:
		request = 1;	/* Request 1 legacy interrupt handle */
		minimum = 1;
		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy");
		break;

	case DDI_INTR_TYPE_MSI:
		request = 1;	/* Request 1 MSI interrupt handle */
		minimum = 1;
		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI");
		break;

	case DDI_INTR_TYPE_MSIX:
		/*
		 * Best number of vectors for the adapter is
		 * (# rx rings + # tx rings), however we will
		 * limit the request number.
		 */
		request = min(16, ixgbe->num_rx_rings + ixgbe->num_tx_rings);
		if (request > ixgbe->capab->max_ring_vect)
			request = ixgbe->capab->max_ring_vect;
		minimum = 1;
		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X");
		break;

	default:
		ixgbe_log(ixgbe,
		    "invalid call to ixgbe_alloc_intr_handles(): %d\n",
		    intr_type);
		return (IXGBE_FAILURE);
	}
	IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d  minimum: %d",
	    request, minimum);

	/*
	 * Get number of supported interrupts
	 */
	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
	if ((rc != DDI_SUCCESS) || (count < minimum)) {
		ixgbe_log(ixgbe,
		    "Get interrupt number failed. Return: %d, count: %d",
		    rc, count);
		return (IXGBE_FAILURE);
	}
	IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count);

	actual = 0;
	ixgbe->intr_cnt = 0;
	ixgbe->intr_cnt_max = 0;
	ixgbe->intr_cnt_min = 0;

	/*
	 * Allocate an array of interrupt handles
	 */
	ixgbe->intr_size = request * sizeof (ddi_intr_handle_t);
	ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP);

	rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0,
	    request, &actual, DDI_INTR_ALLOC_NORMAL);
	if (rc != DDI_SUCCESS) {
		ixgbe_log(ixgbe, "Allocate interrupts failed. "
		    "return: %d, request: %d, actual: %d",
		    rc, request, actual);
		goto alloc_handle_fail;
	}
	IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual);

	/*
	 * upper/lower limit of interrupts
	 */
	ixgbe->intr_cnt = actual;
	ixgbe->intr_cnt_max = request;
	ixgbe->intr_cnt_min = minimum;

	/*
	 * rss number per group should not exceed the rx interrupt number,
	 * else need to adjust rx ring number.
	 */
	ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
	ASSERT((ixgbe->num_rx_rings % ixgbe->num_rx_groups) == 0);
	if (actual < ring_per_group) {
		ixgbe->num_rx_rings = ixgbe->num_rx_groups * actual;
		ixgbe_setup_vmdq_rss_conf(ixgbe);
	}

	/*
	 * Now we know the actual number of vectors. Here we map the vector
	 * to other, rx rings and tx ring.
	 */
	if (actual < minimum) {
		ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d",
		    actual);
		goto alloc_handle_fail;
	}

	/*
	 * Get priority for first vector, assume remaining are all the same
	 */
	rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
	if (rc != DDI_SUCCESS) {
		ixgbe_log(ixgbe,
		    "Get interrupt priority failed: %d", rc);
		goto alloc_handle_fail;
	}

	rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
	if (rc != DDI_SUCCESS) {
		ixgbe_log(ixgbe,
		    "Get interrupt cap failed: %d", rc);
		goto alloc_handle_fail;
	}

	ixgbe->intr_type = intr_type;

	return (IXGBE_SUCCESS);

alloc_handle_fail:
	/* frees the handle array and any interrupts already allocated */
	ixgbe_rem_intrs(ixgbe);

	return (IXGBE_FAILURE);
}
4713 4713
/*
 * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt type.
 *
 * Before adding the interrupt handlers, the interrupt vectors have
 * been allocated, and the rx/tx rings have also been allocated.
 */
static int
ixgbe_add_intr_handlers(ixgbe_t *ixgbe)
{
	int v;
	int rc;

	switch (ixgbe->intr_type) {
	case DDI_INTR_TYPE_MSIX:
		/*
		 * Register one handler per vector, each pointing at its
		 * own vect_map entry.  On failure, unwind the handlers
		 * already installed.
		 */
		for (v = 0; v < ixgbe->intr_cnt; v++) {
			rc = ddi_intr_add_handler(ixgbe->htable[v],
			    (ddi_intr_handler_t *)ixgbe_intr_msix,
			    (void *)&ixgbe->vect_map[v], NULL);
			if (rc != DDI_SUCCESS) {
				ixgbe_log(ixgbe,
				    "Add interrupt handler failed. "
				    "return: %d, vector: %d", rc, v);
				while (--v >= 0) {
					(void) ddi_intr_remove_handler(
					    ixgbe->htable[v]);
				}
				return (IXGBE_FAILURE);
			}
		}
		return (IXGBE_SUCCESS);

	case DDI_INTR_TYPE_MSI:
		/* Single vector: the whole ixgbe_t is the handler arg. */
		rc = ddi_intr_add_handler(ixgbe->htable[0],
		    (ddi_intr_handler_t *)ixgbe_intr_msi,
		    (void *)ixgbe, NULL);
		if (rc != DDI_SUCCESS) {
			ixgbe_log(ixgbe,
			    "Add MSI interrupt handler failed: %d", rc);
			return (IXGBE_FAILURE);
		}
		return (IXGBE_SUCCESS);

	case DDI_INTR_TYPE_FIXED:
		/* Single shared legacy interrupt. */
		rc = ddi_intr_add_handler(ixgbe->htable[0],
		    (ddi_intr_handler_t *)ixgbe_intr_legacy,
		    (void *)ixgbe, NULL);
		if (rc != DDI_SUCCESS) {
			ixgbe_log(ixgbe,
			    "Add legacy interrupt handler failed: %d", rc);
			return (IXGBE_FAILURE);
		}
		return (IXGBE_SUCCESS);

	default:
		return (IXGBE_FAILURE);
	}
}
4791 4791
#pragma inline(ixgbe_map_rxring_to_vector)
/*
 * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector.
 */
static void
ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx)
{
	ixgbe_intr_vector_t *vect = &ixgbe->vect_map[v_idx];
	ixgbe_rx_ring_t *ring = &ixgbe->rx_rings[r_idx];

	/* Record the ring in the vector's rx bitmap and bump its count. */
	BT_SET(vect->rx_map, r_idx);
	vect->rxr_cnt++;

	/* Let the ring remember which vector (and mask bit) serves it. */
	ring->intr_vector = v_idx;
	ring->vect_bit = 1 << v_idx;
}
4815 4815
#pragma inline(ixgbe_map_txring_to_vector)
/*
 * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector.
 */
static void
ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx)
{
	ixgbe_intr_vector_t *vect = &ixgbe->vect_map[v_idx];
	ixgbe_tx_ring_t *ring = &ixgbe->tx_rings[t_idx];

	/* Record the ring in the vector's tx bitmap and bump its count. */
	BT_SET(vect->tx_map, t_idx);
	vect->txr_cnt++;

	/* Let the ring remember which vector (and mask bit) serves it. */
	ring->intr_vector = v_idx;
	ring->vect_bit = 1 << v_idx;
}
4839 4839
/*
 * ixgbe_setup_ivar - Set the given entry in the given interrupt vector
 * allocation register (IVAR).
 * cause:
 *   -1 : other cause
 *    0 : rx
 *    1 : tx
 *
 * Each IVAR register holds four 8-bit entries; the entry's byte is
 * replaced with the msix vector number plus the ALLOC_VAL (valid) bit.
 * The register/byte layout differs between the 82598 and 82599/X540.
 */
static void
ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector,
    int8_t cause)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	u32 ivar, index;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		/* 82598 has no separate "other" table; fold into rx */
		if (cause == -1) {
			cause = 0;
		}
		/* 4 entries per register; rx entries first, tx at +64 */
		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3)));
		ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;

	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		if (cause == -1) {
			/* other causes live in the dedicated IVAR_MISC */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = (intr_alloc_entry & 1) * 8;
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
		} else {
			/* tx or rx causes: 2 queues per register */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
			ivar = IXGBE_READ_REG(hw,
			    IXGBE_IVAR(intr_alloc_entry >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
			    ivar);
		}
		break;

	default:
		break;
	}
}
4895 4895
4896 4896 /*
4897 4897 * ixgbe_enable_ivar - Enable the given entry by setting the VAL bit of
4898 4898 * given interrupt vector allocation register (IVAR).
4899 4899 * cause:
4900 4900 * -1 : other cause
4901 4901 * 0 : rx
4902 4902 * 1 : tx
4903 4903 */
static void
ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	u32 ivar, index;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/*
		 * 82598: "other" cause shares the rx entry range, so fold
		 * cause -1 onto 0.  Four one-byte entries per IVAR register.
		 */
		if (cause == -1) {
			cause = 0;
		}
		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		/* set only the VAL bit within this entry's byte */
		ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 *
		    (intr_alloc_entry & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;

	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		if (cause == -1) {
			/* other causes: VAL bit lives in IVAR_MISC */
			index = (intr_alloc_entry & 1) * 8;
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
		} else {
			/* tx or rx causes: rx byte at 0/16, tx byte at 8/24 */
			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
			ivar = IXGBE_READ_REG(hw,
			    IXGBE_IVAR(intr_alloc_entry >> 1));
			ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
			    ivar);
		}
		break;

	default:
		break;
	}
}
4945 4945
/*
 * ixgbe_disable_ivar - Disable the given entry by clearing the VAL bit of
 * given interrupt vector allocation register (IVAR).
 * cause:
 *   -1 : other cause
 *    0 : rx
 *    1 : tx
 */
static void
ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	u32 regval, regidx, bitpos;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* "other" cause shares the rx entry range on 82598 */
		if (cause == -1) {
			cause = 0;
		}
		regidx = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
		bitpos = 8 * (intr_alloc_entry & 0x3);
		/* clear the VAL bit of this entry's byte, keep the rest */
		regval = IXGBE_READ_REG(hw, IXGBE_IVAR(regidx));
		regval &= ~(IXGBE_IVAR_ALLOC_VAL << bitpos);
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(regidx), regval);
		break;

	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		if (cause == -1) {
			/* other causes: clear VAL in IVAR_MISC */
			bitpos = (intr_alloc_entry & 1) * 8;
			regval = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			regval &= ~(IXGBE_IVAR_ALLOC_VAL << bitpos);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, regval);
		} else {
			/* tx or rx causes: rx byte at 0/16, tx byte at 8/24 */
			bitpos = (16 * (intr_alloc_entry & 1)) + (8 * cause);
			regidx = intr_alloc_entry >> 1;
			regval = IXGBE_READ_REG(hw, IXGBE_IVAR(regidx));
			regval &= ~(IXGBE_IVAR_ALLOC_VAL << bitpos);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(regidx), regval);
		}
		break;

	default:
		break;
	}
}
4995 4995
4996 4996 /*
4997 4997 * Convert the rx ring index driver maintained to the rx ring index
4998 4998 * in h/w.
4999 4999 */
static uint32_t
ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t rings_per_group, group, offset;

	switch (ixgbe->classify_mode) {
	case IXGBE_CLASSIFY_RSS:
	case IXGBE_CLASSIFY_NONE:
		/* single group: s/w and h/w ring indexes coincide */
		return (sw_rx_index);

	case IXGBE_CLASSIFY_VMDQ:
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			return (sw_rx_index);

		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
			return (sw_rx_index * 2);

		default:
			break;
		}
		break;

	case IXGBE_CLASSIFY_VMDQ_RSS:
		/* decompose into (group, offset within group) */
		rings_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
		group = sw_rx_index / rings_per_group;
		offset = sw_rx_index % rings_per_group;

		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			return (group * 16 + offset);

		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
			if (ixgbe->num_rx_groups > 32)
				return (group * 2 + offset);
			return (group * 4 + offset);

		default:
			break;
		}
		break;

	default:
		break;
	}

	/*
	 * Should never reach. Just to make compiler happy.
	 */
	return (sw_rx_index);
}
5054 5054
5055 5055 /*
5056 5056 * ixgbe_map_intrs_to_vectors - Map different interrupts to MSI-X vectors.
5057 5057 *
5058 5058 * For MSI-X, here will map rx interrupt, tx interrupt and other interrupt
5059 5059 * to vector[0 - (intr_cnt -1)].
5060 5060 */
static int
ixgbe_map_intrs_to_vectors(ixgbe_t *ixgbe)
{
	int ring, vec;

	/* Start from a clean vector map */
	bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
	for (vec = 0; vec < ixgbe->intr_cnt; vec++)
		ixgbe->vect_map[vec].ixgbe = ixgbe;

	if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
		/*
		 * Non-MSI-X is simple: rx rings[0] on RTxQ[0],
		 * tx rings[0] on RTxQ[1].
		 */
		ixgbe_map_rxring_to_vector(ixgbe, 0, 0);
		ixgbe_map_txring_to_vector(ixgbe, 0, 1);
		return (IXGBE_SUCCESS);
	}

	/*
	 * MSI-X: the "other" cause always sits on vector 0; rx rings
	 * and then tx rings are distributed round-robin over all
	 * vectors, tx continuing where the rx assignment left off.
	 */
	vec = 0;
	BT_SET(ixgbe->vect_map[vec].other_map, 0);
	ixgbe->vect_map[vec].other_cnt++;

	for (ring = 0; ring < ixgbe->num_rx_rings; ring++) {
		ixgbe_map_rxring_to_vector(ixgbe, ring, vec);
		vec = (vec + 1) % ixgbe->intr_cnt;
	}

	for (ring = 0; ring < ixgbe->num_tx_rings; ring++) {
		ixgbe_map_txring_to_vector(ixgbe, ring, vec);
		vec = (vec + 1) % ixgbe->intr_cnt;
	}

	return (IXGBE_SUCCESS);
}
5111 5111
5112 5112 /*
5113 5113 * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s).
5114 5114 *
5115 5115 * This relies on ring/vector mapping already set up in the
5116 5116 * vect_map[] structures
5117 5117 */
static void
ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_intr_vector_t *vect;	/* vector bitmap */
	int r_idx;	/* ring index */
	int v_idx;	/* vector index */
	uint32_t hw_index;

	/*
	 * Clear any previous entries.  The loop bounds reflect the
	 * per-chip IVAR register count (25 vs. 64 here); 82599/X540
	 * additionally have the separate IVAR_MISC register.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		for (v_idx = 0; v_idx < 25; v_idx++)
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
		break;

	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		for (v_idx = 0; v_idx < 64; v_idx++)
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0);
		break;

	default:
		break;
	}

	/*
	 * For non MSI-X interrupt, rx rings[0] will use RTxQ[0], and
	 * tx rings[0] will use RTxQ[1].
	 */
	if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
		ixgbe_setup_ivar(ixgbe, 0, 0, 0);
		ixgbe_setup_ivar(ixgbe, 0, 1, 1);
		return;
	}

	/*
	 * For MSI-X interrupt, "Other" is always on vector[0].
	 */
	ixgbe_setup_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0, -1);

	/*
	 * For each interrupt vector, populate the IVAR table
	 */
	for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) {
		vect = &ixgbe->vect_map[v_idx];

		/*
		 * For each rx ring bit set: note rx rings are programmed
		 * by their h/w index, not the s/w index.
		 */
		r_idx = bt_getlowbit(vect->rx_map, 0,
		    (ixgbe->num_rx_rings - 1));

		while (r_idx >= 0) {
			hw_index = ixgbe->rx_rings[r_idx].hw_index;
			ixgbe_setup_ivar(ixgbe, hw_index, v_idx, 0);
			r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
			    (ixgbe->num_rx_rings - 1));
		}

		/*
		 * For each tx ring bit set (tx rings use the s/w index
		 * directly).
		 */
		r_idx = bt_getlowbit(vect->tx_map, 0,
		    (ixgbe->num_tx_rings - 1));

		while (r_idx >= 0) {
			ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 1);
			r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
			    (ixgbe->num_tx_rings - 1));
		}
	}
}
5194 5194
5195 5195 /*
5196 5196 * ixgbe_rem_intr_handlers - Remove the interrupt handlers.
5197 5197 */
static void
ixgbe_rem_intr_handlers(ixgbe_t *ixgbe)
{
	int vec;
	int status;

	/* Detach the handler from every allocated interrupt */
	for (vec = 0; vec < ixgbe->intr_cnt; vec++) {
		status = ddi_intr_remove_handler(ixgbe->htable[vec]);
		if (status != DDI_SUCCESS) {
			IXGBE_DEBUGLOG_1(ixgbe,
			    "Remove intr handler failed: %d", status);
		}
	}
}
5212 5212
5213 5213 /*
5214 5214 * ixgbe_rem_intrs - Remove the allocated interrupts.
5215 5215 */
static void
ixgbe_rem_intrs(ixgbe_t *ixgbe)
{
	int vec;
	int status;

	/* Release each interrupt, then the handle table itself */
	for (vec = 0; vec < ixgbe->intr_cnt; vec++) {
		status = ddi_intr_free(ixgbe->htable[vec]);
		if (status != DDI_SUCCESS) {
			IXGBE_DEBUGLOG_1(ixgbe,
			    "Free intr failed: %d", status);
		}
	}

	kmem_free(ixgbe->htable, ixgbe->intr_size);
	ixgbe->htable = NULL;
}
5233 5233
5234 5234 /*
5235 5235 * ixgbe_enable_intrs - Enable all the ddi interrupts.
5236 5236 */
static int
ixgbe_enable_intrs(ixgbe_t *ixgbe)
{
	int vec;
	int status;

	if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/*
		 * Block-enable capable (MSI): enable all vectors in
		 * one call.
		 */
		status = ddi_intr_block_enable(ixgbe->htable,
		    ixgbe->intr_cnt);
		if (status != DDI_SUCCESS) {
			ixgbe_log(ixgbe,
			    "Enable block intr failed: %d", status);
			return (IXGBE_FAILURE);
		}
		return (IXGBE_SUCCESS);
	}

	/* Legacy/MSI without block-enable: enable vectors one by one */
	for (vec = 0; vec < ixgbe->intr_cnt; vec++) {
		status = ddi_intr_enable(ixgbe->htable[vec]);
		if (status != DDI_SUCCESS) {
			ixgbe_log(ixgbe,
			    "Enable intr failed: %d", status);
			return (IXGBE_FAILURE);
		}
	}

	return (IXGBE_SUCCESS);
}
5272 5272
5273 5273 /*
5274 5274 * ixgbe_disable_intrs - Disable all the interrupts.
5275 5275 */
static int
ixgbe_disable_intrs(ixgbe_t *ixgbe)
{
	int vec;
	int status;

	if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Block-disable capable: disable all vectors in one call */
		status = ddi_intr_block_disable(ixgbe->htable,
		    ixgbe->intr_cnt);
		if (status != DDI_SUCCESS) {
			ixgbe_log(ixgbe,
			    "Disable block intr failed: %d", status);
			return (IXGBE_FAILURE);
		}
		return (IXGBE_SUCCESS);
	}

	/* Otherwise disable each vector individually */
	for (vec = 0; vec < ixgbe->intr_cnt; vec++) {
		status = ddi_intr_disable(ixgbe->htable[vec]);
		if (status != DDI_SUCCESS) {
			ixgbe_log(ixgbe,
			    "Disable intr failed: %d", status);
			return (IXGBE_FAILURE);
		}
	}

	return (IXGBE_SUCCESS);
}
5305 5305
5306 5306 /*
5307 5307 * ixgbe_get_hw_state - Get and save parameters related to adapter hardware.
5308 5308 */
static void
ixgbe_get_hw_state(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
	boolean_t link_up = B_FALSE;
	uint32_t pcs1g_anlp = 0;
	uint32_t pcs1g_ana = 0;
	boolean_t autoneg = B_FALSE;

	ASSERT(mutex_owned(&ixgbe->gen_lock));
	ixgbe->param_lp_1000fdx_cap = 0;
	ixgbe->param_lp_100fdx_cap = 0;

	/* check for link, don't wait */
	(void) ixgbe_check_link(hw, &speed, &link_up, false);
	pcs1g_ana = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);

	if (link_up) {
		/* link partner advertisement is only valid with link up */
		pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);

		/*
		 * NOTE(review): both the 1000fdx and 100fdx link-partner
		 * capabilities are derived from the same LPFD bit here —
		 * confirm this duplication is intentional.
		 */
		ixgbe->param_lp_1000fdx_cap =
		    (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
		ixgbe->param_lp_100fdx_cap =
		    (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
	}

	(void) ixgbe_get_link_capabilities(hw, &speed, &autoneg);

	/* advertised caps: local full-duplex bit AND the supported speed */
	ixgbe->param_adv_1000fdx_cap = ((pcs1g_ana & IXGBE_PCS1GANA_FDC) &&
	    (speed & IXGBE_LINK_SPEED_1GB_FULL)) ? 1 : 0;
	ixgbe->param_adv_100fdx_cap = ((pcs1g_ana & IXGBE_PCS1GANA_FDC) &&
	    (speed & IXGBE_LINK_SPEED_100_FULL)) ? 1 : 0;
}
5343 5343
5344 5344 /*
5345 5345 * ixgbe_get_driver_control - Notify that driver is in control of device.
5346 5346 */
static void
ixgbe_get_driver_control(struct ixgbe_hw *hw)
{
	uint32_t reg;

	/* Set DRV_LOAD so firmware knows the driver owns the device */
	reg = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, reg | IXGBE_CTRL_EXT_DRV_LOAD);
}
5359 5359
5360 5360 /*
5361 5361 * ixgbe_release_driver_control - Notify that driver is no longer in control
5362 5362 * of device.
5363 5363 */
static void
ixgbe_release_driver_control(struct ixgbe_hw *hw)
{
	uint32_t reg;

	/* Clear DRV_LOAD to tell firmware the driver has let go */
	reg = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, reg & ~IXGBE_CTRL_EXT_DRV_LOAD);
}
5376 5376
5377 5377 /*
5378 5378 * ixgbe_atomic_reserve - Atomic decrease operation.
5379 5379 */
int
ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n)
{
	uint32_t oldcnt, newcnt;

	/*
	 * CAS loop: retry until either the decrement lands or the
	 * count is found to be too small (return -1 in that case).
	 */
	for (;;) {
		oldcnt = *count_p;
		if (oldcnt < n)
			return (-1);
		newcnt = oldcnt - n;
		if (atomic_cas_32(count_p, oldcnt, newcnt) == oldcnt)
			break;
	}

	return (newcnt);
}
5398 5398
5399 5399 /*
5400 5400 * ixgbe_mc_table_itr - Traverse the entries in the multicast table.
5401 5401 */
static uint8_t *
ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq)
{
	uint8_t *cur;

	_NOTE(ARGUNUSED(hw));
	_NOTE(ARGUNUSED(vmdq));

	/* Return the current address and advance the cursor one entry */
	cur = *upd_ptr;
	*upd_ptr = cur + IXGBE_ETH_LENGTH_OF_ADDRESS;
	return (cur);
}
5415 5415
5416 5416 /*
5417 5417 * FMA support
5418 5418 */
int
ixgbe_check_acc_handle(ddi_acc_handle_t handle)
{
	ddi_fm_error_t err;

	/* Fetch and then clear any access fault recorded on the handle */
	ddi_fm_acc_err_get(handle, &err, DDI_FME_VERSION);
	ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);

	return (err.fme_status);
}
5428 5428
int
ixgbe_check_dma_handle(ddi_dma_handle_t handle)
{
	ddi_fm_error_t err;

	/* Report any DMA fault status recorded on the handle */
	ddi_fm_dma_err_get(handle, &err, DDI_FME_VERSION);

	return (err.fme_status);
}
5437 5437
5438 5438 /*
5439 5439 * ixgbe_fm_error_cb - The IO fault service error handling callback function.
5440 5440 */
static int
ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	_NOTE(ARGUNUSED(impl_data));

	/*
	 * The driver can always deal with an error in any DMA or access
	 * handle, so just post the ereport and hand back fme_status.
	 */
	pci_ereport_post(dip, err, NULL);

	return (err->fme_status);
}
5452 5452
static void
ixgbe_fm_init(ixgbe_t *ixgbe)
{
	ddi_iblock_cookie_t iblk;
	int dma_flagerr;

	/*
	 * Choose flagged-error vs. default access attributes based on
	 * the advertised access-check capability.
	 */
	ixgbe_regs_acc_attr.devacc_attr_access =
	    (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) ?
	    DDI_FLAGERR_ACC : DDI_DEFAULT_ACC;

	/* Likewise select DMA error-flagging */
	dma_flagerr =
	    (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) ? 1 : 0;
	ixgbe_set_fma_flags(dma_flagerr);

	/* Only register with IO Fault Services if we have some capability */
	if (ixgbe->fm_capabilities == 0)
		return;

	/* Register capabilities with IO Fault Services */
	ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk);

	/* Initialize pci ereport capabilities if ereport capable */
	if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
	    DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
		pci_ereport_setup(ixgbe->dip);

	/* Register error callback if error callback capable */
	if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
		ddi_fm_handler_register(ixgbe->dip,
		    ixgbe_fm_error_cb, (void *)ixgbe);
}
5498 5498
static void
ixgbe_fm_fini(ixgbe_t *ixgbe)
{
	/* Nothing to do unless FMA capabilities were registered */
	if (ixgbe->fm_capabilities == 0)
		return;

	/* Release any resources allocated by pci_ereport_setup() */
	if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
	    DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
		pci_ereport_teardown(ixgbe->dip);

	/* Un-register error callback if error callback capable */
	if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
		ddi_fm_handler_unregister(ixgbe->dip);

	/* Unregister from IO Fault Service */
	ddi_fm_fini(ixgbe->dip);
}
5526 5526
void
ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail)
{
	char eclass[FM_MAX_CLASS];
	uint64_t ena;

	/* Build the "<device>.<detail>" class string and a fresh ENA */
	(void) snprintf(eclass, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
	ena = fm_ena_generate(0, FM_ENA_FMT1);

	/* Post only when ereports are actually enabled */
	if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) {
		ddi_fm_ereport_post(ixgbe->dip, eclass, ena, DDI_NOSLEEP,
		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
	}
}
5540 5540
static int
ixgbe_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
{
	ixgbe_rx_ring_t *ring = (ixgbe_rx_ring_t *)rh;

	/* Record the MAC layer's generation number under the rx lock */
	mutex_enter(&ring->rx_lock);
	ring->ring_gen_num = mr_gen_num;
	mutex_exit(&ring->rx_lock);

	return (0);
}
5551 5551
5552 5552 /*
5553 5553 * Get the global ring index by a ring index within a group.
5554 5554 */
static int
ixgbe_get_rx_ring_index(ixgbe_t *ixgbe, int gindex, int rindex)
{
	int i;
	int remaining = rindex;

	/*
	 * Walk all rings, counting down through those belonging to
	 * group 'gindex' until the rindex-th one is reached; its
	 * global index is the answer.  -1 if no such ring exists.
	 */
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		if (ixgbe->rx_rings[i].group_index == gindex)
			remaining--;
		if (remaining < 0)
			return (i);
	}

	return (-1);
}
5571 5571
/*
 * Callback function for MAC layer to register all rings.
 */
5575 5575 /* ARGSUSED */
void
ixgbe_fill_ring(void *arg, mac_ring_type_t rtype, const int group_index,
    const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg;
	mac_intr_t *mintr = &infop->mri_intr;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		/*
		 * 'index' is the ring index within the group.
		 * Need to get the global ring index by searching in groups.
		 */
		int global_ring_index = ixgbe_get_rx_ring_index(
		    ixgbe, group_index, ring_index);

		ASSERT(global_ring_index >= 0);

		ixgbe_rx_ring_t *rx_ring = &ixgbe->rx_rings[global_ring_index];
		rx_ring->ring_handle = rh;

		/* Export the rx start/poll/stat entry points to MAC */
		infop->mri_driver = (mac_ring_driver_t)rx_ring;
		infop->mri_start = ixgbe_ring_start;
		infop->mri_stop = NULL;
		infop->mri_poll = ixgbe_ring_rx_poll;
		infop->mri_stat = ixgbe_rx_ring_stat;

		/* Per-ring interrupt control, for MAC poll-mode switching */
		mintr->mi_handle = (mac_intr_handle_t)rx_ring;
		mintr->mi_enable = ixgbe_rx_ring_intr_enable;
		mintr->mi_disable = ixgbe_rx_ring_intr_disable;
		if (ixgbe->intr_type &
		    (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
			mintr->mi_ddi_handle =
			    ixgbe->htable[rx_ring->intr_vector];
		}

		break;
	}
	case MAC_RING_TYPE_TX: {
		/* tx rings are not grouped; ring_index is already global */
		ASSERT(group_index == -1);
		ASSERT(ring_index < ixgbe->num_tx_rings);

		ixgbe_tx_ring_t *tx_ring = &ixgbe->tx_rings[ring_index];
		tx_ring->ring_handle = rh;

		/* Export the tx transmit/stat entry points to MAC */
		infop->mri_driver = (mac_ring_driver_t)tx_ring;
		infop->mri_start = NULL;
		infop->mri_stop = NULL;
		infop->mri_tx = ixgbe_ring_tx;
		infop->mri_stat = ixgbe_tx_ring_stat;
		if (ixgbe->intr_type &
		    (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
			mintr->mi_ddi_handle =
			    ixgbe->htable[tx_ring->intr_vector];
		}
		break;
	}
	default:
		break;
	}
}
5637 5637
/*
 * Callback function for MAC layer to register all groups.
 */
void
ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index,
    mac_group_info_t *infop, mac_group_handle_t gh)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg;
	ixgbe_rx_group_t *rx_group;

	/* Only rx groups exist; nothing to register for tx */
	if (rtype != MAC_RING_TYPE_RX)
		return;

	rx_group = &ixgbe->rx_groups[index];
	rx_group->group_handle = gh;

	/* Export group entry points and the rings-per-group count */
	infop->mgi_driver = (mac_group_driver_t)rx_group;
	infop->mgi_start = NULL;
	infop->mgi_stop = NULL;
	infop->mgi_addmac = ixgbe_addmac;
	infop->mgi_remmac = ixgbe_remmac;
	infop->mgi_count = (ixgbe->num_rx_rings / ixgbe->num_rx_groups);
}
5669 5669
/*
 * Enable interrupt on the specified rx ring.
 */
int
ixgbe_rx_ring_intr_enable(mac_intr_handle_t intrh)
{
	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
	ixgbe_t *ixgbe = rx_ring->ixgbe;
	int r_idx = rx_ring->index;
	int hw_r_idx = rx_ring->hw_index;
	int v_idx = rx_ring->intr_vector;

	mutex_enter(&ixgbe->gen_lock);
	if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) {
		mutex_exit(&ixgbe->gen_lock);
		/*
		 * Simply return 0.
		 * Interrupts are being adjusted. ixgbe_intr_adjust()
		 * will eventually re-enable the interrupt when it's
		 * done with the adjustment.
		 */
		return (0);
	}

	/*
	 * To enable interrupt by setting the VAL bit of given interrupt
	 * vector allocation register (IVAR).  Note the h/w ring index is
	 * used for the IVAR entry, while the s/w index goes in vect_map.
	 */
	ixgbe_enable_ivar(ixgbe, hw_r_idx, 0);

	BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);

	/*
	 * Trigger a Rx interrupt on this ring, so any packets that
	 * arrived while the interrupt was disabled get picked up.
	 */
	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_EICS, (1 << v_idx));
	IXGBE_WRITE_FLUSH(&ixgbe->hw);

	mutex_exit(&ixgbe->gen_lock);

	return (0);
}
5712 5712
/*
 * Disable interrupt on the specified rx ring.
 */
int
ixgbe_rx_ring_intr_disable(mac_intr_handle_t intrh)
{
	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
	ixgbe_t *ixgbe = rx_ring->ixgbe;
	int r_idx = rx_ring->index;
	int hw_r_idx = rx_ring->hw_index;
	int v_idx = rx_ring->intr_vector;

	mutex_enter(&ixgbe->gen_lock);
	if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) {
		mutex_exit(&ixgbe->gen_lock);
		/*
		 * Simply return 0.
		 * In the rare case where an interrupt is being
		 * disabled while interrupts are being adjusted,
		 * we don't fail the operation. No interrupts will
		 * be generated while they are adjusted, and
		 * ixgbe_intr_adjust() will cause the interrupts
		 * to be re-enabled once it completes. Note that
		 * in this case, packets may be delivered to the
		 * stack via interrupts before ixgbe_rx_ring_intr_enable()
		 * is called again. This is acceptable since interrupt
		 * adjustment is infrequent, and the stack will be
		 * able to handle these packets.
		 */
		return (0);
	}

	/*
	 * To disable interrupt by clearing the VAL bit of given interrupt
	 * vector allocation register (IVAR).  As in the enable path, the
	 * h/w ring index addresses the IVAR entry; the s/w index is what
	 * gets cleared in vect_map.
	 */
	ixgbe_disable_ivar(ixgbe, hw_r_idx, 0);

	BT_CLEAR(ixgbe->vect_map[v_idx].rx_map, r_idx);

	mutex_exit(&ixgbe->gen_lock);

	return (0);
}
5757 5757
5758 5758 /*
5759 5759 * Add a mac address.
5760 5760 */
static int
ixgbe_addmac(void *arg, const uint8_t *mac_addr)
{
	ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
	ixgbe_t *ixgbe = rx_group->ixgbe;
	struct ixgbe_hw *hw = &ixgbe->hw;
	int slot, i;

	mutex_enter(&ixgbe->gen_lock);

	/* Refuse while the device is suspended */
	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
		mutex_exit(&ixgbe->gen_lock);
		return (ECANCELED);
	}

	if (ixgbe->unicst_avail == 0) {
		/* no slots available */
		mutex_exit(&ixgbe->gen_lock);
		return (ENOSPC);
	}

	/*
	 * The first ixgbe->num_rx_groups slots are reserved for each respective
	 * group. The rest slots are shared by all groups. While adding a
	 * MAC address, reserved slots are firstly checked then the shared
	 * slots are searched.
	 */
	slot = -1;
	if (ixgbe->unicst_addr[rx_group->index].mac.set == 1) {
		/* reserved slot taken: search the shared slots */
		for (i = ixgbe->num_rx_groups; i < ixgbe->unicst_total; i++) {
			if (ixgbe->unicst_addr[i].mac.set == 0) {
				slot = i;
				break;
			}
		}
	} else {
		/* use this group's reserved slot */
		slot = rx_group->index;
	}

	if (slot == -1) {
		/* no slots available */
		mutex_exit(&ixgbe->gen_lock);
		return (ENOSPC);
	}

	/* Program the RAR and record the slot as in use by this group */
	bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
	(void) ixgbe_set_rar(hw, slot, ixgbe->unicst_addr[slot].mac.addr,
	    rx_group->index, IXGBE_RAH_AV);
	ixgbe->unicst_addr[slot].mac.set = 1;
	ixgbe->unicst_addr[slot].mac.group_index = rx_group->index;
	ixgbe->unicst_avail--;

	mutex_exit(&ixgbe->gen_lock);

	return (0);
}
5817 5817
5818 5818 /*
5819 5819 * Remove a mac address.
5820 5820 */
static int
ixgbe_remmac(void *arg, const uint8_t *mac_addr)
{
	ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
	ixgbe_t *ixgbe = rx_group->ixgbe;
	struct ixgbe_hw *hw = &ixgbe->hw;
	int slot;

	mutex_enter(&ixgbe->gen_lock);

	/* Refuse while the device is suspended */
	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
		mutex_exit(&ixgbe->gen_lock);
		return (ECANCELED);
	}

	/* Look up the slot holding this address */
	slot = ixgbe_unicst_find(ixgbe, mac_addr);
	if (slot == -1) {
		mutex_exit(&ixgbe->gen_lock);
		return (EINVAL);
	}

	/* The slot must actually be in use */
	if (ixgbe->unicst_addr[slot].mac.set == 0) {
		mutex_exit(&ixgbe->gen_lock);
		return (EINVAL);
	}

	/* Clear the RAR and return the slot to the free pool */
	bzero(ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
	(void) ixgbe_clear_rar(hw, slot);
	ixgbe->unicst_addr[slot].mac.set = 0;
	ixgbe->unicst_avail++;

	mutex_exit(&ixgbe->gen_lock);

	return (0);
}
↓ open down ↓ |
2137 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX