Print this page
XXXX introduce drv_sectohz
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/nxge/nxge_hio.c
+++ new/usr/src/uts/common/io/nxge/nxge_hio.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /*
28 28 * nxge_hio.c
29 29 *
30 30 * This file manages the virtualization resources for Neptune
31 31 * devices. That is, it implements a hybrid I/O (HIO) approach in the
32 32 * Solaris kernel, whereby a guest domain on an LDOMs server may
33 33 * request & use hardware resources from the service domain.
34 34 *
35 35 */
36 36
37 37 #include <sys/mac_provider.h>
38 38 #include <sys/nxge/nxge_impl.h>
39 39 #include <sys/nxge/nxge_fzc.h>
40 40 #include <sys/nxge/nxge_rxdma.h>
41 41 #include <sys/nxge/nxge_txdma.h>
42 42 #include <sys/nxge/nxge_hio.h>
43 43
44 44 /*
45 45 * External prototypes
46 46 */
47 47 extern npi_status_t npi_rxdma_dump_rdc_table(npi_handle_t, uint8_t);
48 48
49 49 /* The following function may be found in nxge_main.c */
50 50 extern int nxge_m_mmac_remove(void *arg, int slot);
51 51 extern int nxge_m_mmac_add_g(void *arg, const uint8_t *maddr, int rdctbl,
52 52 boolean_t usetbl);
53 53 extern int nxge_rx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num);
54 54
55 55 /* The following function may be found in nxge_[t|r]xdma.c */
56 56 extern npi_status_t nxge_txdma_channel_disable(nxge_t *, int);
57 57 extern nxge_status_t nxge_disable_rxdma_channel(nxge_t *, uint16_t);
58 58
59 59 /*
60 60 * Local prototypes
61 61 */
62 62 static void nxge_grp_dc_append(nxge_t *, nxge_grp_t *, nxge_hio_dc_t *);
63 63 static nxge_hio_dc_t *nxge_grp_dc_unlink(nxge_t *, nxge_grp_t *, int);
64 64 static void nxge_grp_dc_map(nxge_grp_t *group);
65 65
66 66 /*
67 67 * These functions are used by both service & guest domains to
68 68 * decide whether they're running in an LDOMs/XEN environment
69 69 * or not. If so, then the Hybrid I/O (HIO) module is initialized.
70 70 */
71 71
72 72 /*
73 73 * nxge_get_environs
74 74 *
75 75 * Figure out if we are in a guest domain or not.
76 76 *
77 77 * Arguments:
78 78 * nxge
79 79 *
80 80 * Notes:
81 81 *
82 82 * Context:
83 83 * Any domain
84 84 */
85 85 void
86 86 nxge_get_environs(
87 87 nxge_t *nxge)
88 88 {
89 89 char *string;
90 90
91 91 /*
92 92 * In the beginning, assume that we are running sans LDOMs/XEN.
93 93 */
94 94 nxge->environs = SOLARIS_DOMAIN;
95 95
96 96 /*
97 97 * Are we a hybrid I/O (HIO) guest domain driver?
98 98 */
99 99 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, nxge->dip,
100 100 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
101 101 "niutype", &string)) == DDI_PROP_SUCCESS) {
102 102 if (strcmp(string, "n2niu") == 0) {
103 103 nxge->environs = SOLARIS_GUEST_DOMAIN;
104 104 /* So we can allocate properly-aligned memory. */
105 105 nxge->niu_type = N2_NIU;
106 106 NXGE_DEBUG_MSG((nxge, HIO_CTL,
107 107 "Hybrid IO-capable guest domain"));
108 108 }
109 109 ddi_prop_free(string);
110 110 }
111 111 }
112 112
113 113 #if !defined(sun4v)
114 114
115 115 /*
116 116 * nxge_hio_init
117 117 *
118 118 * Initialize the HIO module of the NXGE driver.
119 119 *
120 120 * Arguments:
121 121 * nxge
122 122 *
123 123 * Notes:
124 124 * This is the non-hybrid I/O version of this function.
125 125 *
126 126 * Context:
127 127 * Any domain
128 128 */
129 129 int
130 130 nxge_hio_init(nxge_t *nxge)
131 131 {
132 132 nxge_hio_data_t *nhd;
133 133 int i;
134 134
135 135 nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
136 136 if (nhd == NULL) {
137 137 nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
138 138 MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
139 139 nhd->type = NXGE_HIO_TYPE_SERVICE;
140 140 nxge->nxge_hw_p->hio = (uintptr_t)nhd;
141 141 }
142 142
143 143 /*
144 144 * Initialize share and ring group structures.
145 145 */
146 146 for (i = 0; i < NXGE_MAX_TDCS; i++)
147 147 nxge->tdc_is_shared[i] = B_FALSE;
148 148
149 149 for (i = 0; i < NXGE_MAX_TDC_GROUPS; i++) {
150 150 nxge->tx_hio_groups[i].ghandle = NULL;
151 151 nxge->tx_hio_groups[i].nxgep = nxge;
152 152 nxge->tx_hio_groups[i].type = MAC_RING_TYPE_TX;
153 153 nxge->tx_hio_groups[i].gindex = 0;
154 154 nxge->tx_hio_groups[i].sindex = 0;
155 155 }
156 156
157 157 for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
158 158 nxge->rx_hio_groups[i].ghandle = NULL;
159 159 nxge->rx_hio_groups[i].nxgep = nxge;
160 160 nxge->rx_hio_groups[i].type = MAC_RING_TYPE_RX;
161 161 nxge->rx_hio_groups[i].gindex = 0;
162 162 nxge->rx_hio_groups[i].sindex = 0;
163 163 nxge->rx_hio_groups[i].started = B_FALSE;
164 164 nxge->rx_hio_groups[i].port_default_grp = B_FALSE;
165 165 nxge->rx_hio_groups[i].rdctbl = -1;
166 166 nxge->rx_hio_groups[i].n_mac_addrs = 0;
167 167 }
168 168
169 169 nhd->hio.ldoms = B_FALSE;
170 170
171 171 return (NXGE_OK);
172 172 }
173 173
174 174 #endif
175 175
176 176 void
177 177 nxge_hio_uninit(nxge_t *nxge)
178 178 {
179 179 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
180 180
181 181 ASSERT(nxge->nxge_hw_p->ndevs == 0);
182 182
183 183 if (nhd != NULL) {
184 184 MUTEX_DESTROY(&nhd->lock);
185 185 KMEM_FREE(nhd, sizeof (*nhd));
186 186 nxge->nxge_hw_p->hio = 0;
187 187 }
188 188 }
189 189
190 190 /*
191 191 * nxge_dci_map
192 192 *
193 193 * Map a DMA channel index to a channel number.
194 194 *
195 195 * Arguments:
196 196 * instance The instance number of the driver.
197 197 * type The type of channel this is: Tx or Rx.
198 198 * index The index to convert to a channel number
199 199 *
200 200 * Notes:
201 201 * This function is called by nxge_ndd.c:nxge_param_set_port_rdc()
202 202 *
203 203 * Context:
204 204 * Any domain
205 205 */
206 206 int
207 207 nxge_dci_map(
208 208 nxge_t *nxge,
209 209 vpc_type_t type,
210 210 int index)
211 211 {
212 212 nxge_grp_set_t *set;
213 213 int dc;
214 214
215 215 switch (type) {
216 216 case VP_BOUND_TX:
217 217 set = &nxge->tx_set;
218 218 break;
219 219 case VP_BOUND_RX:
220 220 set = &nxge->rx_set;
221 221 break;
222 222 }
223 223
224 224 for (dc = 0; dc < NXGE_MAX_TDCS; dc++) {
225 225 if ((1 << dc) & set->owned.map) {
226 226 if (index == 0)
227 227 return (dc);
228 228 else
229 229 index--;
230 230 }
231 231 }
232 232
233 233 return (-1);
234 234 }
235 235
236 236 /*
237 237 * ---------------------------------------------------------------------
238 238 * These are the general-purpose DMA channel group functions. That is,
239 239 * these functions are used to manage groups of TDCs or RDCs in an HIO
240 240 * environment.
241 241 *
 * But it is also expected that in the future they will be able to manage
243 243 * Crossbow groups.
244 244 * ---------------------------------------------------------------------
245 245 */
246 246
247 247 /*
248 248 * nxge_grp_cleanup(p_nxge_t nxge)
249 249 *
250 250 * Remove all outstanding groups.
251 251 *
252 252 * Arguments:
253 253 * nxge
254 254 */
255 255 void
256 256 nxge_grp_cleanup(p_nxge_t nxge)
257 257 {
258 258 nxge_grp_set_t *set;
259 259 int i;
260 260
261 261 MUTEX_ENTER(&nxge->group_lock);
262 262
263 263 /*
264 264 * Find RX groups that need to be cleaned up.
265 265 */
266 266 set = &nxge->rx_set;
267 267 for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
268 268 if (set->group[i] != NULL) {
269 269 KMEM_FREE(set->group[i], sizeof (nxge_grp_t));
270 270 set->group[i] = NULL;
271 271 }
272 272 }
273 273
274 274 /*
275 275 * Find TX groups that need to be cleaned up.
276 276 */
277 277 set = &nxge->tx_set;
278 278 for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
279 279 if (set->group[i] != NULL) {
280 280 KMEM_FREE(set->group[i], sizeof (nxge_grp_t));
281 281 set->group[i] = NULL;
282 282 }
283 283 }
284 284 MUTEX_EXIT(&nxge->group_lock);
285 285 }
286 286
287 287
288 288 /*
289 289 * nxge_grp_add
290 290 *
291 291 * Add a group to an instance of NXGE.
292 292 *
293 293 * Arguments:
294 294 * nxge
295 295 * type Tx or Rx
296 296 *
297 297 * Notes:
298 298 *
299 299 * Context:
300 300 * Any domain
301 301 */
302 302 nxge_grp_t *
303 303 nxge_grp_add(
304 304 nxge_t *nxge,
305 305 nxge_grp_type_t type)
306 306 {
307 307 nxge_grp_set_t *set;
308 308 nxge_grp_t *group;
309 309 int i;
310 310
311 311 group = KMEM_ZALLOC(sizeof (*group), KM_SLEEP);
312 312 group->nxge = nxge;
313 313
314 314 MUTEX_ENTER(&nxge->group_lock);
315 315 switch (type) {
316 316 case NXGE_TRANSMIT_GROUP:
317 317 case EXT_TRANSMIT_GROUP:
318 318 set = &nxge->tx_set;
319 319 break;
320 320 default:
321 321 set = &nxge->rx_set;
322 322 break;
323 323 }
324 324
325 325 group->type = type;
326 326 group->active = B_TRUE;
327 327 group->sequence = set->sequence++;
328 328
329 329 /* Find an empty slot for this logical group. */
330 330 for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
331 331 if (set->group[i] == 0) {
332 332 group->index = i;
333 333 set->group[i] = group;
334 334 NXGE_DC_SET(set->lg.map, i);
335 335 set->lg.count++;
336 336 break;
337 337 }
338 338 }
339 339 MUTEX_EXIT(&nxge->group_lock);
340 340
341 341 NXGE_DEBUG_MSG((nxge, HIO_CTL,
342 342 "nxge_grp_add: %cgroup = %d.%d",
343 343 type == NXGE_TRANSMIT_GROUP ? 't' : 'r',
344 344 nxge->mac.portnum, group->sequence));
345 345
346 346 return (group);
347 347 }
348 348
/*
 * nxge_grp_remove
 *
 * Remove a group (TDC or RDC) from this instance of NXGE and free it.
 *
 * Arguments:
 * 	nxge
 * 	group	The group to remove.
 *
 * Notes:
 * 	Any DMA channels still linked to <group> are removed (and
 * 	uninitialized) before the group itself is freed.
 *
 * Context:
 * 	Any domain
 */
void
nxge_grp_remove(
	nxge_t *nxge,
	nxge_grp_t *group)	/* The group to remove. */
{
	nxge_grp_set_t *set;
	vpc_type_t type;

	if (group == NULL)
		return;

	MUTEX_ENTER(&nxge->group_lock);
	switch (group->type) {
	case NXGE_TRANSMIT_GROUP:
	case EXT_TRANSMIT_GROUP:
		set = &nxge->tx_set;
		break;
	default:
		/* All non-transmit types are treated as receive groups. */
		set = &nxge->rx_set;
		break;
	}

	/* Bail out unless the slot still points at this very group. */
	if (set->group[group->index] != group) {
		MUTEX_EXIT(&nxge->group_lock);
		return;
	}

	set->group[group->index] = 0;
	NXGE_DC_RESET(set->lg.map, group->index);
	set->lg.count--;

	/* While inside the mutex, deactivate <group>. */
	group->active = B_FALSE;

	MUTEX_EXIT(&nxge->group_lock);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "nxge_grp_remove(%c.%d.%d) called",
	    group->type == NXGE_TRANSMIT_GROUP ? 't' : 'r',
	    nxge->mac.portnum, group->sequence));

	/* Now, remove any DCs which are still active. */
	switch (group->type) {
	default:
		type = VP_BOUND_TX;
		break;
	case NXGE_RECEIVE_GROUP:
	case EXT_RECEIVE_GROUP:
		type = VP_BOUND_RX;
	}

	/*
	 * nxge_grp_dc_remove() unlinks the head of the list each time
	 * through, so this loop terminates when the list is empty.
	 */
	while (group->dc) {
		nxge_grp_dc_remove(nxge, type, group->dc->channel);
	}

	KMEM_FREE(group, sizeof (*group));
}
406 406
407 407 /*
408 408 * nxge_grp_dc_add
409 409 *
410 410 * Add a DMA channel to a VR/Group.
411 411 *
412 412 * Arguments:
413 413 * nxge
414 414 * channel The channel to add.
415 415 * Notes:
416 416 *
417 417 * Context:
418 418 * Any domain
419 419 */
420 420 /* ARGSUSED */
421 421 int
422 422 nxge_grp_dc_add(
423 423 nxge_t *nxge,
424 424 nxge_grp_t *group, /* The group to add <channel> to. */
425 425 vpc_type_t type, /* Rx or Tx */
426 426 int channel) /* A physical/logical channel number */
427 427 {
428 428 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
429 429 nxge_hio_dc_t *dc;
430 430 nxge_grp_set_t *set;
431 431 nxge_status_t status = NXGE_OK;
432 432 int error = 0;
433 433
434 434 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_add"));
435 435
436 436 if (group == 0)
437 437 return (0);
438 438
439 439 switch (type) {
440 440 case VP_BOUND_TX:
441 441 set = &nxge->tx_set;
442 442 if (channel > NXGE_MAX_TDCS) {
443 443 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
444 444 "nxge_grp_dc_add: TDC = %d", channel));
445 445 return (NXGE_ERROR);
446 446 }
447 447 break;
448 448 case VP_BOUND_RX:
449 449 set = &nxge->rx_set;
450 450 if (channel > NXGE_MAX_RDCS) {
451 451 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
452 452 "nxge_grp_dc_add: RDC = %d", channel));
453 453 return (NXGE_ERROR);
454 454 }
455 455 break;
456 456
457 457 default:
458 458 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
459 459 "nxge_grp_dc_add: unknown type channel(%d)", channel));
460 460 }
461 461
462 462 NXGE_DEBUG_MSG((nxge, HIO_CTL,
463 463 "nxge_grp_dc_add: %cgroup = %d.%d.%d, channel = %d",
464 464 type == VP_BOUND_TX ? 't' : 'r',
465 465 nxge->mac.portnum, group->sequence, group->count, channel));
466 466
467 467 MUTEX_ENTER(&nxge->group_lock);
468 468 if (group->active != B_TRUE) {
469 469 /* We may be in the process of removing this group. */
470 470 MUTEX_EXIT(&nxge->group_lock);
471 471 return (NXGE_ERROR);
472 472 }
473 473 MUTEX_EXIT(&nxge->group_lock);
474 474
475 475 if (!(dc = nxge_grp_dc_find(nxge, type, channel))) {
476 476 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
477 477 "nxge_grp_dc_add(%d): DC FIND failed", channel));
478 478 return (NXGE_ERROR);
479 479 }
480 480
481 481 MUTEX_ENTER(&nhd->lock);
482 482
483 483 if (dc->group) {
484 484 MUTEX_EXIT(&nhd->lock);
485 485 /* This channel is already in use! */
486 486 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
487 487 "nxge_grp_dc_add(%d): channel already in group", channel));
488 488 return (NXGE_ERROR);
489 489 }
490 490
491 491 dc->next = 0;
492 492 dc->page = channel;
493 493 dc->channel = (nxge_channel_t)channel;
494 494
495 495 dc->type = type;
496 496 if (type == VP_BOUND_RX) {
497 497 dc->init = nxge_init_rxdma_channel;
498 498 dc->uninit = nxge_uninit_rxdma_channel;
499 499 } else {
500 500 dc->init = nxge_init_txdma_channel;
501 501 dc->uninit = nxge_uninit_txdma_channel;
502 502 }
503 503
504 504 dc->group = group;
505 505
506 506 if (isLDOMguest(nxge)) {
507 507 error = nxge_hio_ldsv_add(nxge, dc);
508 508 if (error != 0) {
509 509 MUTEX_EXIT(&nhd->lock);
510 510 return (NXGE_ERROR);
511 511 }
512 512 }
513 513
514 514 NXGE_DC_SET(set->owned.map, channel);
515 515 set->owned.count++;
516 516
517 517 MUTEX_EXIT(&nhd->lock);
518 518
519 519 if ((status = (*dc->init)(nxge, channel)) != NXGE_OK) {
520 520 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
521 521 "nxge_grp_dc_add(%d): channel init failed", channel));
522 522 MUTEX_ENTER(&nhd->lock);
523 523 (void) memset(dc, 0, sizeof (*dc));
524 524 NXGE_DC_RESET(set->owned.map, channel);
525 525 set->owned.count--;
526 526 MUTEX_EXIT(&nhd->lock);
527 527 return (NXGE_ERROR);
528 528 }
529 529
530 530 nxge_grp_dc_append(nxge, group, dc);
531 531
532 532 if (type == VP_BOUND_TX) {
533 533 MUTEX_ENTER(&nhd->lock);
534 534 nxge->tdc_is_shared[channel] = B_FALSE;
535 535 MUTEX_EXIT(&nhd->lock);
536 536 }
537 537
538 538 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_add"));
539 539
540 540 return ((int)status);
541 541 }
542 542
/*
 * nxge_grp_dc_remove
 *
 * Remove a DMA channel from its group, release ownership of it, and
 * uninitialize it.
 *
 * Arguments:
 * 	nxge
 * 	type	Rx or Tx.
 * 	channel	The channel to remove.
 *
 * Notes:
 * 	The uninit callback is invoked after dropping nhd->lock, using
 * 	values (uninit, channel) captured before the DC is zeroed.
 *
 * Context:
 * 	Any domain
 */
void
nxge_grp_dc_remove(
	nxge_t *nxge,
	vpc_type_t type,
	int channel)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_dc_t *dc;
	nxge_grp_set_t *set;
	nxge_grp_t *group;

	dc_uninit_t uninit;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_remove"));

	if ((dc = nxge_grp_dc_find(nxge, type, channel)) == 0)
		goto nxge_grp_dc_remove_exit;

	/* An all-zero DC is one that was never (or no longer is) in use. */
	if ((dc->group == NULL) && (dc->next == 0) &&
	    (dc->channel == 0) && (dc->page == 0) && (dc->type == 0)) {
		goto nxge_grp_dc_remove_exit;
	}

	group = (nxge_grp_t *)dc->group;

	if (isLDOMguest(nxge)) {
		(void) nxge_hio_intr_remove(nxge, type, channel);
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "DC remove: group = %d.%d.%d, %cdc %d",
	    nxge->mac.portnum, group->sequence, group->count,
	    type == VP_BOUND_TX ? 't' : 'r', dc->channel));

	MUTEX_ENTER(&nhd->lock);

	set = dc->type == VP_BOUND_TX ? &nxge->tx_set : &nxge->rx_set;

	/* Remove the DC from its group. */
	if (nxge_grp_dc_unlink(nxge, group, channel) != dc) {
		MUTEX_EXIT(&nhd->lock);
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_grp_dc_remove(%d) failed", channel));
		goto nxge_grp_dc_remove_exit;
	}

	/* Capture what we need before the DC is zeroed below. */
	uninit = dc->uninit;
	channel = dc->channel;

	NXGE_DC_RESET(set->owned.map, channel);
	set->owned.count--;

	(void) memset(dc, 0, sizeof (*dc));

	MUTEX_EXIT(&nhd->lock);

	/* Uninitialize the hardware channel outside of nhd->lock. */
	(*uninit)(nxge, channel);

nxge_grp_dc_remove_exit:
	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_remove"));
}
604 604
605 605 nxge_hio_dc_t *
606 606 nxge_grp_dc_find(
607 607 nxge_t *nxge,
608 608 vpc_type_t type, /* Rx or Tx */
609 609 int channel)
610 610 {
611 611 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
612 612 nxge_hio_dc_t *current;
613 613
614 614 current = (type == VP_BOUND_TX) ? &nhd->tdc[0] : &nhd->rdc[0];
615 615
616 616 if (!isLDOMguest(nxge)) {
617 617 return (¤t[channel]);
618 618 } else {
619 619 /* We're in a guest domain. */
620 620 int i, limit = (type == VP_BOUND_TX) ?
621 621 NXGE_MAX_TDCS : NXGE_MAX_RDCS;
622 622
623 623 MUTEX_ENTER(&nhd->lock);
624 624 for (i = 0; i < limit; i++, current++) {
625 625 if (current->channel == channel) {
626 626 if (current->vr && current->vr->nxge ==
627 627 (uintptr_t)nxge) {
628 628 MUTEX_EXIT(&nhd->lock);
629 629 return (current);
630 630 }
631 631 }
632 632 }
633 633 MUTEX_EXIT(&nhd->lock);
634 634 }
635 635
636 636 return (0);
637 637 }
638 638
639 639 /*
640 640 * nxge_grp_dc_append
641 641 *
642 642 * Append a DMA channel to a group.
643 643 *
644 644 * Arguments:
645 645 * nxge
646 646 * group The group to append to
647 647 * dc The DMA channel to append
648 648 *
649 649 * Notes:
650 650 *
651 651 * Context:
652 652 * Any domain
653 653 */
654 654 static
655 655 void
656 656 nxge_grp_dc_append(
657 657 nxge_t *nxge,
658 658 nxge_grp_t *group,
659 659 nxge_hio_dc_t *dc)
660 660 {
661 661 MUTEX_ENTER(&nxge->group_lock);
662 662
663 663 if (group->dc == 0) {
664 664 group->dc = dc;
665 665 } else {
666 666 nxge_hio_dc_t *current = group->dc;
667 667 do {
668 668 if (current->next == 0) {
669 669 current->next = dc;
670 670 break;
671 671 }
672 672 current = current->next;
673 673 } while (current);
674 674 }
675 675
676 676 NXGE_DC_SET(group->map, dc->channel);
677 677
678 678 nxge_grp_dc_map(group);
679 679 group->count++;
680 680
681 681 MUTEX_EXIT(&nxge->group_lock);
682 682 }
683 683
684 684 /*
685 685 * nxge_grp_dc_unlink
686 686 *
 * Unlink a DMA channel from its linked list (group).
 *
 * Arguments:
 * 	nxge
 * 	group	The group (linked list) to unlink from
 * 	dc	The DMA channel to unlink
693 693 *
694 694 * Notes:
695 695 *
696 696 * Context:
697 697 * Any domain
698 698 */
699 699 nxge_hio_dc_t *
700 700 nxge_grp_dc_unlink(
701 701 nxge_t *nxge,
702 702 nxge_grp_t *group,
703 703 int channel)
704 704 {
705 705 nxge_hio_dc_t *current, *previous;
706 706
707 707 MUTEX_ENTER(&nxge->group_lock);
708 708
709 709 if (group == NULL) {
710 710 MUTEX_EXIT(&nxge->group_lock);
711 711 return (0);
712 712 }
713 713
714 714 if ((current = group->dc) == 0) {
715 715 MUTEX_EXIT(&nxge->group_lock);
716 716 return (0);
717 717 }
718 718
719 719 previous = 0;
720 720 do {
721 721 if (current->channel == channel) {
722 722 if (previous)
723 723 previous->next = current->next;
724 724 else
725 725 group->dc = current->next;
726 726 break;
727 727 }
728 728 previous = current;
729 729 current = current->next;
730 730 } while (current);
731 731
732 732 if (current == 0) {
733 733 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
734 734 "DC unlink: DC %d not found", channel));
735 735 } else {
736 736 current->next = 0;
737 737 current->group = 0;
738 738
739 739 NXGE_DC_RESET(group->map, channel);
740 740 group->count--;
741 741 }
742 742
743 743 nxge_grp_dc_map(group);
744 744
745 745 MUTEX_EXIT(&nxge->group_lock);
746 746
747 747 return (current);
748 748 }
749 749
750 750 /*
751 751 * nxge_grp_dc_map
752 752 *
753 753 * Map a linked list to an array of channel numbers.
754 754 *
755 755 * Arguments:
756 756 * nxge
757 757 * group The group to remap.
758 758 *
759 759 * Notes:
760 760 * It is expected that the caller will hold the correct mutex.
761 761 *
762 762 * Context:
763 763 * Service domain
764 764 */
765 765 void
766 766 nxge_grp_dc_map(
767 767 nxge_grp_t *group)
768 768 {
769 769 nxge_channel_t *legend;
770 770 nxge_hio_dc_t *dc;
771 771
772 772 (void) memset(group->legend, 0, sizeof (group->legend));
773 773
774 774 legend = group->legend;
775 775 dc = group->dc;
776 776 while (dc) {
777 777 *legend = dc->channel;
778 778 legend++;
779 779 dc = dc->next;
780 780 }
781 781 }
782 782
783 783 /*
784 784 * ---------------------------------------------------------------------
785 785 * These are HIO debugging functions.
786 786 * ---------------------------------------------------------------------
787 787 */
788 788
789 789 /*
790 790 * nxge_delay
791 791 *
792 792 * Delay <seconds> number of seconds.
793 793 *
 * Arguments:
 * 	seconds	The number of seconds to delay.
 *
799 799 * Notes:
800 800 * This is a developer-only function.
801 801 *
802 802 * Context:
803 803 * Any domain
804 804 */
void
nxge_delay(
	int seconds)
{
	/* drv_sectohz() converts seconds directly to clock ticks. */
	delay(drv_sectohz(seconds));
}
811 811
/*
 * Names and offsets of the per-channel RXDMA CSRs, used by
 * nxge_rx2str().  The list is terminated by a null name.
 */
static dmc_reg_name_t rx_names[] = {
	{ "RXDMA_CFIG1", 0 },
	{ "RXDMA_CFIG2", 8 },
	{ "RBR_CFIG_A", 0x10 },
	{ "RBR_CFIG_B", 0x18 },
	{ "RBR_KICK", 0x20 },
	{ "RBR_STAT", 0x28 },
	{ "RBR_HDH", 0x30 },
	{ "RBR_HDL", 0x38 },
	{ "RCRCFIG_A", 0x40 },
	{ "RCRCFIG_B", 0x48 },
	{ "RCRSTAT_A", 0x50 },
	{ "RCRSTAT_B", 0x58 },
	{ "RCRSTAT_C", 0x60 },
	{ "RX_DMA_ENT_MSK", 0x68 },
	{ "RX_DMA_CTL_STAT", 0x70 },
	{ "RCR_FLSH", 0x78 },
	{ "RXMISC", 0x90 },
	{ "RX_DMA_CTL_STAT_DBG", 0x98 },
	{ 0, -1 }
};
833 833
/*
 * Names and offsets of the per-channel TXDMA CSRs, used by
 * nxge_tx2str().  The list is terminated by a null name.
 */
static dmc_reg_name_t tx_names[] = {
	{ "Tx_RNG_CFIG", 0 },
	{ "Tx_RNG_HDL", 0x10 },
	{ "Tx_RNG_KICK", 0x18 },
	{ "Tx_ENT_MASK", 0x20 },
	{ "Tx_CS", 0x28 },
	{ "TxDMA_MBH", 0x30 },
	{ "TxDMA_MBL", 0x38 },
	{ "TxDMA_PRE_ST", 0x40 },
	{ "Tx_RNG_ERR_LOGH", 0x48 },
	{ "Tx_RNG_ERR_LOGL", 0x50 },
	{ "TDMC_INTR_DBG", 0x60 },
	{ "Tx_CS_DBG", 0x68 },
	{ 0, -1 }
};
849 849
850 850 /*
851 851 * nxge_xx2str
852 852 *
853 853 * Translate a register address into a string.
854 854 *
855 855 * Arguments:
856 856 * offset The address of the register to translate.
857 857 *
858 858 * Notes:
859 859 * These are developer-only function.
860 860 *
861 861 * Context:
862 862 * Any domain
863 863 */
864 864 const char *
865 865 nxge_rx2str(
866 866 int offset)
867 867 {
868 868 dmc_reg_name_t *reg = &rx_names[0];
869 869
870 870 offset &= DMA_CSR_MASK;
871 871
872 872 while (reg->name) {
873 873 if (offset == reg->offset)
874 874 return (reg->name);
875 875 reg++;
876 876 }
877 877
878 878 return (0);
879 879 }
880 880
881 881 const char *
882 882 nxge_tx2str(
883 883 int offset)
884 884 {
885 885 dmc_reg_name_t *reg = &tx_names[0];
886 886
887 887 offset &= DMA_CSR_MASK;
888 888
889 889 while (reg->name) {
890 890 if (offset == reg->offset)
891 891 return (reg->name);
892 892 reg++;
893 893 }
894 894
895 895 return (0);
896 896 }
897 897
898 898 /*
899 899 * nxge_ddi_perror
900 900 *
901 901 * Map a DDI error number to a string.
902 902 *
903 903 * Arguments:
904 904 * ddi_error The DDI error number to map.
905 905 *
906 906 * Notes:
907 907 *
908 908 * Context:
909 909 * Any domain
910 910 */
911 911 const char *
912 912 nxge_ddi_perror(
913 913 int ddi_error)
914 914 {
915 915 switch (ddi_error) {
916 916 case DDI_SUCCESS:
917 917 return ("DDI_SUCCESS");
918 918 case DDI_FAILURE:
919 919 return ("DDI_FAILURE");
920 920 case DDI_NOT_WELL_FORMED:
921 921 return ("DDI_NOT_WELL_FORMED");
922 922 case DDI_EAGAIN:
923 923 return ("DDI_EAGAIN");
924 924 case DDI_EINVAL:
925 925 return ("DDI_EINVAL");
926 926 case DDI_ENOTSUP:
927 927 return ("DDI_ENOTSUP");
928 928 case DDI_EPENDING:
929 929 return ("DDI_EPENDING");
930 930 case DDI_ENOMEM:
931 931 return ("DDI_ENOMEM");
932 932 case DDI_EBUSY:
933 933 return ("DDI_EBUSY");
934 934 case DDI_ETRANSPORT:
935 935 return ("DDI_ETRANSPORT");
936 936 case DDI_ECONTEXT:
937 937 return ("DDI_ECONTEXT");
938 938 default:
939 939 return ("Unknown error");
940 940 }
941 941 }
942 942
943 943 /*
944 944 * ---------------------------------------------------------------------
945 945 * These are Sun4v HIO function definitions
946 946 * ---------------------------------------------------------------------
947 947 */
948 948
949 949 #if defined(sun4v)
950 950
951 951 /*
952 952 * Local prototypes
953 953 */
954 954 static nxge_hio_vr_t *nxge_hio_vr_share(nxge_t *);
955 955 static void nxge_hio_unshare(nxge_hio_vr_t *);
956 956
957 957 static int nxge_hio_addres(nxge_hio_vr_t *, mac_ring_type_t, uint64_t *);
958 958 static void nxge_hio_remres(nxge_hio_vr_t *, mac_ring_type_t, res_map_t);
959 959
960 960 static void nxge_hio_tdc_unshare(nxge_t *nxge, int dev_grpid, int channel);
961 961 static void nxge_hio_rdc_unshare(nxge_t *nxge, int dev_grpid, int channel);
962 962 static int nxge_hio_dc_share(nxge_t *, nxge_hio_vr_t *, mac_ring_type_t, int);
963 963 static void nxge_hio_dc_unshare(nxge_t *, nxge_hio_vr_t *,
964 964 mac_ring_type_t, int);
965 965
966 966 /*
967 967 * nxge_hio_init
968 968 *
969 969 * Initialize the HIO module of the NXGE driver.
970 970 *
971 971 * Arguments:
972 972 * nxge
973 973 *
974 974 * Notes:
975 975 *
976 976 * Context:
977 977 * Any domain
978 978 */
int
nxge_hio_init(nxge_t *nxge)
{
	nxge_hio_data_t *nhd;
	int i, region;

	/* Allocate the hardware-shared HIO state on first attach. */
	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	if (nhd == 0) {
		nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
		MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
		if (isLDOMguest(nxge))
			nhd->type = NXGE_HIO_TYPE_GUEST;
		else
			nhd->type = NXGE_HIO_TYPE_SERVICE;
		nxge->nxge_hw_p->hio = (uintptr_t)nhd;
	}

	/*
	 * On an N2 NIU with the hypervisor NIU service negotiated,
	 * promote ourselves to a service domain.
	 */
	if ((nxge->environs == SOLARIS_DOMAIN) &&
	    (nxge->niu_type == N2_NIU)) {
		if (nxge->niu_hsvc_available == B_TRUE) {
			hsvc_info_t *niu_hsvc = &nxge->niu_hsvc;
			/*
			 * Versions supported now are:
			 *  - major number >= 1 (NIU_MAJOR_VER).
			 *
			 * NOTE(review): the second clause below looks
			 * redundant if NIU_MAJOR_VER == 1 — confirm
			 * against nxge_impl.h.
			 */
			if ((niu_hsvc->hsvc_major >= NIU_MAJOR_VER) ||
			    (niu_hsvc->hsvc_major == 1 &&
			    niu_hsvc->hsvc_minor == 1)) {
				nxge->environs = SOLARIS_SERVICE_DOMAIN;
				NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
				    "nxge_hio_init: hypervisor services "
				    "version %d.%d",
				    niu_hsvc->hsvc_major,
				    niu_hsvc->hsvc_minor));
			}
		}
	}

	/*
	 * Initialize share and ring group structures.
	 */
	for (i = 0; i < NXGE_MAX_TDC_GROUPS; i++) {
		nxge->tx_hio_groups[i].ghandle = NULL;
		nxge->tx_hio_groups[i].nxgep = nxge;
		nxge->tx_hio_groups[i].type = MAC_RING_TYPE_TX;
		nxge->tx_hio_groups[i].gindex = 0;
		nxge->tx_hio_groups[i].sindex = 0;
	}

	for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
		nxge->rx_hio_groups[i].ghandle = NULL;
		nxge->rx_hio_groups[i].nxgep = nxge;
		nxge->rx_hio_groups[i].type = MAC_RING_TYPE_RX;
		nxge->rx_hio_groups[i].gindex = 0;
		nxge->rx_hio_groups[i].sindex = 0;
		nxge->rx_hio_groups[i].started = B_FALSE;
		nxge->rx_hio_groups[i].port_default_grp = B_FALSE;
		nxge->rx_hio_groups[i].rdctbl = -1;
		nxge->rx_hio_groups[i].n_mac_addrs = 0;
	}

	/* Nothing more to do outside of an LDOMs environment. */
	if (!isLDOMs(nxge)) {
		nhd->hio.ldoms = B_FALSE;
		return (NXGE_OK);
	}

	nhd->hio.ldoms = B_TRUE;

	/*
	 * Fill in what we can.
	 */
	for (region = 0; region < NXGE_VR_SR_MAX; region++) {
		nhd->vr[region].region = region;
	}
	/*
	 * NOTE(review): two regions appear to be reserved (hence the
	 * "- 2") — confirm which ones and why.
	 */
	nhd->vrs = NXGE_VR_SR_MAX - 2;

	/*
	 * Initialize the share structures.
	 */
	for (i = 0; i < NXGE_MAX_TDCS; i++)
		nxge->tdc_is_shared[i] = B_FALSE;

	for (i = 0; i < NXGE_VR_SR_MAX; i++) {
		nxge->shares[i].nxgep = nxge;
		nxge->shares[i].index = 0;
		nxge->shares[i].vrp = NULL;
		nxge->shares[i].tmap = 0;
		nxge->shares[i].rmap = 0;
		nxge->shares[i].rxgroup = 0;
		nxge->shares[i].active = B_FALSE;
	}

	/* Fill in the HV HIO function pointers. */
	nxge_hio_hv_init(nxge);

	if (isLDOMservice(nxge)) {
		NXGE_DEBUG_MSG((nxge, HIO_CTL,
		    "Hybrid IO-capable service domain"));
		return (NXGE_OK);
	}

	return (0);
}
1082 1082 #endif /* defined(sun4v) */
1083 1083
/*
 * nxge_hio_group_mac_add
 *
 * Program <macaddr> into the RDC table associated with ring group <g>
 * and bump the group's address count.
 *
 * Returns 0 on success, or the error from nxge_m_mmac_add_g().
 */
static int
nxge_hio_group_mac_add(nxge_t *nxge, nxge_ring_group_t *g,
    const uint8_t *macaddr)
{
	int rv;
	nxge_rdc_grp_t *group;

	mutex_enter(nxge->genlock);

	/*
	 * Initialize the NXGE RDC table data structure.
	 */
	group = &nxge->pt_config.rdc_grps[g->rdctbl];
	if (!group->flag) {
		group->port = NXGE_GET_PORT_NUM(nxge->function_num);
		group->config_method = RDC_TABLE_ENTRY_METHOD_REP;
		group->flag = B_TRUE;	/* This group has been configured. */
	}

	/* genlock is dropped across the (potentially slow) MAC add. */
	mutex_exit(nxge->genlock);

	/*
	 * Add the MAC address.
	 */
	if ((rv = nxge_m_mmac_add_g((void *)nxge, macaddr,
	    g->rdctbl, B_TRUE)) != 0) {
		return (rv);
	}

	mutex_enter(nxge->genlock);
	g->n_mac_addrs++;
	mutex_exit(nxge->genlock);
	return (0);
}
1118 1118
1119 1119 static int
1120 1120 nxge_hio_set_unicst(void *arg, const uint8_t *macaddr)
1121 1121 {
1122 1122 p_nxge_t nxgep = (p_nxge_t)arg;
1123 1123 struct ether_addr addrp;
1124 1124
1125 1125 bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
1126 1126 if (nxge_set_mac_addr(nxgep, &addrp)) {
1127 1127 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1128 1128 "<== nxge_m_unicst: set unitcast failed"));
1129 1129 return (EINVAL);
1130 1130 }
1131 1131
1132 1132 nxgep->primary = B_TRUE;
1133 1133
1134 1134 return (0);
1135 1135 }
1136 1136
1137 1137 /*ARGSUSED*/
1138 1138 static int
1139 1139 nxge_hio_clear_unicst(p_nxge_t nxgep, const uint8_t *mac_addr)
1140 1140 {
1141 1141 nxgep->primary = B_FALSE;
1142 1142 return (0);
1143 1143 }
1144 1144
/*
 * nxge_hio_add_mac
 *
 * MAC-layer callback: add a unicast address to ring group <arg>.
 *
 * Notes:
 * 	In a guest domain this is a no-op (the service domain owns the
 * 	MAC tables).  If no primary address is set yet and this is the
 * 	port's default group, the address becomes the primary instead.
 * 	A group backed by a VR may hold only one address.
 */
static int
nxge_hio_add_mac(void *arg, const uint8_t *mac_addr)
{
	nxge_ring_group_t *group = (nxge_ring_group_t *)arg;
	p_nxge_t nxge = group->nxgep;
	int rv;
	nxge_hio_vr_t *vr;	/* The Virtualization Region */

	ASSERT(group->type == MAC_RING_TYPE_RX);
	ASSERT(group->nxgep != NULL);

	if (isLDOMguest(group->nxgep))
		return (0);

	mutex_enter(nxge->genlock);

	/* First address on the default group becomes the primary. */
	if (!nxge->primary && group->port_default_grp) {
		rv = nxge_hio_set_unicst((void *)nxge, mac_addr);
		mutex_exit(nxge->genlock);
		return (rv);
	}

	/*
	 * If the group is associated with a VR, then only one
	 * address may be assigned to the group.
	 */
	vr = (nxge_hio_vr_t *)nxge->shares[group->sindex].vrp;
	if ((vr != NULL) && (group->n_mac_addrs)) {
		mutex_exit(nxge->genlock);
		return (ENOSPC);
	}

	mutex_exit(nxge->genlock);

	/*
	 * Program the mac address for the group.
	 */
	if ((rv = nxge_hio_group_mac_add(nxge, group, mac_addr)) != 0) {
		return (rv);
	}

	return (0);
}
1188 1188
/*
 * find_mac_slot
 *
 * Return the index of the alternate-MAC-address pool slot holding
 * <mac_addr>, or -1 if the address is not in the pool.
 *
 * NOTE(review): the scan bound is inclusive (0 .. num_mmac).  This
 * presumes the usable slots are 1-based, giving num_mmac + 1 pool
 * entries — confirm against nxge_mmac_t slot numbering in
 * nxge_main.c before treating the <= as an off-by-one.
 */
static int
find_mac_slot(nxge_mmac_t *mmac_info, const uint8_t *mac_addr)
{
	int i;
	for (i = 0; i <= mmac_info->num_mmac; i++) {
		if (memcmp(mmac_info->mac_pool[i].addr, mac_addr,
		    ETHERADDRL) == 0) {
			return (i);
		}
	}
	return (-1);
}
1201 1201
/*
 * nxge_hio_rem_mac
 *
 * MAC-layer mgi_remmac entry point for an RX ring group: remove
 * <mac_addr> from the group.  An address not found in the alternate
 * pool may still be the port's primary unicast address, which is
 * handled by clearing the primary flag.  No-op in an LDOMs guest.
 *
 * Returns 0 on success, EINVAL if the address is unknown, or the
 * error from nxge_m_mmac_remove().
 */
/* ARGSUSED */
static int
nxge_hio_rem_mac(void *arg, const uint8_t *mac_addr)
{
	nxge_ring_group_t *group = (nxge_ring_group_t *)arg;
	struct ether_addr addrp;
	p_nxge_t nxge = group->nxgep;
	nxge_mmac_t *mmac_info;
	int rv, slot;

	ASSERT(group->type == MAC_RING_TYPE_RX);
	ASSERT(group->nxgep != NULL);

	/* Guest domains do not manage MAC tables directly. */
	if (isLDOMguest(group->nxgep))
		return (0);

	mutex_enter(nxge->genlock);

	mmac_info = &nxge->nxge_mmac_info;
	slot = find_mac_slot(mmac_info, mac_addr);
	if (slot < 0) {
		/*
		 * Not an alternate address.  If it matches the port's
		 * current primary address, treat the removal as a
		 * request to clear the primary; otherwise it is an
		 * address we never programmed.
		 */
		if (group->port_default_grp && nxge->primary) {
			bcopy(mac_addr, (uint8_t *)&addrp, ETHERADDRL);
			if (ether_cmp(&addrp, &nxge->ouraddr) == 0) {
				rv = nxge_hio_clear_unicst(nxge, mac_addr);
				mutex_exit(nxge->genlock);
				return (rv);
			} else {
				mutex_exit(nxge->genlock);
				return (EINVAL);
			}
		} else {
			mutex_exit(nxge->genlock);
			return (EINVAL);
		}
	}

	/* Drop the lock before touching the MAC slot table. */
	mutex_exit(nxge->genlock);

	/*
	 * Remove the mac address for the group
	 */
	if ((rv = nxge_m_mmac_remove(nxge, slot)) != 0) {
		return (rv);
	}

	mutex_enter(nxge->genlock);
	group->n_mac_addrs--;
	mutex_exit(nxge->genlock);

	return (0);
}
1254 1254
/*
 * nxge_hio_group_start
 *
 * MAC-layer mgi_start entry point for an RX ring group: bind (or
 * reuse) an RDC table for the group and initialize its FZC state.
 * In an LDOMs guest the group is simply marked started.
 *
 * Returns 0 on success, ENXIO if the MAC is not started, or the
 * negative value returned by nxge_fzc_rdc_tbl_bind() on failure.
 */
static int
nxge_hio_group_start(mac_group_driver_t gdriver)
{
	nxge_ring_group_t *group = (nxge_ring_group_t *)gdriver;
	nxge_rdc_grp_t *rdc_grp_p;
	int rdctbl;
	int dev_gindex;

	ASSERT(group->type == MAC_RING_TYPE_RX);
	ASSERT(group->nxgep != NULL);

	ASSERT(group->nxgep->nxge_mac_state == NXGE_MAC_STARTED);
	if (group->nxgep->nxge_mac_state != NXGE_MAC_STARTED)
		return (ENXIO);

	mutex_enter(group->nxgep->genlock);
	/* Guests have no RDC tables to bind; just mark started. */
	if (isLDOMguest(group->nxgep))
		goto nxge_hio_group_start_exit;

	/* Translate the MAC-layer group index to the device index. */
	dev_gindex = group->nxgep->pt_config.hw_config.def_mac_rxdma_grpid +
	    group->gindex;
	rdc_grp_p = &group->nxgep->pt_config.rdc_grps[dev_gindex];

	/*
	 * Get an rdc table for this group.
	 * Group ID is given by the caller, and that's the group it needs
	 * to bind to. The default group is already bound when the driver
	 * was attached.
	 *
	 * For Group 0, it's RDC table was allocated at attach time
	 * no need to allocate a new table.
	 */
	if (group->gindex != 0) {
		rdctbl = nxge_fzc_rdc_tbl_bind(group->nxgep,
		    dev_gindex, B_TRUE);
		if (rdctbl < 0) {
			mutex_exit(group->nxgep->genlock);
			return (rdctbl);
		}
	} else {
		rdctbl = group->nxgep->pt_config.hw_config.def_mac_rxdma_grpid;
	}

	group->rdctbl = rdctbl;

	(void) nxge_init_fzc_rdc_tbl(group->nxgep, rdc_grp_p, rdctbl);

nxge_hio_group_start_exit:
	group->started = B_TRUE;
	mutex_exit(group->nxgep->genlock);
	return (0);
}
1307 1307
1308 1308 static void
1309 1309 nxge_hio_group_stop(mac_group_driver_t gdriver)
1310 1310 {
1311 1311 nxge_ring_group_t *group = (nxge_ring_group_t *)gdriver;
1312 1312
1313 1313 ASSERT(group->type == MAC_RING_TYPE_RX);
1314 1314
1315 1315 mutex_enter(group->nxgep->genlock);
1316 1316 group->started = B_FALSE;
1317 1317
1318 1318 if (isLDOMguest(group->nxgep))
1319 1319 goto nxge_hio_group_stop_exit;
1320 1320
1321 1321 /*
1322 1322 * Unbind the RDC table previously bound for this group.
1323 1323 *
1324 1324 * Since RDC table for group 0 was allocated at attach
1325 1325 * time, no need to unbind the table here.
1326 1326 */
1327 1327 if (group->gindex != 0)
1328 1328 (void) nxge_fzc_rdc_tbl_unbind(group->nxgep, group->rdctbl);
1329 1329
1330 1330 nxge_hio_group_stop_exit:
1331 1331 mutex_exit(group->nxgep->genlock);
1332 1332 }
1333 1333
/*
 * nxge_hio_group_get
 *
 * MAC-layer group-info callback: fill in <infop> for ring group
 * <groupid> of the given type and initialize the corresponding
 * driver-side group structure.
 */
/* ARGSUSED */
void
nxge_hio_group_get(void *arg, mac_ring_type_t type, int groupid,
    mac_group_info_t *infop, mac_group_handle_t ghdl)
{
	p_nxge_t nxgep = (p_nxge_t)arg;
	nxge_ring_group_t *group;
	int dev_gindex;

	switch (type) {
	case MAC_RING_TYPE_RX:
		group = &nxgep->rx_hio_groups[groupid];
		group->nxgep = nxgep;
		group->ghandle = ghdl;
		group->gindex = groupid;
		group->sindex = 0;	/* not yet bound to a share */

		if (!isLDOMguest(nxgep)) {
			/* Map the MAC group id to the device group id. */
			dev_gindex =
			    nxgep->pt_config.hw_config.def_mac_rxdma_grpid +
			    groupid;

			/* groupid 0 maps onto the port's default group. */
			if (nxgep->pt_config.hw_config.def_mac_rxdma_grpid ==
			    dev_gindex)
				group->port_default_grp = B_TRUE;

			infop->mgi_count =
			    nxgep->pt_config.rdc_grps[dev_gindex].max_rdcs;
		} else {
			infop->mgi_count = NXGE_HIO_SHARE_MAX_CHANNELS;
		}

		infop->mgi_driver = (mac_group_driver_t)group;
		infop->mgi_start = nxge_hio_group_start;
		infop->mgi_stop = nxge_hio_group_stop;
		infop->mgi_addmac = nxge_hio_add_mac;
		infop->mgi_remmac = nxge_hio_rem_mac;
		break;

	case MAC_RING_TYPE_TX:
		/*
		 * 'groupid' for TX should be incremented by one since
		 * the default group (groupid 0) is not known by the MAC layer
		 */
		group = &nxgep->tx_hio_groups[groupid + 1];
		group->nxgep = nxgep;
		group->ghandle = ghdl;
		group->gindex = groupid + 1;
		group->sindex = 0;	/* not yet bound to a share */

		/* TX groups need no start/stop or MAC management. */
		infop->mgi_driver = (mac_group_driver_t)group;
		infop->mgi_start = NULL;
		infop->mgi_stop = NULL;
		infop->mgi_addmac = NULL;	/* not needed */
		infop->mgi_remmac = NULL;	/* not needed */
		/* no rings associated with group initially */
		infop->mgi_count = 0;
		break;
	}
}
1394 1394
1395 1395 #if defined(sun4v)
1396 1396
1397 1397 int
1398 1398 nxge_hio_share_assign(
1399 1399 nxge_t *nxge,
1400 1400 uint64_t cookie,
1401 1401 res_map_t *tmap,
1402 1402 res_map_t *rmap,
1403 1403 nxge_hio_vr_t *vr)
1404 1404 {
1405 1405 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1406 1406 uint64_t slot, hv_rv;
1407 1407 nxge_hio_dc_t *dc;
1408 1408 nxhv_vr_fp_t *fp;
1409 1409 int i;
1410 1410 uint64_t major;
1411 1411
1412 1412 /*
1413 1413 * Ask the Hypervisor to set up the VR for us
1414 1414 */
1415 1415 fp = &nhd->hio.vr;
1416 1416 major = nxge->niu_hsvc.hsvc_major;
1417 1417 switch (major) {
1418 1418 case NIU_MAJOR_VER: /* 1 */
1419 1419 if ((hv_rv = (*fp->assign)(vr->region, cookie, &vr->cookie))) {
1420 1420 NXGE_ERROR_MSG((nxge, HIO_CTL,
1421 1421 "nxge_hio_share_assign: major %d "
1422 1422 "vr->assign() returned %d", major, hv_rv));
1423 1423 nxge_hio_unshare(vr);
1424 1424 return (-EIO);
1425 1425 }
1426 1426
1427 1427 break;
1428 1428
1429 1429 case NIU_MAJOR_VER_2: /* 2 */
1430 1430 default:
1431 1431 if ((hv_rv = (*fp->cfgh_assign)
1432 1432 (nxge->niu_cfg_hdl, vr->region, cookie, &vr->cookie))) {
1433 1433 NXGE_ERROR_MSG((nxge, HIO_CTL,
1434 1434 "nxge_hio_share_assign: major %d "
1435 1435 "vr->assign() returned %d", major, hv_rv));
1436 1436 nxge_hio_unshare(vr);
1437 1437 return (-EIO);
1438 1438 }
1439 1439
1440 1440 break;
1441 1441 }
1442 1442
1443 1443 NXGE_DEBUG_MSG((nxge, HIO_CTL,
1444 1444 "nxge_hio_share_assign: major %d "
1445 1445 "vr->assign() success", major));
1446 1446
1447 1447 /*
1448 1448 * For each shared TDC, ask the HV to find us an empty slot.
1449 1449 */
1450 1450 dc = vr->tx_group.dc;
1451 1451 for (i = 0; i < NXGE_MAX_TDCS; i++) {
1452 1452 nxhv_dc_fp_t *tx = &nhd->hio.tx;
1453 1453 while (dc) {
1454 1454 hv_rv = (*tx->assign)
1455 1455 (vr->cookie, dc->channel, &slot);
1456 1456 if (hv_rv != 0) {
1457 1457 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1458 1458 "nxge_hio_share_assign: "
1459 1459 "tx->assign(%x, %d) failed: %ld",
1460 1460 vr->cookie, dc->channel, hv_rv));
1461 1461 return (-EIO);
1462 1462 }
1463 1463
1464 1464 dc->cookie = vr->cookie;
1465 1465 dc->page = (vp_channel_t)slot;
1466 1466
1467 1467 /* Inform the caller about the slot chosen. */
1468 1468 (*tmap) |= 1 << slot;
1469 1469
1470 1470 dc = dc->next;
1471 1471 }
1472 1472 }
1473 1473
1474 1474 /*
1475 1475 * For each shared RDC, ask the HV to find us an empty slot.
1476 1476 */
1477 1477 dc = vr->rx_group.dc;
1478 1478 for (i = 0; i < NXGE_MAX_RDCS; i++) {
1479 1479 nxhv_dc_fp_t *rx = &nhd->hio.rx;
1480 1480 while (dc) {
1481 1481 hv_rv = (*rx->assign)
1482 1482 (vr->cookie, dc->channel, &slot);
1483 1483 if (hv_rv != 0) {
1484 1484 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1485 1485 "nxge_hio_share_assign: "
1486 1486 "rx->assign(%x, %d) failed: %ld",
1487 1487 vr->cookie, dc->channel, hv_rv));
1488 1488 return (-EIO);
1489 1489 }
1490 1490
1491 1491 dc->cookie = vr->cookie;
1492 1492 dc->page = (vp_channel_t)slot;
1493 1493
1494 1494 /* Inform the caller about the slot chosen. */
1495 1495 (*rmap) |= 1 << slot;
1496 1496
1497 1497 dc = dc->next;
1498 1498 }
1499 1499 }
1500 1500
1501 1501 return (0);
1502 1502 }
1503 1503
1504 1504 void
1505 1505 nxge_hio_share_unassign(
1506 1506 nxge_hio_vr_t *vr)
1507 1507 {
1508 1508 nxge_t *nxge = (nxge_t *)vr->nxge;
1509 1509 nxge_hio_data_t *nhd;
1510 1510 nxge_hio_dc_t *dc;
1511 1511 nxhv_vr_fp_t *fp;
1512 1512 uint64_t hv_rv;
1513 1513
1514 1514 nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1515 1515
1516 1516 dc = vr->tx_group.dc;
1517 1517 while (dc) {
1518 1518 nxhv_dc_fp_t *tx = &nhd->hio.tx;
1519 1519 hv_rv = (*tx->unassign)(vr->cookie, dc->page);
1520 1520 if (hv_rv != 0) {
1521 1521 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1522 1522 "nxge_hio_share_unassign: "
1523 1523 "tx->unassign(%x, %d) failed: %ld",
1524 1524 vr->cookie, dc->page, hv_rv));
1525 1525 }
1526 1526 dc = dc->next;
1527 1527 }
1528 1528
1529 1529 dc = vr->rx_group.dc;
1530 1530 while (dc) {
1531 1531 nxhv_dc_fp_t *rx = &nhd->hio.rx;
1532 1532 hv_rv = (*rx->unassign)(vr->cookie, dc->page);
1533 1533 if (hv_rv != 0) {
1534 1534 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1535 1535 "nxge_hio_share_unassign: "
1536 1536 "rx->unassign(%x, %d) failed: %ld",
1537 1537 vr->cookie, dc->page, hv_rv));
1538 1538 }
1539 1539 dc = dc->next;
1540 1540 }
1541 1541
1542 1542 fp = &nhd->hio.vr;
1543 1543 if (fp->unassign) {
1544 1544 hv_rv = (*fp->unassign)(vr->cookie);
1545 1545 if (hv_rv != 0) {
1546 1546 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1547 1547 "nxge_hio_share_unassign: "
1548 1548 "vr->assign(%x) failed: %ld",
1549 1549 vr->cookie, hv_rv));
1550 1550 }
1551 1551 }
1552 1552 }
1553 1553
1554 1554 int
1555 1555 nxge_hio_share_alloc(void *arg, mac_share_handle_t *shandle)
1556 1556 {
1557 1557 p_nxge_t nxge = (p_nxge_t)arg;
1558 1558 nxge_share_handle_t *shp;
1559 1559 nxge_hio_vr_t *vr; /* The Virtualization Region */
1560 1560 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1561 1561
1562 1562 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_share"));
1563 1563
1564 1564 if (nhd->hio.vr.assign == 0 || nhd->hio.tx.assign == 0 ||
1565 1565 nhd->hio.rx.assign == 0) {
1566 1566 NXGE_ERROR_MSG((nxge, HIO_CTL, "HV assign function(s) NULL"));
1567 1567 return (EIO);
1568 1568 }
1569 1569
1570 1570 /*
1571 1571 * Get a VR.
1572 1572 */
1573 1573 if ((vr = nxge_hio_vr_share(nxge)) == 0)
1574 1574 return (EAGAIN);
1575 1575
1576 1576 shp = &nxge->shares[vr->region];
1577 1577 shp->nxgep = nxge;
1578 1578 shp->index = vr->region;
1579 1579 shp->vrp = (void *)vr;
1580 1580 shp->tmap = shp->rmap = 0; /* to be assigned by ms_sbind */
1581 1581 shp->rxgroup = 0; /* to be assigned by ms_sadd */
1582 1582 shp->active = B_FALSE; /* not bound yet */
1583 1583
1584 1584 *shandle = (mac_share_handle_t)shp;
1585 1585
1586 1586 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_share"));
1587 1587 return (0);
1588 1588 }
1589 1589
1590 1590
1591 1591 void
1592 1592 nxge_hio_share_free(mac_share_handle_t shandle)
1593 1593 {
1594 1594 nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;
1595 1595 nxge_hio_vr_t *vr;
1596 1596
1597 1597 /*
1598 1598 * Clear internal handle state.
1599 1599 */
1600 1600 vr = shp->vrp;
1601 1601 shp->vrp = (void *)NULL;
1602 1602 shp->index = 0;
1603 1603 shp->tmap = 0;
1604 1604 shp->rmap = 0;
1605 1605 shp->rxgroup = 0;
1606 1606 shp->active = B_FALSE;
1607 1607
1608 1608 /*
1609 1609 * Free VR resource.
1610 1610 */
1611 1611 nxge_hio_unshare(vr);
1612 1612 }
1613 1613
1614 1614
1615 1615 void
1616 1616 nxge_hio_share_query(mac_share_handle_t shandle, mac_ring_type_t type,
1617 1617 mac_ring_handle_t *rings, uint_t *n_rings)
1618 1618 {
1619 1619 nxge_t *nxge;
1620 1620 nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;
1621 1621 nxge_ring_handle_t *rh;
1622 1622 uint32_t offset;
1623 1623
1624 1624 nxge = shp->nxgep;
1625 1625
1626 1626 switch (type) {
1627 1627 case MAC_RING_TYPE_RX:
1628 1628 rh = nxge->rx_ring_handles;
1629 1629 offset = nxge->pt_config.hw_config.start_rdc;
1630 1630 break;
1631 1631
1632 1632 case MAC_RING_TYPE_TX:
1633 1633 rh = nxge->tx_ring_handles;
1634 1634 offset = nxge->pt_config.hw_config.tdc.start;
1635 1635 break;
1636 1636 }
1637 1637
1638 1638 /*
1639 1639 * In version 1.0, we may only give a VR 2 RDCs/TDCs. Not only that,
1640 1640 * but the HV has statically assigned the channels like so:
1641 1641 * VR0: RDC0 & RDC1
1642 1642 * VR1: RDC2 & RDC3, etc.
1643 1643 * The TDCs are assigned in exactly the same way.
1644 1644 */
1645 1645 if (rings != NULL) {
1646 1646 rings[0] = rh[(shp->index * 2) - offset].ring_handle;
1647 1647 rings[1] = rh[(shp->index * 2 + 1) - offset].ring_handle;
1648 1648 }
1649 1649 if (n_rings != NULL) {
1650 1650 *n_rings = 2;
1651 1651 }
1652 1652 }
1653 1653
1654 1654 int
1655 1655 nxge_hio_share_add_group(mac_share_handle_t shandle,
1656 1656 mac_group_driver_t ghandle)
1657 1657 {
1658 1658 nxge_t *nxge;
1659 1659 nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;
1660 1660 nxge_ring_group_t *rg = (nxge_ring_group_t *)ghandle;
1661 1661 nxge_hio_vr_t *vr; /* The Virtualization Region */
1662 1662 nxge_grp_t *group;
1663 1663 int i;
1664 1664
1665 1665 if (rg->sindex != 0) {
1666 1666 /* the group is already bound to a share */
1667 1667 return (EALREADY);
1668 1668 }
1669 1669
1670 1670 /*
1671 1671 * If we are adding a group 0 to a share, this
1672 1672 * is not correct.
1673 1673 */
1674 1674 ASSERT(rg->gindex != 0);
1675 1675
1676 1676 nxge = rg->nxgep;
1677 1677 vr = shp->vrp;
1678 1678
1679 1679 switch (rg->type) {
1680 1680 case MAC_RING_TYPE_RX:
1681 1681 /*
1682 1682 * Make sure that the group has the right rings associated
1683 1683 * for the share. In version 1.0, we may only give a VR
1684 1684 * 2 RDCs. Not only that, but the HV has statically
1685 1685 * assigned the channels like so:
1686 1686 * VR0: RDC0 & RDC1
1687 1687 * VR1: RDC2 & RDC3, etc.
1688 1688 */
1689 1689 group = nxge->rx_set.group[rg->gindex];
1690 1690
1691 1691 if (group->count > 2) {
1692 1692 /* a share can have at most 2 rings */
1693 1693 return (EINVAL);
1694 1694 }
1695 1695
1696 1696 for (i = 0; i < NXGE_MAX_RDCS; i++) {
1697 1697 if (group->map & (1 << i)) {
1698 1698 if ((i != shp->index * 2) &&
1699 1699 (i != (shp->index * 2 + 1))) {
1700 1700 /*
1701 1701 * A group with invalid rings was
1702 1702 * attempted to bind to this share
1703 1703 */
1704 1704 return (EINVAL);
1705 1705 }
1706 1706 }
1707 1707 }
1708 1708
1709 1709 rg->sindex = vr->region;
1710 1710 vr->rdc_tbl = rg->rdctbl;
1711 1711 shp->rxgroup = vr->rdc_tbl;
1712 1712 break;
1713 1713
1714 1714 case MAC_RING_TYPE_TX:
1715 1715 /*
1716 1716 * Make sure that the group has the right rings associated
1717 1717 * for the share. In version 1.0, we may only give a VR
1718 1718 * 2 TDCs. Not only that, but the HV has statically
1719 1719 * assigned the channels like so:
1720 1720 * VR0: TDC0 & TDC1
1721 1721 * VR1: TDC2 & TDC3, etc.
1722 1722 */
1723 1723 group = nxge->tx_set.group[rg->gindex];
1724 1724
1725 1725 if (group->count > 2) {
1726 1726 /* a share can have at most 2 rings */
1727 1727 return (EINVAL);
1728 1728 }
1729 1729
1730 1730 for (i = 0; i < NXGE_MAX_TDCS; i++) {
1731 1731 if (group->map & (1 << i)) {
1732 1732 if ((i != shp->index * 2) &&
1733 1733 (i != (shp->index * 2 + 1))) {
1734 1734 /*
1735 1735 * A group with invalid rings was
1736 1736 * attempted to bind to this share
1737 1737 */
1738 1738 return (EINVAL);
1739 1739 }
1740 1740 }
1741 1741 }
1742 1742
1743 1743 vr->tdc_tbl = nxge->pt_config.hw_config.def_mac_txdma_grpid +
1744 1744 rg->gindex;
1745 1745 rg->sindex = vr->region;
1746 1746 break;
1747 1747 }
1748 1748 return (0);
1749 1749 }
1750 1750
1751 1751 int
1752 1752 nxge_hio_share_rem_group(mac_share_handle_t shandle,
1753 1753 mac_group_driver_t ghandle)
1754 1754 {
1755 1755 nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;
1756 1756 nxge_ring_group_t *group = (nxge_ring_group_t *)ghandle;
1757 1757 nxge_hio_vr_t *vr; /* The Virtualization Region */
1758 1758 int rv = 0;
1759 1759
1760 1760 vr = shp->vrp;
1761 1761
1762 1762 switch (group->type) {
1763 1763 case MAC_RING_TYPE_RX:
1764 1764 group->sindex = 0;
1765 1765 vr->rdc_tbl = 0;
1766 1766 shp->rxgroup = 0;
1767 1767 break;
1768 1768
1769 1769 case MAC_RING_TYPE_TX:
1770 1770 group->sindex = 0;
1771 1771 vr->tdc_tbl = 0;
1772 1772 break;
1773 1773 }
1774 1774
1775 1775 return (rv);
1776 1776 }
1777 1777
/*
 * nxge_hio_share_bind
 *
 * MAC-layer ms_sbind entry point: add the VR's TX and RX DMA
 * resources to the share, then ask the hypervisor to assign slots
 * for them.  On any failure, resources acquired so far are rolled
 * back.  On success the combined (cfg_hdl << 32 | HV cookie) is
 * returned through <rcookie>.
 */
int
nxge_hio_share_bind(mac_share_handle_t shandle, uint64_t cookie,
    uint64_t *rcookie)
{
	nxge_t *nxge;
	nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;
	nxge_hio_vr_t *vr;
	uint64_t rmap, tmap, hv_rmap, hv_tmap;
	int rv;

	ASSERT(shp != NULL);
	ASSERT(shp->nxgep != NULL);
	ASSERT(shp->vrp != NULL);

	nxge = shp->nxgep;
	vr = (nxge_hio_vr_t *)shp->vrp;

	/*
	 * Add resources to the share.
	 * For each DMA channel associated with the VR, bind its resources
	 * to the VR.
	 */
	tmap = 0;
	rv = nxge_hio_addres(vr, MAC_RING_TYPE_TX, &tmap);
	if (rv != 0) {
		return (rv);
	}

	rmap = 0;
	rv = nxge_hio_addres(vr, MAC_RING_TYPE_RX, &rmap);
	if (rv != 0) {
		/* Roll back the TX resources added above. */
		nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap);
		return (rv);
	}

	/*
	 * Ask the Hypervisor to set up the VR and allocate slots for
	 * each rings associated with the VR.
	 */
	hv_tmap = hv_rmap = 0;
	if ((rv = nxge_hio_share_assign(nxge, cookie,
	    &hv_tmap, &hv_rmap, vr))) {
		/* Roll back both TX and RX resources. */
		nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap);
		nxge_hio_remres(vr, MAC_RING_TYPE_RX, rmap);
		return (rv);
	}

	shp->active = B_TRUE;
	shp->tmap = hv_tmap;
	shp->rmap = hv_rmap;

	/* high 32 bits are cfg_hdl and low 32 bits are HV cookie */
	*rcookie = (((uint64_t)nxge->niu_cfg_hdl) << 32) | vr->cookie;

	return (0);
}
1834 1834
1835 1835 void
1836 1836 nxge_hio_share_unbind(mac_share_handle_t shandle)
1837 1837 {
1838 1838 nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;
1839 1839
1840 1840 /*
1841 1841 * First, unassign the VR (take it back),
1842 1842 * so we can enable interrupts again.
1843 1843 */
1844 1844 nxge_hio_share_unassign(shp->vrp);
1845 1845
1846 1846 /*
1847 1847 * Free Ring Resources for TX and RX
1848 1848 */
1849 1849 nxge_hio_remres(shp->vrp, MAC_RING_TYPE_TX, shp->tmap);
1850 1850 nxge_hio_remres(shp->vrp, MAC_RING_TYPE_RX, shp->rmap);
1851 1851 }
1852 1852
1853 1853
1854 1854 /*
1855 1855 * nxge_hio_vr_share
1856 1856 *
1857 1857 * Find an unused Virtualization Region (VR).
1858 1858 *
1859 1859 * Arguments:
1860 1860 * nxge
1861 1861 *
1862 1862 * Notes:
1863 1863 *
1864 1864 * Context:
1865 1865 * Service domain
1866 1866 */
1867 1867 nxge_hio_vr_t *
1868 1868 nxge_hio_vr_share(
1869 1869 nxge_t *nxge)
1870 1870 {
1871 1871 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1872 1872 nxge_hio_vr_t *vr;
1873 1873
1874 1874 int first, limit, region;
1875 1875
1876 1876 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_share"));
1877 1877
1878 1878 MUTEX_ENTER(&nhd->lock);
1879 1879
1880 1880 if (nhd->vrs == 0) {
1881 1881 MUTEX_EXIT(&nhd->lock);
1882 1882 return (0);
1883 1883 }
1884 1884
1885 1885 /* Find an empty virtual region (VR). */
1886 1886 if (nxge->function_num == 0) {
1887 1887 // FUNC0_VIR0 'belongs' to NIU port 0.
1888 1888 first = FUNC0_VIR1;
1889 1889 limit = FUNC2_VIR0;
1890 1890 } else if (nxge->function_num == 1) {
1891 1891 // FUNC2_VIR0 'belongs' to NIU port 1.
1892 1892 first = FUNC2_VIR1;
1893 1893 limit = FUNC_VIR_MAX;
1894 1894 } else {
1895 1895 cmn_err(CE_WARN,
1896 1896 "Shares not supported on function(%d) at this time.\n",
1897 1897 nxge->function_num);
1898 1898 }
1899 1899
1900 1900 for (region = first; region < limit; region++) {
1901 1901 if (nhd->vr[region].nxge == 0)
1902 1902 break;
1903 1903 }
1904 1904
1905 1905 if (region == limit) {
1906 1906 MUTEX_EXIT(&nhd->lock);
1907 1907 return (0);
1908 1908 }
1909 1909
1910 1910 vr = &nhd->vr[region];
1911 1911 vr->nxge = (uintptr_t)nxge;
1912 1912 vr->region = (uintptr_t)region;
1913 1913
1914 1914 nhd->vrs--;
1915 1915
1916 1916 MUTEX_EXIT(&nhd->lock);
1917 1917
1918 1918 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_share"));
1919 1919
1920 1920 return (vr);
1921 1921 }
1922 1922
1923 1923 void
1924 1924 nxge_hio_unshare(
1925 1925 nxge_hio_vr_t *vr)
1926 1926 {
1927 1927 nxge_t *nxge = (nxge_t *)vr->nxge;
1928 1928 nxge_hio_data_t *nhd;
1929 1929
1930 1930 vr_region_t region;
1931 1931
1932 1932 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_unshare"));
1933 1933
1934 1934 if (!nxge) {
1935 1935 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_unshare: "
1936 1936 "vr->nxge is NULL"));
1937 1937 return;
1938 1938 }
1939 1939
1940 1940 /*
1941 1941 * This function is no longer called, but I will keep it
1942 1942 * here in case we want to revisit this topic in the future.
1943 1943 *
1944 1944 * nxge_hio_hostinfo_uninit(nxge, vr);
1945 1945 */
1946 1946
1947 1947 /*
1948 1948 * XXX: This is done by ms_sremove?
1949 1949 * (void) nxge_fzc_rdc_tbl_unbind(nxge, vr->rdc_tbl);
1950 1950 */
1951 1951
1952 1952 nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1953 1953
1954 1954 MUTEX_ENTER(&nhd->lock);
1955 1955
1956 1956 region = vr->region;
1957 1957 (void) memset(vr, 0, sizeof (*vr));
1958 1958 vr->region = region;
1959 1959
1960 1960 nhd->vrs++;
1961 1961
1962 1962 MUTEX_EXIT(&nhd->lock);
1963 1963
1964 1964 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_unshare"));
1965 1965 }
1966 1966
/*
 * nxge_hio_addres
 *
 * Share every DMA channel of the VR's TX or RX group with the VR,
 * recording the channels successfully shared as a bitmap in <map>.
 *
 * Returns 0 on success; the (positive) error from the first failed
 * nxge_hio_dc_share() if no channel at all could be shared; EIO if
 * only some channels were shared.
 */
int
nxge_hio_addres(nxge_hio_vr_t *vr, mac_ring_type_t type, uint64_t *map)
{
	nxge_t *nxge;
	nxge_grp_t *group;
	int groupid;
	int i, rv = 0;
	int max_dcs;

	ASSERT(vr != NULL);
	ASSERT(vr->nxge != NULL);
	nxge = (nxge_t *)vr->nxge;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_addres"));

	/*
	 * For each ring associated with the group, add the resources
	 * to the group and bind.
	 */
	max_dcs = (type == MAC_RING_TYPE_TX) ? NXGE_MAX_TDCS : NXGE_MAX_RDCS;
	if (type == MAC_RING_TYPE_TX) {
		/* set->group is an array of group indexed by a port group id */
		groupid = vr->tdc_tbl -
		    nxge->pt_config.hw_config.def_mac_txdma_grpid;
		group = nxge->tx_set.group[groupid];
	} else {
		/* set->group is an array of group indexed by a port group id */
		groupid = vr->rdc_tbl -
		    nxge->pt_config.hw_config.def_mac_rxdma_grpid;
		group = nxge->rx_set.group[groupid];
	}

	ASSERT(group != NULL);

	if (group->map == 0) {
		NXGE_DEBUG_MSG((nxge, HIO_CTL, "There is no rings associated "
		    "with this VR"));
		return (EINVAL);
	}

	for (i = 0; i < max_dcs; i++) {
		if (group->map & (1 << i)) {
			/*
			 * nxge_hio_dc_share() returns a negative
			 * errno on failure; flip the sign if the
			 * very first channel fails, otherwise keep
			 * the channels shared so far and stop.
			 */
			if ((rv = nxge_hio_dc_share(nxge, vr, type, i)) < 0) {
				if (*map == 0) /* Couldn't get even one DC. */
					return (-rv);
				else
					break;
			}
			*map |= (1 << i);
		}
	}

	/* A partial share (or an empty one) is reported as EIO. */
	if ((*map == 0) || (rv != 0)) {
		NXGE_DEBUG_MSG((nxge, HIO_CTL,
		    "<== nxge_hio_addres: rv(%x)", rv));
		return (EIO);
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_addres"));
	return (0);
}
2028 2028
/*
 * nxge_hio_remres
 *
 * Undo nxge_hio_addres(): unshare every DMA channel bound to the
 * VR's TX or RX group.  <res_map> is the caller's record of the
 * pages shared; any bit still set afterwards indicates a channel
 * that was not found on the group's list and is logged.
 */
/* ARGSUSED */
void
nxge_hio_remres(
	nxge_hio_vr_t *vr,
	mac_ring_type_t type,
	res_map_t res_map)
{
	nxge_t *nxge = (nxge_t *)vr->nxge;
	nxge_grp_t *group;

	if (!nxge) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: "
		    "vr->nxge is NULL"));
		return;
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_remres(%lx)", res_map));

	/*
	 * For each ring bound to the group, remove the DMA resources
	 * from the group and unbind.
	 *
	 * NOTE(review): this loop terminates only if
	 * nxge_hio_dc_unshare() unlinks the head dc from group->dc
	 * on each pass — confirm in its definition.
	 */
	group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group);
	while (group->dc) {
		nxge_hio_dc_t *dc = group->dc;
		NXGE_DC_RESET(res_map, dc->page);
		nxge_hio_dc_unshare(nxge, vr, type, dc->channel);
	}

	/* Any leftover bits mean pages we never found on the list. */
	if (res_map) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: "
		    "res_map %lx", res_map));
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_remres"));
}
2065 2065
2066 2066 /*
2067 2067 * nxge_hio_tdc_share
2068 2068 *
2069 2069 * Share an unused TDC channel.
2070 2070 *
2071 2071 * Arguments:
2072 2072 * nxge
2073 2073 *
2074 2074 * Notes:
2075 2075 *
2076 2076 * A.7.3 Reconfigure Tx DMA channel
2077 2077 * Disable TxDMA A.9.6.10
2078 2078 * [Rebind TxDMA channel to Port A.9.6.7]
2079 2079 *
2080 2080 * We don't have to Rebind the TDC to the port - it always already bound.
2081 2081 *
2082 2082 * Soft Reset TxDMA A.9.6.2
2083 2083 *
2084 2084 * This procedure will be executed by nxge_init_txdma_channel() in the
2085 2085 * guest domain:
2086 2086 *
2087 2087 * Re-initialize TxDMA A.9.6.8
2088 2088 * Reconfigure TxDMA
2089 2089 * Enable TxDMA A.9.6.9
2090 2090 *
2091 2091 * Context:
2092 2092 * Service domain
2093 2093 */
int
nxge_hio_tdc_share(
	nxge_t *nxge,
	int channel)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_grp_set_t *set = &nxge->tx_set;
	tx_ring_t *ring;
	int count;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_share"));

	/*
	 * Wait until this channel is idle.
	 */
	ring = nxge->tx_rings->rings[channel];
	ASSERT(ring != NULL);

	/* Signal the transmit path that this ring is going offline. */
	(void) atomic_swap_32(&ring->tx_ring_offline, NXGE_TX_RING_OFFLINING);
	if (ring->tx_ring_busy) {
		/*
		 * Wait for 30 seconds.
		 * (30,000 polls x 1000us busy-wait per poll.)
		 */
		for (count = 30 * 1000; count; count--) {
			if (ring->tx_ring_offline & NXGE_TX_RING_OFFLINED) {
				break;
			}

			drv_usecwait(1000);
		}

		if (count == 0) {
			/* Timed out: put the ring back online and fail. */
			(void) atomic_swap_32(&ring->tx_ring_offline,
			    NXGE_TX_RING_ONLINE);
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_hio_tdc_share: "
			    "Tx ring %d was always BUSY", channel));
			return (-EIO);
		}
	} else {
		/* Ring already idle; mark it offlined immediately. */
		(void) atomic_swap_32(&ring->tx_ring_offline,
		    NXGE_TX_RING_OFFLINED);
	}

	MUTEX_ENTER(&nhd->lock);
	nxge->tdc_is_shared[channel] = B_TRUE;
	MUTEX_EXIT(&nhd->lock);

	/*
	 * NOTE(review): on failure below, tdc_is_shared stays B_TRUE
	 * and the ring stays offlined — confirm callers roll this
	 * back (e.g. via nxge_hio_tdc_unshare).
	 */
	if (nxge_intr_remove(nxge, VP_BOUND_TX, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_share: "
		    "Failed to remove interrupt for TxDMA channel %d",
		    channel));
		return (-EINVAL);
	}

	/* Disable TxDMA A.9.6.10 */
	(void) nxge_txdma_channel_disable(nxge, channel);

	/* The SD is sharing this channel. */
	NXGE_DC_SET(set->shared.map, channel);
	set->shared.count++;

	/* Soft Reset TxDMA A.9.6.2 */
	nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);

	/*
	 * Initialize the DC-specific FZC control registers.
	 * -----------------------------------------------------
	 */
	if (nxge_init_fzc_tdc(nxge, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_tdc_share: FZC TDC failed: %d", channel));
		return (-EIO);
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_share"));

	return (0);
}
2173 2173
2174 2174 /*
2175 2175 * nxge_hio_rdc_share
2176 2176 *
2177 2177 * Share an unused RDC channel.
2178 2178 *
2179 2179 * Arguments:
2180 2180 * nxge
2181 2181 *
2182 2182 * Notes:
2183 2183 *
2184 2184 * This is the latest version of the procedure to
2185 2185 * Reconfigure an Rx DMA channel:
2186 2186 *
2187 2187 * A.6.3 Reconfigure Rx DMA channel
2188 2188 * Stop RxMAC A.9.2.6
2189 2189 * Drain IPP Port A.9.3.6
2190 2190 * Stop and reset RxDMA A.9.5.3
2191 2191 *
2192 2192 * This procedure will be executed by nxge_init_rxdma_channel() in the
2193 2193 * guest domain:
2194 2194 *
2195 2195 * Initialize RxDMA A.9.5.4
2196 2196 * Reconfigure RxDMA
2197 2197 * Enable RxDMA A.9.5.5
2198 2198 *
2199 2199 * We will do this here, since the RDC is a canalis non grata:
2200 2200 * Enable RxMAC A.9.2.10
2201 2201 *
2202 2202 * Context:
2203 2203 * Service domain
2204 2204 */
2205 2205 int
2206 2206 nxge_hio_rdc_share(
2207 2207 nxge_t *nxge,
2208 2208 nxge_hio_vr_t *vr,
2209 2209 int channel)
2210 2210 {
2211 2211 nxge_grp_set_t *set = &nxge->rx_set;
2212 2212 nxge_rdc_grp_t *rdc_grp;
2213 2213
2214 2214 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_share"));
2215 2215
2216 2216 /* Disable interrupts. */
2217 2217 if (nxge_intr_remove(nxge, VP_BOUND_RX, channel) != NXGE_OK) {
2218 2218 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
2219 2219 "Failed to remove interrupt for RxDMA channel %d",
2220 2220 channel));
2221 2221 return (NXGE_ERROR);
2222 2222 }
2223 2223
2224 2224 /* Stop RxMAC = A.9.2.6 */
2225 2225 if (nxge_rx_mac_disable(nxge) != NXGE_OK) {
2226 2226 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
2227 2227 "Failed to disable RxMAC"));
2228 2228 }
2229 2229
2230 2230 /* Drain IPP Port = A.9.3.6 */
2231 2231 (void) nxge_ipp_drain(nxge);
2232 2232
2233 2233 /* Stop and reset RxDMA = A.9.5.3 */
2234 2234 // De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 )
2235 2235 if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) {
2236 2236 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
2237 2237 "Failed to disable RxDMA channel %d", channel));
2238 2238 }
2239 2239
2240 2240 /* The SD is sharing this channel. */
2241 2241 NXGE_DC_SET(set->shared.map, channel);
2242 2242 set->shared.count++;
2243 2243
2244 2244 // Assert RST: RXDMA_CFIG1[30] = 1
2245 2245 nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel);
2246 2246
2247 2247 /*
2248 2248 * The guest domain will reconfigure the RDC later.
2249 2249 *
2250 2250 * But in the meantime, we must re-enable the Rx MAC so
2251 2251 * that we can start receiving packets again on the
2252 2252 * remaining RDCs:
2253 2253 *
2254 2254 * Enable RxMAC = A.9.2.10
2255 2255 */
2256 2256 if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
2257 2257 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2258 2258 "nxge_hio_rdc_share: Rx MAC still disabled"));
2259 2259 }
2260 2260
2261 2261 /*
2262 2262 * Initialize the DC-specific FZC control registers.
2263 2263 * -----------------------------------------------------
2264 2264 */
2265 2265 if (nxge_init_fzc_rdc(nxge, channel) != NXGE_OK) {
2266 2266 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2267 2267 "nxge_hio_rdc_share: RZC RDC failed: %ld", channel));
2268 2268 return (-EIO);
2269 2269 }
2270 2270
2271 2271 /*
2272 2272 * Update the RDC group.
2273 2273 */
2274 2274 rdc_grp = &nxge->pt_config.rdc_grps[vr->rdc_tbl];
2275 2275 NXGE_DC_SET(rdc_grp->map, channel);
2276 2276
2277 2277 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_share"));
2278 2278
2279 2279 return (0);
2280 2280 }
2281 2281
2282 2282 /*
2283 2283 * nxge_hio_dc_share
2284 2284 *
2285 2285 * Share a DMA channel with a guest domain.
2286 2286 *
2287 2287 * Arguments:
2288 2288 * nxge
2289 2289 * vr The VR that <channel> will belong to.
2290 2290 * type Tx or Rx.
2291 2291 * channel Channel to share
2292 2292 *
2293 2293 * Notes:
2294 2294 *
2295 2295 * Context:
2296 2296 * Service domain
2297 2297 */
2298 2298 int
2299 2299 nxge_hio_dc_share(
2300 2300 nxge_t *nxge,
2301 2301 nxge_hio_vr_t *vr,
2302 2302 mac_ring_type_t type,
2303 2303 int channel)
2304 2304 {
2305 2305 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
2306 2306 nxge_hio_dc_t *dc;
2307 2307 nxge_grp_t *group;
2308 2308 int slot;
2309 2309
2310 2310 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_share(%cdc %d",
2311 2311 type == MAC_RING_TYPE_TX ? 't' : 'r', channel));
2312 2312
2313 2313
2314 2314 /* -------------------------------------------------- */
2315 2315 slot = (type == MAC_RING_TYPE_TX) ?
2316 2316 nxge_hio_tdc_share(nxge, channel) :
2317 2317 nxge_hio_rdc_share(nxge, vr, channel);
2318 2318
2319 2319 if (slot < 0) {
2320 2320 if (type == MAC_RING_TYPE_RX) {
2321 2321 nxge_hio_rdc_unshare(nxge, vr->rdc_tbl, channel);
2322 2322 } else {
2323 2323 nxge_hio_tdc_unshare(nxge, vr->tdc_tbl, channel);
2324 2324 }
2325 2325 return (slot);
2326 2326 }
2327 2327
2328 2328 MUTEX_ENTER(&nhd->lock);
2329 2329
2330 2330 /*
2331 2331 * Tag this channel.
2332 2332 * --------------------------------------------------
2333 2333 */
2334 2334 dc = type == MAC_RING_TYPE_TX ? &nhd->tdc[channel] : &nhd->rdc[channel];
2335 2335
2336 2336 dc->vr = vr;
2337 2337 dc->channel = (nxge_channel_t)channel;
2338 2338
2339 2339 MUTEX_EXIT(&nhd->lock);
2340 2340
2341 2341 /*
2342 2342 * vr->[t|r]x_group is used by the service domain to
2343 2343 * keep track of its shared DMA channels.
2344 2344 */
2345 2345 MUTEX_ENTER(&nxge->group_lock);
2346 2346 group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group);
2347 2347
2348 2348 dc->group = group;
2349 2349 /* Initialize <group>, if necessary */
2350 2350 if (group->count == 0) {
2351 2351 group->nxge = nxge;
2352 2352 group->type = (type == MAC_RING_TYPE_TX) ?
2353 2353 VP_BOUND_TX : VP_BOUND_RX;
2354 2354 group->sequence = nhd->sequence++;
2355 2355 group->active = B_TRUE;
2356 2356 }
2357 2357
2358 2358 MUTEX_EXIT(&nxge->group_lock);
2359 2359
2360 2360 NXGE_ERROR_MSG((nxge, HIO_CTL,
2361 2361 "DC share: %cDC %d was assigned to slot %d",
2362 2362 type == MAC_RING_TYPE_TX ? 'T' : 'R', channel, slot));
2363 2363
2364 2364 nxge_grp_dc_append(nxge, group, dc);
2365 2365
2366 2366 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_share"));
2367 2367
2368 2368 return (0);
2369 2369 }
2370 2370
2371 2371 /*
2372 2372 * nxge_hio_tdc_unshare
2373 2373 *
2374 2374 * Unshare a TDC.
2375 2375 *
2376 2376 * Arguments:
2377 2377 * nxge
2378 2378 * channel The channel to unshare (add again).
2379 2379 *
2380 2380 * Notes:
2381 2381 *
2382 2382 * Context:
2383 2383 * Service domain
2384 2384 */
void
nxge_hio_tdc_unshare(
	nxge_t *nxge,
	int dev_grpid,
	int channel)
{
	nxge_grp_set_t *set = &nxge->tx_set;
	nxge_grp_t *group;
	int grpid;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_unshare"));

	/* The channel is no longer shared with a guest domain. */
	NXGE_DC_RESET(set->shared.map, channel);
	set->shared.count--;

	/*
	 * Translate the device-level group id into an index into
	 * the service domain's own TX group array.
	 */
	grpid = dev_grpid - nxge->pt_config.hw_config.def_mac_txdma_grpid;
	group = set->group[grpid];

	/*
	 * Re-add the channel to the SD's group; nonzero return
	 * means the channel could not be reinitialized, in which
	 * case the interrupt is deliberately not re-added.
	 */
	if ((nxge_grp_dc_add(nxge, group, VP_BOUND_TX, channel))) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: "
		    "Failed to initialize TxDMA channel %d", channel));
		return;
	}

	/* Re-add this interrupt. */
	if (nxge_intr_add(nxge, VP_BOUND_TX, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: "
		    "Failed to add interrupt for TxDMA channel %d", channel));
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_unshare"));
}
2417 2417
2418 2418 /*
2419 2419 * nxge_hio_rdc_unshare
2420 2420 *
2421 2421 * Unshare an RDC: add it to the SD's RDC groups (tables).
2422 2422 *
2423 2423 * Arguments:
2424 2424 * nxge
2425 2425 * channel The channel to unshare (add again).
2426 2426 *
2427 2427 * Notes:
2428 2428 *
2429 2429 * Context:
2430 2430 * Service domain
2431 2431 */
2432 2432 void
2433 2433 nxge_hio_rdc_unshare(
2434 2434 nxge_t *nxge,
2435 2435 int dev_grpid,
2436 2436 int channel)
2437 2437 {
2438 2438 nxge_grp_set_t *set = &nxge->rx_set;
2439 2439 nxge_grp_t *group;
2440 2440 int grpid;
2441 2441 int i;
2442 2442
2443 2443 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_unshare"));
2444 2444
2445 2445 /* Stop RxMAC = A.9.2.6 */
2446 2446 if (nxge_rx_mac_disable(nxge) != NXGE_OK) {
2447 2447 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
2448 2448 "Failed to disable RxMAC"));
2449 2449 }
2450 2450
2451 2451 /* Drain IPP Port = A.9.3.6 */
2452 2452 (void) nxge_ipp_drain(nxge);
2453 2453
2454 2454 /* Stop and reset RxDMA = A.9.5.3 */
2455 2455 // De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 )
2456 2456 if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) {
2457 2457 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
2458 2458 "Failed to disable RxDMA channel %d", channel));
2459 2459 }
2460 2460
2461 2461 NXGE_DC_RESET(set->shared.map, channel);
2462 2462 set->shared.count--;
2463 2463
2464 2464 grpid = dev_grpid - nxge->pt_config.hw_config.def_mac_rxdma_grpid;
2465 2465 group = set->group[grpid];
2466 2466
2467 2467 /*
2468 2468 * Assert RST: RXDMA_CFIG1[30] = 1
2469 2469 *
2470 2470 * Initialize RxDMA A.9.5.4
2471 2471 * Reconfigure RxDMA
2472 2472 * Enable RxDMA A.9.5.5
2473 2473 */
2474 2474 if ((nxge_grp_dc_add(nxge, group, VP_BOUND_RX, channel))) {
2475 2475 /* Be sure to re-enable the RX MAC. */
2476 2476 if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
2477 2477 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2478 2478 "nxge_hio_rdc_share: Rx MAC still disabled"));
2479 2479 }
2480 2480 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
2481 2481 "Failed to initialize RxDMA channel %d", channel));
2482 2482 return;
2483 2483 }
2484 2484
2485 2485 /*
2486 2486 * Enable RxMAC = A.9.2.10
2487 2487 */
2488 2488 if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
2489 2489 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2490 2490 "nxge_hio_rdc_share: Rx MAC still disabled"));
2491 2491 return;
2492 2492 }
2493 2493
2494 2494 /* Re-add this interrupt. */
2495 2495 if (nxge_intr_add(nxge, VP_BOUND_RX, channel) != NXGE_OK) {
2496 2496 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2497 2497 "nxge_hio_rdc_unshare: Failed to add interrupt for "
2498 2498 "RxDMA CHANNEL %d", channel));
2499 2499 }
2500 2500
2501 2501 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_unshare"));
2502 2502
2503 2503 for (i = 0; i < NXGE_MAX_RDCS; i++) {
2504 2504 if (nxge->rx_ring_handles[i].channel == channel) {
2505 2505 (void) nxge_rx_ring_start(
2506 2506 (mac_ring_driver_t)&nxge->rx_ring_handles[i],
2507 2507 nxge->rx_ring_handles[i].ring_gen_num);
2508 2508 }
2509 2509 }
2510 2510 }
2511 2511
2512 2512 /*
2513 2513 * nxge_hio_dc_unshare
2514 2514 *
2515 2515 * Unshare (reuse) a DMA channel.
2516 2516 *
2517 2517 * Arguments:
2518 2518 * nxge
2519 2519 * vr The VR that <channel> belongs to.
2520 2520 * type Tx or Rx.
2521 2521 * channel The DMA channel to reuse.
2522 2522 *
2523 2523 * Notes:
2524 2524 *
2525 2525 * Context:
2526 2526 * Service domain
2527 2527 */
2528 2528 void
2529 2529 nxge_hio_dc_unshare(
2530 2530 nxge_t *nxge,
2531 2531 nxge_hio_vr_t *vr,
2532 2532 mac_ring_type_t type,
2533 2533 int channel)
2534 2534 {
2535 2535 nxge_grp_t *group;
2536 2536 nxge_hio_dc_t *dc;
2537 2537
2538 2538 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_unshare(%cdc %d)",
2539 2539 type == MAC_RING_TYPE_TX ? 't' : 'r', channel));
2540 2540
2541 2541 /* Unlink the channel from its group. */
2542 2542 /* -------------------------------------------------- */
2543 2543 group = (type == MAC_RING_TYPE_TX) ? &vr->tx_group : &vr->rx_group;
2544 2544 NXGE_DC_RESET(group->map, channel);
2545 2545 if ((dc = nxge_grp_dc_unlink(nxge, group, channel)) == 0) {
2546 2546 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2547 2547 "nxge_hio_dc_unshare(%d) failed", channel));
2548 2548 return;
2549 2549 }
2550 2550
2551 2551 dc->vr = 0;
2552 2552 dc->cookie = 0;
2553 2553
2554 2554 if (type == MAC_RING_TYPE_RX) {
2555 2555 nxge_hio_rdc_unshare(nxge, vr->rdc_tbl, channel);
2556 2556 } else {
2557 2557 nxge_hio_tdc_unshare(nxge, vr->tdc_tbl, channel);
2558 2558 }
2559 2559
2560 2560 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_unshare"));
2561 2561 }
2562 2562
2563 2563
2564 2564 /*
2565 2565 * nxge_hio_rxdma_bind_intr():
2566 2566 *
2567 2567 * For the guest domain driver, need to bind the interrupt group
2568 2568 * and state to the rx_rcr_ring_t.
2569 2569 */
2570 2570
2571 2571 int
2572 2572 nxge_hio_rxdma_bind_intr(nxge_t *nxge, rx_rcr_ring_t *ring, int channel)
2573 2573 {
2574 2574 nxge_hio_dc_t *dc;
2575 2575 nxge_ldgv_t *control;
2576 2576 nxge_ldg_t *group;
2577 2577 nxge_ldv_t *device;
2578 2578
2579 2579 /*
2580 2580 * Find the DMA channel.
2581 2581 */
2582 2582 if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_RX, channel))) {
2583 2583 return (NXGE_ERROR);
2584 2584 }
2585 2585
2586 2586 /*
2587 2587 * Get the control structure.
2588 2588 */
2589 2589 control = nxge->ldgvp;
2590 2590 if (control == NULL) {
2591 2591 return (NXGE_ERROR);
2592 2592 }
2593 2593
2594 2594 group = &control->ldgp[dc->ldg.vector];
2595 2595 device = &control->ldvp[dc->ldg.ldsv];
2596 2596
2597 2597 MUTEX_ENTER(&ring->lock);
2598 2598 ring->ldgp = group;
2599 2599 ring->ldvp = device;
2600 2600 MUTEX_EXIT(&ring->lock);
2601 2601
2602 2602 return (NXGE_OK);
2603 2603 }
2604 2604 #endif /* if defined(sun4v) */
↓ open down ↓ |
1785 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX