Print this page
4778 iprb shouldn't abuse ddi_get_time(9f)
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Garrett D'Amore <garrett@damore.org>
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/iprb/iprb.c
+++ new/usr/src/uts/common/io/iprb/iprb.c
1 1 /*
2 2 * This file and its contents are supplied under the terms of the
3 3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 4 * You may only use this file in accordance with the terms of version
5 5 * 1.0 of the CDDL.
6 6 *
7 7 * A full copy of the text of the CDDL should have accompanied this
8 8 * source. A copy of the CDDL is also available via the Internet at
9 9 * http://www.illumos.org/license/CDDL.
10 10 */
11 11
12 12 /*
13 13 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
14 14 */
15 15
16 16 /*
17 17 * Intel Pro/100B Ethernet Driver
18 18 */
19 19
20 20 #include <sys/types.h>
21 21 #include <sys/modctl.h>
22 22 #include <sys/conf.h>
23 23 #include <sys/kmem.h>
24 24 #include <sys/ksynch.h>
25 25 #include <sys/cmn_err.h>
26 26 #include <sys/note.h>
27 27 #include <sys/pci.h>
28 28 #include <sys/pci_cap.h>
29 29 #include <sys/ethernet.h>
30 30 #include <sys/mii.h>
31 31 #include <sys/miiregs.h>
32 32 #include <sys/mac.h>
33 33 #include <sys/mac_ether.h>
34 34 #include <sys/ethernet.h>
35 35 #include <sys/vlan.h>
36 36 #include <sys/list.h>
37 37 #include <sys/sysmacros.h>
38 38 #include <sys/varargs.h>
39 39 #include <sys/stream.h>
40 40 #include <sys/strsun.h>
41 41 #include <sys/ddi.h>
42 42 #include <sys/sunddi.h>
43 43
44 44 #include "iprb.h"
45 45 #include "rcvbundl.h"
46 46
47 47 /*
48 48 * Intel has openly documented the programming interface for these
49 49 * parts in the "Intel 8255x 10/100 Mbps Ethernet Controller Family
50 50 * Open Source Software Developer Manual".
51 51 *
52 52 * While some open source systems have utilized many of the features
53 53 * of some models in this family (especially scatter gather and IP
54 54 * checksum support), we have elected to offer only the basic
55 55 * functionality. These are only 10/100 parts, and the additional
56 56 * complexity is not justified by the minimal performance benefit.
57 57 * KISS. So, we are only supporting the simple 82557 features.
58 58 */
59 59
60 60 static uint16_t iprb_mii_read(void *, uint8_t, uint8_t);
61 61 static void iprb_mii_write(void *, uint8_t, uint8_t, uint16_t);
62 62 static void iprb_mii_notify(void *, link_state_t);
63 63 static int iprb_attach(dev_info_t *);
64 64 static int iprb_detach(dev_info_t *);
65 65 static int iprb_quiesce(dev_info_t *);
66 66 static int iprb_suspend(dev_info_t *);
67 67 static int iprb_resume(dev_info_t *);
68 68 static int iprb_m_stat(void *, uint_t, uint64_t *);
69 69 static int iprb_m_start(void *);
70 70 static void iprb_m_stop(void *);
71 71 static int iprb_m_promisc(void *, boolean_t);
72 72 static int iprb_m_multicst(void *, boolean_t, const uint8_t *);
73 73 static int iprb_m_unicst(void *, const uint8_t *);
74 74 static mblk_t *iprb_m_tx(void *, mblk_t *);
75 75 static void iprb_m_ioctl(void *, queue_t *, mblk_t *);
76 76 static int iprb_m_setprop(void *, const char *, mac_prop_id_t, uint_t,
77 77 const void *);
78 78 static int iprb_m_getprop(void *, const char *, mac_prop_id_t, uint_t,
79 79 void *);
80 80 static void iprb_m_propinfo(void *, const char *, mac_prop_id_t,
81 81 mac_prop_info_handle_t);
82 82 static void iprb_destroy(iprb_t *);
83 83 static int iprb_configure(iprb_t *);
84 84 static void iprb_eeprom_sendbits(iprb_t *, uint32_t, uint8_t);
85 85 static uint16_t iprb_eeprom_read(iprb_t *, uint16_t);
86 86 static void iprb_identify(iprb_t *);
87 87 static int iprb_cmd_submit(iprb_t *, uint16_t);
88 88 static void iprb_cmd_reclaim(iprb_t *);
89 89 static int iprb_cmd_ready(iprb_t *);
90 90 static int iprb_cmd_drain(iprb_t *);
91 91 static void iprb_rx_add(iprb_t *);
92 92 static void iprb_rx_init(iprb_t *);
93 93 static mblk_t *iprb_rx(iprb_t *);
94 94 static mblk_t *iprb_send(iprb_t *, mblk_t *);
95 95 static uint_t iprb_intr(caddr_t, caddr_t);
96 96 static void iprb_periodic(void *);
97 97 static int iprb_add_intr(iprb_t *);
98 98 static int iprb_dma_alloc(iprb_t *, iprb_dma_t *, size_t);
99 99 static void iprb_dma_free(iprb_dma_t *);
100 100 static iprb_dma_t *iprb_cmd_next(iprb_t *);
101 101 static int iprb_set_config(iprb_t *);
102 102 static int iprb_set_unicast(iprb_t *);
103 103 static int iprb_set_multicast(iprb_t *);
104 104 static int iprb_set_ucode(iprb_t *);
105 105 static void iprb_update_stats(iprb_t *);
106 106 static int iprb_start(iprb_t *);
107 107 static void iprb_stop(iprb_t *);
108 108 static int iprb_ddi_attach(dev_info_t *, ddi_attach_cmd_t);
109 109 static int iprb_ddi_detach(dev_info_t *, ddi_detach_cmd_t);
110 110 static void iprb_error(iprb_t *, const char *, ...);
111 111
/* MII callback vector handed to the common mii(9E) support module. */
static mii_ops_t iprb_mii_ops = {
	MII_OPS_VERSION,
	iprb_mii_read,		/* read a PHY register */
	iprb_mii_write,		/* write a PHY register */
	iprb_mii_notify,	/* link state change notification */
	NULL,			/* reset */
};
119 119
/*
 * mac(9E) callback vector.  The first word flags which of the
 * optional entry points (ioctl, setprop, getprop, propinfo) are
 * actually provided below.
 */
static mac_callbacks_t iprb_m_callbacks = {
	MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
	iprb_m_stat,
	iprb_m_start,
	iprb_m_stop,
	iprb_m_promisc,
	iprb_m_multicst,
	iprb_m_unicst,
	iprb_m_tx,
	NULL,
	iprb_m_ioctl,		/* mc_ioctl */
	NULL,			/* mc_getcapab */
	NULL,			/* mc_open */
	NULL,			/* mc_close */
	iprb_m_setprop,
	iprb_m_getprop,
	iprb_m_propinfo
};
138 138
139 139
/*
 * Stream information
 */
DDI_DEFINE_STREAM_OPS(iprb_devops, nulldev, nulldev,
    iprb_ddi_attach, iprb_ddi_detach, nodev, NULL, D_MP, NULL, iprb_quiesce);

/* Loadable module linkage: a single driver module. */
static struct modldrv iprb_modldrv = {
	&mod_driverops,		/* drv_modops */
	"Intel 8255x Ethernet",	/* drv_linkinfo */
	&iprb_devops		/* drv_dev_ops */
};

static struct modlinkage iprb_modlinkage = {
	MODREV_1,		/* ml_rev */
	{ &iprb_modldrv, NULL }	/* ml_linkage */
};
156 156
157 157
/* Register access attributes: little-endian device, strictly ordered. */
static ddi_device_acc_attr_t acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/* DMA buffer access attributes: raw bytes, store caching permitted. */
static ddi_device_acc_attr_t buf_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STORECACHING_OK_ACC
};
169 169
170 170 /*
171 171 * The 8225x is a 32-bit addressing engine, but it can only address up
172 172 * to 31 bits on a single transaction. (Far less in reality it turns
173 173 * out.) Statistics buffers have to be 16-byte aligned, and as we
174 174 * allocate individual data pieces for other things, there is no
175 175 * compelling reason to use another attribute with support for less
176 176 * strict alignment.
177 177 */
/* Single-cookie (sgllen 1), 16-byte aligned, 31-bit transfer limit. */
static ddi_dma_attr_t dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	0xFFFFFFFFU,		/* dma_attr_addr_hi */
	0x7FFFFFFFU,		/* dma_attr_count_max */
	16,			/* dma_attr_align */
	0x100,			/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	0xFFFFFFFFU,		/* dma_attr_maxxfer */
	0xFFFFFFFFU,		/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};
192 192
/*
 * Expand the per-device receive-bundling microcode images supplied by
 * rcvbundl.h into static arrays (e.g. D101_A_WORDS).
 */
#define	DECL_UCODE(x) \
	static const uint32_t x ## _WORDS[] = x ## _RCVBUNDLE_UCODE
DECL_UCODE(D101_A);
DECL_UCODE(D101_B0);
DECL_UCODE(D101M_B);
DECL_UCODE(D101S);
DECL_UCODE(D102_B);
DECL_UCODE(D102_C);
DECL_UCODE(D102_E);

/* Ethernet broadcast address. */
static uint8_t iprb_bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
204 204
205 205 /*
206 206 * We don't bother allowing for tuning of the CPU saver algorithm.
207 207 * The ucode has reasonable defaults built-in. However, some variants
208 208 * apparently have bug fixes delivered via this ucode, so we still
209 209 * need to support the ucode upload.
210 210 */
/* One selectable microcode image, keyed by silicon revision. */
typedef struct {
	uint8_t rev;		/* chip revision the image applies to */
	uint8_t length;		/* image length in 32-bit words */
	const uint32_t *ucode;	/* the microcode words themselves */
} iprb_ucode_t;
216 216
/* Expands to the length/pointer initializer pair for one image. */
#define	UCODE(x)	\
	sizeof (x ## _WORDS) / sizeof (uint32_t), x ## _WORDS

/* Revision -> microcode lookup table; a zero rev/length terminates. */
static const iprb_ucode_t iprb_ucode[] = {
	{ REV_82558_A4, UCODE(D101_A) },
	{ REV_82558_B0, UCODE(D101_B0) },
	{ REV_82559_A0, UCODE(D101M_B) },
	{ REV_82559S_A, UCODE(D101S) },
	{ REV_82550, UCODE(D102_B) },
	{ REV_82550_C, UCODE(D102_C) },
	{ REV_82551_F, UCODE(D102_E) },
	{ 0 },
};
230 230
231 231 int
232 232 _init(void)
233 233 {
234 234 int rv;
235 235 mac_init_ops(&iprb_devops, "iprb");
236 236 if ((rv = mod_install(&iprb_modlinkage)) != DDI_SUCCESS) {
237 237 mac_fini_ops(&iprb_devops);
238 238 }
239 239 return (rv);
240 240 }
241 241
242 242 int
243 243 _fini(void)
244 244 {
245 245 int rv;
246 246 if ((rv = mod_remove(&iprb_modlinkage)) == DDI_SUCCESS) {
247 247 mac_fini_ops(&iprb_devops);
248 248 }
249 249 return (rv);
250 250 }
251 251
252 252 int
253 253 _info(struct modinfo *modinfop)
254 254 {
255 255 return (mod_info(&iprb_modlinkage, modinfop));
256 256 }
257 257
/*
 * Attach one device instance: map config and register space, reset
 * the chip, read the EEPROM (factory MAC address and errata
 * indicators), allocate the command/receive DMA rings, install the
 * interrupt handler, and register with mac(9E).  Every failure path
 * unwinds all partial state through iprb_destroy().
 */
int
iprb_attach(dev_info_t *dip)
{
	iprb_t *ip;
	uint16_t w;
	int i;
	mac_register_t *macp;

	ip = kmem_zalloc(sizeof (*ip), KM_SLEEP);
	ddi_set_driver_private(dip, ip);
	ip->dip = dip;

	list_create(&ip->mcast, sizeof (struct iprb_mcast),
	    offsetof(struct iprb_mcast, node));

	/* we don't support high level interrupts, so we don't need cookies */
	mutex_init(&ip->culock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&ip->rulock, NULL, MUTEX_DRIVER, NULL);

	if (pci_config_setup(dip, &ip->pcih) != DDI_SUCCESS) {
		iprb_error(ip, "unable to map configuration space");
		iprb_destroy(ip);
		return (DDI_FAILURE);
	}

	if (ddi_regs_map_setup(dip, 1, &ip->regs, 0, 0, &acc_attr,
	    &ip->regsh) != DDI_SUCCESS) {
		iprb_error(ip, "unable to map device registers");
		iprb_destroy(ip);
		return (DDI_FAILURE);
	}

	/* Reset, but first go into idle state */
	PUT32(ip, CSR_PORT, PORT_SEL_RESET);
	drv_usecwait(10);
	PUT32(ip, CSR_PORT, PORT_SW_RESET);
	drv_usecwait(10);
	PUT8(ip, CSR_INTCTL, INTCTL_MASK);
	(void) GET8(ip, CSR_INTCTL);

	/*
	 * Precalculate watchdog times.
	 * NOTE(review): these are now the raw TX_WATCHDOG/RX_WATCHDOG
	 * values (no drv_usectohz conversion) -- consumers are assumed
	 * to compare them against gethrtime()-derived intervals; the
	 * units should be confirmed against the watchdog checks.
	 */
	ip->tx_timeout = TX_WATCHDOG;
	ip->rx_timeout = RX_WATCHDOG;

	iprb_identify(ip);

	/* Obtain our factory MAC address (3 little-endian EEPROM words) */
	w = iprb_eeprom_read(ip, 0);
	ip->factaddr[0] = w & 0xff;
	ip->factaddr[1] = w >> 8;
	w = iprb_eeprom_read(ip, 1);
	ip->factaddr[2] = w & 0xff;
	ip->factaddr[3] = w >> 8;
	w = iprb_eeprom_read(ip, 2);
	ip->factaddr[4] = w & 0xff;
	ip->factaddr[5] = w >> 8;
	bcopy(ip->factaddr, ip->curraddr, 6);

	if (ip->resumebug) {
		/*
		 * Generally, most devices we will ever see will
		 * already have fixed firmware. Since I can't verify
		 * the validity of the fix (no suitably downrev
		 * hardware), we'll just do our best to avoid it for
		 * devices that exhibit this behavior.
		 */
		if ((iprb_eeprom_read(ip, 10) & 0x02) == 0) {
			/* EEPROM fix was already applied, assume safe. */
			ip->resumebug = B_FALSE;
		}
	}

	if ((iprb_eeprom_read(ip, 3) & 0x3) != 0x3) {
		cmn_err(CE_CONT, "?Enabling RX errata workaround.\n");
		ip->rxhangbug = B_TRUE;
	}

	/* Determine whether we have an MII or a legacy 80c24 */
	w = iprb_eeprom_read(ip, 6);
	if ((w & 0x3f00) != 0x0600) {
		if ((ip->miih = mii_alloc(ip, dip, &iprb_mii_ops)) == NULL) {
			iprb_error(ip, "unable to allocate MII ops vector");
			iprb_destroy(ip);
			return (DDI_FAILURE);
		}
		if (ip->canpause) {
			mii_set_pauseable(ip->miih, B_TRUE, B_FALSE);
		}
	}

	/* Allocate cmds and tx region */
	for (i = 0; i < NUM_TX; i++) {
		/* Command blocks */
		if (iprb_dma_alloc(ip, &ip->cmds[i], CB_SIZE) != DDI_SUCCESS) {
			iprb_destroy(ip);
			return (DDI_FAILURE);
		}
	}

	for (i = 0; i < NUM_TX; i++) {
		iprb_dma_t *cb = &ip->cmds[i];
		/* Link the command blocks into a ring */
		PUTCB32(cb, CB_LNK_OFFSET, (ip->cmds[(i + 1) % NUM_TX].paddr));
	}

	for (i = 0; i < NUM_RX; i++) {
		/* Rx packet buffers */
		if (iprb_dma_alloc(ip, &ip->rxb[i], RFD_SIZE) != DDI_SUCCESS) {
			iprb_destroy(ip);
			return (DDI_FAILURE);
		}
	}
	if (iprb_dma_alloc(ip, &ip->stats, STATS_SIZE) != DDI_SUCCESS) {
		iprb_destroy(ip);
		return (DDI_FAILURE);
	}

	if (iprb_add_intr(ip) != DDI_SUCCESS) {
		iprb_destroy(ip);
		return (DDI_FAILURE);
	}

	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
		iprb_error(ip, "unable to allocate mac structure");
		iprb_destroy(ip);
		return (DDI_FAILURE);
	}

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = ip;
	macp->m_dip = dip;
	macp->m_src_addr = ip->curraddr;
	macp->m_callbacks = &iprb_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = ETHERMTU;
	macp->m_margin = VLAN_TAGSZ;
	if (mac_register(macp, &ip->mach) != 0) {
		iprb_error(ip, "unable to register mac with framework");
		mac_free(macp);
		iprb_destroy(ip);
		return (DDI_FAILURE);
	}

	/* the mac_register_t is only needed during registration */
	mac_free(macp);
	return (DDI_SUCCESS);
}
406 406
407 407 int
408 408 iprb_detach(dev_info_t *dip)
409 409 {
410 410 iprb_t *ip;
411 411
412 412 ip = ddi_get_driver_private(dip);
413 413 ASSERT(ip != NULL);
414 414
415 415 if (mac_disable(ip->mach) != 0)
416 416 return (DDI_FAILURE);
417 417
418 418 (void) mac_unregister(ip->mach);
419 419 iprb_destroy(ip);
420 420 return (DDI_SUCCESS);
421 421 }
422 422
423 423 int
424 424 iprb_add_intr(iprb_t *ip)
425 425 {
426 426 int actual;
427 427
428 428 if (ddi_intr_alloc(ip->dip, &ip->intrh, DDI_INTR_TYPE_FIXED, 0, 1,
429 429 &actual, DDI_INTR_ALLOC_STRICT) != DDI_SUCCESS) {
430 430 iprb_error(ip, "failed allocating interrupt handle");
431 431 return (DDI_FAILURE);
432 432 }
433 433
434 434 if (ddi_intr_add_handler(ip->intrh, iprb_intr, ip, NULL) !=
435 435 DDI_SUCCESS) {
436 436 (void) ddi_intr_free(ip->intrh);
437 437 ip->intrh = NULL;
438 438 iprb_error(ip, "failed adding interrupt handler");
439 439 return (DDI_FAILURE);
440 440 }
441 441 if (ddi_intr_enable(ip->intrh) != DDI_SUCCESS) {
442 442 (void) ddi_intr_remove_handler(ip->intrh);
443 443 (void) ddi_intr_free(ip->intrh);
444 444 ip->intrh = NULL;
445 445 iprb_error(ip, "failed enabling interrupt");
446 446 return (DDI_FAILURE);
447 447 }
448 448 return (DDI_SUCCESS);
449 449 }
450 450
/*
 * Allocate and bind one consistent DMA region of `size' bytes into
 * handle `h', zeroing it and recording the device address in
 * h->paddr.  On failure the partially initialized handle is left as
 * is: all callers unwind via iprb_destroy(), which runs
 * iprb_dma_free() over every handle.
 */
int
iprb_dma_alloc(iprb_t *ip, iprb_dma_t *h, size_t size)
{
	size_t rlen;
	ddi_dma_cookie_t dmac;
	uint_t ndmac;

	if (ddi_dma_alloc_handle(ip->dip, &dma_attr, DDI_DMA_SLEEP, NULL,
	    &h->dmah) != DDI_SUCCESS) {
		iprb_error(ip, "unable to allocate dma handle");
		return (DDI_FAILURE);
	}
	if (ddi_dma_mem_alloc(h->dmah, size, &buf_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, &h->vaddr, &rlen, &h->acch) != DDI_SUCCESS) {
		iprb_error(ip, "unable to allocate dma memory");
		return (DDI_FAILURE);
	}
	bzero(h->vaddr, size);
	/* dma_attr_sgllen == 1, so exactly one cookie comes back */
	if (ddi_dma_addr_bind_handle(h->dmah, NULL, h->vaddr, size,
	    DDI_DMA_CONSISTENT | DDI_DMA_RDWR, DDI_DMA_SLEEP, NULL,
	    &dmac, &ndmac) != DDI_DMA_MAPPED) {
		iprb_error(ip, "unable to map command memory");
		return (DDI_FAILURE);
	}
	h->paddr = dmac.dmac_address;
	return (DDI_SUCCESS);
}
478 478
479 479 void
480 480 iprb_dma_free(iprb_dma_t *h)
481 481 {
482 482 if (h->paddr != 0)
483 483 (void) ddi_dma_unbind_handle(h->dmah);
484 484 h->paddr = 0;
485 485 if (h->acch != NULL)
486 486 ddi_dma_mem_free(&h->acch);
487 487 h->acch = NULL;
488 488 if (h->dmah != NULL)
489 489 ddi_dma_free_handle(&h->dmah);
490 490 h->dmah = NULL;
491 491 }
492 492
/*
 * Tear down everything iprb_attach() may have set up, in reverse
 * order of acquisition.  Safe to call with a partially initialized
 * soft state: each step first checks whether that resource was
 * actually acquired.
 */
void
iprb_destroy(iprb_t *ip)
{
	int i;
	iprb_mcast_t *mc;

	/* shut down interrupts */
	if (ip->intrh != NULL) {
		(void) ddi_intr_disable(ip->intrh);
		(void) ddi_intr_remove_handler(ip->intrh);
		(void) ddi_intr_free(ip->intrh);
	}
	/* release DMA resources */
	for (i = 0; i < NUM_TX; i++) {
		iprb_dma_free(&ip->cmds[i]);
	}
	for (i = 0; i < NUM_RX; i++) {
		iprb_dma_free(&ip->rxb[i]);
	}
	iprb_dma_free(&ip->stats);

	if (ip->miih)
		mii_free(ip->miih);

	/* clean up the multicast list */
	while ((mc = list_head(&ip->mcast)) != NULL) {
		list_remove(&ip->mcast, mc);
		kmem_free(mc, sizeof (*mc));
	}

	/* tear down register mappings */
	if (ip->pcih)
		pci_config_teardown(&ip->pcih);
	if (ip->regsh)
		ddi_regs_map_free(&ip->regsh);

	/* clean the dip */
	ddi_set_driver_private(ip->dip, NULL);

	list_destroy(&ip->mcast);
	mutex_destroy(&ip->culock);
	mutex_destroy(&ip->rulock);

	/* and finally toss the structure itself */
	kmem_free(ip, sizeof (*ip));
}
539 539
/*
 * Identify which member of the 8255x family this is and record the
 * resulting feature/errata flags: pause (flow control) support, MWI
 * capability, 82557-compatibility mode, and the CU-resume bug.
 */
void
iprb_identify(iprb_t *ip)
{
	ip->devid = pci_config_get16(ip->pcih, PCI_CONF_DEVID);
	ip->revid = pci_config_get8(ip->pcih, PCI_CONF_REVID);

	switch (ip->devid) {
	case 0x1229:	/* 8255x family */
	case 0x1030:	/* Intel InBusiness */

		if (ip->revid >= REV_82558_A4) {
			ip->canpause = B_TRUE;
			ip->canmwi = B_TRUE;
		} else {
			/* pre-82558 silicon: restrict to 82557 features */
			ip->is557 = B_TRUE;
		}
		if (ip->revid >= REV_82559_A0)
			ip->resumebug = B_TRUE;
		break;

	case 0x1209:	/* Embedded 82559ER */
		ip->canpause = B_TRUE;
		ip->resumebug = B_TRUE;
		ip->canmwi = B_TRUE;
		break;

	case 0x2449:	/* ICH2 */
	case 0x1031:	/* Pro/100 VE (ICH3) */
	case 0x1032:	/* Pro/100 VE (ICH3) */
	case 0x1033:	/* Pro/100 VM (ICH3) */
	case 0x1034:	/* Pro/100 VM (ICH3) */
	case 0x1038:	/* Pro/100 VM (ICH3) */
		ip->resumebug = B_TRUE;
		if (ip->revid >= REV_82558_A4)
			ip->canpause = B_TRUE;
		break;

	default:
		if (ip->revid >= REV_82558_A4)
			ip->canpause = B_TRUE;
		break;
	}

	/* Allow property override MWI support - not normally needed. */
	if (ddi_prop_get_int(DDI_DEV_T_ANY, ip->dip, 0, "MWIEnable", 1) == 0) {
		ip->canmwi = B_FALSE;
	}
}
588 588
589 589 void
590 590 iprb_eeprom_sendbits(iprb_t *ip, uint32_t val, uint8_t nbits)
591 591 {
592 592 uint32_t mask;
593 593 uint16_t x;
594 594
595 595 mask = 1U << (nbits - 1);
596 596 while (mask) {
597 597 x = (mask & val) ? EEPROM_EEDI : 0;
598 598 PUT16(ip, CSR_EECTL, x | EEPROM_EECS);
599 599 drv_usecwait(100);
600 600 PUT16(ip, CSR_EECTL, x | EEPROM_EESK | EEPROM_EECS);
601 601 drv_usecwait(100);
602 602 PUT16(ip, CSR_EECTL, x | EEPROM_EECS);
603 603 drv_usecwait(100);
604 604 mask >>= 1;
605 605 }
606 606 }
607 607
/*
 * Read one 16-bit word at `address' from the serial EEPROM by
 * bit-banging the EEPROM control register.  The first call (which
 * must use address 0) also discovers the device's address width by
 * watching for the dummy-0 acknowledgement bit, caching the result
 * in ip->eeprom_bits for subsequent reads.
 */
uint16_t
iprb_eeprom_read(iprb_t *ip, uint16_t address)
{
	uint16_t val;
	int mask;
	uint16_t n;
	uint16_t bits;

	/* if we don't know the address size yet call again to determine it */
	if ((address != 0) && (ip->eeprom_bits == 0))
		(void) iprb_eeprom_read(ip, 0);

	if ((bits = ip->eeprom_bits) == 0) {
		/* probe with the widest plausible address (8 bits) */
		bits = 8;
		ASSERT(address == 0);
	}
	/* enable the EEPROM chip select */
	PUT16(ip, CSR_EECTL, EEPROM_EECS);
	drv_usecwait(100);

	/* send a read command (opcode 110, 3 bits) */
	iprb_eeprom_sendbits(ip, 6, 3);
	n = 0;
	for (mask = (1U << (bits - 1)); mask != 0; mask >>= 1) {
		uint16_t x = (mask & address) ? EEPROM_EEDI : 0;
		PUT16(ip, CSR_EECTL, x | EEPROM_EECS);
		drv_usecwait(100);
		PUT16(ip, CSR_EECTL, x | EEPROM_EESK | EEPROM_EECS);
		drv_usecwait(100);
		PUT16(ip, CSR_EECTL, x | EEPROM_EECS);
		drv_usecwait(100);

		n++;
		/* check the dummy 0 bit */
		if ((GET16(ip, CSR_EECTL) & EEPROM_EEDO) == 0) {
			if (ip->eeprom_bits == 0) {
				/* first read: record the detected width */
				ip->eeprom_bits = n;
				cmn_err(CE_CONT, "?EEPROM size %d words.\n",
				    1U << ip->eeprom_bits);
			}
			break;
		}
	}
	if (n != ip->eeprom_bits) {
		iprb_error(ip, "cannot determine EEPROM size (%d, %d)",
		    ip->eeprom_bits, n);
	}

	/* shift out a 16-bit word, MSB first */
	val = 0;
	for (mask = 0x8000; mask; mask >>= 1) {
		PUT16(ip, CSR_EECTL, EEPROM_EECS | EEPROM_EESK);
		drv_usecwait(100);
		if (GET16(ip, CSR_EECTL) & EEPROM_EEDO)
			val |= mask;
		drv_usecwait(100);
		PUT16(ip, CSR_EECTL, EEPROM_EECS);
		drv_usecwait(100);
	}

	/* and disable the eeprom */
	PUT16(ip, CSR_EECTL, 0);
	drv_usecwait(100);

	return (val);
}
674 674
675 675 int
676 676 iprb_cmd_ready(iprb_t *ip)
677 677 {
678 678 /* wait for pending SCB commands to be accepted */
679 679 for (int cnt = 1000000; cnt != 0; cnt -= 10) {
680 680 if (GET8(ip, CSR_CMD) == 0) {
681 681 return (DDI_SUCCESS);
682 682 }
683 683 drv_usecwait(10);
684 684 }
685 685 iprb_error(ip, "timeout waiting for chip to become ready");
686 686 return (DDI_FAILURE);
687 687 }
688 688
/*
 * Reclaim completed command blocks from the tail of the CB ring.
 * Walks forward past every CB whose complete (C) status bit is set,
 * and maintains the transmit watchdog timestamp: cleared once the
 * ring fully drains, otherwise restamped with gethrtime() while
 * commands remain outstanding.
 */
void
iprb_cmd_reclaim(iprb_t *ip)
{
	while (ip->cmd_count) {
		iprb_dma_t *cb = &ip->cmds[ip->cmd_tail];

		SYNCCB(cb, CB_STS_OFFSET, 2, DDI_DMA_SYNC_FORKERNEL);
		if ((GETCB16(cb, CB_STS_OFFSET) & CB_STS_C) == 0) {
			/* oldest outstanding CB not done yet; stop here */
			break;
		}

		ip->cmd_tail++;
		ip->cmd_tail %= NUM_TX;
		ip->cmd_count--;
		if (ip->cmd_count == 0) {
			ip->tx_wdog = 0;
		} else {
			ip->tx_wdog = gethrtime();
		}
	}
}
710 710
711 711 int
712 712 iprb_cmd_drain(iprb_t *ip)
713 713 {
714 714 for (int i = 1000000; i; i -= 10) {
715 715 iprb_cmd_reclaim(ip);
716 716 if (ip->cmd_count == 0)
717 717 return (DDI_SUCCESS);
718 718 drv_usecwait(10);
719 719 }
720 720 iprb_error(ip, "time out waiting for commands to drain");
721 721 return (DDI_FAILURE);
722 722 }
723 723
/*
 * Queue command `cmd' into the next free command block and nudge the
 * CU to (re)start.  Each new CB carries the suspend (S) bit, which is
 * then stripped from the previously submitted CB so the device walks
 * on to the new one; the CB that fills the ring also gets the
 * interrupt (I) bit so we hear when the ring drains.
 */
int
iprb_cmd_submit(iprb_t *ip, uint16_t cmd)
{
	iprb_dma_t *ncb = &ip->cmds[ip->cmd_head];
	iprb_dma_t *lcb = &ip->cmds[ip->cmd_last];

	/* If this command will consume the last CB, interrupt when done */
	ASSERT((ip->cmd_count) < NUM_TX);
	if (ip->cmd_count == (NUM_TX - 1)) {
		cmd |= CB_CMD_I;
	}

	/* clear the status entry */
	PUTCB16(ncb, CB_STS_OFFSET, 0);

	/* suspend upon completion of this new command */
	cmd |= CB_CMD_S;
	PUTCB16(ncb, CB_CMD_OFFSET, cmd);
	SYNCCB(ncb, 0, 0, DDI_DMA_SYNC_FORDEV);

	/* clear the suspend flag from the last submitted command */
	SYNCCB(lcb, CB_CMD_OFFSET, 2, DDI_DMA_SYNC_FORKERNEL);
	PUTCB16(lcb, CB_CMD_OFFSET, GETCB16(lcb, CB_CMD_OFFSET) & ~CB_CMD_S);
	SYNCCB(lcb, CB_CMD_OFFSET, 2, DDI_DMA_SYNC_FORDEV);

	/*
	 * If the chip has a resume bug, then we need to try this as a work
	 * around.  Some anecdotal evidence is that this will help solve
	 * the resume bug.  Its a performance hit, but only if the EEPROM
	 * is not updated.  (In theory we could do this only for 10Mbps HDX,
	 * but since it should just about never get used, we keep it simple.)
	 */
	if (ip->resumebug) {
		if (iprb_cmd_ready(ip) != DDI_SUCCESS)
			return (DDI_FAILURE);
		PUT8(ip, CSR_CMD, CUC_NOP);
		(void) GET8(ip, CSR_CMD);
		drv_usecwait(1);
	}

	/* wait for the SCB to be ready to accept a new command */
	if (iprb_cmd_ready(ip) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Finally we can resume the CU.  Note that if this the first
	 * command in the sequence (i.e. if the CU is IDLE), or if the
	 * CU is already busy working, then this CU resume command
	 * will not have any effect.
	 */
	PUT8(ip, CSR_CMD, CUC_RESUME);
	(void) GET8(ip, CSR_CMD);	/* flush CSR */

	/* arm the tx watchdog with a high-resolution timestamp */
	ip->tx_wdog = gethrtime();
	ip->cmd_last = ip->cmd_head;
	ip->cmd_head++;
	ip->cmd_head %= NUM_TX;
	ip->cmd_count++;

	return (DDI_SUCCESS);
}
786 786
787 787 iprb_dma_t *
788 788 iprb_cmd_next(iprb_t *ip)
789 789 {
790 790 if (ip->cmd_count == NUM_TX) {
791 791 return (NULL);
792 792 }
793 793 ASSERT(ip->cmd_count < NUM_TX);
794 794 return (&ip->cmds[ip->cmd_head]);
795 795 }
796 796
797 797 int
798 798 iprb_set_unicast(iprb_t *ip)
799 799 {
800 800 iprb_dma_t *cb;
801 801
802 802 ASSERT(mutex_owned(&ip->culock));
803 803
804 804 if ((cb = iprb_cmd_next(ip)) == NULL)
805 805 return (DDI_FAILURE);
806 806
807 807 PUTCBEA(cb, CB_IAS_ADR_OFFSET, ip->curraddr);
808 808 return (iprb_cmd_submit(ip, CB_CMD_IAS));
809 809 }
810 810
811 811 int
812 812 iprb_set_multicast(iprb_t *ip)
813 813 {
814 814 iprb_dma_t *cb;
815 815 iprb_mcast_t *mc;
816 816 int i;
817 817 list_t *l;
818 818
819 819 ASSERT(mutex_owned(&ip->culock));
820 820
821 821 if ((ip->nmcast <= 0) || (ip->nmcast > CB_MCS_CNT_MAX)) {
822 822 /*
823 823 * Only send the list if the total number of multicast
824 824 * address is nonzero and small enough to fit. We
825 825 * don't error out if it is too big, because in that
826 826 * case we will use the "allmulticast" support
827 827 * via iprb_set_config instead.
828 828 */
829 829 return (DDI_SUCCESS);
830 830 }
831 831
832 832 if ((cb = iprb_cmd_next(ip)) == NULL) {
833 833 return (DDI_FAILURE);
834 834 }
835 835
836 836 l = &ip->mcast;
837 837 for (mc = list_head(l), i = 0; mc; mc = list_next(l, mc), i++) {
838 838 PUTCBEA(cb, CB_MCS_ADR_OFFSET + (i * 6), mc->addr);
839 839 }
840 840 ASSERT(i == ip->nmcast);
841 841 PUTCB16(cb, CB_MCS_CNT_OFFSET, i);
842 842 return (iprb_cmd_submit(ip, CB_CMD_MCS));
843 843 }
844 844
/*
 * Submit a CONFIGURE command reflecting the current soft state:
 * promiscuous mode, MWI capability, MII vs. legacy PHY, pause
 * support, 82557-compat mode, and multicast-overflow "allmulti".
 * The byte layout follows the configure block described in the
 * Intel 8255x Open Source Software Developer Manual; only the bits
 * this driver varies are computed here, the rest are fixed values.
 */
int
iprb_set_config(iprb_t *ip)
{
	iprb_dma_t *cb;

	ASSERT(mutex_owned(&ip->culock));
	if ((cb = iprb_cmd_next(ip)) == NULL) {
		return (DDI_FAILURE);
	}
	PUTCB8(cb, CB_CONFIG_OFFSET + 0, 0x16);
	PUTCB8(cb, CB_CONFIG_OFFSET + 1, 0x8);
	PUTCB8(cb, CB_CONFIG_OFFSET + 2, 0);
	PUTCB8(cb, CB_CONFIG_OFFSET + 3, (ip->canmwi ? 1 : 0));
	PUTCB8(cb, CB_CONFIG_OFFSET + 4, 0);
	PUTCB8(cb, CB_CONFIG_OFFSET + 5, 0);
	PUTCB8(cb, CB_CONFIG_OFFSET + 6, (ip->promisc ? 0x80 : 0) | 0x3a);
	PUTCB8(cb, CB_CONFIG_OFFSET + 7, (ip->promisc ? 0 : 0x1) | 2);
	PUTCB8(cb, CB_CONFIG_OFFSET + 8, (ip->miih ? 0x1 : 0));
	PUTCB8(cb, CB_CONFIG_OFFSET + 9, 0);
	PUTCB8(cb, CB_CONFIG_OFFSET + 10, 0x2e);
	PUTCB8(cb, CB_CONFIG_OFFSET + 11, 0);
	PUTCB8(cb, CB_CONFIG_OFFSET + 12, (ip->is557 ? 0 : 1) | 0x60);
	PUTCB8(cb, CB_CONFIG_OFFSET + 13, 0);
	PUTCB8(cb, CB_CONFIG_OFFSET + 14, 0xf2);
	PUTCB8(cb, CB_CONFIG_OFFSET + 15,
	    (ip->miih ? 0x80 : 0) | (ip->promisc ? 0x1 : 0) | 0x48);
	PUTCB8(cb, CB_CONFIG_OFFSET + 16, 0);
	PUTCB8(cb, CB_CONFIG_OFFSET + 17, (ip->canpause ? 0x40 : 0));
	PUTCB8(cb, CB_CONFIG_OFFSET + 18, (ip->is557 ? 0 : 0x8) | 0xf2);
	PUTCB8(cb, CB_CONFIG_OFFSET + 19,
	    ((ip->revid < REV_82558_B0) ? 0 : 0x80) |
	    (ip->canpause ? 0x18 : 0));
	PUTCB8(cb, CB_CONFIG_OFFSET + 20, 0x3f);
	/* fall back to hardware "allmulti" when the MCS list overflowed */
	PUTCB8(cb, CB_CONFIG_OFFSET + 21,
	    ((ip->nmcast >= CB_MCS_CNT_MAX) ? 0x8 : 0) | 0x5);

	return (iprb_cmd_submit(ip, CB_CMD_CONFIG));
}
883 883
884 884 int
885 885 iprb_set_ucode(iprb_t *ip)
886 886 {
887 887 iprb_dma_t *cb;
888 888 const iprb_ucode_t *uc = NULL;
889 889 int i;
890 890
891 891 for (i = 0; iprb_ucode[i].length; i++) {
892 892 if (iprb_ucode[i].rev == ip->revid) {
893 893 uc = &iprb_ucode[i];
894 894 break;
895 895 }
896 896 }
897 897 if (uc == NULL) {
898 898 /* no matching firmware found, assume success */
899 899 return (DDI_SUCCESS);
900 900 }
901 901
902 902 ASSERT(mutex_owned(&ip->culock));
903 903 if ((cb = iprb_cmd_next(ip)) == NULL) {
904 904 return (DDI_FAILURE);
905 905 }
906 906 for (i = 0; i < uc->length; i++) {
907 907 PUTCB32(cb, (CB_UCODE_OFFSET + i * 4), uc->ucode[i]);
908 908 }
909 909 return (iprb_cmd_submit(ip, CB_CMD_UCODE));
910 910 }
911 911
912 912 int
913 913 iprb_configure(iprb_t *ip)
914 914 {
915 915 ASSERT(mutex_owned(&ip->culock));
916 916
917 917 if (iprb_cmd_drain(ip) != DDI_SUCCESS)
918 918 return (DDI_FAILURE);
919 919
920 920 if (iprb_set_config(ip) != DDI_SUCCESS)
921 921 return (DDI_FAILURE);
922 922 if (iprb_set_unicast(ip) != DDI_SUCCESS)
923 923 return (DDI_FAILURE);
924 924 if (iprb_set_multicast(ip) != DDI_SUCCESS)
925 925 return (DDI_FAILURE);
926 926
927 927 return (DDI_SUCCESS);
928 928 }
929 929
930 930 void
931 931 iprb_stop(iprb_t *ip)
932 932 {
933 933 /* go idle */
934 934 PUT32(ip, CSR_PORT, PORT_SEL_RESET);
935 935 (void) GET32(ip, CSR_PORT);
936 936 drv_usecwait(50);
937 937
938 938 /* shut off device interrupts */
939 939 PUT8(ip, CSR_INTCTL, INTCTL_MASK);
940 940 }
941 941
/*
 * Bring the device from reset to a running state: full software
 * reset, load the CU and RU base registers, prime the command ring
 * with a NOP and start the CU, upload microcode, initialize the
 * receive frame descriptors and start the RU, then unmask device
 * interrupts.  Both the CU and RU locks must be held.
 */
int
iprb_start(iprb_t *ip)
{
	iprb_dma_t *cb;

	ASSERT(mutex_owned(&ip->rulock));
	ASSERT(mutex_owned(&ip->culock));

	/* Reset, but first go into idle state */
	PUT32(ip, CSR_PORT, PORT_SEL_RESET);
	(void) GET32(ip, CSR_PORT);
	drv_usecwait(50);

	PUT32(ip, CSR_PORT, PORT_SW_RESET);
	(void) GET32(ip, CSR_PORT);
	drv_usecwait(10);
	PUT8(ip, CSR_INTCTL, INTCTL_MASK);

	/* Reset pointers */
	ip->cmd_head = ip->cmd_tail = 0;
	ip->cmd_last = NUM_TX - 1;

	/* load CU base address (0: CB links are absolute addresses) */
	if (iprb_cmd_ready(ip) != DDI_SUCCESS)
		return (DDI_FAILURE);
	PUT32(ip, CSR_GEN_PTR, 0);
	PUT8(ip, CSR_CMD, CUC_CUBASE);
	(void) GET8(ip, CSR_CMD);

	/* load RU base address likewise */
	if (iprb_cmd_ready(ip) != DDI_SUCCESS)
		return (DDI_FAILURE);
	PUT32(ip, CSR_GEN_PTR, 0);
	PUT8(ip, CSR_CMD, RUC_RUBASE);
	(void) GET8(ip, CSR_CMD);

	/* Send a NOP.  This will be the first command seen by the device. */
	cb = iprb_cmd_next(ip);
	ASSERT(cb);
	if (iprb_cmd_submit(ip, CB_CMD_NOP) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* as that was the first command, go ahead and submit a CU start */
	if (iprb_cmd_ready(ip) != DDI_SUCCESS)
		return (DDI_FAILURE);
	PUT32(ip, CSR_GEN_PTR, cb->paddr);
	PUT8(ip, CSR_CMD, CUC_START);
	(void) GET8(ip, CSR_CMD);

	/* Upload firmware. */
	if (iprb_set_ucode(ip) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* Set up RFDs */
	iprb_rx_init(ip);

	PUT32(ip, CSR_GEN_PTR, ip->rxb[0].paddr);
	/* wait for the SCB */
	(void) iprb_cmd_ready(ip);
	PUT8(ip, CSR_CMD, RUC_START);
	(void) GET8(ip, CSR_CMD);	/* flush CSR */

	/* Enable device interrupts */
	PUT8(ip, CSR_INTCTL, 0);
	(void) GET8(ip, CSR_INTCTL);

	return (DDI_SUCCESS);
}
1008 1008
1009 1009 void
1010 1010 iprb_update_stats(iprb_t *ip)
1011 1011 {
1012 1012 iprb_dma_t *sp = &ip->stats;
1013 - time_t tstamp;
1013 + hrtime_t tstamp;
1014 1014 int i;
1015 1015
1016 1016 ASSERT(mutex_owned(&ip->culock));
1017 1017
1018 1018 /* Collect the hardware stats, but don't keep redoing it */
1019 - if ((tstamp = ddi_get_time()) == ip->stats_time) {
1019 + tstamp = gethrtime();
1020 + if (tstamp / NANOSEC == ip->stats_time / NANOSEC)
1020 1021 return;
1021 - }
1022 1022
1023 1023 PUTSTAT(sp, STATS_DONE_OFFSET, 0);
1024 1024 SYNCSTATS(sp, 0, 0, DDI_DMA_SYNC_FORDEV);
1025 1025
1026 1026 if (iprb_cmd_ready(ip) != DDI_SUCCESS)
1027 1027 return;
1028 1028 PUT32(ip, CSR_GEN_PTR, sp->paddr);
1029 1029 PUT8(ip, CSR_CMD, CUC_STATSBASE);
1030 1030 (void) GET8(ip, CSR_CMD);
1031 1031
1032 1032 if (iprb_cmd_ready(ip) != DDI_SUCCESS)
1033 1033 return;
1034 1034 PUT8(ip, CSR_CMD, CUC_STATS_RST);
1035 1035 (void) GET8(ip, CSR_CMD); /* flush wb */
1036 1036
1037 1037 for (i = 10000; i; i -= 10) {
1038 1038 SYNCSTATS(sp, 0, 0, DDI_DMA_SYNC_FORKERNEL);
1039 1039 if (GETSTAT(sp, STATS_DONE_OFFSET) == STATS_RST_DONE) {
1040 1040 /* yay stats are updated */
1041 1041 break;
1042 1042 }
1043 1043 drv_usecwait(10);
1044 1044 }
1045 1045 if (i == 0) {
1046 1046 iprb_error(ip, "time out acquiring hardware statistics");
1047 1047 return;
1048 1048 }
1049 1049
1050 1050 ip->ex_coll += GETSTAT(sp, STATS_TX_MAXCOL_OFFSET);
1051 1051 ip->late_coll += GETSTAT(sp, STATS_TX_LATECOL_OFFSET);
1052 1052 ip->uflo += GETSTAT(sp, STATS_TX_UFLO_OFFSET);
1053 1053 ip->defer_xmt += GETSTAT(sp, STATS_TX_DEFER_OFFSET);
1054 1054 ip->one_coll += GETSTAT(sp, STATS_TX_ONECOL_OFFSET);
1055 1055 ip->multi_coll += GETSTAT(sp, STATS_TX_MULTCOL_OFFSET);
1056 1056 ip->collisions += GETSTAT(sp, STATS_TX_TOTCOL_OFFSET);
1057 1057 ip->fcs_errs += GETSTAT(sp, STATS_RX_FCS_OFFSET);
1058 1058 ip->align_errs += GETSTAT(sp, STATS_RX_ALIGN_OFFSET);
1059 1059 ip->norcvbuf += GETSTAT(sp, STATS_RX_NOBUF_OFFSET);
1060 1060 ip->oflo += GETSTAT(sp, STATS_RX_OFLO_OFFSET);
1061 1061 ip->runt += GETSTAT(sp, STATS_RX_SHORT_OFFSET);
1062 1062
1063 1063 ip->stats_time = tstamp;
1064 1064 }
1065 1065
/*
 * Transmit a single message.  Returns NULL on consumption (sent or
 * dropped), or the original mblk when no command block is free, in
 * which case the caller should hold the packet for retry (flow
 * control).  Caller must hold culock.
 */
mblk_t *
iprb_send(iprb_t *ip, mblk_t *mp)
{
	iprb_dma_t *cb;
	size_t sz;

	ASSERT(mutex_owned(&ip->culock));

	/* possibly reclaim some CBs */
	iprb_cmd_reclaim(ip);

	cb = iprb_cmd_next(ip);

	if (cb == NULL) {
		/* flow control */
		ip->wantw = B_TRUE;
		return (mp);
	}

	if ((sz = msgsize(mp)) > (ETHERMAX + VLAN_TAGSZ)) {
		/* Generally this should never occur */
		ip->macxmt_errs++;
		freemsg(mp);
		return (NULL);
	}

	ip->opackets++;
	ip->obytes += sz;

	/* all-ones TBD pointer: frame data follows the command block */
	PUTCB32(cb, CB_TX_TBD_OFFSET, 0xffffffffU);
	PUTCB16(cb, CB_TX_COUNT_OFFSET, (sz & 0x3fff) | CB_TX_EOF);
	PUTCB8(cb, CB_TX_THRESH_OFFSET, (sz / 8) & 0xff);
	PUTCB8(cb, CB_TX_NUMBER_OFFSET, 0);
	/* copy (and free) the message into the command block */
	mcopymsg(mp, cb->vaddr + CB_TX_DATA_OFFSET);
	/* group bit set: classify as multicast or broadcast */
	if (cb->vaddr[CB_TX_DATA_OFFSET] & 0x1) {
		if (bcmp(cb->vaddr + CB_TX_DATA_OFFSET, &iprb_bcast, 6) != 0) {
			ip->multixmt++;
		} else {
			ip->brdcstxmt++;
		}
	}
	SYNCCB(cb, 0, CB_TX_DATA_OFFSET + sz, DDI_DMA_SYNC_FORDEV);

	if (iprb_cmd_submit(ip, CB_CMD_TX) != DDI_SUCCESS) {
		ip->macxmt_errs++;
	}

	return (NULL);
}
1115 1115
1116 1116 void
1117 1117 iprb_rx_add(iprb_t *ip)
1118 1118 {
1119 1119 uint16_t last, curr, next;
1120 1120 iprb_dma_t *rfd, *nfd, *lfd;
1121 1121
1122 1122 ASSERT(mutex_owned(&ip->rulock));
1123 1123
1124 1124 curr = ip->rx_index;
1125 1125 last = ip->rx_last;
1126 1126 next = (curr + 1) % NUM_RX;
1127 1127
1128 1128 ip->rx_last = curr;
1129 1129 ip->rx_index = next;
1130 1130
1131 1131 lfd = &ip->rxb[last];
1132 1132 rfd = &ip->rxb[curr];
1133 1133 nfd = &ip->rxb[next];
1134 1134
1135 1135 PUTRFD32(rfd, RFD_LNK_OFFSET, nfd->paddr);
1136 1136 PUTRFD16(rfd, RFD_CTL_OFFSET, RFD_CTL_EL);
1137 1137 PUTRFD16(rfd, RFD_SIZ_OFFSET, RFD_SIZE - RFD_PKT_OFFSET);
1138 1138 PUTRFD16(rfd, RFD_CNT_OFFSET, 0);
1139 1139 SYNCRFD(rfd, 0, RFD_PKT_OFFSET, DDI_DMA_SYNC_FORDEV);
1140 1140 /* clear the suspend & EL bits from the previous RFD */
1141 1141 PUTRFD16(lfd, RFD_CTL_OFFSET, 0);
1142 1142 SYNCRFD(rfd, RFD_CTL_OFFSET, 2, DDI_DMA_SYNC_FORDEV);
1143 1143 }
1144 1144
1145 1145 void
1146 1146 iprb_rx_init(iprb_t *ip)
1147 1147 {
1148 1148 ip->rx_index = 0;
1149 1149 ip->rx_last = NUM_RX - 1;
1150 1150 for (int i = 0; i < NUM_RX; i++)
1151 1151 iprb_rx_add(ip);
1152 1152 ip->rx_index = 0;
1153 1153 ip->rx_last = NUM_RX - 1;
1154 1154 }
1155 1155
/*
 * Drain completed receive frames from the ring.  Each completed RFD
 * is copied into a freshly allocated mblk, counted, and returned to
 * the ring; the mblks are chained via b_next and handed back to the
 * caller for delivery to MAC.  Returns NULL when nothing completed.
 */
mblk_t *
iprb_rx(iprb_t *ip)
{
	iprb_dma_t *rfd;
	uint16_t cnt;
	uint16_t sts;
	int i;
	mblk_t *mplist;
	mblk_t **mpp;
	mblk_t *mp;

	mplist = NULL;
	mpp = &mplist;

	/* at most one full pass over the ring per call */
	for (i = 0; i < NUM_RX; i++) {
		rfd = &ip->rxb[ip->rx_index];
		SYNCRFD(rfd, RFD_STS_OFFSET, 2, DDI_DMA_SYNC_FORKERNEL);
		if ((GETRFD16(rfd, RFD_STS_OFFSET) & RFD_STS_C) == 0) {
			break;
		}

		/* feed the RU-hang watchdog (monotonic timestamp) */
		ip->rx_wdog = gethrtime();

		SYNCRFD(rfd, 0, 0, DDI_DMA_SYNC_FORKERNEL);
		cnt = GETRFD16(rfd, RFD_CNT_OFFSET);
		cnt &= ~(RFD_CNT_EOF | RFD_CNT_F);
		sts = GETRFD16(rfd, RFD_STS_OFFSET);

		if (cnt > (ETHERMAX + VLAN_TAGSZ)) {
			ip->toolong++;
			iprb_rx_add(ip);
			continue;
		}
		/* drop frames the hardware flagged as errored */
		if (((sts & RFD_STS_OK) == 0) && (sts & RFD_STS_ERRS)) {
			iprb_rx_add(ip);
			continue;
		}
		if ((mp = allocb(cnt, BPRI_MED)) == NULL) {
			ip->norcvbuf++;
			iprb_rx_add(ip);
			continue;
		}
		bcopy(rfd->vaddr + RFD_PKT_OFFSET, mp->b_wptr, cnt);

		/* return it to the RFD list */
		iprb_rx_add(ip);

		mp->b_wptr += cnt;
		ip->ipackets++;
		ip->rbytes += cnt;
		/* group bit set: classify as multicast or broadcast */
		if (mp->b_rptr[0] & 0x1) {
			if (bcmp(mp->b_rptr, &iprb_bcast, 6) != 0) {
				ip->multircv++;
			} else {
				ip->brdcstrcv++;
			}
		}
		*mpp = mp;
		mpp = &mp->b_next;
	}
	return (mplist);
}
1218 1218
1219 1219 int
1220 1220 iprb_m_promisc(void *arg, boolean_t on)
1221 1221 {
1222 1222 iprb_t *ip = arg;
1223 1223
1224 1224 mutex_enter(&ip->culock);
1225 1225 ip->promisc = on;
1226 1226 if (ip->running && !ip->suspended)
1227 1227 (void) iprb_configure(ip);
1228 1228 mutex_exit(&ip->culock);
1229 1229 return (0);
1230 1230 }
1231 1231
1232 1232 int
1233 1233 iprb_m_unicst(void *arg, const uint8_t *macaddr)
1234 1234 {
1235 1235 iprb_t *ip = arg;
1236 1236
1237 1237 mutex_enter(&ip->culock);
1238 1238 bcopy(macaddr, ip->curraddr, 6);
1239 1239 if (ip->running && !ip->suspended)
1240 1240 (void) iprb_configure(ip);
1241 1241 mutex_exit(&ip->culock);
1242 1242 return (0);
1243 1243 }
1244 1244
1245 1245 int
1246 1246 iprb_m_multicst(void *arg, boolean_t add, const uint8_t *macaddr)
1247 1247 {
1248 1248 iprb_t *ip = arg;
1249 1249 list_t *l = &ip->mcast;
1250 1250 iprb_mcast_t *mc;
1251 1251
1252 1252 if (add) {
1253 1253 mc = kmem_alloc(sizeof (*mc), KM_NOSLEEP);
1254 1254 if (mc == NULL) {
1255 1255 return (ENOMEM);
1256 1256 }
1257 1257 bcopy(macaddr, mc->addr, 6);
1258 1258 mutex_enter(&ip->culock);
1259 1259 list_insert_head(l, mc);
1260 1260 ip->nmcast++;
1261 1261 if (ip->running && !ip->suspended)
1262 1262 (void) iprb_configure(ip);
1263 1263 mutex_exit(&ip->culock);
1264 1264 } else {
1265 1265 mutex_enter(&ip->culock);
1266 1266 for (mc = list_head(l); mc != NULL; mc = list_next(l, mc)) {
1267 1267 if (bcmp(macaddr, mc->addr, 6) == 0) {
1268 1268 list_remove(&ip->mcast, mc);
1269 1269 ip->nmcast--;
1270 1270 if (ip->running && !ip->suspended)
1271 1271 (void) iprb_configure(ip);
1272 1272 break;
1273 1273 }
1274 1274 }
1275 1275 mutex_exit(&ip->culock);
1276 1276 if (mc)
1277 1277 kmem_free(mc, sizeof (*mc));
1278 1278 }
1279 1279 return (0);
1280 1280 }
1281 1281
/*
 * GLDv3 start entry point: bring up the chip (unless suspended), mark
 * the interface running, and arm the periodic watchdog/stats timer.
 * Note the periodic is installed even when iprb_start() fails; its
 * teardown is in iprb_m_stop().  Returns 0 or EIO.
 */
int
iprb_m_start(void *arg)
{
	int rv;
	iprb_t *ip = arg;

	mutex_enter(&ip->rulock);
	mutex_enter(&ip->culock);
	rv = ip->suspended ? 0 : iprb_start(ip);
	if (rv == 0)
		ip->running = B_TRUE;
	/* 5000000000 ns == 5 s between iprb_periodic() invocations */
	ip->perh = ddi_periodic_add(iprb_periodic, ip, 5000000000, 0);
	mutex_exit(&ip->culock);
	mutex_exit(&ip->rulock);
	if (rv == 0) {
		if (ip->miih)
			mii_start(ip->miih);
		else
			/* might be a lie. */
			mac_link_update(ip->mach, LINK_STATE_UP);
	}
	return (rv ? EIO : 0);
}
1305 1305
/*
 * GLDv3 stop entry point: quiesce MII (or report link down), remove
 * the periodic timer installed by iprb_m_start(), take a final stats
 * snapshot, and halt the chip.
 */
void
iprb_m_stop(void *arg)
{
	iprb_t *ip = arg;

	if (ip->miih) {
		mii_stop(ip->miih);
	} else {
		mac_link_update(ip->mach, LINK_STATE_DOWN);
	}

	/* stop the watchdog before taking the locks it also takes */
	ddi_periodic_delete(ip->perh);
	ip->perh = 0;

	mutex_enter(&ip->rulock);
	mutex_enter(&ip->culock);

	if (!ip->suspended) {
		/* harvest counters before the reset wipes them */
		iprb_update_stats(ip);
		iprb_stop(ip);
	}
	ip->running = B_FALSE;
	mutex_exit(&ip->culock);
	mutex_exit(&ip->rulock);
}
1331 1331
1332 1332 int
1333 1333 iprb_m_stat(void *arg, uint_t stat, uint64_t *val)
1334 1334 {
1335 1335 iprb_t *ip = arg;
1336 1336
1337 1337 if (ip->miih && (mii_m_getstat(ip->miih, stat, val) == 0)) {
1338 1338 return (0);
1339 1339 }
1340 1340
1341 1341 mutex_enter(&ip->culock);
1342 1342 if ((!ip->suspended) && (ip->running)) {
1343 1343 iprb_update_stats(ip);
1344 1344 }
1345 1345 mutex_exit(&ip->culock);
1346 1346
1347 1347 switch (stat) {
1348 1348 case MAC_STAT_IFSPEED:
1349 1349 if (ip->miih == NULL) {
1350 1350 *val = 10000000; /* 10 Mbps */
1351 1351 }
1352 1352 break;
1353 1353 case ETHER_STAT_LINK_DUPLEX:
1354 1354 if (ip->miih == NULL) {
1355 1355 *val = LINK_DUPLEX_UNKNOWN;
1356 1356 }
1357 1357 break;
1358 1358 case MAC_STAT_MULTIRCV:
1359 1359 *val = ip->multircv;
1360 1360 break;
1361 1361 case MAC_STAT_BRDCSTRCV:
1362 1362 *val = ip->brdcstrcv;
1363 1363 break;
1364 1364 case MAC_STAT_MULTIXMT:
1365 1365 *val = ip->multixmt;
1366 1366 break;
1367 1367 case MAC_STAT_BRDCSTXMT:
1368 1368 *val = ip->brdcstxmt;
1369 1369 break;
1370 1370 case MAC_STAT_IPACKETS:
1371 1371 * val = ip->ipackets;
1372 1372 break;
1373 1373 case MAC_STAT_RBYTES:
1374 1374 *val = ip->rbytes;
1375 1375 break;
1376 1376 case MAC_STAT_OPACKETS:
1377 1377 *val = ip->opackets;
1378 1378 break;
1379 1379 case MAC_STAT_OBYTES:
1380 1380 *val = ip->obytes;
1381 1381 break;
1382 1382 case MAC_STAT_NORCVBUF:
1383 1383 *val = ip->norcvbuf;
1384 1384 break;
1385 1385 case MAC_STAT_COLLISIONS:
1386 1386 *val = ip->collisions;
1387 1387 break;
1388 1388 case MAC_STAT_IERRORS:
1389 1389 *val = ip->align_errs +
1390 1390 ip->fcs_errs +
1391 1391 ip->norcvbuf +
1392 1392 ip->runt +
1393 1393 ip->toolong +
1394 1394 ip->macrcv_errs;
1395 1395 break;
1396 1396 case MAC_STAT_OERRORS:
1397 1397 *val = ip->ex_coll +
1398 1398 ip->late_coll +
1399 1399 ip->uflo +
1400 1400 ip->macxmt_errs +
1401 1401 ip->nocarrier;
1402 1402 break;
1403 1403 case ETHER_STAT_ALIGN_ERRORS:
1404 1404 *val = ip->align_errs;
1405 1405 break;
1406 1406 case ETHER_STAT_FCS_ERRORS:
1407 1407 *val = ip->fcs_errs;
1408 1408 break;
1409 1409 case ETHER_STAT_DEFER_XMTS:
1410 1410 *val = ip->defer_xmt;
1411 1411 break;
1412 1412 case ETHER_STAT_FIRST_COLLISIONS:
1413 1413 *val = ip->one_coll + ip->multi_coll + ip->ex_coll;
1414 1414 break;
1415 1415 case ETHER_STAT_MULTI_COLLISIONS:
1416 1416 *val = ip->multi_coll;
1417 1417 break;
1418 1418 case ETHER_STAT_TX_LATE_COLLISIONS:
1419 1419 *val = ip->late_coll;
1420 1420 break;
1421 1421 case ETHER_STAT_EX_COLLISIONS:
1422 1422 *val = ip->ex_coll;
1423 1423 break;
1424 1424 case MAC_STAT_OVERFLOWS:
1425 1425 *val = ip->oflo;
1426 1426 break;
1427 1427 case MAC_STAT_UNDERFLOWS:
1428 1428 *val = ip->uflo;
1429 1429 break;
1430 1430 case ETHER_STAT_TOOSHORT_ERRORS:
1431 1431 *val = ip->runt;
1432 1432 break;
1433 1433 case ETHER_STAT_TOOLONG_ERRORS:
1434 1434 *val = ip->toolong;
1435 1435 break;
1436 1436 case ETHER_STAT_CARRIER_ERRORS:
1437 1437 *val = ip->nocarrier; /* reported only for "suspend" */
1438 1438 break;
1439 1439 case ETHER_STAT_MACXMT_ERRORS:
1440 1440 *val = ip->macxmt_errs;
1441 1441 break;
1442 1442 case ETHER_STAT_MACRCV_ERRORS:
1443 1443 *val = ip->macrcv_errs;
1444 1444 break;
1445 1445 default:
1446 1446 return (ENOTSUP);
1447 1447 }
1448 1448 return (0);
1449 1449 }
1450 1450
1451 1451 void
1452 1452 iprb_m_propinfo(void *arg, const char *name, mac_prop_id_t id,
1453 1453 mac_prop_info_handle_t pih)
1454 1454 {
1455 1455 iprb_t *ip = arg;
1456 1456
1457 1457 if (ip->miih != NULL) {
1458 1458 mii_m_propinfo(ip->miih, name, id, pih);
1459 1459 return;
1460 1460 }
1461 1461 switch (id) {
1462 1462 case MAC_PROP_DUPLEX:
1463 1463 case MAC_PROP_SPEED:
1464 1464 mac_prop_info_set_perm(pih, MAC_PROP_PERM_READ);
1465 1465 break;
1466 1466 }
1467 1467 }
1468 1468
1469 1469 int
1470 1470 iprb_m_getprop(void *arg, const char *name, mac_prop_id_t id, uint_t sz,
1471 1471 void *val)
1472 1472 {
1473 1473 iprb_t *ip = arg;
1474 1474 uint64_t x;
1475 1475
1476 1476 if (ip->miih != NULL) {
1477 1477 return (mii_m_getprop(ip->miih, name, id, sz, val));
1478 1478 }
1479 1479 switch (id) {
1480 1480 case MAC_PROP_SPEED:
1481 1481 x = 10000000;
1482 1482 bcopy(&x, val, sizeof (x));
1483 1483 return (0);
1484 1484
1485 1485 case MAC_PROP_DUPLEX:
1486 1486 x = LINK_DUPLEX_UNKNOWN;
1487 1487 bcopy(&x, val, sizeof (x));
1488 1488 return (0);
1489 1489 }
1490 1490
1491 1491 return (ENOTSUP);
1492 1492 }
1493 1493
1494 1494 int
1495 1495 iprb_m_setprop(void *arg, const char *name, mac_prop_id_t id, uint_t sz,
1496 1496 const void *val)
1497 1497 {
1498 1498 iprb_t *ip = arg;
1499 1499
1500 1500 if (ip->miih != NULL) {
1501 1501 return (mii_m_setprop(ip->miih, name, id, sz, val));
1502 1502 }
1503 1503 return (ENOTSUP);
1504 1504 }
1505 1505
/*
 * GLDv3 transmit entry point.  Sends each mblk on the chain via
 * iprb_send(); if the command ring fills, the unsent remainder of the
 * chain is returned to MAC for later retry.  While suspended, packets
 * are dropped and accounted as carrier errors.
 */
mblk_t *
iprb_m_tx(void *arg, mblk_t *mp)
{
	iprb_t *ip = arg;
	mblk_t *nmp;

	mutex_enter(&ip->culock);

	while (mp != NULL) {
		/* detach the head from the chain before sending */
		nmp = mp->b_next;
		mp->b_next = NULL;
		if (ip->suspended) {
			freemsg(mp);
			ip->nocarrier++;
			mp = nmp;
			continue;
		}
		if ((mp = iprb_send(ip, mp)) != NULL) {
			/* ring full: reattach and hand back to MAC */
			mp->b_next = nmp;
			break;
		}
		mp = nmp;
	}
	mutex_exit(&ip->culock);
	return (mp);
}
1532 1532
1533 1533 void
1534 1534 iprb_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
1535 1535 {
1536 1536 iprb_t *ip = arg;
1537 1537
1538 1538 if ((ip->miih != NULL) && (mii_m_loop_ioctl(ip->miih, wq, mp)))
1539 1539 return;
1540 1540
1541 1541 miocnak(wq, mp, 0, EINVAL);
1542 1542 }
1543 1543
1544 1544 uint16_t
1545 1545 iprb_mii_read(void *arg, uint8_t phy, uint8_t reg)
1546 1546 {
1547 1547 iprb_t *ip = arg;
1548 1548 uint32_t mdi;
1549 1549
1550 1550 /*
1551 1551 * NB: we are guaranteed by the MII layer not to be suspended.
1552 1552 * Furthermore, we have an independent MII register.
1553 1553 */
1554 1554
1555 1555 mdi = MDI_OP_RD |
1556 1556 ((uint32_t)phy << MDI_PHYAD_SHIFT) |
1557 1557 ((uint32_t)reg << MDI_REGAD_SHIFT);
1558 1558
1559 1559 PUT32(ip, CSR_MDICTL, mdi);
1560 1560 for (int i = 0; i < 100; i++) {
1561 1561 mdi = GET32(ip, CSR_MDICTL);
1562 1562 if (mdi & MDI_R) {
1563 1563 return (mdi & 0xffff);
1564 1564 }
1565 1565 drv_usecwait(1);
1566 1566 }
1567 1567 return (0xffff);
1568 1568 }
1569 1569
1570 1570 void
1571 1571 iprb_mii_write(void *arg, uint8_t phy, uint8_t reg, uint16_t data)
1572 1572 {
1573 1573 iprb_t *ip = arg;
1574 1574 uint32_t mdi;
1575 1575
1576 1576 mdi = MDI_OP_WR |
1577 1577 ((uint32_t)phy << MDI_PHYAD_SHIFT) |
1578 1578 ((uint32_t)reg << MDI_REGAD_SHIFT) |
1579 1579 (data);
1580 1580
1581 1581 PUT32(ip, CSR_MDICTL, mdi);
1582 1582 for (int i = 0; i < 100; i++) {
1583 1583 if (GET32(ip, CSR_MDICTL) & MDI_R)
1584 1584 break;
1585 1585 }
1586 1586 }
1587 1587
/*
 * MII layer link-state callback: relay the new link state up to MAC.
 */
void
iprb_mii_notify(void *arg, link_state_t link)
{
	iprb_t *ip = arg;

	mac_link_update(ip->mach, link);
}
1595 1595
/*
 * Interrupt service routine.  Acknowledges all pending causes, drains
 * received frames, restarts the receive unit if it ran out of
 * resources, and wakes the transmit path if it was flow-controlled.
 */
uint_t
iprb_intr(caddr_t arg1, caddr_t arg2)
{
	iprb_t *ip = (void *)arg1;
	uint8_t sts;
	mblk_t *mp = NULL;

	_NOTE(ARGUNUSED(arg2));

	mutex_enter(&ip->rulock);
	if (ip->suspended) {
		mutex_exit(&ip->rulock);
		return (DDI_INTR_UNCLAIMED);
	}
	sts = GET8(ip, CSR_STS);
	if (sts == 0) {
		/* No interrupt status!  Not ours (shared line). */
		mutex_exit(&ip->rulock);
		return (DDI_INTR_UNCLAIMED);
	}
	/* acknowledge the interrupts */
	PUT8(ip, CSR_STS, sts);

	if (sts & (STS_RNR | STS_FR)) {
		mp = iprb_rx(ip);

		/* restart the RU if it stopped for lack of resources */
		if ((sts & STS_RNR) &&
		    ((GET8(ip, CSR_STATE) & STATE_RUS) == STATE_RUS_NORES)) {
			iprb_rx_init(ip);

			mutex_enter(&ip->culock);
			PUT32(ip, CSR_GEN_PTR, ip->rxb[0].paddr);
			/* wait for the SCB */
			(void) iprb_cmd_ready(ip);
			PUT8(ip, CSR_CMD, RUC_START);
			(void) GET8(ip, CSR_CMD);	/* flush CSR */
			mutex_exit(&ip->culock);
		}
	}
	mutex_exit(&ip->rulock);

	/* deliver frames and wake transmitters outside our locks */
	if (mp) {
		mac_rx(ip->mach, NULL, mp);
	}
	if ((sts & (STS_CNA | STS_CX)) && ip->wantw) {
		ip->wantw = B_FALSE;
		mac_tx_update(ip->mach);
	}
	return (DDI_INTR_CLAIMED);
}
1646 1646
/*
 * Periodic (5 second) watchdog: detects a hung receive unit or a
 * stalled command unit, resetting the chip if either is suspected,
 * and refreshes the statistics counters.
 */
void
iprb_periodic(void *arg)
{
	iprb_t *ip = arg;
	boolean_t reset = B_FALSE;

	mutex_enter(&ip->rulock);
	if (ip->suspended || !ip->running) {
		mutex_exit(&ip->rulock);
		return;
	}

	/*
	 * If we haven't received a packet in a while, and if the link
	 * is up, then it might be a hung chip.  This problem
	 * reportedly only occurs at 10 Mbps.
	 *
	 * NOTE(review): rx_wdog is now a gethrtime() value, so
	 * rx_timeout must be expressed in nanoseconds — confirm the
	 * field conversion in iprb.h matches.
	 */
	if (ip->rxhangbug &&
	    ((ip->miih == NULL) || (mii_get_speed(ip->miih) == 10000000)) &&
	    ((gethrtime() - ip->rx_wdog) > ip->rx_timeout)) {
		cmn_err(CE_CONT, "?Possible RU hang, resetting.\n");
		reset = B_TRUE;
	}

	/* update the statistics */
	mutex_enter(&ip->culock);

	/* same unit caveat applies to tx_wdog/tx_timeout */
	if (ip->tx_wdog && ((gethrtime() - ip->tx_wdog) > ip->tx_timeout)) {
		/* transmit/CU hang? */
		cmn_err(CE_CONT, "?CU stalled, resetting.\n");
		reset = B_TRUE;
	}

	if (reset) {
		/* We want to reconfigure */
		iprb_stop(ip);
		if (iprb_start(ip) != DDI_SUCCESS) {
			iprb_error(ip, "unable to restart chip");
		}
	}

	iprb_update_stats(ip);

	mutex_exit(&ip->culock);
	mutex_exit(&ip->rulock);
}
1693 1693
/*
 * Fast-reboot quiesce(9E) entry point: silence the device using only
 * register writes and busy-waits (no locks taken, no DMA teardown).
 */
int
iprb_quiesce(dev_info_t *dip)
{
	iprb_t *ip = ddi_get_driver_private(dip);

	/* Reset, but first go into idle state */
	PUT32(ip, CSR_PORT, PORT_SEL_RESET);
	drv_usecwait(50);
	PUT32(ip, CSR_PORT, PORT_SW_RESET);
	drv_usecwait(10);
	/* mask interrupts so a stray line can't disturb the reboot */
	PUT8(ip, CSR_INTCTL, INTCTL_MASK);

	return (DDI_SUCCESS);
}
1708 1708
/*
 * DDI_SUSPEND handler: quiesce MII first (it may sleep), then under
 * both locks take a final stats snapshot and halt the chip, marking
 * the instance suspended.  Idempotent if already suspended.
 */
int
iprb_suspend(dev_info_t *dip)
{
	iprb_t *ip = ddi_get_driver_private(dip);

	if (ip->miih)
		mii_suspend(ip->miih);

	mutex_enter(&ip->rulock);
	mutex_enter(&ip->culock);
	if (!ip->suspended) {
		ip->suspended = B_TRUE;
		if (ip->running) {
			/* harvest counters before the reset wipes them */
			iprb_update_stats(ip);
			iprb_stop(ip);
		}
	}
	mutex_exit(&ip->culock);
	mutex_exit(&ip->rulock);
	return (DDI_SUCCESS);
}
1730 1730
/*
 * DDI_RESUME handler: clear the suspended flag and, if the interface
 * was running, restart the chip.  On restart failure the instance is
 * put back into the suspended state and DDI_FAILURE returned.
 */
int
iprb_resume(dev_info_t *dip)
{
	iprb_t *ip = ddi_get_driver_private(dip);

	mutex_enter(&ip->rulock);
	mutex_enter(&ip->culock);

	ip->suspended = B_FALSE;
	if (ip->running) {
		if (iprb_start(ip) != DDI_SUCCESS) {
			iprb_error(ip, "unable to restart chip!");
			ip->suspended = B_TRUE;
			mutex_exit(&ip->culock);
			mutex_exit(&ip->rulock);
			return (DDI_FAILURE);
		}
	}

	mutex_exit(&ip->culock);
	mutex_exit(&ip->rulock);
	/* resume the MII layer only after the chip is back up */
	if (ip->miih)
		mii_resume(ip->miih);
	return (DDI_SUCCESS);
}
1756 1756
1757 1757 int
1758 1758 iprb_ddi_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1759 1759 {
1760 1760 switch (cmd) {
1761 1761 case DDI_ATTACH:
1762 1762 return (iprb_attach(dip));
1763 1763
1764 1764 case DDI_RESUME:
1765 1765 return (iprb_resume(dip));
1766 1766
1767 1767 default:
1768 1768 return (DDI_FAILURE);
1769 1769 }
1770 1770 }
1771 1771
1772 1772 int
1773 1773 iprb_ddi_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1774 1774 {
1775 1775 switch (cmd) {
1776 1776 case DDI_DETACH:
1777 1777 return (iprb_detach(dip));
1778 1778
1779 1779 case DDI_SUSPEND:
1780 1780 return (iprb_suspend(dip));
1781 1781
1782 1782 default:
1783 1783 return (DDI_FAILURE);
1784 1784 }
1785 1785 }
1786 1786
1787 1787 void
1788 1788 iprb_error(iprb_t *ip, const char *fmt, ...)
1789 1789 {
1790 1790 va_list ap;
1791 1791 char buf[256];
1792 1792
1793 1793 va_start(ap, fmt);
1794 1794 (void) vsnprintf(buf, sizeof (buf), fmt, ap);
1795 1795 va_end(ap);
1796 1796
1797 1797 cmn_err(CE_WARN, "%s%d: %s",
1798 1798 ddi_driver_name(ip->dip), ddi_get_instance(ip->dip), buf);
1799 1799 }
↓ open down ↓ |
115 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX