Print this page
patch tsoome-feedback
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/xen/os/xvdi.c
+++ new/usr/src/uts/common/xen/os/xvdi.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /*
28 28 * Xen virtual device driver interfaces
29 29 */
30 30
31 31 /*
32 32 * todo:
33 33 * + name space clean up:
34 34 * xvdi_* - public xen interfaces, for use by all leaf drivers
35 35 * xd_* - public xen data structures
36 36 * i_xvdi_* - implementation private functions
37 37 * xendev_* - xendev driver interfaces, both internal and in cb_ops/bus_ops
38 38 * + add mdb dcmds to dump ring status
39 39 * + implement xvdi_xxx to wrap xenbus_xxx read/write function
40 40 * + convert (xendev_ring_t *) into xvdi_ring_handle_t
41 41 */
42 42 #include <sys/conf.h>
43 43 #include <sys/param.h>
44 44 #include <sys/kmem.h>
45 45 #include <vm/seg_kmem.h>
46 46 #include <sys/debug.h>
47 47 #include <sys/modctl.h>
48 48 #include <sys/autoconf.h>
49 49 #include <sys/ddi_impldefs.h>
50 50 #include <sys/ddi_subrdefs.h>
51 51 #include <sys/ddi.h>
52 52 #include <sys/sunddi.h>
53 53 #include <sys/sunndi.h>
54 54 #include <sys/sunldi.h>
55 55 #include <sys/fs/dv_node.h>
56 56 #include <sys/avintr.h>
57 57 #include <sys/psm.h>
58 58 #include <sys/spl.h>
59 59 #include <sys/promif.h>
60 60 #include <sys/list.h>
61 61 #include <sys/bootconf.h>
62 62 #include <sys/bootsvcs.h>
63 63 #include <sys/bootinfo.h>
64 64 #include <sys/note.h>
65 65 #include <sys/sysmacros.h>
66 66 #ifdef XPV_HVM_DRIVER
67 67 #include <sys/xpv_support.h>
68 68 #include <sys/hypervisor.h>
69 69 #include <public/grant_table.h>
70 70 #include <public/xen.h>
71 71 #include <public/io/xenbus.h>
72 72 #include <public/io/xs_wire.h>
73 73 #include <public/event_channel.h>
74 74 #include <public/io/xenbus.h>
75 75 #else /* XPV_HVM_DRIVER */
76 76 #include <sys/hypervisor.h>
77 77 #include <sys/xen_mmu.h>
78 78 #include <xen/sys/xenbus_impl.h>
79 79 #include <sys/evtchn_impl.h>
80 80 #endif /* XPV_HVM_DRIVER */
81 81 #include <sys/gnttab.h>
82 82 #include <xen/sys/xendev.h>
83 83 #include <vm/hat_i86.h>
84 84 #include <sys/scsi/generic/inquiry.h>
85 85 #include <util/sscanf.h>
86 86 #include <xen/public/io/xs_wire.h>
87 87
88 88
89 89 #define isdigit(ch) ((ch) >= '0' && (ch) <= '9')
90 90 #define isxdigit(ch) (isdigit(ch) || ((ch) >= 'a' && (ch) <= 'f') || \
91 91 ((ch) >= 'A' && (ch) <= 'F'))
92 92
93 93 static void xvdi_ring_init_sring(xendev_ring_t *);
94 94 static void xvdi_ring_init_front_ring(xendev_ring_t *, size_t, size_t);
95 95 #ifndef XPV_HVM_DRIVER
96 96 static void xvdi_ring_init_back_ring(xendev_ring_t *, size_t, size_t);
97 97 #endif
98 98 static void xvdi_reinit_ring(dev_info_t *, grant_ref_t *, xendev_ring_t *);
99 99
100 100 static int i_xvdi_add_watches(dev_info_t *);
101 101 static void i_xvdi_rem_watches(dev_info_t *);
102 102
103 103 static int i_xvdi_add_watch_oestate(dev_info_t *);
104 104 static void i_xvdi_rem_watch_oestate(dev_info_t *);
105 105 static void i_xvdi_oestate_cb(struct xenbus_device *, XenbusState);
106 106 static void i_xvdi_oestate_handler(void *);
107 107
108 108 static int i_xvdi_add_watch_hpstate(dev_info_t *);
109 109 static void i_xvdi_rem_watch_hpstate(dev_info_t *);
110 110 static void i_xvdi_hpstate_cb(struct xenbus_watch *, const char **,
111 111 unsigned int);
112 112 static void i_xvdi_hpstate_handler(void *);
113 113
114 114 static int i_xvdi_add_watch_bepath(dev_info_t *);
115 115 static void i_xvdi_rem_watch_bepath(dev_info_t *);
116 116 static void i_xvdi_bepath_cb(struct xenbus_watch *, const char **,
117 117 unsigned in);
118 118
119 119 static void xendev_offline_device(void *);
120 120
121 121 static void i_xvdi_probe_path_cb(struct xenbus_watch *, const char **,
122 122 unsigned int);
123 123 static void i_xvdi_probe_path_handler(void *);
124 124
125 125 typedef struct oestate_evt {
126 126 dev_info_t *dip;
127 127 XenbusState state;
128 128 } i_oestate_evt_t;
129 129
/*
 * Per-device-class configuration: describes how each class of xen
 * virtual device appears in xenstore and which frontend/backend
 * driver nodes service it.
 */
typedef struct xd_cfg {
	xendev_devclass_t devclass;	/* device class this entry covers */
	char *xsdev;		/* xenstore device name (NULL: soft device) */
	char *xs_path_fe;	/* xenstore frontend path prefix */
	char *xs_path_be;	/* xenstore backend path prefix */
	char *node_fe;		/* frontend driver node name */
	char *node_be;		/* backend driver node name */
	char *device_type;	/* value for the "device_type" property */
	int xd_ipl;		/* interrupt priority level for the class */
	int flags;		/* XD_DOM_* domain applicability mask */
} i_xd_cfg_t;

#define	XD_DOM_ZERO	0x01	/* dom0 only. */
#define	XD_DOM_GUEST	0x02	/* Guest domains (i.e. non-dom0). */
#define	XD_DOM_IO	0x04	/* IO domains. */

#define	XD_DOM_ALL	(XD_DOM_ZERO | XD_DOM_GUEST)

/* Table of all known device classes; searched by i_xvdi_devclass2cfg(). */
static i_xd_cfg_t xdci[] = {
	{ XEN_CONSOLE, NULL, NULL, NULL, "xencons", NULL,
	    "console", IPL_CONS, XD_DOM_ALL, },

	{ XEN_VNET, "vif", "device/vif", "backend/vif", "xnf", "xnb",
	    "network", IPL_VIF, XD_DOM_ALL, },

	{ XEN_VBLK, "vbd", "device/vbd", "backend/vbd", "xdf", "xdb",
	    "block", IPL_VBD, XD_DOM_ALL, },

	{ XEN_BLKTAP, "tap", NULL, "backend/tap", NULL, "xpvtap",
	    "block", IPL_VBD, XD_DOM_ALL, },

	{ XEN_XENBUS, NULL, NULL, NULL, "xenbus", NULL,
	    NULL, 0, XD_DOM_ALL, },

	{ XEN_DOMCAPS, NULL, NULL, NULL, "domcaps", NULL,
	    NULL, 0, XD_DOM_ALL, },

	{ XEN_BALLOON, NULL, NULL, NULL, "balloon", NULL,
	    NULL, 0, XD_DOM_ALL, },

	{ XEN_EVTCHN, NULL, NULL, NULL, "evtchn", NULL,
	    NULL, 0, XD_DOM_ZERO, },

	{ XEN_PRIVCMD, NULL, NULL, NULL, "privcmd", NULL,
	    NULL, 0, XD_DOM_ZERO, },
};
#define	NXDC	(sizeof (xdci) / sizeof (xdci[0]))
177 177
178 178 static void i_xvdi_enum_fe(dev_info_t *, i_xd_cfg_t *);
179 179 static void i_xvdi_enum_be(dev_info_t *, i_xd_cfg_t *);
180 180 static void i_xvdi_enum_worker(dev_info_t *, i_xd_cfg_t *, char *);
181 181
182 182 /*
183 183 * Xen device channel device access and DMA attributes
184 184 */
185 185 static ddi_device_acc_attr_t xendev_dc_accattr = {
186 186 DDI_DEVICE_ATTR_V0, DDI_NEVERSWAP_ACC, DDI_STRICTORDER_ACC
187 187 };
188 188
189 189 static ddi_dma_attr_t xendev_dc_dmaattr = {
190 190 DMA_ATTR_V0, /* version of this structure */
191 191 0, /* lowest usable address */
192 192 0xffffffffffffffffULL, /* highest usable address */
193 193 0x7fffffff, /* maximum DMAable byte count */
194 194 MMU_PAGESIZE, /* alignment in bytes */
195 195 0x7ff, /* bitmap of burst sizes */
196 196 1, /* minimum transfer */
197 197 0xffffffffU, /* maximum transfer */
198 198 0xffffffffffffffffULL, /* maximum segment length */
199 199 1, /* maximum number of segments */
200 200 1, /* granularity */
201 201 0, /* flags (reserved) */
202 202 };
203 203
204 204 static dev_info_t *xendev_dip = NULL;
205 205
206 206 #define XVDI_DBG_STATE 0x01
207 207 #define XVDI_DBG_PROBE 0x02
208 208
209 209 #ifdef DEBUG
210 210 int i_xvdi_debug = 0;
211 211
212 212 #define XVDI_DPRINTF(flag, format, ...) \
213 213 { \
214 214 if (i_xvdi_debug & (flag)) \
215 215 prom_printf((format), __VA_ARGS__); \
216 216 }
217 217 #else
218 218 #define XVDI_DPRINTF(flag, format, ...)
219 219 #endif /* DEBUG */
220 220
221 221 static i_xd_cfg_t *
222 222 i_xvdi_devclass2cfg(xendev_devclass_t devclass)
223 223 {
224 224 i_xd_cfg_t *xdcp;
225 225 int i;
226 226
227 227 for (i = 0, xdcp = xdci; i < NXDC; i++, xdcp++)
228 228 if (xdcp->devclass == devclass)
229 229 return (xdcp);
230 230
231 231 return (NULL);
232 232 }
233 233
234 234 int
235 235 xvdi_init_dev(dev_info_t *dip)
236 236 {
237 237 xendev_devclass_t devcls;
238 238 int vdevnum;
239 239 domid_t domid;
240 240 struct xendev_ppd *pdp;
241 241 i_xd_cfg_t *xdcp;
242 242 boolean_t backend;
243 243 char xsnamebuf[TYPICALMAXPATHLEN];
244 244 char *xsname;
245 245 void *prop_str;
246 246 unsigned int prop_len;
247 247 char unitaddr[8];
248 248
249 249 devcls = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
250 250 DDI_PROP_DONTPASS, "devclass", XEN_INVAL);
251 251 vdevnum = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
252 252 DDI_PROP_DONTPASS, "vdev", VDEV_NOXS);
253 253 domid = (domid_t)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
254 254 DDI_PROP_DONTPASS, "domain", DOMID_SELF);
255 255
256 256 backend = (domid != DOMID_SELF);
257 257 xdcp = i_xvdi_devclass2cfg(devcls);
258 258 if (xdcp->device_type != NULL)
259 259 (void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
260 260 "device_type", xdcp->device_type);
261 261
262 262 pdp = kmem_zalloc(sizeof (*pdp), KM_SLEEP);
263 263 pdp->xd_domain = domid;
264 264 pdp->xd_vdevnum = vdevnum;
265 265 pdp->xd_devclass = devcls;
266 266 pdp->xd_evtchn = INVALID_EVTCHN;
267 267 list_create(&pdp->xd_xb_watches, sizeof (xd_xb_watches_t),
268 268 offsetof(xd_xb_watches_t, xxw_list));
269 269 mutex_init(&pdp->xd_evt_lk, NULL, MUTEX_DRIVER, NULL);
270 270 mutex_init(&pdp->xd_ndi_lk, NULL, MUTEX_DRIVER, NULL);
271 271 ddi_set_parent_data(dip, pdp);
272 272
273 273 /*
274 274 * devices that do not need to interact with xenstore
275 275 */
276 276 if (vdevnum == VDEV_NOXS) {
277 277 (void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
278 278 "unit-address", "0");
279 279 if (devcls == XEN_CONSOLE)
280 280 (void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
281 281 "pm-hardware-state", "needs-suspend-resume");
282 282 return (DDI_SUCCESS);
283 283 }
284 284
285 285 /*
286 286 * PV devices that need to probe xenstore
287 287 */
288 288
289 289 (void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
290 290 "pm-hardware-state", "needs-suspend-resume");
291 291
292 292 xsname = xsnamebuf;
293 293 if (!backend)
294 294 (void) snprintf(xsnamebuf, sizeof (xsnamebuf),
295 295 "%s/%d", xdcp->xs_path_fe, vdevnum);
296 296 else
297 297 (void) snprintf(xsnamebuf, sizeof (xsnamebuf),
298 298 "%s/%d/%d", xdcp->xs_path_be, domid, vdevnum);
299 299 if ((xenbus_read_driver_state(xsname) >= XenbusStateClosing)) {
300 300 /* Don't try to init a dev that may be closing */
301 301 mutex_destroy(&pdp->xd_ndi_lk);
302 302 mutex_destroy(&pdp->xd_evt_lk);
303 303 kmem_free(pdp, sizeof (*pdp));
304 304 ddi_set_parent_data(dip, NULL);
305 305 return (DDI_FAILURE);
306 306 }
307 307
308 308 pdp->xd_xsdev.nodename = i_ddi_strdup(xsname, KM_SLEEP);
309 309 pdp->xd_xsdev.devicetype = xdcp->xsdev;
310 310 pdp->xd_xsdev.frontend = (backend ? 0 : 1);
311 311 pdp->xd_xsdev.data = dip;
312 312 pdp->xd_xsdev.otherend_id = (backend ? domid : -1);
313 313 if (i_xvdi_add_watches(dip) != DDI_SUCCESS) {
314 314 cmn_err(CE_WARN, "xvdi_init_dev: "
315 315 "cannot add watches for %s", xsname);
316 316 xvdi_uninit_dev(dip);
317 317 return (DDI_FAILURE);
318 318 }
319 319
320 320 if (backend)
321 321 return (DDI_SUCCESS);
322 322
323 323 /*
324 324 * The unit-address for frontend devices is the name of the
325 325 * of the xenstore node containing the device configuration
326 326 * and is contained in the 'vdev' property.
327 327 * VIF devices are named using an incrementing integer.
328 328 * VBD devices are either named using the 16-bit dev_t value
329 329 * for linux 'hd' and 'xvd' devices, or a simple integer value
330 330 * in the range 0..767. 768 is the base value of the linux
331 331 * dev_t namespace, the dev_t value for 'hda'.
332 332 */
333 333 (void) snprintf(unitaddr, sizeof (unitaddr), "%d", vdevnum);
334 334 (void) ndi_prop_update_string(DDI_DEV_T_NONE, dip, "unit-address",
335 335 unitaddr);
336 336
337 337 switch (devcls) {
338 338 case XEN_VNET:
339 339 if (xenbus_read(XBT_NULL, xsname, "mac", (void *)&prop_str,
340 340 &prop_len) != 0)
341 341 break;
342 342 (void) ndi_prop_update_string(DDI_DEV_T_NONE, dip, "mac",
343 343 prop_str);
344 344 kmem_free(prop_str, prop_len);
345 345 break;
346 346 case XEN_VBLK:
347 347 /*
348 348 * cache a copy of the otherend name
349 349 * for ease of observeability
350 350 */
351 351 if (xenbus_read(XBT_NULL, pdp->xd_xsdev.otherend, "dev",
352 352 &prop_str, &prop_len) != 0)
353 353 break;
354 354 (void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
355 355 "dev-address", prop_str);
356 356 kmem_free(prop_str, prop_len);
357 357 break;
358 358 default:
359 359 break;
360 360 }
361 361
362 362 return (DDI_SUCCESS);
363 363 }
364 364
365 365 void
366 366 xvdi_uninit_dev(dev_info_t *dip)
367 367 {
368 368 struct xendev_ppd *pdp = ddi_get_parent_data(dip);
369 369
370 370 if (pdp != NULL) {
371 371 /* Remove any registered callbacks. */
372 372 xvdi_remove_event_handler(dip, NULL);
373 373
374 374 /* Remove any registered watches. */
375 375 i_xvdi_rem_watches(dip);
376 376
377 377 /* tell other end to close */
378 378 if (pdp->xd_xsdev.otherend_id != (domid_t)-1)
379 379 (void) xvdi_switch_state(dip, XBT_NULL,
380 380 XenbusStateClosed);
381 381
382 382 if (pdp->xd_xsdev.nodename != NULL)
383 383 kmem_free((char *)(pdp->xd_xsdev.nodename),
384 384 strlen(pdp->xd_xsdev.nodename) + 1);
385 385
386 386 ddi_set_parent_data(dip, NULL);
387 387
388 388 mutex_destroy(&pdp->xd_ndi_lk);
389 389 mutex_destroy(&pdp->xd_evt_lk);
390 390 kmem_free(pdp, sizeof (*pdp));
391 391 }
392 392 }
393 393
394 394 /*
395 395 * Bind the event channel for this device instance.
396 396 * Currently we only support one evtchn per device instance.
397 397 */
398 398 int
399 399 xvdi_bind_evtchn(dev_info_t *dip, evtchn_port_t evtchn)
400 400 {
401 401 struct xendev_ppd *pdp;
402 402 domid_t oeid;
403 403 int r;
404 404
405 405 pdp = ddi_get_parent_data(dip);
406 406 ASSERT(pdp != NULL);
407 407 ASSERT(pdp->xd_evtchn == INVALID_EVTCHN);
408 408
409 409 mutex_enter(&pdp->xd_evt_lk);
410 410 if (pdp->xd_devclass == XEN_CONSOLE) {
411 411 if (!DOMAIN_IS_INITDOMAIN(xen_info)) {
412 412 pdp->xd_evtchn = xen_info->console.domU.evtchn;
413 413 } else {
414 414 pdp->xd_evtchn = INVALID_EVTCHN;
415 415 mutex_exit(&pdp->xd_evt_lk);
416 416 return (DDI_SUCCESS);
417 417 }
418 418 } else {
419 419 oeid = pdp->xd_xsdev.otherend_id;
420 420 if (oeid == (domid_t)-1) {
421 421 mutex_exit(&pdp->xd_evt_lk);
422 422 return (DDI_FAILURE);
423 423 }
424 424
425 425 if ((r = xen_bind_interdomain(oeid, evtchn, &pdp->xd_evtchn))) {
426 426 xvdi_dev_error(dip, r, "bind event channel");
427 427 mutex_exit(&pdp->xd_evt_lk);
428 428 return (DDI_FAILURE);
429 429 }
430 430 }
431 431 #ifndef XPV_HVM_DRIVER
432 432 pdp->xd_ispec.intrspec_vec = ec_bind_evtchn_to_irq(pdp->xd_evtchn);
433 433 #endif
434 434 mutex_exit(&pdp->xd_evt_lk);
435 435
436 436 return (DDI_SUCCESS);
437 437 }
438 438
439 439 /*
440 440 * Allocate an event channel for this device instance.
441 441 * Currently we only support one evtchn per device instance.
442 442 */
443 443 int
444 444 xvdi_alloc_evtchn(dev_info_t *dip)
445 445 {
446 446 struct xendev_ppd *pdp;
447 447 domid_t oeid;
448 448 int rv;
449 449
450 450 pdp = ddi_get_parent_data(dip);
451 451 ASSERT(pdp != NULL);
452 452 ASSERT(pdp->xd_evtchn == INVALID_EVTCHN);
453 453
454 454 mutex_enter(&pdp->xd_evt_lk);
455 455 if (pdp->xd_devclass == XEN_CONSOLE) {
456 456 if (!DOMAIN_IS_INITDOMAIN(xen_info)) {
457 457 pdp->xd_evtchn = xen_info->console.domU.evtchn;
458 458 } else {
459 459 pdp->xd_evtchn = INVALID_EVTCHN;
460 460 mutex_exit(&pdp->xd_evt_lk);
461 461 return (DDI_SUCCESS);
462 462 }
463 463 } else {
464 464 oeid = pdp->xd_xsdev.otherend_id;
465 465 if (oeid == (domid_t)-1) {
466 466 mutex_exit(&pdp->xd_evt_lk);
467 467 return (DDI_FAILURE);
468 468 }
469 469
470 470 if ((rv = xen_alloc_unbound_evtchn(oeid, &pdp->xd_evtchn))) {
471 471 xvdi_dev_error(dip, rv, "bind event channel");
472 472 mutex_exit(&pdp->xd_evt_lk);
473 473 return (DDI_FAILURE);
474 474 }
475 475 }
476 476 #ifndef XPV_HVM_DRIVER
477 477 pdp->xd_ispec.intrspec_vec = ec_bind_evtchn_to_irq(pdp->xd_evtchn);
478 478 #endif
479 479 mutex_exit(&pdp->xd_evt_lk);
480 480
481 481 return (DDI_SUCCESS);
482 482 }
483 483
484 484 /*
485 485 * Unbind the event channel for this device instance.
486 486 * Currently we only support one evtchn per device instance.
487 487 */
488 488 void
489 489 xvdi_free_evtchn(dev_info_t *dip)
490 490 {
491 491 struct xendev_ppd *pdp;
492 492
493 493 pdp = ddi_get_parent_data(dip);
494 494 ASSERT(pdp != NULL);
495 495
496 496 mutex_enter(&pdp->xd_evt_lk);
497 497 if (pdp->xd_evtchn != INVALID_EVTCHN) {
498 498 #ifndef XPV_HVM_DRIVER
499 499 ec_unbind_irq(pdp->xd_ispec.intrspec_vec);
500 500 pdp->xd_ispec.intrspec_vec = 0;
501 501 #endif
502 502 pdp->xd_evtchn = INVALID_EVTCHN;
503 503 }
504 504 mutex_exit(&pdp->xd_evt_lk);
505 505 }
506 506
507 507 #ifndef XPV_HVM_DRIVER
508 508 /*
509 509 * Map an inter-domain communication ring for a virtual device.
510 510 * This is used by backend drivers.
511 511 */
512 512 int
513 513 xvdi_map_ring(dev_info_t *dip, size_t nentry, size_t entrysize,
514 514 grant_ref_t gref, xendev_ring_t **ringpp)
515 515 {
516 516 domid_t oeid;
517 517 gnttab_map_grant_ref_t mapop;
518 518 gnttab_unmap_grant_ref_t unmapop;
519 519 caddr_t ringva;
520 520 ddi_acc_hdl_t *ap;
521 521 ddi_acc_impl_t *iap;
522 522 xendev_ring_t *ring;
523 523 int err;
524 524 char errstr[] = "mapping in ring buffer";
525 525
526 526 ring = kmem_zalloc(sizeof (xendev_ring_t), KM_SLEEP);
527 527 oeid = xvdi_get_oeid(dip);
528 528
529 529 /* alloc va in backend dom for ring buffer */
530 530 ringva = vmem_xalloc(heap_arena, PAGESIZE, PAGESIZE,
531 531 0, 0, 0, 0, VM_SLEEP);
532 532
533 533 /* map in ring page */
534 534 hat_prepare_mapping(kas.a_hat, ringva, NULL);
535 535 mapop.host_addr = (uint64_t)(uintptr_t)ringva;
536 536 mapop.flags = GNTMAP_host_map;
537 537 mapop.ref = gref;
538 538 mapop.dom = oeid;
539 539 err = xen_map_gref(GNTTABOP_map_grant_ref, &mapop, 1, B_FALSE);
540 540 if (err) {
541 541 xvdi_fatal_error(dip, err, errstr);
542 542 goto errout1;
543 543 }
544 544
545 545 if (mapop.status != 0) {
546 546 xvdi_fatal_error(dip, err, errstr);
547 547 goto errout2;
548 548 }
549 549 ring->xr_vaddr = ringva;
550 550 ring->xr_grant_hdl = mapop.handle;
551 551 ring->xr_gref = gref;
552 552
553 553 /*
554 554 * init an acc handle and associate it w/ this ring
555 555 * this is only for backend drivers. we get the memory by calling
556 556 * vmem_xalloc(), instead of calling any ddi function, so we have
557 557 * to init an acc handle by ourselves
558 558 */
559 559 ring->xr_acc_hdl = impl_acc_hdl_alloc(KM_SLEEP, NULL);
560 560 ap = impl_acc_hdl_get(ring->xr_acc_hdl);
561 561 ap->ah_vers = VERS_ACCHDL;
562 562 ap->ah_dip = dip;
563 563 ap->ah_xfermodes = DDI_DMA_CONSISTENT;
564 564 ap->ah_acc = xendev_dc_accattr;
565 565 iap = (ddi_acc_impl_t *)ap->ah_platform_private;
566 566 iap->ahi_acc_attr |= DDI_ACCATTR_CPU_VADDR;
567 567 impl_acc_hdl_init(ap);
568 568 ap->ah_offset = 0;
569 569 ap->ah_len = (off_t)PAGESIZE;
570 570 ap->ah_addr = ring->xr_vaddr;
571 571
572 572 /* init backend ring */
573 573 xvdi_ring_init_back_ring(ring, nentry, entrysize);
574 574
575 575 *ringpp = ring;
576 576
577 577 return (DDI_SUCCESS);
578 578
579 579 errout2:
580 580 /* unmap ring page */
581 581 unmapop.host_addr = (uint64_t)(uintptr_t)ringva;
582 582 unmapop.handle = ring->xr_grant_hdl;
583 583 unmapop.dev_bus_addr = NULL;
584 584 (void) HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unmapop, 1);
585 585 hat_release_mapping(kas.a_hat, ringva);
586 586 errout1:
587 587 vmem_xfree(heap_arena, ringva, PAGESIZE);
588 588 kmem_free(ring, sizeof (xendev_ring_t));
589 589 return (DDI_FAILURE);
590 590 }
591 591
592 592 /*
593 593 * Unmap a ring for a virtual device.
594 594 * This is used by backend drivers.
595 595 */
596 596 void
597 597 xvdi_unmap_ring(xendev_ring_t *ring)
598 598 {
599 599 gnttab_unmap_grant_ref_t unmapop;
600 600
601 601 ASSERT((ring != NULL) && (ring->xr_vaddr != NULL));
602 602
603 603 impl_acc_hdl_free(ring->xr_acc_hdl);
604 604 unmapop.host_addr = (uint64_t)(uintptr_t)ring->xr_vaddr;
605 605 unmapop.handle = ring->xr_grant_hdl;
606 606 unmapop.dev_bus_addr = NULL;
607 607 (void) HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unmapop, 1);
608 608 hat_release_mapping(kas.a_hat, ring->xr_vaddr);
609 609 vmem_xfree(heap_arena, ring->xr_vaddr, PAGESIZE);
610 610 kmem_free(ring, sizeof (xendev_ring_t));
611 611 }
612 612 #endif /* XPV_HVM_DRIVER */
613 613
614 614 /*
615 615 * Re-initialise an inter-domain communications ring for the backend domain.
616 616 * ring will be re-initialized after re-grant succeed
617 617 * ring will be freed if fails to re-grant access to backend domain
618 618 * so, don't keep useful data in the ring
619 619 * used only in frontend driver
620 620 */
static void
xvdi_reinit_ring(dev_info_t *dip, grant_ref_t *gref, xendev_ring_t *ringp)
{
	paddr_t rpaddr;
	maddr_t rmaddr;

	/* The ring page must already have been allocated and bound. */
	ASSERT((ringp != NULL) && (ringp->xr_paddr != 0));
	rpaddr = ringp->xr_paddr;

	/* dom0 uses the physical address directly; guests translate to ma. */
	rmaddr = DOMAIN_IS_INITDOMAIN(xen_info) ? rpaddr : pa_to_ma(rpaddr);
	/* Re-grant the otherend access using the existing grant ref. */
	gnttab_grant_foreign_access_ref(ringp->xr_gref, xvdi_get_oeid(dip),
	    rmaddr >> PAGESHIFT, 0);
	*gref = ringp->xr_gref;

	/* init frontend ring */
	xvdi_ring_init_sring(ringp);
	xvdi_ring_init_front_ring(ringp, ringp->xr_sring.fr.nr_ents,
	    ringp->xr_entry_size);
}
640 640
641 641 /*
642 642 * allocate Xen inter-domain communications ring for Xen virtual devices
643 643 * used only in frontend driver
644 644 * if *ringpp is not NULL, we'll simply re-init it
645 645 */
646 646 int
647 647 xvdi_alloc_ring(dev_info_t *dip, size_t nentry, size_t entrysize,
648 648 grant_ref_t *gref, xendev_ring_t **ringpp)
649 649 {
650 650 size_t len;
651 651 xendev_ring_t *ring;
652 652 ddi_dma_cookie_t dma_cookie;
653 653 uint_t ncookies;
654 654 grant_ref_t ring_gref;
655 655 domid_t oeid;
656 656 maddr_t rmaddr;
657 657
658 658 if (*ringpp) {
659 659 xvdi_reinit_ring(dip, gref, *ringpp);
660 660 return (DDI_SUCCESS);
661 661 }
662 662
663 663 *ringpp = ring = kmem_zalloc(sizeof (xendev_ring_t), KM_SLEEP);
664 664 oeid = xvdi_get_oeid(dip);
665 665
666 666 /*
667 667 * Allocate page for this ring buffer
668 668 */
669 669 if (ddi_dma_alloc_handle(dip, &xendev_dc_dmaattr, DDI_DMA_SLEEP,
670 670 0, &ring->xr_dma_hdl) != DDI_SUCCESS)
671 671 goto err;
672 672
673 673 if (ddi_dma_mem_alloc(ring->xr_dma_hdl, PAGESIZE,
674 674 &xendev_dc_accattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
675 675 &ring->xr_vaddr, &len, &ring->xr_acc_hdl) != DDI_SUCCESS) {
676 676 ddi_dma_free_handle(&ring->xr_dma_hdl);
677 677 goto err;
678 678 }
679 679
680 680 if (ddi_dma_addr_bind_handle(ring->xr_dma_hdl, NULL,
681 681 ring->xr_vaddr, len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
682 682 DDI_DMA_SLEEP, 0, &dma_cookie, &ncookies) != DDI_DMA_MAPPED) {
683 683 ddi_dma_mem_free(&ring->xr_acc_hdl);
684 684 ring->xr_vaddr = NULL;
685 685 ddi_dma_free_handle(&ring->xr_dma_hdl);
686 686 goto err;
687 687 }
688 688 ASSERT(ncookies == 1);
689 689 ring->xr_paddr = dma_cookie.dmac_laddress;
690 690 rmaddr = DOMAIN_IS_INITDOMAIN(xen_info) ? ring->xr_paddr :
691 691 pa_to_ma(ring->xr_paddr);
692 692
693 693 if ((ring_gref = gnttab_grant_foreign_access(oeid,
694 694 rmaddr >> PAGESHIFT, 0)) == (grant_ref_t)-1) {
695 695 (void) ddi_dma_unbind_handle(ring->xr_dma_hdl);
696 696 ddi_dma_mem_free(&ring->xr_acc_hdl);
697 697 ring->xr_vaddr = NULL;
698 698 ddi_dma_free_handle(&ring->xr_dma_hdl);
699 699 goto err;
700 700 }
701 701 *gref = ring->xr_gref = ring_gref;
702 702
703 703 /* init frontend ring */
704 704 xvdi_ring_init_sring(ring);
705 705 xvdi_ring_init_front_ring(ring, nentry, entrysize);
706 706
707 707 return (DDI_SUCCESS);
708 708
709 709 err:
710 710 kmem_free(ring, sizeof (xendev_ring_t));
711 711 return (DDI_FAILURE);
712 712 }
713 713
714 714 /*
715 715 * Release ring buffers allocated for Xen devices
716 716 * used for frontend driver
717 717 */
void
xvdi_free_ring(xendev_ring_t *ring)
{
	ASSERT((ring != NULL) && (ring->xr_vaddr != NULL));

	/*
	 * Revoke the otherend's grant before tearing down the DMA
	 * resources backing the ring page; free order is the reverse
	 * of the allocation order in xvdi_alloc_ring().
	 */
	(void) gnttab_end_foreign_access_ref(ring->xr_gref, 0);
	(void) ddi_dma_unbind_handle(ring->xr_dma_hdl);
	ddi_dma_mem_free(&ring->xr_acc_hdl);
	ddi_dma_free_handle(&ring->xr_dma_hdl);
	kmem_free(ring, sizeof (xendev_ring_t));
}
729 729
/*
 * Create and (attempt to) online a child devinfo node for the given
 * device class, domain and virtual-device number.  Returns the new
 * dip, or NULL if no driver exists for the class or the device is
 * already closing.  Caller must hold the parent busy (ndi_devi_enter).
 */
dev_info_t *
xvdi_create_dev(dev_info_t *parent, xendev_devclass_t devclass,
    domid_t dom, int vdev)
{
	dev_info_t *dip;
	boolean_t backend;
	i_xd_cfg_t *xdcp;
	char xsnamebuf[TYPICALMAXPATHLEN];
	char *type, *node = NULL, *xsname = NULL;
	unsigned int tlen;
	int ret;

	ASSERT(DEVI_BUSY_OWNED(parent));

	/* A domain other than ourself means we are its backend. */
	backend = (dom != DOMID_SELF);
	xdcp = i_xvdi_devclass2cfg(devclass);
	ASSERT(xdcp != NULL);

	/* Soft devices (VDEV_NOXS) have no xenstore node to consult. */
	if (vdev != VDEV_NOXS) {
		if (!backend) {
			(void) snprintf(xsnamebuf, sizeof (xsnamebuf),
			    "%s/%d", xdcp->xs_path_fe, vdev);
			xsname = xsnamebuf;
			node = xdcp->node_fe;
		} else {
			(void) snprintf(xsnamebuf, sizeof (xsnamebuf),
			    "%s/%d/%d", xdcp->xs_path_be, dom, vdev);
			xsname = xsnamebuf;
			node = xdcp->node_be;
		}
	} else {
		node = xdcp->node_fe;
	}

	/* Must have a driver to use. */
	if (node == NULL)
		return (NULL);

	/*
	 * We need to check the state of this device before we go
	 * further, otherwise we'll end up with a dead loop if
	 * anything goes wrong.
	 */
	if ((xsname != NULL) &&
	    (xenbus_read_driver_state(xsname) >= XenbusStateClosing))
		return (NULL);

	ndi_devi_alloc_sleep(parent, node, DEVI_SID_NODEID, &dip);

	/*
	 * Driver binding uses the compatible property _before_ the
	 * node name, so we set the node name to the 'model' of the
	 * device (i.e. 'xnb' or 'xdb') and, if 'type' is present,
	 * encode both the model and the type in a compatible property
	 * (i.e. 'xnb,netfront' or 'xnb,SUNW_mac').  This allows a
	 * driver binding based on the <model,type> pair _before_ a
	 * binding based on the node name.
	 */
	if ((xsname != NULL) &&
	    (xenbus_read(XBT_NULL, xsname, "type", (void *)&type, &tlen)
	    == 0)) {
		size_t clen;
		char *c[1];

		clen = strlen(node) + strlen(type) + 2;
		c[0] = kmem_alloc(clen, KM_SLEEP);
		(void) snprintf(c[0], clen, "%s,%s", node, type);

		(void) ndi_prop_update_string_array(DDI_DEV_T_NONE,
		    dip, "compatible", (char **)c, 1);

		kmem_free(c[0], clen);
		kmem_free(type, tlen);
	}

	/* These properties are read back by xvdi_init_dev(). */
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, "devclass", devclass);
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, "domain", dom);
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, "vdev", vdev);

	/* Online now if the parent is attached, otherwise just bind. */
	if (i_ddi_devi_attached(parent))
		ret = ndi_devi_online(dip, 0);
	else
		ret = ndi_devi_bind_driver(dip, 0);
	if (ret != NDI_SUCCESS)
		(void) ndi_devi_offline(dip, NDI_DEVI_REMOVE);

	return (dip);
}
818 818
819 819 /*
820 820 * xendev_enum_class()
821 821 */
822 822 void
823 823 xendev_enum_class(dev_info_t *parent, xendev_devclass_t devclass)
824 824 {
825 825 boolean_t dom0 = DOMAIN_IS_INITDOMAIN(xen_info);
826 826 boolean_t domU = !dom0;
827 827 i_xd_cfg_t *xdcp;
828 828
829 829 xdcp = i_xvdi_devclass2cfg(devclass);
830 830 ASSERT(xdcp != NULL);
831 831
832 832 if (dom0 && !(xdcp->flags & XD_DOM_ZERO))
833 833 return;
834 834
835 835 if (domU && !(xdcp->flags & XD_DOM_GUEST))
836 836 return;
837 837
838 838 if (xdcp->xsdev == NULL) {
839 839 int circ;
840 840
841 841 /*
842 842 * Don't need to probe this kind of device from the
843 843 * store, just create one if it doesn't exist.
844 844 */
845 845
846 846 ndi_devi_enter(parent, &circ);
847 847 if (xvdi_find_dev(parent, devclass, DOMID_SELF, VDEV_NOXS)
848 848 == NULL)
849 849 (void) xvdi_create_dev(parent, devclass,
850 850 DOMID_SELF, VDEV_NOXS);
851 851 ndi_devi_exit(parent, circ);
852 852 } else {
853 853 /*
854 854 * Probe this kind of device from the store, both
855 855 * frontend and backend.
856 856 */
857 857 if (xdcp->node_fe != NULL) {
858 858 i_xvdi_enum_fe(parent, xdcp);
859 859 }
860 860 if (xdcp->node_be != NULL) {
861 861 i_xvdi_enum_be(parent, xdcp);
862 862 }
863 863 }
864 864 }
865 865
866 866 /*
867 867 * xendev_enum_all()
868 868 */
869 869 void
870 870 xendev_enum_all(dev_info_t *parent, boolean_t store_unavailable)
871 871 {
872 872 int i;
873 873 i_xd_cfg_t *xdcp;
874 874 boolean_t dom0 = DOMAIN_IS_INITDOMAIN(xen_info);
875 875
876 876 for (i = 0, xdcp = xdci; i < NXDC; i++, xdcp++) {
877 877 /*
878 878 * Dom0 relies on watchpoints to create non-soft
879 879 * devices - don't attempt to iterate over the store.
880 880 */
881 881 if (dom0 && (xdcp->xsdev != NULL))
882 882 continue;
883 883
884 884 /*
885 885 * If the store is not yet available, don't attempt to
886 886 * iterate.
887 887 */
888 888 if (store_unavailable && (xdcp->xsdev != NULL))
889 889 continue;
890 890
891 891 xendev_enum_class(parent, xdcp->devclass);
892 892 }
893 893 }
894 894
895 895 xendev_devclass_t
896 896 xendev_nodename_to_devclass(char *nodename)
897 897 {
898 898 int i;
899 899 i_xd_cfg_t *xdcp;
900 900
901 901 /*
902 902 * This relies on the convention that variants of a base
903 903 * driver share the same prefix and that there are no drivers
904 904 * which share a common prefix with the name of any other base
905 905 * drivers.
906 906 *
907 907 * So for a base driver 'xnb' (which is the name listed in
908 908 * xdci) the variants all begin with the string 'xnb' (in fact
909 909 * they are 'xnbe', 'xnbo' and 'xnbu') and there are no other
910 910 * base drivers which have the prefix 'xnb'.
911 911 */
912 912 ASSERT(nodename != NULL);
913 913 for (i = 0, xdcp = xdci; i < NXDC; i++, xdcp++) {
914 914 if (((xdcp->node_fe != NULL) &&
915 915 (strncmp(nodename, xdcp->node_fe,
916 916 strlen(xdcp->node_fe)) == 0)) ||
917 917 ((xdcp->node_be != NULL) &&
918 918 (strncmp(nodename, xdcp->node_be,
919 919 strlen(xdcp->node_be)) == 0)))
920 920
921 921 return (xdcp->devclass);
922 922 }
923 923 return (XEN_INVAL);
924 924 }
925 925
926 926 int
927 927 xendev_devclass_ipl(xendev_devclass_t devclass)
928 928 {
929 929 i_xd_cfg_t *xdcp;
930 930
931 931 xdcp = i_xvdi_devclass2cfg(devclass);
932 932 ASSERT(xdcp != NULL);
933 933
934 934 return (xdcp->xd_ipl);
935 935 }
936 936
937 937 /*
938 938 * Determine if a devinfo instance exists of a particular device
939 939 * class, domain and xenstore virtual device number.
940 940 */
dev_info_t *
xvdi_find_dev(dev_info_t *parent, xendev_devclass_t devclass,
    domid_t dom, int vdev)
{
	dev_info_t *dip;

	ASSERT(DEVI_BUSY_OWNED(parent));

	switch (devclass) {
	case XEN_CONSOLE:
	case XEN_XENBUS:
	case XEN_DOMCAPS:
	case XEN_BALLOON:
	case XEN_EVTCHN:
	case XEN_PRIVCMD:
		/* Console and soft devices have no vdev. */
		vdev = VDEV_NOXS;
		break;
	default:
		break;
	}

	/* Walk every child of 'parent' looking for a match. */
	for (dip = ddi_get_child(parent); dip != NULL;
	    dip = ddi_get_next_sibling(dip)) {
		int *vdevnump, *domidp, *devclsp, vdevnum;
		uint_t ndomid, nvdevnum, ndevcls;
		xendev_devclass_t devcls;
		domid_t domid;
		struct xendev_ppd *pdp = ddi_get_parent_data(dip);

		if (pdp == NULL) {
			/*
			 * No parent-private data yet (xvdi_init_dev has
			 * not run for this child); fall back to the
			 * "domain"/"vdev"/"devclass" properties set by
			 * xvdi_create_dev.  A child missing any of the
			 * three properties cannot match and is skipped.
			 */
			if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
			    DDI_PROP_DONTPASS, "domain", &domidp, &ndomid) !=
			    DDI_PROP_SUCCESS)
				continue;
			ASSERT(ndomid == 1);
			domid = (domid_t)*domidp;
			ddi_prop_free(domidp);

			if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
			    DDI_PROP_DONTPASS, "vdev", &vdevnump, &nvdevnum) !=
			    DDI_PROP_SUCCESS)
				continue;
			ASSERT(nvdevnum == 1);
			vdevnum = *vdevnump;
			ddi_prop_free(vdevnump);

			if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
			    DDI_PROP_DONTPASS, "devclass", &devclsp,
			    &ndevcls) != DDI_PROP_SUCCESS)
				continue;
			ASSERT(ndevcls == 1);
			devcls = (xendev_devclass_t)*devclsp;
			ddi_prop_free(devclsp);
		} else {
			/* Parent-private data is authoritative when present. */
			domid = pdp->xd_domain;
			vdevnum = pdp->xd_vdevnum;
			devcls = pdp->xd_devclass;
		}

		if ((domid == dom) && (vdevnum == vdev) && (devcls == devclass))
			return (dip);
	}
	return (NULL);
}
1006 1006
1007 1007 int
1008 1008 xvdi_get_evtchn(dev_info_t *xdip)
1009 1009 {
1010 1010 struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
1011 1011
1012 1012 ASSERT(pdp != NULL);
1013 1013 return (pdp->xd_evtchn);
1014 1014 }
1015 1015
1016 1016 int
1017 1017 xvdi_get_vdevnum(dev_info_t *xdip)
1018 1018 {
1019 1019 struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
1020 1020
1021 1021 ASSERT(pdp != NULL);
1022 1022 return (pdp->xd_vdevnum);
1023 1023 }
1024 1024
1025 1025 char *
1026 1026 xvdi_get_xsname(dev_info_t *xdip)
1027 1027 {
1028 1028 struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
1029 1029
1030 1030 ASSERT(pdp != NULL);
1031 1031 return ((char *)(pdp->xd_xsdev.nodename));
1032 1032 }
1033 1033
1034 1034 char *
1035 1035 xvdi_get_oename(dev_info_t *xdip)
1036 1036 {
1037 1037 struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
1038 1038
1039 1039 ASSERT(pdp != NULL);
1040 1040 if (pdp->xd_devclass == XEN_CONSOLE)
1041 1041 return (NULL);
1042 1042 return ((char *)(pdp->xd_xsdev.otherend));
1043 1043 }
1044 1044
1045 1045 struct xenbus_device *
1046 1046 xvdi_get_xsd(dev_info_t *xdip)
1047 1047 {
1048 1048 struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
1049 1049
1050 1050 ASSERT(pdp != NULL);
1051 1051 return (&pdp->xd_xsdev);
1052 1052 }
1053 1053
1054 1054 domid_t
1055 1055 xvdi_get_oeid(dev_info_t *xdip)
1056 1056 {
1057 1057 struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
1058 1058
1059 1059 ASSERT(pdp != NULL);
1060 1060 if (pdp->xd_devclass == XEN_CONSOLE)
1061 1061 return ((domid_t)-1);
1062 1062 return ((domid_t)(pdp->xd_xsdev.otherend_id));
1063 1063 }
1064 1064
1065 1065 void
1066 1066 xvdi_dev_error(dev_info_t *dip, int errno, char *errstr)
1067 1067 {
1068 1068 struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1069 1069
1070 1070 ASSERT(pdp != NULL);
1071 1071 xenbus_dev_error(&pdp->xd_xsdev, errno, errstr);
1072 1072 }
1073 1073
1074 1074 void
1075 1075 xvdi_fatal_error(dev_info_t *dip, int errno, char *errstr)
1076 1076 {
1077 1077 struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1078 1078
1079 1079 ASSERT(pdp != NULL);
1080 1080 xenbus_dev_fatal(&pdp->xd_xsdev, errno, errstr);
1081 1081 }
1082 1082
/*
 * Taskq handler (runs on pdp->xd_oe_taskq) that delivers an other-end
 * state change event to the leaf driver, or applies the default
 * close/remove policy when the driver has not registered a handler.
 * 'arg' is an i_oestate_evt_t allocated by the dispatcher and freed
 * here.
 */
static void
i_xvdi_oestate_handler(void *arg)
{
	i_oestate_evt_t *evt = (i_oestate_evt_t *)arg;
	dev_info_t *dip = evt->dip;
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
	XenbusState oestate = pdp->xd_xsdev.otherend_state;
	XenbusState curr_oestate = evt->state;
	ddi_eventcookie_t evc;

	/* evt is alloc'ed in i_xvdi_oestate_cb */
	kmem_free(evt, sizeof (i_oestate_evt_t));

	/*
	 * If the oestate we're handling is not the latest one,
	 * it does not make any sense to continue handling it.
	 */
	if (curr_oestate != oestate)
		return;

	mutex_enter(&pdp->xd_ndi_lk);

	if (pdp->xd_oe_ehid != NULL) {
		/* send notification to driver */
		if (ddi_get_eventcookie(dip, XS_OE_STATE,
		    &evc) == DDI_SUCCESS) {
			/*
			 * Drop xd_ndi_lk across the upcall so the
			 * driver's handler may take it itself.
			 */
			mutex_exit(&pdp->xd_ndi_lk);
			(void) ndi_post_event(dip, dip, evc, &oestate);
			mutex_enter(&pdp->xd_ndi_lk);
		}
	} else {
		/*
		 * take default action, if driver hasn't registered its
		 * event handler yet
		 */
		if (oestate == XenbusStateClosing) {
			(void) xvdi_switch_state(dip, XBT_NULL,
			    XenbusStateClosed);
		} else if (oestate == XenbusStateClosed) {
			(void) xvdi_switch_state(dip, XBT_NULL,
			    XenbusStateClosed);
			(void) xvdi_post_event(dip, XEN_HP_REMOVE);
		}
	}

	mutex_exit(&pdp->xd_ndi_lk);

	/*
	 * We'll try to remove the devinfo node of this device if the
	 * other end has closed.  The offline is done on the parent's
	 * taskq so it happens outside this handler's context.
	 */
	if (oestate == XenbusStateClosed)
		(void) ddi_taskq_dispatch(DEVI(ddi_get_parent(dip))->devi_taskq,
		    xendev_offline_device, dip, DDI_SLEEP);
}
1138 1138
/*
 * Taskq handler that reads the device's "hotplug-status" xenstore node
 * and posts the corresponding XS_HP_STATE event to the leaf driver.
 * Any value other than "connected" is reported as Unrecognized.
 */
static void
i_xvdi_hpstate_handler(void *arg)
{
	dev_info_t *dip = (dev_info_t *)arg;
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
	ddi_eventcookie_t evc;
	char *hp_status;
	unsigned int hpl;

	mutex_enter(&pdp->xd_ndi_lk);
	if ((ddi_get_eventcookie(dip, XS_HP_STATE, &evc) == DDI_SUCCESS) &&
	    (xenbus_read(XBT_NULL, pdp->xd_hp_watch.node, "",
	    (void *)&hp_status, &hpl) == 0)) {

		xendev_hotplug_state_t new_state = Unrecognized;

		if (strcmp(hp_status, "connected") == 0)
			new_state = Connected;

		/* Drop the lock before calling up into the driver. */
		mutex_exit(&pdp->xd_ndi_lk);

		(void) ndi_post_event(dip, dip, evc, &new_state);
		/* hp_status was allocated by xenbus_read(); hpl is its size */
		kmem_free(hp_status, hpl);
		return;
	}
	mutex_exit(&pdp->xd_ndi_lk);
}
1166 1166
1167 1167 void
1168 1168 xvdi_notify_oe(dev_info_t *dip)
1169 1169 {
1170 1170 struct xendev_ppd *pdp;
1171 1171
1172 1172 pdp = ddi_get_parent_data(dip);
1173 1173 ASSERT(pdp->xd_evtchn != INVALID_EVTCHN);
1174 1174 ec_notify_via_evtchn(pdp->xd_evtchn);
1175 1175 }
1176 1176
1177 1177 static void
1178 1178 i_xvdi_bepath_cb(struct xenbus_watch *w, const char **vec, unsigned int len)
1179 1179 {
1180 1180 dev_info_t *dip = (dev_info_t *)w->dev;
1181 1181 struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1182 1182 char *be = NULL;
1183 1183 unsigned int bel;
1184 1184
1185 1185 ASSERT(len > XS_WATCH_PATH);
1186 1186 ASSERT(vec[XS_WATCH_PATH] != NULL);
1187 1187
1188 1188 /*
1189 1189 * If the backend is not the same as that we already stored,
1190 1190 * re-set our watch for its' state.
1191 1191 */
1192 1192 if ((xenbus_read(XBT_NULL, "", vec[XS_WATCH_PATH], (void *)be, &bel)
1193 1193 == 0) && (strcmp(be, pdp->xd_xsdev.otherend) != 0))
1194 1194 (void) i_xvdi_add_watch_oestate(dip);
1195 1195
1196 1196 if (be != NULL) {
1197 1197 ASSERT(bel > 0);
1198 1198 kmem_free(be, bel);
1199 1199 }
1200 1200 }
1201 1201
1202 1202 static void
1203 1203 i_xvdi_xb_watch_free(xd_xb_watches_t *xxwp)
1204 1204 {
1205 1205 ASSERT(xxwp->xxw_ref == 0);
1206 1206 strfree((char *)xxwp->xxw_watch.node);
1207 1207 kmem_free(xxwp, sizeof (*xxwp));
1208 1208 }
1209 1209
1210 1210 static void
1211 1211 i_xvdi_xb_watch_release(xd_xb_watches_t *xxwp)
1212 1212 {
1213 1213 ASSERT(MUTEX_HELD(&xxwp->xxw_xppd->xd_ndi_lk));
1214 1214 ASSERT(xxwp->xxw_ref > 0);
1215 1215 if (--xxwp->xxw_ref == 0)
1216 1216 i_xvdi_xb_watch_free(xxwp);
1217 1217 }
1218 1218
1219 1219 static void
1220 1220 i_xvdi_xb_watch_hold(xd_xb_watches_t *xxwp)
1221 1221 {
1222 1222 ASSERT(MUTEX_HELD(&xxwp->xxw_xppd->xd_ndi_lk));
1223 1223 ASSERT(xxwp->xxw_ref > 0);
1224 1224 xxwp->xxw_ref++;
1225 1225 }
1226 1226
1227 1227 static void
1228 1228 i_xvdi_xb_watch_cb_tq(void *arg)
1229 1229 {
1230 1230 xd_xb_watches_t *xxwp = (xd_xb_watches_t *)arg;
1231 1231 dev_info_t *dip = (dev_info_t *)xxwp->xxw_watch.dev;
1232 1232 struct xendev_ppd *pdp = xxwp->xxw_xppd;
1233 1233
1234 1234 xxwp->xxw_cb(dip, xxwp->xxw_watch.node, xxwp->xxw_arg);
1235 1235
1236 1236 mutex_enter(&pdp->xd_ndi_lk);
1237 1237 i_xvdi_xb_watch_release(xxwp);
1238 1238 mutex_exit(&pdp->xd_ndi_lk);
1239 1239 }
1240 1240
1241 1241 static void
1242 1242 i_xvdi_xb_watch_cb(struct xenbus_watch *w, const char **vec, unsigned int len)
1243 1243 {
1244 1244 dev_info_t *dip = (dev_info_t *)w->dev;
1245 1245 struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1246 1246 xd_xb_watches_t *xxwp;
1247 1247
1248 1248 ASSERT(len > XS_WATCH_PATH);
1249 1249 ASSERT(vec[XS_WATCH_PATH] != NULL);
1250 1250
1251 1251 mutex_enter(&pdp->xd_ndi_lk);
1252 1252 for (xxwp = list_head(&pdp->xd_xb_watches); xxwp != NULL;
1253 1253 xxwp = list_next(&pdp->xd_xb_watches, xxwp)) {
1254 1254 if (w == &xxwp->xxw_watch)
1255 1255 break;
1256 1256 }
1257 1257
1258 1258 if (xxwp == NULL) {
1259 1259 mutex_exit(&pdp->xd_ndi_lk);
1260 1260 return;
1261 1261 }
1262 1262
1263 1263 i_xvdi_xb_watch_hold(xxwp);
1264 1264 (void) ddi_taskq_dispatch(pdp->xd_xb_watch_taskq,
1265 1265 i_xvdi_xb_watch_cb_tq, xxwp, DDI_SLEEP);
1266 1266 mutex_exit(&pdp->xd_ndi_lk);
1267 1267 }
1268 1268
1269 1269 /*
1270 1270 * Any watches registered with xvdi_add_xb_watch_handler() get torn down during
1271 1271 * a suspend operation. So if a frontend driver want's to use these interfaces,
1272 1272 * that driver is responsible for re-registering any watches it had before
1273 1273 * the suspend operation.
1274 1274 */
1275 1275 int
1276 1276 xvdi_add_xb_watch_handler(dev_info_t *dip, const char *dir, const char *node,
1277 1277 xvdi_xb_watch_cb_t cb, void *arg)
1278 1278 {
1279 1279 struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1280 1280 xd_xb_watches_t *xxw_new, *xxwp;
1281 1281 char *path;
1282 1282 int n;
1283 1283
1284 1284 ASSERT((dip != NULL) && (dir != NULL) && (node != NULL));
1285 1285 ASSERT(cb != NULL);
1286 1286
1287 1287 n = strlen(dir) + 1 + strlen(node) + 1;
1288 1288 path = kmem_zalloc(n, KM_SLEEP);
1289 1289 (void) strlcat(path, dir, n);
1290 1290 (void) strlcat(path, "/", n);
1291 1291 (void) strlcat(path, node, n);
1292 1292 ASSERT((strlen(path) + 1) == n);
1293 1293
1294 1294 xxw_new = kmem_zalloc(sizeof (*xxw_new), KM_SLEEP);
1295 1295 xxw_new->xxw_ref = 1;
1296 1296 xxw_new->xxw_watch.node = path;
1297 1297 xxw_new->xxw_watch.callback = i_xvdi_xb_watch_cb;
1298 1298 xxw_new->xxw_watch.dev = (struct xenbus_device *)dip;
1299 1299 xxw_new->xxw_xppd = pdp;
1300 1300 xxw_new->xxw_cb = cb;
1301 1301 xxw_new->xxw_arg = arg;
1302 1302
1303 1303 mutex_enter(&pdp->xd_ndi_lk);
1304 1304
1305 1305 /*
1306 1306 * If this is the first watch we're setting up, create a taskq
1307 1307 * to dispatch watch events and initialize the watch list.
1308 1308 */
1309 1309 if (pdp->xd_xb_watch_taskq == NULL) {
1310 1310 char tq_name[TASKQ_NAMELEN];
1311 1311
1312 1312 ASSERT(list_is_empty(&pdp->xd_xb_watches));
1313 1313
1314 1314 (void) snprintf(tq_name, sizeof (tq_name),
1315 1315 "%s_xb_watch_tq", ddi_get_name(dip));
1316 1316
1317 1317 if ((pdp->xd_xb_watch_taskq = ddi_taskq_create(dip, tq_name,
1318 1318 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1319 1319 i_xvdi_xb_watch_release(xxw_new);
1320 1320 mutex_exit(&pdp->xd_ndi_lk);
1321 1321 return (DDI_FAILURE);
1322 1322 }
1323 1323 }
1324 1324
1325 1325 /* Don't allow duplicate watches to be registered */
1326 1326 for (xxwp = list_head(&pdp->xd_xb_watches); xxwp != NULL;
1327 1327 xxwp = list_next(&pdp->xd_xb_watches, xxwp)) {
1328 1328
1329 1329 ASSERT(strcmp(xxwp->xxw_watch.node, path) != 0);
1330 1330 if (strcmp(xxwp->xxw_watch.node, path) != 0)
1331 1331 continue;
1332 1332 i_xvdi_xb_watch_release(xxw_new);
1333 1333 mutex_exit(&pdp->xd_ndi_lk);
1334 1334 return (DDI_FAILURE);
1335 1335 }
1336 1336
1337 1337 if (register_xenbus_watch(&xxw_new->xxw_watch) != 0) {
1338 1338 if (list_is_empty(&pdp->xd_xb_watches)) {
1339 1339 ddi_taskq_destroy(pdp->xd_xb_watch_taskq);
1340 1340 pdp->xd_xb_watch_taskq = NULL;
1341 1341 }
1342 1342 i_xvdi_xb_watch_release(xxw_new);
1343 1343 mutex_exit(&pdp->xd_ndi_lk);
1344 1344 return (DDI_FAILURE);
1345 1345 }
1346 1346
1347 1347 list_insert_head(&pdp->xd_xb_watches, xxw_new);
1348 1348 mutex_exit(&pdp->xd_ndi_lk);
1349 1349 return (DDI_SUCCESS);
1350 1350 }
1351 1351
1352 1352 /*
1353 1353 * Tear down all xenbus watches registered by the specified dip.
1354 1354 */
1355 1355 void
1356 1356 xvdi_remove_xb_watch_handlers(dev_info_t *dip)
1357 1357 {
1358 1358 struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1359 1359 xd_xb_watches_t *xxwp;
1360 1360 ddi_taskq_t *tq;
1361 1361
1362 1362 mutex_enter(&pdp->xd_ndi_lk);
1363 1363
1364 1364 while ((xxwp = list_remove_head(&pdp->xd_xb_watches)) != NULL) {
1365 1365 mutex_exit(&pdp->xd_ndi_lk);
1366 1366 unregister_xenbus_watch(&xxwp->xxw_watch);
1367 1367 mutex_enter(&pdp->xd_ndi_lk);
1368 1368 i_xvdi_xb_watch_release(xxwp);
1369 1369 }
1370 1370 ASSERT(list_is_empty(&pdp->xd_xb_watches));
1371 1371
1372 1372 /*
1373 1373 * We can't hold xd_ndi_lk while we destroy the xd_xb_watch_taskq.
1374 1374 * This is because if there are currently any executing taskq threads,
1375 1375 * we will block until they are finished, and to finish they need
1376 1376 * to aquire xd_ndi_lk in i_xvdi_xb_watch_cb_tq() so they can release
1377 1377 * their reference on their corresponding xxwp structure.
1378 1378 */
1379 1379 tq = pdp->xd_xb_watch_taskq;
1380 1380 pdp->xd_xb_watch_taskq = NULL;
1381 1381 mutex_exit(&pdp->xd_ndi_lk);
1382 1382 if (tq != NULL)
1383 1383 ddi_taskq_destroy(tq);
1384 1384 }
1385 1385
/*
 * Set up the watch on the other end's xenbus state for this device,
 * creating the single-threaded delivery taskq on first use.  Caller
 * holds xd_ndi_lk.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
i_xvdi_add_watch_oestate(dev_info_t *dip)
{
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);
	ASSERT(pdp->xd_xsdev.nodename != NULL);
	ASSERT(mutex_owned(&pdp->xd_ndi_lk));

	/*
	 * Create taskq for delivering other end state change event to
	 * this device later.
	 *
	 * Set nthreads to 1 to make sure that events can be delivered
	 * in order.
	 *
	 * Note: It is _not_ guaranteed that driver can see every
	 * xenstore change under the path that it is watching. If two
	 * changes happen consecutively in a very short amount of
	 * time, it is likely that the driver will see only the last
	 * one.
	 */
	if (pdp->xd_oe_taskq == NULL)
		if ((pdp->xd_oe_taskq = ddi_taskq_create(dip,
		    "xendev_oe_taskq", 1, TASKQ_DEFAULTPRI, 0)) == NULL)
			return (DDI_FAILURE);

	/*
	 * Watch for changes to the XenbusState of otherend.
	 */
	pdp->xd_xsdev.otherend_state = XenbusStateUnknown;
	pdp->xd_xsdev.otherend_changed = i_xvdi_oestate_cb;

	if (talk_to_otherend(&pdp->xd_xsdev) != 0) {
		/* Roll back the watch and taskq set up above. */
		i_xvdi_rem_watch_oestate(dip);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
1426 1426
/*
 * Tear down the other-end state watch and its delivery taskq, and
 * reset the xenbus device's otherend bookkeeping.  Caller holds
 * xd_ndi_lk; the lock is dropped around the blocking unregister and
 * taskq-destroy calls.
 */
static void
i_xvdi_rem_watch_oestate(dev_info_t *dip)
{
	struct xendev_ppd *pdp;
	struct xenbus_device *dev;

	pdp = ddi_get_parent_data(dip);
	ASSERT(pdp != NULL);
	ASSERT(mutex_owned(&pdp->xd_ndi_lk));

	dev = &pdp->xd_xsdev;

	/* Unwatch for changes to XenbusState of otherend */
	if (dev->otherend_watch.node != NULL) {
		mutex_exit(&pdp->xd_ndi_lk);
		unregister_xenbus_watch(&dev->otherend_watch);
		mutex_enter(&pdp->xd_ndi_lk);
	}

	/* make sure no event handler is running */
	if (pdp->xd_oe_taskq != NULL) {
		/* taskq destroy waits for running handlers to drain */
		mutex_exit(&pdp->xd_ndi_lk);
		ddi_taskq_destroy(pdp->xd_oe_taskq);
		mutex_enter(&pdp->xd_ndi_lk);
		pdp->xd_oe_taskq = NULL;
	}

	/* clean up */
	dev->otherend_state = XenbusStateUnknown;
	dev->otherend_id = (domid_t)-1;
	if (dev->otherend_watch.node != NULL)
		kmem_free((void *)dev->otherend_watch.node,
		    strlen(dev->otherend_watch.node) + 1);
	dev->otherend_watch.node = NULL;
	if (dev->otherend != NULL)
		kmem_free((void *)dev->otherend, strlen(dev->otherend) + 1);
	dev->otherend = NULL;
}
1465 1465
/*
 * Set up the watch on a backend device's "hotplug-status" xenstore
 * node, creating the single-threaded delivery taskq on first use.
 * Caller holds xd_ndi_lk.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
i_xvdi_add_watch_hpstate(dev_info_t *dip)
{
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);
	ASSERT(pdp->xd_xsdev.frontend == 0);
	ASSERT(mutex_owned(&pdp->xd_ndi_lk));

	/*
	 * Create taskq for delivering hotplug status change event to
	 * this device later.
	 *
	 * Set nthreads to 1 to make sure that events can be delivered
	 * in order.
	 *
	 * Note: It is _not_ guaranteed that driver can see every
	 * hotplug status change under the path that it is
	 * watching. If two changes happen consecutively in a very
	 * short amount of time, it is likely that the driver only
	 * sees the last one.
	 */
	if (pdp->xd_hp_taskq == NULL)
		if ((pdp->xd_hp_taskq = ddi_taskq_create(dip,
		    "xendev_hp_taskq", 1, TASKQ_DEFAULTPRI, 0)) == NULL)
			return (DDI_FAILURE);

	if (pdp->xd_hp_watch.node == NULL) {
		size_t len;
		char *path;

		ASSERT(pdp->xd_xsdev.nodename != NULL);

		/* Watch path is "<nodename>/hotplug-status". */
		len = strlen(pdp->xd_xsdev.nodename) +
		    strlen("/hotplug-status") + 1;
		path = kmem_alloc(len, KM_SLEEP);
		(void) snprintf(path, len, "%s/hotplug-status",
		    pdp->xd_xsdev.nodename);

		pdp->xd_hp_watch.node = path;
		pdp->xd_hp_watch.callback = i_xvdi_hpstate_cb;
		pdp->xd_hp_watch.dev = (struct xenbus_device *)dip; /* yuck! */
		if (register_xenbus_watch(&pdp->xd_hp_watch) != 0) {
			/* Roll back the taskq and path set up above. */
			i_xvdi_rem_watch_hpstate(dip);
			return (DDI_FAILURE);
		}
	}

	return (DDI_SUCCESS);
}
1516 1516
/*
 * Tear down a backend device's "hotplug-status" watch and its
 * delivery taskq.  Caller holds xd_ndi_lk; the lock is dropped
 * around the blocking unregister and taskq-destroy calls.
 */
static void
i_xvdi_rem_watch_hpstate(dev_info_t *dip)
{
	struct xendev_ppd *pdp;
	pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);
	ASSERT(pdp->xd_xsdev.frontend == 0);
	ASSERT(mutex_owned(&pdp->xd_ndi_lk));

	/* Unwatch for changes to "hotplug-status" node for backend device. */
	if (pdp->xd_hp_watch.node != NULL) {
		mutex_exit(&pdp->xd_ndi_lk);
		unregister_xenbus_watch(&pdp->xd_hp_watch);
		mutex_enter(&pdp->xd_ndi_lk);
	}

	/* Make sure no event handler is running. */
	if (pdp->xd_hp_taskq != NULL) {
		mutex_exit(&pdp->xd_ndi_lk);
		ddi_taskq_destroy(pdp->xd_hp_taskq);
		mutex_enter(&pdp->xd_ndi_lk);
		pdp->xd_hp_taskq = NULL;
	}

	/* Clean up. */
	if (pdp->xd_hp_watch.node != NULL) {
		kmem_free((void *)pdp->xd_hp_watch.node,
		    strlen(pdp->xd_hp_watch.node) + 1);
		pdp->xd_hp_watch.node = NULL;
	}
}
1549 1549
1550 1550 static int
1551 1551 i_xvdi_add_watches(dev_info_t *dip)
1552 1552 {
1553 1553 struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1554 1554
1555 1555 ASSERT(pdp != NULL);
1556 1556
1557 1557 mutex_enter(&pdp->xd_ndi_lk);
1558 1558
1559 1559 if (i_xvdi_add_watch_oestate(dip) != DDI_SUCCESS) {
1560 1560 mutex_exit(&pdp->xd_ndi_lk);
1561 1561 return (DDI_FAILURE);
1562 1562 }
1563 1563
1564 1564 if (pdp->xd_xsdev.frontend == 1) {
1565 1565 /*
1566 1566 * Frontend devices must watch for the backend path
1567 1567 * changing.
1568 1568 */
1569 1569 if (i_xvdi_add_watch_bepath(dip) != DDI_SUCCESS)
1570 1570 goto unwatch_and_fail;
1571 1571 } else {
1572 1572 /*
1573 1573 * Backend devices must watch for hotplug events.
1574 1574 */
1575 1575 if (i_xvdi_add_watch_hpstate(dip) != DDI_SUCCESS)
1576 1576 goto unwatch_and_fail;
1577 1577 }
1578 1578
1579 1579 mutex_exit(&pdp->xd_ndi_lk);
1580 1580
1581 1581 return (DDI_SUCCESS);
1582 1582
1583 1583 unwatch_and_fail:
1584 1584 i_xvdi_rem_watch_oestate(dip);
1585 1585 mutex_exit(&pdp->xd_ndi_lk);
1586 1586
1587 1587 return (DDI_FAILURE);
1588 1588 }
1589 1589
1590 1590 static void
1591 1591 i_xvdi_rem_watches(dev_info_t *dip)
1592 1592 {
1593 1593 struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1594 1594
1595 1595 ASSERT(pdp != NULL);
1596 1596
1597 1597 mutex_enter(&pdp->xd_ndi_lk);
1598 1598
1599 1599 i_xvdi_rem_watch_oestate(dip);
1600 1600
1601 1601 if (pdp->xd_xsdev.frontend == 1)
1602 1602 i_xvdi_rem_watch_bepath(dip);
1603 1603 else
1604 1604 i_xvdi_rem_watch_hpstate(dip);
1605 1605
1606 1606 mutex_exit(&pdp->xd_ndi_lk);
1607 1607
1608 1608 xvdi_remove_xb_watch_handlers(dip);
1609 1609 }
1610 1610
1611 1611 static int
1612 1612 i_xvdi_add_watch_bepath(dev_info_t *dip)
1613 1613 {
1614 1614 struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1615 1615
1616 1616 ASSERT(pdp != NULL);
1617 1617 ASSERT(pdp->xd_xsdev.frontend == 1);
1618 1618
1619 1619 /*
1620 1620 * Frontend devices need to watch for the backend path changing.
1621 1621 */
1622 1622 if (pdp->xd_bepath_watch.node == NULL) {
1623 1623 size_t len;
1624 1624 char *path;
1625 1625
1626 1626 ASSERT(pdp->xd_xsdev.nodename != NULL);
1627 1627
1628 1628 len = strlen(pdp->xd_xsdev.nodename) + strlen("/backend") + 1;
1629 1629 path = kmem_alloc(len, KM_SLEEP);
1630 1630 (void) snprintf(path, len, "%s/backend",
1631 1631 pdp->xd_xsdev.nodename);
1632 1632
1633 1633 pdp->xd_bepath_watch.node = path;
1634 1634 pdp->xd_bepath_watch.callback = i_xvdi_bepath_cb;
1635 1635 pdp->xd_bepath_watch.dev = (struct xenbus_device *)dip;
1636 1636 if (register_xenbus_watch(&pdp->xd_bepath_watch) != 0) {
1637 1637 kmem_free(path, len);
1638 1638 pdp->xd_bepath_watch.node = NULL;
1639 1639 return (DDI_FAILURE);
1640 1640 }
1641 1641 }
1642 1642
1643 1643 return (DDI_SUCCESS);
1644 1644 }
1645 1645
1646 1646 static void
1647 1647 i_xvdi_rem_watch_bepath(dev_info_t *dip)
1648 1648 {
1649 1649 struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1650 1650
1651 1651 ASSERT(pdp != NULL);
1652 1652 ASSERT(pdp->xd_xsdev.frontend == 1);
1653 1653 ASSERT(mutex_owned(&pdp->xd_ndi_lk));
1654 1654
1655 1655 if (pdp->xd_bepath_watch.node != NULL) {
1656 1656 mutex_exit(&pdp->xd_ndi_lk);
1657 1657 unregister_xenbus_watch(&pdp->xd_bepath_watch);
1658 1658 mutex_enter(&pdp->xd_ndi_lk);
1659 1659
1660 1660 kmem_free((void *)(pdp->xd_bepath_watch.node),
1661 1661 strlen(pdp->xd_bepath_watch.node) + 1);
1662 1662 pdp->xd_bepath_watch.node = NULL;
1663 1663 }
1664 1664 }
1665 1665
1666 1666 int
1667 1667 xvdi_switch_state(dev_info_t *dip, xenbus_transaction_t xbt,
1668 1668 XenbusState newState)
1669 1669 {
1670 1670 int rv;
1671 1671 struct xendev_ppd *pdp;
1672 1672
1673 1673 pdp = ddi_get_parent_data(dip);
1674 1674 ASSERT(pdp != NULL);
1675 1675
1676 1676 XVDI_DPRINTF(XVDI_DBG_STATE,
1677 1677 "xvdi_switch_state: %s@%s's xenbus state moves to %d\n",
1678 1678 ddi_binding_name(dip) == NULL ? "null" : ddi_binding_name(dip),
1679 1679 ddi_get_name_addr(dip) == NULL ? "null" : ddi_get_name_addr(dip),
1680 1680 newState);
1681 1681
1682 1682 rv = xenbus_switch_state(&pdp->xd_xsdev, xbt, newState);
1683 1683 if (rv > 0)
1684 1684 cmn_err(CE_WARN, "xvdi_switch_state: change state failed");
1685 1685
1686 1686 return (rv);
1687 1687 }
1688 1688
1689 1689 /*
1690 1690 * Notify hotplug script running in userland
1691 1691 */
1692 1692 int
1693 1693 xvdi_post_event(dev_info_t *dip, xendev_hotplug_cmd_t hpc)
1694 1694 {
1695 1695 struct xendev_ppd *pdp;
1696 1696 nvlist_t *attr_list = NULL;
1697 1697 i_xd_cfg_t *xdcp;
1698 1698 sysevent_id_t eid;
1699 1699 int err;
1700 1700 char devname[256]; /* XXPV dme: ? */
1701 1701
1702 1702 pdp = ddi_get_parent_data(dip);
1703 1703 ASSERT(pdp != NULL);
1704 1704
1705 1705 xdcp = i_xvdi_devclass2cfg(pdp->xd_devclass);
1706 1706 ASSERT(xdcp != NULL);
1707 1707
1708 1708 (void) snprintf(devname, sizeof (devname) - 1, "%s%d",
1709 1709 ddi_driver_name(dip), ddi_get_instance(dip));
1710 1710
1711 1711 err = nvlist_alloc(&attr_list, NV_UNIQUE_NAME, KM_NOSLEEP);
1712 1712 if (err != DDI_SUCCESS)
1713 1713 goto failure;
1714 1714
1715 1715 err = nvlist_add_int32(attr_list, "domain", pdp->xd_domain);
1716 1716 if (err != DDI_SUCCESS)
1717 1717 goto failure;
1718 1718 err = nvlist_add_int32(attr_list, "vdev", pdp->xd_vdevnum);
1719 1719 if (err != DDI_SUCCESS)
1720 1720 goto failure;
1721 1721 err = nvlist_add_string(attr_list, "devclass", xdcp->xsdev);
1722 1722 if (err != DDI_SUCCESS)
1723 1723 goto failure;
1724 1724 err = nvlist_add_string(attr_list, "device", devname);
1725 1725 if (err != DDI_SUCCESS)
1726 1726 goto failure;
1727 1727 err = nvlist_add_string(attr_list, "fob",
1728 1728 ((pdp->xd_xsdev.frontend == 1) ? "frontend" : "backend"));
1729 1729 if (err != DDI_SUCCESS)
1730 1730 goto failure;
1731 1731
1732 1732 switch (hpc) {
1733 1733 case XEN_HP_ADD:
1734 1734 err = ddi_log_sysevent(dip, DDI_VENDOR_SUNW, "EC_xendev",
1735 1735 "add", attr_list, &eid, DDI_NOSLEEP);
1736 1736 break;
↓ open down ↓ |
1736 lines elided |
↑ open up ↑ |
1737 1737 case XEN_HP_REMOVE:
1738 1738 err = ddi_log_sysevent(dip, DDI_VENDOR_SUNW, "EC_xendev",
1739 1739 "remove", attr_list, &eid, DDI_NOSLEEP);
1740 1740 break;
1741 1741 default:
1742 1742 err = DDI_FAILURE;
1743 1743 goto failure;
1744 1744 }
1745 1745
1746 1746 failure:
1747 - if (attr_list != NULL)
1748 - nvlist_free(attr_list);
1747 + nvlist_free(attr_list);
1749 1748
1750 1749 return (err);
1751 1750 }
1752 1751
1753 1752 /* ARGSUSED */
1754 1753 static void
1755 1754 i_xvdi_probe_path_cb(struct xenbus_watch *w, const char **vec,
1756 1755 unsigned int len)
1757 1756 {
1758 1757 char *path;
1759 1758
1760 1759 if (xendev_dip == NULL)
1761 1760 xendev_dip = ddi_find_devinfo("xpvd", -1, 0);
1762 1761
1763 1762 path = i_ddi_strdup((char *)vec[XS_WATCH_PATH], KM_SLEEP);
1764 1763
1765 1764 (void) ddi_taskq_dispatch(DEVI(xendev_dip)->devi_taskq,
1766 1765 i_xvdi_probe_path_handler, (void *)path, DDI_SLEEP);
1767 1766 }
1768 1767
1769 1768 static void
1770 1769 i_xvdi_watch_device(char *path)
1771 1770 {
1772 1771 struct xenbus_watch *w;
1773 1772
1774 1773 ASSERT(path != NULL);
1775 1774
1776 1775 w = kmem_zalloc(sizeof (*w), KM_SLEEP);
1777 1776 w->node = path;
1778 1777 w->callback = &i_xvdi_probe_path_cb;
1779 1778 w->dev = NULL;
1780 1779
1781 1780 if (register_xenbus_watch(w) != 0) {
1782 1781 cmn_err(CE_WARN, "i_xvdi_watch_device: "
1783 1782 "cannot set watch on %s", path);
1784 1783 kmem_free(w, sizeof (*w));
1785 1784 return;
1786 1785 }
1787 1786 }
1788 1787
1789 1788 void
1790 1789 xvdi_watch_devices(int newstate)
1791 1790 {
1792 1791 int devclass;
1793 1792
1794 1793 /*
1795 1794 * Watch for devices being created in the store.
1796 1795 */
1797 1796 if (newstate == XENSTORE_DOWN)
1798 1797 return;
1799 1798 for (devclass = 0; devclass < NXDC; devclass++) {
1800 1799 if (xdci[devclass].xs_path_fe != NULL)
1801 1800 i_xvdi_watch_device(xdci[devclass].xs_path_fe);
1802 1801 if (xdci[devclass].xs_path_be != NULL)
1803 1802 i_xvdi_watch_device(xdci[devclass].xs_path_be);
1804 1803 }
1805 1804 }
1806 1805
1807 1806 /*
1808 1807 * Iterate over the store looking for backend devices to create.
1809 1808 */
1810 1809 static void
1811 1810 i_xvdi_enum_be(dev_info_t *parent, i_xd_cfg_t *xdcp)
1812 1811 {
1813 1812 char **domains;
1814 1813 unsigned int ndomains;
1815 1814 int ldomains, i;
1816 1815
1817 1816 if ((domains = xenbus_directory(XBT_NULL, xdcp->xs_path_be, "",
1818 1817 &ndomains)) == NULL)
1819 1818 return;
1820 1819
1821 1820 for (i = 0, ldomains = 0; i < ndomains; i++) {
1822 1821 ldomains += strlen(domains[i]) + 1 + sizeof (char *);
1823 1822
1824 1823 i_xvdi_enum_worker(parent, xdcp, domains[i]);
1825 1824 }
1826 1825 kmem_free(domains, ldomains);
1827 1826 }
1828 1827
1829 1828 /*
1830 1829 * Iterate over the store looking for frontend devices to create.
1831 1830 */
1832 1831 static void
1833 1832 i_xvdi_enum_fe(dev_info_t *parent, i_xd_cfg_t *xdcp)
1834 1833 {
1835 1834 i_xvdi_enum_worker(parent, xdcp, NULL);
1836 1835 }
1837 1836
/*
 * Common enumeration worker: list the xenstore directory for the
 * given class (frontend when 'domain' is NULL, otherwise the backend
 * directory for that frontend domain) and create a devinfo node for
 * every virtual device number that does not already have one.
 */
static void
i_xvdi_enum_worker(dev_info_t *parent, i_xd_cfg_t *xdcp,
    char *domain)
{
	char *path, *domain_path, *ep;
	char **devices;
	unsigned int ndevices;
	int ldevices, j, circ;
	domid_t dom;
	long tmplong;

	if (domain == NULL) {
		/* Frontend enumeration: our own devices. */
		dom = DOMID_SELF;
		path = xdcp->xs_path_fe;
		domain_path = "";
	} else {
		/* Backend enumeration: 'domain' is a decimal domid. */
		(void) ddi_strtol(domain, &ep, 0, &tmplong);
		dom = tmplong;
		path = xdcp->xs_path_be;
		domain_path = domain;
	}

	if ((devices = xenbus_directory(XBT_NULL, path, domain_path,
	    &ndevices)) == NULL)
		return;

	for (j = 0, ldevices = 0; j < ndevices; j++) {
		int vdev;

		/*
		 * Tally the size of the directory result (pointer
		 * array plus strings) for the kmem_free() below.
		 */
		ldevices += strlen(devices[j]) + 1 + sizeof (char *);
		(void) ddi_strtol(devices[j], &ep, 0, &tmplong);
		vdev = tmplong;

		ndi_devi_enter(parent, &circ);

		if (xvdi_find_dev(parent, xdcp->devclass, dom, vdev) == NULL)
			(void) xvdi_create_dev(parent, xdcp->devclass,
			    dom, vdev);

		ndi_devi_exit(parent, circ);
	}
	kmem_free(devices, ldevices);
}
1881 1880
1882 1881 /*
1883 1882 * Leaf drivers should call this in their detach() routine during suspend.
1884 1883 */
1885 1884 void
1886 1885 xvdi_suspend(dev_info_t *dip)
1887 1886 {
1888 1887 i_xvdi_rem_watches(dip);
1889 1888 }
1890 1889
1891 1890 /*
1892 1891 * Leaf drivers should call this in their attach() routine during resume.
1893 1892 */
1894 1893 int
1895 1894 xvdi_resume(dev_info_t *dip)
1896 1895 {
1897 1896 return (i_xvdi_add_watches(dip));
1898 1897 }
1899 1898
1900 1899 /*
1901 1900 * Add event handler for the leaf driver
1902 1901 * to handle event triggered by the change in xenstore
1903 1902 */
1904 1903 int
1905 1904 xvdi_add_event_handler(dev_info_t *dip, char *name,
1906 1905 void (*evthandler)(dev_info_t *, ddi_eventcookie_t, void *, void *),
1907 1906 void *arg)
1908 1907 {
1909 1908 ddi_eventcookie_t ecv;
1910 1909 struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1911 1910 ddi_callback_id_t *cbid;
1912 1911 boolean_t call_handler;
1913 1912 i_oestate_evt_t *evt = NULL;
1914 1913 XenbusState oestate;
1915 1914
1916 1915 ASSERT(pdp != NULL);
1917 1916
1918 1917 mutex_enter(&pdp->xd_ndi_lk);
1919 1918
1920 1919 if (strcmp(name, XS_OE_STATE) == 0) {
1921 1920 ASSERT(pdp->xd_xsdev.otherend != NULL);
1922 1921
1923 1922 cbid = &pdp->xd_oe_ehid;
1924 1923 } else if (strcmp(name, XS_HP_STATE) == 0) {
1925 1924 if (pdp->xd_xsdev.frontend == 1) {
1926 1925 mutex_exit(&pdp->xd_ndi_lk);
1927 1926 return (DDI_FAILURE);
1928 1927 }
1929 1928
1930 1929 ASSERT(pdp->xd_hp_watch.node != NULL);
1931 1930
1932 1931 cbid = &pdp->xd_hp_ehid;
1933 1932 } else {
1934 1933 /* Unsupported watch. */
1935 1934 mutex_exit(&pdp->xd_ndi_lk);
1936 1935 return (DDI_FAILURE);
1937 1936 }
1938 1937
1939 1938 /*
1940 1939 * No event handler provided, take default action to handle
1941 1940 * event.
1942 1941 */
1943 1942 if (evthandler == NULL) {
1944 1943 mutex_exit(&pdp->xd_ndi_lk);
1945 1944 return (DDI_SUCCESS);
1946 1945 }
1947 1946
1948 1947 ASSERT(*cbid == NULL);
1949 1948
1950 1949 if (ddi_get_eventcookie(dip, name, &ecv) != DDI_SUCCESS) {
1951 1950 cmn_err(CE_WARN, "failed to find %s cookie for %s@%s",
1952 1951 name, ddi_get_name(dip), ddi_get_name_addr(dip));
1953 1952 mutex_exit(&pdp->xd_ndi_lk);
1954 1953 return (DDI_FAILURE);
1955 1954 }
1956 1955 if (ddi_add_event_handler(dip, ecv, evthandler, arg, cbid)
1957 1956 != DDI_SUCCESS) {
1958 1957 cmn_err(CE_WARN, "failed to add %s event handler for %s@%s",
1959 1958 name, ddi_get_name(dip), ddi_get_name_addr(dip));
1960 1959 *cbid = NULL;
1961 1960 mutex_exit(&pdp->xd_ndi_lk);
1962 1961 return (DDI_FAILURE);
1963 1962 }
1964 1963
1965 1964 /*
1966 1965 * if we're adding an oe state callback, and the ring has already
1967 1966 * transitioned out of Unknown, call the handler after we release
1968 1967 * the mutex.
1969 1968 */
1970 1969 call_handler = B_FALSE;
1971 1970 if ((strcmp(name, XS_OE_STATE) == 0) &&
1972 1971 (pdp->xd_xsdev.otherend_state != XenbusStateUnknown)) {
1973 1972 oestate = pdp->xd_xsdev.otherend_state;
1974 1973 call_handler = B_TRUE;
1975 1974 }
1976 1975
1977 1976 mutex_exit(&pdp->xd_ndi_lk);
1978 1977
1979 1978 if (call_handler) {
1980 1979 evt = kmem_alloc(sizeof (i_oestate_evt_t), KM_SLEEP);
1981 1980 evt->dip = dip;
1982 1981 evt->state = oestate;
1983 1982 (void) ddi_taskq_dispatch(pdp->xd_oe_taskq,
1984 1983 i_xvdi_oestate_handler, (void *)evt, DDI_SLEEP);
1985 1984 }
1986 1985
1987 1986 return (DDI_SUCCESS);
1988 1987 }
1989 1988
1990 1989 /*
1991 1990 * Remove event handler for the leaf driver and unwatch xenstore
1992 1991 * so, driver will not be notified when xenstore entry changed later
1993 1992 */
1994 1993 void
1995 1994 xvdi_remove_event_handler(dev_info_t *dip, char *name)
1996 1995 {
1997 1996 struct xendev_ppd *pdp;
1998 1997 boolean_t rem_oe = B_FALSE, rem_hp = B_FALSE;
1999 1998 ddi_callback_id_t oeid = NULL, hpid = NULL;
2000 1999
2001 2000 pdp = ddi_get_parent_data(dip);
2002 2001 ASSERT(pdp != NULL);
2003 2002
2004 2003 if (name == NULL) {
2005 2004 rem_oe = B_TRUE;
2006 2005 rem_hp = B_TRUE;
2007 2006 } else if (strcmp(name, XS_OE_STATE) == 0) {
2008 2007 rem_oe = B_TRUE;
2009 2008 } else if (strcmp(name, XS_HP_STATE) == 0) {
2010 2009 rem_hp = B_TRUE;
2011 2010 } else {
2012 2011 cmn_err(CE_WARN, "event %s not supported, cannot remove", name);
2013 2012 return;
2014 2013 }
2015 2014
2016 2015 mutex_enter(&pdp->xd_ndi_lk);
2017 2016
2018 2017 if (rem_oe && (pdp->xd_oe_ehid != NULL)) {
2019 2018 oeid = pdp->xd_oe_ehid;
2020 2019 pdp->xd_oe_ehid = NULL;
2021 2020 }
2022 2021
2023 2022 if (rem_hp && (pdp->xd_hp_ehid != NULL)) {
2024 2023 hpid = pdp->xd_hp_ehid;
2025 2024 pdp->xd_hp_ehid = NULL;
2026 2025 }
2027 2026
2028 2027 mutex_exit(&pdp->xd_ndi_lk);
2029 2028
2030 2029 if (oeid != NULL)
2031 2030 (void) ddi_remove_event_handler(oeid);
2032 2031 if (hpid != NULL)
2033 2032 (void) ddi_remove_event_handler(hpid);
2034 2033 }
2035 2034
2036 2035
/*
 * common ring interfaces
 */

#define	FRONT_RING(_ringp)	(&(_ringp)->xr_sring.fr)
#define	BACK_RING(_ringp)	(&(_ringp)->xr_sring.br)
/*
 * Fix: this macro previously expanded to RING_SIZE(FRONT_RING(ringp)),
 * silently capturing whatever variable named "ringp" existed at the use
 * site instead of the macro argument.  Every current caller happens to
 * pass a variable called "ringp", which is the only reason it worked.
 * Make the macro hygienic by using its own parameter.
 */
#define	GET_RING_SIZE(_ringp)	RING_SIZE(FRONT_RING(_ringp))
#define	GET_RING_ENTRY_FE(_ringp, _idx) \
	(FRONT_RING(_ringp)->sring->ring + \
	(_ringp)->xr_entry_size * ((_idx) & (GET_RING_SIZE(_ringp) - 1)))
#define	GET_RING_ENTRY_BE(_ringp, _idx) \
	(BACK_RING(_ringp)->sring->ring + \
	(_ringp)->xr_entry_size * ((_idx) & (GET_RING_SIZE(_ringp) - 1)))
2050 2049
2051 2050 unsigned int
2052 2051 xvdi_ring_avail_slots(xendev_ring_t *ringp)
2053 2052 {
2054 2053 comif_ring_fe_t *frp;
2055 2054 comif_ring_be_t *brp;
2056 2055
2057 2056 if (ringp->xr_frontend) {
2058 2057 frp = FRONT_RING(ringp);
2059 2058 return (GET_RING_SIZE(ringp) -
2060 2059 (frp->req_prod_pvt - frp->rsp_cons));
2061 2060 } else {
2062 2061 brp = BACK_RING(ringp);
2063 2062 return (GET_RING_SIZE(ringp) -
2064 2063 (brp->rsp_prod_pvt - brp->req_cons));
2065 2064 }
2066 2065 }
2067 2066
/*
 * Backend only: returns non-zero iff the frontend has published requests
 * that we have not yet consumed.  The second clause stops consumption at
 * one full ring's worth of outstanding requests (mirrors Xen's
 * RING_HAS_UNCONSUMED_REQUESTS()).
 */
int
xvdi_ring_has_unconsumed_requests(xendev_ring_t *ringp)
{
	comif_ring_be_t *brp;

	ASSERT(!ringp->xr_frontend);
	brp = BACK_RING(ringp);
	return ((brp->req_cons !=
	    ddi_get32(ringp->xr_acc_hdl, &brp->sring->req_prod)) &&
	    ((brp->req_cons - brp->rsp_prod_pvt) != RING_SIZE(brp)));
}
2079 2078
/*
 * Frontend only: returns non-zero while requests this frontend has
 * produced are still unanswered, i.e. our private request producer has
 * advanced past the backend's published response producer.
 */
int
xvdi_ring_has_incomp_request(xendev_ring_t *ringp)
{
	comif_ring_fe_t *frp;

	ASSERT(ringp->xr_frontend);
	frp = FRONT_RING(ringp);
	return (frp->req_prod_pvt !=
	    ddi_get32(ringp->xr_acc_hdl, &frp->sring->rsp_prod));
}
2090 2089
/*
 * Frontend only: returns non-zero iff the backend has published
 * responses that we have not consumed yet.
 */
int
xvdi_ring_has_unconsumed_responses(xendev_ring_t *ringp)
{
	comif_ring_fe_t *frp;

	ASSERT(ringp->xr_frontend);
	frp = FRONT_RING(ringp);
	return (frp->rsp_cons !=
	    ddi_get32(ringp->xr_acc_hdl, &frp->sring->rsp_prod));
}
2101 2100
/*
 * Return the next request slot, or NULL if none is available.
 * Frontend: next free slot in which to build a request.  Backend: next
 * unconsumed request from the frontend.
 * NOTE: req_event will be increased as needed.
 */
void *
xvdi_ring_get_request(xendev_ring_t *ringp)
{
	comif_ring_fe_t *frp;
	comif_ring_be_t *brp;

	if (ringp->xr_frontend) {
		/* for frontend ring */
		frp = FRONT_RING(ringp);
		if (!RING_FULL(frp))
			return (GET_RING_ENTRY_FE(ringp, frp->req_prod_pvt++));
		else
			return (NULL);
	} else {
		/* for backend ring */
		brp = BACK_RING(ringp);
		/* RING_FINAL_CHECK_FOR_REQUESTS() */
		if (xvdi_ring_has_unconsumed_requests(ringp))
			return (GET_RING_ENTRY_BE(ringp, brp->req_cons++));
		else {
			/*
			 * Ring looks empty: arm req_event so the peer will
			 * notify us of the next request, then (after a
			 * barrier) re-check to close the race with a
			 * request arriving in the meantime.
			 */
			ddi_put32(ringp->xr_acc_hdl, &brp->sring->req_event,
			    brp->req_cons + 1);
			membar_enter();
			if (xvdi_ring_has_unconsumed_requests(ringp))
				return (GET_RING_ENTRY_BE(ringp,
				    brp->req_cons++));
			else
				return (NULL);
		}
	}
}
2134 2133
/*
 * Publish this frontend's private request producer index to the shared
 * ring.  Returns non-zero if the backend should be notified, i.e. if
 * the newly pushed requests crossed the backend's req_event threshold
 * (the RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() calculation).
 */
int
xvdi_ring_push_request(xendev_ring_t *ringp)
{
	RING_IDX old, new, reqevt;
	comif_ring_fe_t *frp;

	/* only frontend should be able to push request */
	ASSERT(ringp->xr_frontend);

	/* RING_PUSH_REQUEST_AND_CHECK_NOTIFY() */
	frp = FRONT_RING(ringp);
	old = ddi_get32(ringp->xr_acc_hdl, &frp->sring->req_prod);
	new = frp->req_prod_pvt;
	ddi_put32(ringp->xr_acc_hdl, &frp->sring->req_prod, new);
	/* make the new req_prod visible before sampling req_event */
	membar_enter();
	reqevt = ddi_get32(ringp->xr_acc_hdl, &frp->sring->req_event);
	return ((RING_IDX)(new - reqevt) < (RING_IDX)(new - old));
}
2153 2152
/*
 * Return the next response slot, or NULL if none is available.
 * Backend: next slot in which to build a response (never NULL here).
 * Frontend: next unconsumed response, arming rsp_event when the ring
 * looks empty (RING_FINAL_CHECK_FOR_RESPONSES() pattern).
 * NOTE: rsp_event will be increased as needed.
 */
void *
xvdi_ring_get_response(xendev_ring_t *ringp)
{
	comif_ring_fe_t *frp;
	comif_ring_be_t *brp;

	if (!ringp->xr_frontend) {
		/* for backend ring */
		brp = BACK_RING(ringp);
		return (GET_RING_ENTRY_BE(ringp, brp->rsp_prod_pvt++));
	} else {
		/* for frontend ring */
		frp = FRONT_RING(ringp);
		/* RING_FINAL_CHECK_FOR_RESPONSES() */
		if (xvdi_ring_has_unconsumed_responses(ringp))
			return (GET_RING_ENTRY_FE(ringp, frp->rsp_cons++));
		else {
			/*
			 * Ring looks empty: arm rsp_event, then re-check
			 * after a barrier to close the race with a
			 * response arriving in the meantime.
			 */
			ddi_put32(ringp->xr_acc_hdl, &frp->sring->rsp_event,
			    frp->rsp_cons + 1);
			membar_enter();
			if (xvdi_ring_has_unconsumed_responses(ringp))
				return (GET_RING_ENTRY_FE(ringp,
				    frp->rsp_cons++));
			else
				return (NULL);
		}
	}
}
2183 2182
/*
 * Publish this backend's private response producer index to the shared
 * ring.  Returns non-zero if the frontend should be notified, i.e. if
 * the newly pushed responses crossed the frontend's rsp_event threshold
 * (the RING_PUSH_RESPONSES_AND_CHECK_NOTIFY() calculation).
 */
int
xvdi_ring_push_response(xendev_ring_t *ringp)
{
	RING_IDX old, new, rspevt;
	comif_ring_be_t *brp;

	/* only backend should be able to push response */
	ASSERT(!ringp->xr_frontend);

	/* RING_PUSH_RESPONSE_AND_CHECK_NOTIFY() */
	brp = BACK_RING(ringp);
	old = ddi_get32(ringp->xr_acc_hdl, &brp->sring->rsp_prod);
	new = brp->rsp_prod_pvt;
	ddi_put32(ringp->xr_acc_hdl, &brp->sring->rsp_prod, new);
	/* make the new rsp_prod visible before sampling rsp_event */
	membar_enter();
	rspevt = ddi_get32(ringp->xr_acc_hdl, &brp->sring->rsp_event);
	return ((RING_IDX)(new - rspevt) < (RING_IDX)(new - old));
}
2202 2201
2203 2202 static void
2204 2203 xvdi_ring_init_sring(xendev_ring_t *ringp)
2205 2204 {
2206 2205 ddi_acc_handle_t acchdl;
2207 2206 comif_sring_t *xsrp;
2208 2207 int i;
2209 2208
2210 2209 xsrp = (comif_sring_t *)ringp->xr_vaddr;
2211 2210 acchdl = ringp->xr_acc_hdl;
2212 2211
2213 2212 /* shared ring initialization */
2214 2213 ddi_put32(acchdl, &xsrp->req_prod, 0);
2215 2214 ddi_put32(acchdl, &xsrp->rsp_prod, 0);
2216 2215 ddi_put32(acchdl, &xsrp->req_event, 1);
2217 2216 ddi_put32(acchdl, &xsrp->rsp_event, 1);
2218 2217 for (i = 0; i < sizeof (xsrp->pad); i++)
2219 2218 ddi_put8(acchdl, xsrp->pad + i, 0);
2220 2219 }
2221 2220
2222 2221 static void
2223 2222 xvdi_ring_init_front_ring(xendev_ring_t *ringp, size_t nentry, size_t entrysize)
2224 2223 {
2225 2224 comif_ring_fe_t *xfrp;
2226 2225
2227 2226 xfrp = &ringp->xr_sring.fr;
2228 2227 xfrp->req_prod_pvt = 0;
2229 2228 xfrp->rsp_cons = 0;
2230 2229 xfrp->nr_ents = nentry;
2231 2230 xfrp->sring = (comif_sring_t *)ringp->xr_vaddr;
2232 2231
2233 2232 ringp->xr_frontend = 1;
2234 2233 ringp->xr_entry_size = entrysize;
2235 2234 }
2236 2235
2237 2236 #ifndef XPV_HVM_DRIVER
2238 2237 static void
2239 2238 xvdi_ring_init_back_ring(xendev_ring_t *ringp, size_t nentry, size_t entrysize)
2240 2239 {
2241 2240 comif_ring_be_t *xbrp;
2242 2241
2243 2242 xbrp = &ringp->xr_sring.br;
2244 2243 xbrp->rsp_prod_pvt = 0;
2245 2244 xbrp->req_cons = 0;
2246 2245 xbrp->nr_ents = nentry;
2247 2246 xbrp->sring = (comif_sring_t *)ringp->xr_vaddr;
2248 2247
2249 2248 ringp->xr_frontend = 0;
2250 2249 ringp->xr_entry_size = entrysize;
2251 2250 }
2252 2251 #endif /* XPV_HVM_DRIVER */
2253 2252
/*
 * Offline and remove the devinfo node passed as 'arg' (a dev_info_t *).
 */
static void
xendev_offline_device(void *arg)
{
	dev_info_t *dip = (dev_info_t *)arg;
	char devname[MAXNAMELEN] = {0};

	/*
	 * This is currently the only chance to delete a devinfo node, which
	 * is _not_ always successful.
	 */
	(void) ddi_deviname(dip, devname);
	/* devname + 1 skips the leading '/' produced by ddi_deviname() */
	(void) devfs_clean(ddi_get_parent(dip), devname + 1, DV_CLEAN_FORCE);
	(void) ndi_devi_offline(dip, NDI_DEVI_REMOVE);
}
2268 2267
/*
 * Callback invoked when the otherend state of 'dev' changes.  Records
 * the new state under xd_ndi_lk and, only if it actually differs from
 * what we already had, hands delivery off to the per-device oestate
 * taskq via i_xvdi_oestate_handler().
 */
static void
i_xvdi_oestate_cb(struct xenbus_device *dev, XenbusState oestate)
{
	dev_info_t *dip = (dev_info_t *)dev->data;
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
	i_oestate_evt_t *evt = NULL;
	boolean_t call_handler;

	XVDI_DPRINTF(XVDI_DBG_STATE,
	    "i_xvdi_oestate_cb: %s@%s sees oestate change to %d\n",
	    ddi_binding_name(dip) == NULL ? "null" : ddi_binding_name(dip),
	    ddi_get_name_addr(dip) == NULL ? "null" : ddi_get_name_addr(dip),
	    oestate);

	/* only call the handler if our state has changed */
	call_handler = B_FALSE;
	mutex_enter(&pdp->xd_ndi_lk);
	if (dev->otherend_state != oestate) {
		dev->otherend_state = oestate;
		call_handler = B_TRUE;
	}
	mutex_exit(&pdp->xd_ndi_lk);

	if (call_handler) {
		/*
		 * Try to deliver the oestate change event to the dip.
		 * NOTE(review): evt is presumably freed by
		 * i_xvdi_oestate_handler() — confirm in that handler.
		 */
		evt = kmem_alloc(sizeof (i_oestate_evt_t), KM_SLEEP);
		evt->dip = dip;
		evt->state = oestate;
		(void) ddi_taskq_dispatch(pdp->xd_oe_taskq,
		    i_xvdi_oestate_handler, (void *)evt, DDI_SLEEP);
	}
}
2303 2302
/*
 * Watch callback for hotplug-state changes.  Does no work inline (other
 * than an optional DEBUG trace of the new value); dispatches
 * i_xvdi_hpstate_handler() on the device's hpstate taskq with the dip.
 */
/*ARGSUSED*/
static void
i_xvdi_hpstate_cb(struct xenbus_watch *w, const char **vec,
    unsigned int len)
{
	dev_info_t *dip = (dev_info_t *)w->dev;
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

#ifdef DEBUG
	/* read back the watched node purely for the trace message */
	char *hp_status = NULL;
	unsigned int hpl = 0;

	(void) xenbus_read(XBT_NULL, pdp->xd_hp_watch.node, "",
	    (void *)&hp_status, &hpl);
	XVDI_DPRINTF(XVDI_DBG_STATE,
	    "i_xvdi_hpstate_cb: %s@%s sees hpstate change to %s\n",
	    ddi_binding_name(dip) == NULL ? "null" : ddi_binding_name(dip),
	    ddi_get_name_addr(dip) == NULL ? "null" : ddi_get_name_addr(dip),
	    hp_status == NULL ? "null" : hp_status);
	if (hp_status != NULL)
		kmem_free(hp_status, hpl);
#endif /* DEBUG */

	(void) ddi_taskq_dispatch(pdp->xd_hp_taskq,
	    i_xvdi_hpstate_handler, (void *)dip, DDI_SLEEP);
}
2330 2329
/*
 * Handler for a newly observed xenstore device path ('arg' is a
 * kmem-allocated string that this function consumes and frees).
 * Matches the path against the configured frontend/backend path
 * prefixes in xdci[], parses out the owning domain and virtual device
 * number, and creates the corresponding devinfo node under xendev_dip
 * if one does not already exist.
 */
static void
i_xvdi_probe_path_handler(void *arg)
{
	dev_info_t *parent;
	char *path = arg, *p = NULL;
	int i, vdev, circ;
	i_xd_cfg_t *xdcp;
	boolean_t frontend;
	domid_t dom;

	/* find the device class whose fe/be path prefix matches 'path' */
	for (i = 0, xdcp = &xdci[0]; i < NXDC; i++, xdcp++) {

		if ((xdcp->xs_path_fe != NULL) &&
		    (strncmp(path, xdcp->xs_path_fe, strlen(xdcp->xs_path_fe))
		    == 0)) {

			frontend = B_TRUE;
			p = path + strlen(xdcp->xs_path_fe);
			break;
		}

		if ((xdcp->xs_path_be != NULL) &&
		    (strncmp(path, xdcp->xs_path_be, strlen(xdcp->xs_path_be))
		    == 0)) {

			frontend = B_FALSE;
			p = path + strlen(xdcp->xs_path_be);
			break;
		}

	}

	/* 'frontend' is only valid when p != NULL (a prefix matched) */
	if (p == NULL) {
		cmn_err(CE_WARN, "i_xvdi_probe_path_handler: "
		    "unexpected path prefix in %s", path);
		goto done;
	}

	if (frontend) {
		/* frontend paths carry only the vdev; domain is ourselves */
		dom = DOMID_SELF;
		if (sscanf(p, "/%d/", &vdev) != 1) {
			XVDI_DPRINTF(XVDI_DBG_PROBE,
			    "i_xvdi_probe_path_handler: "
			    "cannot parse frontend path %s",
			    path);
			goto done;
		}
	} else {
		/* backend paths carry the frontend domain and the vdev */
		if (sscanf(p, "/%hu/%d/", &dom, &vdev) != 2) {
			XVDI_DPRINTF(XVDI_DBG_PROBE,
			    "i_xvdi_probe_path_handler: "
			    "cannot parse backend path %s",
			    path);
			goto done;
		}
	}

	/*
	 * This is an oxymoron, so indicates a bogus configuration we
	 * must check for.
	 */
	if (vdev == VDEV_NOXS) {
		cmn_err(CE_WARN, "i_xvdi_probe_path_handler: "
		    "invalid path %s", path);
		goto done;
	}

	parent = xendev_dip;
	ASSERT(parent != NULL);

	/* serialize child creation under the xendev nexus */
	ndi_devi_enter(parent, &circ);

	if (xvdi_find_dev(parent, xdcp->devclass, dom, vdev) == NULL) {
		XVDI_DPRINTF(XVDI_DBG_PROBE,
		    "i_xvdi_probe_path_handler: create for %s", path);
		(void) xvdi_create_dev(parent, xdcp->devclass, dom, vdev);
	} else {
		XVDI_DPRINTF(XVDI_DBG_PROBE,
		    "i_xvdi_probe_path_handler: %s already exists", path);
	}

	ndi_devi_exit(parent, circ);

done:
	/* we own 'path'; release it on every exit */
	kmem_free(path, strlen(path) + 1);
}
↓ open down ↓ |
658 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX