XXXX introduce drv_sectohz
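This change replaces the open-coded seconds-to-ticks conversion
drv_usectohz(secs * MICROSEC) with a single drv_sectohz(secs) call in
the devid discovery throttle. The definition of drv_sectohz itself is
not part of this file; purely as an illustration, a seconds-granularity
analogue of drv_usectohz(9F) might be sketched as below (the
multiply-by-hz form is an assumption, not the actual implementation):

	/*
	 * Hypothetical sketch of drv_sectohz(): convert seconds to
	 * clock ticks at the kernel's hz rate.  Converting from
	 * seconds directly avoids the caller-side "secs * MICROSEC"
	 * multiplication, which overflows a 32-bit value once secs
	 * exceeds roughly 2147 (2^31 / 1000000).
	 */
	#define	drv_sectohz(secs)	((clock_t)(secs) * hz)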
--- old/usr/src/uts/common/os/devid_cache.c
+++ new/usr/src/uts/common/os/devid_cache.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 #include <sys/note.h>
26 26 #include <sys/t_lock.h>
27 27 #include <sys/cmn_err.h>
28 28 #include <sys/instance.h>
29 29 #include <sys/conf.h>
30 30 #include <sys/stat.h>
31 31 #include <sys/ddi.h>
32 32 #include <sys/hwconf.h>
33 33 #include <sys/sunddi.h>
34 34 #include <sys/sunndi.h>
35 35 #include <sys/sunmdi.h>
36 36 #include <sys/ddi_impldefs.h>
37 37 #include <sys/ndi_impldefs.h>
38 38 #include <sys/kobj.h>
39 39 #include <sys/devcache.h>
40 40 #include <sys/devid_cache.h>
41 41 #include <sys/sysmacros.h>
42 42
43 43 /*
44 44 * Discovery refers to the heroic effort made to discover a device which
45 45 * cannot be accessed at the physical path where it once resided. Discovery
46 46 * involves walking the entire device tree attaching all possible disk
47 47 * instances, to search for the device referenced by a devid. Obviously,
48 48 * full device discovery is something to be avoided where possible.
49 49 * Note that simply invoking devfsadm(1M) is equivalent to running full
50 50 * discovery at the devid cache level.
51 51 *
52 52 * Reasons why a disk may not be accessible:
53 53 * disk powered off
54 54 * disk removed or cable disconnected
55 55 * disk or adapter broken
56 56 *
57 57 * Note that discovery is not needed and cannot succeed in any of these
58 58 * cases.
59 59 *
60 60 * When discovery may succeed:
61 61 * Discovery will result in success when a device has been moved
62 62 * to a different address. Note that it's recommended that
63 63 * devfsadm(1M) be invoked (no arguments required) whenever a system's
64 64 * h/w configuration has been updated. Alternatively, a
65 65 * reconfiguration boot can be used to accomplish the same result.
66 66 *
67 67 * Note that discovery is not necessary to be able to correct an access
68 68 * failure for a device which was powered off. Assuming the cache has an
69 69 * entry for such a device, simply powering it on should permit the system
70 70 * to access it. If problems persist after powering it on, invoke
71 71 * devfsadm(1M).
72 72 *
73 73 * Discovery prior to mounting root is only of interest when booting
74 74 * from a filesystem which accesses devices by device id, which
75 75 * not all do.
76 76 *
77 77 * Tunables
78 78 *
79 79 * devid_discovery_boot (default 1)
80 80 * Number of times discovery will be attempted prior to mounting root.
81 81 * Must be done at least once to recover from corrupted or missing
82 82 * devid cache backing store. Probably there's no reason to ever
83 83 * set this to greater than one as a missing device will remain
84 84 * unavailable no matter how often the system searches for it.
85 85 *
86 86 * devid_discovery_postboot (default 1)
87 87 * Number of times discovery will be attempted after mounting root.
88 88 * This must be performed at least once to discover any devices
89 89 * needed after root is mounted which may have been powered
90 90 * off and moved before booting.
91 91 * Setting this to a larger positive number will introduce
92 92 * some inconsistency in system operation. Searching for a device
93 93 * will take an indeterminate amount of time, sometimes slower,
94 94 * sometimes faster. In addition, the system will sometimes
95 95 * discover a newly powered on device, sometimes it won't.
96 96 * Use of this option is therefore not recommended.
97 97 *
98 98 * devid_discovery_postboot_always (default 0)
99 99 * Set to 1, the system will always attempt full discovery.
100 100 *
101 101 * devid_discovery_secs (default 0)
102 102 * Set to a positive value, the system will attempt full discovery
103 103 * but with a minimum delay between attempts. A device search
104 104 * within the period of time specified will result in failure.
105 105 *
106 106 * devid_cache_read_disable (default 0)
107 107 * Set to 1 to disable reading /etc/devices/devid_cache.
108 108 * Devid cache will continue to operate normally but
109 109 * at least one discovery attempt will be required.
110 110 *
111 111 * devid_cache_write_disable (default 0)
112 112 * Set to 1 to disable updates to /etc/devices/devid_cache.
113 113 * Any updates to the devid cache will not be preserved across a reboot.
114 114 *
115 115 * devid_report_error (default 0)
116 116 * Set to 1 to enable some error messages related to devid
117 117 * cache failures.
118 118 *
119 119 * The devid is packed in the cache file as a byte array. For
120 120 * portability, this could be done in the encoded string format.
121 121 */
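/*
 * For example, the discovery throttle described above can be set from
 * /etc/system like any other kernel variable:
 *
 *	set devid_discovery_secs = 60
 *
 * which limits full discovery to at most one attempt per minute.
 */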
122 122
123 123
124 124 int devid_discovery_boot = 1;
125 125 int devid_discovery_postboot = 1;
126 126 int devid_discovery_postboot_always = 0;
127 127 int devid_discovery_secs = 0;
128 128
129 129 int devid_cache_read_disable = 0;
130 130 int devid_cache_write_disable = 0;
131 131
132 132 int devid_report_error = 0;
133 133
134 134
135 135 /*
136 136 * State to manage discovery of devices providing a devid
137 137 */
138 138 static int devid_discovery_busy = 0;
139 139 static kmutex_t devid_discovery_mutex;
140 140 static kcondvar_t devid_discovery_cv;
141 141 static clock_t devid_last_discovery = 0;
142 142
143 143
144 144 #ifdef DEBUG
145 145 int nvp_devid_debug = 0;
146 146 int devid_debug = 0;
147 147 int devid_log_registers = 0;
148 148 int devid_log_finds = 0;
149 149 int devid_log_lookups = 0;
150 150 int devid_log_discovery = 0;
151 151 int devid_log_matches = 0;
152 152 int devid_log_paths = 0;
153 153 int devid_log_failures = 0;
154 154 int devid_log_hold = 0;
155 155 int devid_log_unregisters = 0;
156 156 int devid_log_removes = 0;
157 157 int devid_register_debug = 0;
158 158 int devid_log_stale = 0;
159 159 int devid_log_detaches = 0;
160 160 #endif /* DEBUG */
161 161
162 162 /*
163 163 * devid cache file registration for cache reads and updates
164 164 */
165 165 static nvf_ops_t devid_cache_ops = {
166 166 "/etc/devices/devid_cache", /* path to cache */
167 167 devid_cache_unpack_nvlist, /* read: nvlist to nvp */
168 168 devid_cache_pack_list, /* write: nvp to nvlist */
169 169 devid_list_free, /* free data list */
170 170 NULL /* write complete callback */
171 171 };
172 172
173 173 /*
174 174 * handle to registered devid cache handlers
175 175 */
176 176 nvf_handle_t dcfd_handle;
177 177
178 178
179 179 /*
180 180 * Initialize devid cache file management
181 181 */
182 182 void
183 183 devid_cache_init(void)
184 184 {
185 185 dcfd_handle = nvf_register_file(&devid_cache_ops);
186 186 ASSERT(dcfd_handle);
187 187
188 188 list_create(nvf_list(dcfd_handle), sizeof (nvp_devid_t),
189 189 offsetof(nvp_devid_t, nvp_link));
190 190
191 191 mutex_init(&devid_discovery_mutex, NULL, MUTEX_DEFAULT, NULL);
192 192 cv_init(&devid_discovery_cv, NULL, CV_DRIVER, NULL);
193 193 }
194 194
195 195 /*
196 196 * Read and initialize the devid cache from the persistent store
197 197 */
198 198 void
199 199 devid_cache_read(void)
200 200 {
201 201 if (!devid_cache_read_disable) {
202 202 rw_enter(nvf_lock(dcfd_handle), RW_WRITER);
203 203 ASSERT(list_head(nvf_list(dcfd_handle)) == NULL);
204 204 (void) nvf_read_file(dcfd_handle);
205 205 rw_exit(nvf_lock(dcfd_handle));
206 206 }
207 207 }
208 208
209 209 static void
210 210 devid_nvp_free(nvp_devid_t *dp)
211 211 {
212 212 if (dp->nvp_devpath)
213 213 kmem_free(dp->nvp_devpath, strlen(dp->nvp_devpath)+1);
214 214 if (dp->nvp_devid)
215 215 kmem_free(dp->nvp_devid, ddi_devid_sizeof(dp->nvp_devid));
216 216
217 217 kmem_free(dp, sizeof (nvp_devid_t));
218 218 }
219 219
220 220 static void
221 221 devid_list_free(nvf_handle_t fd)
222 222 {
223 223 list_t *listp;
224 224 nvp_devid_t *np;
225 225
226 226 ASSERT(RW_WRITE_HELD(nvf_lock(dcfd_handle)));
227 227
228 228 listp = nvf_list(fd);
229 229 while (np = list_head(listp)) {
230 230 list_remove(listp, np);
231 231 devid_nvp_free(np);
232 232 }
233 233 }
234 234
235 235 /*
236 236 * Free an nvp element in a list
237 237 */
238 238 static void
239 239 devid_nvp_unlink_and_free(nvf_handle_t fd, nvp_devid_t *np)
240 240 {
241 241 list_remove(nvf_list(fd), np);
242 242 devid_nvp_free(np);
243 243 }
244 244
245 245 /*
246 246 * Unpack a device path/nvlist pair to the list of devid cache elements.
247 247 * Used to parse the nvlist format when reading
248 248 * /etc/devices/devid_cache
249 249 */
250 250 static int
251 251 devid_cache_unpack_nvlist(nvf_handle_t fd, nvlist_t *nvl, char *name)
252 252 {
253 253 nvp_devid_t *np;
254 254 ddi_devid_t devidp;
255 255 int rval;
256 256 uint_t n;
257 257
258 258 NVP_DEVID_DEBUG_PATH((name));
259 259 ASSERT(RW_WRITE_HELD(nvf_lock(dcfd_handle)));
260 260
261 261 /*
262 262 * check path for a devid
263 263 */
264 264 rval = nvlist_lookup_byte_array(nvl,
265 265 DP_DEVID_ID, (uchar_t **)&devidp, &n);
266 266 if (rval == 0) {
267 267 if (ddi_devid_valid(devidp) == DDI_SUCCESS) {
268 268 ASSERT(n == ddi_devid_sizeof(devidp));
269 269 np = kmem_zalloc(sizeof (nvp_devid_t), KM_SLEEP);
270 270 np->nvp_devpath = i_ddi_strdup(name, KM_SLEEP);
271 271 np->nvp_devid = kmem_alloc(n, KM_SLEEP);
272 272 (void) bcopy(devidp, np->nvp_devid, n);
273 273 list_insert_tail(nvf_list(fd), np);
274 274 NVP_DEVID_DEBUG_DEVID((np->nvp_devid));
275 275 } else {
276 276 DEVIDERR((CE_CONT,
277 277 "%s: invalid devid\n", name));
278 278 }
279 279 } else {
280 280 DEVIDERR((CE_CONT,
281 281 "%s: devid not available\n", name));
282 282 }
283 283
284 284 return (0);
285 285 }
286 286
287 287 /*
288 288 * Pack the list of devid cache elements into a single nvlist
289 289 * Used when writing the nvlist file.
290 290 */
291 291 static int
292 292 devid_cache_pack_list(nvf_handle_t fd, nvlist_t **ret_nvl)
293 293 {
294 294 nvlist_t *nvl, *sub_nvl;
295 295 nvp_devid_t *np;
296 296 int rval;
297 297 list_t *listp;
298 298
299 299 ASSERT(RW_WRITE_HELD(nvf_lock(dcfd_handle)));
300 300
301 301 rval = nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP);
302 302 if (rval != 0) {
303 303 nvf_error("%s: nvlist alloc error %d\n",
304 304 nvf_cache_name(fd), rval);
305 305 return (DDI_FAILURE);
306 306 }
307 307
308 308 listp = nvf_list(fd);
309 309 for (np = list_head(listp); np; np = list_next(listp, np)) {
310 310 if (np->nvp_devid == NULL)
311 311 continue;
312 312 NVP_DEVID_DEBUG_PATH(np->nvp_devpath);
313 313 rval = nvlist_alloc(&sub_nvl, NV_UNIQUE_NAME, KM_SLEEP);
314 314 if (rval != 0) {
315 315 nvf_error("%s: nvlist alloc error %d\n",
316 316 nvf_cache_name(fd), rval);
317 317 sub_nvl = NULL;
318 318 goto err;
319 319 }
320 320
321 321 rval = nvlist_add_byte_array(sub_nvl, DP_DEVID_ID,
322 322 (uchar_t *)np->nvp_devid,
323 323 ddi_devid_sizeof(np->nvp_devid));
324 324 if (rval == 0) {
325 325 NVP_DEVID_DEBUG_DEVID(np->nvp_devid);
326 326 } else {
327 327 nvf_error(
328 328 "%s: nvlist add error %d (devid)\n",
329 329 nvf_cache_name(fd), rval);
330 330 goto err;
331 331 }
332 332
333 333 rval = nvlist_add_nvlist(nvl, np->nvp_devpath, sub_nvl);
334 334 if (rval != 0) {
335 335 nvf_error("%s: nvlist add error %d (sublist)\n",
336 336 nvf_cache_name(fd), rval);
337 337 goto err;
338 338 }
339 339 nvlist_free(sub_nvl);
340 340 }
341 341
342 342 *ret_nvl = nvl;
343 343 return (DDI_SUCCESS);
344 344
345 345 err:
346 346 if (sub_nvl)
347 347 nvlist_free(sub_nvl);
348 348 nvlist_free(nvl);
349 349 *ret_nvl = NULL;
350 350 return (DDI_FAILURE);
351 351 }
352 352
353 353 static int
354 354 e_devid_do_discovery(void)
355 355 {
356 356 ASSERT(mutex_owned(&devid_discovery_mutex));
357 357
358 358 if (i_ddi_io_initialized() == 0) {
359 359 if (devid_discovery_boot > 0) {
360 360 devid_discovery_boot--;
361 361 return (1);
362 362 }
363 363 } else {
364 364 if (devid_discovery_postboot_always > 0)
365 365 return (1);
366 366 if (devid_discovery_postboot > 0) {
367 367 devid_discovery_postboot--;
368 368 return (1);
369 369 }
370 370 if (devid_discovery_secs > 0) {
371 371 if ((ddi_get_lbolt() - devid_last_discovery) >
372 - drv_usectohz(devid_discovery_secs * MICROSEC)) {
372 + drv_sectohz(devid_discovery_secs)) {
373 373 return (1);
374 374 }
375 375 }
376 376 }
377 377
378 378 DEVID_LOG_DISC((CE_CONT, "devid_discovery: no discovery\n"));
379 379 return (0);
380 380 }
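/*
 * To illustrate the throttle arithmetic above: with
 * devid_discovery_secs = 60 and the traditional default of hz = 100,
 * drv_sectohz(60) yields 6000 ticks, so a new discovery is permitted
 * only once ddi_get_lbolt() has advanced more than 6000 ticks (one
 * minute) past devid_last_discovery.
 */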
381 381
382 382 static void
383 383 e_ddi_devid_hold_by_major(major_t major)
384 384 {
385 385 DEVID_LOG_DISC((CE_CONT,
386 386 "devid_discovery: ddi_hold_installed_driver %d\n", major));
387 387
388 388 if (ddi_hold_installed_driver(major) == NULL)
389 389 return;
390 390
391 391 ddi_rele_driver(major);
392 392 }
393 393
394 394 /* legacy support - see below */
395 395 static char *e_ddi_devid_hold_driver_list[] = { "sd", "ssd" };
396 396
397 397 #define N_DRIVERS_TO_HOLD \
398 398 (sizeof (e_ddi_devid_hold_driver_list) / sizeof (char *))
399 399
400 400 static void
401 401 e_ddi_devid_hold_installed_driver(ddi_devid_t devid)
402 402 {
403 403 impl_devid_t *id = (impl_devid_t *)devid;
404 404 major_t major, hint_major;
405 405 char hint[DEVID_HINT_SIZE + 1];
406 406 struct devnames *dnp;
407 407 char **drvp;
408 408 int i;
409 409
410 410 /* Count non-null bytes */
411 411 for (i = 0; i < DEVID_HINT_SIZE; i++)
412 412 if (id->did_driver[i] == '\0')
413 413 break;
414 414
415 415 /* Make a copy of the driver hint */
416 416 bcopy(id->did_driver, hint, i);
417 417 hint[i] = '\0';
418 418
419 419 /* search for the devid using the hint driver */
420 420 hint_major = ddi_name_to_major(hint);
421 421 if (hint_major != DDI_MAJOR_T_NONE) {
422 422 e_ddi_devid_hold_by_major(hint_major);
423 423 }
424 424
425 425 /*
426 426 * search for the devid with each driver declaring
427 427 * itself as a devid registrant.
428 428 */
429 429 for (major = 0; major < devcnt; major++) {
430 430 if (major == hint_major)
431 431 continue;
432 432 dnp = &devnamesp[major];
433 433 if (dnp->dn_flags & DN_DEVID_REGISTRANT) {
434 434 e_ddi_devid_hold_by_major(major);
435 435 }
436 436 }
437 437
438 438 /*
439 439 * Legacy support: may be removed once an upgrade mechanism
440 440 * for driver conf files is available.
441 441 */
442 442 drvp = e_ddi_devid_hold_driver_list;
443 443 for (i = 0; i < N_DRIVERS_TO_HOLD; i++, drvp++) {
444 444 major = ddi_name_to_major(*drvp);
445 445 if (major != DDI_MAJOR_T_NONE && major != hint_major) {
446 446 e_ddi_devid_hold_by_major(major);
447 447 }
448 448 }
449 449 }
450 450
451 451 /*
452 452 * Return success if discovery was attempted, to indicate
453 453 * that the desired device may now be available.
454 454 */
455 455 int
456 456 e_ddi_devid_discovery(ddi_devid_t devid)
457 457 {
458 458 int flags;
459 459 int rval = DDI_SUCCESS;
460 460
461 461 mutex_enter(&devid_discovery_mutex);
462 462
463 463 if (devid_discovery_busy) {
464 464 DEVID_LOG_DISC((CE_CONT, "devid_discovery: busy\n"));
465 465 while (devid_discovery_busy) {
466 466 cv_wait(&devid_discovery_cv, &devid_discovery_mutex);
467 467 }
468 468 } else if (e_devid_do_discovery()) {
469 469 devid_discovery_busy = 1;
470 470 mutex_exit(&devid_discovery_mutex);
471 471
472 472 if (i_ddi_io_initialized() == 0) {
473 473 e_ddi_devid_hold_installed_driver(devid);
474 474 } else {
475 475 DEVID_LOG_DISC((CE_CONT,
476 476 "devid_discovery: ndi_devi_config\n"));
477 477 flags = NDI_DEVI_PERSIST | NDI_CONFIG | NDI_NO_EVENT;
478 478 if (i_ddi_io_initialized())
479 479 flags |= NDI_DRV_CONF_REPROBE;
480 480 (void) ndi_devi_config(ddi_root_node(), flags);
481 481 }
482 482
483 483 mutex_enter(&devid_discovery_mutex);
484 484 devid_discovery_busy = 0;
485 485 cv_broadcast(&devid_discovery_cv);
486 486 if (devid_discovery_secs > 0)
487 487 devid_last_discovery = ddi_get_lbolt();
488 488 DEVID_LOG_DISC((CE_CONT, "devid_discovery: done\n"));
489 489 } else {
490 490 rval = DDI_FAILURE;
491 491 DEVID_LOG_DISC((CE_CONT, "no devid discovery\n"));
492 492 }
493 493
494 494 mutex_exit(&devid_discovery_mutex);
495 495
496 496 return (rval);
497 497 }
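/*
 * Note the protocol above: a caller that finds a discovery already in
 * progress waits on devid_discovery_cv and then reports success, since
 * the discovery performed by the other thread may have made the
 * desired device available.
 */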
498 498
499 499 /*
500 500 * As part of registering a devid for a device,
501 501 * update the devid cache with this device/devid pair
502 502 * or note that this combination is already registered.
503 503 *
504 504 * If a devpath is provided it will be used as the path to register the
505 505 * devid against, otherwise we use ddi_pathname(dip). In both cases
506 506 * we duplicate the path string so that it can be cached/freed independently
507 507 * of the original owner.
508 508 */
509 509 static int
510 510 e_devid_cache_register_cmn(dev_info_t *dip, ddi_devid_t devid, char *devpath)
511 511 {
512 512 nvp_devid_t *np;
513 513 nvp_devid_t *new_nvp;
514 514 ddi_devid_t new_devid;
515 515 int new_devid_size;
516 516 char *path, *fullpath;
517 517 ddi_devid_t free_devid = NULL;
518 518 int pathlen;
519 519 list_t *listp;
520 520 int is_dirty = 0;
521 521
522 522
523 523 ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);
524 524
525 525 if (devpath) {
526 526 pathlen = strlen(devpath) + 1;
527 527 path = kmem_alloc(pathlen, KM_SLEEP);
528 528 bcopy(devpath, path, pathlen);
529 529 } else {
530 530 /*
531 531 * We are willing to accept DS_BOUND nodes if we can form a full
532 532 * ddi_pathname (i.e. the node is part way to becoming
533 533 * DS_INITIALIZED and devi_addr/ddi_get_name_addr are non-NULL).
534 534 */
535 535 if (ddi_get_name_addr(dip) == NULL)
536 536 return (DDI_FAILURE);
537 537
538 538 fullpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
539 539 (void) ddi_pathname(dip, fullpath);
540 540 pathlen = strlen(fullpath) + 1;
541 541 path = kmem_alloc(pathlen, KM_SLEEP);
542 542 bcopy(fullpath, path, pathlen);
543 543 kmem_free(fullpath, MAXPATHLEN);
544 544 }
545 545
546 546 DEVID_LOG_REG(("register", devid, path));
547 547
548 548 new_nvp = kmem_zalloc(sizeof (nvp_devid_t), KM_SLEEP);
549 549 new_devid_size = ddi_devid_sizeof(devid);
550 550 new_devid = kmem_alloc(new_devid_size, KM_SLEEP);
551 551 (void) bcopy(devid, new_devid, new_devid_size);
552 552
553 553 rw_enter(nvf_lock(dcfd_handle), RW_WRITER);
554 554
555 555 listp = nvf_list(dcfd_handle);
556 556 for (np = list_head(listp); np; np = list_next(listp, np)) {
557 557 if (strcmp(path, np->nvp_devpath) == 0) {
558 558 DEVID_DEBUG2((CE_CONT,
559 559 "register: %s path match\n", path));
560 560 if (np->nvp_devid == NULL) {
561 561 replace: np->nvp_devid = new_devid;
562 562 np->nvp_flags |=
563 563 NVP_DEVID_DIP | NVP_DEVID_REGISTERED;
564 564 np->nvp_dip = dip;
565 565 if (!devid_cache_write_disable) {
566 566 nvf_mark_dirty(dcfd_handle);
567 567 is_dirty = 1;
568 568 }
569 569 rw_exit(nvf_lock(dcfd_handle));
570 570 kmem_free(new_nvp, sizeof (nvp_devid_t));
571 571 kmem_free(path, pathlen);
572 572 goto exit;
573 573 }
574 574 if (ddi_devid_valid(np->nvp_devid) != DDI_SUCCESS) {
575 575 /* replace invalid devid */
576 576 free_devid = np->nvp_devid;
577 577 goto replace;
578 578 }
579 579 /*
580 580 * We're registering an already-cached path
581 581 * Does the device's devid match the cache?
582 582 */
583 583 if (ddi_devid_compare(devid, np->nvp_devid) != 0) {
584 584 DEVID_DEBUG((CE_CONT, "devid register: "
585 585 "devid %s does not match\n", path));
586 586 /*
587 587 * Replace cached devid for this path
588 588 * with newly registered devid. A devid
589 589 * may map to multiple paths but one path
590 590 * should only map to one devid.
591 591 */
592 592 devid_nvp_unlink_and_free(dcfd_handle, np);
593 593 np = NULL;
594 594 break;
595 595 } else {
596 596 DEVID_DEBUG2((CE_CONT,
597 597 "devid register: %s devid match\n", path));
598 598 np->nvp_flags |=
599 599 NVP_DEVID_DIP | NVP_DEVID_REGISTERED;
600 600 np->nvp_dip = dip;
601 601 rw_exit(nvf_lock(dcfd_handle));
602 602 kmem_free(new_nvp, sizeof (nvp_devid_t));
603 603 kmem_free(path, pathlen);
604 604 kmem_free(new_devid, new_devid_size);
605 605 return (DDI_SUCCESS);
606 606 }
607 607 }
608 608 }
609 609
610 610 /*
611 611 * Add newly registered devid to the cache
612 612 */
613 613 ASSERT(np == NULL);
614 614
615 615 new_nvp->nvp_devpath = path;
616 616 new_nvp->nvp_flags = NVP_DEVID_DIP | NVP_DEVID_REGISTERED;
617 617 new_nvp->nvp_dip = dip;
618 618 new_nvp->nvp_devid = new_devid;
619 619
620 620 if (!devid_cache_write_disable) {
621 621 is_dirty = 1;
622 622 nvf_mark_dirty(dcfd_handle);
623 623 }
624 624 list_insert_tail(nvf_list(dcfd_handle), new_nvp);
625 625
626 626 rw_exit(nvf_lock(dcfd_handle));
627 627
628 628 exit:
629 629 if (free_devid)
630 630 kmem_free(free_devid, ddi_devid_sizeof(free_devid));
631 631
632 632 if (is_dirty)
633 633 nvf_wake_daemon();
634 634
635 635 return (DDI_SUCCESS);
636 636 }
637 637
638 638 int
639 639 e_devid_cache_register(dev_info_t *dip, ddi_devid_t devid)
640 640 {
641 641 return (e_devid_cache_register_cmn(dip, devid, NULL));
642 642 }
643 643
644 644 /*
645 645 * Unregister a device's devid; the devinfo may hit on multiple entries
646 646 * arising from both pHCI and vHCI paths.
647 647 * Called as an instance detaches.
648 648 * Invalidate the devid's devinfo reference.
649 649 * Devid-path remains in the cache.
650 650 */
651 651
652 652 void
653 653 e_devid_cache_unregister(dev_info_t *dip)
654 654 {
655 655 nvp_devid_t *np;
656 656 list_t *listp;
657 657
658 658 rw_enter(nvf_lock(dcfd_handle), RW_WRITER);
659 659
660 660 listp = nvf_list(dcfd_handle);
661 661 for (np = list_head(listp); np; np = list_next(listp, np)) {
662 662 if (np->nvp_devid == NULL)
663 663 continue;
664 664 if ((np->nvp_flags & NVP_DEVID_DIP) && np->nvp_dip == dip) {
665 665 DEVID_LOG_UNREG((CE_CONT,
666 666 "unregister: %s\n", np->nvp_devpath));
667 667 np->nvp_flags &= ~NVP_DEVID_DIP;
668 668 np->nvp_dip = NULL;
669 669 }
670 670 }
671 671
672 672 rw_exit(nvf_lock(dcfd_handle));
673 673 }
674 674
675 675 int
676 676 e_devid_cache_pathinfo(mdi_pathinfo_t *pip, ddi_devid_t devid)
677 677 {
678 678 char *path = mdi_pi_pathname(pip);
679 679
680 680 return (e_devid_cache_register_cmn(mdi_pi_get_client(pip), devid,
681 681 path));
682 682 }
683 683
684 684 /*
685 685 * Purge devid cache of stale devids
686 686 */
687 687 void
688 688 devid_cache_cleanup(void)
689 689 {
690 690 nvp_devid_t *np, *next;
691 691 list_t *listp;
692 692 int is_dirty = 0;
693 693
694 694 rw_enter(nvf_lock(dcfd_handle), RW_WRITER);
695 695
696 696 listp = nvf_list(dcfd_handle);
697 697 for (np = list_head(listp); np; np = next) {
698 698 next = list_next(listp, np);
699 699 if (np->nvp_devid == NULL)
700 700 continue;
701 701 if ((np->nvp_flags & NVP_DEVID_REGISTERED) == 0) {
702 702 DEVID_LOG_REMOVE((CE_CONT,
703 703 "cleanup: %s\n", np->nvp_devpath));
704 704 if (!devid_cache_write_disable) {
705 705 nvf_mark_dirty(dcfd_handle);
706 706 is_dirty = 1;
707 707 }
708 708 devid_nvp_unlink_and_free(dcfd_handle, np);
709 709 }
710 710 }
711 711
712 712 rw_exit(nvf_lock(dcfd_handle));
713 713
714 714 if (is_dirty)
715 715 nvf_wake_daemon();
716 716 }
717 717
718 718
719 719 /*
720 720 * Build a list of dev_t's for a device/devid
721 721 *
722 722 * The effect of this function is cumulative, adding dev_t's
723 723 * for the device to the list of all dev_t's for a given
724 724 * devid.
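 *
 * The DEVID_MINOR_NAME_ALL* forms of minor_name are sentinel pointer
 * values (see <sys/sunddi.h>), which is why the code below matches
 * them by pointer identity rather than with strcmp().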
725 725 */
726 726 static void
727 727 e_devid_minor_to_devlist(
728 728 dev_info_t *dip,
729 729 char *minor_name,
730 730 int ndevts_alloced,
731 731 int *devtcntp,
732 732 dev_t *devtsp)
733 733 {
734 734 int circ;
735 735 struct ddi_minor_data *dmdp;
736 736 int minor_all = 0;
737 737 int ndevts = *devtcntp;
738 738
739 739 ASSERT(i_ddi_devi_attached(dip));
740 740
741 741 /* are we looking for a set of minor nodes? */
742 742 if ((minor_name == DEVID_MINOR_NAME_ALL) ||
743 743 (minor_name == DEVID_MINOR_NAME_ALL_CHR) ||
744 744 (minor_name == DEVID_MINOR_NAME_ALL_BLK))
745 745 minor_all = 1;
746 746
747 747 /* Find matching minor names */
748 748 ndi_devi_enter(dip, &circ);
749 749 for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
750 750
751 751 /* Skip non-minors, and non matching minor names */
752 752 if ((dmdp->type != DDM_MINOR) || ((minor_all == 0) &&
753 753 strcmp(dmdp->ddm_name, minor_name)))
754 754 continue;
755 755
756 756 /* filter out minor_all mismatches */
757 757 if (minor_all &&
758 758 (((minor_name == DEVID_MINOR_NAME_ALL_CHR) &&
759 759 (dmdp->ddm_spec_type != S_IFCHR)) ||
760 760 ((minor_name == DEVID_MINOR_NAME_ALL_BLK) &&
761 761 (dmdp->ddm_spec_type != S_IFBLK))))
762 762 continue;
763 763
764 764 if (ndevts < ndevts_alloced)
765 765 devtsp[ndevts] = dmdp->ddm_dev;
766 766 ndevts++;
767 767 }
768 768 ndi_devi_exit(dip, circ);
769 769
770 770 *devtcntp = ndevts;
771 771 }
772 772
773 773 /*
774 774 * Search for cached entries matching a devid
775 775 * Return two lists:
776 776 * a list of dev_info nodes, for those devices in the attached state
777 777 * a list of pathnames whose instances registered the given devid
778 778 * If the lists passed in are not sufficient to return the matching
779 779 * references, return the size of lists required.
780 780 * The dev_info nodes are returned with a hold that the caller must release.
781 781 */
782 782 static int
783 783 e_devid_cache_devi_path_lists(ddi_devid_t devid, int retmax,
784 784 int *retndevis, dev_info_t **retdevis, int *retnpaths, char **retpaths)
785 785 {
786 786 nvp_devid_t *np;
787 787 int ndevis, npaths;
788 788 dev_info_t *dip, *pdip;
789 789 int circ;
790 790 int maxdevis = 0;
791 791 int maxpaths = 0;
792 792 list_t *listp;
793 793
794 794 ndevis = 0;
795 795 npaths = 0;
796 796 listp = nvf_list(dcfd_handle);
797 797 for (np = list_head(listp); np; np = list_next(listp, np)) {
798 798 if (np->nvp_devid == NULL)
799 799 continue;
800 800 if (ddi_devid_valid(np->nvp_devid) != DDI_SUCCESS) {
801 801 DEVIDERR((CE_CONT,
802 802 "find: invalid devid %s\n",
803 803 np->nvp_devpath));
804 804 continue;
805 805 }
806 806 if (ddi_devid_compare(devid, np->nvp_devid) == 0) {
807 807 DEVID_DEBUG2((CE_CONT,
808 808 "find: devid match: %s 0x%x\n",
809 809 np->nvp_devpath, np->nvp_flags));
810 810 DEVID_LOG_MATCH(("find", devid, np->nvp_devpath));
811 811 DEVID_LOG_PATHS((CE_CONT, "%s\n", np->nvp_devpath));
812 812
813 813 /*
814 814 * Check if we have a cached devinfo reference for this
815 815 * devid. Place a hold on it to prevent detach
816 816 * Otherwise, use the path instead.
817 817 * Note: returns with a hold on each dev_info
818 818 * node in the list.
819 819 */
820 820 dip = NULL;
821 821 if (np->nvp_flags & NVP_DEVID_DIP) {
822 822 pdip = ddi_get_parent(np->nvp_dip);
823 823 if (ndi_devi_tryenter(pdip, &circ)) {
824 824 dip = np->nvp_dip;
825 825 ndi_hold_devi(dip);
826 826 ndi_devi_exit(pdip, circ);
827 827 ASSERT(!DEVI_IS_ATTACHING(dip));
828 828 ASSERT(!DEVI_IS_DETACHING(dip));
829 829 } else {
830 830 DEVID_LOG_DETACH((CE_CONT,
831 831 "may be detaching: %s\n",
832 832 np->nvp_devpath));
833 833 }
834 834 }
835 835
836 836 if (dip) {
837 837 if (ndevis < retmax) {
838 838 retdevis[ndevis++] = dip;
839 839 } else {
840 840 ndi_rele_devi(dip);
841 841 }
842 842 maxdevis++;
843 843 } else {
844 844 if (npaths < retmax)
845 845 retpaths[npaths++] = np->nvp_devpath;
846 846 maxpaths++;
847 847 }
848 848 }
849 849 }
850 850
851 851 *retndevis = ndevis;
852 852 *retnpaths = npaths;
853 853 return (maxdevis > maxpaths ? maxdevis : maxpaths);
854 854 }
855 855
856 856
857 857 /*
858 858 * Search the devid cache, returning dev_t list for all
859 859 * device paths mapping to the device identified by the
860 860 * given devid.
861 861 *
862 862 * Primary interface used by ddi_lyr_devid_to_devlist()
863 863 */
864 864 int
865 865 e_devid_cache_to_devt_list(ddi_devid_t devid, char *minor_name,
866 866 int *retndevts, dev_t **retdevts)
867 867 {
868 868 char *path, **paths;
869 869 int i, j, n;
870 870 dev_t *devts, *udevts;
871 871 dev_t tdevt;
872 872 int ndevts, undevts, ndevts_alloced;
873 873 dev_info_t *devi, **devis;
874 874 int ndevis, npaths, nalloced;
875 875 ddi_devid_t match_devid;
876 876
877 877 DEVID_LOG_FIND(("find", devid, NULL));
878 878
879 879 ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);
880 880 if (ddi_devid_valid(devid) != DDI_SUCCESS) {
881 881 DEVID_LOG_ERR(("invalid devid", devid, NULL));
882 882 return (DDI_FAILURE);
883 883 }
884 884
885 885 nalloced = 128;
886 886
887 887 for (;;) {
888 888 paths = kmem_zalloc(nalloced * sizeof (char *), KM_SLEEP);
889 889 devis = kmem_zalloc(nalloced * sizeof (dev_info_t *), KM_SLEEP);
890 890
891 891 rw_enter(nvf_lock(dcfd_handle), RW_READER);
892 892 n = e_devid_cache_devi_path_lists(devid, nalloced,
893 893 &ndevis, devis, &npaths, paths);
894 894 if (n <= nalloced)
895 895 break;
896 896 rw_exit(nvf_lock(dcfd_handle));
897 897 for (i = 0; i < ndevis; i++)
898 898 ndi_rele_devi(devis[i]);
899 899 kmem_free(paths, nalloced * sizeof (char *));
900 900 kmem_free(devis, nalloced * sizeof (dev_info_t *));
901 901 nalloced = n + 128;
902 902 }
903 903
904 904 for (i = 0; i < npaths; i++) {
905 905 path = i_ddi_strdup(paths[i], KM_SLEEP);
906 906 paths[i] = path;
907 907 }
908 908 rw_exit(nvf_lock(dcfd_handle));
909 909
910 910 if (ndevis == 0 && npaths == 0) {
911 911 DEVID_LOG_ERR(("no devid found", devid, NULL));
912 912 kmem_free(paths, nalloced * sizeof (char *));
913 913 kmem_free(devis, nalloced * sizeof (dev_info_t *));
914 914 return (DDI_FAILURE);
915 915 }
916 916
917 917 ndevts_alloced = 128;
918 918 restart:
919 919 ndevts = 0;
920 920 devts = kmem_alloc(ndevts_alloced * sizeof (dev_t), KM_SLEEP);
921 921 for (i = 0; i < ndevis; i++) {
922 922 ASSERT(!DEVI_IS_ATTACHING(devis[i]));
923 923 ASSERT(!DEVI_IS_DETACHING(devis[i]));
924 924 e_devid_minor_to_devlist(devis[i], minor_name,
925 925 ndevts_alloced, &ndevts, devts);
926 926 if (ndevts > ndevts_alloced) {
927 927 kmem_free(devts, ndevts_alloced * sizeof (dev_t));
928 928 ndevts_alloced += 128;
929 929 goto restart;
930 930 }
931 931 }
932 932 for (i = 0; i < npaths; i++) {
933 933 DEVID_LOG_LOOKUP((CE_CONT, "lookup %s\n", paths[i]));
934 934 devi = e_ddi_hold_devi_by_path(paths[i], 0);
935 935 if (devi == NULL) {
936 936 DEVID_LOG_STALE(("stale device reference",
937 937 devid, paths[i]));
938 938 continue;
939 939 }
940 940 /*
941 941 * Verify the newly attached device registered a matching devid
942 942 */
943 943 if (i_ddi_devi_get_devid(DDI_DEV_T_ANY, devi,
944 944 &match_devid) != DDI_SUCCESS) {
945 945 DEVIDERR((CE_CONT,
946 946 "%s: no devid registered on attach\n",
947 947 paths[i]));
948 948 ddi_release_devi(devi);
949 949 continue;
950 950 }
951 951
952 952 if (ddi_devid_compare(devid, match_devid) != 0) {
953 953 DEVID_LOG_STALE(("new devid registered",
954 954 devid, paths[i]));
955 955 ddi_release_devi(devi);
956 956 ddi_devid_free(match_devid);
957 957 continue;
958 958 }
959 959 ddi_devid_free(match_devid);
960 960
961 961 e_devid_minor_to_devlist(devi, minor_name,
962 962 ndevts_alloced, &ndevts, devts);
963 963 ddi_release_devi(devi);
964 964 if (ndevts > ndevts_alloced) {
965 965 kmem_free(devts,
966 966 ndevts_alloced * sizeof (dev_t));
967 967 ndevts_alloced += 128;
968 968 goto restart;
969 969 }
970 970 }
971 971
972 972 /* drop hold from e_devid_cache_devi_path_lists */
973 973 for (i = 0; i < ndevis; i++) {
974 974 ndi_rele_devi(devis[i]);
975 975 }
976 976 for (i = 0; i < npaths; i++) {
977 977 kmem_free(paths[i], strlen(paths[i]) + 1);
978 978 }
979 979 kmem_free(paths, nalloced * sizeof (char *));
980 980 kmem_free(devis, nalloced * sizeof (dev_info_t *));
981 981
982 982 if (ndevts == 0) {
983 983 DEVID_LOG_ERR(("no devid found", devid, NULL));
984 984 kmem_free(devts, ndevts_alloced * sizeof (dev_t));
985 985 return (DDI_FAILURE);
986 986 }
987 987
988 988 /*
989 989 * Build the final list of sorted dev_t's with duplicates collapsed so
990 990 * returned results are consistent. This prevents implementation
991 991 * artifacts from causing unnecessary changes in SVM namespace.
992 992 */
993 993 /* bubble sort */
994 994 for (i = 0; i < (ndevts - 1); i++) {
995 995 for (j = 0; j < ((ndevts - 1) - i); j++) {
996 996 if (devts[j + 1] < devts[j]) {
997 997 tdevt = devts[j];
998 998 devts[j] = devts[j + 1];
999 999 devts[j + 1] = tdevt;
1000 1000 }
1001 1001 }
1002 1002 }
1003 1003
1004 1004 /* determine number of unique values */
1005 1005 for (undevts = ndevts, i = 1; i < ndevts; i++) {
1006 1006 if (devts[i - 1] == devts[i])
1007 1007 undevts--;
1008 1008 }
1009 1009
1010 1010 /* allocate unique */
1011 1011 udevts = kmem_alloc(undevts * sizeof (dev_t), KM_SLEEP);
1012 1012
1013 1013 /* copy unique */
1014 1014 udevts[0] = devts[0];
1015 1015 for (i = 1, j = 1; i < ndevts; i++) {
1016 1016 if (devts[i - 1] != devts[i])
1017 1017 udevts[j++] = devts[i];
1018 1018 }
1019 1019 ASSERT(j == undevts);
1020 1020
1021 1021 kmem_free(devts, ndevts_alloced * sizeof (dev_t));
1022 1022
1023 1023 *retndevts = undevts;
1024 1024 *retdevts = udevts;
1025 1025
1026 1026 return (DDI_SUCCESS);
1027 1027 }
1028 1028
1029 1029 void
1030 1030 e_devid_cache_free_devt_list(int ndevts, dev_t *devt_list)
1031 1031 {
1032 1032 kmem_free(devt_list, ndevts * sizeof (dev_t));
1033 1033 }
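/*
 * Typical usage of the two interfaces above (a sketch only; any error
 * handling beyond the return-value check is omitted):
 *
 *	dev_t	*devts;
 *	int	i, ndevts;
 *
 *	if (e_devid_cache_to_devt_list(devid, DEVID_MINOR_NAME_ALL,
 *	    &ndevts, &devts) == DDI_SUCCESS) {
 *		for (i = 0; i < ndevts; i++)
 *			... use devts[i] ...
 *		e_devid_cache_free_devt_list(ndevts, devts);
 *	}
 */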
1034 1034
1035 1035 /*
1036 1036 * If given a full path and NULL ua, search for a cache entry
1037 1037 * whose path matches the full path. On a cache hit duplicate the
1038 1038 * devid of the matched entry into the given devid (caller
1039 1039 * must free); nodenamebuf is not touched for this usage.
1040 1040 *
1041 1041 * Given a path and a non-NULL unit address, search the cache for any entry
1042 1042 * matching "<path>/%@<unit-address>" where '%' is a wildcard meaning
1043 1043 * any node name. The path should not end in a '/'. On a cache hit
1044 1044 * duplicate the devid as before (caller must free) and copy into
1045 1045 * the caller-provided nodenamebuf (if not NULL) the nodename of the
1046 1046 * matched entry.
1047 1047 *
1048 1048 * We must not make use of nvp_dip since that may be NULL for cached
1049 1049 * entries that are not present in the current tree.
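 *
 * As an illustration (hypothetical paths, not from any actual cache):
 * given path = "/pci@0,0/scsi@2" and ua = "0,0", a cached entry for
 * "/pci@0,0/scsi@2/disk@0,0" matches; its devid is duplicated into
 * *devidp and "disk" is copied into nodenamebuf.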
1050 1050 */
1051 1051 int
1052 1052 e_devid_cache_path_to_devid(char *path, char *ua,
1053 1053 char *nodenamebuf, ddi_devid_t *devidp)
1054 1054 {
1055 1055 size_t pathlen, ualen;
1056 1056 int rv = DDI_FAILURE;
1057 1057 nvp_devid_t *np;
1058 1058 list_t *listp;
1059 1059 char *cand;
1060 1060
1061 1061 if (path == NULL || *path == '\0' || (ua && *ua == '\0') ||
1062 1062 devidp == NULL)
1063 1063 return (DDI_FAILURE);
1064 1064
1065 1065 *devidp = NULL;
1066 1066
1067 1067 if (ua) {
1068 1068 pathlen = strlen(path);
1069 1069 ualen = strlen(ua);
1070 1070 }
1071 1071
1072 1072 rw_enter(nvf_lock(dcfd_handle), RW_READER);
1073 1073
1074 1074 listp = nvf_list(dcfd_handle);
1075 1075 for (np = list_head(listp); np; np = list_next(listp, np)) {
1076 1076 size_t nodelen, candlen, n;
1077 1077 ddi_devid_t devid_dup;
1078 1078 char *uasep, *node;
1079 1079
1080 1080 if (np->nvp_devid == NULL)
1081 1081 continue;
1082 1082
1083 1083 if (ddi_devid_valid(np->nvp_devid) != DDI_SUCCESS) {
1084 1084 DEVIDERR((CE_CONT,
1085 1085 "pathsearch: invalid devid %s\n",
1086 1086 np->nvp_devpath));
1087 1087 continue;
1088 1088 }
1089 1089
1090 1090 cand = np->nvp_devpath; /* candidate path */
1091 1091
1092 1092 /* If a full pathname was provided the compare is easy */
1093 1093 if (ua == NULL) {
1094 1094 if (strcmp(cand, path) == 0)
1095 1095 goto match;
1096 1096 else
1097 1097 continue;
1098 1098 }
1099 1099
1100 1100 /*
1101 1101 * The compare for initial path plus ua and unknown nodename
1102 1102 * is trickier.
1103 1103 *
1104 1104 * Does the initial path component match 'path'?
1105 1105 */
1106 1106 if (strncmp(path, cand, pathlen) != 0)
1107 1107 continue;
1108 1108
1109 1109 candlen = strlen(cand);
1110 1110
1111 1111 /*
1112 1112 * The next character must be a '/' and there must be no
1113 1113 * further '/' thereafter. Begin by checking that the
1114 1114 * candidate is long enough to include at minimum a
1115 1115 * "/<nodename>@<ua>" after the initial portion already
1116 1116 * matched assuming a nodename length of 1.
1117 1117 */
1118 1118 if (candlen < pathlen + 1 + 1 + 1 + ualen ||
1119 1119 cand[pathlen] != '/' ||
1120 1120 strchr(cand + pathlen + 1, '/') != NULL)
1121 1121 continue;
1122 1122
1123 1123 node = cand + pathlen + 1; /* <node>@<ua> string */
1124 1124
1125 1125 /*
1126 1126 * Find the '@' before the unit address. Check for
1127 1127 * unit address match.
1128 1128 */
1129 1129 if ((uasep = strchr(node, '@')) == NULL)
1130 1130 continue;
1131 1131
1132 1132 /*
1133 1133 * Check we still have enough length and that ua matches
1134 1134 */
1135 1135 nodelen = (uintptr_t)uasep - (uintptr_t)node;
1136 1136 if (candlen < pathlen + 1 + nodelen + 1 + ualen ||
1137 1137 strncmp(ua, uasep + 1, ualen) != 0)
1138 1138 continue;
1139 1139 match:
1140 1140 n = ddi_devid_sizeof(np->nvp_devid);
1141 1141 devid_dup = kmem_alloc(n, KM_SLEEP); /* caller must free */
1142 1142 (void) bcopy(np->nvp_devid, devid_dup, n);
1143 1143 *devidp = devid_dup;
1144 1144
1145 1145 if (ua && nodenamebuf) {
1146 1146 (void) strncpy(nodenamebuf, node, nodelen);
1147 1147 nodenamebuf[nodelen] = '\0';
1148 1148 }
1149 1149
1150 1150 rv = DDI_SUCCESS;
1151 1151 break;
1152 1152 }
1153 1153
1154 1154 rw_exit(nvf_lock(dcfd_handle));
1155 1155
1156 1156 return (rv);
1157 1157 }
1158 1158
1159 1159 #ifdef DEBUG
1160 1160 static void
1161 1161 devid_log(char *fmt, ddi_devid_t devid, char *path)
1162 1162 {
1163 1163 char *devidstr = ddi_devid_str_encode(devid, NULL);
1164 1164 if (path) {
1165 1165 cmn_err(CE_CONT, "%s: %s %s\n", fmt, path, devidstr);
1166 1166 } else {
1167 1167 cmn_err(CE_CONT, "%s: %s\n", fmt, devidstr);
1168 1168 }
1169 1169 ddi_devid_str_free(devidstr);
1170 1170 }
1171 1171 #endif /* DEBUG */
(789 lines elided)