patch tsoome-feedback
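Summary: removes the redundant NULL check before nvlist_free() in the IBPART RCM module; nvlist_free() already ignores a NULL argument.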
--- old/usr/src/cmd/rcm_daemon/common/ibpart_rcm.c
+++ new/usr/src/cmd/rcm_daemon/common/ibpart_rcm.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 /*
26 26 * This RCM module adds support to the RCM framework for IBPART links
27 27 */
28 28
29 29 #include <stdio.h>
30 30 #include <stdlib.h>
31 31 #include <string.h>
32 32 #include <errno.h>
33 33 #include <sys/types.h>
34 34 #include <synch.h>
35 35 #include <assert.h>
36 36 #include <strings.h>
37 37 #include "rcm_module.h"
38 38 #include <libintl.h>
39 39 #include <libdllink.h>
40 40 #include <libdlib.h>
41 41 #include <libdlpi.h>
42 42
43 43 /*
44 44 * Definitions
45 45 */
46 46 #ifndef lint
47 47 #define _(x) gettext(x)
48 48 #else
49 49 #define _(x) x
50 50 #endif
51 51
52 52 /* Some generic well-known values and defaults used in this module */
53 53 #define RCM_LINK_PREFIX "SUNW_datalink" /* RCM datalink name prefix */
54 54 #define RCM_LINK_RESOURCE_MAX (13 + LINKID_STR_WIDTH)
55 55
56 56 /* IBPART link flags */
57 57 typedef enum {
58 58 IBPART_OFFLINED = 0x1,
59 59 IBPART_CONSUMER_OFFLINED = 0x2,
60 60 IBPART_STALE = 0x4
61 61 } ibpart_flag_t;
62 62
63 63 /* link representation */
64 64 typedef struct dl_ibpart {
65 65 struct dl_ibpart *dlib_next; /* next IBPART on this link */
66 66 struct dl_ibpart *dlib_prev; /* prev IBPART on this link */
67 67 datalink_id_t dlib_ibpart_id;
68 68 ibpart_flag_t dlib_flags; /* IBPART link flags */
69 69 } dl_ibpart_t;
70 70
71 71 /* IBPART Cache state flags */
72 72 typedef enum {
73 73 CACHE_NODE_STALE = 0x1, /* stale cached data */
74 74 CACHE_NODE_NEW = 0x2, /* new cached nodes */
75 75 CACHE_NODE_OFFLINED = 0x4 /* nodes offlined */
76 76 } cache_node_state_t;
77 77
78 78 /* Network Cache lookup options */
79 79 #define CACHE_NO_REFRESH 0x1 /* cache refresh not needed */
80 80 #define CACHE_REFRESH 0x2 /* refresh cache */
81 81
82 82 /* Cache element */
83 83 typedef struct link_cache {
84 84 struct link_cache *pc_next; /* next cached resource */
85 85 struct link_cache *pc_prev; /* prev cached resource */
86 86 char *pc_resource; /* resource name */
87 87 datalink_id_t pc_linkid; /* linkid */
88 88 dl_ibpart_t *pc_ibpart; /* IBPART list on this link */
89 89 cache_node_state_t pc_state; /* cache state flags */
90 90 } link_cache_t;
91 91
92 92 /*
93 93 * Global cache for network IBPARTs
94 94 */
95 95 static link_cache_t cache_head;
96 96 static link_cache_t cache_tail;
97 97 static mutex_t cache_lock;
98 98 static int events_registered = 0;
99 99
100 100 static dladm_handle_t dld_handle = NULL;
101 101
102 102 /*
103 103 * RCM module interface prototypes
104 104 */
105 105 static int ibpart_register(rcm_handle_t *);
106 106 static int ibpart_unregister(rcm_handle_t *);
107 107 static int ibpart_get_info(rcm_handle_t *, char *, id_t, uint_t,
108 108 char **, char **, nvlist_t *, rcm_info_t **);
109 109 static int ibpart_suspend(rcm_handle_t *, char *, id_t,
110 110 timespec_t *, uint_t, char **, rcm_info_t **);
111 111 static int ibpart_resume(rcm_handle_t *, char *, id_t, uint_t,
112 112 char **, rcm_info_t **);
113 113 static int ibpart_offline(rcm_handle_t *, char *, id_t, uint_t,
114 114 char **, rcm_info_t **);
115 115 static int ibpart_undo_offline(rcm_handle_t *, char *, id_t,
116 116 uint_t, char **, rcm_info_t **);
117 117 static int ibpart_remove(rcm_handle_t *, char *, id_t, uint_t,
118 118 char **, rcm_info_t **);
119 119 static int ibpart_notify_event(rcm_handle_t *, char *, id_t,
120 120 uint_t, char **, nvlist_t *, rcm_info_t **);
121 121 static int ibpart_configure(rcm_handle_t *, datalink_id_t);
122 122
123 123 /* Module private routines */
124 124 static void cache_free();
125 125 static int cache_update(rcm_handle_t *);
126 126 static void cache_remove(link_cache_t *);
127 127 static void node_free(link_cache_t *);
128 128 static void cache_insert(link_cache_t *);
129 129 static link_cache_t *cache_lookup(rcm_handle_t *, char *, char);
130 130 static int ibpart_consumer_offline(rcm_handle_t *, link_cache_t *,
131 131 char **, uint_t, rcm_info_t **);
132 132 static void ibpart_consumer_online(rcm_handle_t *, link_cache_t *,
133 133 char **, uint_t, rcm_info_t **);
134 134 static int ibpart_offline_ibpart(link_cache_t *, uint32_t,
135 135 cache_node_state_t);
136 136 static void ibpart_online_ibpart(link_cache_t *);
137 137 static char *ibpart_usage(link_cache_t *);
138 138 static void ibpart_log_err(datalink_id_t, char **, char *);
139 139 static int ibpart_consumer_notify(rcm_handle_t *, datalink_id_t,
140 140 char **, uint_t, rcm_info_t **);
141 141
142 142 /* Module-Private data */
143 143 static struct rcm_mod_ops ibpart_ops =
144 144 {
145 145 RCM_MOD_OPS_VERSION,
146 146 ibpart_register,
147 147 ibpart_unregister,
148 148 ibpart_get_info,
149 149 ibpart_suspend,
150 150 ibpart_resume,
151 151 ibpart_offline,
152 152 ibpart_undo_offline,
153 153 ibpart_remove,
154 154 NULL,
155 155 NULL,
156 156 ibpart_notify_event
157 157 };
158 158
159 159 /*
160 160 * rcm_mod_init() - Update registrations, and return the ops structure.
161 161 */
162 162 struct rcm_mod_ops *
163 163 rcm_mod_init(void)
164 164 {
165 165 char errmsg[DLADM_STRSIZE];
166 166 dladm_status_t status;
167 167
168 168 rcm_log_message(RCM_TRACE1, "IBPART: mod_init\n");
169 169
170 170 cache_head.pc_next = &cache_tail;
171 171 cache_head.pc_prev = NULL;
172 172 cache_tail.pc_prev = &cache_head;
173 173 cache_tail.pc_next = NULL;
174 174 (void) mutex_init(&cache_lock, 0, NULL);
175 175
176 176 if ((status = dladm_open(&dld_handle)) != DLADM_STATUS_OK) {
177 177 rcm_log_message(RCM_WARNING,
178 178 "IBPART: mod_init failed: cannot open datalink "
179 179 "handle: %s\n", dladm_status2str(status, errmsg));
180 180 return (NULL);
181 181 }
182 182
183 183 /* Return the ops vectors */
184 184 return (&ibpart_ops);
185 185 }
186 186
187 187 /*
188 188 * rcm_mod_info() - Return a string describing this module.
189 189 */
190 190 const char *
191 191 rcm_mod_info(void)
192 192 {
193 193 rcm_log_message(RCM_TRACE1, "IBPART: mod_info\n");
194 194
195 195 return ("IBPART module");
196 196 }
197 197
198 198 /*
199 199 * rcm_mod_fini() - Destroy the network IBPART cache.
200 200 */
201 201 int
202 202 rcm_mod_fini(void)
203 203 {
204 204 rcm_log_message(RCM_TRACE1, "IBPART: mod_fini\n");
205 205
206 206 /*
207 207 * Note that ibpart_unregister() does not seem to be called anywhere,
208 208 * therefore we free the cache nodes here. In theory we should call
209 209 	 * rcm_unregister_interest() for each node before we free it, but the
210 210 	 * framework does not provide the rcm_handle to allow us to do so.
211 211 */
212 212 cache_free();
213 213 (void) mutex_destroy(&cache_lock);
214 214
215 215 dladm_close(dld_handle);
216 216 return (RCM_SUCCESS);
217 217 }
218 218
219 219 /*
220 220 * ibpart_register() - Make sure the cache is properly sync'ed, and its
221 221 * registrations are in order.
222 222 */
223 223 static int
224 224 ibpart_register(rcm_handle_t *hd)
225 225 {
226 226 rcm_log_message(RCM_TRACE1, "IBPART: register\n");
227 227
228 228 if (cache_update(hd) < 0)
229 229 return (RCM_FAILURE);
230 230
231 231 /*
232 232 * Need to register interest in all new resources
233 233 * getting attached, so we get attach event notifications
234 234 */
235 235 if (!events_registered) {
236 236 if (rcm_register_event(hd, RCM_RESOURCE_LINK_NEW, 0, NULL)
237 237 != RCM_SUCCESS) {
238 238 rcm_log_message(RCM_ERROR,
239 239 _("IBPART: failed to register %s\n"),
240 240 RCM_RESOURCE_LINK_NEW);
241 241 return (RCM_FAILURE);
242 242 } else {
243 243 rcm_log_message(RCM_DEBUG, "IBPART: registered %s\n",
244 244 RCM_RESOURCE_LINK_NEW);
245 245 events_registered++;
246 246 }
247 247 }
248 248
249 249 return (RCM_SUCCESS);
250 250 }
251 251
252 252 /*
253 253 * ibpart_unregister() - Walk the cache, unregistering all the networks.
254 254 */
255 255 static int
256 256 ibpart_unregister(rcm_handle_t *hd)
257 257 {
258 258 link_cache_t *node;
259 259
260 260 rcm_log_message(RCM_TRACE1, "IBPART: unregister\n");
261 261
262 262 /* Walk the cache, unregistering everything */
263 263 (void) mutex_lock(&cache_lock);
264 264 node = cache_head.pc_next;
265 265 while (node != &cache_tail) {
266 266 if (rcm_unregister_interest(hd, node->pc_resource, 0)
267 267 != RCM_SUCCESS) {
268 268 rcm_log_message(RCM_ERROR,
269 269 _("IBPART: failed to unregister %s\n"),
270 270 node->pc_resource);
271 271 (void) mutex_unlock(&cache_lock);
272 272 return (RCM_FAILURE);
273 273 }
274 274 cache_remove(node);
275 275 node_free(node);
276 276 node = cache_head.pc_next;
277 277 }
278 278 (void) mutex_unlock(&cache_lock);
279 279
280 280 /*
281 281 * Unregister interest in all new resources
282 282 */
283 283 if (events_registered) {
284 284 if (rcm_unregister_event(hd, RCM_RESOURCE_LINK_NEW, 0)
285 285 != RCM_SUCCESS) {
286 286 rcm_log_message(RCM_ERROR,
287 287 _("IBPART: failed to unregister %s\n"),
288 288 RCM_RESOURCE_LINK_NEW);
289 289 return (RCM_FAILURE);
290 290 } else {
291 291 rcm_log_message(RCM_DEBUG, "IBPART: unregistered %s\n",
292 292 RCM_RESOURCE_LINK_NEW);
293 293 events_registered--;
294 294 }
295 295 }
296 296
297 297 return (RCM_SUCCESS);
298 298 }
299 299
300 300 /*
301 301 * ibpart_offline() - Offline IBPARTs on a specific node.
302 302 */
303 303 static int
304 304 ibpart_offline(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
305 305 char **errorp, rcm_info_t **info)
306 306 {
307 307 link_cache_t *node;
308 308
309 309 rcm_log_message(RCM_TRACE1, "IBPART: offline(%s)\n", rsrc);
310 310
311 311 /* Lock the cache and lookup the resource */
312 312 (void) mutex_lock(&cache_lock);
313 313 node = cache_lookup(hd, rsrc, CACHE_REFRESH);
314 314 if (node == NULL) {
315 315 /* should not happen because the resource is registered. */
316 316 		ibpart_log_err(DATALINK_INVALID_LINKID, errorp,
317 317 "unrecognized resource");
318 318 (void) mutex_unlock(&cache_lock);
319 319 return (RCM_SUCCESS);
320 320 }
321 321
322 322 /*
323 323 * Inform consumers (IP interfaces) of associated IBPARTs to be offlined
324 324 */
325 325 if (ibpart_consumer_offline(hd, node, errorp, flags, info) ==
326 326 RCM_SUCCESS) {
327 327 rcm_log_message(RCM_DEBUG,
328 328 "IBPART: consumers agreed on offline\n");
329 329 } else {
330 330 ibpart_log_err(node->pc_linkid, errorp,
331 331 "consumers failed to offline");
332 332 (void) mutex_unlock(&cache_lock);
333 333 return (RCM_FAILURE);
334 334 }
335 335
336 336 /* Check if it's a query */
337 337 if (flags & RCM_QUERY) {
338 338 rcm_log_message(RCM_TRACE1,
339 339 "IBPART: offline query succeeded(%s)\n", rsrc);
340 340 (void) mutex_unlock(&cache_lock);
341 341 return (RCM_SUCCESS);
342 342 }
343 343
344 344 if (ibpart_offline_ibpart(node, IBPART_OFFLINED, CACHE_NODE_OFFLINED) !=
345 345 RCM_SUCCESS) {
346 346 ibpart_online_ibpart(node);
347 347 ibpart_log_err(node->pc_linkid, errorp, "offline failed");
348 348 (void) mutex_unlock(&cache_lock);
349 349 return (RCM_FAILURE);
350 350 }
351 351
352 352 rcm_log_message(RCM_TRACE1, "IBPART: Offline succeeded(%s)\n", rsrc);
353 353 (void) mutex_unlock(&cache_lock);
354 354 return (RCM_SUCCESS);
355 355 }
356 356
357 357 /*
358 358 * ibpart_undo_offline() - Undo offline of a previously offlined node.
359 359 */
360 360 /*ARGSUSED*/
361 361 static int
362 362 ibpart_undo_offline(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
363 363 char **errorp, rcm_info_t **info)
364 364 {
365 365 link_cache_t *node;
366 366
367 367 rcm_log_message(RCM_TRACE1, "IBPART: online(%s)\n", rsrc);
368 368
369 369 (void) mutex_lock(&cache_lock);
370 370 node = cache_lookup(hd, rsrc, CACHE_NO_REFRESH);
371 371 if (node == NULL) {
372 372 ibpart_log_err(DATALINK_INVALID_LINKID, errorp, "no such link");
373 373 (void) mutex_unlock(&cache_lock);
374 374 errno = ENOENT;
375 375 return (RCM_FAILURE);
376 376 }
377 377
378 378 	/* If the link was never offlined, there is nothing to online here */
379 379 if (!(node->pc_state & CACHE_NODE_OFFLINED)) {
380 380 ibpart_log_err(node->pc_linkid, errorp, "link not offlined");
381 381 (void) mutex_unlock(&cache_lock);
382 382 errno = ENOTSUP;
383 383 return (RCM_SUCCESS);
384 384 }
385 385
386 386 ibpart_online_ibpart(node);
387 387
388 388 /*
389 389 	 * Inform consumers (IP interfaces) of the associated IBPARTs being onlined
390 390 */
391 391 ibpart_consumer_online(hd, node, errorp, flags, info);
392 392
393 393 node->pc_state &= ~CACHE_NODE_OFFLINED;
394 394 rcm_log_message(RCM_TRACE1, "IBPART: online succeeded(%s)\n", rsrc);
395 395 (void) mutex_unlock(&cache_lock);
396 396 return (RCM_SUCCESS);
397 397 }
398 398
399 399 static void
400 400 ibpart_online_ibpart(link_cache_t *node)
401 401 {
402 402 dl_ibpart_t *ibpart;
403 403 dladm_status_t status;
404 404 char errmsg[DLADM_STRSIZE];
405 405
406 406 /*
407 407 	 * Try to bring all offlined IBPARTs back online
408 408 */
409 409 for (ibpart = node->pc_ibpart; ibpart != NULL;
410 410 ibpart = ibpart->dlib_next) {
411 411 if (!(ibpart->dlib_flags & IBPART_OFFLINED))
412 412 continue;
413 413
414 414 rcm_log_message(RCM_TRACE1, "IBPART: online DLID %d\n",
415 415 ibpart->dlib_ibpart_id);
416 416 if ((status = dladm_part_up(dld_handle,
417 417 ibpart->dlib_ibpart_id, 0)) != DLADM_STATUS_OK) {
418 418 /*
419 419 * Print a warning message and continue to online
420 420 * other IBPARTs.
421 421 */
422 422 rcm_log_message(RCM_WARNING,
423 423 _("IBPART: IBPART online failed (%u): %s\n"),
424 424 ibpart->dlib_ibpart_id,
425 425 dladm_status2str(status, errmsg));
426 426 } else {
427 427 ibpart->dlib_flags &= ~IBPART_OFFLINED;
428 428 }
429 429 }
430 430 }
431 431
432 432 static int
433 433 ibpart_offline_ibpart(link_cache_t *node, uint32_t flags,
434 434 cache_node_state_t state)
435 435 {
436 436 dl_ibpart_t *ibpart;
437 437 dladm_status_t status;
438 438 char errmsg[DLADM_STRSIZE];
439 439
440 440 rcm_log_message(RCM_TRACE2, "IBPART: ibpart_offline_ibpart "
441 441 "(%s %u %u)\n", node->pc_resource, flags, state);
442 442
443 443 /*
444 444 	 * Try to delete all explicitly created IBPARTs
445 445 */
446 446 for (ibpart = node->pc_ibpart; ibpart != NULL;
447 447 ibpart = ibpart->dlib_next) {
448 448 rcm_log_message(RCM_TRACE1, "IBPART: offline DLID %d\n",
449 449 ibpart->dlib_ibpart_id);
450 450 if ((status = dladm_part_delete(dld_handle,
451 451 ibpart->dlib_ibpart_id, DLADM_OPT_ACTIVE)) !=
452 452 DLADM_STATUS_OK) {
453 453 rcm_log_message(RCM_WARNING,
454 454 _("IBPART: IBPART offline failed (%u): %s\n"),
455 455 ibpart->dlib_ibpart_id,
456 456 dladm_status2str(status, errmsg));
457 457 return (RCM_FAILURE);
458 458 } else {
459 459 rcm_log_message(RCM_TRACE1,
460 460 "IBPART: IBPART offline succeeded(%u)\n",
461 461 ibpart->dlib_ibpart_id);
462 462 ibpart->dlib_flags |= flags;
463 463 }
464 464 }
465 465
466 466 node->pc_state |= state;
467 467 return (RCM_SUCCESS);
468 468 }
469 469
470 470 /*
471 471 * ibpart_get_info() - Gather usage information for this resource.
472 472 */
473 473 /*ARGSUSED*/
474 474 int
475 475 ibpart_get_info(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
476 476 char **usagep, char **errorp, nvlist_t *props, rcm_info_t **info)
477 477 {
478 478 link_cache_t *node;
479 479
480 480 rcm_log_message(RCM_TRACE1, "IBPART: get_info(%s)\n", rsrc);
481 481
482 482 (void) mutex_lock(&cache_lock);
483 483 node = cache_lookup(hd, rsrc, CACHE_REFRESH);
484 484 if (node == NULL) {
485 485 rcm_log_message(RCM_INFO,
486 486 _("IBPART: get_info(%s) unrecognized resource\n"), rsrc);
487 487 (void) mutex_unlock(&cache_lock);
488 488 errno = ENOENT;
489 489 return (RCM_FAILURE);
490 490 }
491 491
492 492 *usagep = ibpart_usage(node);
493 493 (void) mutex_unlock(&cache_lock);
494 494 if (*usagep == NULL) {
495 495 /* most likely malloc failure */
496 496 rcm_log_message(RCM_ERROR,
497 497 _("IBPART: get_info(%s) malloc failure\n"), rsrc);
499 499 errno = ENOMEM;
500 500 return (RCM_FAILURE);
501 501 }
502 502
503 503 /* Set client/role properties */
504 504 (void) nvlist_add_string(props, RCM_CLIENT_NAME, "IBPART");
505 505
506 506 rcm_log_message(RCM_TRACE1, "IBPART: get_info(%s) info = %s\n",
507 507 rsrc, *usagep);
508 508 return (RCM_SUCCESS);
509 509 }
510 510
511 511 /*
512 512 * ibpart_suspend() - Nothing to do, always okay
513 513 */
514 514 /*ARGSUSED*/
515 515 static int
516 516 ibpart_suspend(rcm_handle_t *hd, char *rsrc, id_t id, timespec_t *interval,
517 517 uint_t flags, char **errorp, rcm_info_t **info)
518 518 {
519 519 rcm_log_message(RCM_TRACE1, "IBPART: suspend(%s)\n", rsrc);
520 520 return (RCM_SUCCESS);
521 521 }
522 522
523 523 /*
524 524 * ibpart_resume() - Nothing to do, always okay
525 525 */
526 526 /*ARGSUSED*/
527 527 static int
528 528 ibpart_resume(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
529 529 char **errorp, rcm_info_t **info)
530 530 {
531 531 rcm_log_message(RCM_TRACE1, "IBPART: resume(%s)\n", rsrc);
532 532 return (RCM_SUCCESS);
533 533 }
534 534
535 535 /*
536 536 * ibpart_consumer_remove()
537 537 *
538 538  * Notify IBPART consumers to remove their cached state.
539 539 */
540 540 static int
541 541 ibpart_consumer_remove(rcm_handle_t *hd, link_cache_t *node, uint_t flags,
542 542 rcm_info_t **info)
543 543 {
544 544 dl_ibpart_t *ibpart = NULL;
545 545 char rsrc[RCM_LINK_RESOURCE_MAX];
546 546 int ret = RCM_SUCCESS;
547 547
548 548 rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_remove (%s)\n",
549 549 node->pc_resource);
550 550
551 551 for (ibpart = node->pc_ibpart; ibpart != NULL;
552 552 ibpart = ibpart->dlib_next) {
553 553
554 554 /*
555 555 * This will only be called when the offline operation
556 556 * succeeds, so the IBPART consumers must have been offlined
557 557 * at this point.
558 558 */
559 559 assert(ibpart->dlib_flags & IBPART_CONSUMER_OFFLINED);
560 560
561 561 (void) snprintf(rsrc, RCM_LINK_RESOURCE_MAX, "%s/%u",
562 562 RCM_LINK_PREFIX, ibpart->dlib_ibpart_id);
563 563
564 564 ret = rcm_notify_remove(hd, rsrc, flags, info);
565 565 if (ret != RCM_SUCCESS) {
566 566 rcm_log_message(RCM_WARNING,
567 567 _("IBPART: notify remove failed (%s)\n"), rsrc);
568 568 break;
569 569 }
570 570 }
571 571
572 572 rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_remove done\n");
573 573 return (ret);
574 574 }
575 575
576 576 /*
577 577 * ibpart_remove() - remove a resource from cache
578 578 */
579 579 /*ARGSUSED*/
580 580 static int
581 581 ibpart_remove(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
582 582 char **errorp, rcm_info_t **info)
583 583 {
584 584 link_cache_t *node;
585 585 int rv;
586 586
587 587 rcm_log_message(RCM_TRACE1, "IBPART: remove(%s)\n", rsrc);
588 588
589 589 (void) mutex_lock(&cache_lock);
590 590 node = cache_lookup(hd, rsrc, CACHE_NO_REFRESH);
591 591 if (node == NULL) {
592 592 rcm_log_message(RCM_INFO,
593 593 _("IBPART: remove(%s) unrecognized resource\n"), rsrc);
594 594 (void) mutex_unlock(&cache_lock);
595 595 errno = ENOENT;
596 596 return (RCM_FAILURE);
597 597 }
598 598
599 599 /* remove the cached entry for the resource */
600 600 cache_remove(node);
601 601 (void) mutex_unlock(&cache_lock);
602 602
603 603 rv = ibpart_consumer_remove(hd, node, flags, info);
604 604 node_free(node);
605 605 return (rv);
606 606 }
607 607
608 608 /*
609 609 * ibpart_notify_event - Project private implementation to receive new resource
610 610 * events. It intercepts all new resource events. If the
611 611 * new resource is a network resource, pass up a notify
612 612  *		   for it too. The new resource need not be cached here,
613 613  *		   since it will be cached again at register time.
614 614 */
615 615 /*ARGSUSED*/
616 616 static int
617 617 ibpart_notify_event(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
618 618 char **errorp, nvlist_t *nvl, rcm_info_t **info)
619 619 {
620 620 nvpair_t *nvp = NULL;
621 621 datalink_id_t linkid;
622 622 uint64_t id64;
623 623 int rv = RCM_SUCCESS;
624 624
625 625 rcm_log_message(RCM_TRACE1, "IBPART: notify_event(%s)\n", rsrc);
626 626
627 627 if (strcmp(rsrc, RCM_RESOURCE_LINK_NEW) != 0) {
628 628 ibpart_log_err(DATALINK_INVALID_LINKID, errorp,
629 629 "unrecognized event");
630 630 errno = EINVAL;
631 631 return (RCM_FAILURE);
632 632 }
633 633
634 634 /* Update cache to reflect latest IBPARTs */
635 635 if (cache_update(hd) < 0) {
636 636 ibpart_log_err(DATALINK_INVALID_LINKID, errorp,
637 637 		    "private cache update failed");
638 638 return (RCM_FAILURE);
639 639 }
640 640
641 641 /*
642 642 	 * Try our best to recover the configuration.
643 643 */
644 644 rcm_log_message(RCM_DEBUG, "IBPART: process_nvlist\n");
645 645 while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
646 646 if (strcmp(nvpair_name(nvp), RCM_NV_LINKID) != 0)
647 647 continue;
648 648
649 649 if (nvpair_value_uint64(nvp, &id64) != 0) {
650 650 ibpart_log_err(DATALINK_INVALID_LINKID, errorp,
651 651 "cannot get linkid");
652 652 rv = RCM_FAILURE;
653 653 continue;
654 654 }
655 655
656 656 linkid = (datalink_id_t)id64;
657 657 if (ibpart_configure(hd, linkid) != 0) {
658 658 ibpart_log_err(linkid, errorp, "configuring failed");
659 659 rv = RCM_FAILURE;
660 660 continue;
661 661 }
662 662
663 663 /* Notify all IBPART consumers */
664 664 if (ibpart_consumer_notify(hd, linkid, errorp, flags,
665 665 info) != 0) {
666 666 ibpart_log_err(linkid, errorp,
667 667 "consumer notify failed");
668 668 rv = RCM_FAILURE;
669 669 }
670 670 }
671 671
672 672 rcm_log_message(RCM_TRACE1,
673 673 "IBPART: notify_event: link configuration complete\n");
674 674 return (rv);
675 675 }
676 676
677 677 /*
678 678 * ibpart_usage - Determine the usage of a link.
679 679  *	    The returned buffer is owned by the caller, and the
680 680  *	    caller must free it when done.
681 681 */
682 682 static char *
683 683 ibpart_usage(link_cache_t *node)
684 684 {
685 685 dl_ibpart_t *ibpart;
686 686 int nibpart;
687 687 char *buf;
688 688 const char *fmt;
689 689 char *sep;
690 690 char errmsg[DLADM_STRSIZE];
691 691 char name[MAXLINKNAMELEN];
692 692 dladm_status_t status;
693 693 size_t bufsz;
694 694
695 695 rcm_log_message(RCM_TRACE2, "IBPART: usage(%s)\n", node->pc_resource);
696 696
697 697 assert(MUTEX_HELD(&cache_lock));
698 698 if ((status = dladm_datalink_id2info(dld_handle, node->pc_linkid, NULL,
699 699 NULL, NULL, name, sizeof (name))) != DLADM_STATUS_OK) {
700 700 rcm_log_message(RCM_ERROR,
701 701 _("IBPART: usage(%s) get link name failure(%s)\n"),
702 702 node->pc_resource, dladm_status2str(status, errmsg));
703 703 return (NULL);
704 704 }
705 705
706 706 if (node->pc_state & CACHE_NODE_OFFLINED)
707 707 fmt = _("%1$s offlined");
708 708 else
709 709 fmt = _("%1$s IBPART: ");
710 710
711 711 /* TRANSLATION_NOTE: separator used between IBPART linkids */
712 712 sep = _(", ");
713 713
714 714 nibpart = 0;
715 715 for (ibpart = node->pc_ibpart; ibpart != NULL;
716 716 ibpart = ibpart->dlib_next)
717 717 nibpart++;
718 718
719 719 /* space for IBPARTs and separators, plus message */
720 720 bufsz = nibpart * (MAXLINKNAMELEN + strlen(sep)) +
721 721 strlen(fmt) + MAXLINKNAMELEN + 1;
722 722 if ((buf = malloc(bufsz)) == NULL) {
723 723 rcm_log_message(RCM_ERROR,
724 724 _("IBPART: usage(%s) malloc failure(%s)\n"),
725 725 node->pc_resource, strerror(errno));
726 726 return (NULL);
727 727 }
728 728 (void) snprintf(buf, bufsz, fmt, name);
729 729
730 730 if (node->pc_state & CACHE_NODE_OFFLINED) {
731 731 /* Nothing else to do */
732 732 rcm_log_message(RCM_TRACE2, "IBPART: usage (%s) info = %s\n",
733 733 node->pc_resource, buf);
734 734 return (buf);
735 735 }
736 736
737 737 for (ibpart = node->pc_ibpart; ibpart != NULL;
738 738 ibpart = ibpart->dlib_next) {
739 739 rcm_log_message(RCM_DEBUG, "IBPART:= %u\n",
740 740 ibpart->dlib_ibpart_id);
741 741
742 742 if ((status = dladm_datalink_id2info(dld_handle,
743 743 ibpart->dlib_ibpart_id, NULL, NULL, NULL, name,
744 744 sizeof (name))) != DLADM_STATUS_OK) {
745 745 rcm_log_message(RCM_ERROR,
746 746 _("IBPART: usage(%s) get ibpart %u name "
747 747 "failure(%s)\n"), node->pc_resource,
748 748 ibpart->dlib_ibpart_id,
749 749 dladm_status2str(status, errmsg));
750 750 free(buf);
751 751 return (NULL);
752 752 }
753 753
754 754 (void) strlcat(buf, name, bufsz);
755 755 if (ibpart->dlib_next != NULL)
756 756 (void) strlcat(buf, sep, bufsz);
757 757 }
758 758
759 759 rcm_log_message(RCM_TRACE2, "IBPART: usage (%s) info = %s\n",
760 760 node->pc_resource, buf);
761 761
762 762 return (buf);
763 763 }
764 764
765 765 /*
766 766  * Cache management routines. All cache management functions should
767 767  * be called with cache_lock held.
768 768 */
769 769
770 770 /*
771 771 * cache_lookup() - Get a cache node for a resource.
772 772 * Call with cache lock held.
773 773 *
774 774 * This ensures that the cache is consistent with the system state and
775 775 * returns a pointer to the cache element corresponding to the resource.
776 776 */
777 777 static link_cache_t *
778 778 cache_lookup(rcm_handle_t *hd, char *rsrc, char options)
779 779 {
780 780 link_cache_t *node;
781 781
782 782 rcm_log_message(RCM_TRACE2, "IBPART: cache lookup(%s)\n", rsrc);
783 783
784 784 assert(MUTEX_HELD(&cache_lock));
785 785 if (options & CACHE_REFRESH) {
786 786 /* drop lock since update locks cache again */
787 787 (void) mutex_unlock(&cache_lock);
788 788 (void) cache_update(hd);
789 789 (void) mutex_lock(&cache_lock);
790 790 }
791 791
792 792 node = cache_head.pc_next;
793 793 for (; node != &cache_tail; node = node->pc_next) {
794 794 if (strcmp(rsrc, node->pc_resource) == 0) {
795 795 rcm_log_message(RCM_TRACE2,
796 796 "IBPART: cache lookup succeeded(%s)\n", rsrc);
797 797 return (node);
798 798 }
799 799 }
800 800 return (NULL);
801 801 }
802 802
803 803 /*
804 804 * node_free - Free a node from the cache
805 805 */
806 806 static void
807 807 node_free(link_cache_t *node)
808 808 {
809 809 dl_ibpart_t *ibpart, *next;
810 810
811 811 if (node != NULL) {
812 812 free(node->pc_resource);
813 813
814 814 /* free the IBPART list */
815 815 for (ibpart = node->pc_ibpart; ibpart != NULL; ibpart = next) {
816 816 next = ibpart->dlib_next;
817 817 free(ibpart);
818 818 }
819 819 free(node);
820 820 }
821 821 }
822 822
823 823 /*
824 824 * cache_insert - Insert a resource node in cache
825 825 */
826 826 static void
827 827 cache_insert(link_cache_t *node)
828 828 {
829 829 assert(MUTEX_HELD(&cache_lock));
830 830
831 831 /* insert at the head for best performance */
832 832 node->pc_next = cache_head.pc_next;
833 833 node->pc_prev = &cache_head;
834 834
835 835 node->pc_next->pc_prev = node;
836 836 node->pc_prev->pc_next = node;
837 837 }
838 838
839 839 /*
840 840 * cache_remove() - Remove a resource node from cache.
841 841 */
842 842 static void
843 843 cache_remove(link_cache_t *node)
844 844 {
845 845 assert(MUTEX_HELD(&cache_lock));
846 846 node->pc_next->pc_prev = node->pc_prev;
847 847 node->pc_prev->pc_next = node->pc_next;
848 848 node->pc_next = NULL;
849 849 node->pc_prev = NULL;
850 850 }
851 851
852 852 typedef struct ibpart_update_arg_s {
853 853 rcm_handle_t *hd;
854 854 int retval;
855 855 } ibpart_update_arg_t;
856 856
857 857 /*
858 858  * ibpart_update() - Update the cache entry for an IBPART link
859 859 */
860 860 static int
861 861 ibpart_update(dladm_handle_t handle, datalink_id_t ibpartid, void *arg)
862 862 {
863 863 ibpart_update_arg_t *ibpart_update_argp = arg;
864 864 rcm_handle_t *hd = ibpart_update_argp->hd;
865 865 link_cache_t *node;
866 866 dl_ibpart_t *ibpart;
867 867 char *rsrc;
868 868 dladm_ib_attr_t ibpart_attr;
869 869 dladm_status_t status;
870 870 char errmsg[DLADM_STRSIZE];
871 871 boolean_t newnode = B_FALSE;
872 872 int ret = -1;
873 873
874 874 rcm_log_message(RCM_TRACE2, "IBPART: ibpart_update(%u)\n", ibpartid);
875 875
876 876 assert(MUTEX_HELD(&cache_lock));
877 877 status = dladm_part_info(handle, ibpartid, &ibpart_attr,
878 878 DLADM_OPT_ACTIVE);
879 879 if (status != DLADM_STATUS_OK) {
880 880 rcm_log_message(RCM_TRACE1,
881 881 "IBPART: ibpart_update() cannot get ibpart information for "
882 882 "%u(%s)\n", ibpartid, dladm_status2str(status, errmsg));
883 883 return (DLADM_WALK_CONTINUE);
884 884 }
885 885
886 886 if (ibpart_attr.dia_physlinkid == DATALINK_INVALID_LINKID) {
887 887 /*
888 888 * Skip the IB port nodes.
889 889 */
890 890 rcm_log_message(RCM_TRACE1,
891 891 "IBPART: ibpart_update(): skip the PORT nodes %u\n",
892 892 ibpartid);
893 893 return (DLADM_WALK_CONTINUE);
894 894 }
895 895
896 896 rsrc = malloc(RCM_LINK_RESOURCE_MAX);
897 897 if (rsrc == NULL) {
898 898 rcm_log_message(RCM_ERROR, _("IBPART: malloc error(%s): %u\n"),
899 899 strerror(errno), ibpartid);
900 900 goto done;
901 901 }
902 902
903 903 (void) snprintf(rsrc, RCM_LINK_RESOURCE_MAX, "%s/%u",
904 904 RCM_LINK_PREFIX, ibpart_attr.dia_physlinkid);
905 905
906 906 node = cache_lookup(hd, rsrc, CACHE_NO_REFRESH);
907 907 if (node != NULL) {
908 908 rcm_log_message(RCM_DEBUG,
909 909 "IBPART: %s already registered (ibpartid:%d)\n",
910 910 rsrc, ibpart_attr.dia_partlinkid);
911 911 free(rsrc);
912 912 } else {
913 913 rcm_log_message(RCM_DEBUG,
914 914 "IBPART: %s is a new resource (ibpartid:%d)\n",
915 915 rsrc, ibpart_attr.dia_partlinkid);
916 916 if ((node = calloc(1, sizeof (link_cache_t))) == NULL) {
917 917 free(rsrc);
918 918 rcm_log_message(RCM_ERROR, _("IBPART: calloc: %s\n"),
919 919 strerror(errno));
920 920 goto done;
921 921 }
922 922
923 923 node->pc_resource = rsrc;
924 924 node->pc_ibpart = NULL;
925 925 node->pc_linkid = ibpart_attr.dia_physlinkid;
926 926 node->pc_state |= CACHE_NODE_NEW;
927 927 newnode = B_TRUE;
928 928 }
929 929
930 930 for (ibpart = node->pc_ibpart; ibpart != NULL;
931 931 ibpart = ibpart->dlib_next) {
932 932 if (ibpart->dlib_ibpart_id == ibpartid) {
933 933 ibpart->dlib_flags &= ~IBPART_STALE;
934 934 break;
935 935 }
936 936 }
937 937
938 938 if (ibpart == NULL) {
939 939 if ((ibpart = calloc(1, sizeof (dl_ibpart_t))) == NULL) {
940 940 			rcm_log_message(RCM_ERROR, _("IBPART: calloc: %s\n"),
941 941 strerror(errno));
942 942 if (newnode) {
943 943 free(rsrc);
944 944 free(node);
945 945 }
946 946 goto done;
947 947 }
948 948 ibpart->dlib_ibpart_id = ibpartid;
949 949 ibpart->dlib_next = node->pc_ibpart;
950 950 ibpart->dlib_prev = NULL;
951 951 if (node->pc_ibpart != NULL)
952 952 node->pc_ibpart->dlib_prev = ibpart;
953 953 node->pc_ibpart = ibpart;
954 954 }
955 955
956 956 node->pc_state &= ~CACHE_NODE_STALE;
957 957
958 958 if (newnode)
959 959 cache_insert(node);
960 960
961 961 rcm_log_message(RCM_TRACE3, "IBPART: ibpart_update: succeeded(%u)\n",
962 962 ibpartid);
963 963 ret = 0;
964 964 done:
965 965 ibpart_update_argp->retval = ret;
966 966 return (ret == 0 ? DLADM_WALK_CONTINUE : DLADM_WALK_TERMINATE);
967 967 }
968 968
969 969 /*
970 970  * ibpart_update_all() - Walk all IBPART links and update the cache
971 971 */
972 972 static int
973 973 ibpart_update_all(rcm_handle_t *hd)
974 974 {
975 975 ibpart_update_arg_t arg = {NULL, 0};
976 976
977 977 rcm_log_message(RCM_TRACE2, "IBPART: ibpart_update_all\n");
978 978
979 979 assert(MUTEX_HELD(&cache_lock));
980 980 arg.hd = hd;
981 981 (void) dladm_walk_datalink_id(ibpart_update, dld_handle, &arg,
982 982 DATALINK_CLASS_PART, DATALINK_ANY_MEDIATYPE, DLADM_OPT_ACTIVE);
983 983 return (arg.retval);
984 984 }
985 985
986 986 /*
987 987 * cache_update() - Update cache with latest interface info
988 988 */
989 989 static int
990 990 cache_update(rcm_handle_t *hd)
991 991 {
992 992 link_cache_t *node, *nnode;
993 993 dl_ibpart_t *ibpart;
994 994 int rv;
995 995
996 996 rcm_log_message(RCM_TRACE2, "IBPART: cache_update\n");
997 997
998 998 (void) mutex_lock(&cache_lock);
999 999
1000 1000 /* first we walk the entire cache, marking each entry stale */
1001 1001 node = cache_head.pc_next;
1002 1002 for (; node != &cache_tail; node = node->pc_next) {
1003 1003 node->pc_state |= CACHE_NODE_STALE;
1004 1004 for (ibpart = node->pc_ibpart; ibpart != NULL;
1005 1005 ibpart = ibpart->dlib_next)
1006 1006 ibpart->dlib_flags |= IBPART_STALE;
1007 1007 }
1008 1008
1009 1009 rv = ibpart_update_all(hd);
1010 1010
1011 1011 /*
1012 1012 	 * Continue to delete all stale nodes from the cache even if
1013 1013 	 * ibpart_update_all() failed. Unregister links that are not
1014 1014 	 * offlined and are still in the cache.
1015 1015 */
1016 1016 for (node = cache_head.pc_next; node != &cache_tail; node = nnode) {
1017 1017 dl_ibpart_t *ibpart, *next;
1018 1018
1019 1019 for (ibpart = node->pc_ibpart; ibpart != NULL; ibpart = next) {
1020 1020 next = ibpart->dlib_next;
1021 1021
1022 1022 /* clear stale IBPARTs */
1023 1023 if (ibpart->dlib_flags & IBPART_STALE) {
1024 1024 if (ibpart->dlib_prev != NULL)
1025 1025 ibpart->dlib_prev->dlib_next = next;
1026 1026 else
1027 1027 node->pc_ibpart = next;
1028 1028
1029 1029 if (next != NULL)
1030 1030 next->dlib_prev = ibpart->dlib_prev;
1031 1031 free(ibpart);
1032 1032 }
1033 1033 }
1034 1034
1035 1035 nnode = node->pc_next;
1036 1036 if (node->pc_state & CACHE_NODE_STALE) {
1037 1037 (void) rcm_unregister_interest(hd, node->pc_resource,
1038 1038 0);
1039 1039 rcm_log_message(RCM_DEBUG, "IBPART: unregistered %s\n",
1040 1040 node->pc_resource);
1041 1041 assert(node->pc_ibpart == NULL);
1042 1042 cache_remove(node);
1043 1043 node_free(node);
1044 1044 continue;
1045 1045 }
1046 1046
1047 1047 if (!(node->pc_state & CACHE_NODE_NEW))
1048 1048 continue;
1049 1049
1050 1050 if (rcm_register_interest(hd, node->pc_resource, 0, NULL) !=
1051 1051 RCM_SUCCESS) {
1052 1052 rcm_log_message(RCM_ERROR,
1053 1053 _("IBPART: failed to register %s\n"),
1054 1054 node->pc_resource);
1055 1055 rv = -1;
1056 1056 } else {
1057 1057 rcm_log_message(RCM_DEBUG, "IBPART: registered %s\n",
1058 1058 node->pc_resource);
1059 1059 node->pc_state &= ~CACHE_NODE_NEW;
1060 1060 }
1061 1061 }
1062 1062
1063 1063 (void) mutex_unlock(&cache_lock);
1064 1064 return (rv);
1065 1065 }
1066 1066
1067 1067 /*
1068 1068 * cache_free() - Empty the cache
1069 1069 */
1070 1070 static void
1071 1071 cache_free()
1072 1072 {
1073 1073 link_cache_t *node;
1074 1074
1075 1075 rcm_log_message(RCM_TRACE2, "IBPART: cache_free\n");
1076 1076
1077 1077 (void) mutex_lock(&cache_lock);
1078 1078 node = cache_head.pc_next;
1079 1079 while (node != &cache_tail) {
1080 1080 cache_remove(node);
1081 1081 node_free(node);
1082 1082 node = cache_head.pc_next;
1083 1083 }
1084 1084 (void) mutex_unlock(&cache_lock);
1085 1085 }
1086 1086
1087 1087 /*
1088 1088 * ibpart_log_err() - RCM error log wrapper
1089 1089 */
1090 1090 static void
1091 1091 ibpart_log_err(datalink_id_t linkid, char **errorp, char *errmsg)
1092 1092 {
1093 1093 char link[MAXLINKNAMELEN];
1094 1094 char errstr[DLADM_STRSIZE];
1095 1095 dladm_status_t status;
1096 1096 int len;
1097 1097 const char *errfmt;
1098 1098 char *error;
1099 1099
1100 1100 link[0] = '\0';
1101 1101 if (linkid != DATALINK_INVALID_LINKID) {
1102 1102 char rsrc[RCM_LINK_RESOURCE_MAX];
1103 1103
1104 1104 (void) snprintf(rsrc, sizeof (rsrc), "%s/%u",
1105 1105 RCM_LINK_PREFIX, linkid);
1106 1106
1107 1107 rcm_log_message(RCM_ERROR, _("IBPART: %s(%s)\n"), errmsg, rsrc);
1108 1108 if ((status = dladm_datalink_id2info(dld_handle, linkid, NULL,
1109 1109 NULL, NULL, link, sizeof (link))) != DLADM_STATUS_OK) {
1110 1110 rcm_log_message(RCM_WARNING,
1111 1111 _("IBPART: cannot get link name for (%s) %s\n"),
1112 1112 rsrc, dladm_status2str(status, errstr));
1113 1113 }
1114 1114 } else {
1115 1115 rcm_log_message(RCM_ERROR, _("IBPART: %s\n"), errmsg);
1116 1116 }
1117 1117
1118 1118 errfmt = strlen(link) > 0 ? _("IBPART: %s(%s)") : _("IBPART: %s");
1119 1119 len = strlen(errfmt) + strlen(errmsg) + MAXLINKNAMELEN + 1;
1120 1120 if ((error = malloc(len)) != NULL) {
1121 1121 if (strlen(link) > 0)
1122 1122 (void) snprintf(error, len, errfmt, errmsg, link);
1123 1123 else
1124 1124 (void) snprintf(error, len, errfmt, errmsg);
1125 1125 }
1126 1126
1127 1127 if (errorp != NULL)
1128 1128 *errorp = error;
1129 1129 }
1130 1130
1131 1131 /*
1132 1132 * ibpart_consumer_online()
1133 1133 *
1134 1134  * Notify IBPART consumers that their IBPARTs are back online.
1135 1135 */
1136 1136 /* ARGSUSED */
1137 1137 static void
1138 1138 ibpart_consumer_online(rcm_handle_t *hd, link_cache_t *node, char **errorp,
1139 1139 uint_t flags, rcm_info_t **info)
1140 1140 {
1141 1141 dl_ibpart_t *ibpart;
1142 1142 char rsrc[RCM_LINK_RESOURCE_MAX];
1143 1143
1144 1144 rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_online (%s)\n",
1145 1145 node->pc_resource);
1146 1146
1147 1147 for (ibpart = node->pc_ibpart; ibpart != NULL;
1148 1148 ibpart = ibpart->dlib_next) {
1149 1149 if (!(ibpart->dlib_flags & IBPART_CONSUMER_OFFLINED))
1150 1150 continue;
1151 1151
1152 1152 (void) snprintf(rsrc, RCM_LINK_RESOURCE_MAX, "%s/%u",
1153 1153 RCM_LINK_PREFIX, ibpart->dlib_ibpart_id);
1154 1154
1155 1155 if (rcm_notify_online(hd, rsrc, flags, info) == RCM_SUCCESS)
1156 1156 ibpart->dlib_flags &= ~IBPART_CONSUMER_OFFLINED;
1157 1157 }
1158 1158
1159 1159 rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_online done\n");
1160 1160 }
1161 1161
1162 1162 /*
1163 1163 * ibpart_consumer_offline()
1164 1164 *
1165 1165 * Offline IBPART consumers.
1166 1166 */
1167 1167 static int
1168 1168 ibpart_consumer_offline(rcm_handle_t *hd, link_cache_t *node, char **errorp,
1169 1169 uint_t flags, rcm_info_t **info)
1170 1170 {
1171 1171 dl_ibpart_t *ibpart;
1172 1172 char rsrc[RCM_LINK_RESOURCE_MAX];
1173 1173 int ret = RCM_SUCCESS;
1174 1174
1175 1175 rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_offline (%s)\n",
1176 1176 node->pc_resource);
1177 1177
1178 1178 for (ibpart = node->pc_ibpart; ibpart != NULL;
1179 1179 ibpart = ibpart->dlib_next) {
1180 1180 (void) snprintf(rsrc, RCM_LINK_RESOURCE_MAX, "%s/%u",
1181 1181 RCM_LINK_PREFIX, ibpart->dlib_ibpart_id);
1182 1182
1183 1183 ret = rcm_request_offline(hd, rsrc, flags, info);
1184 1184 if (ret != RCM_SUCCESS)
1185 1185 break;
1186 1186
1187 1187 ibpart->dlib_flags |= IBPART_CONSUMER_OFFLINED;
1188 1188 }
1189 1189
1190 1190 if (ibpart != NULL)
1191 1191 ibpart_consumer_online(hd, node, errorp, flags, info);
1192 1192
1193 1193 rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_offline done\n");
1194 1194 return (ret);
1195 1195 }
1196 1196
1197 1197 /*
1198 1198 * Send RCM_RESOURCE_LINK_NEW events to other modules about new IBPARTs.
1199 1199 * Return 0 on success, -1 on failure.
1200 1200 */
1201 1201 static int
1202 1202 ibpart_notify_new_ibpart(rcm_handle_t *hd, char *rsrc)
1203 1203 {
1204 1204 link_cache_t *node;
1205 1205 dl_ibpart_t *ibpart;
1206 1206 nvlist_t *nvl = NULL;
1207 1207 uint64_t id;
1208 1208 int ret = -1;
1209 1209
1210 1210 rcm_log_message(RCM_TRACE2, "IBPART: ibpart_notify_new_ibpart (%s)\n",
1211 1211 rsrc);
1212 1212
1213 1213 (void) mutex_lock(&cache_lock);
1214 1214 if ((node = cache_lookup(hd, rsrc, CACHE_REFRESH)) == NULL) {
1215 1215 (void) mutex_unlock(&cache_lock);
1216 1216 return (0);
1217 1217 }
1218 1218
1219 1219 if (nvlist_alloc(&nvl, 0, 0) != 0) {
1220 1220 (void) mutex_unlock(&cache_lock);
1221 1221 rcm_log_message(RCM_WARNING,
1222 1222 _("IBPART: failed to allocate nvlist\n"));
1223 1223 goto done;
1224 1224 }
1225 1225
1226 1226 for (ibpart = node->pc_ibpart; ibpart != NULL;
1227 1227 ibpart = ibpart->dlib_next) {
1228 1228 rcm_log_message(RCM_TRACE2, "IBPART: ibpart_notify_new_ibpart "
1229 1229 "add (%u)\n", ibpart->dlib_ibpart_id);
1230 1230
1231 1231 id = ibpart->dlib_ibpart_id;
1232 1232 if (nvlist_add_uint64(nvl, RCM_NV_LINKID, id) != 0) {
1233 1233 rcm_log_message(RCM_ERROR,
1234 1234 _("IBPART: failed to construct nvlist\n"));
1235 1235 (void) mutex_unlock(&cache_lock);
1236 1236 goto done;
1237 1237 }
1238 1238 }
1239 1239 (void) mutex_unlock(&cache_lock);
1240 1240
1241 1241 if (rcm_notify_event(hd, RCM_RESOURCE_LINK_NEW, 0, nvl, NULL) !=
1242 1242 RCM_SUCCESS) {
1243 1243 rcm_log_message(RCM_ERROR,
1244 1244 _("IBPART: failed to notify %s event for %s\n"),
1245 1245 RCM_RESOURCE_LINK_NEW, node->pc_resource);
1246 1246 goto done;
1247 1247 }
1248 1248
1249 1249 ret = 0;
1250 1250 done:
1251 - if (nvl != NULL)
1252 - nvlist_free(nvl);
1251 + nvlist_free(nvl);
1253 1252 return (ret);
1254 1253 }
1255 1254
1256 1255 /*
1257 1256 * ibpart_consumer_notify() - Notify consumers of IBPARTs coming back online.
1258 1257 */
1259 1258 static int
1260 1259 ibpart_consumer_notify(rcm_handle_t *hd, datalink_id_t linkid, char **errorp,
1261 1260 uint_t flags, rcm_info_t **info)
1262 1261 {
1263 1262 char rsrc[RCM_LINK_RESOURCE_MAX];
1264 1263 link_cache_t *node;
1265 1264
1266 1265 /* Check for the interface in the cache */
1267 1266 (void) snprintf(rsrc, RCM_LINK_RESOURCE_MAX, "%s/%u", RCM_LINK_PREFIX,
1268 1267 linkid);
1269 1268
1270 1269 rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_notify(%s)\n",
1271 1270 rsrc);
1272 1271
1273 1272 /*
1274 1273 * Inform IP consumers of the new link.
1275 1274 */
1276 1275 if (ibpart_notify_new_ibpart(hd, rsrc) != 0) {
1277 1276 (void) mutex_lock(&cache_lock);
1278 1277 if ((node = cache_lookup(hd, rsrc, CACHE_NO_REFRESH)) != NULL) {
1279 1278 (void) ibpart_offline_ibpart(node, IBPART_STALE,
1280 1279 CACHE_NODE_STALE);
1281 1280 }
1282 1281 (void) mutex_unlock(&cache_lock);
1283 1282 rcm_log_message(RCM_TRACE2,
1284 1283 "IBPART: ibpart_notify_new_ibpart failed(%s)\n", rsrc);
1285 1284 return (-1);
1286 1285 }
1287 1286
1288 1287 rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_notify "
1289 1288 "succeeded\n");
1290 1289 return (0);
1291 1290 }
1292 1291
1293 1292 typedef struct ibpart_up_arg_s {
1294 1293 datalink_id_t linkid;
1295 1294 int retval;
1296 1295 } ibpart_up_arg_t;
1297 1296
1298 1297 static int
1299 1298 ibpart_up(dladm_handle_t handle, datalink_id_t ibpartid, void *arg)
1300 1299 {
1301 1300 ibpart_up_arg_t *ibpart_up_argp = arg;
1302 1301 dladm_status_t status;
1303 1302 dladm_ib_attr_t ibpart_attr;
1304 1303 char errmsg[DLADM_STRSIZE];
1305 1304
1306 1305 status = dladm_part_info(handle, ibpartid, &ibpart_attr,
1307 1306 DLADM_OPT_PERSIST);
1308 1307 if (status != DLADM_STATUS_OK) {
1309 1308 rcm_log_message(RCM_TRACE1,
1310 1309 "IBPART: ibpart_up(): cannot get information for IBPART %u "
1311 1310 "(%s)\n", ibpartid, dladm_status2str(status, errmsg));
1312 1311 return (DLADM_WALK_CONTINUE);
1313 1312 }
1314 1313
1315 1314 if (ibpart_attr.dia_physlinkid != ibpart_up_argp->linkid)
1316 1315 return (DLADM_WALK_CONTINUE);
1317 1316
1318 1317 rcm_log_message(RCM_TRACE3, "IBPART: ibpart_up(%u)\n", ibpartid);
1319 1318 if ((status = dladm_part_up(handle, ibpartid, 0)) == DLADM_STATUS_OK)
1320 1319 return (DLADM_WALK_CONTINUE);
1321 1320
1322 1321 /*
1323 1322 	 * Print a warning message and continue to bring up the other IBPARTs.
1324 1323 */
1325 1324 rcm_log_message(RCM_WARNING,
1326 1325 _("IBPART: IBPART up failed (%u): %s\n"),
1327 1326 ibpartid, dladm_status2str(status, errmsg));
1328 1327
1329 1328 ibpart_up_argp->retval = -1;
1330 1329 return (DLADM_WALK_CONTINUE);
1331 1330 }
1332 1331
1333 1332 /*
1334 1333 * ibpart_configure() - Configure IBPARTs over a physical link after it attaches
1335 1334 */
1336 1335 static int
1337 1336 ibpart_configure(rcm_handle_t *hd, datalink_id_t linkid)
1338 1337 {
1339 1338 char rsrc[RCM_LINK_RESOURCE_MAX];
1340 1339 link_cache_t *node;
1341 1340 ibpart_up_arg_t arg = {DATALINK_INVALID_LINKID, 0};
1342 1341
1343 1342 /* Check for the IBPARTs in the cache */
1344 1343 (void) snprintf(rsrc, sizeof (rsrc), "%s/%u", RCM_LINK_PREFIX, linkid);
1345 1344
1346 1345 rcm_log_message(RCM_TRACE2, "IBPART: ibpart_configure(%s)\n", rsrc);
1347 1346
1348 1347 /* Check if the link is new or was previously offlined */
1349 1348 (void) mutex_lock(&cache_lock);
1350 1349 if (((node = cache_lookup(hd, rsrc, CACHE_REFRESH)) != NULL) &&
1351 1350 (!(node->pc_state & CACHE_NODE_OFFLINED))) {
1352 1351 rcm_log_message(RCM_TRACE2,
1353 1352 "IBPART: Skipping configured interface(%s)\n", rsrc);
1354 1353 (void) mutex_unlock(&cache_lock);
1355 1354 return (0);
1356 1355 }
1357 1356 (void) mutex_unlock(&cache_lock);
1358 1357
1359 1358 arg.linkid = linkid;
1360 1359 (void) dladm_walk_datalink_id(ibpart_up, dld_handle, &arg,
1361 1360 DATALINK_CLASS_PART, DATALINK_ANY_MEDIATYPE, DLADM_OPT_PERSIST);
1362 1361
1363 1362 if (arg.retval == 0) {
1364 1363 rcm_log_message(RCM_TRACE2,
1365 1364 "IBPART: ibpart_configure succeeded(%s)\n", rsrc);
1366 1365 }
1367 1366 return (arg.retval);
1368 1367 }
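Reviewer note: the only change in this webrev is dropping the NULL guard
around nvlist_free() in ibpart_notify_new_ibpart(). In illumos,
nvlist_free() ignores a NULL argument, much like free(3C), so the guard
was redundant. A minimal standalone sketch of the idiom (hypothetical
example for illustration, not part of the patch; link with -lnvpair):

	#include <libnvpair.h>

	int
	main(void)
	{
		nvlist_t *nvl = NULL;

		/* Allocation can fail, in which case nvl stays NULL. */
		if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) == 0)
			(void) nvlist_add_uint64(nvl, "linkid", 42);

		/* Safe either way: nvlist_free(NULL) is a no-op. */
		nvlist_free(nvl);
		return (0);
	}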