5045 use atomic_{inc,dec}_* instead of atomic_add_*
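This change replaces atomic_add_64(&counter, 1) and atomic_add_64(&counter, -1)
call sites with the dedicated atomic_inc_64(&counter) and atomic_dec_64(&counter)
primitives from <sys/atomic.h>. The semantics are unchanged; the inc/dec forms
simply state the intent directly. A minimal sketch of the pattern, using a
hypothetical counter (example_count is illustrative, not a name from spa.c):

	#include <sys/atomic.h>

	static volatile uint64_t example_count;		/* hypothetical counter */

	/* before: adjust by an explicit delta of one */
	atomic_add_64(&example_count, 1);		/* increment */
	atomic_add_64(&example_count, -1);		/* decrement */

	/* after: dedicated single-unit primitives */
	atomic_inc_64(&example_count);
	atomic_dec_64(&example_count);
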
--- old/usr/src/uts/common/fs/zfs/spa.c
+++ new/usr/src/uts/common/fs/zfs/spa.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
25 25 * Copyright (c) 2013, 2014, Nexenta Systems, Inc. All rights reserved.
26 26 */
27 27
28 28 /*
29 29 * SPA: Storage Pool Allocator
30 30 *
31 31 * This file contains all the routines used when modifying on-disk SPA state.
32 32 * This includes opening, importing, destroying, exporting a pool, and syncing a
33 33 * pool.
34 34 */
35 35
36 36 #include <sys/zfs_context.h>
37 37 #include <sys/fm/fs/zfs.h>
38 38 #include <sys/spa_impl.h>
39 39 #include <sys/zio.h>
40 40 #include <sys/zio_checksum.h>
41 41 #include <sys/dmu.h>
42 42 #include <sys/dmu_tx.h>
43 43 #include <sys/zap.h>
44 44 #include <sys/zil.h>
45 45 #include <sys/ddt.h>
46 46 #include <sys/vdev_impl.h>
47 47 #include <sys/metaslab.h>
48 48 #include <sys/metaslab_impl.h>
49 49 #include <sys/uberblock_impl.h>
50 50 #include <sys/txg.h>
51 51 #include <sys/avl.h>
52 52 #include <sys/dmu_traverse.h>
53 53 #include <sys/dmu_objset.h>
54 54 #include <sys/unique.h>
55 55 #include <sys/dsl_pool.h>
56 56 #include <sys/dsl_dataset.h>
57 57 #include <sys/dsl_dir.h>
58 58 #include <sys/dsl_prop.h>
59 59 #include <sys/dsl_synctask.h>
60 60 #include <sys/fs/zfs.h>
61 61 #include <sys/arc.h>
62 62 #include <sys/callb.h>
63 63 #include <sys/systeminfo.h>
64 64 #include <sys/spa_boot.h>
65 65 #include <sys/zfs_ioctl.h>
66 66 #include <sys/dsl_scan.h>
67 67 #include <sys/zfeature.h>
68 68 #include <sys/dsl_destroy.h>
69 69
70 70 #ifdef _KERNEL
71 71 #include <sys/bootprops.h>
72 72 #include <sys/callb.h>
73 73 #include <sys/cpupart.h>
74 74 #include <sys/pool.h>
75 75 #include <sys/sysdc.h>
76 76 #include <sys/zone.h>
77 77 #endif /* _KERNEL */
78 78
79 79 #include "zfs_prop.h"
80 80 #include "zfs_comutil.h"
81 81
82 82 /*
83 83 * The interval, in seconds, at which failed configuration cache file writes
84 84 * should be retried.
85 85 */
86 86 static int zfs_ccw_retry_interval = 300;
87 87
88 88 typedef enum zti_modes {
89 89 ZTI_MODE_FIXED, /* value is # of threads (min 1) */
90 90 ZTI_MODE_BATCH, /* cpu-intensive; value is ignored */
91 91 ZTI_MODE_NULL, /* don't create a taskq */
92 92 ZTI_NMODES
93 93 } zti_modes_t;
94 94
95 95 #define ZTI_P(n, q) { ZTI_MODE_FIXED, (n), (q) }
96 96 #define ZTI_BATCH { ZTI_MODE_BATCH, 0, 1 }
97 97 #define ZTI_NULL { ZTI_MODE_NULL, 0, 0 }
98 98
99 99 #define ZTI_N(n) ZTI_P(n, 1)
100 100 #define ZTI_ONE ZTI_N(1)
101 101
102 102 typedef struct zio_taskq_info {
103 103 zti_modes_t zti_mode;
104 104 uint_t zti_value;
105 105 uint_t zti_count;
106 106 } zio_taskq_info_t;
107 107
108 108 static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
109 109 "issue", "issue_high", "intr", "intr_high"
110 110 };
111 111
112 112 /*
113 113 * This table defines the taskq settings for each ZFS I/O type. When
114 114 * initializing a pool, we use this table to create an appropriately sized
115 115 * taskq. Some operations are low volume and therefore have a small, static
116 116 * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
117 117 * macros. Other operations process a large amount of data; the ZTI_BATCH
118 118 * macro causes us to create a taskq oriented for throughput. Some operations
119 119 * are so high frequency and short-lived that the taskq itself can become a
120 120 * point of lock contention. The ZTI_P(#, #) macro indicates that we need an
121 121 * additional degree of parallelism specified by the number of threads per-
122 122 * taskq and the number of taskqs; when dispatching an event in this case, the
123 123 * particular taskq is chosen at random.
124 124 *
125 125 * The different taskq priorities are to handle the different contexts (issue
126 126 * and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
127 127 * need to be handled with minimum delay.
128 128 */
129 129 const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
130 130 /* ISSUE ISSUE_HIGH INTR INTR_HIGH */
131 131 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* NULL */
132 132 { ZTI_N(8), ZTI_NULL, ZTI_P(12, 8), ZTI_NULL }, /* READ */
133 133 { ZTI_BATCH, ZTI_N(5), ZTI_N(8), ZTI_N(5) }, /* WRITE */
134 134 { ZTI_P(12, 8), ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* FREE */
135 135 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* CLAIM */
136 136 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* IOCTL */
137 137 };
138 138
139 139 static void spa_sync_version(void *arg, dmu_tx_t *tx);
140 140 static void spa_sync_props(void *arg, dmu_tx_t *tx);
141 141 static boolean_t spa_has_active_shared_spare(spa_t *spa);
142 142 static int spa_load_impl(spa_t *spa, uint64_t, nvlist_t *config,
143 143 spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
144 144 char **ereport);
145 145 static void spa_vdev_resilver_done(spa_t *spa);
146 146
147 147 uint_t zio_taskq_batch_pct = 75; /* 1 thread per cpu in pset */
148 148 id_t zio_taskq_psrset_bind = PS_NONE;
149 149 boolean_t zio_taskq_sysdc = B_TRUE; /* use SDC scheduling class */
150 150 uint_t zio_taskq_basedc = 80; /* base duty cycle */
151 151
152 152 boolean_t spa_create_process = B_TRUE; /* no process ==> no sysdc */
153 153 extern int zfs_sync_pass_deferred_free;
154 154
155 155 /*
156 156 * This (illegal) pool name is used when temporarily importing a spa_t in order
157 157 * to get the vdev stats associated with the imported devices.
158 158 */
159 159 #define TRYIMPORT_NAME "$import"
160 160
161 161 /*
162 162 * ==========================================================================
163 163 * SPA properties routines
164 164 * ==========================================================================
165 165 */
166 166
167 167 /*
168 168 * Add a (source=src, propname=propval) list to an nvlist.
169 169 */
170 170 static void
171 171 spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
172 172 uint64_t intval, zprop_source_t src)
173 173 {
174 174 const char *propname = zpool_prop_to_name(prop);
175 175 nvlist_t *propval;
176 176
177 177 VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
178 178 VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);
179 179
180 180 if (strval != NULL)
181 181 VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
182 182 else
183 183 VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);
184 184
185 185 VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
186 186 nvlist_free(propval);
187 187 }
188 188
189 189 /*
190 190 * Get property values from the spa configuration.
191 191 */
192 192 static void
193 193 spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
194 194 {
195 195 vdev_t *rvd = spa->spa_root_vdev;
196 196 dsl_pool_t *pool = spa->spa_dsl_pool;
197 197 uint64_t size;
198 198 uint64_t alloc;
199 199 uint64_t space;
200 200 uint64_t cap, version;
201 201 zprop_source_t src = ZPROP_SRC_NONE;
202 202 spa_config_dirent_t *dp;
203 203
204 204 ASSERT(MUTEX_HELD(&spa->spa_props_lock));
205 205
206 206 if (rvd != NULL) {
207 207 alloc = metaslab_class_get_alloc(spa_normal_class(spa));
208 208 size = metaslab_class_get_space(spa_normal_class(spa));
209 209 spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
210 210 spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
211 211 spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
212 212 spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
213 213 size - alloc, src);
214 214
215 215 space = 0;
216 216 for (int c = 0; c < rvd->vdev_children; c++) {
217 217 vdev_t *tvd = rvd->vdev_child[c];
218 218 space += tvd->vdev_max_asize - tvd->vdev_asize;
219 219 }
220 220 spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL, space,
221 221 src);
222 222
223 223 spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
224 224 (spa_mode(spa) == FREAD), src);
225 225
226 226 cap = (size == 0) ? 0 : (alloc * 100 / size);
227 227 spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);
228 228
229 229 spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
230 230 ddt_get_pool_dedup_ratio(spa), src);
231 231
232 232 spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
233 233 rvd->vdev_state, src);
234 234
235 235 version = spa_version(spa);
236 236 if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
237 237 src = ZPROP_SRC_DEFAULT;
238 238 else
239 239 src = ZPROP_SRC_LOCAL;
240 240 spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
241 241 }
242 242
243 243 if (pool != NULL) {
244 244 /*
245 245 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS;
246 246 * when opening pools before this version, freedir will be NULL.
247 247 */
248 248 if (pool->dp_free_dir != NULL) {
249 249 spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
250 250 pool->dp_free_dir->dd_phys->dd_used_bytes, src);
251 251 } else {
252 252 spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
253 253 NULL, 0, src);
254 254 }
255 255
256 256 if (pool->dp_leak_dir != NULL) {
257 257 spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL,
258 258 pool->dp_leak_dir->dd_phys->dd_used_bytes, src);
259 259 } else {
260 260 spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED,
261 261 NULL, 0, src);
262 262 }
263 263 }
264 264
265 265 spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
266 266
267 267 if (spa->spa_comment != NULL) {
268 268 spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
269 269 0, ZPROP_SRC_LOCAL);
270 270 }
271 271
272 272 if (spa->spa_root != NULL)
273 273 spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
274 274 0, ZPROP_SRC_LOCAL);
275 275
276 276 if ((dp = list_head(&spa->spa_config_list)) != NULL) {
277 277 if (dp->scd_path == NULL) {
278 278 spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
279 279 "none", 0, ZPROP_SRC_LOCAL);
280 280 } else if (strcmp(dp->scd_path, spa_config_path) != 0) {
281 281 spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
282 282 dp->scd_path, 0, ZPROP_SRC_LOCAL);
283 283 }
284 284 }
285 285 }
286 286
287 287 /*
288 288 * Get zpool property values.
289 289 */
290 290 int
291 291 spa_prop_get(spa_t *spa, nvlist_t **nvp)
292 292 {
293 293 objset_t *mos = spa->spa_meta_objset;
294 294 zap_cursor_t zc;
295 295 zap_attribute_t za;
296 296 int err;
297 297
298 298 VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
299 299
300 300 mutex_enter(&spa->spa_props_lock);
301 301
302 302 /*
303 303 * Get properties from the spa config.
304 304 */
305 305 spa_prop_get_config(spa, nvp);
306 306
307 307 /* If no pool property object, no more prop to get. */
308 308 if (mos == NULL || spa->spa_pool_props_object == 0) {
309 309 mutex_exit(&spa->spa_props_lock);
310 310 return (0);
311 311 }
312 312
313 313 /*
314 314 * Get properties from the MOS pool property object.
315 315 */
316 316 for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
317 317 (err = zap_cursor_retrieve(&zc, &za)) == 0;
318 318 zap_cursor_advance(&zc)) {
319 319 uint64_t intval = 0;
320 320 char *strval = NULL;
321 321 zprop_source_t src = ZPROP_SRC_DEFAULT;
322 322 zpool_prop_t prop;
323 323
324 324 if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
325 325 continue;
326 326
327 327 switch (za.za_integer_length) {
328 328 case 8:
329 329 /* integer property */
330 330 if (za.za_first_integer !=
331 331 zpool_prop_default_numeric(prop))
332 332 src = ZPROP_SRC_LOCAL;
333 333
334 334 if (prop == ZPOOL_PROP_BOOTFS) {
335 335 dsl_pool_t *dp;
336 336 dsl_dataset_t *ds = NULL;
337 337
338 338 dp = spa_get_dsl(spa);
339 339 dsl_pool_config_enter(dp, FTAG);
340 340 if (err = dsl_dataset_hold_obj(dp,
341 341 za.za_first_integer, FTAG, &ds)) {
342 342 dsl_pool_config_exit(dp, FTAG);
343 343 break;
344 344 }
345 345
346 346 strval = kmem_alloc(
347 347 MAXNAMELEN + strlen(MOS_DIR_NAME) + 1,
348 348 KM_SLEEP);
349 349 dsl_dataset_name(ds, strval);
350 350 dsl_dataset_rele(ds, FTAG);
351 351 dsl_pool_config_exit(dp, FTAG);
352 352 } else {
353 353 strval = NULL;
354 354 intval = za.za_first_integer;
355 355 }
356 356
357 357 spa_prop_add_list(*nvp, prop, strval, intval, src);
358 358
359 359 if (strval != NULL)
360 360 kmem_free(strval,
361 361 MAXNAMELEN + strlen(MOS_DIR_NAME) + 1);
362 362
363 363 break;
364 364
365 365 case 1:
366 366 /* string property */
367 367 strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
368 368 err = zap_lookup(mos, spa->spa_pool_props_object,
369 369 za.za_name, 1, za.za_num_integers, strval);
370 370 if (err) {
371 371 kmem_free(strval, za.za_num_integers);
372 372 break;
373 373 }
374 374 spa_prop_add_list(*nvp, prop, strval, 0, src);
375 375 kmem_free(strval, za.za_num_integers);
376 376 break;
377 377
378 378 default:
379 379 break;
380 380 }
381 381 }
382 382 zap_cursor_fini(&zc);
383 383 mutex_exit(&spa->spa_props_lock);
384 384 out:
385 385 if (err && err != ENOENT) {
386 386 nvlist_free(*nvp);
387 387 *nvp = NULL;
388 388 return (err);
389 389 }
390 390
391 391 return (0);
392 392 }
393 393
394 394 /*
395 395 * Validate the given pool properties nvlist and modify the list
396 396 * for the property values to be set.
397 397 */
398 398 static int
399 399 spa_prop_validate(spa_t *spa, nvlist_t *props)
400 400 {
401 401 nvpair_t *elem;
402 402 int error = 0, reset_bootfs = 0;
403 403 uint64_t objnum = 0;
404 404 boolean_t has_feature = B_FALSE;
405 405
406 406 elem = NULL;
407 407 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
408 408 uint64_t intval;
409 409 char *strval, *slash, *check, *fname;
410 410 const char *propname = nvpair_name(elem);
411 411 zpool_prop_t prop = zpool_name_to_prop(propname);
412 412
413 413 switch (prop) {
414 414 case ZPROP_INVAL:
415 415 if (!zpool_prop_feature(propname)) {
416 416 error = SET_ERROR(EINVAL);
417 417 break;
418 418 }
419 419
420 420 /*
421 421 * Sanitize the input.
422 422 */
423 423 if (nvpair_type(elem) != DATA_TYPE_UINT64) {
424 424 error = SET_ERROR(EINVAL);
425 425 break;
426 426 }
427 427
428 428 if (nvpair_value_uint64(elem, &intval) != 0) {
429 429 error = SET_ERROR(EINVAL);
430 430 break;
431 431 }
432 432
433 433 if (intval != 0) {
434 434 error = SET_ERROR(EINVAL);
435 435 break;
436 436 }
437 437
438 438 fname = strchr(propname, '@') + 1;
439 439 if (zfeature_lookup_name(fname, NULL) != 0) {
440 440 error = SET_ERROR(EINVAL);
441 441 break;
442 442 }
443 443
444 444 has_feature = B_TRUE;
445 445 break;
446 446
447 447 case ZPOOL_PROP_VERSION:
448 448 error = nvpair_value_uint64(elem, &intval);
449 449 if (!error &&
450 450 (intval < spa_version(spa) ||
451 451 intval > SPA_VERSION_BEFORE_FEATURES ||
452 452 has_feature))
453 453 error = SET_ERROR(EINVAL);
454 454 break;
455 455
456 456 case ZPOOL_PROP_DELEGATION:
457 457 case ZPOOL_PROP_AUTOREPLACE:
458 458 case ZPOOL_PROP_LISTSNAPS:
459 459 case ZPOOL_PROP_AUTOEXPAND:
460 460 error = nvpair_value_uint64(elem, &intval);
461 461 if (!error && intval > 1)
462 462 error = SET_ERROR(EINVAL);
463 463 break;
464 464
465 465 case ZPOOL_PROP_BOOTFS:
466 466 /*
467 467 * If the pool version is less than SPA_VERSION_BOOTFS,
468 468 * or the pool is still being created (version == 0),
469 469 * the bootfs property cannot be set.
470 470 */
471 471 if (spa_version(spa) < SPA_VERSION_BOOTFS) {
472 472 error = SET_ERROR(ENOTSUP);
473 473 break;
474 474 }
475 475
476 476 /*
477 477 * Make sure the vdev config is bootable
478 478 */
479 479 if (!vdev_is_bootable(spa->spa_root_vdev)) {
480 480 error = SET_ERROR(ENOTSUP);
481 481 break;
482 482 }
483 483
484 484 reset_bootfs = 1;
485 485
486 486 error = nvpair_value_string(elem, &strval);
487 487
488 488 if (!error) {
489 489 objset_t *os;
490 490 uint64_t compress;
491 491
492 492 if (strval == NULL || strval[0] == '\0') {
493 493 objnum = zpool_prop_default_numeric(
494 494 ZPOOL_PROP_BOOTFS);
495 495 break;
496 496 }
497 497
498 498 if (error = dmu_objset_hold(strval, FTAG, &os))
499 499 break;
500 500
501 501 /* Must be ZPL and not gzip compressed. */
502 502
503 503 if (dmu_objset_type(os) != DMU_OST_ZFS) {
504 504 error = SET_ERROR(ENOTSUP);
505 505 } else if ((error =
506 506 dsl_prop_get_int_ds(dmu_objset_ds(os),
507 507 zfs_prop_to_name(ZFS_PROP_COMPRESSION),
508 508 &compress)) == 0 &&
509 509 !BOOTFS_COMPRESS_VALID(compress)) {
510 510 error = SET_ERROR(ENOTSUP);
511 511 } else {
512 512 objnum = dmu_objset_id(os);
513 513 }
514 514 dmu_objset_rele(os, FTAG);
515 515 }
516 516 break;
517 517
518 518 case ZPOOL_PROP_FAILUREMODE:
519 519 error = nvpair_value_uint64(elem, &intval);
520 520 if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
521 521 intval > ZIO_FAILURE_MODE_PANIC))
522 522 error = SET_ERROR(EINVAL);
523 523
524 524 /*
525 525 * This is a special case which only occurs when
526 526 * the pool has completely failed. This allows
527 527 * the user to change the in-core failmode property
528 528 * without syncing it out to disk (I/Os might
529 529 * currently be blocked). We do this by returning
530 530 * EIO to the caller (spa_prop_set) to trick it
531 531 * into thinking we encountered a property validation
532 532 * error.
533 533 */
534 534 if (!error && spa_suspended(spa)) {
535 535 spa->spa_failmode = intval;
536 536 error = SET_ERROR(EIO);
537 537 }
538 538 break;
539 539
540 540 case ZPOOL_PROP_CACHEFILE:
541 541 if ((error = nvpair_value_string(elem, &strval)) != 0)
542 542 break;
543 543
544 544 if (strval[0] == '\0')
545 545 break;
546 546
547 547 if (strcmp(strval, "none") == 0)
548 548 break;
549 549
550 550 if (strval[0] != '/') {
551 551 error = SET_ERROR(EINVAL);
552 552 break;
553 553 }
554 554
555 555 slash = strrchr(strval, '/');
556 556 ASSERT(slash != NULL);
557 557
558 558 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
559 559 strcmp(slash, "/..") == 0)
560 560 error = SET_ERROR(EINVAL);
561 561 break;
562 562
563 563 case ZPOOL_PROP_COMMENT:
564 564 if ((error = nvpair_value_string(elem, &strval)) != 0)
565 565 break;
566 566 for (check = strval; *check != '\0'; check++) {
567 567 /*
568 568 * The kernel doesn't have an easy isprint()
569 569 * check. For this kernel check, we merely
570 570 * check ASCII apart from DEL. Fix this if
571 571 * there is an easy-to-use kernel isprint().
572 572 */
573 573 if (*check >= 0x7f) {
574 574 error = SET_ERROR(EINVAL);
575 575 break;
576 576 }
577 577 check++;
578 578 }
579 579 if (strlen(strval) > ZPROP_MAX_COMMENT)
580 580 error = E2BIG;
581 581 break;
582 582
583 583 case ZPOOL_PROP_DEDUPDITTO:
584 584 if (spa_version(spa) < SPA_VERSION_DEDUP)
585 585 error = SET_ERROR(ENOTSUP);
586 586 else
587 587 error = nvpair_value_uint64(elem, &intval);
588 588 if (error == 0 &&
589 589 intval != 0 && intval < ZIO_DEDUPDITTO_MIN)
590 590 error = SET_ERROR(EINVAL);
591 591 break;
592 592 }
593 593
594 594 if (error)
595 595 break;
596 596 }
597 597
598 598 if (!error && reset_bootfs) {
599 599 error = nvlist_remove(props,
600 600 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);
601 601
602 602 if (!error) {
603 603 error = nvlist_add_uint64(props,
604 604 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
605 605 }
606 606 }
607 607
608 608 return (error);
609 609 }
610 610
611 611 void
612 612 spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
613 613 {
614 614 char *cachefile;
615 615 spa_config_dirent_t *dp;
616 616
617 617 if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
618 618 &cachefile) != 0)
619 619 return;
620 620
621 621 dp = kmem_alloc(sizeof (spa_config_dirent_t),
622 622 KM_SLEEP);
623 623
624 624 if (cachefile[0] == '\0')
625 625 dp->scd_path = spa_strdup(spa_config_path);
626 626 else if (strcmp(cachefile, "none") == 0)
627 627 dp->scd_path = NULL;
628 628 else
629 629 dp->scd_path = spa_strdup(cachefile);
630 630
631 631 list_insert_head(&spa->spa_config_list, dp);
632 632 if (need_sync)
633 633 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
634 634 }
635 635
636 636 int
637 637 spa_prop_set(spa_t *spa, nvlist_t *nvp)
638 638 {
639 639 int error;
640 640 nvpair_t *elem = NULL;
641 641 boolean_t need_sync = B_FALSE;
642 642
643 643 if ((error = spa_prop_validate(spa, nvp)) != 0)
644 644 return (error);
645 645
646 646 while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
647 647 zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));
648 648
649 649 if (prop == ZPOOL_PROP_CACHEFILE ||
650 650 prop == ZPOOL_PROP_ALTROOT ||
651 651 prop == ZPOOL_PROP_READONLY)
652 652 continue;
653 653
654 654 if (prop == ZPOOL_PROP_VERSION || prop == ZPROP_INVAL) {
655 655 uint64_t ver;
656 656
657 657 if (prop == ZPOOL_PROP_VERSION) {
658 658 VERIFY(nvpair_value_uint64(elem, &ver) == 0);
659 659 } else {
660 660 ASSERT(zpool_prop_feature(nvpair_name(elem)));
661 661 ver = SPA_VERSION_FEATURES;
662 662 need_sync = B_TRUE;
663 663 }
664 664
665 665 /* Save time if the version is already set. */
666 666 if (ver == spa_version(spa))
667 667 continue;
668 668
669 669 /*
670 670 * In addition to the pool directory object, we might
671 671 * create the pool properties object, the features for
672 672 * read object, the features for write object, or the
673 673 * feature descriptions object.
674 674 */
675 675 error = dsl_sync_task(spa->spa_name, NULL,
676 676 spa_sync_version, &ver,
677 677 6, ZFS_SPACE_CHECK_RESERVED);
678 678 if (error)
679 679 return (error);
680 680 continue;
681 681 }
682 682
683 683 need_sync = B_TRUE;
684 684 break;
685 685 }
686 686
687 687 if (need_sync) {
688 688 return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
689 689 nvp, 6, ZFS_SPACE_CHECK_RESERVED));
690 690 }
691 691
692 692 return (0);
693 693 }
694 694
695 695 /*
696 696 * If the bootfs property value is dsobj, clear it.
697 697 */
698 698 void
699 699 spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
700 700 {
701 701 if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
702 702 VERIFY(zap_remove(spa->spa_meta_objset,
703 703 spa->spa_pool_props_object,
704 704 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
705 705 spa->spa_bootfs = 0;
706 706 }
707 707 }
708 708
709 709 /*ARGSUSED*/
710 710 static int
711 711 spa_change_guid_check(void *arg, dmu_tx_t *tx)
712 712 {
713 713 uint64_t *newguid = arg;
714 714 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
715 715 vdev_t *rvd = spa->spa_root_vdev;
716 716 uint64_t vdev_state;
717 717
718 718 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
719 719 vdev_state = rvd->vdev_state;
720 720 spa_config_exit(spa, SCL_STATE, FTAG);
721 721
722 722 if (vdev_state != VDEV_STATE_HEALTHY)
723 723 return (SET_ERROR(ENXIO));
724 724
725 725 ASSERT3U(spa_guid(spa), !=, *newguid);
726 726
727 727 return (0);
728 728 }
729 729
730 730 static void
731 731 spa_change_guid_sync(void *arg, dmu_tx_t *tx)
732 732 {
733 733 uint64_t *newguid = arg;
734 734 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
735 735 uint64_t oldguid;
736 736 vdev_t *rvd = spa->spa_root_vdev;
737 737
738 738 oldguid = spa_guid(spa);
739 739
740 740 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
741 741 rvd->vdev_guid = *newguid;
742 742 rvd->vdev_guid_sum += (*newguid - oldguid);
743 743 vdev_config_dirty(rvd);
744 744 spa_config_exit(spa, SCL_STATE, FTAG);
745 745
746 746 spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
747 747 oldguid, *newguid);
748 748 }
749 749
750 750 /*
751 751 * Change the GUID for the pool. This is done so that we can later
752 752 * re-import a pool built from a clone of our own vdevs. We will modify
753 753 * the root vdev's guid, our own pool guid, and then mark all of our
754 754 * vdevs dirty. Note that we must make sure that all our vdevs are
755 755 * online when we do this, or else any vdevs that weren't present
756 756 * would be orphaned from our pool. We are also going to issue a
757 757 * sysevent to update any watchers.
758 758 */
759 759 int
760 760 spa_change_guid(spa_t *spa)
761 761 {
762 762 int error;
763 763 uint64_t guid;
764 764
765 765 mutex_enter(&spa->spa_vdev_top_lock);
766 766 mutex_enter(&spa_namespace_lock);
767 767 guid = spa_generate_guid(NULL);
768 768
769 769 error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
770 770 spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED);
771 771
772 772 if (error == 0) {
773 773 spa_config_sync(spa, B_FALSE, B_TRUE);
774 774 spa_event_notify(spa, NULL, ESC_ZFS_POOL_REGUID);
775 775 }
776 776
777 777 mutex_exit(&spa_namespace_lock);
778 778 mutex_exit(&spa->spa_vdev_top_lock);
779 779
780 780 return (error);
781 781 }
782 782
783 783 /*
784 784 * ==========================================================================
785 785 * SPA state manipulation (open/create/destroy/import/export)
786 786 * ==========================================================================
787 787 */
788 788
789 789 static int
790 790 spa_error_entry_compare(const void *a, const void *b)
791 791 {
792 792 spa_error_entry_t *sa = (spa_error_entry_t *)a;
793 793 spa_error_entry_t *sb = (spa_error_entry_t *)b;
794 794 int ret;
795 795
796 796 ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
797 797 sizeof (zbookmark_phys_t));
798 798
799 799 if (ret < 0)
800 800 return (-1);
801 801 else if (ret > 0)
802 802 return (1);
803 803 else
804 804 return (0);
805 805 }
806 806
807 807 /*
808 808 * Utility function which retrieves copies of the current logs and
809 809 * re-initializes them in the process.
810 810 */
811 811 void
812 812 spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
813 813 {
814 814 ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));
815 815
816 816 bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
817 817 bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));
818 818
819 819 avl_create(&spa->spa_errlist_scrub,
820 820 spa_error_entry_compare, sizeof (spa_error_entry_t),
821 821 offsetof(spa_error_entry_t, se_avl));
822 822 avl_create(&spa->spa_errlist_last,
823 823 spa_error_entry_compare, sizeof (spa_error_entry_t),
824 824 offsetof(spa_error_entry_t, se_avl));
825 825 }
826 826
827 827 static void
828 828 spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
829 829 {
830 830 const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
831 831 enum zti_modes mode = ztip->zti_mode;
832 832 uint_t value = ztip->zti_value;
833 833 uint_t count = ztip->zti_count;
834 834 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
835 835 char name[32];
836 836 uint_t flags = 0;
837 837 boolean_t batch = B_FALSE;
838 838
839 839 if (mode == ZTI_MODE_NULL) {
840 840 tqs->stqs_count = 0;
841 841 tqs->stqs_taskq = NULL;
842 842 return;
843 843 }
844 844
845 845 ASSERT3U(count, >, 0);
846 846
847 847 tqs->stqs_count = count;
848 848 tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);
849 849
850 850 switch (mode) {
851 851 case ZTI_MODE_FIXED:
852 852 ASSERT3U(value, >=, 1);
853 853 value = MAX(value, 1);
854 854 break;
855 855
856 856 case ZTI_MODE_BATCH:
857 857 batch = B_TRUE;
858 858 flags |= TASKQ_THREADS_CPU_PCT;
859 859 value = zio_taskq_batch_pct;
860 860 break;
861 861
862 862 default:
863 863 panic("unrecognized mode for %s_%s taskq (%u:%u) in "
864 864 "spa_activate()",
865 865 zio_type_name[t], zio_taskq_types[q], mode, value);
866 866 break;
867 867 }
868 868
869 869 for (uint_t i = 0; i < count; i++) {
870 870 taskq_t *tq;
871 871
872 872 if (count > 1) {
873 873 (void) snprintf(name, sizeof (name), "%s_%s_%u",
874 874 zio_type_name[t], zio_taskq_types[q], i);
875 875 } else {
876 876 (void) snprintf(name, sizeof (name), "%s_%s",
877 877 zio_type_name[t], zio_taskq_types[q]);
878 878 }
879 879
880 880 if (zio_taskq_sysdc && spa->spa_proc != &p0) {
881 881 if (batch)
882 882 flags |= TASKQ_DC_BATCH;
883 883
884 884 tq = taskq_create_sysdc(name, value, 50, INT_MAX,
885 885 spa->spa_proc, zio_taskq_basedc, flags);
886 886 } else {
887 887 pri_t pri = maxclsyspri;
888 888 /*
889 889 * The write issue taskq can be extremely CPU
890 890 * intensive. Run it at slightly lower priority
891 891 * than the other taskqs.
892 892 */
893 893 if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE)
894 894 pri--;
895 895
896 896 tq = taskq_create_proc(name, value, pri, 50,
897 897 INT_MAX, spa->spa_proc, flags);
898 898 }
899 899
900 900 tqs->stqs_taskq[i] = tq;
901 901 }
902 902 }
903 903
904 904 static void
905 905 spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
906 906 {
907 907 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
908 908
909 909 if (tqs->stqs_taskq == NULL) {
910 910 ASSERT0(tqs->stqs_count);
911 911 return;
912 912 }
913 913
914 914 for (uint_t i = 0; i < tqs->stqs_count; i++) {
915 915 ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
916 916 taskq_destroy(tqs->stqs_taskq[i]);
917 917 }
918 918
919 919 kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
920 920 tqs->stqs_taskq = NULL;
921 921 }
922 922
923 923 /*
924 924 * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
925 925 * Note that a type may have multiple discrete taskqs to avoid lock contention
926 926 * on the taskq itself. In that case we choose which taskq at random by using
927 927 * the low bits of gethrtime().
928 928 */
929 929 void
930 930 spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
931 931 task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
932 932 {
933 933 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
934 934 taskq_t *tq;
935 935
936 936 ASSERT3P(tqs->stqs_taskq, !=, NULL);
937 937 ASSERT3U(tqs->stqs_count, !=, 0);
938 938
939 939 if (tqs->stqs_count == 1) {
940 940 tq = tqs->stqs_taskq[0];
941 941 } else {
942 942 tq = tqs->stqs_taskq[gethrtime() % tqs->stqs_count];
943 943 }
944 944
945 945 taskq_dispatch_ent(tq, func, arg, flags, ent);
946 946 }
947 947
948 948 static void
949 949 spa_create_zio_taskqs(spa_t *spa)
950 950 {
951 951 for (int t = 0; t < ZIO_TYPES; t++) {
952 952 for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
953 953 spa_taskqs_init(spa, t, q);
954 954 }
955 955 }
956 956 }
957 957
958 958 #ifdef _KERNEL
959 959 static void
960 960 spa_thread(void *arg)
961 961 {
962 962 callb_cpr_t cprinfo;
963 963
964 964 spa_t *spa = arg;
965 965 user_t *pu = PTOU(curproc);
966 966
967 967 CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
968 968 spa->spa_name);
969 969
970 970 ASSERT(curproc != &p0);
971 971 (void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
972 972 "zpool-%s", spa->spa_name);
973 973 (void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));
974 974
975 975 /* bind this thread to the requested psrset */
976 976 if (zio_taskq_psrset_bind != PS_NONE) {
977 977 pool_lock();
978 978 mutex_enter(&cpu_lock);
979 979 mutex_enter(&pidlock);
980 980 mutex_enter(&curproc->p_lock);
981 981
982 982 if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
983 983 0, NULL, NULL) == 0) {
984 984 curthread->t_bind_pset = zio_taskq_psrset_bind;
985 985 } else {
986 986 cmn_err(CE_WARN,
987 987 "Couldn't bind process for zfs pool \"%s\" to "
988 988 "pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
989 989 }
990 990
991 991 mutex_exit(&curproc->p_lock);
992 992 mutex_exit(&pidlock);
993 993 mutex_exit(&cpu_lock);
994 994 pool_unlock();
995 995 }
996 996
997 997 if (zio_taskq_sysdc) {
998 998 sysdc_thread_enter(curthread, 100, 0);
999 999 }
1000 1000
1001 1001 spa->spa_proc = curproc;
1002 1002 spa->spa_did = curthread->t_did;
1003 1003
1004 1004 spa_create_zio_taskqs(spa);
1005 1005
1006 1006 mutex_enter(&spa->spa_proc_lock);
1007 1007 ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);
1008 1008
1009 1009 spa->spa_proc_state = SPA_PROC_ACTIVE;
1010 1010 cv_broadcast(&spa->spa_proc_cv);
1011 1011
1012 1012 CALLB_CPR_SAFE_BEGIN(&cprinfo);
1013 1013 while (spa->spa_proc_state == SPA_PROC_ACTIVE)
1014 1014 cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
1015 1015 CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);
1016 1016
1017 1017 ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
1018 1018 spa->spa_proc_state = SPA_PROC_GONE;
1019 1019 spa->spa_proc = &p0;
1020 1020 cv_broadcast(&spa->spa_proc_cv);
1021 1021 CALLB_CPR_EXIT(&cprinfo); /* drops spa_proc_lock */
1022 1022
1023 1023 mutex_enter(&curproc->p_lock);
1024 1024 lwp_exit();
1025 1025 }
1026 1026 #endif
1027 1027
1028 1028 /*
1029 1029 * Activate an uninitialized pool.
1030 1030 */
1031 1031 static void
1032 1032 spa_activate(spa_t *spa, int mode)
1033 1033 {
1034 1034 ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
1035 1035
1036 1036 spa->spa_state = POOL_STATE_ACTIVE;
1037 1037 spa->spa_mode = mode;
1038 1038
1039 1039 spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops);
1040 1040 spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops);
1041 1041
1042 1042 /* Try to create a covering process */
1043 1043 mutex_enter(&spa->spa_proc_lock);
1044 1044 ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
1045 1045 ASSERT(spa->spa_proc == &p0);
1046 1046 spa->spa_did = 0;
1047 1047
1048 1048 /* Only create a process if we're going to be around a while. */
1049 1049 if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
1050 1050 if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
1051 1051 NULL, 0) == 0) {
1052 1052 spa->spa_proc_state = SPA_PROC_CREATED;
1053 1053 while (spa->spa_proc_state == SPA_PROC_CREATED) {
1054 1054 cv_wait(&spa->spa_proc_cv,
1055 1055 &spa->spa_proc_lock);
1056 1056 }
1057 1057 ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
1058 1058 ASSERT(spa->spa_proc != &p0);
1059 1059 ASSERT(spa->spa_did != 0);
1060 1060 } else {
1061 1061 #ifdef _KERNEL
1062 1062 cmn_err(CE_WARN,
1063 1063 "Couldn't create process for zfs pool \"%s\"\n",
1064 1064 spa->spa_name);
1065 1065 #endif
1066 1066 }
1067 1067 }
1068 1068 mutex_exit(&spa->spa_proc_lock);
1069 1069
1070 1070 /* If we didn't create a process, we need to create our taskqs. */
1071 1071 if (spa->spa_proc == &p0) {
1072 1072 spa_create_zio_taskqs(spa);
1073 1073 }
1074 1074
1075 1075 list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
1076 1076 offsetof(vdev_t, vdev_config_dirty_node));
1077 1077 list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
1078 1078 offsetof(vdev_t, vdev_state_dirty_node));
1079 1079
1080 1080 txg_list_create(&spa->spa_vdev_txg_list,
1081 1081 offsetof(struct vdev, vdev_txg_node));
1082 1082
1083 1083 avl_create(&spa->spa_errlist_scrub,
1084 1084 spa_error_entry_compare, sizeof (spa_error_entry_t),
1085 1085 offsetof(spa_error_entry_t, se_avl));
1086 1086 avl_create(&spa->spa_errlist_last,
1087 1087 spa_error_entry_compare, sizeof (spa_error_entry_t),
1088 1088 offsetof(spa_error_entry_t, se_avl));
1089 1089 }
1090 1090
1091 1091 /*
1092 1092 * Opposite of spa_activate().
1093 1093 */
1094 1094 static void
1095 1095 spa_deactivate(spa_t *spa)
1096 1096 {
1097 1097 ASSERT(spa->spa_sync_on == B_FALSE);
1098 1098 ASSERT(spa->spa_dsl_pool == NULL);
1099 1099 ASSERT(spa->spa_root_vdev == NULL);
1100 1100 ASSERT(spa->spa_async_zio_root == NULL);
1101 1101 ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
1102 1102
1103 1103 txg_list_destroy(&spa->spa_vdev_txg_list);
1104 1104
1105 1105 list_destroy(&spa->spa_config_dirty_list);
1106 1106 list_destroy(&spa->spa_state_dirty_list);
1107 1107
1108 1108 for (int t = 0; t < ZIO_TYPES; t++) {
1109 1109 for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
1110 1110 spa_taskqs_fini(spa, t, q);
1111 1111 }
1112 1112 }
1113 1113
1114 1114 metaslab_class_destroy(spa->spa_normal_class);
1115 1115 spa->spa_normal_class = NULL;
1116 1116
1117 1117 metaslab_class_destroy(spa->spa_log_class);
1118 1118 spa->spa_log_class = NULL;
1119 1119
1120 1120 /*
1121 1121 * If this was part of an import or the open otherwise failed, we may
1122 1122 * still have errors left in the queues. Empty them just in case.
1123 1123 */
1124 1124 spa_errlog_drain(spa);
1125 1125
1126 1126 avl_destroy(&spa->spa_errlist_scrub);
1127 1127 avl_destroy(&spa->spa_errlist_last);
1128 1128
1129 1129 spa->spa_state = POOL_STATE_UNINITIALIZED;
1130 1130
1131 1131 mutex_enter(&spa->spa_proc_lock);
1132 1132 if (spa->spa_proc_state != SPA_PROC_NONE) {
1133 1133 ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
1134 1134 spa->spa_proc_state = SPA_PROC_DEACTIVATE;
1135 1135 cv_broadcast(&spa->spa_proc_cv);
1136 1136 while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
1137 1137 ASSERT(spa->spa_proc != &p0);
1138 1138 cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
1139 1139 }
1140 1140 ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
1141 1141 spa->spa_proc_state = SPA_PROC_NONE;
1142 1142 }
1143 1143 ASSERT(spa->spa_proc == &p0);
1144 1144 mutex_exit(&spa->spa_proc_lock);
1145 1145
1146 1146 /*
1147 1147 * We want to make sure spa_thread() has actually exited the ZFS
1148 1148 * module, so that the module can't be unloaded out from underneath
1149 1149 * it.
1150 1150 */
1151 1151 if (spa->spa_did != 0) {
1152 1152 thread_join(spa->spa_did);
1153 1153 spa->spa_did = 0;
1154 1154 }
1155 1155 }
1156 1156
1157 1157 /*
1158 1158 * Verify a pool configuration, and construct the vdev tree appropriately. This
1159 1159 * will create all the necessary vdevs in the appropriate layout, with each vdev
1160 1160 * in the CLOSED state. This will prep the pool before open/creation/import.
1161 1161 * All vdev validation is done by the vdev_alloc() routine.
1162 1162 */
1163 1163 static int
1164 1164 spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
1165 1165 uint_t id, int atype)
1166 1166 {
1167 1167 nvlist_t **child;
1168 1168 uint_t children;
1169 1169 int error;
1170 1170
1171 1171 if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
1172 1172 return (error);
1173 1173
1174 1174 if ((*vdp)->vdev_ops->vdev_op_leaf)
1175 1175 return (0);
1176 1176
1177 1177 error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1178 1178 &child, &children);
1179 1179
1180 1180 if (error == ENOENT)
1181 1181 return (0);
1182 1182
1183 1183 if (error) {
1184 1184 vdev_free(*vdp);
1185 1185 *vdp = NULL;
1186 1186 return (SET_ERROR(EINVAL));
1187 1187 }
1188 1188
1189 1189 for (int c = 0; c < children; c++) {
1190 1190 vdev_t *vd;
1191 1191 if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
1192 1192 atype)) != 0) {
1193 1193 vdev_free(*vdp);
1194 1194 *vdp = NULL;
1195 1195 return (error);
1196 1196 }
1197 1197 }
1198 1198
1199 1199 ASSERT(*vdp != NULL);
1200 1200
1201 1201 return (0);
1202 1202 }
1203 1203
1204 1204 /*
1205 1205 * Opposite of spa_load().
1206 1206 */
1207 1207 static void
1208 1208 spa_unload(spa_t *spa)
1209 1209 {
1210 1210 int i;
1211 1211
1212 1212 ASSERT(MUTEX_HELD(&spa_namespace_lock));
1213 1213
1214 1214 /*
1215 1215 * Stop async tasks.
1216 1216 */
1217 1217 spa_async_suspend(spa);
1218 1218
1219 1219 /*
1220 1220 * Stop syncing.
1221 1221 */
1222 1222 if (spa->spa_sync_on) {
1223 1223 txg_sync_stop(spa->spa_dsl_pool);
1224 1224 spa->spa_sync_on = B_FALSE;
1225 1225 }
1226 1226
1227 1227 /*
1228 1228 * Wait for any outstanding async I/O to complete.
1229 1229 */
1230 1230 if (spa->spa_async_zio_root != NULL) {
1231 1231 (void) zio_wait(spa->spa_async_zio_root);
1232 1232 spa->spa_async_zio_root = NULL;
1233 1233 }
1234 1234
1235 1235 bpobj_close(&spa->spa_deferred_bpobj);
1236 1236
1237 1237 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1238 1238
1239 1239 /*
1240 1240 * Close all vdevs.
1241 1241 */
1242 1242 if (spa->spa_root_vdev)
1243 1243 vdev_free(spa->spa_root_vdev);
1244 1244 ASSERT(spa->spa_root_vdev == NULL);
1245 1245
1246 1246 /*
1247 1247 * Close the dsl pool.
1248 1248 */
1249 1249 if (spa->spa_dsl_pool) {
1250 1250 dsl_pool_close(spa->spa_dsl_pool);
1251 1251 spa->spa_dsl_pool = NULL;
1252 1252 spa->spa_meta_objset = NULL;
1253 1253 }
1254 1254
1255 1255 ddt_unload(spa);
1256 1256
1257 1257
1258 1258 /*
1259 1259 * Drop and purge level 2 cache
1260 1260 */
1261 1261 spa_l2cache_drop(spa);
1262 1262
1263 1263 for (i = 0; i < spa->spa_spares.sav_count; i++)
1264 1264 vdev_free(spa->spa_spares.sav_vdevs[i]);
1265 1265 if (spa->spa_spares.sav_vdevs) {
1266 1266 kmem_free(spa->spa_spares.sav_vdevs,
1267 1267 spa->spa_spares.sav_count * sizeof (void *));
1268 1268 spa->spa_spares.sav_vdevs = NULL;
1269 1269 }
1270 1270 if (spa->spa_spares.sav_config) {
1271 1271 nvlist_free(spa->spa_spares.sav_config);
1272 1272 spa->spa_spares.sav_config = NULL;
1273 1273 }
1274 1274 spa->spa_spares.sav_count = 0;
1275 1275
1276 1276 for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
1277 1277 vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
1278 1278 vdev_free(spa->spa_l2cache.sav_vdevs[i]);
1279 1279 }
1280 1280 if (spa->spa_l2cache.sav_vdevs) {
1281 1281 kmem_free(spa->spa_l2cache.sav_vdevs,
1282 1282 spa->spa_l2cache.sav_count * sizeof (void *));
1283 1283 spa->spa_l2cache.sav_vdevs = NULL;
1284 1284 }
1285 1285 if (spa->spa_l2cache.sav_config) {
1286 1286 nvlist_free(spa->spa_l2cache.sav_config);
1287 1287 spa->spa_l2cache.sav_config = NULL;
1288 1288 }
1289 1289 spa->spa_l2cache.sav_count = 0;
1290 1290
1291 1291 spa->spa_async_suspended = 0;
1292 1292
1293 1293 if (spa->spa_comment != NULL) {
1294 1294 spa_strfree(spa->spa_comment);
1295 1295 spa->spa_comment = NULL;
1296 1296 }
1297 1297
1298 1298 spa_config_exit(spa, SCL_ALL, FTAG);
1299 1299 }
1300 1300
1301 1301 /*
1302 1302 * Load (or re-load) the current list of vdevs describing the active spares for
1303 1303 * this pool. When this is called, we have some form of basic information in
1304 1304 * 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and
1305 1305 * then re-generate a more complete list including status information.
1306 1306 */
1307 1307 static void
1308 1308 spa_load_spares(spa_t *spa)
1309 1309 {
1310 1310 nvlist_t **spares;
1311 1311 uint_t nspares;
1312 1312 int i;
1313 1313 vdev_t *vd, *tvd;
1314 1314
1315 1315 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1316 1316
1317 1317 /*
1318 1318 * First, close and free any existing spare vdevs.
1319 1319 */
1320 1320 for (i = 0; i < spa->spa_spares.sav_count; i++) {
1321 1321 vd = spa->spa_spares.sav_vdevs[i];
1322 1322
1323 1323 /* Undo the call to spa_activate() below */
1324 1324 if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
1325 1325 B_FALSE)) != NULL && tvd->vdev_isspare)
1326 1326 spa_spare_remove(tvd);
1327 1327 vdev_close(vd);
1328 1328 vdev_free(vd);
1329 1329 }
1330 1330
1331 1331 if (spa->spa_spares.sav_vdevs)
1332 1332 kmem_free(spa->spa_spares.sav_vdevs,
1333 1333 spa->spa_spares.sav_count * sizeof (void *));
1334 1334
1335 1335 if (spa->spa_spares.sav_config == NULL)
1336 1336 nspares = 0;
1337 1337 else
1338 1338 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
1339 1339 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
1340 1340
1341 1341 spa->spa_spares.sav_count = (int)nspares;
1342 1342 spa->spa_spares.sav_vdevs = NULL;
1343 1343
1344 1344 if (nspares == 0)
1345 1345 return;
1346 1346
1347 1347 /*
1348 1348 * Construct the array of vdevs, opening them to get status in the
1349 1349 * process. For each spare, there are potentially two different vdev_t
1350 1350 * structures associated with it: one in the list of spares (used only
1351 1351 * for basic validation purposes) and one in the active vdev
1352 1352 * configuration (if it's spared in). During this phase we open and
1353 1353 * validate each vdev on the spare list. If the vdev also exists in the
1354 1354 * active configuration, then we also mark this vdev as an active spare.
1355 1355 */
1356 1356 spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
1357 1357 KM_SLEEP);
1358 1358 for (i = 0; i < spa->spa_spares.sav_count; i++) {
1359 1359 VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
1360 1360 VDEV_ALLOC_SPARE) == 0);
1361 1361 ASSERT(vd != NULL);
1362 1362
1363 1363 spa->spa_spares.sav_vdevs[i] = vd;
1364 1364
1365 1365 if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
1366 1366 B_FALSE)) != NULL) {
1367 1367 if (!tvd->vdev_isspare)
1368 1368 spa_spare_add(tvd);
1369 1369
1370 1370 /*
1371 1371 * We only mark the spare active if we were successfully
1372 1372 * able to load the vdev. Otherwise, importing a pool
1373 1373 * with a bad active spare would result in strange
1374 1374 * behavior, because multiple pools would think the spare
1375 1375 * is actively in use.
1376 1376 *
1377 1377 * There is a vulnerability here to an equally bizarre
1378 1378 * circumstance, where a dead active spare is later
1379 1379 * brought back to life (onlined or otherwise). Given
1380 1380 * the rarity of this scenario, and the extra complexity
1381 1381 * it adds, we ignore the possibility.
1382 1382 */
1383 1383 if (!vdev_is_dead(tvd))
1384 1384 spa_spare_activate(tvd);
1385 1385 }
1386 1386
1387 1387 vd->vdev_top = vd;
1388 1388 vd->vdev_aux = &spa->spa_spares;
1389 1389
1390 1390 if (vdev_open(vd) != 0)
1391 1391 continue;
1392 1392
1393 1393 if (vdev_validate_aux(vd) == 0)
1394 1394 spa_spare_add(vd);
1395 1395 }
1396 1396
1397 1397 /*
1398 1398 * Recompute the stashed list of spares, with status information
1399 1399 * this time.
1400 1400 */
1401 1401 VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
1402 1402 DATA_TYPE_NVLIST_ARRAY) == 0);
1403 1403
1404 1404 spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
1405 1405 KM_SLEEP);
1406 1406 for (i = 0; i < spa->spa_spares.sav_count; i++)
1407 1407 spares[i] = vdev_config_generate(spa,
1408 1408 spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
1409 1409 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
1410 1410 ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
1411 1411 for (i = 0; i < spa->spa_spares.sav_count; i++)
1412 1412 nvlist_free(spares[i]);
1413 1413 kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
1414 1414 }
1415 1415
1416 1416 /*
1417 1417 * Load (or re-load) the current list of vdevs describing the active l2cache for
1418 1418 * this pool. When this is called, we have some form of basic information in
1419 1419 * 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and
1420 1420 * then re-generate a more complete list including status information.
1421 1421 * Devices which are already active have their details maintained, and are
1422 1422 * not re-opened.
1423 1423 */
1424 1424 static void
1425 1425 spa_load_l2cache(spa_t *spa)
1426 1426 {
1427 1427 nvlist_t **l2cache;
1428 1428 uint_t nl2cache;
1429 1429 int i, j, oldnvdevs;
1430 1430 uint64_t guid;
1431 1431 vdev_t *vd, **oldvdevs, **newvdevs;
1432 1432 spa_aux_vdev_t *sav = &spa->spa_l2cache;
1433 1433
1434 1434 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1435 1435
1436 1436 if (sav->sav_config != NULL) {
1437 1437 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
1438 1438 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
1439 1439 newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
1440 1440 } else {
1441 1441 nl2cache = 0;
1442 1442 newvdevs = NULL;
1443 1443 }
1444 1444
1445 1445 oldvdevs = sav->sav_vdevs;
1446 1446 oldnvdevs = sav->sav_count;
1447 1447 sav->sav_vdevs = NULL;
1448 1448 sav->sav_count = 0;
1449 1449
1450 1450 /*
1451 1451 * Process new nvlist of vdevs.
1452 1452 */
1453 1453 for (i = 0; i < nl2cache; i++) {
1454 1454 VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
1455 1455 &guid) == 0);
1456 1456
1457 1457 newvdevs[i] = NULL;
1458 1458 for (j = 0; j < oldnvdevs; j++) {
1459 1459 vd = oldvdevs[j];
1460 1460 if (vd != NULL && guid == vd->vdev_guid) {
1461 1461 /*
1462 1462 * Retain previous vdev for add/remove ops.
1463 1463 */
1464 1464 newvdevs[i] = vd;
1465 1465 oldvdevs[j] = NULL;
1466 1466 break;
1467 1467 }
1468 1468 }
1469 1469
1470 1470 if (newvdevs[i] == NULL) {
1471 1471 /*
1472 1472 * Create new vdev
1473 1473 */
1474 1474 VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
1475 1475 VDEV_ALLOC_L2CACHE) == 0);
1476 1476 ASSERT(vd != NULL);
1477 1477 newvdevs[i] = vd;
1478 1478
1479 1479 /*
1480 1480 * Commit this vdev as an l2cache device,
1481 1481 * even if it fails to open.
1482 1482 */
1483 1483 spa_l2cache_add(vd);
1484 1484
1485 1485 vd->vdev_top = vd;
1486 1486 vd->vdev_aux = sav;
1487 1487
1488 1488 spa_l2cache_activate(vd);
1489 1489
1490 1490 if (vdev_open(vd) != 0)
1491 1491 continue;
1492 1492
1493 1493 (void) vdev_validate_aux(vd);
1494 1494
1495 1495 if (!vdev_is_dead(vd))
1496 1496 l2arc_add_vdev(spa, vd);
1497 1497 }
1498 1498 }
1499 1499
1500 1500 /*
1501 1501 * Purge vdevs that were dropped
1502 1502 */
1503 1503 for (i = 0; i < oldnvdevs; i++) {
1504 1504 uint64_t pool;
1505 1505
1506 1506 vd = oldvdevs[i];
1507 1507 if (vd != NULL) {
1508 1508 ASSERT(vd->vdev_isl2cache);
1509 1509
1510 1510 if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
1511 1511 pool != 0ULL && l2arc_vdev_present(vd))
1512 1512 l2arc_remove_vdev(vd);
1513 1513 vdev_clear_stats(vd);
1514 1514 vdev_free(vd);
1515 1515 }
1516 1516 }
1517 1517
1518 1518 if (oldvdevs)
1519 1519 kmem_free(oldvdevs, oldnvdevs * sizeof (void *));
1520 1520
1521 1521 if (sav->sav_config == NULL)
1522 1522 goto out;
1523 1523
1524 1524 sav->sav_vdevs = newvdevs;
1525 1525 sav->sav_count = (int)nl2cache;
1526 1526
1527 1527 /*
1528 1528 * Recompute the stashed list of l2cache devices, with status
1529 1529 * information this time.
1530 1530 */
1531 1531 VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
1532 1532 DATA_TYPE_NVLIST_ARRAY) == 0);
1533 1533
1534 1534 l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
1535 1535 for (i = 0; i < sav->sav_count; i++)
1536 1536 l2cache[i] = vdev_config_generate(spa,
1537 1537 sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
1538 1538 VERIFY(nvlist_add_nvlist_array(sav->sav_config,
1539 1539 ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
1540 1540 out:
1541 1541 for (i = 0; i < sav->sav_count; i++)
1542 1542 nvlist_free(l2cache[i]);
1543 1543 if (sav->sav_count)
1544 1544 kmem_free(l2cache, sav->sav_count * sizeof (void *));
1545 1545 }
1546 1546
1547 1547 static int
1548 1548 load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
1549 1549 {
1550 1550 dmu_buf_t *db;
1551 1551 char *packed = NULL;
1552 1552 size_t nvsize = 0;
1553 1553 int error;
1554 1554 *value = NULL;
1555 1555
1556 1556 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
1557 1557 nvsize = *(uint64_t *)db->db_data;
1558 1558 dmu_buf_rele(db, FTAG);
1559 1559
1560 1560 packed = kmem_alloc(nvsize, KM_SLEEP);
1561 1561 error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
1562 1562 DMU_READ_PREFETCH);
1563 1563 if (error == 0)
1564 1564 error = nvlist_unpack(packed, nvsize, value, 0);
1565 1565 kmem_free(packed, nvsize);
1566 1566
1567 1567 return (error);
1568 1568 }
1569 1569
1570 1570 /*
1571 1571 * Checks to see if the given vdev could not be opened, in which case we post a
1572 1572 * sysevent to notify the autoreplace code that the device has been removed.
1573 1573 */
1574 1574 static void
1575 1575 spa_check_removed(vdev_t *vd)
1576 1576 {
1577 1577 for (int c = 0; c < vd->vdev_children; c++)
1578 1578 spa_check_removed(vd->vdev_child[c]);
1579 1579
1580 1580 if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) &&
1581 1581 !vd->vdev_ishole) {
1582 1582 zfs_post_autoreplace(vd->vdev_spa, vd);
1583 1583 spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK);
1584 1584 }
1585 1585 }
1586 1586
1587 1587 /*
1588 1588 * Validate the current config against the MOS config
1589 1589 */
1590 1590 static boolean_t
1591 1591 spa_config_valid(spa_t *spa, nvlist_t *config)
1592 1592 {
1593 1593 vdev_t *mrvd, *rvd = spa->spa_root_vdev;
1594 1594 nvlist_t *nv;
1595 1595
1596 1596 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nv) == 0);
1597 1597
1598 1598 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1599 1599 VERIFY(spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD) == 0);
1600 1600
1601 1601 ASSERT3U(rvd->vdev_children, ==, mrvd->vdev_children);
1602 1602
1603 1603 /*
1604 1604 * If we're doing a normal import, then build up any additional
1605 1605 * diagnostic information about missing devices in this config.
1606 1606 * We'll pass this up to the user for further processing.
1607 1607 */
1608 1608 if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) {
1609 1609 nvlist_t **child, *nv;
1610 1610 uint64_t idx = 0;
1611 1611
1612 1612 child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t **),
1613 1613 KM_SLEEP);
1614 1614 VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1615 1615
1616 1616 for (int c = 0; c < rvd->vdev_children; c++) {
1617 1617 vdev_t *tvd = rvd->vdev_child[c];
1618 1618 vdev_t *mtvd = mrvd->vdev_child[c];
1619 1619
1620 1620 if (tvd->vdev_ops == &vdev_missing_ops &&
1621 1621 mtvd->vdev_ops != &vdev_missing_ops &&
1622 1622 mtvd->vdev_islog)
1623 1623 child[idx++] = vdev_config_generate(spa, mtvd,
1624 1624 B_FALSE, 0);
1625 1625 }
1626 1626
1627 1627 if (idx) {
1628 1628 VERIFY(nvlist_add_nvlist_array(nv,
1629 1629 ZPOOL_CONFIG_CHILDREN, child, idx) == 0);
1630 1630 VERIFY(nvlist_add_nvlist(spa->spa_load_info,
1631 1631 ZPOOL_CONFIG_MISSING_DEVICES, nv) == 0);
1632 1632
1633 1633 for (int i = 0; i < idx; i++)
1634 1634 nvlist_free(child[i]);
1635 1635 }
1636 1636 nvlist_free(nv);
1637 1637 kmem_free(child, rvd->vdev_children * sizeof (char **));
1638 1638 }
1639 1639
1640 1640 /*
1641 1641 * Compare the root vdev tree with the information we have
1642 1642 * from the MOS config (mrvd). Check each top-level vdev
1643 1643 * with the corresponding MOS config top-level (mtvd).
1644 1644 */
1645 1645 for (int c = 0; c < rvd->vdev_children; c++) {
1646 1646 vdev_t *tvd = rvd->vdev_child[c];
1647 1647 vdev_t *mtvd = mrvd->vdev_child[c];
1648 1648
1649 1649 /*
1650 1650 * Resolve any "missing" vdevs in the current configuration.
1651 1651 * If we find that the MOS config has more accurate information
1652 1652 * about the top-level vdev, then use that vdev instead.
1653 1653 */
1654 1654 if (tvd->vdev_ops == &vdev_missing_ops &&
1655 1655 mtvd->vdev_ops != &vdev_missing_ops) {
1656 1656
1657 1657 if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG))
1658 1658 continue;
1659 1659
1660 1660 /*
1661 1661 * Device specific actions.
1662 1662 */
1663 1663 if (mtvd->vdev_islog) {
1664 1664 spa_set_log_state(spa, SPA_LOG_CLEAR);
1665 1665 } else {
1666 1666 /*
1667 1667 * XXX - once we have 'readonly' pool
1668 1668 * support we should be able to handle
1669 1669 * missing data devices by transitioning
1670 1670 * the pool to readonly.
1671 1671 */
1672 1672 continue;
1673 1673 }
1674 1674
1675 1675 /*
1676 1676 * Swap the missing vdev with the data we were
1677 1677 * able to obtain from the MOS config.
1678 1678 */
1679 1679 vdev_remove_child(rvd, tvd);
1680 1680 vdev_remove_child(mrvd, mtvd);
1681 1681
1682 1682 vdev_add_child(rvd, mtvd);
1683 1683 vdev_add_child(mrvd, tvd);
1684 1684
1685 1685 spa_config_exit(spa, SCL_ALL, FTAG);
1686 1686 vdev_load(mtvd);
1687 1687 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1688 1688
1689 1689 vdev_reopen(rvd);
1690 1690 } else if (mtvd->vdev_islog) {
1691 1691 /*
1692 1692 * Load the slog device's state from the MOS config
1693 1693 * since it's possible that the label does not
1694 1694 * contain the most up-to-date information.
1695 1695 */
1696 1696 vdev_load_log_state(tvd, mtvd);
1697 1697 vdev_reopen(tvd);
1698 1698 }
1699 1699 }
1700 1700 vdev_free(mrvd);
1701 1701 spa_config_exit(spa, SCL_ALL, FTAG);
1702 1702
1703 1703 /*
1704 1704 * Ensure we were able to validate the config.
1705 1705 */
1706 1706 return (rvd->vdev_guid_sum == spa->spa_uberblock.ub_guid_sum);
1707 1707 }
1708 1708
1709 1709 /*
1710 1710 * Check for missing log devices
1711 1711 */
1712 1712 static boolean_t
1713 1713 spa_check_logs(spa_t *spa)
1714 1714 {
1715 1715 boolean_t rv = B_FALSE;
1716 1716
1717 1717 switch (spa->spa_log_state) {
1718 1718 case SPA_LOG_MISSING:
1719 1719 /* need to recheck in case slog has been restored */
1720 1720 case SPA_LOG_UNKNOWN:
1721 1721 rv = (dmu_objset_find(spa->spa_name, zil_check_log_chain,
1722 1722 NULL, DS_FIND_CHILDREN) != 0);
1723 1723 if (rv)
1724 1724 spa_set_log_state(spa, SPA_LOG_MISSING);
1725 1725 break;
1726 1726 }
1727 1727 return (rv);
1728 1728 }
1729 1729
1730 1730 static boolean_t
1731 1731 spa_passivate_log(spa_t *spa)
1732 1732 {
1733 1733 vdev_t *rvd = spa->spa_root_vdev;
1734 1734 boolean_t slog_found = B_FALSE;
1735 1735
1736 1736 ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
1737 1737
1738 1738 if (!spa_has_slogs(spa))
1739 1739 return (B_FALSE);
1740 1740
1741 1741 for (int c = 0; c < rvd->vdev_children; c++) {
1742 1742 vdev_t *tvd = rvd->vdev_child[c];
1743 1743 metaslab_group_t *mg = tvd->vdev_mg;
1744 1744
1745 1745 if (tvd->vdev_islog) {
1746 1746 metaslab_group_passivate(mg);
1747 1747 slog_found = B_TRUE;
1748 1748 }
1749 1749 }
1750 1750
1751 1751 return (slog_found);
1752 1752 }
1753 1753
1754 1754 static void
1755 1755 spa_activate_log(spa_t *spa)
1756 1756 {
1757 1757 vdev_t *rvd = spa->spa_root_vdev;
1758 1758
1759 1759 ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
1760 1760
1761 1761 for (int c = 0; c < rvd->vdev_children; c++) {
1762 1762 vdev_t *tvd = rvd->vdev_child[c];
1763 1763 metaslab_group_t *mg = tvd->vdev_mg;
1764 1764
1765 1765 if (tvd->vdev_islog)
1766 1766 metaslab_group_activate(mg);
1767 1767 }
1768 1768 }
1769 1769
1770 1770 int
1771 1771 spa_offline_log(spa_t *spa)
1772 1772 {
1773 1773 int error;
1774 1774
1775 1775 error = dmu_objset_find(spa_name(spa), zil_vdev_offline,
1776 1776 NULL, DS_FIND_CHILDREN);
1777 1777 if (error == 0) {
1778 1778 /*
1779 1779 * We successfully offlined the log device, sync out the
1780 1780 * current txg so that the "stubby" block can be removed
1781 1781 * by zil_sync().
1782 1782 */
1783 1783 txg_wait_synced(spa->spa_dsl_pool, 0);
1784 1784 }
1785 1785 return (error);
1786 1786 }
1787 1787
1788 1788 static void
1789 1789 spa_aux_check_removed(spa_aux_vdev_t *sav)
1790 1790 {
1791 1791 for (int i = 0; i < sav->sav_count; i++)
1792 1792 spa_check_removed(sav->sav_vdevs[i]);
1793 1793 }
1794 1794
1795 1795 void
1796 1796 spa_claim_notify(zio_t *zio)
1797 1797 {
1798 1798 spa_t *spa = zio->io_spa;
1799 1799
1800 1800 if (zio->io_error)
1801 1801 return;
1802 1802
1803 1803 mutex_enter(&spa->spa_props_lock); /* any mutex will do */
1804 1804 if (spa->spa_claim_max_txg < zio->io_bp->blk_birth)
1805 1805 spa->spa_claim_max_txg = zio->io_bp->blk_birth;
1806 1806 mutex_exit(&spa->spa_props_lock);
1807 1807 }
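
spa_claim_notify() above is a lock-protected running-maximum update; as its
comment says, any mutex that serializes the read-compare-write will do, which
is why it borrows spa_props_lock. The pattern in isolation, as a small
userland sketch (track_max and max_seen are hypothetical names, not ZFS code):

	#include <pthread.h>
	#include <stdint.h>

	static pthread_mutex_t max_lock = PTHREAD_MUTEX_INITIALIZER;
	static uint64_t max_seen;

	static void
	track_max(uint64_t val)
	{
		(void) pthread_mutex_lock(&max_lock);
		if (max_seen < val)	/* compare and store in one lock hold */
			max_seen = val;
		(void) pthread_mutex_unlock(&max_lock);
	}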
1808 1808
1809 1809 typedef struct spa_load_error {
1810 1810 uint64_t sle_meta_count;
1811 1811 uint64_t sle_data_count;
1812 1812 } spa_load_error_t;
1813 1813
1814 1814 static void
1815 1815 spa_load_verify_done(zio_t *zio)
1816 1816 {
1817 1817 blkptr_t *bp = zio->io_bp;
1818 1818 spa_load_error_t *sle = zio->io_private;
1819 1819 dmu_object_type_t type = BP_GET_TYPE(bp);
1820 1820 int error = zio->io_error;
1821 1821 spa_t *spa = zio->io_spa;
1822 1822
1823 1823 if (error) {
1824 1824 if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
1825 1825 type != DMU_OT_INTENT_LOG)
1826 - atomic_add_64(&sle->sle_meta_count, 1);
1826 + atomic_inc_64(&sle->sle_meta_count);
1827 1827 else
1828 - atomic_add_64(&sle->sle_data_count, 1);
1828 + atomic_inc_64(&sle->sle_data_count);
1829 1829 }
1830 1830 zio_data_buf_free(zio->io_data, zio->io_size);
1831 1831
1832 1832 mutex_enter(&spa->spa_scrub_lock);
1833 1833 spa->spa_scrub_inflight--;
1834 1834 cv_broadcast(&spa->spa_scrub_io_cv);
1835 1835 mutex_exit(&spa->spa_scrub_lock);
1836 1836 }
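
The two changed pairs above are the whole pattern of this change (5045): an
atomic_add_64(&x, 1) with a literal delta of +1 becomes atomic_inc_64(&x), and
the corresponding -1 cases become atomic_dec_64(). A minimal userland sketch
of the substitution, assuming only the illumos atomic_ops(3C) interfaces from
<atomic.h>:

	#include <atomic.h>
	#include <stdio.h>

	int
	main(void)
	{
		volatile uint64_t count = 0;

		atomic_add_64(&count, 1);	/* old idiom: add a literal 1 */
		atomic_inc_64(&count);		/* new idiom: intent is explicit */
		atomic_dec_64(&count);		/* matching decrement entry point */

		(void) printf("count = %llu\n", (unsigned long long)count);
		return (0);
	}

The forms are equivalent in effect; the inc/dec entry points simply name the
operation and drop the magic constant.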
1837 1837
1838 1838 /*
1839 1839 * Maximum number of concurrent scrub i/os to create while verifying
1840 1840  * a pool during import.
1841 1841 */
1842 1842 int spa_load_verify_maxinflight = 10000;
1843 1843 boolean_t spa_load_verify_metadata = B_TRUE;
1844 1844 boolean_t spa_load_verify_data = B_TRUE;
1845 1845
1846 1846 /*ARGSUSED*/
1847 1847 static int
1848 1848 spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
1849 1849 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
1850 1850 {
1851 1851 if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
1852 1852 return (0);
1853 1853 /*
1854 1854 * Note: normally this routine will not be called if
1855 1855 * spa_load_verify_metadata is not set. However, it may be useful
1856 1856 * to manually set the flag after the traversal has begun.
1857 1857 */
1858 1858 if (!spa_load_verify_metadata)
1859 1859 return (0);
1860 1860 if (BP_GET_BUFC_TYPE(bp) == ARC_BUFC_DATA && !spa_load_verify_data)
1861 1861 return (0);
1862 1862
1863 1863 zio_t *rio = arg;
1864 1864 size_t size = BP_GET_PSIZE(bp);
1865 1865 void *data = zio_data_buf_alloc(size);
1866 1866
1867 1867 mutex_enter(&spa->spa_scrub_lock);
1868 1868 while (spa->spa_scrub_inflight >= spa_load_verify_maxinflight)
1869 1869 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
1870 1870 spa->spa_scrub_inflight++;
1871 1871 mutex_exit(&spa->spa_scrub_lock);
1872 1872
1873 1873 zio_nowait(zio_read(rio, spa, bp, data, size,
1874 1874 spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
1875 1875 ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
1876 1876 ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
1877 1877 return (0);
1878 1878 }
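
The spa_scrub_inflight handling above is a condition-variable throttle: the
issuing path blocks while the count sits at spa_load_verify_maxinflight, and
the done callback (spa_load_verify_done(), earlier) decrements the count and
broadcasts. A pthreads sketch of the same shape; issue_one() and
complete_one() are illustrative names, not ZFS interfaces:

	#include <pthread.h>

	#define	MAXINFLIGHT	10000

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
	static int inflight;

	static void
	issue_one(void)
	{
		(void) pthread_mutex_lock(&lock);
		while (inflight >= MAXINFLIGHT)	/* wait for a free slot */
			(void) pthread_cond_wait(&cv, &lock);
		inflight++;		/* reserve the slot before dispatch */
		(void) pthread_mutex_unlock(&lock);
	}

	static void
	complete_one(void)		/* the i/o done path calls this */
	{
		(void) pthread_mutex_lock(&lock);
		inflight--;
		(void) pthread_cond_broadcast(&cv);
		(void) pthread_mutex_unlock(&lock);
	}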
1879 1879
1880 1880 static int
1881 1881 spa_load_verify(spa_t *spa)
1882 1882 {
1883 1883 zio_t *rio;
1884 1884 spa_load_error_t sle = { 0 };
1885 1885 zpool_rewind_policy_t policy;
1886 1886 boolean_t verify_ok = B_FALSE;
1887 1887 int error = 0;
1888 1888
1889 1889 zpool_get_rewind_policy(spa->spa_config, &policy);
1890 1890
1891 1891 if (policy.zrp_request & ZPOOL_NEVER_REWIND)
1892 1892 return (0);
1893 1893
1894 1894 rio = zio_root(spa, NULL, &sle,
1895 1895 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
1896 1896
1897 1897 if (spa_load_verify_metadata) {
1898 1898 error = traverse_pool(spa, spa->spa_verify_min_txg,
1899 1899 TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA,
1900 1900 spa_load_verify_cb, rio);
1901 1901 }
1902 1902
1903 1903 (void) zio_wait(rio);
1904 1904
1905 1905 spa->spa_load_meta_errors = sle.sle_meta_count;
1906 1906 spa->spa_load_data_errors = sle.sle_data_count;
1907 1907
1908 1908 if (!error && sle.sle_meta_count <= policy.zrp_maxmeta &&
1909 1909 sle.sle_data_count <= policy.zrp_maxdata) {
1910 1910 int64_t loss = 0;
1911 1911
1912 1912 verify_ok = B_TRUE;
1913 1913 spa->spa_load_txg = spa->spa_uberblock.ub_txg;
1914 1914 spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp;
1915 1915
1916 1916 loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts;
1917 1917 VERIFY(nvlist_add_uint64(spa->spa_load_info,
1918 1918 ZPOOL_CONFIG_LOAD_TIME, spa->spa_load_txg_ts) == 0);
1919 1919 VERIFY(nvlist_add_int64(spa->spa_load_info,
1920 1920 ZPOOL_CONFIG_REWIND_TIME, loss) == 0);
1921 1921 VERIFY(nvlist_add_uint64(spa->spa_load_info,
1922 1922 ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count) == 0);
1923 1923 } else {
1924 1924 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg;
1925 1925 }
1926 1926
1927 1927 if (error) {
1928 1928 if (error != ENXIO && error != EIO)
1929 1929 error = SET_ERROR(EIO);
1930 1930 return (error);
1931 1931 }
1932 1932
1933 1933 return (verify_ok ? 0 : EIO);
1934 1934 }
1935 1935
1936 1936 /*
1937 1937 * Find a value in the pool props object.
1938 1938 */
1939 1939 static void
1940 1940 spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val)
1941 1941 {
1942 1942 (void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object,
1943 1943 zpool_prop_to_name(prop), sizeof (uint64_t), 1, val);
1944 1944 }
1945 1945
1946 1946 /*
1947 1947 * Find a value in the pool directory object.
1948 1948 */
1949 1949 static int
1950 1950 spa_dir_prop(spa_t *spa, const char *name, uint64_t *val)
1951 1951 {
1952 1952 return (zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
1953 1953 name, sizeof (uint64_t), 1, val));
1954 1954 }
1955 1955
1956 1956 static int
1957 1957 spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err)
1958 1958 {
1959 1959 vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux);
1960 1960 return (err);
1961 1961 }
1962 1962
1963 1963 /*
1964 1964 * Fix up config after a partly-completed split. This is done with the
1965 1965 * ZPOOL_CONFIG_SPLIT nvlist. Both the splitting pool and the split-off
1966 1966 * pool have that entry in their config, but only the splitting one contains
1967 1967 * a list of all the guids of the vdevs that are being split off.
1968 1968 *
1969 1969 * This function determines what to do with that list: either rejoin
1970 1970 * all the disks to the pool, or complete the splitting process. To attempt
1971 1971 * the rejoin, each disk that is offlined is marked online again, and
1972 1972 * we do a reopen() call. If the vdev label for every disk that was
1973 1973 * marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL)
1974 1974 * then we call vdev_split() on each disk, and complete the split.
1975 1975 *
1976 1976 * Otherwise we leave the config alone, with all the vdevs in place in
1977 1977 * the original pool.
1978 1978 */
1979 1979 static void
1980 1980 spa_try_repair(spa_t *spa, nvlist_t *config)
1981 1981 {
1982 1982 uint_t extracted;
1983 1983 uint64_t *glist;
1984 1984 uint_t i, gcount;
1985 1985 nvlist_t *nvl;
1986 1986 vdev_t **vd;
1987 1987 boolean_t attempt_reopen;
1988 1988
1989 1989 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0)
1990 1990 return;
1991 1991
1992 1992 /* check that the config is complete */
1993 1993 if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
1994 1994 &glist, &gcount) != 0)
1995 1995 return;
1996 1996
1997 1997 vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP);
1998 1998
1999 1999 /* attempt to online all the vdevs & validate */
2000 2000 attempt_reopen = B_TRUE;
2001 2001 for (i = 0; i < gcount; i++) {
2002 2002 if (glist[i] == 0) /* vdev is hole */
2003 2003 continue;
2004 2004
2005 2005 vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE);
2006 2006 if (vd[i] == NULL) {
2007 2007 /*
2008 2008 * Don't bother attempting to reopen the disks;
2009 2009 * just do the split.
2010 2010 */
2011 2011 attempt_reopen = B_FALSE;
2012 2012 } else {
2013 2013 /* attempt to re-online it */
2014 2014 vd[i]->vdev_offline = B_FALSE;
2015 2015 }
2016 2016 }
2017 2017
2018 2018 if (attempt_reopen) {
2019 2019 vdev_reopen(spa->spa_root_vdev);
2020 2020
2021 2021 /* check each device to see what state it's in */
2022 2022 for (extracted = 0, i = 0; i < gcount; i++) {
2023 2023 if (vd[i] != NULL &&
2024 2024 vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL)
2025 2025 break;
2026 2026 ++extracted;
2027 2027 }
2028 2028 }
2029 2029
2030 2030 /*
2031 2031 * If every disk has been moved to the new pool, or if we never
2032 2032 * even attempted to look at them, then we split them off for
2033 2033 * good.
2034 2034 */
2035 2035 if (!attempt_reopen || gcount == extracted) {
2036 2036 for (i = 0; i < gcount; i++)
2037 2037 if (vd[i] != NULL)
2038 2038 vdev_split(vd[i]);
2039 2039 vdev_reopen(spa->spa_root_vdev);
2040 2040 }
2041 2041
2042 2042 kmem_free(vd, gcount * sizeof (vdev_t *));
2043 2043 }
2044 2044
2045 2045 static int
2046 2046 spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type,
2047 2047 boolean_t mosconfig)
2048 2048 {
2049 2049 nvlist_t *config = spa->spa_config;
2050 2050 char *ereport = FM_EREPORT_ZFS_POOL;
2051 2051 char *comment;
2052 2052 int error;
2053 2053 uint64_t pool_guid;
2054 2054 nvlist_t *nvl;
2055 2055
2056 2056 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid))
2057 2057 return (SET_ERROR(EINVAL));
2058 2058
2059 2059 ASSERT(spa->spa_comment == NULL);
2060 2060 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
2061 2061 spa->spa_comment = spa_strdup(comment);
2062 2062
2063 2063 /*
2064 2064 * Versioning wasn't explicitly added to the label until later, so if
2065 2065 * it's not present treat it as the initial version.
2066 2066 	 * it's not present, treat it as the initial version.
2067 2067 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
2068 2068 &spa->spa_ubsync.ub_version) != 0)
2069 2069 spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
2070 2070
2071 2071 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
2072 2072 &spa->spa_config_txg);
2073 2073
2074 2074 if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
2075 2075 spa_guid_exists(pool_guid, 0)) {
2076 2076 error = SET_ERROR(EEXIST);
2077 2077 } else {
2078 2078 spa->spa_config_guid = pool_guid;
2079 2079
2080 2080 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT,
2081 2081 &nvl) == 0) {
2082 2082 VERIFY(nvlist_dup(nvl, &spa->spa_config_splitting,
2083 2083 KM_SLEEP) == 0);
2084 2084 }
2085 2085
2086 2086 nvlist_free(spa->spa_load_info);
2087 2087 spa->spa_load_info = fnvlist_alloc();
2088 2088
2089 2089 gethrestime(&spa->spa_loaded_ts);
2090 2090 error = spa_load_impl(spa, pool_guid, config, state, type,
2091 2091 mosconfig, &ereport);
2092 2092 }
2093 2093
2094 2094 spa->spa_minref = refcount_count(&spa->spa_refcount);
2095 2095 if (error) {
2096 2096 if (error != EEXIST) {
2097 2097 spa->spa_loaded_ts.tv_sec = 0;
2098 2098 spa->spa_loaded_ts.tv_nsec = 0;
2099 2099 }
2100 2100 if (error != EBADF) {
2101 2101 zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0);
2102 2102 }
2103 2103 }
2104 2104 spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE;
2105 2105 spa->spa_ena = 0;
2106 2106
2107 2107 return (error);
2108 2108 }
2109 2109
2110 2110 /*
2111 2111 * Load an existing storage pool, using the pool's builtin spa_config as a
2112 2112 * source of configuration information.
2113 2113 */
2114 2114 static int
2115 2115 spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
2116 2116 spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
2117 2117 char **ereport)
2118 2118 {
2119 2119 int error = 0;
2120 2120 nvlist_t *nvroot = NULL;
2121 2121 nvlist_t *label;
2122 2122 vdev_t *rvd;
2123 2123 uberblock_t *ub = &spa->spa_uberblock;
2124 2124 uint64_t children, config_cache_txg = spa->spa_config_txg;
2125 2125 int orig_mode = spa->spa_mode;
2126 2126 int parse;
2127 2127 uint64_t obj;
2128 2128 boolean_t missing_feat_write = B_FALSE;
2129 2129
2130 2130 /*
2131 2131 * If this is an untrusted config, access the pool in read-only mode.
2132 2132 * This prevents things like resilvering recently removed devices.
2133 2133 */
2134 2134 if (!mosconfig)
2135 2135 spa->spa_mode = FREAD;
2136 2136
2137 2137 ASSERT(MUTEX_HELD(&spa_namespace_lock));
2138 2138
2139 2139 spa->spa_load_state = state;
2140 2140
2141 2141 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot))
2142 2142 return (SET_ERROR(EINVAL));
2143 2143
2144 2144 parse = (type == SPA_IMPORT_EXISTING ?
2145 2145 VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT);
2146 2146
2147 2147 /*
2148 2148 * Create "The Godfather" zio to hold all async IOs
2149 2149 */
2150 2150 spa->spa_async_zio_root = zio_root(spa, NULL, NULL,
2151 2151 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER);
2152 2152
2153 2153 /*
2154 2154 * Parse the configuration into a vdev tree. We explicitly set the
2155 2155 * value that will be returned by spa_version() since parsing the
2156 2156 * configuration requires knowing the version number.
2157 2157 */
2158 2158 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2159 2159 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, parse);
2160 2160 spa_config_exit(spa, SCL_ALL, FTAG);
2161 2161
2162 2162 if (error != 0)
2163 2163 return (error);
2164 2164
2165 2165 ASSERT(spa->spa_root_vdev == rvd);
2166 2166
2167 2167 if (type != SPA_IMPORT_ASSEMBLE) {
2168 2168 ASSERT(spa_guid(spa) == pool_guid);
2169 2169 }
2170 2170
2171 2171 /*
2172 2172 * Try to open all vdevs, loading each label in the process.
2173 2173 */
2174 2174 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2175 2175 error = vdev_open(rvd);
2176 2176 spa_config_exit(spa, SCL_ALL, FTAG);
2177 2177 if (error != 0)
2178 2178 return (error);
2179 2179
2180 2180 /*
2181 2181 * We need to validate the vdev labels against the configuration that
2182 2182 * we have in hand, which is dependent on the setting of mosconfig. If
2183 2183 * mosconfig is true then we're validating the vdev labels based on
2184 2184 * that config. Otherwise, we're validating against the cached config
2185 2185 * (zpool.cache) that was read when we loaded the zfs module, and then
2186 2186 * later we will recursively call spa_load() and validate against
2187 2187 * the vdev config.
2188 2188 *
2189 2189 * If we're assembling a new pool that's been split off from an
2190 2190 * existing pool, the labels haven't yet been updated so we skip
2191 2191 * validation for now.
2192 2192 */
2193 2193 if (type != SPA_IMPORT_ASSEMBLE) {
2194 2194 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2195 2195 error = vdev_validate(rvd, mosconfig);
2196 2196 spa_config_exit(spa, SCL_ALL, FTAG);
2197 2197
2198 2198 if (error != 0)
2199 2199 return (error);
2200 2200
2201 2201 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
2202 2202 return (SET_ERROR(ENXIO));
2203 2203 }
2204 2204
2205 2205 /*
2206 2206 * Find the best uberblock.
2207 2207 */
2208 2208 vdev_uberblock_load(rvd, ub, &label);
2209 2209
2210 2210 /*
2211 2211 * If we weren't able to find a single valid uberblock, return failure.
2212 2212 */
2213 2213 if (ub->ub_txg == 0) {
2214 2214 nvlist_free(label);
2215 2215 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO));
2216 2216 }
2217 2217
2218 2218 /*
2219 2219 * If the pool has an unsupported version we can't open it.
2220 2220 */
2221 2221 if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) {
2222 2222 nvlist_free(label);
2223 2223 return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP));
2224 2224 }
2225 2225
2226 2226 if (ub->ub_version >= SPA_VERSION_FEATURES) {
2227 2227 nvlist_t *features;
2228 2228
2229 2229 /*
2230 2230 * If we weren't able to find what's necessary for reading the
2231 2231 * MOS in the label, return failure.
2232 2232 */
2233 2233 if (label == NULL || nvlist_lookup_nvlist(label,
2234 2234 ZPOOL_CONFIG_FEATURES_FOR_READ, &features) != 0) {
2235 2235 nvlist_free(label);
2236 2236 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
2237 2237 ENXIO));
2238 2238 }
2239 2239
2240 2240 /*
2241 2241 * Update our in-core representation with the definitive values
2242 2242 * from the label.
2243 2243 */
2244 2244 nvlist_free(spa->spa_label_features);
2245 2245 VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0);
2246 2246 }
2247 2247
2248 2248 nvlist_free(label);
2249 2249
2250 2250 /*
2251 2251 * Look through entries in the label nvlist's features_for_read. If
2252 2252 * there is a feature listed there which we don't understand then we
2253 2253 * cannot open a pool.
2254 2254 */
2255 2255 if (ub->ub_version >= SPA_VERSION_FEATURES) {
2256 2256 nvlist_t *unsup_feat;
2257 2257
2258 2258 VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) ==
2259 2259 0);
2260 2260
2261 2261 for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features,
2262 2262 NULL); nvp != NULL;
2263 2263 nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) {
2264 2264 if (!zfeature_is_supported(nvpair_name(nvp))) {
2265 2265 VERIFY(nvlist_add_string(unsup_feat,
2266 2266 nvpair_name(nvp), "") == 0);
2267 2267 }
2268 2268 }
2269 2269
2270 2270 if (!nvlist_empty(unsup_feat)) {
2271 2271 VERIFY(nvlist_add_nvlist(spa->spa_load_info,
2272 2272 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0);
2273 2273 nvlist_free(unsup_feat);
2274 2274 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
2275 2275 ENOTSUP));
2276 2276 }
2277 2277
2278 2278 nvlist_free(unsup_feat);
2279 2279 }
2280 2280
2281 2281 /*
2282 2282 * If the vdev guid sum doesn't match the uberblock, we have an
2283 2283 * incomplete configuration. We first check to see if the pool
2284 2284 	 * is aware of the complete config (i.e., ZPOOL_CONFIG_VDEV_CHILDREN).
2285 2285 * If it is, defer the vdev_guid_sum check till later so we
2286 2286 * can handle missing vdevs.
2287 2287 */
2288 2288 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
2289 2289 &children) != 0 && mosconfig && type != SPA_IMPORT_ASSEMBLE &&
2290 2290 rvd->vdev_guid_sum != ub->ub_guid_sum)
2291 2291 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO));
2292 2292
2293 2293 if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) {
2294 2294 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2295 2295 spa_try_repair(spa, config);
2296 2296 spa_config_exit(spa, SCL_ALL, FTAG);
2297 2297 nvlist_free(spa->spa_config_splitting);
2298 2298 spa->spa_config_splitting = NULL;
2299 2299 }
2300 2300
2301 2301 /*
2302 2302 * Initialize internal SPA structures.
2303 2303 */
2304 2304 spa->spa_state = POOL_STATE_ACTIVE;
2305 2305 spa->spa_ubsync = spa->spa_uberblock;
2306 2306 spa->spa_verify_min_txg = spa->spa_extreme_rewind ?
2307 2307 TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1;
2308 2308 spa->spa_first_txg = spa->spa_last_ubsync_txg ?
2309 2309 spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1;
2310 2310 spa->spa_claim_max_txg = spa->spa_first_txg;
2311 2311 spa->spa_prev_software_version = ub->ub_software_version;
2312 2312
2313 2313 error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
2314 2314 if (error)
2315 2315 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2316 2316 spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
2317 2317
2318 2318 if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object) != 0)
2319 2319 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2320 2320
2321 2321 if (spa_version(spa) >= SPA_VERSION_FEATURES) {
2322 2322 boolean_t missing_feat_read = B_FALSE;
2323 2323 nvlist_t *unsup_feat, *enabled_feat;
2324 2324
2325 2325 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ,
2326 2326 &spa->spa_feat_for_read_obj) != 0) {
2327 2327 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2328 2328 }
2329 2329
2330 2330 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE,
2331 2331 &spa->spa_feat_for_write_obj) != 0) {
2332 2332 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2333 2333 }
2334 2334
2335 2335 if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS,
2336 2336 &spa->spa_feat_desc_obj) != 0) {
2337 2337 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2338 2338 }
2339 2339
2340 2340 enabled_feat = fnvlist_alloc();
2341 2341 unsup_feat = fnvlist_alloc();
2342 2342
2343 2343 if (!spa_features_check(spa, B_FALSE,
2344 2344 unsup_feat, enabled_feat))
2345 2345 missing_feat_read = B_TRUE;
2346 2346
2347 2347 if (spa_writeable(spa) || state == SPA_LOAD_TRYIMPORT) {
2348 2348 if (!spa_features_check(spa, B_TRUE,
2349 2349 unsup_feat, enabled_feat)) {
2350 2350 missing_feat_write = B_TRUE;
2351 2351 }
2352 2352 }
2353 2353
2354 2354 fnvlist_add_nvlist(spa->spa_load_info,
2355 2355 ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat);
2356 2356
2357 2357 if (!nvlist_empty(unsup_feat)) {
2358 2358 fnvlist_add_nvlist(spa->spa_load_info,
2359 2359 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
2360 2360 }
2361 2361
2362 2362 fnvlist_free(enabled_feat);
2363 2363 fnvlist_free(unsup_feat);
2364 2364
2365 2365 if (!missing_feat_read) {
2366 2366 fnvlist_add_boolean(spa->spa_load_info,
2367 2367 ZPOOL_CONFIG_CAN_RDONLY);
2368 2368 }
2369 2369
2370 2370 /*
2371 2371 * If the state is SPA_LOAD_TRYIMPORT, our objective is
2372 2372 * twofold: to determine whether the pool is available for
2373 2373 * import in read-write mode and (if it is not) whether the
2374 2374 * pool is available for import in read-only mode. If the pool
2375 2375 * is available for import in read-write mode, it is displayed
2376 2376 * as available in userland; if it is not available for import
2377 2377 * in read-only mode, it is displayed as unavailable in
2378 2378 * userland. If the pool is available for import in read-only
2379 2379 * mode but not read-write mode, it is displayed as unavailable
2380 2380 * in userland with a special note that the pool is actually
2381 2381 * available for open in read-only mode.
2382 2382 *
2383 2383 * As a result, if the state is SPA_LOAD_TRYIMPORT and we are
2384 2384 * missing a feature for write, we must first determine whether
2385 2385 * the pool can be opened read-only before returning to
2386 2386 * userland in order to know whether to display the
2387 2387 * abovementioned note.
2388 2388 */
2389 2389 if (missing_feat_read || (missing_feat_write &&
2390 2390 spa_writeable(spa))) {
2391 2391 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
2392 2392 ENOTSUP));
2393 2393 }
2394 2394
2395 2395 /*
2396 2396 * Load refcounts for ZFS features from disk into an in-memory
2397 2397 * cache during SPA initialization.
2398 2398 */
2399 2399 for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
2400 2400 uint64_t refcount;
2401 2401
2402 2402 error = feature_get_refcount_from_disk(spa,
2403 2403 &spa_feature_table[i], &refcount);
2404 2404 if (error == 0) {
2405 2405 spa->spa_feat_refcount_cache[i] = refcount;
2406 2406 } else if (error == ENOTSUP) {
2407 2407 spa->spa_feat_refcount_cache[i] =
2408 2408 SPA_FEATURE_DISABLED;
2409 2409 } else {
2410 2410 return (spa_vdev_err(rvd,
2411 2411 VDEV_AUX_CORRUPT_DATA, EIO));
2412 2412 }
2413 2413 }
2414 2414 }
2415 2415
2416 2416 if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) {
2417 2417 if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG,
2418 2418 &spa->spa_feat_enabled_txg_obj) != 0)
2419 2419 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2420 2420 }
2421 2421
2422 2422 spa->spa_is_initializing = B_TRUE;
2423 2423 error = dsl_pool_open(spa->spa_dsl_pool);
2424 2424 spa->spa_is_initializing = B_FALSE;
2425 2425 if (error != 0)
2426 2426 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2427 2427
2428 2428 if (!mosconfig) {
2429 2429 uint64_t hostid;
2430 2430 nvlist_t *policy = NULL, *nvconfig;
2431 2431
2432 2432 if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0)
2433 2433 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2434 2434
2435 2435 if (!spa_is_root(spa) && nvlist_lookup_uint64(nvconfig,
2436 2436 ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
2437 2437 char *hostname;
2438 2438 unsigned long myhostid = 0;
2439 2439
2440 2440 VERIFY(nvlist_lookup_string(nvconfig,
2441 2441 ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);
2442 2442
2443 2443 #ifdef _KERNEL
2444 2444 myhostid = zone_get_hostid(NULL);
2445 2445 #else /* _KERNEL */
2446 2446 /*
2447 2447 * We're emulating the system's hostid in userland, so
2448 2448 * we can't use zone_get_hostid().
2449 2449 */
2450 2450 (void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
2451 2451 #endif /* _KERNEL */
2452 2452 if (hostid != 0 && myhostid != 0 &&
2453 2453 hostid != myhostid) {
2454 2454 nvlist_free(nvconfig);
2455 2455 cmn_err(CE_WARN, "pool '%s' could not be "
2456 2456 "loaded as it was last accessed by "
2457 2457 "another system (host: %s hostid: 0x%lx). "
2458 2458 "See: http://illumos.org/msg/ZFS-8000-EY",
2459 2459 spa_name(spa), hostname,
2460 2460 (unsigned long)hostid);
2461 2461 return (SET_ERROR(EBADF));
2462 2462 }
2463 2463 }
2464 2464 if (nvlist_lookup_nvlist(spa->spa_config,
2465 2465 ZPOOL_REWIND_POLICY, &policy) == 0)
2466 2466 VERIFY(nvlist_add_nvlist(nvconfig,
2467 2467 ZPOOL_REWIND_POLICY, policy) == 0);
2468 2468
2469 2469 spa_config_set(spa, nvconfig);
2470 2470 spa_unload(spa);
2471 2471 spa_deactivate(spa);
2472 2472 spa_activate(spa, orig_mode);
2473 2473
2474 2474 return (spa_load(spa, state, SPA_IMPORT_EXISTING, B_TRUE));
2475 2475 }
2476 2476
2477 2477 if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj) != 0)
2478 2478 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2479 2479 error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj);
2480 2480 if (error != 0)
2481 2481 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2482 2482
2483 2483 /*
2484 2484 * Load the bit that tells us to use the new accounting function
2485 2485 * (raid-z deflation). If we have an older pool, this will not
2486 2486 * be present.
2487 2487 */
2488 2488 error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate);
2489 2489 if (error != 0 && error != ENOENT)
2490 2490 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2491 2491
2492 2492 error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION,
2493 2493 &spa->spa_creation_version);
2494 2494 if (error != 0 && error != ENOENT)
2495 2495 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2496 2496
2497 2497 /*
2498 2498 * Load the persistent error log. If we have an older pool, this will
2499 2499 * not be present.
2500 2500 */
2501 2501 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last);
2502 2502 if (error != 0 && error != ENOENT)
2503 2503 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2504 2504
2505 2505 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB,
2506 2506 &spa->spa_errlog_scrub);
2507 2507 if (error != 0 && error != ENOENT)
2508 2508 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2509 2509
2510 2510 /*
2511 2511 * Load the history object. If we have an older pool, this
2512 2512 * will not be present.
2513 2513 */
2514 2514 error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history);
2515 2515 if (error != 0 && error != ENOENT)
2516 2516 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2517 2517
2518 2518 /*
2519 2519 * If we're assembling the pool from the split-off vdevs of
2520 2520 * an existing pool, we don't want to attach the spares & cache
2521 2521 * devices.
2522 2522 */
2523 2523
2524 2524 /*
2525 2525 * Load any hot spares for this pool.
2526 2526 */
2527 2527 error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object);
2528 2528 if (error != 0 && error != ENOENT)
2529 2529 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2530 2530 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
2531 2531 ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
2532 2532 if (load_nvlist(spa, spa->spa_spares.sav_object,
2533 2533 &spa->spa_spares.sav_config) != 0)
2534 2534 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2535 2535
2536 2536 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2537 2537 spa_load_spares(spa);
2538 2538 spa_config_exit(spa, SCL_ALL, FTAG);
2539 2539 } else if (error == 0) {
2540 2540 spa->spa_spares.sav_sync = B_TRUE;
2541 2541 }
2542 2542
2543 2543 /*
2544 2544 * Load any level 2 ARC devices for this pool.
2545 2545 */
2546 2546 error = spa_dir_prop(spa, DMU_POOL_L2CACHE,
2547 2547 &spa->spa_l2cache.sav_object);
2548 2548 if (error != 0 && error != ENOENT)
2549 2549 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2550 2550 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
2551 2551 ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
2552 2552 if (load_nvlist(spa, spa->spa_l2cache.sav_object,
2553 2553 &spa->spa_l2cache.sav_config) != 0)
2554 2554 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2555 2555
2556 2556 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2557 2557 spa_load_l2cache(spa);
2558 2558 spa_config_exit(spa, SCL_ALL, FTAG);
2559 2559 } else if (error == 0) {
2560 2560 spa->spa_l2cache.sav_sync = B_TRUE;
2561 2561 }
2562 2562
2563 2563 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
2564 2564
2565 2565 error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object);
2566 2566 if (error && error != ENOENT)
2567 2567 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2568 2568
2569 2569 if (error == 0) {
2570 2570 uint64_t autoreplace;
2571 2571
2572 2572 spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs);
2573 2573 spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace);
2574 2574 spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation);
2575 2575 spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode);
2576 2576 spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand);
2577 2577 spa_prop_find(spa, ZPOOL_PROP_DEDUPDITTO,
2578 2578 &spa->spa_dedup_ditto);
2579 2579
2580 2580 spa->spa_autoreplace = (autoreplace != 0);
2581 2581 }
2582 2582
2583 2583 /*
2584 2584 * If the 'autoreplace' property is set, then post a resource notifying
2585 2585 * the ZFS DE that it should not issue any faults for unopenable
2586 2586 * devices. We also iterate over the vdevs, and post a sysevent for any
2587 2587 * unopenable vdevs so that the normal autoreplace handler can take
2588 2588 * over.
2589 2589 */
2590 2590 if (spa->spa_autoreplace && state != SPA_LOAD_TRYIMPORT) {
2591 2591 spa_check_removed(spa->spa_root_vdev);
2592 2592 /*
2593 2593 * For the import case, this is done in spa_import(), because
2594 2594 * at this point we're using the spare definitions from
2595 2595 * the MOS config, not necessarily from the userland config.
2596 2596 */
2597 2597 if (state != SPA_LOAD_IMPORT) {
2598 2598 spa_aux_check_removed(&spa->spa_spares);
2599 2599 spa_aux_check_removed(&spa->spa_l2cache);
2600 2600 }
2601 2601 }
2602 2602
2603 2603 /*
2604 2604 * Load the vdev state for all toplevel vdevs.
2605 2605 */
2606 2606 vdev_load(rvd);
2607 2607
2608 2608 /*
2609 2609 * Propagate the leaf DTLs we just loaded all the way up the tree.
2610 2610 */
2611 2611 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2612 2612 vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
2613 2613 spa_config_exit(spa, SCL_ALL, FTAG);
2614 2614
2615 2615 /*
2616 2616 * Load the DDTs (dedup tables).
2617 2617 */
2618 2618 error = ddt_load(spa);
2619 2619 if (error != 0)
2620 2620 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2621 2621
2622 2622 spa_update_dspace(spa);
2623 2623
2624 2624 /*
2625 2625 * Validate the config, using the MOS config to fill in any
2626 2626 * information which might be missing. If we fail to validate
2627 2627 * the config then declare the pool unfit for use. If we're
2628 2628 * assembling a pool from a split, the log is not transferred
2629 2629 * over.
2630 2630 */
2631 2631 if (type != SPA_IMPORT_ASSEMBLE) {
2632 2632 nvlist_t *nvconfig;
2633 2633
2634 2634 if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0)
2635 2635 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2636 2636
2637 2637 if (!spa_config_valid(spa, nvconfig)) {
2638 2638 nvlist_free(nvconfig);
2639 2639 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
2640 2640 ENXIO));
2641 2641 }
2642 2642 nvlist_free(nvconfig);
2643 2643
2644 2644 /*
2645 2645 * Now that we've validated the config, check the state of the
2646 2646 * root vdev. If it can't be opened, it indicates one or
2647 2647 * more toplevel vdevs are faulted.
2648 2648 */
2649 2649 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
2650 2650 return (SET_ERROR(ENXIO));
2651 2651
2652 2652 if (spa_check_logs(spa)) {
2653 2653 *ereport = FM_EREPORT_ZFS_LOG_REPLAY;
2654 2654 return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, ENXIO));
2655 2655 }
2656 2656 }
2657 2657
2658 2658 if (missing_feat_write) {
2659 2659 ASSERT(state == SPA_LOAD_TRYIMPORT);
2660 2660
2661 2661 /*
2662 2662 * At this point, we know that we can open the pool in
2663 2663 * read-only mode but not read-write mode. We now have enough
2664 2664 * information and can return to userland.
2665 2665 */
2666 2666 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, ENOTSUP));
2667 2667 }
2668 2668
2669 2669 /*
2670 2670 * We've successfully opened the pool, verify that we're ready
2671 2671 * to start pushing transactions.
2672 2672 */
2673 2673 if (state != SPA_LOAD_TRYIMPORT) {
2674 2674 if (error = spa_load_verify(spa))
2675 2675 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
2676 2676 error));
2677 2677 }
2678 2678
2679 2679 if (spa_writeable(spa) && (state == SPA_LOAD_RECOVER ||
2680 2680 spa->spa_load_max_txg == UINT64_MAX)) {
2681 2681 dmu_tx_t *tx;
2682 2682 int need_update = B_FALSE;
2683 2683
2684 2684 ASSERT(state != SPA_LOAD_TRYIMPORT);
2685 2685
2686 2686 /*
2687 2687 * Claim log blocks that haven't been committed yet.
2688 2688 * This must all happen in a single txg.
2689 2689 * Note: spa_claim_max_txg is updated by spa_claim_notify(),
2690 2690 * invoked from zil_claim_log_block()'s i/o done callback.
2691 2691 * Price of rollback is that we abandon the log.
2692 2692 */
2693 2693 spa->spa_claiming = B_TRUE;
2694 2694
2695 2695 tx = dmu_tx_create_assigned(spa_get_dsl(spa),
2696 2696 spa_first_txg(spa));
2697 2697 (void) dmu_objset_find(spa_name(spa),
2698 2698 zil_claim, tx, DS_FIND_CHILDREN);
2699 2699 dmu_tx_commit(tx);
2700 2700
2701 2701 spa->spa_claiming = B_FALSE;
2702 2702
2703 2703 spa_set_log_state(spa, SPA_LOG_GOOD);
2704 2704 spa->spa_sync_on = B_TRUE;
2705 2705 txg_sync_start(spa->spa_dsl_pool);
2706 2706
2707 2707 /*
2708 2708 * Wait for all claims to sync. We sync up to the highest
2709 2709 * claimed log block birth time so that claimed log blocks
2710 2710 * don't appear to be from the future. spa_claim_max_txg
2711 2711 * will have been set for us by either zil_check_log_chain()
2712 2712 * (invoked from spa_check_logs()) or zil_claim() above.
2713 2713 */
2714 2714 txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);
2715 2715
2716 2716 /*
2717 2717 * If the config cache is stale, or we have uninitialized
2718 2718 * metaslabs (see spa_vdev_add()), then update the config.
2719 2719 *
2720 2720 * If this is a verbatim import, trust the current
2721 2721 * in-core spa_config and update the disk labels.
2722 2722 */
2723 2723 if (config_cache_txg != spa->spa_config_txg ||
2724 2724 state == SPA_LOAD_IMPORT ||
2725 2725 state == SPA_LOAD_RECOVER ||
2726 2726 (spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
2727 2727 need_update = B_TRUE;
2728 2728
2729 2729 for (int c = 0; c < rvd->vdev_children; c++)
2730 2730 if (rvd->vdev_child[c]->vdev_ms_array == 0)
2731 2731 need_update = B_TRUE;
2732 2732
2733 2733 /*
2734 2734 		 * Update the config cache asynchronously in case we're the
2735 2735 * root pool, in which case the config cache isn't writable yet.
2736 2736 */
2737 2737 if (need_update)
2738 2738 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
2739 2739
2740 2740 /*
2741 2741 * Check all DTLs to see if anything needs resilvering.
2742 2742 */
2743 2743 if (!dsl_scan_resilvering(spa->spa_dsl_pool) &&
2744 2744 vdev_resilver_needed(rvd, NULL, NULL))
2745 2745 spa_async_request(spa, SPA_ASYNC_RESILVER);
2746 2746
2747 2747 /*
2748 2748 * Log the fact that we booted up (so that we can detect if
2749 2749 * we rebooted in the middle of an operation).
2750 2750 */
2751 2751 spa_history_log_version(spa, "open");
2752 2752
2753 2753 /*
2754 2754 * Delete any inconsistent datasets.
2755 2755 */
2756 2756 (void) dmu_objset_find(spa_name(spa),
2757 2757 dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN);
2758 2758
2759 2759 /*
2760 2760 * Clean up any stale temporary dataset userrefs.
2761 2761 */
2762 2762 dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool);
2763 2763 }
2764 2764
2765 2765 return (0);
2766 2766 }
2767 2767
2768 2768 static int
2769 2769 spa_load_retry(spa_t *spa, spa_load_state_t state, int mosconfig)
2770 2770 {
2771 2771 int mode = spa->spa_mode;
2772 2772
2773 2773 spa_unload(spa);
2774 2774 spa_deactivate(spa);
2775 2775
2776 2776 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1;
2777 2777
2778 2778 spa_activate(spa, mode);
2779 2779 spa_async_suspend(spa);
2780 2780
2781 2781 return (spa_load(spa, state, SPA_IMPORT_EXISTING, mosconfig));
2782 2782 }
2783 2783
2784 2784 /*
2785 2785 * If spa_load() fails this function will try loading prior txg's. If
2786 2786 * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool
2787 2787 * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this
2788 2788 * function will not rewind the pool and will return the same error as
2789 2789 * spa_load().
2790 2790 */
2791 2791 static int
2792 2792 spa_load_best(spa_t *spa, spa_load_state_t state, int mosconfig,
2793 2793 uint64_t max_request, int rewind_flags)
2794 2794 {
2795 2795 nvlist_t *loadinfo = NULL;
2796 2796 nvlist_t *config = NULL;
2797 2797 int load_error, rewind_error;
2798 2798 uint64_t safe_rewind_txg;
2799 2799 uint64_t min_txg;
2800 2800
2801 2801 if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) {
2802 2802 spa->spa_load_max_txg = spa->spa_load_txg;
2803 2803 spa_set_log_state(spa, SPA_LOG_CLEAR);
2804 2804 } else {
2805 2805 spa->spa_load_max_txg = max_request;
2806 2806 if (max_request != UINT64_MAX)
2807 2807 spa->spa_extreme_rewind = B_TRUE;
2808 2808 }
2809 2809
2810 2810 load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING,
2811 2811 mosconfig);
2812 2812 if (load_error == 0)
2813 2813 return (0);
2814 2814
2815 2815 if (spa->spa_root_vdev != NULL)
2816 2816 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
2817 2817
2818 2818 spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg;
2819 2819 spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp;
2820 2820
2821 2821 if (rewind_flags & ZPOOL_NEVER_REWIND) {
2822 2822 nvlist_free(config);
2823 2823 return (load_error);
2824 2824 }
2825 2825
2826 2826 if (state == SPA_LOAD_RECOVER) {
2827 2827 /* Price of rolling back is discarding txgs, including log */
2828 2828 spa_set_log_state(spa, SPA_LOG_CLEAR);
2829 2829 } else {
2830 2830 /*
2831 2831 * If we aren't rolling back save the load info from our first
2832 2832 * import attempt so that we can restore it after attempting
2833 2833 * to rewind.
2834 2834 */
2835 2835 loadinfo = spa->spa_load_info;
2836 2836 spa->spa_load_info = fnvlist_alloc();
2837 2837 }
2838 2838
2839 2839 spa->spa_load_max_txg = spa->spa_last_ubsync_txg;
2840 2840 safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE;
2841 2841 min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ?
2842 2842 TXG_INITIAL : safe_rewind_txg;
2843 2843
2844 2844 /*
2845 2845 * Continue as long as we're finding errors, we're still within
2846 2846 * the acceptable rewind range, and we're still finding uberblocks
2847 2847 */
2848 2848 while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg &&
2849 2849 spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) {
2850 2850 if (spa->spa_load_max_txg < safe_rewind_txg)
2851 2851 spa->spa_extreme_rewind = B_TRUE;
2852 2852 rewind_error = spa_load_retry(spa, state, mosconfig);
2853 2853 }
2854 2854
2855 2855 spa->spa_extreme_rewind = B_FALSE;
2856 2856 spa->spa_load_max_txg = UINT64_MAX;
2857 2857
2858 2858 if (config && (rewind_error || state != SPA_LOAD_RECOVER))
2859 2859 spa_config_set(spa, config);
2860 2860
2861 2861 if (state == SPA_LOAD_RECOVER) {
2862 2862 ASSERT3P(loadinfo, ==, NULL);
2863 2863 return (rewind_error);
2864 2864 } else {
2865 2865 /* Store the rewind info as part of the initial load info */
2866 2866 fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO,
2867 2867 spa->spa_load_info);
2868 2868
2869 2869 /* Restore the initial load info */
2870 2870 fnvlist_free(spa->spa_load_info);
2871 2871 spa->spa_load_info = loadinfo;
2872 2872
2873 2873 return (load_error);
2874 2874 }
2875 2875 }
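
The loop above reduces to: try the load once; on failure, and only while
rewinding is permitted, retry with the txg ceiling lowered to just below the
uberblock that failed, until the load succeeds or the candidate uberblock
falls outside the [min_txg, ceiling] window. A schematic of that control flow,
where try_load() and current_txg() are hypothetical stand-ins for
spa_load_retry() and the in-core uberblock txg:

	extern int try_load(uint64_t max_txg);	/* hypothetical stand-in */
	extern uint64_t current_txg(void);	/* hypothetical stand-in */

	static int
	rewind_best(uint64_t min_txg, uint64_t max_txg)
	{
		int error = try_load(max_txg);

		while (error != 0 && current_txg() >= min_txg &&
		    current_txg() <= max_txg) {
			max_txg = current_txg() - 1;	/* step back one txg */
			error = try_load(max_txg);
		}
		return (error);
	}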
2876 2876
2877 2877 /*
2878 2878 * Pool Open/Import
2879 2879 *
2880 2880 * The import case is identical to an open except that the configuration is sent
2881 2881 * down from userland, instead of grabbed from the configuration cache. For the
2882 2882 * case of an open, the pool configuration will exist in the
2883 2883 * POOL_STATE_UNINITIALIZED state.
2884 2884 *
2885 2885 * The stats information (gen/count/ustats) is used to gather vdev statistics at
2886 2886  * the same time we open the pool, without having to keep around the
2887 2887  * spa_t in some ambiguous state.
2888 2888 */
2889 2889 static int
2890 2890 spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
2891 2891 nvlist_t **config)
2892 2892 {
2893 2893 spa_t *spa;
2894 2894 spa_load_state_t state = SPA_LOAD_OPEN;
2895 2895 int error;
2896 2896 int locked = B_FALSE;
2897 2897
2898 2898 *spapp = NULL;
2899 2899
2900 2900 /*
2901 2901 * As disgusting as this is, we need to support recursive calls to this
2902 2902 * function because dsl_dir_open() is called during spa_load(), and ends
2903 2903 * up calling spa_open() again. The real fix is to figure out how to
2904 2904 * avoid dsl_dir_open() calling this in the first place.
2905 2905 */
2906 2906 if (mutex_owner(&spa_namespace_lock) != curthread) {
2907 2907 mutex_enter(&spa_namespace_lock);
2908 2908 locked = B_TRUE;
2909 2909 }
2910 2910
2911 2911 if ((spa = spa_lookup(pool)) == NULL) {
2912 2912 if (locked)
2913 2913 mutex_exit(&spa_namespace_lock);
2914 2914 return (SET_ERROR(ENOENT));
2915 2915 }
2916 2916
2917 2917 if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
2918 2918 zpool_rewind_policy_t policy;
2919 2919
2920 2920 zpool_get_rewind_policy(nvpolicy ? nvpolicy : spa->spa_config,
2921 2921 &policy);
2922 2922 if (policy.zrp_request & ZPOOL_DO_REWIND)
2923 2923 state = SPA_LOAD_RECOVER;
2924 2924
2925 2925 spa_activate(spa, spa_mode_global);
2926 2926
2927 2927 if (state != SPA_LOAD_RECOVER)
2928 2928 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
2929 2929
2930 2930 error = spa_load_best(spa, state, B_FALSE, policy.zrp_txg,
2931 2931 policy.zrp_request);
2932 2932
2933 2933 if (error == EBADF) {
2934 2934 /*
2935 2935 * If vdev_validate() returns failure (indicated by
2936 2936 * EBADF), it indicates that one of the vdevs indicates
2937 2937 * that the pool has been exported or destroyed. If
2938 2938 * this is the case, the config cache is out of sync and
2939 2939 * we should remove the pool from the namespace.
2940 2940 */
2941 2941 spa_unload(spa);
2942 2942 spa_deactivate(spa);
2943 2943 spa_config_sync(spa, B_TRUE, B_TRUE);
2944 2944 spa_remove(spa);
2945 2945 if (locked)
2946 2946 mutex_exit(&spa_namespace_lock);
2947 2947 return (SET_ERROR(ENOENT));
2948 2948 }
2949 2949
2950 2950 if (error) {
2951 2951 /*
2952 2952 * We can't open the pool, but we still have useful
2953 2953 * information: the state of each vdev after the
2954 2954 * attempted vdev_open(). Return this to the user.
2955 2955 */
2956 2956 if (config != NULL && spa->spa_config) {
2957 2957 VERIFY(nvlist_dup(spa->spa_config, config,
2958 2958 KM_SLEEP) == 0);
2959 2959 VERIFY(nvlist_add_nvlist(*config,
2960 2960 ZPOOL_CONFIG_LOAD_INFO,
2961 2961 spa->spa_load_info) == 0);
2962 2962 }
2963 2963 spa_unload(spa);
2964 2964 spa_deactivate(spa);
2965 2965 spa->spa_last_open_failed = error;
2966 2966 if (locked)
2967 2967 mutex_exit(&spa_namespace_lock);
2968 2968 *spapp = NULL;
2969 2969 return (error);
2970 2970 }
2971 2971 }
2972 2972
2973 2973 spa_open_ref(spa, tag);
2974 2974
2975 2975 if (config != NULL)
2976 2976 *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
2977 2977
2978 2978 /*
2979 2979 * If we've recovered the pool, pass back any information we
2980 2980 * gathered while doing the load.
2981 2981 */
2982 2982 if (state == SPA_LOAD_RECOVER) {
2983 2983 VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO,
2984 2984 spa->spa_load_info) == 0);
2985 2985 }
2986 2986
2987 2987 if (locked) {
2988 2988 spa->spa_last_open_failed = 0;
2989 2989 spa->spa_last_ubsync_txg = 0;
2990 2990 spa->spa_load_txg = 0;
2991 2991 mutex_exit(&spa_namespace_lock);
2992 2992 }
2993 2993
2994 2994 *spapp = spa;
2995 2995
2996 2996 return (0);
2997 2997 }
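
The mutex_owner() test at the top of spa_open_common() exists solely to
tolerate re-entry: dsl_dir_open() during spa_load() can call back into
spa_open() on the same thread. A userland program would more often get that
tolerance from a recursive mutex than from an ownership check; a sketch,
offered only as an analogy to the kernel idiom:

	#include <pthread.h>

	static pthread_mutex_t ns_lock;

	static void
	ns_lock_init(void)
	{
		pthread_mutexattr_t attr;

		(void) pthread_mutexattr_init(&attr);
		(void) pthread_mutexattr_settype(&attr,
		    PTHREAD_MUTEX_RECURSIVE);
		(void) pthread_mutex_init(&ns_lock, &attr);
		(void) pthread_mutexattr_destroy(&attr);
	}

The kernel version keeps a plain mutex and checks ownership instead, which
also documents exactly where the recursion comes from.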
2998 2998
2999 2999 int
3000 3000 spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy,
3001 3001 nvlist_t **config)
3002 3002 {
3003 3003 return (spa_open_common(name, spapp, tag, policy, config));
3004 3004 }
3005 3005
3006 3006 int
3007 3007 spa_open(const char *name, spa_t **spapp, void *tag)
3008 3008 {
3009 3009 return (spa_open_common(name, spapp, tag, NULL, NULL));
3010 3010 }
3011 3011
3012 3012 /*
3013 3013 * Lookup the given spa_t, incrementing the inject count in the process,
3014 3014 * preventing it from being exported or destroyed.
3015 3015 */
3016 3016 spa_t *
3017 3017 spa_inject_addref(char *name)
3018 3018 {
3019 3019 spa_t *spa;
3020 3020
3021 3021 mutex_enter(&spa_namespace_lock);
3022 3022 if ((spa = spa_lookup(name)) == NULL) {
3023 3023 mutex_exit(&spa_namespace_lock);
3024 3024 return (NULL);
3025 3025 }
3026 3026 spa->spa_inject_ref++;
3027 3027 mutex_exit(&spa_namespace_lock);
3028 3028
3029 3029 return (spa);
3030 3030 }
3031 3031
3032 3032 void
3033 3033 spa_inject_delref(spa_t *spa)
3034 3034 {
3035 3035 mutex_enter(&spa_namespace_lock);
3036 3036 spa->spa_inject_ref--;
3037 3037 mutex_exit(&spa_namespace_lock);
3038 3038 }
3039 3039
3040 3040 /*
3041 3041 * Add spares device information to the nvlist.
3042 3042 */
3043 3043 static void
3044 3044 spa_add_spares(spa_t *spa, nvlist_t *config)
3045 3045 {
3046 3046 nvlist_t **spares;
3047 3047 uint_t i, nspares;
3048 3048 nvlist_t *nvroot;
3049 3049 uint64_t guid;
3050 3050 vdev_stat_t *vs;
3051 3051 uint_t vsc;
3052 3052 uint64_t pool;
3053 3053
3054 3054 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
3055 3055
3056 3056 if (spa->spa_spares.sav_count == 0)
3057 3057 return;
3058 3058
3059 3059 VERIFY(nvlist_lookup_nvlist(config,
3060 3060 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3061 3061 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
3062 3062 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
3063 3063 if (nspares != 0) {
3064 3064 VERIFY(nvlist_add_nvlist_array(nvroot,
3065 3065 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
3066 3066 VERIFY(nvlist_lookup_nvlist_array(nvroot,
3067 3067 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
3068 3068
3069 3069 /*
3070 3070 * Go through and find any spares which have since been
3071 3071 * repurposed as an active spare. If this is the case, update
3072 3072 * their status appropriately.
3073 3073 */
3074 3074 for (i = 0; i < nspares; i++) {
3075 3075 VERIFY(nvlist_lookup_uint64(spares[i],
3076 3076 ZPOOL_CONFIG_GUID, &guid) == 0);
3077 3077 if (spa_spare_exists(guid, &pool, NULL) &&
3078 3078 pool != 0ULL) {
3079 3079 VERIFY(nvlist_lookup_uint64_array(
3080 3080 spares[i], ZPOOL_CONFIG_VDEV_STATS,
3081 3081 (uint64_t **)&vs, &vsc) == 0);
3082 3082 vs->vs_state = VDEV_STATE_CANT_OPEN;
3083 3083 vs->vs_aux = VDEV_AUX_SPARED;
3084 3084 }
3085 3085 }
3086 3086 }
3087 3087 }
3088 3088
3089 3089 /*
3090 3090 * Add l2cache device information to the nvlist, including vdev stats.
3091 3091 */
3092 3092 static void
3093 3093 spa_add_l2cache(spa_t *spa, nvlist_t *config)
3094 3094 {
3095 3095 nvlist_t **l2cache;
3096 3096 uint_t i, j, nl2cache;
3097 3097 nvlist_t *nvroot;
3098 3098 uint64_t guid;
3099 3099 vdev_t *vd;
3100 3100 vdev_stat_t *vs;
3101 3101 uint_t vsc;
3102 3102
3103 3103 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
3104 3104
3105 3105 if (spa->spa_l2cache.sav_count == 0)
3106 3106 return;
3107 3107
3108 3108 VERIFY(nvlist_lookup_nvlist(config,
3109 3109 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3110 3110 VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
3111 3111 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
3112 3112 if (nl2cache != 0) {
3113 3113 VERIFY(nvlist_add_nvlist_array(nvroot,
3114 3114 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
3115 3115 VERIFY(nvlist_lookup_nvlist_array(nvroot,
3116 3116 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
3117 3117
3118 3118 /*
3119 3119 * Update level 2 cache device stats.
3120 3120 */
3121 3121
3122 3122 for (i = 0; i < nl2cache; i++) {
3123 3123 VERIFY(nvlist_lookup_uint64(l2cache[i],
3124 3124 ZPOOL_CONFIG_GUID, &guid) == 0);
3125 3125
3126 3126 vd = NULL;
3127 3127 for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
3128 3128 if (guid ==
3129 3129 spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
3130 3130 vd = spa->spa_l2cache.sav_vdevs[j];
3131 3131 break;
3132 3132 }
3133 3133 }
3134 3134 ASSERT(vd != NULL);
3135 3135
3136 3136 VERIFY(nvlist_lookup_uint64_array(l2cache[i],
3137 3137 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
3138 3138 == 0);
3139 3139 vdev_get_stats(vd, vs);
3140 3140 }
3141 3141 }
3142 3142 }
3143 3143
3144 3144 static void
3145 3145 spa_add_feature_stats(spa_t *spa, nvlist_t *config)
3146 3146 {
3147 3147 nvlist_t *features;
3148 3148 zap_cursor_t zc;
3149 3149 zap_attribute_t za;
3150 3150
3151 3151 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
3152 3152 VERIFY(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP) == 0);
3153 3153
3154 3154 if (spa->spa_feat_for_read_obj != 0) {
3155 3155 for (zap_cursor_init(&zc, spa->spa_meta_objset,
3156 3156 spa->spa_feat_for_read_obj);
3157 3157 zap_cursor_retrieve(&zc, &za) == 0;
3158 3158 zap_cursor_advance(&zc)) {
3159 3159 ASSERT(za.za_integer_length == sizeof (uint64_t) &&
3160 3160 za.za_num_integers == 1);
3161 3161 VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name,
3162 3162 za.za_first_integer));
3163 3163 }
3164 3164 zap_cursor_fini(&zc);
3165 3165 }
3166 3166
3167 3167 if (spa->spa_feat_for_write_obj != 0) {
3168 3168 for (zap_cursor_init(&zc, spa->spa_meta_objset,
3169 3169 spa->spa_feat_for_write_obj);
3170 3170 zap_cursor_retrieve(&zc, &za) == 0;
3171 3171 zap_cursor_advance(&zc)) {
3172 3172 ASSERT(za.za_integer_length == sizeof (uint64_t) &&
3173 3173 za.za_num_integers == 1);
3174 3174 VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name,
3175 3175 za.za_first_integer));
3176 3176 }
3177 3177 zap_cursor_fini(&zc);
3178 3178 }
3179 3179
3180 3180 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
3181 3181 features) == 0);
3182 3182 nvlist_free(features);
3183 3183 }
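
The two ZAP cursor walks in spa_add_feature_stats() are identical except for
the object being read. A hypothetical helper (not part of this changeset)
could fold them together using the same cursor interfaces:

	static void
	spa_feature_obj_to_nvlist(spa_t *spa, uint64_t obj, nvlist_t *features)
	{
		zap_cursor_t zc;
		zap_attribute_t za;

		if (obj == 0)
			return;
		for (zap_cursor_init(&zc, spa->spa_meta_objset, obj);
		    zap_cursor_retrieve(&zc, &za) == 0;
		    zap_cursor_advance(&zc)) {
			VERIFY3U(0, ==, nvlist_add_uint64(features,
			    za.za_name, za.za_first_integer));
		}
		zap_cursor_fini(&zc);
	}

Each existing loop would then become a single call, e.g.
spa_feature_obj_to_nvlist(spa, spa->spa_feat_for_read_obj, features).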
3184 3184
3185 3185 int
3186 3186 spa_get_stats(const char *name, nvlist_t **config,
3187 3187 char *altroot, size_t buflen)
3188 3188 {
3189 3189 int error;
3190 3190 spa_t *spa;
3191 3191
3192 3192 *config = NULL;
3193 3193 error = spa_open_common(name, &spa, FTAG, NULL, config);
3194 3194
3195 3195 if (spa != NULL) {
3196 3196 /*
3197 3197 * This still leaves a window of inconsistency where the spares
3198 3198 * or l2cache devices could change and the config would be
3199 3199 * self-inconsistent.
3200 3200 */
3201 3201 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
3202 3202
3203 3203 if (*config != NULL) {
3204 3204 uint64_t loadtimes[2];
3205 3205
3206 3206 loadtimes[0] = spa->spa_loaded_ts.tv_sec;
3207 3207 loadtimes[1] = spa->spa_loaded_ts.tv_nsec;
3208 3208 VERIFY(nvlist_add_uint64_array(*config,
3209 3209 ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0);
3210 3210
3211 3211 VERIFY(nvlist_add_uint64(*config,
3212 3212 ZPOOL_CONFIG_ERRCOUNT,
3213 3213 spa_get_errlog_size(spa)) == 0);
3214 3214
3215 3215 if (spa_suspended(spa))
3216 3216 VERIFY(nvlist_add_uint64(*config,
3217 3217 ZPOOL_CONFIG_SUSPENDED,
3218 3218 spa->spa_failmode) == 0);
3219 3219
3220 3220 spa_add_spares(spa, *config);
3221 3221 spa_add_l2cache(spa, *config);
3222 3222 spa_add_feature_stats(spa, *config);
3223 3223 }
3224 3224 }
3225 3225
3226 3226 /*
3227 3227 * We want to get the alternate root even for faulted pools, so we cheat
3228 3228 * and call spa_lookup() directly.
3229 3229 */
3230 3230 if (altroot) {
3231 3231 if (spa == NULL) {
3232 3232 mutex_enter(&spa_namespace_lock);
3233 3233 spa = spa_lookup(name);
3234 3234 if (spa)
3235 3235 spa_altroot(spa, altroot, buflen);
3236 3236 else
3237 3237 altroot[0] = '\0';
3238 3238 spa = NULL;
3239 3239 mutex_exit(&spa_namespace_lock);
3240 3240 } else {
3241 3241 spa_altroot(spa, altroot, buflen);
3242 3242 }
3243 3243 }
3244 3244
3245 3245 if (spa != NULL) {
3246 3246 spa_config_exit(spa, SCL_CONFIG, FTAG);
3247 3247 spa_close(spa, FTAG);
3248 3248 }
3249 3249
3250 3250 return (error);
3251 3251 }
3252 3252
3253 3253 /*
3254 3254 * Validate that the auxiliary device array is well formed. We must have an
3255 3255  * array of nvlists, each of which describes a valid leaf vdev. If this is an
3256 3256 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
3257 3257 * specified, as long as they are well-formed.
3258 3258 */
3259 3259 static int
3260 3260 spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
3261 3261 spa_aux_vdev_t *sav, const char *config, uint64_t version,
3262 3262 vdev_labeltype_t label)
3263 3263 {
3264 3264 nvlist_t **dev;
3265 3265 uint_t i, ndev;
3266 3266 vdev_t *vd;
3267 3267 int error;
3268 3268
3269 3269 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
3270 3270
3271 3271 /*
3272 3272 * It's acceptable to have no devs specified.
3273 3273 */
3274 3274 if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
3275 3275 return (0);
3276 3276
3277 3277 if (ndev == 0)
3278 3278 return (SET_ERROR(EINVAL));
3279 3279
3280 3280 /*
3281 3281 * Make sure the pool is formatted with a version that supports this
3282 3282 * device type.
3283 3283 */
3284 3284 if (spa_version(spa) < version)
3285 3285 return (SET_ERROR(ENOTSUP));
3286 3286
3287 3287 /*
3288 3288 * Set the pending device list so we correctly handle device in-use
3289 3289 * checking.
3290 3290 */
3291 3291 sav->sav_pending = dev;
3292 3292 sav->sav_npending = ndev;
3293 3293
3294 3294 for (i = 0; i < ndev; i++) {
3295 3295 if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
3296 3296 mode)) != 0)
3297 3297 goto out;
3298 3298
3299 3299 if (!vd->vdev_ops->vdev_op_leaf) {
3300 3300 vdev_free(vd);
3301 3301 error = SET_ERROR(EINVAL);
3302 3302 goto out;
3303 3303 }
3304 3304
3305 3305 /*
3306 3306 		 * In kernel context, the L2ARC currently supports only disk
3307 3307 		 * devices; for user-level testing, we allow any leaf device.
3308 3308 */
3309 3309 #ifdef _KERNEL
3310 3310 if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
3311 3311 strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
3312 3312 error = SET_ERROR(ENOTBLK);
3313 3313 vdev_free(vd);
3314 3314 goto out;
3315 3315 }
3316 3316 #endif
3317 3317 vd->vdev_top = vd;
3318 3318
3319 3319 if ((error = vdev_open(vd)) == 0 &&
3320 3320 (error = vdev_label_init(vd, crtxg, label)) == 0) {
3321 3321 VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
3322 3322 vd->vdev_guid) == 0);
3323 3323 }
3324 3324
3325 3325 vdev_free(vd);
3326 3326
3327 3327 if (error &&
3328 3328 (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
3329 3329 goto out;
3330 3330 else
3331 3331 error = 0;
3332 3332 }
3333 3333
3334 3334 out:
3335 3335 sav->sav_pending = NULL;
3336 3336 sav->sav_npending = 0;
3337 3337 return (error);
3338 3338 }
3339 3339
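For reference, a minimal sketch of the nvlist array shape the routine above
validates, built with the same libnvpair calls used elsewhere in this file.
The device path is hypothetical; ZPOOL_CONFIG_GUID is added by
spa_validate_aux_devs() itself once vdev_open() and vdev_label_init()
succeed.

	nvlist_t *spare, *nvroot;

	VERIFY(nvlist_alloc(&spare, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_string(spare, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_DISK) == 0);
	VERIFY(nvlist_add_string(spare, ZPOOL_CONFIG_PATH,
	    "/dev/dsk/c0t9d0s0") == 0);	/* hypothetical device */

	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spare, 1) == 0);
	/* nvroot now matches the ZPOOL_CONFIG_SPARES case checked above */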
3340 3340 static int
3341 3341 spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
3342 3342 {
3343 3343 int error;
3344 3344
3345 3345 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
3346 3346
3347 3347 if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
3348 3348 &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
3349 3349 VDEV_LABEL_SPARE)) != 0) {
3350 3350 return (error);
3351 3351 }
3352 3352
3353 3353 return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
3354 3354 &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
3355 3355 VDEV_LABEL_L2CACHE));
3356 3356 }
3357 3357
3358 3358 static void
3359 3359 spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
3360 3360 const char *config)
3361 3361 {
3362 3362 int i;
3363 3363
3364 3364 if (sav->sav_config != NULL) {
3365 3365 nvlist_t **olddevs;
3366 3366 uint_t oldndevs;
3367 3367 nvlist_t **newdevs;
3368 3368
3369 3369 /*
3370 3370 		 * Generate a new dev list by concatenating the new devs
3371 3371 		 * with the current dev list.
3372 3372 */
3373 3373 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
3374 3374 &olddevs, &oldndevs) == 0);
3375 3375
3376 3376 newdevs = kmem_alloc(sizeof (void *) *
3377 3377 (ndevs + oldndevs), KM_SLEEP);
3378 3378 for (i = 0; i < oldndevs; i++)
3379 3379 VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
3380 3380 KM_SLEEP) == 0);
3381 3381 for (i = 0; i < ndevs; i++)
3382 3382 VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
3383 3383 KM_SLEEP) == 0);
3384 3384
3385 3385 VERIFY(nvlist_remove(sav->sav_config, config,
3386 3386 DATA_TYPE_NVLIST_ARRAY) == 0);
3387 3387
3388 3388 VERIFY(nvlist_add_nvlist_array(sav->sav_config,
3389 3389 config, newdevs, ndevs + oldndevs) == 0);
3390 3390 for (i = 0; i < oldndevs + ndevs; i++)
3391 3391 nvlist_free(newdevs[i]);
3392 3392 kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
3393 3393 } else {
3394 3394 /*
3395 3395 * Generate a new dev list.
3396 3396 */
3397 3397 VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
3398 3398 KM_SLEEP) == 0);
3399 3399 VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
3400 3400 devs, ndevs) == 0);
3401 3401 }
3402 3402 }
3403 3403
3404 3404 /*
3405 3405 * Stop and drop level 2 ARC devices
3406 3406 */
3407 3407 void
3408 3408 spa_l2cache_drop(spa_t *spa)
3409 3409 {
3410 3410 vdev_t *vd;
3411 3411 int i;
3412 3412 spa_aux_vdev_t *sav = &spa->spa_l2cache;
3413 3413
3414 3414 for (i = 0; i < sav->sav_count; i++) {
3415 3415 uint64_t pool;
3416 3416
3417 3417 vd = sav->sav_vdevs[i];
3418 3418 ASSERT(vd != NULL);
3419 3419
3420 3420 if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
3421 3421 pool != 0ULL && l2arc_vdev_present(vd))
3422 3422 l2arc_remove_vdev(vd);
3423 3423 }
3424 3424 }
3425 3425
3426 3426 /*
3427 3427 * Pool Creation
3428 3428 */
3429 3429 int
3430 3430 spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
3431 3431 nvlist_t *zplprops)
3432 3432 {
3433 3433 spa_t *spa;
3434 3434 char *altroot = NULL;
3435 3435 vdev_t *rvd;
3436 3436 dsl_pool_t *dp;
3437 3437 dmu_tx_t *tx;
3438 3438 int error = 0;
3439 3439 uint64_t txg = TXG_INITIAL;
3440 3440 nvlist_t **spares, **l2cache;
3441 3441 uint_t nspares, nl2cache;
3442 3442 uint64_t version, obj;
3443 3443 boolean_t has_features;
3444 3444
3445 3445 /*
3446 3446 * If this pool already exists, return failure.
3447 3447 */
3448 3448 mutex_enter(&spa_namespace_lock);
3449 3449 if (spa_lookup(pool) != NULL) {
3450 3450 mutex_exit(&spa_namespace_lock);
3451 3451 return (SET_ERROR(EEXIST));
3452 3452 }
3453 3453
3454 3454 /*
3455 3455 * Allocate a new spa_t structure.
3456 3456 */
3457 3457 (void) nvlist_lookup_string(props,
3458 3458 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
3459 3459 spa = spa_add(pool, NULL, altroot);
3460 3460 spa_activate(spa, spa_mode_global);
3461 3461
3462 3462 if (props && (error = spa_prop_validate(spa, props))) {
3463 3463 spa_deactivate(spa);
3464 3464 spa_remove(spa);
3465 3465 mutex_exit(&spa_namespace_lock);
3466 3466 return (error);
3467 3467 }
3468 3468
3469 3469 has_features = B_FALSE;
3470 3470 for (nvpair_t *elem = nvlist_next_nvpair(props, NULL);
3471 3471 elem != NULL; elem = nvlist_next_nvpair(props, elem)) {
3472 3472 if (zpool_prop_feature(nvpair_name(elem)))
3473 3473 has_features = B_TRUE;
3474 3474 }
3475 3475
3476 3476 if (has_features || nvlist_lookup_uint64(props,
3477 3477 zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) {
3478 3478 version = SPA_VERSION;
3479 3479 }
3480 3480 ASSERT(SPA_VERSION_IS_SUPPORTED(version));
3481 3481
3482 3482 spa->spa_first_txg = txg;
3483 3483 spa->spa_uberblock.ub_txg = txg - 1;
3484 3484 spa->spa_uberblock.ub_version = version;
3485 3485 spa->spa_ubsync = spa->spa_uberblock;
3486 3486
3487 3487 /*
3488 3488 * Create "The Godfather" zio to hold all async IOs
3489 3489 */
3490 3490 spa->spa_async_zio_root = zio_root(spa, NULL, NULL,
3491 3491 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER);
3492 3492
3493 3493 /*
3494 3494 * Create the root vdev.
3495 3495 */
3496 3496 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3497 3497
3498 3498 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
3499 3499
3500 3500 ASSERT(error != 0 || rvd != NULL);
3501 3501 ASSERT(error != 0 || spa->spa_root_vdev == rvd);
3502 3502
3503 3503 if (error == 0 && !zfs_allocatable_devs(nvroot))
3504 3504 error = SET_ERROR(EINVAL);
3505 3505
3506 3506 if (error == 0 &&
3507 3507 (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
3508 3508 (error = spa_validate_aux(spa, nvroot, txg,
3509 3509 VDEV_ALLOC_ADD)) == 0) {
3510 3510 for (int c = 0; c < rvd->vdev_children; c++) {
3511 3511 vdev_metaslab_set_size(rvd->vdev_child[c]);
3512 3512 vdev_expand(rvd->vdev_child[c], txg);
3513 3513 }
3514 3514 }
3515 3515
3516 3516 spa_config_exit(spa, SCL_ALL, FTAG);
3517 3517
3518 3518 if (error != 0) {
3519 3519 spa_unload(spa);
3520 3520 spa_deactivate(spa);
3521 3521 spa_remove(spa);
3522 3522 mutex_exit(&spa_namespace_lock);
3523 3523 return (error);
3524 3524 }
3525 3525
3526 3526 /*
3527 3527 * Get the list of spares, if specified.
3528 3528 */
3529 3529 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
3530 3530 &spares, &nspares) == 0) {
3531 3531 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
3532 3532 KM_SLEEP) == 0);
3533 3533 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
3534 3534 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
3535 3535 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3536 3536 spa_load_spares(spa);
3537 3537 spa_config_exit(spa, SCL_ALL, FTAG);
3538 3538 spa->spa_spares.sav_sync = B_TRUE;
3539 3539 }
3540 3540
3541 3541 /*
3542 3542 * Get the list of level 2 cache devices, if specified.
3543 3543 */
3544 3544 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
3545 3545 &l2cache, &nl2cache) == 0) {
3546 3546 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
3547 3547 NV_UNIQUE_NAME, KM_SLEEP) == 0);
3548 3548 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
3549 3549 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
3550 3550 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3551 3551 spa_load_l2cache(spa);
3552 3552 spa_config_exit(spa, SCL_ALL, FTAG);
3553 3553 spa->spa_l2cache.sav_sync = B_TRUE;
3554 3554 }
3555 3555
3556 3556 spa->spa_is_initializing = B_TRUE;
3557 3557 spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg);
3558 3558 spa->spa_meta_objset = dp->dp_meta_objset;
3559 3559 spa->spa_is_initializing = B_FALSE;
3560 3560
3561 3561 /*
3562 3562 * Create DDTs (dedup tables).
3563 3563 */
3564 3564 ddt_create(spa);
3565 3565
3566 3566 spa_update_dspace(spa);
3567 3567
3568 3568 tx = dmu_tx_create_assigned(dp, txg);
3569 3569
3570 3570 /*
3571 3571 * Create the pool config object.
3572 3572 */
3573 3573 spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
3574 3574 DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
3575 3575 DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
3576 3576
3577 3577 if (zap_add(spa->spa_meta_objset,
3578 3578 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
3579 3579 sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
3580 3580 cmn_err(CE_PANIC, "failed to add pool config");
3581 3581 }
3582 3582
3583 3583 if (spa_version(spa) >= SPA_VERSION_FEATURES)
3584 3584 spa_feature_create_zap_objects(spa, tx);
3585 3585
3586 3586 if (zap_add(spa->spa_meta_objset,
3587 3587 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION,
3588 3588 sizeof (uint64_t), 1, &version, tx) != 0) {
3589 3589 cmn_err(CE_PANIC, "failed to add pool version");
3590 3590 }
3591 3591
3592 3592 /* Newly created pools with the right version are always deflated. */
3593 3593 if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
3594 3594 spa->spa_deflate = TRUE;
3595 3595 if (zap_add(spa->spa_meta_objset,
3596 3596 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
3597 3597 sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
3598 3598 cmn_err(CE_PANIC, "failed to add deflate");
3599 3599 }
3600 3600 }
3601 3601
3602 3602 /*
3603 3603 * Create the deferred-free bpobj. Turn off compression
3604 3604 * because sync-to-convergence takes longer if the blocksize
3605 3605 * keeps changing.
3606 3606 */
3607 3607 obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx);
3608 3608 dmu_object_set_compress(spa->spa_meta_objset, obj,
3609 3609 ZIO_COMPRESS_OFF, tx);
3610 3610 if (zap_add(spa->spa_meta_objset,
3611 3611 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ,
3612 3612 sizeof (uint64_t), 1, &obj, tx) != 0) {
3613 3613 cmn_err(CE_PANIC, "failed to add bpobj");
3614 3614 }
3615 3615 VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj,
3616 3616 spa->spa_meta_objset, obj));
3617 3617
3618 3618 /*
3619 3619 * Create the pool's history object.
3620 3620 */
3621 3621 if (version >= SPA_VERSION_ZPOOL_HISTORY)
3622 3622 spa_history_create_obj(spa, tx);
3623 3623
3624 3624 /*
3625 3625 * Set pool properties.
3626 3626 */
3627 3627 spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
3628 3628 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
3629 3629 spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
3630 3630 spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
3631 3631
3632 3632 if (props != NULL) {
3633 3633 spa_configfile_set(spa, props, B_FALSE);
3634 3634 spa_sync_props(props, tx);
3635 3635 }
3636 3636
3637 3637 dmu_tx_commit(tx);
3638 3638
3639 3639 spa->spa_sync_on = B_TRUE;
3640 3640 txg_sync_start(spa->spa_dsl_pool);
3641 3641
3642 3642 /*
3643 3643 * We explicitly wait for the first transaction to complete so that our
3644 3644 * bean counters are appropriately updated.
3645 3645 */
3646 3646 txg_wait_synced(spa->spa_dsl_pool, txg);
3647 3647
3648 3648 spa_config_sync(spa, B_FALSE, B_TRUE);
3649 3649
3650 3650 spa_history_log_version(spa, "create");
3651 3651
3652 3652 spa->spa_minref = refcount_count(&spa->spa_refcount);
3653 3653
3654 3654 mutex_exit(&spa_namespace_lock);
3655 3655
3656 3656 return (0);
3657 3657 }
3658 3658
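As a usage sketch: a caller hands spa_create() a root vdev wrapping its
top-level children, the same tree shape spa_config_parse() consumes above.
The device path is hypothetical, error handling is elided, and props and
zplprops are passed as NULL for brevity; kernel callers normally supply
both.

	nvlist_t *disk, *nvroot;

	VERIFY(nvlist_alloc(&disk, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_DISK) == 0);
	VERIFY(nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
	    "/dev/dsk/c0t0d0s0") == 0);	/* hypothetical device */

	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT) == 0);
	VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &disk, 1) == 0);

	(void) spa_create("tank", nvroot, NULL, NULL);
	nvlist_free(nvroot);
	nvlist_free(disk);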
3659 3659 #ifdef _KERNEL
3660 3660 /*
3661 3661 * Get the root pool information from the root disk, then import the root pool
3662 3662  * during system boot.
3663 3663 */
3664 3664 extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **);
3665 3665
3666 3666 static nvlist_t *
3667 3667 spa_generate_rootconf(char *devpath, char *devid, uint64_t *guid)
3668 3668 {
3669 3669 nvlist_t *config;
3670 3670 nvlist_t *nvtop, *nvroot;
3671 3671 uint64_t pgid;
3672 3672
3673 3673 if (vdev_disk_read_rootlabel(devpath, devid, &config) != 0)
3674 3674 return (NULL);
3675 3675
3676 3676 /*
3677 3677 * Add this top-level vdev to the child array.
3678 3678 */
3679 3679 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3680 3680 &nvtop) == 0);
3681 3681 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
3682 3682 &pgid) == 0);
3683 3683 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, guid) == 0);
3684 3684
3685 3685 /*
3686 3686 * Put this pool's top-level vdevs into a root vdev.
3687 3687 */
3688 3688 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
3689 3689 VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
3690 3690 VDEV_TYPE_ROOT) == 0);
3691 3691 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
3692 3692 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0);
3693 3693 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
3694 3694 &nvtop, 1) == 0);
3695 3695
3696 3696 /*
3697 3697 * Replace the existing vdev_tree with the new root vdev in
3698 3698 * this pool's configuration (remove the old, add the new).
3699 3699 */
3700 3700 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
3701 3701 nvlist_free(nvroot);
3702 3702 return (config);
3703 3703 }
3704 3704
3705 3705 /*
3706 3706 * Walk the vdev tree and see if we can find a device with "better"
3707 3707 * configuration. A configuration is "better" if the label on that
3708 3708 * device has a more recent txg.
3709 3709 */
3710 3710 static void
3711 3711 spa_alt_rootvdev(vdev_t *vd, vdev_t **avd, uint64_t *txg)
3712 3712 {
3713 3713 for (int c = 0; c < vd->vdev_children; c++)
3714 3714 spa_alt_rootvdev(vd->vdev_child[c], avd, txg);
3715 3715
3716 3716 if (vd->vdev_ops->vdev_op_leaf) {
3717 3717 nvlist_t *label;
3718 3718 uint64_t label_txg;
3719 3719
3720 3720 if (vdev_disk_read_rootlabel(vd->vdev_physpath, vd->vdev_devid,
3721 3721 &label) != 0)
3722 3722 return;
3723 3723
3724 3724 VERIFY(nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG,
3725 3725 &label_txg) == 0);
3726 3726
3727 3727 /*
3728 3728 * Do we have a better boot device?
3729 3729 */
3730 3730 if (label_txg > *txg) {
3731 3731 *txg = label_txg;
3732 3732 *avd = vd;
3733 3733 }
3734 3734 nvlist_free(label);
3735 3735 }
3736 3736 }
3737 3737
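For example, with a mirrored root M(A,B) where A's label records txg 100
and B's records txg 102, the walk above leaves *txg at 102 and *avd
pointing at B, and spa_import_rootpool() below will then suggest booting
from B instead.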
3738 3738 /*
3739 3739 * Import a root pool.
3740 3740 *
3741 3741  * For x86, devpath_list will consist of the devid and/or physpath name of
3742 3742  * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a").
3743 3743  * The GRUB "findroot" command will return the vdev we should boot.
3744 3744  *
3745 3745  * For SPARC, devpath_list consists of the physpath name of the booting
3746 3746  * device, whether the root pool is a single-device pool or a mirrored
3747 3747  * pool, e.g.
3748 3748  *	"/pci@1f,0/ide@d/disk@0,0:a"
3749 3749 */
3750 3750 int
3751 3751 spa_import_rootpool(char *devpath, char *devid)
3752 3752 {
3753 3753 spa_t *spa;
3754 3754 vdev_t *rvd, *bvd, *avd = NULL;
3755 3755 nvlist_t *config, *nvtop;
3756 3756 uint64_t guid, txg;
3757 3757 char *pname;
3758 3758 int error;
3759 3759
3760 3760 /*
3761 3761 * Read the label from the boot device and generate a configuration.
3762 3762 */
3763 3763 config = spa_generate_rootconf(devpath, devid, &guid);
3764 3764 #if defined(_OBP) && defined(_KERNEL)
3765 3765 if (config == NULL) {
3766 3766 if (strstr(devpath, "/iscsi/ssd") != NULL) {
3767 3767 /* iscsi boot */
3768 3768 get_iscsi_bootpath_phy(devpath);
3769 3769 config = spa_generate_rootconf(devpath, devid, &guid);
3770 3770 }
3771 3771 }
3772 3772 #endif
3773 3773 if (config == NULL) {
3774 3774 cmn_err(CE_NOTE, "Cannot read the pool label from '%s'",
3775 3775 devpath);
3776 3776 return (SET_ERROR(EIO));
3777 3777 }
3778 3778
3779 3779 VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
3780 3780 &pname) == 0);
3781 3781 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);
3782 3782
3783 3783 mutex_enter(&spa_namespace_lock);
3784 3784 if ((spa = spa_lookup(pname)) != NULL) {
3785 3785 /*
3786 3786 * Remove the existing root pool from the namespace so that we
3787 3787 * can replace it with the correct config we just read in.
3788 3788 */
3789 3789 spa_remove(spa);
3790 3790 }
3791 3791
3792 3792 spa = spa_add(pname, config, NULL);
3793 3793 spa->spa_is_root = B_TRUE;
3794 3794 spa->spa_import_flags = ZFS_IMPORT_VERBATIM;
3795 3795
3796 3796 /*
3797 3797 * Build up a vdev tree based on the boot device's label config.
3798 3798 */
3799 3799 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3800 3800 &nvtop) == 0);
3801 3801 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3802 3802 error = spa_config_parse(spa, &rvd, nvtop, NULL, 0,
3803 3803 VDEV_ALLOC_ROOTPOOL);
3804 3804 spa_config_exit(spa, SCL_ALL, FTAG);
3805 3805 if (error) {
3806 3806 mutex_exit(&spa_namespace_lock);
3807 3807 nvlist_free(config);
3808 3808 cmn_err(CE_NOTE, "Can not parse the config for pool '%s'",
3809 3809 pname);
3810 3810 return (error);
3811 3811 }
3812 3812
3813 3813 /*
3814 3814 * Get the boot vdev.
3815 3815 */
3816 3816 if ((bvd = vdev_lookup_by_guid(rvd, guid)) == NULL) {
3817 3817 cmn_err(CE_NOTE, "Can not find the boot vdev for guid %llu",
3818 3818 (u_longlong_t)guid);
3819 3819 error = SET_ERROR(ENOENT);
3820 3820 goto out;
3821 3821 }
3822 3822
3823 3823 /*
3824 3824 * Determine if there is a better boot device.
3825 3825 */
3826 3826 avd = bvd;
3827 3827 spa_alt_rootvdev(rvd, &avd, &txg);
3828 3828 if (avd != bvd) {
3829 3829 cmn_err(CE_NOTE, "The boot device is 'degraded'. Please "
3830 3830 "try booting from '%s'", avd->vdev_path);
3831 3831 error = SET_ERROR(EINVAL);
3832 3832 goto out;
3833 3833 }
3834 3834
3835 3835 /*
3836 3836 * If the boot device is part of a spare vdev then ensure that
3837 3837 * we're booting off the active spare.
3838 3838 */
3839 3839 if (bvd->vdev_parent->vdev_ops == &vdev_spare_ops &&
3840 3840 !bvd->vdev_isspare) {
3841 3841 cmn_err(CE_NOTE, "The boot device is currently spared. Please "
3842 3842 "try booting from '%s'",
3843 3843 bvd->vdev_parent->
3844 3844 vdev_child[bvd->vdev_parent->vdev_children - 1]->vdev_path);
3845 3845 error = SET_ERROR(EINVAL);
3846 3846 goto out;
3847 3847 }
3848 3848
3849 3849 error = 0;
3850 3850 out:
3851 3851 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3852 3852 vdev_free(rvd);
3853 3853 spa_config_exit(spa, SCL_ALL, FTAG);
3854 3854 mutex_exit(&spa_namespace_lock);
3855 3855
3856 3856 nvlist_free(config);
3857 3857 return (error);
3858 3858 }
3859 3859
3860 3860 #endif
3861 3861
3862 3862 /*
3863 3863 * Import a non-root pool into the system.
3864 3864 */
3865 3865 int
3866 3866 spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
3867 3867 {
3868 3868 spa_t *spa;
3869 3869 char *altroot = NULL;
3870 3870 spa_load_state_t state = SPA_LOAD_IMPORT;
3871 3871 zpool_rewind_policy_t policy;
3872 3872 uint64_t mode = spa_mode_global;
3873 3873 uint64_t readonly = B_FALSE;
3874 3874 int error;
3875 3875 nvlist_t *nvroot;
3876 3876 nvlist_t **spares, **l2cache;
3877 3877 uint_t nspares, nl2cache;
3878 3878
3879 3879 /*
3880 3880 * If a pool with this name exists, return failure.
3881 3881 */
3882 3882 mutex_enter(&spa_namespace_lock);
3883 3883 if (spa_lookup(pool) != NULL) {
3884 3884 mutex_exit(&spa_namespace_lock);
3885 3885 return (SET_ERROR(EEXIST));
3886 3886 }
3887 3887
3888 3888 /*
3889 3889 * Create and initialize the spa structure.
3890 3890 */
3891 3891 (void) nvlist_lookup_string(props,
3892 3892 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
3893 3893 (void) nvlist_lookup_uint64(props,
3894 3894 zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
3895 3895 if (readonly)
3896 3896 mode = FREAD;
3897 3897 spa = spa_add(pool, config, altroot);
3898 3898 spa->spa_import_flags = flags;
3899 3899
3900 3900 /*
3901 3901 * Verbatim import - Take a pool and insert it into the namespace
3902 3902 * as if it had been loaded at boot.
3903 3903 */
3904 3904 if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) {
3905 3905 if (props != NULL)
3906 3906 spa_configfile_set(spa, props, B_FALSE);
3907 3907
3908 3908 spa_config_sync(spa, B_FALSE, B_TRUE);
3909 3909
3910 3910 mutex_exit(&spa_namespace_lock);
3911 3911 return (0);
3912 3912 }
3913 3913
3914 3914 spa_activate(spa, mode);
3915 3915
3916 3916 /*
3917 3917 * Don't start async tasks until we know everything is healthy.
3918 3918 */
3919 3919 spa_async_suspend(spa);
3920 3920
3921 3921 zpool_get_rewind_policy(config, &policy);
3922 3922 if (policy.zrp_request & ZPOOL_DO_REWIND)
3923 3923 state = SPA_LOAD_RECOVER;
3924 3924
3925 3925 /*
3926 3926 * Pass off the heavy lifting to spa_load(). Pass TRUE for mosconfig
3927 3927 * because the user-supplied config is actually the one to trust when
3928 3928 * doing an import.
3929 3929 */
3930 3930 if (state != SPA_LOAD_RECOVER)
3931 3931 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
3932 3932
3933 3933 error = spa_load_best(spa, state, B_TRUE, policy.zrp_txg,
3934 3934 policy.zrp_request);
3935 3935
3936 3936 /*
3937 3937 * Propagate anything learned while loading the pool and pass it
3938 3938 * back to caller (i.e. rewind info, missing devices, etc).
3939 3939 */
3940 3940 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
3941 3941 spa->spa_load_info) == 0);
3942 3942
3943 3943 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3944 3944 /*
3945 3945 * Toss any existing sparelist, as it doesn't have any validity
3946 3946 * anymore, and conflicts with spa_has_spare().
3947 3947 */
3948 3948 if (spa->spa_spares.sav_config) {
3949 3949 nvlist_free(spa->spa_spares.sav_config);
3950 3950 spa->spa_spares.sav_config = NULL;
3951 3951 spa_load_spares(spa);
3952 3952 }
3953 3953 if (spa->spa_l2cache.sav_config) {
3954 3954 nvlist_free(spa->spa_l2cache.sav_config);
3955 3955 spa->spa_l2cache.sav_config = NULL;
3956 3956 spa_load_l2cache(spa);
3957 3957 }
3958 3958
3959 3959 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3960 3960 &nvroot) == 0);
3961 3961 if (error == 0)
3962 3962 error = spa_validate_aux(spa, nvroot, -1ULL,
3963 3963 VDEV_ALLOC_SPARE);
3964 3964 if (error == 0)
3965 3965 error = spa_validate_aux(spa, nvroot, -1ULL,
3966 3966 VDEV_ALLOC_L2CACHE);
3967 3967 spa_config_exit(spa, SCL_ALL, FTAG);
3968 3968
3969 3969 if (props != NULL)
3970 3970 spa_configfile_set(spa, props, B_FALSE);
3971 3971
3972 3972 if (error != 0 || (props && spa_writeable(spa) &&
3973 3973 (error = spa_prop_set(spa, props)))) {
3974 3974 spa_unload(spa);
3975 3975 spa_deactivate(spa);
3976 3976 spa_remove(spa);
3977 3977 mutex_exit(&spa_namespace_lock);
3978 3978 return (error);
3979 3979 }
3980 3980
3981 3981 spa_async_resume(spa);
3982 3982
3983 3983 /*
3984 3984 * Override any spares and level 2 cache devices as specified by
3985 3985 * the user, as these may have correct device names/devids, etc.
3986 3986 */
3987 3987 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
3988 3988 &spares, &nspares) == 0) {
3989 3989 if (spa->spa_spares.sav_config)
3990 3990 VERIFY(nvlist_remove(spa->spa_spares.sav_config,
3991 3991 ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
3992 3992 else
3993 3993 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
3994 3994 NV_UNIQUE_NAME, KM_SLEEP) == 0);
3995 3995 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
3996 3996 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
3997 3997 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3998 3998 spa_load_spares(spa);
3999 3999 spa_config_exit(spa, SCL_ALL, FTAG);
4000 4000 spa->spa_spares.sav_sync = B_TRUE;
4001 4001 }
4002 4002 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
4003 4003 &l2cache, &nl2cache) == 0) {
4004 4004 if (spa->spa_l2cache.sav_config)
4005 4005 VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
4006 4006 ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
4007 4007 else
4008 4008 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
4009 4009 NV_UNIQUE_NAME, KM_SLEEP) == 0);
4010 4010 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
4011 4011 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
4012 4012 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4013 4013 spa_load_l2cache(spa);
4014 4014 spa_config_exit(spa, SCL_ALL, FTAG);
4015 4015 spa->spa_l2cache.sav_sync = B_TRUE;
4016 4016 }
4017 4017
4018 4018 /*
4019 4019 * Check for any removed devices.
4020 4020 */
4021 4021 if (spa->spa_autoreplace) {
4022 4022 spa_aux_check_removed(&spa->spa_spares);
4023 4023 spa_aux_check_removed(&spa->spa_l2cache);
4024 4024 }
4025 4025
4026 4026 if (spa_writeable(spa)) {
4027 4027 /*
4028 4028 * Update the config cache to include the newly-imported pool.
4029 4029 */
4030 4030 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
4031 4031 }
4032 4032
4033 4033 /*
4034 4034 * It's possible that the pool was expanded while it was exported.
4035 4035 * We kick off an async task to handle this for us.
4036 4036 */
4037 4037 spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
4038 4038
4039 4039 mutex_exit(&spa_namespace_lock);
4040 4040 spa_history_log_version(spa, "import");
4041 4041
4042 4042 return (0);
4043 4043 }
4044 4044
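A short sketch of the verbatim path described above: a caller that already
trusts its config (the `trusted_config' variable here is hypothetical)
inserts the pool without ever invoking spa_load().

	/* no load, no aux-device validation, no rewind handling */
	error = spa_import("tank", trusted_config, NULL, ZFS_IMPORT_VERBATIM);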
4045 4045 nvlist_t *
4046 4046 spa_tryimport(nvlist_t *tryconfig)
4047 4047 {
4048 4048 nvlist_t *config = NULL;
4049 4049 char *poolname;
4050 4050 spa_t *spa;
4051 4051 uint64_t state;
4052 4052 int error;
4053 4053
4054 4054 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
4055 4055 return (NULL);
4056 4056
4057 4057 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
4058 4058 return (NULL);
4059 4059
4060 4060 /*
4061 4061 * Create and initialize the spa structure.
4062 4062 */
4063 4063 mutex_enter(&spa_namespace_lock);
4064 4064 spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL);
4065 4065 spa_activate(spa, FREAD);
4066 4066
4067 4067 /*
4068 4068 * Pass off the heavy lifting to spa_load().
4069 4069 * Pass TRUE for mosconfig because the user-supplied config
4070 4070 * is actually the one to trust when doing an import.
4071 4071 */
4072 4072 error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING, B_TRUE);
4073 4073
4074 4074 /*
4075 4075 * If 'tryconfig' was at least parsable, return the current config.
4076 4076 */
4077 4077 if (spa->spa_root_vdev != NULL) {
4078 4078 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
4079 4079 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
4080 4080 poolname) == 0);
4081 4081 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
4082 4082 state) == 0);
4083 4083 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
4084 4084 spa->spa_uberblock.ub_timestamp) == 0);
4085 4085 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
4086 4086 spa->spa_load_info) == 0);
4087 4087
4088 4088 /*
4089 4089 * If the bootfs property exists on this pool then we
4090 4090 * copy it out so that external consumers can tell which
4091 4091 * pools are bootable.
4092 4092 */
4093 4093 if ((!error || error == EEXIST) && spa->spa_bootfs) {
4094 4094 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
4095 4095
4096 4096 /*
4097 4097 * We have to play games with the name since the
4098 4098 * pool was opened as TRYIMPORT_NAME.
4099 4099 */
4100 4100 if (dsl_dsobj_to_dsname(spa_name(spa),
4101 4101 spa->spa_bootfs, tmpname) == 0) {
4102 4102 char *cp;
4103 4103 char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
4104 4104
4105 4105 cp = strchr(tmpname, '/');
4106 4106 if (cp == NULL) {
4107 4107 (void) strlcpy(dsname, tmpname,
4108 4108 MAXPATHLEN);
4109 4109 } else {
4110 4110 (void) snprintf(dsname, MAXPATHLEN,
4111 4111 "%s/%s", poolname, ++cp);
4112 4112 }
4113 4113 VERIFY(nvlist_add_string(config,
4114 4114 ZPOOL_CONFIG_BOOTFS, dsname) == 0);
4115 4115 kmem_free(dsname, MAXPATHLEN);
4116 4116 }
4117 4117 kmem_free(tmpname, MAXPATHLEN);
4118 4118 }
4119 4119
4120 4120 /*
4121 4121 * Add the list of hot spares and level 2 cache devices.
4122 4122 */
4123 4123 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
4124 4124 spa_add_spares(spa, config);
4125 4125 spa_add_l2cache(spa, config);
4126 4126 spa_config_exit(spa, SCL_CONFIG, FTAG);
4127 4127 }
4128 4128
4129 4129 spa_unload(spa);
4130 4130 spa_deactivate(spa);
4131 4131 spa_remove(spa);
4132 4132 mutex_exit(&spa_namespace_lock);
4133 4133
4134 4134 return (config);
4135 4135 }
4136 4136
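A brief consumer-side sketch; `tryconfig' is assumed to come from a label
scan, and the keys are the ZPOOL_CONFIG_* constants populated above.

	nvlist_t *config;
	char *name;
	uint64_t state;

	if ((config = spa_tryimport(tryconfig)) != NULL) {
		VERIFY(nvlist_lookup_string(config,
		    ZPOOL_CONFIG_POOL_NAME, &name) == 0);
		VERIFY(nvlist_lookup_uint64(config,
		    ZPOOL_CONFIG_POOL_STATE, &state) == 0);
		/* inspect ZPOOL_CONFIG_LOAD_INFO, bootfs, spares, ... */
		nvlist_free(config);
	}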
4137 4137 /*
4138 4138 * Pool export/destroy
4139 4139 *
4140 4140 * The act of destroying or exporting a pool is very simple. We make sure there
4141 4141  * is no more pending I/O and that all references to the pool are gone. Then, we
4142 4142 * update the pool state and sync all the labels to disk, removing the
4143 4143 * configuration from the cache afterwards. If the 'hardforce' flag is set, then
4144 4144 * we don't sync the labels or remove the configuration cache.
4145 4145 */
4146 4146 static int
4147 4147 spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
4148 4148 boolean_t force, boolean_t hardforce)
4149 4149 {
4150 4150 spa_t *spa;
4151 4151
4152 4152 if (oldconfig)
4153 4153 *oldconfig = NULL;
4154 4154
4155 4155 if (!(spa_mode_global & FWRITE))
4156 4156 return (SET_ERROR(EROFS));
4157 4157
4158 4158 mutex_enter(&spa_namespace_lock);
4159 4159 if ((spa = spa_lookup(pool)) == NULL) {
4160 4160 mutex_exit(&spa_namespace_lock);
4161 4161 return (SET_ERROR(ENOENT));
4162 4162 }
4163 4163
4164 4164 /*
4165 4165 * Put a hold on the pool, drop the namespace lock, stop async tasks,
4166 4166 * reacquire the namespace lock, and see if we can export.
4167 4167 */
4168 4168 spa_open_ref(spa, FTAG);
4169 4169 mutex_exit(&spa_namespace_lock);
4170 4170 spa_async_suspend(spa);
4171 4171 mutex_enter(&spa_namespace_lock);
4172 4172 spa_close(spa, FTAG);
4173 4173
4174 4174 /*
4175 4175 * The pool will be in core if it's openable,
4176 4176 * in which case we can modify its state.
4177 4177 */
4178 4178 if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
4179 4179 /*
4180 4180 * Objsets may be open only because they're dirty, so we
4181 4181 * have to force it to sync before checking spa_refcnt.
4182 4182 */
4183 4183 txg_wait_synced(spa->spa_dsl_pool, 0);
4184 4184
4185 4185 /*
4186 4186 * A pool cannot be exported or destroyed if there are active
4187 4187 * references. If we are resetting a pool, allow references by
4188 4188 * fault injection handlers.
4189 4189 */
4190 4190 if (!spa_refcount_zero(spa) ||
4191 4191 (spa->spa_inject_ref != 0 &&
4192 4192 new_state != POOL_STATE_UNINITIALIZED)) {
4193 4193 spa_async_resume(spa);
4194 4194 mutex_exit(&spa_namespace_lock);
4195 4195 return (SET_ERROR(EBUSY));
4196 4196 }
4197 4197
4198 4198 /*
4199 4199 * A pool cannot be exported if it has an active shared spare.
4200 4200 		 * This is to prevent other pools from stealing the active
4201 4201 		 * spare from an exported pool. At the user's discretion, such
4202 4202 		 * a pool can still be forcibly exported.
4203 4203 */
4204 4204 if (!force && new_state == POOL_STATE_EXPORTED &&
4205 4205 spa_has_active_shared_spare(spa)) {
4206 4206 spa_async_resume(spa);
4207 4207 mutex_exit(&spa_namespace_lock);
4208 4208 return (SET_ERROR(EXDEV));
4209 4209 }
4210 4210
4211 4211 /*
4212 4212 * We want this to be reflected on every label,
4213 4213 * so mark them all dirty. spa_unload() will do the
4214 4214 * final sync that pushes these changes out.
4215 4215 */
4216 4216 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
4217 4217 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4218 4218 spa->spa_state = new_state;
4219 4219 spa->spa_final_txg = spa_last_synced_txg(spa) +
4220 4220 TXG_DEFER_SIZE + 1;
4221 4221 vdev_config_dirty(spa->spa_root_vdev);
4222 4222 spa_config_exit(spa, SCL_ALL, FTAG);
4223 4223 }
4224 4224 }
4225 4225
4226 4226 spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY);
4227 4227
4228 4228 if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
4229 4229 spa_unload(spa);
4230 4230 spa_deactivate(spa);
4231 4231 }
4232 4232
4233 4233 if (oldconfig && spa->spa_config)
4234 4234 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);
4235 4235
4236 4236 if (new_state != POOL_STATE_UNINITIALIZED) {
4237 4237 if (!hardforce)
4238 4238 spa_config_sync(spa, B_TRUE, B_TRUE);
4239 4239 spa_remove(spa);
4240 4240 }
4241 4241 mutex_exit(&spa_namespace_lock);
4242 4242
4243 4243 return (0);
4244 4244 }
4245 4245
4246 4246 /*
4247 4247 * Destroy a storage pool.
4248 4248 */
4249 4249 int
4250 4250 spa_destroy(char *pool)
4251 4251 {
4252 4252 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
4253 4253 B_FALSE, B_FALSE));
4254 4254 }
4255 4255
4256 4256 /*
4257 4257 * Export a storage pool.
4258 4258 */
4259 4259 int
4260 4260 spa_export(char *pool, nvlist_t **oldconfig, boolean_t force,
4261 4261 boolean_t hardforce)
4262 4262 {
4263 4263 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
4264 4264 force, hardforce));
4265 4265 }
4266 4266
4267 4267 /*
4268 4268 * Similar to spa_export(), this unloads the spa_t without actually removing it
4269 4269 * from the namespace in any way.
4270 4270 */
4271 4271 int
4272 4272 spa_reset(char *pool)
4273 4273 {
4274 4274 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
4275 4275 B_FALSE, B_FALSE));
4276 4276 }
4277 4277
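For quick reference, the three wrappers above map onto spa_export_common()
as (new_state, oldconfig, force, hardforce):

	spa_destroy():	POOL_STATE_DESTROYED, NULL, B_FALSE, B_FALSE
	spa_export():	POOL_STATE_EXPORTED, oldconfig, force, hardforce
	spa_reset():	POOL_STATE_UNINITIALIZED, NULL, B_FALSE, B_FALSE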
4278 4278 /*
4279 4279 * ==========================================================================
4280 4280 * Device manipulation
4281 4281 * ==========================================================================
4282 4282 */
4283 4283
4284 4284 /*
4285 4285 * Add a device to a storage pool.
4286 4286 */
4287 4287 int
4288 4288 spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
4289 4289 {
4290 4290 uint64_t txg, id;
4291 4291 int error;
4292 4292 vdev_t *rvd = spa->spa_root_vdev;
4293 4293 vdev_t *vd, *tvd;
4294 4294 nvlist_t **spares, **l2cache;
4295 4295 uint_t nspares, nl2cache;
4296 4296
4297 4297 ASSERT(spa_writeable(spa));
4298 4298
4299 4299 txg = spa_vdev_enter(spa);
4300 4300
4301 4301 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
4302 4302 VDEV_ALLOC_ADD)) != 0)
4303 4303 return (spa_vdev_exit(spa, NULL, txg, error));
4304 4304
4305 4305 spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */
4306 4306
4307 4307 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
4308 4308 &nspares) != 0)
4309 4309 nspares = 0;
4310 4310
4311 4311 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
4312 4312 &nl2cache) != 0)
4313 4313 nl2cache = 0;
4314 4314
4315 4315 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
4316 4316 return (spa_vdev_exit(spa, vd, txg, EINVAL));
4317 4317
4318 4318 if (vd->vdev_children != 0 &&
4319 4319 (error = vdev_create(vd, txg, B_FALSE)) != 0)
4320 4320 return (spa_vdev_exit(spa, vd, txg, error));
4321 4321
4322 4322 /*
4323 4323 * We must validate the spares and l2cache devices after checking the
4324 4324 * children. Otherwise, vdev_inuse() will blindly overwrite the spare.
4325 4325 */
4326 4326 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
4327 4327 return (spa_vdev_exit(spa, vd, txg, error));
4328 4328
4329 4329 /*
4330 4330 * Transfer each new top-level vdev from vd to rvd.
4331 4331 */
4332 4332 for (int c = 0; c < vd->vdev_children; c++) {
4333 4333
4334 4334 /*
4335 4335 * Set the vdev id to the first hole, if one exists.
4336 4336 */
4337 4337 for (id = 0; id < rvd->vdev_children; id++) {
4338 4338 if (rvd->vdev_child[id]->vdev_ishole) {
4339 4339 vdev_free(rvd->vdev_child[id]);
4340 4340 break;
4341 4341 }
4342 4342 }
4343 4343 tvd = vd->vdev_child[c];
4344 4344 vdev_remove_child(vd, tvd);
4345 4345 tvd->vdev_id = id;
4346 4346 vdev_add_child(rvd, tvd);
4347 4347 vdev_config_dirty(tvd);
4348 4348 }
4349 4349
4350 4350 if (nspares != 0) {
4351 4351 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
4352 4352 ZPOOL_CONFIG_SPARES);
4353 4353 spa_load_spares(spa);
4354 4354 spa->spa_spares.sav_sync = B_TRUE;
4355 4355 }
4356 4356
4357 4357 if (nl2cache != 0) {
4358 4358 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
4359 4359 ZPOOL_CONFIG_L2CACHE);
4360 4360 spa_load_l2cache(spa);
4361 4361 spa->spa_l2cache.sav_sync = B_TRUE;
4362 4362 }
4363 4363
4364 4364 /*
4365 4365 * We have to be careful when adding new vdevs to an existing pool.
4366 4366 * If other threads start allocating from these vdevs before we
4367 4367 * sync the config cache, and we lose power, then upon reboot we may
4368 4368 * fail to open the pool because there are DVAs that the config cache
4369 4369 * can't translate. Therefore, we first add the vdevs without
4370 4370 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
4371 4371 * and then let spa_config_update() initialize the new metaslabs.
4372 4372 *
4373 4373 * spa_load() checks for added-but-not-initialized vdevs, so that
4374 4374 * if we lose power at any point in this sequence, the remaining
4375 4375 * steps will be completed the next time we load the pool.
4376 4376 */
4377 4377 (void) spa_vdev_exit(spa, vd, txg, 0);
4378 4378
4379 4379 mutex_enter(&spa_namespace_lock);
4380 4380 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
4381 4381 mutex_exit(&spa_namespace_lock);
4382 4382
4383 4383 return (0);
4384 4384 }
4385 4385
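One consequence of the hole-filling loop above: if, say, the root vdev's
child 2 is a hole left by an earlier device removal, the first new
top-level vdev takes id 2 in its place rather than being appended after
the last child.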
4386 4386 /*
4387 4387 * Attach a device to a mirror. The arguments are the path to any device
4388 4388 * in the mirror, and the nvroot for the new device. If the path specifies
4389 4389 * a device that is not mirrored, we automatically insert the mirror vdev.
4390 4390 *
4391 4391 * If 'replacing' is specified, the new device is intended to replace the
4392 4392 * existing device; in this case the two devices are made into their own
4393 4393 * mirror using the 'replacing' vdev, which is functionally identical to
4394 4394 * the mirror vdev (it actually reuses all the same ops) but has a few
4395 4395 * extra rules: you can't attach to it after it's been created, and upon
4396 4396 * completion of resilvering, the first disk (the one being replaced)
4397 4397 * is automatically detached.
4398 4398 */
4399 4399 int
4400 4400 spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
4401 4401 {
4402 4402 uint64_t txg, dtl_max_txg;
4403 4403 vdev_t *rvd = spa->spa_root_vdev;
4404 4404 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
4405 4405 vdev_ops_t *pvops;
4406 4406 char *oldvdpath, *newvdpath;
4407 4407 int newvd_isspare;
4408 4408 int error;
4409 4409
4410 4410 ASSERT(spa_writeable(spa));
4411 4411
4412 4412 txg = spa_vdev_enter(spa);
4413 4413
4414 4414 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
4415 4415
4416 4416 if (oldvd == NULL)
4417 4417 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
4418 4418
4419 4419 if (!oldvd->vdev_ops->vdev_op_leaf)
4420 4420 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
4421 4421
4422 4422 pvd = oldvd->vdev_parent;
4423 4423
4424 4424 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
4425 4425 VDEV_ALLOC_ATTACH)) != 0)
4426 4426 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
4427 4427
4428 4428 if (newrootvd->vdev_children != 1)
4429 4429 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
4430 4430
4431 4431 newvd = newrootvd->vdev_child[0];
4432 4432
4433 4433 if (!newvd->vdev_ops->vdev_op_leaf)
4434 4434 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
4435 4435
4436 4436 if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
4437 4437 return (spa_vdev_exit(spa, newrootvd, txg, error));
4438 4438
4439 4439 /*
4440 4440 * Spares can't replace logs
4441 4441 */
4442 4442 if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare)
4443 4443 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4444 4444
4445 4445 if (!replacing) {
4446 4446 /*
4447 4447 * For attach, the only allowable parent is a mirror or the root
4448 4448 * vdev.
4449 4449 */
4450 4450 if (pvd->vdev_ops != &vdev_mirror_ops &&
4451 4451 pvd->vdev_ops != &vdev_root_ops)
4452 4452 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4453 4453
4454 4454 pvops = &vdev_mirror_ops;
4455 4455 } else {
4456 4456 /*
4457 4457 * Active hot spares can only be replaced by inactive hot
4458 4458 * spares.
4459 4459 */
4460 4460 if (pvd->vdev_ops == &vdev_spare_ops &&
4461 4461 oldvd->vdev_isspare &&
4462 4462 !spa_has_spare(spa, newvd->vdev_guid))
4463 4463 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4464 4464
4465 4465 /*
4466 4466 * If the source is a hot spare, and the parent isn't already a
4467 4467 * spare, then we want to create a new hot spare. Otherwise, we
4468 4468 * want to create a replacing vdev. The user is not allowed to
4469 4469 * attach to a spared vdev child unless the 'isspare' state is
4470 4470 * the same (spare replaces spare, non-spare replaces
4471 4471 * non-spare).
4472 4472 */
4473 4473 if (pvd->vdev_ops == &vdev_replacing_ops &&
4474 4474 spa_version(spa) < SPA_VERSION_MULTI_REPLACE) {
4475 4475 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4476 4476 } else if (pvd->vdev_ops == &vdev_spare_ops &&
4477 4477 newvd->vdev_isspare != oldvd->vdev_isspare) {
4478 4478 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4479 4479 }
4480 4480
4481 4481 if (newvd->vdev_isspare)
4482 4482 pvops = &vdev_spare_ops;
4483 4483 else
4484 4484 pvops = &vdev_replacing_ops;
4485 4485 }
4486 4486
4487 4487 /*
4488 4488 * Make sure the new device is big enough.
4489 4489 */
4490 4490 if (newvd->vdev_asize < vdev_get_min_asize(oldvd))
4491 4491 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
4492 4492
4493 4493 /*
4494 4494 * The new device cannot have a higher alignment requirement
4495 4495 * than the top-level vdev.
4496 4496 */
4497 4497 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
4498 4498 return (spa_vdev_exit(spa, newrootvd, txg, EDOM));
4499 4499
4500 4500 /*
4501 4501 * If this is an in-place replacement, update oldvd's path and devid
4502 4502 * to make it distinguishable from newvd, and unopenable from now on.
4503 4503 */
4504 4504 if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
4505 4505 spa_strfree(oldvd->vdev_path);
4506 4506 oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
4507 4507 KM_SLEEP);
4508 4508 (void) sprintf(oldvd->vdev_path, "%s/%s",
4509 4509 newvd->vdev_path, "old");
4510 4510 if (oldvd->vdev_devid != NULL) {
4511 4511 spa_strfree(oldvd->vdev_devid);
4512 4512 oldvd->vdev_devid = NULL;
4513 4513 }
4514 4514 }
4515 4515
4516 4516 /* mark the device being resilvered */
4517 4517 newvd->vdev_resilver_txg = txg;
4518 4518
4519 4519 /*
4520 4520 * If the parent is not a mirror, or if we're replacing, insert the new
4521 4521 * mirror/replacing/spare vdev above oldvd.
4522 4522 */
4523 4523 if (pvd->vdev_ops != pvops)
4524 4524 pvd = vdev_add_parent(oldvd, pvops);
4525 4525
4526 4526 ASSERT(pvd->vdev_top->vdev_parent == rvd);
4527 4527 ASSERT(pvd->vdev_ops == pvops);
4528 4528 ASSERT(oldvd->vdev_parent == pvd);
4529 4529
4530 4530 /*
4531 4531 * Extract the new device from its root and add it to pvd.
4532 4532 */
4533 4533 vdev_remove_child(newrootvd, newvd);
4534 4534 newvd->vdev_id = pvd->vdev_children;
4535 4535 newvd->vdev_crtxg = oldvd->vdev_crtxg;
4536 4536 vdev_add_child(pvd, newvd);
4537 4537
4538 4538 tvd = newvd->vdev_top;
4539 4539 ASSERT(pvd->vdev_top == tvd);
4540 4540 ASSERT(tvd->vdev_parent == rvd);
4541 4541
4542 4542 vdev_config_dirty(tvd);
4543 4543
4544 4544 /*
4545 4545 * Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account
4546 4546 * for any dmu_sync-ed blocks. It will propagate upward when
4547 4547 * spa_vdev_exit() calls vdev_dtl_reassess().
4548 4548 */
4549 4549 dtl_max_txg = txg + TXG_CONCURRENT_STATES;
4550 4550
4551 4551 vdev_dtl_dirty(newvd, DTL_MISSING, TXG_INITIAL,
4552 4552 dtl_max_txg - TXG_INITIAL);
4553 4553
4554 4554 if (newvd->vdev_isspare) {
4555 4555 spa_spare_activate(newvd);
4556 4556 spa_event_notify(spa, newvd, ESC_ZFS_VDEV_SPARE);
4557 4557 }
4558 4558
4559 4559 oldvdpath = spa_strdup(oldvd->vdev_path);
4560 4560 newvdpath = spa_strdup(newvd->vdev_path);
4561 4561 newvd_isspare = newvd->vdev_isspare;
4562 4562
4563 4563 /*
4564 4564 * Mark newvd's DTL dirty in this txg.
4565 4565 */
4566 4566 vdev_dirty(tvd, VDD_DTL, newvd, txg);
4567 4567
4568 4568 /*
4569 4569 * Schedule the resilver to restart in the future. We do this to
4570 4570 * ensure that dmu_sync-ed blocks have been stitched into the
4571 4571 * respective datasets.
4572 4572 */
4573 4573 dsl_resilver_restart(spa->spa_dsl_pool, dtl_max_txg);
4574 4574
4575 4575 /*
4576 4576 * Commit the config
4577 4577 */
4578 4578 (void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0);
4579 4579
4580 4580 spa_history_log_internal(spa, "vdev attach", NULL,
4581 4581 "%s vdev=%s %s vdev=%s",
4582 4582 replacing && newvd_isspare ? "spare in" :
4583 4583 replacing ? "replace" : "attach", newvdpath,
4584 4584 replacing ? "for" : "to", oldvdpath);
4585 4585
4586 4586 spa_strfree(oldvdpath);
4587 4587 spa_strfree(newvdpath);
4588 4588
4589 4589 if (spa->spa_bootfs)
4590 4590 spa_event_notify(spa, newvd, ESC_ZFS_BOOTFS_VDEV_ATTACH);
4591 4591
4592 4592 return (0);
4593 4593 }
4594 4594
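The parent-vdev selection above reduces to a few resulting topologies
(M = mirror, R = replacing, S = spare):

	attach B to bare disk A:	A -> M(A,B)
	attach C to mirror M(A,B):	M(A,B) -> M(A,B,C)
	replace A with B:		A -> R(A,B)	(A detached after resilver)
	spare in S for A:		A -> S(A,S)	(detaching A later promotes S)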
4595 4595 /*
4596 4596 * Detach a device from a mirror or replacing vdev.
4597 4597 *
4598 4598 * If 'replace_done' is specified, only detach if the parent
4599 4599 * is a replacing vdev.
4600 4600 */
4601 4601 int
4602 4602 spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
4603 4603 {
4604 4604 uint64_t txg;
4605 4605 int error;
4606 4606 vdev_t *rvd = spa->spa_root_vdev;
4607 4607 vdev_t *vd, *pvd, *cvd, *tvd;
4608 4608 boolean_t unspare = B_FALSE;
4609 4609 uint64_t unspare_guid = 0;
4610 4610 char *vdpath;
4611 4611
4612 4612 ASSERT(spa_writeable(spa));
4613 4613
4614 4614 txg = spa_vdev_enter(spa);
4615 4615
4616 4616 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
4617 4617
4618 4618 if (vd == NULL)
4619 4619 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
4620 4620
4621 4621 if (!vd->vdev_ops->vdev_op_leaf)
4622 4622 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
4623 4623
4624 4624 pvd = vd->vdev_parent;
4625 4625
4626 4626 /*
4627 4627 * If the parent/child relationship is not as expected, don't do it.
4628 4628 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
4629 4629 * vdev that's replacing B with C. The user's intent in replacing
4630 4630 * is to go from M(A,B) to M(A,C). If the user decides to cancel
4631 4631 * the replace by detaching C, the expected behavior is to end up
4632 4632 * M(A,B). But suppose that right after deciding to detach C,
4633 4633 * the replacement of B completes. We would have M(A,C), and then
4634 4634 * ask to detach C, which would leave us with just A -- not what
4635 4635 * the user wanted. To prevent this, we make sure that the
4636 4636 * parent/child relationship hasn't changed -- in this example,
4637 4637 * that C's parent is still the replacing vdev R.
4638 4638 */
4639 4639 if (pvd->vdev_guid != pguid && pguid != 0)
4640 4640 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
4641 4641
4642 4642 /*
4643 4643 * Only 'replacing' or 'spare' vdevs can be replaced.
4644 4644 */
4645 4645 if (replace_done && pvd->vdev_ops != &vdev_replacing_ops &&
4646 4646 pvd->vdev_ops != &vdev_spare_ops)
4647 4647 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
4648 4648
4649 4649 ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
4650 4650 spa_version(spa) >= SPA_VERSION_SPARES);
4651 4651
4652 4652 /*
4653 4653 * Only mirror, replacing, and spare vdevs support detach.
4654 4654 */
4655 4655 if (pvd->vdev_ops != &vdev_replacing_ops &&
4656 4656 pvd->vdev_ops != &vdev_mirror_ops &&
4657 4657 pvd->vdev_ops != &vdev_spare_ops)
4658 4658 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
4659 4659
4660 4660 /*
4661 4661 * If this device has the only valid copy of some data,
4662 4662 * we cannot safely detach it.
4663 4663 */
4664 4664 if (vdev_dtl_required(vd))
4665 4665 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
4666 4666
4667 4667 ASSERT(pvd->vdev_children >= 2);
4668 4668
4669 4669 /*
4670 4670 * If we are detaching the second disk from a replacing vdev, then
4671 4671 * check to see if we changed the original vdev's path to have "/old"
4672 4672 * at the end in spa_vdev_attach(). If so, undo that change now.
4673 4673 */
4674 4674 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 &&
4675 4675 vd->vdev_path != NULL) {
4676 4676 size_t len = strlen(vd->vdev_path);
4677 4677
4678 4678 for (int c = 0; c < pvd->vdev_children; c++) {
4679 4679 cvd = pvd->vdev_child[c];
4680 4680
4681 4681 if (cvd == vd || cvd->vdev_path == NULL)
4682 4682 continue;
4683 4683
4684 4684 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
4685 4685 strcmp(cvd->vdev_path + len, "/old") == 0) {
4686 4686 spa_strfree(cvd->vdev_path);
4687 4687 cvd->vdev_path = spa_strdup(vd->vdev_path);
4688 4688 break;
4689 4689 }
4690 4690 }
4691 4691 }
4692 4692
4693 4693 /*
4694 4694 * If we are detaching the original disk from a spare, then it implies
4695 4695 * that the spare should become a real disk, and be removed from the
4696 4696 * active spare list for the pool.
4697 4697 */
4698 4698 if (pvd->vdev_ops == &vdev_spare_ops &&
4699 4699 vd->vdev_id == 0 &&
4700 4700 pvd->vdev_child[pvd->vdev_children - 1]->vdev_isspare)
4701 4701 unspare = B_TRUE;
4702 4702
4703 4703 /*
4704 4704 * Erase the disk labels so the disk can be used for other things.
4705 4705 * This must be done after all other error cases are handled,
4706 4706 * but before we disembowel vd (so we can still do I/O to it).
4707 4707 * But if we can't do it, don't treat the error as fatal --
4708 4708 * it may be that the unwritability of the disk is the reason
4709 4709 * it's being detached!
4710 4710 */
4711 4711 error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
4712 4712
4713 4713 /*
4714 4714 * Remove vd from its parent and compact the parent's children.
4715 4715 */
4716 4716 vdev_remove_child(pvd, vd);
4717 4717 vdev_compact_children(pvd);
4718 4718
4719 4719 /*
4720 4720 * Remember one of the remaining children so we can get tvd below.
4721 4721 */
4722 4722 cvd = pvd->vdev_child[pvd->vdev_children - 1];
4723 4723
4724 4724 /*
4725 4725 * If we need to remove the remaining child from the list of hot spares,
4726 4726 * do it now, marking the vdev as no longer a spare in the process.
4727 4727 * We must do this before vdev_remove_parent(), because that can
4728 4728 * change the GUID if it creates a new toplevel GUID. For a similar
4729 4729 * reason, we must remove the spare now, in the same txg as the detach;
4730 4730 * otherwise someone could attach a new sibling, change the GUID, and
4731 4731 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
4732 4732 */
4733 4733 if (unspare) {
4734 4734 ASSERT(cvd->vdev_isspare);
4735 4735 spa_spare_remove(cvd);
4736 4736 unspare_guid = cvd->vdev_guid;
4737 4737 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
4738 4738 cvd->vdev_unspare = B_TRUE;
4739 4739 }
4740 4740
4741 4741 /*
4742 4742 * If the parent mirror/replacing vdev only has one child,
4743 4743 * the parent is no longer needed. Remove it from the tree.
4744 4744 */
4745 4745 if (pvd->vdev_children == 1) {
4746 4746 if (pvd->vdev_ops == &vdev_spare_ops)
4747 4747 cvd->vdev_unspare = B_FALSE;
4748 4748 vdev_remove_parent(cvd);
4749 4749 }
4750 4750
4751 4751
4752 4752 /*
4753 4753 * We don't set tvd until now because the parent we just removed
4754 4754 * may have been the previous top-level vdev.
4755 4755 */
4756 4756 tvd = cvd->vdev_top;
4757 4757 ASSERT(tvd->vdev_parent == rvd);
4758 4758
4759 4759 /*
4760 4760 * Reevaluate the parent vdev state.
4761 4761 */
4762 4762 vdev_propagate_state(cvd);
4763 4763
4764 4764 /*
4765 4765 * If the 'autoexpand' property is set on the pool then automatically
4766 4766 * try to expand the size of the pool. For example if the device we
4767 4767 * just detached was smaller than the others, it may be possible to
4768 4768 * add metaslabs (i.e. grow the pool). We need to reopen the vdev
4769 4769 * first so that we can obtain the updated sizes of the leaf vdevs.
4770 4770 */
4771 4771 if (spa->spa_autoexpand) {
4772 4772 vdev_reopen(tvd);
4773 4773 vdev_expand(tvd, txg);
4774 4774 }
4775 4775
4776 4776 vdev_config_dirty(tvd);
4777 4777
4778 4778 /*
4779 4779 * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that
4780 4780 * vd->vdev_detached is set and free vd's DTL object in syncing context.
4781 4781 * But first make sure we're not on any *other* txg's DTL list, to
4782 4782 * prevent vd from being accessed after it's freed.
4783 4783 */
4784 4784 vdpath = spa_strdup(vd->vdev_path);
4785 4785 for (int t = 0; t < TXG_SIZE; t++)
4786 4786 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
4787 4787 vd->vdev_detached = B_TRUE;
4788 4788 vdev_dirty(tvd, VDD_DTL, vd, txg);
4789 4789
4790 4790 spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE);
4791 4791
4792 4792 /* hang on to the spa before we release the lock */
4793 4793 spa_open_ref(spa, FTAG);
4794 4794
4795 4795 error = spa_vdev_exit(spa, vd, txg, 0);
4796 4796
4797 4797 spa_history_log_internal(spa, "detach", NULL,
4798 4798 "vdev=%s", vdpath);
4799 4799 spa_strfree(vdpath);
4800 4800
4801 4801 /*
4802 4802 * If this was the removal of the original device in a hot spare vdev,
4803 4803 * then we want to go through and remove the device from the hot spare
4804 4804 * list of every other pool.
4805 4805 */
4806 4806 if (unspare) {
4807 4807 spa_t *altspa = NULL;
4808 4808
4809 4809 mutex_enter(&spa_namespace_lock);
4810 4810 while ((altspa = spa_next(altspa)) != NULL) {
4811 4811 if (altspa->spa_state != POOL_STATE_ACTIVE ||
4812 4812 altspa == spa)
4813 4813 continue;
4814 4814
4815 4815 spa_open_ref(altspa, FTAG);
4816 4816 mutex_exit(&spa_namespace_lock);
4817 4817 (void) spa_vdev_remove(altspa, unspare_guid, B_TRUE);
4818 4818 mutex_enter(&spa_namespace_lock);
4819 4819 spa_close(altspa, FTAG);
4820 4820 }
4821 4821 mutex_exit(&spa_namespace_lock);
4822 4822
4823 4823 /* search the rest of the vdevs for spares to remove */
4824 4824 spa_vdev_resilver_done(spa);
4825 4825 }
4826 4826
4827 4827 /* all done with the spa; OK to release */
4828 4828 mutex_enter(&spa_namespace_lock);
4829 4829 spa_close(spa, FTAG);
4830 4830 mutex_exit(&spa_namespace_lock);
4831 4831
4832 4832 return (error);
4833 4833 }
4834 4834
4835 4835 /*
4836 4836 * Split a set of devices from their mirrors, and create a new pool from them.
4837 4837 */
4838 4838 int
4839 4839 spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
4840 4840 nvlist_t *props, boolean_t exp)
4841 4841 {
4842 4842 int error = 0;
4843 4843 uint64_t txg, *glist;
4844 4844 spa_t *newspa;
4845 4845 uint_t c, children, lastlog;
4846 4846 nvlist_t **child, *nvl, *tmp;
4847 4847 dmu_tx_t *tx;
4848 4848 char *altroot = NULL;
4849 4849 vdev_t *rvd, **vml = NULL; /* vdev modify list */
4850 4850 boolean_t activate_slog;
4851 4851
4852 4852 ASSERT(spa_writeable(spa));
4853 4853
4854 4854 txg = spa_vdev_enter(spa);
4855 4855
4856 4856 /* clear the log and flush everything up to now */
4857 4857 activate_slog = spa_passivate_log(spa);
4858 4858 (void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
4859 4859 error = spa_offline_log(spa);
4860 4860 txg = spa_vdev_config_enter(spa);
4861 4861
4862 4862 if (activate_slog)
4863 4863 spa_activate_log(spa);
4864 4864
4865 4865 if (error != 0)
4866 4866 return (spa_vdev_exit(spa, NULL, txg, error));
4867 4867
4868 4868 /* check new spa name before going any further */
4869 4869 if (spa_lookup(newname) != NULL)
4870 4870 return (spa_vdev_exit(spa, NULL, txg, EEXIST));
4871 4871
4872 4872 /*
4873 4873 * scan through all the children to ensure they're all mirrors
4874 4874 */
4875 4875 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 ||
4876 4876 nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child,
4877 4877 &children) != 0)
4878 4878 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
4879 4879
4880 4880 /* first, check to ensure we've got the right child count */
4881 4881 rvd = spa->spa_root_vdev;
4882 4882 lastlog = 0;
4883 4883 for (c = 0; c < rvd->vdev_children; c++) {
4884 4884 vdev_t *vd = rvd->vdev_child[c];
4885 4885
4886 4886 /* don't count the holes & logs as children */
4887 4887 if (vd->vdev_islog || vd->vdev_ishole) {
4888 4888 if (lastlog == 0)
4889 4889 lastlog = c;
4890 4890 continue;
4891 4891 }
4892 4892
4893 4893 lastlog = 0;
4894 4894 }
4895 4895 if (children != (lastlog != 0 ? lastlog : rvd->vdev_children))
4896 4896 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
4897 4897
4898 4898 /* next, ensure no spare or cache devices are part of the split */
4899 4899 if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 ||
4900 4900 nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0)
4901 4901 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
4902 4902
4903 4903 vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP);
4904 4904 glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP);
4905 4905
4906 4906 /* then, loop over each vdev and validate it */
4907 4907 for (c = 0; c < children; c++) {
4908 4908 uint64_t is_hole = 0;
4909 4909
4910 4910 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
4911 4911 &is_hole);
4912 4912
4913 4913 if (is_hole != 0) {
4914 4914 if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole ||
4915 4915 spa->spa_root_vdev->vdev_child[c]->vdev_islog) {
4916 4916 continue;
4917 4917 } else {
4918 4918 error = SET_ERROR(EINVAL);
4919 4919 break;
4920 4920 }
4921 4921 }
4922 4922
4923 4923 /* which disk is going to be split? */
4924 4924 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID,
4925 4925 &glist[c]) != 0) {
4926 4926 error = SET_ERROR(EINVAL);
4927 4927 break;
4928 4928 }
4929 4929
4930 4930 /* look it up in the spa */
4931 4931 vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE);
4932 4932 if (vml[c] == NULL) {
4933 4933 error = SET_ERROR(ENODEV);
4934 4934 break;
4935 4935 }
4936 4936
4937 4937 /* make sure there's nothing stopping the split */
4938 4938 if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops ||
4939 4939 vml[c]->vdev_islog ||
4940 4940 vml[c]->vdev_ishole ||
4941 4941 vml[c]->vdev_isspare ||
4942 4942 vml[c]->vdev_isl2cache ||
4943 4943 !vdev_writeable(vml[c]) ||
4944 4944 vml[c]->vdev_children != 0 ||
4945 4945 vml[c]->vdev_state != VDEV_STATE_HEALTHY ||
4946 4946 c != spa->spa_root_vdev->vdev_child[c]->vdev_id) {
4947 4947 error = SET_ERROR(EINVAL);
4948 4948 break;
4949 4949 }
4950 4950
4951 4951 if (vdev_dtl_required(vml[c])) {
4952 4952 error = SET_ERROR(EBUSY);
4953 4953 break;
4954 4954 }
4955 4955
4956 4956 /* we need certain info from the top level */
4957 4957 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY,
4958 4958 vml[c]->vdev_top->vdev_ms_array) == 0);
4959 4959 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT,
4960 4960 vml[c]->vdev_top->vdev_ms_shift) == 0);
4961 4961 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE,
4962 4962 vml[c]->vdev_top->vdev_asize) == 0);
4963 4963 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT,
4964 4964 vml[c]->vdev_top->vdev_ashift) == 0);
4965 4965 }
4966 4966
4967 4967 if (error != 0) {
4968 4968 kmem_free(vml, children * sizeof (vdev_t *));
4969 4969 kmem_free(glist, children * sizeof (uint64_t));
4970 4970 return (spa_vdev_exit(spa, NULL, txg, error));
4971 4971 }
4972 4972
4973 4973 /* stop writers from using the disks */
4974 4974 for (c = 0; c < children; c++) {
4975 4975 if (vml[c] != NULL)
4976 4976 vml[c]->vdev_offline = B_TRUE;
4977 4977 }
4978 4978 vdev_reopen(spa->spa_root_vdev);
4979 4979
4980 4980 /*
4981 4981 * Temporarily record the splitting vdevs in the spa config. This
4982 4982 * will disappear once the config is regenerated.
4983 4983 */
4984 4984 VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0);
4985 4985 VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
4986 4986 glist, children) == 0);
4987 4987 kmem_free(glist, children * sizeof (uint64_t));
4988 4988
4989 4989 mutex_enter(&spa->spa_props_lock);
4990 4990 VERIFY(nvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT,
4991 4991 nvl) == 0);
4992 4992 mutex_exit(&spa->spa_props_lock);
4993 4993 spa->spa_config_splitting = nvl;
4994 4994 vdev_config_dirty(spa->spa_root_vdev);
4995 4995
4996 4996 /* configure and create the new pool */
4997 4997 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname) == 0);
4998 4998 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
4999 4999 exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE) == 0);
5000 5000 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
5001 5001 spa_version(spa)) == 0);
5002 5002 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG,
5003 5003 spa->spa_config_txg) == 0);
5004 5004 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
5005 5005 spa_generate_guid(NULL)) == 0);
5006 5006 (void) nvlist_lookup_string(props,
5007 5007 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
5008 5008
5009 5009 /* add the new pool to the namespace */
5010 5010 newspa = spa_add(newname, config, altroot);
5011 5011 newspa->spa_config_txg = spa->spa_config_txg;
5012 5012 spa_set_log_state(newspa, SPA_LOG_CLEAR);
5013 5013
5014 5014 /* release the spa config lock, retaining the namespace lock */
5015 5015 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
5016 5016
5017 5017 if (zio_injection_enabled)
5018 5018 zio_handle_panic_injection(spa, FTAG, 1);
5019 5019
5020 5020 spa_activate(newspa, spa_mode_global);
5021 5021 spa_async_suspend(newspa);
5022 5022
5023 5023 /* create the new pool from the disks of the original pool */
5024 5024 error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE, B_TRUE);
5025 5025 if (error)
5026 5026 goto out;
5027 5027
5028 5028 /* if that worked, generate a real config for the new pool */
5029 5029 if (newspa->spa_root_vdev != NULL) {
5030 5030 VERIFY(nvlist_alloc(&newspa->spa_config_splitting,
5031 5031 NV_UNIQUE_NAME, KM_SLEEP) == 0);
5032 5032 VERIFY(nvlist_add_uint64(newspa->spa_config_splitting,
5033 5033 ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0);
5034 5034 spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL,
5035 5035 B_TRUE));
5036 5036 }
5037 5037
5038 5038 /* set the props */
5039 5039 if (props != NULL) {
5040 5040 spa_configfile_set(newspa, props, B_FALSE);
5041 5041 error = spa_prop_set(newspa, props);
5042 5042 if (error)
5043 5043 goto out;
5044 5044 }
5045 5045
5046 5046 /* flush everything */
5047 5047 txg = spa_vdev_config_enter(newspa);
5048 5048 vdev_config_dirty(newspa->spa_root_vdev);
5049 5049 (void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG);
5050 5050
5051 5051 if (zio_injection_enabled)
5052 5052 zio_handle_panic_injection(spa, FTAG, 2);
5053 5053
5054 5054 spa_async_resume(newspa);
5055 5055
5056 5056 /* finally, update the original pool's config */
5057 5057 txg = spa_vdev_config_enter(spa);
5058 5058 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
5059 5059 error = dmu_tx_assign(tx, TXG_WAIT);
5060 5060 if (error != 0)
5061 5061 dmu_tx_abort(tx);
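/*
 * Even if the tx could not be assigned, the vdevs are still split and
 * freed below; only the per-vdev history logging and the final
 * dmu_tx_commit() are skipped when error != 0.
 */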
5062 5062 for (c = 0; c < children; c++) {
5063 5063 if (vml[c] != NULL) {
5064 5064 vdev_split(vml[c]);
5065 5065 if (error == 0)
5066 5066 spa_history_log_internal(spa, "detach", tx,
5067 5067 "vdev=%s", vml[c]->vdev_path);
5068 5068 vdev_free(vml[c]);
5069 5069 }
5070 5070 }
5071 5071 vdev_config_dirty(spa->spa_root_vdev);
5072 5072 spa->spa_config_splitting = NULL;
5073 5073 nvlist_free(nvl);
5074 5074 if (error == 0)
5075 5075 dmu_tx_commit(tx);
5076 5076 (void) spa_vdev_exit(spa, NULL, txg, 0);
5077 5077
5078 5078 if (zio_injection_enabled)
5079 5079 zio_handle_panic_injection(spa, FTAG, 3);
5080 5080
5081 5081 /* split is complete; log a history record */
5082 5082 spa_history_log_internal(newspa, "split", NULL,
5083 5083 "from pool %s", spa_name(spa));
5084 5084
5085 5085 kmem_free(vml, children * sizeof (vdev_t *));
5086 5086
5087 5087 /* if we're not going to mount the filesystems in userland, export */
5088 5088 if (exp)
5089 5089 error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL,
5090 5090 B_FALSE, B_FALSE);
5091 5091
5092 5092 return (error);
5093 5093
5094 5094 out:
5095 5095 spa_unload(newspa);
5096 5096 spa_deactivate(newspa);
5097 5097 spa_remove(newspa);
5098 5098
5099 5099 txg = spa_vdev_config_enter(spa);
5100 5100
5101 5101 /* re-online all offlined disks */
5102 5102 for (c = 0; c < children; c++) {
5103 5103 if (vml[c] != NULL)
5104 5104 vml[c]->vdev_offline = B_FALSE;
5105 5105 }
5106 5106 vdev_reopen(spa->spa_root_vdev);
5107 5107
5108 5108 nvlist_free(spa->spa_config_splitting);
5109 5109 spa->spa_config_splitting = NULL;
5110 5110 (void) spa_vdev_exit(spa, NULL, txg, error);
5111 5111
5112 5112 kmem_free(vml, children * sizeof (vdev_t *));
5113 5113 return (error);
5114 5114 }
5115 5115
5116 5116 static nvlist_t *
5117 5117 spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
5118 5118 {
5119 5119 for (int i = 0; i < count; i++) {
5120 5120 uint64_t guid;
5121 5121
5122 5122 VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID,
5123 5123 &guid) == 0);
5124 5124
5125 5125 if (guid == target_guid)
5126 5126 return (nvpp[i]);
5127 5127 }
5128 5128
5129 5129 return (NULL);
5130 5130 }
5131 5131
5132 5132 static void
5133 5133 spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
5134 5134 nvlist_t *dev_to_remove)
5135 5135 {
5136 5136 nvlist_t **newdev = NULL;
5137 5137
5138 5138 if (count > 1)
5139 5139 newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);
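/*
 * If we're removing the only device, newdev stays NULL and the nvlist
 * array below is replaced with an empty (NULL, 0) array.
 */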
5140 5140
5141 5141 for (int i = 0, j = 0; i < count; i++) {
5142 5142 if (dev[i] == dev_to_remove)
5143 5143 continue;
5144 5144 VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
5145 5145 }
5146 5146
5147 5147 VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
5148 5148 VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);
5149 5149
5150 5150 for (int i = 0; i < count - 1; i++)
5151 5151 nvlist_free(newdev[i]);
5152 5152
5153 5153 if (count > 1)
5154 5154 kmem_free(newdev, (count - 1) * sizeof (void *));
5155 5155 }
5156 5156
5157 5157 /*
5158 5158 * Evacuate the device.
5159 5159 */
5160 5160 static int
5161 5161 spa_vdev_remove_evacuate(spa_t *spa, vdev_t *vd)
5162 5162 {
5163 5163 uint64_t txg;
5164 5164 int error = 0;
5165 5165
5166 5166 ASSERT(MUTEX_HELD(&spa_namespace_lock));
5167 5167 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
5168 5168 ASSERT(vd == vd->vdev_top);
5169 5169
5170 5170 /*
5171 5171 * Evacuate the device. We don't hold the config lock as writer
5172 5172 * since we need to do I/O, but we do keep the
5173 5173 * spa_namespace_lock held. Once this completes the device
5174 5174 * should no longer have any blocks allocated on it.
5175 5175 */
5176 5176 if (vd->vdev_islog) {
5177 5177 if (vd->vdev_stat.vs_alloc != 0)
5178 5178 error = spa_offline_log(spa);
5179 5179 } else {
5180 5180 error = SET_ERROR(ENOTSUP);
5181 5181 }
5182 5182
5183 5183 if (error)
5184 5184 return (error);
5185 5185
5186 5186 /*
5187 5187 * The evacuation succeeded. Remove any remaining MOS metadata
5188 5188 * associated with this vdev, and wait for these changes to sync.
5189 5189 */
5190 5190 ASSERT0(vd->vdev_stat.vs_alloc);
5191 5191 txg = spa_vdev_config_enter(spa);
5192 5192 vd->vdev_removing = B_TRUE;
5193 5193 vdev_dirty_leaves(vd, VDD_DTL, txg);
5194 5194 vdev_config_dirty(vd);
5195 5195 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
5196 5196
5197 5197 return (0);
5198 5198 }
5199 5199
5200 5200 /*
5201 5201 * Complete the removal by cleaning up the namespace.
5202 5202 */
5203 5203 static void
5204 5204 spa_vdev_remove_from_namespace(spa_t *spa, vdev_t *vd)
5205 5205 {
5206 5206 vdev_t *rvd = spa->spa_root_vdev;
5207 5207 uint64_t id = vd->vdev_id;
5208 5208 boolean_t last_vdev = (id == (rvd->vdev_children - 1));
5209 5209
5210 5210 ASSERT(MUTEX_HELD(&spa_namespace_lock));
5211 5211 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
5212 5212 ASSERT(vd == vd->vdev_top);
5213 5213
5214 5214 /*
5215 5215 * Only remove any devices which are empty.
5216 5216 */
5217 5217 if (vd->vdev_stat.vs_alloc != 0)
5218 5218 return;
5219 5219
5220 5220 (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
5221 5221
5222 5222 if (list_link_active(&vd->vdev_state_dirty_node))
5223 5223 vdev_state_clean(vd);
5224 5224 if (list_link_active(&vd->vdev_config_dirty_node))
5225 5225 vdev_config_clean(vd);
5226 5226
5227 5227 vdev_free(vd);
5228 5228
5229 5229 if (last_vdev) {
5230 5230 vdev_compact_children(rvd);
5231 5231 } else {
5232 5232 vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops);
5233 5233 vdev_add_child(rvd, vd);
5234 5234 }
5235 5235 vdev_config_dirty(rvd);
5236 5236
5237 5237 /*
5238 5238 * Reassess the health of our root vdev.
5239 5239 */
5240 5240 vdev_reopen(rvd);
5241 5241 }
5242 5242
5243 5243 /*
5244 5244 * Remove a device from the pool -
5245 5245 *
5246 5246 * Removing a device from the vdev namespace requires several steps
5247 5247 * and can take a significant amount of time. As a result we use
5248 5248 * the spa_vdev_config_[enter/exit] functions which allow us to
5249 5249 * grab and release the spa_config_lock while still holding the namespace
5250 5250 * lock. During each step the configuration is synced out.
5251 5251 *
5252 5252 * Currently, this supports removing only hot spares, slogs, and level 2 ARC
5253 5253 * devices.
5254 5254 */
5255 5255 int
5256 5256 spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
5257 5257 {
5258 5258 vdev_t *vd;
5259 5259 metaslab_group_t *mg;
5260 5260 nvlist_t **spares, **l2cache, *nv;
5261 5261 uint64_t txg = 0;
5262 5262 uint_t nspares, nl2cache;
5263 5263 int error = 0;
5264 5264 boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
5265 5265
5266 5266 ASSERT(spa_writeable(spa));
5267 5267
5268 5268 if (!locked)
5269 5269 txg = spa_vdev_enter(spa);
5270 5270
5271 5271 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
5272 5272
5273 5273 if (spa->spa_spares.sav_vdevs != NULL &&
5274 5274 nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
5275 5275 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
5276 5276 (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
5277 5277 /*
5278 5278 * Only remove the hot spare if it's not currently in use
5279 5279 * in this pool.
5280 5280 */
5281 5281 if (vd == NULL || unspare) {
5282 5282 spa_vdev_remove_aux(spa->spa_spares.sav_config,
5283 5283 ZPOOL_CONFIG_SPARES, spares, nspares, nv);
5284 5284 spa_load_spares(spa);
5285 5285 spa->spa_spares.sav_sync = B_TRUE;
5286 5286 } else {
5287 5287 error = SET_ERROR(EBUSY);
5288 5288 }
5289 5289 } else if (spa->spa_l2cache.sav_vdevs != NULL &&
5290 5290 nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
5291 5291 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
5292 5292 (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
5293 5293 /*
5294 5294 * Cache devices can always be removed.
5295 5295 */
5296 5296 spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
5297 5297 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
5298 5298 spa_load_l2cache(spa);
5299 5299 spa->spa_l2cache.sav_sync = B_TRUE;
5300 5300 } else if (vd != NULL && vd->vdev_islog) {
5301 5301 ASSERT(!locked);
5302 5302 ASSERT(vd == vd->vdev_top);
5303 5303
5304 5304 mg = vd->vdev_mg;
5305 5305
5306 5306 /*
5307 5307 * Stop allocating from this vdev.
5308 5308 */
5309 5309 metaslab_group_passivate(mg);
5310 5310
5311 5311 /*
5312 5312 * Wait for the youngest allocations and frees to sync,
5313 5313 * and then wait for the deferral of those frees to finish.
5314 5314 */
5315 5315 spa_vdev_config_exit(spa, NULL,
5316 5316 txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
5317 5317
5318 5318 /*
5319 5319 * Attempt to evacuate the vdev.
5320 5320 */
5321 5321 error = spa_vdev_remove_evacuate(spa, vd);
5322 5322
5323 5323 txg = spa_vdev_config_enter(spa);
5324 5324
5325 5325 /*
5326 5326 * If we couldn't evacuate the vdev, unwind.
5327 5327 */
5328 5328 if (error) {
5329 5329 metaslab_group_activate(mg);
5330 5330 return (spa_vdev_exit(spa, NULL, txg, error));
5331 5331 }
5332 5332
5333 5333 /*
5334 5334 * Clean up the vdev namespace.
5335 5335 */
5336 5336 spa_vdev_remove_from_namespace(spa, vd);
5337 5337
5338 5338 } else if (vd != NULL) {
5339 5339 /*
5340 5340 * Normal vdevs cannot be removed (yet).
5341 5341 */
5342 5342 error = SET_ERROR(ENOTSUP);
5343 5343 } else {
5344 5344 /*
5345 5345 * There is no vdev of any kind with the specified guid.
5346 5346 */
5347 5347 error = SET_ERROR(ENOENT);
5348 5348 }
5349 5349
5350 5350 if (!locked)
5351 5351 return (spa_vdev_exit(spa, NULL, txg, error));
5352 5352
5353 5353 return (error);
5354 5354 }
5355 5355
5356 5356 /*
5357 5357 * Find any device that's done replacing, or a vdev marked 'unspare' that's
5358 5358 * currently spared, so we can detach it.
5359 5359 */
5360 5360 static vdev_t *
5361 5361 spa_vdev_resilver_done_hunt(vdev_t *vd)
5362 5362 {
5363 5363 vdev_t *newvd, *oldvd;
5364 5364
5365 5365 for (int c = 0; c < vd->vdev_children; c++) {
5366 5366 oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
5367 5367 if (oldvd != NULL)
5368 5368 return (oldvd);
5369 5369 }
5370 5370
5371 5371 /*
5372 5372 * Check for a completed replacement. We always consider the first
5373 5373 * vdev in the list to be the oldest vdev, and the last one to be
5374 5374 * the newest (see spa_vdev_attach() for how that works). In
5375 5375 * the case where the newest vdev is faulted, we will not automatically
5376 5376 * remove it after a resilver completes. This is OK as it will require
5377 5377 * user intervention to determine which disk the admin wishes to keep.
5378 5378 */
5379 5379 if (vd->vdev_ops == &vdev_replacing_ops) {
5380 5380 ASSERT(vd->vdev_children > 1);
5381 5381
5382 5382 newvd = vd->vdev_child[vd->vdev_children - 1];
5383 5383 oldvd = vd->vdev_child[0];
5384 5384
5385 5385 if (vdev_dtl_empty(newvd, DTL_MISSING) &&
5386 5386 vdev_dtl_empty(newvd, DTL_OUTAGE) &&
5387 5387 !vdev_dtl_required(oldvd))
5388 5388 return (oldvd);
5389 5389 }
5390 5390
5391 5391 /*
5392 5392 * Check for a completed resilver with the 'unspare' flag set.
5393 5393 */
5394 5394 if (vd->vdev_ops == &vdev_spare_ops) {
5395 5395 vdev_t *first = vd->vdev_child[0];
5396 5396 vdev_t *last = vd->vdev_child[vd->vdev_children - 1];
5397 5397
5398 5398 if (last->vdev_unspare) {
5399 5399 oldvd = first;
5400 5400 newvd = last;
5401 5401 } else if (first->vdev_unspare) {
5402 5402 oldvd = last;
5403 5403 newvd = first;
5404 5404 } else {
5405 5405 oldvd = NULL;
5406 5406 }
5407 5407
5408 5408 if (oldvd != NULL &&
5409 5409 vdev_dtl_empty(newvd, DTL_MISSING) &&
5410 5410 vdev_dtl_empty(newvd, DTL_OUTAGE) &&
5411 5411 !vdev_dtl_required(oldvd))
5412 5412 return (oldvd);
5413 5413
5414 5414 /*
5415 5415 * If there are more than two spares attached to a disk,
5416 5416 * and those spares are not required, then we want to
5417 5417 * attempt to free them up now so that they can be used
5418 5418 * by other pools. Once we're back down to a single
5419 5419 * disk+spare, we stop removing them.
5420 5420 */
5421 5421 if (vd->vdev_children > 2) {
5422 5422 newvd = vd->vdev_child[1];
5423 5423
5424 5424 if (newvd->vdev_isspare && last->vdev_isspare &&
5425 5425 vdev_dtl_empty(last, DTL_MISSING) &&
5426 5426 vdev_dtl_empty(last, DTL_OUTAGE) &&
5427 5427 !vdev_dtl_required(newvd))
5428 5428 return (newvd);
5429 5429 }
5430 5430 }
5431 5431
5432 5432 return (NULL);
5433 5433 }
5434 5434
5435 5435 static void
5436 5436 spa_vdev_resilver_done(spa_t *spa)
5437 5437 {
5438 5438 vdev_t *vd, *pvd, *ppvd;
5439 5439 uint64_t guid, sguid, pguid, ppguid;
5440 5440
5441 5441 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5442 5442
5443 5443 while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
5444 5444 pvd = vd->vdev_parent;
5445 5445 ppvd = pvd->vdev_parent;
5446 5446 guid = vd->vdev_guid;
5447 5447 pguid = pvd->vdev_guid;
5448 5448 ppguid = ppvd->vdev_guid;
5449 5449 sguid = 0;
5450 5450 /*
5451 5451 * If we have just finished replacing a hot spared device, then
5452 5452 * we need to detach the parent's first child (the original hot
5453 5453 * spare) as well.
5454 5454 */
5455 5455 if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 &&
5456 5456 ppvd->vdev_children == 2) {
5457 5457 ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
5458 5458 sguid = ppvd->vdev_child[1]->vdev_guid;
5459 5459 }
5460 5460 ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd));
5461 5461
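/*
 * Drop the config lock across the detach calls; spa_vdev_detach()
 * takes the namespace lock and SCL_ALL itself via spa_vdev_enter().
 */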
5462 5462 spa_config_exit(spa, SCL_ALL, FTAG);
5463 5463 if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
5464 5464 return;
5465 5465 if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
5466 5466 return;
5467 5467 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5468 5468 }
5469 5469
5470 5470 spa_config_exit(spa, SCL_ALL, FTAG);
5471 5471 }
5472 5472
5473 5473 /*
5474 5474 * Update the stored path or FRU for this vdev.
5475 5475 */
5476 5476 int
5477 5477 spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
5478 5478 boolean_t ispath)
5479 5479 {
5480 5480 vdev_t *vd;
5481 5481 boolean_t sync = B_FALSE;
5482 5482
5483 5483 ASSERT(spa_writeable(spa));
5484 5484
5485 5485 spa_vdev_state_enter(spa, SCL_ALL);
5486 5486
5487 5487 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
5488 5488 return (spa_vdev_state_exit(spa, NULL, ENOENT));
5489 5489
5490 5490 if (!vd->vdev_ops->vdev_op_leaf)
5491 5491 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
5492 5492
5493 5493 if (ispath) {
5494 5494 if (strcmp(value, vd->vdev_path) != 0) {
5495 5495 spa_strfree(vd->vdev_path);
5496 5496 vd->vdev_path = spa_strdup(value);
5497 5497 sync = B_TRUE;
5498 5498 }
5499 5499 } else {
5500 5500 if (vd->vdev_fru == NULL) {
5501 5501 vd->vdev_fru = spa_strdup(value);
5502 5502 sync = B_TRUE;
5503 5503 } else if (strcmp(value, vd->vdev_fru) != 0) {
5504 5504 spa_strfree(vd->vdev_fru);
5505 5505 vd->vdev_fru = spa_strdup(value);
5506 5506 sync = B_TRUE;
5507 5507 }
5508 5508 }
5509 5509
5510 5510 return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0));
5511 5511 }
5512 5512
5513 5513 int
5514 5514 spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
5515 5515 {
5516 5516 return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));
5517 5517 }
5518 5518
5519 5519 int
5520 5520 spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
5521 5521 {
5522 5522 return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));
5523 5523 }
5524 5524
5525 5525 /*
5526 5526 * ==========================================================================
5527 5527 * SPA Scanning
5528 5528 * ==========================================================================
5529 5529 */
5530 5530
5531 5531 int
5532 5532 spa_scan_stop(spa_t *spa)
5533 5533 {
5534 5534 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
5535 5535 if (dsl_scan_resilvering(spa->spa_dsl_pool))
5536 5536 return (SET_ERROR(EBUSY));
5537 5537 return (dsl_scan_cancel(spa->spa_dsl_pool));
5538 5538 }
5539 5539
5540 5540 int
5541 5541 spa_scan(spa_t *spa, pool_scan_func_t func)
5542 5542 {
5543 5543 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
5544 5544
5545 5545 if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
5546 5546 return (SET_ERROR(ENOTSUP));
5547 5547
5548 5548 /*
5549 5549 * If a resilver was requested, but there is no DTL on a
5550 5550 * writeable leaf device, we have nothing to do.
5551 5551 */
5552 5552 if (func == POOL_SCAN_RESILVER &&
5553 5553 !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
5554 5554 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
5555 5555 return (0);
5556 5556 }
5557 5557
5558 5558 return (dsl_scan(spa->spa_dsl_pool, func));
5559 5559 }
5560 5560
5561 5561 /*
5562 5562 * ==========================================================================
5563 5563 * SPA async task processing
5564 5564 * ==========================================================================
5565 5565 */
5566 5566
5567 5567 static void
5568 5568 spa_async_remove(spa_t *spa, vdev_t *vd)
5569 5569 {
5570 5570 if (vd->vdev_remove_wanted) {
5571 5571 vd->vdev_remove_wanted = B_FALSE;
5572 5572 vd->vdev_delayed_close = B_FALSE;
5573 5573 vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
5574 5574
5575 5575 /*
5576 5576 * We want to clear the stats, but we don't want to do a full
5577 5577 * vdev_clear() as that will cause us to throw away
5578 5578 * degraded/faulted state as well as attempt to reopen the
5579 5579 * device, all of which is a waste.
5580 5580 */
5581 5581 vd->vdev_stat.vs_read_errors = 0;
5582 5582 vd->vdev_stat.vs_write_errors = 0;
5583 5583 vd->vdev_stat.vs_checksum_errors = 0;
5584 5584
5585 5585 vdev_state_dirty(vd->vdev_top);
5586 5586 }
5587 5587
5588 5588 for (int c = 0; c < vd->vdev_children; c++)
5589 5589 spa_async_remove(spa, vd->vdev_child[c]);
5590 5590 }
5591 5591
5592 5592 static void
5593 5593 spa_async_probe(spa_t *spa, vdev_t *vd)
5594 5594 {
5595 5595 if (vd->vdev_probe_wanted) {
5596 5596 vd->vdev_probe_wanted = B_FALSE;
5597 5597 vdev_reopen(vd); /* vdev_open() does the actual probe */
5598 5598 }
5599 5599
5600 5600 for (int c = 0; c < vd->vdev_children; c++)
5601 5601 spa_async_probe(spa, vd->vdev_child[c]);
5602 5602 }
5603 5603
5604 5604 static void
5605 5605 spa_async_autoexpand(spa_t *spa, vdev_t *vd)
5606 5606 {
5607 5607 sysevent_id_t eid;
5608 5608 nvlist_t *attr;
5609 5609 char *physpath;
5610 5610
5611 5611 if (!spa->spa_autoexpand)
5612 5612 return;
5613 5613
5614 5614 for (int c = 0; c < vd->vdev_children; c++) {
5615 5615 vdev_t *cvd = vd->vdev_child[c];
5616 5616 spa_async_autoexpand(spa, cvd);
5617 5617 }
5618 5618
5619 5619 if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL)
5620 5620 return;
5621 5621
5622 5622 physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
5623 5623 (void) snprintf(physpath, MAXPATHLEN, "/devices%s", vd->vdev_physpath);
5624 5624
5625 5625 VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
5626 5626 VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);
5627 5627
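/* notify listeners that the device at physpath may have grown */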
5628 5628 (void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
5629 5629 ESC_DEV_DLE, attr, &eid, DDI_SLEEP);
5630 5630
5631 5631 nvlist_free(attr);
5632 5632 kmem_free(physpath, MAXPATHLEN);
5633 5633 }
5634 5634
5635 5635 static void
5636 5636 spa_async_thread(spa_t *spa)
5637 5637 {
5638 5638 int tasks;
5639 5639
5640 5640 ASSERT(spa->spa_sync_on);
5641 5641
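/* snapshot and clear the set of pending async tasks under the lock */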
5642 5642 mutex_enter(&spa->spa_async_lock);
5643 5643 tasks = spa->spa_async_tasks;
5644 5644 spa->spa_async_tasks = 0;
5645 5645 mutex_exit(&spa->spa_async_lock);
5646 5646
5647 5647 /*
5648 5648 * See if the config needs to be updated.
5649 5649 */
5650 5650 if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
5651 5651 uint64_t old_space, new_space;
5652 5652
5653 5653 mutex_enter(&spa_namespace_lock);
5654 5654 old_space = metaslab_class_get_space(spa_normal_class(spa));
5655 5655 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
5656 5656 new_space = metaslab_class_get_space(spa_normal_class(spa));
5657 5657 mutex_exit(&spa_namespace_lock);
5658 5658
5659 5659 /*
5660 5660 * If the pool grew as a result of the config update,
5661 5661 * then log an internal history event.
5662 5662 */
5663 5663 if (new_space != old_space) {
5664 5664 spa_history_log_internal(spa, "vdev online", NULL,
5665 5665 "pool '%s' size: %llu(+%llu)",
5666 5666 spa_name(spa), new_space, new_space - old_space);
5667 5667 }
5668 5668 }
5669 5669
5670 5670 /*
5671 5671 * See if any devices need to be marked REMOVED.
5672 5672 */
5673 5673 if (tasks & SPA_ASYNC_REMOVE) {
5674 5674 spa_vdev_state_enter(spa, SCL_NONE);
5675 5675 spa_async_remove(spa, spa->spa_root_vdev);
5676 5676 for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
5677 5677 spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
5678 5678 for (int i = 0; i < spa->spa_spares.sav_count; i++)
5679 5679 spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
5680 5680 (void) spa_vdev_state_exit(spa, NULL, 0);
5681 5681 }
5682 5682
5683 5683 if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) {
5684 5684 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
5685 5685 spa_async_autoexpand(spa, spa->spa_root_vdev);
5686 5686 spa_config_exit(spa, SCL_CONFIG, FTAG);
5687 5687 }
5688 5688
5689 5689 /*
5690 5690 * See if any devices need to be probed.
5691 5691 */
5692 5692 if (tasks & SPA_ASYNC_PROBE) {
5693 5693 spa_vdev_state_enter(spa, SCL_NONE);
5694 5694 spa_async_probe(spa, spa->spa_root_vdev);
5695 5695 (void) spa_vdev_state_exit(spa, NULL, 0);
5696 5696 }
5697 5697
5698 5698 /*
5699 5699 * If any devices are done replacing, detach them.
5700 5700 */
5701 5701 if (tasks & SPA_ASYNC_RESILVER_DONE)
5702 5702 spa_vdev_resilver_done(spa);
5703 5703
5704 5704 /*
5705 5705 * Kick off a resilver.
5706 5706 */
5707 5707 if (tasks & SPA_ASYNC_RESILVER)
5708 5708 dsl_resilver_restart(spa->spa_dsl_pool, 0);
5709 5709
5710 5710 /*
5711 5711 * Let the world know that we're done.
5712 5712 */
5713 5713 mutex_enter(&spa->spa_async_lock);
5714 5714 spa->spa_async_thread = NULL;
5715 5715 cv_broadcast(&spa->spa_async_cv);
5716 5716 mutex_exit(&spa->spa_async_lock);
5717 5717 thread_exit();
5718 5718 }
5719 5719
5720 5720 void
5721 5721 spa_async_suspend(spa_t *spa)
5722 5722 {
5723 5723 mutex_enter(&spa->spa_async_lock);
5724 5724 spa->spa_async_suspended++;
5725 5725 while (spa->spa_async_thread != NULL)
5726 5726 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
5727 5727 mutex_exit(&spa->spa_async_lock);
5728 5728 }
5729 5729
5730 5730 void
5731 5731 spa_async_resume(spa_t *spa)
5732 5732 {
5733 5733 mutex_enter(&spa->spa_async_lock);
5734 5734 ASSERT(spa->spa_async_suspended != 0);
5735 5735 spa->spa_async_suspended--;
5736 5736 mutex_exit(&spa->spa_async_lock);
5737 5737 }
5738 5738
5739 5739 static boolean_t
5740 5740 spa_async_tasks_pending(spa_t *spa)
5741 5741 {
5742 5742 uint_t non_config_tasks;
5743 5743 uint_t config_task;
5744 5744 boolean_t config_task_suspended;
5745 5745
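/*
 * If a config cache file write recently failed, the CONFIG_UPDATE
 * task alone does not make the thread dispatch until
 * zfs_ccw_retry_interval seconds have elapsed.
 */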
5746 5746 non_config_tasks = spa->spa_async_tasks & ~SPA_ASYNC_CONFIG_UPDATE;
5747 5747 config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE;
5748 5748 if (spa->spa_ccw_fail_time == 0) {
5749 5749 config_task_suspended = B_FALSE;
5750 5750 } else {
5751 5751 config_task_suspended =
5752 5752 (gethrtime() - spa->spa_ccw_fail_time) <
5753 5753 (zfs_ccw_retry_interval * NANOSEC);
5754 5754 }
5755 5755
5756 5756 return (non_config_tasks || (config_task && !config_task_suspended));
5757 5757 }
5758 5758
5759 5759 static void
5760 5760 spa_async_dispatch(spa_t *spa)
5761 5761 {
5762 5762 mutex_enter(&spa->spa_async_lock);
5763 5763 if (spa_async_tasks_pending(spa) &&
5764 5764 !spa->spa_async_suspended &&
5765 5765 spa->spa_async_thread == NULL &&
5766 5766 rootdir != NULL)
5767 5767 spa->spa_async_thread = thread_create(NULL, 0,
5768 5768 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
5769 5769 mutex_exit(&spa->spa_async_lock);
5770 5770 }
5771 5771
5772 5772 void
5773 5773 spa_async_request(spa_t *spa, int task)
5774 5774 {
5775 5775 zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task);
5776 5776 mutex_enter(&spa->spa_async_lock);
5777 5777 spa->spa_async_tasks |= task;
5778 5778 mutex_exit(&spa->spa_async_lock);
5779 5779 }
5780 5780
5781 5781 /*
5782 5782 * ==========================================================================
5783 5783 * SPA syncing routines
5784 5784 * ==========================================================================
5785 5785 */
5786 5786
5787 5787 static int
5788 5788 bpobj_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
5789 5789 {
5790 5790 bpobj_t *bpo = arg;
5791 5791 bpobj_enqueue(bpo, bp, tx);
5792 5792 return (0);
5793 5793 }
5794 5794
5795 5795 static int
5796 5796 spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
5797 5797 {
5798 5798 zio_t *zio = arg;
5799 5799
5800 5800 zio_nowait(zio_free_sync(zio, zio->io_spa, dmu_tx_get_txg(tx), bp,
5801 5801 zio->io_flags));
5802 5802 return (0);
5803 5803 }
5804 5804
5805 5805 /*
5806 5806 * Note: this simple function is not inlined to make it easier to dtrace the
5807 5807 * amount of time spent syncing frees.
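*
* A minimal illustrative sketch (assuming the kernel fbt provider can
* instrument this function):
*
*	dtrace -n 'fbt::spa_sync_frees:entry { self->ts = timestamp; }
*	    fbt::spa_sync_frees:return /self->ts/
*	    { @ns = quantize(timestamp - self->ts); self->ts = 0; }'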
5808 5808 */
5809 5809 static void
5810 5810 spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx)
5811 5811 {
5812 5812 zio_t *zio = zio_root(spa, NULL, NULL, 0);
5813 5813 bplist_iterate(bpl, spa_free_sync_cb, zio, tx);
5814 5814 VERIFY(zio_wait(zio) == 0);
5815 5815 }
5816 5816
5817 5817 /*
5818 5818 * Note: this simple function is not inlined to make it easier to dtrace the
5819 5819 * amount of time spent syncing deferred frees.
5820 5820 */
5821 5821 static void
5822 5822 spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx)
5823 5823 {
5824 5824 zio_t *zio = zio_root(spa, NULL, NULL, 0);
5825 5825 VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj,
5826 5826 spa_free_sync_cb, zio, tx), ==, 0);
5827 5827 VERIFY0(zio_wait(zio));
5828 5828 }
5829 5829
5830 5830
5831 5831 static void
5832 5832 spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
5833 5833 {
5834 5834 char *packed = NULL;
5835 5835 size_t bufsize;
5836 5836 size_t nvsize = 0;
5837 5837 dmu_buf_t *db;
5838 5838
5839 5839 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
5840 5840
5841 5841 /*
5842 5842 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
5843 5843 * information. This avoids the dmu_buf_will_dirty() path and
5844 5844 * saves us a pre-read to get data we don't actually care about.
5845 5845 */
5846 5846 bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
5847 5847 packed = kmem_alloc(bufsize, KM_SLEEP);
5848 5848
5849 5849 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
5850 5850 KM_SLEEP) == 0);
5851 5851 bzero(packed + nvsize, bufsize - nvsize);
5852 5852
5853 5853 dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
5854 5854
5855 5855 kmem_free(packed, bufsize);
5856 5856
5857 5857 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
5858 5858 dmu_buf_will_dirty(db, tx);
5859 5859 *(uint64_t *)db->db_data = nvsize;
5860 5860 dmu_buf_rele(db, FTAG);
5861 5861 }
5862 5862
5863 5863 static void
5864 5864 spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
5865 5865 const char *config, const char *entry)
5866 5866 {
5867 5867 nvlist_t *nvroot;
5868 5868 nvlist_t **list;
5869 5869 int i;
5870 5870
5871 5871 if (!sav->sav_sync)
5872 5872 return;
5873 5873
5874 5874 /*
5875 5875 * Update the MOS nvlist describing the list of available devices.
5876 5876 * spa_validate_aux() will have already made sure this nvlist is
5877 5877 * valid and the vdevs are labeled appropriately.
5878 5878 */
5879 5879 if (sav->sav_object == 0) {
5880 5880 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
5881 5881 DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
5882 5882 sizeof (uint64_t), tx);
5883 5883 VERIFY(zap_update(spa->spa_meta_objset,
5884 5884 DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
5885 5885 &sav->sav_object, tx) == 0);
5886 5886 }
5887 5887
5888 5888 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
5889 5889 if (sav->sav_count == 0) {
5890 5890 VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
5891 5891 } else {
5892 5892 list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
5893 5893 for (i = 0; i < sav->sav_count; i++)
5894 5894 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
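/*
 * At this point lastlog is nonzero only if the pool ends in a run of
 * log/hole vdevs beginning at index lastlog, so the split request must
 * name exactly the leading non-log children.
 */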
5895 5895 B_FALSE, VDEV_CONFIG_L2CACHE);
5896 5896 VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
5897 5897 sav->sav_count) == 0);
5898 5898 for (i = 0; i < sav->sav_count; i++)
5899 5899 nvlist_free(list[i]);
5900 5900 kmem_free(list, sav->sav_count * sizeof (void *));
5901 5901 }
5902 5902
5903 5903 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
5904 5904 nvlist_free(nvroot);
5905 5905
5906 5906 sav->sav_sync = B_FALSE;
5907 5907 }
5908 5908
5909 5909 static void
5910 5910 spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
5911 5911 {
5912 5912 nvlist_t *config;
5913 5913
5914 5914 if (list_is_empty(&spa->spa_config_dirty_list))
5915 5915 return;
5916 5916
5917 5917 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
5918 5918
5919 5919 config = spa_config_generate(spa, spa->spa_root_vdev,
5920 5920 dmu_tx_get_txg(tx), B_FALSE);
5921 5921
5922 5922 /*
5923 5923 * If we're upgrading the spa version then make sure that
5924 5924 * the config object gets updated with the correct version.
5925 5925 */
5926 5926 if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
5927 5927 fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
5928 5928 spa->spa_uberblock.ub_version);
5929 5929
5930 5930 spa_config_exit(spa, SCL_STATE, FTAG);
5931 5931
5932 5932 if (spa->spa_config_syncing)
5933 5933 nvlist_free(spa->spa_config_syncing);
5934 5934 spa->spa_config_syncing = config;
5935 5935
5936 5936 spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
5937 5937 }
5938 5938
5939 5939 static void
5940 5940 spa_sync_version(void *arg, dmu_tx_t *tx)
5941 5941 {
5942 5942 uint64_t *versionp = arg;
5943 5943 uint64_t version = *versionp;
5944 5944 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
5945 5945
5946 5946 /*
5947 5947 * Setting the version is special cased when first creating the pool.
5948 5948 */
5949 5949 ASSERT(tx->tx_txg != TXG_INITIAL);
5950 5950
5951 5951 ASSERT(SPA_VERSION_IS_SUPPORTED(version));
5952 5952 ASSERT(version >= spa_version(spa));
5953 5953
5954 5954 spa->spa_uberblock.ub_version = version;
5955 5955 vdev_config_dirty(spa->spa_root_vdev);
5956 5956 spa_history_log_internal(spa, "set", tx, "version=%lld", version);
5957 5957 }
5958 5958
5959 5959 /*
5960 5960 * Set zpool properties.
5961 5961 */
5962 5962 static void
5963 5963 spa_sync_props(void *arg, dmu_tx_t *tx)
5964 5964 {
5965 5965 nvlist_t *nvp = arg;
5966 5966 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
5967 5967 objset_t *mos = spa->spa_meta_objset;
5968 5968 nvpair_t *elem = NULL;
5969 5969
5970 5970 mutex_enter(&spa->spa_props_lock);
5971 5971
5972 5972 while ((elem = nvlist_next_nvpair(nvp, elem))) {
5973 5973 uint64_t intval;
5974 5974 char *strval, *fname;
5975 5975 zpool_prop_t prop;
5976 5976 const char *propname;
5977 5977 zprop_type_t proptype;
5978 5978 spa_feature_t fid;
5979 5979
5980 5980 switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
5981 5981 case ZPROP_INVAL:
5982 5982 /*
5983 5983 * We checked this earlier in spa_prop_validate().
5984 5984 */
5985 5985 ASSERT(zpool_prop_feature(nvpair_name(elem)));
5986 5986
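/* feature properties are named "feature@<name>" */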
5987 5987 fname = strchr(nvpair_name(elem), '@') + 1;
5988 5988 VERIFY0(zfeature_lookup_name(fname, &fid));
5989 5989
5990 5990 spa_feature_enable(spa, fid, tx);
5991 5991 spa_history_log_internal(spa, "set", tx,
5992 5992 "%s=enabled", nvpair_name(elem));
5993 5993 break;
5994 5994
5995 5995 case ZPOOL_PROP_VERSION:
5996 5996 intval = fnvpair_value_uint64(elem);
5997 5997 /*
5998 5998 * The version is synced separately before other
5999 5999 * properties and should be correct by now.
6000 6000 */
6001 6001 ASSERT3U(spa_version(spa), >=, intval);
6002 6002 break;
6003 6003
6004 6004 case ZPOOL_PROP_ALTROOT:
6005 6005 /*
6006 6006 * 'altroot' is a non-persistent property. It should
6007 6007 * have been set temporarily at creation or import time.
6008 6008 */
6009 6009 ASSERT(spa->spa_root != NULL);
6010 6010 break;
6011 6011
6012 6012 case ZPOOL_PROP_READONLY:
6013 6013 case ZPOOL_PROP_CACHEFILE:
6014 6014 /*
6015 6015 * 'readonly' and 'cachefile' are also non-persistent
6016 6016 * properties.
6017 6017 */
6018 6018 break;
6019 6019 case ZPOOL_PROP_COMMENT:
6020 6020 strval = fnvpair_value_string(elem);
6021 6021 if (spa->spa_comment != NULL)
6022 6022 spa_strfree(spa->spa_comment);
6023 6023 spa->spa_comment = spa_strdup(strval);
6024 6024 /*
6025 6025 * We need to dirty the configuration on all the vdevs
6026 6026 * so that their labels get updated. It's unnecessary
6027 6027 * to do this for pool creation since the vdev's
6028 6028 * configuration has already been dirtied.
6029 6029 */
6030 6030 if (tx->tx_txg != TXG_INITIAL)
6031 6031 vdev_config_dirty(spa->spa_root_vdev);
6032 6032 spa_history_log_internal(spa, "set", tx,
6033 6033 "%s=%s", nvpair_name(elem), strval);
6034 6034 break;
6035 6035 default:
6036 6036 /*
6037 6037 * Set pool property values in the poolprops mos object.
6038 6038 */
6039 6039 if (spa->spa_pool_props_object == 0) {
6040 6040 spa->spa_pool_props_object =
6041 6041 zap_create_link(mos, DMU_OT_POOL_PROPS,
6042 6042 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
6043 6043 tx);
6044 6044 }
6045 6045
6046 6046 /* normalize the property name */
6047 6047 propname = zpool_prop_to_name(prop);
6048 6048 proptype = zpool_prop_get_type(prop);
6049 6049
6050 6050 if (nvpair_type(elem) == DATA_TYPE_STRING) {
6051 6051 ASSERT(proptype == PROP_TYPE_STRING);
6052 6052 strval = fnvpair_value_string(elem);
6053 6053 VERIFY0(zap_update(mos,
6054 6054 spa->spa_pool_props_object, propname,
6055 6055 1, strlen(strval) + 1, strval, tx));
6056 6056 spa_history_log_internal(spa, "set", tx,
6057 6057 "%s=%s", nvpair_name(elem), strval);
6058 6058 } else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
6059 6059 intval = fnvpair_value_uint64(elem);
6060 6060
6061 6061 if (proptype == PROP_TYPE_INDEX) {
6062 6062 const char *unused;
6063 6063 VERIFY0(zpool_prop_index_to_string(
6064 6064 prop, intval, &unused));
6065 6065 }
6066 6066 VERIFY0(zap_update(mos,
6067 6067 spa->spa_pool_props_object, propname,
6068 6068 8, 1, &intval, tx));
6069 6069 spa_history_log_internal(spa, "set", tx,
6070 6070 "%s=%lld", nvpair_name(elem), intval);
6071 6071 } else {
6072 6072 ASSERT(0); /* not allowed */
6073 6073 }
6074 6074
6075 6075 switch (prop) {
6076 6076 case ZPOOL_PROP_DELEGATION:
6077 6077 spa->spa_delegation = intval;
6078 6078 break;
6079 6079 case ZPOOL_PROP_BOOTFS:
6080 6080 spa->spa_bootfs = intval;
6081 6081 break;
6082 6082 case ZPOOL_PROP_FAILUREMODE:
6083 6083 spa->spa_failmode = intval;
6084 6084 break;
6085 6085 case ZPOOL_PROP_AUTOEXPAND:
6086 6086 spa->spa_autoexpand = intval;
6087 6087 if (tx->tx_txg != TXG_INITIAL)
6088 6088 spa_async_request(spa,
6089 6089 SPA_ASYNC_AUTOEXPAND);
6090 6090 break;
6091 6091 case ZPOOL_PROP_DEDUPDITTO:
6092 6092 spa->spa_dedup_ditto = intval;
6093 6093 break;
6094 6094 default:
6095 6095 break;
6096 6096 }
6097 6097 }
6098 6098
6099 6099 }
6100 6100
6101 6101 mutex_exit(&spa->spa_props_lock);
6102 6102 }
6103 6103
6104 6104 /*
6105 6105 * Perform one-time upgrade on-disk changes. spa_version() does not
6106 6106 * reflect the new version this txg, so there must be no changes this
6107 6107 * txg to anything that the upgrade code depends on after it executes.
6108 6108 * Therefore this must be called after dsl_pool_sync() does the sync
6109 6109 * tasks.
6110 6110 */
6111 6111 static void
6112 6112 spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
6113 6113 {
6114 6114 dsl_pool_t *dp = spa->spa_dsl_pool;
6115 6115
6116 6116 ASSERT(spa->spa_sync_pass == 1);
6117 6117
6118 6118 rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
6119 6119
6120 6120 if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
6121 6121 spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
6122 6122 dsl_pool_create_origin(dp, tx);
6123 6123
6124 6124 /* Keeping the origin open increases spa_minref */
6125 6125 spa->spa_minref += 3;
6126 6126 }
6127 6127
6128 6128 if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
6129 6129 spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
6130 6130 dsl_pool_upgrade_clones(dp, tx);
6131 6131 }
6132 6132
6133 6133 if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
6134 6134 spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
6135 6135 dsl_pool_upgrade_dir_clones(dp, tx);
6136 6136
6137 6137 /* Keeping the freedir open increases spa_minref */
6138 6138 spa->spa_minref += 3;
6139 6139 }
6140 6140
6141 6141 if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
6142 6142 spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
6143 6143 spa_feature_create_zap_objects(spa, tx);
6144 6144 }
6145 6145
6146 6146 /*
6147 6147 * The LZ4_COMPRESS feature's behavior was changed to activate_on_enable
6148 6148 * when the ability to use lz4 compression for metadata was added.
6149 6149 * Old pools that have this feature enabled must be upgraded to have
6150 6150 * this feature active.
6151 6151 */
6152 6152 if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
6153 6153 boolean_t lz4_en = spa_feature_is_enabled(spa,
6154 6154 SPA_FEATURE_LZ4_COMPRESS);
6155 6155 boolean_t lz4_ac = spa_feature_is_active(spa,
6156 6156 SPA_FEATURE_LZ4_COMPRESS);
6157 6157
6158 6158 if (lz4_en && !lz4_ac)
6159 6159 spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx);
6160 6160 }
6161 6161 rrw_exit(&dp->dp_config_rwlock, FTAG);
6162 6162 }
6163 6163
6164 6164 /*
6165 6165 * Sync the specified transaction group. New blocks may be dirtied as
6166 6166 * part of the process, so we iterate until it converges.
6167 6167 */
6168 6168 void
6169 6169 spa_sync(spa_t *spa, uint64_t txg)
6170 6170 {
6171 6171 dsl_pool_t *dp = spa->spa_dsl_pool;
6172 6172 objset_t *mos = spa->spa_meta_objset;
6173 6173 bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
6174 6174 vdev_t *rvd = spa->spa_root_vdev;
6175 6175 vdev_t *vd;
6176 6176 dmu_tx_t *tx;
6177 6177 int error;
6178 6178
6179 6179 VERIFY(spa_writeable(spa));
6180 6180
6181 6181 /*
6182 6182 * Lock out configuration changes.
6183 6183 */
6184 6184 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
6185 6185
6186 6186 spa->spa_syncing_txg = txg;
6187 6187 spa->spa_sync_pass = 0;
6188 6188
6189 6189 /*
6190 6190 * If there are any pending vdev state changes, convert them
6191 6191 * into config changes that go out with this transaction group.
6192 6192 */
6193 6193 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
6194 6194 while (list_head(&spa->spa_state_dirty_list) != NULL) {
6195 6195 /*
6196 6196 * We need the write lock here because, for aux vdevs,
6197 6197 * calling vdev_config_dirty() modifies sav_config.
6198 6198 * This is ugly and will become unnecessary when we
6199 6199 * eliminate the aux vdev wart by integrating all vdevs
6200 6200 * into the root vdev tree.
6201 6201 */
6202 6202 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
6203 6203 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
6204 6204 while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
6205 6205 vdev_state_clean(vd);
6206 6206 vdev_config_dirty(vd);
6207 6207 }
6208 6208 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
6209 6209 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
6210 6210 }
6211 6211 spa_config_exit(spa, SCL_STATE, FTAG);
6212 6212
6213 6213 tx = dmu_tx_create_assigned(dp, txg);
6214 6214
6215 6215 spa->spa_sync_starttime = gethrtime();
6216 6216 VERIFY(cyclic_reprogram(spa->spa_deadman_cycid,
6217 6217 spa->spa_sync_starttime + spa->spa_deadman_synctime));
6218 6218
6219 6219 /*
6220 6220 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
6221 6221 * set spa_deflate if we have no raid-z vdevs.
6222 6222 */
6223 6223 if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
6224 6224 spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
6225 6225 int i;
6226 6226
6227 6227 for (i = 0; i < rvd->vdev_children; i++) {
6228 6228 vd = rvd->vdev_child[i];
6229 6229 if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
6230 6230 break;
6231 6231 }
6232 6232 if (i == rvd->vdev_children) {
6233 6233 spa->spa_deflate = TRUE;
6234 6234 VERIFY(0 == zap_add(spa->spa_meta_objset,
6235 6235 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
6236 6236 sizeof (uint64_t), 1, &spa->spa_deflate, tx));
6237 6237 }
6238 6238 }
6239 6239
6240 6240 /*
6241 6241 * If anything has changed in this txg, or if someone is waiting
6242 6242 * for this txg to sync (e.g., spa_vdev_remove()), push the
6243 6243 * deferred frees from the previous txg. If not, leave them
6244 6244 * alone so that we don't generate work on an otherwise idle
6245 6245 * system.
6246 6246 */
6247 6247 if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
6248 6248 !txg_list_empty(&dp->dp_dirty_dirs, txg) ||
6249 6249 !txg_list_empty(&dp->dp_sync_tasks, txg) ||
6250 6250 ((dsl_scan_active(dp->dp_scan) ||
6251 6251 txg_sync_waiting(dp)) && !spa_shutting_down(spa))) {
6252 6252 spa_sync_deferred_frees(spa, tx);
6253 6253 }
6254 6254
6255 6255 /*
6256 6256 * Iterate to convergence.
6257 6257 */
6258 6258 do {
6259 6259 int pass = ++spa->spa_sync_pass;
6260 6260
6261 6261 spa_sync_config_object(spa, tx);
6262 6262 spa_sync_aux_dev(spa, &spa->spa_spares, tx,
6263 6263 ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
6264 6264 spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
6265 6265 ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
6266 6266 spa_errlog_sync(spa, txg);
6267 6267 dsl_pool_sync(dp, txg);
6268 6268
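/*
 * In early passes, free blocks synchronously; once spa_sync_pass
 * reaches zfs_sync_pass_deferred_free, enqueue the remaining frees
 * on the deferred bpobj instead so that late passes converge quickly.
 */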
6269 6269 if (pass < zfs_sync_pass_deferred_free) {
6270 6270 spa_sync_frees(spa, free_bpl, tx);
6271 6271 } else {
6272 6272 bplist_iterate(free_bpl, bpobj_enqueue_cb,
6273 6273 &spa->spa_deferred_bpobj, tx);
6274 6274 }
6275 6275
6276 6276 ddt_sync(spa, txg);
6277 6277 dsl_scan_sync(dp, tx);
6278 6278
6279 6279 while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) != NULL)
6280 6280 vdev_sync(vd, txg);
6281 6281
6282 6282 if (pass == 1)
6283 6283 spa_sync_upgrades(spa, tx);
6284 6284
6285 6285 } while (dmu_objset_is_dirty(mos, txg));
6286 6286
6287 6287 /*
6288 6288 * Rewrite the vdev configuration (which includes the uberblock)
6289 6289 * to commit the transaction group.
6290 6290 *
6291 6291 * If there are no dirty vdevs, we sync the uberblock to a few
6292 6292 * random top-level vdevs that are known to be visible in the
6293 6293 * config cache (see spa_vdev_add() for a complete description).
6294 6294 * If there *are* dirty vdevs, sync the uberblock to all vdevs.
6295 6295 */
6296 6296 for (;;) {
6297 6297 /*
6298 6298 * We hold SCL_STATE to prevent vdev open/close/etc.
6299 6299 * while we're attempting to write the vdev labels.
6300 6300 */
6301 6301 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
6302 6302
6303 6303 if (list_is_empty(&spa->spa_config_dirty_list)) {
6304 6304 vdev_t *svd[SPA_DVAS_PER_BP];
6305 6305 int svdcount = 0;
6306 6306 int children = rvd->vdev_children;
6307 6307 int c0 = spa_get_random(children);
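/* start at a random child so the same vdevs aren't always chosen */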
6308 6308
6309 6309 for (int c = 0; c < children; c++) {
6310 6310 vd = rvd->vdev_child[(c0 + c) % children];
6311 6311 if (vd->vdev_ms_array == 0 || vd->vdev_islog)
6312 6312 continue;
6313 6313 svd[svdcount++] = vd;
6314 6314 if (svdcount == SPA_DVAS_PER_BP)
6315 6315 break;
6316 6316 }
6317 6317 error = vdev_config_sync(svd, svdcount, txg, B_FALSE);
6318 6318 if (error != 0)
6319 6319 error = vdev_config_sync(svd, svdcount, txg,
6320 6320 B_TRUE);
6321 6321 } else {
6322 6322 error = vdev_config_sync(rvd->vdev_child,
6323 6323 rvd->vdev_children, txg, B_FALSE);
6324 6324 if (error != 0)
6325 6325 error = vdev_config_sync(rvd->vdev_child,
6326 6326 rvd->vdev_children, txg, B_TRUE);
6327 6327 }
6328 6328
6329 6329 if (error == 0)
6330 6330 spa->spa_last_synced_guid = rvd->vdev_guid;
6331 6331
6332 6332 spa_config_exit(spa, SCL_STATE, FTAG);
6333 6333
6334 6334 if (error == 0)
6335 6335 break;
6336 6336 zio_suspend(spa, NULL);
6337 6337 zio_resume_wait(spa);
6338 6338 }
6339 6339 dmu_tx_commit(tx);
6340 6340
6341 6341 VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
6342 6342
6343 6343 /*
6344 6344 * Clear the dirty config list.
6345 6345 */
6346 6346 while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
6347 6347 vdev_config_clean(vd);
6348 6348
6349 6349 /*
6350 6350 * Now that the new config has synced transactionally,
6351 6351 * let it become visible to the config cache.
6352 6352 */
6353 6353 if (spa->spa_config_syncing != NULL) {
6354 6354 spa_config_set(spa, spa->spa_config_syncing);
6355 6355 spa->spa_config_txg = txg;
6356 6356 spa->spa_config_syncing = NULL;
6357 6357 }
6358 6358
6359 6359 spa->spa_ubsync = spa->spa_uberblock;
6360 6360
6361 6361 dsl_pool_sync_done(dp, txg);
6362 6362
6363 6363 /*
6364 6364 * Update usable space statistics.
6365 6365 */
6366 6366 while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))) != NULL)
6367 6367 vdev_sync_done(vd, txg);
6368 6368
6369 6369 spa_update_dspace(spa);
6370 6370
6371 6371 /*
6372 6372 * It had better be the case that we didn't dirty anything
6373 6373 * since vdev_config_sync().
6374 6374 */
6375 6375 ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
6376 6376 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
6377 6377 ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
6378 6378
6379 6379 spa->spa_sync_pass = 0;
6380 6380
6381 6381 spa_config_exit(spa, SCL_CONFIG, FTAG);
6382 6382
6383 6383 spa_handle_ignored_writes(spa);
6384 6384
6385 6385 /*
6386 6386 * If any async tasks have been requested, kick them off.
6387 6387 */
6388 6388 spa_async_dispatch(spa);
6389 6389 }
6390 6390
6391 6391 /*
6392 6392 * Sync all pools. We don't want to hold the namespace lock across these
6393 6393 * operations, so we take a reference on the spa_t and drop the lock during the
6394 6394 * sync.
6395 6395 */
6396 6396 void
6397 6397 spa_sync_allpools(void)
6398 6398 {
6399 6399 spa_t *spa = NULL;
6400 6400 mutex_enter(&spa_namespace_lock);
6401 6401 while ((spa = spa_next(spa)) != NULL) {
6402 6402 if (spa_state(spa) != POOL_STATE_ACTIVE ||
6403 6403 !spa_writeable(spa) || spa_suspended(spa))
6404 6404 continue;
6405 6405 spa_open_ref(spa, FTAG);
6406 6406 mutex_exit(&spa_namespace_lock);
6407 6407 txg_wait_synced(spa_get_dsl(spa), 0);
6408 6408 mutex_enter(&spa_namespace_lock);
6409 6409 spa_close(spa, FTAG);
6410 6410 }
6411 6411 mutex_exit(&spa_namespace_lock);
6412 6412 }
6413 6413
6414 6414 /*
6415 6415 * ==========================================================================
6416 6416 * Miscellaneous routines
6417 6417 * ==========================================================================
6418 6418 */
6419 6419
6420 6420 /*
6421 6421 * Remove all pools in the system.
6422 6422 */
6423 6423 void
6424 6424 spa_evict_all(void)
6425 6425 {
6426 6426 spa_t *spa;
6427 6427
6428 6428 /*
6429 6429 * Remove all cached state. All pools should be closed now,
6430 6430 * so every spa in the AVL tree should be unreferenced.
6431 6431 */
6432 6432 mutex_enter(&spa_namespace_lock);
6433 6433 while ((spa = spa_next(NULL)) != NULL) {
6434 6434 /*
6435 6435 * Stop async tasks. The async thread may need to detach
6436 6436 * a device that's been replaced, which requires grabbing
6437 6437 * spa_namespace_lock, so we must drop it here.
6438 6438 */
6439 6439 spa_open_ref(spa, FTAG);
6440 6440 mutex_exit(&spa_namespace_lock);
6441 6441 spa_async_suspend(spa);
6442 6442 mutex_enter(&spa_namespace_lock);
6443 6443 spa_close(spa, FTAG);
6444 6444
6445 6445 if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
6446 6446 spa_unload(spa);
6447 6447 spa_deactivate(spa);
6448 6448 }
6449 6449 spa_remove(spa);
6450 6450 }
6451 6451 mutex_exit(&spa_namespace_lock);
6452 6452 }
6453 6453
6454 6454 vdev_t *
6455 6455 spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
6456 6456 {
6457 6457 vdev_t *vd;
6458 6458 int i;
6459 6459
6460 6460 if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
6461 6461 return (vd);
6462 6462
6463 6463 if (aux) {
6464 6464 for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
6465 6465 vd = spa->spa_l2cache.sav_vdevs[i];
6466 6466 if (vd->vdev_guid == guid)
6467 6467 return (vd);
6468 6468 }
6469 6469
6470 6470 for (i = 0; i < spa->spa_spares.sav_count; i++) {
6471 6471 vd = spa->spa_spares.sav_vdevs[i];
6472 6472 if (vd->vdev_guid == guid)
6473 6473 return (vd);
6474 6474 }
6475 6475 }
6476 6476
6477 6477 return (NULL);
6478 6478 }
6479 6479
6480 6480 void
6481 6481 spa_upgrade(spa_t *spa, uint64_t version)
6482 6482 {
6483 6483 ASSERT(spa_writeable(spa));
6484 6484
6485 6485 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
6486 6486
6487 6487 /*
6488 6488 * This should only be called for a non-faulted pool, and since a
6489 6489 * future version would result in an unopenable pool, this shouldn't be
6490 6490 * possible.
6491 6491 */
6492 6492 ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version));
6493 6493 ASSERT3U(version, >=, spa->spa_uberblock.ub_version);
6494 6494
6495 6495 spa->spa_uberblock.ub_version = version;
6496 6496 vdev_config_dirty(spa->spa_root_vdev);
6497 6497
6498 6498 spa_config_exit(spa, SCL_ALL, FTAG);
6499 6499
6500 6500 txg_wait_synced(spa_get_dsl(spa), 0);
6501 6501 }
6502 6502
6503 6503 boolean_t
6504 6504 spa_has_spare(spa_t *spa, uint64_t guid)
6505 6505 {
6506 6506 int i;
6507 6507 uint64_t spareguid;
6508 6508 spa_aux_vdev_t *sav = &spa->spa_spares;
6509 6509
6510 6510 for (i = 0; i < sav->sav_count; i++)
6511 6511 if (sav->sav_vdevs[i]->vdev_guid == guid)
6512 6512 return (B_TRUE);
6513 6513
6514 6514 for (i = 0; i < sav->sav_npending; i++) {
6515 6515 if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
6516 6516 &spareguid) == 0 && spareguid == guid)
6517 6517 return (B_TRUE);
6518 6518 }
6519 6519
6520 6520 return (B_FALSE);
6521 6521 }
6522 6522
6523 6523 /*
6524 6524 * Check if a pool has an active shared spare device.
6525 6525 * Note: an active spare's reference count is 2: once as a spare, once as a replacing vdev
6526 6526 */
6527 6527 static boolean_t
6528 6528 spa_has_active_shared_spare(spa_t *spa)
6529 6529 {
6530 6530 int i, refcnt;
6531 6531 uint64_t pool;
6532 6532 spa_aux_vdev_t *sav = &spa->spa_spares;
6533 6533
6534 6534 for (i = 0; i < sav->sav_count; i++) {
6535 6535 if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
6536 6536 &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
6537 6537 refcnt > 2)
6538 6538 return (B_TRUE);
6539 6539 }
6540 6540
6541 6541 return (B_FALSE);
6542 6542 }
6543 6543
6544 6544 /*
6545 6545 * Post a sysevent corresponding to the given event. The 'name' must be one of
6546 6546 * the event definitions in sys/sysevent/eventdefs.h. The payload will be
6547 6547 * filled in from the spa and (optionally) the vdev. This doesn't do anything
6548 6548 * in the userland libzpool, as we don't want consumers to misinterpret ztest
6549 6549 * or zdb as real changes.
6550 6550 */
6551 6551 void
6552 6552 spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
6553 6553 {
6554 6554 #ifdef _KERNEL
6555 6555 sysevent_t *ev;
6556 6556 sysevent_attr_list_t *attr = NULL;
6557 6557 sysevent_value_t value;
6558 6558 sysevent_id_t eid;
6559 6559
6560 6560 ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs",
6561 6561 SE_SLEEP);
6562 6562
6563 6563 value.value_type = SE_DATA_TYPE_STRING;
6564 6564 value.value.sv_string = spa_name(spa);
6565 6565 if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0)
6566 6566 goto done;
6567 6567
6568 6568 value.value_type = SE_DATA_TYPE_UINT64;
6569 6569 value.value.sv_uint64 = spa_guid(spa);
6570 6570 if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0)
6571 6571 goto done;
6572 6572
6573 6573 if (vd) {
6574 6574 value.value_type = SE_DATA_TYPE_UINT64;
6575 6575 value.value.sv_uint64 = vd->vdev_guid;
6576 6576 if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value,
6577 6577 SE_SLEEP) != 0)
6578 6578 goto done;
6579 6579
6580 6580 if (vd->vdev_path) {
6581 6581 value.value_type = SE_DATA_TYPE_STRING;
6582 6582 value.value.sv_string = vd->vdev_path;
6583 6583 if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH,
6584 6584 &value, SE_SLEEP) != 0)
6585 6585 goto done;
6586 6586 }
6587 6587 }
6588 6588
6589 6589 if (sysevent_attach_attributes(ev, attr) != 0)
6590 6590 goto done;
6591 6591 attr = NULL;
6592 6592
6593 6593 (void) log_sysevent(ev, SE_SLEEP, &eid);
6594 6594
6595 6595 done:
6596 6596 if (attr)
6597 6597 sysevent_free_attr(attr);
6598 6598 sysevent_free(ev);
6599 6599 #endif
6600 6600 }
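/*
 * Illustrative usage, as seen earlier in this file when a device is
 * detached:
 *
 *	spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE);
 */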