5047 don't use atomic_*_nv if you discard the return value
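The fix below replaces atomic_inc_32_nv() with atomic_inc_32() at the dn_dbufs_count bump in dmu_bonus_hold(), where the returned value is discarded. The *_nv variants return the newly computed value, which on some platforms requires a more expensive instruction sequence (e.g. a compare-and-swap loop, or a locked xadd instead of a plain locked increment); when the result is unused, the plain form states the intent and avoids the overhead. A minimal sketch of the distinction (the counter and function names are illustrative, not from dmu.c):

    #include <atomic.h>

    static volatile uint32_t example_count;     /* hypothetical counter */

    void
    bump(void)
    {
            /* Return value unused: the plain form is the right call. */
            atomic_inc_32(&example_count);
    }

    uint32_t
    bump_and_read(void)
    {
            /* Use the _nv form only when the new value is needed. */
            return (atomic_inc_32_nv(&example_count));
    }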
--- old/usr/src/uts/common/fs/zfs/dmu.c
+++ new/usr/src/uts/common/fs/zfs/dmu.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
24 24 */
25 25 /* Copyright (c) 2013 by Saso Kiselkov. All rights reserved. */
26 26 /* Copyright (c) 2013, Joyent, Inc. All rights reserved. */
27 27 /* Copyright (c) 2014, Nexenta Systems, Inc. All rights reserved. */
28 28
29 29 #include <sys/dmu.h>
30 30 #include <sys/dmu_impl.h>
31 31 #include <sys/dmu_tx.h>
32 32 #include <sys/dbuf.h>
33 33 #include <sys/dnode.h>
34 34 #include <sys/zfs_context.h>
35 35 #include <sys/dmu_objset.h>
36 36 #include <sys/dmu_traverse.h>
37 37 #include <sys/dsl_dataset.h>
38 38 #include <sys/dsl_dir.h>
39 39 #include <sys/dsl_pool.h>
40 40 #include <sys/dsl_synctask.h>
41 41 #include <sys/dsl_prop.h>
42 42 #include <sys/dmu_zfetch.h>
43 43 #include <sys/zfs_ioctl.h>
44 44 #include <sys/zap.h>
45 45 #include <sys/zio_checksum.h>
46 46 #include <sys/zio_compress.h>
47 47 #include <sys/sa.h>
48 48 #include <sys/zfeature.h>
49 49 #ifdef _KERNEL
50 50 #include <sys/vmsystm.h>
51 51 #include <sys/zfs_znode.h>
52 52 #endif
53 53
54 54 /*
55 55 * Enable/disable nopwrite feature.
56 56 */
57 57 int zfs_nopwrite_enabled = 1;
58 58
59 59 const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
60 60 { DMU_BSWAP_UINT8, TRUE, "unallocated" },
61 61 { DMU_BSWAP_ZAP, TRUE, "object directory" },
62 62 { DMU_BSWAP_UINT64, TRUE, "object array" },
63 63 { DMU_BSWAP_UINT8, TRUE, "packed nvlist" },
64 64 { DMU_BSWAP_UINT64, TRUE, "packed nvlist size" },
65 65 { DMU_BSWAP_UINT64, TRUE, "bpobj" },
66 66 { DMU_BSWAP_UINT64, TRUE, "bpobj header" },
67 67 { DMU_BSWAP_UINT64, TRUE, "SPA space map header" },
68 68 { DMU_BSWAP_UINT64, TRUE, "SPA space map" },
69 69 { DMU_BSWAP_UINT64, TRUE, "ZIL intent log" },
70 70 { DMU_BSWAP_DNODE, TRUE, "DMU dnode" },
71 71 { DMU_BSWAP_OBJSET, TRUE, "DMU objset" },
72 72 { DMU_BSWAP_UINT64, TRUE, "DSL directory" },
73 73 { DMU_BSWAP_ZAP, TRUE, "DSL directory child map"},
74 74 { DMU_BSWAP_ZAP, TRUE, "DSL dataset snap map" },
75 75 { DMU_BSWAP_ZAP, TRUE, "DSL props" },
76 76 { DMU_BSWAP_UINT64, TRUE, "DSL dataset" },
77 77 { DMU_BSWAP_ZNODE, TRUE, "ZFS znode" },
78 78 { DMU_BSWAP_OLDACL, TRUE, "ZFS V0 ACL" },
79 79 { DMU_BSWAP_UINT8, FALSE, "ZFS plain file" },
80 80 { DMU_BSWAP_ZAP, TRUE, "ZFS directory" },
81 81 { DMU_BSWAP_ZAP, TRUE, "ZFS master node" },
82 82 { DMU_BSWAP_ZAP, TRUE, "ZFS delete queue" },
83 83 { DMU_BSWAP_UINT8, FALSE, "zvol object" },
84 84 { DMU_BSWAP_ZAP, TRUE, "zvol prop" },
85 85 { DMU_BSWAP_UINT8, FALSE, "other uint8[]" },
86 86 { DMU_BSWAP_UINT64, FALSE, "other uint64[]" },
87 87 { DMU_BSWAP_ZAP, TRUE, "other ZAP" },
88 88 { DMU_BSWAP_ZAP, TRUE, "persistent error log" },
89 89 { DMU_BSWAP_UINT8, TRUE, "SPA history" },
90 90 { DMU_BSWAP_UINT64, TRUE, "SPA history offsets" },
91 91 { DMU_BSWAP_ZAP, TRUE, "Pool properties" },
92 92 { DMU_BSWAP_ZAP, TRUE, "DSL permissions" },
93 93 { DMU_BSWAP_ACL, TRUE, "ZFS ACL" },
94 94 { DMU_BSWAP_UINT8, TRUE, "ZFS SYSACL" },
95 95 { DMU_BSWAP_UINT8, TRUE, "FUID table" },
96 96 { DMU_BSWAP_UINT64, TRUE, "FUID table size" },
97 97 { DMU_BSWAP_ZAP, TRUE, "DSL dataset next clones"},
98 98 { DMU_BSWAP_ZAP, TRUE, "scan work queue" },
99 99 { DMU_BSWAP_ZAP, TRUE, "ZFS user/group used" },
100 100 { DMU_BSWAP_ZAP, TRUE, "ZFS user/group quota" },
101 101 { DMU_BSWAP_ZAP, TRUE, "snapshot refcount tags"},
102 102 { DMU_BSWAP_ZAP, TRUE, "DDT ZAP algorithm" },
103 103 { DMU_BSWAP_ZAP, TRUE, "DDT statistics" },
104 104 { DMU_BSWAP_UINT8, TRUE, "System attributes" },
105 105 { DMU_BSWAP_ZAP, TRUE, "SA master node" },
106 106 { DMU_BSWAP_ZAP, TRUE, "SA attr registration" },
107 107 { DMU_BSWAP_ZAP, TRUE, "SA attr layouts" },
108 108 { DMU_BSWAP_ZAP, TRUE, "scan translations" },
109 109 { DMU_BSWAP_UINT8, FALSE, "deduplicated block" },
110 110 { DMU_BSWAP_ZAP, TRUE, "DSL deadlist map" },
111 111 { DMU_BSWAP_UINT64, TRUE, "DSL deadlist map hdr" },
112 112 { DMU_BSWAP_ZAP, TRUE, "DSL dir clones" },
113 113 { DMU_BSWAP_UINT64, TRUE, "bpobj subobj" }
114 114 };
115 115
116 116 const dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS] = {
117 117 { byteswap_uint8_array, "uint8" },
118 118 { byteswap_uint16_array, "uint16" },
119 119 { byteswap_uint32_array, "uint32" },
120 120 { byteswap_uint64_array, "uint64" },
121 121 { zap_byteswap, "zap" },
122 122 { dnode_buf_byteswap, "dnode" },
123 123 { dmu_objset_byteswap, "objset" },
124 124 { zfs_znode_byteswap, "znode" },
125 125 { zfs_oldacl_byteswap, "oldacl" },
126 126 { zfs_acl_byteswap, "acl" }
127 127 };
128 128
129 129 int
130 130 dmu_buf_hold_noread(objset_t *os, uint64_t object, uint64_t offset,
131 131 void *tag, dmu_buf_t **dbp)
132 132 {
133 133 dnode_t *dn;
134 134 uint64_t blkid;
135 135 dmu_buf_impl_t *db;
136 136 int err;
137 137
138 138 err = dnode_hold(os, object, FTAG, &dn);
139 139 if (err)
140 140 return (err);
141 141 blkid = dbuf_whichblock(dn, offset);
142 142 rw_enter(&dn->dn_struct_rwlock, RW_READER);
143 143 db = dbuf_hold(dn, blkid, tag);
144 144 rw_exit(&dn->dn_struct_rwlock);
145 145 dnode_rele(dn, FTAG);
146 146
147 147 if (db == NULL) {
148 148 *dbp = NULL;
149 149 return (SET_ERROR(EIO));
150 150 }
151 151
152 152 *dbp = &db->db;
153 153 return (err);
154 154 }
155 155
156 156 int
157 157 dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
158 158 void *tag, dmu_buf_t **dbp, int flags)
159 159 {
160 160 int err;
161 161 int db_flags = DB_RF_CANFAIL;
162 162
163 163 if (flags & DMU_READ_NO_PREFETCH)
164 164 db_flags |= DB_RF_NOPREFETCH;
165 165
166 166 err = dmu_buf_hold_noread(os, object, offset, tag, dbp);
167 167 if (err == 0) {
168 168 dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
169 169 err = dbuf_read(db, NULL, db_flags);
170 170 if (err != 0) {
171 171 dbuf_rele(db, tag);
172 172 *dbp = NULL;
173 173 }
174 174 }
175 175
176 176 return (err);
177 177 }
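A minimal sketch of the hold/release discipline for dmu_buf_hold(): on success the caller owns a hold registered under 'tag' and must drop it with dmu_buf_rele() using the same tag (only the DMU calls here are from this file; the surrounding code is illustrative):

    dmu_buf_t *db;
    int error;

    error = dmu_buf_hold(os, object, offset, FTAG, &db,
        DMU_READ_PREFETCH);
    if (error != 0)
            return (error);
    /* ... consume up to db->db_size bytes at db->db_data ... */
    dmu_buf_rele(db, FTAG);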
178 178
179 179 int
180 180 dmu_bonus_max(void)
181 181 {
182 182 return (DN_MAX_BONUSLEN);
183 183 }
184 184
185 185 int
186 186 dmu_set_bonus(dmu_buf_t *db_fake, int newsize, dmu_tx_t *tx)
187 187 {
188 188 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
189 189 dnode_t *dn;
190 190 int error;
191 191
192 192 DB_DNODE_ENTER(db);
193 193 dn = DB_DNODE(db);
194 194
195 195 if (dn->dn_bonus != db) {
196 196 error = SET_ERROR(EINVAL);
197 197 } else if (newsize < 0 || newsize > db_fake->db_size) {
198 198 error = SET_ERROR(EINVAL);
199 199 } else {
200 200 dnode_setbonuslen(dn, newsize, tx);
201 201 error = 0;
202 202 }
203 203
204 204 DB_DNODE_EXIT(db);
205 205 return (error);
206 206 }
207 207
208 208 int
209 209 dmu_set_bonustype(dmu_buf_t *db_fake, dmu_object_type_t type, dmu_tx_t *tx)
210 210 {
211 211 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
212 212 dnode_t *dn;
213 213 int error;
214 214
215 215 DB_DNODE_ENTER(db);
216 216 dn = DB_DNODE(db);
217 217
218 218 if (!DMU_OT_IS_VALID(type)) {
219 219 error = SET_ERROR(EINVAL);
220 220 } else if (dn->dn_bonus != db) {
221 221 error = SET_ERROR(EINVAL);
222 222 } else {
223 223 dnode_setbonus_type(dn, type, tx);
224 224 error = 0;
225 225 }
226 226
227 227 DB_DNODE_EXIT(db);
228 228 return (error);
229 229 }
230 230
231 231 dmu_object_type_t
232 232 dmu_get_bonustype(dmu_buf_t *db_fake)
233 233 {
234 234 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
235 235 dnode_t *dn;
236 236 dmu_object_type_t type;
237 237
238 238 DB_DNODE_ENTER(db);
239 239 dn = DB_DNODE(db);
240 240 type = dn->dn_bonustype;
241 241 DB_DNODE_EXIT(db);
242 242
243 243 return (type);
244 244 }
245 245
246 246 int
247 247 dmu_rm_spill(objset_t *os, uint64_t object, dmu_tx_t *tx)
248 248 {
249 249 dnode_t *dn;
250 250 int error;
251 251
252 252 error = dnode_hold(os, object, FTAG, &dn);
253 253 dbuf_rm_spill(dn, tx);
254 254 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
255 255 dnode_rm_spill(dn, tx);
256 256 rw_exit(&dn->dn_struct_rwlock);
257 257 dnode_rele(dn, FTAG);
258 258 return (error);
259 259 }
260 260
261 261 /*
262 262 * returns ENOENT, EIO, or 0.
263 263 */
264 264 int
265 265 dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **dbp)
266 266 {
267 267 dnode_t *dn;
268 268 dmu_buf_impl_t *db;
269 269 int error;
270 270
271 271 error = dnode_hold(os, object, FTAG, &dn);
272 272 if (error)
273 273 return (error);
274 274
275 275 rw_enter(&dn->dn_struct_rwlock, RW_READER);
276 276 if (dn->dn_bonus == NULL) {
277 277 rw_exit(&dn->dn_struct_rwlock);
278 278 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
279 279 if (dn->dn_bonus == NULL)
280 280 dbuf_create_bonus(dn);
281 281 }
282 282 db = dn->dn_bonus;
283 283
284 284 /* as long as the bonus buf is held, the dnode will be held */
285 285 if (refcount_add(&db->db_holds, tag) == 1) {
286 286 VERIFY(dnode_add_ref(dn, db));
287 - (void) atomic_inc_32_nv(&dn->dn_dbufs_count);
287 + atomic_inc_32(&dn->dn_dbufs_count);
288 288 }
289 289
290 290 /*
291 291 * Wait to drop dn_struct_rwlock until after adding the bonus dbuf's
292 292 * hold and incrementing the dbuf count to ensure that dnode_move() sees
293 293 * a dnode hold for every dbuf.
294 294 */
295 295 rw_exit(&dn->dn_struct_rwlock);
296 296
297 297 dnode_rele(dn, FTAG);
298 298
299 299 VERIFY(0 == dbuf_read(db, NULL, DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH));
300 300
301 301 *dbp = &db->db;
302 302 return (0);
303 303 }
304 304
305 305 /*
306 306 * returns ENOENT, EIO, or 0.
307 307 *
308 308 * This interface will allocate a blank spill dbuf when a spill blk
309 309 * doesn't already exist on the dnode.
310 310 *
311 311 * if you only want to find an already existing spill db, then
312 312 * dmu_spill_hold_existing() should be used.
313 313 */
314 314 int
315 315 dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags, void *tag, dmu_buf_t **dbp)
316 316 {
317 317 dmu_buf_impl_t *db = NULL;
318 318 int err;
319 319
320 320 if ((flags & DB_RF_HAVESTRUCT) == 0)
321 321 rw_enter(&dn->dn_struct_rwlock, RW_READER);
322 322
323 323 db = dbuf_hold(dn, DMU_SPILL_BLKID, tag);
324 324
325 325 if ((flags & DB_RF_HAVESTRUCT) == 0)
326 326 rw_exit(&dn->dn_struct_rwlock);
327 327
328 328 ASSERT(db != NULL);
329 329 err = dbuf_read(db, NULL, flags);
330 330 if (err == 0)
331 331 *dbp = &db->db;
332 332 else
333 333 dbuf_rele(db, tag);
334 334 return (err);
335 335 }
336 336
337 337 int
338 338 dmu_spill_hold_existing(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
339 339 {
340 340 dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
341 341 dnode_t *dn;
342 342 int err;
343 343
344 344 DB_DNODE_ENTER(db);
345 345 dn = DB_DNODE(db);
346 346
347 347 if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_SA) {
348 348 err = SET_ERROR(EINVAL);
349 349 } else {
350 350 rw_enter(&dn->dn_struct_rwlock, RW_READER);
351 351
352 352 if (!dn->dn_have_spill) {
353 353 err = SET_ERROR(ENOENT);
354 354 } else {
355 355 err = dmu_spill_hold_by_dnode(dn,
356 356 DB_RF_HAVESTRUCT | DB_RF_CANFAIL, tag, dbp);
357 357 }
358 358
359 359 rw_exit(&dn->dn_struct_rwlock);
360 360 }
361 361
362 362 DB_DNODE_EXIT(db);
363 363 return (err);
364 364 }
365 365
366 366 int
367 367 dmu_spill_hold_by_bonus(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
368 368 {
369 369 dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
370 370 dnode_t *dn;
371 371 int err;
372 372
373 373 DB_DNODE_ENTER(db);
374 374 dn = DB_DNODE(db);
375 375 err = dmu_spill_hold_by_dnode(dn, DB_RF_CANFAIL, tag, dbp);
376 376 DB_DNODE_EXIT(db);
377 377
378 378 return (err);
379 379 }
380 380
381 381 /*
382 382 * Note: longer-term, we should modify all of the dmu_buf_*() interfaces
383 383 * to take a held dnode rather than <os, object> -- the lookup is wasteful,
384 384 * and can induce severe lock contention when writing to several files
385 385 * whose dnodes are in the same block.
386 386 */
387 387 static int
388 388 dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
389 389 int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp, uint32_t flags)
390 390 {
391 391 dmu_buf_t **dbp;
392 392 uint64_t blkid, nblks, i;
393 393 uint32_t dbuf_flags;
394 394 int err;
395 395 zio_t *zio;
396 396
397 397 ASSERT(length <= DMU_MAX_ACCESS);
398 398
399 399 dbuf_flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT | DB_RF_HAVESTRUCT;
400 400 if (flags & DMU_READ_NO_PREFETCH || length > zfetch_array_rd_sz)
401 401 dbuf_flags |= DB_RF_NOPREFETCH;
402 402
403 403 rw_enter(&dn->dn_struct_rwlock, RW_READER);
404 404 if (dn->dn_datablkshift) {
405 405 int blkshift = dn->dn_datablkshift;
406 406 nblks = (P2ROUNDUP(offset+length, 1ULL<<blkshift) -
407 407 P2ALIGN(offset, 1ULL<<blkshift)) >> blkshift;
408 408 } else {
409 409 if (offset + length > dn->dn_datablksz) {
410 410 zfs_panic_recover("zfs: accessing past end of object "
411 411 "%llx/%llx (size=%u access=%llu+%llu)",
412 412 (longlong_t)dn->dn_objset->
413 413 os_dsl_dataset->ds_object,
414 414 (longlong_t)dn->dn_object, dn->dn_datablksz,
415 415 (longlong_t)offset, (longlong_t)length);
416 416 rw_exit(&dn->dn_struct_rwlock);
417 417 return (SET_ERROR(EIO));
418 418 }
419 419 nblks = 1;
420 420 }
421 421 dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_SLEEP);
422 422
423 423 zio = zio_root(dn->dn_objset->os_spa, NULL, NULL, ZIO_FLAG_CANFAIL);
424 424 blkid = dbuf_whichblock(dn, offset);
425 425 for (i = 0; i < nblks; i++) {
426 426 dmu_buf_impl_t *db = dbuf_hold(dn, blkid+i, tag);
427 427 if (db == NULL) {
428 428 rw_exit(&dn->dn_struct_rwlock);
429 429 dmu_buf_rele_array(dbp, nblks, tag);
430 430 zio_nowait(zio);
431 431 return (SET_ERROR(EIO));
432 432 }
433 433 /* initiate async i/o */
434 434 if (read) {
435 435 (void) dbuf_read(db, zio, dbuf_flags);
436 436 }
437 437 dbp[i] = &db->db;
438 438 }
439 439 rw_exit(&dn->dn_struct_rwlock);
440 440
441 441 /* wait for async i/o */
442 442 err = zio_wait(zio);
443 443 if (err) {
444 444 dmu_buf_rele_array(dbp, nblks, tag);
445 445 return (err);
446 446 }
447 447
448 448 /* wait for other io to complete */
449 449 if (read) {
450 450 for (i = 0; i < nblks; i++) {
451 451 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i];
452 452 mutex_enter(&db->db_mtx);
453 453 while (db->db_state == DB_READ ||
454 454 db->db_state == DB_FILL)
455 455 cv_wait(&db->db_changed, &db->db_mtx);
456 456 if (db->db_state == DB_UNCACHED)
457 457 err = SET_ERROR(EIO);
458 458 mutex_exit(&db->db_mtx);
459 459 if (err) {
460 460 dmu_buf_rele_array(dbp, nblks, tag);
461 461 return (err);
462 462 }
463 463 }
464 464 }
465 465
466 466 *numbufsp = nblks;
467 467 *dbpp = dbp;
468 468 return (0);
469 469 }
470 470
471 471 static int
472 472 dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
473 473 uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
474 474 {
475 475 dnode_t *dn;
476 476 int err;
477 477
478 478 err = dnode_hold(os, object, FTAG, &dn);
479 479 if (err)
480 480 return (err);
481 481
482 482 err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
483 483 numbufsp, dbpp, DMU_READ_PREFETCH);
484 484
485 485 dnode_rele(dn, FTAG);
486 486
487 487 return (err);
488 488 }
489 489
490 490 int
491 491 dmu_buf_hold_array_by_bonus(dmu_buf_t *db_fake, uint64_t offset,
492 492 uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
493 493 {
494 494 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
495 495 dnode_t *dn;
496 496 int err;
497 497
498 498 DB_DNODE_ENTER(db);
499 499 dn = DB_DNODE(db);
500 500 err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
501 501 numbufsp, dbpp, DMU_READ_PREFETCH);
502 502 DB_DNODE_EXIT(db);
503 503
504 504 return (err);
505 505 }
506 506
507 507 void
508 508 dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, void *tag)
509 509 {
510 510 int i;
511 511 dmu_buf_impl_t **dbp = (dmu_buf_impl_t **)dbp_fake;
512 512
513 513 if (numbufs == 0)
514 514 return;
515 515
516 516 for (i = 0; i < numbufs; i++) {
517 517 if (dbp[i])
518 518 dbuf_rele(dbp[i], tag);
519 519 }
520 520
521 521 kmem_free(dbp, sizeof (dmu_buf_t *) * numbufs);
522 522 }
523 523
524 524 /*
525 525 * Issue prefetch i/os for the given blocks.
526 526 *
527 527 * Note: The assumption is that we *know* these blocks will be needed
528 528 * almost immediately. Therefore, the prefetch i/os will be issued at
529 529 * ZIO_PRIORITY_SYNC_READ.
530 530 *
531 531 * Note: indirect blocks and other metadata will be read synchronously,
532 532 * causing this function to block if they are not already cached.
533 533 */
534 534 void
535 535 dmu_prefetch(objset_t *os, uint64_t object, uint64_t offset, uint64_t len)
536 536 {
537 537 dnode_t *dn;
538 538 uint64_t blkid;
539 539 int nblks, err;
540 540
541 541 if (zfs_prefetch_disable)
542 542 return;
543 543
544 544 if (len == 0) { /* they're interested in the bonus buffer */
545 545 dn = DMU_META_DNODE(os);
546 546
547 547 if (object == 0 || object >= DN_MAX_OBJECT)
548 548 return;
549 549
550 550 rw_enter(&dn->dn_struct_rwlock, RW_READER);
551 551 blkid = dbuf_whichblock(dn, object * sizeof (dnode_phys_t));
552 552 dbuf_prefetch(dn, blkid, ZIO_PRIORITY_SYNC_READ);
553 553 rw_exit(&dn->dn_struct_rwlock);
554 554 return;
555 555 }
556 556
557 557 /*
558 558 * XXX - Note, if the dnode for the requested object is not
559 559 * already cached, we will do a *synchronous* read in the
560 560 * dnode_hold() call. The same is true for any indirects.
561 561 */
562 562 err = dnode_hold(os, object, FTAG, &dn);
563 563 if (err != 0)
564 564 return;
565 565
566 566 rw_enter(&dn->dn_struct_rwlock, RW_READER);
567 567 if (dn->dn_datablkshift) {
568 568 int blkshift = dn->dn_datablkshift;
569 569 nblks = (P2ROUNDUP(offset + len, 1 << blkshift) -
570 570 P2ALIGN(offset, 1 << blkshift)) >> blkshift;
571 571 } else {
572 572 nblks = (offset < dn->dn_datablksz);
573 573 }
574 574
575 575 if (nblks != 0) {
576 576 blkid = dbuf_whichblock(dn, offset);
577 577 for (int i = 0; i < nblks; i++)
578 578 dbuf_prefetch(dn, blkid + i, ZIO_PRIORITY_SYNC_READ);
579 579 }
580 580
581 581 rw_exit(&dn->dn_struct_rwlock);
582 582
583 583 dnode_rele(dn, FTAG);
584 584 }
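Illustrative calls, following the comments above: a nonzero length prefetches the covered data blocks at ZIO_PRIORITY_SYNC_READ, while len == 0 prefetches only the dnode/bonus area (the offset and size below are hypothetical):

    /* Prefetch 1MB of file data starting at offset. */
    dmu_prefetch(os, object, offset, 1 << 20);

    /* len == 0: the caller is interested in the bonus buffer. */
    dmu_prefetch(os, object, 0, 0);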
585 585
586 586 /*
587 587 * Get the next "chunk" of file data to free. We traverse the file from
588 588 * the end so that the file gets shorter over time (if we crash in the
589 589 * middle, this will leave us in a better state). We find allocated file
590 590 * data by simply searching the allocated level 1 indirects.
591 591 *
592 592 * On input, *start should be the first offset that does not need to be
593 593 * freed (e.g. "offset + length"). On return, *start will be the first
594 594 * offset that should be freed.
595 595 */
596 596 static int
597 597 get_next_chunk(dnode_t *dn, uint64_t *start, uint64_t minimum)
598 598 {
599 599 uint64_t maxblks = DMU_MAX_ACCESS >> (dn->dn_indblkshift + 1);
600 600 /* bytes of data covered by a level-1 indirect block */
601 601 uint64_t iblkrange =
602 602 dn->dn_datablksz * EPB(dn->dn_indblkshift, SPA_BLKPTRSHIFT);
603 603
604 604 ASSERT3U(minimum, <=, *start);
605 605
606 606 if (*start - minimum <= iblkrange * maxblks) {
607 607 *start = minimum;
608 608 return (0);
609 609 }
610 610 ASSERT(ISP2(iblkrange));
611 611
612 612 for (uint64_t blks = 0; *start > minimum && blks < maxblks; blks++) {
613 613 int err;
614 614
615 615 /*
616 616 * dnode_next_offset(BACKWARDS) will find an allocated L1
617 617 * indirect block at or before the input offset. We must
618 618 * decrement *start so that it is at the end of the region
619 619 * to search.
620 620 */
621 621 (*start)--;
622 622 err = dnode_next_offset(dn,
623 623 DNODE_FIND_BACKWARDS, start, 2, 1, 0);
624 624
625 625 /* if there are no indirect blocks before start, we are done */
626 626 if (err == ESRCH) {
627 627 *start = minimum;
628 628 break;
629 629 } else if (err != 0) {
630 630 return (err);
631 631 }
632 632
633 633 /* set start to the beginning of this L1 indirect */
634 634 *start = P2ALIGN(*start, iblkrange);
635 635 }
636 636 if (*start < minimum)
637 637 *start = minimum;
638 638 return (0);
639 639 }
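To make the chunk sizing concrete, here is the arithmetic for one common configuration, assuming 128K data blocks and 128K indirect blocks (dn_indblkshift == 17, SPA_BLKPTRSHIFT == 7); the numbers are an illustration, not taken from this file:

    /*
     * EPB(17, 7)  == 1 << (17 - 7)  == 1024 blkptrs per L1 indirect
     * iblkrange   == 131072 * 1024  == 128M of file data per L1
     * maxblks     == DMU_MAX_ACCESS >> (17 + 1)
     *
     * So one call either frees everything remaining (if the region
     * fits in iblkrange * maxblks) or walks backwards over at most
     * maxblks allocated L1 indirects.
     */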
640 640
641 641 static int
642 642 dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
643 643 uint64_t length)
644 644 {
645 645 uint64_t object_size = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
646 646 int err;
647 647
648 648 if (offset >= object_size)
649 649 return (0);
650 650
651 651 if (length == DMU_OBJECT_END || offset + length > object_size)
652 652 length = object_size - offset;
653 653
654 654 while (length != 0) {
655 655 uint64_t chunk_end, chunk_begin;
656 656
657 657 chunk_end = chunk_begin = offset + length;
658 658
659 659 /* move chunk_begin backwards to the beginning of this chunk */
660 660 err = get_next_chunk(dn, &chunk_begin, offset);
661 661 if (err)
662 662 return (err);
663 663 ASSERT3U(chunk_begin, >=, offset);
664 664 ASSERT3U(chunk_begin, <=, chunk_end);
665 665
666 666 dmu_tx_t *tx = dmu_tx_create(os);
667 667 dmu_tx_hold_free(tx, dn->dn_object,
668 668 chunk_begin, chunk_end - chunk_begin);
669 669
670 670 /*
671 671 * Mark this transaction as typically resulting in a net
672 672 * reduction in space used.
673 673 */
674 674 dmu_tx_mark_netfree(tx);
675 675 err = dmu_tx_assign(tx, TXG_WAIT);
676 676 if (err) {
677 677 dmu_tx_abort(tx);
678 678 return (err);
679 679 }
680 680 dnode_free_range(dn, chunk_begin, chunk_end - chunk_begin, tx);
681 681 dmu_tx_commit(tx);
682 682
683 683 length -= chunk_end - chunk_begin;
684 684 }
685 685 return (0);
686 686 }
687 687
688 688 int
689 689 dmu_free_long_range(objset_t *os, uint64_t object,
690 690 uint64_t offset, uint64_t length)
691 691 {
692 692 dnode_t *dn;
693 693 int err;
694 694
695 695 err = dnode_hold(os, object, FTAG, &dn);
696 696 if (err != 0)
697 697 return (err);
698 698 err = dmu_free_long_range_impl(os, dn, offset, length);
699 699
700 700 /*
701 701 * It is important to zero out the maxblkid when freeing the entire
702 702 * file, so that (a) subsequent calls to dmu_free_long_range_impl()
703 703 * will take the fast path, and (b) dnode_reallocate() can verify
704 704 * that the entire file has been freed.
705 705 */
706 706 if (err == 0 && offset == 0 && length == DMU_OBJECT_END)
707 707 dn->dn_maxblkid = 0;
708 708
709 709 dnode_rele(dn, FTAG);
710 710 return (err);
711 711 }
712 712
713 713 int
714 714 dmu_free_long_object(objset_t *os, uint64_t object)
715 715 {
716 716 dmu_tx_t *tx;
717 717 int err;
718 718
719 719 err = dmu_free_long_range(os, object, 0, DMU_OBJECT_END);
720 720 if (err != 0)
721 721 return (err);
722 722
723 723 tx = dmu_tx_create(os);
724 724 dmu_tx_hold_bonus(tx, object);
725 725 dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
726 726 dmu_tx_mark_netfree(tx);
727 727 err = dmu_tx_assign(tx, TXG_WAIT);
728 728 if (err == 0) {
729 729 err = dmu_object_free(os, object, tx);
730 730 dmu_tx_commit(tx);
731 731 } else {
732 732 dmu_tx_abort(tx);
733 733 }
734 734
735 735 return (err);
736 736 }
737 737
738 738 int
739 739 dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
740 740 uint64_t size, dmu_tx_t *tx)
741 741 {
742 742 dnode_t *dn;
743 743 int err = dnode_hold(os, object, FTAG, &dn);
744 744 if (err)
745 745 return (err);
746 746 ASSERT(offset < UINT64_MAX);
747 747 ASSERT(size == -1ULL || size <= UINT64_MAX - offset);
748 748 dnode_free_range(dn, offset, size, tx);
749 749 dnode_rele(dn, FTAG);
750 750 return (0);
751 751 }
752 752
753 753 int
754 754 dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
755 755 void *buf, uint32_t flags)
756 756 {
757 757 dnode_t *dn;
758 758 dmu_buf_t **dbp;
759 759 int numbufs, err;
760 760
761 761 err = dnode_hold(os, object, FTAG, &dn);
762 762 if (err)
763 763 return (err);
764 764
765 765 /*
766 766 * Deal with odd block sizes, where there can't be data past the first
767 767 * block. If we ever do the tail block optimization, we will need to
768 768 * handle that here as well.
769 769 */
770 770 if (dn->dn_maxblkid == 0) {
771 771 int newsz = offset > dn->dn_datablksz ? 0 :
772 772 MIN(size, dn->dn_datablksz - offset);
773 773 bzero((char *)buf + newsz, size - newsz);
774 774 size = newsz;
775 775 }
776 776
777 777 while (size > 0) {
778 778 uint64_t mylen = MIN(size, DMU_MAX_ACCESS / 2);
779 779 int i;
780 780
781 781 /*
782 782 * NB: we could do this block-at-a-time, but it's nice
783 783 * to be reading in parallel.
784 784 */
785 785 err = dmu_buf_hold_array_by_dnode(dn, offset, mylen,
786 786 TRUE, FTAG, &numbufs, &dbp, flags);
787 787 if (err)
788 788 break;
789 789
790 790 for (i = 0; i < numbufs; i++) {
791 791 int tocpy;
792 792 int bufoff;
793 793 dmu_buf_t *db = dbp[i];
794 794
795 795 ASSERT(size > 0);
796 796
797 797 bufoff = offset - db->db_offset;
798 798 tocpy = (int)MIN(db->db_size - bufoff, size);
799 799
800 800 bcopy((char *)db->db_data + bufoff, buf, tocpy);
801 801
802 802 offset += tocpy;
803 803 size -= tocpy;
804 804 buf = (char *)buf + tocpy;
805 805 }
806 806 dmu_buf_rele_array(dbp, numbufs, FTAG);
807 807 }
808 808 dnode_rele(dn, FTAG);
809 809 return (err);
810 810 }
811 811
812 812 void
813 813 dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
814 814 const void *buf, dmu_tx_t *tx)
815 815 {
816 816 dmu_buf_t **dbp;
817 817 int numbufs, i;
818 818
819 819 if (size == 0)
820 820 return;
821 821
822 822 VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
823 823 FALSE, FTAG, &numbufs, &dbp));
824 824
825 825 for (i = 0; i < numbufs; i++) {
826 826 int tocpy;
827 827 int bufoff;
828 828 dmu_buf_t *db = dbp[i];
829 829
830 830 ASSERT(size > 0);
831 831
832 832 bufoff = offset - db->db_offset;
833 833 tocpy = (int)MIN(db->db_size - bufoff, size);
834 834
835 835 ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
836 836
837 837 if (tocpy == db->db_size)
838 838 dmu_buf_will_fill(db, tx);
839 839 else
840 840 dmu_buf_will_dirty(db, tx);
841 841
842 842 bcopy(buf, (char *)db->db_data + bufoff, tocpy);
843 843
844 844 if (tocpy == db->db_size)
845 845 dmu_buf_fill_done(db, tx);
846 846
847 847 offset += tocpy;
848 848 size -= tocpy;
849 849 buf = (char *)buf + tocpy;
850 850 }
851 851 dmu_buf_rele_array(dbp, numbufs, FTAG);
852 852 }
853 853
854 854 void
855 855 dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
856 856 dmu_tx_t *tx)
857 857 {
858 858 dmu_buf_t **dbp;
859 859 int numbufs, i;
860 860
861 861 if (size == 0)
862 862 return;
863 863
864 864 VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
865 865 FALSE, FTAG, &numbufs, &dbp));
866 866
867 867 for (i = 0; i < numbufs; i++) {
868 868 dmu_buf_t *db = dbp[i];
869 869
870 870 dmu_buf_will_not_fill(db, tx);
871 871 }
872 872 dmu_buf_rele_array(dbp, numbufs, FTAG);
873 873 }
874 874
875 875 void
876 876 dmu_write_embedded(objset_t *os, uint64_t object, uint64_t offset,
877 877 void *data, uint8_t etype, uint8_t comp, int uncompressed_size,
878 878 int compressed_size, int byteorder, dmu_tx_t *tx)
879 879 {
880 880 dmu_buf_t *db;
881 881
882 882 ASSERT3U(etype, <, NUM_BP_EMBEDDED_TYPES);
883 883 ASSERT3U(comp, <, ZIO_COMPRESS_FUNCTIONS);
884 884 VERIFY0(dmu_buf_hold_noread(os, object, offset,
885 885 FTAG, &db));
886 886
887 887 dmu_buf_write_embedded(db,
888 888 data, (bp_embedded_type_t)etype, (enum zio_compress)comp,
889 889 uncompressed_size, compressed_size, byteorder, tx);
890 890
891 891 dmu_buf_rele(db, FTAG);
892 892 }
893 893
894 894 /*
895 895 * DMU support for xuio
896 896 */
897 897 kstat_t *xuio_ksp = NULL;
898 898
899 899 int
900 900 dmu_xuio_init(xuio_t *xuio, int nblk)
901 901 {
902 902 dmu_xuio_t *priv;
903 903 uio_t *uio = &xuio->xu_uio;
904 904
905 905 uio->uio_iovcnt = nblk;
906 906 uio->uio_iov = kmem_zalloc(nblk * sizeof (iovec_t), KM_SLEEP);
907 907
908 908 priv = kmem_zalloc(sizeof (dmu_xuio_t), KM_SLEEP);
909 909 priv->cnt = nblk;
910 910 priv->bufs = kmem_zalloc(nblk * sizeof (arc_buf_t *), KM_SLEEP);
911 911 priv->iovp = uio->uio_iov;
912 912 XUIO_XUZC_PRIV(xuio) = priv;
913 913
914 914 if (XUIO_XUZC_RW(xuio) == UIO_READ)
915 915 XUIOSTAT_INCR(xuiostat_onloan_rbuf, nblk);
916 916 else
917 917 XUIOSTAT_INCR(xuiostat_onloan_wbuf, nblk);
918 918
919 919 return (0);
920 920 }
921 921
922 922 void
923 923 dmu_xuio_fini(xuio_t *xuio)
924 924 {
925 925 dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
926 926 int nblk = priv->cnt;
927 927
928 928 kmem_free(priv->iovp, nblk * sizeof (iovec_t));
929 929 kmem_free(priv->bufs, nblk * sizeof (arc_buf_t *));
930 930 kmem_free(priv, sizeof (dmu_xuio_t));
931 931
932 932 if (XUIO_XUZC_RW(xuio) == UIO_READ)
933 933 XUIOSTAT_INCR(xuiostat_onloan_rbuf, -nblk);
934 934 else
935 935 XUIOSTAT_INCR(xuiostat_onloan_wbuf, -nblk);
936 936 }
937 937
938 938 /*
939 939 * Initialize iov[priv->next] and priv->bufs[priv->next] with { off, n, abuf }
940 940 * and increase priv->next by 1.
941 941 */
942 942 int
943 943 dmu_xuio_add(xuio_t *xuio, arc_buf_t *abuf, offset_t off, size_t n)
944 944 {
945 945 struct iovec *iov;
946 946 uio_t *uio = &xuio->xu_uio;
947 947 dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
948 948 int i = priv->next++;
949 949
950 950 ASSERT(i < priv->cnt);
951 951 ASSERT(off + n <= arc_buf_size(abuf));
952 952 iov = uio->uio_iov + i;
953 953 iov->iov_base = (char *)abuf->b_data + off;
954 954 iov->iov_len = n;
955 955 priv->bufs[i] = abuf;
956 956 return (0);
957 957 }
958 958
959 959 int
960 960 dmu_xuio_cnt(xuio_t *xuio)
961 961 {
962 962 dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
963 963 return (priv->cnt);
964 964 }
965 965
966 966 arc_buf_t *
967 967 dmu_xuio_arcbuf(xuio_t *xuio, int i)
968 968 {
969 969 dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
970 970
971 971 ASSERT(i < priv->cnt);
972 972 return (priv->bufs[i]);
973 973 }
974 974
975 975 void
976 976 dmu_xuio_clear(xuio_t *xuio, int i)
977 977 {
978 978 dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
979 979
980 980 ASSERT(i < priv->cnt);
981 981 priv->bufs[i] = NULL;
982 982 }
983 983
984 984 static void
985 985 xuio_stat_init(void)
986 986 {
987 987 xuio_ksp = kstat_create("zfs", 0, "xuio_stats", "misc",
988 988 KSTAT_TYPE_NAMED, sizeof (xuio_stats) / sizeof (kstat_named_t),
989 989 KSTAT_FLAG_VIRTUAL);
990 990 if (xuio_ksp != NULL) {
991 991 xuio_ksp->ks_data = &xuio_stats;
992 992 kstat_install(xuio_ksp);
993 993 }
994 994 }
995 995
996 996 static void
997 997 xuio_stat_fini(void)
998 998 {
999 999 if (xuio_ksp != NULL) {
1000 1000 kstat_delete(xuio_ksp);
1001 1001 xuio_ksp = NULL;
1002 1002 }
1003 1003 }
1004 1004
1005 1005 void
1006 1006 xuio_stat_wbuf_copied()
1007 1007 {
1008 1008 XUIOSTAT_BUMP(xuiostat_wbuf_copied);
1009 1009 }
1010 1010
1011 1011 void
1012 1012 xuio_stat_wbuf_nocopy()
1013 1013 {
1014 1014 XUIOSTAT_BUMP(xuiostat_wbuf_nocopy);
1015 1015 }
1016 1016
1017 1017 #ifdef _KERNEL
1018 1018 int
1019 1019 dmu_read_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size)
1020 1020 {
1021 1021 dmu_buf_t **dbp;
1022 1022 int numbufs, i, err;
1023 1023 xuio_t *xuio = NULL;
1024 1024
1025 1025 /*
1026 1026 * NB: we could do this block-at-a-time, but it's nice
1027 1027 * to be reading in parallel.
1028 1028 */
1029 1029 err = dmu_buf_hold_array(os, object, uio->uio_loffset, size, TRUE, FTAG,
1030 1030 &numbufs, &dbp);
1031 1031 if (err)
1032 1032 return (err);
1033 1033
1034 1034 if (uio->uio_extflg == UIO_XUIO)
1035 1035 xuio = (xuio_t *)uio;
1036 1036
1037 1037 for (i = 0; i < numbufs; i++) {
1038 1038 int tocpy;
1039 1039 int bufoff;
1040 1040 dmu_buf_t *db = dbp[i];
1041 1041
1042 1042 ASSERT(size > 0);
1043 1043
1044 1044 bufoff = uio->uio_loffset - db->db_offset;
1045 1045 tocpy = (int)MIN(db->db_size - bufoff, size);
1046 1046
1047 1047 if (xuio) {
1048 1048 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
1049 1049 arc_buf_t *dbuf_abuf = dbi->db_buf;
1050 1050 arc_buf_t *abuf = dbuf_loan_arcbuf(dbi);
1051 1051 err = dmu_xuio_add(xuio, abuf, bufoff, tocpy);
1052 1052 if (!err) {
1053 1053 uio->uio_resid -= tocpy;
1054 1054 uio->uio_loffset += tocpy;
1055 1055 }
1056 1056
1057 1057 if (abuf == dbuf_abuf)
1058 1058 XUIOSTAT_BUMP(xuiostat_rbuf_nocopy);
1059 1059 else
1060 1060 XUIOSTAT_BUMP(xuiostat_rbuf_copied);
1061 1061 } else {
1062 1062 err = uiomove((char *)db->db_data + bufoff, tocpy,
1063 1063 UIO_READ, uio);
1064 1064 }
1065 1065 if (err)
1066 1066 break;
1067 1067
1068 1068 size -= tocpy;
1069 1069 }
1070 1070 dmu_buf_rele_array(dbp, numbufs, FTAG);
1071 1071
1072 1072 return (err);
1073 1073 }
1074 1074
1075 1075 static int
1076 1076 dmu_write_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size, dmu_tx_t *tx)
1077 1077 {
1078 1078 dmu_buf_t **dbp;
1079 1079 int numbufs;
1080 1080 int err = 0;
1081 1081 int i;
1082 1082
1083 1083 err = dmu_buf_hold_array_by_dnode(dn, uio->uio_loffset, size,
1084 1084 FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH);
1085 1085 if (err)
1086 1086 return (err);
1087 1087
1088 1088 for (i = 0; i < numbufs; i++) {
1089 1089 int tocpy;
1090 1090 int bufoff;
1091 1091 dmu_buf_t *db = dbp[i];
1092 1092
1093 1093 ASSERT(size > 0);
1094 1094
1095 1095 bufoff = uio->uio_loffset - db->db_offset;
1096 1096 tocpy = (int)MIN(db->db_size - bufoff, size);
1097 1097
1098 1098 ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
1099 1099
1100 1100 if (tocpy == db->db_size)
1101 1101 dmu_buf_will_fill(db, tx);
1102 1102 else
1103 1103 dmu_buf_will_dirty(db, tx);
1104 1104
1105 1105 /*
1106 1106 * XXX uiomove could block forever (eg. nfs-backed
1107 1107 * pages). There needs to be a uiolockdown() function
1108 1108 * to lock the pages in memory, so that uiomove won't
1109 1109 * block.
1110 1110 */
1111 1111 err = uiomove((char *)db->db_data + bufoff, tocpy,
1112 1112 UIO_WRITE, uio);
1113 1113
1114 1114 if (tocpy == db->db_size)
1115 1115 dmu_buf_fill_done(db, tx);
1116 1116
1117 1117 if (err)
1118 1118 break;
1119 1119
1120 1120 size -= tocpy;
1121 1121 }
1122 1122
1123 1123 dmu_buf_rele_array(dbp, numbufs, FTAG);
1124 1124 return (err);
1125 1125 }
1126 1126
1127 1127 int
1128 1128 dmu_write_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size,
1129 1129 dmu_tx_t *tx)
1130 1130 {
1131 1131 dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
1132 1132 dnode_t *dn;
1133 1133 int err;
1134 1134
1135 1135 if (size == 0)
1136 1136 return (0);
1137 1137
1138 1138 DB_DNODE_ENTER(db);
1139 1139 dn = DB_DNODE(db);
1140 1140 err = dmu_write_uio_dnode(dn, uio, size, tx);
1141 1141 DB_DNODE_EXIT(db);
1142 1142
1143 1143 return (err);
1144 1144 }
1145 1145
1146 1146 int
1147 1147 dmu_write_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size,
1148 1148 dmu_tx_t *tx)
1149 1149 {
1150 1150 dnode_t *dn;
1151 1151 int err;
1152 1152
1153 1153 if (size == 0)
1154 1154 return (0);
1155 1155
1156 1156 err = dnode_hold(os, object, FTAG, &dn);
1157 1157 if (err)
1158 1158 return (err);
1159 1159
1160 1160 err = dmu_write_uio_dnode(dn, uio, size, tx);
1161 1161
1162 1162 dnode_rele(dn, FTAG);
1163 1163
1164 1164 return (err);
1165 1165 }
1166 1166
1167 1167 int
1168 1168 dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
1169 1169 page_t *pp, dmu_tx_t *tx)
1170 1170 {
1171 1171 dmu_buf_t **dbp;
1172 1172 int numbufs, i;
1173 1173 int err;
1174 1174
1175 1175 if (size == 0)
1176 1176 return (0);
1177 1177
1178 1178 err = dmu_buf_hold_array(os, object, offset, size,
1179 1179 FALSE, FTAG, &numbufs, &dbp);
1180 1180 if (err)
1181 1181 return (err);
1182 1182
1183 1183 for (i = 0; i < numbufs; i++) {
1184 1184 int tocpy, copied, thiscpy;
1185 1185 int bufoff;
1186 1186 dmu_buf_t *db = dbp[i];
1187 1187 caddr_t va;
1188 1188
1189 1189 ASSERT(size > 0);
1190 1190 ASSERT3U(db->db_size, >=, PAGESIZE);
1191 1191
1192 1192 bufoff = offset - db->db_offset;
1193 1193 tocpy = (int)MIN(db->db_size - bufoff, size);
1194 1194
1195 1195 ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
1196 1196
1197 1197 if (tocpy == db->db_size)
1198 1198 dmu_buf_will_fill(db, tx);
1199 1199 else
1200 1200 dmu_buf_will_dirty(db, tx);
1201 1201
1202 1202 for (copied = 0; copied < tocpy; copied += PAGESIZE) {
1203 1203 ASSERT3U(pp->p_offset, ==, db->db_offset + bufoff);
1204 1204 thiscpy = MIN(PAGESIZE, tocpy - copied);
1205 1205 va = zfs_map_page(pp, S_READ);
1206 1206 bcopy(va, (char *)db->db_data + bufoff, thiscpy);
1207 1207 zfs_unmap_page(pp, va);
1208 1208 pp = pp->p_next;
1209 1209 bufoff += PAGESIZE;
1210 1210 }
1211 1211
1212 1212 if (tocpy == db->db_size)
1213 1213 dmu_buf_fill_done(db, tx);
1214 1214
1215 1215 offset += tocpy;
1216 1216 size -= tocpy;
1217 1217 }
1218 1218 dmu_buf_rele_array(dbp, numbufs, FTAG);
1219 1219 return (err);
1220 1220 }
1221 1221 #endif
1222 1222
1223 1223 /*
1224 1224 * Allocate a loaned anonymous arc buffer.
1225 1225 */
1226 1226 arc_buf_t *
1227 1227 dmu_request_arcbuf(dmu_buf_t *handle, int size)
1228 1228 {
1229 1229 dmu_buf_impl_t *db = (dmu_buf_impl_t *)handle;
1230 1230
1231 1231 return (arc_loan_buf(db->db_objset->os_spa, size));
1232 1232 }
1233 1233
1234 1234 /*
1235 1235 * Free a loaned arc buffer.
1236 1236 */
1237 1237 void
1238 1238 dmu_return_arcbuf(arc_buf_t *buf)
1239 1239 {
1240 1240 arc_return_buf(buf, FTAG);
1241 1241 VERIFY(arc_buf_remove_ref(buf, FTAG));
1242 1242 }
1243 1243
1244 1244 /*
1245 1245 * When possible directly assign passed loaned arc buffer to a dbuf.
1246 1246 * If this is not possible copy the contents of passed arc buf via
1247 1247 * dmu_write().
1248 1248 */
1249 1249 void
1250 1250 dmu_assign_arcbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf,
1251 1251 dmu_tx_t *tx)
1252 1252 {
1253 1253 dmu_buf_impl_t *dbuf = (dmu_buf_impl_t *)handle;
1254 1254 dnode_t *dn;
1255 1255 dmu_buf_impl_t *db;
1256 1256 uint32_t blksz = (uint32_t)arc_buf_size(buf);
1257 1257 uint64_t blkid;
1258 1258
1259 1259 DB_DNODE_ENTER(dbuf);
1260 1260 dn = DB_DNODE(dbuf);
1261 1261 rw_enter(&dn->dn_struct_rwlock, RW_READER);
1262 1262 blkid = dbuf_whichblock(dn, offset);
1263 1263 VERIFY((db = dbuf_hold(dn, blkid, FTAG)) != NULL);
1264 1264 rw_exit(&dn->dn_struct_rwlock);
1265 1265 DB_DNODE_EXIT(dbuf);
1266 1266
1267 1267 if (offset == db->db.db_offset && blksz == db->db.db_size) {
1268 1268 dbuf_assign_arcbuf(db, buf, tx);
1269 1269 dbuf_rele(db, FTAG);
1270 1270 } else {
1271 1271 objset_t *os;
1272 1272 uint64_t object;
1273 1273
1274 1274 DB_DNODE_ENTER(dbuf);
1275 1275 dn = DB_DNODE(dbuf);
1276 1276 os = dn->dn_objset;
1277 1277 object = dn->dn_object;
1278 1278 DB_DNODE_EXIT(dbuf);
1279 1279
1280 1280 dbuf_rele(db, FTAG);
1281 1281 dmu_write(os, object, offset, blksz, buf->b_data, tx);
1282 1282 dmu_return_arcbuf(buf);
1283 1283 XUIOSTAT_BUMP(xuiostat_wbuf_copied);
1284 1284 }
1285 1285 }
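The loaned-buffer path lets a writer fill an anonymous ARC buffer and then donate it to the dbuf, avoiding a copy when the write covers a whole block. A sketch of the intended calling pattern (src, blksz and off are illustrative names):

    arc_buf_t *abuf = dmu_request_arcbuf(db, blksz);

    bcopy(src, abuf->b_data, blksz);        /* fill while on loan */
    dmu_assign_arcbuf(db, off, abuf, tx);   /* dbuf takes ownership */

If the buffer turns out not to be needed, dmu_return_arcbuf() hands it back instead.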
1286 1286
1287 1287 typedef struct {
1288 1288 dbuf_dirty_record_t *dsa_dr;
1289 1289 dmu_sync_cb_t *dsa_done;
1290 1290 zgd_t *dsa_zgd;
1291 1291 dmu_tx_t *dsa_tx;
1292 1292 } dmu_sync_arg_t;
1293 1293
1294 1294 /* ARGSUSED */
1295 1295 static void
1296 1296 dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg)
1297 1297 {
1298 1298 dmu_sync_arg_t *dsa = varg;
1299 1299 dmu_buf_t *db = dsa->dsa_zgd->zgd_db;
1300 1300 blkptr_t *bp = zio->io_bp;
1301 1301
1302 1302 if (zio->io_error == 0) {
1303 1303 if (BP_IS_HOLE(bp)) {
1304 1304 /*
1305 1305 * A block of zeros may compress to a hole, but the
1306 1306 * block size still needs to be known for replay.
1307 1307 */
1308 1308 BP_SET_LSIZE(bp, db->db_size);
1309 1309 } else if (!BP_IS_EMBEDDED(bp)) {
1310 1310 ASSERT(BP_GET_LEVEL(bp) == 0);
1311 1311 bp->blk_fill = 1;
1312 1312 }
1313 1313 }
1314 1314 }
1315 1315
1316 1316 static void
1317 1317 dmu_sync_late_arrival_ready(zio_t *zio)
1318 1318 {
1319 1319 dmu_sync_ready(zio, NULL, zio->io_private);
1320 1320 }
1321 1321
1322 1322 /* ARGSUSED */
1323 1323 static void
1324 1324 dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg)
1325 1325 {
1326 1326 dmu_sync_arg_t *dsa = varg;
1327 1327 dbuf_dirty_record_t *dr = dsa->dsa_dr;
1328 1328 dmu_buf_impl_t *db = dr->dr_dbuf;
1329 1329
1330 1330 mutex_enter(&db->db_mtx);
1331 1331 ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC);
1332 1332 if (zio->io_error == 0) {
1333 1333 dr->dt.dl.dr_nopwrite = !!(zio->io_flags & ZIO_FLAG_NOPWRITE);
1334 1334 if (dr->dt.dl.dr_nopwrite) {
1335 1335 blkptr_t *bp = zio->io_bp;
1336 1336 blkptr_t *bp_orig = &zio->io_bp_orig;
1337 1337 uint8_t chksum = BP_GET_CHECKSUM(bp_orig);
1338 1338
1339 1339 ASSERT(BP_EQUAL(bp, bp_orig));
1340 1340 ASSERT(zio->io_prop.zp_compress != ZIO_COMPRESS_OFF);
1341 1341 ASSERT(zio_checksum_table[chksum].ci_dedup);
1342 1342 }
1343 1343 dr->dt.dl.dr_overridden_by = *zio->io_bp;
1344 1344 dr->dt.dl.dr_override_state = DR_OVERRIDDEN;
1345 1345 dr->dt.dl.dr_copies = zio->io_prop.zp_copies;
1346 1346 if (BP_IS_HOLE(&dr->dt.dl.dr_overridden_by))
1347 1347 BP_ZERO(&dr->dt.dl.dr_overridden_by);
1348 1348 } else {
1349 1349 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
1350 1350 }
1351 1351 cv_broadcast(&db->db_changed);
1352 1352 mutex_exit(&db->db_mtx);
1353 1353
1354 1354 dsa->dsa_done(dsa->dsa_zgd, zio->io_error);
1355 1355
1356 1356 kmem_free(dsa, sizeof (*dsa));
1357 1357 }
1358 1358
1359 1359 static void
1360 1360 dmu_sync_late_arrival_done(zio_t *zio)
1361 1361 {
1362 1362 blkptr_t *bp = zio->io_bp;
1363 1363 dmu_sync_arg_t *dsa = zio->io_private;
1364 1364 blkptr_t *bp_orig = &zio->io_bp_orig;
1365 1365
1366 1366 if (zio->io_error == 0 && !BP_IS_HOLE(bp)) {
1367 1367 /*
1368 1368 * If we didn't allocate a new block (i.e. ZIO_FLAG_NOPWRITE)
1369 1369 * then there is nothing to do here. Otherwise, free the
1370 1370 * newly allocated block in this txg.
1371 1371 */
1372 1372 if (zio->io_flags & ZIO_FLAG_NOPWRITE) {
1373 1373 ASSERT(BP_EQUAL(bp, bp_orig));
1374 1374 } else {
1375 1375 ASSERT(BP_IS_HOLE(bp_orig) || !BP_EQUAL(bp, bp_orig));
1376 1376 ASSERT(zio->io_bp->blk_birth == zio->io_txg);
1377 1377 ASSERT(zio->io_txg > spa_syncing_txg(zio->io_spa));
1378 1378 zio_free(zio->io_spa, zio->io_txg, zio->io_bp);
1379 1379 }
1380 1380 }
1381 1381
1382 1382 dmu_tx_commit(dsa->dsa_tx);
1383 1383
1384 1384 dsa->dsa_done(dsa->dsa_zgd, zio->io_error);
1385 1385
1386 1386 kmem_free(dsa, sizeof (*dsa));
1387 1387 }
1388 1388
1389 1389 static int
1390 1390 dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd,
1391 1391 zio_prop_t *zp, zbookmark_phys_t *zb)
1392 1392 {
1393 1393 dmu_sync_arg_t *dsa;
1394 1394 dmu_tx_t *tx;
1395 1395
1396 1396 tx = dmu_tx_create(os);
1397 1397 dmu_tx_hold_space(tx, zgd->zgd_db->db_size);
1398 1398 if (dmu_tx_assign(tx, TXG_WAIT) != 0) {
1399 1399 dmu_tx_abort(tx);
1400 1400 /* Make zl_get_data do txg_wait_synced() */
1401 1401 return (SET_ERROR(EIO));
1402 1402 }
1403 1403
1404 1404 dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
1405 1405 dsa->dsa_dr = NULL;
1406 1406 dsa->dsa_done = done;
1407 1407 dsa->dsa_zgd = zgd;
1408 1408 dsa->dsa_tx = tx;
1409 1409
1410 1410 zio_nowait(zio_write(pio, os->os_spa, dmu_tx_get_txg(tx), zgd->zgd_bp,
1411 1411 zgd->zgd_db->db_data, zgd->zgd_db->db_size, zp,
1412 1412 dmu_sync_late_arrival_ready, NULL, dmu_sync_late_arrival_done, dsa,
1413 1413 ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, zb));
1414 1414
1415 1415 return (0);
1416 1416 }
1417 1417
1418 1418 /*
1419 1419 * Intent log support: sync the block associated with db to disk.
1420 1420 * N.B. and XXX: the caller is responsible for making sure that the
1421 1421 * data isn't changing while dmu_sync() is writing it.
1422 1422 *
1423 1423 * Return values:
1424 1424 *
1425 1425 * EEXIST: this txg has already been synced, so there's nothing to do.
1426 1426 * The caller should not log the write.
1427 1427 *
1428 1428 * ENOENT: the block was dbuf_free_range()'d, so there's nothing to do.
1429 1429 * The caller should not log the write.
1430 1430 *
1431 1431 * EALREADY: this block is already in the process of being synced.
1432 1432 * The caller should track its progress (somehow).
1433 1433 *
1434 1434 * EIO: could not do the I/O.
1435 1435 * The caller should do a txg_wait_synced().
1436 1436 *
1437 1437 * 0: the I/O has been initiated.
1438 1438 * The caller should log this blkptr in the done callback.
1439 1439 * It is possible that the I/O will fail, in which case
1440 1440 * the error will be reported to the done callback and
1441 1441 * propagated to pio from zio_done().
1442 1442 */
1443 1443 int
1444 1444 dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd)
1445 1445 {
1446 1446 blkptr_t *bp = zgd->zgd_bp;
1447 1447 dmu_buf_impl_t *db = (dmu_buf_impl_t *)zgd->zgd_db;
1448 1448 objset_t *os = db->db_objset;
1449 1449 dsl_dataset_t *ds = os->os_dsl_dataset;
1450 1450 dbuf_dirty_record_t *dr;
1451 1451 dmu_sync_arg_t *dsa;
1452 1452 zbookmark_phys_t zb;
1453 1453 zio_prop_t zp;
1454 1454 dnode_t *dn;
1455 1455
1456 1456 ASSERT(pio != NULL);
1457 1457 ASSERT(txg != 0);
1458 1458
1459 1459 SET_BOOKMARK(&zb, ds->ds_object,
1460 1460 db->db.db_object, db->db_level, db->db_blkid);
1461 1461
1462 1462 DB_DNODE_ENTER(db);
1463 1463 dn = DB_DNODE(db);
1464 1464 dmu_write_policy(os, dn, db->db_level, WP_DMU_SYNC, &zp);
1465 1465 DB_DNODE_EXIT(db);
1466 1466
1467 1467 /*
1468 1468 * If we're frozen (running ziltest), we always need to generate a bp.
1469 1469 */
1470 1470 if (txg > spa_freeze_txg(os->os_spa))
1471 1471 return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
1472 1472
1473 1473 /*
1474 1474 * Grabbing db_mtx now provides a barrier between dbuf_sync_leaf()
1475 1475 * and us. If we determine that this txg is not yet syncing,
1476 1476 * but it begins to sync a moment later, that's OK because the
1477 1477 * sync thread will block in dbuf_sync_leaf() until we drop db_mtx.
1478 1478 */
1479 1479 mutex_enter(&db->db_mtx);
1480 1480
1481 1481 if (txg <= spa_last_synced_txg(os->os_spa)) {
1482 1482 /*
1483 1483 * This txg has already synced. There's nothing to do.
1484 1484 */
1485 1485 mutex_exit(&db->db_mtx);
1486 1486 return (SET_ERROR(EEXIST));
1487 1487 }
1488 1488
1489 1489 if (txg <= spa_syncing_txg(os->os_spa)) {
1490 1490 /*
1491 1491 * This txg is currently syncing, so we can't mess with
1492 1492 * the dirty record anymore; just write a new log block.
1493 1493 */
1494 1494 mutex_exit(&db->db_mtx);
1495 1495 return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
1496 1496 }
1497 1497
1498 1498 dr = db->db_last_dirty;
1499 1499 while (dr && dr->dr_txg != txg)
1500 1500 dr = dr->dr_next;
1501 1501
1502 1502 if (dr == NULL) {
1503 1503 /*
1504 1504 * There's no dr for this dbuf, so it must have been freed.
1505 1505 * There's no need to log writes to freed blocks, so we're done.
1506 1506 */
1507 1507 mutex_exit(&db->db_mtx);
1508 1508 return (SET_ERROR(ENOENT));
1509 1509 }
1510 1510
1511 1511 ASSERT(dr->dr_next == NULL || dr->dr_next->dr_txg < txg);
1512 1512
1513 1513 /*
1514 1514 * Assume the on-disk data is X, the current syncing data is Y,
1515 1515 * and the current in-memory data is Z (currently in dmu_sync).
1516 1516 * X and Z are identical but Y has been modified. Normally,
1517 1517 * when X and Z are the same we will perform a nopwrite but if Y
1518 1518 * is different we must disable nopwrite since the resulting write
1519 1519 * of Y to disk can free the block containing X. If we allowed a
1520 1520 * nopwrite to occur the block pointing to Z would reference a freed
1521 1521 * block. Since this is a rare case we simplify this by disabling
1522 1522 * nopwrite if the current dmu_sync-ing dbuf has been modified in
1523 1523 * a previous transaction.
1524 1524 */
1525 1525 if (dr->dr_next)
1526 1526 zp.zp_nopwrite = B_FALSE;
1527 1527
1528 1528 ASSERT(dr->dr_txg == txg);
1529 1529 if (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC ||
1530 1530 dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
1531 1531 /*
1532 1532 * We have already issued a sync write for this buffer,
1533 1533 * or this buffer has already been synced. It could not
1534 1534 * have been dirtied since, or we would have cleared the state.
1535 1535 */
1536 1536 mutex_exit(&db->db_mtx);
1537 1537 return (SET_ERROR(EALREADY));
1538 1538 }
1539 1539
1540 1540 ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
1541 1541 dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC;
1542 1542 mutex_exit(&db->db_mtx);
1543 1543
1544 1544 dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
1545 1545 dsa->dsa_dr = dr;
1546 1546 dsa->dsa_done = done;
1547 1547 dsa->dsa_zgd = zgd;
1548 1548 dsa->dsa_tx = NULL;
1549 1549
1550 1550 zio_nowait(arc_write(pio, os->os_spa, txg,
1551 1551 bp, dr->dt.dl.dr_data, DBUF_IS_L2CACHEABLE(db),
1552 1552 DBUF_IS_L2COMPRESSIBLE(db), &zp, dmu_sync_ready,
1553 1553 NULL, dmu_sync_done, dsa, ZIO_PRIORITY_SYNC_WRITE,
1554 1554 ZIO_FLAG_CANFAIL, &zb));
1555 1555
1556 1556 return (0);
1557 1557 }
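The return-value contract documented above maps onto a caller (typically a ZIL get_data callback) roughly as follows; log_write_example is a hypothetical name, not a function in this tree:

    static int
    log_write_example(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done,
        zgd_t *zgd)
    {
            int error = dmu_sync(pio, txg, done, zgd);

            switch (error) {
            case 0:         /* I/O initiated; log bp in done callback */
            case EEXIST:    /* txg already synced; do not log */
            case ENOENT:    /* block was freed; do not log */
            case EALREADY:  /* already being synced; track progress */
            default:        /* EIO etc.; caller txg_wait_synced()s */
                    return (error);
            }
    }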
1558 1558
1559 1559 int
1560 1560 dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs,
1561 1561 dmu_tx_t *tx)
1562 1562 {
1563 1563 dnode_t *dn;
1564 1564 int err;
1565 1565
1566 1566 err = dnode_hold(os, object, FTAG, &dn);
1567 1567 if (err)
1568 1568 return (err);
1569 1569 err = dnode_set_blksz(dn, size, ibs, tx);
1570 1570 dnode_rele(dn, FTAG);
1571 1571 return (err);
1572 1572 }
1573 1573
1574 1574 void
1575 1575 dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum,
1576 1576 dmu_tx_t *tx)
1577 1577 {
1578 1578 dnode_t *dn;
1579 1579
1580 1580 /*
1581 1581 * Send streams include each object's checksum function. This
1582 1582 * check ensures that the receiving system can understand the
1583 1583 * checksum function transmitted.
1584 1584 */
1585 1585 ASSERT3U(checksum, <, ZIO_CHECKSUM_LEGACY_FUNCTIONS);
1586 1586
1587 1587 VERIFY0(dnode_hold(os, object, FTAG, &dn));
1588 1588 ASSERT3U(checksum, <, ZIO_CHECKSUM_FUNCTIONS);
1589 1589 dn->dn_checksum = checksum;
1590 1590 dnode_setdirty(dn, tx);
1591 1591 dnode_rele(dn, FTAG);
1592 1592 }
1593 1593
1594 1594 void
1595 1595 dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
1596 1596 dmu_tx_t *tx)
1597 1597 {
1598 1598 dnode_t *dn;
1599 1599
1600 1600 /*
1601 1601 * Send streams include each object's compression function. This
1602 1602 * check ensures that the receiving system can understand the
1603 1603 * compression function transmitted.
1604 1604 */
1605 1605 ASSERT3U(compress, <, ZIO_COMPRESS_LEGACY_FUNCTIONS);
1606 1606
1607 1607 VERIFY0(dnode_hold(os, object, FTAG, &dn));
1608 1608 dn->dn_compress = compress;
1609 1609 dnode_setdirty(dn, tx);
1610 1610 dnode_rele(dn, FTAG);
1611 1611 }
1612 1612
1613 1613 int zfs_mdcomp_disable = 0;
1614 1614
1615 1615 /*
1616 1616 * When the "redundant_metadata" property is set to "most", only indirect
1617 1617 * blocks of this level and higher will have an additional ditto block.
1618 1618 */
1619 1619 int zfs_redundant_metadata_most_ditto_level = 2;
1620 1620
1621 1621 void
1622 1622 dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp)
1623 1623 {
1624 1624 dmu_object_type_t type = dn ? dn->dn_type : DMU_OT_OBJSET;
1625 1625 boolean_t ismd = (level > 0 || DMU_OT_IS_METADATA(type) ||
1626 1626 (wp & WP_SPILL));
1627 1627 enum zio_checksum checksum = os->os_checksum;
1628 1628 enum zio_compress compress = os->os_compress;
1629 1629 enum zio_checksum dedup_checksum = os->os_dedup_checksum;
1630 1630 boolean_t dedup = B_FALSE;
1631 1631 boolean_t nopwrite = B_FALSE;
1632 1632 boolean_t dedup_verify = os->os_dedup_verify;
1633 1633 int copies = os->os_copies;
1634 1634
1635 1635 /*
1636 1636 * We maintain different write policies for each of the following
1637 1637 * types of data:
1638 1638 * 1. metadata
1639 1639 * 2. preallocated blocks (i.e. level-0 blocks of a dump device)
1640 1640 * 3. all other level 0 blocks
1641 1641 */
1642 1642 if (ismd) {
1643 1643 /*
1644 1644 * XXX -- we should design a compression algorithm
1645 1645 * that specializes in arrays of bps.
1646 1646 */
1647 1647 boolean_t lz4_ac = spa_feature_is_active(os->os_spa,
1648 1648 SPA_FEATURE_LZ4_COMPRESS);
1649 1649
1650 1650 if (zfs_mdcomp_disable) {
1651 1651 compress = ZIO_COMPRESS_EMPTY;
1652 1652 } else if (lz4_ac) {
1653 1653 compress = ZIO_COMPRESS_LZ4;
1654 1654 } else {
1655 1655 compress = ZIO_COMPRESS_LZJB;
1656 1656 }
1657 1657
1658 1658 /*
1659 1659 * Metadata always gets checksummed. If the data
1660 1660 * checksum is multi-bit correctable, and it's not a
1661 1661 * ZBT-style checksum, then it's suitable for metadata
1662 1662 * as well. Otherwise, the metadata checksum defaults
1663 1663 * to fletcher4.
1664 1664 */
1665 1665 if (zio_checksum_table[checksum].ci_correctable < 1 ||
1666 1666 zio_checksum_table[checksum].ci_eck)
1667 1667 checksum = ZIO_CHECKSUM_FLETCHER_4;
1668 1668
1669 1669 if (os->os_redundant_metadata == ZFS_REDUNDANT_METADATA_ALL ||
1670 1670 (os->os_redundant_metadata ==
1671 1671 ZFS_REDUNDANT_METADATA_MOST &&
1672 1672 (level >= zfs_redundant_metadata_most_ditto_level ||
1673 1673 DMU_OT_IS_METADATA(type) || (wp & WP_SPILL))))
1674 1674 copies++;
1675 1675 } else if (wp & WP_NOFILL) {
1676 1676 ASSERT(level == 0);
1677 1677
1678 1678 /*
1679 1679 * If we're writing preallocated blocks, we aren't actually
1680 1680 * writing them so don't set any policy properties. These
1681 1681 * blocks are currently only used by an external subsystem
1682 1682 * outside of zfs (i.e. dump) and not written by the zio
1683 1683 * pipeline.
1684 1684 */
1685 1685 compress = ZIO_COMPRESS_OFF;
1686 1686 checksum = ZIO_CHECKSUM_NOPARITY;
1687 1687 } else {
1688 1688 compress = zio_compress_select(dn->dn_compress, compress);
1689 1689
1690 1690 checksum = (dedup_checksum == ZIO_CHECKSUM_OFF) ?
1691 1691 zio_checksum_select(dn->dn_checksum, checksum) :
1692 1692 dedup_checksum;
1693 1693
1694 1694 /*
1695 1695 * Determine dedup setting. If we are in dmu_sync(),
1696 1696 * we won't actually dedup now because that's all
1697 1697 * done in syncing context; but we do want to use the
1698 1698 * dedup checksum. If the checksum is not strong
1699 1699 * enough to ensure unique signatures, force
1700 1700 * dedup_verify.
1701 1701 */
1702 1702 if (dedup_checksum != ZIO_CHECKSUM_OFF) {
1703 1703 dedup = (wp & WP_DMU_SYNC) ? B_FALSE : B_TRUE;
1704 1704 if (!zio_checksum_table[checksum].ci_dedup)
1705 1705 dedup_verify = B_TRUE;
1706 1706 }
1707 1707
1708 1708 /*
1709 1709 * Enable nopwrite if we have a cryptographically secure
1710 1710 * checksum that has no known collisions (i.e. SHA-256)
1711 1711 * and compression is enabled. We don't enable nopwrite if
1712 1712 * dedup is enabled as the two features are mutually exclusive.
1713 1713 */
1714 1714 nopwrite = (!dedup && zio_checksum_table[checksum].ci_dedup &&
1715 1715 compress != ZIO_COMPRESS_OFF && zfs_nopwrite_enabled);
1716 1716 }
1717 1717
1718 1718 zp->zp_checksum = checksum;
1719 1719 zp->zp_compress = compress;
1720 1720 zp->zp_type = (wp & WP_SPILL) ? dn->dn_bonustype : type;
1721 1721 zp->zp_level = level;
1722 1722 zp->zp_copies = MIN(copies, spa_max_replication(os->os_spa));
1723 1723 zp->zp_dedup = dedup;
1724 1724 zp->zp_dedup_verify = dedup && dedup_verify;
1725 1725 zp->zp_nopwrite = nopwrite;
1726 1726 }
1727 1727
1728 1728 int
1729 1729 dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off)
1730 1730 {
1731 1731 dnode_t *dn;
1732 1732 int i, err;
1733 1733
1734 1734 err = dnode_hold(os, object, FTAG, &dn);
1735 1735 if (err)
1736 1736 return (err);
1737 1737 /*
1738 1738 * Sync any current changes before
1739 1739 * we go trundling through the block pointers.
1740 1740 */
1741 1741 for (i = 0; i < TXG_SIZE; i++) {
1742 1742 if (list_link_active(&dn->dn_dirty_link[i]))
1743 1743 break;
1744 1744 }
1745 1745 if (i != TXG_SIZE) {
1746 1746 dnode_rele(dn, FTAG);
1747 1747 txg_wait_synced(dmu_objset_pool(os), 0);
1748 1748 err = dnode_hold(os, object, FTAG, &dn);
1749 1749 if (err)
1750 1750 return (err);
1751 1751 }
1752 1752
1753 1753 err = dnode_next_offset(dn, (hole ? DNODE_FIND_HOLE : 0), off, 1, 1, 0);
1754 1754 dnode_rele(dn, FTAG);
1755 1755
1756 1756 return (err);
1757 1757 }
1758 1758
1759 1759 void
1760 1760 dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
1761 1761 {
1762 1762 dnode_phys_t *dnp;
1763 1763
1764 1764 rw_enter(&dn->dn_struct_rwlock, RW_READER);
1765 1765 mutex_enter(&dn->dn_mtx);
1766 1766
1767 1767 dnp = dn->dn_phys;
1768 1768
1769 1769 doi->doi_data_block_size = dn->dn_datablksz;
1770 1770 doi->doi_metadata_block_size = dn->dn_indblkshift ?
1771 1771 1ULL << dn->dn_indblkshift : 0;
1772 1772 doi->doi_type = dn->dn_type;
1773 1773 doi->doi_bonus_type = dn->dn_bonustype;
1774 1774 doi->doi_bonus_size = dn->dn_bonuslen;
1775 1775 doi->doi_indirection = dn->dn_nlevels;
1776 1776 doi->doi_checksum = dn->dn_checksum;
1777 1777 doi->doi_compress = dn->dn_compress;
1778 1778 doi->doi_physical_blocks_512 = (DN_USED_BYTES(dnp) + 256) >> 9;
1779 1779 doi->doi_max_offset = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
1780 1780 doi->doi_fill_count = 0;
1781 1781 for (int i = 0; i < dnp->dn_nblkptr; i++)
1782 1782 doi->doi_fill_count += BP_GET_FILL(&dnp->dn_blkptr[i]);
1783 1783
1784 1784 mutex_exit(&dn->dn_mtx);
1785 1785 rw_exit(&dn->dn_struct_rwlock);
1786 1786 }
1787 1787
1788 1788 /*
1789 1789 * Get information on a DMU object.
1790 1790 * If doi is NULL, just indicates whether the object exists.
1791 1791 */
1792 1792 int
1793 1793 dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi)
1794 1794 {
1795 1795 dnode_t *dn;
1796 1796 int err = dnode_hold(os, object, FTAG, &dn);
1797 1797
1798 1798 if (err)
1799 1799 return (err);
1800 1800
1801 1801 if (doi != NULL)
1802 1802 dmu_object_info_from_dnode(dn, doi);
1803 1803
1804 1804 dnode_rele(dn, FTAG);
1805 1805 return (0);
1806 1806 }
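Per the comment above, passing a NULL doi reduces this to an existence test; a one-line illustrative use:

    /* B_TRUE iff the object exists (illustrative). */
    boolean_t exists = (dmu_object_info(os, object, NULL) == 0);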
1807 1807
1808 1808 /*
1809 1809 * As above, but faster; can be used when you have a held dbuf in hand.
1810 1810 */
1811 1811 void
1812 1812 dmu_object_info_from_db(dmu_buf_t *db_fake, dmu_object_info_t *doi)
1813 1813 {
1814 1814 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1815 1815
1816 1816 DB_DNODE_ENTER(db);
1817 1817 dmu_object_info_from_dnode(DB_DNODE(db), doi);
1818 1818 DB_DNODE_EXIT(db);
1819 1819 }
1820 1820
1821 1821 /*
1822 1822 * Faster still when you only care about the size.
1823 1823 * This is specifically optimized for zfs_getattr().
1824 1824 */
1825 1825 void
1826 1826 dmu_object_size_from_db(dmu_buf_t *db_fake, uint32_t *blksize,
1827 1827 u_longlong_t *nblk512)
1828 1828 {
1829 1829 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1830 1830 dnode_t *dn;
1831 1831
1832 1832 DB_DNODE_ENTER(db);
1833 1833 dn = DB_DNODE(db);
1834 1834
1835 1835 *blksize = dn->dn_datablksz;
1836 1836 /* add 1 for dnode space */
1837 1837 *nblk512 = ((DN_USED_BYTES(dn->dn_phys) + SPA_MINBLOCKSIZE/2) >>
1838 1838 SPA_MINBLOCKSHIFT) + 1;
1839 1839 DB_DNODE_EXIT(db);
1840 1840 }
1841 1841
1842 1842 void
1843 1843 byteswap_uint64_array(void *vbuf, size_t size)
1844 1844 {
1845 1845 uint64_t *buf = vbuf;
1846 1846 size_t count = size >> 3;
1847 1847 int i;
1848 1848
1849 1849 ASSERT((size & 7) == 0);
1850 1850
1851 1851 for (i = 0; i < count; i++)
1852 1852 buf[i] = BSWAP_64(buf[i]);
1853 1853 }
1854 1854
1855 1855 void
1856 1856 byteswap_uint32_array(void *vbuf, size_t size)
1857 1857 {
1858 1858 uint32_t *buf = vbuf;
1859 1859 size_t count = size >> 2;
1860 1860 int i;
1861 1861
1862 1862 ASSERT((size & 3) == 0);
1863 1863
1864 1864 for (i = 0; i < count; i++)
1865 1865 buf[i] = BSWAP_32(buf[i]);
1866 1866 }
1867 1867
1868 1868 void
1869 1869 byteswap_uint16_array(void *vbuf, size_t size)
1870 1870 {
1871 1871 uint16_t *buf = vbuf;
1872 1872 size_t count = size >> 1;
1873 1873 int i;
1874 1874
1875 1875 ASSERT((size & 1) == 0);
1876 1876
1877 1877 for (i = 0; i < count; i++)
1878 1878 buf[i] = BSWAP_16(buf[i]);
1879 1879 }
1880 1880
1881 1881 /* ARGSUSED */
1882 1882 void
1883 1883 byteswap_uint8_array(void *vbuf, size_t size)
1884 1884 {
1885 1885 }
1886 1886
1887 1887 void
1888 1888 dmu_init(void)
1889 1889 {
1890 1890 zfs_dbgmsg_init();
1891 1891 sa_cache_init();
1892 1892 xuio_stat_init();
1893 1893 dmu_objset_init();
1894 1894 dnode_init();
1895 1895 dbuf_init();
1896 1896 zfetch_init();
1897 1897 l2arc_init();
1898 1898 arc_init();
1899 1899 }
1900 1900
1901 1901 void
1902 1902 dmu_fini(void)
1903 1903 {
1904 1904 arc_fini(); /* arc depends on l2arc, so arc must go first */
1905 1905 l2arc_fini();
1906 1906 zfetch_fini();
1907 1907 dbuf_fini();
1908 1908 dnode_fini();
1909 1909 dmu_objset_fini();
1910 1910 xuio_stat_fini();
1911 1911 sa_cache_fini();
1912 1912 zfs_dbgmsg_fini();
1913 1913 }