/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/range_tree.h>

static kmem_cache_t *dnode_cache;
/*
 * Define DNODE_STATS to turn on statistic gathering. By default, it is only
 * turned on when DEBUG is also defined.
 */
#ifdef	DEBUG
#define	DNODE_STATS
#endif	/* DEBUG */

#ifdef	DNODE_STATS
#define	DNODE_STAT_ADD(stat)			((stat)++)
#else
#define	DNODE_STAT_ADD(stat)			/* nothing */
#endif	/* DNODE_STATS */

static dnode_phys_t dnode_phys_zero;

int zfs_default_bs = SPA_MINBLOCKSHIFT;
int zfs_default_ibs = DN_MAX_INDBLKSHIFT;

static kmem_cbrc_t dnode_move(void *, void *, size_t, void *);

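/*
 * AVL comparator for a dnode's dn_dbufs tree: order by level, then by
 * block id.  A DB_SEARCH sentinel sorts before any real dbuf with the
 * same level and blkid, so callers can locate the start of a range via
 * avl_find().  Otherwise-equal dbufs are ordered by address, which
 * allows duplicate (level, blkid) entries to coexist in the tree.
 */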
static int
dbuf_compare(const void *x1, const void *x2)
{
	const dmu_buf_impl_t *d1 = x1;
	const dmu_buf_impl_t *d2 = x2;

	if (d1->db_level < d2->db_level) {
		return (-1);
	}
	if (d1->db_level > d2->db_level) {
		return (1);
	}

	if (d1->db_blkid < d2->db_blkid) {
		return (-1);
	}
	if (d1->db_blkid > d2->db_blkid) {
		return (1);
	}

	if (d1->db_state == DB_SEARCH) {
		ASSERT3S(d2->db_state, !=, DB_SEARCH);
		return (-1);
	} else if (d2->db_state == DB_SEARCH) {
		ASSERT3S(d1->db_state, !=, DB_SEARCH);
		return (1);
	}

	if ((uintptr_t)d1 < (uintptr_t)d2) {
		return (-1);
	}
	if ((uintptr_t)d1 > (uintptr_t)d2) {
		return (1);
	}
	return (0);
}

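/*
 * Kmem cache constructor for dnode_t.  It runs when a cache object is
 * first constructed, not on every dnode_create(), so dnodes must be
 * returned to this pristine state when freed back to the cache (the
 * assertions in dnode_dest() enforce that).
 */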
/* ARGSUSED */
static int
dnode_cons(void *arg, void *unused, int kmflag)
{
	dnode_t *dn = arg;
	int i;

	rw_init(&dn->dn_struct_rwlock, NULL, RW_DEFAULT, NULL);
	mutex_init(&dn->dn_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&dn->dn_dbufs_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dn->dn_notxholds, NULL, CV_DEFAULT, NULL);

	/*
	 * Every dbuf has a reference, and dropping a tracked reference is
	 * O(number of references), so don't track dn_holds.
	 */
	refcount_create_untracked(&dn->dn_holds);
	refcount_create(&dn->dn_tx_holds);
	list_link_init(&dn->dn_link);

	bzero(&dn->dn_next_nblkptr[0], sizeof (dn->dn_next_nblkptr));
	bzero(&dn->dn_next_nlevels[0], sizeof (dn->dn_next_nlevels));
	bzero(&dn->dn_next_indblkshift[0], sizeof (dn->dn_next_indblkshift));
	bzero(&dn->dn_next_bonustype[0], sizeof (dn->dn_next_bonustype));
	bzero(&dn->dn_rm_spillblk[0], sizeof (dn->dn_rm_spillblk));
	bzero(&dn->dn_next_bonuslen[0], sizeof (dn->dn_next_bonuslen));
	bzero(&dn->dn_next_blksz[0], sizeof (dn->dn_next_blksz));

	for (i = 0; i < TXG_SIZE; i++) {
		list_link_init(&dn->dn_dirty_link[i]);
		dn->dn_free_ranges[i] = NULL;
		list_create(&dn->dn_dirty_records[i],
		    sizeof (dbuf_dirty_record_t),
		    offsetof(dbuf_dirty_record_t, dr_dirty_node));
	}

	dn->dn_allocated_txg = 0;
	dn->dn_free_txg = 0;
	dn->dn_assigned_txg = 0;
	dn->dn_dirtyctx = 0;
	dn->dn_dirtyctx_firstset = NULL;
	dn->dn_bonus = NULL;
	dn->dn_have_spill = B_FALSE;
	dn->dn_zio = NULL;
	dn->dn_oldused = 0;
	dn->dn_oldflags = 0;
	dn->dn_olduid = 0;
	dn->dn_oldgid = 0;
	dn->dn_newuid = 0;
	dn->dn_newgid = 0;
	dn->dn_id_flags = 0;

	dn->dn_dbufs_count = 0;
	dn->dn_unlisted_l0_blkid = 0;
	avl_create(&dn->dn_dbufs, dbuf_compare, sizeof (dmu_buf_impl_t),
	    offsetof(dmu_buf_impl_t, db_link));

	dn->dn_moved = 0;
	return (0);
}

/* ARGSUSED */
static void
dnode_dest(void *arg, void *unused)
{
	int i;
	dnode_t *dn = arg;

	rw_destroy(&dn->dn_struct_rwlock);
	mutex_destroy(&dn->dn_mtx);
	mutex_destroy(&dn->dn_dbufs_mtx);
	cv_destroy(&dn->dn_notxholds);
	refcount_destroy(&dn->dn_holds);
	refcount_destroy(&dn->dn_tx_holds);
	ASSERT(!list_link_active(&dn->dn_link));

	for (i = 0; i < TXG_SIZE; i++) {
		ASSERT(!list_link_active(&dn->dn_dirty_link[i]));
		ASSERT3P(dn->dn_free_ranges[i], ==, NULL);
		list_destroy(&dn->dn_dirty_records[i]);
		ASSERT0(dn->dn_next_nblkptr[i]);
		ASSERT0(dn->dn_next_nlevels[i]);
		ASSERT0(dn->dn_next_indblkshift[i]);
		ASSERT0(dn->dn_next_bonustype[i]);
		ASSERT0(dn->dn_rm_spillblk[i]);
		ASSERT0(dn->dn_next_bonuslen[i]);
		ASSERT0(dn->dn_next_blksz[i]);
	}

	ASSERT0(dn->dn_allocated_txg);
	ASSERT0(dn->dn_free_txg);
	ASSERT0(dn->dn_assigned_txg);
	ASSERT0(dn->dn_dirtyctx);
	ASSERT3P(dn->dn_dirtyctx_firstset, ==, NULL);
	ASSERT3P(dn->dn_bonus, ==, NULL);
	ASSERT(!dn->dn_have_spill);
	ASSERT3P(dn->dn_zio, ==, NULL);
	ASSERT0(dn->dn_oldused);
	ASSERT0(dn->dn_oldflags);
	ASSERT0(dn->dn_olduid);
	ASSERT0(dn->dn_oldgid);
	ASSERT0(dn->dn_newuid);
	ASSERT0(dn->dn_newgid);
	ASSERT0(dn->dn_id_flags);

	ASSERT0(dn->dn_dbufs_count);
	ASSERT0(dn->dn_unlisted_l0_blkid);
	avl_destroy(&dn->dn_dbufs);
}

void
dnode_init(void)
{
	ASSERT(dnode_cache == NULL);
	dnode_cache = kmem_cache_create("dnode_t",
	    sizeof (dnode_t),
	    0, dnode_cons, dnode_dest, NULL, NULL, NULL, 0);
	kmem_cache_set_move(dnode_cache, dnode_move);
}

void
dnode_fini(void)
{
	kmem_cache_destroy(dnode_cache);
	dnode_cache = NULL;
}


#ifdef ZFS_DEBUG
void
dnode_verify(dnode_t *dn)
{
	int drop_struct_lock = FALSE;

	ASSERT(dn->dn_phys);
	ASSERT(dn->dn_objset);
	ASSERT(dn->dn_handle->dnh_dnode == dn);

	ASSERT(DMU_OT_IS_VALID(dn->dn_phys->dn_type));

	if (!(zfs_flags & ZFS_DEBUG_DNODE_VERIFY))
		return;

	if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		drop_struct_lock = TRUE;
	}
	if (dn->dn_phys->dn_type != DMU_OT_NONE || dn->dn_allocated_txg != 0) {
		int i;
		ASSERT3U(dn->dn_indblkshift, >=, 0);
		ASSERT3U(dn->dn_indblkshift, <=, SPA_MAXBLOCKSHIFT);
		if (dn->dn_datablkshift) {
			ASSERT3U(dn->dn_datablkshift, >=, SPA_MINBLOCKSHIFT);
			ASSERT3U(dn->dn_datablkshift, <=, SPA_MAXBLOCKSHIFT);
			ASSERT3U(1<<dn->dn_datablkshift, ==, dn->dn_datablksz);
		}
		ASSERT3U(dn->dn_nlevels, <=, 30);
		ASSERT(DMU_OT_IS_VALID(dn->dn_type));
		ASSERT3U(dn->dn_nblkptr, >=, 1);
		ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);
		ASSERT3U(dn->dn_bonuslen, <=, DN_MAX_BONUSLEN);
		ASSERT3U(dn->dn_datablksz, ==,
		    dn->dn_datablkszsec << SPA_MINBLOCKSHIFT);
		ASSERT3U(ISP2(dn->dn_datablksz), ==, dn->dn_datablkshift != 0);
		ASSERT3U((dn->dn_nblkptr - 1) * sizeof (blkptr_t) +
		    dn->dn_bonuslen, <=, DN_MAX_BONUSLEN);
		for (i = 0; i < TXG_SIZE; i++) {
			ASSERT3U(dn->dn_next_nlevels[i], <=, dn->dn_nlevels);
		}
	}
	if (dn->dn_phys->dn_type != DMU_OT_NONE)
		ASSERT3U(dn->dn_phys->dn_nlevels, <=, dn->dn_nlevels);
	ASSERT(DMU_OBJECT_IS_SPECIAL(dn->dn_object) || dn->dn_dbuf != NULL);
	if (dn->dn_dbuf != NULL) {
		ASSERT3P(dn->dn_phys, ==,
		    (dnode_phys_t *)dn->dn_dbuf->db.db_data +
		    (dn->dn_object % (dn->dn_dbuf->db.db_size >> DNODE_SHIFT)));
	}
	if (drop_struct_lock)
		rw_exit(&dn->dn_struct_rwlock);
}
#endif

void
dnode_byteswap(dnode_phys_t *dnp)
{
	uint64_t *buf64 = (void*)&dnp->dn_blkptr;
	int i;

	if (dnp->dn_type == DMU_OT_NONE) {
		bzero(dnp, sizeof (dnode_phys_t));
		return;
	}

	dnp->dn_datablkszsec = BSWAP_16(dnp->dn_datablkszsec);
	dnp->dn_bonuslen = BSWAP_16(dnp->dn_bonuslen);
	dnp->dn_maxblkid = BSWAP_64(dnp->dn_maxblkid);
	dnp->dn_used = BSWAP_64(dnp->dn_used);

	/*
	 * dn_nblkptr is only one byte, so it's OK to read it in either
	 * byte order.  We can't read dn_bonuslen.
	 */
	ASSERT(dnp->dn_indblkshift <= SPA_MAXBLOCKSHIFT);
	ASSERT(dnp->dn_nblkptr <= DN_MAX_NBLKPTR);
	for (i = 0; i < dnp->dn_nblkptr * sizeof (blkptr_t)/8; i++)
		buf64[i] = BSWAP_64(buf64[i]);

	/*
	 * OK to check dn_bonuslen for zero, because it won't matter if
	 * we have the wrong byte order.  This is necessary because the
	 * dnode dnode is smaller than a regular dnode.
	 */
	if (dnp->dn_bonuslen != 0) {
		/*
		 * Note that the bonus length calculated here may be
		 * longer than the actual bonus buffer.  This is because
		 * we always put the bonus buffer after the last block
		 * pointer (instead of packing it against the end of the
		 * dnode buffer).
		 */
		int off = (dnp->dn_nblkptr-1) * sizeof (blkptr_t);
		size_t len = DN_MAX_BONUSLEN - off;
		ASSERT(DMU_OT_IS_VALID(dnp->dn_bonustype));
		dmu_object_byteswap_t byteswap =
		    DMU_OT_BYTESWAP(dnp->dn_bonustype);
		dmu_ot_byteswap[byteswap].ob_func(dnp->dn_bonus + off, len);
	}

	/* Swap SPILL block if we have one */
	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
		byteswap_uint64_array(&dnp->dn_spill, sizeof (blkptr_t));
}

void
dnode_buf_byteswap(void *vbuf, size_t size)
{
	dnode_phys_t *buf = vbuf;
	int i;

	ASSERT3U(sizeof (dnode_phys_t), ==, (1<<DNODE_SHIFT));
	ASSERT((size & (sizeof (dnode_phys_t)-1)) == 0);

	size >>= DNODE_SHIFT;
	for (i = 0; i < size; i++) {
		dnode_byteswap(buf);
		buf++;
	}
}

void
dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx)
{
	ASSERT3U(refcount_count(&dn->dn_holds), >=, 1);

	dnode_setdirty(dn, tx);
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	ASSERT3U(newsize, <=, DN_MAX_BONUSLEN -
	    (dn->dn_nblkptr-1) * sizeof (blkptr_t));
	dn->dn_bonuslen = newsize;
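	/*
	 * Zero can't be stored in dn_next_bonuslen[] directly, since a
	 * zero entry there means "no change this txg"; DN_ZERO_BONUSLEN
	 * is the sentinel dnode_sync() uses to tell the two apart.
	 */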
	if (newsize == 0)
		dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = DN_ZERO_BONUSLEN;
	else
		dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
	rw_exit(&dn->dn_struct_rwlock);
}

void
dnode_setbonus_type(dnode_t *dn, dmu_object_type_t newtype, dmu_tx_t *tx)
{
	ASSERT3U(refcount_count(&dn->dn_holds), >=, 1);
	dnode_setdirty(dn, tx);
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	dn->dn_bonustype = newtype;
	dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype;
	rw_exit(&dn->dn_struct_rwlock);
}

void
dnode_rm_spill(dnode_t *dn, dmu_tx_t *tx)
{
	ASSERT3U(refcount_count(&dn->dn_holds), >=, 1);
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
	dnode_setdirty(dn, tx);
	dn->dn_rm_spillblk[tx->tx_txg&TXG_MASK] = DN_KILL_SPILLBLK;
	dn->dn_have_spill = B_FALSE;
}

static void
dnode_setdblksz(dnode_t *dn, int size)
{
	ASSERT0(P2PHASE(size, SPA_MINBLOCKSIZE));
	ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
	ASSERT3U(size, >=, SPA_MINBLOCKSIZE);
	ASSERT3U(size >> SPA_MINBLOCKSHIFT, <,
	    1<<(sizeof (dn->dn_phys->dn_datablkszsec) * 8));
	dn->dn_datablksz = size;
	dn->dn_datablkszsec = size >> SPA_MINBLOCKSHIFT;
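	/*
	 * Only power-of-two block sizes have a meaningful shift; a zero
	 * dn_datablkshift tells callers to divide by dn_datablksz instead.
	 */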
	dn->dn_datablkshift = ISP2(size) ? highbit64(size - 1) : 0;
}

static dnode_t *
dnode_create(objset_t *os, dnode_phys_t *dnp, dmu_buf_impl_t *db,
    uint64_t object, dnode_handle_t *dnh)
{
	dnode_t *dn;

	dn = kmem_cache_alloc(dnode_cache, KM_SLEEP);
	ASSERT(!POINTER_IS_VALID(dn->dn_objset));
	dn->dn_moved = 0;

	/*
	 * Defer setting dn_objset until the dnode is ready to be a candidate
	 * for the dnode_move() callback.
	 */
	dn->dn_object = object;
	dn->dn_dbuf = db;
	dn->dn_handle = dnh;
	dn->dn_phys = dnp;

	if (dnp->dn_datablkszsec) {
		dnode_setdblksz(dn, dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
	} else {
		dn->dn_datablksz = 0;
		dn->dn_datablkszsec = 0;
		dn->dn_datablkshift = 0;
	}
	dn->dn_indblkshift = dnp->dn_indblkshift;
	dn->dn_nlevels = dnp->dn_nlevels;
	dn->dn_type = dnp->dn_type;
	dn->dn_nblkptr = dnp->dn_nblkptr;
	dn->dn_checksum = dnp->dn_checksum;
	dn->dn_compress = dnp->dn_compress;
	dn->dn_bonustype = dnp->dn_bonustype;
	dn->dn_bonuslen = dnp->dn_bonuslen;
	dn->dn_maxblkid = dnp->dn_maxblkid;
	dn->dn_have_spill = ((dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) != 0);
	dn->dn_id_flags = 0;

	dmu_zfetch_init(&dn->dn_zfetch, dn);

	ASSERT(DMU_OT_IS_VALID(dn->dn_phys->dn_type));

	mutex_enter(&os->os_lock);
	if (dnh->dnh_dnode != NULL) {
		/* Lost the allocation race. */
		mutex_exit(&os->os_lock);
		kmem_cache_free(dnode_cache, dn);
		return (dnh->dnh_dnode);
	}

	/*
	 * Exclude special dnodes from os_dnodes so an empty os_dnodes
	 * signifies that the special dnodes have no references from
	 * their children (the entries in os_dnodes).  This allows
	 * dnode_destroy() to easily determine if the last child has
	 * been removed and then complete eviction of the objset.
	 */
	if (!DMU_OBJECT_IS_SPECIAL(object))
		list_insert_head(&os->os_dnodes, dn);
	membar_producer();

	/*
	 * Everything else must be valid before assigning dn_objset
	 * makes the dnode eligible for dnode_move().
	 */
	dn->dn_objset = os;

	dnh->dnh_dnode = dn;
	mutex_exit(&os->os_lock);

	arc_space_consume(sizeof (dnode_t), ARC_SPACE_OTHER);
	return (dn);
}

/*
 * Caller must be holding the dnode handle, which is released upon return.
 */
static void
dnode_destroy(dnode_t *dn)
{
	objset_t *os = dn->dn_objset;
	boolean_t complete_os_eviction = B_FALSE;

	ASSERT((dn->dn_id_flags & DN_ID_NEW_EXIST) == 0);

	mutex_enter(&os->os_lock);
	POINTER_INVALIDATE(&dn->dn_objset);
	if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
		list_remove(&os->os_dnodes, dn);
		complete_os_eviction =
		    list_is_empty(&os->os_dnodes) &&
		    list_link_active(&os->os_evicting_node);
	}
	mutex_exit(&os->os_lock);

	/* the dnode can no longer move, so we can release the handle */
	zrl_remove(&dn->dn_handle->dnh_zrlock);

	dn->dn_allocated_txg = 0;
	dn->dn_free_txg = 0;
	dn->dn_assigned_txg = 0;

	dn->dn_dirtyctx = 0;
	if (dn->dn_dirtyctx_firstset != NULL) {
		kmem_free(dn->dn_dirtyctx_firstset, 1);
		dn->dn_dirtyctx_firstset = NULL;
	}
	if (dn->dn_bonus != NULL) {
		mutex_enter(&dn->dn_bonus->db_mtx);
		dbuf_evict(dn->dn_bonus);
		dn->dn_bonus = NULL;
	}
	dn->dn_zio = NULL;

	dn->dn_have_spill = B_FALSE;
	dn->dn_oldused = 0;
	dn->dn_oldflags = 0;
	dn->dn_olduid = 0;
	dn->dn_oldgid = 0;
	dn->dn_newuid = 0;
	dn->dn_newgid = 0;
	dn->dn_id_flags = 0;
	dn->dn_unlisted_l0_blkid = 0;

	dmu_zfetch_fini(&dn->dn_zfetch);
	kmem_cache_free(dnode_cache, dn);
	arc_space_return(sizeof (dnode_t), ARC_SPACE_OTHER);

	if (complete_os_eviction)
		dmu_objset_evict_done(os);
}

void
dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	int i;

	ASSERT3U(blocksize, <=,
	    spa_maxblocksize(dmu_objset_spa(dn->dn_objset)));
	if (blocksize == 0)
		blocksize = 1 << zfs_default_bs;
	else
		blocksize = P2ROUNDUP(blocksize, SPA_MINBLOCKSIZE);

	if (ibs == 0)
		ibs = zfs_default_ibs;

	ibs = MIN(MAX(ibs, DN_MIN_INDBLKSHIFT), DN_MAX_INDBLKSHIFT);

	dprintf("os=%p obj=%llu txg=%llu blocksize=%d ibs=%d\n", dn->dn_objset,
	    dn->dn_object, tx->tx_txg, blocksize, ibs);

	ASSERT(dn->dn_type == DMU_OT_NONE);
	ASSERT(bcmp(dn->dn_phys, &dnode_phys_zero, sizeof (dnode_phys_t)) == 0);
	ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE);
	ASSERT(ot != DMU_OT_NONE);
	ASSERT(DMU_OT_IS_VALID(ot));
	ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
	    (bonustype == DMU_OT_SA && bonuslen == 0) ||
	    (bonustype != DMU_OT_NONE && bonuslen != 0));
	ASSERT(DMU_OT_IS_VALID(bonustype));
	ASSERT3U(bonuslen, <=, DN_MAX_BONUSLEN);
	ASSERT(dn->dn_type == DMU_OT_NONE);
	ASSERT0(dn->dn_maxblkid);
	ASSERT0(dn->dn_allocated_txg);
	ASSERT0(dn->dn_assigned_txg);
	ASSERT(refcount_is_zero(&dn->dn_tx_holds));
	ASSERT3U(refcount_count(&dn->dn_holds), <=, 1);
	ASSERT(avl_is_empty(&dn->dn_dbufs));

	for (i = 0; i < TXG_SIZE; i++) {
		ASSERT0(dn->dn_next_nblkptr[i]);
		ASSERT0(dn->dn_next_nlevels[i]);
		ASSERT0(dn->dn_next_indblkshift[i]);
		ASSERT0(dn->dn_next_bonuslen[i]);
		ASSERT0(dn->dn_next_bonustype[i]);
		ASSERT0(dn->dn_rm_spillblk[i]);
		ASSERT0(dn->dn_next_blksz[i]);
		ASSERT(!list_link_active(&dn->dn_dirty_link[i]));
		ASSERT3P(list_head(&dn->dn_dirty_records[i]), ==, NULL);
		ASSERT3P(dn->dn_free_ranges[i], ==, NULL);
	}

	dn->dn_type = ot;
	dnode_setdblksz(dn, blocksize);
	dn->dn_indblkshift = ibs;
	dn->dn_nlevels = 1;
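	/*
	 * The bonus buffer and the block pointer array share the tail of
	 * the dnode, so every sizeof (blkptr_t) bytes of bonus space given
	 * up buys one more block pointer, up to DN_MAX_NBLKPTR.
	 */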
	if (bonustype == DMU_OT_SA) /* Maximize bonus space for SA */
		dn->dn_nblkptr = 1;
	else
		dn->dn_nblkptr = 1 +
		    ((DN_MAX_BONUSLEN - bonuslen) >> SPA_BLKPTRSHIFT);
	dn->dn_bonustype = bonustype;
	dn->dn_bonuslen = bonuslen;
	dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
	dn->dn_compress = ZIO_COMPRESS_INHERIT;
	dn->dn_dirtyctx = 0;

	dn->dn_free_txg = 0;
	if (dn->dn_dirtyctx_firstset) {
		kmem_free(dn->dn_dirtyctx_firstset, 1);
		dn->dn_dirtyctx_firstset = NULL;
	}

	dn->dn_allocated_txg = tx->tx_txg;
	dn->dn_id_flags = 0;

	dnode_setdirty(dn, tx);
	dn->dn_next_indblkshift[tx->tx_txg & TXG_MASK] = ibs;
	dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
	dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype;
	dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = dn->dn_datablksz;
}

void
dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	int nblkptr;

	ASSERT3U(blocksize, >=, SPA_MINBLOCKSIZE);
	ASSERT3U(blocksize, <=,
	    spa_maxblocksize(dmu_objset_spa(dn->dn_objset)));
	ASSERT0(blocksize % SPA_MINBLOCKSIZE);
	ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT || dmu_tx_private_ok(tx));
	ASSERT(tx->tx_txg != 0);
	ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
	    (bonustype != DMU_OT_NONE && bonuslen != 0) ||
	    (bonustype == DMU_OT_SA && bonuslen == 0));
	ASSERT(DMU_OT_IS_VALID(bonustype));
	ASSERT3U(bonuslen, <=, DN_MAX_BONUSLEN);

	/* clean up any unreferenced dbufs */
	dnode_evict_dbufs(dn);

	dn->dn_id_flags = 0;

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	dnode_setdirty(dn, tx);
	if (dn->dn_datablksz != blocksize) {
		/* change blocksize */
		ASSERT(dn->dn_maxblkid == 0 &&
		    (BP_IS_HOLE(&dn->dn_phys->dn_blkptr[0]) ||
		    dnode_block_freed(dn, 0)));
		dnode_setdblksz(dn, blocksize);
		dn->dn_next_blksz[tx->tx_txg&TXG_MASK] = blocksize;
	}
	if (dn->dn_bonuslen != bonuslen)
		dn->dn_next_bonuslen[tx->tx_txg&TXG_MASK] = bonuslen;

	if (bonustype == DMU_OT_SA) /* Maximize bonus space for SA */
		nblkptr = 1;
	else
		nblkptr = 1 + ((DN_MAX_BONUSLEN - bonuslen) >> SPA_BLKPTRSHIFT);
	if (dn->dn_bonustype != bonustype)
		dn->dn_next_bonustype[tx->tx_txg&TXG_MASK] = bonustype;
	if (dn->dn_nblkptr != nblkptr)
		dn->dn_next_nblkptr[tx->tx_txg&TXG_MASK] = nblkptr;
	if (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		dbuf_rm_spill(dn, tx);
		dnode_rm_spill(dn, tx);
	}
	rw_exit(&dn->dn_struct_rwlock);

	/* change type */
	dn->dn_type = ot;

	/* change bonus size and type */
	mutex_enter(&dn->dn_mtx);
	dn->dn_bonustype = bonustype;
	dn->dn_bonuslen = bonuslen;
	dn->dn_nblkptr = nblkptr;
	dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
	dn->dn_compress = ZIO_COMPRESS_INHERIT;
	ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);

	/* fix up the bonus db_size */
	if (dn->dn_bonus) {
		dn->dn_bonus->db.db_size =
		    DN_MAX_BONUSLEN - (dn->dn_nblkptr-1) * sizeof (blkptr_t);
		ASSERT(dn->dn_bonuslen <= dn->dn_bonus->db.db_size);
	}

	dn->dn_allocated_txg = tx->tx_txg;
	mutex_exit(&dn->dn_mtx);
}

#ifdef	DNODE_STATS
static struct {
	uint64_t dms_dnode_invalid;
	uint64_t dms_dnode_recheck1;
	uint64_t dms_dnode_recheck2;
	uint64_t dms_dnode_special;
	uint64_t dms_dnode_handle;
	uint64_t dms_dnode_rwlock;
	uint64_t dms_dnode_active;
} dnode_move_stats;
#endif	/* DNODE_STATS */

static void
dnode_move_impl(dnode_t *odn, dnode_t *ndn)
{
	int i;

	ASSERT(!RW_LOCK_HELD(&odn->dn_struct_rwlock));
	ASSERT(MUTEX_NOT_HELD(&odn->dn_mtx));
	ASSERT(MUTEX_NOT_HELD(&odn->dn_dbufs_mtx));
	ASSERT(!RW_LOCK_HELD(&odn->dn_zfetch.zf_rwlock));

	/* Copy fields. */
	ndn->dn_objset = odn->dn_objset;
	ndn->dn_object = odn->dn_object;
	ndn->dn_dbuf = odn->dn_dbuf;
	ndn->dn_handle = odn->dn_handle;
	ndn->dn_phys = odn->dn_phys;
	ndn->dn_type = odn->dn_type;
	ndn->dn_bonuslen = odn->dn_bonuslen;
	ndn->dn_bonustype = odn->dn_bonustype;
	ndn->dn_nblkptr = odn->dn_nblkptr;
	ndn->dn_checksum = odn->dn_checksum;
	ndn->dn_compress = odn->dn_compress;
	ndn->dn_nlevels = odn->dn_nlevels;
	ndn->dn_indblkshift = odn->dn_indblkshift;
	ndn->dn_datablkshift = odn->dn_datablkshift;
	ndn->dn_datablkszsec = odn->dn_datablkszsec;
	ndn->dn_datablksz = odn->dn_datablksz;
	ndn->dn_maxblkid = odn->dn_maxblkid;
	bcopy(&odn->dn_next_nblkptr[0], &ndn->dn_next_nblkptr[0],
	    sizeof (odn->dn_next_nblkptr));
	bcopy(&odn->dn_next_nlevels[0], &ndn->dn_next_nlevels[0],
	    sizeof (odn->dn_next_nlevels));
	bcopy(&odn->dn_next_indblkshift[0], &ndn->dn_next_indblkshift[0],
	    sizeof (odn->dn_next_indblkshift));
	bcopy(&odn->dn_next_bonustype[0], &ndn->dn_next_bonustype[0],
	    sizeof (odn->dn_next_bonustype));
	bcopy(&odn->dn_rm_spillblk[0], &ndn->dn_rm_spillblk[0],
	    sizeof (odn->dn_rm_spillblk));
	bcopy(&odn->dn_next_bonuslen[0], &ndn->dn_next_bonuslen[0],
	    sizeof (odn->dn_next_bonuslen));
	bcopy(&odn->dn_next_blksz[0], &ndn->dn_next_blksz[0],
	    sizeof (odn->dn_next_blksz));
	for (i = 0; i < TXG_SIZE; i++) {
		list_move_tail(&ndn->dn_dirty_records[i],
		    &odn->dn_dirty_records[i]);
	}
	bcopy(&odn->dn_free_ranges[0], &ndn->dn_free_ranges[0],
	    sizeof (odn->dn_free_ranges));
	ndn->dn_allocated_txg = odn->dn_allocated_txg;
	ndn->dn_free_txg = odn->dn_free_txg;
	ndn->dn_assigned_txg = odn->dn_assigned_txg;
	ndn->dn_dirtyctx = odn->dn_dirtyctx;
	ndn->dn_dirtyctx_firstset = odn->dn_dirtyctx_firstset;
	ASSERT(refcount_count(&odn->dn_tx_holds) == 0);
	refcount_transfer(&ndn->dn_holds, &odn->dn_holds);
	ASSERT(avl_is_empty(&ndn->dn_dbufs));
	avl_swap(&ndn->dn_dbufs, &odn->dn_dbufs);
	ndn->dn_dbufs_count = odn->dn_dbufs_count;
	ndn->dn_unlisted_l0_blkid = odn->dn_unlisted_l0_blkid;
	ndn->dn_bonus = odn->dn_bonus;
	ndn->dn_have_spill = odn->dn_have_spill;
	ndn->dn_zio = odn->dn_zio;
	ndn->dn_oldused = odn->dn_oldused;
	ndn->dn_oldflags = odn->dn_oldflags;
	ndn->dn_olduid = odn->dn_olduid;
	ndn->dn_oldgid = odn->dn_oldgid;
	ndn->dn_newuid = odn->dn_newuid;
	ndn->dn_newgid = odn->dn_newgid;
	ndn->dn_id_flags = odn->dn_id_flags;
	dmu_zfetch_init(&ndn->dn_zfetch, NULL);
	list_move_tail(&ndn->dn_zfetch.zf_stream, &odn->dn_zfetch.zf_stream);
	ndn->dn_zfetch.zf_dnode = odn->dn_zfetch.zf_dnode;

	/*
	 * Update back pointers. Updating the handle fixes the back pointer of
	 * every descendant dbuf as well as the bonus dbuf.
	 */
	ASSERT(ndn->dn_handle->dnh_dnode == odn);
	ndn->dn_handle->dnh_dnode = ndn;
	if (ndn->dn_zfetch.zf_dnode == odn) {
		ndn->dn_zfetch.zf_dnode = ndn;
	}

	/*
	 * Invalidate the original dnode by clearing all of its back pointers.
	 */
	odn->dn_dbuf = NULL;
	odn->dn_handle = NULL;
	avl_create(&odn->dn_dbufs, dbuf_compare, sizeof (dmu_buf_impl_t),
	    offsetof(dmu_buf_impl_t, db_link));
	odn->dn_dbufs_count = 0;
	odn->dn_unlisted_l0_blkid = 0;
	odn->dn_bonus = NULL;
	odn->dn_zfetch.zf_dnode = NULL;

	/*
	 * Set the low bit of the objset pointer to ensure that dnode_move()
	 * recognizes the dnode as invalid in any subsequent callback.
	 */
	POINTER_INVALIDATE(&odn->dn_objset);

	/*
	 * Satisfy the destructor.
	 */
	for (i = 0; i < TXG_SIZE; i++) {
		list_create(&odn->dn_dirty_records[i],
		    sizeof (dbuf_dirty_record_t),
		    offsetof(dbuf_dirty_record_t, dr_dirty_node));
		odn->dn_free_ranges[i] = NULL;
		odn->dn_next_nlevels[i] = 0;
		odn->dn_next_indblkshift[i] = 0;
		odn->dn_next_bonustype[i] = 0;
		odn->dn_rm_spillblk[i] = 0;
		odn->dn_next_bonuslen[i] = 0;
		odn->dn_next_blksz[i] = 0;
	}
	odn->dn_allocated_txg = 0;
	odn->dn_free_txg = 0;
	odn->dn_assigned_txg = 0;
	odn->dn_dirtyctx = 0;
	odn->dn_dirtyctx_firstset = NULL;
	odn->dn_have_spill = B_FALSE;
	odn->dn_zio = NULL;
	odn->dn_oldused = 0;
	odn->dn_oldflags = 0;
	odn->dn_olduid = 0;
	odn->dn_oldgid = 0;
	odn->dn_newuid = 0;
	odn->dn_newgid = 0;
	odn->dn_id_flags = 0;

	/*
	 * Mark the dnode.
	 */
	ndn->dn_moved = 1;
	odn->dn_moved = (uint8_t)-1;
}

#ifdef	_KERNEL
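/*
 * Move callback for the dnode kmem cache.  Returns KMEM_CBRC_YES once the
 * dnode has been safely copied into newbuf, KMEM_CBRC_LATER when transient
 * holds or locks prevent a safe move right now, KMEM_CBRC_NO for special
 * dnodes that may never move, and KMEM_CBRC_DONT_KNOW when the buffer
 * can't even be identified as a live dnode.
 */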
/*ARGSUSED*/
static kmem_cbrc_t
dnode_move(void *buf, void *newbuf, size_t size, void *arg)
{
	dnode_t *odn = buf, *ndn = newbuf;
	objset_t *os;
	int64_t refcount;
	uint32_t dbufs;

	/*
	 * The dnode is on the objset's list of known dnodes if the objset
	 * pointer is valid. We set the low bit of the objset pointer when
	 * freeing the dnode to invalidate it, and the memory patterns written
	 * by kmem (baddcafe and deadbeef) set at least one of the two low bits.
	 * A newly created dnode sets the objset pointer last of all to indicate
	 * that the dnode is known and in a valid state to be moved by this
	 * function.
	 */
	os = odn->dn_objset;
	if (!POINTER_IS_VALID(os)) {
		DNODE_STAT_ADD(dnode_move_stats.dms_dnode_invalid);
		return (KMEM_CBRC_DONT_KNOW);
	}

	/*
	 * Ensure that the objset does not go away during the move.
	 */
	rw_enter(&os_lock, RW_WRITER);
	if (os != odn->dn_objset) {
		rw_exit(&os_lock);
		DNODE_STAT_ADD(dnode_move_stats.dms_dnode_recheck1);
		return (KMEM_CBRC_DONT_KNOW);
	}

	/*
	 * If the dnode is still valid, then so is the objset. We know that no
	 * valid objset can be freed while we hold os_lock, so we can safely
	 * ensure that the objset remains in use.
	 */
	mutex_enter(&os->os_lock);

	/*
	 * Recheck the objset pointer in case the dnode was removed just before
	 * acquiring the lock.
	 */
	if (os != odn->dn_objset) {
		mutex_exit(&os->os_lock);
		rw_exit(&os_lock);
		DNODE_STAT_ADD(dnode_move_stats.dms_dnode_recheck2);
		return (KMEM_CBRC_DONT_KNOW);
	}

	/*
	 * At this point we know that as long as we hold os->os_lock, the dnode
	 * cannot be freed and fields within the dnode can be safely accessed.
	 * The objset listing this dnode cannot go away as long as this dnode is
	 * on its list.
	 */
	rw_exit(&os_lock);
	if (DMU_OBJECT_IS_SPECIAL(odn->dn_object)) {
		mutex_exit(&os->os_lock);
		DNODE_STAT_ADD(dnode_move_stats.dms_dnode_special);
		return (KMEM_CBRC_NO);
	}
	ASSERT(odn->dn_dbuf != NULL); /* only "special" dnodes have no parent */

	/*
	 * Lock the dnode handle to prevent the dnode from obtaining any new
	 * holds. This also prevents the descendant dbufs and the bonus dbuf
	 * from accessing the dnode, so that we can discount their holds. The
	 * handle is safe to access because we know that while the dnode cannot
	 * go away, neither can its handle. Once we hold dnh_zrlock, we can
	 * safely move any dnode referenced only by dbufs.
	 */
	if (!zrl_tryenter(&odn->dn_handle->dnh_zrlock)) {
		mutex_exit(&os->os_lock);
		DNODE_STAT_ADD(dnode_move_stats.dms_dnode_handle);
		return (KMEM_CBRC_LATER);
	}

	/*
	 * Ensure a consistent view of the dnode's holds and the dnode's dbufs.
	 * We need to guarantee that there is a hold for every dbuf in order to
	 * determine whether the dnode is actively referenced. Falsely matching
	 * a dbuf to an active hold would lead to an unsafe move. It's possible
	 * that a thread already having an active dnode hold is about to add a
	 * dbuf, and we can't compare hold and dbuf counts while the add is in
	 * progress.
	 */
	if (!rw_tryenter(&odn->dn_struct_rwlock, RW_WRITER)) {
		zrl_exit(&odn->dn_handle->dnh_zrlock);
		mutex_exit(&os->os_lock);
		DNODE_STAT_ADD(dnode_move_stats.dms_dnode_rwlock);
		return (KMEM_CBRC_LATER);
	}

	/*
	 * A dbuf may be removed (evicted) without an active dnode hold. In that
	 * case, the dbuf count is decremented under the handle lock before the
	 * dbuf's hold is released. This order ensures that if we count the hold
	 * after the dbuf is removed but before its hold is released, we will
	 * treat the unmatched hold as active and exit safely. If we count the
	 * hold before the dbuf is removed, the hold is discounted, and the
	 * removal is blocked until the move completes.
	 */
	refcount = refcount_count(&odn->dn_holds);
	ASSERT(refcount >= 0);
	dbufs = odn->dn_dbufs_count;

	/* We can't have more dbufs than dnode holds. */
	ASSERT3U(dbufs, <=, refcount);
	DTRACE_PROBE3(dnode__move, dnode_t *, odn, int64_t, refcount,
	    uint32_t, dbufs);

	if (refcount > dbufs) {
		rw_exit(&odn->dn_struct_rwlock);
		zrl_exit(&odn->dn_handle->dnh_zrlock);
		mutex_exit(&os->os_lock);
		DNODE_STAT_ADD(dnode_move_stats.dms_dnode_active);
		return (KMEM_CBRC_LATER);
	}

	rw_exit(&odn->dn_struct_rwlock);

	/*
	 * At this point we know that anyone with a hold on the dnode is not
	 * actively referencing it. The dnode is known and in a valid state to
	 * move. We're holding the locks needed to execute the critical section.
	 */
	dnode_move_impl(odn, ndn);

	list_link_replace(&odn->dn_link, &ndn->dn_link);
	/* If the dnode was safe to move, the refcount cannot have changed. */
	ASSERT(refcount == refcount_count(&ndn->dn_holds));
	ASSERT(dbufs == ndn->dn_dbufs_count);
	zrl_exit(&ndn->dn_handle->dnh_zrlock); /* handle has moved */
	mutex_exit(&os->os_lock);

	return (KMEM_CBRC_YES);
}
#endif	/* _KERNEL */

void
dnode_special_close(dnode_handle_t *dnh)
{
	dnode_t *dn = dnh->dnh_dnode;

	/*
	 * Wait for final references to the dnode to clear.  This can
	 * only happen if the arc is asynchronously evicting state that
	 * has a hold on this dnode while we are trying to evict this
	 * dnode.
	 */
	while (refcount_count(&dn->dn_holds) > 0)
		delay(1);
	ASSERT(dn->dn_dbuf == NULL ||
	    dmu_buf_get_user(&dn->dn_dbuf->db) == NULL);
	zrl_add(&dnh->dnh_zrlock);
	dnode_destroy(dn); /* implicit zrl_remove() */
	zrl_destroy(&dnh->dnh_zrlock);
	dnh->dnh_dnode = NULL;
}

void
dnode_special_open(objset_t *os, dnode_phys_t *dnp, uint64_t object,
    dnode_handle_t *dnh)
{
	dnode_t *dn;

	dn = dnode_create(os, dnp, NULL, object, dnh);
	zrl_init(&dnh->dnh_zrlock);
	DNODE_VERIFY(dn);
}

static void
dnode_buf_pageout(void *dbu)
{
	dnode_children_t *children_dnodes = dbu;
	int i;

	for (i = 0; i < children_dnodes->dnc_count; i++) {
		dnode_handle_t *dnh = &children_dnodes->dnc_children[i];
		dnode_t *dn;

		/*
		 * The dnode handle lock guards against the dnode moving to
		 * another valid address, so there is no need here to guard
		 * against changes to or from NULL.
		 */
		if (dnh->dnh_dnode == NULL) {
			zrl_destroy(&dnh->dnh_zrlock);
			continue;
		}

		zrl_add(&dnh->dnh_zrlock);
		dn = dnh->dnh_dnode;
		/*
		 * If there are holds on this dnode, then there should
		 * be holds on the dnode's containing dbuf as well; thus
		 * it wouldn't be eligible for eviction and this function
		 * would not have been called.
		 */
		ASSERT(refcount_is_zero(&dn->dn_holds));
		ASSERT(refcount_is_zero(&dn->dn_tx_holds));

		dnode_destroy(dn); /* implicit zrl_remove() */
		zrl_destroy(&dnh->dnh_zrlock);
		dnh->dnh_dnode = NULL;
	}
	kmem_free(children_dnodes, sizeof (dnode_children_t) +
	    children_dnodes->dnc_count * sizeof (dnode_handle_t));
}

/*
 * errors:
 * EINVAL - invalid object number.
 * EIO - i/o error.
 * succeeds even for free dnodes.
 */
int
dnode_hold_impl(objset_t *os, uint64_t object, int flag,
    void *tag, dnode_t **dnp)
{
	int epb, idx, err;
	int drop_struct_lock = FALSE;
	int type;
	uint64_t blk;
	dnode_t *mdn, *dn;
	dmu_buf_impl_t *db;
	dnode_children_t *children_dnodes;
	dnode_handle_t *dnh;

	/*
	 * If you are holding the spa config lock as writer, you shouldn't
	 * be asking the DMU to do *anything* unless it's the root pool
	 * which may require us to read from the root filesystem while
	 * holding some (not all) of the locks as writer.
	 */
	ASSERT(spa_config_held(os->os_spa, SCL_ALL, RW_WRITER) == 0 ||
	    (spa_is_root(os->os_spa) &&
	    spa_config_held(os->os_spa, SCL_STATE, RW_WRITER)));

	if (object == DMU_USERUSED_OBJECT || object == DMU_GROUPUSED_OBJECT) {
		dn = (object == DMU_USERUSED_OBJECT) ?
		    DMU_USERUSED_DNODE(os) : DMU_GROUPUSED_DNODE(os);
		if (dn == NULL)
			return (SET_ERROR(ENOENT));
		type = dn->dn_type;
		if ((flag & DNODE_MUST_BE_ALLOCATED) && type == DMU_OT_NONE)
			return (SET_ERROR(ENOENT));
		if ((flag & DNODE_MUST_BE_FREE) && type != DMU_OT_NONE)
			return (SET_ERROR(EEXIST));
		DNODE_VERIFY(dn);
		(void) refcount_add(&dn->dn_holds, tag);
		*dnp = dn;
		return (0);
	}

	if (object == 0 || object >= DN_MAX_OBJECT)
		return (SET_ERROR(EINVAL));

	mdn = DMU_META_DNODE(os);
	ASSERT(mdn->dn_object == DMU_META_DNODE_OBJECT);

	DNODE_VERIFY(mdn);

	if (!RW_WRITE_HELD(&mdn->dn_struct_rwlock)) {
		rw_enter(&mdn->dn_struct_rwlock, RW_READER);
		drop_struct_lock = TRUE;
	}

	blk = dbuf_whichblock(mdn, 0, object * sizeof (dnode_phys_t));

	db = dbuf_hold(mdn, blk, FTAG);
	if (drop_struct_lock)
		rw_exit(&mdn->dn_struct_rwlock);
	if (db == NULL)
		return (SET_ERROR(EIO));
	err = dbuf_read(db, NULL, DB_RF_CANFAIL);
	if (err) {
		dbuf_rele(db, FTAG);
		return (err);
	}

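	/*
	 * Each block of the meta-dnode object holds epb (entries per
	 * block) dnode_phys_t structures; idx is this object's slot in
	 * the block just read.
	 */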
	ASSERT3U(db->db.db_size, >=, 1<<DNODE_SHIFT);
	epb = db->db.db_size >> DNODE_SHIFT;

	idx = object & (epb-1);

	ASSERT(DB_DNODE(db)->dn_type == DMU_OT_DNODE);
	children_dnodes = dmu_buf_get_user(&db->db);
	if (children_dnodes == NULL) {
		int i;
		dnode_children_t *winner;
		children_dnodes = kmem_zalloc(sizeof (dnode_children_t) +
		    epb * sizeof (dnode_handle_t), KM_SLEEP);
		children_dnodes->dnc_count = epb;
		dnh = &children_dnodes->dnc_children[0];
		for (i = 0; i < epb; i++) {
			zrl_init(&dnh[i].dnh_zrlock);
		}
		dmu_buf_init_user(&children_dnodes->dnc_dbu, NULL,
		    dnode_buf_pageout, NULL);
		winner = dmu_buf_set_user(&db->db, &children_dnodes->dnc_dbu);
		if (winner != NULL) {
			for (i = 0; i < epb; i++) {
				zrl_destroy(&dnh[i].dnh_zrlock);
			}

			kmem_free(children_dnodes, sizeof (dnode_children_t) +
			    epb * sizeof (dnode_handle_t));
			children_dnodes = winner;
		}
	}
	ASSERT(children_dnodes->dnc_count == epb);

	dnh = &children_dnodes->dnc_children[idx];
	zrl_add(&dnh->dnh_zrlock);
	dn = dnh->dnh_dnode;
	if (dn == NULL) {
		dnode_phys_t *phys = (dnode_phys_t *)db->db.db_data+idx;

		dn = dnode_create(os, phys, db, object, dnh);
	}

	mutex_enter(&dn->dn_mtx);
	type = dn->dn_type;
	if (dn->dn_free_txg ||
	    ((flag & DNODE_MUST_BE_ALLOCATED) && type == DMU_OT_NONE) ||
	    ((flag & DNODE_MUST_BE_FREE) &&
	    (type != DMU_OT_NONE || !refcount_is_zero(&dn->dn_holds)))) {
		mutex_exit(&dn->dn_mtx);
		zrl_remove(&dnh->dnh_zrlock);
		dbuf_rele(db, FTAG);
		return (type == DMU_OT_NONE ? ENOENT : EEXIST);
	}
	if (refcount_add(&dn->dn_holds, tag) == 1)
		dbuf_add_ref(db, dnh);
	mutex_exit(&dn->dn_mtx);

	/* Now we can rely on the hold to prevent the dnode from moving. */
	zrl_remove(&dnh->dnh_zrlock);

	DNODE_VERIFY(dn);
	ASSERT3P(dn->dn_dbuf, ==, db);
	ASSERT3U(dn->dn_object, ==, object);
	dbuf_rele(db, FTAG);

	*dnp = dn;
	return (0);
}

/*
 * Return held dnode if the object is allocated, NULL if not.
 */
int
dnode_hold(objset_t *os, uint64_t object, void *tag, dnode_t **dnp)
{
	return (dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, tag, dnp));
}

/*
 * Can only add a reference if there is already at least one
 * reference on the dnode.  Returns FALSE if unable to add a
 * new reference.
 */
boolean_t
dnode_add_ref(dnode_t *dn, void *tag)
{
	mutex_enter(&dn->dn_mtx);
	if (refcount_is_zero(&dn->dn_holds)) {
		mutex_exit(&dn->dn_mtx);
		return (FALSE);
	}
	VERIFY(1 < refcount_add(&dn->dn_holds, tag));
	mutex_exit(&dn->dn_mtx);
	return (TRUE);
}

void
dnode_rele(dnode_t *dn, void *tag)
{
	mutex_enter(&dn->dn_mtx);
	dnode_rele_and_unlock(dn, tag);
}

void
dnode_rele_and_unlock(dnode_t *dn, void *tag)
{
	uint64_t refs;
	/* Get while the hold prevents the dnode from moving. */
	dmu_buf_impl_t *db = dn->dn_dbuf;
	dnode_handle_t *dnh = dn->dn_handle;

	refs = refcount_remove(&dn->dn_holds, tag);
	mutex_exit(&dn->dn_mtx);

	/*
	 * It's unsafe to release the last hold on a dnode by dnode_rele() or
	 * indirectly by dbuf_rele() while relying on the dnode handle to
	 * prevent the dnode from moving, since releasing the last hold could
	 * result in the dnode's parent dbuf evicting its dnode handles. For
	 * that reason anyone calling dnode_rele() or dbuf_rele() without some
	 * other direct or indirect hold on the dnode must first drop the dnode
	 * handle.
	 */
	ASSERT(refs > 0 || dnh->dnh_zrlock.zr_owner != curthread);

	/* NOTE: the DNODE_DNODE does not have a dn_dbuf */
	if (refs == 0 && db != NULL) {
		/*
		 * Another thread could add a hold to the dnode handle in
		 * dnode_hold_impl() while holding the parent dbuf. Since the
		 * hold on the parent dbuf prevents the handle from being
		 * destroyed, the hold on the handle is OK. We can't yet assert
		 * that the handle has zero references, but that will be
		 * asserted anyway when the handle gets destroyed.
		 */
		dbuf_rele(db, dnh);
	}
}

void
dnode_setdirty(dnode_t *dn, dmu_tx_t *tx)
{
	objset_t *os = dn->dn_objset;
	uint64_t txg = tx->tx_txg;

	if (DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
		dsl_dataset_dirty(os->os_dsl_dataset, tx);
		return;
	}

	DNODE_VERIFY(dn);

#ifdef ZFS_DEBUG
	mutex_enter(&dn->dn_mtx);
	ASSERT(dn->dn_phys->dn_type || dn->dn_allocated_txg);
	ASSERT(dn->dn_free_txg == 0 || dn->dn_free_txg >= txg);
	mutex_exit(&dn->dn_mtx);
#endif

	/*
	 * Determine old uid/gid when necessary
	 */
	dmu_objset_userquota_get_ids(dn, B_TRUE, tx);

	mutex_enter(&os->os_lock);

	/*
	 * If we are already marked dirty, we're done.
	 */
	if (list_link_active(&dn->dn_dirty_link[txg & TXG_MASK])) {
		mutex_exit(&os->os_lock);
		return;
	}

	ASSERT(!refcount_is_zero(&dn->dn_holds) ||
	    !avl_is_empty(&dn->dn_dbufs));
	ASSERT(dn->dn_datablksz != 0);
	ASSERT0(dn->dn_next_bonuslen[txg&TXG_MASK]);
	ASSERT0(dn->dn_next_blksz[txg&TXG_MASK]);
	ASSERT0(dn->dn_next_bonustype[txg&TXG_MASK]);

	dprintf_ds(os->os_dsl_dataset, "obj=%llu txg=%llu\n",
	    dn->dn_object, txg);

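	/*
	 * A dnode being freed in (or before) this txg goes on the free
	 * list, where dnode_sync() will reclaim it; otherwise it joins
	 * the ordinary per-txg dirty list.
	 */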
	if (dn->dn_free_txg > 0 && dn->dn_free_txg <= txg) {
		list_insert_tail(&os->os_free_dnodes[txg&TXG_MASK], dn);
	} else {
		list_insert_tail(&os->os_dirty_dnodes[txg&TXG_MASK], dn);
	}

	mutex_exit(&os->os_lock);

	/*
	 * The dnode maintains a hold on its containing dbuf as
	 * long as there are holds on it.  Each instantiated child
	 * dbuf maintains a hold on the dnode.  When the last child
	 * drops its hold, the dnode will drop its hold on the
	 * containing dbuf. We add a "dirty hold" here so that the
	 * dnode will hang around after we finish processing its
	 * children.
	 */
	VERIFY(dnode_add_ref(dn, (void *)(uintptr_t)tx->tx_txg));

	(void) dbuf_dirty(dn->dn_dbuf, tx);

	dsl_dataset_dirty(os->os_dsl_dataset, tx);
}

void
dnode_free(dnode_t *dn, dmu_tx_t *tx)
{
	int txgoff = tx->tx_txg & TXG_MASK;

	dprintf("dn=%p txg=%llu\n", dn, tx->tx_txg);

	/* we should be the only holder... hopefully */
	/* ASSERT3U(refcount_count(&dn->dn_holds), ==, 1); */

	mutex_enter(&dn->dn_mtx);
	if (dn->dn_type == DMU_OT_NONE || dn->dn_free_txg) {
		mutex_exit(&dn->dn_mtx);
		return;
	}
	dn->dn_free_txg = tx->tx_txg;
	mutex_exit(&dn->dn_mtx);

	/*
	 * If the dnode is already dirty, it needs to be moved from
	 * the dirty list to the free list.
	 */
	mutex_enter(&dn->dn_objset->os_lock);
	if (list_link_active(&dn->dn_dirty_link[txgoff])) {
		list_remove(&dn->dn_objset->os_dirty_dnodes[txgoff], dn);
		list_insert_tail(&dn->dn_objset->os_free_dnodes[txgoff], dn);
		mutex_exit(&dn->dn_objset->os_lock);
	} else {
		mutex_exit(&dn->dn_objset->os_lock);
		dnode_setdirty(dn, tx);
	}
}

/*
 * Try to change the block size for the indicated dnode.  This can only
 * succeed if there are no blocks allocated or dirty beyond the first block.
 */
int
dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	int err;

	ASSERT3U(size, <=, spa_maxblocksize(dmu_objset_spa(dn->dn_objset)));
	if (size == 0)
		size = SPA_MINBLOCKSIZE;
	else
		size = P2ROUNDUP(size, SPA_MINBLOCKSIZE);

	if (ibs == dn->dn_indblkshift)
		ibs = 0;

	if (size >> SPA_MINBLOCKSHIFT == dn->dn_datablkszsec && ibs == 0)
		return (0);

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);

	/* Check for any allocated blocks beyond the first */
	if (dn->dn_maxblkid != 0)
		goto fail;

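	/*
	 * Any cached dbuf other than block 0, the bonus buffer, or the
	 * spill block (e.g. a dirty block not yet reflected in
	 * dn_maxblkid) means data beyond the first block, so fail.
	 */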
1395         mutex_enter(&dn->dn_dbufs_mtx);
1396         for (db = avl_first(&dn->dn_dbufs); db != NULL;
1397             db = AVL_NEXT(&dn->dn_dbufs, db)) {
1398                 if (db->db_blkid != 0 && db->db_blkid != DMU_BONUS_BLKID &&
1399                     db->db_blkid != DMU_SPILL_BLKID) {
1400                         mutex_exit(&dn->dn_dbufs_mtx);
1401                         goto fail;
1402                 }
1403         }
1404         mutex_exit(&dn->dn_dbufs_mtx);
1405 
1406         if (ibs && dn->dn_nlevels != 1)
1407                 goto fail;
1408 
1409         /* resize the old block */
1410         err = dbuf_hold_impl(dn, 0, 0, TRUE, FALSE, FTAG, &db);
1411         if (err == 0)
1412                 dbuf_new_size(db, size, tx);
1413         else if (err != ENOENT)
1414                 goto fail;
1415 
1416         dnode_setdblksz(dn, size);
1417         dnode_setdirty(dn, tx);
1418         dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = size;
1419         if (ibs) {
1420                 dn->dn_indblkshift = ibs;
1421                 dn->dn_next_indblkshift[tx->tx_txg & TXG_MASK] = ibs;
1422         }
1423         /* rele after we have fixed the blocksize in the dnode */
1424         if (db)
1425                 dbuf_rele(db, FTAG);
1426 
1427         rw_exit(&dn->dn_struct_rwlock);
1428         return (0);
1429 
1430 fail:
1431         rw_exit(&dn->dn_struct_rwlock);
1432         return (SET_ERROR(ENOTSUP));
1433 }
1434 
1435 /* read-holding callers must not rely on the lock being continuously held */
1436 void
1437 dnode_new_blkid(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx, boolean_t have_read)
1438 {
1439         uint64_t txgoff = tx->tx_txg & TXG_MASK;
1440         int epbs, new_nlevels;
1441         uint64_t sz;
1442 
1443         ASSERT(blkid != DMU_BONUS_BLKID);
1444 
1445         ASSERT(have_read ?
1446             RW_READ_HELD(&dn->dn_struct_rwlock) :
1447             RW_WRITE_HELD(&dn->dn_struct_rwlock));
1448 
1449         /*
1450          * if we have a read-lock, check to see if we need to do any work
1451          * before upgrading to a write-lock.
1452          */
1453         if (have_read) {
1454                 if (blkid <= dn->dn_maxblkid)
1455                         return;
1456 
1457                 if (!rw_tryupgrade(&dn->dn_struct_rwlock)) {
1458                         rw_exit(&dn->dn_struct_rwlock);
1459                         rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1460                 }
1461         }
1462 
1463         if (blkid <= dn->dn_maxblkid)
1464                 goto out;
1465 
1466         dn->dn_maxblkid = blkid;
1467 
1468         /*
1469          * Compute the number of levels necessary to support the new maxblkid.
1470          */
1471         new_nlevels = 1;
1472         epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
1473         for (sz = dn->dn_nblkptr;
1474             sz <= blkid && sz >= dn->dn_nblkptr; sz <<= epbs)
1475                 new_nlevels++;
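             /*
              * Worked example (assumed values): with dn_nblkptr == 3 and
              * dn_indblkshift == 17, epbs == 17 - SPA_BLKPTRSHIFT (7) == 10,
              * so each indirect block holds 1024 block pointers.  For
              * blkid == 5000: sz == 3 <= 5000 (new_nlevels = 2), then
              * sz == 3 << 10 == 3072 <= 5000 (new_nlevels = 3), then
              * sz == 3072 << 10 > 5000, so three levels suffice.
              */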
1476 
1477         if (new_nlevels > dn->dn_nlevels) {
1478                 int old_nlevels = dn->dn_nlevels;
1479                 dmu_buf_impl_t *db;
1480                 list_t *list;
1481                 dbuf_dirty_record_t *new, *dr, *dr_next;
1482 
1483                 dn->dn_nlevels = new_nlevels;
1484 
1485                 ASSERT3U(new_nlevels, >, dn->dn_next_nlevels[txgoff]);
1486                 dn->dn_next_nlevels[txgoff] = new_nlevels;
1487 
1488                 /* dirty the left indirects */
1489                 db = dbuf_hold_level(dn, old_nlevels, 0, FTAG);
1490                 ASSERT(db != NULL);
1491                 new = dbuf_dirty(db, tx);
1492                 dbuf_rele(db, FTAG);
1493 
1494                 /* transfer the dirty records to the new indirect */
1495                 mutex_enter(&dn->dn_mtx);
1496                 mutex_enter(&new->dt.di.dr_mtx);
1497                 list = &dn->dn_dirty_records[txgoff];
1498                 for (dr = list_head(list); dr; dr = dr_next) {
1499                         dr_next = list_next(&dn->dn_dirty_records[txgoff], dr);
1500                         if (dr->dr_dbuf->db_level != new_nlevels-1 &&
1501                             dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
1502                             dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
1503                                 ASSERT(dr->dr_dbuf->db_level == old_nlevels-1);
1504                                 list_remove(&dn->dn_dirty_records[txgoff], dr);
1505                                 list_insert_tail(&new->dt.di.dr_children, dr);
1506                                 dr->dr_parent = new;
1507                         }
1508                 }
1509                 mutex_exit(&new->dt.di.dr_mtx);
1510                 mutex_exit(&dn->dn_mtx);
1511         }
1512 
1513 out:
1514         if (have_read)
1515                 rw_downgrade(&dn->dn_struct_rwlock);
1516 }
1517 
1518 static void
1519 dnode_dirty_l1(dnode_t *dn, uint64_t l1blkid, dmu_tx_t *tx)
1520 {
1521         dmu_buf_impl_t *db = dbuf_hold_level(dn, 1, l1blkid, FTAG);
1522         if (db != NULL) {
1523                 dmu_buf_will_dirty(&db->db, tx);
1524                 dbuf_rele(db, FTAG);
1525         }
1526 }
1527 
1528 void
1529 dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx)
1530 {
1531         dmu_buf_impl_t *db;
1532         uint64_t blkoff, blkid, nblks;
1533         int blksz, blkshift, head, tail;
1534         int trunc = FALSE;
1535         int epbs;
1536 
1537         rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1538         blksz = dn->dn_datablksz;
1539         blkshift = dn->dn_datablkshift;
1540         epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
1541 
1542         if (len == DMU_OBJECT_END) {
1543                 len = UINT64_MAX - off;
1544                 trunc = TRUE;
1545         }
1546 
1547         /*
1548          * First, block align the region to free:
1549          */
1550         if (ISP2(blksz)) {
1551                 head = P2NPHASE(off, blksz);
1552                 blkoff = P2PHASE(off, blksz);
1553                 if ((off >> blkshift) > dn->dn_maxblkid)
1554                         goto out;
1555         } else {
1556                 ASSERT(dn->dn_maxblkid == 0);
1557                 if (off == 0 && len >= blksz) {
1558                         /*
1559                          * Freeing the whole block; fast-track this request.
1560                          * Note that we won't dirty any indirect blocks,
1561                          * which is fine because we will be freeing the entire
1562                          * file and thus all indirect blocks will be freed
1563                          * by free_children().
1564                          */
1565                         blkid = 0;
1566                         nblks = 1;
1567                         goto done;
1568                 } else if (off >= blksz) {
1569                         /* Freeing past end-of-data */
1570                         goto out;
1571                 } else {
1572                         /* Freeing part of the block. */
1573                         head = blksz - off;
1574                         ASSERT3U(head, >, 0);
1575                 }
1576                 blkoff = off;
1577         }
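             /*
              * Worked example for the power-of-2 case (assumed values):
              * blksz == 4096, off == 1000, len == 10000 give
              * head == P2NPHASE(1000, 4096) == 3096 (bytes from off up to
              * the end of block 0) and blkoff == P2PHASE(1000, 4096) ==
              * 1000 (where the free region starts within that block).
              */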
1578         /* zero out any partial block data at the start of the range */
1579         if (head) {
1580                 ASSERT3U(blkoff + head, ==, blksz);
1581                 if (len < head)
1582                         head = len;
1583                 if (dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, 0, off),
1584                     TRUE, FALSE, FTAG, &db) == 0) {
1585                         caddr_t data;
1586 
1587                         /* don't dirty if it isn't on disk and isn't dirty */
1588                         if (db->db_last_dirty ||
1589                             (db->db_blkptr && !BP_IS_HOLE(db->db_blkptr))) {
1590                                 rw_exit(&dn->dn_struct_rwlock);
1591                                 dmu_buf_will_dirty(&db->db, tx);
1592                                 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1593                                 data = db->db.db_data;
1594                                 bzero(data + blkoff, head);
1595                         }
1596                         dbuf_rele(db, FTAG);
1597                 }
1598                 off += head;
1599                 len -= head;
1600         }
1601 
1602         /* If the range was less than one block, we're done */
1603         if (len == 0)
1604                 goto out;
1605 
1606         /* If the remaining range is past end of file, we're done */
1607         if ((off >> blkshift) > dn->dn_maxblkid)
1608                 goto out;
1609 
1610         ASSERT(ISP2(blksz));
1611         if (trunc)
1612                 tail = 0;
1613         else
1614                 tail = P2PHASE(len, blksz);
1615 
1616         ASSERT0(P2PHASE(off, blksz));
1617         /* zero out any partial block data at the end of the range */
1618         if (tail) {
1619                 if (len < tail)
1620                         tail = len;
1621                 if (dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, 0, off+len),
1622                     TRUE, FALSE, FTAG, &db) == 0) {
1623                         /* don't dirty if not on disk and not dirty */
1624                         if (db->db_last_dirty ||
1625                             (db->db_blkptr && !BP_IS_HOLE(db->db_blkptr))) {
1626                                 rw_exit(&dn->dn_struct_rwlock);
1627                                 dmu_buf_will_dirty(&db->db, tx);
1628                                 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1629                                 bzero(db->db.db_data, tail);
1630                         }
1631                         dbuf_rele(db, FTAG);
1632                 }
1633                 len -= tail;
1634         }
1635 
1636         /* If the range did not include a full block, we are done */
1637         if (len == 0)
1638                 goto out;
1639 
1640         ASSERT(IS_P2ALIGNED(off, blksz));
1641         ASSERT(trunc || IS_P2ALIGNED(len, blksz));
1642         blkid = off >> blkshift;
1643         nblks = len >> blkshift;
1644         if (trunc)
1645                 nblks += 1;
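             /*
              * Continuing the example above (blksz == 4096, blkshift == 12,
              * off == 1000, len == 10000): the head trim advances off to
              * 4096 and shrinks len to 6904; the tail trim (P2PHASE(6904,
              * 4096) == 2808) shrinks len to 4096.  Thus blkid == 1 and
              * nblks == 1: exactly one whole block is freed here.
              */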
1646 
1647         /*
1648          * Dirty all the indirect blocks in this range.  Note that only
1649          * the first and last indirect blocks can actually be written
1650          * (if they were partially freed) -- they must be dirtied, even if
1651          * they do not exist on disk yet.  The interior blocks will
1652          * be freed by free_children(), so they will not actually be written.
1653          * Even though these interior blocks will not be written, we
1654          * dirty them for two reasons:
1655          *
1656          *  - It ensures that the indirect blocks remain in memory until
1657          *    syncing context.  (They have already been prefetched by
1658          *    dmu_tx_hold_free(), so we don't have to worry about reading
1659          *    them serially here.)
1660          *
1661          *  - The dirty space accounting will put pressure on the txg sync
1662          *    mechanism to begin syncing, and to delay transactions if there
1663          *    is a large amount of freeing.  Even though these indirect
1664          *    blocks will not be written, we could need to write the same
1665          *    amount of space if we copy the freed BPs into deadlists.
1666          */
1667         if (dn->dn_nlevels > 1) {
1668                 uint64_t first, last;
1669 
1670                 first = blkid >> epbs;
1671                 dnode_dirty_l1(dn, first, tx);
1672                 if (trunc)
1673                         last = dn->dn_maxblkid >> epbs;
1674                 else
1675                         last = (blkid + nblks - 1) >> epbs;
1676                 if (last != first)
1677                         dnode_dirty_l1(dn, last, tx);
1678 
1679                 int shift = dn->dn_datablkshift + dn->dn_indblkshift -
1680                     SPA_BLKPTRSHIFT;
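                     /*
                      * Illustrative span (assumed values): with
                      * dn_datablkshift == 17 and dn_indblkshift == 17,
                      * shift == 17 + 17 - 7 == 27, i.e. each level-1
                      * indirect block covers 2^27 bytes of level-0 data
                      * (1024 blkptrs x 128K data blocks).
                      */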
1681                 for (uint64_t i = first + 1; i < last; i++) {
1682                         /*
1683                          * Set i to the blockid of the next non-hole
1684                          * level-1 indirect block at or after i.  Note
1685                          * that dnode_next_offset() operates in terms of
1686                          * level-0-equivalent bytes.
1687                          */
1688                         uint64_t ibyte = i << shift;
1689                         int err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK,
1690                             &ibyte, 2, 1, 0);
1691                         i = ibyte >> shift;
1692                         if (i >= last)
1693                                 break;
1694 
1695                         /*
1696                          * Normally we should not see an error, either
1697                          * from dnode_next_offset() or dbuf_hold_level()
1698                          * (except for ESRCH from dnode_next_offset).
1699                          * If there is an i/o error, then when we read
1700                          * this block in syncing context, it will use
1701                          * ZIO_FLAG_MUSTSUCCEED, and thus hang/panic according
1702                          * to the "failmode" property.  dnode_next_offset()
1703                          * doesn't have a flag to indicate MUSTSUCCEED.
1704                          */
1705                         if (err != 0)
1706                                 break;
1707 
1708                         dnode_dirty_l1(dn, i, tx);
1709                 }
1710         }
1711 
1712 done:
1713         /*
1714          * Add this range to the dnode range list.
1715          * We will finish up this free operation in the syncing phase.
1716          */
1717         mutex_enter(&dn->dn_mtx);
1718         int txgoff = tx->tx_txg & TXG_MASK;
1719         if (dn->dn_free_ranges[txgoff] == NULL) {
1720                 dn->dn_free_ranges[txgoff] =
1721                     range_tree_create(NULL, NULL, &dn->dn_mtx);
1722         }
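             /*
              * Clearing the range before adding it makes overlapping frees
              * within a single txg idempotent; otherwise range_tree_add()
              * would trip its no-overlap assertion on a re-freed region.
              */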
1723         range_tree_clear(dn->dn_free_ranges[txgoff], blkid, nblks);
1724         range_tree_add(dn->dn_free_ranges[txgoff], blkid, nblks);
1725         dprintf_dnode(dn, "blkid=%llu nblks=%llu txg=%llu\n",
1726             blkid, nblks, tx->tx_txg);
1727         mutex_exit(&dn->dn_mtx);
1728 
1729         dbuf_free_range(dn, blkid, blkid + nblks - 1, tx);
1730         dnode_setdirty(dn, tx);
1731 out:
1732 
1733         rw_exit(&dn->dn_struct_rwlock);
1734 }
1735 
1736 static boolean_t
1737 dnode_spill_freed(dnode_t *dn)
1738 {
1739         int i;
1740 
1741         mutex_enter(&dn->dn_mtx);
1742         for (i = 0; i < TXG_SIZE; i++) {
1743                 if (dn->dn_rm_spillblk[i] == DN_KILL_SPILLBLK)
1744                         break;
1745         }
1746         mutex_exit(&dn->dn_mtx);
1747         return (i < TXG_SIZE);
1748 }
1749 
1750 /* return TRUE if this blkid was freed in a recent txg, or FALSE if it wasn't */
1751 uint64_t
1752 dnode_block_freed(dnode_t *dn, uint64_t blkid)
1753 {
1754         void *dp = spa_get_dsl(dn->dn_objset->os_spa);
1755         int i;
1756 
1757         if (blkid == DMU_BONUS_BLKID)
1758                 return (FALSE);
1759 
1760         /*
1761          * If we're in the process of opening the pool, dp will not be
1762          * set yet, but there shouldn't be anything dirty.
1763          */
1764         if (dp == NULL)
1765                 return (FALSE);
1766 
1767         if (dn->dn_free_txg)
1768                 return (TRUE);
1769 
1770         if (blkid == DMU_SPILL_BLKID)
1771                 return (dnode_spill_freed(dn));
1772 
1773         mutex_enter(&dn->dn_mtx);
1774         for (i = 0; i < TXG_SIZE; i++) {
1775                 if (dn->dn_free_ranges[i] != NULL &&
1776                     range_tree_contains(dn->dn_free_ranges[i], blkid, 1))
1777                         break;
1778         }
1779         mutex_exit(&dn->dn_mtx);
1780         return (i < TXG_SIZE);
1781 }
1782 
1783 /* call from syncing context when we actually write/free space for this dnode */
1784 void
1785 dnode_diduse_space(dnode_t *dn, int64_t delta)
1786 {
1787         uint64_t space;
1788         dprintf_dnode(dn, "dn=%p dnp=%p used=%llu delta=%lld\n",
1789             dn, dn->dn_phys,
1790             (u_longlong_t)dn->dn_phys->dn_used,
1791             (longlong_t)delta);
1792 
1793         mutex_enter(&dn->dn_mtx);
1794         space = DN_USED_BYTES(dn->dn_phys);
1795         if (delta > 0) {
1796                 ASSERT3U(space + delta, >=, space); /* no overflow */
1797         } else {
1798                 ASSERT3U(space, >=, -delta); /* no underflow */
1799         }
1800         space += delta;
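             /*
              * dn_used has two on-disk encodings: pools predating
              * SPA_VERSION_DNODE_BYTES store it in 512-byte (DEV_BSIZE)
              * sectors, while newer pools store raw bytes and set
              * DNODE_FLAG_USED_BYTES.  E.g. space == 8192 is recorded as
              * dn_used == 16 (8192 >> DEV_BSHIFT) in the old encoding.
              */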
1801         if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_DNODE_BYTES) {
1802                 ASSERT((dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) == 0);
1803                 ASSERT0(P2PHASE(space, 1<<DEV_BSHIFT));
1804                 dn->dn_phys->dn_used = space >> DEV_BSHIFT;
1805         } else {
1806                 dn->dn_phys->dn_used = space;
1807                 dn->dn_phys->dn_flags |= DNODE_FLAG_USED_BYTES;
1808         }
1809         mutex_exit(&dn->dn_mtx);
1810 }
1811 
1812 /*
1813  * Call when we think we're going to write/free space in open context to track
1814  * the amount of memory in use by the currently open txg.
1815  */
1816 void
1817 dnode_willuse_space(dnode_t *dn, int64_t space, dmu_tx_t *tx)
1818 {
1819         objset_t *os = dn->dn_objset;
1820         dsl_dataset_t *ds = os->os_dsl_dataset;
1821         int64_t aspace = spa_get_asize(os->os_spa, space);
1822 
1823         if (ds != NULL) {
1824                 dsl_dir_willuse_space(ds->ds_dir, aspace, tx);
1825                 dsl_pool_dirty_space(dmu_tx_pool(tx), space, tx);
1826         }
1827 
1828         dmu_tx_willuse_space(tx, aspace);
1829 }
1830 
1831 /*
1832  * Scans a block at the indicated "level" looking for a hole or data,
1833  * depending on 'flags'.
1834  *
1835  * If level > 0, then we are scanning an indirect block looking at its
1836  * pointers.  If level == 0, then we are looking at a block of dnodes.
1837  *
1838  * If we don't find what we are looking for in the block, we return ESRCH.
1839  * Otherwise, return with *offset pointing to the beginning (if searching
1840  * forwards) or end (if searching backwards) of the range covered by the
1841  * block pointer we matched on (or dnode).
1842  *
1843  * The basic search algorithm used below by dnode_next_offset() is to
1844  * use this function to search up the block tree (widen the search) until
1845  * we find something (i.e., we don't return ESRCH) and then search back
1846  * down the tree (narrow the search) until we reach our original search
1847  * level.
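      *
      * For example (a hypothetical shape): a level-1 search for data that
      * returns ESRCH widens to level 2, where a block pointer with nonzero
      * fill matches and *offset is set to the start of the range it
      * covers; the search then narrows back to level 1 within that range
      * and settles on the first matching entry.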
1848  */
1849 static int
1850 dnode_next_offset_level(dnode_t *dn, int flags, uint64_t *offset,
1851     int lvl, uint64_t blkfill, uint64_t txg)
1852 {
1853         dmu_buf_impl_t *db = NULL;
1854         void *data = NULL;
1855         uint64_t epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
1856         uint64_t epb = 1ULL << epbs;
1857         uint64_t minfill, maxfill;
1858         boolean_t hole;
1859         int i, inc, error, span;
1860 
1861         dprintf("probing object %llu offset %llx level %d of %u\n",
1862             dn->dn_object, *offset, lvl, dn->dn_phys->dn_nlevels);
1863 
1864         hole = ((flags & DNODE_FIND_HOLE) != 0);
1865         inc = (flags & DNODE_FIND_BACKWARDS) ? -1 : 1;
1866         ASSERT(txg == 0 || !hole);
1867 
1868         if (lvl == dn->dn_phys->dn_nlevels) {
1869                 error = 0;
1870                 epb = dn->dn_phys->dn_nblkptr;
1871                 data = dn->dn_phys->dn_blkptr;
1872         } else {
1873                 uint64_t blkid = dbuf_whichblock(dn, lvl, *offset);
1874                 error = dbuf_hold_impl(dn, lvl, blkid, TRUE, FALSE, FTAG, &db);
1875                 if (error) {
1876                         if (error != ENOENT)
1877                                 return (error);
1878                         if (hole)
1879                                 return (0);
1880                         /*
1881                          * This can only happen when we are searching up
1882                          * the block tree for data.  We don't really need to
1883                          * adjust the offset, as we will just end up looking
1884                          * at the pointer to this block in its parent, and it's
1885                          * going to be unallocated, so we will skip over it.
1886                          */
1887                         return (SET_ERROR(ESRCH));
1888                 }
1889                 error = dbuf_read(db, NULL, DB_RF_CANFAIL | DB_RF_HAVESTRUCT);
1890                 if (error) {
1891                         dbuf_rele(db, FTAG);
1892                         return (error);
1893                 }
1894                 data = db->db.db_data;
1895         }
1896 
1898         if (db != NULL && txg != 0 && (db->db_blkptr == NULL ||
1899             db->db_blkptr->blk_birth <= txg ||
1900             BP_IS_HOLE(db->db_blkptr))) {
1901                 /*
1902                  * This can only happen when we are searching up the tree
1903                  * and these conditions mean that we need to keep climbing.
1904                  */
1905                 error = SET_ERROR(ESRCH);
1906         } else if (lvl == 0) {
1907                 dnode_phys_t *dnp = data;
1908                 span = DNODE_SHIFT;
1909                 ASSERT(dn->dn_type == DMU_OT_DNODE);
1910 
1911                 for (i = (*offset >> span) & (blkfill - 1);
1912                     i >= 0 && i < blkfill; i += inc) {
1913                         if ((dnp[i].dn_type == DMU_OT_NONE) == hole)
1914                                 break;
1915                         *offset += (1ULL << span) * inc;
1916                 }
1917                 if (i < 0 || i == blkfill)
1918                         error = SET_ERROR(ESRCH);
1919         } else {
1920                 blkptr_t *bp = data;
1921                 uint64_t start = *offset;
1922                 span = (lvl - 1) * epbs + dn->dn_datablkshift;
1923                 minfill = 0;
1924                 maxfill = blkfill << ((lvl - 1) * epbs);
1925 
1926                 if (hole)
1927                         maxfill--;
1928                 else
1929                         minfill++;
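                     /*
                      * Example fill bounds (assumed values): for a level-2
                      * scan with blkfill == 1 and epbs == 10, maxfill ==
                      * 1 << 10 == 1024.  A hole search then accepts fill
                      * 0-1023 (not completely full) and a data search
                      * accepts fill 1-1024 (not completely empty).  With
                      * dn_datablkshift == 12, span == (2 - 1) * 10 + 12 ==
                      * 22, so each entry scanned covers 2^22 bytes.
                      */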
1930 
1931                 *offset = *offset >> span;
1932                 for (i = BF64_GET(*offset, 0, epbs);
1933                     i >= 0 && i < epb; i += inc) {
1934                         if (BP_GET_FILL(&bp[i]) >= minfill &&
1935                             BP_GET_FILL(&bp[i]) <= maxfill &&
1936                             (hole || bp[i].blk_birth > txg))
1937                                 break;
1938                         if (inc > 0 || *offset > 0)
1939                                 *offset += inc;
1940                 }
1941                 *offset = *offset << span;
1942                 if (inc < 0) {
1943                         /* traversing backwards; position offset at the end */
1944                         ASSERT3U(*offset, <=, start);
1945                         *offset = MIN(*offset + (1ULL << span) - 1, start);
1946                 } else if (*offset < start) {
1947                         *offset = start;
1948                 }
1949                 if (i < 0 || i >= epb)
1950                         error = SET_ERROR(ESRCH);
1951         }
1952 
1953         if (db)
1954                 dbuf_rele(db, FTAG);
1955 
1956         return (error);
1957 }
1958 
1959 /*
1960  * Find the next hole, data, or sparse region at or after *offset.
1961  * The value 'blkfill' tells us how many items we expect to find
1962  * in an L0 data block; this value is 1 for normal objects,
1963  * DNODES_PER_BLOCK for the meta dnode, and some fraction of
1964  * DNODES_PER_BLOCK when searching for sparse regions thereof.
1965  *
1966  * Examples:
1967  *
1968  * dnode_next_offset(dn, flags, offset, 1, 1, 0);
1969  *      Finds the next/previous hole/data in a file.
1970  *      Used in dmu_offset_next().
1971  *
1972  * dnode_next_offset(mdn, flags, offset, 0, DNODES_PER_BLOCK, txg);
1973  *      Finds the next free/allocated dnode in an objset's meta-dnode.
1974  *      Only finds objects that have new contents since txg (i.e.
1975  *      bonus buffer changes and content removal are ignored).
1976  *      Used in dmu_object_next().
1977  *
1978  * dnode_next_offset(mdn, DNODE_FIND_HOLE, offset, 2, DNODES_PER_BLOCK >> 2, 0);
1979  *      Finds the next L2 meta-dnode bp that's at most 1/4 full.
1980  *      Used in dmu_object_alloc().
1981  */
1982 int
1983 dnode_next_offset(dnode_t *dn, int flags, uint64_t *offset,
1984     int minlvl, uint64_t blkfill, uint64_t txg)
1985 {
1986         uint64_t initial_offset = *offset;
1987         int lvl, maxlvl;
1988         int error = 0;
1989 
1990         if (!(flags & DNODE_FIND_HAVELOCK))
1991                 rw_enter(&dn->dn_struct_rwlock, RW_READER);
1992 
1993         if (dn->dn_phys->dn_nlevels == 0) {
1994                 error = SET_ERROR(ESRCH);
1995                 goto out;
1996         }
1997 
1998         if (dn->dn_datablkshift == 0) {
1999                 if (*offset < dn->dn_datablksz) {
2000                         if (flags & DNODE_FIND_HOLE)
2001                                 *offset = dn->dn_datablksz;
2002                 } else {
2003                         error = SET_ERROR(ESRCH);
2004                 }
2005                 goto out;
2006         }
2007 
2008         maxlvl = dn->dn_phys->dn_nlevels;
2009 
2010         for (lvl = minlvl; lvl <= maxlvl; lvl++) {
2011                 error = dnode_next_offset_level(dn,
2012                     flags, offset, lvl, blkfill, txg);
2013                 if (error != ESRCH)
2014                         break;
2015         }
2016 
2017         while (error == 0 && --lvl >= minlvl) {
2018                 error = dnode_next_offset_level(dn,
2019                     flags, offset, lvl, blkfill, txg);
2020         }
2021 
2022         /*
2023          * There's always a "virtual hole" at the end of the object, even
2024  * if all BPs which physically exist are non-holes.
2025          */
2026         if ((flags & DNODE_FIND_HOLE) && error == ESRCH && txg == 0 &&
2027             minlvl == 1 && blkfill == 1 && !(flags & DNODE_FIND_BACKWARDS)) {
2028                 error = 0;
2029         }
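             /*
              * In practice this is what lets a SEEK_HOLE-style probe via
              * dmu_offset_next() succeed on a fully-allocated object: the
              * hole it reports begins at end-of-file.
              */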
2030 
2031         if (error == 0 && (flags & DNODE_FIND_BACKWARDS ?
2032             initial_offset < *offset : initial_offset > *offset))
2033                 error = SET_ERROR(ESRCH);
2034 out:
2035         if (!(flags & DNODE_FIND_HAVELOCK))
2036                 rw_exit(&dn->dn_struct_rwlock);
2037 
2038         return (error);
2039 }