64
65 if (block_type == ZBT_LEAF || block_type == BSWAP_64(ZBT_LEAF))
66 zap_leaf_byteswap(vbuf, size);
67 else {
68 /* it's a ptrtbl block */
69 byteswap_uint64_array(vbuf, size);
70 }
71 }
72
73 void
74 fzap_upgrade(zap_t *zap, dmu_tx_t *tx, zap_flags_t flags)
75 {
76 dmu_buf_t *db;
77 zap_leaf_t *l;
78 int i;
79 zap_phys_t *zp;
80
81 ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
82 zap->zap_ismicro = FALSE;
83
84 zap->zap_dbu.dbu_evict_func = zap_evict;
85
86 mutex_init(&zap->zap_f.zap_num_entries_mtx, 0, 0, 0);
87 zap->zap_f.zap_block_shift = highbit64(zap->zap_dbuf->db_size) - 1;
88
89 zp = zap_f_phys(zap);
90 /*
91 * explicitly zero it since it might be coming from an
92 * initialized microzap
93 */
94 bzero(zap->zap_dbuf->db_data, zap->zap_dbuf->db_size);
95 zp->zap_block_type = ZBT_HEADER;
96 zp->zap_magic = ZAP_MAGIC;
97
98 zp->zap_ptrtbl.zt_shift = ZAP_EMBEDDED_PTRTBL_SHIFT(zap);
99
100 zp->zap_freeblk = 2; /* block 1 will be the first leaf */
101 zp->zap_num_leafs = 1;
102 zp->zap_num_entries = 0;
103 zp->zap_salt = zap->zap_salt;
396 rw_destroy(&l->l_rwlock);
397 kmem_free(l, sizeof (zap_leaf_t));
398 }
399
/*
 * Allocate a brand-new leaf block for this fat zap: grab the next free
 * block number, hold a dbuf for it, attach the leaf as the dbuf's user,
 * and mark it dirty in the given transaction.
 *
 * Returns the new leaf with l_rwlock held as WRITER; the caller drops
 * it (see zap_put_leaf()).  Requires zap_rwlock held as writer.
 */
static zap_leaf_t *
zap_create_leaf(zap_t *zap, dmu_tx_t *tx)
{
	void *winner;
	zap_leaf_t *l = kmem_zalloc(sizeof (zap_leaf_t), KM_SLEEP);

	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));

	rw_init(&l->l_rwlock, 0, 0, 0);
	rw_enter(&l->l_rwlock, RW_WRITER);
	l->l_blkid = zap_allocate_blocks(zap, 1);
	l->l_dbuf = NULL;

	/* Freshly allocated block, so no read needed and no prefetch. */
	VERIFY(0 == dmu_buf_hold(zap->zap_objset, zap->zap_object,
	    l->l_blkid << FZAP_BLOCK_SHIFT(zap), NULL, &l->l_dbuf,
	    DMU_READ_NO_PREFETCH));
	dmu_buf_init_user(&l->l_dbu, zap_leaf_pageout, &l->l_dbuf);
	/*
	 * We just allocated this block, so no one else can have raced us
	 * to set a dbuf user; winner must be NULL.
	 */
	winner = dmu_buf_set_user(l->l_dbuf, &l->l_dbu);
	ASSERT(winner == NULL);
	dmu_buf_will_dirty(l->l_dbuf, tx);

	zap_leaf_init(l, zap->zap_normflags != 0);

	zap_f_phys(zap)->zap_num_leafs++;

	return (l);
}
427
428 int
429 fzap_count(zap_t *zap, uint64_t *count)
430 {
431 ASSERT(!zap->zap_ismicro);
432 mutex_enter(&zap->zap_f.zap_num_entries_mtx); /* unnecessary */
433 *count = zap_f_phys(zap)->zap_num_entries;
434 mutex_exit(&zap->zap_f.zap_num_entries_mtx);
435 return (0);
436 }
443 zap_put_leaf(zap_leaf_t *l)
444 {
445 rw_exit(&l->l_rwlock);
446 dmu_buf_rele(l->l_dbuf, NULL);
447 }
448
449 static zap_leaf_t *
450 zap_open_leaf(uint64_t blkid, dmu_buf_t *db)
451 {
452 zap_leaf_t *l, *winner;
453
454 ASSERT(blkid != 0);
455
456 l = kmem_zalloc(sizeof (zap_leaf_t), KM_SLEEP);
457 rw_init(&l->l_rwlock, 0, 0, 0);
458 rw_enter(&l->l_rwlock, RW_WRITER);
459 l->l_blkid = blkid;
460 l->l_bs = highbit64(db->db_size) - 1;
461 l->l_dbuf = db;
462
463 dmu_buf_init_user(&l->l_dbu, zap_leaf_pageout, &l->l_dbuf);
464 winner = dmu_buf_set_user(db, &l->l_dbu);
465
466 rw_exit(&l->l_rwlock);
467 if (winner != NULL) {
468 /* someone else set it first */
469 zap_leaf_pageout(&l->l_dbu);
470 l = winner;
471 }
472
473 /*
474 * lhr_pad was previously used for the next leaf in the leaf
475 * chain. There should be no chained leafs (as we have removed
476 * support for them).
477 */
478 ASSERT0(zap_leaf_phys(l)->l_hdr.lh_pad1);
479
480 /*
481 * There should be more hash entries than there can be
482 * chunks to put in the hash table
483 */
|
64
65 if (block_type == ZBT_LEAF || block_type == BSWAP_64(ZBT_LEAF))
66 zap_leaf_byteswap(vbuf, size);
67 else {
68 /* it's a ptrtbl block */
69 byteswap_uint64_array(vbuf, size);
70 }
71 }
72
73 void
74 fzap_upgrade(zap_t *zap, dmu_tx_t *tx, zap_flags_t flags)
75 {
76 dmu_buf_t *db;
77 zap_leaf_t *l;
78 int i;
79 zap_phys_t *zp;
80
81 ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
82 zap->zap_ismicro = FALSE;
83
84 zap->zap_dbu.dbu_evict_func_prep = NULL;
85 zap->zap_dbu.dbu_evict_func = zap_evict;
86
87 mutex_init(&zap->zap_f.zap_num_entries_mtx, 0, 0, 0);
88 zap->zap_f.zap_block_shift = highbit64(zap->zap_dbuf->db_size) - 1;
89
90 zp = zap_f_phys(zap);
91 /*
92 * explicitly zero it since it might be coming from an
93 * initialized microzap
94 */
95 bzero(zap->zap_dbuf->db_data, zap->zap_dbuf->db_size);
96 zp->zap_block_type = ZBT_HEADER;
97 zp->zap_magic = ZAP_MAGIC;
98
99 zp->zap_ptrtbl.zt_shift = ZAP_EMBEDDED_PTRTBL_SHIFT(zap);
100
101 zp->zap_freeblk = 2; /* block 1 will be the first leaf */
102 zp->zap_num_leafs = 1;
103 zp->zap_num_entries = 0;
104 zp->zap_salt = zap->zap_salt;
397 rw_destroy(&l->l_rwlock);
398 kmem_free(l, sizeof (zap_leaf_t));
399 }
400
/*
 * Allocate a brand-new leaf block for this fat zap: grab the next free
 * block number, hold a dbuf for it, attach the leaf as the dbuf's user,
 * and mark it dirty in the given transaction.
 *
 * Returns the new leaf with l_rwlock held as WRITER; the caller drops
 * it (see zap_put_leaf()).  Requires zap_rwlock held as writer.
 */
static zap_leaf_t *
zap_create_leaf(zap_t *zap, dmu_tx_t *tx)
{
	void *winner;
	zap_leaf_t *l = kmem_zalloc(sizeof (zap_leaf_t), KM_SLEEP);

	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));

	rw_init(&l->l_rwlock, 0, 0, 0);
	rw_enter(&l->l_rwlock, RW_WRITER);
	l->l_blkid = zap_allocate_blocks(zap, 1);
	l->l_dbuf = NULL;

	/* Freshly allocated block, so no read needed and no prefetch. */
	VERIFY(0 == dmu_buf_hold(zap->zap_objset, zap->zap_object,
	    l->l_blkid << FZAP_BLOCK_SHIFT(zap), NULL, &l->l_dbuf,
	    DMU_READ_NO_PREFETCH));
	/* No eviction-prep callback is needed for leaf users. */
	dmu_buf_init_user(&l->l_dbu, NULL, zap_leaf_pageout, &l->l_dbuf);
	/*
	 * We just allocated this block, so no one else can have raced us
	 * to set a dbuf user; winner must be NULL.
	 */
	winner = dmu_buf_set_user(l->l_dbuf, &l->l_dbu);
	ASSERT(winner == NULL);
	dmu_buf_will_dirty(l->l_dbuf, tx);

	zap_leaf_init(l, zap->zap_normflags != 0);

	zap_f_phys(zap)->zap_num_leafs++;

	return (l);
}
428
429 int
430 fzap_count(zap_t *zap, uint64_t *count)
431 {
432 ASSERT(!zap->zap_ismicro);
433 mutex_enter(&zap->zap_f.zap_num_entries_mtx); /* unnecessary */
434 *count = zap_f_phys(zap)->zap_num_entries;
435 mutex_exit(&zap->zap_f.zap_num_entries_mtx);
436 return (0);
437 }
444 zap_put_leaf(zap_leaf_t *l)
445 {
446 rw_exit(&l->l_rwlock);
447 dmu_buf_rele(l->l_dbuf, NULL);
448 }
449
450 static zap_leaf_t *
451 zap_open_leaf(uint64_t blkid, dmu_buf_t *db)
452 {
453 zap_leaf_t *l, *winner;
454
455 ASSERT(blkid != 0);
456
457 l = kmem_zalloc(sizeof (zap_leaf_t), KM_SLEEP);
458 rw_init(&l->l_rwlock, 0, 0, 0);
459 rw_enter(&l->l_rwlock, RW_WRITER);
460 l->l_blkid = blkid;
461 l->l_bs = highbit64(db->db_size) - 1;
462 l->l_dbuf = db;
463
464 dmu_buf_init_user(&l->l_dbu, NULL, zap_leaf_pageout, &l->l_dbuf);
465 winner = dmu_buf_set_user(db, &l->l_dbu);
466
467 rw_exit(&l->l_rwlock);
468 if (winner != NULL) {
469 /* someone else set it first */
470 zap_leaf_pageout(&l->l_dbu);
471 l = winner;
472 }
473
474 /*
475 * lhr_pad was previously used for the next leaf in the leaf
476 * chain. There should be no chained leafs (as we have removed
477 * support for them).
478 */
479 ASSERT0(zap_leaf_phys(l)->l_hdr.lh_pad1);
480
481 /*
482 * There should be more hash entries than there can be
483 * chunks to put in the hash table
484 */
|