/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
 * Copyright 2013 DEY Storage Systems, Inc.
 * Copyright 2014 HybridCluster. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 */

/* Portions Copyright 2010 Robert Milkowski */

#ifndef	_SYS_DMU_H
#define	_SYS_DMU_H

/*
 * This file describes the interface that the DMU provides for its
 * consumers.
 *
 * The DMU also interacts with the SPA.  That interface is described in
 * dmu_spa.h.
 */

#include <sys/zfs_context.h>
#include <sys/cred.h>
#include <sys/fs/zfs.h>
#include <sys/zio_priority.h>

#ifdef	__cplusplus
extern "C" {
#endif

struct uio;
struct xuio;
struct page;
struct vnode;
struct spa;
struct zilog;
struct zio;
struct blkptr;
struct zap_cursor;
struct dsl_dataset;
struct dsl_pool;
struct dnode;
struct drr_begin;
struct drr_end;
struct zbookmark_phys;
struct spa;
struct nvlist;
struct arc_buf;
struct zio_prop;
struct sa_handle;
struct file;

typedef struct objset objset_t;
typedef struct dmu_tx dmu_tx_t;
typedef struct dsl_dir dsl_dir_t;

typedef enum dmu_object_byteswap {
	DMU_BSWAP_UINT8,
	DMU_BSWAP_UINT16,
	DMU_BSWAP_UINT32,
	DMU_BSWAP_UINT64,
	DMU_BSWAP_ZAP,
	DMU_BSWAP_DNODE,
	DMU_BSWAP_OBJSET,
	DMU_BSWAP_ZNODE,
	DMU_BSWAP_OLDACL,
	DMU_BSWAP_ACL,
	/*
	 * Allocating a new byteswap type number makes the on-disk format
	 * incompatible with any other format that uses the same number.
	 *
	 * Data can usually be structured to work with one of the
	 * DMU_BSWAP_UINT* or DMU_BSWAP_ZAP types.
	 */
	DMU_BSWAP_NUMFUNCS
} dmu_object_byteswap_t;

#define	DMU_OT_NEWTYPE 0x80
#define	DMU_OT_METADATA 0x40
#define	DMU_OT_BYTESWAP_MASK 0x3f

/*
 * Defines a uint8_t object type. Object types specify if the data
 * in the object is metadata (boolean) and how to byteswap the data
 * (dmu_object_byteswap_t).
 */
#define	DMU_OT(byteswap, metadata) \
	(DMU_OT_NEWTYPE | \
	((metadata) ? DMU_OT_METADATA : 0) | \
	((byteswap) & DMU_OT_BYTESWAP_MASK))

#define	DMU_OT_IS_VALID(ot) (((ot) & DMU_OT_NEWTYPE) ? \
	((ot) & DMU_OT_BYTESWAP_MASK) < DMU_BSWAP_NUMFUNCS : \
	(ot) < DMU_OT_NUMTYPES)

#define	DMU_OT_IS_METADATA(ot) (((ot) & DMU_OT_NEWTYPE) ? \
	((ot) & DMU_OT_METADATA) : \
	dmu_ot[(ot)].ot_metadata)

/*
 * These object types use bp_fill != 1 for their L0 bp's. Therefore they can't
 * have their data embedded (i.e. use a BP_IS_EMBEDDED() bp), because bp_fill
 * is repurposed for embedded BPs.
 */
#define	DMU_OT_HAS_FILL(ot) \
	((ot) == DMU_OT_DNODE || (ot) == DMU_OT_OBJSET)

#define	DMU_OT_BYTESWAP(ot) (((ot) & DMU_OT_NEWTYPE) ? \
	((ot) & DMU_OT_BYTESWAP_MASK) : \
	dmu_ot[(ot)].ot_byteswap)

typedef enum dmu_object_type {
	DMU_OT_NONE,
	/* general: */
	DMU_OT_OBJECT_DIRECTORY,	/* ZAP */
	DMU_OT_OBJECT_ARRAY,		/* UINT64 */
	DMU_OT_PACKED_NVLIST,		/* UINT8 (XDR by nvlist_pack/unpack) */
	DMU_OT_PACKED_NVLIST_SIZE,	/* UINT64 */
	DMU_OT_BPOBJ,			/* UINT64 */
	DMU_OT_BPOBJ_HDR,		/* UINT64 */
	/* spa: */
	DMU_OT_SPACE_MAP_HEADER,	/* UINT64 */
	DMU_OT_SPACE_MAP,		/* UINT64 */
	/* zil: */
	DMU_OT_INTENT_LOG,		/* UINT64 */
	/* dmu: */
	DMU_OT_DNODE,			/* DNODE */
	DMU_OT_OBJSET,			/* OBJSET */
	/* dsl: */
	DMU_OT_DSL_DIR,			/* UINT64 */
	DMU_OT_DSL_DIR_CHILD_MAP,	/* ZAP */
	DMU_OT_DSL_DS_SNAP_MAP,		/* ZAP */
	DMU_OT_DSL_PROPS,		/* ZAP */
	DMU_OT_DSL_DATASET,		/* UINT64 */
	/* zpl: */
	DMU_OT_ZNODE,			/* ZNODE */
	DMU_OT_OLDACL,			/* Old ACL */
	DMU_OT_PLAIN_FILE_CONTENTS,	/* UINT8 */
	DMU_OT_DIRECTORY_CONTENTS,	/* ZAP */
	DMU_OT_MASTER_NODE,		/* ZAP */
	DMU_OT_UNLINKED_SET,		/* ZAP */
	/* zvol: */
	DMU_OT_ZVOL,			/* UINT8 */
	DMU_OT_ZVOL_PROP,		/* ZAP */
	/* other; for testing only! */
	DMU_OT_PLAIN_OTHER,		/* UINT8 */
	DMU_OT_UINT64_OTHER,		/* UINT64 */
	DMU_OT_ZAP_OTHER,		/* ZAP */
	/* new object types: */
	DMU_OT_ERROR_LOG,		/* ZAP */
	DMU_OT_SPA_HISTORY,		/* UINT8 */
	DMU_OT_SPA_HISTORY_OFFSETS,	/* spa_his_phys_t */
	DMU_OT_POOL_PROPS,		/* ZAP */
	DMU_OT_DSL_PERMS,		/* ZAP */
	DMU_OT_ACL,			/* ACL */
	DMU_OT_SYSACL,			/* SYSACL */
	DMU_OT_FUID,			/* FUID table (Packed NVLIST UINT8) */
	DMU_OT_FUID_SIZE,		/* FUID table size UINT64 */
	DMU_OT_NEXT_CLONES,		/* ZAP */
	DMU_OT_SCAN_QUEUE,		/* ZAP */
	DMU_OT_USERGROUP_USED,		/* ZAP */
	DMU_OT_USERGROUP_QUOTA,		/* ZAP */
	DMU_OT_USERREFS,		/* ZAP */
	DMU_OT_DDT_ZAP,			/* ZAP */
	DMU_OT_DDT_STATS,		/* ZAP */
	DMU_OT_SA,			/* System attr */
	DMU_OT_SA_MASTER_NODE,		/* ZAP */
	DMU_OT_SA_ATTR_REGISTRATION,	/* ZAP */
	DMU_OT_SA_ATTR_LAYOUTS,		/* ZAP */
	DMU_OT_SCAN_XLATE,		/* ZAP */
	DMU_OT_DEDUP,			/* fake dedup BP from ddt_bp_create() */
	DMU_OT_DEADLIST,		/* ZAP */
	DMU_OT_DEADLIST_HDR,		/* UINT64 */
	DMU_OT_DSL_CLONES,		/* ZAP */
	DMU_OT_BPOBJ_SUBOBJ,		/* UINT64 */
	/*
	 * Do not allocate new object types here. Doing so makes the on-disk
	 * format incompatible with any other format that uses the same object
	 * type number.
	 *
	 * When creating an object which does not have one of the above types,
	 * use the DMU_OTN_* type with the correct byteswap and metadata
	 * values.
	 *
	 * The DMU_OTN_* types do not have entries in the dmu_ot table;
	 * use the DMU_OT_IS_METADATA() and DMU_OT_BYTESWAP() macros instead
	 * of indexing into dmu_ot directly (this works for both DMU_OT_* types
	 * and DMU_OTN_* types).
	 */
	DMU_OT_NUMTYPES,

	/*
	 * Names for valid types declared with DMU_OT().
	 */
	DMU_OTN_UINT8_DATA = DMU_OT(DMU_BSWAP_UINT8, B_FALSE),
	DMU_OTN_UINT8_METADATA = DMU_OT(DMU_BSWAP_UINT8, B_TRUE),
	DMU_OTN_UINT16_DATA = DMU_OT(DMU_BSWAP_UINT16, B_FALSE),
	DMU_OTN_UINT16_METADATA = DMU_OT(DMU_BSWAP_UINT16, B_TRUE),
	DMU_OTN_UINT32_DATA = DMU_OT(DMU_BSWAP_UINT32, B_FALSE),
	DMU_OTN_UINT32_METADATA = DMU_OT(DMU_BSWAP_UINT32, B_TRUE),
	DMU_OTN_UINT64_DATA = DMU_OT(DMU_BSWAP_UINT64, B_FALSE),
	DMU_OTN_UINT64_METADATA = DMU_OT(DMU_BSWAP_UINT64, B_TRUE),
	DMU_OTN_ZAP_DATA = DMU_OT(DMU_BSWAP_ZAP, B_FALSE),
	DMU_OTN_ZAP_METADATA = DMU_OT(DMU_BSWAP_ZAP, B_TRUE),
} dmu_object_type_t;
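
/*
 * Worked example (illustrative only, not part of the interface): the
 * DMU_OTN_* values above are plain DMU_OT() encodings.  For instance,
 *
 *	DMU_OTN_ZAP_METADATA
 *	    = DMU_OT(DMU_BSWAP_ZAP, B_TRUE)
 *	    = DMU_OT_NEWTYPE | DMU_OT_METADATA | DMU_BSWAP_ZAP
 *	    = 0x80 | 0x40 | 0x04
 *	    = 0xc4
 *
 * so DMU_OT_IS_METADATA(0xc4) and DMU_OT_BYTESWAP(0xc4) decode the
 * metadata flag and byteswap function without consulting the dmu_ot
 * table.
 */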

typedef enum txg_how {
	TXG_WAIT = 1,
	TXG_NOWAIT,
	TXG_WAITED,
} txg_how_t;

void byteswap_uint64_array(void *buf, size_t size);
void byteswap_uint32_array(void *buf, size_t size);
void byteswap_uint16_array(void *buf, size_t size);
void byteswap_uint8_array(void *buf, size_t size);
void zap_byteswap(void *buf, size_t size);
void zfs_oldacl_byteswap(void *buf, size_t size);
void zfs_acl_byteswap(void *buf, size_t size);
void zfs_znode_byteswap(void *buf, size_t size);

#define	DS_FIND_SNAPSHOTS	(1<<0)
#define	DS_FIND_CHILDREN	(1<<1)
#define	DS_FIND_SERIALIZE	(1<<2)

/*
 * The maximum number of bytes that can be accessed as part of one
 * operation, including metadata.
 */
#define	DMU_MAX_ACCESS (32 * 1024 * 1024) /* 32MB */
#define	DMU_MAX_DELETEBLKCNT (20480) /* ~5MB of indirect blocks */

#define	DMU_USERUSED_OBJECT	(-1ULL)
#define	DMU_GROUPUSED_OBJECT	(-2ULL)

/*
 * artificial blkids for bonus buffer and spill blocks
 */
#define	DMU_BONUS_BLKID		(-1ULL)
#define	DMU_SPILL_BLKID		(-2ULL)
/*
 * Public routines to create, destroy, open, and close objsets.
 */
int dmu_objset_hold(const char *name, void *tag, objset_t **osp);
int dmu_objset_own(const char *name, dmu_objset_type_t type,
    boolean_t readonly, void *tag, objset_t **osp);
void dmu_objset_rele(objset_t *os, void *tag);
void dmu_objset_disown(objset_t *os, void *tag);
int dmu_objset_open_ds(struct dsl_dataset *ds, objset_t **osp);

void dmu_objset_evict_dbufs(objset_t *os);
int dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags,
    void (*func)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx), void *arg);
int dmu_get_recursive_snaps_nvl(char *fsname, const char *snapname,
    struct nvlist *snaps);
int dmu_objset_clone(const char *name, const char *origin);
int dsl_destroy_snapshots_nvl(struct nvlist *snaps, boolean_t defer,
    struct nvlist *errlist);
int dmu_objset_snapshot_one(const char *fsname, const char *snapname);
int dmu_objset_snapshot_tmp(const char *, const char *, int);
int dmu_objset_find(char *name, int func(const char *, void *), void *arg,
    int flags);
void dmu_objset_byteswap(void *buf, size_t size);
int dsl_dataset_rename_snapshot(const char *fsname,
    const char *oldsnapname, const char *newsnapname, boolean_t recursive);

typedef struct dmu_buf {
	uint64_t db_object;		/* object that this buffer is part of */
	uint64_t db_offset;		/* byte offset in this object */
	uint64_t db_size;		/* size of buffer in bytes */
	void *db_data;			/* data in buffer */
} dmu_buf_t;

/*
 * The names of zap entries in the DIRECTORY_OBJECT of the MOS.
 */
#define	DMU_POOL_DIRECTORY_OBJECT	1
#define	DMU_POOL_CONFIG			"config"
#define	DMU_POOL_FEATURES_FOR_WRITE	"features_for_write"
#define	DMU_POOL_FEATURES_FOR_READ	"features_for_read"
#define	DMU_POOL_FEATURE_DESCRIPTIONS	"feature_descriptions"
#define	DMU_POOL_FEATURE_ENABLED_TXG	"feature_enabled_txg"
#define	DMU_POOL_ROOT_DATASET		"root_dataset"
#define	DMU_POOL_SYNC_BPOBJ		"sync_bplist"
#define	DMU_POOL_ERRLOG_SCRUB		"errlog_scrub"
#define	DMU_POOL_ERRLOG_LAST		"errlog_last"
#define	DMU_POOL_SPARES			"spares"
#define	DMU_POOL_DEFLATE		"deflate"
#define	DMU_POOL_HISTORY		"history"
#define	DMU_POOL_PROPS			"pool_props"
#define	DMU_POOL_L2CACHE		"l2cache"
#define	DMU_POOL_TMP_USERREFS		"tmp_userrefs"
#define	DMU_POOL_DDT			"DDT-%s-%s-%s"
#define	DMU_POOL_DDT_STATS		"DDT-statistics"
#define	DMU_POOL_CREATION_VERSION	"creation_version"
#define	DMU_POOL_SCAN			"scan"
#define	DMU_POOL_FREE_BPOBJ		"free_bpobj"
#define	DMU_POOL_BPTREE_OBJ		"bptree_obj"
#define	DMU_POOL_EMPTY_BPOBJ		"empty_bpobj"

/*
 * Allocate an object from this objset.  The range of object numbers
 * available is (0, DN_MAX_OBJECT).  Object 0 is the meta-dnode.
 *
 * The transaction must be assigned to a txg.  The newly allocated
 * object will be "held" in the transaction (ie. you can modify the
 * newly allocated object in this transaction).
 *
 * dmu_object_alloc() chooses an object number and returns it.
 *
 * dmu_object_claim() allocates a specific object number.  If that
 * number is already allocated, it fails and returns EEXIST.
 *
 * Return 0 on success, or ENOSPC or EEXIST as specified above.
 */
uint64_t dmu_object_alloc(objset_t *os, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonus_type, int bonus_len, dmu_tx_t *tx);
int dmu_object_claim(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonus_type, int bonus_len, dmu_tx_t *tx);
int dmu_object_reclaim(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *txp);
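
/*
 * Minimal allocation sketch (illustrative; error handling and objset
 * setup elided).  The DMU_NEW_OBJECT hold is one plausible way to
 * reserve room for the new dnode before assigning the transaction:
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	uint64_t object;
 *
 *	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
 *	if (dmu_tx_assign(tx, TXG_WAIT) == 0) {
 *		object = dmu_object_alloc(os, DMU_OT_UINT64_OTHER, 0,
 *		    DMU_OT_NONE, 0, tx);
 *		dmu_tx_commit(tx);
 *	} else {
 *		dmu_tx_abort(tx);
 *	}
 *
 * See the transaction comment block later in this file for the full
 * create/hold/assign/commit rules.
 */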

/*
 * Free an object from this objset.
 *
 * The object's data will be freed as well (ie. you don't need to call
 * dmu_free(object, 0, -1, tx)).
 *
 * The object need not be held in the transaction.
 *
 * If there are any holds on this object's buffers (via dmu_buf_hold()),
 * or tx holds on the object (via dmu_tx_hold_object()), you can not
 * free it; it fails and returns EBUSY.
 *
 * If the object is not allocated, it fails and returns ENOENT.
 *
 * Return 0 on success, or EBUSY or ENOENT as specified above.
 */
int dmu_object_free(objset_t *os, uint64_t object, dmu_tx_t *tx);

/*
 * Find the next allocated or free object.
 *
 * The objectp parameter is in-out.  It will be updated to be the next
 * object which is allocated.  Ignore objects which have not been
 * modified since txg.
 *
 * XXX Can only be called on an objset with no dirty data.
 *
 * Returns 0 on success, or ENOENT if there are no more objects.
 */
int dmu_object_next(objset_t *os, uint64_t *objectp,
    boolean_t hole, uint64_t txg);
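
/*
 * Illustrative iteration sketch, assuming the objset has no dirty data
 * (see the XXX note above).  Starting at object 0 visits every
 * allocated object; examine() is a hypothetical per-object callback:
 *
 *	uint64_t obj = 0;
 *
 *	while (dmu_object_next(os, &obj, B_FALSE, 0) == 0)
 *		examine(os, obj);
 *
 * Passing txg 0 means objects are not filtered by modification time;
 * hole == B_TRUE would search for free object numbers instead.
 */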

/*
 * Set the data blocksize for an object.
 *
 * The object cannot have any blocks allocated beyond the first.  If
 * the first block is allocated already, the new size must be greater
 * than the current block size.  If these conditions are not met,
 * ENOTSUP will be returned.
 *
 * Returns 0 on success, or EBUSY if there are any holds on the object
 * contents, or ENOTSUP as described above.
 */
int dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size,
    int ibs, dmu_tx_t *tx);

/*
 * Set the checksum property on a dnode.  The new checksum algorithm will
 * apply to all newly written blocks; existing blocks will not be affected.
 */
void dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum,
    dmu_tx_t *tx);

/*
 * Set the compress property on a dnode.  The new compression algorithm will
 * apply to all newly written blocks; existing blocks will not be affected.
 */
void dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
    dmu_tx_t *tx);

void
dmu_write_embedded(objset_t *os, uint64_t object, uint64_t offset,
    void *data, uint8_t etype, uint8_t comp, int uncompressed_size,
    int compressed_size, int byteorder, dmu_tx_t *tx);

/*
 * Decide how to write a block: checksum, compression, number of copies, etc.
 */
#define	WP_NOFILL	0x1
#define	WP_DMU_SYNC	0x2
#define	WP_SPILL	0x4

void dmu_write_policy(objset_t *os, struct dnode *dn, int level, int wp,
    struct zio_prop *zp);
/*
 * The bonus data is accessed more or less like a regular buffer.
 * You must dmu_bonus_hold() to get the buffer, which will give you a
 * dmu_buf_t with db_offset==-1ULL, and db_size = the size of the bonus
 * data.  As with any normal buffer, you must call dmu_buf_read() to
 * read db_data, dmu_buf_will_dirty() before modifying it, and the
 * object must be held in an assigned transaction before calling
 * dmu_buf_will_dirty.  You may use dmu_buf_set_user() on the bonus
 * buffer as well.  You must release your hold with dmu_buf_rele().
 *
 * Returns ENOENT, EIO, or 0.
 */
int dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **);
int dmu_bonus_max(void);
int dmu_set_bonus(dmu_buf_t *, int, dmu_tx_t *);
int dmu_set_bonustype(dmu_buf_t *, dmu_object_type_t, dmu_tx_t *);
dmu_object_type_t dmu_get_bonustype(dmu_buf_t *);
int dmu_rm_spill(objset_t *, uint64_t, dmu_tx_t *);
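
/*
 * Bonus buffer sketch (illustrative; error handling elided).  FTAG and
 * consume() are stand-ins for the caller's own tag and code:
 *
 *	dmu_buf_t *db;
 *
 *	if (dmu_bonus_hold(os, object, FTAG, &db) == 0) {
 *		consume(db->db_data, db->db_size);
 *		dmu_buf_rele(db, FTAG);
 *	}
 *
 * To modify the bonus data, hold the object in an assigned transaction
 * and call dmu_buf_will_dirty(db, tx) before touching db->db_data, as
 * described above.
 */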

/*
 * Special spill buffer support used by "SA" framework
 */

int dmu_spill_hold_by_bonus(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp);
int dmu_spill_hold_by_dnode(struct dnode *dn, uint32_t flags,
    void *tag, dmu_buf_t **dbp);
int dmu_spill_hold_existing(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp);

/*
 * Obtain the DMU buffer from the specified object which contains the
 * specified offset.  dmu_buf_hold() puts a "hold" on the buffer, so
 * that it will remain in memory.  You must release the hold with
 * dmu_buf_rele().  You mustn't access the dmu_buf_t after releasing your
 * hold.  You must have a hold on any dmu_buf_t* you pass to the DMU.
 *
 * You must call dmu_buf_read, dmu_buf_will_dirty, or dmu_buf_will_fill
 * on the returned buffer before reading or writing the buffer's
 * db_data.  The comments for those routines describe what particular
 * operations are valid after calling them.
 *
 * The object number must be a valid, allocated object number.
 */
int dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
    void *tag, dmu_buf_t **, int flags);
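
/*
 * Read-path sketch (illustrative; error handling elided).  FTAG and
 * consume() are stand-ins; DMU_READ_PREFETCH is defined later in this
 * file.  Per the rules above, db_data may only be read once the buffer
 * contents are valid (e.g. after dmu_buf_read()):
 *
 *	dmu_buf_t *db;
 *
 *	if (dmu_buf_hold(os, object, offset, FTAG, &db,
 *	    DMU_READ_PREFETCH) == 0) {
 *		consume(db->db_data, db->db_size);
 *		dmu_buf_rele(db, FTAG);
 *	}
 */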

/*
 * Add a reference to a dmu buffer that has already been held via
 * dmu_buf_hold() in the current context.
 */
void dmu_buf_add_ref(dmu_buf_t *db, void *tag);

/*
 * Attempt to add a reference to a dmu buffer that is in an unknown state,
 * using a pointer that may have been invalidated by eviction processing.
 * The request will succeed if the passed in dbuf still represents the
 * same os/object/blkid, is ineligible for eviction, and has at least
 * one hold by a user other than the syncer.
 */
boolean_t dmu_buf_try_add_ref(dmu_buf_t *, objset_t *os, uint64_t object,
    uint64_t blkid, void *tag);

void dmu_buf_rele(dmu_buf_t *db, void *tag);
uint64_t dmu_buf_refcount(dmu_buf_t *db);

/*
 * dmu_buf_hold_array holds the DMU buffers which contain all bytes in a
 * range of an object.  A pointer to an array of dmu_buf_t*'s is
 * returned (in *dbpp).
 *
 * dmu_buf_rele_array releases the hold on an array of dmu_buf_t*'s, and
 * frees the array.  The hold on the array of buffers MUST be released
 * with dmu_buf_rele_array.  You can NOT release the hold on each buffer
 * individually with dmu_buf_rele.
 */
int dmu_buf_hold_array_by_bonus(dmu_buf_t *db, uint64_t offset,
    uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp);
void dmu_buf_rele_array(dmu_buf_t **, int numbufs, void *tag);

typedef void dmu_buf_evict_func_t(void *user_ptr);

/*
 * A DMU buffer user object may be associated with a dbuf for the
 * duration of its lifetime.  This allows the user of a dbuf (client)
 * to attach private data to a dbuf (e.g. in-core only data such as a
 * dnode_children_t, zap_t, or zap_leaf_t) and be optionally notified
 * when that dbuf has been evicted.  Clients typically respond to the
 * eviction notification by freeing their private data, thus ensuring
 * the same lifetime for both dbuf and private data.
 *
 * The mapping from a dmu_buf_user_t to any client private data is the
 * client's responsibility.  All current consumers of the API with private
 * data embed a dmu_buf_user_t as the first member of the structure for
 * their private data.  This allows conversions between the two types
 * with a simple cast.  Since the DMU buf user API never needs access
 * to the private data, other strategies can be employed if necessary
 * or convenient for the client (e.g. using container_of() to do the
 * conversion for private data that cannot have the dmu_buf_user_t as
 * its first member).
 *
 * Eviction callbacks are executed without the dbuf mutex held or any
 * other type of mechanism to guarantee that the dbuf is still available.
 * For this reason, users must assume the dbuf has already been freed
 * and not reference the dbuf from the callback context.
 *
 * Users requesting "immediate eviction" are notified as soon as the dbuf
 * is only referenced by dirty records (dirties == holds).  Otherwise the
 * notification occurs after eviction processing for the dbuf begins.
 */
typedef struct dmu_buf_user {
	/*
	 * Asynchronous user eviction callback state.
	 */
	taskq_ent_t	dbu_tqent;

	/* This instance's eviction function pointer. */
	dmu_buf_evict_func_t *dbu_evict_func;
#ifdef ZFS_DEBUG
	/*
	 * Pointer to user's dbuf pointer.  NULL for clients that do
	 * not associate a dbuf with their user data.
	 *
	 * The dbuf pointer is cleared upon eviction so as to catch
	 * use-after-evict bugs in clients.
	 */
	dmu_buf_t **dbu_clear_on_evict_dbufp;
#endif
} dmu_buf_user_t;

/*
 * Initialize the given dmu_buf_user_t instance with the eviction function
 * evict_func, to be called when the user is evicted.
 *
 * NOTE: This function should only be called once on a given dmu_buf_user_t.
 *       To allow enforcement of this, dbu must already be zeroed on entry.
 */
#ifdef __lint
/* Very ugly, but it beats issuing suppression directives in many Makefiles. */
extern void
dmu_buf_init_user(dmu_buf_user_t *dbu, dmu_buf_evict_func_t *evict_func,
    dmu_buf_t **clear_on_evict_dbufp);
#else /* __lint */
inline void
dmu_buf_init_user(dmu_buf_user_t *dbu, dmu_buf_evict_func_t *evict_func,
    dmu_buf_t **clear_on_evict_dbufp)
{
	ASSERT(dbu->dbu_evict_func == NULL);
	ASSERT(evict_func != NULL);
	dbu->dbu_evict_func = evict_func;
#ifdef ZFS_DEBUG
	dbu->dbu_clear_on_evict_dbufp = clear_on_evict_dbufp;
#endif
}
#endif /* __lint */
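
/*
 * Typical client sketch (illustrative): my_user_t, my_evict_func() and
 * the mu_* fields are hypothetical names.  The dmu_buf_user_t is the
 * first member, so the void * passed to the eviction callback can be
 * cast directly, as described above:
 *
 *	typedef struct my_user {
 *		dmu_buf_user_t	mu_dbu;
 *		dmu_buf_t	*mu_db;
 *	} my_user_t;
 *
 *	static void
 *	my_evict_func(void *dbu)
 *	{
 *		kmem_free(dbu, sizeof (my_user_t));
 *	}
 *
 *	my_user_t *mu = kmem_zalloc(sizeof (my_user_t), KM_SLEEP);
 *	mu->mu_db = db;
 *	dmu_buf_init_user(&mu->mu_dbu, my_evict_func, &mu->mu_db);
 *	if (dmu_buf_set_user(db, &mu->mu_dbu) != NULL)
 *		kmem_free(mu, sizeof (my_user_t));
 *
 * The eviction callback must not dereference the dbuf; it may already
 * have been freed.
 */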

/*
 * Attach user data to a dbuf and mark it for normal (when the dbuf's
 * data is cleared or its reference count goes to zero) eviction processing.
 *
 * Returns NULL on success, or the existing user if another user currently
 * owns the buffer.
 */
void *dmu_buf_set_user(dmu_buf_t *db, dmu_buf_user_t *user);

/*
 * Attach user data to a dbuf and mark it for immediate (its dirty and
 * reference counts are equal) eviction processing.
 *
 * Returns NULL on success, or the existing user if another user currently
 * owns the buffer.
 */
void *dmu_buf_set_user_ie(dmu_buf_t *db, dmu_buf_user_t *user);

/*
 * Replace the current user of a dbuf.
 *
 * If given the current user of a dbuf, replaces the dbuf's user with
 * "new_user" and returns the user data pointer that was replaced.
 * Otherwise returns the current, and unmodified, dbuf user pointer.
 */
void *dmu_buf_replace_user(dmu_buf_t *db,
    dmu_buf_user_t *old_user, dmu_buf_user_t *new_user);

/*
 * Remove the specified user data for a DMU buffer.
 *
 * Returns the user that was removed on success, or the current user if
 * another user currently owns the buffer.
 */
void *dmu_buf_remove_user(dmu_buf_t *db, dmu_buf_user_t *user);

/*
 * Returns the user data (dmu_buf_user_t *) associated with this dbuf.
 */
void *dmu_buf_get_user(dmu_buf_t *db);

/* Block until any in-progress dmu buf user evictions complete. */
void dmu_buf_user_evict_wait(void);

/*
 * Returns the blkptr associated with this dbuf, or NULL if not set.
 */
struct blkptr *dmu_buf_get_blkptr(dmu_buf_t *db);

/*
 * Indicate that you are going to modify the buffer's data (db_data).
 *
 * The transaction (tx) must be assigned to a txg (ie. you've called
 * dmu_tx_assign()).  The buffer's object must be held in the tx
 * (ie. you've called dmu_tx_hold_object(tx, db->db_object)).
 */
void dmu_buf_will_dirty(dmu_buf_t *db, dmu_tx_t *tx);

/*
 * Tells if the given dbuf is freeable.
 */
boolean_t dmu_buf_freeable(dmu_buf_t *);

/*
 * You must create a transaction, then hold the objects which you will
 * (or might) modify as part of this transaction.  Then you must assign
 * the transaction to a transaction group.  Once the transaction has
 * been assigned, you can modify buffers which belong to held objects as
 * part of this transaction.  You can't modify buffers before the
 * transaction has been assigned; you can't modify buffers which don't
 * belong to objects which this transaction holds; you can't hold
 * objects once the transaction has been assigned.  You may hold an
 * object which you are going to free (with dmu_object_free()), but you
 * don't have to.
 *
 * You can abort the transaction before it has been assigned.
 *
 * Note that you may hold buffers (with dmu_buf_hold) at any time,
 * regardless of transaction state.
 */

#define	DMU_NEW_OBJECT	(-1ULL)
#define	DMU_OBJECT_END	(-1ULL)

dmu_tx_t *dmu_tx_create(objset_t *os);
void dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len);
void dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off,
    uint64_t len);
void dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name);
void dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object);
void dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object);
void dmu_tx_hold_sa(dmu_tx_t *tx, struct sa_handle *hdl, boolean_t may_grow);
void dmu_tx_hold_sa_create(dmu_tx_t *tx, int total_size);
void dmu_tx_abort(dmu_tx_t *tx);
int dmu_tx_assign(dmu_tx_t *tx, enum txg_how txg_how);
void dmu_tx_wait(dmu_tx_t *tx);
void dmu_tx_commit(dmu_tx_t *tx);
void dmu_tx_mark_netfree(dmu_tx_t *tx);
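
/*
 * Lifecycle sketch (illustrative; error handling abbreviated):
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *
 *	dmu_tx_hold_write(tx, object, offset, size);
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 *	if (error != 0) {
 *		dmu_tx_abort(tx);
 *		return (error);
 *	}
 *	dmu_write(os, object, offset, size, buf, tx);
 *	dmu_tx_commit(tx);
 *
 * With TXG_NOWAIT, an ERESTART return is customarily handled by calling
 * dmu_tx_wait(), aborting the transaction, and retrying with a freshly
 * created one.
 */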

/*
 * To register a commit callback, dmu_tx_callback_register() must be called.
 *
 * dcb_data is a pointer to caller private data that is passed on as a
 * callback parameter. The caller is responsible for properly allocating and
 * freeing it.
 *
 * When registering a callback, the transaction must be already created, but
 * it cannot be committed or aborted. It can be assigned to a txg or not.
 *
 * The callback will be called after the transaction has been safely written
 * to stable storage and will also be called if the dmu_tx is aborted.
 * If there is any error which prevents the transaction from being committed to
 * disk, the callback will be called with a value of error != 0.
 */
typedef void dmu_tx_callback_func_t(void *dcb_data, int error);

void dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *dcb_func,
    void *dcb_data);
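
/*
 * Registration sketch (illustrative): my_cb_data_t, my_commit_cb(), and
 * note_result() are hypothetical caller-defined names.  Freeing dcb_data
 * in the callback is one common arrangement that satisfies the contract
 * above:
 *
 *	static void
 *	my_commit_cb(void *dcb_data, int error)
 *	{
 *		my_cb_data_t *cd = dcb_data;
 *
 *		note_result(cd, error);
 *		kmem_free(cd, sizeof (my_cb_data_t));
 *	}
 *
 *	dmu_tx_callback_register(tx, my_commit_cb, cd);
 */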

/*
 * Free up the data blocks for a defined range of a file.  If size is
 * -1, the range from offset to end-of-file is freed.
 */
int dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
	uint64_t size, dmu_tx_t *tx);
int dmu_free_long_range(objset_t *os, uint64_t object, uint64_t offset,
	uint64_t size);
int dmu_free_long_object(objset_t *os, uint64_t object);

/*
 * Convenience functions.
 *
 * Canfail routines will return 0 on success, or an errno if there is a
 * nonrecoverable I/O error.
 */
#define	DMU_READ_PREFETCH	0 /* prefetch */
#define	DMU_READ_NO_PREFETCH	1 /* don't prefetch */
int dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
	void *buf, uint32_t flags);
void dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
	const void *buf, dmu_tx_t *tx);
void dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
	dmu_tx_t *tx);
int dmu_read_uio(objset_t *os, uint64_t object, struct uio *uio, uint64_t size);
int dmu_read_uio_dbuf(dmu_buf_t *zdb, struct uio *uio, uint64_t size);
int dmu_write_uio(objset_t *os, uint64_t object, struct uio *uio, uint64_t size,
    dmu_tx_t *tx);
int dmu_write_uio_dbuf(dmu_buf_t *zdb, struct uio *uio, uint64_t size,
    dmu_tx_t *tx);
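
/*
 * Convenience-routine sketch (illustrative; error handling elided):
 *
 *	char buf[512];
 *
 *	error = dmu_read(os, object, 0, sizeof (buf), buf,
 *	    DMU_READ_PREFETCH);
 *
 * and, within an assigned transaction that holds the range (see
 * dmu_tx_hold_write() above):
 *
 *	dmu_write(os, object, 0, sizeof (buf), buf, tx);
 */
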
#ifdef _KERNEL
#ifdef sun
int dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t size, struct page *pp, dmu_tx_t *tx);
#else
int dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t size, struct vm_page **ppa, dmu_tx_t *tx);
#endif
#endif
struct arc_buf *dmu_request_arcbuf(dmu_buf_t *handle, int size);
void dmu_return_arcbuf(struct arc_buf *buf);
void dmu_assign_arcbuf(dmu_buf_t *handle, uint64_t offset, struct arc_buf *buf,
    dmu_tx_t *tx);
int dmu_xuio_init(struct xuio *uio, int niov);
void dmu_xuio_fini(struct xuio *uio);
int dmu_xuio_add(struct xuio *uio, struct arc_buf *abuf, offset_t off,
    size_t n);
int dmu_xuio_cnt(struct xuio *uio);
struct arc_buf *dmu_xuio_arcbuf(struct xuio *uio, int i);
void dmu_xuio_clear(struct xuio *uio, int i);
void xuio_stat_wbuf_copied(void);
void xuio_stat_wbuf_nocopy(void);

extern int zfs_prefetch_disable;
extern int zfs_max_recordsize;

/*
 * Asynchronously try to read in the data.
 */
void dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset,
    uint64_t len, enum zio_priority pri);

typedef struct dmu_object_info {
	/* All sizes are in bytes unless otherwise indicated. */
	uint32_t doi_data_block_size;
	uint32_t doi_metadata_block_size;
	dmu_object_type_t doi_type;
	dmu_object_type_t doi_bonus_type;
	uint64_t doi_bonus_size;
	uint8_t doi_indirection;		/* 2 = dnode->indirect->data */
	uint8_t doi_checksum;
	uint8_t doi_compress;
	uint8_t doi_nblkptr;
	uint8_t doi_pad[4];
	uint64_t doi_physical_blocks_512;	/* data + metadata, 512b blks */
	uint64_t doi_max_offset;
	uint64_t doi_fill_count;		/* number of non-empty blocks */
} dmu_object_info_t;

typedef void arc_byteswap_func_t(void *buf, size_t size);

typedef struct dmu_object_type_info {
	dmu_object_byteswap_t	ot_byteswap;
	boolean_t		ot_metadata;
	char			*ot_name;
} dmu_object_type_info_t;

typedef struct dmu_object_byteswap_info {
	arc_byteswap_func_t	*ob_func;
	char			*ob_name;
} dmu_object_byteswap_info_t;

extern const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES];
extern const dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS];

/*
 * Get information on a DMU object.
 *
 * Return 0 on success or ENOENT if object is not allocated.
 *
 * If doi is NULL, just indicates whether the object exists.
 */
int dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi);
/* Like dmu_object_info, but faster if you have a held dnode in hand. */
void dmu_object_info_from_dnode(struct dnode *dn, dmu_object_info_t *doi);
/* Like dmu_object_info, but faster if you have a held dbuf in hand. */
void dmu_object_info_from_db(dmu_buf_t *db, dmu_object_info_t *doi);
/*
 * Like dmu_object_info_from_db, but faster still when you only care about
 * the size.  This is specifically optimized for zfs_getattr().
 */
void dmu_object_size_from_db(dmu_buf_t *db, uint32_t *blksize,
    u_longlong_t *nblk512);
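
/*
 * Illustrative probe: fetch an object's block size, or merely test for
 * existence by passing a NULL doi:
 *
 *	dmu_object_info_t doi;
 *	uint32_t blksz = 0;
 *
 *	if (dmu_object_info(os, object, &doi) == 0)
 *		blksz = doi.doi_data_block_size;
 */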

typedef struct dmu_objset_stats {
	uint64_t dds_num_clones; /* number of clones of this */
	uint64_t dds_creation_txg;
	uint64_t dds_guid;
	dmu_objset_type_t dds_type;
	uint8_t dds_is_snapshot;
	uint8_t dds_inconsistent;
	char dds_origin[MAXNAMELEN];
} dmu_objset_stats_t;

/*
 * Get stats on a dataset.
 */
void dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat);

/*
 * Add entries to the nvlist for all the objset's properties.  See
 * zfs_prop_table[] and zfs(1m) for details on the properties.
 */
void dmu_objset_stats(objset_t *os, struct nvlist *nv);

/*
 * Get the space usage statistics for statvfs().
 *
 * refdbytes is the amount of space "referenced" by this objset.
 * availbytes is the amount of space available to this objset, taking
 * into account quotas & reservations, assuming that no other objsets
 * use the space first.  These values correspond to the 'referenced' and
 * 'available' properties, described in the zfs(1m) manpage.
 *
 * usedobjs and availobjs are the number of objects currently allocated,
 * and available.
 */
void dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
    uint64_t *usedobjsp, uint64_t *availobjsp);

/*
 * The fsid_guid is a 56-bit ID that can change to avoid collisions.
 * (Contrast with the ds_guid which is a 64-bit ID that will never
 * change, so there is a small probability that it will collide.)
 */
uint64_t dmu_objset_fsid_guid(objset_t *os);

/*
 * Get the [cm]time for an objset's snapshot dir
 */
timestruc_t dmu_objset_snap_cmtime(objset_t *os);

int dmu_objset_is_snapshot(objset_t *os);

extern struct spa *dmu_objset_spa(objset_t *os);
extern struct zilog *dmu_objset_zil(objset_t *os);
extern struct dsl_pool *dmu_objset_pool(objset_t *os);
extern struct dsl_dataset *dmu_objset_ds(objset_t *os);
extern void dmu_objset_name(objset_t *os, char *buf);
extern dmu_objset_type_t dmu_objset_type(objset_t *os);
extern uint64_t dmu_objset_id(objset_t *os);
extern zfs_sync_type_t dmu_objset_syncprop(objset_t *os);
extern zfs_logbias_op_t dmu_objset_logbias(objset_t *os);
extern int dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
    uint64_t *id, uint64_t *offp, boolean_t *case_conflict);
extern int dmu_snapshot_realname(objset_t *os, char *name, char *real,
    int maxlen, boolean_t *conflict);
extern int dmu_dir_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp);

typedef int objset_used_cb_t(dmu_object_type_t bonustype,
    void *bonus, uint64_t *userp, uint64_t *groupp);
extern void dmu_objset_register_type(dmu_objset_type_t ost,
    objset_used_cb_t *cb);
extern void dmu_objset_set_user(objset_t *os, void *user_ptr);
extern void *dmu_objset_get_user(objset_t *os);

/*
 * Return the txg number for the given assigned transaction.
 */
uint64_t dmu_tx_get_txg(dmu_tx_t *tx);

/*
 * Synchronous write.
 * If a parent zio is provided, this function initiates a write on the
 * provided buffer as a child of the parent zio.
 * In the absence of a parent zio, the write is completed synchronously.
 * At write completion, blk is filled with the bp of the written block.
 * Note that while the data covered by this function will be on stable
 * storage when the write completes, this new data does not become a
 * permanent part of the file until the associated transaction commits.
 */

/*
 * {zfs,zvol,ztest}_get_done() args
 */
typedef struct zgd {
	struct zilog	*zgd_zilog;
	struct blkptr	*zgd_bp;
	dmu_buf_t	*zgd_db;
	struct rl	*zgd_rl;
	void		*zgd_private;
} zgd_t;

typedef void dmu_sync_cb_t(zgd_t *arg, int error);
int dmu_sync(struct zio *zio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd);

/*
 * Find the next hole or data block in the file starting at *off.
 * Returns the found offset in *off, or ESRCH for end of file.
 */
int dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole,
    uint64_t *off);
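
/*
 * Illustrative data-walk sketch: alternate data and hole searches to
 * visit each data region of a file; visit_data() is a hypothetical
 * consumer.  ESRCH ends the walk at end-of-file:
 *
 *	uint64_t off = 0;
 *
 *	while (dmu_offset_next(os, object, B_FALSE, &off) == 0) {
 *		visit_data(os, object, off);
 *		if (dmu_offset_next(os, object, B_TRUE, &off) != 0)
 *			break;
 *	}
 */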

/*
 * Initial setup and final teardown.
 */
extern void dmu_init(void);
extern void dmu_fini(void);

typedef void (*dmu_traverse_cb_t)(objset_t *os, void *arg, struct blkptr *bp,
    uint64_t object, uint64_t offset, int len);
void dmu_traverse_objset(objset_t *os, uint64_t txg_start,
    dmu_traverse_cb_t cb, void *arg);
int dmu_diff(const char *tosnap_name, const char *fromsnap_name,
    struct file *fp, offset_t *offp);

/* CRC64 table */
#define	ZFS_CRC64_POLY	0xC96C5795D7870F42ULL	/* ECMA-182, reflected form */
extern uint64_t zfs_crc64_table[256];

extern int zfs_mdcomp_disable;

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_DMU_H */
