block.h revision 287802
/*
 * XenBSD block device driver
 *
 * Copyright (c) 2010-2013 Spectra Logic Corporation
 * Copyright (c) 2009 Scott Long, Yahoo!
 * Copyright (c) 2009 Frank Suchomel, Citrix
 * Copyright (c) 2009 Doug F. Rabson, Citrix
 * Copyright (c) 2005 Kip Macy
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 *
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * $FreeBSD: stable/10/sys/dev/xen/blkfront/block.h 287802 2015-09-14 19:37:51Z cperciva $
 */

#ifndef __XEN_BLKFRONT_BLOCK_H__
#define __XEN_BLKFRONT_BLOCK_H__
#include <xen/blkif.h>

/**
 * Given a number of blkif segments, compute the maximum I/O size supported.
 *
 * \note This calculation assumes that all but the first and last segments
 *       of the I/O are fully utilized.
 *
 * \note We reserve a segment from the maximum supported by the transport to
 *       guarantee we can handle an unaligned transfer without the need to
 *       use a bounce buffer.
 */
#define	XBD_SEGS_TO_SIZE(segs)						\
	(((segs) - 1) * PAGE_SIZE)

/**
 * Compute the maximum number of blkif segments required to represent
 * an I/O of the given size.
 *
 * \note This calculation assumes that all but the first and last segments
 *       of the I/O are fully utilized.
 *
 * \note We reserve a segment to guarantee we can handle an unaligned
 *       transfer without the need to use a bounce buffer.
 */
#define	XBD_SIZE_TO_SEGS(size)						\
	(((size) / PAGE_SIZE) + 1)
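
/*
 * Example (assuming 4KiB pages): a transport that advertises the
 * traditional BLKIF_MAX_SEGMENTS_PER_REQUEST of 11 segments yields
 * XBD_SEGS_TO_SIZE(11) = 10 * 4096 = 40960 bytes, and
 * XBD_SIZE_TO_SEGS(40960) = 11, so the two macros round-trip.
 */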

/**
 * The maximum number of shared memory ring pages we will allow in a
 * negotiated block-front/back communication channel.  Allow enough
 * ring space for all requests to be XBD_MAX_REQUEST_SIZE'd.
 */
#define XBD_MAX_RING_PAGES		32

/**
 * The maximum number of outstanding requests we will allow in a negotiated
 * block-front/back communication channel.
 */
#define XBD_MAX_REQUESTS						\
	__CONST_RING_SIZE(blkif, PAGE_SIZE * XBD_MAX_RING_PAGES)
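
/*
 * __CONST_RING_SIZE() (from the Xen ring.h interface header) evaluates to
 * the largest power-of-two count of request/response slots that fits in
 * the given number of bytes once the shared ring header is subtracted.
 */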

/**
 * The maximum number of blkif segments which can be provided per indirect
 * page in an indirect request.
 */
#define XBD_MAX_SEGMENTS_PER_PAGE					\
	(PAGE_SIZE / sizeof(struct blkif_request_segment))

/**
 * The maximum number of blkif segments which can be provided in an indirect
 * request.
 */
#define XBD_MAX_INDIRECT_SEGMENTS					\
	(BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST * XBD_MAX_SEGMENTS_PER_PAGE)

/**
 * Compute the number of indirect segment pages required for an I/O with the
 * specified number of indirect segments.
 */
#define XBD_INDIRECT_SEGS_TO_PAGES(segs)				\
	(((segs) + XBD_MAX_SEGMENTS_PER_PAGE - 1) / XBD_MAX_SEGMENTS_PER_PAGE)
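
/*
 * Example (assuming 4KiB pages and an 8-byte struct blkif_request_segment):
 * XBD_MAX_SEGMENTS_PER_PAGE is 4096 / 8 = 512, so each indirect page maps
 * roughly 2MiB of I/O, and XBD_INDIRECT_SEGS_TO_PAGES(513) = 2.
 */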

typedef enum {
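	/*
	 * The low byte of cm_flags records the xbd_q_index_t of the
	 * queue, if any, this command is currently on.
	 */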
	XBDCF_Q_MASK		= 0xFF,
	/* This command has contributed to xbd_qfrozen_cnt. */
	XBDCF_FROZEN		= 1<<8,
	/* Freeze the command queue on dispatch (i.e. single step command). */
	XBDCF_Q_FREEZE		= 1<<9,
	/* Bus DMA returned EINPROGRESS for this command. */
	XBDCF_ASYNC_MAPPING	= 1<<10,
	XBDCF_INITIALIZER	= XBDCF_Q_MASK
} xbdc_flag_t;

struct xbd_command;
typedef void xbd_cbcf_t(struct xbd_command *);

struct xbd_command {
	TAILQ_ENTRY(xbd_command) cm_link;
	struct xbd_softc	*cm_sc;
	xbdc_flag_t		 cm_flags;
	bus_dmamap_t		 cm_map;
	uint64_t		 cm_id;
	grant_ref_t		*cm_sg_refs;
	struct bio		*cm_bp;
	grant_ref_t		 cm_gref_head;
	void			*cm_data;
	size_t			 cm_datalen;
	u_int			 cm_nseg;
	int			 cm_operation;
	blkif_sector_t		 cm_sector_number;
	int			 cm_status;
	xbd_cbcf_t		*cm_complete;
	void			*cm_indirectionpages;
	grant_ref_t		 cm_indirectionrefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
};

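/*
 * Indexes into the per-softc array of command queues.  XBD_Q_NONE is not a
 * real queue; it is the value held in a command's flag byte while that
 * command is on no queue at all.
 */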
typedef enum {
	XBD_Q_FREE,
	XBD_Q_READY,
	XBD_Q_BUSY,
	XBD_Q_COMPLETE,
	XBD_Q_BIO,
	XBD_Q_COUNT,
	XBD_Q_NONE = XBDCF_Q_MASK
} xbd_q_index_t;

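/*
 * A command queue with length accounting: q_max records the high-water
 * mark reached by q_length (see xbd_added_qentry()).
 */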
typedef struct xbd_cm_q {
	TAILQ_HEAD(, xbd_command) q_tailq;
	uint32_t		  q_length;
	uint32_t		  q_max;
} xbd_cm_q_t;

typedef enum {
	XBD_STATE_DISCONNECTED,
	XBD_STATE_CONNECTED,
	XBD_STATE_SUSPENDED
} xbd_state_t;

typedef enum {
	XBDF_NONE	  = 0,
	XBDF_OPEN	  = 1 << 0, /* drive is open (can't shut down) */
	XBDF_BARRIER	  = 1 << 1, /* backend supports barriers */
	XBDF_FLUSH	  = 1 << 2, /* backend supports flush */
	XBDF_READY	  = 1 << 3, /* Is ready */
	XBDF_CM_SHORTAGE  = 1 << 4, /* Free cm resource shortage active. */
	XBDF_GNT_SHORTAGE = 1 << 5, /* Grant ref resource shortage active */
	XBDF_WAIT_IDLE	  = 1 << 6  /*
				     * No new work until outstanding work
				     * completes.
				     */
} xbd_flag_t;

/*
 * We have one of these per vbd, whether ide, scsi or 'other'.
 */
struct xbd_softc {
	device_t			 xbd_dev;
	struct disk			*xbd_disk;	/* disk params */
	struct bio_queue_head 		 xbd_bioq;	/* sort queue */
	int				 xbd_unit;
	xbd_flag_t			 xbd_flags;
	int				 xbd_qfrozen_cnt;
	int				 xbd_vdevice;
	xbd_state_t			 xbd_state;
	u_int				 xbd_ring_pages;
	uint32_t			 xbd_max_requests;
	uint32_t			 xbd_max_request_segments;
	uint32_t			 xbd_max_request_size;
	uint32_t			 xbd_max_request_indirectpages;
	grant_ref_t			 xbd_ring_ref[XBD_MAX_RING_PAGES];
	blkif_front_ring_t		 xbd_ring;
	xen_intr_handle_t		 xen_intr_handle;
	struct gnttab_free_callback	 xbd_callback;
	xbd_cm_q_t			 xbd_cm_q[XBD_Q_COUNT];
	bus_dma_tag_t			 xbd_io_dmat;

	/**
	 * The number of people holding this device open.  We won't allow a
	 * hot-unplug unless this is 0.
	 */
	int				 xbd_users;
	struct mtx			 xbd_io_lock;

	struct xbd_command		*xbd_shadow;
};

int xbd_instance_create(struct xbd_softc *, blkif_sector_t sectors, int device,
			uint16_t vdisk_info, unsigned long sector_size);

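/*
 * Bookkeeping helpers for the per-softc command queues; these track each
 * queue's current length and its high-water mark.
 */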
static inline void
xbd_added_qentry(struct xbd_softc *sc, xbd_q_index_t index)
{
	struct xbd_cm_q *cmq;

	cmq = &sc->xbd_cm_q[index];
	cmq->q_length++;
	if (cmq->q_length > cmq->q_max)
		cmq->q_max = cmq->q_length;
}

static inline void
xbd_removed_qentry(struct xbd_softc *sc, xbd_q_index_t index)
{
	sc->xbd_cm_q[index].q_length--;
}

static inline uint32_t
xbd_queue_length(struct xbd_softc *sc, xbd_q_index_t index)
{
	return (sc->xbd_cm_q[index].q_length);
}

static inline void
xbd_initq_cm(struct xbd_softc *sc, xbd_q_index_t index)
{
	struct xbd_cm_q *cmq;

	cmq = &sc->xbd_cm_q[index];
	TAILQ_INIT(&cmq->q_tailq);
	cmq->q_length = 0;
	cmq->q_max = 0;
}

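/*
 * A command may sit on at most one queue at a time.  The helpers below
 * enforce this by recording the queue index in the low byte of cm_flags
 * and panicking if a command is enqueued twice or removed from a queue it
 * is not on.
 */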
static inline void
xbd_enqueue_cm(struct xbd_command *cm, xbd_q_index_t index)
{
	KASSERT(index != XBD_Q_BIO,
	    ("%s: Commands cannot access the bio queue.", __func__));
	if ((cm->cm_flags & XBDCF_Q_MASK) != XBD_Q_NONE)
		panic("%s: command %p is already on queue %d.",
		    __func__, cm, cm->cm_flags & XBDCF_Q_MASK);
	TAILQ_INSERT_TAIL(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link);
	cm->cm_flags &= ~XBDCF_Q_MASK;
	cm->cm_flags |= index;
	xbd_added_qentry(cm->cm_sc, index);
}

static inline void
xbd_requeue_cm(struct xbd_command *cm, xbd_q_index_t index)
{
	KASSERT(index != XBD_Q_BIO,
	    ("%s: Commands cannot access the bio queue.", __func__));
	if ((cm->cm_flags & XBDCF_Q_MASK) != XBD_Q_NONE)
		panic("%s: command %p is already on queue %d.",
		    __func__, cm, cm->cm_flags & XBDCF_Q_MASK);
	TAILQ_INSERT_HEAD(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link);
	cm->cm_flags &= ~XBDCF_Q_MASK;
	cm->cm_flags |= index;
	xbd_added_qentry(cm->cm_sc, index);
}

static inline struct xbd_command *
xbd_dequeue_cm(struct xbd_softc *sc, xbd_q_index_t index)
{
	struct xbd_command *cm;

	KASSERT(index != XBD_Q_BIO,
	    ("%s: Commands cannot access the bio queue.", __func__));

	if ((cm = TAILQ_FIRST(&sc->xbd_cm_q[index].q_tailq)) != NULL) {
		if ((cm->cm_flags & XBDCF_Q_MASK) != index) {
			panic("%s: command %p is on queue %d, "
			    "not specified queue %d",
			    __func__, cm,
			    cm->cm_flags & XBDCF_Q_MASK,
			    index);
		}
		TAILQ_REMOVE(&sc->xbd_cm_q[index].q_tailq, cm, cm_link);
		cm->cm_flags &= ~XBDCF_Q_MASK;
		cm->cm_flags |= XBD_Q_NONE;
		xbd_removed_qentry(cm->cm_sc, index);
	}
	return (cm);
}

static inline void
xbd_remove_cm(struct xbd_command *cm, xbd_q_index_t expected_index)
{
	xbd_q_index_t index;

	index = cm->cm_flags & XBDCF_Q_MASK;

	KASSERT(index != XBD_Q_BIO,
	    ("%s: Commands cannot access the bio queue.", __func__));

	if (index != expected_index) {
		panic("%s: command %p is on queue %d, not specified queue %d",
		    __func__, cm, index, expected_index);
	}
	TAILQ_REMOVE(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link);
	cm->cm_flags &= ~XBDCF_Q_MASK;
	cm->cm_flags |= XBD_Q_NONE;
	xbd_removed_qentry(cm->cm_sc, index);
}

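/*
 * struct bio requests are staged on the softc's sort queue (xbd_bioq)
 * rather than on a command queue; these wrappers keep the XBD_Q_BIO
 * length statistics in sync with that queue.
 */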
static inline void
xbd_initq_bio(struct xbd_softc *sc)
{
	bioq_init(&sc->xbd_bioq);
}

static inline void
xbd_enqueue_bio(struct xbd_softc *sc, struct bio *bp)
{
	bioq_insert_tail(&sc->xbd_bioq, bp);
	xbd_added_qentry(sc, XBD_Q_BIO);
}

static inline void
xbd_requeue_bio(struct xbd_softc *sc, struct bio *bp)
{
	bioq_insert_head(&sc->xbd_bioq, bp);
	xbd_added_qentry(sc, XBD_Q_BIO);
}

static inline struct bio *
xbd_dequeue_bio(struct xbd_softc *sc)
{
	struct bio *bp;

	if ((bp = bioq_first(&sc->xbd_bioq)) != NULL) {
		bioq_remove(&sc->xbd_bioq, bp);
		xbd_removed_qentry(sc, XBD_Q_BIO);
	}
	return (bp);
}

static inline void
xbd_initqs(struct xbd_softc *sc)
{
	u_int index;

	for (index = 0; index < XBD_Q_COUNT; index++)
		xbd_initq_cm(sc, index);

	xbd_initq_bio(sc);
}

#endif /* __XEN_BLKFRONT_BLOCK_H__ */