1/*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2009-2012 Spectra Logic Corporation
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions, and the following disclaimer,
12 *    without modification.
13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14 *    substantially similar to the "NO WARRANTY" disclaimer below
15 *    ("Disclaimer") and any redistribution must be conditioned upon
16 *    including a substantially similar Disclaimer requirement for further
17 *    binary redistribution.
18 *
19 * NO WARRANTY
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
28 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
29 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGES.
31 *
32 * Authors: Justin T. Gibbs     (Spectra Logic Corporation)
33 *          Ken Merry           (Spectra Logic Corporation)
34 */
35#include <sys/cdefs.h>
36__FBSDID("$FreeBSD$");
37
38/**
39 * \file blkback.c
40 *
41 * \brief Device driver supporting the vending of block storage from
42 *        a FreeBSD domain to other domains.
43 */
44
45#include <sys/param.h>
46#include <sys/systm.h>
47#include <sys/kernel.h>
48#include <sys/malloc.h>
49
50#include <sys/bio.h>
51#include <sys/bus.h>
52#include <sys/conf.h>
53#include <sys/devicestat.h>
54#include <sys/disk.h>
55#include <sys/fcntl.h>
56#include <sys/filedesc.h>
57#include <sys/kdb.h>
58#include <sys/module.h>
59#include <sys/namei.h>
60#include <sys/proc.h>
61#include <sys/rman.h>
62#include <sys/taskqueue.h>
63#include <sys/types.h>
64#include <sys/vnode.h>
65#include <sys/mount.h>
66#include <sys/sysctl.h>
67#include <sys/bitstring.h>
68#include <sys/sdt.h>
69
70#include <geom/geom.h>
71
72#include <machine/_inttypes.h>
73
74#include <vm/vm.h>
75#include <vm/vm_extern.h>
76#include <vm/vm_kern.h>
77
78#include <xen/xen-os.h>
79#include <xen/blkif.h>
80#include <xen/gnttab.h>
81#include <xen/xen_intr.h>
82
83#include <xen/interface/event_channel.h>
84#include <xen/interface/grant_table.h>
85
86#include <xen/xenbus/xenbusvar.h>
87
88/*--------------------------- Compile-time Tunables --------------------------*/
89/**
90 * The maximum number of shared memory ring pages we will allow in a
91 * negotiated block-front/back communication channel.  Allow enough
92 * ring space for all requests to be XBB_MAX_REQUEST_SIZE'd.
93 */
94#define	XBB_MAX_RING_PAGES		32
95
96/**
97 * The maximum number of outstanding request blocks (request headers plus
98 * additional segment blocks) we will allow in a negotiated block-front/back
99 * communication channel.
100 */
101#define	XBB_MAX_REQUESTS 					\
102	__CONST_RING_SIZE(blkif, PAGE_SIZE * XBB_MAX_RING_PAGES)
103
104/**
105 * \brief Define to force all I/O to be performed on memory owned by the
106 *        backend device, with a copy-in/out to the remote domain's memory.
107 *
108 * \note  This option is currently required when this driver's domain is
109 *        operating in HVM mode on a system using an IOMMU.
110 *
111 * This driver uses Xen's grant table API to gain access to the memory of
112 * the remote domains it serves.  When our domain is operating in PV mode,
113 * the grant table mechanism directly updates our domain's page table entries
114 * to point to the physical pages of the remote domain.  This scheme guarantees
115 * that blkback and the backing devices it uses can safely perform DMA
116 * operations to satisfy requests.  In HVM mode, Xen may use a HW IOMMU to
117 * insure that our domain cannot DMA to pages owned by another domain.  As
118 * of Xen 4.0, IOMMU mappings for HVM guests are not updated via the grant
119 * table API.  For this reason, in HVM mode, we must bounce all requests into
120 * memory that is mapped into our domain at domain startup and thus has
121 * valid IOMMU mappings.
122 */
123#define XBB_USE_BOUNCE_BUFFERS
124
125/**
126 * \brief Define to enable rudimentary request logging to the console.
127 */
128#undef XBB_DEBUG
129
130/*---------------------------------- Macros ----------------------------------*/
131/**
132 * Custom malloc type for all driver allocations.
133 */
134static MALLOC_DEFINE(M_XENBLOCKBACK, "xbbd", "Xen Block Back Driver Data");
135
136#ifdef XBB_DEBUG
137#define DPRINTF(fmt, args...)					\
138    printf("xbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args)
139#else
140#define DPRINTF(fmt, args...) do {} while(0)
141#endif
142
143/**
144 * The maximum mapped region size per request we will allow in a negotiated
145 * block-front/back communication channel.
146 * Use old default of MAXPHYS == 128K.
147 */
148#define	XBB_MAX_REQUEST_SIZE					\
149	MIN(128 * 1024, BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE)
150
151/**
152 * The maximum number of segments (within a request header and accompanying
153 * segment blocks) per request we will allow in a negotiated block-front/back
154 * communication channel.
155 */
156#define	XBB_MAX_SEGMENTS_PER_REQUEST				\
157	(MIN(UIO_MAXIOV,					\
158	     MIN(BLKIF_MAX_SEGMENTS_PER_REQUEST,		\
159		 (XBB_MAX_REQUEST_SIZE / PAGE_SIZE) + 1)))
160
161/**
162 * The maximum number of ring pages that we can allow per request list.
163 * We limit this to the maximum number of segments per request, because
164 * that is already a reasonable number of segments to aggregate.  This
165 * number should never be smaller than XBB_MAX_SEGMENTS_PER_REQUEST,
166 * because that would leave situations where we can't dispatch even one
167 * large request.
168 */
169#define	XBB_MAX_SEGMENTS_PER_REQLIST XBB_MAX_SEGMENTS_PER_REQUEST
170
171/*--------------------------- Forward Declarations ---------------------------*/
172struct xbb_softc;
173struct xbb_xen_req;
174
175static void xbb_attach_failed(struct xbb_softc *xbb, int err, const char *fmt,
176			      ...) __attribute__((format(printf, 3, 4)));
177static int  xbb_shutdown(struct xbb_softc *xbb);
178
179/*------------------------------ Data Structures -----------------------------*/
180
181STAILQ_HEAD(xbb_xen_req_list, xbb_xen_req);
182
183typedef enum {
184	XBB_REQLIST_NONE	= 0x00,
185	XBB_REQLIST_MAPPED	= 0x01
186} xbb_reqlist_flags;
187
188struct xbb_xen_reqlist {
189	/**
190	 * Back reference to the parent block back instance for this
191	 * request.  Used during bio_done handling.
192	 */
193	struct xbb_softc        *xbb;
194
195	/**
196	 * BLKIF_OP code for this request.
197	 */
198	int			 operation;
199
200	/**
201	 * Set to BLKIF_RSP_* to indicate request status.
202	 *
203	 * This field allows an error status to be recorded even if the
204	 * delivery of this status must be deferred.  Deferred reporting
205	 * is necessary, for example, when an error is detected during
206	 * completion processing of one bio when other bios for this
207	 * request are still outstanding.
208	 */
209	int			 status;
210
211	/**
212	 * Number of 512 byte sectors not transferred.
213	 */
214	int			 residual_512b_sectors;
215
216	/**
217	 * Starting sector number of the first request in the list.
218	 */
219	off_t			 starting_sector_number;
220
221	/**
222	 * If we're going to coalesce, the next contiguous sector would be
223	 * this one.
224	 */
225	off_t			 next_contig_sector;
226
227	/**
228	 * Number of child requests in the list.
229	 */
230	int			 num_children;
231
232	/**
233	 * Number of I/O requests still pending on the backend.
234	 */
235	int			 pendcnt;
236
237	/**
238	 * Total number of segments for requests in the list.
239	 */
240	int			 nr_segments;
241
242	/**
243	 * Flags for this particular request list.
244	 */
245	xbb_reqlist_flags	 flags;
246
247	/**
248	 * Kernel virtual address space reserved for this request
249	 * list structure and used to map the remote domain's pages for
250	 * this I/O, into our domain's address space.
251	 */
252	uint8_t			*kva;
253
254	/**
255	 * Base, pseudo-physical address, corresponding to the start
256	 * of this request's kva region.
257	 */
258	uint64_t	 	 gnt_base;
259
260#ifdef XBB_USE_BOUNCE_BUFFERS
261	/**
262	 * Pre-allocated domain local memory used to proxy remote
263	 * domain memory during I/O operations.
264	 */
265	uint8_t			*bounce;
266#endif
267
268	/**
269	 * Array of grant handles (one per page) used to map this request.
270	 */
271	grant_handle_t		*gnt_handles;
272
273	/**
274	 * Device statistics request ordering type (ordered or simple).
275	 */
276	devstat_tag_type	 ds_tag_type;
277
278	/**
279	 * Device statistics request type (read, write, no_data).
280	 */
281	devstat_trans_flags	 ds_trans_type;
282
283	/**
284	 * The start time for this request.
285	 */
286	struct bintime		 ds_t0;
287
288	/**
289	 * Linked list of contiguous requests with the same operation type.
290	 */
291	struct xbb_xen_req_list	 contig_req_list;
292
293	/**
294	 * Linked list links used to aggregate idle requests in the
295	 * request list free pool (xbb->reqlist_free_stailq) and pending
296	 * requests waiting for execution (xbb->reqlist_pending_stailq).
297	 */
298	STAILQ_ENTRY(xbb_xen_reqlist) links;
299};
300
301STAILQ_HEAD(xbb_xen_reqlist_list, xbb_xen_reqlist);
302
303/**
304 * \brief Object tracking an in-flight I/O from a Xen VBD consumer.
305 */
306struct xbb_xen_req {
307	/**
308	 * Linked list links used to aggregate requests into a reqlist
309	 * and to store them in the request free pool.
310	 */
311	STAILQ_ENTRY(xbb_xen_req) links;
312
313	/**
314	 * The remote domain's identifier for this I/O request.
315	 */
316	uint64_t		  id;
317
318	/**
319	 * The number of pages currently mapped for this request.
320	 */
321	int			  nr_pages;
322
323	/**
324	 * The number of 512 byte sectors comprising this requests.
325	 */
326	int			  nr_512b_sectors;
327
328	/**
329	 * BLKIF_OP code for this request.
330	 */
331	int			  operation;
332
333	/**
334	 * Storage used for non-native ring requests.
335	 */
336	blkif_request_t		 ring_req_storage;
337
338	/**
339	 * Pointer to the Xen request in the ring.
340	 */
341	blkif_request_t		*ring_req;
342
343	/**
344	 * Consumer index for this request.
345	 */
346	RING_IDX		 req_ring_idx;
347
348	/**
349	 * The start time for this request.
350	 */
351	struct bintime		 ds_t0;
352
353	/**
354	 * Pointer back to our parent request list.
355	 */
356	struct xbb_xen_reqlist  *reqlist;
357};
358SLIST_HEAD(xbb_xen_req_slist, xbb_xen_req);
359
360/**
361 * \brief Configuration data for the shared memory request ring
362 *        used to communicate with the front-end client of this
363 *        this driver.
364 */
365struct xbb_ring_config {
366	/** KVA address where ring memory is mapped. */
367	vm_offset_t	va;
368
369	/** The pseudo-physical address where ring memory is mapped.*/
370	uint64_t	gnt_addr;
371
372	/**
373	 * Grant table handles, one per-ring page, returned by the
374	 * hyperpervisor upon mapping of the ring and required to
375	 * unmap it when a connection is torn down.
376	 */
377	grant_handle_t	handle[XBB_MAX_RING_PAGES];
378
379	/**
380	 * The device bus address returned by the hypervisor when
381	 * mapping the ring and required to unmap it when a connection
382	 * is torn down.
383	 */
384	uint64_t	bus_addr[XBB_MAX_RING_PAGES];
385
386	/** The number of ring pages mapped for the current connection. */
387	u_int		ring_pages;
388
389	/**
390	 * The grant references, one per-ring page, supplied by the
391	 * front-end, allowing us to reference the ring pages in the
392	 * front-end's domain and to map these pages into our own domain.
393	 */
394	grant_ref_t	ring_ref[XBB_MAX_RING_PAGES];
395
396	/** The interrupt driven even channel used to signal ring events. */
397	evtchn_port_t   evtchn;
398};
399
400/**
401 * Per-instance connection state flags.
402 */
403typedef enum
404{
405	/**
406	 * The front-end requested a read-only mount of the
407	 * back-end device/file.
408	 */
409	XBBF_READ_ONLY         = 0x01,
410
411	/** Communication with the front-end has been established. */
412	XBBF_RING_CONNECTED    = 0x02,
413
414	/**
415	 * Front-end requests exist in the ring and are waiting for
416	 * xbb_xen_req objects to free up.
417	 */
418	XBBF_RESOURCE_SHORTAGE = 0x04,
419
420	/** Connection teardown in progress. */
421	XBBF_SHUTDOWN          = 0x08,
422
423	/** A thread is already performing shutdown processing. */
424	XBBF_IN_SHUTDOWN       = 0x10
425} xbb_flag_t;
426
427/** Backend device type.  */
428typedef enum {
429	/** Backend type unknown. */
430	XBB_TYPE_NONE		= 0x00,
431
432	/**
433	 * Backend type disk (access via cdev switch
434	 * strategy routine).
435	 */
436	XBB_TYPE_DISK		= 0x01,
437
438	/** Backend type file (access vnode operations.). */
439	XBB_TYPE_FILE		= 0x02
440} xbb_type;
441
442/**
443 * \brief Structure used to memoize information about a per-request
444 *        scatter-gather list.
445 *
446 * The chief benefit of using this data structure is it avoids having
447 * to reparse the possibly discontiguous S/G list in the original
448 * request.  Due to the way that the mapping of the memory backing an
449 * I/O transaction is handled by Xen, a second pass is unavoidable.
450 * At least this way the second walk is a simple array traversal.
451 *
452 * \note A single Scatter/Gather element in the block interface covers
453 *       at most 1 machine page.  In this context a sector (blkif
454 *       nomenclature, not what I'd choose) is a 512b aligned unit
455 *       of mapping within the machine page referenced by an S/G
456 *       element.
457 */
458struct xbb_sg {
459	/** The number of 512b data chunks mapped in this S/G element. */
460	int16_t nsect;
461
462	/**
463	 * The index (0 based) of the first 512b data chunk mapped
464	 * in this S/G element.
465	 */
466	uint8_t first_sect;
467
468	/**
469	 * The index (0 based) of the last 512b data chunk mapped
470	 * in this S/G element.
471	 */
472	uint8_t last_sect;
473};
474
475/**
476 * Character device backend specific configuration data.
477 */
478struct xbb_dev_data {
479	/** Cdev used for device backend access.  */
480	struct cdev   *cdev;
481
482	/** Cdev switch used for device backend access.  */
483	struct cdevsw *csw;
484
485	/** Used to hold a reference on opened cdev backend devices. */
486	int	       dev_ref;
487};
488
489/**
490 * File backend specific configuration data.
491 */
492struct xbb_file_data {
493	/** Credentials to use for vnode backed (file based) I/O. */
494	struct ucred   *cred;
495
496	/**
497	 * \brief Array of io vectors used to process file based I/O.
498	 *
499	 * Only a single file based request is outstanding per-xbb instance,
500	 * so we only need one of these.
501	 */
502	struct iovec	xiovecs[XBB_MAX_SEGMENTS_PER_REQLIST];
503#ifdef XBB_USE_BOUNCE_BUFFERS
504
505	/**
506	 * \brief Array of io vectors used to handle bouncing of file reads.
507	 *
508	 * Vnode operations are free to modify uio data during their
509	 * exectuion.  In the case of a read with bounce buffering active,
510	 * we need some of the data from the original uio in order to
511	 * bounce-out the read data.  This array serves as the temporary
512	 * storage for this saved data.
513	 */
514	struct iovec	saved_xiovecs[XBB_MAX_SEGMENTS_PER_REQLIST];
515
516	/**
517	 * \brief Array of memoized bounce buffer kva offsets used
518	 *        in the file based backend.
519	 *
520	 * Due to the way that the mapping of the memory backing an
521	 * I/O transaction is handled by Xen, a second pass through
522	 * the request sg elements is unavoidable. We memoize the computed
523	 * bounce address here to reduce the cost of the second walk.
524	 */
525	void		*xiovecs_vaddr[XBB_MAX_SEGMENTS_PER_REQLIST];
526#endif /* XBB_USE_BOUNCE_BUFFERS */
527};
528
529/**
530 * Collection of backend type specific data.
531 */
532union xbb_backend_data {
533	struct xbb_dev_data  dev;
534	struct xbb_file_data file;
535};
536
537/**
538 * Function signature of backend specific I/O handlers.
539 */
540typedef int (*xbb_dispatch_t)(struct xbb_softc *xbb,
541			      struct xbb_xen_reqlist *reqlist, int operation,
542			      int flags);
543
544/**
545 * Per-instance configuration data.
546 */
547struct xbb_softc {
548	/**
549	 * Task-queue used to process I/O requests.
550	 */
551	struct taskqueue	 *io_taskqueue;
552
553	/**
554	 * Single "run the request queue" task enqueued
555	 * on io_taskqueue.
556	 */
557	struct task		  io_task;
558
559	/** Device type for this instance. */
560	xbb_type		  device_type;
561
562	/** NewBus device corresponding to this instance. */
563	device_t		  dev;
564
565	/** Backend specific dispatch routine for this instance. */
566	xbb_dispatch_t		  dispatch_io;
567
568	/** The number of requests outstanding on the backend device/file. */
569	int			  active_request_count;
570
571	/** Free pool of request tracking structures. */
572	struct xbb_xen_req_list   request_free_stailq;
573
574	/** Array, sized at connection time, of request tracking structures. */
575	struct xbb_xen_req	 *requests;
576
577	/** Free pool of request list structures. */
578	struct xbb_xen_reqlist_list reqlist_free_stailq;
579
580	/** List of pending request lists awaiting execution. */
581	struct xbb_xen_reqlist_list reqlist_pending_stailq;
582
583	/** Array, sized at connection time, of request list structures. */
584	struct xbb_xen_reqlist	 *request_lists;
585
586	/**
587	 * Global pool of kva used for mapping remote domain ring
588	 * and I/O transaction data.
589	 */
590	vm_offset_t		  kva;
591
592	/** Pseudo-physical address corresponding to kva. */
593	uint64_t		  gnt_base_addr;
594
595	/** The size of the global kva pool. */
596	int			  kva_size;
597
598	/** The size of the KVA area used for request lists. */
599	int			  reqlist_kva_size;
600
601	/** The number of pages of KVA used for request lists */
602	int			  reqlist_kva_pages;
603
604	/** Bitmap of free KVA pages */
605	bitstr_t		 *kva_free;
606
607	/**
608	 * \brief Cached value of the front-end's domain id.
609	 *
610	 * This value is used at once for each mapped page in
611	 * a transaction.  We cache it to avoid incuring the
612	 * cost of an ivar access every time this is needed.
613	 */
614	domid_t			  otherend_id;
615
616	/**
617	 * \brief The blkif protocol abi in effect.
618	 *
619	 * There are situations where the back and front ends can
620	 * have a different, native abi (e.g. intel x86_64 and
621	 * 32bit x86 domains on the same machine).  The back-end
622	 * always accommodates the front-end's native abi.  That
623	 * value is pulled from the XenStore and recorded here.
624	 */
625	int			  abi;
626
627	/**
628	 * \brief The maximum number of requests and request lists allowed
629	 *        to be in flight at a time.
630	 *
631	 * This value is negotiated via the XenStore.
632	 */
633	u_int			  max_requests;
634
635	/**
636	 * \brief The maximum number of segments (1 page per segment)
637	 *	  that can be mapped by a request.
638	 *
639	 * This value is negotiated via the XenStore.
640	 */
641	u_int			  max_request_segments;
642
643	/**
644	 * \brief Maximum number of segments per request list.
645	 *
646	 * This value is derived from and will generally be larger than
647	 * max_request_segments.
648	 */
649	u_int			  max_reqlist_segments;
650
651	/**
652	 * The maximum size of any request to this back-end
653	 * device.
654	 *
655	 * This value is negotiated via the XenStore.
656	 */
657	u_int			  max_request_size;
658
659	/**
660	 * The maximum size of any request list.  This is derived directly
661	 * from max_reqlist_segments.
662	 */
663	u_int			  max_reqlist_size;
664
665	/** Various configuration and state bit flags. */
666	xbb_flag_t		  flags;
667
668	/** Ring mapping and interrupt configuration data. */
669	struct xbb_ring_config	  ring_config;
670
671	/** Runtime, cross-abi safe, structures for ring access. */
672	blkif_back_rings_t	  rings;
673
674	/** IRQ mapping for the communication ring event channel. */
675	xen_intr_handle_t	  xen_intr_handle;
676
677	/**
678	 * \brief Backend access mode flags (e.g. write, or read-only).
679	 *
680	 * This value is passed to us by the front-end via the XenStore.
681	 */
682	char			 *dev_mode;
683
684	/**
685	 * \brief Backend device type (e.g. "disk", "cdrom", "floppy").
686	 *
687	 * This value is passed to us by the front-end via the XenStore.
688	 * Currently unused.
689	 */
690	char			 *dev_type;
691
692	/**
693	 * \brief Backend device/file identifier.
694	 *
695	 * This value is passed to us by the front-end via the XenStore.
696	 * We expect this to be a POSIX path indicating the file or
697	 * device to open.
698	 */
699	char			 *dev_name;
700
701	/**
702	 * Vnode corresponding to the backend device node or file
703	 * we are acessing.
704	 */
705	struct vnode		 *vn;
706
707	union xbb_backend_data	  backend;
708
709	/** The native sector size of the backend. */
710	u_int			  sector_size;
711
712	/** log2 of sector_size.  */
713	u_int			  sector_size_shift;
714
715	/** Size in bytes of the backend device or file.  */
716	off_t			  media_size;
717
718	/**
719	 * \brief media_size expressed in terms of the backend native
720	 *	  sector size.
721	 *
722	 * (e.g. xbb->media_size >> xbb->sector_size_shift).
723	 */
724	uint64_t		  media_num_sectors;
725
726	/**
727	 * \brief Array of memoized scatter gather data computed during the
728	 *	  conversion of blkif ring requests to internal xbb_xen_req
729	 *	  structures.
730	 *
731	 * Ring processing is serialized so we only need one of these.
732	 */
733	struct xbb_sg		  xbb_sgs[XBB_MAX_SEGMENTS_PER_REQLIST];
734
735	/**
736	 * Temporary grant table map used in xbb_dispatch_io().  When
737	 * XBB_MAX_SEGMENTS_PER_REQLIST gets large, keeping this on the
738	 * stack could cause a stack overflow.
739	 */
740	struct gnttab_map_grant_ref   maps[XBB_MAX_SEGMENTS_PER_REQLIST];
741
742	/** Mutex protecting per-instance data. */
743	struct mtx		  lock;
744
745	/**
746	 * Resource representing allocated physical address space
747	 * associated with our per-instance kva region.
748	 */
749	struct resource		 *pseudo_phys_res;
750
751	/** Resource id for allocated physical address space. */
752	int			  pseudo_phys_res_id;
753
754	/**
755	 * I/O statistics from BlockBack dispatch down.  These are
756	 * coalesced requests, and we start them right before execution.
757	 */
758	struct devstat		 *xbb_stats;
759
760	/**
761	 * I/O statistics coming into BlockBack.  These are the requests as
762	 * we get them from BlockFront.  They are started as soon as we
763	 * receive a request, and completed when the I/O is complete.
764	 */
765	struct devstat		 *xbb_stats_in;
766
767	/** Disable sending flush to the backend */
768	int			  disable_flush;
769
770	/** Send a real flush for every N flush requests */
771	int			  flush_interval;
772
773	/** Count of flush requests in the interval */
774	int			  flush_count;
775
776	/** Don't coalesce requests if this is set */
777	int			  no_coalesce_reqs;
778
779	/** Number of requests we have received */
780	uint64_t		  reqs_received;
781
782	/** Number of requests we have completed*/
783	uint64_t		  reqs_completed;
784
785	/** Number of requests we queued but not pushed*/
786	uint64_t		  reqs_queued_for_completion;
787
788	/** Number of requests we completed with an error status*/
789	uint64_t		  reqs_completed_with_error;
790
791	/** How many forced dispatches (i.e. without coalescing) have happened */
792	uint64_t		  forced_dispatch;
793
794	/** How many normal dispatches have happened */
795	uint64_t		  normal_dispatch;
796
797	/** How many total dispatches have happened */
798	uint64_t		  total_dispatch;
799
800	/** How many times we have run out of KVA */
801	uint64_t		  kva_shortages;
802
803	/** How many times we have run out of request structures */
804	uint64_t		  request_shortages;
805
806	/** Watch to wait for hotplug script execution */
807	struct xs_watch		  hotplug_watch;
808
809	/** Got the needed data from hotplug scripts? */
810	bool			  hotplug_done;
811};
812
813/*---------------------------- Request Processing ----------------------------*/
814/**
815 * Allocate an internal transaction tracking structure from the free pool.
816 *
817 * \param xbb  Per-instance xbb configuration structure.
818 *
819 * \return  On success, a pointer to the allocated xbb_xen_req structure.
820 *          Otherwise NULL.
821 */
822static inline struct xbb_xen_req *
823xbb_get_req(struct xbb_softc *xbb)
824{
825	struct xbb_xen_req *req;
826
827	req = NULL;
828
829	mtx_assert(&xbb->lock, MA_OWNED);
830
831	if ((req = STAILQ_FIRST(&xbb->request_free_stailq)) != NULL) {
832		STAILQ_REMOVE_HEAD(&xbb->request_free_stailq, links);
833		xbb->active_request_count++;
834	}
835
836	return (req);
837}
838
839/**
840 * Return an allocated transaction tracking structure to the free pool.
841 *
842 * \param xbb  Per-instance xbb configuration structure.
843 * \param req  The request structure to free.
844 */
845static inline void
846xbb_release_req(struct xbb_softc *xbb, struct xbb_xen_req *req)
847{
848	mtx_assert(&xbb->lock, MA_OWNED);
849
850	STAILQ_INSERT_HEAD(&xbb->request_free_stailq, req, links);
851	xbb->active_request_count--;
852
853	KASSERT(xbb->active_request_count >= 0,
854		("xbb_release_req: negative active count"));
855}
856
857/**
858 * Return an xbb_xen_req_list of allocated xbb_xen_reqs to the free pool.
859 *
860 * \param xbb	    Per-instance xbb configuration structure.
861 * \param req_list  The list of requests to free.
862 * \param nreqs	    The number of items in the list.
863 */
864static inline void
865xbb_release_reqs(struct xbb_softc *xbb, struct xbb_xen_req_list *req_list,
866		 int nreqs)
867{
868	mtx_assert(&xbb->lock, MA_OWNED);
869
870	STAILQ_CONCAT(&xbb->request_free_stailq, req_list);
871	xbb->active_request_count -= nreqs;
872
873	KASSERT(xbb->active_request_count >= 0,
874		("xbb_release_reqs: negative active count"));
875}
876
877/**
878 * Given a page index and 512b sector offset within that page,
879 * calculate an offset into a request's kva region.
880 *
881 * \param reqlist The request structure whose kva region will be accessed.
882 * \param pagenr  The page index used to compute the kva offset.
883 * \param sector  The 512b sector index used to compute the page relative
884 *                kva offset.
885 *
886 * \return  The computed global KVA offset.
887 */
888static inline uint8_t *
889xbb_reqlist_vaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
890{
891	return (reqlist->kva + (PAGE_SIZE * pagenr) + (sector << 9));
892}
893
894#ifdef XBB_USE_BOUNCE_BUFFERS
895/**
896 * Given a page index and 512b sector offset within that page,
897 * calculate an offset into a request's local bounce memory region.
898 *
899 * \param reqlist The request structure whose bounce region will be accessed.
900 * \param pagenr  The page index used to compute the bounce offset.
901 * \param sector  The 512b sector index used to compute the page relative
902 *                bounce offset.
903 *
904 * \return  The computed global bounce buffer address.
905 */
906static inline uint8_t *
907xbb_reqlist_bounce_addr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
908{
909	return (reqlist->bounce + (PAGE_SIZE * pagenr) + (sector << 9));
910}
911#endif
912
913/**
914 * Given a page number and 512b sector offset within that page,
915 * calculate an offset into the request's memory region that the
916 * underlying backend device/file should use for I/O.
917 *
918 * \param reqlist The request structure whose I/O region will be accessed.
919 * \param pagenr  The page index used to compute the I/O offset.
920 * \param sector  The 512b sector index used to compute the page relative
921 *                I/O offset.
922 *
923 * \return  The computed global I/O address.
924 *
925 * Depending on configuration, this will either be a local bounce buffer
926 * or a pointer to the memory mapped in from the front-end domain for
927 * this request.
928 */
929static inline uint8_t *
930xbb_reqlist_ioaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
931{
932#ifdef XBB_USE_BOUNCE_BUFFERS
933	return (xbb_reqlist_bounce_addr(reqlist, pagenr, sector));
934#else
935	return (xbb_reqlist_vaddr(reqlist, pagenr, sector));
936#endif
937}
938
939/**
940 * Given a page index and 512b sector offset within that page, calculate
941 * an offset into the local pseudo-physical address space used to map a
942 * front-end's request data into a request.
943 *
944 * \param reqlist The request list structure whose pseudo-physical region
945 *                will be accessed.
946 * \param pagenr  The page index used to compute the pseudo-physical offset.
947 * \param sector  The 512b sector index used to compute the page relative
948 *                pseudo-physical offset.
949 *
950 * \return  The computed global pseudo-phsyical address.
951 *
952 * Depending on configuration, this will either be a local bounce buffer
953 * or a pointer to the memory mapped in from the front-end domain for
954 * this request.
955 */
956static inline uintptr_t
957xbb_get_gntaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
958{
959	struct xbb_softc *xbb;
960
961	xbb = reqlist->xbb;
962
963	return ((uintptr_t)(xbb->gnt_base_addr +
964		(uintptr_t)(reqlist->kva - xbb->kva) +
965		(PAGE_SIZE * pagenr) + (sector << 9)));
966}
967
968/**
969 * Get Kernel Virtual Address space for mapping requests.
970 *
971 * \param xbb         Per-instance xbb configuration structure.
972 * \param nr_pages    Number of pages needed.
973 * \param check_only  If set, check for free KVA but don't allocate it.
974 * \param have_lock   If set, xbb lock is already held.
975 *
976 * \return  On success, a pointer to the allocated KVA region.  Otherwise NULL.
977 *
978 * Note:  This should be unnecessary once we have either chaining or
979 * scatter/gather support for struct bio.  At that point we'll be able to
980 * put multiple addresses and lengths in one bio/bio chain and won't need
981 * to map everything into one virtual segment.
982 */
983static uint8_t *
984xbb_get_kva(struct xbb_softc *xbb, int nr_pages)
985{
986	int first_clear;
987	int num_clear;
988	uint8_t *free_kva;
989	int      i;
990
991	KASSERT(nr_pages != 0, ("xbb_get_kva of zero length"));
992
993	first_clear = 0;
994	free_kva = NULL;
995
996	mtx_lock(&xbb->lock);
997
998	/*
999	 * Look for the first available page.  If there are none, we're done.
1000	 */
1001	bit_ffc(xbb->kva_free, xbb->reqlist_kva_pages, &first_clear);
1002
1003	if (first_clear == -1)
1004		goto bailout;
1005
1006	/*
1007	 * Starting at the first available page, look for consecutive free
1008	 * pages that will satisfy the user's request.
1009	 */
1010	for (i = first_clear, num_clear = 0; i < xbb->reqlist_kva_pages; i++) {
1011		/*
1012		 * If this is true, the page is used, so we have to reset
1013		 * the number of clear pages and the first clear page
1014		 * (since it pointed to a region with an insufficient number
1015		 * of clear pages).
1016		 */
1017		if (bit_test(xbb->kva_free, i)) {
1018			num_clear = 0;
1019			first_clear = -1;
1020			continue;
1021		}
1022
1023		if (first_clear == -1)
1024			first_clear = i;
1025
1026		/*
1027		 * If this is true, we've found a large enough free region
1028		 * to satisfy the request.
1029		 */
1030		if (++num_clear == nr_pages) {
1031			bit_nset(xbb->kva_free, first_clear,
1032				 first_clear + nr_pages - 1);
1033
1034			free_kva = xbb->kva +
1035				(uint8_t *)((intptr_t)first_clear * PAGE_SIZE);
1036
1037			KASSERT(free_kva >= (uint8_t *)xbb->kva &&
1038				free_kva + (nr_pages * PAGE_SIZE) <=
1039				(uint8_t *)xbb->ring_config.va,
1040				("Free KVA %p len %d out of range, "
1041				 "kva = %#jx, ring VA = %#jx\n", free_kva,
1042				 nr_pages * PAGE_SIZE, (uintmax_t)xbb->kva,
1043				 (uintmax_t)xbb->ring_config.va));
1044			break;
1045		}
1046	}
1047
1048bailout:
1049
1050	if (free_kva == NULL) {
1051		xbb->flags |= XBBF_RESOURCE_SHORTAGE;
1052		xbb->kva_shortages++;
1053	}
1054
1055	mtx_unlock(&xbb->lock);
1056
1057	return (free_kva);
1058}
1059
1060/**
1061 * Free allocated KVA.
1062 *
1063 * \param xbb	    Per-instance xbb configuration structure.
1064 * \param kva_ptr   Pointer to allocated KVA region.
1065 * \param nr_pages  Number of pages in the KVA region.
1066 */
1067static void
1068xbb_free_kva(struct xbb_softc *xbb, uint8_t *kva_ptr, int nr_pages)
1069{
1070	intptr_t start_page;
1071
1072	mtx_assert(&xbb->lock, MA_OWNED);
1073
1074	start_page = (intptr_t)(kva_ptr - xbb->kva) >> PAGE_SHIFT;
1075	bit_nclear(xbb->kva_free, start_page, start_page + nr_pages - 1);
1076
1077}
1078
1079/**
1080 * Unmap the front-end pages associated with this I/O request.
1081 *
1082 * \param req  The request structure to unmap.
1083 */
1084static void
1085xbb_unmap_reqlist(struct xbb_xen_reqlist *reqlist)
1086{
1087	struct gnttab_unmap_grant_ref unmap[XBB_MAX_SEGMENTS_PER_REQLIST];
1088	u_int			      i;
1089	u_int			      invcount;
1090	int			      error;
1091
1092	invcount = 0;
1093	for (i = 0; i < reqlist->nr_segments; i++) {
1094		if (reqlist->gnt_handles[i] == GRANT_REF_INVALID)
1095			continue;
1096
1097		unmap[invcount].host_addr    = xbb_get_gntaddr(reqlist, i, 0);
1098		unmap[invcount].dev_bus_addr = 0;
1099		unmap[invcount].handle       = reqlist->gnt_handles[i];
1100		reqlist->gnt_handles[i]	     = GRANT_REF_INVALID;
1101		invcount++;
1102	}
1103
1104	error = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
1105					  unmap, invcount);
1106	KASSERT(error == 0, ("Grant table operation failed"));
1107}
1108
1109/**
1110 * Allocate an internal transaction tracking structure from the free pool.
1111 *
1112 * \param xbb  Per-instance xbb configuration structure.
1113 *
1114 * \return  On success, a pointer to the allocated xbb_xen_reqlist structure.
1115 *          Otherwise NULL.
1116 */
1117static inline struct xbb_xen_reqlist *
1118xbb_get_reqlist(struct xbb_softc *xbb)
1119{
1120	struct xbb_xen_reqlist *reqlist;
1121
1122	reqlist = NULL;
1123
1124	mtx_assert(&xbb->lock, MA_OWNED);
1125
1126	if ((reqlist = STAILQ_FIRST(&xbb->reqlist_free_stailq)) != NULL) {
1127		STAILQ_REMOVE_HEAD(&xbb->reqlist_free_stailq, links);
1128		reqlist->flags = XBB_REQLIST_NONE;
1129		reqlist->kva = NULL;
1130		reqlist->status = BLKIF_RSP_OKAY;
1131		reqlist->residual_512b_sectors = 0;
1132		reqlist->num_children = 0;
1133		reqlist->nr_segments = 0;
1134		STAILQ_INIT(&reqlist->contig_req_list);
1135	}
1136
1137	return (reqlist);
1138}
1139
1140/**
1141 * Return an allocated transaction tracking structure to the free pool.
1142 *
1143 * \param xbb        Per-instance xbb configuration structure.
1144 * \param req        The request list structure to free.
1145 * \param wakeup     If set, wakeup the work thread if freeing this reqlist
1146 *                   during a resource shortage condition.
1147 */
1148static inline void
1149xbb_release_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
1150		    int wakeup)
1151{
1152
1153	mtx_assert(&xbb->lock, MA_OWNED);
1154
1155	if (wakeup) {
1156		wakeup = xbb->flags & XBBF_RESOURCE_SHORTAGE;
1157		xbb->flags &= ~XBBF_RESOURCE_SHORTAGE;
1158	}
1159
1160	if (reqlist->kva != NULL)
1161		xbb_free_kva(xbb, reqlist->kva, reqlist->nr_segments);
1162
1163	xbb_release_reqs(xbb, &reqlist->contig_req_list, reqlist->num_children);
1164
1165	STAILQ_INSERT_TAIL(&xbb->reqlist_free_stailq, reqlist, links);
1166
1167	if ((xbb->flags & XBBF_SHUTDOWN) != 0) {
1168		/*
1169		 * Shutdown is in progress.  See if we can
1170		 * progress further now that one more request
1171		 * has completed and been returned to the
1172		 * free pool.
1173		 */
1174		xbb_shutdown(xbb);
1175	}
1176
1177	if (wakeup != 0)
1178		taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task);
1179}
1180
1181/**
1182 * Request resources and do basic request setup.
1183 *
1184 * \param xbb          Per-instance xbb configuration structure.
1185 * \param reqlist      Pointer to reqlist pointer.
1186 * \param ring_req     Pointer to a block ring request.
1187 * \param ring_index   The ring index of this request.
1188 *
1189 * \return  0 for success, non-zero for failure.
1190 */
1191static int
1192xbb_get_resources(struct xbb_softc *xbb, struct xbb_xen_reqlist **reqlist,
1193		  blkif_request_t *ring_req, RING_IDX ring_idx)
1194{
1195	struct xbb_xen_reqlist *nreqlist;
1196	struct xbb_xen_req     *nreq;
1197
1198	nreqlist = NULL;
1199	nreq     = NULL;
1200
1201	mtx_lock(&xbb->lock);
1202
1203	/*
1204	 * We don't allow new resources to be allocated if we're in the
1205	 * process of shutting down.
1206	 */
1207	if ((xbb->flags & XBBF_SHUTDOWN) != 0) {
1208		mtx_unlock(&xbb->lock);
1209		return (1);
1210	}
1211
1212	/*
1213	 * Allocate a reqlist if the caller doesn't have one already.
1214	 */
1215	if (*reqlist == NULL) {
1216		nreqlist = xbb_get_reqlist(xbb);
1217		if (nreqlist == NULL)
1218			goto bailout_error;
1219	}
1220
1221	/* We always allocate a request. */
1222	nreq = xbb_get_req(xbb);
1223	if (nreq == NULL)
1224		goto bailout_error;
1225
1226	mtx_unlock(&xbb->lock);
1227
1228	if (*reqlist == NULL) {
1229		*reqlist = nreqlist;
1230		nreqlist->operation = ring_req->operation;
1231		nreqlist->starting_sector_number = ring_req->sector_number;
1232		STAILQ_INSERT_TAIL(&xbb->reqlist_pending_stailq, nreqlist,
1233				   links);
1234	}
1235
1236	nreq->reqlist = *reqlist;
1237	nreq->req_ring_idx = ring_idx;
1238	nreq->id = ring_req->id;
1239	nreq->operation = ring_req->operation;
1240
1241	if (xbb->abi != BLKIF_PROTOCOL_NATIVE) {
1242		bcopy(ring_req, &nreq->ring_req_storage, sizeof(*ring_req));
1243		nreq->ring_req = &nreq->ring_req_storage;
1244	} else {
1245		nreq->ring_req = ring_req;
1246	}
1247
1248	binuptime(&nreq->ds_t0);
1249	devstat_start_transaction(xbb->xbb_stats_in, &nreq->ds_t0);
1250	STAILQ_INSERT_TAIL(&(*reqlist)->contig_req_list, nreq, links);
1251	(*reqlist)->num_children++;
1252	(*reqlist)->nr_segments += ring_req->nr_segments;
1253
1254	return (0);
1255
1256bailout_error:
1257
1258	/*
1259	 * We're out of resources, so set the shortage flag.  The next time
1260	 * a request is released, we'll try waking up the work thread to
1261	 * see if we can allocate more resources.
1262	 */
1263	xbb->flags |= XBBF_RESOURCE_SHORTAGE;
1264	xbb->request_shortages++;
1265
1266	if (nreq != NULL)
1267		xbb_release_req(xbb, nreq);
1268
1269	if (nreqlist != NULL)
1270		xbb_release_reqlist(xbb, nreqlist, /*wakeup*/ 0);
1271
1272	mtx_unlock(&xbb->lock);
1273
1274	return (1);
1275}
1276
1277/**
1278 * Create and queue a response to a blkif request.
1279 *
1280 * \param xbb     Per-instance xbb configuration structure.
1281 * \param req     The request structure to which to respond.
1282 * \param status  The status code to report.  See BLKIF_RSP_*
1283 *                in sys/xen/interface/io/blkif.h.
1284 */
1285static void
1286xbb_queue_response(struct xbb_softc *xbb, struct xbb_xen_req *req, int status)
1287{
1288	blkif_response_t *resp;
1289
1290	/*
1291	 * The mutex is required here, and should be held across this call
1292	 * until after the subsequent call to xbb_push_responses().  This
1293	 * is to guarantee that another context won't queue responses and
1294	 * push them while we're active.
1295	 *
1296	 * That could lead to the other end being notified of responses
1297	 * before the resources have been freed on this end.  The other end
1298	 * would then be able to queue additional I/O, and we may run out
1299 	 * of resources because we haven't freed them all yet.
1300	 */
1301	mtx_assert(&xbb->lock, MA_OWNED);
1302
1303	/*
1304	 * Place on the response ring for the relevant domain.
1305	 * For now, only the spacing between entries is different
1306	 * in the different ABIs, not the response entry layout.
1307	 */
1308	switch (xbb->abi) {
1309	case BLKIF_PROTOCOL_NATIVE:
1310		resp = RING_GET_RESPONSE(&xbb->rings.native,
1311					 xbb->rings.native.rsp_prod_pvt);
1312		break;
1313	case BLKIF_PROTOCOL_X86_32:
1314		resp = (blkif_response_t *)
1315		    RING_GET_RESPONSE(&xbb->rings.x86_32,
1316				      xbb->rings.x86_32.rsp_prod_pvt);
1317		break;
1318	case BLKIF_PROTOCOL_X86_64:
1319		resp = (blkif_response_t *)
1320		    RING_GET_RESPONSE(&xbb->rings.x86_64,
1321				      xbb->rings.x86_64.rsp_prod_pvt);
1322		break;
1323	default:
1324		panic("Unexpected blkif protocol ABI.");
1325	}
1326
1327	resp->id        = req->id;
1328	resp->operation = req->operation;
1329	resp->status    = status;
1330
1331	if (status != BLKIF_RSP_OKAY)
1332		xbb->reqs_completed_with_error++;
1333
1334	xbb->rings.common.rsp_prod_pvt++;
1335
1336	xbb->reqs_queued_for_completion++;
1337
1338}
1339
1340/**
1341 * Send queued responses to blkif requests.
1342 *
1343 * \param xbb            Per-instance xbb configuration structure.
1344 * \param run_taskqueue  Flag that is set to 1 if the taskqueue
1345 *			 should be run, 0 if it does not need to be run.
1346 * \param notify	 Flag that is set to 1 if the other end should be
1347 * 			 notified via irq, 0 if the other end should not be
1348 *			 notified.
1349 */
1350static void
1351xbb_push_responses(struct xbb_softc *xbb, int *run_taskqueue, int *notify)
1352{
1353	int more_to_do;
1354
1355	/*
1356	 * The mutex is required here.
1357	 */
1358	mtx_assert(&xbb->lock, MA_OWNED);
1359
1360	more_to_do = 0;
1361
1362	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&xbb->rings.common, *notify);
1363
1364	if (xbb->rings.common.rsp_prod_pvt == xbb->rings.common.req_cons) {
1365		/*
1366		 * Tail check for pending requests. Allows frontend to avoid
1367		 * notifications if requests are already in flight (lower
1368		 * overheads and promotes batching).
1369		 */
1370		RING_FINAL_CHECK_FOR_REQUESTS(&xbb->rings.common, more_to_do);
1371	} else if (RING_HAS_UNCONSUMED_REQUESTS(&xbb->rings.common)) {
1372		more_to_do = 1;
1373	}
1374
1375	xbb->reqs_completed += xbb->reqs_queued_for_completion;
1376	xbb->reqs_queued_for_completion = 0;
1377
1378	*run_taskqueue = more_to_do;
1379}
1380
1381/**
1382 * Complete a request list.
1383 *
1384 * \param xbb        Per-instance xbb configuration structure.
1385 * \param reqlist    Allocated internal request list structure.
1386 */
1387static void
1388xbb_complete_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist)
1389{
1390	struct xbb_xen_req *nreq;
1391	off_t		    sectors_sent;
1392	int		    notify, run_taskqueue;
1393
1394	sectors_sent = 0;
1395
1396	if (reqlist->flags & XBB_REQLIST_MAPPED)
1397		xbb_unmap_reqlist(reqlist);
1398
1399	mtx_lock(&xbb->lock);
1400
1401	/*
1402	 * All I/O is done, send the response. A lock is not necessary
1403	 * to protect the request list, because all requests have
1404	 * completed.  Therefore this is the only context accessing this
1405	 * reqlist right now.  However, in order to make sure that no one
1406	 * else queues responses onto the queue or pushes them to the other
1407	 * side while we're active, we need to hold the lock across the
1408	 * calls to xbb_queue_response() and xbb_push_responses().
1409	 */
1410	STAILQ_FOREACH(nreq, &reqlist->contig_req_list, links) {
1411		off_t cur_sectors_sent;
1412
1413		/* Put this response on the ring, but don't push yet */
1414		xbb_queue_response(xbb, nreq, reqlist->status);
1415
1416		/* We don't report bytes sent if there is an error. */
1417		if (reqlist->status == BLKIF_RSP_OKAY)
1418			cur_sectors_sent = nreq->nr_512b_sectors;
1419		else
1420			cur_sectors_sent = 0;
1421
1422		sectors_sent += cur_sectors_sent;
1423
1424		devstat_end_transaction(xbb->xbb_stats_in,
1425					/*bytes*/cur_sectors_sent << 9,
1426					reqlist->ds_tag_type,
1427					reqlist->ds_trans_type,
1428					/*now*/NULL,
1429					/*then*/&nreq->ds_t0);
1430	}
1431
1432	/*
1433	 * Take out any sectors not sent.  If we wind up negative (which
1434	 * might happen if an error is reported as well as a residual), just
1435	 * report 0 sectors sent.
1436	 */
1437	sectors_sent -= reqlist->residual_512b_sectors;
1438	if (sectors_sent < 0)
1439		sectors_sent = 0;
1440
1441	devstat_end_transaction(xbb->xbb_stats,
1442				/*bytes*/ sectors_sent << 9,
1443				reqlist->ds_tag_type,
1444				reqlist->ds_trans_type,
1445				/*now*/NULL,
1446				/*then*/&reqlist->ds_t0);
1447
1448	xbb_release_reqlist(xbb, reqlist, /*wakeup*/ 1);
1449
1450	xbb_push_responses(xbb, &run_taskqueue, &notify);
1451
1452	mtx_unlock(&xbb->lock);
1453
1454	if (run_taskqueue)
1455		taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task);
1456
1457	if (notify)
1458		xen_intr_signal(xbb->xen_intr_handle);
1459}
1460
1461/**
1462 * Completion handler for buffer I/O requests issued by the device
1463 * backend driver.
1464 *
1465 * \param bio  The buffer I/O request on which to perform completion
1466 *             processing.
1467 */
1468static void
1469xbb_bio_done(struct bio *bio)
1470{
1471	struct xbb_softc       *xbb;
1472	struct xbb_xen_reqlist *reqlist;
1473
1474	reqlist = bio->bio_caller1;
1475	xbb     = reqlist->xbb;
1476
1477	reqlist->residual_512b_sectors += bio->bio_resid >> 9;
1478
1479	/*
1480	 * This is a bit imprecise.  With aggregated I/O a single
1481	 * request list can contain multiple front-end requests and
1482	 * a multiple bios may point to a single request.  By carefully
1483	 * walking the request list, we could map residuals and errors
1484	 * back to the original front-end request, but the interface
1485	 * isn't sufficiently rich for us to properly report the error.
1486	 * So, we just treat the entire request list as having failed if an
1487	 * error occurs on any part.  And, if an error occurs, we treat
1488	 * the amount of data transferred as 0.
1489	 *
1490	 * For residuals, we report it on the overall aggregated device,
1491	 * but not on the individual requests, since we don't currently
1492	 * do the work to determine which front-end request to which the
1493	 * residual applies.
1494	 */
1495	if (bio->bio_error) {
1496		DPRINTF("BIO returned error %d for operation on device %s\n",
1497			bio->bio_error, xbb->dev_name);
1498		reqlist->status = BLKIF_RSP_ERROR;
1499
1500		if (bio->bio_error == ENXIO
1501		 && xenbus_get_state(xbb->dev) == XenbusStateConnected) {
1502			/*
1503			 * Backend device has disappeared.  Signal the
1504			 * front-end that we (the device proxy) want to
1505			 * go away.
1506			 */
1507			xenbus_set_state(xbb->dev, XenbusStateClosing);
1508		}
1509	}
1510
1511#ifdef XBB_USE_BOUNCE_BUFFERS
1512	if (bio->bio_cmd == BIO_READ) {
1513		vm_offset_t kva_offset;
1514
1515		kva_offset = (vm_offset_t)bio->bio_data
1516			   - (vm_offset_t)reqlist->bounce;
1517		memcpy((uint8_t *)reqlist->kva + kva_offset,
1518		       bio->bio_data, bio->bio_bcount);
1519	}
1520#endif /* XBB_USE_BOUNCE_BUFFERS */
1521
1522	/*
1523	 * Decrement the pending count for the request list.  When we're
1524	 * done with the requests, send status back for all of them.
1525	 */
1526	if (atomic_fetchadd_int(&reqlist->pendcnt, -1) == 1)
1527		xbb_complete_reqlist(xbb, reqlist);
1528
1529	g_destroy_bio(bio);
1530}
1531
1532/**
1533 * Parse a blkif request into an internal request structure and send
1534 * it to the backend for processing.
1535 *
1536 * \param xbb       Per-instance xbb configuration structure.
1537 * \param reqlist   Allocated internal request list structure.
1538 *
1539 * \return          On success, 0.  For resource shortages, non-zero.
1540 *
1541 * This routine performs the backend common aspects of request parsing
1542 * including compiling an internal request structure, parsing the S/G
1543 * list and any secondary ring requests in which they may reside, and
1544 * the mapping of front-end I/O pages into our domain.
1545 */
1546static int
1547xbb_dispatch_io(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist)
1548{
1549	struct xbb_sg                *xbb_sg;
1550	struct gnttab_map_grant_ref  *map;
1551	struct blkif_request_segment *sg;
1552	struct blkif_request_segment *last_block_sg;
1553	struct xbb_xen_req	     *nreq;
1554	u_int			      nseg;
1555	u_int			      seg_idx;
1556	u_int			      block_segs;
1557	int			      nr_sects;
1558	int			      total_sects;
1559	int			      operation;
1560	uint8_t			      bio_flags;
1561	int			      error;
1562
1563	reqlist->ds_tag_type = DEVSTAT_TAG_SIMPLE;
1564	bio_flags            = 0;
1565	total_sects	     = 0;
1566	nr_sects	     = 0;
1567
1568	/*
1569	 * First determine whether we have enough free KVA to satisfy this
1570	 * request list.  If not, tell xbb_run_queue() so it can go to
1571	 * sleep until we have more KVA.
1572	 */
1573	reqlist->kva = NULL;
1574	if (reqlist->nr_segments != 0) {
1575		reqlist->kva = xbb_get_kva(xbb, reqlist->nr_segments);
1576		if (reqlist->kva == NULL) {
1577			/*
1578			 * If we're out of KVA, return ENOMEM.
1579			 */
1580			return (ENOMEM);
1581		}
1582	}
1583
1584	binuptime(&reqlist->ds_t0);
1585	devstat_start_transaction(xbb->xbb_stats, &reqlist->ds_t0);
1586
1587	switch (reqlist->operation) {
1588	case BLKIF_OP_WRITE_BARRIER:
1589		bio_flags       |= BIO_ORDERED;
1590		reqlist->ds_tag_type = DEVSTAT_TAG_ORDERED;
1591		/* FALLTHROUGH */
1592	case BLKIF_OP_WRITE:
1593		operation = BIO_WRITE;
1594		reqlist->ds_trans_type = DEVSTAT_WRITE;
1595		if ((xbb->flags & XBBF_READ_ONLY) != 0) {
1596			DPRINTF("Attempt to write to read only device %s\n",
1597				xbb->dev_name);
1598			reqlist->status = BLKIF_RSP_ERROR;
1599			goto send_response;
1600		}
1601		break;
1602	case BLKIF_OP_READ:
1603		operation = BIO_READ;
1604		reqlist->ds_trans_type = DEVSTAT_READ;
1605		break;
1606	case BLKIF_OP_FLUSH_DISKCACHE:
1607		/*
1608		 * If this is true, the user has requested that we disable
1609		 * flush support.  So we just complete the requests
1610		 * successfully.
1611		 */
1612		if (xbb->disable_flush != 0) {
1613			goto send_response;
1614		}
1615
1616		/*
1617		 * The user has requested that we only send a real flush
1618		 * for every N flush requests.  So keep count, and either
1619		 * complete the request immediately or queue it for the
1620		 * backend.
1621		 */
1622		if (xbb->flush_interval != 0) {
1623		 	if (++(xbb->flush_count) < xbb->flush_interval) {
1624				goto send_response;
1625			} else
1626				xbb->flush_count = 0;
1627		}
1628
1629		operation = BIO_FLUSH;
1630		reqlist->ds_tag_type = DEVSTAT_TAG_ORDERED;
1631		reqlist->ds_trans_type = DEVSTAT_NO_DATA;
1632		goto do_dispatch;
1633		/*NOTREACHED*/
1634	default:
1635		DPRINTF("error: unknown block io operation [%d]\n",
1636			reqlist->operation);
1637		reqlist->status = BLKIF_RSP_ERROR;
1638		goto send_response;
1639	}
1640
1641	reqlist->xbb  = xbb;
1642	xbb_sg        = xbb->xbb_sgs;
1643	map	      = xbb->maps;
1644	seg_idx	      = 0;
1645
1646	STAILQ_FOREACH(nreq, &reqlist->contig_req_list, links) {
1647		blkif_request_t		*ring_req;
1648		RING_IDX		 req_ring_idx;
1649		u_int			 req_seg_idx;
1650
1651		ring_req	      = nreq->ring_req;
1652		req_ring_idx	      = nreq->req_ring_idx;
1653		nr_sects              = 0;
1654		nseg                  = ring_req->nr_segments;
1655		nreq->nr_pages        = nseg;
1656		nreq->nr_512b_sectors = 0;
1657		req_seg_idx	      = 0;
1658		sg	              = NULL;
1659
1660		/* Check that number of segments is sane. */
1661		if (__predict_false(nseg == 0)
1662		 || __predict_false(nseg > xbb->max_request_segments)) {
1663			DPRINTF("Bad number of segments in request (%d)\n",
1664				nseg);
1665			reqlist->status = BLKIF_RSP_ERROR;
1666			goto send_response;
1667		}
1668
1669		block_segs    = nseg;
1670		sg            = ring_req->seg;
1671		last_block_sg = sg + block_segs;
1672
1673		while (sg < last_block_sg) {
1674			KASSERT(seg_idx <
1675				XBB_MAX_SEGMENTS_PER_REQLIST,
1676				("seg_idx %d is too large, max "
1677				"segs %d\n", seg_idx,
1678				XBB_MAX_SEGMENTS_PER_REQLIST));
1679
1680			xbb_sg->first_sect = sg->first_sect;
1681			xbb_sg->last_sect  = sg->last_sect;
1682			xbb_sg->nsect =
1683			    (int8_t)(sg->last_sect -
1684			    sg->first_sect + 1);
1685
1686			if ((sg->last_sect >= (PAGE_SIZE >> 9))
1687			 || (xbb_sg->nsect <= 0)) {
1688				reqlist->status = BLKIF_RSP_ERROR;
1689				goto send_response;
1690			}
1691
1692			nr_sects += xbb_sg->nsect;
1693			map->host_addr = xbb_get_gntaddr(reqlist,
1694						seg_idx, /*sector*/0);
1695			KASSERT(map->host_addr + PAGE_SIZE <=
1696				xbb->ring_config.gnt_addr,
1697				("Host address %#jx len %d overlaps "
1698				 "ring address %#jx\n",
1699				(uintmax_t)map->host_addr, PAGE_SIZE,
1700				(uintmax_t)xbb->ring_config.gnt_addr));
1701
1702			map->flags     = GNTMAP_host_map;
1703			map->ref       = sg->gref;
1704			map->dom       = xbb->otherend_id;
1705			if (operation == BIO_WRITE)
1706				map->flags |= GNTMAP_readonly;
1707			sg++;
1708			map++;
1709			xbb_sg++;
1710			seg_idx++;
1711			req_seg_idx++;
1712		}
1713
1714		/* Convert to the disk's sector size */
1715		nreq->nr_512b_sectors = nr_sects;
1716		nr_sects = (nr_sects << 9) >> xbb->sector_size_shift;
1717		total_sects += nr_sects;
1718
1719		if ((nreq->nr_512b_sectors &
1720		    ((xbb->sector_size >> 9) - 1)) != 0) {
1721			device_printf(xbb->dev, "%s: I/O size (%d) is not "
1722				      "a multiple of the backing store sector "
1723				      "size (%d)\n", __func__,
1724				      nreq->nr_512b_sectors << 9,
1725				      xbb->sector_size);
1726			reqlist->status = BLKIF_RSP_ERROR;
1727			goto send_response;
1728		}
1729	}
1730
1731	error = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
1732					  xbb->maps, reqlist->nr_segments);
1733	if (error != 0)
1734		panic("Grant table operation failed (%d)", error);
1735
1736	reqlist->flags |= XBB_REQLIST_MAPPED;
1737
1738	for (seg_idx = 0, map = xbb->maps; seg_idx < reqlist->nr_segments;
1739	     seg_idx++, map++){
1740		if (__predict_false(map->status != 0)) {
1741			DPRINTF("invalid buffer -- could not remap "
1742			        "it (%d)\n", map->status);
1743			DPRINTF("Mapping(%d): Host Addr 0x%"PRIx64", flags "
1744			        "0x%x ref 0x%x, dom %d\n", seg_idx,
1745				map->host_addr, map->flags, map->ref,
1746				map->dom);
1747			reqlist->status = BLKIF_RSP_ERROR;
1748			goto send_response;
1749		}
1750
1751		reqlist->gnt_handles[seg_idx] = map->handle;
1752	}
1753	if (reqlist->starting_sector_number + total_sects >
1754	    xbb->media_num_sectors) {
1755		DPRINTF("%s of [%" PRIu64 ",%" PRIu64 "] "
1756			"extends past end of device %s\n",
1757			operation == BIO_READ ? "read" : "write",
1758			reqlist->starting_sector_number,
1759			reqlist->starting_sector_number + total_sects,
1760			xbb->dev_name);
1761		reqlist->status = BLKIF_RSP_ERROR;
1762		goto send_response;
1763	}
1764
1765do_dispatch:
1766
1767	error = xbb->dispatch_io(xbb,
1768				 reqlist,
1769				 operation,
1770				 bio_flags);
1771
1772	if (error != 0) {
1773		reqlist->status = BLKIF_RSP_ERROR;
1774		goto send_response;
1775	}
1776
1777	return (0);
1778
1779send_response:
1780
1781	xbb_complete_reqlist(xbb, reqlist);
1782
1783	return (0);
1784}
1785
1786static __inline int
1787xbb_count_sects(blkif_request_t *ring_req)
1788{
1789	int i;
1790	int cur_size = 0;
1791
1792	for (i = 0; i < ring_req->nr_segments; i++) {
1793		int nsect;
1794
1795		nsect = (int8_t)(ring_req->seg[i].last_sect -
1796			ring_req->seg[i].first_sect + 1);
1797		if (nsect <= 0)
1798			break;
1799
1800		cur_size += nsect;
1801	}
1802
1803	return (cur_size);
1804}
1805
1806/**
1807 * Process incoming requests from the shared communication ring in response
1808 * to a signal on the ring's event channel.
1809 *
1810 * \param context  Callback argument registerd during task initialization -
1811 *                 the xbb_softc for this instance.
1812 * \param pending  The number of taskqueue_enqueue events that have
1813 *                 occurred since this handler was last run.
1814 */
1815static void
1816xbb_run_queue(void *context, int pending)
1817{
1818	struct xbb_softc       *xbb;
1819	blkif_back_rings_t     *rings;
1820	RING_IDX		rp;
1821	uint64_t		cur_sector;
1822	int			cur_operation;
1823	struct xbb_xen_reqlist *reqlist;
1824
1825	xbb   = (struct xbb_softc *)context;
1826	rings = &xbb->rings;
1827
1828	/*
1829	 * Work gather and dispatch loop.  Note that we have a bias here
1830	 * towards gathering I/O sent by blockfront.  We first gather up
1831	 * everything in the ring, as long as we have resources.  Then we
1832	 * dispatch one request, and then attempt to gather up any
1833	 * additional requests that have come in while we were dispatching
1834	 * the request.
1835	 *
1836	 * This allows us to get a clearer picture (via devstat) of how
1837	 * many requests blockfront is queueing to us at any given time.
1838	 */
1839	for (;;) {
1840		int retval;
1841
1842		/*
1843		 * Initialize reqlist to the last element in the pending
1844		 * queue, if there is one.  This allows us to add more
1845		 * requests to that request list, if we have room.
1846		 */
1847		reqlist = STAILQ_LAST(&xbb->reqlist_pending_stailq,
1848				      xbb_xen_reqlist, links);
1849		if (reqlist != NULL) {
1850			cur_sector = reqlist->next_contig_sector;
1851			cur_operation = reqlist->operation;
1852		} else {
1853			cur_operation = 0;
1854			cur_sector    = 0;
1855		}
1856
1857		/*
1858		 * Cache req_prod to avoid accessing a cache line shared
1859		 * with the frontend.
1860		 */
1861		rp = rings->common.sring->req_prod;
1862
1863		/* Ensure we see queued requests up to 'rp'. */
1864		rmb();
1865
1866		/**
1867		 * Run so long as there is work to consume and the generation
1868		 * of a response will not overflow the ring.
1869		 *
1870		 * @note There's a 1 to 1 relationship between requests and
1871		 *       responses, so an overflow should never occur.  This
1872		 *       test is to protect our domain from digesting bogus
1873		 *       data.  Shouldn't we log this?
1874		 */
1875		while (rings->common.req_cons != rp
1876		    && RING_REQUEST_CONS_OVERFLOW(&rings->common,
1877						  rings->common.req_cons) == 0){
1878			blkif_request_t	        ring_req_storage;
1879			blkif_request_t	       *ring_req;
1880			int			cur_size;
1881
1882			switch (xbb->abi) {
1883			case BLKIF_PROTOCOL_NATIVE:
1884				ring_req = RING_GET_REQUEST(&xbb->rings.native,
1885				    rings->common.req_cons);
1886				break;
1887			case BLKIF_PROTOCOL_X86_32:
1888			{
1889				struct blkif_x86_32_request *ring_req32;
1890
1891				ring_req32 = RING_GET_REQUEST(
1892				    &xbb->rings.x86_32, rings->common.req_cons);
1893				blkif_get_x86_32_req(&ring_req_storage,
1894						     ring_req32);
1895				ring_req = &ring_req_storage;
1896				break;
1897			}
1898			case BLKIF_PROTOCOL_X86_64:
1899			{
1900				struct blkif_x86_64_request *ring_req64;
1901
1902				ring_req64 =RING_GET_REQUEST(&xbb->rings.x86_64,
1903				    rings->common.req_cons);
1904				blkif_get_x86_64_req(&ring_req_storage,
1905						     ring_req64);
1906				ring_req = &ring_req_storage;
1907				break;
1908			}
1909			default:
1910				panic("Unexpected blkif protocol ABI.");
1911				/* NOTREACHED */
1912			}
1913
1914			/*
1915			 * Check for situations that would require closing
1916			 * off this I/O for further coalescing:
1917			 *  - Coalescing is turned off.
1918			 *  - Current I/O is out of sequence with the previous
1919			 *    I/O.
1920			 *  - Coalesced I/O would be too large.
1921			 */
1922			if ((reqlist != NULL)
1923			 && ((xbb->no_coalesce_reqs != 0)
1924			  || ((xbb->no_coalesce_reqs == 0)
1925			   && ((ring_req->sector_number != cur_sector)
1926			    || (ring_req->operation != cur_operation)
1927			    || ((ring_req->nr_segments + reqlist->nr_segments) >
1928			         xbb->max_reqlist_segments))))) {
1929				reqlist = NULL;
1930			}
1931
1932			/*
1933			 * Grab and check for all resources in one shot.
1934			 * If we can't get all of the resources we need,
1935			 * the shortage is noted and the thread will get
1936			 * woken up when more resources are available.
1937			 */
1938			retval = xbb_get_resources(xbb, &reqlist, ring_req,
1939						   xbb->rings.common.req_cons);
1940
1941			if (retval != 0) {
1942				/*
1943				 * Resource shortage has been recorded.
1944				 * We'll be scheduled to run once a request
1945				 * object frees up due to a completion.
1946				 */
1947				break;
1948			}
1949
1950			/*
1951			 * Signify that	we can overwrite this request with
1952			 * a response by incrementing our consumer index.
1953			 * The response won't be generated until after
1954			 * we've already consumed all necessary data out
1955			 * of the version of the request in the ring buffer
1956			 * (for native mode).  We must update the consumer
1957			 * index  before issuing back-end I/O so there is
1958			 * no possibility that it will complete and a
1959			 * response be generated before we make room in
1960			 * the queue for that response.
1961			 */
1962			xbb->rings.common.req_cons++;
1963			xbb->reqs_received++;
1964
1965			cur_size = xbb_count_sects(ring_req);
1966			cur_sector = ring_req->sector_number + cur_size;
1967			reqlist->next_contig_sector = cur_sector;
1968			cur_operation = ring_req->operation;
1969		}
1970
1971		/* Check for I/O to dispatch */
1972		reqlist = STAILQ_FIRST(&xbb->reqlist_pending_stailq);
1973		if (reqlist == NULL) {
1974			/*
1975			 * We're out of work to do, put the task queue to
1976			 * sleep.
1977			 */
1978			break;
1979		}
1980
1981		/*
1982		 * Grab the first request off the queue and attempt
1983		 * to dispatch it.
1984		 */
1985		STAILQ_REMOVE_HEAD(&xbb->reqlist_pending_stailq, links);
1986
1987		retval = xbb_dispatch_io(xbb, reqlist);
1988		if (retval != 0) {
1989			/*
1990			 * xbb_dispatch_io() returns non-zero only when
1991			 * there is a resource shortage.  If that's the
1992			 * case, re-queue this request on the head of the
1993			 * queue, and go to sleep until we have more
1994			 * resources.
1995			 */
1996			STAILQ_INSERT_HEAD(&xbb->reqlist_pending_stailq,
1997					   reqlist, links);
1998			break;
1999		} else {
2000			/*
2001			 * If we still have anything on the queue after
2002			 * removing the head entry, that is because we
2003			 * met one of the criteria to create a new
2004			 * request list (outlined above), and we'll call
2005			 * that a forced dispatch for statistical purposes.
2006			 *
2007			 * Otherwise, if there is only one element on the
2008			 * queue, we coalesced everything available on
2009			 * the ring and we'll call that a normal dispatch.
2010			 */
2011			reqlist = STAILQ_FIRST(&xbb->reqlist_pending_stailq);
2012
2013			if (reqlist != NULL)
2014				xbb->forced_dispatch++;
2015			else
2016				xbb->normal_dispatch++;
2017
2018			xbb->total_dispatch++;
2019		}
2020	}
2021}
2022
2023/**
2024 * Interrupt handler bound to the shared ring's event channel.
2025 *
2026 * \param arg  Callback argument registerd during event channel
2027 *             binding - the xbb_softc for this instance.
2028 */
2029static int
2030xbb_filter(void *arg)
2031{
2032	struct xbb_softc *xbb;
2033
2034	/* Defer to taskqueue thread. */
2035	xbb = (struct xbb_softc *)arg;
2036	taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task);
2037
2038	return (FILTER_HANDLED);
2039}
2040
2041SDT_PROVIDER_DEFINE(xbb);
2042SDT_PROBE_DEFINE1(xbb, kernel, xbb_dispatch_dev, flush, "int");
2043SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_dev, read, "int", "uint64_t",
2044		  "uint64_t");
2045SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_dev, write, "int",
2046		  "uint64_t", "uint64_t");
2047
2048/*----------------------------- Backend Handlers -----------------------------*/
2049/**
2050 * Backend handler for character device access.
2051 *
2052 * \param xbb        Per-instance xbb configuration structure.
2053 * \param reqlist    Allocated internal request list structure.
2054 * \param operation  BIO_* I/O operation code.
2055 * \param bio_flags  Additional bio_flag data to pass to any generated
2056 *                   bios (e.g. BIO_ORDERED)..
2057 *
2058 * \return  0 for success, errno codes for failure.
2059 */
2060static int
2061xbb_dispatch_dev(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
2062		 int operation, int bio_flags)
2063{
2064	struct xbb_dev_data *dev_data;
2065	struct bio          *bios[XBB_MAX_SEGMENTS_PER_REQLIST];
2066	off_t                bio_offset;
2067	struct bio          *bio;
2068	struct xbb_sg       *xbb_sg;
2069	u_int	             nbio;
2070	u_int                bio_idx;
2071	u_int		     nseg;
2072	u_int                seg_idx;
2073	int                  error;
2074
2075	dev_data   = &xbb->backend.dev;
2076	bio_offset = (off_t)reqlist->starting_sector_number
2077		   << xbb->sector_size_shift;
2078	error      = 0;
2079	nbio       = 0;
2080	bio_idx    = 0;
2081
2082	if (operation == BIO_FLUSH) {
2083		bio = g_new_bio();
2084		if (__predict_false(bio == NULL)) {
2085			DPRINTF("Unable to allocate bio for BIO_FLUSH\n");
2086			error = ENOMEM;
2087			return (error);
2088		}
2089
2090		bio->bio_cmd	 = BIO_FLUSH;
2091		bio->bio_flags	|= BIO_ORDERED;
2092		bio->bio_dev	 = dev_data->cdev;
2093		bio->bio_offset	 = 0;
2094		bio->bio_data	 = 0;
2095		bio->bio_done	 = xbb_bio_done;
2096		bio->bio_caller1 = reqlist;
2097		bio->bio_pblkno	 = 0;
2098
2099		reqlist->pendcnt = 1;
2100
2101		SDT_PROBE1(xbb, kernel, xbb_dispatch_dev, flush,
2102			   device_get_unit(xbb->dev));
2103
2104		(*dev_data->csw->d_strategy)(bio);
2105
2106		return (0);
2107	}
2108
2109	xbb_sg = xbb->xbb_sgs;
2110	bio    = NULL;
2111	nseg = reqlist->nr_segments;
2112
2113	for (seg_idx = 0; seg_idx < nseg; seg_idx++, xbb_sg++) {
2114		/*
2115		 * KVA will not be contiguous, so any additional
2116		 * I/O will need to be represented in a new bio.
2117		 */
2118		if ((bio != NULL)
2119		 && (xbb_sg->first_sect != 0)) {
2120			if ((bio->bio_length & (xbb->sector_size - 1)) != 0) {
2121				printf("%s: Discontiguous I/O request "
2122				       "from domain %d ends on "
2123				       "non-sector boundary\n",
2124				       __func__, xbb->otherend_id);
2125				error = EINVAL;
2126				goto fail_free_bios;
2127			}
2128			bio = NULL;
2129		}
2130
2131		if (bio == NULL) {
2132			/*
2133			 * Make sure that the start of this bio is
2134			 * aligned to a device sector.
2135			 */
2136			if ((bio_offset & (xbb->sector_size - 1)) != 0){
2137				printf("%s: Misaligned I/O request "
2138				       "from domain %d\n", __func__,
2139				       xbb->otherend_id);
2140				error = EINVAL;
2141				goto fail_free_bios;
2142			}
2143
2144			bio = bios[nbio++] = g_new_bio();
2145			if (__predict_false(bio == NULL)) {
2146				error = ENOMEM;
2147				goto fail_free_bios;
2148			}
2149			bio->bio_cmd     = operation;
2150			bio->bio_flags  |= bio_flags;
2151			bio->bio_dev     = dev_data->cdev;
2152			bio->bio_offset  = bio_offset;
2153			bio->bio_data    = xbb_reqlist_ioaddr(reqlist, seg_idx,
2154						xbb_sg->first_sect);
2155			bio->bio_done    = xbb_bio_done;
2156			bio->bio_caller1 = reqlist;
2157			bio->bio_pblkno  = bio_offset >> xbb->sector_size_shift;
2158		}
2159
2160		bio->bio_length += xbb_sg->nsect << 9;
2161		bio->bio_bcount  = bio->bio_length;
2162		bio_offset      += xbb_sg->nsect << 9;
2163
2164		if (xbb_sg->last_sect != (PAGE_SIZE - 512) >> 9) {
2165			if ((bio->bio_length & (xbb->sector_size - 1)) != 0) {
2166				printf("%s: Discontiguous I/O request "
2167				       "from domain %d ends on "
2168				       "non-sector boundary\n",
2169				       __func__, xbb->otherend_id);
2170				error = EINVAL;
2171				goto fail_free_bios;
2172			}
2173			/*
2174			 * KVA will not be contiguous, so any additional
2175			 * I/O will need to be represented in a new bio.
2176			 */
2177			bio = NULL;
2178		}
2179	}
2180
2181	reqlist->pendcnt = nbio;
2182
2183	for (bio_idx = 0; bio_idx < nbio; bio_idx++)
2184	{
2185#ifdef XBB_USE_BOUNCE_BUFFERS
2186		vm_offset_t kva_offset;
2187
2188		kva_offset = (vm_offset_t)bios[bio_idx]->bio_data
2189			   - (vm_offset_t)reqlist->bounce;
2190		if (operation == BIO_WRITE) {
2191			memcpy(bios[bio_idx]->bio_data,
2192			       (uint8_t *)reqlist->kva + kva_offset,
2193			       bios[bio_idx]->bio_bcount);
2194		}
2195#endif
2196		if (operation == BIO_READ) {
2197			SDT_PROBE3(xbb, kernel, xbb_dispatch_dev, read,
2198				   device_get_unit(xbb->dev),
2199				   bios[bio_idx]->bio_offset,
2200				   bios[bio_idx]->bio_length);
2201		} else if (operation == BIO_WRITE) {
2202			SDT_PROBE3(xbb, kernel, xbb_dispatch_dev, write,
2203				   device_get_unit(xbb->dev),
2204				   bios[bio_idx]->bio_offset,
2205				   bios[bio_idx]->bio_length);
2206		}
2207		(*dev_data->csw->d_strategy)(bios[bio_idx]);
2208	}
2209
2210	return (error);
2211
2212fail_free_bios:
2213	for (bio_idx = 0; bio_idx < (nbio-1); bio_idx++)
2214		g_destroy_bio(bios[bio_idx]);
2215
2216	return (error);
2217}
2218
2219SDT_PROBE_DEFINE1(xbb, kernel, xbb_dispatch_file, flush, "int");
2220SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_file, read, "int", "uint64_t",
2221		  "uint64_t");
2222SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_file, write, "int",
2223		  "uint64_t", "uint64_t");
2224
2225/**
2226 * Backend handler for file access.
2227 *
2228 * \param xbb        Per-instance xbb configuration structure.
2229 * \param reqlist    Allocated internal request list.
2230 * \param operation  BIO_* I/O operation code.
2231 * \param flags      Additional bio_flag data to pass to any generated bios
2232 *                   (e.g. BIO_ORDERED)..
2233 *
2234 * \return  0 for success, errno codes for failure.
2235 */
2236static int
2237xbb_dispatch_file(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
2238		  int operation, int flags)
2239{
2240	struct xbb_file_data *file_data;
2241	u_int                 seg_idx;
2242	u_int		      nseg;
2243	struct uio            xuio;
2244	struct xbb_sg        *xbb_sg;
2245	struct iovec         *xiovec;
2246#ifdef XBB_USE_BOUNCE_BUFFERS
2247	void                **p_vaddr;
2248	int                   saved_uio_iovcnt;
2249#endif /* XBB_USE_BOUNCE_BUFFERS */
2250	int                   error;
2251
2252	file_data = &xbb->backend.file;
2253	error = 0;
2254	bzero(&xuio, sizeof(xuio));
2255
2256	switch (operation) {
2257	case BIO_READ:
2258		xuio.uio_rw = UIO_READ;
2259		break;
2260	case BIO_WRITE:
2261		xuio.uio_rw = UIO_WRITE;
2262		break;
2263	case BIO_FLUSH: {
2264		struct mount *mountpoint;
2265
2266		SDT_PROBE1(xbb, kernel, xbb_dispatch_file, flush,
2267			   device_get_unit(xbb->dev));
2268
2269		(void) vn_start_write(xbb->vn, &mountpoint, V_WAIT);
2270
2271		vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY);
2272		error = VOP_FSYNC(xbb->vn, MNT_WAIT, curthread);
2273		VOP_UNLOCK(xbb->vn);
2274
2275		vn_finished_write(mountpoint);
2276
2277		goto bailout_send_response;
2278		/* NOTREACHED */
2279	}
2280	default:
2281		panic("invalid operation %d", operation);
2282		/* NOTREACHED */
2283	}
2284	xuio.uio_offset = (vm_offset_t)reqlist->starting_sector_number
2285			<< xbb->sector_size_shift;
2286	xuio.uio_segflg = UIO_SYSSPACE;
2287	xuio.uio_iov = file_data->xiovecs;
2288	xuio.uio_iovcnt = 0;
2289	xbb_sg = xbb->xbb_sgs;
2290	nseg = reqlist->nr_segments;
2291
2292	for (xiovec = NULL, seg_idx = 0; seg_idx < nseg; seg_idx++, xbb_sg++) {
2293		/*
2294		 * If the first sector is not 0, the KVA will
2295		 * not be contiguous and we'll need to go on
2296		 * to another segment.
2297		 */
2298		if (xbb_sg->first_sect != 0)
2299			xiovec = NULL;
2300
2301		if (xiovec == NULL) {
2302			xiovec = &file_data->xiovecs[xuio.uio_iovcnt];
2303			xiovec->iov_base = xbb_reqlist_ioaddr(reqlist,
2304			    seg_idx, xbb_sg->first_sect);
2305#ifdef XBB_USE_BOUNCE_BUFFERS
2306			/*
2307			 * Store the address of the incoming
2308			 * buffer at this particular offset
2309			 * as well, so we can do the copy
2310			 * later without having to do more
2311			 * work to recalculate this address.
2312		 	 */
2313			p_vaddr = &file_data->xiovecs_vaddr[xuio.uio_iovcnt];
2314			*p_vaddr = xbb_reqlist_vaddr(reqlist, seg_idx,
2315			    xbb_sg->first_sect);
2316#endif /* XBB_USE_BOUNCE_BUFFERS */
2317			xiovec->iov_len = 0;
2318			xuio.uio_iovcnt++;
2319		}
2320
2321		xiovec->iov_len += xbb_sg->nsect << 9;
2322
2323		xuio.uio_resid += xbb_sg->nsect << 9;
2324
2325		/*
2326		 * If the last sector is not the full page
2327		 * size count, the next segment will not be
2328		 * contiguous in KVA and we need a new iovec.
2329		 */
2330		if (xbb_sg->last_sect != (PAGE_SIZE - 512) >> 9)
2331			xiovec = NULL;
2332	}
2333
2334	xuio.uio_td = curthread;
2335
2336#ifdef XBB_USE_BOUNCE_BUFFERS
2337	saved_uio_iovcnt = xuio.uio_iovcnt;
2338
2339	if (operation == BIO_WRITE) {
2340		/* Copy the write data to the local buffer. */
2341		for (seg_idx = 0, p_vaddr = file_data->xiovecs_vaddr,
2342		     xiovec = xuio.uio_iov; seg_idx < xuio.uio_iovcnt;
2343		     seg_idx++, xiovec++, p_vaddr++) {
2344			memcpy(xiovec->iov_base, *p_vaddr, xiovec->iov_len);
2345		}
2346	} else {
2347		/*
2348		 * We only need to save off the iovecs in the case of a
2349		 * read, because the copy for the read happens after the
2350		 * VOP_READ().  (The uio will get modified in that call
2351		 * sequence.)
2352		 */
2353		memcpy(file_data->saved_xiovecs, xuio.uio_iov,
2354		       xuio.uio_iovcnt * sizeof(xuio.uio_iov[0]));
2355	}
2356#endif /* XBB_USE_BOUNCE_BUFFERS */
2357
2358	switch (operation) {
2359	case BIO_READ:
2360
2361		SDT_PROBE3(xbb, kernel, xbb_dispatch_file, read,
2362			   device_get_unit(xbb->dev), xuio.uio_offset,
2363			   xuio.uio_resid);
2364
2365		vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY);
2366
2367		/*
2368		 * UFS pays attention to IO_DIRECT for reads.  If the
2369		 * DIRECTIO option is configured into the kernel, it calls
2370		 * ffs_rawread().  But that only works for single-segment
2371		 * uios with user space addresses.  In our case, with a
2372		 * kernel uio, it still reads into the buffer cache, but it
2373		 * will just try to release the buffer from the cache later
2374		 * on in ffs_read().
2375		 *
2376		 * ZFS does not pay attention to IO_DIRECT for reads.
2377		 *
2378		 * UFS does not pay attention to IO_SYNC for reads.
2379		 *
2380		 * ZFS pays attention to IO_SYNC (which translates into the
2381		 * Solaris define FRSYNC for zfs_read()) for reads.  It
2382		 * attempts to sync the file before reading.
2383		 *
2384		 * So, to attempt to provide some barrier semantics in the
2385		 * BIO_ORDERED case, set both IO_DIRECT and IO_SYNC.
2386		 */
2387		error = VOP_READ(xbb->vn, &xuio, (flags & BIO_ORDERED) ?
2388				 (IO_DIRECT|IO_SYNC) : 0, file_data->cred);
2389
2390		VOP_UNLOCK(xbb->vn);
2391		break;
2392	case BIO_WRITE: {
2393		struct mount *mountpoint;
2394
2395		SDT_PROBE3(xbb, kernel, xbb_dispatch_file, write,
2396			   device_get_unit(xbb->dev), xuio.uio_offset,
2397			   xuio.uio_resid);
2398
2399		(void)vn_start_write(xbb->vn, &mountpoint, V_WAIT);
2400
2401		vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY);
2402
2403		/*
2404		 * UFS pays attention to IO_DIRECT for writes.  The write
2405		 * is done asynchronously.  (Normally the write would just
2406		 * get put into cache.
2407		 *
2408		 * UFS pays attention to IO_SYNC for writes.  It will
2409		 * attempt to write the buffer out synchronously if that
2410		 * flag is set.
2411		 *
2412		 * ZFS does not pay attention to IO_DIRECT for writes.
2413		 *
2414		 * ZFS pays attention to IO_SYNC (a.k.a. FSYNC or FRSYNC)
2415		 * for writes.  It will flush the transaction from the
2416		 * cache before returning.
2417		 *
2418		 * So if we've got the BIO_ORDERED flag set, we want
2419		 * IO_SYNC in either the UFS or ZFS case.
2420		 */
2421		error = VOP_WRITE(xbb->vn, &xuio, (flags & BIO_ORDERED) ?
2422				  IO_SYNC : 0, file_data->cred);
2423		VOP_UNLOCK(xbb->vn);
2424
2425		vn_finished_write(mountpoint);
2426
2427		break;
2428	}
2429	default:
2430		panic("invalid operation %d", operation);
2431		/* NOTREACHED */
2432	}
2433
2434#ifdef XBB_USE_BOUNCE_BUFFERS
2435	/* We only need to copy here for read operations */
2436	if (operation == BIO_READ) {
2437		for (seg_idx = 0, p_vaddr = file_data->xiovecs_vaddr,
2438		     xiovec = file_data->saved_xiovecs;
2439		     seg_idx < saved_uio_iovcnt; seg_idx++,
2440		     xiovec++, p_vaddr++) {
2441			/*
2442			 * Note that we have to use the copy of the
2443			 * io vector we made above.  uiomove() modifies
2444			 * the uio and its referenced vector as uiomove
2445			 * performs the copy, so we can't rely on any
2446			 * state from the original uio.
2447			 */
2448			memcpy(*p_vaddr, xiovec->iov_base, xiovec->iov_len);
2449		}
2450	}
2451#endif /* XBB_USE_BOUNCE_BUFFERS */
2452
2453bailout_send_response:
2454
2455	if (error != 0)
2456		reqlist->status = BLKIF_RSP_ERROR;
2457
2458	xbb_complete_reqlist(xbb, reqlist);
2459
2460	return (0);
2461}
2462
2463/*--------------------------- Backend Configuration --------------------------*/
2464/**
2465 * Close and cleanup any backend device/file specific state for this
2466 * block back instance.
2467 *
2468 * \param xbb  Per-instance xbb configuration structure.
2469 */
2470static void
2471xbb_close_backend(struct xbb_softc *xbb)
2472{
2473	DROP_GIANT();
2474	DPRINTF("closing dev=%s\n", xbb->dev_name);
2475	if (xbb->vn) {
2476		int flags = FREAD;
2477
2478		if ((xbb->flags & XBBF_READ_ONLY) == 0)
2479			flags |= FWRITE;
2480
2481		switch (xbb->device_type) {
2482		case XBB_TYPE_DISK:
2483			if (xbb->backend.dev.csw) {
2484				dev_relthread(xbb->backend.dev.cdev,
2485					      xbb->backend.dev.dev_ref);
2486				xbb->backend.dev.csw  = NULL;
2487				xbb->backend.dev.cdev = NULL;
2488			}
2489			break;
2490		case XBB_TYPE_FILE:
2491			break;
2492		case XBB_TYPE_NONE:
2493		default:
2494			panic("Unexpected backend type.");
2495			break;
2496		}
2497
2498		(void)vn_close(xbb->vn, flags, NOCRED, curthread);
2499		xbb->vn = NULL;
2500
2501		switch (xbb->device_type) {
2502		case XBB_TYPE_DISK:
2503			break;
2504		case XBB_TYPE_FILE:
2505			if (xbb->backend.file.cred != NULL) {
2506				crfree(xbb->backend.file.cred);
2507				xbb->backend.file.cred = NULL;
2508			}
2509			break;
2510		case XBB_TYPE_NONE:
2511		default:
2512			panic("Unexpected backend type.");
2513			break;
2514		}
2515	}
2516	PICKUP_GIANT();
2517}
2518
2519/**
2520 * Open a character device to be used for backend I/O.
2521 *
2522 * \param xbb  Per-instance xbb configuration structure.
2523 *
2524 * \return  0 for success, errno codes for failure.
2525 */
2526static int
2527xbb_open_dev(struct xbb_softc *xbb)
2528{
2529	struct vattr   vattr;
2530	struct cdev   *dev;
2531	struct cdevsw *devsw;
2532	int	       error;
2533
2534	xbb->device_type = XBB_TYPE_DISK;
2535	xbb->dispatch_io = xbb_dispatch_dev;
2536	xbb->backend.dev.cdev = xbb->vn->v_rdev;
2537	xbb->backend.dev.csw = dev_refthread(xbb->backend.dev.cdev,
2538					     &xbb->backend.dev.dev_ref);
2539	if (xbb->backend.dev.csw == NULL)
2540		panic("Unable to retrieve device switch");
2541
2542	error = VOP_GETATTR(xbb->vn, &vattr, NOCRED);
2543	if (error) {
2544		xenbus_dev_fatal(xbb->dev, error, "error getting "
2545				 "vnode attributes for device %s",
2546				 xbb->dev_name);
2547		return (error);
2548	}
2549
2550	dev = xbb->vn->v_rdev;
2551	devsw = dev->si_devsw;
2552	if (!devsw->d_ioctl) {
2553		xenbus_dev_fatal(xbb->dev, ENODEV, "no d_ioctl for "
2554				 "device %s!", xbb->dev_name);
2555		return (ENODEV);
2556	}
2557
2558	error = devsw->d_ioctl(dev, DIOCGSECTORSIZE,
2559			       (caddr_t)&xbb->sector_size, FREAD,
2560			       curthread);
2561	if (error) {
2562		xenbus_dev_fatal(xbb->dev, error,
2563				 "error calling ioctl DIOCGSECTORSIZE "
2564				 "for device %s", xbb->dev_name);
2565		return (error);
2566	}
2567
2568	error = devsw->d_ioctl(dev, DIOCGMEDIASIZE,
2569			       (caddr_t)&xbb->media_size, FREAD,
2570			       curthread);
2571	if (error) {
2572		xenbus_dev_fatal(xbb->dev, error,
2573				 "error calling ioctl DIOCGMEDIASIZE "
2574				 "for device %s", xbb->dev_name);
2575		return (error);
2576	}
2577
2578	return (0);
2579}
2580
2581/**
2582 * Open a file to be used for backend I/O.
2583 *
2584 * \param xbb  Per-instance xbb configuration structure.
2585 *
2586 * \return  0 for success, errno codes for failure.
2587 */
2588static int
2589xbb_open_file(struct xbb_softc *xbb)
2590{
2591	struct xbb_file_data *file_data;
2592	struct vattr          vattr;
2593	int                   error;
2594
2595	file_data = &xbb->backend.file;
2596	xbb->device_type = XBB_TYPE_FILE;
2597	xbb->dispatch_io = xbb_dispatch_file;
2598	error = VOP_GETATTR(xbb->vn, &vattr, curthread->td_ucred);
2599	if (error != 0) {
2600		xenbus_dev_fatal(xbb->dev, error,
2601				 "error calling VOP_GETATTR()"
2602				 "for file %s", xbb->dev_name);
2603		return (error);
2604	}
2605
2606	/*
2607	 * Verify that we have the ability to upgrade to exclusive
2608	 * access on this file so we can trap errors at open instead
2609	 * of reporting them during first access.
2610	 */
2611	if (VOP_ISLOCKED(xbb->vn) != LK_EXCLUSIVE) {
2612		vn_lock(xbb->vn, LK_UPGRADE | LK_RETRY);
2613		if (VN_IS_DOOMED(xbb->vn)) {
2614			error = EBADF;
2615			xenbus_dev_fatal(xbb->dev, error,
2616					 "error locking file %s",
2617					 xbb->dev_name);
2618
2619			return (error);
2620		}
2621	}
2622
2623	file_data->cred = crhold(curthread->td_ucred);
2624	xbb->media_size = vattr.va_size;
2625
2626	/*
2627	 * XXX KDM vattr.va_blocksize may be larger than 512 bytes here.
2628	 * With ZFS, it is 131072 bytes.  Block sizes that large don't work
2629	 * with disklabel and UFS on FreeBSD at least.  Large block sizes
2630	 * may not work with other OSes as well.  So just export a sector
2631	 * size of 512 bytes, which should work with any OS or
2632	 * application.  Since our backing is a file, any block size will
2633	 * work fine for the backing store.
2634	 */
2635#if 0
2636	xbb->sector_size = vattr.va_blocksize;
2637#endif
2638	xbb->sector_size = 512;
2639
2640	/*
2641	 * Sanity check.  The media size has to be at least one
2642	 * sector long.
2643	 */
2644	if (xbb->media_size < xbb->sector_size) {
2645		error = EINVAL;
2646		xenbus_dev_fatal(xbb->dev, error,
2647				 "file %s size %ju < block size %u",
2648				 xbb->dev_name,
2649				 (uintmax_t)xbb->media_size,
2650				 xbb->sector_size);
2651	}
2652	return (error);
2653}
2654
2655/**
2656 * Open the backend provider for this connection.
2657 *
2658 * \param xbb  Per-instance xbb configuration structure.
2659 *
2660 * \return  0 for success, errno codes for failure.
2661 */
2662static int
2663xbb_open_backend(struct xbb_softc *xbb)
2664{
2665	struct nameidata nd;
2666	int		 flags;
2667	int		 error;
2668
2669	flags = FREAD;
2670	error = 0;
2671
2672	DPRINTF("opening dev=%s\n", xbb->dev_name);
2673
2674	if (rootvnode == NULL) {
2675		xenbus_dev_fatal(xbb->dev, ENOENT,
2676				 "Root file system not mounted");
2677		return (ENOENT);
2678	}
2679
2680	if ((xbb->flags & XBBF_READ_ONLY) == 0)
2681		flags |= FWRITE;
2682
2683	pwd_ensure_dirs();
2684
2685 again:
2686	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, xbb->dev_name, curthread);
2687	error = vn_open(&nd, &flags, 0, NULL);
2688	if (error) {
2689		/*
2690		 * This is the only reasonable guess we can make as far as
2691		 * path if the user doesn't give us a fully qualified path.
2692		 * If they want to specify a file, they need to specify the
2693		 * full path.
2694		 */
2695		if (xbb->dev_name[0] != '/') {
2696			char *dev_path = "/dev/";
2697			char *dev_name;
2698
2699			/* Try adding device path at beginning of name */
2700			dev_name = malloc(strlen(xbb->dev_name)
2701					+ strlen(dev_path) + 1,
2702					  M_XENBLOCKBACK, M_NOWAIT);
2703			if (dev_name) {
2704				sprintf(dev_name, "%s%s", dev_path,
2705					xbb->dev_name);
2706				free(xbb->dev_name, M_XENBLOCKBACK);
2707				xbb->dev_name = dev_name;
2708				goto again;
2709			}
2710		}
2711		xenbus_dev_fatal(xbb->dev, error, "error opening device %s",
2712				 xbb->dev_name);
2713		return (error);
2714	}
2715
2716	NDFREE(&nd, NDF_ONLY_PNBUF);
2717
2718	xbb->vn = nd.ni_vp;
2719
2720	/* We only support disks and files. */
2721	if (vn_isdisk_error(xbb->vn, &error)) {
2722		error = xbb_open_dev(xbb);
2723	} else if (xbb->vn->v_type == VREG) {
2724		error = xbb_open_file(xbb);
2725	} else {
2726		error = EINVAL;
2727		xenbus_dev_fatal(xbb->dev, error, "%s is not a disk "
2728				 "or file", xbb->dev_name);
2729	}
2730	VOP_UNLOCK(xbb->vn);
2731
2732	if (error != 0) {
2733		xbb_close_backend(xbb);
2734		return (error);
2735	}
2736
2737	xbb->sector_size_shift = fls(xbb->sector_size) - 1;
2738	xbb->media_num_sectors = xbb->media_size >> xbb->sector_size_shift;
2739
2740	DPRINTF("opened %s=%s sector_size=%u media_size=%" PRId64 "\n",
2741		(xbb->device_type == XBB_TYPE_DISK) ? "dev" : "file",
2742		xbb->dev_name, xbb->sector_size, xbb->media_size);
2743
2744	return (0);
2745}
2746
2747/*------------------------ Inter-Domain Communication ------------------------*/
2748/**
2749 * Free dynamically allocated KVA or pseudo-physical address allocations.
2750 *
2751 * \param xbb  Per-instance xbb configuration structure.
2752 */
2753static void
2754xbb_free_communication_mem(struct xbb_softc *xbb)
2755{
2756	if (xbb->kva != 0) {
2757		if (xbb->pseudo_phys_res != NULL) {
2758			xenmem_free(xbb->dev, xbb->pseudo_phys_res_id,
2759			    xbb->pseudo_phys_res);
2760			xbb->pseudo_phys_res = NULL;
2761		}
2762	}
2763	xbb->kva = 0;
2764	xbb->gnt_base_addr = 0;
2765	if (xbb->kva_free != NULL) {
2766		free(xbb->kva_free, M_XENBLOCKBACK);
2767		xbb->kva_free = NULL;
2768	}
2769}
2770
2771/**
2772 * Cleanup all inter-domain communication mechanisms.
2773 *
2774 * \param xbb  Per-instance xbb configuration structure.
2775 */
2776static int
2777xbb_disconnect(struct xbb_softc *xbb)
2778{
2779	struct gnttab_unmap_grant_ref  ops[XBB_MAX_RING_PAGES];
2780	struct gnttab_unmap_grant_ref *op;
2781	u_int			       ring_idx;
2782	int			       error;
2783
2784	DPRINTF("\n");
2785
2786	if ((xbb->flags & XBBF_RING_CONNECTED) == 0)
2787		return (0);
2788
2789	mtx_unlock(&xbb->lock);
2790	xen_intr_unbind(&xbb->xen_intr_handle);
2791	taskqueue_drain(xbb->io_taskqueue, &xbb->io_task);
2792	mtx_lock(&xbb->lock);
2793
2794	/*
2795	 * No new interrupts can generate work, but we must wait
2796	 * for all currently active requests to drain.
2797	 */
2798	if (xbb->active_request_count != 0)
2799		return (EAGAIN);
2800
2801	for (ring_idx = 0, op = ops;
2802	     ring_idx < xbb->ring_config.ring_pages;
2803	     ring_idx++, op++) {
2804		op->host_addr    = xbb->ring_config.gnt_addr
2805			         + (ring_idx * PAGE_SIZE);
2806		op->dev_bus_addr = xbb->ring_config.bus_addr[ring_idx];
2807		op->handle	 = xbb->ring_config.handle[ring_idx];
2808	}
2809
2810	error = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, ops,
2811					  xbb->ring_config.ring_pages);
2812	if (error != 0)
2813		panic("Grant table op failed (%d)", error);
2814
2815	xbb_free_communication_mem(xbb);
2816
2817	if (xbb->requests != NULL) {
2818		free(xbb->requests, M_XENBLOCKBACK);
2819		xbb->requests = NULL;
2820	}
2821
2822	if (xbb->request_lists != NULL) {
2823		struct xbb_xen_reqlist *reqlist;
2824		int i;
2825
2826		/* There is one request list for ever allocated request. */
2827		for (i = 0, reqlist = xbb->request_lists;
2828		     i < xbb->max_requests; i++, reqlist++){
2829#ifdef XBB_USE_BOUNCE_BUFFERS
2830			if (reqlist->bounce != NULL) {
2831				free(reqlist->bounce, M_XENBLOCKBACK);
2832				reqlist->bounce = NULL;
2833			}
2834#endif
2835			if (reqlist->gnt_handles != NULL) {
2836				free(reqlist->gnt_handles, M_XENBLOCKBACK);
2837				reqlist->gnt_handles = NULL;
2838			}
2839		}
2840		free(xbb->request_lists, M_XENBLOCKBACK);
2841		xbb->request_lists = NULL;
2842	}
2843
2844	xbb->flags &= ~XBBF_RING_CONNECTED;
2845	return (0);
2846}
2847
2848/**
2849 * Map shared memory ring into domain local address space, initialize
2850 * ring control structures, and bind an interrupt to the event channel
2851 * used to notify us of ring changes.
2852 *
2853 * \param xbb  Per-instance xbb configuration structure.
2854 */
2855static int
2856xbb_connect_ring(struct xbb_softc *xbb)
2857{
2858	struct gnttab_map_grant_ref  gnts[XBB_MAX_RING_PAGES];
2859	struct gnttab_map_grant_ref *gnt;
2860	u_int			     ring_idx;
2861	int			     error;
2862
2863	if ((xbb->flags & XBBF_RING_CONNECTED) != 0)
2864		return (0);
2865
2866	/*
2867	 * Kva for our ring is at the tail of the region of kva allocated
2868	 * by xbb_alloc_communication_mem().
2869	 */
2870	xbb->ring_config.va = xbb->kva
2871			    + (xbb->kva_size
2872			     - (xbb->ring_config.ring_pages * PAGE_SIZE));
2873	xbb->ring_config.gnt_addr = xbb->gnt_base_addr
2874				  + (xbb->kva_size
2875				   - (xbb->ring_config.ring_pages * PAGE_SIZE));
2876
2877	for (ring_idx = 0, gnt = gnts;
2878	     ring_idx < xbb->ring_config.ring_pages;
2879	     ring_idx++, gnt++) {
2880		gnt->host_addr = xbb->ring_config.gnt_addr
2881			       + (ring_idx * PAGE_SIZE);
2882		gnt->flags     = GNTMAP_host_map;
2883		gnt->ref       = xbb->ring_config.ring_ref[ring_idx];
2884		gnt->dom       = xbb->otherend_id;
2885	}
2886
2887	error = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, gnts,
2888					  xbb->ring_config.ring_pages);
2889	if (error)
2890		panic("blkback: Ring page grant table op failed (%d)", error);
2891
2892	for (ring_idx = 0, gnt = gnts;
2893	     ring_idx < xbb->ring_config.ring_pages;
2894	     ring_idx++, gnt++) {
2895		if (gnt->status != 0) {
2896			struct gnttab_unmap_grant_ref unmap[XBB_MAX_RING_PAGES];
2897			unsigned int i, j;
2898
2899			xbb->ring_config.va = 0;
2900			xenbus_dev_fatal(xbb->dev, EACCES,
2901					 "Ring shared page mapping failed. "
2902					 "Status %d.", gnt->status);
2903
2904			/* Unmap everything to avoid leaking grant table maps */
2905			for (i = 0, j = 0; i < xbb->ring_config.ring_pages;
2906			    i++) {
2907				if (gnts[i].status != GNTST_okay)
2908					continue;
2909
2910				unmap[j].host_addr = gnts[i].host_addr;
2911				unmap[j].dev_bus_addr = gnts[i].dev_bus_addr;
2912				unmap[j++].handle = gnts[i].handle;
2913			}
2914			if (j != 0) {
2915				error = HYPERVISOR_grant_table_op(
2916				    GNTTABOP_unmap_grant_ref, unmap, j);
2917				if (error != 0)
2918					panic("Unable to unmap grants (%d)",
2919					    error);
2920			}
2921			return (EACCES);
2922		}
2923		xbb->ring_config.handle[ring_idx]   = gnt->handle;
2924		xbb->ring_config.bus_addr[ring_idx] = gnt->dev_bus_addr;
2925	}
2926
2927	/* Initialize the ring based on ABI. */
2928	switch (xbb->abi) {
2929	case BLKIF_PROTOCOL_NATIVE:
2930	{
2931		blkif_sring_t *sring;
2932		sring = (blkif_sring_t *)xbb->ring_config.va;
2933		BACK_RING_INIT(&xbb->rings.native, sring,
2934			       xbb->ring_config.ring_pages * PAGE_SIZE);
2935		break;
2936	}
2937	case BLKIF_PROTOCOL_X86_32:
2938	{
2939		blkif_x86_32_sring_t *sring_x86_32;
2940		sring_x86_32 = (blkif_x86_32_sring_t *)xbb->ring_config.va;
2941		BACK_RING_INIT(&xbb->rings.x86_32, sring_x86_32,
2942			       xbb->ring_config.ring_pages * PAGE_SIZE);
2943		break;
2944	}
2945	case BLKIF_PROTOCOL_X86_64:
2946	{
2947		blkif_x86_64_sring_t *sring_x86_64;
2948		sring_x86_64 = (blkif_x86_64_sring_t *)xbb->ring_config.va;
2949		BACK_RING_INIT(&xbb->rings.x86_64, sring_x86_64,
2950			       xbb->ring_config.ring_pages * PAGE_SIZE);
2951		break;
2952	}
2953	default:
2954		panic("Unexpected blkif protocol ABI.");
2955	}
2956
2957	xbb->flags |= XBBF_RING_CONNECTED;
2958
2959	error = xen_intr_bind_remote_port(xbb->dev,
2960					  xbb->otherend_id,
2961					  xbb->ring_config.evtchn,
2962					  xbb_filter,
2963					  /*ithread_handler*/NULL,
2964					  /*arg*/xbb,
2965					  INTR_TYPE_BIO | INTR_MPSAFE,
2966					  &xbb->xen_intr_handle);
2967	if (error) {
2968		(void)xbb_disconnect(xbb);
2969		xenbus_dev_fatal(xbb->dev, error, "binding event channel");
2970		return (error);
2971	}
2972
2973	DPRINTF("rings connected!\n");
2974
2975	return 0;
2976}
2977
2978/**
2979 * Size KVA and pseudo-physical address allocations based on negotiated
2980 * values for the size and number of I/O requests, and the size of our
2981 * communication ring.
2982 *
2983 * \param xbb  Per-instance xbb configuration structure.
2984 *
2985 * These address spaces are used to dynamically map pages in the
2986 * front-end's domain into our own.
2987 */
2988static int
2989xbb_alloc_communication_mem(struct xbb_softc *xbb)
2990{
2991	xbb->reqlist_kva_pages = xbb->max_requests * xbb->max_request_segments;
2992	xbb->reqlist_kva_size = xbb->reqlist_kva_pages * PAGE_SIZE;
2993	xbb->kva_size = xbb->reqlist_kva_size +
2994			(xbb->ring_config.ring_pages * PAGE_SIZE);
2995
2996	xbb->kva_free = bit_alloc(xbb->reqlist_kva_pages, M_XENBLOCKBACK, M_NOWAIT);
2997	if (xbb->kva_free == NULL)
2998		return (ENOMEM);
2999
3000	DPRINTF("%s: kva_size = %d, reqlist_kva_size = %d\n",
3001		device_get_nameunit(xbb->dev), xbb->kva_size,
3002		xbb->reqlist_kva_size);
3003	/*
3004	 * Reserve a range of pseudo physical memory that we can map
3005	 * into kva.  These pages will only be backed by machine
3006	 * pages ("real memory") during the lifetime of front-end requests
3007	 * via grant table operations.
3008	 */
3009	xbb->pseudo_phys_res_id = 0;
3010	xbb->pseudo_phys_res = xenmem_alloc(xbb->dev, &xbb->pseudo_phys_res_id,
3011	    xbb->kva_size);
3012	if (xbb->pseudo_phys_res == NULL) {
3013		xbb->kva = 0;
3014		return (ENOMEM);
3015	}
3016	xbb->kva = (vm_offset_t)rman_get_virtual(xbb->pseudo_phys_res);
3017	xbb->gnt_base_addr = rman_get_start(xbb->pseudo_phys_res);
3018
3019	DPRINTF("%s: kva: %#jx, gnt_base_addr: %#jx\n",
3020		device_get_nameunit(xbb->dev), (uintmax_t)xbb->kva,
3021		(uintmax_t)xbb->gnt_base_addr);
3022	return (0);
3023}
3024
3025/**
3026 * Collect front-end information from the XenStore.
3027 *
3028 * \param xbb  Per-instance xbb configuration structure.
3029 */
3030static int
3031xbb_collect_frontend_info(struct xbb_softc *xbb)
3032{
3033	char	    protocol_abi[64];
3034	const char *otherend_path;
3035	int	    error;
3036	u_int	    ring_idx;
3037	u_int	    ring_page_order;
3038	size_t	    ring_size;
3039
3040	otherend_path = xenbus_get_otherend_path(xbb->dev);
3041
3042	/*
3043	 * Protocol defaults valid even if all negotiation fails.
3044	 */
3045	xbb->ring_config.ring_pages = 1;
3046	xbb->max_request_segments   = BLKIF_MAX_SEGMENTS_PER_REQUEST;
3047	xbb->max_request_size	    = xbb->max_request_segments * PAGE_SIZE;
3048
3049	/*
3050	 * Mandatory data (used in all versions of the protocol) first.
3051	 */
3052	error = xs_scanf(XST_NIL, otherend_path,
3053			 "event-channel", NULL, "%" PRIu32,
3054			 &xbb->ring_config.evtchn);
3055	if (error != 0) {
3056		xenbus_dev_fatal(xbb->dev, error,
3057				 "Unable to retrieve event-channel information "
3058				 "from frontend %s.  Unable to connect.",
3059				 xenbus_get_otherend_path(xbb->dev));
3060		return (error);
3061	}
3062
3063	/*
3064	 * These fields are initialized to legacy protocol defaults
3065	 * so we only need to fail if reading the updated value succeeds
3066	 * and the new value is outside of its allowed range.
3067	 *
3068	 * \note xs_gather() returns on the first encountered error, so
3069	 *       we must use independent calls in order to guarantee
3070	 *       we don't miss information in a sparsly populated front-end
3071	 *       tree.
3072	 *
3073	 * \note xs_scanf() does not update variables for unmatched
3074	 *       fields.
3075	 */
3076	ring_page_order = 0;
3077	xbb->max_requests = 32;
3078
3079	(void)xs_scanf(XST_NIL, otherend_path,
3080		       "ring-page-order", NULL, "%u",
3081		       &ring_page_order);
3082	xbb->ring_config.ring_pages = 1 << ring_page_order;
3083	ring_size = PAGE_SIZE * xbb->ring_config.ring_pages;
3084	xbb->max_requests = BLKIF_MAX_RING_REQUESTS(ring_size);
3085
3086	if (xbb->ring_config.ring_pages	> XBB_MAX_RING_PAGES) {
3087		xenbus_dev_fatal(xbb->dev, EINVAL,
3088				 "Front-end specified ring-pages of %u "
3089				 "exceeds backend limit of %u.  "
3090				 "Unable to connect.",
3091				 xbb->ring_config.ring_pages,
3092				 XBB_MAX_RING_PAGES);
3093		return (EINVAL);
3094	}
3095
3096	if (xbb->ring_config.ring_pages	== 1) {
3097		error = xs_gather(XST_NIL, otherend_path,
3098				  "ring-ref", "%" PRIu32,
3099				  &xbb->ring_config.ring_ref[0],
3100				  NULL);
3101		if (error != 0) {
3102			xenbus_dev_fatal(xbb->dev, error,
3103					 "Unable to retrieve ring information "
3104					 "from frontend %s.  Unable to "
3105					 "connect.",
3106					 xenbus_get_otherend_path(xbb->dev));
3107			return (error);
3108		}
3109	} else {
3110		/* Multi-page ring format. */
3111		for (ring_idx = 0; ring_idx < xbb->ring_config.ring_pages;
3112		     ring_idx++) {
3113			char ring_ref_name[]= "ring_refXX";
3114
3115			snprintf(ring_ref_name, sizeof(ring_ref_name),
3116				 "ring-ref%u", ring_idx);
3117			error = xs_scanf(XST_NIL, otherend_path,
3118					 ring_ref_name, NULL, "%" PRIu32,
3119					 &xbb->ring_config.ring_ref[ring_idx]);
3120			if (error != 0) {
3121				xenbus_dev_fatal(xbb->dev, error,
3122						 "Failed to retriev grant "
3123						 "reference for page %u of "
3124						 "shared ring.  Unable "
3125						 "to connect.", ring_idx);
3126				return (error);
3127			}
3128		}
3129	}
3130
3131	error = xs_gather(XST_NIL, otherend_path,
3132			  "protocol", "%63s", protocol_abi,
3133			  NULL);
3134	if (error != 0
3135	 || !strcmp(protocol_abi, XEN_IO_PROTO_ABI_NATIVE)) {
3136		/*
3137		 * Assume native if the frontend has not
3138		 * published ABI data or it has published and
3139		 * matches our own ABI.
3140		 */
3141		xbb->abi = BLKIF_PROTOCOL_NATIVE;
3142	} else if (!strcmp(protocol_abi, XEN_IO_PROTO_ABI_X86_32)) {
3143		xbb->abi = BLKIF_PROTOCOL_X86_32;
3144	} else if (!strcmp(protocol_abi, XEN_IO_PROTO_ABI_X86_64)) {
3145		xbb->abi = BLKIF_PROTOCOL_X86_64;
3146	} else {
3147		xenbus_dev_fatal(xbb->dev, EINVAL,
3148				 "Unknown protocol ABI (%s) published by "
3149				 "frontend.  Unable to connect.", protocol_abi);
3150		return (EINVAL);
3151	}
3152	return (0);
3153}
3154
3155/**
3156 * Allocate per-request data structures given request size and number
3157 * information negotiated with the front-end.
3158 *
3159 * \param xbb  Per-instance xbb configuration structure.
3160 */
3161static int
3162xbb_alloc_requests(struct xbb_softc *xbb)
3163{
3164	struct xbb_xen_req *req;
3165	struct xbb_xen_req *last_req;
3166
3167	/*
3168	 * Allocate request book keeping datastructures.
3169	 */
3170	xbb->requests = malloc(xbb->max_requests * sizeof(*xbb->requests),
3171			       M_XENBLOCKBACK, M_NOWAIT|M_ZERO);
3172	if (xbb->requests == NULL) {
3173		xenbus_dev_fatal(xbb->dev, ENOMEM,
3174				  "Unable to allocate request structures");
3175		return (ENOMEM);
3176	}
3177
3178	req      = xbb->requests;
3179	last_req = &xbb->requests[xbb->max_requests - 1];
3180	STAILQ_INIT(&xbb->request_free_stailq);
3181	while (req <= last_req) {
3182		STAILQ_INSERT_TAIL(&xbb->request_free_stailq, req, links);
3183		req++;
3184	}
3185	return (0);
3186}
3187
3188static int
3189xbb_alloc_request_lists(struct xbb_softc *xbb)
3190{
3191	struct xbb_xen_reqlist *reqlist;
3192	int			i;
3193
3194	/*
3195	 * If no requests can be merged, we need 1 request list per
3196	 * in flight request.
3197	 */
3198	xbb->request_lists = malloc(xbb->max_requests *
3199		sizeof(*xbb->request_lists), M_XENBLOCKBACK, M_NOWAIT|M_ZERO);
3200	if (xbb->request_lists == NULL) {
3201		xenbus_dev_fatal(xbb->dev, ENOMEM,
3202				  "Unable to allocate request list structures");
3203		return (ENOMEM);
3204	}
3205
3206	STAILQ_INIT(&xbb->reqlist_free_stailq);
3207	STAILQ_INIT(&xbb->reqlist_pending_stailq);
3208	for (i = 0; i < xbb->max_requests; i++) {
3209		int seg;
3210
3211		reqlist      = &xbb->request_lists[i];
3212
3213		reqlist->xbb = xbb;
3214
3215#ifdef XBB_USE_BOUNCE_BUFFERS
3216		reqlist->bounce = malloc(xbb->max_reqlist_size,
3217					 M_XENBLOCKBACK, M_NOWAIT);
3218		if (reqlist->bounce == NULL) {
3219			xenbus_dev_fatal(xbb->dev, ENOMEM,
3220					 "Unable to allocate request "
3221					 "bounce buffers");
3222			return (ENOMEM);
3223		}
3224#endif /* XBB_USE_BOUNCE_BUFFERS */
3225
3226		reqlist->gnt_handles = malloc(xbb->max_reqlist_segments *
3227					      sizeof(*reqlist->gnt_handles),
3228					      M_XENBLOCKBACK, M_NOWAIT|M_ZERO);
3229		if (reqlist->gnt_handles == NULL) {
3230			xenbus_dev_fatal(xbb->dev, ENOMEM,
3231					  "Unable to allocate request "
3232					  "grant references");
3233			return (ENOMEM);
3234		}
3235
3236		for (seg = 0; seg < xbb->max_reqlist_segments; seg++)
3237			reqlist->gnt_handles[seg] = GRANT_REF_INVALID;
3238
3239		STAILQ_INSERT_TAIL(&xbb->reqlist_free_stailq, reqlist, links);
3240	}
3241	return (0);
3242}
3243
3244/**
3245 * Supply information about the physical device to the frontend
3246 * via XenBus.
3247 *
3248 * \param xbb  Per-instance xbb configuration structure.
3249 */
3250static int
3251xbb_publish_backend_info(struct xbb_softc *xbb)
3252{
3253	struct xs_transaction xst;
3254	const char	     *our_path;
3255	const char	     *leaf;
3256	int		      error;
3257
3258	our_path = xenbus_get_node(xbb->dev);
3259	while (1) {
3260		error = xs_transaction_start(&xst);
3261		if (error != 0) {
3262			xenbus_dev_fatal(xbb->dev, error,
3263					 "Error publishing backend info "
3264					 "(start transaction)");
3265			return (error);
3266		}
3267
3268		leaf = "sectors";
3269		error = xs_printf(xst, our_path, leaf,
3270				  "%"PRIu64, xbb->media_num_sectors);
3271		if (error != 0)
3272			break;
3273
3274		/* XXX Support all VBD attributes here. */
3275		leaf = "info";
3276		error = xs_printf(xst, our_path, leaf, "%u",
3277				  xbb->flags & XBBF_READ_ONLY
3278				? VDISK_READONLY : 0);
3279		if (error != 0)
3280			break;
3281
3282		leaf = "sector-size";
3283		error = xs_printf(xst, our_path, leaf, "%u",
3284				  xbb->sector_size);
3285		if (error != 0)
3286			break;
3287
3288		error = xs_transaction_end(xst, 0);
3289		if (error == 0) {
3290			return (0);
3291		} else if (error != EAGAIN) {
3292			xenbus_dev_fatal(xbb->dev, error, "ending transaction");
3293			return (error);
3294		}
3295	}
3296
3297	xenbus_dev_fatal(xbb->dev, error, "writing %s/%s",
3298			our_path, leaf);
3299	xs_transaction_end(xst, 1);
3300	return (error);
3301}
3302
3303/**
3304 * Connect to our blkfront peer now that it has completed publishing
3305 * its configuration into the XenStore.
3306 *
3307 * \param xbb  Per-instance xbb configuration structure.
3308 */
3309static void
3310xbb_connect(struct xbb_softc *xbb)
3311{
3312	int error;
3313
3314	if (!xbb->hotplug_done ||
3315	    (xenbus_get_state(xbb->dev) != XenbusStateInitWait) ||
3316	    (xbb_collect_frontend_info(xbb) != 0))
3317		return;
3318
3319	xbb->flags &= ~XBBF_SHUTDOWN;
3320
3321	/*
3322	 * We limit the maximum number of reqlist segments to the maximum
3323	 * number of segments in the ring, or our absolute maximum,
3324	 * whichever is smaller.
3325	 */
3326	xbb->max_reqlist_segments = MIN(xbb->max_request_segments *
3327		xbb->max_requests, XBB_MAX_SEGMENTS_PER_REQLIST);
3328
3329	/*
3330	 * The maximum size is simply a function of the number of segments
3331	 * we can handle.
3332	 */
3333	xbb->max_reqlist_size = xbb->max_reqlist_segments * PAGE_SIZE;
3334
3335	/* Allocate resources whose size depends on front-end configuration. */
3336	error = xbb_alloc_communication_mem(xbb);
3337	if (error != 0) {
3338		xenbus_dev_fatal(xbb->dev, error,
3339				 "Unable to allocate communication memory");
3340		return;
3341	}
3342
3343	error = xbb_alloc_requests(xbb);
3344	if (error != 0) {
3345		/* Specific errors are reported by xbb_alloc_requests(). */
3346		return;
3347	}
3348
3349	error = xbb_alloc_request_lists(xbb);
3350	if (error != 0) {
3351		/* Specific errors are reported by xbb_alloc_request_lists(). */
3352		return;
3353	}
3354
3355	/*
3356	 * Connect communication channel.
3357	 */
3358	error = xbb_connect_ring(xbb);
3359	if (error != 0) {
3360		/* Specific errors are reported by xbb_connect_ring(). */
3361		return;
3362	}
3363
3364	if (xbb_publish_backend_info(xbb) != 0) {
3365		/*
3366		 * If we can't publish our data, we cannot participate
3367		 * in this connection, and waiting for a front-end state
3368		 * change will not help the situation.
3369		 */
3370		(void)xbb_disconnect(xbb);
3371		return;
3372	}
3373
3374	/* Ready for I/O. */
3375	xenbus_set_state(xbb->dev, XenbusStateConnected);
3376}
3377
3378/*-------------------------- Device Teardown Support -------------------------*/
3379/**
3380 * Perform device shutdown functions.
3381 *
3382 * \param xbb  Per-instance xbb configuration structure.
3383 *
3384 * Mark this instance as shutting down, wait for any active I/O on the
3385 * backend device/file to drain, disconnect from the front-end, and notify
3386 * any waiters (e.g. a thread invoking our detach method) that detach can
3387 * now proceed.
3388 */
3389static int
3390xbb_shutdown(struct xbb_softc *xbb)
3391{
3392	XenbusState frontState;
3393	int	    error;
3394
3395	DPRINTF("\n");
3396
3397	/*
3398	 * Due to the need to drop our mutex during some
3399	 * xenbus operations, it is possible for two threads
3400	 * to attempt to close out shutdown processing at
3401	 * the same time.  Tell the caller that hits this
3402	 * race to try back later.
3403	 */
3404	if ((xbb->flags & XBBF_IN_SHUTDOWN) != 0)
3405		return (EAGAIN);
3406
3407	xbb->flags |= XBBF_IN_SHUTDOWN;
3408	mtx_unlock(&xbb->lock);
3409
3410	if (xbb->hotplug_watch.node != NULL) {
3411		xs_unregister_watch(&xbb->hotplug_watch);
3412		free(xbb->hotplug_watch.node, M_XENBLOCKBACK);
3413		xbb->hotplug_watch.node = NULL;
3414	}
3415	xbb->hotplug_done = false;
3416
3417	if (xenbus_get_state(xbb->dev) < XenbusStateClosing)
3418		xenbus_set_state(xbb->dev, XenbusStateClosing);
3419
3420	frontState = xenbus_get_otherend_state(xbb->dev);
3421	mtx_lock(&xbb->lock);
3422	xbb->flags &= ~XBBF_IN_SHUTDOWN;
3423
3424	/* Wait for the frontend to disconnect (if it's connected). */
3425	if (frontState == XenbusStateConnected)
3426		return (EAGAIN);
3427
3428	DPRINTF("\n");
3429
3430	/* Indicate shutdown is in progress. */
3431	xbb->flags |= XBBF_SHUTDOWN;
3432
3433	/* Disconnect from the front-end. */
3434	error = xbb_disconnect(xbb);
3435	if (error != 0) {
3436		/*
3437		 * Requests still outstanding.  We'll be called again
3438		 * once they complete.
3439		 */
3440		KASSERT(error == EAGAIN,
3441			("%s: Unexpected xbb_disconnect() failure %d",
3442			 __func__, error));
3443
3444		return (error);
3445	}
3446
3447	DPRINTF("\n");
3448
3449	/* Indicate to xbb_detach() that is it safe to proceed. */
3450	wakeup(xbb);
3451
3452	return (0);
3453}
3454
3455/**
3456 * Report an attach time error to the console and Xen, and cleanup
3457 * this instance by forcing immediate detach processing.
3458 *
3459 * \param xbb  Per-instance xbb configuration structure.
3460 * \param err  Errno describing the error.
3461 * \param fmt  Printf style format and arguments
3462 */
3463static void
3464xbb_attach_failed(struct xbb_softc *xbb, int err, const char *fmt, ...)
3465{
3466	va_list ap;
3467	va_list ap_hotplug;
3468
3469	va_start(ap, fmt);
3470	va_copy(ap_hotplug, ap);
3471	xs_vprintf(XST_NIL, xenbus_get_node(xbb->dev),
3472		  "hotplug-error", fmt, ap_hotplug);
3473	va_end(ap_hotplug);
3474	xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3475		  "hotplug-status", "error");
3476
3477	xenbus_dev_vfatal(xbb->dev, err, fmt, ap);
3478	va_end(ap);
3479
3480	xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3481		  "online", "0");
3482	mtx_lock(&xbb->lock);
3483	xbb_shutdown(xbb);
3484	mtx_unlock(&xbb->lock);
3485}
3486
3487/*---------------------------- NewBus Entrypoints ----------------------------*/
3488/**
3489 * Inspect a XenBus device and claim it if is of the appropriate type.
3490 *
3491 * \param dev  NewBus device object representing a candidate XenBus device.
3492 *
3493 * \return  0 for success, errno codes for failure.
3494 */
3495static int
3496xbb_probe(device_t dev)
3497{
3498
3499        if (!strcmp(xenbus_get_type(dev), "vbd")) {
3500                device_set_desc(dev, "Backend Virtual Block Device");
3501                device_quiet(dev);
3502                return (0);
3503        }
3504
3505        return (ENXIO);
3506}
3507
3508/**
3509 * Setup sysctl variables to control various Block Back parameters.
3510 *
3511 * \param xbb  Xen Block Back softc.
3512 *
3513 */
3514static void
3515xbb_setup_sysctl(struct xbb_softc *xbb)
3516{
3517	struct sysctl_ctx_list *sysctl_ctx = NULL;
3518	struct sysctl_oid      *sysctl_tree = NULL;
3519
3520	sysctl_ctx = device_get_sysctl_ctx(xbb->dev);
3521	if (sysctl_ctx == NULL)
3522		return;
3523
3524	sysctl_tree = device_get_sysctl_tree(xbb->dev);
3525	if (sysctl_tree == NULL)
3526		return;
3527
3528	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3529		       "disable_flush", CTLFLAG_RW, &xbb->disable_flush, 0,
3530		       "fake the flush command");
3531
3532	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3533		       "flush_interval", CTLFLAG_RW, &xbb->flush_interval, 0,
3534		       "send a real flush for N flush requests");
3535
3536	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3537		       "no_coalesce_reqs", CTLFLAG_RW, &xbb->no_coalesce_reqs,0,
3538		       "Don't coalesce contiguous requests");
3539
3540	SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3541			 "reqs_received", CTLFLAG_RW, &xbb->reqs_received,
3542			 "how many I/O requests we have received");
3543
3544	SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3545			 "reqs_completed", CTLFLAG_RW, &xbb->reqs_completed,
3546			 "how many I/O requests have been completed");
3547
3548	SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3549			 "reqs_queued_for_completion", CTLFLAG_RW,
3550			 &xbb->reqs_queued_for_completion,
3551			 "how many I/O requests queued but not yet pushed");
3552
3553	SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3554			 "reqs_completed_with_error", CTLFLAG_RW,
3555			 &xbb->reqs_completed_with_error,
3556			 "how many I/O requests completed with error status");
3557
3558	SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3559			 "forced_dispatch", CTLFLAG_RW, &xbb->forced_dispatch,
3560			 "how many I/O dispatches were forced");
3561
3562	SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3563			 "normal_dispatch", CTLFLAG_RW, &xbb->normal_dispatch,
3564			 "how many I/O dispatches were normal");
3565
3566	SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3567			 "total_dispatch", CTLFLAG_RW, &xbb->total_dispatch,
3568			 "total number of I/O dispatches");
3569
3570	SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3571			 "kva_shortages", CTLFLAG_RW, &xbb->kva_shortages,
3572			 "how many times we have run out of KVA");
3573
3574	SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3575			 "request_shortages", CTLFLAG_RW,
3576			 &xbb->request_shortages,
3577			 "how many times we have run out of requests");
3578
3579	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3580		        "max_requests", CTLFLAG_RD, &xbb->max_requests, 0,
3581		        "maximum outstanding requests (negotiated)");
3582
3583	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3584		        "max_request_segments", CTLFLAG_RD,
3585		        &xbb->max_request_segments, 0,
3586		        "maximum number of pages per requests (negotiated)");
3587
3588	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3589		        "max_request_size", CTLFLAG_RD,
3590		        &xbb->max_request_size, 0,
3591		        "maximum size in bytes of a request (negotiated)");
3592
3593	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3594		        "ring_pages", CTLFLAG_RD,
3595		        &xbb->ring_config.ring_pages, 0,
3596		        "communication channel pages (negotiated)");
3597}
3598
3599static void
3600xbb_attach_disk(struct xs_watch *watch, const char **vec, unsigned int len)
3601{
3602	device_t		 dev;
3603	struct xbb_softc	*xbb;
3604	int			 error;
3605
3606	dev = (device_t) watch->callback_data;
3607	xbb = device_get_softc(dev);
3608
3609	error = xs_gather(XST_NIL, xenbus_get_node(dev), "physical-device-path",
3610	    NULL, &xbb->dev_name, NULL);
3611	if (error != 0)
3612		return;
3613
3614	xs_unregister_watch(watch);
3615	free(watch->node, M_XENBLOCKBACK);
3616	watch->node = NULL;
3617
3618	/* Collect physical device information. */
3619	error = xs_gather(XST_NIL, xenbus_get_otherend_path(xbb->dev),
3620			  "device-type", NULL, &xbb->dev_type,
3621			  NULL);
3622	if (error != 0)
3623		xbb->dev_type = NULL;
3624
3625	error = xs_gather(XST_NIL, xenbus_get_node(dev),
3626                          "mode", NULL, &xbb->dev_mode,
3627                          NULL);
3628	if (error != 0) {
3629		xbb_attach_failed(xbb, error, "reading backend fields at %s",
3630				  xenbus_get_node(dev));
3631                return;
3632        }
3633
3634	/* Parse fopen style mode flags. */
3635	if (strchr(xbb->dev_mode, 'w') == NULL)
3636		xbb->flags |= XBBF_READ_ONLY;
3637
3638	/*
3639	 * Verify the physical device is present and can support
3640	 * the desired I/O mode.
3641	 */
3642	error = xbb_open_backend(xbb);
3643	if (error != 0) {
3644		xbb_attach_failed(xbb, error, "Unable to open %s",
3645				  xbb->dev_name);
3646		return;
3647	}
3648
3649	/* Use devstat(9) for recording statistics. */
3650	xbb->xbb_stats = devstat_new_entry("xbb", device_get_unit(xbb->dev),
3651					   xbb->sector_size,
3652					   DEVSTAT_ALL_SUPPORTED,
3653					   DEVSTAT_TYPE_DIRECT
3654					 | DEVSTAT_TYPE_IF_OTHER,
3655					   DEVSTAT_PRIORITY_OTHER);
3656
3657	xbb->xbb_stats_in = devstat_new_entry("xbbi", device_get_unit(xbb->dev),
3658					      xbb->sector_size,
3659					      DEVSTAT_ALL_SUPPORTED,
3660					      DEVSTAT_TYPE_DIRECT
3661					    | DEVSTAT_TYPE_IF_OTHER,
3662					      DEVSTAT_PRIORITY_OTHER);
3663	/*
3664	 * Setup sysctl variables.
3665	 */
3666	xbb_setup_sysctl(xbb);
3667
3668	/*
3669	 * Create a taskqueue for doing work that must occur from a
3670	 * thread context.
3671	 */
3672	xbb->io_taskqueue = taskqueue_create_fast(device_get_nameunit(dev),
3673						  M_NOWAIT,
3674						  taskqueue_thread_enqueue,
3675						  /*contxt*/&xbb->io_taskqueue);
3676	if (xbb->io_taskqueue == NULL) {
3677		xbb_attach_failed(xbb, error, "Unable to create taskqueue");
3678		return;
3679	}
3680
3681	taskqueue_start_threads(&xbb->io_taskqueue,
3682				/*num threads*/1,
3683				/*priority*/PWAIT,
3684				/*thread name*/
3685				"%s taskq", device_get_nameunit(dev));
3686
3687	/* Update hot-plug status to satisfy xend. */
3688	error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3689			  "hotplug-status", "connected");
3690	if (error) {
3691		xbb_attach_failed(xbb, error, "writing %s/hotplug-status",
3692				  xenbus_get_node(xbb->dev));
3693		return;
3694	}
3695
3696	xbb->hotplug_done = true;
3697
3698	/* The front end might be waiting for the backend, attach if so. */
3699	if (xenbus_get_otherend_state(xbb->dev) == XenbusStateInitialised)
3700		xbb_connect(xbb);
3701}
3702
3703/**
3704 * Attach to a XenBus device that has been claimed by our probe routine.
3705 *
3706 * \param dev  NewBus device object representing this Xen Block Back instance.
3707 *
3708 * \return  0 for success, errno codes for failure.
3709 */
3710static int
3711xbb_attach(device_t dev)
3712{
3713	struct xbb_softc	*xbb;
3714	int			 error;
3715	u_int			 max_ring_page_order;
3716	struct sbuf		*watch_path;
3717
3718	DPRINTF("Attaching to %s\n", xenbus_get_node(dev));
3719
3720	/*
3721	 * Basic initialization.
3722	 * After this block it is safe to call xbb_detach()
3723	 * to clean up any allocated data for this instance.
3724	 */
3725	xbb = device_get_softc(dev);
3726	xbb->dev = dev;
3727	xbb->otherend_id = xenbus_get_otherend_id(dev);
3728	TASK_INIT(&xbb->io_task, /*priority*/0, xbb_run_queue, xbb);
3729	mtx_init(&xbb->lock, device_get_nameunit(dev), NULL, MTX_DEF);
3730
3731	/*
3732	 * Publish protocol capabilities for consumption by the
3733	 * front-end.
3734	 */
3735	error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3736			  "feature-barrier", "1");
3737	if (error) {
3738		xbb_attach_failed(xbb, error, "writing %s/feature-barrier",
3739				  xenbus_get_node(xbb->dev));
3740		return (error);
3741	}
3742
3743	error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3744			  "feature-flush-cache", "1");
3745	if (error) {
3746		xbb_attach_failed(xbb, error, "writing %s/feature-flush-cache",
3747				  xenbus_get_node(xbb->dev));
3748		return (error);
3749	}
3750
3751	max_ring_page_order = flsl(XBB_MAX_RING_PAGES) - 1;
3752	error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3753			  "max-ring-page-order", "%u", max_ring_page_order);
3754	if (error) {
3755		xbb_attach_failed(xbb, error, "writing %s/max-ring-page-order",
3756				  xenbus_get_node(xbb->dev));
3757		return (error);
3758	}
3759
3760	/*
3761	 * We need to wait for hotplug script execution before
3762	 * moving forward.
3763	 */
3764	KASSERT(!xbb->hotplug_done, ("Hotplug scripts already executed"));
3765	watch_path = xs_join(xenbus_get_node(xbb->dev), "physical-device-path");
3766	xbb->hotplug_watch.callback_data = (uintptr_t)dev;
3767	xbb->hotplug_watch.callback = xbb_attach_disk;
3768	KASSERT(xbb->hotplug_watch.node == NULL, ("watch node already setup"));
3769	xbb->hotplug_watch.node = strdup(sbuf_data(watch_path), M_XENBLOCKBACK);
3770	/*
3771	 * We don't care about the path updated, just about the value changes
3772	 * on that single node, hence there's no need to queue more that one
3773	 * event.
3774	 */
3775	xbb->hotplug_watch.max_pending = 1;
3776	sbuf_delete(watch_path);
3777	error = xs_register_watch(&xbb->hotplug_watch);
3778	if (error != 0) {
3779		xbb_attach_failed(xbb, error, "failed to create watch on %s",
3780		    xbb->hotplug_watch.node);
3781		free(xbb->hotplug_watch.node, M_XENBLOCKBACK);
3782		return (error);
3783	}
3784
3785	/* Tell the toolstack blkback has attached. */
3786	xenbus_set_state(dev, XenbusStateInitWait);
3787
3788	return (0);
3789}
3790
3791/**
3792 * Detach from a block back device instance.
3793 *
3794 * \param dev  NewBus device object representing this Xen Block Back instance.
3795 *
3796 * \return  0 for success, errno codes for failure.
3797 *
3798 * \note A block back device may be detached at any time in its life-cycle,
3799 *       including part way through the attach process.  For this reason,
3800 *       initialization order and the initialization state checks in this
3801 *       routine must be carefully coupled so that attach time failures
3802 *       are gracefully handled.
3803 */
3804static int
3805xbb_detach(device_t dev)
3806{
3807        struct xbb_softc *xbb;
3808
3809	DPRINTF("\n");
3810
3811        xbb = device_get_softc(dev);
3812	mtx_lock(&xbb->lock);
3813	while (xbb_shutdown(xbb) == EAGAIN) {
3814		msleep(xbb, &xbb->lock, /*wakeup prio unchanged*/0,
3815		       "xbb_shutdown", 0);
3816	}
3817	mtx_unlock(&xbb->lock);
3818
3819	DPRINTF("\n");
3820
3821	if (xbb->io_taskqueue != NULL)
3822		taskqueue_free(xbb->io_taskqueue);
3823
3824	if (xbb->xbb_stats != NULL)
3825		devstat_remove_entry(xbb->xbb_stats);
3826
3827	if (xbb->xbb_stats_in != NULL)
3828		devstat_remove_entry(xbb->xbb_stats_in);
3829
3830	xbb_close_backend(xbb);
3831
3832	if (xbb->dev_mode != NULL) {
3833		free(xbb->dev_mode, M_XENSTORE);
3834		xbb->dev_mode = NULL;
3835	}
3836
3837	if (xbb->dev_type != NULL) {
3838		free(xbb->dev_type, M_XENSTORE);
3839		xbb->dev_type = NULL;
3840	}
3841
3842	if (xbb->dev_name != NULL) {
3843		free(xbb->dev_name, M_XENSTORE);
3844		xbb->dev_name = NULL;
3845	}
3846
3847	mtx_destroy(&xbb->lock);
3848        return (0);
3849}
3850
3851/**
3852 * Prepare this block back device for suspension of this VM.
3853 *
3854 * \param dev  NewBus device object representing this Xen Block Back instance.
3855 *
3856 * \return  0 for success, errno codes for failure.
3857 */
3858static int
3859xbb_suspend(device_t dev)
3860{
3861#ifdef NOT_YET
3862        struct xbb_softc *sc = device_get_softc(dev);
3863
3864        /* Prevent new requests being issued until we fix things up. */
3865        mtx_lock(&sc->xb_io_lock);
3866        sc->connected = BLKIF_STATE_SUSPENDED;
3867        mtx_unlock(&sc->xb_io_lock);
3868#endif
3869
3870        return (0);
3871}
3872
3873/**
3874 * Perform any processing required to recover from a suspended state.
3875 *
3876 * \param dev  NewBus device object representing this Xen Block Back instance.
3877 *
3878 * \return  0 for success, errno codes for failure.
3879 */
3880static int
3881xbb_resume(device_t dev)
3882{
3883	return (0);
3884}
3885
3886/**
3887 * Handle state changes expressed via the XenStore by our front-end peer.
3888 *
3889 * \param dev             NewBus device object representing this Xen
3890 *                        Block Back instance.
3891 * \param frontend_state  The new state of the front-end.
3892 *
3893 * \return  0 for success, errno codes for failure.
3894 */
3895static void
3896xbb_frontend_changed(device_t dev, XenbusState frontend_state)
3897{
3898	struct xbb_softc *xbb = device_get_softc(dev);
3899
3900	DPRINTF("frontend_state=%s, xbb_state=%s\n",
3901	        xenbus_strstate(frontend_state),
3902		xenbus_strstate(xenbus_get_state(xbb->dev)));
3903
3904	switch (frontend_state) {
3905	case XenbusStateInitialising:
3906		break;
3907	case XenbusStateInitialised:
3908	case XenbusStateConnected:
3909		xbb_connect(xbb);
3910		break;
3911	case XenbusStateClosing:
3912	case XenbusStateClosed:
3913		mtx_lock(&xbb->lock);
3914		xbb_shutdown(xbb);
3915		mtx_unlock(&xbb->lock);
3916		if (frontend_state == XenbusStateClosed)
3917			xenbus_set_state(xbb->dev, XenbusStateClosed);
3918		break;
3919	default:
3920		xenbus_dev_fatal(xbb->dev, EINVAL, "saw state %d at frontend",
3921				 frontend_state);
3922		break;
3923	}
3924}
3925
3926/*---------------------------- NewBus Registration ---------------------------*/
3927static device_method_t xbb_methods[] = {
3928	/* Device interface */
3929	DEVMETHOD(device_probe,		xbb_probe),
3930	DEVMETHOD(device_attach,	xbb_attach),
3931	DEVMETHOD(device_detach,	xbb_detach),
3932	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
3933	DEVMETHOD(device_suspend,	xbb_suspend),
3934	DEVMETHOD(device_resume,	xbb_resume),
3935
3936	/* Xenbus interface */
3937	DEVMETHOD(xenbus_otherend_changed, xbb_frontend_changed),
3938	{ 0, 0 }
3939};
3940
3941static driver_t xbb_driver = {
3942        "xbbd",
3943        xbb_methods,
3944        sizeof(struct xbb_softc),
3945};
3946devclass_t xbb_devclass;
3947
3948DRIVER_MODULE(xbbd, xenbusb_back, xbb_driver, xbb_devclass, 0, 0);
3949