usb_transfer.c revision 278278
12061Sjkh/* $FreeBSD: stable/10/sys/dev/usb/usb_transfer.c 278278 2015-02-05 20:03:02Z hselasky $ */
215603Smarkm/*-
32061Sjkh * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
42061Sjkh *
515603Smarkm * Redistribution and use in source and binary forms, with or without
62061Sjkh * modification, are permitted provided that the following conditions
72061Sjkh * are met:
83197Scsgr * 1. Redistributions of source code must retain the above copyright
93197Scsgr *    notice, this list of conditions and the following disclaimer.
102061Sjkh * 2. Redistributions in binary form must reproduce the above copyright
1112483Speter *    notice, this list of conditions and the following disclaimer in the
122160Scsgr *    documentation and/or other materials provided with the distribution.
132834Swollman *
142061Sjkh * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
152061Sjkh * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
162160Scsgr * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
171594Srgrimes * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
182061Sjkh * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
192061Sjkh * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
201594Srgrimes * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
217407Srgrimes * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
227407Srgrimes * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
237108Sphk * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
247108Sphk * SUCH DAMAGE.
257108Sphk */
267407Srgrimes
277407Srgrimes#ifdef USB_GLOBAL_INCLUDE_FILE
287407Srgrimes#include USB_GLOBAL_INCLUDE_FILE
297108Sphk#else
302061Sjkh#include <sys/stdint.h>
312061Sjkh#include <sys/stddef.h>
322061Sjkh#include <sys/param.h>
332061Sjkh#include <sys/queue.h>
342061Sjkh#include <sys/types.h>
352061Sjkh#include <sys/systm.h>
362061Sjkh#include <sys/kernel.h>
372061Sjkh#include <sys/bus.h>
382061Sjkh#include <sys/module.h>
392061Sjkh#include <sys/lock.h>
402061Sjkh#include <sys/mutex.h>
412061Sjkh#include <sys/condvar.h>
423197Scsgr#include <sys/sysctl.h>
432626Scsgr#include <sys/sx.h>
442626Scsgr#include <sys/unistd.h>
452061Sjkh#include <sys/callout.h>
462061Sjkh#include <sys/malloc.h>
472061Sjkh#include <sys/priv.h>
482061Sjkh#include <sys/proc.h>
492061Sjkh
502061Sjkh#include <dev/usb/usb.h>
512061Sjkh#include <dev/usb/usbdi.h>
522061Sjkh#include <dev/usb/usbdi_util.h>
532061Sjkh
542061Sjkh#define	USB_DEBUG_VAR usb_debug
552061Sjkh
562061Sjkh#include <dev/usb/usb_core.h>
572061Sjkh#include <dev/usb/usb_busdma.h>
582061Sjkh#include <dev/usb/usb_process.h>
592061Sjkh#include <dev/usb/usb_transfer.h>
602061Sjkh#include <dev/usb/usb_device.h>
612061Sjkh#include <dev/usb/usb_debug.h>
622061Sjkh#include <dev/usb/usb_util.h>
632834Swollman
642834Swollman#include <dev/usb/usb_controller.h>
652834Swollman#include <dev/usb/usb_bus.h>
662834Swollman#include <dev/usb/usb_pf.h>
672834Swollman#endif			/* USB_GLOBAL_INCLUDE_FILE */
682834Swollman
691594Srgrimesstruct usb_std_packet_size {
704486Sphk	struct {
714486Sphk		uint16_t min;		/* inclusive */
724486Sphk		uint16_t max;		/* inclusive */
734486Sphk	}	range;
744486Sphk
752061Sjkh	uint16_t fixed[4];
762061Sjkh};
772061Sjkh
782061Sjkhstatic usb_callback_t usb_request_callback;
792061Sjkh
802061Sjkhstatic const struct usb_config usb_control_ep_cfg[USB_CTRL_XFER_MAX] = {
812061Sjkh
822061Sjkh	/* This transfer is used for generic control endpoint transfers */
832061Sjkh
842061Sjkh	[0] = {
852061Sjkh		.type = UE_CONTROL,
862061Sjkh		.endpoint = 0x00,	/* Control endpoint */
872061Sjkh		.direction = UE_DIR_ANY,
882061Sjkh		.bufsize = USB_EP0_BUFSIZE,	/* bytes */
892061Sjkh		.flags = {.proxy_buffer = 1,},
9012483Speter		.callback = &usb_request_callback,
9112483Speter		.usb_mode = USB_MODE_DUAL,	/* both modes */
9212483Speter	},
9312483Speter
9412483Speter	/* This transfer is used for generic clear stall only */
9512483Speter
962061Sjkh	[1] = {
972061Sjkh		.type = UE_CONTROL,
988854Srgrimes		.endpoint = 0x00,	/* Control pipe */
992061Sjkh		.direction = UE_DIR_ANY,
1002061Sjkh		.bufsize = sizeof(struct usb_device_request),
10112483Speter		.callback = &usb_do_clear_stall_callback,
1022061Sjkh		.timeout = 1000,	/* 1 second */
10315603Smarkm		.interval = 50,	/* 50ms */
10415603Smarkm		.usb_mode = USB_MODE_HOST,
10514787Spaul	},
1062061Sjkh};
1072061Sjkh
1082061Sjkh/* function prototypes */
1092061Sjkh
1102061Sjkhstatic void	usbd_update_max_frame_size(struct usb_xfer *);
1112061Sjkhstatic void	usbd_transfer_unsetup_sub(struct usb_xfer_root *, uint8_t);
11214545Sjkhstatic void	usbd_control_transfer_init(struct usb_xfer *);
1132061Sjkhstatic int	usbd_setup_ctrl_transfer(struct usb_xfer *);
11414787Spaulstatic void	usb_callback_proc(struct usb_proc_msg *);
11515603Smarkmstatic void	usbd_callback_ss_done_defer(struct usb_xfer *);
11615603Smarkmstatic void	usbd_callback_wrapper(struct usb_xfer_queue *);
11715603Smarkmstatic void	usbd_transfer_start_cb(void *);
11815603Smarkmstatic uint8_t	usbd_callback_wrapper_sub(struct usb_xfer *);
11914787Spaulstatic void	usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
12012483Speter		    uint8_t type, enum usb_dev_speed speed);
12112483Speter
12212483Speter/*------------------------------------------------------------------------*
12312483Speter *	usb_request_callback
12412483Speter *------------------------------------------------------------------------*/
12512483Speterstatic void
12612483Speterusb_request_callback(struct usb_xfer *xfer, usb_error_t error)
12712483Speter{
1283030Srgrimes	if (xfer->flags_int.usb_mode == USB_MODE_DEVICE)
1292061Sjkh		usb_handle_request_callback(xfer, error);
1303030Srgrimes	else
1312061Sjkh		usbd_do_request_callback(xfer, error);
1326722Sphk}
1332061Sjkh
1342302Spaul/*------------------------------------------------------------------------*
1352302Spaul *	usbd_update_max_frame_size
1362302Spaul *
1372302Spaul * This function updates the maximum frame size, hence high speed USB
1382302Spaul * can transfer multiple consecutive packets.
1392302Spaul *------------------------------------------------------------------------*/
14010760Sachestatic void
14110760Sacheusbd_update_max_frame_size(struct usb_xfer *xfer)
1422302Spaul{
14310760Sache	/* compute maximum frame size */
14410760Sache	/* this computation should not overflow 16-bit */
14510760Sache	/* max = 15 * 1024 */
14610760Sache
1472302Spaul	xfer->max_frame_size = xfer->max_packet_size * xfer->max_packet_count;
1482302Spaul}
1492302Spaul
1502302Spaul/*------------------------------------------------------------------------*
1512302Spaul *	usbd_get_dma_delay
1522302Spaul *
1532302Spaul * The following function is called when we need to
1542061Sjkh * synchronize with DMA hardware.
1552061Sjkh *
1562061Sjkh * Returns:
1572061Sjkh *    0: no DMA delay required
1582061Sjkh * Else: milliseconds of DMA delay
1592061Sjkh *------------------------------------------------------------------------*/
1602061Sjkhusb_timeout_t
1612061Sjkhusbd_get_dma_delay(struct usb_device *udev)
1622061Sjkh{
1632061Sjkh	struct usb_bus_methods *mtod;
1642061Sjkh	uint32_t temp;
1652061Sjkh
1662061Sjkh	mtod = udev->bus->methods;
1672061Sjkh	temp = 0;
1682061Sjkh
1692061Sjkh	if (mtod->get_dma_delay) {
1702061Sjkh		(mtod->get_dma_delay) (udev, &temp);
1712061Sjkh		/*
1722061Sjkh		 * Round up and convert to milliseconds. Note that we use
1732061Sjkh		 * 1024 milliseconds per second. to save a division.
1742061Sjkh		 */
1752061Sjkh		temp += 0x3FF;
1762061Sjkh		temp /= 0x400;
1772061Sjkh	}
1782061Sjkh	return (temp);
1792061Sjkh}
1803626Swollman
1813626Swollman/*------------------------------------------------------------------------*
1823626Swollman *	usbd_transfer_setup_sub_malloc
1833626Swollman *
1843626Swollman * This function will allocate one or more DMA'able memory chunks
1853626Swollman * according to "size", "align" and "count" arguments. "ppc" is
1863626Swollman * pointed to a linear array of USB page caches afterwards.
1873626Swollman *
1883626Swollman * If the "align" argument is equal to "1" a non-contiguous allocation
1893626Swollman * can happen. Else if the "align" argument is greater than "1", the
1903626Swollman * allocation will always be contiguous in memory.
1917059Sroberto *
1923626Swollman * Returns:
1933626Swollman *    0: Success
1943626Swollman * Else: Failure
1953626Swollman *------------------------------------------------------------------------*/
1963626Swollman#if USB_HAVE_BUSDMA
1973626Swollmanuint8_t
1983626Swollmanusbd_transfer_setup_sub_malloc(struct usb_setup_params *parm,
1993626Swollman    struct usb_page_cache **ppc, usb_size_t size, usb_size_t align,
2003626Swollman    usb_size_t count)
2013626Swollman{
2023626Swollman	struct usb_page_cache *pc;
2033626Swollman	struct usb_page *pg;
2043626Swollman	void *buf;
2053626Swollman	usb_size_t n_dma_pc;
2063626Swollman	usb_size_t n_dma_pg;
2073626Swollman	usb_size_t n_obj;
2083626Swollman	usb_size_t x;
2093626Swollman	usb_size_t y;
2107446Ssos	usb_size_t r;
2113626Swollman	usb_size_t z;
2123626Swollman
2133626Swollman	USB_ASSERT(align > 0, ("Invalid alignment, 0x%08x\n",
2143626Swollman	    align));
2153626Swollman	USB_ASSERT(size > 0, ("Invalid size = 0\n"));
2163626Swollman
2173626Swollman	if (count == 0) {
2182061Sjkh		return (0);		/* nothing to allocate */
2192061Sjkh	}
2202061Sjkh	/*
2212061Sjkh	 * Make sure that the size is aligned properly.
2222061Sjkh	 */
2232061Sjkh	size = -((-size) & (-align));
22414119Speter
2252061Sjkh	/*
2262061Sjkh	 * Try multi-allocation chunks to reduce the number of DMA
2272061Sjkh	 * allocations, hence DMA allocations are slow.
2282061Sjkh	 */
2292061Sjkh	if (align == 1) {
2307130Srgrimes		/* special case - non-cached multi page DMA memory */
2317130Srgrimes		n_dma_pc = count;
2327130Srgrimes		n_dma_pg = (2 + (size / USB_PAGE_SIZE));
2332061Sjkh		n_obj = 1;
2342061Sjkh	} else if (size >= USB_PAGE_SIZE) {
2354249Sache		n_dma_pc = count;
2362685Srgrimes		n_dma_pg = 1;
2376927Snate		n_obj = 1;
2382685Srgrimes	} else {
2393518Sache		/* compute number of objects per page */
2403197Scsgr		n_obj = (USB_PAGE_SIZE / size);
2413197Scsgr		/*
24212166Sjkh		 * Compute number of DMA chunks, rounded up
24312485Sjkh		 * to nearest one:
2443197Scsgr		 */
2452061Sjkh		n_dma_pc = ((count + n_obj - 1) / n_obj);
2462061Sjkh		n_dma_pg = 1;
2472061Sjkh	}
2482883Sphk
2493429Sache	/*
2503429Sache	 * DMA memory is allocated once, but mapped twice. That's why
2517281Srgrimes	 * there is one list for auto-free and another list for
2523242Spaul	 * non-auto-free which only holds the mapping and not the
2533242Spaul	 * allocation.
2547171Sats	 */
2552061Sjkh	if (parm->buf == NULL) {
2563213Spst		/* reserve memory (auto-free) */
2574942Sache		parm->dma_page_ptr += n_dma_pc * n_dma_pg;
2585749Swollman		parm->dma_page_cache_ptr += n_dma_pc;
2595772Swollman
2605865Sache		/* reserve memory (no-auto-free) */
2615866Sache		parm->dma_page_ptr += count * n_dma_pg;
26213138Speter		parm->xfer_page_cache_ptr += count;
2632061Sjkh		return (0);
2645366Snate	}
2655366Snate	for (x = 0; x != n_dma_pc; x++) {
2666934Sse		/* need to initialize the page cache */
2675366Snate		parm->dma_page_cache_ptr[x].tag_parent =
2685366Snate		    &parm->curr_xfer->xroot->dma_parent_tag;
26912507Snate	}
27015603Smarkm	for (x = 0; x != count; x++) {
2717292Srgrimes		/* need to initialize the page cache */
27215603Smarkm		parm->xfer_page_cache_ptr[x].tag_parent =
2735366Snate		    &parm->curr_xfer->xroot->dma_parent_tag;
27415603Smarkm	}
2755366Snate
27615603Smarkm	if (ppc) {
2775366Snate		*ppc = parm->xfer_page_cache_ptr;
27815603Smarkm	}
2795772Swollman	r = count;			/* set remainder count */
28015603Smarkm	z = n_obj * size;		/* set allocation size */
2815728Swollman	pc = parm->xfer_page_cache_ptr;
28215603Smarkm	pg = parm->dma_page_ptr;
2835728Swollman
2845728Swollman	for (x = 0; x != n_dma_pc; x++) {
28515603Smarkm
2865366Snate		if (r < n_obj) {
2872061Sjkh			/* compute last remainder */
2882061Sjkh			z = r * size;
2892061Sjkh			n_obj = r;
2902061Sjkh		}
2912061Sjkh		if (usb_pc_alloc_mem(parm->dma_page_cache_ptr,
2928295Srgrimes		    pg, z, align)) {
2938295Srgrimes			return (1);	/* failure */
29415603Smarkm		}
2958295Srgrimes		/* Set beginning of current buffer */
2968489Srgrimes		buf = parm->dma_page_cache_ptr->buffer;
2978489Srgrimes		/* Make room for one DMA page cache and one page */
29815603Smarkm		parm->dma_page_cache_ptr++;
2998489Srgrimes		pg += n_dma_pg;
3008489Srgrimes
3018489Srgrimes		for (y = 0; (y != n_obj); y++, r--, pc++, pg += n_dma_pg) {
30215603Smarkm
3038489Srgrimes			/* Load sub-chunk into DMA */
3048295Srgrimes			if (usb_pc_dmamap_create(pc, size)) {
3052468Spaul				return (1);	/* failure */
30615603Smarkm			}
3072273Spaul			pc->buffer = USB_ADD_BYTES(buf, y * size);
30815603Smarkm			pc->page_start = pg;
3098295Srgrimes
3102160Scsgr			mtx_lock(pc->tag_parent->mtx);
3112160Scsgr			if (usb_pc_load_mem(pc, size, 1 /* synchronous */ )) {
31215603Smarkm				mtx_unlock(pc->tag_parent->mtx);
3132160Scsgr				return (1);	/* failure */
3142279Spaul			}
3154054Spst			mtx_unlock(pc->tag_parent->mtx);
31615603Smarkm		}
3172061Sjkh	}
31815603Smarkm
3192279Spaul	parm->xfer_page_cache_ptr = pc;
32011772Snate	parm->dma_page_ptr = pg;
3212468Spaul	return (0);
32215603Smarkm}
32311772Snate#endif
3243197Scsgr
32510838Sjkh/*------------------------------------------------------------------------*
32615603Smarkm *	usbd_transfer_setup_sub - transfer setup subroutine
3272626Scsgr *
3288304Srgrimes * This function must be called from the "xfer_setup" callback of the
3298304Srgrimes * USB Host or Device controller driver when setting up an USB
33015603Smarkm * transfer. This function will setup correct packet sizes, buffer
3318304Srgrimes * sizes, flags and more, that are stored in the "usb_xfer"
3322061Sjkh * structure.
33313725Snate *------------------------------------------------------------------------*/
33413725Snatevoid
33513725Snateusbd_transfer_setup_sub(struct usb_setup_params *parm)
33613725Snate{
33713725Snate	enum {
33813726Snate		REQ_SIZE = 8,
33915603Smarkm		MIN_PKT = 8,
34013725Snate	};
34115603Smarkm	struct usb_xfer *xfer = parm->curr_xfer;
34213725Snate	const struct usb_config *setup = parm->curr_setup;
34311806Sphk	struct usb_endpoint_ss_comp_descriptor *ecomp;
3442061Sjkh	struct usb_endpoint_descriptor *edesc;
34512106Sjfieber	struct usb_std_packet_size std_size;
3462061Sjkh	usb_frcount_t n_frlengths;
3472061Sjkh	usb_frcount_t n_frbuffers;
3482273Spaul	usb_frcount_t x;
34915603Smarkm	uint16_t maxp_old;
3502061Sjkh	uint8_t type;
35115603Smarkm	uint8_t zmps;
35211769Sphk
35315603Smarkm	/*
35412106Sjfieber	 * Sanity check. The following parameters must be initialized before
35515603Smarkm	 * calling this function.
35612106Sjfieber	 */
35715603Smarkm	if ((parm->hc_max_packet_size == 0) ||
35810479Sdg	    (parm->hc_max_packet_count == 0) ||
35915603Smarkm	    (parm->hc_max_frame_size == 0)) {
3602061Sjkh		parm->err = USB_ERR_INVAL;
3611594Srgrimes		goto done;
362	}
363	edesc = xfer->endpoint->edesc;
364	ecomp = xfer->endpoint->ecomp;
365
366	type = (edesc->bmAttributes & UE_XFERTYPE);
367
368	xfer->flags = setup->flags;
369	xfer->nframes = setup->frames;
370	xfer->timeout = setup->timeout;
371	xfer->callback = setup->callback;
372	xfer->interval = setup->interval;
373	xfer->endpointno = edesc->bEndpointAddress;
374	xfer->max_packet_size = UGETW(edesc->wMaxPacketSize);
375	xfer->max_packet_count = 1;
376	/* make a shadow copy: */
377	xfer->flags_int.usb_mode = parm->udev->flags.usb_mode;
378
379	parm->bufsize = setup->bufsize;
380
381	switch (parm->speed) {
382	case USB_SPEED_HIGH:
383		switch (type) {
384		case UE_ISOCHRONOUS:
385		case UE_INTERRUPT:
386			xfer->max_packet_count +=
387			    (xfer->max_packet_size >> 11) & 3;
388
389			/* check for invalid max packet count */
390			if (xfer->max_packet_count > 3)
391				xfer->max_packet_count = 3;
392			break;
393		default:
394			break;
395		}
396		xfer->max_packet_size &= 0x7FF;
397		break;
398	case USB_SPEED_SUPER:
399		xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3;
400
401		if (ecomp != NULL)
402			xfer->max_packet_count += ecomp->bMaxBurst;
403
404		if ((xfer->max_packet_count == 0) ||
405		    (xfer->max_packet_count > 16))
406			xfer->max_packet_count = 16;
407
408		switch (type) {
409		case UE_CONTROL:
410			xfer->max_packet_count = 1;
411			break;
412		case UE_ISOCHRONOUS:
413			if (ecomp != NULL) {
414				uint8_t mult;
415
416				mult = UE_GET_SS_ISO_MULT(
417				    ecomp->bmAttributes) + 1;
418				if (mult > 3)
419					mult = 3;
420
421				xfer->max_packet_count *= mult;
422			}
423			break;
424		default:
425			break;
426		}
427		xfer->max_packet_size &= 0x7FF;
428		break;
429	default:
430		break;
431	}
432	/* range check "max_packet_count" */
433
434	if (xfer->max_packet_count > parm->hc_max_packet_count) {
435		xfer->max_packet_count = parm->hc_max_packet_count;
436	}
437
438	/* store max packet size value before filtering */
439
440	maxp_old = xfer->max_packet_size;
441
442	/* filter "wMaxPacketSize" according to HC capabilities */
443
444	if ((xfer->max_packet_size > parm->hc_max_packet_size) ||
445	    (xfer->max_packet_size == 0)) {
446		xfer->max_packet_size = parm->hc_max_packet_size;
447	}
448	/* filter "wMaxPacketSize" according to standard sizes */
449
450	usbd_get_std_packet_size(&std_size, type, parm->speed);
451
452	if (std_size.range.min || std_size.range.max) {
453
454		if (xfer->max_packet_size < std_size.range.min) {
455			xfer->max_packet_size = std_size.range.min;
456		}
457		if (xfer->max_packet_size > std_size.range.max) {
458			xfer->max_packet_size = std_size.range.max;
459		}
460	} else {
461
462		if (xfer->max_packet_size >= std_size.fixed[3]) {
463			xfer->max_packet_size = std_size.fixed[3];
464		} else if (xfer->max_packet_size >= std_size.fixed[2]) {
465			xfer->max_packet_size = std_size.fixed[2];
466		} else if (xfer->max_packet_size >= std_size.fixed[1]) {
467			xfer->max_packet_size = std_size.fixed[1];
468		} else {
469			/* only one possibility left */
470			xfer->max_packet_size = std_size.fixed[0];
471		}
472	}
473
474	/*
475	 * Check if the max packet size was outside its allowed range
476	 * and clamped to a valid value:
477	 */
478	if (maxp_old != xfer->max_packet_size)
479		xfer->flags_int.maxp_was_clamped = 1;
480
481	/* compute "max_frame_size" */
482
483	usbd_update_max_frame_size(xfer);
484
485	/* check interrupt interval and transfer pre-delay */
486
487	if (type == UE_ISOCHRONOUS) {
488
489		uint16_t frame_limit;
490
491		xfer->interval = 0;	/* not used, must be zero */
492		xfer->flags_int.isochronous_xfr = 1;	/* set flag */
493
494		if (xfer->timeout == 0) {
495			/*
496			 * set a default timeout in
497			 * case something goes wrong!
498			 */
499			xfer->timeout = 1000 / 4;
500		}
501		switch (parm->speed) {
502		case USB_SPEED_LOW:
503		case USB_SPEED_FULL:
504			frame_limit = USB_MAX_FS_ISOC_FRAMES_PER_XFER;
505			xfer->fps_shift = 0;
506			break;
507		default:
508			frame_limit = USB_MAX_HS_ISOC_FRAMES_PER_XFER;
509			xfer->fps_shift = edesc->bInterval;
510			if (xfer->fps_shift > 0)
511				xfer->fps_shift--;
512			if (xfer->fps_shift > 3)
513				xfer->fps_shift = 3;
514			if (xfer->flags.pre_scale_frames != 0)
515				xfer->nframes <<= (3 - xfer->fps_shift);
516			break;
517		}
518
519		if (xfer->nframes > frame_limit) {
520			/*
521			 * this is not going to work
522			 * cross hardware
523			 */
524			parm->err = USB_ERR_INVAL;
525			goto done;
526		}
527		if (xfer->nframes == 0) {
528			/*
529			 * this is not a valid value
530			 */
531			parm->err = USB_ERR_ZERO_NFRAMES;
532			goto done;
533		}
534	} else {
535
536		/*
537		 * If a value is specified use that else check the
538		 * endpoint descriptor!
539		 */
540		if (type == UE_INTERRUPT) {
541
542			uint32_t temp;
543
544			if (xfer->interval == 0) {
545
546				xfer->interval = edesc->bInterval;
547
548				switch (parm->speed) {
549				case USB_SPEED_LOW:
550				case USB_SPEED_FULL:
551					break;
552				default:
553					/* 125us -> 1ms */
554					if (xfer->interval < 4)
555						xfer->interval = 1;
556					else if (xfer->interval > 16)
557						xfer->interval = (1 << (16 - 4));
558					else
559						xfer->interval =
560						    (1 << (xfer->interval - 4));
561					break;
562				}
563			}
564
565			if (xfer->interval == 0) {
566				/*
567				 * One millisecond is the smallest
568				 * interval we support:
569				 */
570				xfer->interval = 1;
571			}
572
573			xfer->fps_shift = 0;
574			temp = 1;
575
576			while ((temp != 0) && (temp < xfer->interval)) {
577				xfer->fps_shift++;
578				temp *= 2;
579			}
580
581			switch (parm->speed) {
582			case USB_SPEED_LOW:
583			case USB_SPEED_FULL:
584				break;
585			default:
586				xfer->fps_shift += 3;
587				break;
588			}
589		}
590	}
591
592	/*
593	 * NOTE: we do not allow "max_packet_size" or "max_frame_size"
594	 * to be equal to zero when setting up USB transfers, hence
595	 * this leads to alot of extra code in the USB kernel.
596	 */
597
598	if ((xfer->max_frame_size == 0) ||
599	    (xfer->max_packet_size == 0)) {
600
601		zmps = 1;
602
603		if ((parm->bufsize <= MIN_PKT) &&
604		    (type != UE_CONTROL) &&
605		    (type != UE_BULK)) {
606
607			/* workaround */
608			xfer->max_packet_size = MIN_PKT;
609			xfer->max_packet_count = 1;
610			parm->bufsize = 0;	/* automatic setup length */
611			usbd_update_max_frame_size(xfer);
612
613		} else {
614			parm->err = USB_ERR_ZERO_MAXP;
615			goto done;
616		}
617
618	} else {
619		zmps = 0;
620	}
621
622	/*
623	 * check if we should setup a default
624	 * length:
625	 */
626
627	if (parm->bufsize == 0) {
628
629		parm->bufsize = xfer->max_frame_size;
630
631		if (type == UE_ISOCHRONOUS) {
632			parm->bufsize *= xfer->nframes;
633		}
634	}
635	/*
636	 * check if we are about to setup a proxy
637	 * type of buffer:
638	 */
639
640	if (xfer->flags.proxy_buffer) {
641
642		/* round bufsize up */
643
644		parm->bufsize += (xfer->max_frame_size - 1);
645
646		if (parm->bufsize < xfer->max_frame_size) {
647			/* length wrapped around */
648			parm->err = USB_ERR_INVAL;
649			goto done;
650		}
651		/* subtract remainder */
652
653		parm->bufsize -= (parm->bufsize % xfer->max_frame_size);
654
655		/* add length of USB device request structure, if any */
656
657		if (type == UE_CONTROL) {
658			parm->bufsize += REQ_SIZE;	/* SETUP message */
659		}
660	}
661	xfer->max_data_length = parm->bufsize;
662
663	/* Setup "n_frlengths" and "n_frbuffers" */
664
665	if (type == UE_ISOCHRONOUS) {
666		n_frlengths = xfer->nframes;
667		n_frbuffers = 1;
668	} else {
669
670		if (type == UE_CONTROL) {
671			xfer->flags_int.control_xfr = 1;
672			if (xfer->nframes == 0) {
673				if (parm->bufsize <= REQ_SIZE) {
674					/*
675					 * there will never be any data
676					 * stage
677					 */
678					xfer->nframes = 1;
679				} else {
680					xfer->nframes = 2;
681				}
682			}
683		} else {
684			if (xfer->nframes == 0) {
685				xfer->nframes = 1;
686			}
687		}
688
689		n_frlengths = xfer->nframes;
690		n_frbuffers = xfer->nframes;
691	}
692
693	/*
694	 * check if we have room for the
695	 * USB device request structure:
696	 */
697
698	if (type == UE_CONTROL) {
699
700		if (xfer->max_data_length < REQ_SIZE) {
701			/* length wrapped around or too small bufsize */
702			parm->err = USB_ERR_INVAL;
703			goto done;
704		}
705		xfer->max_data_length -= REQ_SIZE;
706	}
707	/*
708	 * Setup "frlengths" and shadow "frlengths" for keeping the
709	 * initial frame lengths when a USB transfer is complete. This
710	 * information is useful when computing isochronous offsets.
711	 */
712	xfer->frlengths = parm->xfer_length_ptr;
713	parm->xfer_length_ptr += 2 * n_frlengths;
714
715	/* setup "frbuffers" */
716	xfer->frbuffers = parm->xfer_page_cache_ptr;
717	parm->xfer_page_cache_ptr += n_frbuffers;
718
719	/* initialize max frame count */
720	xfer->max_frame_count = xfer->nframes;
721
722	/*
723	 * check if we need to setup
724	 * a local buffer:
725	 */
726
727	if (!xfer->flags.ext_buffer) {
728#if USB_HAVE_BUSDMA
729		struct usb_page_search page_info;
730		struct usb_page_cache *pc;
731
732		if (usbd_transfer_setup_sub_malloc(parm,
733		    &pc, parm->bufsize, 1, 1)) {
734			parm->err = USB_ERR_NOMEM;
735		} else if (parm->buf != NULL) {
736
737			usbd_get_page(pc, 0, &page_info);
738
739			xfer->local_buffer = page_info.buffer;
740
741			usbd_xfer_set_frame_offset(xfer, 0, 0);
742
743			if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
744				usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
745			}
746		}
747#else
748		/* align data */
749		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
750
751		if (parm->buf != NULL) {
752			xfer->local_buffer =
753			    USB_ADD_BYTES(parm->buf, parm->size[0]);
754
755			usbd_xfer_set_frame_offset(xfer, 0, 0);
756
757			if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
758				usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
759			}
760		}
761		parm->size[0] += parm->bufsize;
762
763		/* align data again */
764		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
765#endif
766	}
767	/*
768	 * Compute maximum buffer size
769	 */
770
771	if (parm->bufsize_max < parm->bufsize) {
772		parm->bufsize_max = parm->bufsize;
773	}
774#if USB_HAVE_BUSDMA
775	if (xfer->flags_int.bdma_enable) {
776		/*
777		 * Setup "dma_page_ptr".
778		 *
779		 * Proof for formula below:
780		 *
781		 * Assume there are three USB frames having length "a", "b" and
782		 * "c". These USB frames will at maximum need "z"
783		 * "usb_page" structures. "z" is given by:
784		 *
785		 * z = ((a / USB_PAGE_SIZE) + 2) + ((b / USB_PAGE_SIZE) + 2) +
786		 * ((c / USB_PAGE_SIZE) + 2);
787		 *
788		 * Constraining "a", "b" and "c" like this:
789		 *
790		 * (a + b + c) <= parm->bufsize
791		 *
792		 * We know that:
793		 *
794		 * z <= ((parm->bufsize / USB_PAGE_SIZE) + (3*2));
795		 *
796		 * Here is the general formula:
797		 */
798		xfer->dma_page_ptr = parm->dma_page_ptr;
799		parm->dma_page_ptr += (2 * n_frbuffers);
800		parm->dma_page_ptr += (parm->bufsize / USB_PAGE_SIZE);
801	}
802#endif
803	if (zmps) {
804		/* correct maximum data length */
805		xfer->max_data_length = 0;
806	}
807	/* subtract USB frame remainder from "hc_max_frame_size" */
808
809	xfer->max_hc_frame_size =
810	    (parm->hc_max_frame_size -
811	    (parm->hc_max_frame_size % xfer->max_frame_size));
812
813	if (xfer->max_hc_frame_size == 0) {
814		parm->err = USB_ERR_INVAL;
815		goto done;
816	}
817
818	/* initialize frame buffers */
819
820	if (parm->buf) {
821		for (x = 0; x != n_frbuffers; x++) {
822			xfer->frbuffers[x].tag_parent =
823			    &xfer->xroot->dma_parent_tag;
824#if USB_HAVE_BUSDMA
825			if (xfer->flags_int.bdma_enable &&
826			    (parm->bufsize_max > 0)) {
827
828				if (usb_pc_dmamap_create(
829				    xfer->frbuffers + x,
830				    parm->bufsize_max)) {
831					parm->err = USB_ERR_NOMEM;
832					goto done;
833				}
834			}
835#endif
836		}
837	}
838done:
839	if (parm->err) {
840		/*
841		 * Set some dummy values so that we avoid division by zero:
842		 */
843		xfer->max_hc_frame_size = 1;
844		xfer->max_frame_size = 1;
845		xfer->max_packet_size = 1;
846		xfer->max_data_length = 0;
847		xfer->nframes = 0;
848		xfer->max_frame_count = 0;
849	}
850}
851
852/*------------------------------------------------------------------------*
853 *	usbd_transfer_setup - setup an array of USB transfers
854 *
855 * NOTE: You must always call "usbd_transfer_unsetup" after calling
856 * "usbd_transfer_setup" if success was returned.
857 *
858 * The idea is that the USB device driver should pre-allocate all its
859 * transfers by one call to this function.
860 *
861 * Return values:
862 *    0: Success
863 * Else: Failure
864 *------------------------------------------------------------------------*/
865usb_error_t
866usbd_transfer_setup(struct usb_device *udev,
867    const uint8_t *ifaces, struct usb_xfer **ppxfer,
868    const struct usb_config *setup_start, uint16_t n_setup,
869    void *priv_sc, struct mtx *xfer_mtx)
870{
871	const struct usb_config *setup_end = setup_start + n_setup;
872	const struct usb_config *setup;
873	struct usb_setup_params *parm;
874	struct usb_endpoint *ep;
875	struct usb_xfer_root *info;
876	struct usb_xfer *xfer;
877	void *buf = NULL;
878	usb_error_t error = 0;
879	uint16_t n;
880	uint16_t refcount;
881	uint8_t do_unlock;
882
883	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
884	    "usbd_transfer_setup can sleep!");
885
886	/* do some checking first */
887
888	if (n_setup == 0) {
889		DPRINTFN(6, "setup array has zero length!\n");
890		return (USB_ERR_INVAL);
891	}
892	if (ifaces == 0) {
893		DPRINTFN(6, "ifaces array is NULL!\n");
894		return (USB_ERR_INVAL);
895	}
896	if (xfer_mtx == NULL) {
897		DPRINTFN(6, "using global lock\n");
898		xfer_mtx = &Giant;
899	}
900
901	/* more sanity checks */
902
903	for (setup = setup_start, n = 0;
904	    setup != setup_end; setup++, n++) {
905		if (setup->bufsize == (usb_frlength_t)-1) {
906			error = USB_ERR_BAD_BUFSIZE;
907			DPRINTF("invalid bufsize\n");
908		}
909		if (setup->callback == NULL) {
910			error = USB_ERR_NO_CALLBACK;
911			DPRINTF("no callback\n");
912		}
913		ppxfer[n] = NULL;
914	}
915
916	if (error)
917		return (error);
918
919	/* Protect scratch area */
920	do_unlock = usbd_enum_lock(udev);
921
922	refcount = 0;
923	info = NULL;
924
925	parm = &udev->scratch.xfer_setup[0].parm;
926	memset(parm, 0, sizeof(*parm));
927
928	parm->udev = udev;
929	parm->speed = usbd_get_speed(udev);
930	parm->hc_max_packet_count = 1;
931
932	if (parm->speed >= USB_SPEED_MAX) {
933		parm->err = USB_ERR_INVAL;
934		goto done;
935	}
936	/* setup all transfers */
937
938	while (1) {
939
940		if (buf) {
941			/*
942			 * Initialize the "usb_xfer_root" structure,
943			 * which is common for all our USB transfers.
944			 */
945			info = USB_ADD_BYTES(buf, 0);
946
947			info->memory_base = buf;
948			info->memory_size = parm->size[0];
949
950#if USB_HAVE_BUSDMA
951			info->dma_page_cache_start = USB_ADD_BYTES(buf, parm->size[4]);
952			info->dma_page_cache_end = USB_ADD_BYTES(buf, parm->size[5]);
953#endif
954			info->xfer_page_cache_start = USB_ADD_BYTES(buf, parm->size[5]);
955			info->xfer_page_cache_end = USB_ADD_BYTES(buf, parm->size[2]);
956
957			cv_init(&info->cv_drain, "WDRAIN");
958
959			info->xfer_mtx = xfer_mtx;
960#if USB_HAVE_BUSDMA
961			usb_dma_tag_setup(&info->dma_parent_tag,
962			    parm->dma_tag_p, udev->bus->dma_parent_tag[0].tag,
963			    xfer_mtx, &usb_bdma_done_event, udev->bus->dma_bits,
964			    parm->dma_tag_max);
965#endif
966
967			info->bus = udev->bus;
968			info->udev = udev;
969
970			TAILQ_INIT(&info->done_q.head);
971			info->done_q.command = &usbd_callback_wrapper;
972#if USB_HAVE_BUSDMA
973			TAILQ_INIT(&info->dma_q.head);
974			info->dma_q.command = &usb_bdma_work_loop;
975#endif
976			info->done_m[0].hdr.pm_callback = &usb_callback_proc;
977			info->done_m[0].xroot = info;
978			info->done_m[1].hdr.pm_callback = &usb_callback_proc;
979			info->done_m[1].xroot = info;
980
981			/*
982			 * In device side mode control endpoint
983			 * requests need to run from a separate
984			 * context, else there is a chance of
985			 * deadlock!
986			 */
987			if (setup_start == usb_control_ep_cfg)
988				info->done_p =
989				    USB_BUS_CONTROL_XFER_PROC(udev->bus);
990			else if (xfer_mtx == &Giant)
991				info->done_p =
992				    USB_BUS_GIANT_PROC(udev->bus);
993			else
994				info->done_p =
995				    USB_BUS_NON_GIANT_PROC(udev->bus);
996		}
997		/* reset sizes */
998
999		parm->size[0] = 0;
1000		parm->buf = buf;
1001		parm->size[0] += sizeof(info[0]);
1002
1003		for (setup = setup_start, n = 0;
1004		    setup != setup_end; setup++, n++) {
1005
1006			/* skip USB transfers without callbacks: */
1007			if (setup->callback == NULL) {
1008				continue;
1009			}
1010			/* see if there is a matching endpoint */
1011			ep = usbd_get_endpoint(udev,
1012			    ifaces[setup->if_index], setup);
1013
1014			/*
1015			 * Check that the USB PIPE is valid and that
1016			 * the endpoint mode is proper.
1017			 *
1018			 * Make sure we don't allocate a streams
1019			 * transfer when such a combination is not
1020			 * valid.
1021			 */
1022			if ((ep == NULL) || (ep->methods == NULL) ||
1023			    ((ep->ep_mode != USB_EP_MODE_STREAMS) &&
1024			    (ep->ep_mode != USB_EP_MODE_DEFAULT)) ||
1025			    (setup->stream_id != 0 &&
1026			    (setup->stream_id >= USB_MAX_EP_STREAMS ||
1027			    (ep->ep_mode != USB_EP_MODE_STREAMS)))) {
1028				if (setup->flags.no_pipe_ok)
1029					continue;
1030				if ((setup->usb_mode != USB_MODE_DUAL) &&
1031				    (setup->usb_mode != udev->flags.usb_mode))
1032					continue;
1033				parm->err = USB_ERR_NO_PIPE;
1034				goto done;
1035			}
1036
1037			/* align data properly */
1038			parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1039
1040			/* store current setup pointer */
1041			parm->curr_setup = setup;
1042
1043			if (buf) {
1044				/*
1045				 * Common initialization of the
1046				 * "usb_xfer" structure.
1047				 */
1048				xfer = USB_ADD_BYTES(buf, parm->size[0]);
1049				xfer->address = udev->address;
1050				xfer->priv_sc = priv_sc;
1051				xfer->xroot = info;
1052
1053				usb_callout_init_mtx(&xfer->timeout_handle,
1054				    &udev->bus->bus_mtx, 0);
1055			} else {
1056				/*
1057				 * Setup a dummy xfer, hence we are
1058				 * writing to the "usb_xfer"
1059				 * structure pointed to by "xfer"
1060				 * before we have allocated any
1061				 * memory:
1062				 */
1063				xfer = &udev->scratch.xfer_setup[0].dummy;
1064				memset(xfer, 0, sizeof(*xfer));
1065				refcount++;
1066			}
1067
1068			/* set transfer endpoint pointer */
1069			xfer->endpoint = ep;
1070
1071			/* set transfer stream ID */
1072			xfer->stream_id = setup->stream_id;
1073
1074			parm->size[0] += sizeof(xfer[0]);
1075			parm->methods = xfer->endpoint->methods;
1076			parm->curr_xfer = xfer;
1077
1078			/*
1079			 * Call the Host or Device controller transfer
1080			 * setup routine:
1081			 */
1082			(udev->bus->methods->xfer_setup) (parm);
1083
1084			/* check for error */
1085			if (parm->err)
1086				goto done;
1087
1088			if (buf) {
1089				/*
1090				 * Increment the endpoint refcount. This
1091				 * basically prevents setting a new
1092				 * configuration and alternate setting
1093				 * when USB transfers are in use on
1094				 * the given interface. Search the USB
1095				 * code for "endpoint->refcount_alloc" if you
1096				 * want more information.
1097				 */
1098				USB_BUS_LOCK(info->bus);
1099				if (xfer->endpoint->refcount_alloc >= USB_EP_REF_MAX)
1100					parm->err = USB_ERR_INVAL;
1101
1102				xfer->endpoint->refcount_alloc++;
1103
1104				if (xfer->endpoint->refcount_alloc == 0)
1105					panic("usbd_transfer_setup(): Refcount wrapped to zero\n");
1106				USB_BUS_UNLOCK(info->bus);
1107
1108				/*
1109				 * Whenever we set ppxfer[] then we
1110				 * also need to increment the
1111				 * "setup_refcount":
1112				 */
1113				info->setup_refcount++;
1114
1115				/*
1116				 * Transfer is successfully setup and
1117				 * can be used:
1118				 */
1119				ppxfer[n] = xfer;
1120			}
1121
1122			/* check for error */
1123			if (parm->err)
1124				goto done;
1125		}
1126
1127		if (buf != NULL || parm->err != 0)
1128			goto done;
1129
1130		/* if no transfers, nothing to do */
1131		if (refcount == 0)
1132			goto done;
1133
1134		/* align data properly */
1135		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1136
1137		/* store offset temporarily */
1138		parm->size[1] = parm->size[0];
1139
1140		/*
1141		 * The number of DMA tags required depends on
1142		 * the number of endpoints. The current estimate
1143		 * for maximum number of DMA tags per endpoint
1144		 * is three:
1145		 * 1) for loading memory
1146		 * 2) for allocating memory
1147		 * 3) for fixing memory [UHCI]
1148		 */
1149		parm->dma_tag_max += 3 * MIN(n_setup, USB_EP_MAX);
1150
1151		/*
1152		 * DMA tags for QH, TD, Data and more.
1153		 */
1154		parm->dma_tag_max += 8;
1155
1156		parm->dma_tag_p += parm->dma_tag_max;
1157
1158		parm->size[0] += ((uint8_t *)parm->dma_tag_p) -
1159		    ((uint8_t *)0);
1160
1161		/* align data properly */
1162		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1163
1164		/* store offset temporarily */
1165		parm->size[3] = parm->size[0];
1166
1167		parm->size[0] += ((uint8_t *)parm->dma_page_ptr) -
1168		    ((uint8_t *)0);
1169
1170		/* align data properly */
1171		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1172
1173		/* store offset temporarily */
1174		parm->size[4] = parm->size[0];
1175
1176		parm->size[0] += ((uint8_t *)parm->dma_page_cache_ptr) -
1177		    ((uint8_t *)0);
1178
1179		/* store end offset temporarily */
1180		parm->size[5] = parm->size[0];
1181
1182		parm->size[0] += ((uint8_t *)parm->xfer_page_cache_ptr) -
1183		    ((uint8_t *)0);
1184
1185		/* store end offset temporarily */
1186
1187		parm->size[2] = parm->size[0];
1188
1189		/* align data properly */
1190		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1191
1192		parm->size[6] = parm->size[0];
1193
1194		parm->size[0] += ((uint8_t *)parm->xfer_length_ptr) -
1195		    ((uint8_t *)0);
1196
1197		/* align data properly */
1198		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1199
1200		/* allocate zeroed memory */
1201		buf = malloc(parm->size[0], M_USB, M_WAITOK | M_ZERO);
1202
1203		if (buf == NULL) {
1204			parm->err = USB_ERR_NOMEM;
1205			DPRINTFN(0, "cannot allocate memory block for "
1206			    "configuration (%d bytes)\n",
1207			    parm->size[0]);
1208			goto done;
1209		}
1210		parm->dma_tag_p = USB_ADD_BYTES(buf, parm->size[1]);
1211		parm->dma_page_ptr = USB_ADD_BYTES(buf, parm->size[3]);
1212		parm->dma_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[4]);
1213		parm->xfer_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[5]);
1214		parm->xfer_length_ptr = USB_ADD_BYTES(buf, parm->size[6]);
1215	}
1216
1217done:
1218	if (buf) {
1219		if (info->setup_refcount == 0) {
1220			/*
1221			 * "usbd_transfer_unsetup_sub" will unlock
1222			 * the bus mutex before returning !
1223			 */
1224			USB_BUS_LOCK(info->bus);
1225
1226			/* something went wrong */
1227			usbd_transfer_unsetup_sub(info, 0);
1228		}
1229	}
1230
1231	/* check if any errors happened */
1232	if (parm->err)
1233		usbd_transfer_unsetup(ppxfer, n_setup);
1234
1235	error = parm->err;
1236
1237	if (do_unlock)
1238		usbd_enum_unlock(udev);
1239
1240	return (error);
1241}
1242
1243/*------------------------------------------------------------------------*
1244 *	usbd_transfer_unsetup_sub - factored out code
1245 *------------------------------------------------------------------------*/
1246static void
1247usbd_transfer_unsetup_sub(struct usb_xfer_root *info, uint8_t needs_delay)
1248{
1249#if USB_HAVE_BUSDMA
1250	struct usb_page_cache *pc;
1251#endif
1252
1253	USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
1254
1255	/* wait for any outstanding DMA operations */
1256
1257	if (needs_delay) {
1258		usb_timeout_t temp;
1259		temp = usbd_get_dma_delay(info->udev);
1260		if (temp != 0) {
1261			usb_pause_mtx(&info->bus->bus_mtx,
1262			    USB_MS_TO_TICKS(temp));
1263		}
1264	}
1265
1266	/* make sure that our done messages are not queued anywhere */
1267	usb_proc_mwait(info->done_p, &info->done_m[0], &info->done_m[1]);
1268
1269	USB_BUS_UNLOCK(info->bus);
1270
1271#if USB_HAVE_BUSDMA
1272	/* free DMA'able memory, if any */
1273	pc = info->dma_page_cache_start;
1274	while (pc != info->dma_page_cache_end) {
1275		usb_pc_free_mem(pc);
1276		pc++;
1277	}
1278
1279	/* free DMA maps in all "xfer->frbuffers" */
1280	pc = info->xfer_page_cache_start;
1281	while (pc != info->xfer_page_cache_end) {
1282		usb_pc_dmamap_destroy(pc);
1283		pc++;
1284	}
1285
1286	/* free all DMA tags */
1287	usb_dma_tag_unsetup(&info->dma_parent_tag);
1288#endif
1289
1290	cv_destroy(&info->cv_drain);
1291
1292	/*
1293	 * free the "memory_base" last, hence the "info" structure is
1294	 * contained within the "memory_base"!
1295	 */
1296	free(info->memory_base, M_USB);
1297}
1298
1299/*------------------------------------------------------------------------*
1300 *	usbd_transfer_unsetup - unsetup/free an array of USB transfers
1301 *
1302 * NOTE: All USB transfers in progress will get called back passing
1303 * the error code "USB_ERR_CANCELLED" before this function
1304 * returns.
1305 *------------------------------------------------------------------------*/
1306void
1307usbd_transfer_unsetup(struct usb_xfer **pxfer, uint16_t n_setup)
1308{
1309	struct usb_xfer *xfer;
1310	struct usb_xfer_root *info;
1311	uint8_t needs_delay = 0;
1312
1313	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1314	    "usbd_transfer_unsetup can sleep!");
1315
1316	while (n_setup--) {
1317		xfer = pxfer[n_setup];
1318
1319		if (xfer == NULL)
1320			continue;
1321
1322		info = xfer->xroot;
1323
1324		USB_XFER_LOCK(xfer);
1325		USB_BUS_LOCK(info->bus);
1326
1327		/*
1328		 * HINT: when you start/stop a transfer, it might be a
1329		 * good idea to directly use the "pxfer[]" structure:
1330		 *
1331		 * usbd_transfer_start(sc->pxfer[0]);
1332		 * usbd_transfer_stop(sc->pxfer[0]);
1333		 *
1334		 * That way, if your code has many parts that will not
1335		 * stop running under the same lock, in other words
1336		 * "xfer_mtx", the usbd_transfer_start and
1337		 * usbd_transfer_stop functions will simply return
1338		 * when they detect a NULL pointer argument.
1339		 *
1340		 * To avoid any races we clear the "pxfer[]" pointer
1341		 * while holding the private mutex of the driver:
1342		 */
1343		pxfer[n_setup] = NULL;
1344
1345		USB_BUS_UNLOCK(info->bus);
1346		USB_XFER_UNLOCK(xfer);
1347
1348		usbd_transfer_drain(xfer);
1349
1350#if USB_HAVE_BUSDMA
1351		if (xfer->flags_int.bdma_enable)
1352			needs_delay = 1;
1353#endif
1354		/*
1355		 * NOTE: default endpoint does not have an
1356		 * interface, even if endpoint->iface_index == 0
1357		 */
1358		USB_BUS_LOCK(info->bus);
1359		xfer->endpoint->refcount_alloc--;
1360		USB_BUS_UNLOCK(info->bus);
1361
1362		usb_callout_drain(&xfer->timeout_handle);
1363
1364		USB_BUS_LOCK(info->bus);
1365
1366		USB_ASSERT(info->setup_refcount != 0, ("Invalid setup "
1367		    "reference count\n"));
1368
1369		info->setup_refcount--;
1370
1371		if (info->setup_refcount == 0) {
1372			usbd_transfer_unsetup_sub(info,
1373			    needs_delay);
1374		} else {
1375			USB_BUS_UNLOCK(info->bus);
1376		}
1377	}
1378}
1379
1380/*------------------------------------------------------------------------*
1381 *	usbd_control_transfer_init - factored out code
1382 *
1383 * In USB Device Mode we have to wait for the SETUP packet which
1384 * containst the "struct usb_device_request" structure, before we can
1385 * transfer any data. In USB Host Mode we already have the SETUP
1386 * packet at the moment the USB transfer is started. This leads us to
1387 * having to setup the USB transfer at two different places in
1388 * time. This function just contains factored out control transfer
1389 * initialisation code, so that we don't duplicate the code.
1390 *------------------------------------------------------------------------*/
1391static void
1392usbd_control_transfer_init(struct usb_xfer *xfer)
1393{
1394	struct usb_device_request req;
1395
1396	/* copy out the USB request header */
1397
1398	usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));
1399
1400	/* setup remainder */
1401
1402	xfer->flags_int.control_rem = UGETW(req.wLength);
1403
1404	/* copy direction to endpoint variable */
1405
1406	xfer->endpointno &= ~(UE_DIR_IN | UE_DIR_OUT);
1407	xfer->endpointno |=
1408	    (req.bmRequestType & UT_READ) ? UE_DIR_IN : UE_DIR_OUT;
1409}
1410
1411/*------------------------------------------------------------------------*
1412 *	usbd_setup_ctrl_transfer
1413 *
1414 * This function handles initialisation of control transfers. Control
1415 * transfers are special in that regard that they can both transmit
1416 * and receive data.
1417 *
1418 * Return values:
1419 *    0: Success
1420 * Else: Failure
1421 *------------------------------------------------------------------------*/
1422static int
1423usbd_setup_ctrl_transfer(struct usb_xfer *xfer)
1424{
1425	usb_frlength_t len;
1426
1427	/* Check for control endpoint stall */
1428	if (xfer->flags.stall_pipe && xfer->flags_int.control_act) {
1429		/* the control transfer is no longer active */
1430		xfer->flags_int.control_stall = 1;
1431		xfer->flags_int.control_act = 0;
1432	} else {
1433		/* don't stall control transfer by default */
1434		xfer->flags_int.control_stall = 0;
1435	}
1436
1437	/* Check for invalid number of frames */
1438	if (xfer->nframes > 2) {
1439		/*
1440		 * If you need to split a control transfer, you
1441		 * have to do one part at a time. Only with
1442		 * non-control transfers you can do multiple
1443		 * parts a time.
1444		 */
1445		DPRINTFN(0, "Too many frames: %u\n",
1446		    (unsigned int)xfer->nframes);
1447		goto error;
1448	}
1449
1450	/*
1451         * Check if there is a control
1452         * transfer in progress:
1453         */
1454	if (xfer->flags_int.control_act) {
1455
1456		if (xfer->flags_int.control_hdr) {
1457
1458			/* clear send header flag */
1459
1460			xfer->flags_int.control_hdr = 0;
1461
1462			/* setup control transfer */
1463			if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1464				usbd_control_transfer_init(xfer);
1465			}
1466		}
1467		/* get data length */
1468
1469		len = xfer->sumlen;
1470
1471	} else {
1472
1473		/* the size of the SETUP structure is hardcoded ! */
1474
1475		if (xfer->frlengths[0] != sizeof(struct usb_device_request)) {
1476			DPRINTFN(0, "Wrong framelength %u != %zu\n",
1477			    xfer->frlengths[0], sizeof(struct
1478			    usb_device_request));
1479			goto error;
1480		}
1481		/* check USB mode */
1482		if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1483
1484			/* check number of frames */
1485			if (xfer->nframes != 1) {
1486				/*
1487			         * We need to receive the setup
1488			         * message first so that we know the
1489			         * data direction!
1490			         */
1491				DPRINTF("Misconfigured transfer\n");
1492				goto error;
1493			}
1494			/*
1495			 * Set a dummy "control_rem" value.  This
1496			 * variable will be overwritten later by a
1497			 * call to "usbd_control_transfer_init()" !
1498			 */
1499			xfer->flags_int.control_rem = 0xFFFF;
1500		} else {
1501
1502			/* setup "endpoint" and "control_rem" */
1503
1504			usbd_control_transfer_init(xfer);
1505		}
1506
1507		/* set transfer-header flag */
1508
1509		xfer->flags_int.control_hdr = 1;
1510
1511		/* get data length */
1512
1513		len = (xfer->sumlen - sizeof(struct usb_device_request));
1514	}
1515
1516	/* check if there is a length mismatch */
1517
1518	if (len > xfer->flags_int.control_rem) {
1519		DPRINTFN(0, "Length (%d) greater than "
1520		    "remaining length (%d)\n", len,
1521		    xfer->flags_int.control_rem);
1522		goto error;
1523	}
1524	/* check if we are doing a short transfer */
1525
1526	if (xfer->flags.force_short_xfer) {
1527		xfer->flags_int.control_rem = 0;
1528	} else {
1529		if ((len != xfer->max_data_length) &&
1530		    (len != xfer->flags_int.control_rem) &&
1531		    (xfer->nframes != 1)) {
1532			DPRINTFN(0, "Short control transfer without "
1533			    "force_short_xfer set\n");
1534			goto error;
1535		}
1536		xfer->flags_int.control_rem -= len;
1537	}
1538
1539	/* the status part is executed when "control_act" is 0 */
1540
1541	if ((xfer->flags_int.control_rem > 0) ||
1542	    (xfer->flags.manual_status)) {
1543		/* don't execute the STATUS stage yet */
1544		xfer->flags_int.control_act = 1;
1545
1546		/* sanity check */
1547		if ((!xfer->flags_int.control_hdr) &&
1548		    (xfer->nframes == 1)) {
1549			/*
1550		         * This is not a valid operation!
1551		         */
1552			DPRINTFN(0, "Invalid parameter "
1553			    "combination\n");
1554			goto error;
1555		}
1556	} else {
1557		/* time to execute the STATUS stage */
1558		xfer->flags_int.control_act = 0;
1559	}
1560	return (0);			/* success */
1561
1562error:
1563	return (1);			/* failure */
1564}
1565
1566/*------------------------------------------------------------------------*
1567 *	usbd_transfer_submit - start USB hardware for the given transfer
1568 *
1569 * This function should only be called from the USB callback.
1570 *------------------------------------------------------------------------*/
1571void
1572usbd_transfer_submit(struct usb_xfer *xfer)
1573{
1574	struct usb_xfer_root *info;
1575	struct usb_bus *bus;
1576	usb_frcount_t x;
1577
1578	info = xfer->xroot;
1579	bus = info->bus;
1580
1581	DPRINTF("xfer=%p, endpoint=%p, nframes=%d, dir=%s\n",
1582	    xfer, xfer->endpoint, xfer->nframes, USB_GET_DATA_ISREAD(xfer) ?
1583	    "read" : "write");
1584
1585#ifdef USB_DEBUG
1586	if (USB_DEBUG_VAR > 0) {
1587		USB_BUS_LOCK(bus);
1588
1589		usb_dump_endpoint(xfer->endpoint);
1590
1591		USB_BUS_UNLOCK(bus);
1592	}
1593#endif
1594
1595	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1596	USB_BUS_LOCK_ASSERT(bus, MA_NOTOWNED);
1597
1598	/* Only open the USB transfer once! */
1599	if (!xfer->flags_int.open) {
1600		xfer->flags_int.open = 1;
1601
1602		DPRINTF("open\n");
1603
1604		USB_BUS_LOCK(bus);
1605		(xfer->endpoint->methods->open) (xfer);
1606		USB_BUS_UNLOCK(bus);
1607	}
1608	/* set "transferring" flag */
1609	xfer->flags_int.transferring = 1;
1610
1611#if USB_HAVE_POWERD
1612	/* increment power reference */
1613	usbd_transfer_power_ref(xfer, 1);
1614#endif
1615	/*
1616	 * Check if the transfer is waiting on a queue, most
1617	 * frequently the "done_q":
1618	 */
1619	if (xfer->wait_queue) {
1620		USB_BUS_LOCK(bus);
1621		usbd_transfer_dequeue(xfer);
1622		USB_BUS_UNLOCK(bus);
1623	}
1624	/* clear "did_dma_delay" flag */
1625	xfer->flags_int.did_dma_delay = 0;
1626
1627	/* clear "did_close" flag */
1628	xfer->flags_int.did_close = 0;
1629
1630#if USB_HAVE_BUSDMA
1631	/* clear "bdma_setup" flag */
1632	xfer->flags_int.bdma_setup = 0;
1633#endif
1634	/* by default we cannot cancel any USB transfer immediately */
1635	xfer->flags_int.can_cancel_immed = 0;
1636
1637	/* clear lengths and frame counts by default */
1638	xfer->sumlen = 0;
1639	xfer->actlen = 0;
1640	xfer->aframes = 0;
1641
1642	/* clear any previous errors */
1643	xfer->error = 0;
1644
1645	/* Check if the device is still alive */
1646	if (info->udev->state < USB_STATE_POWERED) {
1647		USB_BUS_LOCK(bus);
1648		/*
1649		 * Must return cancelled error code else
1650		 * device drivers can hang.
1651		 */
1652		usbd_transfer_done(xfer, USB_ERR_CANCELLED);
1653		USB_BUS_UNLOCK(bus);
1654		return;
1655	}
1656
1657	/* sanity check */
1658	if (xfer->nframes == 0) {
1659		if (xfer->flags.stall_pipe) {
1660			/*
1661			 * Special case - want to stall without transferring
1662			 * any data:
1663			 */
1664			DPRINTF("xfer=%p nframes=0: stall "
1665			    "or clear stall!\n", xfer);
1666			USB_BUS_LOCK(bus);
1667			xfer->flags_int.can_cancel_immed = 1;
1668			/* start the transfer */
1669			usb_command_wrapper(&xfer->endpoint->
1670			    endpoint_q[xfer->stream_id], xfer);
1671			USB_BUS_UNLOCK(bus);
1672			return;
1673		}
1674		USB_BUS_LOCK(bus);
1675		usbd_transfer_done(xfer, USB_ERR_INVAL);
1676		USB_BUS_UNLOCK(bus);
1677		return;
1678	}
1679	/* compute some variables */
1680
1681	for (x = 0; x != xfer->nframes; x++) {
1682		/* make a copy of the frlenghts[] */
1683		xfer->frlengths[x + xfer->max_frame_count] = xfer->frlengths[x];
1684		/* compute total transfer length */
1685		xfer->sumlen += xfer->frlengths[x];
1686		if (xfer->sumlen < xfer->frlengths[x]) {
1687			/* length wrapped around */
1688			USB_BUS_LOCK(bus);
1689			usbd_transfer_done(xfer, USB_ERR_INVAL);
1690			USB_BUS_UNLOCK(bus);
1691			return;
1692		}
1693	}
1694
1695	/* clear some internal flags */
1696
1697	xfer->flags_int.short_xfer_ok = 0;
1698	xfer->flags_int.short_frames_ok = 0;
1699
1700	/* check if this is a control transfer */
1701
1702	if (xfer->flags_int.control_xfr) {
1703
1704		if (usbd_setup_ctrl_transfer(xfer)) {
1705			USB_BUS_LOCK(bus);
1706			usbd_transfer_done(xfer, USB_ERR_STALLED);
1707			USB_BUS_UNLOCK(bus);
1708			return;
1709		}
1710	}
1711	/*
1712	 * Setup filtered version of some transfer flags,
1713	 * in case of data read direction
1714	 */
1715	if (USB_GET_DATA_ISREAD(xfer)) {
1716
1717		if (xfer->flags.short_frames_ok) {
1718			xfer->flags_int.short_xfer_ok = 1;
1719			xfer->flags_int.short_frames_ok = 1;
1720		} else if (xfer->flags.short_xfer_ok) {
1721			xfer->flags_int.short_xfer_ok = 1;
1722
1723			/* check for control transfer */
1724			if (xfer->flags_int.control_xfr) {
1725				/*
1726				 * 1) Control transfers do not support
1727				 * reception of multiple short USB
1728				 * frames in host mode and device side
1729				 * mode, with exception of:
1730				 *
1731				 * 2) Due to sometimes buggy device
1732				 * side firmware we need to do a
1733				 * STATUS stage in case of short
1734				 * control transfers in USB host mode.
1735				 * The STATUS stage then becomes the
1736				 * "alt_next" to the DATA stage.
1737				 */
1738				xfer->flags_int.short_frames_ok = 1;
1739			}
1740		}
1741	}
1742	/*
1743	 * Check if BUS-DMA support is enabled and try to load virtual
1744	 * buffers into DMA, if any:
1745	 */
1746#if USB_HAVE_BUSDMA
1747	if (xfer->flags_int.bdma_enable) {
1748		/* insert the USB transfer last in the BUS-DMA queue */
1749		usb_command_wrapper(&xfer->xroot->dma_q, xfer);
1750		return;
1751	}
1752#endif
1753	/*
1754	 * Enter the USB transfer into the Host Controller or
1755	 * Device Controller schedule:
1756	 */
1757	usbd_pipe_enter(xfer);
1758}
1759
1760/*------------------------------------------------------------------------*
1761 *	usbd_pipe_enter - factored out code
1762 *------------------------------------------------------------------------*/
1763void
1764usbd_pipe_enter(struct usb_xfer *xfer)
1765{
1766	struct usb_endpoint *ep;
1767
1768	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1769
1770	USB_BUS_LOCK(xfer->xroot->bus);
1771
1772	ep = xfer->endpoint;
1773
1774	DPRINTF("enter\n");
1775
1776	/* the transfer can now be cancelled */
1777	xfer->flags_int.can_cancel_immed = 1;
1778
1779	/* enter the transfer */
1780	(ep->methods->enter) (xfer);
1781
1782	/* check for transfer error */
1783	if (xfer->error) {
1784		/* some error has happened */
1785		usbd_transfer_done(xfer, 0);
1786		USB_BUS_UNLOCK(xfer->xroot->bus);
1787		return;
1788	}
1789
1790	/* start the transfer */
1791	usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], xfer);
1792	USB_BUS_UNLOCK(xfer->xroot->bus);
1793}
1794
1795/*------------------------------------------------------------------------*
1796 *	usbd_transfer_start - start an USB transfer
1797 *
1798 * NOTE: Calling this function more than one time will only
1799 *       result in a single transfer start, until the USB transfer
1800 *       completes.
1801 *------------------------------------------------------------------------*/
1802void
1803usbd_transfer_start(struct usb_xfer *xfer)
1804{
1805	if (xfer == NULL) {
1806		/* transfer is gone */
1807		return;
1808	}
1809	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1810
1811	/* mark the USB transfer started */
1812
1813	if (!xfer->flags_int.started) {
1814		/* lock the BUS lock to avoid races updating flags_int */
1815		USB_BUS_LOCK(xfer->xroot->bus);
1816		xfer->flags_int.started = 1;
1817		USB_BUS_UNLOCK(xfer->xroot->bus);
1818	}
1819	/* check if the USB transfer callback is already transferring */
1820
1821	if (xfer->flags_int.transferring) {
1822		return;
1823	}
1824	USB_BUS_LOCK(xfer->xroot->bus);
1825	/* call the USB transfer callback */
1826	usbd_callback_ss_done_defer(xfer);
1827	USB_BUS_UNLOCK(xfer->xroot->bus);
1828}
1829
1830/*------------------------------------------------------------------------*
1831 *	usbd_transfer_stop - stop an USB transfer
1832 *
1833 * NOTE: Calling this function more than one time will only
1834 *       result in a single transfer stop.
1835 * NOTE: When this function returns it is not safe to free nor
1836 *       reuse any DMA buffers. See "usbd_transfer_drain()".
1837 *------------------------------------------------------------------------*/
1838void
1839usbd_transfer_stop(struct usb_xfer *xfer)
1840{
1841	struct usb_endpoint *ep;
1842
1843	if (xfer == NULL) {
1844		/* transfer is gone */
1845		return;
1846	}
1847	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1848
1849	/* check if the USB transfer was ever opened */
1850
1851	if (!xfer->flags_int.open) {
1852		if (xfer->flags_int.started) {
1853			/* nothing to do except clearing the "started" flag */
1854			/* lock the BUS lock to avoid races updating flags_int */
1855			USB_BUS_LOCK(xfer->xroot->bus);
1856			xfer->flags_int.started = 0;
1857			USB_BUS_UNLOCK(xfer->xroot->bus);
1858		}
1859		return;
1860	}
1861	/* try to stop the current USB transfer */
1862
1863	USB_BUS_LOCK(xfer->xroot->bus);
1864	/* override any previous error */
1865	xfer->error = USB_ERR_CANCELLED;
1866
1867	/*
1868	 * Clear "open" and "started" while both the private and USB bus
1869	 * locks are held, so that we don't race when updating "flags_int".
1870	 */
1871	xfer->flags_int.open = 0;
1872	xfer->flags_int.started = 0;
1873
1874	/*
1875	 * Check if we can cancel the USB transfer immediately.
1876	 */
1877	if (xfer->flags_int.transferring) {
1878		if (xfer->flags_int.can_cancel_immed &&
1879		    (!xfer->flags_int.did_close)) {
1880			DPRINTF("close\n");
1881			/*
1882			 * The following will lead to a USB_ERR_CANCELLED
1883			 * error code being passed to the USB callback.
1884			 */
1885			(xfer->endpoint->methods->close) (xfer);
1886			/* only close once */
1887			xfer->flags_int.did_close = 1;
1888		} else {
1889			/* need to wait for the next done callback */
1890		}
1891	} else {
1892		DPRINTF("close\n");
1893
1894		/* close here and now */
1895		(xfer->endpoint->methods->close) (xfer);
1896
1897		/*
1898		 * Any additional DMA delay is done by
1899		 * "usbd_transfer_unsetup()".
1900		 */
1901
1902		/*
1903		 * Special case. Check if we need to restart a blocked
1904		 * endpoint.
1905		 */
1906		ep = xfer->endpoint;
1907
1908		/*
1909		 * If the current USB transfer is completing we need
1910		 * to start the next one:
1911		 */
1912		if (ep->endpoint_q[xfer->stream_id].curr == xfer) {
1913			usb_command_wrapper(
1914			    &ep->endpoint_q[xfer->stream_id], NULL);
1915		}
1916	}
1917
1918	USB_BUS_UNLOCK(xfer->xroot->bus);
1919}
1920
1921/*------------------------------------------------------------------------*
1922 *	usbd_transfer_pending
1923 *
1924 * This function will check if a USB transfer is pending, which is a
1925 * little bit complicated.
1926 * Return values:
1927 * 0: Not pending
1928 * 1: Pending: The USB transfer will receive a callback in the future.
1929 *------------------------------------------------------------------------*/
1930uint8_t
1931usbd_transfer_pending(struct usb_xfer *xfer)
1932{
1933	struct usb_xfer_root *info;
1934	struct usb_xfer_queue *pq;
1935
1936	if (xfer == NULL) {
1937		/* transfer is gone */
1938		return (0);
1939	}
1940	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1941
1942	if (xfer->flags_int.transferring) {
1943		/* trivial case */
1944		return (1);
1945	}
1946	USB_BUS_LOCK(xfer->xroot->bus);
1947	if (xfer->wait_queue) {
1948		/* we are waiting on a queue somewhere */
1949		USB_BUS_UNLOCK(xfer->xroot->bus);
1950		return (1);
1951	}
1952	info = xfer->xroot;
1953	pq = &info->done_q;
1954
1955	if (pq->curr == xfer) {
1956		/* we are currently scheduled for callback */
1957		USB_BUS_UNLOCK(xfer->xroot->bus);
1958		return (1);
1959	}
1960	/* we are not pending */
1961	USB_BUS_UNLOCK(xfer->xroot->bus);
1962	return (0);
1963}
1964
1965/*------------------------------------------------------------------------*
1966 *	usbd_transfer_drain
1967 *
1968 * This function will stop the USB transfer and wait for any
1969 * additional BUS-DMA and HW-DMA operations to complete. Buffers that
1970 * are loaded into DMA can safely be freed or reused after this
1971 * function has returned.
1972 *------------------------------------------------------------------------*/
1973void
1974usbd_transfer_drain(struct usb_xfer *xfer)
1975{
1976	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1977	    "usbd_transfer_drain can sleep!");
1978
1979	if (xfer == NULL) {
1980		/* transfer is gone */
1981		return;
1982	}
1983	if (xfer->xroot->xfer_mtx != &Giant) {
1984		USB_XFER_LOCK_ASSERT(xfer, MA_NOTOWNED);
1985	}
1986	USB_XFER_LOCK(xfer);
1987
1988	usbd_transfer_stop(xfer);
1989
1990	while (usbd_transfer_pending(xfer) ||
1991	    xfer->flags_int.doing_callback) {
1992
1993		/*
1994		 * It is allowed that the callback can drop its
1995		 * transfer mutex. In that case checking only
1996		 * "usbd_transfer_pending()" is not enough to tell if
1997		 * the USB transfer is fully drained. We also need to
1998		 * check the internal "doing_callback" flag.
1999		 */
2000		xfer->flags_int.draining = 1;
2001
2002		/*
2003		 * Wait until the current outstanding USB
2004		 * transfer is complete.
2005		 */
2006		cv_wait(&xfer->xroot->cv_drain, xfer->xroot->xfer_mtx);
2007	}
2008	USB_XFER_UNLOCK(xfer);
2009}
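/*
 * Editor's note: a sketch of a typical driver detach path built on
 * "usbd_transfer_drain()", using the same hypothetical "sc" softc as
 * in the example above. The transfer mutex must not be held here,
 * because draining can sleep:
 *
 *	usbd_transfer_drain(sc->sc_xfer[0]);
 *
 *	... externally loaded DMA buffers may now be freed or reused ...
 *
 * Note that "usbd_transfer_unsetup()" also drains the transfer
 * before freeing it.
 */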
2010
2011struct usb_page_cache *
2012usbd_xfer_get_frame(struct usb_xfer *xfer, usb_frcount_t frindex)
2013{
2014	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2015
2016	return (&xfer->frbuffers[frindex]);
2017}
2018
2019void *
2020usbd_xfer_get_frame_buffer(struct usb_xfer *xfer, usb_frcount_t frindex)
2021{
2022	struct usb_page_search page_info;
2023
2024	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2025
2026	usbd_get_page(&xfer->frbuffers[frindex], 0, &page_info);
2027	return (page_info.buffer);
2028}
2029
2030/*------------------------------------------------------------------------*
2031 *	usbd_xfer_get_fps_shift
2032 *
2033 * The following function is only useful for isochronous transfers. It
2034 * returns how many times the frame execution rate has been shifted
2035 * down.
2036 *
2037 * Return value:
2038 * Success: 0..3
2039 * Failure: 0
2040 *------------------------------------------------------------------------*/
2041uint8_t
2042usbd_xfer_get_fps_shift(struct usb_xfer *xfer)
2043{
2044	return (xfer->fps_shift);
2045}
2046
2047usb_frlength_t
2048usbd_xfer_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex)
2049{
2050	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2051
2052	return (xfer->frlengths[frindex]);
2053}
2054
2055/*------------------------------------------------------------------------*
2056 *	usbd_xfer_set_frame_data
2057 *
2058 * This function sets the pointer to the buffer that should be
2059 * loaded directly into DMA for the given USB frame. Passing "ptr"
2060 * equal to NULL while the corresponding "frlength" is greater
2061 * than zero gives undefined results!
2062 *------------------------------------------------------------------------*/
2063void
2064usbd_xfer_set_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
2065    void *ptr, usb_frlength_t len)
2066{
2067	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2068
2069	/* set virtual address to load and length */
2070	xfer->frbuffers[frindex].buffer = ptr;
2071	usbd_xfer_set_frame_len(xfer, frindex, len);
2072}
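/*
 * Editor's note: a brief sketch of how "usbd_xfer_set_frame_data()"
 * is commonly used from the USB_ST_SETUP case of a transfer callback,
 * assuming a hypothetical driver buffer "sc->sc_buf" of
 * "sc->sc_buflen" bytes and a usb_config with the ".ext_buffer" flag
 * set:
 *
 *	case USB_ST_SETUP:
 *		usbd_xfer_set_frame_data(xfer, 0, sc->sc_buf,
 *		    sc->sc_buflen);
 *		usbd_xfer_set_frames(xfer, 1);
 *		usbd_transfer_submit(xfer);
 *		break;
 */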
2073
2074void
2075usbd_xfer_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
2076    void **ptr, int *len)
2077{
2078	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2079
2080	if (ptr != NULL)
2081		*ptr = xfer->frbuffers[frindex].buffer;
2082	if (len != NULL)
2083		*len = xfer->frlengths[frindex];
2084}
2085
2086/*------------------------------------------------------------------------*
2087 *	usbd_xfer_old_frame_length
2088 *
2089 * This function returns the frame length of the given frame at the
2090 * time the transfer was submitted. This function can be used to
2091 * compute the starting data pointer of the next isochronous frame
2092 * when an isochronous transfer has completed.
2093 *------------------------------------------------------------------------*/
2094usb_frlength_t
2095usbd_xfer_old_frame_length(struct usb_xfer *xfer, usb_frcount_t frindex)
2096{
2097	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2098
2099	return (xfer->frlengths[frindex + xfer->max_frame_count]);
2100}
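/*
 * Editor's note: a hedged example of walking a completed isochronous
 * transfer, assuming the frames were laid out back-to-back in a
 * hypothetical contiguous buffer "sc->sc_rx_buf" at submit time; the
 * old frame length advances the pointer, while the current frame
 * length gives the number of valid bytes in each frame:
 *
 *	int x, aframes;
 *	uint8_t *ptr = sc->sc_rx_buf;
 *
 *	usbd_xfer_status(xfer, NULL, NULL, &aframes, NULL);
 *	for (x = 0; x != aframes; x++) {
 *		... consume usbd_xfer_frame_len(xfer, x) bytes at "ptr" ...
 *		ptr += usbd_xfer_old_frame_length(xfer, x);
 *	}
 */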
2101
2102void
2103usbd_xfer_status(struct usb_xfer *xfer, int *actlen, int *sumlen, int *aframes,
2104    int *nframes)
2105{
2106	if (actlen != NULL)
2107		*actlen = xfer->actlen;
2108	if (sumlen != NULL)
2109		*sumlen = xfer->sumlen;
2110	if (aframes != NULL)
2111		*aframes = xfer->aframes;
2112	if (nframes != NULL)
2113		*nframes = xfer->nframes;
2114}
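/*
 * Editor's note: a short sketch of reading the completion status from
 * the USB_ST_TRANSFERRED case of a callback; "actlen" reports the
 * number of bytes actually transferred and "sumlen" the number of
 * bytes initially requested:
 *
 *	int actlen, sumlen;
 *
 *	usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL);
 *	DPRINTF("got %d of %d bytes\n", actlen, sumlen);
 */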
2115
2116/*------------------------------------------------------------------------*
2117 *	usbd_xfer_set_frame_offset
2118 *
2119 * This function sets the frame data buffer offset relative to the beginning
2120 * of the USB DMA buffer allocated for this USB transfer.
2121 *------------------------------------------------------------------------*/
2122void
2123usbd_xfer_set_frame_offset(struct usb_xfer *xfer, usb_frlength_t offset,
2124    usb_frcount_t frindex)
2125{
2126	KASSERT(!xfer->flags.ext_buffer, ("Cannot offset data frame "
2127	    "when the USB buffer is external\n"));
2128	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2129
2130	/* set virtual address to load */
2131	xfer->frbuffers[frindex].buffer =
2132	    USB_ADD_BYTES(xfer->local_buffer, offset);
2133}
2134
2135void
2136usbd_xfer_set_interval(struct usb_xfer *xfer, int i)
2137{
2138	xfer->interval = i;
2139}
2140
2141void
2142usbd_xfer_set_timeout(struct usb_xfer *xfer, int t)
2143{
2144	xfer->timeout = t;
2145}
2146
2147void
2148usbd_xfer_set_frames(struct usb_xfer *xfer, usb_frcount_t n)
2149{
2150	xfer->nframes = n;
2151}
2152
2153usb_frcount_t
2154usbd_xfer_max_frames(struct usb_xfer *xfer)
2155{
2156	return (xfer->max_frame_count);
2157}
2158
2159usb_frlength_t
2160usbd_xfer_max_len(struct usb_xfer *xfer)
2161{
2162	return (xfer->max_data_length);
2163}
2164
2165usb_frlength_t
2166usbd_xfer_max_framelen(struct usb_xfer *xfer)
2167{
2168	return (xfer->max_frame_size);
2169}
2170
2171void
2172usbd_xfer_set_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex,
2173    usb_frlength_t len)
2174{
2175	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2176
2177	xfer->frlengths[frindex] = len;
2178}
2179
2180/*------------------------------------------------------------------------*
2181 *	usb_callback_proc - factored out code
2182 *
2183 * This function performs USB callbacks.
2184 *------------------------------------------------------------------------*/
2185static void
2186usb_callback_proc(struct usb_proc_msg *_pm)
2187{
2188	struct usb_done_msg *pm = (void *)_pm;
2189	struct usb_xfer_root *info = pm->xroot;
2190
2191	/* Change locking order */
2192	USB_BUS_UNLOCK(info->bus);
2193
2194	/*
2195	 * We exploit the fact that the mutex is the same for all
2196	 * callbacks that will be called from this thread:
2197	 */
2198	mtx_lock(info->xfer_mtx);
2199	USB_BUS_LOCK(info->bus);
2200
2201	/* Continue where we lost track */
2202	usb_command_wrapper(&info->done_q,
2203	    info->done_q.curr);
2204
2205	mtx_unlock(info->xfer_mtx);
2206}
2207
2208/*------------------------------------------------------------------------*
2209 *	usbd_callback_ss_done_defer
2210 *
2211 * This function will defer the start, stop and done callback to the
2212 * correct thread.
2213 *------------------------------------------------------------------------*/
2214static void
2215usbd_callback_ss_done_defer(struct usb_xfer *xfer)
2216{
2217	struct usb_xfer_root *info = xfer->xroot;
2218	struct usb_xfer_queue *pq = &info->done_q;
2219
2220	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2221
2222	if (pq->curr != xfer) {
2223		usbd_transfer_enqueue(pq, xfer);
2224	}
2225	if (!pq->recurse_1) {
2226
2227		/*
2228	         * We have to postpone the callback because we would
2229	         * get a Lock Order Reversal, LOR, if we tried to
2230	         * proceed.
2231	         */
2232		if (usb_proc_msignal(info->done_p,
2233		    &info->done_m[0], &info->done_m[1])) {
2234			/* ignore */
2235		}
2236	} else {
2237		/* clear second recurse flag */
2238		pq->recurse_2 = 0;
2239	}
2240	return;
2241
2242}
2243
2244/*------------------------------------------------------------------------*
2245 *	usbd_callback_wrapper
2246 *
2247 * This is a wrapper for USB callbacks. This wrapper does some
2248 * auto-magic things like figuring out if we can call the callback
2249 * directly from the current context or if we need to wake up the
2250 * interrupt process.
2251 *------------------------------------------------------------------------*/
2252static void
2253usbd_callback_wrapper(struct usb_xfer_queue *pq)
2254{
2255	struct usb_xfer *xfer = pq->curr;
2256	struct usb_xfer_root *info = xfer->xroot;
2257
2258	USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2259	if (!mtx_owned(info->xfer_mtx) && !SCHEDULER_STOPPED()) {
2260		/*
2261	       	 * Cases that end up here:
2262		 *
2263		 * 5) HW interrupt done callback or other source.
2264		 */
2265		DPRINTFN(3, "case 5\n");
2266
2267		/*
2268	         * We have to postpone the callback because we would
2269	         * get a Lock Order Reversal, LOR, if we tried to
2270	         * proceed.
2271	         */
2272		if (usb_proc_msignal(info->done_p,
2273		    &info->done_m[0], &info->done_m[1])) {
2274			/* ignore */
2275		}
2276		return;
2277	}
2278	/*
2279	 * Cases that end up here:
2280	 *
2281	 * 1) We are starting a transfer
2282	 * 2) We are prematurely calling back a transfer
2283	 * 3) We are stopping a transfer
2284	 * 4) We are doing an ordinary callback
2285	 */
2286	DPRINTFN(3, "case 1-4\n");
2287	/* get next USB transfer in the queue */
2288	info->done_q.curr = NULL;
2289
2290	/* set flag in case of drain */
2291	xfer->flags_int.doing_callback = 1;
2292
2293	USB_BUS_UNLOCK(info->bus);
2294	USB_BUS_LOCK_ASSERT(info->bus, MA_NOTOWNED);
2295
2296	/* set correct USB state for callback */
2297	if (!xfer->flags_int.transferring) {
2298		xfer->usb_state = USB_ST_SETUP;
2299		if (!xfer->flags_int.started) {
2300			/* we got stopped before we even got started */
2301			USB_BUS_LOCK(info->bus);
2302			goto done;
2303		}
2304	} else {
2305
2306		if (usbd_callback_wrapper_sub(xfer)) {
2307			/* the callback has been deferred */
2308			USB_BUS_LOCK(info->bus);
2309			goto done;
2310		}
2311#if USB_HAVE_POWERD
2312		/* decrement power reference */
2313		usbd_transfer_power_ref(xfer, -1);
2314#endif
2315		xfer->flags_int.transferring = 0;
2316
2317		if (xfer->error) {
2318			xfer->usb_state = USB_ST_ERROR;
2319		} else {
2320			/* set transferred state */
2321			xfer->usb_state = USB_ST_TRANSFERRED;
2322#if USB_HAVE_BUSDMA
2323			/* sync DMA memory, if any */
2324			if (xfer->flags_int.bdma_enable &&
2325			    (!xfer->flags_int.bdma_no_post_sync)) {
2326				usb_bdma_post_sync(xfer);
2327			}
2328#endif
2329		}
2330	}
2331
2332#if USB_HAVE_PF
2333	if (xfer->usb_state != USB_ST_SETUP)
2334		usbpf_xfertap(xfer, USBPF_XFERTAP_DONE);
2335#endif
2336	/* call processing routine */
2337	(xfer->callback) (xfer, xfer->error);
2338
2339	/* pickup the USB mutex again */
2340	USB_BUS_LOCK(info->bus);
2341
2342	/*
2343	 * Check if we got started after we got cancelled, but
2344	 * before we managed to do the callback.
2345	 */
2346	if ((!xfer->flags_int.open) &&
2347	    (xfer->flags_int.started) &&
2348	    (xfer->usb_state == USB_ST_ERROR)) {
2349		/* clear flag in case of drain */
2350		xfer->flags_int.doing_callback = 0;
2351		/* try to loop, but not recursively */
2352		usb_command_wrapper(&info->done_q, xfer);
2353		return;
2354	}
2355
2356done:
2357	/* clear flag in case of drain */
2358	xfer->flags_int.doing_callback = 0;
2359
2360	/*
2361	 * Check if we are draining.
2362	 */
2363	if (xfer->flags_int.draining &&
2364	    (!xfer->flags_int.transferring)) {
2365		/* "usbd_transfer_drain()" is waiting for end of transfer */
2366		xfer->flags_int.draining = 0;
2367		cv_broadcast(&info->cv_drain);
2368	}
2369
2370	/* do the next callback, if any */
2371	usb_command_wrapper(&info->done_q,
2372	    info->done_q.curr);
2373}
2374
2375/*------------------------------------------------------------------------*
2376 *	usb_dma_delay_done_cb
2377 *
2378 * This function is called when the DMA delay has elapsed, and it
2379 * will make sure that the callback is called to complete the USB
2380 * transfer. This code path is usually only used when there is a USB
2381 * error like USB_ERR_CANCELLED.
2382 *------------------------------------------------------------------------*/
2383void
2384usb_dma_delay_done_cb(struct usb_xfer *xfer)
2385{
2386	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2387
2388	DPRINTFN(3, "Completed %p\n", xfer);
2389
2390	/* queue callback for execution, again */
2391	usbd_transfer_done(xfer, 0);
2392}
2393
2394/*------------------------------------------------------------------------*
2395 *	usbd_transfer_dequeue
2396 *
2397 *  - This function is used to remove a USB transfer from a USB
2398 *  transfer queue.
2399 *
2400 *  - This function can be called multiple times in a row.
2401 *------------------------------------------------------------------------*/
2402void
2403usbd_transfer_dequeue(struct usb_xfer *xfer)
2404{
2405	struct usb_xfer_queue *pq;
2406
2407	pq = xfer->wait_queue;
2408	if (pq) {
2409		TAILQ_REMOVE(&pq->head, xfer, wait_entry);
2410		xfer->wait_queue = NULL;
2411	}
2412}
2413
2414/*------------------------------------------------------------------------*
2415 *	usbd_transfer_enqueue
2416 *
2417 *  - This function is used to insert a USB transfer into a USB
2418 *  transfer queue.
2419 *
2420 *  - This function can be called multiple times in a row.
2421 *------------------------------------------------------------------------*/
2422void
2423usbd_transfer_enqueue(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2424{
2425	/*
2426	 * Insert the USB transfer into the queue, if it is not
2427	 * already on a USB transfer queue:
2428	 */
2429	if (xfer->wait_queue == NULL) {
2430		xfer->wait_queue = pq;
2431		TAILQ_INSERT_TAIL(&pq->head, xfer, wait_entry);
2432	}
2433}
2434
2435/*------------------------------------------------------------------------*
2436 *	usbd_transfer_done
2437 *
2438 *  - This function is used to remove a USB transfer from the busdma,
2439 *  pipe or interrupt queue.
2440 *
2441 *  - This function is used to queue the USB transfer on the done
2442 *  queue.
2443 *
2444 *  - This function is used to stop any USB transfer timeouts.
2445 *------------------------------------------------------------------------*/
2446void
2447usbd_transfer_done(struct usb_xfer *xfer, usb_error_t error)
2448{
2449	struct usb_xfer_root *info = xfer->xroot;
2450
2451	USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2452
2453	DPRINTF("err=%s\n", usbd_errstr(error));
2454
2455	/*
2456	 * If we are not transferring then just return.
2457	 * This can happen during transfer cancel.
2458	 */
2459	if (!xfer->flags_int.transferring) {
2460		DPRINTF("not transferring\n");
2461		/* end of control transfer, if any */
2462		xfer->flags_int.control_act = 0;
2463		return;
2464	}
2465	/* only set transfer error, if not already set */
2466	if (xfer->error == USB_ERR_NORMAL_COMPLETION)
2467		xfer->error = error;
2468
2469	/* stop any callouts */
2470	usb_callout_stop(&xfer->timeout_handle);
2471
2472	/*
2473	 * If we are waiting on a queue, just remove the USB transfer
2474	 * from the queue, if any. We should have the required locks
2475	 * locked to do the remove when this function is called.
2476	 */
2477	usbd_transfer_dequeue(xfer);
2478
2479#if USB_HAVE_BUSDMA
2480	if (mtx_owned(info->xfer_mtx)) {
2481		struct usb_xfer_queue *pq;
2482
2483		/*
2484		 * If the private USB lock is not locked, then we assume
2485		 * that the BUS-DMA load stage has been passed:
2486		 */
2487		pq = &info->dma_q;
2488
2489		if (pq->curr == xfer) {
2490			/* start the next BUS-DMA load, if any */
2491			usb_command_wrapper(pq, NULL);
2492		}
2493	}
2494#endif
2495	/* keep some statistics */
2496	if (xfer->error) {
2497		info->bus->stats_err.uds_requests
2498		    [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2499	} else {
2500		info->bus->stats_ok.uds_requests
2501		    [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2502	}
2503
2504	/* call the USB transfer callback */
2505	usbd_callback_ss_done_defer(xfer);
2506}
2507
2508/*------------------------------------------------------------------------*
2509 *	usbd_transfer_start_cb
2510 *
2511 * This function is called to start the USB transfer when
2512 * "xfer->interval" is greater than zero, and the endpoint type is
2513 * BULK or CONTROL.
2514 *------------------------------------------------------------------------*/
2515static void
2516usbd_transfer_start_cb(void *arg)
2517{
2518	struct usb_xfer *xfer = arg;
2519	struct usb_endpoint *ep = xfer->endpoint;
2520
2521	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2522
2523	DPRINTF("start\n");
2524
2525#if USB_HAVE_PF
2526	usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2527#endif
2528
2529	/* the transfer can now be cancelled */
2530	xfer->flags_int.can_cancel_immed = 1;
2531
2532	/* start USB transfer, if no error */
2533	if (xfer->error == 0)
2534		(ep->methods->start) (xfer);
2535
2536	/* check for transfer error */
2537	if (xfer->error) {
2538		/* some error has happened */
2539		usbd_transfer_done(xfer, 0);
2540	}
2541}
2542
2543/*------------------------------------------------------------------------*
2544 *	usbd_xfer_set_stall
2545 *
2546 * This function is used to set the stall flag outside the
2547 * callback. This function is NULL safe.
2548 *------------------------------------------------------------------------*/
2549void
2550usbd_xfer_set_stall(struct usb_xfer *xfer)
2551{
2552	if (xfer == NULL) {
2553		/* tearing down */
2554		return;
2555	}
2556	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2557
2558	/* avoid any races by locking the USB mutex */
2559	USB_BUS_LOCK(xfer->xroot->bus);
2560	xfer->flags.stall_pipe = 1;
2561	USB_BUS_UNLOCK(xfer->xroot->bus);
2562}
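/*
 * Editor's note: a minimal sketch using "usbd_xfer_set_stall()" from
 * driver context, assuming the hypothetical "sc" softc used in the
 * earlier examples; the stall flag is acted upon the next time the
 * transfer is started (see "usbd_pipe_start()" below):
 *
 *	mtx_lock(&sc->sc_mtx);
 *	usbd_xfer_set_stall(sc->sc_xfer[0]);
 *	usbd_transfer_start(sc->sc_xfer[0]);
 *	mtx_unlock(&sc->sc_mtx);
 */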
2563
2564int
2565usbd_xfer_is_stalled(struct usb_xfer *xfer)
2566{
2567	return (xfer->endpoint->is_stalled);
2568}
2569
2570/*------------------------------------------------------------------------*
2571 *	usbd_transfer_clear_stall
2572 *
2573 * This function is used to clear the stall flag outside the
2574 * callback. This function is NULL safe.
2575 *------------------------------------------------------------------------*/
2576void
2577usbd_transfer_clear_stall(struct usb_xfer *xfer)
2578{
2579	if (xfer == NULL) {
2580		/* tearing down */
2581		return;
2582	}
2583	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2584
2585	/* avoid any races by locking the USB mutex */
2586	USB_BUS_LOCK(xfer->xroot->bus);
2587
2588	xfer->flags.stall_pipe = 0;
2589
2590	USB_BUS_UNLOCK(xfer->xroot->bus);
2591}
2592
2593/*------------------------------------------------------------------------*
2594 *	usbd_pipe_start
2595 *
2596 * This function is used to add a USB transfer to the pipe transfer list.
2597 *------------------------------------------------------------------------*/
2598void
2599usbd_pipe_start(struct usb_xfer_queue *pq)
2600{
2601	struct usb_endpoint *ep;
2602	struct usb_xfer *xfer;
2603	uint8_t type;
2604
2605	xfer = pq->curr;
2606	ep = xfer->endpoint;
2607
2608	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2609
2610	/*
2611	 * If the endpoint is already stalled we do nothing !
2612	 */
2613	if (ep->is_stalled) {
2614		return;
2615	}
2616	/*
2617	 * Check if we are supposed to stall the endpoint:
2618	 */
2619	if (xfer->flags.stall_pipe) {
2620		struct usb_device *udev;
2621		struct usb_xfer_root *info;
2622
2623		/* clear stall command */
2624		xfer->flags.stall_pipe = 0;
2625
2626		/* get pointer to USB device */
2627		info = xfer->xroot;
2628		udev = info->udev;
2629
2630		/*
2631		 * Only stall BULK and INTERRUPT endpoints.
2632		 */
2633		type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2634		if ((type == UE_BULK) ||
2635		    (type == UE_INTERRUPT)) {
2636			uint8_t did_stall;
2637
2638			did_stall = 1;
2639
2640			if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2641				(udev->bus->methods->set_stall) (
2642				    udev, ep, &did_stall);
2643			} else if (udev->ctrl_xfer[1]) {
2644				info = udev->ctrl_xfer[1]->xroot;
2645				usb_proc_msignal(
2646				    USB_BUS_NON_GIANT_PROC(info->bus),
2647				    &udev->cs_msg[0], &udev->cs_msg[1]);
2648			} else {
2649				/* should not happen */
2650				DPRINTFN(0, "No stall handler\n");
2651			}
2652			/*
2653			 * Check if we should stall. Some USB hardware
2654			 * handles set- and clear-stall in hardware.
2655			 */
2656			if (did_stall) {
2657				/*
2658				 * The transfer will be continued when
2659				 * the clear-stall control endpoint
2660				 * message is received.
2661				 */
2662				ep->is_stalled = 1;
2663				return;
2664			}
2665		} else if (type == UE_ISOCHRONOUS) {
2666
2667			/*
2668			 * Make sure any FIFO overflow or other FIFO
2669			 * error conditions go away by resetting the
2670			 * endpoint FIFO through the clear stall
2671			 * method.
2672			 */
2673			if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2674				(udev->bus->methods->clear_stall) (udev, ep);
2675			}
2676		}
2677	}
2678	/* Set or clear stall complete - special case */
2679	if (xfer->nframes == 0) {
2680		/* we are complete */
2681		xfer->aframes = 0;
2682		usbd_transfer_done(xfer, 0);
2683		return;
2684	}
2685	/*
2686	 * Handled cases:
2687	 *
2688	 * 1) Start the first transfer queued.
2689	 *
2690	 * 2) Re-start the current USB transfer.
2691	 */
2692	/*
2693	 * Check if there should be any
2694	 * pre-transfer start delay:
2695	 */
2696	if (xfer->interval > 0) {
2697		type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2698		if ((type == UE_BULK) ||
2699		    (type == UE_CONTROL)) {
2700			usbd_transfer_timeout_ms(xfer,
2701			    &usbd_transfer_start_cb,
2702			    xfer->interval);
2703			return;
2704		}
2705	}
2706	DPRINTF("start\n");
2707
2708#if USB_HAVE_PF
2709	usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2710#endif
2711	/* the transfer can now be cancelled */
2712	xfer->flags_int.can_cancel_immed = 1;
2713
2714	/* start USB transfer, if no error */
2715	if (xfer->error == 0)
2716		(ep->methods->start) (xfer);
2717
2718	/* check for transfer error */
2719	if (xfer->error) {
2720		/* some error has happened */
2721		usbd_transfer_done(xfer, 0);
2722	}
2723}
2724
2725/*------------------------------------------------------------------------*
2726 *	usbd_transfer_timeout_ms
2727 *
2728 * This function is used to setup a timeout on the given USB
2729 * transfer. If the timeout has been deferred the callback given by
2730 * "cb" will get called after "ms" milliseconds.
2731 *------------------------------------------------------------------------*/
2732void
2733usbd_transfer_timeout_ms(struct usb_xfer *xfer,
2734    void (*cb) (void *arg), usb_timeout_t ms)
2735{
2736	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2737
2738	/* defer delay */
2739	usb_callout_reset(&xfer->timeout_handle,
2740	    USB_MS_TO_TICKS(ms) + USB_CALLOUT_ZERO_TICKS, cb, xfer);
2741}
2742
2743/*------------------------------------------------------------------------*
2744 *	usbd_callback_wrapper_sub
2745 *
2746 *  - This function will update variables in a USB transfer after
2747 *  the USB transfer has completed.
2748 *
2749 *  - This function is used to start the next USB transfer on the
2750 *  ep transfer queue, if any.
2751 *
2752 * NOTE: In some special cases the USB transfer will not be removed from
2753 * the pipe queue, but remain first. To enforce USB transfer removal,
2754 * call this function with the transfer error set to "USB_ERR_CANCELLED".
2755 *
2756 * Return values:
2757 * 0: Success.
2758 * Else: The callback has been deferred.
2759 *------------------------------------------------------------------------*/
2760static uint8_t
2761usbd_callback_wrapper_sub(struct usb_xfer *xfer)
2762{
2763	struct usb_endpoint *ep;
2764	struct usb_bus *bus;
2765	usb_frcount_t x;
2766
2767	bus = xfer->xroot->bus;
2768
2769	if ((!xfer->flags_int.open) &&
2770	    (!xfer->flags_int.did_close)) {
2771		DPRINTF("close\n");
2772		USB_BUS_LOCK(bus);
2773		(xfer->endpoint->methods->close) (xfer);
2774		USB_BUS_UNLOCK(bus);
2775		/* only close once */
2776		xfer->flags_int.did_close = 1;
2777		return (1);		/* wait for new callback */
2778	}
2779	/*
2780	 * If we have a non-hardware induced error we
2781	 * need to do the DMA delay!
2782	 */
2783	if (xfer->error != 0 && !xfer->flags_int.did_dma_delay &&
2784	    (xfer->error == USB_ERR_CANCELLED ||
2785	    xfer->error == USB_ERR_TIMEOUT ||
2786	    bus->methods->start_dma_delay != NULL)) {
2787
2788		usb_timeout_t temp;
2789
2790		/* only delay once */
2791		xfer->flags_int.did_dma_delay = 1;
2792
2793		/* we cannot cancel this delay */
2794		xfer->flags_int.can_cancel_immed = 0;
2795
2796		temp = usbd_get_dma_delay(xfer->xroot->udev);
2797
2798		DPRINTFN(3, "DMA delay, %u ms, "
2799		    "on %p\n", temp, xfer);
2800
2801		if (temp != 0) {
2802			USB_BUS_LOCK(bus);
2803			/*
2804			 * Some hardware solutions have dedicated
2805			 * events when it is safe to free DMA'ed
2806			 * memory. For the other hardware platforms we
2807			 * use a static delay.
2808			 */
2809			if (bus->methods->start_dma_delay != NULL) {
2810				(bus->methods->start_dma_delay) (xfer);
2811			} else {
2812				usbd_transfer_timeout_ms(xfer,
2813				    (void (*)(void *))&usb_dma_delay_done_cb,
2814				    temp);
2815			}
2816			USB_BUS_UNLOCK(bus);
2817			return (1);	/* wait for new callback */
2818		}
2819	}
2820	/* check actual number of frames */
2821	if (xfer->aframes > xfer->nframes) {
2822		if (xfer->error == 0) {
2823			panic("%s: actual number of frames, %d, is "
2824			    "greater than initial number of frames, %d\n",
2825			    __FUNCTION__, xfer->aframes, xfer->nframes);
2826		} else {
2827			/* just set some valid value */
2828			xfer->aframes = xfer->nframes;
2829		}
2830	}
2831	/* compute actual length */
2832	xfer->actlen = 0;
2833
2834	for (x = 0; x != xfer->aframes; x++) {
2835		xfer->actlen += xfer->frlengths[x];
2836	}
2837
2838	/*
2839	 * Frames that were not transferred get zero actual length in
2840	 * case the USB device driver does not check the actual number
2841	 * of frames transferred, "xfer->aframes":
2842	 */
2843	for (; x < xfer->nframes; x++) {
2844		usbd_xfer_set_frame_len(xfer, x, 0);
2845	}
2846
2847	/* check actual length */
2848	if (xfer->actlen > xfer->sumlen) {
2849		if (xfer->error == 0) {
2850			panic("%s: actual length, %d, is greater than "
2851			    "initial length, %d\n",
2852			    __FUNCTION__, xfer->actlen, xfer->sumlen);
2853		} else {
2854			/* just set some valid value */
2855			xfer->actlen = xfer->sumlen;
2856		}
2857	}
2858	DPRINTFN(1, "xfer=%p endpoint=%p sts=%d alen=%d, slen=%d, afrm=%d, nfrm=%d\n",
2859	    xfer, xfer->endpoint, xfer->error, xfer->actlen, xfer->sumlen,
2860	    xfer->aframes, xfer->nframes);
2861
2862	if (xfer->error) {
2863		/* end of control transfer, if any */
2864		xfer->flags_int.control_act = 0;
2865
2866#if USB_HAVE_TT_SUPPORT
2867		switch (xfer->error) {
2868		case USB_ERR_NORMAL_COMPLETION:
2869		case USB_ERR_SHORT_XFER:
2870		case USB_ERR_STALLED:
2871		case USB_ERR_CANCELLED:
2872			/* nothing to do */
2873			break;
2874		default:
2875			/* try to reset the TT, if any */
2876			USB_BUS_LOCK(bus);
2877			uhub_tt_buffer_reset_async_locked(xfer->xroot->udev, xfer->endpoint);
2878			USB_BUS_UNLOCK(bus);
2879			break;
2880		}
2881#endif
2882		/* check if we should block the execution queue */
2883		if ((xfer->error != USB_ERR_CANCELLED) &&
2884		    (xfer->flags.pipe_bof)) {
2885			DPRINTFN(2, "xfer=%p: Block On Failure "
2886			    "on endpoint=%p\n", xfer, xfer->endpoint);
2887			goto done;
2888		}
2889	} else {
2890		/* check for short transfers */
2891		if (xfer->actlen < xfer->sumlen) {
2892
2893			/* end of control transfer, if any */
2894			xfer->flags_int.control_act = 0;
2895
2896			if (!xfer->flags_int.short_xfer_ok) {
2897				xfer->error = USB_ERR_SHORT_XFER;
2898				if (xfer->flags.pipe_bof) {
2899					DPRINTFN(2, "xfer=%p: Block On Failure on "
2900					    "Short Transfer on endpoint %p.\n",
2901					    xfer, xfer->endpoint);
2902					goto done;
2903				}
2904			}
2905		} else {
2906			/*
2907			 * Check if we are in the middle of a
2908			 * control transfer:
2909			 */
2910			if (xfer->flags_int.control_act) {
2911				DPRINTFN(5, "xfer=%p: Control transfer "
2912				    "active on endpoint=%p\n", xfer, xfer->endpoint);
2913				goto done;
2914			}
2915		}
2916	}
2917
2918	ep = xfer->endpoint;
2919
2920	/*
2921	 * If the current USB transfer is completing we need to start the
2922	 * next one:
2923	 */
2924	USB_BUS_LOCK(bus);
2925	if (ep->endpoint_q[xfer->stream_id].curr == xfer) {
2926		usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], NULL);
2927
2928		if (ep->endpoint_q[xfer->stream_id].curr != NULL ||
2929		    TAILQ_FIRST(&ep->endpoint_q[xfer->stream_id].head) != NULL) {
2930			/* there is another USB transfer waiting */
2931		} else {
2932			/* this is the last USB transfer */
2933			/* clear isochronous sync flag */
2934			xfer->endpoint->is_synced = 0;
2935		}
2936	}
2937	USB_BUS_UNLOCK(bus);
2938done:
2939	return (0);
2940}
2941
2942/*------------------------------------------------------------------------*
2943 *	usb_command_wrapper
2944 *
2945 * This function is used to execute commands non-recursively on a USB
2946 * transfer.
2947 *------------------------------------------------------------------------*/
2948void
2949usb_command_wrapper(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2950{
2951	if (xfer) {
2952		/*
2953		 * If the transfer is not already processing,
2954		 * queue it!
2955		 */
2956		if (pq->curr != xfer) {
2957			usbd_transfer_enqueue(pq, xfer);
2958			if (pq->curr != NULL) {
2959				/* something is already processing */
2960				DPRINTFN(6, "busy %p\n", pq->curr);
2961				return;
2962			}
2963		}
2964	} else {
2965		/* Get next element in queue */
2966		pq->curr = NULL;
2967	}
2968
2969	if (!pq->recurse_1) {
2970
2971		do {
2972
2973			/* set both recurse flags */
2974			pq->recurse_1 = 1;
2975			pq->recurse_2 = 1;
2976
2977			if (pq->curr == NULL) {
2978				xfer = TAILQ_FIRST(&pq->head);
2979				if (xfer) {
2980					TAILQ_REMOVE(&pq->head, xfer,
2981					    wait_entry);
2982					xfer->wait_queue = NULL;
2983					pq->curr = xfer;
2984				} else {
2985					break;
2986				}
2987			}
2988			DPRINTFN(6, "cb %p (enter)\n", pq->curr);
2989			(pq->command) (pq);
2990			DPRINTFN(6, "cb %p (leave)\n", pq->curr);
2991
2992		} while (!pq->recurse_2);
2993
2994		/* clear first recurse flag */
2995		pq->recurse_1 = 0;
2996
2997	} else {
2998		/* clear second recurse flag */
2999		pq->recurse_2 = 0;
3000	}
3001}
3002
3003/*------------------------------------------------------------------------*
3004 *	usbd_ctrl_transfer_setup
3005 *
3006 * This function is used to setup the default USB control endpoint
3007 * transfer.
3008 *------------------------------------------------------------------------*/
3009void
3010usbd_ctrl_transfer_setup(struct usb_device *udev)
3011{
3012	struct usb_xfer *xfer;
3013	uint8_t no_resetup;
3014	uint8_t iface_index;
3015
3016	/* check for root HUB */
3017	if (udev->parent_hub == NULL)
3018		return;
3019repeat:
3020
3021	xfer = udev->ctrl_xfer[0];
3022	if (xfer) {
3023		USB_XFER_LOCK(xfer);
3024		no_resetup =
3025		    ((xfer->address == udev->address) &&
3026		    (udev->ctrl_ep_desc.wMaxPacketSize[0] ==
3027		    udev->ddesc.bMaxPacketSize));
3028		if (udev->flags.usb_mode == USB_MODE_DEVICE) {
3029			if (no_resetup) {
3030				/*
3031				 * NOTE: checking "xfer->address" and
3032				 * starting the USB transfer must be
3033				 * atomic!
3034				 */
3035				usbd_transfer_start(xfer);
3036			}
3037		}
3038		USB_XFER_UNLOCK(xfer);
3039	} else {
3040		no_resetup = 0;
3041	}
3042
3043	if (no_resetup) {
3044		/*
3045	         * All parameters are exactly the same as before.
3046	         * Just return.
3047	         */
3048		return;
3049	}
3050	/*
3051	 * Update wMaxPacketSize for the default control endpoint:
3052	 */
3053	udev->ctrl_ep_desc.wMaxPacketSize[0] =
3054	    udev->ddesc.bMaxPacketSize;
3055
3056	/*
3057	 * Unsetup any existing USB transfer:
3058	 */
3059	usbd_transfer_unsetup(udev->ctrl_xfer, USB_CTRL_XFER_MAX);
3060
3061	/*
3062	 * Reset clear stall error counter.
3063	 */
3064	udev->clear_stall_errors = 0;
3065
3066	/*
3067	 * Try to setup a new USB transfer for the
3068	 * default control endpoint:
3069	 */
3070	iface_index = 0;
3071	if (usbd_transfer_setup(udev, &iface_index,
3072	    udev->ctrl_xfer, usb_control_ep_cfg, USB_CTRL_XFER_MAX, NULL,
3073	    &udev->device_mtx)) {
3074		DPRINTFN(0, "could not setup default "
3075		    "USB transfer\n");
3076	} else {
3077		goto repeat;
3078	}
3079}
3080
3081/*------------------------------------------------------------------------*
3082 *	usbd_clear_stall_locked - factored out code
3083 *
3084 * NOTE: the intention of this function is not to reset the hardware
3085 * data toggle on the USB device side.
3086 *------------------------------------------------------------------------*/
3087void
3088usbd_clear_stall_locked(struct usb_device *udev, struct usb_endpoint *ep)
3089{
3090	USB_BUS_LOCK_ASSERT(udev->bus, MA_OWNED);
3091
3092	/* check that we have a valid case */
3093	if (udev->flags.usb_mode == USB_MODE_HOST &&
3094	    udev->parent_hub != NULL &&
3095	    udev->bus->methods->clear_stall != NULL &&
3096	    ep->methods != NULL) {
3097		(udev->bus->methods->clear_stall) (udev, ep);
3098	}
3099}
3100
3101/*------------------------------------------------------------------------*
3102 *	usbd_clear_data_toggle - factored out code
3103 *
3104 * NOTE: the intention of this function is not to reset the hardware
3105 * data toggle on the USB device side.
3106 *------------------------------------------------------------------------*/
3107void
3108usbd_clear_data_toggle(struct usb_device *udev, struct usb_endpoint *ep)
3109{
3110	DPRINTFN(5, "udev=%p endpoint=%p\n", udev, ep);
3111
3112	USB_BUS_LOCK(udev->bus);
3113	ep->toggle_next = 0;
3114	/* some hardware needs a callback to clear the data toggle */
3115	usbd_clear_stall_locked(udev, ep);
3116	USB_BUS_UNLOCK(udev->bus);
3117}
3118
3119/*------------------------------------------------------------------------*
3120 *	usbd_clear_stall_callback - factored out clear stall callback
3121 *
3122 * Input parameters:
3123 *  xfer1: Clear Stall Control Transfer
3124 *  xfer2: Stalled USB Transfer
3125 *
3126 * This function is NULL safe.
3127 *
3128 * Return values:
3129 *   0: In progress
3130 *   Else: Finished
3131 *
3132 * Clear stall config example:
3133 *
3134 * static const struct usb_config my_clearstall =  {
3135 *	.type = UE_CONTROL,
3136 *	.endpoint = 0,
3137 *	.direction = UE_DIR_ANY,
3138 *	.interval = 50, //50 milliseconds
3139 *	.bufsize = sizeof(struct usb_device_request),
3140 *	.timeout = 1000, //1.000 seconds
3141 *	.callback = &my_clear_stall_callback, // **
3142 *	.usb_mode = USB_MODE_HOST,
3143 * };
3144 *
3145 * ** "my_clear_stall_callback" calls "usbd_clear_stall_callback"
3146 * passing the correct parameters.
3147 *------------------------------------------------------------------------*/
3148uint8_t
3149usbd_clear_stall_callback(struct usb_xfer *xfer1,
3150    struct usb_xfer *xfer2)
3151{
3152	struct usb_device_request req;
3153
3154	if (xfer2 == NULL) {
3155		/* looks like we are tearing down */
3156		DPRINTF("NULL input parameter\n");
3157		return (0);
3158	}
3159	USB_XFER_LOCK_ASSERT(xfer1, MA_OWNED);
3160	USB_XFER_LOCK_ASSERT(xfer2, MA_OWNED);
3161
3162	switch (USB_GET_STATE(xfer1)) {
3163	case USB_ST_SETUP:
3164
3165		/*
3166		 * pre-clear the data toggle to DATA0 ("umass.c" and
3167		 * "ata-usb.c" depend on this)
3168		 */
3169
3170		usbd_clear_data_toggle(xfer2->xroot->udev, xfer2->endpoint);
3171
3172		/* setup a clear-stall packet */
3173
3174		req.bmRequestType = UT_WRITE_ENDPOINT;
3175		req.bRequest = UR_CLEAR_FEATURE;
3176		USETW(req.wValue, UF_ENDPOINT_HALT);
3177		req.wIndex[0] = xfer2->endpoint->edesc->bEndpointAddress;
3178		req.wIndex[1] = 0;
3179		USETW(req.wLength, 0);
3180
3181		/*
3182		 * "usbd_transfer_setup_sub()" will ensure that
3183		 * we have sufficient room in the buffer for
3184		 * the request structure!
3185		 */
3186
3187		/* copy in the transfer */
3188
3189		usbd_copy_in(xfer1->frbuffers, 0, &req, sizeof(req));
3190
3191		/* set length */
3192		xfer1->frlengths[0] = sizeof(req);
3193		xfer1->nframes = 1;
3194
3195		usbd_transfer_submit(xfer1);
3196		return (0);
3197
3198	case USB_ST_TRANSFERRED:
3199		break;
3200
3201	default:			/* Error */
3202		if (xfer1->error == USB_ERR_CANCELLED) {
3203			return (0);
3204		}
3205		break;
3206	}
3207	return (1);			/* Clear Stall Finished */
3208}
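/*
 * Editor's note: a hedged sketch of the "my_clear_stall_callback"
 * mentioned above, assuming a hypothetical driver softc "my_softc"
 * whose stalled data transfer is "sc->sc_data_xfer"; once the
 * clear-stall request has finished, the stalled transfer is
 * restarted:
 *
 *	static void
 *	my_clear_stall_callback(struct usb_xfer *xfer, usb_error_t error)
 *	{
 *		struct my_softc *sc = usbd_xfer_softc(xfer);
 *
 *		if (usbd_clear_stall_callback(xfer, sc->sc_data_xfer)) {
 *			DPRINTF("stall cleared\n");
 *			usbd_transfer_start(sc->sc_data_xfer);
 *		}
 *	}
 */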
3209
3210/*------------------------------------------------------------------------*
3211 *	usbd_transfer_poll
3212 *
3213 * The following function gets called from the USB keyboard driver and
3214 * UMASS when the system has panicked.
3215 *
3216 * NOTE: It is currently not possible to resume normal operation on
3217 * the USB controller which has been polled, due to clearing of the
3218 * "up_dsleep" and "up_msleep" flags.
3219 *------------------------------------------------------------------------*/
3220void
3221usbd_transfer_poll(struct usb_xfer **ppxfer, uint16_t max)
3222{
3223	struct usb_xfer *xfer;
3224	struct usb_xfer_root *xroot;
3225	struct usb_device *udev;
3226	struct usb_proc_msg *pm;
3227	uint16_t n;
3228	uint16_t drop_bus;
3229	uint16_t drop_xfer;
3230
3231	for (n = 0; n != max; n++) {
3232		/* Extra checks to avoid panic */
3233		xfer = ppxfer[n];
3234		if (xfer == NULL)
3235			continue;	/* no USB transfer */
3236		xroot = xfer->xroot;
3237		if (xroot == NULL)
3238			continue;	/* no USB root */
3239		udev = xroot->udev;
3240		if (udev == NULL)
3241			continue;	/* no USB device */
3242		if (udev->bus == NULL)
3243			continue;	/* no BUS structure */
3244		if (udev->bus->methods == NULL)
3245			continue;	/* no BUS methods */
3246		if (udev->bus->methods->xfer_poll == NULL)
3247			continue;	/* no poll method */
3248
3249		/* make sure that the BUS mutex is not locked */
3250		drop_bus = 0;
3251		while (mtx_owned(&xroot->udev->bus->bus_mtx) && !SCHEDULER_STOPPED()) {
3252			mtx_unlock(&xroot->udev->bus->bus_mtx);
3253			drop_bus++;
3254		}
3255
3256		/* make sure that the transfer mutex is not locked */
3257		drop_xfer = 0;
3258		while (mtx_owned(xroot->xfer_mtx) && !SCHEDULER_STOPPED()) {
3259			mtx_unlock(xroot->xfer_mtx);
3260			drop_xfer++;
3261		}
3262
3263		/* Make sure cv_signal() and cv_broadcast() are not called */
3264		USB_BUS_CONTROL_XFER_PROC(udev->bus)->up_msleep = 0;
3265		USB_BUS_EXPLORE_PROC(udev->bus)->up_msleep = 0;
3266		USB_BUS_GIANT_PROC(udev->bus)->up_msleep = 0;
3267		USB_BUS_NON_GIANT_PROC(udev->bus)->up_msleep = 0;
3268
3269		/* poll USB hardware */
3270		(udev->bus->methods->xfer_poll) (udev->bus);
3271
3272		USB_BUS_LOCK(xroot->bus);
3273
3274		/* check for clear stall */
3275		if (udev->ctrl_xfer[1] != NULL) {
3276
3277			/* poll clear stall start */
3278			pm = &udev->cs_msg[0].hdr;
3279			(pm->pm_callback) (pm);
3280			/* poll clear stall done thread */
3281			pm = &udev->ctrl_xfer[1]->
3282			    xroot->done_m[0].hdr;
3283			(pm->pm_callback) (pm);
3284		}
3285
3286		/* poll done thread */
3287		pm = &xroot->done_m[0].hdr;
3288		(pm->pm_callback) (pm);
3289
3290		USB_BUS_UNLOCK(xroot->bus);
3291
3292		/* restore transfer mutex */
3293		while (drop_xfer--)
3294			mtx_lock(xroot->xfer_mtx);
3295
3296		/* restore BUS mutex */
3297		while (drop_bus--)
3298			mtx_lock(&xroot->udev->bus->bus_mtx);
3299	}
3300}
3301
3302static void
3303usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
3304    uint8_t type, enum usb_dev_speed speed)
3305{
3306	static const uint16_t intr_range_max[USB_SPEED_MAX] = {
3307		[USB_SPEED_LOW] = 8,
3308		[USB_SPEED_FULL] = 64,
3309		[USB_SPEED_HIGH] = 1024,
3310		[USB_SPEED_VARIABLE] = 1024,
3311		[USB_SPEED_SUPER] = 1024,
3312	};
3313
3314	static const uint16_t isoc_range_max[USB_SPEED_MAX] = {
3315		[USB_SPEED_LOW] = 0,	/* invalid */
3316		[USB_SPEED_FULL] = 1023,
3317		[USB_SPEED_HIGH] = 1024,
3318		[USB_SPEED_VARIABLE] = 3584,
3319		[USB_SPEED_SUPER] = 1024,
3320	};
3321
3322	static const uint16_t control_min[USB_SPEED_MAX] = {
3323		[USB_SPEED_LOW] = 8,
3324		[USB_SPEED_FULL] = 8,
3325		[USB_SPEED_HIGH] = 64,
3326		[USB_SPEED_VARIABLE] = 512,
3327		[USB_SPEED_SUPER] = 512,
3328	};
3329
3330	static const uint16_t bulk_min[USB_SPEED_MAX] = {
3331		[USB_SPEED_LOW] = 8,
3332		[USB_SPEED_FULL] = 8,
3333		[USB_SPEED_HIGH] = 512,
3334		[USB_SPEED_VARIABLE] = 512,
3335		[USB_SPEED_SUPER] = 1024,
3336	};
3337
3338	uint16_t temp;
3339
3340	memset(ptr, 0, sizeof(*ptr));
3341
3342	switch (type) {
3343	case UE_INTERRUPT:
3344		ptr->range.max = intr_range_max[speed];
3345		break;
3346	case UE_ISOCHRONOUS:
3347		ptr->range.max = isoc_range_max[speed];
3348		break;
3349	default:
3350		if (type == UE_BULK)
3351			temp = bulk_min[speed];
3352		else /* UE_CONTROL */
3353			temp = control_min[speed];
3354
3355		/* default is fixed */
3356		ptr->fixed[0] = temp;
3357		ptr->fixed[1] = temp;
3358		ptr->fixed[2] = temp;
3359		ptr->fixed[3] = temp;
3360
3361		if (speed == USB_SPEED_FULL) {
3362			/* multiple sizes */
3363			ptr->fixed[1] = 16;
3364			ptr->fixed[2] = 32;
3365			ptr->fixed[3] = 64;
3366		}
3367		if ((speed == USB_SPEED_VARIABLE) &&
3368		    (type == UE_BULK)) {
3369			/* multiple sizes */
3370			ptr->fixed[2] = 1024;
3371			ptr->fixed[3] = 1536;
3372		}
3373		break;
3374	}
3375}
3376
3377void	*
3378usbd_xfer_softc(struct usb_xfer *xfer)
3379{
3380	return (xfer->priv_sc);
3381}
3382
3383void *
3384usbd_xfer_get_priv(struct usb_xfer *xfer)
3385{
3386	return (xfer->priv_fifo);
3387}
3388
3389void
3390usbd_xfer_set_priv(struct usb_xfer *xfer, void *ptr)
3391{
3392	xfer->priv_fifo = ptr;
3393}
3394
3395uint8_t
3396usbd_xfer_state(struct usb_xfer *xfer)
3397{
3398	return (xfer->usb_state);
3399}
3400
3401void
3402usbd_xfer_set_flag(struct usb_xfer *xfer, int flag)
3403{
3404	switch (flag) {
3405		case USB_FORCE_SHORT_XFER:
3406			xfer->flags.force_short_xfer = 1;
3407			break;
3408		case USB_SHORT_XFER_OK:
3409			xfer->flags.short_xfer_ok = 1;
3410			break;
3411		case USB_MULTI_SHORT_OK:
3412			xfer->flags.short_frames_ok = 1;
3413			break;
3414		case USB_MANUAL_STATUS:
3415			xfer->flags.manual_status = 1;
3416			break;
3417	}
3418}
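/*
 * Editor's note: a short sketch of how these flag helpers are
 * typically used from the USB_ST_SETUP case of a callback, for
 * example to accept a short incoming bulk transfer:
 *
 *	usbd_xfer_set_flag(xfer, USB_SHORT_XFER_OK);
 *	usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
 *	usbd_transfer_submit(xfer);
 */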
3419
3420void
3421usbd_xfer_clr_flag(struct usb_xfer *xfer, int flag)
3422{
3423	switch (flag) {
3424		case USB_FORCE_SHORT_XFER:
3425			xfer->flags.force_short_xfer = 0;
3426			break;
3427		case USB_SHORT_XFER_OK:
3428			xfer->flags.short_xfer_ok = 0;
3429			break;
3430		case USB_MULTI_SHORT_OK:
3431			xfer->flags.short_frames_ok = 0;
3432			break;
3433		case USB_MANUAL_STATUS:
3434			xfer->flags.manual_status = 0;
3435			break;
3436	}
3437}
3438
3439/*
3440 * The following function returns, in milliseconds, when the isochronous
3441 * transfer was completed by the hardware. The returned value wraps
3442 * around at 65536 milliseconds.
3443 */
3444uint16_t
3445usbd_xfer_get_timestamp(struct usb_xfer *xfer)
3446{
3447	return (xfer->isoc_time_complete);
3448}
3449
3450/*
3451 * The following function returns non-zero if the max packet size
3452 * field was clamped to a valid value. Otherwise it returns zero.
3453 */
3454uint8_t
3455usbd_xfer_maxp_was_clamped(struct usb_xfer *xfer)
3456{
3457	return (xfer->flags_int.maxp_was_clamped);
3458}
3459