a10_mmc.c revision 308274
1/*-
2 * Copyright (c) 2013 Alexander Fedorov
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: stable/11/sys/arm/allwinner/a10_mmc.c 308274 2016-11-04 00:54:21Z manu $");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/bus.h>
33#include <sys/kernel.h>
34#include <sys/lock.h>
35#include <sys/malloc.h>
36#include <sys/module.h>
37#include <sys/mutex.h>
38#include <sys/resource.h>
39#include <sys/rman.h>
40#include <sys/sysctl.h>
41
42#include <machine/bus.h>
43
44#include <dev/ofw/ofw_bus.h>
45#include <dev/ofw/ofw_bus_subr.h>
46
47#include <dev/mmc/bridge.h>
48#include <dev/mmc/mmcreg.h>
49#include <dev/mmc/mmcbrvar.h>
50
51#include <arm/allwinner/aw_machdep.h>
52#include <arm/allwinner/a10_mmc.h>
53#include <dev/extres/clk/clk.h>
54#include <dev/extres/hwreset/hwreset.h>
55
/* Resource table indices and DMA tuning parameters. */
#define	A10_MMC_MEMRES		0	/* index of the memory resource */
#define	A10_MMC_IRQRES		1	/* index of the IRQ resource */
#define	A10_MMC_RESSZ		2	/* number of entries in a10_res[] */
#define	A10_MMC_DMA_SEGS	16	/* max. DMA segments per transfer */
#define	A10_MMC_DMA_MAX_SIZE	0x2000	/* max. bytes per DMA segment */
#define	A10_MMC_DMA_FTRGLEVEL	0x20070008 /* value written to the FTRGL register */

/* Card identification clock rate (Hz), programmed at attach time. */
#define	CARD_ID_FREQUENCY	400000

/* Non-zero forces PIO transfers; also set when DMA setup fails at attach. */
static int a10_mmc_pio_mode = 0;

TUNABLE_INT("hw.a10.mmc.pio_mode", &a10_mmc_pio_mode);

/* FDT compatible strings accepted by this driver. */
static struct ofw_compat_data compat_data[] = {
	{"allwinner,sun4i-a10-mmc", 1},
	{"allwinner,sun5i-a13-mmc", 1},
	{NULL,             0}
};
74
/* Per-controller software state. */
struct a10_mmc_softc {
	bus_space_handle_t	a10_bsh;	/* register window handle */
	bus_space_tag_t		a10_bst;	/* register window tag */
	device_t		a10_dev;
	clk_t			a10_clk_ahb;	/* AHB bus clock */
	clk_t			a10_clk_mmc;	/* MMC module clock */
	hwreset_t		a10_rst_ahb;	/* AHB reset line (optional) */
	int			a10_bus_busy;	/* non-zero while a child owns the host */
	int			a10_id;		/* controller unit number (0-3) */
	int			a10_resid;	/* 32-bit words moved so far in PIO mode */
	int			a10_timeout;	/* request timeout, seconds (sysctl) */
	struct callout		a10_timeoutc;	/* request timeout callout */
	struct mmc_host		a10_host;
	struct mmc_request *	a10_req;	/* in-flight request, NULL when idle */
	struct mtx		a10_mtx;	/* protects request/interrupt state */
	struct resource *	a10_res[A10_MMC_RESSZ];
	uint32_t		a10_intr;	/* RINTR bits accumulated for the request */
	uint32_t		a10_intr_wait;	/* RINTR bits required for completion */
	void *			a10_intrhand;
	bus_size_t		a10_fifo_reg;	/* FIFO register offset (SoC dependent) */

	/* Fields required for DMA access. */
	bus_addr_t	  	a10_dma_desc_phys;	/* descriptor ring bus address */
	bus_dmamap_t		a10_dma_map;		/* descriptor ring map */
	bus_dma_tag_t 		a10_dma_tag;		/* descriptor ring tag */
	void * 			a10_dma_desc;		/* descriptor ring KVA */
	bus_dmamap_t		a10_dma_buf_map;	/* data buffer map */
	bus_dma_tag_t		a10_dma_buf_tag;	/* data buffer tag */
	int			a10_dma_inuse;		/* current request uses DMA */
	int			a10_dma_map_err;	/* error reported by load callback */
};
106
/* Memory and (shareable) IRQ resources used by the controller. */
static struct resource_spec a10_mmc_res_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,	0 }
};

/* Device interface. */
static int a10_mmc_probe(device_t);
static int a10_mmc_attach(device_t);
static int a10_mmc_detach(device_t);
/* Internal helpers. */
static int a10_mmc_setup_dma(struct a10_mmc_softc *);
static int a10_mmc_reset(struct a10_mmc_softc *);
static void a10_mmc_intr(void *);
static int a10_mmc_update_clock(struct a10_mmc_softc *);

/* mmcbr bridge interface. */
static int a10_mmc_update_ios(device_t, device_t);
static int a10_mmc_request(device_t, device_t, struct mmc_request *);
static int a10_mmc_get_ro(device_t, device_t);
static int a10_mmc_acquire_host(device_t, device_t);
static int a10_mmc_release_host(device_t, device_t);

/* Softc lock helpers and 32-bit register accessors. */
#define	A10_MMC_LOCK(_sc)	mtx_lock(&(_sc)->a10_mtx)
#define	A10_MMC_UNLOCK(_sc)	mtx_unlock(&(_sc)->a10_mtx)
#define	A10_MMC_READ_4(_sc, _reg)					\
	bus_space_read_4((_sc)->a10_bst, (_sc)->a10_bsh, _reg)
#define	A10_MMC_WRITE_4(_sc, _reg, _value)				\
	bus_space_write_4((_sc)->a10_bst, (_sc)->a10_bsh, _reg, _value)
133
134static int
135a10_mmc_probe(device_t dev)
136{
137
138	if (!ofw_bus_status_okay(dev))
139		return (ENXIO);
140	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
141		return (ENXIO);
142
143	device_set_desc(dev, "Allwinner Integrated MMC/SD controller");
144
145	return (BUS_PROBE_DEFAULT);
146}
147
148static int
149a10_mmc_attach(device_t dev)
150{
151	device_t child;
152	struct a10_mmc_softc *sc;
153	struct sysctl_ctx_list *ctx;
154	struct sysctl_oid_list *tree;
155	uint32_t bus_width;
156	phandle_t node;
157	int error;
158
159	node = ofw_bus_get_node(dev);
160	sc = device_get_softc(dev);
161	sc->a10_dev = dev;
162	sc->a10_req = NULL;
163	sc->a10_id = device_get_unit(dev);
164	if (sc->a10_id > 3) {
165		device_printf(dev, "only 4 hosts are supported (0-3)\n");
166		return (ENXIO);
167	}
168	if (bus_alloc_resources(dev, a10_mmc_res_spec, sc->a10_res) != 0) {
169		device_printf(dev, "cannot allocate device resources\n");
170		return (ENXIO);
171	}
172	sc->a10_bst = rman_get_bustag(sc->a10_res[A10_MMC_MEMRES]);
173	sc->a10_bsh = rman_get_bushandle(sc->a10_res[A10_MMC_MEMRES]);
174	if (bus_setup_intr(dev, sc->a10_res[A10_MMC_IRQRES],
175	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, a10_mmc_intr, sc,
176	    &sc->a10_intrhand)) {
177		bus_release_resources(dev, a10_mmc_res_spec, sc->a10_res);
178		device_printf(dev, "cannot setup interrupt handler\n");
179		return (ENXIO);
180	}
181	mtx_init(&sc->a10_mtx, device_get_nameunit(sc->a10_dev), "a10_mmc",
182	    MTX_DEF);
183	callout_init_mtx(&sc->a10_timeoutc, &sc->a10_mtx, 0);
184
185	/*
186	 * Later chips use a different FIFO offset. Unfortunately the FDT
187	 * uses the same compatible string for old and new implementations.
188	 */
189	switch (allwinner_soc_family()) {
190	case ALLWINNERSOC_SUN4I:
191	case ALLWINNERSOC_SUN5I:
192	case ALLWINNERSOC_SUN7I:
193		sc->a10_fifo_reg = A10_MMC_FIFO;
194		break;
195	default:
196		sc->a10_fifo_reg = A31_MMC_FIFO;
197		break;
198	}
199
200	/* De-assert reset */
201	if (hwreset_get_by_ofw_name(dev, "ahb", &sc->a10_rst_ahb) == 0) {
202		error = hwreset_deassert(sc->a10_rst_ahb);
203		if (error != 0) {
204			device_printf(dev, "cannot de-assert reset\n");
205			return (error);
206		}
207	}
208
209	/* Activate the module clock. */
210	error = clk_get_by_ofw_name(dev, "ahb", &sc->a10_clk_ahb);
211	if (error != 0) {
212		device_printf(dev, "cannot get ahb clock\n");
213		goto fail;
214	}
215	error = clk_enable(sc->a10_clk_ahb);
216	if (error != 0) {
217		device_printf(dev, "cannot enable ahb clock\n");
218		goto fail;
219	}
220	error = clk_get_by_ofw_name(dev, "mmc", &sc->a10_clk_mmc);
221	if (error != 0) {
222		device_printf(dev, "cannot get mmc clock\n");
223		goto fail;
224	}
225	error = clk_set_freq(sc->a10_clk_mmc, CARD_ID_FREQUENCY,
226	    CLK_SET_ROUND_DOWN);
227	if (error != 0) {
228		device_printf(dev, "cannot init mmc clock\n");
229		goto fail;
230	}
231	error = clk_enable(sc->a10_clk_mmc);
232	if (error != 0) {
233		device_printf(dev, "cannot enable mmc clock\n");
234		goto fail;
235	}
236
237	sc->a10_timeout = 10;
238	ctx = device_get_sysctl_ctx(dev);
239	tree = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
240	SYSCTL_ADD_INT(ctx, tree, OID_AUTO, "req_timeout", CTLFLAG_RW,
241	    &sc->a10_timeout, 0, "Request timeout in seconds");
242
243	/* Reset controller. */
244	if (a10_mmc_reset(sc) != 0) {
245		device_printf(dev, "cannot reset the controller\n");
246		goto fail;
247	}
248
249	if (a10_mmc_pio_mode == 0 && a10_mmc_setup_dma(sc) != 0) {
250		device_printf(sc->a10_dev, "Couldn't setup DMA!\n");
251		a10_mmc_pio_mode = 1;
252	}
253	if (bootverbose)
254		device_printf(sc->a10_dev, "DMA status: %s\n",
255		    a10_mmc_pio_mode ? "disabled" : "enabled");
256
257	if (OF_getencprop(node, "bus-width", &bus_width, sizeof(uint32_t)) <= 0)
258		bus_width = 1;
259
260	sc->a10_host.f_min = 400000;
261	sc->a10_host.f_max = 50000000;
262	sc->a10_host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
263	sc->a10_host.mode = mode_sd;
264	sc->a10_host.caps = MMC_CAP_HSPEED;
265	if (bus_width >= 4)
266		sc->a10_host.caps |= MMC_CAP_4_BIT_DATA;
267	if (bus_width >= 8)
268		sc->a10_host.caps |= MMC_CAP_8_BIT_DATA;
269
270	child = device_add_child(dev, "mmc", -1);
271	if (child == NULL) {
272		device_printf(dev, "attaching MMC bus failed!\n");
273		goto fail;
274	}
275	if (device_probe_and_attach(child) != 0) {
276		device_printf(dev, "attaching MMC child failed!\n");
277		device_delete_child(dev, child);
278		goto fail;
279	}
280
281	return (0);
282
283fail:
284	callout_drain(&sc->a10_timeoutc);
285	mtx_destroy(&sc->a10_mtx);
286	bus_teardown_intr(dev, sc->a10_res[A10_MMC_IRQRES], sc->a10_intrhand);
287	bus_release_resources(dev, a10_mmc_res_spec, sc->a10_res);
288
289	return (ENXIO);
290}
291
/*
 * Detach is not supported; once attached the controller stays for the
 * lifetime of the system.
 */
static int
a10_mmc_detach(device_t dev)
{

	return (EBUSY);
}
298
299static void
300a10_dma_desc_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
301{
302	struct a10_mmc_softc *sc;
303
304	sc = (struct a10_mmc_softc *)arg;
305	if (err) {
306		sc->a10_dma_map_err = err;
307		return;
308	}
309	sc->a10_dma_desc_phys = segs[0].ds_addr;
310}
311
312static int
313a10_mmc_setup_dma(struct a10_mmc_softc *sc)
314{
315	int dma_desc_size, error;
316
317	/* Allocate the DMA descriptor memory. */
318	dma_desc_size = sizeof(struct a10_mmc_dma_desc) * A10_MMC_DMA_SEGS;
319	error = bus_dma_tag_create(bus_get_dma_tag(sc->a10_dev), 1, 0,
320	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
321	    dma_desc_size, 1, dma_desc_size, 0, NULL, NULL, &sc->a10_dma_tag);
322	if (error)
323		return (error);
324	error = bus_dmamem_alloc(sc->a10_dma_tag, &sc->a10_dma_desc,
325	    BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->a10_dma_map);
326	if (error)
327		return (error);
328
329	error = bus_dmamap_load(sc->a10_dma_tag, sc->a10_dma_map,
330	    sc->a10_dma_desc, dma_desc_size, a10_dma_desc_cb, sc, 0);
331	if (error)
332		return (error);
333	if (sc->a10_dma_map_err)
334		return (sc->a10_dma_map_err);
335
336	/* Create the DMA map for data transfers. */
337	error = bus_dma_tag_create(bus_get_dma_tag(sc->a10_dev), 1, 0,
338	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
339	    A10_MMC_DMA_MAX_SIZE * A10_MMC_DMA_SEGS, A10_MMC_DMA_SEGS,
340	    A10_MMC_DMA_MAX_SIZE, BUS_DMA_ALLOCNOW, NULL, NULL,
341	    &sc->a10_dma_buf_tag);
342	if (error)
343		return (error);
344	error = bus_dmamap_create(sc->a10_dma_buf_tag, 0,
345	    &sc->a10_dma_buf_map);
346	if (error)
347		return (error);
348
349	return (0);
350}
351
352static void
353a10_dma_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
354{
355	int i;
356	struct a10_mmc_dma_desc *dma_desc;
357	struct a10_mmc_softc *sc;
358
359	sc = (struct a10_mmc_softc *)arg;
360	sc->a10_dma_map_err = err;
361	dma_desc = sc->a10_dma_desc;
362	/* Note nsegs is guaranteed to be zero if err is non-zero. */
363	for (i = 0; i < nsegs; i++) {
364		dma_desc[i].buf_size = segs[i].ds_len;
365		dma_desc[i].buf_addr = segs[i].ds_addr;
366		dma_desc[i].config = A10_MMC_DMA_CONFIG_CH |
367		    A10_MMC_DMA_CONFIG_OWN;
368		if (i == 0)
369			dma_desc[i].config |= A10_MMC_DMA_CONFIG_FD;
370		if (i < (nsegs - 1)) {
371			dma_desc[i].config |= A10_MMC_DMA_CONFIG_DIC;
372			dma_desc[i].next = sc->a10_dma_desc_phys +
373			    ((i + 1) * sizeof(struct a10_mmc_dma_desc));
374		} else {
375			dma_desc[i].config |= A10_MMC_DMA_CONFIG_LD |
376			    A10_MMC_DMA_CONFIG_ER;
377			dma_desc[i].next = 0;
378		}
379 	}
380}
381
/*
 * Set up a DMA transfer for the data phase of the current request: load
 * the data buffer, fill the descriptor ring (via a10_dma_cb), switch the
 * controller from AHB (PIO) access to internal DMA and program the
 * descriptor list address.  Returns 0 on success or an errno value.
 */
static int
a10_mmc_prepare_dma(struct a10_mmc_softc *sc)
{
	bus_dmasync_op_t sync_op;
	int error;
	struct mmc_command *cmd;
	uint32_t val;

	cmd = sc->a10_req->cmd;
	/* Transfers larger than the descriptor ring can map are rejected. */
	if (cmd->data->len > A10_MMC_DMA_MAX_SIZE * A10_MMC_DMA_SEGS)
		return (EFBIG);
	error = bus_dmamap_load(sc->a10_dma_buf_tag, sc->a10_dma_buf_map,
	    cmd->data->data, cmd->data->len, a10_dma_cb, sc, BUS_DMA_NOWAIT);
	if (error)
		return (error);
	/* a10_dma_cb may have reported a deferred mapping error. */
	if (sc->a10_dma_map_err)
		return (sc->a10_dma_map_err);

	sc->a10_dma_inuse = 1;
	if (cmd->data->flags & MMC_DATA_WRITE)
		sync_op = BUS_DMASYNC_PREWRITE;
	else
		sync_op = BUS_DMASYNC_PREREAD;
	bus_dmamap_sync(sc->a10_dma_buf_tag, sc->a10_dma_buf_map, sync_op);
	bus_dmamap_sync(sc->a10_dma_tag, sc->a10_dma_map, BUS_DMASYNC_PREWRITE);

	/* Mask the PIO FIFO interrupts; DMA completion is reported in IDST. */
	val = A10_MMC_READ_4(sc, A10_MMC_IMASK);
	val &= ~(A10_MMC_RX_DATA_REQ | A10_MMC_TX_DATA_REQ);
	A10_MMC_WRITE_4(sc, A10_MMC_IMASK, val);
	/* Select DMA access, then reset and enable the DMA engine. */
	val = A10_MMC_READ_4(sc, A10_MMC_GCTRL);
	val &= ~A10_MMC_ACCESS_BY_AHB;
	val |= A10_MMC_DMA_ENABLE;
	A10_MMC_WRITE_4(sc, A10_MMC_GCTRL, val);
	val |= A10_MMC_DMA_RESET;
	A10_MMC_WRITE_4(sc, A10_MMC_GCTRL, val);
	A10_MMC_WRITE_4(sc, A10_MMC_DMAC, A10_MMC_IDMAC_SOFT_RST);
	A10_MMC_WRITE_4(sc, A10_MMC_DMAC,
	    A10_MMC_IDMAC_IDMA_ON | A10_MMC_IDMAC_FIX_BURST);
	/* Enable only the IDMAC interrupt matching the transfer direction. */
	val = A10_MMC_READ_4(sc, A10_MMC_IDIE);
	val &= ~(A10_MMC_IDMAC_RECEIVE_INT | A10_MMC_IDMAC_TRANSMIT_INT);
	if (cmd->data->flags & MMC_DATA_WRITE)
		val |= A10_MMC_IDMAC_TRANSMIT_INT;
	else
		val |= A10_MMC_IDMAC_RECEIVE_INT;
	A10_MMC_WRITE_4(sc, A10_MMC_IDIE, val);
	/* Point the controller at the descriptor ring and set the FIFO
	 * trigger level. */
	A10_MMC_WRITE_4(sc, A10_MMC_DLBA, sc->a10_dma_desc_phys);
	A10_MMC_WRITE_4(sc, A10_MMC_FTRGL, A10_MMC_DMA_FTRGLEVEL);

	return (0);
}
432
/*
 * Soft-reset the controller and reprogram timeout and interrupt state.
 * Returns 0 on success or ETIMEDOUT if the reset bit never self-clears.
 */
static int
a10_mmc_reset(struct a10_mmc_softc *sc)
{
	int timeout;

	A10_MMC_WRITE_4(sc, A10_MMC_GCTRL,
	    A10_MMC_READ_4(sc, A10_MMC_GCTRL) | A10_MMC_RESET);
	/* Wait up to ~100ms for the hardware to clear the reset bit. */
	timeout = 1000;
	while (--timeout > 0) {
		if ((A10_MMC_READ_4(sc, A10_MMC_GCTRL) & A10_MMC_RESET) == 0)
			break;
		DELAY(100);
	}
	if (timeout == 0)
		return (ETIMEDOUT);

	/* Set the timeout. */
	A10_MMC_WRITE_4(sc, A10_MMC_TIMEOUT, 0xffffffff);

	/* Clear pending interrupts. */
	A10_MMC_WRITE_4(sc, A10_MMC_RINTR, 0xffffffff);
	A10_MMC_WRITE_4(sc, A10_MMC_IDST, 0xffffffff);
	/* Unmask interrupts. */
	A10_MMC_WRITE_4(sc, A10_MMC_IMASK,
	    A10_MMC_CMD_DONE | A10_MMC_INT_ERR_BIT |
	    A10_MMC_DATA_OVER | A10_MMC_AUTOCMD_DONE);
	/* Enable interrupts and AHB access. */
	A10_MMC_WRITE_4(sc, A10_MMC_GCTRL,
	    A10_MMC_READ_4(sc, A10_MMC_GCTRL) | A10_MMC_INT_ENABLE);

	return (0);
}
465
/*
 * Complete the current request: recover the controller if the command
 * failed, clear the per-request state and invoke the request's done
 * callback.  Called with the softc lock held.
 */
static void
a10_mmc_req_done(struct a10_mmc_softc *sc)
{
	struct mmc_command *cmd;
	struct mmc_request *req;

	cmd = sc->a10_req->cmd;
	if (cmd->error != MMC_ERR_NONE) {
		/* Reset the controller. */
		a10_mmc_reset(sc);
		a10_mmc_update_clock(sc);
	}
	if (sc->a10_dma_inuse == 0) {
		/* Reset the FIFO. */
		A10_MMC_WRITE_4(sc, A10_MMC_GCTRL,
		    A10_MMC_READ_4(sc, A10_MMC_GCTRL) | A10_MMC_FIFO_RESET);
	}

	/* Clear all request state before handing the request back. */
	req = sc->a10_req;
	callout_stop(&sc->a10_timeoutc);
	sc->a10_req = NULL;
	sc->a10_intr = 0;
	sc->a10_resid = 0;
	sc->a10_dma_inuse = 0;
	sc->a10_dma_map_err = 0;
	sc->a10_intr_wait = 0;
	req->done(req);
}
494
/*
 * Finish a request whose awaited interrupt bits have all arrived: wait
 * for the card to leave the busy state, read back the response registers
 * and verify that all data was transferred, then complete the request.
 */
static void
a10_mmc_req_ok(struct a10_mmc_softc *sc)
{
	int timeout;
	struct mmc_command *cmd;
	uint32_t status;

	/* Wait up to ~1s for the card-busy bit to clear. */
	timeout = 1000;
	while (--timeout > 0) {
		status = A10_MMC_READ_4(sc, A10_MMC_STAS);
		if ((status & A10_MMC_CARD_DATA_BUSY) == 0)
			break;
		DELAY(1000);
	}
	cmd = sc->a10_req->cmd;
	if (timeout == 0) {
		cmd->error = MMC_ERR_FAILED;
		a10_mmc_req_done(sc);
		return;
	}
	if (cmd->flags & MMC_RSP_PRESENT) {
		/* Long (136-bit) responses span RESP3..RESP0. */
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[0] = A10_MMC_READ_4(sc, A10_MMC_RESP3);
			cmd->resp[1] = A10_MMC_READ_4(sc, A10_MMC_RESP2);
			cmd->resp[2] = A10_MMC_READ_4(sc, A10_MMC_RESP1);
			cmd->resp[3] = A10_MMC_READ_4(sc, A10_MMC_RESP0);
		} else
			cmd->resp[0] = A10_MMC_READ_4(sc, A10_MMC_RESP0);
	}
	/* All data has been transferred ? (a10_resid counts 32-bit words) */
	if (cmd->data != NULL && (sc->a10_resid << 2) < cmd->data->len)
		cmd->error = MMC_ERR_FAILED;
	a10_mmc_req_done(sc);
}
529
530static void
531a10_mmc_timeout(void *arg)
532{
533	struct a10_mmc_softc *sc;
534
535	sc = (struct a10_mmc_softc *)arg;
536	if (sc->a10_req != NULL) {
537		device_printf(sc->a10_dev, "controller timeout\n");
538		sc->a10_req->cmd->error = MMC_ERR_TIMEOUT;
539		a10_mmc_req_done(sc);
540	} else
541		device_printf(sc->a10_dev,
542		    "Spurious timeout - no active request\n");
543}
544
/*
 * Move data between the controller FIFO and the request buffer one
 * 32-bit word at a time, resuming at the word index saved in a10_resid.
 * Returns 1 if the FIFO filled (write) or drained (read) before the
 * transfer finished, 0 once every word has been moved.
 */
static int
a10_mmc_pio_transfer(struct a10_mmc_softc *sc, struct mmc_data *data)
{
	int i, write;
	uint32_t bit, *buf;

	buf = (uint32_t *)data->data;
	write = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
	bit = write ? A10_MMC_FIFO_FULL : A10_MMC_FIFO_EMPTY;
	for (i = sc->a10_resid; i < (data->len >> 2); i++) {
		/* Stop when the FIFO can take/give no more for now. */
		if ((A10_MMC_READ_4(sc, A10_MMC_STAS) & bit))
			return (1);
		if (write)
			A10_MMC_WRITE_4(sc, sc->a10_fifo_reg, buf[i]);
		else
			buf[i] = A10_MMC_READ_4(sc, sc->a10_fifo_reg);
		sc->a10_resid = i + 1;
	}

	return (0);
}
566
/*
 * Interrupt handler.  Reads the raw (RINTR) and DMA (IDST) status,
 * fails the request on any error bit, otherwise advances the data phase
 * (DMA post-sync/unload or PIO FIFO service) and completes the request
 * once every awaited interrupt bit has been seen.  Status registers are
 * acknowledged by writing the observed bits back at the end.
 */
static void
a10_mmc_intr(void *arg)
{
	bus_dmasync_op_t sync_op;
	struct a10_mmc_softc *sc;
	struct mmc_data *data;
	uint32_t idst, imask, rint;

	sc = (struct a10_mmc_softc *)arg;
	A10_MMC_LOCK(sc);
	rint = A10_MMC_READ_4(sc, A10_MMC_RINTR);
	idst = A10_MMC_READ_4(sc, A10_MMC_IDST);
	imask = A10_MMC_READ_4(sc, A10_MMC_IMASK);
	/* Nothing pending: the (shared) interrupt was not for us. */
	if (idst == 0 && imask == 0 && rint == 0) {
		A10_MMC_UNLOCK(sc);
		return;
	}
#ifdef DEBUG
	device_printf(sc->a10_dev, "idst: %#x, imask: %#x, rint: %#x\n",
	    idst, imask, rint);
#endif
	if (sc->a10_req == NULL) {
		device_printf(sc->a10_dev,
		    "Spurious interrupt - no active request, rint: 0x%08X\n",
		    rint);
		goto end;
	}
	/* Command/data error reported by the controller. */
	if (rint & A10_MMC_INT_ERR_BIT) {
		device_printf(sc->a10_dev, "error rint: 0x%08X\n", rint);
		if (rint & A10_MMC_RESP_TIMEOUT)
			sc->a10_req->cmd->error = MMC_ERR_TIMEOUT;
		else
			sc->a10_req->cmd->error = MMC_ERR_FAILED;
		a10_mmc_req_done(sc);
		goto end;
	}
	/* Error reported by the internal DMA engine. */
	if (idst & A10_MMC_IDMAC_ERROR) {
		device_printf(sc->a10_dev, "error idst: 0x%08x\n", idst);
		sc->a10_req->cmd->error = MMC_ERR_FAILED;
		a10_mmc_req_done(sc);
		goto end;
	}

	sc->a10_intr |= rint;
	data = sc->a10_req->cmd->data;
	if (data != NULL && sc->a10_dma_inuse == 1 &&
	    (idst & A10_MMC_IDMAC_COMPLETE)) {
		/* DMA finished: sync and unload the buffer map. */
		if (data->flags & MMC_DATA_WRITE)
			sync_op = BUS_DMASYNC_POSTWRITE;
		else
			sync_op = BUS_DMASYNC_POSTREAD;
		bus_dmamap_sync(sc->a10_dma_buf_tag, sc->a10_dma_buf_map,
		    sync_op);
		bus_dmamap_sync(sc->a10_dma_tag, sc->a10_dma_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->a10_dma_buf_tag, sc->a10_dma_buf_map);
		sc->a10_resid = data->len >> 2;
	} else if (data != NULL && sc->a10_dma_inuse == 0 &&
	    (rint & (A10_MMC_DATA_OVER | A10_MMC_RX_DATA_REQ |
	    A10_MMC_TX_DATA_REQ)) != 0)
		a10_mmc_pio_transfer(sc, data);
	/* Complete the request once all awaited bits have arrived. */
	if ((sc->a10_intr & sc->a10_intr_wait) == sc->a10_intr_wait)
		a10_mmc_req_ok(sc);

end:
	A10_MMC_WRITE_4(sc, A10_MMC_IDST, idst);
	A10_MMC_WRITE_4(sc, A10_MMC_RINTR, rint);
	A10_MMC_UNLOCK(sc);
}
636
/*
 * mmcbr request method: program and start one MMC command, optionally
 * with a data phase (DMA when available, PIO otherwise).  Only one
 * request may be in flight; completion is driven by the interrupt
 * handler, with the callout as a backstop.  Returns EBUSY if a request
 * is already active.
 */
static int
a10_mmc_request(device_t bus, device_t child, struct mmc_request *req)
{
	int blksz;
	struct a10_mmc_softc *sc;
	struct mmc_command *cmd;
	uint32_t cmdreg, val;

	sc = device_get_softc(bus);
	A10_MMC_LOCK(sc);
	if (sc->a10_req) {
		A10_MMC_UNLOCK(sc);
		return (EBUSY);
	}
	sc->a10_req = req;
	cmd = req->cmd;
	/* Translate the command flags into the CMDR register bits. */
	cmdreg = A10_MMC_START;
	if (cmd->opcode == MMC_GO_IDLE_STATE)
		cmdreg |= A10_MMC_SEND_INIT_SEQ;
	if (cmd->flags & MMC_RSP_PRESENT)
		cmdreg |= A10_MMC_RESP_EXP;
	if (cmd->flags & MMC_RSP_136)
		cmdreg |= A10_MMC_LONG_RESP;
	if (cmd->flags & MMC_RSP_CRC)
		cmdreg |= A10_MMC_CHECK_RESP_CRC;

	sc->a10_intr = 0;
	sc->a10_resid = 0;
	sc->a10_intr_wait = A10_MMC_CMD_DONE;
	cmd->error = MMC_ERR_NONE;
	if (cmd->data != NULL) {
		sc->a10_intr_wait |= A10_MMC_DATA_OVER;
		cmdreg |= A10_MMC_DATA_EXP | A10_MMC_WAIT_PREOVER;
		if (cmd->data->flags & MMC_DATA_MULTI) {
			cmdreg |= A10_MMC_SEND_AUTOSTOP;
			sc->a10_intr_wait |= A10_MMC_AUTOCMD_DONE;
		}
		if (cmd->data->flags & MMC_DATA_WRITE)
			cmdreg |= A10_MMC_WRITE;
		blksz = min(cmd->data->len, MMC_SECTOR_SIZE);
		A10_MMC_WRITE_4(sc, A10_MMC_BLKSZ, blksz);
		A10_MMC_WRITE_4(sc, A10_MMC_BCNTR, cmd->data->len);

		/* DMA preparation may fail; a10_dma_inuse stays 0 then. */
		if (a10_mmc_pio_mode == 0)
			a10_mmc_prepare_dma(sc);
		/* Enable PIO access if sc->a10_dma_inuse is not set. */
		if (sc->a10_dma_inuse == 0) {
			val = A10_MMC_READ_4(sc, A10_MMC_GCTRL);
			val &= ~A10_MMC_DMA_ENABLE;
			val |= A10_MMC_ACCESS_BY_AHB;
			A10_MMC_WRITE_4(sc, A10_MMC_GCTRL, val);
			val = A10_MMC_READ_4(sc, A10_MMC_IMASK);
			val |= A10_MMC_RX_DATA_REQ | A10_MMC_TX_DATA_REQ;
			A10_MMC_WRITE_4(sc, A10_MMC_IMASK, val);
		}
	}

	/* Fire the command and arm the timeout backstop. */
	A10_MMC_WRITE_4(sc, A10_MMC_CARG, cmd->arg);
	A10_MMC_WRITE_4(sc, A10_MMC_CMDR, cmdreg | cmd->opcode);
	callout_reset(&sc->a10_timeoutc, sc->a10_timeout * hz,
	    a10_mmc_timeout, sc);
	A10_MMC_UNLOCK(sc);

	return (0);
}
702
703static int
704a10_mmc_read_ivar(device_t bus, device_t child, int which,
705    uintptr_t *result)
706{
707	struct a10_mmc_softc *sc;
708
709	sc = device_get_softc(bus);
710	switch (which) {
711	default:
712		return (EINVAL);
713	case MMCBR_IVAR_BUS_MODE:
714		*(int *)result = sc->a10_host.ios.bus_mode;
715		break;
716	case MMCBR_IVAR_BUS_WIDTH:
717		*(int *)result = sc->a10_host.ios.bus_width;
718		break;
719	case MMCBR_IVAR_CHIP_SELECT:
720		*(int *)result = sc->a10_host.ios.chip_select;
721		break;
722	case MMCBR_IVAR_CLOCK:
723		*(int *)result = sc->a10_host.ios.clock;
724		break;
725	case MMCBR_IVAR_F_MIN:
726		*(int *)result = sc->a10_host.f_min;
727		break;
728	case MMCBR_IVAR_F_MAX:
729		*(int *)result = sc->a10_host.f_max;
730		break;
731	case MMCBR_IVAR_HOST_OCR:
732		*(int *)result = sc->a10_host.host_ocr;
733		break;
734	case MMCBR_IVAR_MODE:
735		*(int *)result = sc->a10_host.mode;
736		break;
737	case MMCBR_IVAR_OCR:
738		*(int *)result = sc->a10_host.ocr;
739		break;
740	case MMCBR_IVAR_POWER_MODE:
741		*(int *)result = sc->a10_host.ios.power_mode;
742		break;
743	case MMCBR_IVAR_VDD:
744		*(int *)result = sc->a10_host.ios.vdd;
745		break;
746	case MMCBR_IVAR_CAPS:
747		*(int *)result = sc->a10_host.caps;
748		break;
749	case MMCBR_IVAR_MAX_DATA:
750		*(int *)result = 65535;
751		break;
752	}
753
754	return (0);
755}
756
757static int
758a10_mmc_write_ivar(device_t bus, device_t child, int which,
759    uintptr_t value)
760{
761	struct a10_mmc_softc *sc;
762
763	sc = device_get_softc(bus);
764	switch (which) {
765	default:
766		return (EINVAL);
767	case MMCBR_IVAR_BUS_MODE:
768		sc->a10_host.ios.bus_mode = value;
769		break;
770	case MMCBR_IVAR_BUS_WIDTH:
771		sc->a10_host.ios.bus_width = value;
772		break;
773	case MMCBR_IVAR_CHIP_SELECT:
774		sc->a10_host.ios.chip_select = value;
775		break;
776	case MMCBR_IVAR_CLOCK:
777		sc->a10_host.ios.clock = value;
778		break;
779	case MMCBR_IVAR_MODE:
780		sc->a10_host.mode = value;
781		break;
782	case MMCBR_IVAR_OCR:
783		sc->a10_host.ocr = value;
784		break;
785	case MMCBR_IVAR_POWER_MODE:
786		sc->a10_host.ios.power_mode = value;
787		break;
788	case MMCBR_IVAR_VDD:
789		sc->a10_host.ios.vdd = value;
790		break;
791	/* These are read-only */
792	case MMCBR_IVAR_CAPS:
793	case MMCBR_IVAR_HOST_OCR:
794	case MMCBR_IVAR_F_MIN:
795	case MMCBR_IVAR_F_MAX:
796	case MMCBR_IVAR_MAX_DATA:
797		return (EINVAL);
798	}
799
800	return (0);
801}
802
803static int
804a10_mmc_update_clock(struct a10_mmc_softc *sc)
805{
806	uint32_t cmdreg;
807	int retry;
808
809	cmdreg = A10_MMC_START | A10_MMC_UPCLK_ONLY |
810	    A10_MMC_WAIT_PREOVER;
811	A10_MMC_WRITE_4(sc, A10_MMC_CMDR, cmdreg);
812	retry = 0xfffff;
813	while (--retry > 0) {
814		if ((A10_MMC_READ_4(sc, A10_MMC_CMDR) & A10_MMC_START) == 0) {
815			A10_MMC_WRITE_4(sc, A10_MMC_RINTR, 0xffffffff);
816			return (0);
817		}
818		DELAY(10);
819	}
820	A10_MMC_WRITE_4(sc, A10_MMC_RINTR, 0xffffffff);
821	device_printf(sc->a10_dev, "timeout updating clock\n");
822
823	return (ETIMEDOUT);
824}
825
/*
 * mmcbr update_ios method: apply bus clock and width changes.  The card
 * clock is gated off before the divider/frequency change and re-enabled
 * afterwards; each CLKCR change must be pushed to the card with
 * a10_mmc_update_clock().
 */
static int
a10_mmc_update_ios(device_t bus, device_t child)
{
	int error;
	struct a10_mmc_softc *sc;
	struct mmc_ios *ios;
	uint32_t clkcr;

	sc = device_get_softc(bus);
	clkcr = A10_MMC_READ_4(sc, A10_MMC_CLKCR);
	if (clkcr & A10_MMC_CARD_CLK_ON) {
		/* Disable clock. */
		clkcr &= ~A10_MMC_CARD_CLK_ON;
		A10_MMC_WRITE_4(sc, A10_MMC_CLKCR, clkcr);
		error = a10_mmc_update_clock(sc);
		if (error != 0)
			return (error);
	}

	ios = &sc->a10_host.ios;
	if (ios->clock) {
		/* Reset the divider. */
		clkcr &= ~A10_MMC_CLKCR_DIV;
		A10_MMC_WRITE_4(sc, A10_MMC_CLKCR, clkcr);
		error = a10_mmc_update_clock(sc);
		if (error != 0)
			return (error);

		/* Set the MMC clock. */
		error = clk_set_freq(sc->a10_clk_mmc, ios->clock,
		    CLK_SET_ROUND_DOWN);
		if (error != 0) {
			device_printf(sc->a10_dev,
			    "failed to set frequency to %u Hz: %d\n",
			    ios->clock, error);
			return (error);
		}

		/* Enable clock. */
		clkcr |= A10_MMC_CARD_CLK_ON;
		A10_MMC_WRITE_4(sc, A10_MMC_CLKCR, clkcr);
		error = a10_mmc_update_clock(sc);
		if (error != 0)
			return (error);
	}

	/* Set the bus width. */
	switch (ios->bus_width) {
	case bus_width_1:
		A10_MMC_WRITE_4(sc, A10_MMC_WIDTH, A10_MMC_WIDTH1);
		break;
	case bus_width_4:
		A10_MMC_WRITE_4(sc, A10_MMC_WIDTH, A10_MMC_WIDTH4);
		break;
	case bus_width_8:
		A10_MMC_WRITE_4(sc, A10_MMC_WIDTH, A10_MMC_WIDTH8);
		break;
	}

	return (0);
}
887
/*
 * mmcbr get_ro method: write-protect detection is not implemented, so
 * media is always reported as writable.
 */
static int
a10_mmc_get_ro(device_t bus, device_t child)
{

	return (0);
}
894
895static int
896a10_mmc_acquire_host(device_t bus, device_t child)
897{
898	struct a10_mmc_softc *sc;
899	int error;
900
901	sc = device_get_softc(bus);
902	A10_MMC_LOCK(sc);
903	while (sc->a10_bus_busy) {
904		error = msleep(sc, &sc->a10_mtx, PCATCH, "mmchw", 0);
905		if (error != 0) {
906			A10_MMC_UNLOCK(sc);
907			return (error);
908		}
909	}
910	sc->a10_bus_busy++;
911	A10_MMC_UNLOCK(sc);
912
913	return (0);
914}
915
916static int
917a10_mmc_release_host(device_t bus, device_t child)
918{
919	struct a10_mmc_softc *sc;
920
921	sc = device_get_softc(bus);
922	A10_MMC_LOCK(sc);
923	sc->a10_bus_busy--;
924	wakeup(sc);
925	A10_MMC_UNLOCK(sc);
926
927	return (0);
928}
929
static device_method_t a10_mmc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		a10_mmc_probe),
	DEVMETHOD(device_attach,	a10_mmc_attach),
	DEVMETHOD(device_detach,	a10_mmc_detach),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar,	a10_mmc_read_ivar),
	DEVMETHOD(bus_write_ivar,	a10_mmc_write_ivar),
	DEVMETHOD(bus_print_child,	bus_generic_print_child),

	/* MMC bridge interface */
	DEVMETHOD(mmcbr_update_ios,	a10_mmc_update_ios),
	DEVMETHOD(mmcbr_request,	a10_mmc_request),
	DEVMETHOD(mmcbr_get_ro,		a10_mmc_get_ro),
	DEVMETHOD(mmcbr_acquire_host,	a10_mmc_acquire_host),
	DEVMETHOD(mmcbr_release_host,	a10_mmc_release_host),

	DEVMETHOD_END
};

static devclass_t a10_mmc_devclass;

static driver_t a10_mmc_driver = {
	"a10_mmc",
	a10_mmc_methods,
	sizeof(struct a10_mmc_softc),
};

/* Attach below simplebus; the mmc(4) stack attaches as our child bus. */
DRIVER_MODULE(a10_mmc, simplebus, a10_mmc_driver, a10_mmc_devclass, 0, 0);
DRIVER_MODULE(mmc, a10_mmc, mmc_driver, mmc_devclass, NULL, NULL);
MODULE_DEPEND(a10_mmc, mmc, 1, 1, 1);
962