/* a10_mmc.c revision 308280 */
1/*-
2 * Copyright (c) 2013 Alexander Fedorov
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: stable/11/sys/arm/allwinner/a10_mmc.c 308280 2016-11-04 01:56:29Z manu $");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/bus.h>
33#include <sys/kernel.h>
34#include <sys/lock.h>
35#include <sys/malloc.h>
36#include <sys/module.h>
37#include <sys/mutex.h>
38#include <sys/resource.h>
39#include <sys/rman.h>
40#include <sys/sysctl.h>
41
42#include <machine/bus.h>
43
44#include <dev/ofw/ofw_bus.h>
45#include <dev/ofw/ofw_bus_subr.h>
46
47#include <dev/mmc/bridge.h>
48#include <dev/mmc/mmcreg.h>
49#include <dev/mmc/mmcbrvar.h>
50
51#include <arm/allwinner/a10_mmc.h>
52#include <dev/extres/clk/clk.h>
53#include <dev/extres/hwreset/hwreset.h>
54
/* Indices into the sc->a10_res[] resource array. */
#define	A10_MMC_MEMRES		0
#define	A10_MMC_IRQRES		1
#define	A10_MMC_RESSZ		2
/* Worst-case number of DMA segments for a MAXPHYS-sized transfer. */
#define	A10_MMC_DMA_SEGS		((MAXPHYS / PAGE_SIZE) + 1)
/* Maximum bytes a single IDMA descriptor may cover. */
#define	A10_MMC_DMA_MAX_SIZE	0x2000
/* FIFO water-level register value used for DMA transfers. */
#define	A10_MMC_DMA_FTRGLEVEL	0x20070008

/* Bus clock used while identifying the card. */
#define	CARD_ID_FREQUENCY	400000

/* FDT compatible strings this driver attaches to. */
static struct ofw_compat_data compat_data[] = {
	{"allwinner,sun4i-a10-mmc", 1},
	{"allwinner,sun5i-a13-mmc", 1},
	{NULL,             0}
};
69
/*
 * Per-device software state.  All request-related fields are protected
 * by a10_mtx; the callout shares the same mutex.
 */
struct a10_mmc_softc {
	device_t		a10_dev;	/* our device handle */
	clk_t			a10_clk_ahb;	/* AHB bus clock */
	clk_t			a10_clk_mmc;	/* module (card) clock */
	hwreset_t		a10_rst_ahb;	/* optional AHB reset line */
	int			a10_bus_busy;	/* host acquired flag/count */
	int			a10_resid;	/* words transferred by DMA */
	int			a10_timeout;	/* request timeout, seconds */
	struct callout		a10_timeoutc;	/* request watchdog */
	struct mmc_host		a10_host;	/* mmc bridge host state */
	struct mmc_request *	a10_req;	/* in-flight request, or NULL */
	struct mtx		a10_mtx;
	struct resource *	a10_res[A10_MMC_RESSZ];
	uint32_t		a10_intr;	/* accumulated RISR bits */
	uint32_t		a10_intr_wait;	/* bits required to complete */
	void *			a10_intrhand;

	/* Fields required for DMA access. */
	bus_addr_t	  	a10_dma_desc_phys;	/* descriptor ring busaddr */
	bus_dmamap_t		a10_dma_map;		/* map for the ring itself */
	bus_dma_tag_t 		a10_dma_tag;
	void * 			a10_dma_desc;		/* KVA of descriptor ring */
	bus_dmamap_t		a10_dma_buf_map;	/* map for data buffers */
	bus_dma_tag_t		a10_dma_buf_tag;
	int			a10_dma_map_err;	/* last bus_dma cb error */
};
96
/* Resources: one memory window and one (shareable) interrupt. */
static struct resource_spec a10_mmc_res_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,	0 }
};
102
/* Forward declarations. */
static int a10_mmc_probe(device_t);
static int a10_mmc_attach(device_t);
static int a10_mmc_detach(device_t);
static int a10_mmc_setup_dma(struct a10_mmc_softc *);
static int a10_mmc_reset(struct a10_mmc_softc *);
static void a10_mmc_intr(void *);
static int a10_mmc_update_clock(struct a10_mmc_softc *, uint32_t);

/* mmcbr bridge interface. */
static int a10_mmc_update_ios(device_t, device_t);
static int a10_mmc_request(device_t, device_t, struct mmc_request *);
static int a10_mmc_get_ro(device_t, device_t);
static int a10_mmc_acquire_host(device_t, device_t);
static int a10_mmc_release_host(device_t, device_t);

/* Locking and register access helpers. */
#define	A10_MMC_LOCK(_sc)	mtx_lock(&(_sc)->a10_mtx)
#define	A10_MMC_UNLOCK(_sc)	mtx_unlock(&(_sc)->a10_mtx)
#define	A10_MMC_READ_4(_sc, _reg)					\
	bus_read_4((_sc)->a10_res[A10_MMC_MEMRES], _reg)
#define	A10_MMC_WRITE_4(_sc, _reg, _value)				\
	bus_write_4((_sc)->a10_res[A10_MMC_MEMRES], _reg, _value)
123
124static int
125a10_mmc_probe(device_t dev)
126{
127
128	if (!ofw_bus_status_okay(dev))
129		return (ENXIO);
130	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
131		return (ENXIO);
132
133	device_set_desc(dev, "Allwinner Integrated MMC/SD controller");
134
135	return (BUS_PROBE_DEFAULT);
136}
137
/*
 * Attach: allocate resources, bring the controller out of reset, start
 * its clocks, reset and configure the hardware, and attach the mmc bus.
 * Returns 0 on success or ENXIO on any failure.
 */
static int
a10_mmc_attach(device_t dev)
{
	device_t child;
	struct a10_mmc_softc *sc;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *tree;
	uint32_t bus_width;
	phandle_t node;
	int error;

	node = ofw_bus_get_node(dev);
	sc = device_get_softc(dev);
	sc->a10_dev = dev;
	sc->a10_req = NULL;
	/* Map registers and hook up the interrupt handler. */
	if (bus_alloc_resources(dev, a10_mmc_res_spec, sc->a10_res) != 0) {
		device_printf(dev, "cannot allocate device resources\n");
		return (ENXIO);
	}
	if (bus_setup_intr(dev, sc->a10_res[A10_MMC_IRQRES],
	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, a10_mmc_intr, sc,
	    &sc->a10_intrhand)) {
		bus_release_resources(dev, a10_mmc_res_spec, sc->a10_res);
		device_printf(dev, "cannot setup interrupt handler\n");
		return (ENXIO);
	}
	mtx_init(&sc->a10_mtx, device_get_nameunit(sc->a10_dev), "a10_mmc",
	    MTX_DEF);
	callout_init_mtx(&sc->a10_timeoutc, &sc->a10_mtx, 0);

	/* De-assert reset (the "ahb" reset line is optional in the DT). */
	if (hwreset_get_by_ofw_name(dev, "ahb", &sc->a10_rst_ahb) == 0) {
		error = hwreset_deassert(sc->a10_rst_ahb);
		if (error != 0) {
			device_printf(dev, "cannot de-assert reset\n");
			goto fail;
		}
	}

	/* Activate the module clock. */
	error = clk_get_by_ofw_name(dev, "ahb", &sc->a10_clk_ahb);
	if (error != 0) {
		device_printf(dev, "cannot get ahb clock\n");
		goto fail;
	}
	error = clk_enable(sc->a10_clk_ahb);
	if (error != 0) {
		device_printf(dev, "cannot enable ahb clock\n");
		goto fail;
	}
	error = clk_get_by_ofw_name(dev, "mmc", &sc->a10_clk_mmc);
	if (error != 0) {
		device_printf(dev, "cannot get mmc clock\n");
		goto fail;
	}
	/* Start at the slow card-identification frequency. */
	error = clk_set_freq(sc->a10_clk_mmc, CARD_ID_FREQUENCY,
	    CLK_SET_ROUND_DOWN);
	if (error != 0) {
		device_printf(dev, "cannot init mmc clock\n");
		goto fail;
	}
	error = clk_enable(sc->a10_clk_mmc);
	if (error != 0) {
		device_printf(dev, "cannot enable mmc clock\n");
		goto fail;
	}

	/* Default request watchdog: 10 seconds, tunable via sysctl. */
	sc->a10_timeout = 10;
	ctx = device_get_sysctl_ctx(dev);
	tree = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
	SYSCTL_ADD_INT(ctx, tree, OID_AUTO, "req_timeout", CTLFLAG_RW,
	    &sc->a10_timeout, 0, "Request timeout in seconds");

	/* Hardware reset */
	A10_MMC_WRITE_4(sc, A10_MMC_HWRST, 1);
	DELAY(100);
	A10_MMC_WRITE_4(sc, A10_MMC_HWRST, 0);
	DELAY(500);

	/* Soft Reset controller. */
	if (a10_mmc_reset(sc) != 0) {
		device_printf(dev, "cannot reset the controller\n");
		goto fail;
	}

	if (a10_mmc_setup_dma(sc) != 0) {
		device_printf(sc->a10_dev, "Couldn't setup DMA!\n");
		goto fail;
	}

	/* Bus width from the DT; default to 4 bits if absent. */
	if (OF_getencprop(node, "bus-width", &bus_width, sizeof(uint32_t)) <= 0)
		bus_width = 4;

	/* Advertise host capabilities to the mmc layer. */
	sc->a10_host.f_min = 400000;
	sc->a10_host.f_max = 50000000;
	sc->a10_host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
	sc->a10_host.mode = mode_sd;
	sc->a10_host.caps = MMC_CAP_HSPEED;
	if (bus_width >= 4)
		sc->a10_host.caps |= MMC_CAP_4_BIT_DATA;
	if (bus_width >= 8)
		sc->a10_host.caps |= MMC_CAP_8_BIT_DATA;

	child = device_add_child(dev, "mmc", -1);
	if (child == NULL) {
		device_printf(dev, "attaching MMC bus failed!\n");
		goto fail;
	}
	if (device_probe_and_attach(child) != 0) {
		device_printf(dev, "attaching MMC child failed!\n");
		device_delete_child(dev, child);
		goto fail;
	}

	return (0);

fail:
	/*
	 * NOTE(review): clocks enabled above are not disabled here, and DMA
	 * resources from a10_mmc_setup_dma() are not released — presumably
	 * acceptable since detach is unsupported; verify before reuse.
	 */
	callout_drain(&sc->a10_timeoutc);
	mtx_destroy(&sc->a10_mtx);
	bus_teardown_intr(dev, sc->a10_res[A10_MMC_IRQRES], sc->a10_intrhand);
	bus_release_resources(dev, a10_mmc_res_spec, sc->a10_res);

	return (ENXIO);
}
262
263static int
264a10_mmc_detach(device_t dev)
265{
266
267	return (EBUSY);
268}
269
270static void
271a10_dma_desc_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
272{
273	struct a10_mmc_softc *sc;
274
275	sc = (struct a10_mmc_softc *)arg;
276	if (err) {
277		sc->a10_dma_map_err = err;
278		return;
279	}
280	sc->a10_dma_desc_phys = segs[0].ds_addr;
281}
282
/*
 * Allocate the IDMA descriptor ring and create the tag/map used for
 * mapping data buffers.  Returns 0 or a bus_dma errno.
 *
 * NOTE(review): early-return error paths here do not free the tag or
 * descriptor memory already allocated — presumably tolerated because
 * attach fails (and detach is unsupported); verify before reuse.
 */
static int
a10_mmc_setup_dma(struct a10_mmc_softc *sc)
{
	int dma_desc_size, error;

	/* Allocate the DMA descriptor memory. */
	dma_desc_size = sizeof(struct a10_mmc_dma_desc) * A10_MMC_DMA_SEGS;
	/* One contiguous segment, 32-bit addressable, for the ring. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->a10_dev),
	    A10_MMC_DMA_ALIGN, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    dma_desc_size, 1, dma_desc_size, 0, NULL, NULL, &sc->a10_dma_tag);
	if (error)
		return (error);
	error = bus_dmamem_alloc(sc->a10_dma_tag, &sc->a10_dma_desc,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->a10_dma_map);
	if (error)
		return (error);

	/* a10_dma_desc_cb() stores the ring's bus address. */
	error = bus_dmamap_load(sc->a10_dma_tag, sc->a10_dma_map,
	    sc->a10_dma_desc, dma_desc_size, a10_dma_desc_cb, sc, 0);
	if (error)
		return (error);
	if (sc->a10_dma_map_err)
		return (sc->a10_dma_map_err);

	/* Create the DMA map for data transfers. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->a10_dev),
	    A10_MMC_DMA_ALIGN, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    A10_MMC_DMA_MAX_SIZE * A10_MMC_DMA_SEGS, A10_MMC_DMA_SEGS,
	    A10_MMC_DMA_MAX_SIZE, BUS_DMA_ALLOCNOW, NULL, NULL,
	    &sc->a10_dma_buf_tag);
	if (error)
		return (error);
	error = bus_dmamap_create(sc->a10_dma_buf_tag, 0,
	    &sc->a10_dma_buf_map);
	if (error)
		return (error);

	return (0);
}
324
325static void
326a10_dma_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
327{
328	int i;
329	struct a10_mmc_dma_desc *dma_desc;
330	struct a10_mmc_softc *sc;
331
332	sc = (struct a10_mmc_softc *)arg;
333	sc->a10_dma_map_err = err;
334
335	if (err)
336		return;
337
338	dma_desc = sc->a10_dma_desc;
339	for (i = 0; i < nsegs; i++) {
340		dma_desc[i].buf_size = segs[i].ds_len;
341		dma_desc[i].buf_addr = segs[i].ds_addr;
342		dma_desc[i].config = A10_MMC_DMA_CONFIG_CH |
343		    A10_MMC_DMA_CONFIG_OWN;
344		if (i == 0)
345			dma_desc[i].config |= A10_MMC_DMA_CONFIG_FD;
346		if (i < (nsegs - 1)) {
347			dma_desc[i].config |= A10_MMC_DMA_CONFIG_DIC;
348			dma_desc[i].next = sc->a10_dma_desc_phys +
349			    ((i + 1) * sizeof(struct a10_mmc_dma_desc));
350		} else {
351			dma_desc[i].config |= A10_MMC_DMA_CONFIG_LD |
352			    A10_MMC_DMA_CONFIG_ER;
353			dma_desc[i].next = 0;
354		}
355	}
356}
357
358static int
359a10_mmc_prepare_dma(struct a10_mmc_softc *sc)
360{
361	bus_dmasync_op_t sync_op;
362	int error;
363	struct mmc_command *cmd;
364	uint32_t val;
365
366	cmd = sc->a10_req->cmd;
367	if (cmd->data->len > A10_MMC_DMA_MAX_SIZE * A10_MMC_DMA_SEGS)
368		return (EFBIG);
369	error = bus_dmamap_load(sc->a10_dma_buf_tag, sc->a10_dma_buf_map,
370	    cmd->data->data, cmd->data->len, a10_dma_cb, sc, 0);
371	if (error)
372		return (error);
373	if (sc->a10_dma_map_err)
374		return (sc->a10_dma_map_err);
375
376	if (cmd->data->flags & MMC_DATA_WRITE)
377		sync_op = BUS_DMASYNC_PREWRITE;
378	else
379		sync_op = BUS_DMASYNC_PREREAD;
380	bus_dmamap_sync(sc->a10_dma_buf_tag, sc->a10_dma_buf_map, sync_op);
381	bus_dmamap_sync(sc->a10_dma_tag, sc->a10_dma_map, BUS_DMASYNC_PREWRITE);
382
383	/* Enable DMA */
384	val = A10_MMC_READ_4(sc, A10_MMC_GCTL);
385	val &= ~A10_MMC_CTRL_FIFO_AC_MOD;
386	val |= A10_MMC_CTRL_DMA_ENB;
387	A10_MMC_WRITE_4(sc, A10_MMC_GCTL, val);
388
389	/* Reset DMA */
390	val |= A10_MMC_CTRL_DMA_RST;
391	A10_MMC_WRITE_4(sc, A10_MMC_GCTL, val);
392
393	A10_MMC_WRITE_4(sc, A10_MMC_DMAC, A10_MMC_DMAC_IDMAC_SOFT_RST);
394	A10_MMC_WRITE_4(sc, A10_MMC_DMAC,
395	    A10_MMC_DMAC_IDMAC_IDMA_ON | A10_MMC_DMAC_IDMAC_FIX_BURST);
396
397	/* Enable RX or TX DMA interrupt */
398	if (cmd->data->flags & MMC_DATA_WRITE)
399		val |= A10_MMC_IDST_TX_INT;
400	else
401		val |= A10_MMC_IDST_RX_INT;
402	A10_MMC_WRITE_4(sc, A10_MMC_IDIE, val);
403
404	/* Set DMA descritptor list address */
405	A10_MMC_WRITE_4(sc, A10_MMC_DLBA, sc->a10_dma_desc_phys);
406
407	/* FIFO trigger level */
408	A10_MMC_WRITE_4(sc, A10_MMC_FWLR, A10_MMC_DMA_FTRGLEVEL);
409
410	return (0);
411}
412
/*
 * Soft-reset the controller, then reprogram timeouts, clear and unmask
 * the interrupts this driver handles.  Returns 0, or ETIMEDOUT if the
 * reset bit never self-clears.
 */
static int
a10_mmc_reset(struct a10_mmc_softc *sc)
{
	int timeout;

	A10_MMC_WRITE_4(sc, A10_MMC_GCTL, A10_MMC_RESET);
	/* Poll for the hardware to clear the reset bits (~100ms max). */
	timeout = 1000;
	while (--timeout > 0) {
		if ((A10_MMC_READ_4(sc, A10_MMC_GCTL) & A10_MMC_RESET) == 0)
			break;
		DELAY(100);
	}
	if (timeout == 0)
		return (ETIMEDOUT);

	/* Set the timeout (maximum data/response timeout counts). */
	A10_MMC_WRITE_4(sc, A10_MMC_TMOR,
	    A10_MMC_TMOR_DTO_LMT_SHIFT(A10_MMC_TMOR_DTO_LMT_MASK) |
	    A10_MMC_TMOR_RTO_LMT_SHIFT(A10_MMC_TMOR_RTO_LMT_MASK));

	/* Clear pending interrupts. */
	A10_MMC_WRITE_4(sc, A10_MMC_RISR, 0xffffffff);
	A10_MMC_WRITE_4(sc, A10_MMC_IDST, 0xffffffff);
	/* Unmask interrupts. */
	A10_MMC_WRITE_4(sc, A10_MMC_IMKR,
	    A10_MMC_INT_CMD_DONE | A10_MMC_INT_ERR_BIT |
	    A10_MMC_INT_DATA_OVER | A10_MMC_INT_AUTO_STOP_DONE);
	/* Enable interrupts and AHB access. */
	A10_MMC_WRITE_4(sc, A10_MMC_GCTL,
	    A10_MMC_READ_4(sc, A10_MMC_GCTL) | A10_MMC_CTRL_INT_ENB);

	return (0);
}
446
447static void
448a10_mmc_req_done(struct a10_mmc_softc *sc)
449{
450	struct mmc_command *cmd;
451	struct mmc_request *req;
452
453	cmd = sc->a10_req->cmd;
454	if (cmd->error != MMC_ERR_NONE) {
455		/* Reset the controller. */
456		a10_mmc_reset(sc);
457	}
458
459	req = sc->a10_req;
460	callout_stop(&sc->a10_timeoutc);
461	sc->a10_req = NULL;
462	sc->a10_intr = 0;
463	sc->a10_resid = 0;
464	sc->a10_dma_map_err = 0;
465	sc->a10_intr_wait = 0;
466	req->done(req);
467}
468
/*
 * All awaited interrupt bits arrived: wait for the card to go idle,
 * collect the command response, verify the DMA transfer completed,
 * and complete the request.  Called with the softc lock held.
 */
static void
a10_mmc_req_ok(struct a10_mmc_softc *sc)
{
	int timeout;
	struct mmc_command *cmd;
	uint32_t status;

	/* Poll (up to ~1s) for the card-busy bit to clear. */
	timeout = 1000;
	while (--timeout > 0) {
		status = A10_MMC_READ_4(sc, A10_MMC_STAR);
		if ((status & A10_MMC_STAR_CARD_BUSY) == 0)
			break;
		DELAY(1000);
	}
	cmd = sc->a10_req->cmd;
	if (timeout == 0) {
		cmd->error = MMC_ERR_FAILED;
		a10_mmc_req_done(sc);
		return;
	}
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* 136-bit response: registers hold it MSW-first. */
			cmd->resp[0] = A10_MMC_READ_4(sc, A10_MMC_RESP3);
			cmd->resp[1] = A10_MMC_READ_4(sc, A10_MMC_RESP2);
			cmd->resp[2] = A10_MMC_READ_4(sc, A10_MMC_RESP1);
			cmd->resp[3] = A10_MMC_READ_4(sc, A10_MMC_RESP0);
		} else
			cmd->resp[0] = A10_MMC_READ_4(sc, A10_MMC_RESP0);
	}
	/* All data has been transferred ?  (a10_resid counts 32-bit words) */
	if (cmd->data != NULL && (sc->a10_resid << 2) < cmd->data->len)
		cmd->error = MMC_ERR_FAILED;
	a10_mmc_req_done(sc);
}
503
504static void
505a10_mmc_timeout(void *arg)
506{
507	struct a10_mmc_softc *sc;
508
509	sc = (struct a10_mmc_softc *)arg;
510	if (sc->a10_req != NULL) {
511		device_printf(sc->a10_dev, "controller timeout\n");
512		sc->a10_req->cmd->error = MMC_ERR_TIMEOUT;
513		a10_mmc_req_done(sc);
514	} else
515		device_printf(sc->a10_dev,
516		    "Spurious timeout - no active request\n");
517}
518
/*
 * Interrupt handler.  Reads the raw interrupt status (RISR) and the
 * IDMA status (IDST), dispatches errors, accumulates completion bits,
 * and finishes the request once all awaited bits have arrived.  All
 * handled bits are acknowledged on the way out.
 */
static void
a10_mmc_intr(void *arg)
{
	bus_dmasync_op_t sync_op;
	struct a10_mmc_softc *sc;
	struct mmc_data *data;
	uint32_t idst, imask, rint;

	sc = (struct a10_mmc_softc *)arg;
	A10_MMC_LOCK(sc);
	rint = A10_MMC_READ_4(sc, A10_MMC_RISR);
	idst = A10_MMC_READ_4(sc, A10_MMC_IDST);
	imask = A10_MMC_READ_4(sc, A10_MMC_IMKR);
	/* Nothing pending: the (shared) interrupt was not for us. */
	if (idst == 0 && imask == 0 && rint == 0) {
		A10_MMC_UNLOCK(sc);
		return;
	}
#ifdef DEBUG
	device_printf(sc->a10_dev, "idst: %#x, imask: %#x, rint: %#x\n",
	    idst, imask, rint);
#endif
	if (sc->a10_req == NULL) {
		device_printf(sc->a10_dev,
		    "Spurious interrupt - no active request, rint: 0x%08X\n",
		    rint);
		goto end;
	}
	/* Command/data error reported by the controller. */
	if (rint & A10_MMC_INT_ERR_BIT) {
		device_printf(sc->a10_dev, "error rint: 0x%08X\n", rint);
		if (rint & A10_MMC_INT_RESP_TIMEOUT)
			sc->a10_req->cmd->error = MMC_ERR_TIMEOUT;
		else
			sc->a10_req->cmd->error = MMC_ERR_FAILED;
		a10_mmc_req_done(sc);
		goto end;
	}
	/* IDMA engine error. */
	if (idst & A10_MMC_IDST_ERROR) {
		device_printf(sc->a10_dev, "error idst: 0x%08x\n", idst);
		sc->a10_req->cmd->error = MMC_ERR_FAILED;
		a10_mmc_req_done(sc);
		goto end;
	}

	/* Accumulate completion bits across multiple interrupts. */
	sc->a10_intr |= rint;
	data = sc->a10_req->cmd->data;
	if (data != NULL && (idst & A10_MMC_IDST_COMPLETE) != 0) {
		/* DMA finished: sync and unload the buffer map. */
		if (data->flags & MMC_DATA_WRITE)
			sync_op = BUS_DMASYNC_POSTWRITE;
		else
			sync_op = BUS_DMASYNC_POSTREAD;
		bus_dmamap_sync(sc->a10_dma_buf_tag, sc->a10_dma_buf_map,
		    sync_op);
		bus_dmamap_sync(sc->a10_dma_tag, sc->a10_dma_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->a10_dma_buf_tag, sc->a10_dma_buf_map);
		/* Record the transfer length in 32-bit words. */
		sc->a10_resid = data->len >> 2;
	}
	/* Complete the request once every awaited bit has arrived. */
	if ((sc->a10_intr & sc->a10_intr_wait) == sc->a10_intr_wait)
		a10_mmc_req_ok(sc);

end:
	/* Acknowledge everything we observed. */
	A10_MMC_WRITE_4(sc, A10_MMC_IDST, idst);
	A10_MMC_WRITE_4(sc, A10_MMC_RISR, rint);
	A10_MMC_UNLOCK(sc);
}
584
/*
 * mmcbr request entry point: build the command register value, set up
 * DMA for data transfers, start the command, and arm the watchdog.
 * Completion is asynchronous via a10_mmc_intr()/req->done().
 * Returns EBUSY if a request is already in flight.
 */
static int
a10_mmc_request(device_t bus, device_t child, struct mmc_request *req)
{
	int blksz;
	struct a10_mmc_softc *sc;
	struct mmc_command *cmd;
	uint32_t cmdreg;
	int err;

	sc = device_get_softc(bus);
	A10_MMC_LOCK(sc);
	/* Only one outstanding request at a time. */
	if (sc->a10_req) {
		A10_MMC_UNLOCK(sc);
		return (EBUSY);
	}
	sc->a10_req = req;
	cmd = req->cmd;
	cmdreg = A10_MMC_CMDR_LOAD;
	/* CMD0 additionally triggers the 80-clock init sequence. */
	if (cmd->opcode == MMC_GO_IDLE_STATE)
		cmdreg |= A10_MMC_CMDR_SEND_INIT_SEQ;
	if (cmd->flags & MMC_RSP_PRESENT)
		cmdreg |= A10_MMC_CMDR_RESP_RCV;
	if (cmd->flags & MMC_RSP_136)
		cmdreg |= A10_MMC_CMDR_LONG_RESP;
	if (cmd->flags & MMC_RSP_CRC)
		cmdreg |= A10_MMC_CMDR_CHK_RESP_CRC;

	sc->a10_intr = 0;
	sc->a10_resid = 0;
	sc->a10_intr_wait = A10_MMC_INT_CMD_DONE;
	cmd->error = MMC_ERR_NONE;
	if (cmd->data != NULL) {
		/* Data transfer: also wait for data-over completion. */
		sc->a10_intr_wait |= A10_MMC_INT_DATA_OVER;
		cmdreg |= A10_MMC_CMDR_DATA_TRANS | A10_MMC_CMDR_WAIT_PRE_OVER;
		if (cmd->data->flags & MMC_DATA_MULTI) {
			/* Let the controller issue the stop command. */
			cmdreg |= A10_MMC_CMDR_STOP_CMD_FLAG;
			sc->a10_intr_wait |= A10_MMC_INT_AUTO_STOP_DONE;
		}
		if (cmd->data->flags & MMC_DATA_WRITE)
			cmdreg |= A10_MMC_CMDR_DIR_WRITE;
		blksz = min(cmd->data->len, MMC_SECTOR_SIZE);
		A10_MMC_WRITE_4(sc, A10_MMC_BKSR, blksz);
		A10_MMC_WRITE_4(sc, A10_MMC_BYCR, cmd->data->len);

		/*
		 * NOTE(review): on prepare_dma failure the command is still
		 * issued below without a loaded DMA map — presumably the
		 * request then fails via the watchdog; verify.
		 */
		err = a10_mmc_prepare_dma(sc);
		if (err != 0)
			device_printf(sc->a10_dev, "prepare_dma failed: %d\n", err);
	}

	/* Kick off the command and arm the request watchdog. */
	A10_MMC_WRITE_4(sc, A10_MMC_CAGR, cmd->arg);
	A10_MMC_WRITE_4(sc, A10_MMC_CMDR, cmdreg | cmd->opcode);
	callout_reset(&sc->a10_timeoutc, sc->a10_timeout * hz,
	    a10_mmc_timeout, sc);
	A10_MMC_UNLOCK(sc);

	return (0);
}
642
643static int
644a10_mmc_read_ivar(device_t bus, device_t child, int which,
645    uintptr_t *result)
646{
647	struct a10_mmc_softc *sc;
648
649	sc = device_get_softc(bus);
650	switch (which) {
651	default:
652		return (EINVAL);
653	case MMCBR_IVAR_BUS_MODE:
654		*(int *)result = sc->a10_host.ios.bus_mode;
655		break;
656	case MMCBR_IVAR_BUS_WIDTH:
657		*(int *)result = sc->a10_host.ios.bus_width;
658		break;
659	case MMCBR_IVAR_CHIP_SELECT:
660		*(int *)result = sc->a10_host.ios.chip_select;
661		break;
662	case MMCBR_IVAR_CLOCK:
663		*(int *)result = sc->a10_host.ios.clock;
664		break;
665	case MMCBR_IVAR_F_MIN:
666		*(int *)result = sc->a10_host.f_min;
667		break;
668	case MMCBR_IVAR_F_MAX:
669		*(int *)result = sc->a10_host.f_max;
670		break;
671	case MMCBR_IVAR_HOST_OCR:
672		*(int *)result = sc->a10_host.host_ocr;
673		break;
674	case MMCBR_IVAR_MODE:
675		*(int *)result = sc->a10_host.mode;
676		break;
677	case MMCBR_IVAR_OCR:
678		*(int *)result = sc->a10_host.ocr;
679		break;
680	case MMCBR_IVAR_POWER_MODE:
681		*(int *)result = sc->a10_host.ios.power_mode;
682		break;
683	case MMCBR_IVAR_VDD:
684		*(int *)result = sc->a10_host.ios.vdd;
685		break;
686	case MMCBR_IVAR_CAPS:
687		*(int *)result = sc->a10_host.caps;
688		break;
689	case MMCBR_IVAR_MAX_DATA:
690		*(int *)result = 65535;
691		break;
692	}
693
694	return (0);
695}
696
697static int
698a10_mmc_write_ivar(device_t bus, device_t child, int which,
699    uintptr_t value)
700{
701	struct a10_mmc_softc *sc;
702
703	sc = device_get_softc(bus);
704	switch (which) {
705	default:
706		return (EINVAL);
707	case MMCBR_IVAR_BUS_MODE:
708		sc->a10_host.ios.bus_mode = value;
709		break;
710	case MMCBR_IVAR_BUS_WIDTH:
711		sc->a10_host.ios.bus_width = value;
712		break;
713	case MMCBR_IVAR_CHIP_SELECT:
714		sc->a10_host.ios.chip_select = value;
715		break;
716	case MMCBR_IVAR_CLOCK:
717		sc->a10_host.ios.clock = value;
718		break;
719	case MMCBR_IVAR_MODE:
720		sc->a10_host.mode = value;
721		break;
722	case MMCBR_IVAR_OCR:
723		sc->a10_host.ocr = value;
724		break;
725	case MMCBR_IVAR_POWER_MODE:
726		sc->a10_host.ios.power_mode = value;
727		break;
728	case MMCBR_IVAR_VDD:
729		sc->a10_host.ios.vdd = value;
730		break;
731	/* These are read-only */
732	case MMCBR_IVAR_CAPS:
733	case MMCBR_IVAR_HOST_OCR:
734	case MMCBR_IVAR_F_MIN:
735	case MMCBR_IVAR_F_MAX:
736	case MMCBR_IVAR_MAX_DATA:
737		return (EINVAL);
738	}
739
740	return (0);
741}
742
/*
 * Gate the card clock on or off (clkon != 0 enables it) and issue the
 * "program clock" command so the controller latches the change.
 * Returns 0, or ETIMEDOUT if the command never completes.
 */
static int
a10_mmc_update_clock(struct a10_mmc_softc *sc, uint32_t clkon)
{
	uint32_t cmdreg;
	int retry;
	uint32_t ckcr;

	ckcr = A10_MMC_READ_4(sc, A10_MMC_CKCR);
	ckcr &= ~(A10_MMC_CKCR_CCLK_ENB | A10_MMC_CKCR_CCLK_CTRL);

	if (clkon)
		ckcr |= A10_MMC_CKCR_CCLK_ENB;

	A10_MMC_WRITE_4(sc, A10_MMC_CKCR, ckcr);

	/* Dummy command that only updates the clock registers. */
	cmdreg = A10_MMC_CMDR_LOAD | A10_MMC_CMDR_PRG_CLK |
	    A10_MMC_CMDR_WAIT_PRE_OVER;
	A10_MMC_WRITE_4(sc, A10_MMC_CMDR, cmdreg);
	/* Poll for the LOAD bit to self-clear. */
	retry = 0xfffff;
	while (--retry > 0) {
		if ((A10_MMC_READ_4(sc, A10_MMC_CMDR) & A10_MMC_CMDR_LOAD) == 0) {
			A10_MMC_WRITE_4(sc, A10_MMC_RISR, 0xffffffff);
			return (0);
		}
		DELAY(10);
	}
	/* Clear any raw status the dummy command raised before failing. */
	A10_MMC_WRITE_4(sc, A10_MMC_RISR, 0xffffffff);
	device_printf(sc->a10_dev, "timeout updating clock\n");

	return (ETIMEDOUT);
}
774
/*
 * mmcbr update_ios entry point: apply the bus width and clock settings
 * requested by the mmc layer.  The clock is gated off, re-programmed
 * via the "mmc" module clock, and gated back on.
 */
static int
a10_mmc_update_ios(device_t bus, device_t child)
{
	int error;
	struct a10_mmc_softc *sc;
	struct mmc_ios *ios;
	uint32_t ckcr;

	sc = device_get_softc(bus);

	ios = &sc->a10_host.ios;

	/* Set the bus width. */
	switch (ios->bus_width) {
	case bus_width_1:
		A10_MMC_WRITE_4(sc, A10_MMC_BWDR, A10_MMC_BWDR1);
		break;
	case bus_width_4:
		A10_MMC_WRITE_4(sc, A10_MMC_BWDR, A10_MMC_BWDR4);
		break;
	case bus_width_8:
		A10_MMC_WRITE_4(sc, A10_MMC_BWDR, A10_MMC_BWDR8);
		break;
	}

	if (ios->clock) {

		/* Disable clock */
		error = a10_mmc_update_clock(sc, 0);
		if (error != 0)
			return (error);

		/* Reset the divider (frequency comes from the mmc clock). */
		ckcr = A10_MMC_READ_4(sc, A10_MMC_CKCR);
		ckcr &= ~A10_MMC_CKCR_CCLK_DIV;
		A10_MMC_WRITE_4(sc, A10_MMC_CKCR, ckcr);

		/* Set the MMC clock. */
		error = clk_set_freq(sc->a10_clk_mmc, ios->clock,
		    CLK_SET_ROUND_DOWN);
		if (error != 0) {
			device_printf(sc->a10_dev,
			    "failed to set frequency to %u Hz: %d\n",
			    ios->clock, error);
			return (error);
		}

		/* Enable clock. */
		error = a10_mmc_update_clock(sc, 1);
		if (error != 0)
			return (error);
	}


	return (0);
}
831
832static int
833a10_mmc_get_ro(device_t bus, device_t child)
834{
835
836	return (0);
837}
838
839static int
840a10_mmc_acquire_host(device_t bus, device_t child)
841{
842	struct a10_mmc_softc *sc;
843	int error;
844
845	sc = device_get_softc(bus);
846	A10_MMC_LOCK(sc);
847	while (sc->a10_bus_busy) {
848		error = msleep(sc, &sc->a10_mtx, PCATCH, "mmchw", 0);
849		if (error != 0) {
850			A10_MMC_UNLOCK(sc);
851			return (error);
852		}
853	}
854	sc->a10_bus_busy++;
855	A10_MMC_UNLOCK(sc);
856
857	return (0);
858}
859
860static int
861a10_mmc_release_host(device_t bus, device_t child)
862{
863	struct a10_mmc_softc *sc;
864
865	sc = device_get_softc(bus);
866	A10_MMC_LOCK(sc);
867	sc->a10_bus_busy--;
868	wakeup(sc);
869	A10_MMC_UNLOCK(sc);
870
871	return (0);
872}
873
/* newbus method table: device, bus, and mmc bridge interfaces. */
static device_method_t a10_mmc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		a10_mmc_probe),
	DEVMETHOD(device_attach,	a10_mmc_attach),
	DEVMETHOD(device_detach,	a10_mmc_detach),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar,	a10_mmc_read_ivar),
	DEVMETHOD(bus_write_ivar,	a10_mmc_write_ivar),
	DEVMETHOD(bus_print_child,	bus_generic_print_child),

	/* MMC bridge interface */
	DEVMETHOD(mmcbr_update_ios,	a10_mmc_update_ios),
	DEVMETHOD(mmcbr_request,	a10_mmc_request),
	DEVMETHOD(mmcbr_get_ro,		a10_mmc_get_ro),
	DEVMETHOD(mmcbr_acquire_host,	a10_mmc_acquire_host),
	DEVMETHOD(mmcbr_release_host,	a10_mmc_release_host),

	DEVMETHOD_END
};

static devclass_t a10_mmc_devclass;

static driver_t a10_mmc_driver = {
	"a10_mmc",
	a10_mmc_methods,
	sizeof(struct a10_mmc_softc),
};

/* Attach under simplebus (FDT) and hang the generic mmc bus below us. */
DRIVER_MODULE(a10_mmc, simplebus, a10_mmc_driver, a10_mmc_devclass, 0, 0);
DRIVER_MODULE(mmc, a10_mmc, mmc_driver, mmc_devclass, NULL, NULL);
MODULE_DEPEND(a10_mmc, mmc, 1, 1, 1);
906