/*
 * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 * Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/init.h>
14 #include <linux/ioport.h>
15 #include <linux/device.h>
16 #include <linux/interrupt.h>
17 #include <linux/kernel.h>
18 #include <linux/slab.h>
19 #include <linux/delay.h>
20 #include <linux/err.h>
21 #include <linux/highmem.h>
22 #include <linux/log2.h>
23 #include <linux/mmc/pm.h>
24 #include <linux/mmc/host.h>
25 #include <linux/mmc/card.h>
26 #include <linux/amba/bus.h>
27 #include <linux/clk.h>
28 #include <linux/scatterlist.h>
29 #include <linux/gpio.h>
30 #include <linux/of_gpio.h>
31 #include <linux/regulator/consumer.h>
32 #include <linux/dmaengine.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/amba/mmci.h>
35 #include <linux/pm_runtime.h>
36 #include <linux/types.h>
37 #include <linux/pinctrl/consumer.h>
39 #include <asm/div64.h>
41 #include <asm/sizes.h>
45 #define DRIVER_NAME "mmci-pl18x"
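/*
 * Default maximum operating frequency in Hz, used when neither platform
 * data nor the device tree supplies one; it can be overridden through the
 * "fmax" module parameter registered at the bottom of this file.
 */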
47 static unsigned int fmax = 515633;
/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for MMCICLOCK register
 * @clkreg_enable: enable value for MMCICLOCK register
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 * @fifosize: number of bytes that can be written when MCI_TXFIFOEMPTY
 *	      is asserted (likewise for RX)
 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
 *		  is asserted (likewise for RX)
 * @sdio: variant supports SDIO
 * @st_clkdiv: true if using an ST-specific clock divider algorithm
 * @blksz_datactrl16: true if the block size occupies bits 16..30 of the datactrl register
 * @pwrreg_powerup: power up value for MMCIPOWER register
 * @signal_direction: input/output direction of bus signals can be indicated
 * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
 * @busy_detect: true if busy detection on DAT0 is supported
 */
68 unsigned int clkreg_enable;
69 unsigned int datalength_bits;
70 unsigned int fifosize;
71 unsigned int fifohalfsize;
74 bool blksz_datactrl16;
76 bool signal_direction;
81 static struct variant_data variant_arm = {
83 .fifohalfsize = 8 * 4,
84 .datalength_bits = 16,
85 .pwrreg_powerup = MCI_PWR_UP,
88 static struct variant_data variant_arm_extended_fifo = {
90 .fifohalfsize = 64 * 4,
91 .datalength_bits = 16,
92 .pwrreg_powerup = MCI_PWR_UP,
95 static struct variant_data variant_arm_extended_fifo_hwfc = {
97 .fifohalfsize = 64 * 4,
98 .clkreg_enable = MCI_ARM_HWFCEN,
99 .datalength_bits = 16,
100 .pwrreg_powerup = MCI_PWR_UP,
103 static struct variant_data variant_u300 = {
105 .fifohalfsize = 8 * 4,
106 .clkreg_enable = MCI_ST_U300_HWFCEN,
107 .datalength_bits = 16,
109 .pwrreg_powerup = MCI_PWR_ON,
110 .signal_direction = true,
111 .pwrreg_clkgate = true,
114 static struct variant_data variant_nomadik = {
116 .fifohalfsize = 8 * 4,
117 .clkreg = MCI_CLK_ENABLE,
118 .datalength_bits = 24,
121 .pwrreg_powerup = MCI_PWR_ON,
122 .signal_direction = true,
123 .pwrreg_clkgate = true,
126 static struct variant_data variant_ux500 = {
128 .fifohalfsize = 8 * 4,
129 .clkreg = MCI_CLK_ENABLE,
130 .clkreg_enable = MCI_ST_UX500_HWFCEN,
131 .datalength_bits = 24,
134 .pwrreg_powerup = MCI_PWR_ON,
135 .signal_direction = true,
136 .pwrreg_clkgate = true,
140 static struct variant_data variant_ux500v2 = {
142 .fifohalfsize = 8 * 4,
143 .clkreg = MCI_CLK_ENABLE,
144 .clkreg_enable = MCI_ST_UX500_HWFCEN,
145 .datalength_bits = 24,
148 .blksz_datactrl16 = true,
149 .pwrreg_powerup = MCI_PWR_ON,
150 .signal_direction = true,
151 .pwrreg_clkgate = true,
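/*
 * Card-busy callback: with runtime PM held, sample the DAT0 busy flag
 * (MCI_ST_CARDBUSY) in MMCISTATUS under the host lock and report whether
 * the card is still signalling busy.
 */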
155 static int mmci_card_busy(struct mmc_host *mmc)
157 struct mmci_host *host = mmc_priv(mmc);
161 pm_runtime_get_sync(mmc_dev(mmc));
163 spin_lock_irqsave(&host->lock, flags);
164 if (readl(host->base + MMCISTATUS) & MCI_ST_CARDBUSY)
166 spin_unlock_irqrestore(&host->lock, flags);
168 pm_runtime_mark_last_busy(mmc_dev(mmc));
169 pm_runtime_put_autosuspend(mmc_dev(mmc));
175 * Validate mmc prerequisites
177 static int mmci_validate_data(struct mmci_host *host,
178 struct mmc_data *data)
183 if (!is_power_of_2(data->blksz)) {
184 dev_err(mmc_dev(host->mmc),
185 "unsupported block size (%d bytes)\n", data->blksz);
192 static void mmci_reg_delay(struct mmci_host *host)
195 * According to the spec, at least three feedback clock cycles
196 * of max 52 MHz must pass between two writes to the MMCICLOCK reg.
197 * Three MCLK clock cycles must pass between two MMCIPOWER reg writes.
198 * Worst delay time during card init is at 100 kHz => 30 us.
199 * Worst delay time when up and running is at 25 MHz => 120 ns.
	if (host->cclk < 25000000)
		udelay(30);
	else
		ndelay(120);
208 * This must be called with host->lock held
210 static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
212 if (host->clk_reg != clk) {
214 writel(clk, host->base + MMCICLOCK);
219 * This must be called with host->lock held
221 static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
223 if (host->pwr_reg != pwr) {
225 writel(pwr, host->base + MMCIPOWER);
230 * This must be called with host->lock held
232 static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl)
234 /* Keep ST Micro busy mode if enabled */
235 datactrl |= host->datactrl_reg & MCI_ST_DPSM_BUSYMODE;
237 if (host->datactrl_reg != datactrl) {
238 host->datactrl_reg = datactrl;
239 writel(datactrl, host->base + MMCIDATACTRL);
244 * This must be called with host->lock held
246 static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
248 struct variant_data *variant = host->variant;
249 u32 clk = variant->clkreg;
251 /* Make sure cclk reflects the current calculated clock */
255 if (desired >= host->mclk) {
256 clk = MCI_CLK_BYPASS;
257 if (variant->st_clkdiv)
258 clk |= MCI_ST_UX500_NEG_EDGE;
259 host->cclk = host->mclk;
260 } else if (variant->st_clkdiv) {
262 * DB8500 TRM says f = mclk / (clkdiv + 2)
263 * => clkdiv = (mclk / f) - 2
264 * Round the divider up so we don't exceed the max
267 clk = DIV_ROUND_UP(host->mclk, desired) - 2;
270 host->cclk = host->mclk / (clk + 2);
273 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
274 * => clkdiv = mclk / (2 * f) - 1
276 clk = host->mclk / (2 * desired) - 1;
279 host->cclk = host->mclk / (2 * (clk + 1));
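	/*
	 * Worked example (illustrative): mclk = 100 MHz, desired = 400 kHz.
	 * ST variant: clkdiv = DIV_ROUND_UP(100000000, 400000) - 2 = 248,
	 *             cclk = 100000000 / (248 + 2) = 400 kHz.
	 * PL180:      clkdiv = 100000000 / (2 * 400000) - 1 = 124,
	 *             cclk = 100000000 / (2 * (124 + 1)) = 400 kHz.
	 */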
282 clk |= variant->clkreg_enable;
283 clk |= MCI_CLK_ENABLE;
284 /* This hasn't proven to be worthwhile */
285 /* clk |= MCI_CLK_PWRSAVE; */
288 /* Set actual clock for debug */
289 host->mmc->actual_clock = host->cclk;
291 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
293 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
294 clk |= MCI_ST_8BIT_BUS;
296 if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
297 clk |= MCI_ST_UX500_NEG_EDGE;
299 mmci_write_clkreg(host, clk);
303 mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
305 writel(0, host->base + MMCICOMMAND);
312 mmc_request_done(host->mmc, mrq);
314 pm_runtime_mark_last_busy(mmc_dev(host->mmc));
315 pm_runtime_put_autosuspend(mmc_dev(host->mmc));
318 static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
320 void __iomem *base = host->base;
322 if (host->singleirq) {
323 unsigned int mask0 = readl(base + MMCIMASK0);
325 mask0 &= ~MCI_IRQ1MASK;
328 writel(mask0, base + MMCIMASK0);
331 writel(mask, base + MMCIMASK1);
334 static void mmci_stop_data(struct mmci_host *host)
336 mmci_write_datactrlreg(host, 0);
337 mmci_set_mask1(host, 0);
341 static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
343 unsigned int flags = SG_MITER_ATOMIC;
345 if (data->flags & MMC_DATA_READ)
346 flags |= SG_MITER_TO_SG;
348 flags |= SG_MITER_FROM_SG;
350 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
354 * All the DMA operation mode stuff goes inside this ifdef.
355 * This assumes that you have a generic DMA device interface,
356 * no custom DMA interfaces are supported.
358 #ifdef CONFIG_DMA_ENGINE
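/*
 * Acquire DMA channels for the host: first try the named "rx"/"tx" slave
 * channels (e.g. described in the device tree), then fall back to the
 * platform data dma_filter/dma_*_param channels. If no channel can be
 * found, the driver simply stays in PIO mode.
 */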
359 static void mmci_dma_setup(struct mmci_host *host)
361 struct mmci_platform_data *plat = host->plat;
362 const char *rxname, *txname;
365 host->dma_rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
366 host->dma_tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "tx");
368 /* initialize pre request cookie */
369 host->next_data.cookie = 1;
371 /* Try to acquire a generic DMA engine slave channel */
373 dma_cap_set(DMA_SLAVE, mask);
375 if (plat && plat->dma_filter) {
376 if (!host->dma_rx_channel && plat->dma_rx_param) {
377 host->dma_rx_channel = dma_request_channel(mask,
			/* E.g. if no DMA hardware is present */
381 if (!host->dma_rx_channel)
382 dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
385 if (!host->dma_tx_channel && plat->dma_tx_param) {
386 host->dma_tx_channel = dma_request_channel(mask,
389 if (!host->dma_tx_channel)
390 dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally; however, if it
	 * is specified but cannot be located, DMA will be disabled.
399 if (host->dma_rx_channel && !host->dma_tx_channel)
400 host->dma_tx_channel = host->dma_rx_channel;
402 if (host->dma_rx_channel)
403 rxname = dma_chan_name(host->dma_rx_channel);
407 if (host->dma_tx_channel)
408 txname = dma_chan_name(host->dma_tx_channel);
412 dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
416 * Limit the maximum segment size in any SG entry according to
417 * the parameters of the DMA engine device.
419 if (host->dma_tx_channel) {
420 struct device *dev = host->dma_tx_channel->device->dev;
421 unsigned int max_seg_size = dma_get_max_seg_size(dev);
423 if (max_seg_size < host->mmc->max_seg_size)
424 host->mmc->max_seg_size = max_seg_size;
426 if (host->dma_rx_channel) {
427 struct device *dev = host->dma_rx_channel->device->dev;
428 unsigned int max_seg_size = dma_get_max_seg_size(dev);
430 if (max_seg_size < host->mmc->max_seg_size)
431 host->mmc->max_seg_size = max_seg_size;
 * This is only used on slow paths (setup, teardown and DMA error
 * handling), so inline it so it can be discarded when unused.
439 static inline void mmci_dma_release(struct mmci_host *host)
441 struct mmci_platform_data *plat = host->plat;
443 if (host->dma_rx_channel)
444 dma_release_channel(host->dma_rx_channel);
445 if (host->dma_tx_channel && plat->dma_tx_param)
446 dma_release_channel(host->dma_tx_channel);
447 host->dma_rx_channel = host->dma_tx_channel = NULL;
450 static void mmci_dma_data_error(struct mmci_host *host)
452 dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
453 dmaengine_terminate_all(host->dma_current);
454 host->dma_current = NULL;
455 host->dma_desc_current = NULL;
456 host->data->host_cookie = 0;
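/*
 * Unmap the scatterlist of a finished or aborted DMA transfer, using the
 * channel and DMA direction that match the data transfer direction.
 */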
459 static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
461 struct dma_chan *chan;
462 enum dma_data_direction dir;
464 if (data->flags & MMC_DATA_READ) {
465 dir = DMA_FROM_DEVICE;
466 chan = host->dma_rx_channel;
469 chan = host->dma_tx_channel;
472 dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
475 static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
480 /* Wait up to 1ms for the DMA to complete */
482 status = readl(host->base + MMCISTATUS);
483 if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
489 * Check to see whether we still have some data left in the FIFO -
490 * this catches DMA controllers which are unable to monitor the
491 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
492 * contiguous buffers. On TX, we'll get a FIFO underrun error.
494 if (status & MCI_RXDATAAVLBLMASK) {
495 mmci_dma_data_error(host);
500 if (!data->host_cookie)
501 mmci_dma_unmap(host, data);
	 * Use of DMA with scatter-gather is impossible here.
	 * Give up on DMA and switch back to PIO mode.
507 if (status & MCI_RXDATAAVLBLMASK) {
508 dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
509 mmci_dma_release(host);
512 host->dma_current = NULL;
513 host->dma_desc_current = NULL;
516 /* prepares DMA channel and DMA descriptor, returns non-zero on failure */
517 static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
518 struct dma_chan **dma_chan,
519 struct dma_async_tx_descriptor **dma_desc)
521 struct variant_data *variant = host->variant;
522 struct dma_slave_config conf = {
523 .src_addr = host->phybase + MMCIFIFO,
524 .dst_addr = host->phybase + MMCIFIFO,
525 .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
526 .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
527 .src_maxburst = variant->fifohalfsize >> 2, /* # of words */
528 .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
531 struct dma_chan *chan;
532 struct dma_device *device;
533 struct dma_async_tx_descriptor *desc;
534 enum dma_data_direction buffer_dirn;
537 if (data->flags & MMC_DATA_READ) {
538 conf.direction = DMA_DEV_TO_MEM;
539 buffer_dirn = DMA_FROM_DEVICE;
540 chan = host->dma_rx_channel;
542 conf.direction = DMA_MEM_TO_DEV;
543 buffer_dirn = DMA_TO_DEVICE;
544 chan = host->dma_tx_channel;
547 /* If there's no DMA channel, fall back to PIO */
551 /* If less than or equal to the fifo size, don't bother with DMA */
552 if (data->blksz * data->blocks <= variant->fifosize)
555 device = chan->device;
556 nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
560 dmaengine_slave_config(chan, &conf);
561 desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
562 conf.direction, DMA_CTRL_ACK);
572 dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
576 static inline int mmci_dma_prep_data(struct mmci_host *host,
577 struct mmc_data *data)
579 /* Check if next job is already prepared. */
580 if (host->dma_current && host->dma_desc_current)
	/* No job was prepared, so do it now. */
584 return __mmci_dma_prep_data(host, data, &host->dma_current,
585 &host->dma_desc_current);
588 static inline int mmci_dma_prep_next(struct mmci_host *host,
589 struct mmc_data *data)
591 struct mmci_host_next *nd = &host->next_data;
592 return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
595 static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
598 struct mmc_data *data = host->data;
600 ret = mmci_dma_prep_data(host, host->data);
604 /* Okay, go for it. */
605 dev_vdbg(mmc_dev(host->mmc),
606 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
607 data->sg_len, data->blksz, data->blocks, data->flags);
608 dmaengine_submit(host->dma_desc_current);
609 dma_async_issue_pending(host->dma_current);
611 datactrl |= MCI_DPSM_DMAENABLE;
613 /* Trigger the DMA transfer */
614 mmci_write_datactrlreg(host, datactrl);
	 * Let the MMCI say when the data transfer has ended and it is time
	 * to fire the next DMA request. When that happens, MMCI will
	 * call mmci_data_irq().
621 writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
622 host->base + MMCIMASK0);
626 static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
628 struct mmci_host_next *next = &host->next_data;
630 WARN_ON(data->host_cookie && data->host_cookie != next->cookie);
631 WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan));
633 host->dma_desc_current = next->dma_desc;
634 host->dma_current = next->dma_chan;
635 next->dma_desc = NULL;
636 next->dma_chan = NULL;
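/*
 * ->pre_req() hook: map and prepare the next DMA descriptor ahead of time
 * and tag data->host_cookie with a non-zero cookie, so mmci_get_next_data()
 * and mmci_post_request() can tell a pre-prepared job from one set up at
 * submission time.
 */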
639 static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
642 struct mmci_host *host = mmc_priv(mmc);
643 struct mmc_data *data = mrq->data;
644 struct mmci_host_next *nd = &host->next_data;
649 BUG_ON(data->host_cookie);
651 if (mmci_validate_data(host, data))
654 if (!mmci_dma_prep_next(host, data))
655 data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
658 static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
661 struct mmci_host *host = mmc_priv(mmc);
662 struct mmc_data *data = mrq->data;
664 if (!data || !data->host_cookie)
667 mmci_dma_unmap(host, data);
670 struct mmci_host_next *next = &host->next_data;
671 struct dma_chan *chan;
672 if (data->flags & MMC_DATA_READ)
673 chan = host->dma_rx_channel;
675 chan = host->dma_tx_channel;
676 dmaengine_terminate_all(chan);
678 next->dma_desc = NULL;
679 next->dma_chan = NULL;
684 /* Blank functions if the DMA engine is not available */
685 static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
688 static inline void mmci_dma_setup(struct mmci_host *host)
692 static inline void mmci_dma_release(struct mmci_host *host)
696 static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
700 static inline void mmci_dma_finalize(struct mmci_host *host,
701 struct mmc_data *data)
705 static inline void mmci_dma_data_error(struct mmci_host *host)
709 static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
714 #define mmci_pre_request NULL
715 #define mmci_post_request NULL
719 static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
721 struct variant_data *variant = host->variant;
722 unsigned int datactrl, timeout, irqmask;
723 unsigned long long clks;
727 dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
728 data->blksz, data->blocks, data->flags);
731 host->size = data->blksz * data->blocks;
732 data->bytes_xfered = 0;
734 clks = (unsigned long long)data->timeout_ns * host->cclk;
735 do_div(clks, 1000000000UL);
737 timeout = data->timeout_clks + (unsigned int)clks;
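	/*
	 * The value written to MMCIDATATIMER below is expressed in card
	 * clock (cclk) cycles: the nanosecond-based timeout converted above
	 * plus the clock-count component supplied by the core.
	 */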
740 writel(timeout, base + MMCIDATATIMER);
741 writel(host->size, base + MMCIDATALENGTH);
743 blksz_bits = ffs(data->blksz) - 1;
744 BUG_ON(1 << blksz_bits != data->blksz);
746 if (variant->blksz_datactrl16)
747 datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
749 datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
751 if (data->flags & MMC_DATA_READ)
752 datactrl |= MCI_DPSM_DIRECTION;
	/* The ST Micro variant has a special bit to enable SDIO */
755 if (variant->sdio && host->mmc->card)
756 if (mmc_card_sdio(host->mmc->card)) {
			 * The ST Micro variant has a special bit
			 * to enable SDIO.
763 datactrl |= MCI_ST_DPSM_SDIOEN;
766 * The ST Micro variant for SDIO small write transfers
767 * needs to have clock H/W flow control disabled,
768 * otherwise the transfer will not start. The threshold
769 * depends on the rate of MCLK.
771 if (data->flags & MMC_DATA_WRITE &&
773 (host->size <= 8 && host->mclk > 50000000)))
774 clk = host->clk_reg & ~variant->clkreg_enable;
776 clk = host->clk_reg | variant->clkreg_enable;
778 mmci_write_clkreg(host, clk);
781 if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
782 datactrl |= MCI_ST_DPSM_DDRMODE;
785 * Attempt to use DMA operation mode, if this
786 * should fail, fall back to PIO mode
788 if (!mmci_dma_start_data(host, datactrl))
791 /* IRQ mode, map the SG list for CPU reading/writing */
792 mmci_init_sg(host, data);
794 if (data->flags & MMC_DATA_READ) {
795 irqmask = MCI_RXFIFOHALFFULLMASK;
		 * If we have less than the fifo 'half-full' threshold to
		 * transfer, trigger a PIO interrupt as soon as any data
		 * is available.
802 if (host->size < variant->fifohalfsize)
803 irqmask |= MCI_RXDATAAVLBLMASK;
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
809 irqmask = MCI_TXFIFOHALFEMPTYMASK;
812 mmci_write_datactrlreg(host, datactrl);
813 writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
814 mmci_set_mask1(host, irqmask);
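/*
 * Program the argument and command into the CPSM. If MCI_CPSM_ENABLE is
 * still set from a previous command, the state machine is disabled first
 * before the new command and its response/interrupt flags are written.
 */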
818 mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
820 void __iomem *base = host->base;
822 dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
823 cmd->opcode, cmd->arg, cmd->flags);
825 if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
826 writel(0, base + MMCICOMMAND);
830 c |= cmd->opcode | MCI_CPSM_ENABLE;
831 if (cmd->flags & MMC_RSP_PRESENT) {
832 if (cmd->flags & MMC_RSP_136)
833 c |= MCI_CPSM_LONGRSP;
834 c |= MCI_CPSM_RESPONSE;
837 c |= MCI_CPSM_INTERRUPT;
841 writel(cmd->arg, base + MMCIARGUMENT);
842 writel(c, base + MMCICOMMAND);
846 mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
849 /* First check for errors */
850 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
851 MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
854 /* Terminate the DMA transfer */
855 if (dma_inprogress(host)) {
856 mmci_dma_data_error(host);
857 mmci_dma_unmap(host, data);
861 * Calculate how far we are into the transfer. Note that
862 * the data counter gives the number of bytes transferred
863 * on the MMC bus, not on the host side. On reads, this
864 * can be as much as a FIFO-worth of data ahead. This
865 * matters for FIFO overruns only.
867 remain = readl(host->base + MMCIDATACNT);
868 success = data->blksz * data->blocks - remain;
870 dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
872 if (status & MCI_DATACRCFAIL) {
873 /* Last block was not successful */
875 data->error = -EILSEQ;
876 } else if (status & MCI_DATATIMEOUT) {
877 data->error = -ETIMEDOUT;
878 } else if (status & MCI_STARTBITERR) {
879 data->error = -ECOMM;
880 } else if (status & MCI_TXUNDERRUN) {
882 } else if (status & MCI_RXOVERRUN) {
883 if (success > host->variant->fifosize)
884 success -= host->variant->fifosize;
889 data->bytes_xfered = round_down(success, data->blksz);
892 if (status & MCI_DATABLOCKEND)
893 dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");
895 if (status & MCI_DATAEND || data->error) {
896 if (dma_inprogress(host))
897 mmci_dma_finalize(host, data);
898 mmci_stop_data(host);
901 /* The error clause is handled above, success! */
902 data->bytes_xfered = data->blksz * data->blocks;
904 if (!data->stop || host->mrq->sbc) {
905 mmci_request_end(host, data->mrq);
907 mmci_start_command(host, data->stop, 0);
913 mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
916 void __iomem *base = host->base;
917 bool sbc = (cmd == host->mrq->sbc);
921 if (status & MCI_CMDTIMEOUT) {
922 cmd->error = -ETIMEDOUT;
923 } else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
924 cmd->error = -EILSEQ;
926 cmd->resp[0] = readl(base + MMCIRESPONSE0);
927 cmd->resp[1] = readl(base + MMCIRESPONSE1);
928 cmd->resp[2] = readl(base + MMCIRESPONSE2);
929 cmd->resp[3] = readl(base + MMCIRESPONSE3);
932 if ((!sbc && !cmd->data) || cmd->error) {
934 /* Terminate the DMA transfer */
935 if (dma_inprogress(host)) {
936 mmci_dma_data_error(host);
937 mmci_dma_unmap(host, host->data);
939 mmci_stop_data(host);
941 mmci_request_end(host, host->mrq);
943 mmci_start_command(host, host->mrq->cmd, 0);
944 } else if (!(cmd->data->flags & MMC_DATA_READ)) {
945 mmci_start_data(host, cmd->data);
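/*
 * Drain the receive FIFO into @buffer. MMCIFIFOCNT reports the number of
 * words still to be received from the card, so the host-side remaining
 * byte count minus that value (in bytes) is what currently sits in the FIFO.
 */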
949 static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
951 void __iomem *base = host->base;
954 int host_remain = host->size;
957 int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);
966 * SDIO especially may want to send something that is
967 * not divisible by 4 (as opposed to card sectors
968 * etc). Therefore make sure to always read the last bytes
969 * while only doing full 32-bit reads towards the FIFO.
971 if (unlikely(count & 0x3)) {
973 unsigned char buf[4];
974 ioread32_rep(base + MMCIFIFO, buf, 1);
975 memcpy(ptr, buf, count);
977 ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
981 ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
986 host_remain -= count;
991 status = readl(base + MMCISTATUS);
992 } while (status & MCI_RXDATAAVLBL);
997 static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
999 struct variant_data *variant = host->variant;
1000 void __iomem *base = host->base;
1004 unsigned int count, maxcnt;
1006 maxcnt = status & MCI_TXFIFOEMPTY ?
1007 variant->fifosize : variant->fifohalfsize;
1008 count = min(remain, maxcnt);
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc), and the FIFO only accepts full 32-bit writes.
		 * So compensate by adding +3 to the count: a single
		 * byte becomes one 32-bit write, 7 bytes become two
		 * 32-bit writes, etc.
1018 iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2);
1026 status = readl(base + MMCISTATUS);
1027 } while (status & MCI_TXFIFOHALFEMPTY);
1029 return ptr - buffer;
1033 * PIO data transfer IRQ handler.
1035 static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
1037 struct mmci_host *host = dev_id;
1038 struct sg_mapping_iter *sg_miter = &host->sg_miter;
1039 struct variant_data *variant = host->variant;
1040 void __iomem *base = host->base;
1041 unsigned long flags;
1044 status = readl(base + MMCISTATUS);
1046 dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);
1048 local_irq_save(flags);
1051 unsigned int remain, len;
1055 * For write, we only need to test the half-empty flag
1056 * here - if the FIFO is completely empty, then by
1057 * definition it is more than half empty.
1059 * For read, check for data available.
1061 if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
1064 if (!sg_miter_next(sg_miter))
1067 buffer = sg_miter->addr;
1068 remain = sg_miter->length;
1071 if (status & MCI_RXACTIVE)
1072 len = mmci_pio_read(host, buffer, remain);
1073 if (status & MCI_TXACTIVE)
1074 len = mmci_pio_write(host, buffer, remain, status);
1076 sg_miter->consumed = len;
1084 status = readl(base + MMCISTATUS);
1087 sg_miter_stop(sg_miter);
1089 local_irq_restore(flags);
1092 * If we have less than the fifo 'half-full' threshold to transfer,
1093 * trigger a PIO interrupt as soon as any data is available.
1095 if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
1096 mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);
1099 * If we run out of data, disable the data IRQs; this
1100 * prevents a race where the FIFO becomes empty before
1101 * the chip itself has disabled the data path, and
1102 * stops us racing with our data end IRQ.
1104 if (host->size == 0) {
1105 mmci_set_mask1(host, 0);
1106 writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
1113 * Handle completion of command and data transfers.
1115 static irqreturn_t mmci_irq(int irq, void *dev_id)
1117 struct mmci_host *host = dev_id;
1121 spin_lock(&host->lock);
1124 struct mmc_command *cmd;
1125 struct mmc_data *data;
1127 status = readl(host->base + MMCISTATUS);
1129 if (host->singleirq) {
1130 if (status & readl(host->base + MMCIMASK1))
1131 mmci_pio_irq(irq, dev_id);
1133 status &= ~MCI_IRQ1MASK;
1136 status &= readl(host->base + MMCIMASK0);
1137 writel(status, host->base + MMCICLEAR);
1139 dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
1142 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
1143 MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
1144 MCI_DATABLOCKEND) && data)
1145 mmci_data_irq(host, data, status);
1148 if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
1149 mmci_cmd_irq(host, cmd, status);
1154 spin_unlock(&host->lock);
1156 return IRQ_RETVAL(ret);
1159 static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1161 struct mmci_host *host = mmc_priv(mmc);
1162 unsigned long flags;
1164 WARN_ON(host->mrq != NULL);
1166 mrq->cmd->error = mmci_validate_data(host, mrq->data);
1167 if (mrq->cmd->error) {
1168 mmc_request_done(mmc, mrq);
1172 pm_runtime_get_sync(mmc_dev(mmc));
1174 spin_lock_irqsave(&host->lock, flags);
1179 mmci_get_next_data(host, mrq->data);
1181 if (mrq->data && mrq->data->flags & MMC_DATA_READ)
1182 mmci_start_data(host, mrq->data);
1185 mmci_start_command(host, mrq->sbc, 0);
1187 mmci_start_command(host, mrq->cmd, 0);
1189 spin_unlock_irqrestore(&host->lock, flags);
1192 static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1194 struct mmci_host *host = mmc_priv(mmc);
1195 struct variant_data *variant = host->variant;
1197 unsigned long flags;
1200 pm_runtime_get_sync(mmc_dev(mmc));
1202 if (host->plat->ios_handler &&
1203 host->plat->ios_handler(mmc_dev(mmc), ios))
1204 dev_err(mmc_dev(mmc), "platform ios_handler failed\n");
1206 switch (ios->power_mode) {
1208 if (!IS_ERR(mmc->supply.vmmc))
1209 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
1211 if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
1212 regulator_disable(mmc->supply.vqmmc);
1213 host->vqmmc_enabled = false;
1218 if (!IS_ERR(mmc->supply.vmmc))
1219 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		 * The ST Micro variant doesn't have the PL180's MCI_PWR_UP
1223 * and instead uses MCI_PWR_ON so apply whatever value is
1224 * configured in the variant data.
1226 pwr |= variant->pwrreg_powerup;
1230 if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
1231 ret = regulator_enable(mmc->supply.vqmmc);
1233 dev_err(mmc_dev(mmc),
1234 "failed to enable vqmmc regulator\n");
1236 host->vqmmc_enabled = true;
1243 if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
1245 * The ST Micro variant has some additional bits
1246 * indicating signal direction for the signals in
1247 * the SD/MMC bus and feedback-clock usage.
1249 pwr |= host->plat->sigdir;
1251 if (ios->bus_width == MMC_BUS_WIDTH_4)
1252 pwr &= ~MCI_ST_DATA74DIREN;
1253 else if (ios->bus_width == MMC_BUS_WIDTH_1)
1254 pwr &= (~MCI_ST_DATA74DIREN &
1255 ~MCI_ST_DATA31DIREN &
1256 ~MCI_ST_DATA2DIREN);
1259 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
1260 if (host->hw_designer != AMBA_VENDOR_ST)
		 * The ST Micro variant uses the ROD bit for something
1265 * else and only has OD (Open Drain).
1272 * If clock = 0 and the variant requires the MMCIPOWER to be used for
1273 * gating the clock, the MCI_PWR_ON bit is cleared.
1275 if (!ios->clock && variant->pwrreg_clkgate)
1278 spin_lock_irqsave(&host->lock, flags);
1280 mmci_set_clkreg(host, ios->clock);
1281 mmci_write_pwrreg(host, pwr);
1282 mmci_reg_delay(host);
1284 spin_unlock_irqrestore(&host->lock, flags);
1286 pm_runtime_mark_last_busy(mmc_dev(mmc));
1287 pm_runtime_put_autosuspend(mmc_dev(mmc));
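/*
 * Report the write-protect switch state from the WP GPIO. A host->gpio_wp
 * of -ENOSYS means no GPIO was configured; -ENOSYS is then returned and
 * the core assumes the card is writable.
 */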
1290 static int mmci_get_ro(struct mmc_host *mmc)
1292 struct mmci_host *host = mmc_priv(mmc);
1294 if (host->gpio_wp == -ENOSYS)
1297 return gpio_get_value_cansleep(host->gpio_wp);
1300 static int mmci_get_cd(struct mmc_host *mmc)
1302 struct mmci_host *host = mmc_priv(mmc);
1303 struct mmci_platform_data *plat = host->plat;
1304 unsigned int status;
1306 if (host->gpio_cd == -ENOSYS) {
1308 return 1; /* Assume always present */
1310 status = plat->status(mmc_dev(host->mmc));
1312 status = !!gpio_get_value_cansleep(host->gpio_cd)
1316 * Use positive logic throughout - status is zero for no card,
1317 * non-zero for card inserted.
1322 static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
1326 if (!IS_ERR(mmc->supply.vqmmc)) {
1328 pm_runtime_get_sync(mmc_dev(mmc));
1330 switch (ios->signal_voltage) {
1331 case MMC_SIGNAL_VOLTAGE_330:
1332 ret = regulator_set_voltage(mmc->supply.vqmmc,
1335 case MMC_SIGNAL_VOLTAGE_180:
1336 ret = regulator_set_voltage(mmc->supply.vqmmc,
1339 case MMC_SIGNAL_VOLTAGE_120:
1340 ret = regulator_set_voltage(mmc->supply.vqmmc,
1346 dev_warn(mmc_dev(mmc), "Voltage switch failed\n");
1348 pm_runtime_mark_last_busy(mmc_dev(mmc));
1349 pm_runtime_put_autosuspend(mmc_dev(mmc));
1355 static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
1357 struct mmci_host *host = dev_id;
1359 mmc_detect_change(host->mmc, msecs_to_jiffies(500));
1364 static struct mmc_host_ops mmci_ops = {
1365 .request = mmci_request,
1366 .pre_req = mmci_pre_request,
1367 .post_req = mmci_post_request,
1368 .set_ios = mmci_set_ios,
1369 .get_ro = mmci_get_ro,
1370 .get_cd = mmci_get_cd,
1371 .start_signal_voltage_switch = mmci_sig_volt_switch,
1375 static void mmci_dt_populate_generic_pdata(struct device_node *np,
1376 struct mmci_platform_data *pdata)
1380 pdata->gpio_wp = of_get_named_gpio(np, "wp-gpios", 0);
1381 pdata->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0);
1383 if (of_get_property(np, "cd-inverted", NULL))
1384 pdata->cd_invert = true;
1386 pdata->cd_invert = false;
1388 of_property_read_u32(np, "max-frequency", &pdata->f_max);
1390 pr_warn("%s has no 'max-frequency' property\n", np->full_name);
1392 if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
1393 pdata->capabilities |= MMC_CAP_MMC_HIGHSPEED;
1394 if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
1395 pdata->capabilities |= MMC_CAP_SD_HIGHSPEED;
1397 of_property_read_u32(np, "bus-width", &bus_width);
1398 switch (bus_width) {
1400 /* No bus-width supplied. */
1403 pdata->capabilities |= MMC_CAP_4_BIT_DATA;
1406 pdata->capabilities |= MMC_CAP_8_BIT_DATA;
1409 pr_warn("%s: Unsupported bus width\n", np->full_name);
1413 static void mmci_dt_populate_generic_pdata(struct device_node *np,
1414 struct mmci_platform_data *pdata)
1420 static int mmci_probe(struct amba_device *dev,
1421 const struct amba_id *id)
1423 struct mmci_platform_data *plat = dev->dev.platform_data;
1424 struct device_node *np = dev->dev.of_node;
1425 struct variant_data *variant = id->data;
1426 struct mmci_host *host;
1427 struct mmc_host *mmc;
1430 /* Must have platform data or Device Tree. */
1432 dev_err(&dev->dev, "No plat data or DT found\n");
1437 plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
1443 mmci_dt_populate_generic_pdata(np, plat);
1445 ret = amba_request_regions(dev, DRIVER_NAME);
1449 mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
1455 host = mmc_priv(mmc);
1458 host->gpio_wp = -ENOSYS;
1459 host->gpio_cd = -ENOSYS;
1460 host->gpio_cd_irq = -1;
1462 host->hw_designer = amba_manf(dev);
1463 host->hw_revision = amba_rev(dev);
1464 dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
1465 dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);
1467 host->clk = devm_clk_get(&dev->dev, NULL);
1468 if (IS_ERR(host->clk)) {
1469 ret = PTR_ERR(host->clk);
1473 ret = clk_prepare_enable(host->clk);
1478 host->variant = variant;
1479 host->mclk = clk_get_rate(host->clk);
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this
	 * (if possible).
1485 if (host->mclk > 100000000) {
1486 ret = clk_set_rate(host->clk, 100000000);
1489 host->mclk = clk_get_rate(host->clk);
1490 dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
1493 host->phybase = dev->res.start;
1494 host->base = ioremap(dev->res.start, resource_size(&dev->res));
1500 if (variant->busy_detect) {
1501 mmci_ops.card_busy = mmci_card_busy;
1502 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
1505 mmc->ops = &mmci_ops;
	 * The ARM and ST versions of the block have slightly different
	 * clock divider equations, which means the minimum divider
	 * value differs as well.
1511 if (variant->st_clkdiv)
1512 mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
1514 mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
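	/*
	 * The constants follow from the divider equations above: the ST
	 * variants use f = mclk / (clkdiv + 2) with a maximum clkdiv of 255
	 * (hence mclk / 257), while the PL180 uses f = mclk / (2 * (clkdiv + 1))
	 * (hence mclk / 512).
	 */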
	 * If the platform data supplies a maximum operating
	 * frequency, this takes precedence. Else, we fall back
	 * to using the module parameter, which has a (low)
	 * default value in case it is not specified. Either
	 * value must not exceed the clock rate into the block,
	 * of course.
1524 mmc->f_max = min(host->mclk, plat->f_max);
1526 mmc->f_max = min(host->mclk, fmax);
1527 dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
1529 /* Get regulators and the supported OCR mask */
1530 mmc_regulator_get_supply(mmc);
1531 if (!mmc->ocr_avail)
1532 mmc->ocr_avail = plat->ocr_mask;
1533 else if (plat->ocr_mask)
1534 dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
1536 mmc->caps = plat->capabilities;
1537 mmc->caps2 = plat->capabilities2;
1539 /* We support these PM capabilities. */
1540 mmc->pm_caps = MMC_PM_KEEP_POWER;
1545 mmc->max_segs = NR_SG;
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
	 * single request.
1552 mmc->max_req_size = (1 << variant->datalength_bits) - 1;
1555 * Set the maximum segment size. Since we aren't doing DMA
1556 * (yet) we are only limited by the data length register.
1558 mmc->max_seg_size = mmc->max_req_size;
1561 * Block size can be up to 2048 bytes, but must be a power of two.
1563 mmc->max_blk_size = 1 << 11;
1566 * Limit the number of blocks transferred so that we don't overflow
1567 * the maximum request size.
1569 mmc->max_blk_count = mmc->max_req_size >> 11;
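	/*
	 * Worked example (illustrative): with 24 datalength bits the maximum
	 * request is 2^24 - 1 = 16777215 bytes, so with 2048-byte blocks
	 * max_blk_count ends up as 16777215 >> 11 = 8191.
	 */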
1571 spin_lock_init(&host->lock);
1573 writel(0, host->base + MMCIMASK0);
1574 writel(0, host->base + MMCIMASK1);
1575 writel(0xfff, host->base + MMCICLEAR);
1577 if (plat->gpio_cd == -EPROBE_DEFER) {
1578 ret = -EPROBE_DEFER;
1581 if (gpio_is_valid(plat->gpio_cd)) {
1582 ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
1584 ret = gpio_direction_input(plat->gpio_cd);
1586 host->gpio_cd = plat->gpio_cd;
1587 else if (ret != -ENOSYS)
		 * A gpio pin that will detect cards when inserted and removed
		 * will most likely want to trigger on the edges if it is
		 * 0 when ejected and 1 when inserted (or mutatis mutandis
		 * for the inverted case), so we request triggers on both
		 * edges.
1597 ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
1599 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
1600 DRIVER_NAME " (cd)", host);
1602 host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
1604 if (plat->gpio_wp == -EPROBE_DEFER) {
1605 ret = -EPROBE_DEFER;
1608 if (gpio_is_valid(plat->gpio_wp)) {
1609 ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
1611 ret = gpio_direction_input(plat->gpio_wp);
1613 host->gpio_wp = plat->gpio_wp;
1614 else if (ret != -ENOSYS)
1618 if ((host->plat->status || host->gpio_cd != -ENOSYS)
1619 && host->gpio_cd_irq < 0)
1620 mmc->caps |= MMC_CAP_NEEDS_POLL;
1622 ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
1627 host->singleirq = true;
1629 ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
1630 DRIVER_NAME " (pio)", host);
1635 writel(MCI_IRQENABLE, host->base + MMCIMASK0);
1637 amba_set_drvdata(dev, mmc);
1639 dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
1640 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
1641 amba_rev(dev), (unsigned long long)dev->res.start,
1642 dev->irq[0], dev->irq[1]);
1644 mmci_dma_setup(host);
1646 pm_runtime_set_autosuspend_delay(&dev->dev, 50);
1647 pm_runtime_use_autosuspend(&dev->dev);
1648 pm_runtime_put(&dev->dev);
1655 free_irq(dev->irq[0], host);
1657 if (host->gpio_wp != -ENOSYS)
1658 gpio_free(host->gpio_wp);
1660 if (host->gpio_cd_irq >= 0)
1661 free_irq(host->gpio_cd_irq, host);
1662 if (host->gpio_cd != -ENOSYS)
1663 gpio_free(host->gpio_cd);
1665 iounmap(host->base);
1667 clk_disable_unprepare(host->clk);
1671 amba_release_regions(dev);
1676 static int mmci_remove(struct amba_device *dev)
1678 struct mmc_host *mmc = amba_get_drvdata(dev);
1680 amba_set_drvdata(dev, NULL);
1683 struct mmci_host *host = mmc_priv(mmc);
1686 * Undo pm_runtime_put() in probe. We use the _sync
1687 * version here so that we can access the primecell.
1689 pm_runtime_get_sync(&dev->dev);
1691 mmc_remove_host(mmc);
1693 writel(0, host->base + MMCIMASK0);
1694 writel(0, host->base + MMCIMASK1);
1696 writel(0, host->base + MMCICOMMAND);
1697 writel(0, host->base + MMCIDATACTRL);
1699 mmci_dma_release(host);
1700 free_irq(dev->irq[0], host);
1701 if (!host->singleirq)
1702 free_irq(dev->irq[1], host);
1704 if (host->gpio_wp != -ENOSYS)
1705 gpio_free(host->gpio_wp);
1706 if (host->gpio_cd_irq >= 0)
1707 free_irq(host->gpio_cd_irq, host);
1708 if (host->gpio_cd != -ENOSYS)
1709 gpio_free(host->gpio_cd);
1711 iounmap(host->base);
1712 clk_disable_unprepare(host->clk);
1716 amba_release_regions(dev);
1722 #ifdef CONFIG_SUSPEND
1723 static int mmci_suspend(struct device *dev)
1725 struct amba_device *adev = to_amba_device(dev);
1726 struct mmc_host *mmc = amba_get_drvdata(adev);
1730 struct mmci_host *host = mmc_priv(mmc);
1732 ret = mmc_suspend_host(mmc);
1734 pm_runtime_get_sync(dev);
1735 writel(0, host->base + MMCIMASK0);
1742 static int mmci_resume(struct device *dev)
1744 struct amba_device *adev = to_amba_device(dev);
1745 struct mmc_host *mmc = amba_get_drvdata(adev);
1749 struct mmci_host *host = mmc_priv(mmc);
1751 writel(MCI_IRQENABLE, host->base + MMCIMASK0);
1752 pm_runtime_put(dev);
1754 ret = mmc_resume_host(mmc);
1761 #ifdef CONFIG_PM_RUNTIME
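/*
 * Runtime PM callbacks: suspend gates the interface clock and moves the
 * pins to their sleep state; resume re-enables the clock and restores the
 * default pinctrl state.
 */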
1762 static int mmci_runtime_suspend(struct device *dev)
1764 struct amba_device *adev = to_amba_device(dev);
1765 struct mmc_host *mmc = amba_get_drvdata(adev);
1768 struct mmci_host *host = mmc_priv(mmc);
1769 pinctrl_pm_select_sleep_state(dev);
1770 clk_disable_unprepare(host->clk);
1776 static int mmci_runtime_resume(struct device *dev)
1778 struct amba_device *adev = to_amba_device(dev);
1779 struct mmc_host *mmc = amba_get_drvdata(adev);
1782 struct mmci_host *host = mmc_priv(mmc);
1783 clk_prepare_enable(host->clk);
1784 pinctrl_pm_select_default_state(dev);
1791 static const struct dev_pm_ops mmci_dev_pm_ops = {
1792 SET_SYSTEM_SLEEP_PM_OPS(mmci_suspend, mmci_resume)
1793 SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
1796 static struct amba_id mmci_ids[] = {
1800 .data = &variant_arm,
1805 .data = &variant_arm_extended_fifo,
1810 .data = &variant_arm_extended_fifo_hwfc,
1815 .data = &variant_arm,
1817 /* ST Micro variants */
1821 .data = &variant_u300,
1826 .data = &variant_nomadik,
1831 .data = &variant_u300,
1836 .data = &variant_ux500,
1841 .data = &variant_ux500v2,
1846 MODULE_DEVICE_TABLE(amba, mmci_ids);
1848 static struct amba_driver mmci_driver = {
1850 .name = DRIVER_NAME,
1851 .pm = &mmci_dev_pm_ops,
1853 .probe = mmci_probe,
1854 .remove = mmci_remove,
1855 .id_table = mmci_ids,
1858 module_amba_driver(mmci_driver);
1860 module_param(fmax, uint, 0444);
1862 MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
1863 MODULE_LICENSE("GPL");