1 /*
2  *  linux/drivers/mmc/host/omap.c
3  *
4  *  Copyright (C) 2004 Nokia Corporation
5  *  Written by Tuukka Tikkanen and Juha Yrjölä <juha.yrjola@nokia.com>
6  *  Misc hacks here and there by Tony Lindgren <tony@atomide.com>
7  *  Other hacks (DMA, SD, etc) by David Brownell
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  */
13
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16 #include <linux/init.h>
17 #include <linux/ioport.h>
18 #include <linux/platform_device.h>
19 #include <linux/interrupt.h>
20 #include <linux/dmaengine.h>
21 #include <linux/dma-mapping.h>
22 #include <linux/delay.h>
23 #include <linux/spinlock.h>
24 #include <linux/timer.h>
25 #include <linux/omap-dma.h>
26 #include <linux/mmc/host.h>
27 #include <linux/mmc/card.h>
28 #include <linux/clk.h>
29 #include <linux/scatterlist.h>
30 #include <linux/slab.h>
31 #include <linux/platform_data/mmc-omap.h>
32
33
34 #define OMAP_MMC_REG_CMD        0x00
35 #define OMAP_MMC_REG_ARGL       0x01
36 #define OMAP_MMC_REG_ARGH       0x02
37 #define OMAP_MMC_REG_CON        0x03
38 #define OMAP_MMC_REG_STAT       0x04
39 #define OMAP_MMC_REG_IE         0x05
40 #define OMAP_MMC_REG_CTO        0x06
41 #define OMAP_MMC_REG_DTO        0x07
42 #define OMAP_MMC_REG_DATA       0x08
43 #define OMAP_MMC_REG_BLEN       0x09
44 #define OMAP_MMC_REG_NBLK       0x0a
45 #define OMAP_MMC_REG_BUF        0x0b
46 #define OMAP_MMC_REG_SDIO       0x0d
47 #define OMAP_MMC_REG_REV        0x0f
48 #define OMAP_MMC_REG_RSP0       0x10
49 #define OMAP_MMC_REG_RSP1       0x11
50 #define OMAP_MMC_REG_RSP2       0x12
51 #define OMAP_MMC_REG_RSP3       0x13
52 #define OMAP_MMC_REG_RSP4       0x14
53 #define OMAP_MMC_REG_RSP5       0x15
54 #define OMAP_MMC_REG_RSP6       0x16
55 #define OMAP_MMC_REG_RSP7       0x17
56 #define OMAP_MMC_REG_IOSR       0x18
57 #define OMAP_MMC_REG_SYSC       0x19
58 #define OMAP_MMC_REG_SYSS       0x1a
59
60 #define OMAP_MMC_STAT_CARD_ERR          (1 << 14)
61 #define OMAP_MMC_STAT_CARD_IRQ          (1 << 13)
62 #define OMAP_MMC_STAT_OCR_BUSY          (1 << 12)
63 #define OMAP_MMC_STAT_A_EMPTY           (1 << 11)
64 #define OMAP_MMC_STAT_A_FULL            (1 << 10)
65 #define OMAP_MMC_STAT_CMD_CRC           (1 <<  8)
66 #define OMAP_MMC_STAT_CMD_TOUT          (1 <<  7)
67 #define OMAP_MMC_STAT_DATA_CRC          (1 <<  6)
68 #define OMAP_MMC_STAT_DATA_TOUT         (1 <<  5)
69 #define OMAP_MMC_STAT_END_BUSY          (1 <<  4)
70 #define OMAP_MMC_STAT_END_OF_DATA       (1 <<  3)
71 #define OMAP_MMC_STAT_CARD_BUSY         (1 <<  2)
72 #define OMAP_MMC_STAT_END_OF_CMD        (1 <<  0)
73
74 #define mmc_omap7xx()   (host->features & MMC_OMAP7XX)
75 #define mmc_omap15xx()  (host->features & MMC_OMAP15XX)
76 #define mmc_omap16xx()  (host->features & MMC_OMAP16XX)
77 #define MMC_OMAP1_MASK  (MMC_OMAP7XX | MMC_OMAP15XX | MMC_OMAP16XX)
78 #define mmc_omap1()     (host->features & MMC_OMAP1_MASK)
79 #define mmc_omap2()     (!mmc_omap1())
80
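/*
 * The OMAP_MMC_REG_* offsets above are register indices; OMAP_MMC_REG()
 * shifts them by host->reg_shift (1 on OMAP7xx, 2 on later chips, set at
 * probe time) to obtain the byte offset used by the 16-bit accessors below.
 */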
81 #define OMAP_MMC_REG(host, reg)         (OMAP_MMC_REG_##reg << (host)->reg_shift)
82 #define OMAP_MMC_READ(host, reg)        __raw_readw((host)->virt_base + OMAP_MMC_REG(host, reg))
83 #define OMAP_MMC_WRITE(host, reg, val)  __raw_writew((val), (host)->virt_base + OMAP_MMC_REG(host, reg))
84
85 /*
86  * Command types
87  */
88 #define OMAP_MMC_CMDTYPE_BC     0
89 #define OMAP_MMC_CMDTYPE_BCR    1
90 #define OMAP_MMC_CMDTYPE_AC     2
91 #define OMAP_MMC_CMDTYPE_ADTC   3
92
93 #define OMAP_DMA_MMC_TX         21
94 #define OMAP_DMA_MMC_RX         22
95 #define OMAP_DMA_MMC2_TX        54
96 #define OMAP_DMA_MMC2_RX        55
97
98 #define OMAP24XX_DMA_MMC2_TX    47
99 #define OMAP24XX_DMA_MMC2_RX    48
100 #define OMAP24XX_DMA_MMC1_TX    61
101 #define OMAP24XX_DMA_MMC1_RX    62
102
103
104 #define DRIVER_NAME "mmci-omap"
105
106 /* Specifies how often in milliseconds to poll for card status changes
107  * when the cover switch is open */
108 #define OMAP_MMC_COVER_POLL_DELAY       500
109
110 struct mmc_omap_host;
111
112 struct mmc_omap_slot {
113         int                     id;
114         unsigned int            vdd;
115         u16                     saved_con;
116         u16                     bus_mode;
117         unsigned int            fclk_freq;
118
119         struct tasklet_struct   cover_tasklet;
120         struct timer_list       cover_timer;
121         unsigned                cover_open;
122
123         struct mmc_request      *mrq;
124         struct mmc_omap_host    *host;
125         struct mmc_host         *mmc;
126         struct omap_mmc_slot_data *pdata;
127 };
128
129 struct mmc_omap_host {
130         int                     initialized;
131         int                     suspended;
132         struct mmc_request *    mrq;
133         struct mmc_command *    cmd;
134         struct mmc_data *       data;
135         struct mmc_host *       mmc;
136         struct device *         dev;
137         unsigned char           id; /* 16xx chips have 2 MMC blocks */
138         struct clk *            iclk;
139         struct clk *            fclk;
140         struct dma_chan         *dma_rx;
141         u32                     dma_rx_burst;
142         struct dma_chan         *dma_tx;
143         u32                     dma_tx_burst;
144         struct resource         *mem_res;
145         void __iomem            *virt_base;
146         unsigned int            phys_base;
147         int                     irq;
148         unsigned char           bus_mode;
149         unsigned int            reg_shift;
150
151         struct work_struct      cmd_abort_work;
152         unsigned                abort:1;
153         struct timer_list       cmd_abort_timer;
154
155         struct work_struct      slot_release_work;
156         struct mmc_omap_slot    *next_slot;
157         struct work_struct      send_stop_work;
158         struct mmc_data         *stop_data;
159
160         unsigned int            sg_len;
161         int                     sg_idx;
162         u16 *                   buffer;
163         u32                     buffer_bytes_left;
164         u32                     total_bytes_left;
165
166         unsigned                features;
167         unsigned                use_dma:1;
168         unsigned                brs_received:1, dma_done:1;
169         unsigned                dma_in_use:1;
170         spinlock_t              dma_lock;
171
172         struct mmc_omap_slot    *slots[OMAP_MMC_MAX_SLOTS];
173         struct mmc_omap_slot    *current_slot;
174         spinlock_t              slot_lock;
175         wait_queue_head_t       slot_wq;
176         int                     nr_slots;
177
178         struct timer_list       clk_timer;
179         spinlock_t              clk_lock;     /* for changing enabled state */
180         unsigned int            fclk_enabled:1;
181         struct workqueue_struct *mmc_omap_wq;
182
183         struct omap_mmc_platform_data *pdata;
184 };
185
186
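/*
 * Delay for eight cycles of the current slot's functional clock (only while
 * the clock is actually running) so that pending activity settles before the
 * clock is gated or another slot is selected.
 */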
187 static void mmc_omap_fclk_offdelay(struct mmc_omap_slot *slot)
188 {
189         unsigned long tick_ns;
190
191         if (slot != NULL && slot->host->fclk_enabled && slot->fclk_freq > 0) {
192                 tick_ns = (1000000000 + slot->fclk_freq - 1) / slot->fclk_freq;
193                 ndelay(8 * tick_ns);
194         }
195 }
196
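/* Enable or disable the functional clock, tracking the current state under
 * clk_lock so that clk_enable()/clk_disable() calls stay balanced. */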
197 static void mmc_omap_fclk_enable(struct mmc_omap_host *host, unsigned int enable)
198 {
199         unsigned long flags;
200
201         spin_lock_irqsave(&host->clk_lock, flags);
202         if (host->fclk_enabled != enable) {
203                 host->fclk_enabled = enable;
204                 if (enable)
205                         clk_enable(host->fclk);
206                 else
207                         clk_disable(host->fclk);
208         }
209         spin_unlock_irqrestore(&host->clk_lock, flags);
210 }
211
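/*
 * Claim the controller for @slot.  Unless @claimed is set, sleep until no
 * other slot owns the host.  Switching slots restores the slot's saved CON
 * value and calls the board's switch_slot hook; the functional clock is
 * enabled for a claimed slot and gated otherwise.
 */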
212 static void mmc_omap_select_slot(struct mmc_omap_slot *slot, int claimed)
213 {
214         struct mmc_omap_host *host = slot->host;
215         unsigned long flags;
216
217         if (claimed)
218                 goto no_claim;
219         spin_lock_irqsave(&host->slot_lock, flags);
220         while (host->mmc != NULL) {
221                 spin_unlock_irqrestore(&host->slot_lock, flags);
222                 wait_event(host->slot_wq, host->mmc == NULL);
223                 spin_lock_irqsave(&host->slot_lock, flags);
224         }
225         host->mmc = slot->mmc;
226         spin_unlock_irqrestore(&host->slot_lock, flags);
227 no_claim:
228         del_timer(&host->clk_timer);
229         if (host->current_slot != slot || !claimed)
230                 mmc_omap_fclk_offdelay(host->current_slot);
231
232         if (host->current_slot != slot) {
233                 OMAP_MMC_WRITE(host, CON, slot->saved_con & 0xFC00);
234                 if (host->pdata->switch_slot != NULL)
235                         host->pdata->switch_slot(mmc_dev(slot->mmc), slot->id);
236                 host->current_slot = slot;
237         }
238
239         if (claimed) {
240                 mmc_omap_fclk_enable(host, 1);
241
242                 /* Doing the dummy read here seems to work around some bug
243                  * at least in OMAP24xx silicon where the command would not
244                  * start after writing the CMD register. Sigh. */
245                 OMAP_MMC_READ(host, CON);
246
247                 OMAP_MMC_WRITE(host, CON, slot->saved_con);
248         } else
249                 mmc_omap_fclk_enable(host, 0);
250 }
251
252 static void mmc_omap_start_request(struct mmc_omap_host *host,
253                                    struct mmc_request *req);
254
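/* Deferred work: hand the controller to the next waiting slot and start the
 * request it queued while another slot owned the host. */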
255 static void mmc_omap_slot_release_work(struct work_struct *work)
256 {
257         struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
258                                                   slot_release_work);
259         struct mmc_omap_slot *next_slot = host->next_slot;
260         struct mmc_request *rq;
261
262         host->next_slot = NULL;
263         mmc_omap_select_slot(next_slot, 1);
264
265         rq = next_slot->mrq;
266         next_slot->mrq = NULL;
267         mmc_omap_start_request(host, rq);
268 }
269
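/*
 * Release the controller.  If the caller left the clock running, keep it on
 * briefly via clk_timer, otherwise gate it right away.  Then either hand the
 * host to a slot with a queued request (via slot_release_work) or mark the
 * host free and wake anyone sleeping in mmc_omap_select_slot().
 */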
270 static void mmc_omap_release_slot(struct mmc_omap_slot *slot, int clk_enabled)
271 {
272         struct mmc_omap_host *host = slot->host;
273         unsigned long flags;
274         int i;
275
276         BUG_ON(slot == NULL || host->mmc == NULL);
277
278         if (clk_enabled)
279                 /* Keeps clock running for at least 8 cycles on valid freq */
280                 mod_timer(&host->clk_timer, jiffies  + HZ/10);
281         else {
282                 del_timer(&host->clk_timer);
283                 mmc_omap_fclk_offdelay(slot);
284                 mmc_omap_fclk_enable(host, 0);
285         }
286
287         spin_lock_irqsave(&host->slot_lock, flags);
288         /* Check for any pending requests */
289         for (i = 0; i < host->nr_slots; i++) {
290                 struct mmc_omap_slot *new_slot;
291
292                 if (host->slots[i] == NULL || host->slots[i]->mrq == NULL)
293                         continue;
294
295                 BUG_ON(host->next_slot != NULL);
296                 new_slot = host->slots[i];
297                 /* The current slot should not have a request in queue */
298                 BUG_ON(new_slot == host->current_slot);
299
300                 host->next_slot = new_slot;
301                 host->mmc = new_slot->mmc;
302                 spin_unlock_irqrestore(&host->slot_lock, flags);
303                 queue_work(host->mmc_omap_wq, &host->slot_release_work);
304                 return;
305         }
306
307         host->mmc = NULL;
308         wake_up(&host->slot_wq);
309         spin_unlock_irqrestore(&host->slot_lock, flags);
310 }
311
312 static inline
313 int mmc_omap_cover_is_open(struct mmc_omap_slot *slot)
314 {
315         if (slot->pdata->get_cover_state)
316                 return slot->pdata->get_cover_state(mmc_dev(slot->mmc),
317                                                     slot->id);
318         return 0;
319 }
320
321 static ssize_t
322 mmc_omap_show_cover_switch(struct device *dev, struct device_attribute *attr,
323                            char *buf)
324 {
325         struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
326         struct mmc_omap_slot *slot = mmc_priv(mmc);
327
328         return sprintf(buf, "%s\n", mmc_omap_cover_is_open(slot) ? "open" :
329                        "closed");
330 }
331
332 static DEVICE_ATTR(cover_switch, S_IRUGO, mmc_omap_show_cover_switch, NULL);
333
334 static ssize_t
335 mmc_omap_show_slot_name(struct device *dev, struct device_attribute *attr,
336                         char *buf)
337 {
338         struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
339         struct mmc_omap_slot *slot = mmc_priv(mmc);
340
341         return sprintf(buf, "%s\n", slot->pdata->name);
342 }
343
344 static DEVICE_ATTR(slot_name, S_IRUGO, mmc_omap_show_slot_name, NULL);
345
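/*
 * Build the CMD register value from the opcode, response type and command
 * class, set the open-drain, busy and data-direction bits as needed, then
 * program the argument, timeout and interrupt mask and start the command.
 * cmd_abort_timer catches commands that never complete.
 */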
346 static void
347 mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd)
348 {
349         u32 cmdreg;
350         u32 resptype;
351         u32 cmdtype;
352
353         host->cmd = cmd;
354
355         resptype = 0;
356         cmdtype = 0;
357
358         /* Our hardware needs to know the exact response type */
359         switch (mmc_resp_type(cmd)) {
360         case MMC_RSP_NONE:
361                 break;
362         case MMC_RSP_R1:
363         case MMC_RSP_R1B:
364                 /* resp 1, 1b, 6, 7 */
365                 resptype = 1;
366                 break;
367         case MMC_RSP_R2:
368                 resptype = 2;
369                 break;
370         case MMC_RSP_R3:
371                 resptype = 3;
372                 break;
373         default:
374                 dev_err(mmc_dev(host->mmc), "Invalid response type: %04x\n", mmc_resp_type(cmd));
375                 break;
376         }
377
378         if (mmc_cmd_type(cmd) == MMC_CMD_ADTC) {
379                 cmdtype = OMAP_MMC_CMDTYPE_ADTC;
380         } else if (mmc_cmd_type(cmd) == MMC_CMD_BC) {
381                 cmdtype = OMAP_MMC_CMDTYPE_BC;
382         } else if (mmc_cmd_type(cmd) == MMC_CMD_BCR) {
383                 cmdtype = OMAP_MMC_CMDTYPE_BCR;
384         } else {
385                 cmdtype = OMAP_MMC_CMDTYPE_AC;
386         }
387
388         cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12);
389
390         if (host->current_slot->bus_mode == MMC_BUSMODE_OPENDRAIN)
391                 cmdreg |= 1 << 6;
392
393         if (cmd->flags & MMC_RSP_BUSY)
394                 cmdreg |= 1 << 11;
395
396         if (host->data && !(host->data->flags & MMC_DATA_WRITE))
397                 cmdreg |= 1 << 15;
398
399         mod_timer(&host->cmd_abort_timer, jiffies + HZ/2);
400
401         OMAP_MMC_WRITE(host, CTO, 200);
402         OMAP_MMC_WRITE(host, ARGL, cmd->arg & 0xffff);
403         OMAP_MMC_WRITE(host, ARGH, cmd->arg >> 16);
404         OMAP_MMC_WRITE(host, IE,
405                        OMAP_MMC_STAT_A_EMPTY    | OMAP_MMC_STAT_A_FULL    |
406                        OMAP_MMC_STAT_CMD_CRC    | OMAP_MMC_STAT_CMD_TOUT  |
407                        OMAP_MMC_STAT_DATA_CRC   | OMAP_MMC_STAT_DATA_TOUT |
408                        OMAP_MMC_STAT_END_OF_CMD | OMAP_MMC_STAT_CARD_ERR  |
409                        OMAP_MMC_STAT_END_OF_DATA);
410         OMAP_MMC_WRITE(host, CMD, cmdreg);
411 }
412
413 static void
414 mmc_omap_release_dma(struct mmc_omap_host *host, struct mmc_data *data,
415                      int abort)
416 {
417         enum dma_data_direction dma_data_dir;
418         struct device *dev = mmc_dev(host->mmc);
419         struct dma_chan *c;
420
421         if (data->flags & MMC_DATA_WRITE) {
422                 dma_data_dir = DMA_TO_DEVICE;
423                 c = host->dma_tx;
424         } else {
425                 dma_data_dir = DMA_FROM_DEVICE;
426                 c = host->dma_rx;
427         }
428         if (c) {
429                 if (data->error) {
430                         dmaengine_terminate_all(c);
431                         /* Claim nothing transferred on error... */
432                         data->bytes_xfered = 0;
433                 }
434                 dev = c->device->dev;
435         }
436         dma_unmap_sg(dev, data->sg, host->sg_len, dma_data_dir);
437 }
438
439 static void mmc_omap_send_stop_work(struct work_struct *work)
440 {
441         struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
442                                                   send_stop_work);
443         struct mmc_omap_slot *slot = host->current_slot;
444         struct mmc_data *data = host->stop_data;
445         unsigned long tick_ns;
446
447         tick_ns = (1000000000 + slot->fclk_freq - 1)/slot->fclk_freq;
448         ndelay(8*tick_ns);
449
450         mmc_omap_start_command(host, data->stop);
451 }
452
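/* Data phase finished: release any DMA mapping, then either complete the
 * request or queue the pending STOP command from the workqueue. */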
453 static void
454 mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
455 {
456         if (host->dma_in_use)
457                 mmc_omap_release_dma(host, data, data->error);
458
459         host->data = NULL;
460         host->sg_len = 0;
461
462         /* NOTE:  MMC layer will sometimes poll-wait CMD13 next, issuing
463          * dozens of requests until the card finishes writing data.
464          * It'd be cheaper to just wait till an EOFB interrupt arrives...
465          */
466
467         if (!data->stop) {
468                 struct mmc_host *mmc;
469
470                 host->mrq = NULL;
471                 mmc = host->mmc;
472                 mmc_omap_release_slot(host->current_slot, 1);
473                 mmc_request_done(mmc, data->mrq);
474                 return;
475         }
476
477         host->stop_data = data;
478         queue_work(host->mmc_omap_wq, &host->send_stop_work);
479 }
480
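/*
 * Force the controller back to idle: issue the abort sequence and poll STAT
 * for END_OF_CMD, retrying up to @maxloops times.  The poll limit is derived
 * from the slot clock (the abort itself takes about 80 clocks, plus margin).
 */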
481 static void
482 mmc_omap_send_abort(struct mmc_omap_host *host, int maxloops)
483 {
484         struct mmc_omap_slot *slot = host->current_slot;
485         unsigned int restarts, passes, timeout;
486         u16 stat = 0;
487
488         /* Sending abort takes 80 clocks. Have some extra and round up */
489         timeout = (120*1000000 + slot->fclk_freq - 1)/slot->fclk_freq;
490         restarts = 0;
491         while (restarts < maxloops) {
492                 OMAP_MMC_WRITE(host, STAT, 0xFFFF);
493                 OMAP_MMC_WRITE(host, CMD, (3 << 12) | (1 << 7));
494
495                 passes = 0;
496                 while (passes < timeout) {
497                         stat = OMAP_MMC_READ(host, STAT);
498                         if (stat & OMAP_MMC_STAT_END_OF_CMD)
499                                 goto out;
500                         udelay(1);
501                         passes++;
502                 }
503
504                 restarts++;
505         }
506 out:
507         OMAP_MMC_WRITE(host, STAT, stat);
508 }
509
510 static void
511 mmc_omap_abort_xfer(struct mmc_omap_host *host, struct mmc_data *data)
512 {
513         if (host->dma_in_use)
514                 mmc_omap_release_dma(host, data, 1);
515
516         host->data = NULL;
517         host->sg_len = 0;
518
519         mmc_omap_send_abort(host, 10000);
520 }
521
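/*
 * Block-received/sent (BRS) and DMA completion may arrive in either order;
 * dma_lock guards the brs_received/dma_done flags and whichever side sees
 * the other one already set finishes the transfer.  mmc_omap_end_of_data()
 * handles the BRS side, mmc_omap_dma_done() the DMA side.
 */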
522 static void
523 mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data)
524 {
525         unsigned long flags;
526         int done;
527
528         if (!host->dma_in_use) {
529                 mmc_omap_xfer_done(host, data);
530                 return;
531         }
532         done = 0;
533         spin_lock_irqsave(&host->dma_lock, flags);
534         if (host->dma_done)
535                 done = 1;
536         else
537                 host->brs_received = 1;
538         spin_unlock_irqrestore(&host->dma_lock, flags);
539         if (done)
540                 mmc_omap_xfer_done(host, data);
541 }
542
543 static void
544 mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data)
545 {
546         unsigned long flags;
547         int done;
548
549         done = 0;
550         spin_lock_irqsave(&host->dma_lock, flags);
551         if (host->brs_received)
552                 done = 1;
553         else
554                 host->dma_done = 1;
555         spin_unlock_irqrestore(&host->dma_lock, flags);
556         if (done)
557                 mmc_omap_xfer_done(host, data);
558 }
559
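/* Command phase done: read back the response registers (RSP0..RSP7 for a
 * 136-bit response, RSP6/RSP7 for a short one) and, when there is no data
 * phase or the command failed, finish the whole request. */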
560 static void
561 mmc_omap_cmd_done(struct mmc_omap_host *host, struct mmc_command *cmd)
562 {
563         host->cmd = NULL;
564
565         del_timer(&host->cmd_abort_timer);
566
567         if (cmd->flags & MMC_RSP_PRESENT) {
568                 if (cmd->flags & MMC_RSP_136) {
569                         /* response type 2 */
570                         cmd->resp[3] =
571                                 OMAP_MMC_READ(host, RSP0) |
572                                 (OMAP_MMC_READ(host, RSP1) << 16);
573                         cmd->resp[2] =
574                                 OMAP_MMC_READ(host, RSP2) |
575                                 (OMAP_MMC_READ(host, RSP3) << 16);
576                         cmd->resp[1] =
577                                 OMAP_MMC_READ(host, RSP4) |
578                                 (OMAP_MMC_READ(host, RSP5) << 16);
579                         cmd->resp[0] =
580                                 OMAP_MMC_READ(host, RSP6) |
581                                 (OMAP_MMC_READ(host, RSP7) << 16);
582                 } else {
583                         /* response types 1, 1b, 3, 4, 5, 6 */
584                         cmd->resp[0] =
585                                 OMAP_MMC_READ(host, RSP6) |
586                                 (OMAP_MMC_READ(host, RSP7) << 16);
587                 }
588         }
589
590         if (host->data == NULL || cmd->error) {
591                 struct mmc_host *mmc;
592
593                 if (host->data != NULL)
594                         mmc_omap_abort_xfer(host, host->data);
595                 host->mrq = NULL;
596                 mmc = host->mmc;
597                 mmc_omap_release_slot(host->current_slot, 1);
598                 mmc_request_done(mmc, cmd->mrq);
599         }
600 }
601
602 /*
603  * Abort stuck command. Can occur when card is removed while it is being
604  * read.
605  */
606 static void mmc_omap_abort_command(struct work_struct *work)
607 {
608         struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
609                                                   cmd_abort_work);
610         BUG_ON(!host->cmd);
611
612         dev_dbg(mmc_dev(host->mmc), "Aborting stuck command CMD%d\n",
613                 host->cmd->opcode);
614
615         if (host->cmd->error == 0)
616                 host->cmd->error = -ETIMEDOUT;
617
618         if (host->data == NULL) {
619                 struct mmc_command *cmd;
620                 struct mmc_host    *mmc;
621
622                 cmd = host->cmd;
623                 host->cmd = NULL;
624                 mmc_omap_send_abort(host, 10000);
625
626                 host->mrq = NULL;
627                 mmc = host->mmc;
628                 mmc_omap_release_slot(host->current_slot, 1);
629                 mmc_request_done(mmc, cmd->mrq);
630         } else
631                 mmc_omap_cmd_done(host, host->cmd);
632
633         host->abort = 0;
634         enable_irq(host->irq);
635 }
636
637 static void
638 mmc_omap_cmd_timer(unsigned long data)
639 {
640         struct mmc_omap_host *host = (struct mmc_omap_host *) data;
641         unsigned long flags;
642
643         spin_lock_irqsave(&host->slot_lock, flags);
644         if (host->cmd != NULL && !host->abort) {
645                 OMAP_MMC_WRITE(host, IE, 0);
646                 disable_irq(host->irq);
647                 host->abort = 1;
648                 queue_work(host->mmc_omap_wq, &host->cmd_abort_work);
649         }
650         spin_unlock_irqrestore(&host->slot_lock, flags);
651 }
652
653 /* PIO only */
654 static void
655 mmc_omap_sg_to_buf(struct mmc_omap_host *host)
656 {
657         struct scatterlist *sg;
658
659         sg = host->data->sg + host->sg_idx;
660         host->buffer_bytes_left = sg->length;
661         host->buffer = sg_virt(sg);
662         if (host->buffer_bytes_left > host->total_bytes_left)
663                 host->buffer_bytes_left = host->total_bytes_left;
664 }
665
666 static void
667 mmc_omap_clk_timer(unsigned long data)
668 {
669         struct mmc_omap_host *host = (struct mmc_omap_host *) data;
670
671         mmc_omap_fclk_enable(host, 0);
672 }
673
674 /* PIO only */
675 static void
676 mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
677 {
678         int n, nwords;
679
680         if (host->buffer_bytes_left == 0) {
681                 host->sg_idx++;
682                 BUG_ON(host->sg_idx == host->sg_len);
683                 mmc_omap_sg_to_buf(host);
684         }
685         n = 64;
686         if (n > host->buffer_bytes_left)
687                 n = host->buffer_bytes_left;
688
689         nwords = n / 2;
690         nwords += n & 1; /* handle odd number of bytes to transfer */
691
692         host->buffer_bytes_left -= n;
693         host->total_bytes_left -= n;
694         host->data->bytes_xfered += n;
695
696         if (write) {
697                 __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA),
698                               host->buffer, nwords);
699         } else {
700                 __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA),
701                              host->buffer, nwords);
702         }
703
704         host->buffer += nwords;
705 }
706
707 #ifdef CONFIG_MMC_DEBUG
708 static void mmc_omap_report_irq(struct mmc_omap_host *host, u16 status)
709 {
710         static const char *mmc_omap_status_bits[] = {
711                 "EOC", "CD", "CB", "BRS", "EOFB", "DTO", "DCRC", "CTO",
712                 "CCRC", "CRW", "AF", "AE", "OCRB", "CIRQ", "CERR"
713         };
714         int i;
715         char res[64], *buf = res;
716
717         buf += sprintf(buf, "MMC IRQ 0x%x:", status);
718
719         for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++)
720                 if (status & (1 << i))
721                         buf += sprintf(buf, " %s", mmc_omap_status_bits[i]);
722         dev_vdbg(mmc_dev(host->mmc), "%s\n", res);
723 }
724 #else
725 static void mmc_omap_report_irq(struct mmc_omap_host *host, u16 status)
726 {
727 }
728 #endif
729
730
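/*
 * Interrupt handler: drain STAT until it reads zero, acknowledging each set
 * of events.  FIFO events trigger PIO transfers, error bits mark the current
 * command or data with an error code, and END_OF_CMD/END_OF_DATA complete
 * the respective phase.  A command error during a data transfer defers the
 * cleanup to cmd_abort_work.
 */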
731 static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
732 {
733         struct mmc_omap_host *host = dev_id;
734         u16 status;
735         int end_command;
736         int end_transfer;
737         int transfer_error, cmd_error;
738
739         if (host->cmd == NULL && host->data == NULL) {
740                 status = OMAP_MMC_READ(host, STAT);
741                 dev_info(mmc_dev(host->slots[0]->mmc),
742                          "Spurious IRQ 0x%04x\n", status);
743                 if (status != 0) {
744                         OMAP_MMC_WRITE(host, STAT, status);
745                         OMAP_MMC_WRITE(host, IE, 0);
746                 }
747                 return IRQ_HANDLED;
748         }
749
750         end_command = 0;
751         end_transfer = 0;
752         transfer_error = 0;
753         cmd_error = 0;
754
755         while ((status = OMAP_MMC_READ(host, STAT)) != 0) {
756                 int cmd;
757
758                 OMAP_MMC_WRITE(host, STAT, status);
759                 if (host->cmd != NULL)
760                         cmd = host->cmd->opcode;
761                 else
762                         cmd = -1;
763                 dev_dbg(mmc_dev(host->mmc), "MMC IRQ %04x (CMD %d): ",
764                         status, cmd);
765                 mmc_omap_report_irq(host, status);
766
767                 if (host->total_bytes_left) {
768                         if ((status & OMAP_MMC_STAT_A_FULL) ||
769                             (status & OMAP_MMC_STAT_END_OF_DATA))
770                                 mmc_omap_xfer_data(host, 0);
771                         if (status & OMAP_MMC_STAT_A_EMPTY)
772                                 mmc_omap_xfer_data(host, 1);
773                 }
774
775                 if (status & OMAP_MMC_STAT_END_OF_DATA)
776                         end_transfer = 1;
777
778                 if (status & OMAP_MMC_STAT_DATA_TOUT) {
779                         dev_dbg(mmc_dev(host->mmc), "data timeout (CMD%d)\n",
780                                 cmd);
781                         if (host->data) {
782                                 host->data->error = -ETIMEDOUT;
783                                 transfer_error = 1;
784                         }
785                 }
786
787                 if (status & OMAP_MMC_STAT_DATA_CRC) {
788                         if (host->data) {
789                                 host->data->error = -EILSEQ;
790                                 dev_dbg(mmc_dev(host->mmc),
791                                          "data CRC error, bytes left %d\n",
792                                         host->total_bytes_left);
793                                 transfer_error = 1;
794                         } else {
795                                 dev_dbg(mmc_dev(host->mmc), "data CRC error\n");
796                         }
797                 }
798
799                 if (status & OMAP_MMC_STAT_CMD_TOUT) {
800                         /* Timeouts are routine with some commands */
801                         if (host->cmd) {
802                                 struct mmc_omap_slot *slot =
803                                         host->current_slot;
804                                 if (slot == NULL ||
805                                     !mmc_omap_cover_is_open(slot))
806                                         dev_err(mmc_dev(host->mmc),
807                                                 "command timeout (CMD%d)\n",
808                                                 cmd);
809                                 host->cmd->error = -ETIMEDOUT;
810                                 end_command = 1;
811                                 cmd_error = 1;
812                         }
813                 }
814
815                 if (status & OMAP_MMC_STAT_CMD_CRC) {
816                         if (host->cmd) {
817                                 dev_err(mmc_dev(host->mmc),
818                                         "command CRC error (CMD%d, arg 0x%08x)\n",
819                                         cmd, host->cmd->arg);
820                                 host->cmd->error = -EILSEQ;
821                                 end_command = 1;
822                                 cmd_error = 1;
823                         } else
824                                 dev_err(mmc_dev(host->mmc),
825                                         "command CRC error without cmd?\n");
826                 }
827
828                 if (status & OMAP_MMC_STAT_CARD_ERR) {
829                         dev_dbg(mmc_dev(host->mmc),
830                                 "ignoring card status error (CMD%d)\n",
831                                 cmd);
832                         end_command = 1;
833                 }
834
835                 /*
836                  * NOTE: On 1610 the END_OF_CMD may come too early when
837                  * starting a write
838                  */
839                 if ((status & OMAP_MMC_STAT_END_OF_CMD) &&
840                     (!(status & OMAP_MMC_STAT_A_EMPTY))) {
841                         end_command = 1;
842                 }
843         }
844
845         if (cmd_error && host->data) {
846                 del_timer(&host->cmd_abort_timer);
847                 host->abort = 1;
848                 OMAP_MMC_WRITE(host, IE, 0);
849                 disable_irq_nosync(host->irq);
850                 queue_work(host->mmc_omap_wq, &host->cmd_abort_work);
851                 return IRQ_HANDLED;
852         }
853
854         if (end_command && host->cmd)
855                 mmc_omap_cmd_done(host, host->cmd);
856         if (host->data != NULL) {
857                 if (transfer_error)
858                         mmc_omap_xfer_done(host, host->data);
859                 else if (end_transfer)
860                         mmc_omap_end_of_data(host, host->data);
861         }
862
863         return IRQ_HANDLED;
864 }
865
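/* Called from outside the driver (e.g. board cover-switch handling) when the
 * cover state of slot @num may have changed. */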
866 void omap_mmc_notify_cover_event(struct device *dev, int num, int is_closed)
867 {
868         int cover_open;
869         struct mmc_omap_host *host = dev_get_drvdata(dev);
870         struct mmc_omap_slot *slot = host->slots[num];
871
872         BUG_ON(num >= host->nr_slots);
873
874         /* Other subsystems can call in here before we're initialised. */
875         if (host->nr_slots == 0 || !host->slots[num])
876                 return;
877
878         cover_open = mmc_omap_cover_is_open(slot);
879         if (cover_open != slot->cover_open) {
880                 slot->cover_open = cover_open;
881                 sysfs_notify(&slot->mmc->class_dev.kobj, NULL, "cover_switch");
882         }
883
884         tasklet_hi_schedule(&slot->cover_tasklet);
885 }
886
887 static void mmc_omap_cover_timer(unsigned long arg)
888 {
889         struct mmc_omap_slot *slot = (struct mmc_omap_slot *) arg;
890         tasklet_schedule(&slot->cover_tasklet);
891 }
892
893 static void mmc_omap_cover_handler(unsigned long param)
894 {
895         struct mmc_omap_slot *slot = (struct mmc_omap_slot *)param;
896         int cover_open = mmc_omap_cover_is_open(slot);
897
898         mmc_detect_change(slot->mmc, 0);
899         if (!cover_open)
900                 return;
901
902         /*
903          * If no card is inserted, we postpone polling until
904          * the cover has been closed.
905          */
906         if (slot->mmc->card == NULL || !mmc_card_present(slot->mmc->card))
907                 return;
908
909         mod_timer(&slot->cover_timer,
910                   jiffies + msecs_to_jiffies(OMAP_MMC_COVER_POLL_DELAY));
911 }
912
913 static void mmc_omap_dma_callback(void *priv)
914 {
915         struct mmc_omap_host *host = priv;
916         struct mmc_data *data = host->data;
917
918         /* If we got to the end of DMA, assume everything went well */
919         data->bytes_xfered += data->blocks * data->blksz;
920
921         mmc_omap_dma_done(host, data);
922 }
923
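/* No data phase: clear the data-timeout multiplier bit in SDIO and program
 * the maximum command timeout. */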
924 static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req)
925 {
926         u16 reg;
927
928         reg = OMAP_MMC_READ(host, SDIO);
929         reg &= ~(1 << 5);
930         OMAP_MMC_WRITE(host, SDIO, reg);
931         /* Set maximum timeout */
932         OMAP_MMC_WRITE(host, CTO, 0xff);
933 }
934
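/* Convert the requested data timeout (ns plus clocks) into fclk cycles; if
 * the result does not fit in 16 bits, enable the x1024 multiplier in SDIO. */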
935 static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_request *req)
936 {
937         unsigned int timeout, cycle_ns;
938         u16 reg;
939
940         cycle_ns = 1000000000 / host->current_slot->fclk_freq;
941         timeout = req->data->timeout_ns / cycle_ns;
942         timeout += req->data->timeout_clks;
943
944         /* Check if we need to use timeout multiplier register */
945         reg = OMAP_MMC_READ(host, SDIO);
946         if (timeout > 0xffff) {
947                 reg |= (1 << 5);
948                 timeout /= 1024;
949         } else
950                 reg &= ~(1 << 5);
951         OMAP_MMC_WRITE(host, SDIO, reg);
952         OMAP_MMC_WRITE(host, DTO, timeout);
953 }
954
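/*
 * Program block count, block length and the data timeout, then choose
 * between DMA and PIO.  DMA is used only when every scatterlist entry is a
 * whole number of blocks; the FIFO thresholds (BUF) and the DMA burst size
 * are matched to the block size, and any DMA setup failure falls back to
 * PIO.
 */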
955 static void
956 mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
957 {
958         struct mmc_data *data = req->data;
959         int i, use_dma, block_size;
960         unsigned sg_len;
961
962         host->data = data;
963         if (data == NULL) {
964                 OMAP_MMC_WRITE(host, BLEN, 0);
965                 OMAP_MMC_WRITE(host, NBLK, 0);
966                 OMAP_MMC_WRITE(host, BUF, 0);
967                 host->dma_in_use = 0;
968                 set_cmd_timeout(host, req);
969                 return;
970         }
971
972         block_size = data->blksz;
973
974         OMAP_MMC_WRITE(host, NBLK, data->blocks - 1);
975         OMAP_MMC_WRITE(host, BLEN, block_size - 1);
976         set_data_timeout(host, req);
977
978         /* cope with calling layer confusion; it issues "single
979          * block" writes using multi-block scatterlists.
980          */
981         sg_len = (data->blocks == 1) ? 1 : data->sg_len;
982
983         /* Only do DMA for entire blocks */
984         use_dma = host->use_dma;
985         if (use_dma) {
986                 for (i = 0; i < sg_len; i++) {
987                         if ((data->sg[i].length % block_size) != 0) {
988                                 use_dma = 0;
989                                 break;
990                         }
991                 }
992         }
993
994         host->sg_idx = 0;
995         if (use_dma) {
996                 enum dma_data_direction dma_data_dir;
997                 struct dma_async_tx_descriptor *tx;
998                 struct dma_chan *c;
999                 u32 burst, *bp;
1000                 u16 buf;
1001
1002                 /*
1003                  * FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx
1004                  * and 24xx. Use 16 or 32 word frames when the
1005                  * blocksize is at least that large. Blocksize is
1006                  * usually 512 bytes; but not for some SD reads.
1007                  */
1008                 burst = mmc_omap15xx() ? 32 : 64;
1009                 if (burst > data->blksz)
1010                         burst = data->blksz;
1011
1012                 burst >>= 1;
1013
1014                 if (data->flags & MMC_DATA_WRITE) {
1015                         c = host->dma_tx;
1016                         bp = &host->dma_tx_burst;
1017                         buf = 0x0f80 | (burst - 1) << 0;
1018                         dma_data_dir = DMA_TO_DEVICE;
1019                 } else {
1020                         c = host->dma_rx;
1021                         bp = &host->dma_rx_burst;
1022                         buf = 0x800f | (burst - 1) << 8;
1023                         dma_data_dir = DMA_FROM_DEVICE;
1024                 }
1025
1026                 if (!c)
1027                         goto use_pio;
1028
1029                 /* Only reconfigure if we have a different burst size */
1030                 if (*bp != burst) {
1031                         struct dma_slave_config cfg;
1032
1033                         cfg.src_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
1034                         cfg.dst_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
1035                         cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
1036                         cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
1037                         cfg.src_maxburst = burst;
1038                         cfg.dst_maxburst = burst;
1039
1040                         if (dmaengine_slave_config(c, &cfg))
1041                                 goto use_pio;
1042
1043                         *bp = burst;
1044                 }
1045
1046                 host->sg_len = dma_map_sg(c->device->dev, data->sg, sg_len,
1047                                           dma_data_dir);
1048                 if (host->sg_len == 0)
1049                         goto use_pio;
1050
1051                 tx = dmaengine_prep_slave_sg(c, data->sg, host->sg_len,
1052                         data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
1053                         DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1054                 if (!tx)
1055                         goto use_pio;
1056
1057                 OMAP_MMC_WRITE(host, BUF, buf);
1058
1059                 tx->callback = mmc_omap_dma_callback;
1060                 tx->callback_param = host;
1061                 dmaengine_submit(tx);
1062                 host->brs_received = 0;
1063                 host->dma_done = 0;
1064                 host->dma_in_use = 1;
1065                 return;
1066         }
1067  use_pio:
1068
1069         /* Revert to PIO? */
1070         OMAP_MMC_WRITE(host, BUF, 0x1f1f);
1071         host->total_bytes_left = data->blocks * block_size;
1072         host->sg_len = sg_len;
1073         mmc_omap_sg_to_buf(host);
1074         host->dma_in_use = 0;
1075 }
1076
1077 static void mmc_omap_start_request(struct mmc_omap_host *host,
1078                                    struct mmc_request *req)
1079 {
1080         BUG_ON(host->mrq != NULL);
1081
1082         host->mrq = req;
1083
1084         /* only touch fifo AFTER the controller readies it */
1085         mmc_omap_prepare_data(host, req);
1086         mmc_omap_start_command(host, req->cmd);
1087         if (host->dma_in_use) {
1088                 struct dma_chan *c = host->data->flags & MMC_DATA_WRITE ?
1089                                 host->dma_tx : host->dma_rx;
1090
1091                 dma_async_issue_pending(c);
1092         }
1093 }
1094
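/* The mmc_host_ops .request handler: if another slot currently owns the
 * controller, park the request on the slot and let the release path
 * dispatch it later; otherwise claim the host and start it right away. */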
1095 static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req)
1096 {
1097         struct mmc_omap_slot *slot = mmc_priv(mmc);
1098         struct mmc_omap_host *host = slot->host;
1099         unsigned long flags;
1100
1101         spin_lock_irqsave(&host->slot_lock, flags);
1102         if (host->mmc != NULL) {
1103                 BUG_ON(slot->mrq != NULL);
1104                 slot->mrq = req;
1105                 spin_unlock_irqrestore(&host->slot_lock, flags);
1106                 return;
1107         } else
1108                 host->mmc = mmc;
1109         spin_unlock_irqrestore(&host->slot_lock, flags);
1110         mmc_omap_select_slot(slot, 1);
1111         mmc_omap_start_request(host, req);
1112 }
1113
1114 static void mmc_omap_set_power(struct mmc_omap_slot *slot, int power_on,
1115                                 int vdd)
1116 {
1117         struct mmc_omap_host *host;
1118
1119         host = slot->host;
1120
1121         if (slot->pdata->set_power != NULL)
1122                 slot->pdata->set_power(mmc_dev(slot->mmc), slot->id, power_on,
1123                                         vdd);
1124         if (mmc_omap2()) {
1125                 u16 w;
1126
1127                 if (power_on) {
1128                         w = OMAP_MMC_READ(host, CON);
1129                         OMAP_MMC_WRITE(host, CON, w | (1 << 11));
1130                 } else {
1131                         w = OMAP_MMC_READ(host, CON);
1132                         OMAP_MMC_WRITE(host, CON, w & ~(1 << 11));
1133                 }
1134         }
1135 }
1136
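/* Compute the CON clock divisor: the smallest value (capped at 250) for
 * which fclk/dsor does not exceed the requested clock.  Bit 15 of the
 * returned value selects 4-bit bus width. */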
1137 static int mmc_omap_calc_divisor(struct mmc_host *mmc, struct mmc_ios *ios)
1138 {
1139         struct mmc_omap_slot *slot = mmc_priv(mmc);
1140         struct mmc_omap_host *host = slot->host;
1141         int func_clk_rate = clk_get_rate(host->fclk);
1142         int dsor;
1143
1144         if (ios->clock == 0)
1145                 return 0;
1146
1147         dsor = func_clk_rate / ios->clock;
1148         if (dsor < 1)
1149                 dsor = 1;
1150
1151         if (func_clk_rate / dsor > ios->clock)
1152                 dsor++;
1153
1154         if (dsor > 250)
1155                 dsor = 250;
1156
1157         slot->fclk_freq = func_clk_rate / dsor;
1158
1159         if (ios->bus_width == MMC_BUS_WIDTH_4)
1160                 dsor |= 1 << 15;
1161
1162         return dsor;
1163 }
1164
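/*
 * The mmc_host_ops .set_ios handler: select and power the slot, program the
 * clock divisor (CON is written twice as a workaround, see the comment
 * below) and, when entering MMC_POWER_ON, clock the bus for roughly 80
 * cycles while polling STAT for completion.
 */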
1165 static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1166 {
1167         struct mmc_omap_slot *slot = mmc_priv(mmc);
1168         struct mmc_omap_host *host = slot->host;
1169         int i, dsor;
1170         int clk_enabled;
1171
1172         mmc_omap_select_slot(slot, 0);
1173
1174         dsor = mmc_omap_calc_divisor(mmc, ios);
1175
1176         if (ios->vdd != slot->vdd)
1177                 slot->vdd = ios->vdd;
1178
1179         clk_enabled = 0;
1180         switch (ios->power_mode) {
1181         case MMC_POWER_OFF:
1182                 mmc_omap_set_power(slot, 0, ios->vdd);
1183                 break;
1184         case MMC_POWER_UP:
1185                 /* Cannot touch dsor yet, just power up MMC */
1186                 mmc_omap_set_power(slot, 1, ios->vdd);
1187                 goto exit;
1188         case MMC_POWER_ON:
1189                 mmc_omap_fclk_enable(host, 1);
1190                 clk_enabled = 1;
1191                 dsor |= 1 << 11;
1192                 break;
1193         }
1194
1195         if (slot->bus_mode != ios->bus_mode) {
1196                 if (slot->pdata->set_bus_mode != NULL)
1197                         slot->pdata->set_bus_mode(mmc_dev(mmc), slot->id,
1198                                                   ios->bus_mode);
1199                 slot->bus_mode = ios->bus_mode;
1200         }
1201
1202         /* On insanely high arm_per frequencies something sometimes
1203          * goes somehow out of sync, and the POW bit is not being set,
1204          * which results in the while loop below getting stuck.
1205          * Writing to the CON register twice seems to do the trick. */
1206         for (i = 0; i < 2; i++)
1207                 OMAP_MMC_WRITE(host, CON, dsor);
1208         slot->saved_con = dsor;
1209         if (ios->power_mode == MMC_POWER_ON) {
1210                 /* worst case at 400kHz, 80 cycles makes 200 microsecs */
1211                 int usecs = 250;
1212
1213                 /* Send clock cycles, poll completion */
1214                 OMAP_MMC_WRITE(host, IE, 0);
1215                 OMAP_MMC_WRITE(host, STAT, 0xffff);
1216                 OMAP_MMC_WRITE(host, CMD, 1 << 7);
1217                 while (usecs > 0 && (OMAP_MMC_READ(host, STAT) & 1) == 0) {
1218                         udelay(1);
1219                         usecs--;
1220                 }
1221                 OMAP_MMC_WRITE(host, STAT, 1);
1222         }
1223
1224 exit:
1225         mmc_omap_release_slot(slot, clk_enabled);
1226 }
1227
1228 static const struct mmc_host_ops mmc_omap_ops = {
1229         .request        = mmc_omap_request,
1230         .set_ios        = mmc_omap_set_ios,
1231 };
1232
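/*
 * Allocate and register one mmc_host per slot, taking bus width, OCR mask
 * and maximum frequency from the platform data and exposing the slot_name
 * and cover_switch attributes when the board provides them.
 */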
1233 static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
1234 {
1235         struct mmc_omap_slot *slot = NULL;
1236         struct mmc_host *mmc;
1237         int r;
1238
1239         mmc = mmc_alloc_host(sizeof(struct mmc_omap_slot), host->dev);
1240         if (mmc == NULL)
1241                 return -ENOMEM;
1242
1243         slot = mmc_priv(mmc);
1244         slot->host = host;
1245         slot->mmc = mmc;
1246         slot->id = id;
1247         slot->pdata = &host->pdata->slots[id];
1248
1249         host->slots[id] = slot;
1250
1251         mmc->caps = 0;
1252         if (host->pdata->slots[id].wires >= 4)
1253                 mmc->caps |= MMC_CAP_4_BIT_DATA;
1254
1255         mmc->ops = &mmc_omap_ops;
1256         mmc->f_min = 400000;
1257
1258         if (mmc_omap2())
1259                 mmc->f_max = 48000000;
1260         else
1261                 mmc->f_max = 24000000;
1262         if (host->pdata->max_freq)
1263                 mmc->f_max = min(host->pdata->max_freq, mmc->f_max);
1264         mmc->ocr_avail = slot->pdata->ocr_mask;
1265
1266         /* Use scatterlist DMA to reduce per-transfer costs.
1267          * NOTE max_seg_size assumption that small blocks aren't
1268          * normally used (except e.g. for reading SD registers).
1269          */
1270         mmc->max_segs = 32;
1271         mmc->max_blk_size = 2048;       /* BLEN is 11 bits (+1) */
1272         mmc->max_blk_count = 2048;      /* NBLK is 11 bits (+1) */
1273         mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1274         mmc->max_seg_size = mmc->max_req_size;
1275
1276         r = mmc_add_host(mmc);
1277         if (r < 0)
1278                 goto err_remove_host;
1279
1280         if (slot->pdata->name != NULL) {
1281                 r = device_create_file(&mmc->class_dev,
1282                                         &dev_attr_slot_name);
1283                 if (r < 0)
1284                         goto err_remove_host;
1285         }
1286
1287         if (slot->pdata->get_cover_state != NULL) {
1288                 r = device_create_file(&mmc->class_dev,
1289                                         &dev_attr_cover_switch);
1290                 if (r < 0)
1291                         goto err_remove_slot_name;
1292
1293                 setup_timer(&slot->cover_timer, mmc_omap_cover_timer,
1294                             (unsigned long)slot);
1295                 tasklet_init(&slot->cover_tasklet, mmc_omap_cover_handler,
1296                              (unsigned long)slot);
1297                 tasklet_schedule(&slot->cover_tasklet);
1298         }
1299
1300         return 0;
1301
1302 err_remove_slot_name:
1303         if (slot->pdata->name != NULL)
1304                 device_remove_file(&mmc->class_dev, &dev_attr_slot_name);
1305 err_remove_host:
1306         mmc_remove_host(mmc);
1307         mmc_free_host(mmc);
1308         return r;
1309 }
1310
1311 static void mmc_omap_remove_slot(struct mmc_omap_slot *slot)
1312 {
1313         struct mmc_host *mmc = slot->mmc;
1314
1315         if (slot->pdata->name != NULL)
1316                 device_remove_file(&mmc->class_dev, &dev_attr_slot_name);
1317         if (slot->pdata->get_cover_state != NULL)
1318                 device_remove_file(&mmc->class_dev, &dev_attr_cover_switch);
1319
1320         tasklet_kill(&slot->cover_tasklet);
1321         del_timer_sync(&slot->cover_timer);
1322         flush_workqueue(slot->host->mmc_omap_wq);
1323
1324         mmc_remove_host(mmc);
1325         mmc_free_host(mmc);
1326 }
1327
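/*
 * Probe: map the register window, get the interface and functional clocks,
 * request the TX/RX DMA channels (a missing channel simply means PIO),
 * install the interrupt handler, run the board init hook and finally create
 * one slot per entry in the platform data.
 */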
1328 static int mmc_omap_probe(struct platform_device *pdev)
1329 {
1330         struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
1331         struct mmc_omap_host *host = NULL;
1332         struct resource *res;
1333         dma_cap_mask_t mask;
1334         unsigned sig;
1335         int i, ret = 0;
1336         int irq;
1337
1338         if (pdata == NULL) {
1339                 dev_err(&pdev->dev, "platform data missing\n");
1340                 return -ENXIO;
1341         }
1342         if (pdata->nr_slots == 0) {
1343                 dev_err(&pdev->dev, "no slots\n");
1344                 return -ENXIO;
1345         }
1346
1347         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1348         irq = platform_get_irq(pdev, 0);
1349         if (res == NULL || irq < 0)
1350                 return -ENXIO;
1351
1352         res = request_mem_region(res->start, resource_size(res),
1353                                  pdev->name);
1354         if (res == NULL)
1355                 return -EBUSY;
1356
1357         host = kzalloc(sizeof(struct mmc_omap_host), GFP_KERNEL);
1358         if (host == NULL) {
1359                 ret = -ENOMEM;
1360                 goto err_free_mem_region;
1361         }
1362
1363         INIT_WORK(&host->slot_release_work, mmc_omap_slot_release_work);
1364         INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work);
1365
1366         INIT_WORK(&host->cmd_abort_work, mmc_omap_abort_command);
1367         setup_timer(&host->cmd_abort_timer, mmc_omap_cmd_timer,
1368                     (unsigned long) host);
1369
1370         spin_lock_init(&host->clk_lock);
1371         setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host);
1372
1373         spin_lock_init(&host->dma_lock);
1374         spin_lock_init(&host->slot_lock);
1375         init_waitqueue_head(&host->slot_wq);
1376
1377         host->pdata = pdata;
1378         host->features = host->pdata->slots[0].features;
1379         host->dev = &pdev->dev;
1380         platform_set_drvdata(pdev, host);
1381
1382         host->id = pdev->id;
1383         host->mem_res = res;
1384         host->irq = irq;
1385         host->use_dma = 1;
1387         host->phys_base = host->mem_res->start;
1388         host->virt_base = ioremap(res->start, resource_size(res));
        if (!host->virt_base) {
                ret = -ENOMEM;
                goto err_ioremap;
        }
1391
1392         host->iclk = clk_get(&pdev->dev, "ick");
1393         if (IS_ERR(host->iclk)) {
1394                 ret = PTR_ERR(host->iclk);
1395                 goto err_free_mmc_host;
1396         }
1397         clk_enable(host->iclk);
1398
1399         host->fclk = clk_get(&pdev->dev, "fck");
1400         if (IS_ERR(host->fclk)) {
1401                 ret = PTR_ERR(host->fclk);
1402                 goto err_free_iclk;
1403         }
1404
1405         dma_cap_zero(mask);
1406         dma_cap_set(DMA_SLAVE, mask);
1407
1408         host->dma_tx_burst = -1;
1409         host->dma_rx_burst = -1;
1410
1411         if (mmc_omap2())
1412                 sig = host->id == 0 ? OMAP24XX_DMA_MMC1_TX : OMAP24XX_DMA_MMC2_TX;
1413         else
1414                 sig = host->id == 0 ? OMAP_DMA_MMC_TX : OMAP_DMA_MMC2_TX;
1415         host->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
1416         if (!host->dma_tx)
1417                 dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n",
1418                         sig);
1419         if (mmc_omap2())
1420                 sig = host->id == 0 ? OMAP24XX_DMA_MMC1_RX : OMAP24XX_DMA_MMC2_RX;
1421         else
1422                 sig = host->id == 0 ? OMAP_DMA_MMC_RX : OMAP_DMA_MMC2_RX;
1423         host->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
1424         if (!host->dma_rx)
1425                 dev_warn(host->dev, "unable to obtain RX DMA engine channel %u\n",
1426                         sig);
1427
1428         ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
1429         if (ret)
1430                 goto err_free_dma;
1431
1432         if (pdata->init != NULL) {
1433                 ret = pdata->init(&pdev->dev);
1434                 if (ret < 0)
1435                         goto err_free_irq;
1436         }
1437
1438         host->nr_slots = pdata->nr_slots;
1439         host->reg_shift = (mmc_omap7xx() ? 1 : 2);
1440
1441         host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
        if (!host->mmc_omap_wq) {
                ret = -ENOMEM;
                goto err_plat_cleanup;
        }
1444
1445         for (i = 0; i < pdata->nr_slots; i++) {
1446                 ret = mmc_omap_new_slot(host, i);
1447                 if (ret < 0) {
1448                         while (--i >= 0)
1449                                 mmc_omap_remove_slot(host->slots[i]);
1450
1451                         goto err_destroy_wq;
1452                 }
1453         }
1454
1455         return 0;
1456
1457 err_destroy_wq:
1458         destroy_workqueue(host->mmc_omap_wq);
1459 err_plat_cleanup:
1460         if (pdata->cleanup)
1461                 pdata->cleanup(&pdev->dev);
1462 err_free_irq:
1463         free_irq(host->irq, host);
1464 err_free_dma:
1465         if (host->dma_tx)
1466                 dma_release_channel(host->dma_tx);
1467         if (host->dma_rx)
1468                 dma_release_channel(host->dma_rx);
1469         clk_put(host->fclk);
1470 err_free_iclk:
1471         clk_disable(host->iclk);
1472         clk_put(host->iclk);
1473 err_free_mmc_host:
1474         iounmap(host->virt_base);
1475 err_ioremap:
1476         kfree(host);
1477 err_free_mem_region:
1478         release_mem_region(res->start, resource_size(res));
1479         return ret;
1480 }
1481
1482 static int mmc_omap_remove(struct platform_device *pdev)
1483 {
1484         struct mmc_omap_host *host = platform_get_drvdata(pdev);
1485         int i;
1486
1487         BUG_ON(host == NULL);
1488
1489         for (i = 0; i < host->nr_slots; i++)
1490                 mmc_omap_remove_slot(host->slots[i]);
1491
1492         if (host->pdata->cleanup)
1493                 host->pdata->cleanup(&pdev->dev);
1494
1495         mmc_omap_fclk_enable(host, 0);
1496         free_irq(host->irq, host);
1497         clk_put(host->fclk);
1498         clk_disable(host->iclk);
1499         clk_put(host->iclk);
1500
1501         if (host->dma_tx)
1502                 dma_release_channel(host->dma_tx);
1503         if (host->dma_rx)
1504                 dma_release_channel(host->dma_rx);
1505
1506         iounmap(host->virt_base);
        release_mem_region(pdev->resource[0].start,
                           resource_size(&pdev->resource[0]));
1509         destroy_workqueue(host->mmc_omap_wq);
1510
1511         kfree(host);
1512
1513         return 0;
1514 }
1515
1516 #ifdef CONFIG_PM
1517 static int mmc_omap_suspend(struct platform_device *pdev, pm_message_t mesg)
1518 {
1519         int i, ret = 0;
1520         struct mmc_omap_host *host = platform_get_drvdata(pdev);
1521
1522         if (host == NULL || host->suspended)
1523                 return 0;
1524
1525         for (i = 0; i < host->nr_slots; i++) {
1526                 struct mmc_omap_slot *slot;
1527
1528                 slot = host->slots[i];
1529                 ret = mmc_suspend_host(slot->mmc);
1530                 if (ret < 0) {
1531                         while (--i >= 0) {
1532                                 slot = host->slots[i];
1533                                 mmc_resume_host(slot->mmc);
1534                         }
1535                         return ret;
1536                 }
1537         }
1538         host->suspended = 1;
1539         return 0;
1540 }
1541
1542 static int mmc_omap_resume(struct platform_device *pdev)
1543 {
1544         int i, ret = 0;
1545         struct mmc_omap_host *host = platform_get_drvdata(pdev);
1546
1547         if (host == NULL || !host->suspended)
1548                 return 0;
1549
1550         for (i = 0; i < host->nr_slots; i++) {
1551                 struct mmc_omap_slot *slot;
1552                 slot = host->slots[i];
1553                 ret = mmc_resume_host(slot->mmc);
1554                 if (ret < 0)
1555                         return ret;
1556
1557                 host->suspended = 0;
1558         }
1559         return 0;
1560 }
1561 #else
1562 #define mmc_omap_suspend        NULL
1563 #define mmc_omap_resume         NULL
1564 #endif
1565
1566 static struct platform_driver mmc_omap_driver = {
1567         .probe          = mmc_omap_probe,
1568         .remove         = mmc_omap_remove,
1569         .suspend        = mmc_omap_suspend,
1570         .resume         = mmc_omap_resume,
1571         .driver         = {
1572                 .name   = DRIVER_NAME,
1573                 .owner  = THIS_MODULE,
1574         },
1575 };
1576
1577 module_platform_driver(mmc_omap_driver);
1578 MODULE_DESCRIPTION("OMAP Multimedia Card driver");
1579 MODULE_LICENSE("GPL");
1580 MODULE_ALIAS("platform:" DRIVER_NAME);
1581 MODULE_AUTHOR("Juha Yrjölä");