1 /*
2  * drivers/mtd/nand/pxa3xx_nand.c
3  *
4  * Copyright © 2005 Intel Corporation
5  * Copyright © 2006 Marvell International Ltd.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  *
11  * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
12  */
13
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/interrupt.h>
17 #include <linux/platform_device.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/delay.h>
20 #include <linux/clk.h>
21 #include <linux/mtd/mtd.h>
22 #include <linux/mtd/nand.h>
23 #include <linux/mtd/partitions.h>
24 #include <linux/io.h>
25 #include <linux/irq.h>
26 #include <linux/slab.h>
27 #include <linux/of.h>
28 #include <linux/of_device.h>
29 #include <linux/of_mtd.h>
30
31 #if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
32 #define ARCH_HAS_DMA
33 #endif
34
35 #ifdef ARCH_HAS_DMA
36 #include <mach/dma.h>
37 #endif
38
39 #include <linux/platform_data/mtd-nand-pxa3xx.h>
40
41 #define CHIP_DELAY_TIMEOUT      (2 * HZ/10)
42 #define NAND_STOP_DELAY         (2 * HZ/50)
43 #define PAGE_CHUNK_SIZE         (2048)
44
45 /*
46  * Define a buffer size for the initial command that detects the flash device:
47  * STATUS, READID and PARAM. The largest of these is the PARAM command,
48  * needing 256 bytes.
49  */
50 #define INIT_BUFFER_SIZE        256
51
52 /* registers and bit definitions */
53 #define NDCR            (0x00) /* Control register */
54 #define NDTR0CS0        (0x04) /* Timing Parameter 0 for CS0 */
55 #define NDTR1CS0        (0x0C) /* Timing Parameter 1 for CS0 */
56 #define NDSR            (0x14) /* Status Register */
57 #define NDPCR           (0x18) /* Page Count Register */
58 #define NDBDR0          (0x1C) /* Bad Block Register 0 */
59 #define NDBDR1          (0x20) /* Bad Block Register 1 */
60 #define NDECCCTRL       (0x28) /* ECC control */
61 #define NDDB            (0x40) /* Data Buffer */
62 #define NDCB0           (0x48) /* Command Buffer0 */
63 #define NDCB1           (0x4C) /* Command Buffer1 */
64 #define NDCB2           (0x50) /* Command Buffer2 */
65
66 #define NDCR_SPARE_EN           (0x1 << 31)
67 #define NDCR_ECC_EN             (0x1 << 30)
68 #define NDCR_DMA_EN             (0x1 << 29)
69 #define NDCR_ND_RUN             (0x1 << 28)
70 #define NDCR_DWIDTH_C           (0x1 << 27)
71 #define NDCR_DWIDTH_M           (0x1 << 26)
72 #define NDCR_PAGE_SZ            (0x1 << 24)
73 #define NDCR_NCSX               (0x1 << 23)
74 #define NDCR_ND_MODE            (0x3 << 21)
75 #define NDCR_NAND_MODE          (0x0)
76 #define NDCR_CLR_PG_CNT         (0x1 << 20)
77 #define NDCR_STOP_ON_UNCOR      (0x1 << 19)
78 #define NDCR_RD_ID_CNT_MASK     (0x7 << 16)
79 #define NDCR_RD_ID_CNT(x)       (((x) << 16) & NDCR_RD_ID_CNT_MASK)
80
81 #define NDCR_RA_START           (0x1 << 15)
82 #define NDCR_PG_PER_BLK         (0x1 << 14)
83 #define NDCR_ND_ARB_EN          (0x1 << 12)
84 #define NDCR_INT_MASK           (0xFFF)
85
86 #define NDSR_MASK               (0xfff)
87 #define NDSR_ERR_CNT_OFF        (16)
88 #define NDSR_ERR_CNT_MASK       (0x1f)
89 #define NDSR_ERR_CNT(sr)        ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
90 #define NDSR_RDY                (0x1 << 12)
91 #define NDSR_FLASH_RDY          (0x1 << 11)
92 #define NDSR_CS0_PAGED          (0x1 << 10)
93 #define NDSR_CS1_PAGED          (0x1 << 9)
94 #define NDSR_CS0_CMDD           (0x1 << 8)
95 #define NDSR_CS1_CMDD           (0x1 << 7)
96 #define NDSR_CS0_BBD            (0x1 << 6)
97 #define NDSR_CS1_BBD            (0x1 << 5)
98 #define NDSR_UNCORERR           (0x1 << 4)
99 #define NDSR_CORERR             (0x1 << 3)
100 #define NDSR_WRDREQ             (0x1 << 2)
101 #define NDSR_RDDREQ             (0x1 << 1)
102 #define NDSR_WRCMDREQ           (0x1)
103
104 #define NDCB0_LEN_OVRD          (0x1 << 28)
105 #define NDCB0_ST_ROW_EN         (0x1 << 26)
106 #define NDCB0_AUTO_RS           (0x1 << 25)
107 #define NDCB0_CSEL              (0x1 << 24)
108 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
109 #define NDCB0_EXT_CMD_TYPE(x)   (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
110 #define NDCB0_CMD_TYPE_MASK     (0x7 << 21)
111 #define NDCB0_CMD_TYPE(x)       (((x) << 21) & NDCB0_CMD_TYPE_MASK)
112 #define NDCB0_NC                (0x1 << 20)
113 #define NDCB0_DBC               (0x1 << 19)
114 #define NDCB0_ADDR_CYC_MASK     (0x7 << 16)
115 #define NDCB0_ADDR_CYC(x)       (((x) << 16) & NDCB0_ADDR_CYC_MASK)
116 #define NDCB0_CMD2_MASK         (0xff << 8)
117 #define NDCB0_CMD1_MASK         (0xff)
118 #define NDCB0_ADDR_CYC_SHIFT    (16)
119
120 #define EXT_CMD_TYPE_DISPATCH   6 /* Command dispatch */
121 #define EXT_CMD_TYPE_NAKED_RW   5 /* Naked read or Naked write */
122 #define EXT_CMD_TYPE_READ       4 /* Read */
123 #define EXT_CMD_TYPE_DISP_WR    4 /* Command dispatch with write */
124 #define EXT_CMD_TYPE_FINAL      3 /* Final command */
125 #define EXT_CMD_TYPE_LAST_RW    1 /* Last naked read/write */
126 #define EXT_CMD_TYPE_MONO       0 /* Monolithic read/write */
127
128 /* macros for registers read/write */
129 #define nand_writel(info, off, val)     \
130         writel_relaxed((val), (info)->mmio_base + (off))
131
132 #define nand_readl(info, off)           \
133         readl_relaxed((info)->mmio_base + (off))
134
135 /* error code and state */
136 enum {
137         ERR_NONE        = 0,
138         ERR_DMABUSERR   = -1,
139         ERR_SENDCMD     = -2,
140         ERR_UNCORERR    = -3,
141         ERR_BBERR       = -4,
142         ERR_CORERR      = -5,
143 };
144
145 enum {
146         STATE_IDLE = 0,
147         STATE_PREPARED,
148         STATE_CMD_HANDLE,
149         STATE_DMA_READING,
150         STATE_DMA_WRITING,
151         STATE_DMA_DONE,
152         STATE_PIO_READING,
153         STATE_PIO_WRITING,
154         STATE_CMD_DONE,
155         STATE_READY,
156 };
157
158 enum pxa3xx_nand_variant {
159         PXA3XX_NAND_VARIANT_PXA,
160         PXA3XX_NAND_VARIANT_ARMADA370,
161 };
162
163 struct pxa3xx_nand_host {
164         struct nand_chip        chip;
165         struct mtd_info         *mtd;
166         void                    *info_data;
167
168         /* ECC usage and chip select of the attached chip */
169         int                     use_ecc;
170         int                     cs;
171
172         /* calculated from pxa3xx_nand_flash data */
173         unsigned int            col_addr_cycles;
174         unsigned int            row_addr_cycles;
175         size_t                  read_id_bytes;
176
177 };
178
179 struct pxa3xx_nand_info {
180         struct nand_hw_control  controller;
181         struct platform_device   *pdev;
182
183         struct clk              *clk;
184         void __iomem            *mmio_base;
185         unsigned long           mmio_phys;
186         struct completion       cmd_complete, dev_ready;
187
188         unsigned int            buf_start;
189         unsigned int            buf_count;
190         unsigned int            buf_size;
191         unsigned int            data_buff_pos;
192         unsigned int            oob_buff_pos;
193
194         /* DMA information */
195         int                     drcmr_dat;
196         int                     drcmr_cmd;
197
198         unsigned char           *data_buff;
199         unsigned char           *oob_buff;
200         dma_addr_t              data_buff_phys;
201         int                     data_dma_ch;
202         struct pxa_dma_desc     *data_desc;
203         dma_addr_t              data_desc_addr;
204
205         struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
206         unsigned int            state;
207
208         /*
209          * This driver supports NFCv1 (as found in PXA SoC)
210          * and NFCv2 (as found in Armada 370/XP SoC).
211          */
212         enum pxa3xx_nand_variant variant;
213
214         int                     cs;
215         int                     use_ecc;        /* use HW ECC ? */
216         int                     ecc_bch;        /* using BCH ECC? */
217         int                     use_dma;        /* use DMA ? */
218         int                     use_spare;      /* use spare ? */
219         int                     need_wait;
220
221         unsigned int            data_size;      /* data to be read from FIFO */
222         unsigned int            chunk_size;     /* split commands chunk size */
223         unsigned int            oob_size;
224         unsigned int            spare_size;
225         unsigned int            ecc_size;
226         unsigned int            ecc_err_cnt;
227         unsigned int            max_bitflips;
228         int                     retcode;
229
230         /* cached register value */
231         uint32_t                reg_ndcr;
232         uint32_t                ndtr0cs0;
233         uint32_t                ndtr1cs0;
234
235         /* generated NDCBx register values */
236         uint32_t                ndcb0;
237         uint32_t                ndcb1;
238         uint32_t                ndcb2;
239         uint32_t                ndcb3;
240 };
241
242 static bool use_dma = 1;
243 module_param(use_dma, bool, 0444);
244 MODULE_PARM_DESC(use_dma, "enable DMA for transferring data to/from the NAND HW");
245
246 static struct pxa3xx_nand_timing timing[] = {
247         { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
248         { 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
249         { 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
250         { 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
251 };
252
253 static struct pxa3xx_nand_flash builtin_flash_types[] = {
254 { "DEFAULT FLASH",      0,   0, 2048,  8,  8,    0, &timing[0] },
255 { "64MiB 16-bit",  0x46ec,  32,  512, 16, 16, 4096, &timing[1] },
256 { "256MiB 8-bit",  0xdaec,  64, 2048,  8,  8, 2048, &timing[1] },
257 { "4GiB 8-bit",    0xd7ec, 128, 4096,  8,  8, 8192, &timing[1] },
258 { "128MiB 8-bit",  0xa12c,  64, 2048,  8,  8, 1024, &timing[2] },
259 { "128MiB 16-bit", 0xb12c,  64, 2048, 16, 16, 1024, &timing[2] },
260 { "512MiB 8-bit",  0xdc2c,  64, 2048,  8,  8, 4096, &timing[2] },
261 { "512MiB 16-bit", 0xcc2c,  64, 2048, 16, 16, 4096, &timing[2] },
262 { "256MiB 16-bit", 0xba20,  64, 2048, 16, 16, 2048, &timing[3] },
263 };
264
265 static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
266 static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
267
268 static struct nand_bbt_descr bbt_main_descr = {
269         .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
270                 | NAND_BBT_2BIT | NAND_BBT_VERSION,
271         .offs = 8,
272         .len = 6,
273         .veroffs = 14,
274         .maxblocks = 8,         /* Last 8 blocks in each chip */
275         .pattern = bbt_pattern
276 };
277
278 static struct nand_bbt_descr bbt_mirror_descr = {
279         .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
280                 | NAND_BBT_2BIT | NAND_BBT_VERSION,
281         .offs = 8,
282         .len = 6,
283         .veroffs = 14,
284         .maxblocks = 8,         /* Last 8 blocks in each chip */
285         .pattern = bbt_mirror_pattern
286 };
287
288 static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
289         .eccbytes = 32,
290         .eccpos = {
291                 32, 33, 34, 35, 36, 37, 38, 39,
292                 40, 41, 42, 43, 44, 45, 46, 47,
293                 48, 49, 50, 51, 52, 53, 54, 55,
294                 56, 57, 58, 59, 60, 61, 62, 63},
295         .oobfree = { {2, 30} }
296 };
297
298 static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
299         .eccbytes = 64,
300         .eccpos = {
301                 32,  33,  34,  35,  36,  37,  38,  39,
302                 40,  41,  42,  43,  44,  45,  46,  47,
303                 48,  49,  50,  51,  52,  53,  54,  55,
304                 56,  57,  58,  59,  60,  61,  62,  63,
305                 96,  97,  98,  99,  100, 101, 102, 103,
306                 104, 105, 106, 107, 108, 109, 110, 111,
307                 112, 113, 114, 115, 116, 117, 118, 119,
308                 120, 121, 122, 123, 124, 125, 126, 127},
309         /* Bootrom looks in bytes 0 & 5 for bad blocks */
310         .oobfree = { {6, 26}, { 64, 32} }
311 };
312
313 static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
314         .eccbytes = 128,
315         .eccpos = {
316                 32,  33,  34,  35,  36,  37,  38,  39,
317                 40,  41,  42,  43,  44,  45,  46,  47,
318                 48,  49,  50,  51,  52,  53,  54,  55,
319                 56,  57,  58,  59,  60,  61,  62,  63},
320         .oobfree = { }
321 };
322
323 /* Default flash type setting, used only for flash detection */
324 #define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
325
326 #define NDTR0_tCH(c)    (min((c), 7) << 19)
327 #define NDTR0_tCS(c)    (min((c), 7) << 16)
328 #define NDTR0_tWH(c)    (min((c), 7) << 11)
329 #define NDTR0_tWP(c)    (min((c), 7) << 8)
330 #define NDTR0_tRH(c)    (min((c), 7) << 3)
331 #define NDTR0_tRP(c)    (min((c), 7) << 0)
332
333 #define NDTR1_tR(c)     (min((c), 65535) << 16)
334 #define NDTR1_tWHR(c)   (min((c), 15) << 4)
335 #define NDTR1_tAR(c)    (min((c), 15) << 0)
336
337 /* convert nanoseconds to NAND flash controller clock cycles */
338 #define ns2cycle(ns, clk)       (int)((ns) * (clk / 1000000) / 1000)
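/*
 * For illustration (assuming, hypothetically, a 156 MHz controller clock):
 * ns2cycle(40, 156000000) = (40 * 156) / 1000 = 6 cycles, the integer
 * division rounding the result down.
 */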
339
340 static const struct of_device_id pxa3xx_nand_dt_ids[] = {
341         {
342                 .compatible = "marvell,pxa3xx-nand",
343                 .data       = (void *)PXA3XX_NAND_VARIANT_PXA,
344         },
345         {
346                 .compatible = "marvell,armada370-nand",
347                 .data       = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
348         },
349         {}
350 };
351 MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
352
353 static enum pxa3xx_nand_variant
354 pxa3xx_nand_get_variant(struct platform_device *pdev)
355 {
356         const struct of_device_id *of_id =
357                         of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
358         if (!of_id)
359                 return PXA3XX_NAND_VARIANT_PXA;
360         return (enum pxa3xx_nand_variant)of_id->data;
361 }
362
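/*
 * Convert the timing values, given in nanoseconds, into controller clock
 * cycles and program them into NDTR0CS0/NDTR1CS0. The computed values are
 * also cached in the driver data so they can be restored whenever the
 * selected chip changes.
 */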
363 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
364                                    const struct pxa3xx_nand_timing *t)
365 {
366         struct pxa3xx_nand_info *info = host->info_data;
367         unsigned long nand_clk = clk_get_rate(info->clk);
368         uint32_t ndtr0, ndtr1;
369
370         ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
371                 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
372                 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
373                 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
374                 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
375                 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
376
377         ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
378                 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
379                 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
380
381         info->ndtr0cs0 = ndtr0;
382         info->ndtr1cs0 = ndtr1;
383         nand_writel(info, NDTR0CS0, ndtr0);
384         nand_writel(info, NDTR1CS0, ndtr1);
385 }
386
387 /*
388  * Set the data and OOB size, depending on the selected
389  * spare and ECC configuration.
390  * Only applicable to READ0, READOOB and PAGEPROG commands.
391  */
392 static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
393                                 struct mtd_info *mtd)
394 {
395         int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
396
397         info->data_size = mtd->writesize;
398         if (!oob_enable)
399                 return;
400
401         info->oob_size = info->spare_size;
402         if (!info->use_ecc)
403                 info->oob_size += info->ecc_size;
404 }
405
406 /*
407  * NOTE: ND_RUN must be set first, and the command buffer written
408  * afterwards; otherwise the controller does not work.
409  * We enable all the interrupts at the same time and
410  * let pxa3xx_nand_irq() handle all the logic.
411  */
412 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
413 {
414         uint32_t ndcr;
415
416         ndcr = info->reg_ndcr;
417
418         if (info->use_ecc) {
419                 ndcr |= NDCR_ECC_EN;
420                 if (info->ecc_bch)
421                         nand_writel(info, NDECCCTRL, 0x1);
422         } else {
423                 ndcr &= ~NDCR_ECC_EN;
424                 if (info->ecc_bch)
425                         nand_writel(info, NDECCCTRL, 0x0);
426         }
427
428         if (info->use_dma)
429                 ndcr |= NDCR_DMA_EN;
430         else
431                 ndcr &= ~NDCR_DMA_EN;
432
433         if (info->use_spare)
434                 ndcr |= NDCR_SPARE_EN;
435         else
436                 ndcr &= ~NDCR_SPARE_EN;
437
438         ndcr |= NDCR_ND_RUN;
439
440         /* clear status bits and run */
441         nand_writel(info, NDCR, 0);
442         nand_writel(info, NDSR, NDSR_MASK);
443         nand_writel(info, NDCR, ndcr);
444 }
445
446 static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
447 {
448         uint32_t ndcr;
449         int timeout = NAND_STOP_DELAY;
450
451         /* wait for the RUN bit in NDCR to become 0 */
452         ndcr = nand_readl(info, NDCR);
453         while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
454                 ndcr = nand_readl(info, NDCR);
455                 udelay(1);
456         }
457
458         if (timeout <= 0) {
459                 ndcr &= ~NDCR_ND_RUN;
460                 nand_writel(info, NDCR, ndcr);
461         }
462         /* clear status bits */
463         nand_writel(info, NDSR, NDSR_MASK);
464 }
465
466 static void __maybe_unused
467 enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
468 {
469         uint32_t ndcr;
470
471         ndcr = nand_readl(info, NDCR);
472         nand_writel(info, NDCR, ndcr & ~int_mask);
473 }
474
475 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
476 {
477         uint32_t ndcr;
478
479         ndcr = nand_readl(info, NDCR);
480         nand_writel(info, NDCR, ndcr | int_mask);
481 }
482
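/*
 * Transfer one chunk of data (and the OOB area, if any) between the driver
 * buffer and the NDDB FIFO by PIO. The direction is taken from the current
 * state (STATE_PIO_READING/STATE_PIO_WRITING), and the buffer positions are
 * advanced so that multi-chunk pages can be handled by repeated calls.
 */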
483 static void handle_data_pio(struct pxa3xx_nand_info *info)
484 {
485         unsigned int do_bytes = min(info->data_size, info->chunk_size);
486
487         switch (info->state) {
488         case STATE_PIO_WRITING:
489                 __raw_writesl(info->mmio_base + NDDB,
490                               info->data_buff + info->data_buff_pos,
491                               DIV_ROUND_UP(do_bytes, 4));
492
493                 if (info->oob_size > 0)
494                         __raw_writesl(info->mmio_base + NDDB,
495                                       info->oob_buff + info->oob_buff_pos,
496                                       DIV_ROUND_UP(info->oob_size, 4));
497                 break;
498         case STATE_PIO_READING:
499                 __raw_readsl(info->mmio_base + NDDB,
500                              info->data_buff + info->data_buff_pos,
501                              DIV_ROUND_UP(do_bytes, 4));
502
503                 if (info->oob_size > 0)
504                         __raw_readsl(info->mmio_base + NDDB,
505                                      info->oob_buff + info->oob_buff_pos,
506                                      DIV_ROUND_UP(info->oob_size, 4));
507                 break;
508         default:
509                 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
510                                 info->state);
511                 BUG();
512         }
513
514         /* Update buffer pointers for multi-page read/write */
515         info->data_buff_pos += do_bytes;
516         info->oob_buff_pos += info->oob_size;
517         info->data_size -= do_bytes;
518 }
519
520 #ifdef ARCH_HAS_DMA
521 static void start_data_dma(struct pxa3xx_nand_info *info)
522 {
523         struct pxa_dma_desc *desc = info->data_desc;
524         int dma_len = ALIGN(info->data_size + info->oob_size, 32);
525
526         desc->ddadr = DDADR_STOP;
527         desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;
528
529         switch (info->state) {
530         case STATE_DMA_WRITING:
531                 desc->dsadr = info->data_buff_phys;
532                 desc->dtadr = info->mmio_phys + NDDB;
533                 desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
534                 break;
535         case STATE_DMA_READING:
536                 desc->dtadr = info->data_buff_phys;
537                 desc->dsadr = info->mmio_phys + NDDB;
538                 desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
539                 break;
540         default:
541                 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
542                                 info->state);
543                 BUG();
544         }
545
546         DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
547         DDADR(info->data_dma_ch) = info->data_desc_addr;
548         DCSR(info->data_dma_ch) |= DCSR_RUN;
549 }
550
551 static void pxa3xx_nand_data_dma_irq(int channel, void *data)
552 {
553         struct pxa3xx_nand_info *info = data;
554         uint32_t dcsr;
555
556         dcsr = DCSR(channel);
557         DCSR(channel) = dcsr;
558
559         if (dcsr & DCSR_BUSERR) {
560                 info->retcode = ERR_DMABUSERR;
561         }
562
563         info->state = STATE_DMA_DONE;
564         enable_int(info, NDCR_INT_MASK);
565         nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
566 }
567 #else
568 static void start_data_dma(struct pxa3xx_nand_info *info)
569 {}
570 #endif
571
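/*
 * Interrupt handler: decode NDSR, record correctable/uncorrectable ECC
 * errors, start a DMA transfer or do PIO on read/write data requests,
 * load the prepared NDCB words when the controller requests a command,
 * and complete the cmd_complete/dev_ready completions as appropriate.
 */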
572 static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
573 {
574         struct pxa3xx_nand_info *info = devid;
575         unsigned int status, is_completed = 0, is_ready = 0;
576         unsigned int ready, cmd_done;
577
578         if (info->cs == 0) {
579                 ready           = NDSR_FLASH_RDY;
580                 cmd_done        = NDSR_CS0_CMDD;
581         } else {
582                 ready           = NDSR_RDY;
583                 cmd_done        = NDSR_CS1_CMDD;
584         }
585
586         status = nand_readl(info, NDSR);
587
588         if (status & NDSR_UNCORERR)
589                 info->retcode = ERR_UNCORERR;
590         if (status & NDSR_CORERR) {
591                 info->retcode = ERR_CORERR;
592                 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
593                     info->ecc_bch)
594                         info->ecc_err_cnt = NDSR_ERR_CNT(status);
595                 else
596                         info->ecc_err_cnt = 1;
597
598                 /*
599                  * Each chunk composing a page is corrected independently,
600                  * and we need to store the maximum number of corrected
601                  * bitflips to return it to the MTD layer in ecc.read_page().
602                  */
603                 info->max_bitflips = max_t(unsigned int,
604                                            info->max_bitflips,
605                                            info->ecc_err_cnt);
606         }
607         if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
608                 /* use DMA to transfer the data if enabled */
609                 if (info->use_dma) {
610                         disable_int(info, NDCR_INT_MASK);
611                         info->state = (status & NDSR_RDDREQ) ?
612                                       STATE_DMA_READING : STATE_DMA_WRITING;
613                         start_data_dma(info);
614                         goto NORMAL_IRQ_EXIT;
615                 } else {
616                         info->state = (status & NDSR_RDDREQ) ?
617                                       STATE_PIO_READING : STATE_PIO_WRITING;
618                         handle_data_pio(info);
619                 }
620         }
621         if (status & cmd_done) {
622                 info->state = STATE_CMD_DONE;
623                 is_completed = 1;
624         }
625         if (status & ready) {
626                 info->state = STATE_READY;
627                 is_ready = 1;
628         }
629
630         if (status & NDSR_WRCMDREQ) {
631                 nand_writel(info, NDSR, NDSR_WRCMDREQ);
632                 status &= ~NDSR_WRCMDREQ;
633                 info->state = STATE_CMD_HANDLE;
634
635                 /*
636                  * Command buffer registers NDCB{0-2} (and optionally NDCB3)
637                  * must be loaded by writing either 12 or 16 bytes
638                  * directly to NDCB0, four bytes at a time.
639                  *
640                  * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
641                  * but each NDCBx register can be read.
642                  */
643                 nand_writel(info, NDCB0, info->ndcb0);
644                 nand_writel(info, NDCB0, info->ndcb1);
645                 nand_writel(info, NDCB0, info->ndcb2);
646
647                 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
648                 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
649                         nand_writel(info, NDCB0, info->ndcb3);
650         }
651
652         /* clear NDSR to let the controller exit the IRQ */
653         nand_writel(info, NDSR, status);
654         if (is_completed)
655                 complete(&info->cmd_complete);
656         if (is_ready)
657                 complete(&info->dev_ready);
658 NORMAL_IRQ_EXIT:
659         return IRQ_HANDLED;
660 }
661
662 static inline int is_buf_blank(uint8_t *buf, size_t len)
663 {
664         for (; len > 0; len--)
665                 if (*buf++ != 0xff)
666                         return 0;
667         return 1;
668 }
669
670 static void set_command_address(struct pxa3xx_nand_info *info,
671                 unsigned int page_size, uint16_t column, int page_addr)
672 {
673         /* small page addr setting */
674         if (page_size < PAGE_CHUNK_SIZE) {
675                 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
676                                 | (column & 0xFF);
677
678                 info->ndcb2 = 0;
679         } else {
680                 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
681                                 | (column & 0xFFFF);
682
683                 if (page_addr & 0xFF0000)
684                         info->ndcb2 = (page_addr & 0xFF0000) >> 16;
685                 else
686                         info->ndcb2 = 0;
687         }
688 }
689
690 static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
691 {
692         struct pxa3xx_nand_host *host = info->host[info->cs];
693         struct mtd_info *mtd = host->mtd;
694
695         /* reset data and OOB column pointers before handling data */
696         info->buf_start         = 0;
697         info->buf_count         = 0;
698         info->oob_size          = 0;
699         info->data_buff_pos     = 0;
700         info->oob_buff_pos      = 0;
701         info->use_ecc           = 0;
702         info->use_spare         = 1;
703         info->retcode           = ERR_NONE;
704         info->ecc_err_cnt       = 0;
705         info->ndcb3             = 0;
706         info->need_wait         = 0;
707
708         switch (command) {
709         case NAND_CMD_READ0:
710         case NAND_CMD_PAGEPROG:
711                 info->use_ecc = 1;
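                /* fall through: READ0 and PAGEPROG also need the data size set */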
712         case NAND_CMD_READOOB:
713                 pxa3xx_set_datasize(info, mtd);
714                 break;
715         case NAND_CMD_PARAM:
716                 info->use_spare = 0;
717                 break;
718         default:
719                 info->ndcb1 = 0;
720                 info->ndcb2 = 0;
721                 break;
722         }
723
724         /*
725          * If we are about to issue a read command, or about to set
726          * the write address, then clean the data buffer.
727          */
728         if (command == NAND_CMD_READ0 ||
729             command == NAND_CMD_READOOB ||
730             command == NAND_CMD_SEQIN) {
731
732                 info->buf_count = mtd->writesize + mtd->oobsize;
733                 memset(info->data_buff, 0xFF, info->buf_count);
734         }
735
736 }
737
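/*
 * Translate a NAND command into the NDCB0-3 values to be loaded into the
 * command buffer. Returns 1 if the command must actually be issued to the
 * controller, 0 if nothing needs to be executed (e.g. SEQIN on small pages,
 * ERASE2, or programming an all-0xff buffer).
 */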
738 static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
739                 int ext_cmd_type, uint16_t column, int page_addr)
740 {
741         int addr_cycle, exec_cmd;
742         struct pxa3xx_nand_host *host;
743         struct mtd_info *mtd;
744
745         host = info->host[info->cs];
746         mtd = host->mtd;
747         addr_cycle = 0;
748         exec_cmd = 1;
749
750         if (info->cs != 0)
751                 info->ndcb0 = NDCB0_CSEL;
752         else
753                 info->ndcb0 = 0;
754
755         if (command == NAND_CMD_SEQIN)
756                 exec_cmd = 0;
757
758         addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
759                                     + host->col_addr_cycles);
760
761         switch (command) {
762         case NAND_CMD_READOOB:
763         case NAND_CMD_READ0:
764                 info->buf_start = column;
765                 info->ndcb0 |= NDCB0_CMD_TYPE(0)
766                                 | addr_cycle
767                                 | NAND_CMD_READ0;
768
769                 if (command == NAND_CMD_READOOB)
770                         info->buf_start += mtd->writesize;
771
772                 /*
773                  * Multiple page read needs an 'extended command type' field,
774                  * which is either naked-read or last-read according to the
775                  * state.
776                  */
777                 if (mtd->writesize == PAGE_CHUNK_SIZE) {
778                         info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
779                 } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
780                         info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
781                                         | NDCB0_LEN_OVRD
782                                         | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
783                         info->ndcb3 = info->chunk_size +
784                                       info->oob_size;
785                 }
786
787                 set_command_address(info, mtd->writesize, column, page_addr);
788                 break;
789
790         case NAND_CMD_SEQIN:
791
792                 info->buf_start = column;
793                 set_command_address(info, mtd->writesize, 0, page_addr);
794
795                 /*
796                  * Multiple page programming needs to execute the initial
797                  * SEQIN command that sets the page address.
798                  */
799                 if (mtd->writesize > PAGE_CHUNK_SIZE) {
800                         info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
801                                 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
802                                 | addr_cycle
803                                 | command;
804                         /* No data transfer in this case */
805                         info->data_size = 0;
806                         exec_cmd = 1;
807                 }
808                 break;
809
810         case NAND_CMD_PAGEPROG:
811                 if (is_buf_blank(info->data_buff,
812                                         (mtd->writesize + mtd->oobsize))) {
813                         exec_cmd = 0;
814                         break;
815                 }
816
817                 /* Second command setting for large pages */
818                 if (mtd->writesize > PAGE_CHUNK_SIZE) {
819                         /*
820                          * Multiple page write uses the 'extended command'
821                          * field. This can be used to issue a command dispatch
822                          * or a naked-write depending on the current stage.
823                          */
824                         info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
825                                         | NDCB0_LEN_OVRD
826                                         | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
827                         info->ndcb3 = info->chunk_size +
828                                       info->oob_size;
829
830                         /*
831                          * This is the command dispatch that completes a chunked
832                          * page program operation.
833                          */
834                         if (info->data_size == 0) {
835                                 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
836                                         | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
837                                         | command;
838                                 info->ndcb1 = 0;
839                                 info->ndcb2 = 0;
840                                 info->ndcb3 = 0;
841                         }
842                 } else {
843                         info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
844                                         | NDCB0_AUTO_RS
845                                         | NDCB0_ST_ROW_EN
846                                         | NDCB0_DBC
847                                         | (NAND_CMD_PAGEPROG << 8)
848                                         | NAND_CMD_SEQIN
849                                         | addr_cycle;
850                 }
851                 break;
852
853         case NAND_CMD_PARAM:
854                 info->buf_count = 256;
855                 info->ndcb0 |= NDCB0_CMD_TYPE(0)
856                                 | NDCB0_ADDR_CYC(1)
857                                 | NDCB0_LEN_OVRD
858                                 | command;
859                 info->ndcb1 = (column & 0xFF);
860                 info->ndcb3 = 256;
861                 info->data_size = 256;
862                 break;
863
864         case NAND_CMD_READID:
865                 info->buf_count = host->read_id_bytes;
866                 info->ndcb0 |= NDCB0_CMD_TYPE(3)
867                                 | NDCB0_ADDR_CYC(1)
868                                 | command;
869                 info->ndcb1 = (column & 0xFF);
870
871                 info->data_size = 8;
872                 break;
873         case NAND_CMD_STATUS:
874                 info->buf_count = 1;
875                 info->ndcb0 |= NDCB0_CMD_TYPE(4)
876                                 | NDCB0_ADDR_CYC(1)
877                                 | command;
878
879                 info->data_size = 8;
880                 break;
881
882         case NAND_CMD_ERASE1:
883                 info->ndcb0 |= NDCB0_CMD_TYPE(2)
884                                 | NDCB0_AUTO_RS
885                                 | NDCB0_ADDR_CYC(3)
886                                 | NDCB0_DBC
887                                 | (NAND_CMD_ERASE2 << 8)
888                                 | NAND_CMD_ERASE1;
889                 info->ndcb1 = page_addr;
890                 info->ndcb2 = 0;
891
892                 break;
893         case NAND_CMD_RESET:
894                 info->ndcb0 |= NDCB0_CMD_TYPE(5)
895                                 | command;
896
897                 break;
898
899         case NAND_CMD_ERASE2:
900                 exec_cmd = 0;
901                 break;
902
903         default:
904                 exec_cmd = 0;
905                 dev_err(&info->pdev->dev, "unsupported command %x\n",
906                                 command);
907                 break;
908         }
909
910         return exec_cmd;
911 }
912
913 static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
914                          int column, int page_addr)
915 {
916         struct pxa3xx_nand_host *host = mtd->priv;
917         struct pxa3xx_nand_info *info = host->info_data;
918         int ret, exec_cmd;
919
920         /*
921          * if this is a x16 device, then convert the input
922          * "byte" address into a "word" address appropriate
923          * for indexing a word-oriented device
924          */
925         if (info->reg_ndcr & NDCR_DWIDTH_M)
926                 column /= 2;
927
928         /*
929          * There may be different NAND chips hooked to
930          * different chip selects, so check whether the
931          * chip select has changed and, if so, reprogram the timing
932          */
933         if (info->cs != host->cs) {
934                 info->cs = host->cs;
935                 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
936                 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
937         }
938
939         prepare_start_command(info, command);
940
941         info->state = STATE_PREPARED;
942         exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
943
944         if (exec_cmd) {
945                 init_completion(&info->cmd_complete);
946                 init_completion(&info->dev_ready);
947                 info->need_wait = 1;
948                 pxa3xx_nand_start(info);
949
950                 ret = wait_for_completion_timeout(&info->cmd_complete,
951                                 CHIP_DELAY_TIMEOUT);
952                 if (!ret) {
953                         dev_err(&info->pdev->dev, "Wait time out!!!\n");
954                         /* Stop State Machine for next command cycle */
955                         pxa3xx_nand_stop(info);
956                 }
957         }
958         info->state = STATE_IDLE;
959 }
960
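/*
 * Extended cmdfunc, used on the Armada 370/XP variant when the page is
 * larger than the 2048 byte FIFO: the page is transferred chunk by chunk
 * using the extended command types (monolithic, naked and last naked reads;
 * naked writes finished by a command dispatch).
 */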
961 static void nand_cmdfunc_extended(struct mtd_info *mtd,
962                                   const unsigned command,
963                                   int column, int page_addr)
964 {
965         struct pxa3xx_nand_host *host = mtd->priv;
966         struct pxa3xx_nand_info *info = host->info_data;
967         int ret, exec_cmd, ext_cmd_type;
968
969         /*
970          * if this is a x16 device then convert the input
971          * "byte" address into a "word" address appropriate
972          * for indexing a word-oriented device
973          */
974         if (info->reg_ndcr & NDCR_DWIDTH_M)
975                 column /= 2;
976
977         /*
978          * There may be different NAND chips hooked to
979          * different chip selects, so check whether the
980          * chip select has changed and, if so, reprogram the timing
981          */
982         if (info->cs != host->cs) {
983                 info->cs = host->cs;
984                 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
985                 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
986         }
987
988         /* Select the extended command for the first command */
989         switch (command) {
990         case NAND_CMD_READ0:
991         case NAND_CMD_READOOB:
992                 ext_cmd_type = EXT_CMD_TYPE_MONO;
993                 break;
994         case NAND_CMD_SEQIN:
995                 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
996                 break;
997         case NAND_CMD_PAGEPROG:
998                 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
999                 break;
1000         default:
1001                 ext_cmd_type = 0;
1002                 break;
1003         }
1004
1005         prepare_start_command(info, command);
1006
1007         /*
1008          * Prepare the "is ready" completion before starting a command
1009          * transaction sequence. If the command is not executed, the
1010          * completion is completed immediately instead, see below.
1011          *
1012          * We can do that inside the loop because the command variable
1013          * is invariant and thus so is exec_cmd.
1014          */
1015         info->need_wait = 1;
1016         init_completion(&info->dev_ready);
1017         do {
1018                 info->state = STATE_PREPARED;
1019                 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
1020                                                column, page_addr);
1021                 if (!exec_cmd) {
1022                         info->need_wait = 0;
1023                         complete(&info->dev_ready);
1024                         break;
1025                 }
1026
1027                 init_completion(&info->cmd_complete);
1028                 pxa3xx_nand_start(info);
1029
1030                 ret = wait_for_completion_timeout(&info->cmd_complete,
1031                                 CHIP_DELAY_TIMEOUT);
1032                 if (!ret) {
1033                         dev_err(&info->pdev->dev, "Wait time out!!!\n");
1034                         /* Stop State Machine for next command cycle */
1035                         pxa3xx_nand_stop(info);
1036                         break;
1037                 }
1038
1039                 /* Check if the sequence is complete */
1040                 if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
1041                         break;
1042
1043                 /*
1044                  * After a split program command sequence has issued
1045                  * the command dispatch, the command sequence is complete.
1046                  */
1047                 if (info->data_size == 0 &&
1048                     command == NAND_CMD_PAGEPROG &&
1049                     ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1050                         break;
1051
1052                 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1053                         /* Last read: issue a 'last naked read' */
1054                         if (info->data_size == info->chunk_size)
1055                                 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1056                         else
1057                                 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1058
1059                 /*
1060                  * If a split program command has no more data to transfer,
1061                  * the command dispatch must be issued to complete it.
1062                  */
1063                 } else if (command == NAND_CMD_PAGEPROG &&
1064                            info->data_size == 0) {
1065                                 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1066                 }
1067         } while (1);
1068
1069         info->state = STATE_IDLE;
1070 }
1071
1072 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1073                 struct nand_chip *chip, const uint8_t *buf, int oob_required)
1074 {
1075         chip->write_buf(mtd, buf, mtd->writesize);
1076         chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1077
1078         return 0;
1079 }
1080
1081 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1082                 struct nand_chip *chip, uint8_t *buf, int oob_required,
1083                 int page)
1084 {
1085         struct pxa3xx_nand_host *host = mtd->priv;
1086         struct pxa3xx_nand_info *info = host->info_data;
1087
1088         chip->read_buf(mtd, buf, mtd->writesize);
1089         chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1090
1091         if (info->retcode == ERR_CORERR && info->use_ecc) {
1092                 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1093
1094         } else if (info->retcode == ERR_UNCORERR) {
1095                 /*
1096                  * For a blank page (all 0xff), the HW calculates its ECC as
1097                  * 0, which is different from the ECC information within the
1098                  * OOB; ignore such uncorrectable errors.
1099                  */
1100                 if (is_buf_blank(buf, mtd->writesize))
1101                         info->retcode = ERR_NONE;
1102                 else
1103                         mtd->ecc_stats.failed++;
1104         }
1105
1106         return info->max_bitflips;
1107 }
1108
1109 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1110 {
1111         struct pxa3xx_nand_host *host = mtd->priv;
1112         struct pxa3xx_nand_info *info = host->info_data;
1113         char retval = 0xFF;
1114
1115         if (info->buf_start < info->buf_count)
1116                 /* Has a new command just been sent? */
1117                 retval = info->data_buff[info->buf_start++];
1118
1119         return retval;
1120 }
1121
1122 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1123 {
1124         struct pxa3xx_nand_host *host = mtd->priv;
1125         struct pxa3xx_nand_info *info = host->info_data;
1126         u16 retval = 0xFFFF;
1127
1128         if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1129                 retval = *((u16 *)(info->data_buff+info->buf_start));
1130                 info->buf_start += 2;
1131         }
1132         return retval;
1133 }
1134
1135 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1136 {
1137         struct pxa3xx_nand_host *host = mtd->priv;
1138         struct pxa3xx_nand_info *info = host->info_data;
1139         int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1140
1141         memcpy(buf, info->data_buff + info->buf_start, real_len);
1142         info->buf_start += real_len;
1143 }
1144
1145 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1146                 const uint8_t *buf, int len)
1147 {
1148         struct pxa3xx_nand_host *host = mtd->priv;
1149         struct pxa3xx_nand_info *info = host->info_data;
1150         int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1151
1152         memcpy(info->data_buff + info->buf_start, buf, real_len);
1153         info->buf_start += real_len;
1154 }
1155
1156 static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
1157 {
1158         return;
1159 }
1160
1161 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1162 {
1163         struct pxa3xx_nand_host *host = mtd->priv;
1164         struct pxa3xx_nand_info *info = host->info_data;
1165         int ret;
1166
1167         if (info->need_wait) {
1168                 ret = wait_for_completion_timeout(&info->dev_ready,
1169                                 CHIP_DELAY_TIMEOUT);
1170                 info->need_wait = 0;
1171                 if (!ret) {
1172                         dev_err(&info->pdev->dev, "Ready time out!!!\n");
1173                         return NAND_STATUS_FAIL;
1174                 }
1175         }
1176
1177         /* the cmdfunc has already waited for the command to complete */
1178         if (this->state == FL_WRITING || this->state == FL_ERASING) {
1179                 if (info->retcode == ERR_NONE)
1180                         return 0;
1181                 else
1182                         return NAND_STATUS_FAIL;
1183         }
1184
1185         return NAND_STATUS_READY;
1186 }
1187
1188 static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
1189                                     const struct pxa3xx_nand_flash *f)
1190 {
1191         struct platform_device *pdev = info->pdev;
1192         struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1193         struct pxa3xx_nand_host *host = info->host[info->cs];
1194         uint32_t ndcr = 0x0; /* enable all interrupts */
1195
1196         if (f->page_size != 2048 && f->page_size != 512) {
1197                 dev_err(&pdev->dev, "Only 2048 and 512 byte page sizes are supported\n");
1198                 return -EINVAL;
1199         }
1200
1201         if (f->flash_width != 16 && f->flash_width != 8) {
1202                 dev_err(&pdev->dev, "Only 8 bit and 16 bit bus widths are supported!\n");
1203                 return -EINVAL;
1204         }
1205
1206         /* calculate flash information */
1207         host->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
1208
1209         /* calculate addressing information */
1210         host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
1211
1212         if (f->num_blocks * f->page_per_block > 65536)
1213                 host->row_addr_cycles = 3;
1214         else
1215                 host->row_addr_cycles = 2;
1216
1217         ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1218         ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1219         ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
1220         ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
1221         ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
1222         ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
1223
1224         ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
1225         ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1226
1227         info->reg_ndcr = ndcr;
1228
1229         pxa3xx_nand_set_timing(host, f->timing);
1230         return 0;
1231 }
1232
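/*
 * Instead of reprogramming the controller, read back the configuration left
 * by the bootloader: derive the FIFO chunk size and READID length from the
 * NDCR page size bit, and cache NDCR (with interrupts masked) and the CS0
 * timing registers. Used when pdata->keep_config is set.
 */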
1233 static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1234 {
1235         /*
1236          * We hard-code chip select 0 here, since keep_config is not
1237          * supported when more than one chip is attached to the controller
1238          */
1239         struct pxa3xx_nand_host *host = info->host[0];
1240         uint32_t ndcr = nand_readl(info, NDCR);
1241
1242         if (ndcr & NDCR_PAGE_SZ) {
1243                 /* Controller's FIFO size */
1244                 info->chunk_size = 2048;
1245                 host->read_id_bytes = 4;
1246         } else {
1247                 info->chunk_size = 512;
1248                 host->read_id_bytes = 2;
1249         }
1250
1251         /* Cache the bootloader-programmed NDCR with the interrupt bits masked */
1252         info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
1253         info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1254         info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1255         return 0;
1256 }
1257
1258 #ifdef ARCH_HAS_DMA
1259 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1260 {
1261         struct platform_device *pdev = info->pdev;
1262         int data_desc_offset = info->buf_size - sizeof(struct pxa_dma_desc);
1263
1264         if (use_dma == 0) {
1265                 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1266                 if (info->data_buff == NULL)
1267                         return -ENOMEM;
1268                 return 0;
1269         }
1270
1271         info->data_buff = dma_alloc_coherent(&pdev->dev, info->buf_size,
1272                                 &info->data_buff_phys, GFP_KERNEL);
1273         if (info->data_buff == NULL) {
1274                 dev_err(&pdev->dev, "failed to allocate dma buffer\n");
1275                 return -ENOMEM;
1276         }
1277
1278         info->data_desc = (void *)info->data_buff + data_desc_offset;
1279         info->data_desc_addr = info->data_buff_phys + data_desc_offset;
1280
1281         info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
1282                                 pxa3xx_nand_data_dma_irq, info);
1283         if (info->data_dma_ch < 0) {
1284                 dev_err(&pdev->dev, "failed to request data dma\n");
1285                 dma_free_coherent(&pdev->dev, info->buf_size,
1286                                 info->data_buff, info->data_buff_phys);
1287                 return info->data_dma_ch;
1288         }
1289
1290         /*
1291          * Now that DMA buffers are allocated we turn on
1292          * DMA proper for I/O operations.
1293          */
1294         info->use_dma = 1;
1295         return 0;
1296 }
1297
1298 static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
1299 {
1300         struct platform_device *pdev = info->pdev;
1301         if (info->use_dma) {
1302                 pxa_free_dma(info->data_dma_ch);
1303                 dma_free_coherent(&pdev->dev, info->buf_size,
1304                                   info->data_buff, info->data_buff_phys);
1305         } else {
1306                 kfree(info->data_buff);
1307         }
1308 }
1309 #else
1310 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1311 {
1312         info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1313         if (info->data_buff == NULL)
1314                 return -ENOMEM;
1315         return 0;
1316 }
1317
1318 static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
1319 {
1320         kfree(info->data_buff);
1321 }
1322 #endif
1323
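/*
 * Probe for a chip on the current chip select: program the default flash
 * configuration and timing, issue a RESET and check that the chip reports
 * ready; returns -ENODEV otherwise.
 */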
1324 static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
1325 {
1326         struct mtd_info *mtd;
1327         struct nand_chip *chip;
1328         int ret;
1329
1330         mtd = info->host[info->cs]->mtd;
1331         chip = mtd->priv;
1332
1333         /* try the default timing for the initial detection */
1334         ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
1335         if (ret)
1336                 return ret;
1337
1338         chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1339         ret = chip->waitfunc(mtd, chip);
1340         if (ret & NAND_STATUS_FAIL)
1341                 return -ENODEV;
1342
1343         return 0;
1344 }
1345
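/*
 * Pick the ECC configuration matching the required strength, step size and
 * page size:
 *
 *   strength/step   page size   BCH   chunk   spare   ECC bytes
 *   1 bit / 512     2048        no    2048    40      24
 *   1 bit / 512     512         no    512     8       8
 *   4 bit / 512     2048        yes   2048    32      32
 *   4 bit / 512     4096        yes   2048    32      32
 *   8 bit / 512     4096        yes   1024    0       32
 */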
1346 static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1347                         struct nand_ecc_ctrl *ecc,
1348                         int strength, int ecc_stepsize, int page_size)
1349 {
1350         if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1351                 info->chunk_size = 2048;
1352                 info->spare_size = 40;
1353                 info->ecc_size = 24;
1354                 ecc->mode = NAND_ECC_HW;
1355                 ecc->size = 512;
1356                 ecc->strength = 1;
1357
1358         } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1359                 info->chunk_size = 512;
1360                 info->spare_size = 8;
1361                 info->ecc_size = 8;
1362                 ecc->mode = NAND_ECC_HW;
1363                 ecc->size = 512;
1364                 ecc->strength = 1;
1365
1366         /*
1367          * Required ECC: 4-bit correction per 512 bytes
1368          * Select: 16-bit correction per 2048 bytes
1369          */
1370         } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1371                 info->ecc_bch = 1;
1372                 info->chunk_size = 2048;
1373                 info->spare_size = 32;
1374                 info->ecc_size = 32;
1375                 ecc->mode = NAND_ECC_HW;
1376                 ecc->size = info->chunk_size;
1377                 ecc->layout = &ecc_layout_2KB_bch4bit;
1378                 ecc->strength = 16;
1379
1380         } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1381                 info->ecc_bch = 1;
1382                 info->chunk_size = 2048;
1383                 info->spare_size = 32;
1384                 info->ecc_size = 32;
1385                 ecc->mode = NAND_ECC_HW;
1386                 ecc->size = info->chunk_size;
1387                 ecc->layout = &ecc_layout_4KB_bch4bit;
1388                 ecc->strength = 16;
1389
1390         /*
1391          * Required ECC: 8-bit correction per 512 bytes
1392          * Select: 16-bit correction per 1024 bytes
1393          */
1394         } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1395                 info->ecc_bch = 1;
1396                 info->chunk_size = 1024;
1397                 info->spare_size = 0;
1398                 info->ecc_size = 32;
1399                 ecc->mode = NAND_ECC_HW;
1400                 ecc->size = info->chunk_size;
1401                 ecc->layout = &ecc_layout_4KB_bch8bit;
1402                 ecc->strength = 16;
1403         } else {
1404                 dev_err(&info->pdev->dev,
1405                         "ECC strength %d at page size %d is not supported\n",
1406                         strength, page_size);
1407                 return -ENODEV;
1408         }
1409
1410         dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
1411                  ecc->strength, ecc->size);
1412         return 0;
1413 }
1414
1415 static int pxa3xx_nand_scan(struct mtd_info *mtd)
1416 {
1417         struct pxa3xx_nand_host *host = mtd->priv;
1418         struct pxa3xx_nand_info *info = host->info_data;
1419         struct platform_device *pdev = info->pdev;
1420         struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1421         struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
1422         const struct pxa3xx_nand_flash *f = NULL;
1423         struct nand_chip *chip = mtd->priv;
1424         uint32_t id = -1;
1425         uint64_t chipsize;
1426         int i, ret, num;
1427         uint16_t ecc_strength, ecc_step;
1428
1429         if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
1430                 goto KEEP_CONFIG;
1431
1432         ret = pxa3xx_nand_sensing(info);
1433         if (ret) {
1434                 dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
1435                          info->cs);
1436
1437                 return ret;
1438         }
1439
1440         chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
1441         id = *((uint16_t *)(info->data_buff));
1442         if (id != 0)
1443                 dev_info(&info->pdev->dev, "Detected a flash id %x\n", id);
1444         else {
1445                 dev_warn(&info->pdev->dev,
1446                          "Read out ID 0, potential timing set wrong!!\n");
1447
1448                 return -EINVAL;
1449         }
1450
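        /*
         * Look the ID up in the platform-supplied flash table first, then
         * in the builtin list (skipping its first entry).
         */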
1451         num = ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1;
1452         for (i = 0; i < num; i++) {
1453                 if (i < pdata->num_flash)
1454                         f = pdata->flash + i;
1455                 else
1456                         f = &builtin_flash_types[i - pdata->num_flash + 1];
1457
1458                 /* find the chip in default list */
1459                 if (f->chip_id == id)
1460                         break;
1461         }
1462
1463         if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) {
1464                 dev_err(&info->pdev->dev, "unsupported flash id %x\n", id);
1465
1466                 return -EINVAL;
1467         }
1468
1469         ret = pxa3xx_nand_config_flash(info, f);
1470         if (ret) {
1471                 dev_err(&info->pdev->dev, "failed to configure flash\n");
1472                 return ret;
1473         }
1474
1475         pxa3xx_flash_ids[0].name = f->name;
1476         pxa3xx_flash_ids[0].dev_id = (f->chip_id >> 8) & 0xffff;
1477         pxa3xx_flash_ids[0].pagesize = f->page_size;
1478         chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size;
1479         pxa3xx_flash_ids[0].chipsize = chipsize >> 20;
1480         pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
1481         if (f->flash_width == 16)
1482                 pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
1483         pxa3xx_flash_ids[1].name = NULL;
1484         def = pxa3xx_flash_ids;
1485 KEEP_CONFIG:
1486         if (info->reg_ndcr & NDCR_DWIDTH_M)
1487                 chip->options |= NAND_BUSWIDTH_16;
1488
1489         /* Device detection must be done with ECC disabled */
1490         if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1491                 nand_writel(info, NDECCCTRL, 0x0);
1492
1493         if (nand_scan_ident(mtd, 1, def))
1494                 return -ENODEV;
1495
1496         if (pdata->flash_bbt) {
1497                 /*
1498                  * We'll use a bad block table stored in-flash and don't
1499                  * allow writing the bad block marker to the flash.
1500                  */
1501                 chip->bbt_options |= NAND_BBT_USE_FLASH |
1502                                      NAND_BBT_NO_OOB_BBM;
1503                 chip->bbt_td = &bbt_main_descr;
1504                 chip->bbt_md = &bbt_mirror_descr;
1505         }
1506
1507         /*
1508          * If the page size is bigger than the FIFO size, check that
1509          * we are given the right variant and then switch to the extended
1510          * (aka split) command handling.
1511          */
1512         if (mtd->writesize > PAGE_CHUNK_SIZE) {
1513                 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1514                         chip->cmdfunc = nand_cmdfunc_extended;
1515                 } else {
1516                         dev_err(&info->pdev->dev,
1517                                 "unsupported page size on this variant\n");
1518                         return -ENODEV;
1519                 }
1520         }
1521
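        /*
         * Prefer the ECC strength/step from platform data; otherwise fall
         * back to the values reported by the chip itself.
         */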
1522         if (pdata->ecc_strength && pdata->ecc_step_size) {
1523                 ecc_strength = pdata->ecc_strength;
1524                 ecc_step = pdata->ecc_step_size;
1525         } else {
1526                 ecc_strength = chip->ecc_strength_ds;
1527                 ecc_step = chip->ecc_step_ds;
1528         }
1529
1530         /* Set default ECC strength requirements on non-ONFI devices */
1531         if (ecc_strength < 1 && ecc_step < 1) {
1532                 ecc_strength = 1;
1533                 ecc_step = 512;
1534         }
1535
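        /* Map the requested strength/step onto a supported controller ECC scheme */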
1536         ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1537                            ecc_step, mtd->writesize);
1538         if (ret)
1539                 return ret;
1540
1541         /* calculate addressing information */
1542         if (mtd->writesize >= 2048)
1543                 host->col_addr_cycles = 2;
1544         else
1545                 host->col_addr_cycles = 1;
1546
1547         /* release the initial buffer */
1548         kfree(info->data_buff);
1549
1550         /* allocate the real data + oob buffer */
1551         info->buf_size = mtd->writesize + mtd->oobsize;
1552         ret = pxa3xx_nand_init_buff(info);
1553         if (ret)
1554                 return ret;
1555         info->oob_buff = info->data_buff + mtd->writesize;
1556
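        /* Chips with more than 65536 pages need a third row address cycle */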
1557         if ((mtd->size >> chip->page_shift) > 65536)
1558                 host->row_addr_cycles = 3;
1559         else
1560                 host->row_addr_cycles = 2;
1561         return nand_scan_tail(mtd);
1562 }
1563
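/*
 * alloc_nand_resource - allocate the driver state shared by all chip
 * selects: the per-cs mtd/nand_chip/host triplets, the clock, optional DMA
 * channels, the MMIO window, the initial detection buffer and the IRQ.
 */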
1564 static int alloc_nand_resource(struct platform_device *pdev)
1565 {
1566         struct pxa3xx_nand_platform_data *pdata;
1567         struct pxa3xx_nand_info *info;
1568         struct pxa3xx_nand_host *host;
1569         struct nand_chip *chip = NULL;
1570         struct mtd_info *mtd;
1571         struct resource *r;
1572         int ret, irq, cs;
1573
1574         pdata = dev_get_platdata(&pdev->dev);
1575         info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
1576                             sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
1577         if (!info)
1578                 return -ENOMEM;
1579
1580         info->pdev = pdev;
1581         info->variant = pxa3xx_nand_get_variant(pdev);
1582         for (cs = 0; cs < pdata->num_cs; cs++) {
1583                 mtd = (struct mtd_info *)((uintptr_t)&info[1] +
1584                       (sizeof(*mtd) + sizeof(*host)) * cs);
1585                 chip = (struct nand_chip *)(&mtd[1]);
1586                 host = (struct pxa3xx_nand_host *)chip;
1587                 info->host[cs] = host;
1588                 host->mtd = mtd;
1589                 host->cs = cs;
1590                 host->info_data = info;
1591                 mtd->priv = host;
1592                 mtd->owner = THIS_MODULE;
1593
1594                 chip->ecc.read_page     = pxa3xx_nand_read_page_hwecc;
1595                 chip->ecc.write_page    = pxa3xx_nand_write_page_hwecc;
1596                 chip->controller        = &info->controller;
1597                 chip->waitfunc          = pxa3xx_nand_waitfunc;
1598                 chip->select_chip       = pxa3xx_nand_select_chip;
1599                 chip->read_word         = pxa3xx_nand_read_word;
1600                 chip->read_byte         = pxa3xx_nand_read_byte;
1601                 chip->read_buf          = pxa3xx_nand_read_buf;
1602                 chip->write_buf         = pxa3xx_nand_write_buf;
1603                 chip->options           |= NAND_NO_SUBPAGE_WRITE;
1604                 chip->cmdfunc           = nand_cmdfunc;
1605         }
1606
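        /* The controller lock and wait queue are shared by all chip selects */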
1607         spin_lock_init(&chip->controller->lock);
1608         init_waitqueue_head(&chip->controller->wq);
1609         info->clk = devm_clk_get(&pdev->dev, NULL);
1610         if (IS_ERR(info->clk)) {
1611                 dev_err(&pdev->dev, "failed to get nand clock\n");
1612                 return PTR_ERR(info->clk);
1613         }
1614         ret = clk_prepare_enable(info->clk);
1615         if (ret < 0)
1616                 return ret;
1617
1618         if (use_dma) {
1619                 /*
1620                  * This is a dirty hack to make this driver work from
1621                  * devicetree bindings. It can be removed once we have
1622                  * a proper DMA controller framework for DT.
1623                  */
1624                 if (pdev->dev.of_node &&
1625                     of_machine_is_compatible("marvell,pxa3xx")) {
1626                         info->drcmr_dat = 97;
1627                         info->drcmr_cmd = 99;
1628                 } else {
1629                         r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1630                         if (r == NULL) {
1631                                 dev_err(&pdev->dev,
1632                                         "no resource defined for data DMA\n");
1633                                 ret = -ENXIO;
1634                                 goto fail_disable_clk;
1635                         }
1636                         info->drcmr_dat = r->start;
1637
1638                         r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1639                         if (r == NULL) {
1640                                 dev_err(&pdev->dev,
1641                                         "no resource defined for cmd DMA\n");
1642                                 ret = -ENXIO;
1643                                 goto fail_disable_clk;
1644                         }
1645                         info->drcmr_cmd = r->start;
1646                 }
1647         }
1648
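        /* A single interrupt line and MMIO window serve the whole controller */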
1649         irq = platform_get_irq(pdev, 0);
1650         if (irq < 0) {
1651                 dev_err(&pdev->dev, "no IRQ resource defined\n");
1652                 ret = -ENXIO;
1653                 goto fail_disable_clk;
1654         }
1655
1656         r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1657         info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
1658         if (IS_ERR(info->mmio_base)) {
1659                 ret = PTR_ERR(info->mmio_base);
1660                 goto fail_disable_clk;
1661         }
1662         info->mmio_phys = r->start;
1663
1664         /* Allocate a buffer to allow flash detection */
1665         info->buf_size = INIT_BUFFER_SIZE;
1666         info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1667         if (info->data_buff == NULL) {
1668                 ret = -ENOMEM;
1669                 goto fail_disable_clk;
1670         }
1671
1672         /* initialize all interrupts to be disabled */
1673         disable_int(info, NDSR_MASK);
1674
1675         ret = request_irq(irq, pxa3xx_nand_irq, 0, pdev->name, info);
1676         if (ret < 0) {
1677                 dev_err(&pdev->dev, "failed to request IRQ\n");
1678                 goto fail_free_buf;
1679         }
1680
1681         platform_set_drvdata(pdev, info);
1682
1683         return 0;
1684
1685 fail_free_buf:
1686         free_irq(irq, info);
1687         kfree(info->data_buff);
1688 fail_disable_clk:
1689         clk_disable_unprepare(info->clk);
1690         return ret;
1691 }
1692
1693 static int pxa3xx_nand_remove(struct platform_device *pdev)
1694 {
1695         struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1696         struct pxa3xx_nand_platform_data *pdata;
1697         int irq, cs;
1698
1699         if (!info)
1700                 return 0;
1701
1702         pdata = dev_get_platdata(&pdev->dev);
1703
1704         irq = platform_get_irq(pdev, 0);
1705         if (irq >= 0)
1706                 free_irq(irq, info);
1707         pxa3xx_nand_free_buff(info);
1708
1709         clk_disable_unprepare(info->clk);
1710
1711         for (cs = 0; cs < pdata->num_cs; cs++)
1712                 nand_release(info->host[cs]->mtd);
1713         return 0;
1714 }
1715
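/*
 * pxa3xx_nand_probe_dt - build platform data from the device tree.
 * Returns 0 without doing anything when the device was not probed via DT.
 */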
1716 static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1717 {
1718         struct pxa3xx_nand_platform_data *pdata;
1719         struct device_node *np = pdev->dev.of_node;
1720         const struct of_device_id *of_id =
1721                         of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
1722
1723         if (!of_id)
1724                 return 0;
1725
1726         pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1727         if (!pdata)
1728                 return -ENOMEM;
1729
1730         if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
1731                 pdata->enable_arbiter = 1;
1732         if (of_get_property(np, "marvell,nand-keep-config", NULL))
1733                 pdata->keep_config = 1;
1734         of_property_read_u32(np, "num-cs", &pdata->num_cs);
1735         pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
1736
1737         pdata->ecc_strength = of_get_nand_ecc_strength(np);
1738         if (pdata->ecc_strength < 0)
1739                 pdata->ecc_strength = 0;
1740
1741         pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
1742         if (pdata->ecc_step_size < 0)
1743                 pdata->ecc_step_size = 0;
1744
1745         pdev->dev.platform_data = pdata;
1746
1747         return 0;
1748 }
1749
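/*
 * pxa3xx_nand_probe - bind the controller: parse DT (if any), allocate the
 * shared resources, then scan and register an mtd device per chip select.
 * Probing succeeds as long as at least one chip select registers.
 */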
1750 static int pxa3xx_nand_probe(struct platform_device *pdev)
1751 {
1752         struct pxa3xx_nand_platform_data *pdata;
1753         struct mtd_part_parser_data ppdata = {};
1754         struct pxa3xx_nand_info *info;
1755         int ret, cs, probe_success;
1756
1757 #ifndef ARCH_HAS_DMA
1758         if (use_dma) {
1759                 use_dma = 0;
1760                 dev_warn(&pdev->dev,
1761                          "This platform can't do DMA on this device\n");
1762         }
1763 #endif
1764         ret = pxa3xx_nand_probe_dt(pdev);
1765         if (ret)
1766                 return ret;
1767
1768         pdata = dev_get_platdata(&pdev->dev);
1769         if (!pdata) {
1770                 dev_err(&pdev->dev, "no platform data defined\n");
1771                 return -ENODEV;
1772         }
1773
1774         ret = alloc_nand_resource(pdev);
1775         if (ret) {
1776                 dev_err(&pdev->dev, "alloc nand resource failed\n");
1777                 return ret;
1778         }
1779
1780         info = platform_get_drvdata(pdev);
1781         probe_success = 0;
1782         for (cs = 0; cs < pdata->num_cs; cs++) {
1783                 struct mtd_info *mtd = info->host[cs]->mtd;
1784
1785                 /*
1786                  * The mtd name matches the one used in the 'mtdparts' kernel
1787                  * parameter. This name cannot be changed, otherwise the
1788                  * user's mtd partition configuration would break.
1789                  */
1790                 mtd->name = "pxa3xx_nand-0";
1791                 info->cs = cs;
1792                 ret = pxa3xx_nand_scan(mtd);
1793                 if (ret) {
1794                         dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
1795                                 cs);
1796                         continue;
1797                 }
1798
1799                 ppdata.of_node = pdev->dev.of_node;
1800                 ret = mtd_device_parse_register(mtd, NULL,
1801                                                 &ppdata, pdata->parts[cs],
1802                                                 pdata->nr_parts[cs]);
1803                 if (!ret)
1804                         probe_success = 1;
1805         }
1806
1807         if (!probe_success) {
1808                 pxa3xx_nand_remove(pdev);
1809                 return -ENODEV;
1810         }
1811
1812         return 0;
1813 }
1814
1815 #ifdef CONFIG_PM
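/* Refuse to suspend while a command is in flight, then suspend every mtd */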
1816 static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
1817 {
1818         struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1819         struct pxa3xx_nand_platform_data *pdata;
1820         struct mtd_info *mtd;
1821         int cs;
1822
1823         pdata = dev_get_platdata(&pdev->dev);
1824         if (info->state) {
1825                 dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
1826                 return -EAGAIN;
1827         }
1828
1829         for (cs = 0; cs < pdata->num_cs; cs++) {
1830                 mtd = info->host[cs]->mtd;
1831                 mtd_suspend(mtd);
1832         }
1833
1834         return 0;
1835 }
1836
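/*
 * On resume, keep interrupts masked, invalidate the cached chip select and
 * clear stale status bits before handing the mtds back to the mtd core.
 */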
1837 static int pxa3xx_nand_resume(struct platform_device *pdev)
1838 {
1839         struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1840         struct pxa3xx_nand_platform_data *pdata;
1841         struct mtd_info *mtd;
1842         int cs;
1843
1844         pdata = dev_get_platdata(&pdev->dev);
1845         /* We don't want to handle interrupts without an mtd request in flight */
1846         disable_int(info, NDCR_INT_MASK);
1847
1848         /*
1849          * Directly set the chip select to an invalid value,
1850          * so the driver resets the timing for the current
1851          * chip select at the beginning of cmdfunc.
1852          */
1853         info->cs = 0xff;
1854
1855         /*
1856          * As the spec says, NDSR is updated to 0x1800 when the nand_clk
1857          * is disabled and re-enabled.
1858          * To prevent this from corrupting the driver's state machine,
1859          * clear all status bits before resuming.
1860          */
1861         nand_writel(info, NDSR, NDSR_MASK);
1862         for (cs = 0; cs < pdata->num_cs; cs++) {
1863                 mtd = info->host[cs]->mtd;
1864                 mtd_resume(mtd);
1865         }
1866
1867         return 0;
1868 }
1869 #else
1870 #define pxa3xx_nand_suspend     NULL
1871 #define pxa3xx_nand_resume      NULL
1872 #endif
1873
1874 static struct platform_driver pxa3xx_nand_driver = {
1875         .driver = {
1876                 .name   = "pxa3xx-nand",
1877                 .of_match_table = pxa3xx_nand_dt_ids,
1878         },
1879         .probe          = pxa3xx_nand_probe,
1880         .remove         = pxa3xx_nand_remove,
1881         .suspend        = pxa3xx_nand_suspend,
1882         .resume         = pxa3xx_nand_resume,
1883 };
1884
1885 module_platform_driver(pxa3xx_nand_driver);
1886
1887 MODULE_LICENSE("GPL");
1888 MODULE_DESCRIPTION("PXA3xx NAND controller driver");