lantiq: update 3.14 patches
[openwrt.git] target/linux/lantiq/patches-3.14/0018-MTD-nand-lots-of-xrx200-fixes.patch
From 997a8965db8417266bea3fbdcfa3e5655a1b52fa Mon Sep 17 00:00:00 2001
From: John Crispin <blogic@openwrt.org>
Date: Tue, 9 Sep 2014 23:12:15 +0200
Subject: [PATCH 18/36] MTD: nand: lots of xrx200 fixes

Signed-off-by: John Crispin <blogic@openwrt.org>
---
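Note (kept below the "---" marker, so not part of the commit message): the
patch adds an optional "req-mask" device-tree property, read in
xway_nand_probe() as a 32-bit bitmask of the PCI REQ lines to mask while the
NAND is being accessed. A board that needs this, such as the BT Home Hub 2B,
would set the property in its NAND node. The fragment below is only a sketch;
the node name, the compatible string and the mask value are illustrative and
not taken from this patch.

	nand@0 {
		compatible = "lantiq,nand-xway";
		lantiq,cs = <1>;
		/* example value: bits select the PCI REQ lines to mask */
		req-mask = <0x1>;
	};
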
 drivers/mtd/nand/xway_nand.c |   63 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 63 insertions(+)

diff --git a/drivers/mtd/nand/xway_nand.c b/drivers/mtd/nand/xway_nand.c
index e430f2d..fedf2c4 100644
--- a/drivers/mtd/nand/xway_nand.c
+++ b/drivers/mtd/nand/xway_nand.c
@@ -54,8 +54,27 @@
 #define NAND_CON_CSMUX         (1 << 1)
 #define NAND_CON_NANDM         1
 
+#define DANUBE_PCI_REG32( addr )    (*(volatile u32 *)(addr))
+#define PCI_CR_PR_OFFSET           (KSEG1+0x1E105400)
+#define PCI_CR_PC_ARB              (PCI_CR_PR_OFFSET + 0x0080)
+
 static u32 xway_latchcmd;
 
+/*
+ * req_mask provides a mechanism to prevent interference between
+ * NAND and PCI (probably only relevant for the BT Home Hub 2B).
+ * Setting it causes the corresponding PCI REQ pins to be masked
+ * during NAND access, and also moves EBU locking from the read/write
+ * functions to the chip-select function to ensure that the whole
+ * operation runs with interrupts disabled.
+ * In addition it switches on some extra waiting in xway_cmd_ctrl().
+ * This seems to be necessary if the ebu_cs1 pin has open-drain disabled,
+ * which in turn seems to be necessary for the NOR chip to be recognised
+ * reliably on a board (Home Hub 2B again) which has both NOR and NAND.
+ */
+
+static __be32 req_mask = 0;
+
 static void xway_reset_chip(struct nand_chip *chip)
 {
        unsigned long nandaddr = (unsigned long) chip->IO_ADDR_W;
@@ -86,12 +105,24 @@ static void xway_select_chip(struct mtd_info *mtd, int chip)
        case -1:
                ltq_ebu_w32_mask(NAND_CON_CE, 0, EBU_NAND_CON);
                ltq_ebu_w32_mask(NAND_CON_NANDM, 0, EBU_NAND_CON);
+
+               if (req_mask) {
+                       /* Unmask all external PCI requests */
+                       DANUBE_PCI_REG32(PCI_CR_PC_ARB) &= ~(req_mask << 16);
+               }
                spin_unlock_irqrestore(&ebu_lock, csflags);
+
                break;
        case 0:
                spin_lock_irqsave(&ebu_lock, csflags);
+               if (req_mask) {
+                       /* Mask all external PCI requests */
+                       DANUBE_PCI_REG32(PCI_CR_PC_ARB) |= (req_mask << 16);
+               }
+
                ltq_ebu_w32_mask(0, NAND_CON_NANDM, EBU_NAND_CON);
                ltq_ebu_w32_mask(0, NAND_CON_CE, EBU_NAND_CON);
+
                break;
        default:
                BUG();
@@ -103,6 +134,12 @@ static void xway_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
        struct nand_chip *this = mtd->priv;
        unsigned long nandaddr = (unsigned long) this->IO_ADDR_W;
 
+       if (req_mask) {
+               if (cmd != NAND_CMD_STATUS)
+                       ltq_ebu_w32(EBU_NAND_WAIT, 0); /* Clear nand ready */
+       }
+
+
        if (ctrl & NAND_CTRL_CHANGE) {
                if (ctrl & NAND_CLE)
                        xway_latchcmd = NAND_WRITE_CMD;
@@ -115,6 +152,24 @@ static void xway_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
                while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
                        ;
        }
+
+       if (req_mask) {
+              /*
+               * program and erase have their own busy handlers;
+               * status, sequential-in and read-id need no delay
+               */
+               switch (cmd) {
+                       case NAND_CMD_ERASE1:
+                       case NAND_CMD_SEQIN:
+                       case NAND_CMD_STATUS:
+                       case NAND_CMD_READID:
+                       return;
+               }
+
+               /* wait until command is processed */
+               while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_RD) == 0)
+                       ;
+       }
 }
 
 static int xway_dev_ready(struct mtd_info *mtd)
@@ -157,6 +212,8 @@ static int xway_nand_probe(struct platform_device *pdev)
 {
        struct nand_chip *this = platform_get_drvdata(pdev);
        unsigned long nandaddr = (unsigned long) this->IO_ADDR_W;
+       const __be32 *req_mask_ptr = of_get_property(pdev->dev.of_node,
+                                       "req-mask", NULL);
        const __be32 *cs = of_get_property(pdev->dev.of_node,
                                        "lantiq,cs", NULL);
        u32 cs_flag = 0;
@@ -165,6 +222,12 @@ static int xway_nand_probe(struct platform_device *pdev)
        if (cs && (*cs == 1))
                cs_flag = NAND_CON_IN_CS1 | NAND_CON_OUT_CS1;
 
+       /*
+        * Load the PCI REQ lines to mask from the device tree. If the
+        * property is absent, req_mask defaults to 0 and masking is disabled.
+        */
+       req_mask = (req_mask_ptr ? *req_mask_ptr : 0);
+
        /* setup the EBU to run in NAND mode on our base addr */
        ltq_ebu_w32(CPHYSADDR(nandaddr)
                | ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1);
-- 
1.7.10.4
