openwrt.git: target/linux/bcm53xx/patches-3.14/420-mtd-bcm5301x_nand.patch
1 --- a/drivers/mtd/nand/Kconfig
2 +++ b/drivers/mtd/nand/Kconfig
3 @@ -510,4 +510,10 @@ config MTD_NAND_XWAY
4           Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached
5           to the External Bus Unit (EBU).
6  
7 +config MTD_NAND_BCM
8 +       tristate "Support for NAND on some Broadcom SoCs"
9 +       help
10 +         This driver is currently used for the NAND flash controller on the
11 +         Broadcom BCM5301X (NorthStar) SoCs.
12 +
13  endif # MTD_NAND
14 --- a/drivers/mtd/nand/Makefile
15 +++ b/drivers/mtd/nand/Makefile
16 @@ -49,5 +49,6 @@ obj-$(CONFIG_MTD_NAND_JZ4740)         += jz4740
17  obj-$(CONFIG_MTD_NAND_GPMI_NAND)       += gpmi-nand/
18  obj-$(CONFIG_MTD_NAND_XWAY)            += xway_nand.o
19  obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH)   += bcm47xxnflash/
20 +obj-$(CONFIG_MTD_NAND_BCM)             += bcm_nand.o
21  
22  nand-objs := nand_base.o nand_bbt.o
23 --- /dev/null
24 +++ b/drivers/mtd/nand/bcm_nand.c
25 @@ -0,0 +1,1583 @@
26 +/*
27 + * NorthStar NAND controller driver
28 + *
29 + * (c) Broadcom, Inc. 2012 All Rights Reserved.
30 + * Copyright 2014 Hauke Mehrtens <hauke@hauke-m.de>
31 + *
32 + * Licensed under the GNU/GPL. See COPYING for details.
33 + *
34 + * This module interfaces the NAND controller and hardware ECC capabilities
35 + * to the generic NAND chip support in the NAND library.
36 + *
37 + * Notes:
38 + *     This driver depends on the generic NAND driver, but works at the
39 + *     page level for operations.
40 + *
41 + *     When a page is written, the calculated ECC also protects the OOB
42 + *     bytes not used by ECC, so the new OOB must be combined with any
43 + *     OOB data that preceded the page-write operation in order for the
44 + *     ECC to be calculated correctly.
45 + *     Also, when the page is erased but the OOB data is not, HW ECC will
46 + *     indicate an error, because it checks the OOB too, which calls for
47 + *     some help from the software in this driver.
48 + *
49 + * TBD:
50 + *     Block locking/unlocking support, OTP support
51 + */
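+/*
+ * Sketch of the OOB merge described above, as implemented further down in
+ * bcmnand_write_page_do() (simplified; "requested_oob" is a hypothetical
+ * name for the caller-supplied OOB bytes):
+ *
+ *     read current OOB from flash into chip->oob_poi;
+ *     for (i = 0; i < mtd->oobsize; i++)
+ *             chip->oob_poi[i] &= requested_oob[i];
+ *
+ * NAND programming can only clear bits, and the HW ECC also covers the
+ * spare bytes, so the merged OOB is what the ECC must be computed over.
+ */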
52 +
53 +
54 +#include <linux/kernel.h>
55 +#include <linux/module.h>
56 +#include <linux/io.h>
57 +#include <linux/ioport.h>
58 +#include <linux/interrupt.h>
59 +#include <linux/delay.h>
60 +#include <linux/err.h>
61 +#include <linux/slab.h>
62 +#include <linux/bcma/bcma.h>
63 +#include <linux/of_irq.h>
64 +
65 +#include <linux/mtd/mtd.h>
66 +#include <linux/mtd/nand.h>
67 +#include <linux/mtd/partitions.h>
68 +
69 +#define NANDC_MAX_CHIPS                2       /* Only 2 CSn supported in NorthStar */
70 +
71 +/*
72 + * Driver private control structure
73 + */
74 +struct bcmnand_ctrl {
75 +       struct mtd_info         mtd;
76 +       struct nand_chip        nand;
77 +       struct bcma_device      *core;
78 +
79 +       struct completion       op_completion;
80 +
81 +       struct nand_ecclayout   ecclayout;
82 +       int                     cmd_ret;        /* saved error code */
83 +       unsigned char           oob_index;
84 +       unsigned char           id_byte_index;
85 +       unsigned char           chip_num;
86 +       unsigned char           last_cmd;
87 +       unsigned char           ecc_level;
88 +       unsigned char           sector_size_shift;
89 +       unsigned char           sec_per_page_shift;
90 +};
91 +
92 +
93 +/*
94 + * IRQ numbers - offset from first irq in nandc_irq resource
95 + */
96 +#define NANDC_IRQ_RD_MISS              0
97 +#define NANDC_IRQ_ERASE_COMPLETE       1
98 +#define NANDC_IRQ_COPYBACK_COMPLETE    2
99 +#define NANDC_IRQ_PROGRAM_COMPLETE     3
100 +#define NANDC_IRQ_CONTROLLER_RDY       4
101 +#define NANDC_IRQ_RDBSY_RDY            5
102 +#define NANDC_IRQ_ECC_UNCORRECTABLE    6
103 +#define NANDC_IRQ_ECC_CORRECTABLE      7
104 +#define NANDC_IRQ_NUM                  8
105 +
106 +struct bcmnand_reg_field {
107 +       unsigned int reg;
108 +       unsigned int pos;
109 +       unsigned int width;
110 +};
111 +
112 +/*
113 + * REGISTERS
114 + *
115 + * Individual bit-fields of registers are specified here
116 + * for clarity, and the rest of the code will access each field
117 + * as if it was its own register.
118 + *
119 + * Following registers are off <reg_base>:
120 + */
121 +#define REG_BIT_FIELD(r, p, w) ((struct bcmnand_reg_field){(r), (p), (w)})
122 +
123 +#define NANDC_8KB_PAGE_SUPPORT         REG_BIT_FIELD(0x0, 31, 1)
124 +#define NANDC_REV_MAJOR                        REG_BIT_FIELD(0x0, 8, 8)
125 +#define NANDC_REV_MINOR                        REG_BIT_FIELD(0x0, 0, 8)
126 +
127 +#define NANDC_CMD_START_OPCODE         REG_BIT_FIELD(0x4, 24, 5)
128 +
129 +#define NANDC_CMD_CS_SEL               REG_BIT_FIELD(0x8, 16, 3)
130 +#define NANDC_CMD_EXT_ADDR             REG_BIT_FIELD(0x8, 0, 16)
131 +
132 +#define NANDC_CMD_ADDRESS              REG_BIT_FIELD(0xc, 0, 32)
133 +#define NANDC_CMD_END_ADDRESS          REG_BIT_FIELD(0x10, 0, 32)
134 +
135 +#define NANDC_INT_STATUS               REG_BIT_FIELD(0x14, 0, 32)
136 +#define NANDC_INT_STAT_CTLR_RDY                REG_BIT_FIELD(0x14, 31, 1)
137 +#define NANDC_INT_STAT_FLASH_RDY       REG_BIT_FIELD(0x14, 30, 1)
138 +#define NANDC_INT_STAT_CACHE_VALID     REG_BIT_FIELD(0x14, 29, 1)
139 +#define NANDC_INT_STAT_SPARE_VALID     REG_BIT_FIELD(0x14, 28, 1)
140 +#define NANDC_INT_STAT_ERASED          REG_BIT_FIELD(0x14, 27, 1)
141 +#define NANDC_INT_STAT_PLANE_RDY       REG_BIT_FIELD(0x14, 26, 1)
142 +#define NANDC_INT_STAT_FLASH_STATUS    REG_BIT_FIELD(0x14, 0, 8)
143 +
144 +#define NANDC_CS_LOCK                  REG_BIT_FIELD(0x18, 31, 1)
145 +#define NANDC_CS_AUTO_CONFIG           REG_BIT_FIELD(0x18, 30, 1)
146 +#define NANDC_CS_NAND_WP               REG_BIT_FIELD(0x18, 29, 1)
147 +#define NANDC_CS_BLK0_WP               REG_BIT_FIELD(0x18, 28, 1)
148 +#define NANDC_CS_SW_USING_CS(n)                REG_BIT_FIELD(0x18, 8+(n), 1)
149 +#define NANDC_CS_MAP_SEL_CS(n)         REG_BIT_FIELD(0x18, 0+(n), 1)
150 +
151 +#define NANDC_XOR_ADDR_BLK0_ONLY       REG_BIT_FIELD(0x1c, 31, 1)
152 +#define NANDC_XOR_ADDR_CS(n)           REG_BIT_FIELD(0x1c, 0+(n), 1)
153 +
154 +#define NANDC_LL_OP_RET_IDLE           REG_BIT_FIELD(0x20, 31, 1)
155 +#define NANDC_LL_OP_CLE                        REG_BIT_FIELD(0x20, 19, 1)
156 +#define NANDC_LL_OP_ALE                        REG_BIT_FIELD(0x20, 18, 1)
157 +#define NANDC_LL_OP_WE                 REG_BIT_FIELD(0x20, 17, 1)
158 +#define NANDC_LL_OP_RE                 REG_BIT_FIELD(0x20, 16, 1)
159 +#define NANDC_LL_OP_DATA               REG_BIT_FIELD(0x20, 0, 16)
160 +
161 +#define NANDC_MPLANE_ADDR_EXT          REG_BIT_FIELD(0x24, 0, 16)
162 +#define NANDC_MPLANE_ADDR              REG_BIT_FIELD(0x28, 0, 32)
163 +
164 +#define NANDC_ACC_CTRL_CS(n)           REG_BIT_FIELD(0x50+((n)<<4), 0, 32)
165 +#define NANDC_ACC_CTRL_RD_ECC(n)       REG_BIT_FIELD(0x50+((n)<<4), 31, 1)
166 +#define NANDC_ACC_CTRL_WR_ECC(n)       REG_BIT_FIELD(0x50+((n)<<4), 30, 1)
167 +#define NANDC_ACC_CTRL_CE_CARE(n)      REG_BIT_FIELD(0x50+((n)<<4), 29, 1)
168 +#define NANDC_ACC_CTRL_PGM_RDIN(n)     REG_BIT_FIELD(0x50+((n)<<4), 28, 1)
169 +#define NANDC_ACC_CTRL_ERA_ECC_ERR(n)  REG_BIT_FIELD(0x50+((n)<<4), 27, 1)
170 +#define NANDC_ACC_CTRL_PGM_PARTIAL(n)  REG_BIT_FIELD(0x50+((n)<<4), 26, 1)
171 +#define NANDC_ACC_CTRL_WR_PREEMPT(n)   REG_BIT_FIELD(0x50+((n)<<4), 25, 1)
172 +#define NANDC_ACC_CTRL_PG_HIT(n)       REG_BIT_FIELD(0x50+((n)<<4), 24, 1)
173 +#define NANDC_ACC_CTRL_PREFETCH(n)     REG_BIT_FIELD(0x50+((n)<<4), 23, 1)
174 +#define NANDC_ACC_CTRL_CACHE_MODE(n)   REG_BIT_FIELD(0x50+((n)<<4), 22, 1)
175 +#define NANDC_ACC_CTRL_CACHE_LASTPG(n) REG_BIT_FIELD(0x50+((n)<<4), 21, 1)
176 +#define NANDC_ACC_CTRL_ECC_LEVEL(n)    REG_BIT_FIELD(0x50+((n)<<4), 16, 5)
177 +#define NANDC_ACC_CTRL_SECTOR_1K(n)    REG_BIT_FIELD(0x50+((n)<<4), 7, 1)
178 +#define NANDC_ACC_CTRL_SPARE_SIZE(n)   REG_BIT_FIELD(0x50+((n)<<4), 0, 7)
179 +
180 +#define NANDC_CONFIG_CS(n)             REG_BIT_FIELD(0x54+((n)<<4), 0, 32)
181 +#define NANDC_CONFIG_LOCK(n)           REG_BIT_FIELD(0x54+((n)<<4), 31, 1)
182 +#define NANDC_CONFIG_BLK_SIZE(n)       REG_BIT_FIELD(0x54+((n)<<4), 28, 3)
183 +#define NANDC_CONFIG_CHIP_SIZE(n)      REG_BIT_FIELD(0x54+((n)<<4), 24, 4)
184 +#define NANDC_CONFIG_CHIP_WIDTH(n)     REG_BIT_FIELD(0x54+((n)<<4), 23, 1)
185 +#define NANDC_CONFIG_PAGE_SIZE(n)      REG_BIT_FIELD(0x54+((n)<<4), 20, 2)
186 +#define NANDC_CONFIG_FUL_ADDR_BYTES(n) REG_BIT_FIELD(0x54+((n)<<4), 16, 3)
187 +#define NANDC_CONFIG_COL_ADDR_BYTES(n) REG_BIT_FIELD(0x54+((n)<<4), 12, 3)
188 +#define NANDC_CONFIG_BLK_ADDR_BYTES(n) REG_BIT_FIELD(0x54+((n)<<4), 8, 3)
189 +
190 +#define NANDC_TIMING_1_CS(n)           REG_BIT_FIELD(0x58+((n)<<4), 0, 32)
191 +#define NANDC_TIMING_2_CS(n)           REG_BIT_FIELD(0x5c+((n)<<4), 0, 32)
192 +       /* Individual bits for Timing registers - TBD */
193 +
194 +#define NANDC_CORR_STAT_THRESH_CS(n)   REG_BIT_FIELD(0xc0, 6*(n), 6)
195 +
196 +#define NANDC_BLK_WP_END_ADDR          REG_BIT_FIELD(0xc8, 0, 32)
197 +
198 +#define NANDC_MPLANE_ERASE_CYC2_OPCODE REG_BIT_FIELD(0xcc, 24, 8)
199 +#define NANDC_MPLANE_READ_STAT_OPCODE  REG_BIT_FIELD(0xcc, 16, 8)
200 +#define NANDC_MPLANE_PROG_ODD_OPCODE   REG_BIT_FIELD(0xcc, 8, 8)
201 +#define NANDC_MPLANE_PROG_TRL_OPCODE   REG_BIT_FIELD(0xcc, 0, 8)
202 +
203 +#define NANDC_MPLANE_PGCACHE_TRL_OPCODE        REG_BIT_FIELD(0xd0, 24, 8)
204 +#define NANDC_MPLANE_READ_STAT2_OPCODE REG_BIT_FIELD(0xd0, 16, 8)
205 +#define NANDC_MPLANE_READ_EVEN_OPCODE  REG_BIT_FIELD(0xd0, 8, 8)
206 +#define NANDC_MPLANE_READ_ODD__OPCODE  REG_BIT_FIELD(0xd0, 0, 8)
207 +
208 +#define NANDC_MPLANE_CTRL_ERASE_CYC2_EN        REG_BIT_FIELD(0xd4, 31, 1)
209 +#define NANDC_MPLANE_CTRL_RD_ADDR_SIZE REG_BIT_FIELD(0xd4, 30, 1)
210 +#define NANDC_MPLANE_CTRL_RD_CYC_ADDR  REG_BIT_FIELD(0xd4, 29, 1)
211 +#define NANDC_MPLANE_CTRL_RD_COL_ADDR  REG_BIT_FIELD(0xd4, 28, 1)
212 +
213 +#define NANDC_UNCORR_ERR_COUNT         REG_BIT_FIELD(0xfc, 0, 32)
214 +
215 +#define NANDC_CORR_ERR_COUNT           REG_BIT_FIELD(0x100, 0, 32)
216 +
217 +#define NANDC_READ_CORR_BIT_COUNT      REG_BIT_FIELD(0x104, 0, 32)
218 +
219 +#define NANDC_BLOCK_LOCK_STATUS                REG_BIT_FIELD(0x108, 0, 8)
220 +
221 +#define NANDC_ECC_CORR_ADDR_CS         REG_BIT_FIELD(0x10c, 16, 3)
222 +#define NANDC_ECC_CORR_ADDR_EXT                REG_BIT_FIELD(0x10c, 0, 16)
223 +
224 +#define NANDC_ECC_CORR_ADDR            REG_BIT_FIELD(0x110, 0, 32)
225 +
226 +#define NANDC_ECC_UNC_ADDR_CS          REG_BIT_FIELD(0x114, 16, 3)
227 +#define NANDC_ECC_UNC_ADDR_EXT         REG_BIT_FIELD(0x114, 0, 16)
228 +
229 +#define NANDC_ECC_UNC_ADDR             REG_BIT_FIELD(0x118, 0, 32)
230 +
231 +#define NANDC_READ_ADDR_CS             REG_BIT_FIELD(0x11c, 16, 3)
232 +#define NANDC_READ_ADDR_EXT            REG_BIT_FIELD(0x11c, 0, 16)
233 +#define NANDC_READ_ADDR                        REG_BIT_FIELD(0x120, 0, 32)
234 +
235 +#define NANDC_PROG_ADDR_CS             REG_BIT_FIELD(0x124, 16, 3)
236 +#define NANDC_PROG_ADDR_EXT            REG_BIT_FIELD(0x124, 0, 16)
237 +#define NANDC_PROG_ADDR                        REG_BIT_FIELD(0x128, 0, 32)
238 +
239 +#define NANDC_CPYBK_ADDR_CS            REG_BIT_FIELD(0x12c, 16, 3)
240 +#define NANDC_CPYBK_ADDR_EXT           REG_BIT_FIELD(0x12c, 0, 16)
241 +#define NANDC_CPYBK_ADDR               REG_BIT_FIELD(0x130, 0, 32)
242 +
243 +#define NANDC_ERASE_ADDR_CS            REG_BIT_FIELD(0x134, 16, 3)
244 +#define NANDC_ERASE_ADDR_EXT           REG_BIT_FIELD(0x134, 0, 16)
245 +#define NANDC_ERASE_ADDR               REG_BIT_FIELD(0x138, 0, 32)
246 +
247 +#define NANDC_INV_READ_ADDR_CS         REG_BIT_FIELD(0x13c, 16, 3)
248 +#define NANDC_INV_READ_ADDR_EXT                REG_BIT_FIELD(0x13c, 0, 16)
249 +#define NANDC_INV_READ_ADDR            REG_BIT_FIELD(0x140, 0, 32)
250 +
251 +#define NANDC_INIT_STAT                        REG_BIT_FIELD(0x144, 0, 32)
252 +#define NANDC_INIT_ONFI_DONE           REG_BIT_FIELD(0x144, 31, 1)
253 +#define NANDC_INIT_DEVID_DONE          REG_BIT_FIELD(0x144, 30, 1)
254 +#define NANDC_INIT_SUCCESS             REG_BIT_FIELD(0x144, 29, 1)
255 +#define NANDC_INIT_FAIL                        REG_BIT_FIELD(0x144, 28, 1)
256 +#define NANDC_INIT_BLANK               REG_BIT_FIELD(0x144, 27, 1)
257 +#define NANDC_INIT_TIMEOUT             REG_BIT_FIELD(0x144, 26, 1)
258 +#define NANDC_INIT_UNC_ERROR           REG_BIT_FIELD(0x144, 25, 1)
259 +#define NANDC_INIT_CORR_ERROR          REG_BIT_FIELD(0x144, 24, 1)
260 +#define NANDC_INIT_PARAM_RDY           REG_BIT_FIELD(0x144, 23, 1)
261 +#define NANDC_INIT_AUTH_FAIL           REG_BIT_FIELD(0x144, 22, 1)
262 +
263 +#define NANDC_ONFI_STAT                        REG_BIT_FIELD(0x148, 0, 32)
264 +#define NANDC_ONFI_DEBUG               REG_BIT_FIELD(0x148, 28, 4)
265 +#define NANDC_ONFI_PRESENT             REG_BIT_FIELD(0x148, 27, 1)
266 +#define NANDC_ONFI_BADID_PG2           REG_BIT_FIELD(0x148, 5, 1)
267 +#define NANDC_ONFI_BADID_PG1           REG_BIT_FIELD(0x148, 4, 1)
268 +#define NANDC_ONFI_BADID_PG0           REG_BIT_FIELD(0x148, 3, 1)
269 +#define NANDC_ONFI_BADCRC_PG2          REG_BIT_FIELD(0x148, 2, 1)
270 +#define NANDC_ONFI_BADCRC_PG1          REG_BIT_FIELD(0x148, 1, 1)
271 +#define NANDC_ONFI_BADCRC_PG0          REG_BIT_FIELD(0x148, 0, 1)
272 +
273 +#define NANDC_ONFI_DEBUG_DATA          REG_BIT_FIELD(0x14c, 0, 32)
274 +
275 +#define NANDC_SEMAPHORE                        REG_BIT_FIELD(0x150, 0, 8)
276 +
277 +#define NANDC_DEVID_BYTE(b)            REG_BIT_FIELD(0x194+((b)&0x4), \
278 +                                               24-(((b)&3)<<3), 8)
279 +
280 +#define NANDC_LL_RDDATA                        REG_BIT_FIELD(0x19c, 0, 16)
281 +
282 +#define NANDC_INT_N_REG(n)             REG_BIT_FIELD(0xf00|((n)<<2), 0, 1)
283 +#define NANDC_INT_DIREC_READ_MISS      REG_BIT_FIELD(0xf00, 0, 1)
284 +#define NANDC_INT_ERASE_DONE           REG_BIT_FIELD(0xf04, 0, 1)
285 +#define NANDC_INT_CPYBK_DONE           REG_BIT_FIELD(0xf08, 0, 1)
286 +#define NANDC_INT_PROGRAM_DONE         REG_BIT_FIELD(0xf0c, 0, 1)
287 +#define NANDC_INT_CONTROLLER_RDY       REG_BIT_FIELD(0xf10, 0, 1)
288 +#define NANDC_INT_RDBSY_RDY            REG_BIT_FIELD(0xf14, 0, 1)
289 +#define NANDC_INT_ECC_UNCORRECTABLE    REG_BIT_FIELD(0xf18, 0, 1)
290 +#define NANDC_INT_ECC_CORRECTABLE      REG_BIT_FIELD(0xf1c, 0, 1)
291 +
292 +/*
293 + * The following registers are treated as contiguous IO memory, offsets are from
294 + * <reg_base>, and the data is in big-endian byte order
295 + */
296 +#define NANDC_SPARE_AREA_READ_OFF      0x200
297 +#define NANDC_SPARE_AREA_WRITE_OFF     0x280
298 +#define NANDC_CACHE_OFF                        0x400
299 +#define NANDC_CACHE_SIZE               (128*4)
300 +
301 +struct bcmnand_areg_field {
302 +       unsigned int reg;
303 +       unsigned int pos;
304 +       unsigned int width;
305 +};
306 +
307 +/*
308 + * The following IDM (a.k.a. Slave Wrapper) registers are off <idm_base>:
309 + */
310 +#define IDMREG_BIT_FIELD(r, p, w)      ((struct bcmnand_areg_field){(r), (p), (w)})
311 +
312 +#define NANDC_IDM_AXI_BIG_ENDIAN       IDMREG_BIT_FIELD(0x408, 28, 1)
313 +#define NANDC_IDM_APB_LITTLE_ENDIAN    IDMREG_BIT_FIELD(0x408, 24, 1)
314 +#define NANDC_IDM_TM                   IDMREG_BIT_FIELD(0x408, 16, 5)
315 +#define NANDC_IDM_IRQ_CORRECABLE_EN    IDMREG_BIT_FIELD(0x408, 9, 1)
316 +#define NANDC_IDM_IRQ_UNCORRECABLE_EN  IDMREG_BIT_FIELD(0x408, 8, 1)
317 +#define NANDC_IDM_IRQ_RDYBSY_RDY_EN    IDMREG_BIT_FIELD(0x408, 7, 1)
318 +#define NANDC_IDM_IRQ_CONTROLLER_RDY_EN        IDMREG_BIT_FIELD(0x408, 6, 1)
319 +#define NANDC_IDM_IRQ_PRPOGRAM_COMP_EN IDMREG_BIT_FIELD(0x408, 5, 1)
320 +#define NANDC_IDM_IRQ_COPYBK_COMP_EN   IDMREG_BIT_FIELD(0x408, 4, 1)
321 +#define NANDC_IDM_IRQ_ERASE_COMP_EN    IDMREG_BIT_FIELD(0x408, 3, 1)
322 +#define NANDC_IDM_IRQ_READ_MISS_EN     IDMREG_BIT_FIELD(0x408, 2, 1)
323 +#define NANDC_IDM_IRQ_N_EN(n)          IDMREG_BIT_FIELD(0x408, 2+(n), 1)
324 +
325 +#define NANDC_IDM_CLOCK_EN             IDMREG_BIT_FIELD(0x408, 0, 1)
326 +
327 +#define NANDC_IDM_IO_ECC_CORR          IDMREG_BIT_FIELD(0x500, 3, 1)
328 +#define NANDC_IDM_IO_ECC_UNCORR                IDMREG_BIT_FIELD(0x500, 2, 1)
329 +#define NANDC_IDM_IO_RDYBSY            IDMREG_BIT_FIELD(0x500, 1, 1)
330 +#define NANDC_IDM_IO_CTRL_RDY          IDMREG_BIT_FIELD(0x500, 0, 1)
331 +
332 +#define NANDC_IDM_RESET                        IDMREG_BIT_FIELD(0x800, 0, 1)
333 +       /* Remaining IDM registers do not seem to be useful, skipped */
334 +
335 +/*
336 + * NAND Controller has its own command opcodes
337 + * different from opcodes sent to the actual flash chip
338 + */
339 +#define NANDC_CMD_OPCODE_NULL          0
340 +#define NANDC_CMD_OPCODE_PAGE_READ     1
341 +#define NANDC_CMD_OPCODE_SPARE_READ    2
342 +#define NANDC_CMD_OPCODE_STATUS_READ   3
343 +#define NANDC_CMD_OPCODE_PAGE_PROG     4
344 +#define NANDC_CMD_OPCODE_SPARE_PROG    5
345 +#define NANDC_CMD_OPCODE_DEVID_READ    7
346 +#define NANDC_CMD_OPCODE_BLOCK_ERASE   8
347 +#define NANDC_CMD_OPCODE_FLASH_RESET   9
348 +
349 +/*
350 + * NAND Controller hardware ECC data size
351 + *
352 + * The following table contains the number of bytes needed for
353 + * each of the ECC levels, per "sector", which is either 512 or 1024 bytes.
354 + * The actual layout is as follows:
355 + * The entire spare area is equally divided into as many sections as there
356 + * are sectors per page, and the ECC data is located at the end of each
357 + * of these sections.
358 + * For example, given a device with 2K pages and 64 spare bytes, configured
359 + * for a 1k sector size and an ECC level of 4, the spare area is divided into
360 + * 2 sections of 32 bytes each, and the last 14 bytes of each 32-byte section
361 + * are filled with ECC data.
362 + * Note: the name of the algorithm and the number of error bits it can correct
363 + * is of no consequence to this driver, and is therefore omitted.
364 + */
365 +struct bcmnand_ecc_size_s {
366 +       unsigned char sector_size_shift;
367 +       unsigned char ecc_level;
368 +       unsigned char ecc_bytes_per_sec;
369 +       unsigned char reserved;
370 +};
371 +
372 +static const struct bcmnand_ecc_size_s bcmnand_ecc_sizes[] = {
373 +       { 9,    0,      0 },
374 +       { 10,   0,      0 },
375 +       { 9,    1,      2 },
376 +       { 10,   1,      4 },
377 +       { 9,    2,      4 },
378 +       { 10,   2,      7 },
379 +       { 9,    3,      6 },
380 +       { 10,   3,      11 },
381 +       { 9,    4,      7 },
382 +       { 10,   4,      14 },
383 +       { 9,    5,      9 },
384 +       { 10,   5,      18 },
385 +       { 9,    6,      11 },
386 +       { 10,   6,      21 },
387 +       { 9,    7,      13 },
388 +       { 10,   7,      25 },
389 +       { 9,    8,      14 },
390 +       { 10,   8,      28 },
391 +
392 +       { 9,    9,      16 },
393 +       { 9,    10,     18 },
394 +       { 9,    11,     20 },
395 +       { 9,    12,     21 },
396 +
397 +       { 10,   9,      32 },
398 +       { 10,   10,     35 },
399 +       { 10,   11,     39 },
400 +       { 10,   12,     42 },
401 +};
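+/*
+ * Note: the table above is equivalent to the fallback formulas used in
+ * bcmnand_hw_ecc_layout() below, roughly 14 ECC bits per ECC level per
+ * 512 bytes of data:
+ *
+ *     512-byte sector:  ecc_bytes_per_sec = DIV_ROUND_UP(ecc_level * 14, 8)
+ *     1024-byte sector: ecc_bytes_per_sec = DIV_ROUND_UP(ecc_level * 14, 4)
+ *
+ * e.g. ECC level 4 with 1k sectors needs 14 bytes per sector, matching the
+ * worked example in the comment above.
+ */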
402 +
403 +/*
404 + * Populate the various fields that depend on how
405 + * the hardware ECC data is located in the spare area
406 + *
407 + * For this controller, it is easier to fill in these
408 + * structures at run time.
409 + *
410 + * The bad-block marker is assumed to occupy one byte
411 + * at chip->badblockpos, which must be in the first
412 + * sector of the spare area, namely it is either
413 + * at offset 0 or 5.
414 + * Some chips use both for manufacturer's bad block
415 + * markers, but we ignore that issue here, and assume that only
416 + * one byte is always used as the bad-block marker.
417 + */
418 +static int bcmnand_hw_ecc_layout(struct bcmnand_ctrl *ctrl)
419 +{
420 +       struct nand_ecclayout *layout;
421 +       struct device *dev = &ctrl->core->dev;
422 +       unsigned int i, j, k;
423 +       unsigned int ecc_per_sec, oob_per_sec;
424 +       unsigned int bbm_pos = ctrl->nand.badblockpos;
425 +
426 +       /* Calculate the spare area size per sector */
427 +       oob_per_sec = ctrl->mtd.oobsize >> ctrl->sec_per_page_shift;
428 +
429 +       /* Try to calculate the amount of ECC bytes per sector with a formula */
430 +       if (ctrl->sector_size_shift == 9)
431 +               ecc_per_sec = ((ctrl->ecc_level * 14) + 7) >> 3;
432 +       else if (ctrl->sector_size_shift == 10)
433 +               ecc_per_sec = ((ctrl->ecc_level * 14) + 3) >> 2;
434 +       else
435 +               ecc_per_sec = oob_per_sec + 1;  /* cause an error if not in table */
436 +
437 +       /* Now find out the answer according to the table */
438 +       for (i = 0; i < ARRAY_SIZE(bcmnand_ecc_sizes); i++) {
439 +               if (bcmnand_ecc_sizes[i].ecc_level == ctrl->ecc_level &&
440 +                   bcmnand_ecc_sizes[i].sector_size_shift ==
441 +                               ctrl->sector_size_shift) {
442 +                       break;
443 +               }
444 +       }
445 +
446 +       /* Table match overrides formula */
447 +       if (bcmnand_ecc_sizes[i].ecc_level == ctrl->ecc_level &&
448 +           bcmnand_ecc_sizes[i].sector_size_shift == ctrl->sector_size_shift)
449 +               ecc_per_sec = bcmnand_ecc_sizes[i].ecc_bytes_per_sec;
450 +
451 +       /* Return an error if calculated ECC leaves no room for OOB */
452 +       if ((ctrl->sec_per_page_shift != 0 && ecc_per_sec >= oob_per_sec) ||
453 +           (ctrl->sec_per_page_shift == 0 && ecc_per_sec >= (oob_per_sec - 1))) {
454 +               dev_err(dev, "ECC level %d too high, leaves no room for OOB data\n",
455 +                       ctrl->ecc_level);
456 +               return -EINVAL;
457 +       }
458 +
459 +       /* Fill in the needed fields */
460 +       ctrl->nand.ecc.size = ctrl->mtd.writesize >> ctrl->sec_per_page_shift;
461 +       ctrl->nand.ecc.bytes = ecc_per_sec;
462 +       ctrl->nand.ecc.steps = 1 << ctrl->sec_per_page_shift;
463 +       ctrl->nand.ecc.total = ecc_per_sec << ctrl->sec_per_page_shift;
464 +       ctrl->nand.ecc.strength = ctrl->ecc_level;
465 +
466 +       /* Build an ecc layout data structure */
467 +       layout = &ctrl->ecclayout;
468 +       memset(layout, 0, sizeof(*layout));
469 +
470 +       /* Total number of bytes used by HW ECC */
471 +       layout->eccbytes = ecc_per_sec << ctrl->sec_per_page_shift;
472 +
473 +       /* Location for each of the HW ECC bytes */
474 +       for (i = j = 0, k = 1;
475 +            i < ARRAY_SIZE(layout->eccpos) && i < layout->eccbytes;
476 +            i++, j++) {
477 +               /* switch sector # */
478 +               if (j == ecc_per_sec) {
479 +                       j = 0;
480 +                       k++;
481 +               }
482 +               /* save position of each HW-generated ECC byte */
483 +               layout->eccpos[i] = (oob_per_sec * k) - ecc_per_sec + j;
484 +
485 +               /* Check that HW ECC does not overlap bad-block marker */
486 +               if (bbm_pos == layout->eccpos[i]) {
487 +                       dev_err(dev, "ECC level %d too high, HW ECC collides with bad-block marker position\n",
488 +                               ctrl->ecc_level);
489 +                       return -EINVAL;
490 +               }
491 +       }
492 +
493 +       /* Location of all user-available OOB byte-ranges */
494 +       for (i = 0; i < ARRAY_SIZE(layout->oobfree); i++) {
495 +               struct nand_oobfree *oobfree = &layout->oobfree[i];
496 +
497 +               if (i >= (1 << ctrl->sec_per_page_shift))
498 +                       break;
499 +               oobfree->offset = oob_per_sec * i;
500 +               oobfree->length = oob_per_sec - ecc_per_sec;
501 +
502 +               /* Bad-block marker must be in the first sector spare area */
503 +               if (WARN_ON(bbm_pos >= (oobfree->offset + oobfree->length)))
504 +                       return -EINVAL;
505 +
506 +               if (i != 0)
507 +                       continue;
508 +
509 +               /* Remove bad-block marker from available byte range */
510 +               if (bbm_pos == oobfree->offset) {
511 +                       oobfree->offset += 1;
512 +                       oobfree->length -= 1;
513 +               } else if (bbm_pos == (oobfree->offset + oobfree->length - 1)) {
514 +                       oobfree->length -= 1;
515 +               } else {
516 +                       layout->oobfree[i + 1].offset = bbm_pos + 1;
517 +                       layout->oobfree[i + 1].length =
518 +                               oobfree->length - bbm_pos - 1;
519 +                       oobfree->length = bbm_pos;
520 +                       i++;
521 +               }
522 +       }
523 +
524 +       layout->oobavail = ((oob_per_sec - ecc_per_sec)
525 +               << ctrl->sec_per_page_shift) - 1;
526 +
527 +       ctrl->mtd.oobavail = layout->oobavail;
528 +       ctrl->nand.ecc.layout = layout;
529 +
530 +       /* Output layout for debugging */
531 +       dev_dbg(dev, "Spare area=%d eccbytes %d, ecc bytes located at:\n",
532 +               ctrl->mtd.oobsize, layout->eccbytes);
533 +       for (i = j = 0;
534 +            i < ARRAY_SIZE(layout->eccpos) && i < layout->eccbytes; i++)
535 +               pr_debug(" %d", layout->eccpos[i]);
536 +       pr_debug("\n");
537 +
538 +       dev_dbg(dev, "Available %d bytes at (off,len):\n", layout->oobavail);
539 +       for (i = 0; i < ARRAY_SIZE(layout->oobfree); i++)
540 +               pr_debug("(%d,%d) ", layout->oobfree[i].offset,
541 +                        layout->oobfree[i].length);
542 +       pr_debug("\n");
543 +
544 +       return 0;
545 +}
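+/*
+ * Worked example for the layout computed above, assuming a 2K page, 64-byte
+ * spare, 1k sectors, ECC level 4 and the bad-block marker at offset 0:
+ *
+ *     sec_per_page_shift = 1, oob_per_sec = 32, ecc_per_sec = 14
+ *     eccbytes = 28, eccpos = 18..31 and 50..63
+ *     oobfree = { {1, 17}, {32, 18} }, oobavail = 35
+ */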
546 +
547 +/*
548 + * Register bit-field manipulation routines
549 + */
550 +
551 +static inline unsigned int bcmnand_reg_read(struct bcmnand_ctrl *ctrl,
552 +                                           struct bcmnand_reg_field rbf)
553 +{
554 +       u32 val;
555 +
556 +       val = bcma_read32(ctrl->core, rbf.reg);
557 +       val >>= rbf.pos;
558 +       val &= (1 << rbf.width) - 1;
559 +
560 +       return val;
561 +}
562 +
563 +static inline void bcmnand_reg_write(struct bcmnand_ctrl *ctrl,
564 +                                    struct bcmnand_reg_field rbf,
565 +                                    unsigned newval)
566 +{
567 +       u32 val, msk;
568 +
569 +       msk = (1 << rbf.width) - 1;
570 +       msk <<= rbf.pos;
571 +       newval <<= rbf.pos;
572 +       newval &= msk;
573 +
574 +       val = bcma_read32(ctrl->core, rbf.reg);
575 +       val &= ~msk;
576 +       val |= newval;
577 +       bcma_write32(ctrl->core, rbf.reg, val);
578 +}
579 +
580 +static inline unsigned int bcmnand_reg_aread(struct bcmnand_ctrl *ctrl,
581 +                                            struct bcmnand_areg_field rbf)
582 +{
583 +       u32 val;
584 +
585 +       val = bcma_aread32(ctrl->core, rbf.reg);
586 +       val >>= rbf.pos;
587 +       val &= (1 << rbf.width) - 1;
588 +
589 +       return val;
590 +}
591 +
592 +static inline void bcmnand_reg_awrite(struct bcmnand_ctrl *ctrl,
593 +                                     struct bcmnand_areg_field rbf,
594 +                                     unsigned int newval)
595 +{
596 +       u32 val, msk;
597 +
598 +       msk = (1 << rbf.width) - 1;
599 +       msk <<= rbf.pos;
600 +       newval <<= rbf.pos;
601 +       newval &= msk;
602 +
603 +       val = bcma_aread32(ctrl->core, rbf.reg);
604 +       val &= ~msk;
605 +       val |= newval;
606 +       bcma_awrite32(ctrl->core, rbf.reg, val);
607 +}
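+/*
+ * Usage illustration for the accessors above: with the field descriptor
+ * NANDC_CMD_CS_SEL = {reg 0x8, pos 16, width 3},
+ *
+ *     bcmnand_reg_write(ctrl, NANDC_CMD_CS_SEL, 1);
+ *
+ * read-modify-writes register 0x8, changing only bits [18:16], while
+ *
+ *     bcmnand_reg_read(ctrl, NANDC_CMD_CS_SEL)
+ *
+ * returns (value >> 16) & 0x7 from the same register.
+ */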
608 +
609 +/*
610 + * NAND Interface - dev_ready
611 + *
612 + * Return 1 iff device is ready, 0 otherwise
613 + */
614 +static int bcmnand_dev_ready(struct mtd_info *mtd)
615 +{
616 +       struct nand_chip *chip = mtd->priv;
617 +       struct bcmnand_ctrl *ctrl = chip->priv;
618 +
619 +       return bcmnand_reg_aread(ctrl, NANDC_IDM_IO_CTRL_RDY);
620 +}
621 +
622 +/*
623 + * Interrupt service routines
624 + */
625 +static irqreturn_t bcmnand_isr(int irq, void *dev_id)
626 +{
627 +       struct bcmnand_ctrl *ctrl = dev_id;
628 +       int irq_off;
629 +
630 +       irq_off = irq - ctrl->core->irq;
631 +       WARN_ON(irq_off < 0 || irq_off >= NANDC_IRQ_NUM);
632 +
633 +       if (!bcmnand_reg_read(ctrl, NANDC_INT_N_REG(irq_off)))
634 +               return IRQ_NONE;
635 +
636 +       /* Acknowledge interrupt */
637 +       bcmnand_reg_write(ctrl, NANDC_INT_N_REG(irq_off), 1);
638 +
639 +       /* Wake up task */
640 +       complete(&ctrl->op_completion);
641 +
642 +       return IRQ_HANDLED;
643 +}
644 +
645 +static int bcmnand_wait_interrupt(struct bcmnand_ctrl *ctrl,
646 +                                 unsigned int irq_off,
647 +                                 unsigned int timeout_usec)
648 +{
649 +       long timeout_jiffies;
650 +       int ret = 0;
651 +
652 +       reinit_completion(&ctrl->op_completion);
653 +
654 +       /* Acknowledge interrupt */
655 +       bcmnand_reg_write(ctrl, NANDC_INT_N_REG(irq_off), 1);
656 +
657 +       /* Enable IRQ to wait on */
658 +       bcmnand_reg_awrite(ctrl, NANDC_IDM_IRQ_N_EN(irq_off), 1);
659 +
660 +       timeout_jiffies = 1 + usecs_to_jiffies(timeout_usec);
661 +
662 +       if (irq_off != NANDC_IRQ_CONTROLLER_RDY ||
663 +               0 == bcmnand_reg_aread(ctrl, NANDC_IDM_IO_CTRL_RDY)) {
664 +
665 +               timeout_jiffies = wait_for_completion_interruptible_timeout(
666 +                                       &ctrl->op_completion, timeout_jiffies);
667 +
668 +               if (timeout_jiffies < 0)
669 +                       ret =  timeout_jiffies;
670 +               if (timeout_jiffies == 0)
671 +                       ret = -ETIME;
672 +       }
673 +
674 +       /* Disable IRQ, we're done waiting */
675 +       bcmnand_reg_awrite(ctrl, NANDC_IDM_IRQ_N_EN(irq_off), 0);
676 +
677 +       if (bcmnand_reg_aread(ctrl, NANDC_IDM_IO_CTRL_RDY))
678 +               ret = 0;
679 +
680 +       return ret;
681 +}
682 +
683 +/*
684 + * wait for command completion
685 + */
686 +static int bcmnand_wait_cmd(struct bcmnand_ctrl *ctrl, unsigned int timeout_usec)
687 +{
688 +       unsigned int retries;
689 +
690 +       if (bcmnand_reg_read(ctrl, NANDC_INT_STAT_CTLR_RDY))
691 +               return 0;
692 +
693 +       /* If the timeout is long, wait for interrupt */
694 +       if (timeout_usec >= jiffies_to_usecs(1) >> 4)
695 +               return bcmnand_wait_interrupt(
696 +                       ctrl, NANDC_IRQ_CONTROLLER_RDY, timeout_usec);
697 +
698 +       /* Wait for completion of the prior command */
699 +       retries = (timeout_usec >> 3) + 1;
700 +
701 +       while (retries-- &&
702 +               0 == bcmnand_reg_read(ctrl, NANDC_INT_STAT_CTLR_RDY)) {
703 +               cpu_relax();
704 +               udelay(6);
705 +       }
706 +
707 +       if (!bcmnand_reg_read(ctrl, NANDC_INT_STAT_CTLR_RDY))
708 +               return -ETIME;
709 +
710 +       return 0;
711 +}
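+/*
+ * Illustration of the threshold above: with HZ=100, for example,
+ * jiffies_to_usecs(1) >> 4 is 625, so commands with timeouts of roughly
+ * 625 us or more sleep on the controller-ready interrupt, while shorter
+ * ones poll NANDC_INT_STAT_CTLR_RDY in udelay(6) steps.
+ */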
712 +
713 +
714 +/*
715 + * NAND Interface - waitfunc
716 + */
717 +static int bcmnand_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
718 +{
719 +       struct bcmnand_ctrl *ctrl = chip->priv;
720 +       unsigned int to;
721 +       int ret;
722 +
723 +       /* figure out the timeout based on the last command issued */
724 +       switch (ctrl->last_cmd) {
725 +       default:
726 +       case NAND_CMD_ERASE1:
727 +       case NAND_CMD_ERASE2:
728 +               to = 1 << 16;
729 +               break;
730 +       case NAND_CMD_STATUS:
731 +       case NAND_CMD_RESET:
732 +               to = 256;
733 +               break;
734 +       case NAND_CMD_READID:
735 +               to = 1024;
736 +               break;
737 +       case NAND_CMD_READ1:
738 +       case NAND_CMD_READ0:
739 +               to = 2048;
740 +               break;
741 +       case NAND_CMD_PAGEPROG:
742 +               to = 4096;
743 +               break;
744 +       case NAND_CMD_READOOB:
745 +               to = 512;
746 +               break;
747 +       }
748 +
749 +       /* deliver deferred error code if any */
750 +       ret = ctrl->cmd_ret;
751 +       if (ret < 0)
752 +               ctrl->cmd_ret = 0;
753 +       else
754 +               ret = bcmnand_wait_cmd(ctrl, to);
755 +
756 +       /* Timeout */
757 +       if (ret < 0)
758 +               return NAND_STATUS_FAIL;
759 +
760 +       ret = bcmnand_reg_read(ctrl, NANDC_INT_STAT_FLASH_STATUS);
761 +
762 +       return ret;
763 +}
764 +
765 +/*
766 + * NAND Interface - read_oob
767 + */
768 +static int bcmnand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
769 +                           int page)
770 +{
771 +       struct bcmnand_ctrl *ctrl = chip->priv;
772 +       unsigned int n = ctrl->chip_num;
773 +       void __iomem *ctrl_spare;
774 +       unsigned int spare_per_sec, sector;
775 +       u64 nand_addr;
776 +
777 +       ctrl_spare = ctrl->core->io_addr + NANDC_SPARE_AREA_READ_OFF;
778 +
779 +       /* Set the page address for the following commands */
780 +       nand_addr = ((u64)page << chip->page_shift);
781 +       bcmnand_reg_write(ctrl, NANDC_CMD_EXT_ADDR, nand_addr >> 32);
782 +
783 +       spare_per_sec = mtd->oobsize >> ctrl->sec_per_page_shift;
784 +
785 +       /* Disable ECC validation for spare area reads */
786 +       bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_RD_ECC(n), 0);
787 +
788 +       /* Loop all sectors in page */
789 +       for (sector = 0; sector < (1<<ctrl->sec_per_page_shift); sector++) {
790 +               unsigned int col;
791 +
792 +               col = (sector << ctrl->sector_size_shift);
793 +
794 +               /* Issue command to read partial page */
795 +               bcmnand_reg_write(ctrl, NANDC_CMD_ADDRESS, nand_addr + col);
796 +
797 +               bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
798 +                                 NANDC_CMD_OPCODE_SPARE_READ);
799 +
800 +               /* Wait for the command to complete */
801 +               if (bcmnand_wait_cmd(ctrl, (sector == 0) ? 10000 : 100))
802 +                       return -EIO;
803 +
804 +               if (!bcmnand_reg_read(ctrl, NANDC_INT_STAT_SPARE_VALID))
805 +                       return -EIO;
806 +
807 +               /* Set controller to Little Endian mode for copying */
808 +               bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 1);
809 +
810 +               memcpy(chip->oob_poi + sector * spare_per_sec,
811 +                      ctrl_spare, spare_per_sec);
812 +
813 +               /* Return to Big Endian mode for commands etc */
814 +               bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 0);
815 +       }
816 +
817 +       return 0;
818 +}
819 +
820 +/*
821 + * NAND Interface - write_oob
822 + */
823 +static int bcmnand_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
824 +                            int page)
825 +{
826 +       struct bcmnand_ctrl *ctrl = chip->priv;
827 +       unsigned int n = ctrl->chip_num;
828 +       void __iomem *ctrl_spare;
829 +       unsigned int spare_per_sec, sector, num_sec;
830 +       u64 nand_addr;
831 +       int to, status = 0;
832 +
833 +       ctrl_spare = ctrl->core->io_addr + NANDC_SPARE_AREA_WRITE_OFF;
834 +
835 +       /* Disable ECC generation for spare area writes */
836 +       bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_WR_ECC(n), 0);
837 +
838 +       spare_per_sec = mtd->oobsize >> ctrl->sec_per_page_shift;
839 +
840 +       /* Set the page address for the following commands */
841 +       nand_addr = ((u64)page << chip->page_shift);
842 +       bcmnand_reg_write(ctrl, NANDC_CMD_EXT_ADDR, nand_addr >> 32);
843 +
844 +       /* Must allow partial programming to change spare area only */
845 +       bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_PGM_PARTIAL(n), 1);
846 +
847 +       num_sec = 1 << ctrl->sec_per_page_shift;
848 +       /* Loop all sectors in page */
849 +       for (sector = 0; sector < num_sec; sector++) {
850 +               unsigned int col;
851 +
852 +               /* Spare area accessed by the data sector offset */
853 +               col = (sector << ctrl->sector_size_shift);
854 +
855 +               bcmnand_reg_write(ctrl, NANDC_CMD_ADDRESS, nand_addr + col);
856 +
857 +               /* Set controller to Little Endian mode for copying */
858 +               bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 1);
859 +
860 +               memcpy(ctrl_spare, chip->oob_poi + sector * spare_per_sec,
861 +                      spare_per_sec);
862 +
863 +               /* Return to Big Endian mode for commands etc */
864 +               bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 0);
865 +
866 +               /* Push spare bytes into internal buffer, last goes to flash */
867 +               bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
868 +                                 NANDC_CMD_OPCODE_SPARE_PROG);
869 +
870 +               if (sector == (num_sec - 1))
871 +                       to = 1 << 16;
872 +               else
873 +                       to = 1 << 10;
874 +
875 +               if (bcmnand_wait_cmd(ctrl, to))
876 +                       return -EIO;
877 +       }
878 +
879 +       /* Restore partial programming inhibition */
880 +       bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_PGM_PARTIAL(n), 0);
881 +
882 +       status = bcmnand_waitfunc(mtd, chip);
883 +       return status & NAND_STATUS_FAIL ? -EIO : 0;
884 +}
885 +
886 +/*
887 + * verify that a buffer is all erased
888 + */
889 +static bool bcmnand_buf_erased(const void *buf, unsigned int len)
890 +{
891 +       unsigned int i;
892 +       const u32 *p = buf;
893 +
894 +       for (i = 0; i < (len >> 2); i++) {
895 +               if (p[i] != 0xffffffff)
896 +                       return false;
897 +       }
898 +       return true;
899 +}
900 +
901 +/*
902 + * read a page, with or without ECC checking
903 + */
904 +static int bcmnand_read_page_do(struct mtd_info *mtd, struct nand_chip *chip,
905 +                               uint8_t *buf, int page, bool ecc)
906 +{
907 +       struct bcmnand_ctrl *ctrl = chip->priv;
908 +       unsigned int n = ctrl->chip_num;
909 +       void __iomem *ctrl_cache;
910 +       void __iomem *ctrl_spare;
911 +       unsigned int data_bytes;
912 +       unsigned int spare_per_sec;
913 +       unsigned int sector, to = 1 << 16;
914 +       u32 err_soft_reg, err_hard_reg;
915 +       unsigned int hard_err_count = 0;
916 +       int ret;
917 +       u64 nand_addr;
918 +
919 +       ctrl_cache = ctrl->core->io_addr + NANDC_CACHE_OFF;
920 +       ctrl_spare = ctrl->core->io_addr + NANDC_SPARE_AREA_READ_OFF;
921 +
922 +       /* Reset ECC error stats */
923 +       err_hard_reg = bcmnand_reg_read(ctrl, NANDC_UNCORR_ERR_COUNT);
924 +       err_soft_reg = bcmnand_reg_read(ctrl, NANDC_READ_CORR_BIT_COUNT);
925 +
926 +       spare_per_sec = mtd->oobsize >> ctrl->sec_per_page_shift;
927 +
928 +       /* Set the page address for the following commands */
929 +       nand_addr = ((u64)page << chip->page_shift);
930 +       bcmnand_reg_write(ctrl, NANDC_CMD_EXT_ADDR, nand_addr >> 32);
931 +
932 +       /* Enable ECC validation for ecc page reads */
933 +       bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_RD_ECC(n), ecc);
934 +
935 +       /* Loop all sectors in page */
936 +       for (sector = 0; sector < (1 << ctrl->sec_per_page_shift); sector++) {
937 +               data_bytes  = 0;
938 +
939 +               /* Copy partial sectors sized by cache reg */
940 +               while (data_bytes < (1<<ctrl->sector_size_shift)) {
941 +                       unsigned int col;
942 +
943 +                       col = data_bytes + (sector << ctrl->sector_size_shift);
944 +
945 +                       bcmnand_reg_write(ctrl, NANDC_CMD_ADDRESS,
946 +                                         nand_addr + col);
947 +
948 +                       /* Issue command to read partial page */
949 +                       bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
950 +                                         NANDC_CMD_OPCODE_PAGE_READ);
951 +
952 +                       /* Wait for the command to complete */
953 +                       ret = bcmnand_wait_cmd(ctrl, to);
954 +                       if (ret < 0)
955 +                               return ret;
956 +
957 +                       /* Set controller to Little Endian mode for copying */
958 +                       bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 1);
959 +
960 +                       if (data_bytes == 0) {
961 +                               memcpy(chip->oob_poi + sector * spare_per_sec,
962 +                                      ctrl_spare, spare_per_sec);
963 +                       }
964 +
965 +                       memcpy(buf + col, ctrl_cache, NANDC_CACHE_SIZE);
966 +                       data_bytes += NANDC_CACHE_SIZE;
967 +
968 +                       /* Return to Big Endian mode for commands etc */
969 +                       bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 0);
970 +
971 +                       /* Next iterations should go fast */
972 +                       to = 1 << 10;
973 +
974 +                       /* capture hard errors for each partial */
975 +                       if (err_hard_reg != bcmnand_reg_read(ctrl, NANDC_UNCORR_ERR_COUNT)) {
976 +                               int era = bcmnand_reg_read(ctrl, NANDC_INT_STAT_ERASED);
977 +
978 +                               if (!era &&
979 +                                   !bcmnand_buf_erased(buf + col, NANDC_CACHE_SIZE))
980 +                                       hard_err_count++;
981 +
982 +                               err_hard_reg = bcmnand_reg_read(ctrl,
983 +                                                       NANDC_UNCORR_ERR_COUNT);
984 +                       }
985 +               }
986 +       }
987 +
988 +       if (!ecc)
989 +               return 0;
990 +
991 +       /* Report hard ECC errors */
992 +       if (hard_err_count)
993 +               mtd->ecc_stats.failed++;
994 +
995 +       /* Get ECC soft error stats */
996 +       mtd->ecc_stats.corrected += err_soft_reg -
997 +                       bcmnand_reg_read(ctrl, NANDC_READ_CORR_BIT_COUNT);
998 +
999 +       return 0;
1000 +}
1001 +
1002 +/*
1003 + * NAND Interface - read_page_ecc
1004 + */
1005 +static int bcmnand_read_page_ecc(struct mtd_info *mtd, struct nand_chip *chip,
1006 +                                uint8_t *buf, int oob_required, int page)
1007 +{
1008 +       return bcmnand_read_page_do(mtd, chip, buf, page, true);
1009 +}
1010 +
1011 +/*
1012 + * NAND Interface - read_page_raw
1013 + */
1014 +static int bcmnand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1015 +                                uint8_t *buf, int oob_required, int page)
1016 +{
1017 +       return bcmnand_read_page_do(mtd, chip, buf, page, false);
1018 +}
1019 +
1020 +/*
1021 + * do page write, with or without ECC generation enabled
1022 + */
1023 +static int bcmnand_write_page_do(struct mtd_info *mtd, struct nand_chip *chip,
1024 +                                const uint8_t *buf, bool ecc)
1025 +{
1026 +       struct bcmnand_ctrl *ctrl = chip->priv;
1027 +       unsigned int n = ctrl->chip_num;
1028 +       void __iomem *ctrl_cache;
1029 +       void __iomem *ctrl_spare;
1030 +       unsigned int spare_per_sec, sector, num_sec;
1031 +       unsigned int data_bytes, spare_bytes;
1032 +       int i, to;
1033 +       uint8_t *tmp_poi;
1034 +       u32 nand_addr;
1035 +
1036 +       ctrl_cache = ctrl->core->io_addr + NANDC_CACHE_OFF;
1037 +       ctrl_spare = ctrl->core->io_addr + NANDC_SPARE_AREA_WRITE_OFF;
1038 +
1039 +       /* Get start-of-page address */
1040 +       nand_addr = bcmnand_reg_read(ctrl, NANDC_CMD_ADDRESS);
1041 +
1042 +       tmp_poi = kmalloc(mtd->oobsize, GFP_KERNEL);
1043 +       if (!tmp_poi)
1044 +               return -ENOMEM;
1045 +
1046 +       /* Retrieve pre-existing OOB values */
1047 +       memcpy(tmp_poi, chip->oob_poi, mtd->oobsize);
1048 +       ctrl->cmd_ret = bcmnand_read_oob(mtd, chip,
1049 +                                        nand_addr >> chip->page_shift);
1050 +       if (ctrl->cmd_ret < 0) {
1051 +               kfree(tmp_poi);
1052 +               return ctrl->cmd_ret;
1053 +       }
1054 +
1055 +       /* Apply new OOB data bytes just like they would end up on the chip */
1056 +       for (i = 0; i < mtd->oobsize; i++)
1057 +               chip->oob_poi[i] &= tmp_poi[i];
1058 +       kfree(tmp_poi);
1059 +
1060 +       spare_per_sec = mtd->oobsize >> ctrl->sec_per_page_shift;
1061 +
1062 +       /* Enable ECC generation for ecc page write, if requested */
1063 +       bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_WR_ECC(n), ecc);
1064 +
1065 +       spare_bytes = 0;
1066 +       num_sec = 1 << ctrl->sec_per_page_shift;
1067 +
1068 +       /* Loop all sectors in page */
1069 +       for (sector = 0; sector < num_sec; sector++) {
1070 +               data_bytes  = 0;
1071 +
1072 +               /* Copy partial sectors sized by cache reg */
1073 +               while (data_bytes < (1<<ctrl->sector_size_shift)) {
1074 +                       unsigned int col;
1075 +
1076 +                       col = data_bytes +
1077 +                               (sector << ctrl->sector_size_shift);
1078 +
1079 +                       /* Set address of 512-byte sub-page */
1080 +                       bcmnand_reg_write(ctrl, NANDC_CMD_ADDRESS,
1081 +                                         nand_addr + col);
1082 +
1083 +                       /* Set controller to Little Endian mode for copying */
1084 +                       bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN,
1085 +                                          1);
1086 +
1087 +                       /* Spare area is written at the start of each sector */
1088 +                       if (data_bytes == 0) {
1089 +                               memcpy(ctrl_spare,
1090 +                                       chip->oob_poi + spare_bytes,
1091 +                                       spare_per_sec);
1092 +                               spare_bytes += spare_per_sec;
1093 +                       }
1094 +
1095 +                       /* Copy sub-page data */
1096 +                       memcpy(ctrl_cache, buf + col, NANDC_CACHE_SIZE);
1097 +                       data_bytes += NANDC_CACHE_SIZE;
1098 +
1099 +                       /* Return to Big Endian mode for commands etc */
1100 +                       bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 0);
1101 +
1102 +                       /* Push data into internal cache */
1103 +                       bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
1104 +                                         NANDC_CMD_OPCODE_PAGE_PROG);
1105 +
1106 +                       /* Wait for the command to complete */
1107 +                       if (sector == (num_sec - 1))
1108 +                               to = 1 << 16;
1109 +                       else
1110 +                               to = 1 << 10;
1111 +                       ctrl->cmd_ret = bcmnand_wait_cmd(ctrl, to);
1112 +                       if (ctrl->cmd_ret < 0)
1113 +                               return ctrl->cmd_ret;
1114 +               }
1115 +       }
1116 +       return 0;
1117 +}
1118 +
1119 +/*
1120 + * NAND Interface - write_page_ecc
1121 + */
1122 +static int bcmnand_write_page_ecc(struct mtd_info *mtd, struct nand_chip *chip,
1123 +                                 const uint8_t *buf, int oob_required)
1124 +{
1125 +       return bcmnand_write_page_do(mtd, chip, buf, true);
1126 +}
1127 +
1128 +/*
1129 + * NAND Interface - write_page_raw
1130 + */
1131 +static int bcmnand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1132 +                                 const uint8_t *buf, int oob_required)
1133 +{
1134 +       return bcmnand_write_page_do(mtd, chip, buf, false);
1135 +}
1136 +
1137 +/*
1138 + * MTD Interface - read_byte
1139 + *
1140 + * This function emulates a simple controller's behavior
1141 + * for just a few relevant commands
1142 + */
1143 +static uint8_t bcmnand_read_byte(struct mtd_info *mtd)
1144 +{
1145 +       struct nand_chip *nand = mtd->priv;
1146 +       struct bcmnand_ctrl *ctrl = nand->priv;
1147 +       struct device *dev = &ctrl->core->dev;
1148 +       uint8_t b = ~0;
1149 +
1150 +       switch (ctrl->last_cmd) {
1151 +       case NAND_CMD_READID:
1152 +               if (ctrl->id_byte_index < 8) {
1153 +                       b = bcmnand_reg_read(ctrl, NANDC_DEVID_BYTE(
1154 +                                                       ctrl->id_byte_index));
1155 +                       ctrl->id_byte_index++;
1156 +               }
1157 +               break;
1158 +       case NAND_CMD_READOOB:
1159 +               if (ctrl->oob_index < mtd->oobsize)
1160 +                       b = nand->oob_poi[ctrl->oob_index++];
1161 +               break;
1162 +       case NAND_CMD_STATUS:
1163 +               b = bcmnand_reg_read(ctrl, NANDC_INT_STAT_FLASH_STATUS);
1164 +               break;
1165 +       default:
1166 +               dev_err(dev, "got unknown command: 0x%x in read_byte\n",
1167 +                       ctrl->last_cmd);
1168 +       }
1169 +       return b;
1170 +}
1171 +
1172 +/*
1173 + * MTD Interface - read_word
1174 + *
1175 + * Cannot be tested without an x16 chip, but the SoC does not support an x16 i/f.
1176 + */
1177 +static u16 bcmnand_read_word(struct mtd_info *mtd)
1178 +{
1179 +       u16 w = ~0;
1180 +
1181 +       w = bcmnand_read_byte(mtd);
1182 +       barrier();
1183 +       w |= bcmnand_read_byte(mtd) << 8;
1184 +
1185 +       return w;
1186 +}
1187 +
1188 +/*
1189 + * MTD Interface - select a chip from an array
1190 + */
1191 +static void bcmnand_select_chip(struct mtd_info *mtd, int chip)
1192 +{
1193 +       struct nand_chip *nand = mtd->priv;
1194 +       struct bcmnand_ctrl *ctrl = nand->priv;
1195 +
1196 +       ctrl->chip_num = chip;
1197 +       bcmnand_reg_write(ctrl, NANDC_CMD_CS_SEL, chip);
1198 +}
1199 +
1200 +/*
1201 + * NAND Interface - emulate low-level NAND commands
1202 + *
1203 + * Only a few low-level commands are really needed by generic NAND,
1204 + * and they do not require the CMD_LL operations the controller also supports.
1205 + */
1206 +static void bcmnand_cmdfunc(struct mtd_info *mtd, unsigned int command,
1207 +                           int column, int page_addr)
1208 +{
1209 +       struct nand_chip *nand = mtd->priv;
1210 +       struct bcmnand_ctrl *ctrl = nand->priv;
1211 +       struct device *dev = &ctrl->core->dev;
1212 +       u64 nand_addr;
1213 +       unsigned int to = 1;
1214 +
1215 +       ctrl->last_cmd = command;
1216 +
1217 +       /* Set address for some commands */
1218 +       switch (command) {
1219 +       case NAND_CMD_ERASE1:
1220 +               column = 0;
1221 +               /*FALLTHROUGH*/
1222 +       case NAND_CMD_SEQIN:
1223 +       case NAND_CMD_READ0:
1224 +       case NAND_CMD_READ1:
1225 +               WARN_ON(column >= mtd->writesize);
1226 +               nand_addr = (u64) column |
1227 +                       ((u64)page_addr << nand->page_shift);
1228 +               bcmnand_reg_write(ctrl, NANDC_CMD_EXT_ADDR, nand_addr >> 32);
1229 +               bcmnand_reg_write(ctrl, NANDC_CMD_ADDRESS, nand_addr);
1230 +               break;
1231 +       case NAND_CMD_ERASE2:
1232 +       case NAND_CMD_RESET:
1233 +       case NAND_CMD_READID:
1234 +       case NAND_CMD_READOOB:
1235 +       case NAND_CMD_PAGEPROG:
1236 +       default:
1237 +               /* Do nothing, address not used */
1238 +               break;
1239 +       }
1240 +
1241 +       /* Issue appropriate command to controller */
1242 +       switch (command) {
1243 +       case NAND_CMD_SEQIN:
1244 +               /* Only need to load command address, done */
1245 +               return;
1246 +
1247 +       case NAND_CMD_RESET:
1248 +               bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
1249 +                                 NANDC_CMD_OPCODE_FLASH_RESET);
1250 +               to = 1 << 8;
1251 +               break;
1252 +
1253 +       case NAND_CMD_READID:
1254 +               bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
1255 +                                 NANDC_CMD_OPCODE_DEVID_READ);
1256 +               ctrl->id_byte_index = 0;
1257 +               to = 1 << 8;
1258 +               break;
1259 +
1260 +       case NAND_CMD_READ0:
1261 +       case NAND_CMD_READ1:
1262 +               bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
1263 +                                 NANDC_CMD_OPCODE_PAGE_READ);
1264 +               to = 1 << 15;
1265 +               break;
1266 +       case NAND_CMD_STATUS:
1267 +               bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
1268 +                                 NANDC_CMD_OPCODE_STATUS_READ);
1269 +               to = 1 << 8;
1270 +               break;
1271 +       case NAND_CMD_ERASE1:
1272 +               return;
1273 +
1274 +       case NAND_CMD_ERASE2:
1275 +               bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
1276 +                                 NANDC_CMD_OPCODE_BLOCK_ERASE);
1277 +               to = 1 << 18;
1278 +               break;
1279 +
1280 +       case NAND_CMD_PAGEPROG:
1281 +               /* Cmd already set from write_page */
1282 +               return;
1283 +
1284 +       case NAND_CMD_READOOB:
1285 +               /* Emulate simple interface */
1286 +               bcmnand_read_oob(mtd, nand, page_addr);
1287 +               ctrl->oob_index = 0;
1288 +               return;
1289 +
1290 +       default:
1291 +               dev_err(dev, "got unknown command: 0x%x in cmdfunc\n",
1292 +                       ctrl->last_cmd);
1293 +       }
1294 +
1295 +       /* Wait for command to complete */
1296 +       ctrl->cmd_ret = bcmnand_wait_cmd(ctrl, to);
1297 +
1298 +}
1299 +
1300 +static int bcmnand_scan(struct mtd_info *mtd)
1301 +{
1302 +       struct nand_chip *nand = mtd->priv;
1303 +       struct bcmnand_ctrl *ctrl = nand->priv;
1304 +       struct device *dev = &ctrl->core->dev;
1305 +       bool sector_1k = false;
1306 +       unsigned int chip_num = 0;
1307 +       int ecc_level = 0;
1308 +       int ret;
1309 +
1310 +       ret = nand_scan_ident(mtd, NANDC_MAX_CHIPS, NULL);
1311 +       if (ret)
1312 +               return ret;
1313 +
1314 +       /* Get configuration from first chip */
1315 +       sector_1k = bcmnand_reg_read(ctrl, NANDC_ACC_CTRL_SECTOR_1K(0));
1316 +       ecc_level = bcmnand_reg_read(ctrl, NANDC_ACC_CTRL_ECC_LEVEL(0));
1317 +       mtd->writesize_shift = nand->page_shift;
1318 +
1319 +       ctrl->ecc_level = ecc_level;
1320 +       ctrl->sector_size_shift = sector_1k ? 10 : 9;
1321 +
1322 +       /* Configure spare area, tweak as needed */
1323 +       do {
1324 +               ctrl->sec_per_page_shift =
1325 +                       mtd->writesize_shift - ctrl->sector_size_shift;
1326 +
1327 +               /* will return -EINVAL if OOB space exhausted */
1328 +               ret = bcmnand_hw_ecc_layout(ctrl);
1329 +
1330 +               /* First try to bump sector size to 1k, then decrease level */
1331 +               if (ret && nand->page_shift > 9 && ctrl->sector_size_shift < 10)
1332 +                       ctrl->sector_size_shift = 10;
1333 +               else if (ret)
1334 +                       ctrl->ecc_level--;
1335 +
1336 +       } while (ret && ctrl->ecc_level > 0);
1337 +
1338 +       if (WARN_ON(ctrl->ecc_level == 0))
1339 +               return -ENOENT;
1340 +
1341 +       if ((ctrl->sector_size_shift > 9) != (sector_1k == 1)) {
1342 +               dev_info(dev, "sector size adjusted to 1k\n");
1343 +               sector_1k = 1;
1344 +       }
1345 +
1346 +       if (ecc_level != ctrl->ecc_level) {
1347 +               dev_info(dev, "ECC level adjusted from %u to %u\n",
1348 +                        ecc_level, ctrl->ecc_level);
1349 +               ecc_level = ctrl->ecc_level;
1350 +       }
1351 +
1352 +       /* handle the hardware chip config registers */
1353 +       for (chip_num = 0; chip_num < nand->numchips; chip_num++) {
1354 +               bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_SECTOR_1K(chip_num),
1355 +                                 sector_1k);
1356 +               bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_ECC_LEVEL(chip_num),
1357 +                                 ecc_level);
1358 +
1359 +               /* Large pages: no partial page programming */
1360 +               if (mtd->writesize > 512) {
1361 +                       bcmnand_reg_write(ctrl,
1362 +                               NANDC_ACC_CTRL_PGM_RDIN(chip_num), 0);
1363 +                       bcmnand_reg_write(ctrl,
1364 +                               NANDC_ACC_CTRL_PGM_PARTIAL(chip_num), 0);
1365 +               }
1366 +
1367 +               /* Do not raise ECC error when reading erased pages */
1368 +               /* This bit has only partial effect, driver needs to help */
1369 +               bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_ERA_ECC_ERR(chip_num),
1370 +                                 0);
1371 +
1372 +               bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_PG_HIT(chip_num), 0);
1373 +               bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_PREFETCH(chip_num), 0);
1374 +               bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_CACHE_MODE(chip_num), 0);
1375 +               bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_CACHE_LASTPG(chip_num),
1376 +                                 0);
1377 +
1378 +               /* TBD: consolidate or at least verify the s/w and h/w geometries agree */
1379 +       }
1380 +
1381 +       /* Allow writing on device */
1382 +       if (!(nand->options & NAND_ROM))
1383 +               bcmnand_reg_write(ctrl, NANDC_CS_NAND_WP, 0);
1384 +
1385 +       dev_dbg(dev, "layout.oobavail=%d\n", nand->ecc.layout->oobavail);
1386 +
1387 +       ret = nand_scan_tail(mtd);
1388 +
1389 +       if (nand->badblockbits == 0)
1390 +               nand->badblockbits = 8;
1391 +       if (WARN_ON((1 << nand->page_shift) != mtd->writesize))
1392 +               return -EIO;
1393 +
1394 +       /* Spit out some key chip parameters as detected by nand_base */
1395 +       dev_dbg(dev, "erasesize=%d writesize=%d oobsize=%d page_shift=%d badblockpos=%d badblockbits=%d\n",
1396 +               mtd->erasesize, mtd->writesize, mtd->oobsize,
1397 +               nand->page_shift, nand->badblockpos, nand->badblockbits);
1398 +
1399 +       return ret;
1400 +}
1401 +
1402 +/*
1403 + * Main initialization function
1404 + */
1405 +static int bcmnand_ctrl_init(struct bcmnand_ctrl *ctrl)
1406 +{
1407 +       unsigned int chip;
1408 +       struct nand_chip *nand;
1409 +       struct mtd_info *mtd;
1410 +       struct device *dev = &ctrl->core->dev;
1411 +       int ret;
1412 +
1413 +       /* Software variables init */
1414 +       nand = &ctrl->nand;
1415 +       mtd = &ctrl->mtd;
1416 +
1417 +       init_completion(&ctrl->op_completion);
1418 +
1419 +       mtd->priv = nand;
1420 +       mtd->owner = THIS_MODULE;
1421 +       mtd->name = KBUILD_MODNAME;
1422 +
1423 +       nand->priv = ctrl;
1424 +
1425 +       nand->chip_delay = 5;   /* not used */
1426 +       nand->IO_ADDR_R = nand->IO_ADDR_W = (void *)~0L;
1427 +
1428 +       if (bcmnand_reg_read(ctrl, NANDC_CONFIG_CHIP_WIDTH(0)))
1429 +               nand->options |= NAND_BUSWIDTH_16;
1430 +       nand->options |= NAND_SKIP_BBTSCAN;     /* Don't need BBTs */
1431 +
1432 +       nand->options |= NAND_NO_SUBPAGE_WRITE; /* Subpages unsupported */
1433 +
1434 +       nand->dev_ready                 = bcmnand_dev_ready;
1435 +       nand->read_byte                 = bcmnand_read_byte;
1436 +       nand->read_word                 = bcmnand_read_word;
1437 +       nand->select_chip               = bcmnand_select_chip;
1438 +       nand->cmdfunc                   = bcmnand_cmdfunc;
1439 +       nand->waitfunc                  = bcmnand_waitfunc;
1440 +
1441 +       nand->ecc.mode                  = NAND_ECC_HW;
1442 +       nand->ecc.read_page_raw         = bcmnand_read_page_raw;
1443 +       nand->ecc.write_page_raw        = bcmnand_write_page_raw;
1444 +       nand->ecc.read_page             = bcmnand_read_page_ecc;
1445 +       nand->ecc.write_page            = bcmnand_write_page_ecc;
1446 +       nand->ecc.read_oob              = bcmnand_read_oob;
1447 +       nand->ecc.write_oob             = bcmnand_write_oob;
1448 +
1449 +       /* Set AUTO_CONFIG bit - try to auto-detect chips */
1450 +       bcmnand_reg_write(ctrl, NANDC_CS_AUTO_CONFIG, 1);
1451 +
1452 +       usleep_range(1000, 1500);
1453 +
1454 +       /* Print out current chip config */
1455 +       for (chip = 0; chip < NANDC_MAX_CHIPS; chip++) {
1456 +               dev_dbg(dev, "chip[%d]: size=%#x block=%#x page=%#x ecc_level=%#x\n",
1457 +                       chip,
1458 +                       bcmnand_reg_read(ctrl, NANDC_CONFIG_CHIP_SIZE(chip)),
1459 +                       bcmnand_reg_read(ctrl, NANDC_CONFIG_BLK_SIZE(chip)),
1460 +                       bcmnand_reg_read(ctrl, NANDC_CONFIG_PAGE_SIZE(chip)),
1461 +                       bcmnand_reg_read(ctrl, NANDC_ACC_CTRL_ECC_LEVEL(chip)));
1462 +       }
1463 +
1464 +       dev_dbg(dev, "NAND controller ready=%d\n",
1465 +               bcmnand_reg_aread(ctrl, NANDC_IDM_IO_CTRL_RDY));
1466 +
1467 +       ret = bcmnand_scan(mtd);
1468 +       if (ret) {
1469 +               dev_err(dev, "scanning the NAND flash chip failed with %i\n",
1470 +                       ret);
1471 +               return ret;
1472 +       }
1473 +
1474 +       return 0;
1475 +}
1476 +
1477 +static int bcmnand_idm_init(struct bcmnand_ctrl *ctrl)
1478 +{
1479 +       int irq_off;
1480 +       unsigned int retries = 0x1000;
1481 +       struct device *dev = &ctrl->core->dev;
1482 +
1483 +       if (bcmnand_reg_aread(ctrl, NANDC_IDM_RESET))
1484 +               dev_info(dev, "stuck in reset\n");
1485 +
1486 +       bcmnand_reg_awrite(ctrl, NANDC_IDM_RESET, 1);
1487 +       if (!bcmnand_reg_aread(ctrl, NANDC_IDM_RESET)) {
1488 +               dev_err(dev, "failed to assert reset\n");
1489 +               return -EIO;
1490 +       }
1491 +
1492 +       while (bcmnand_reg_aread(ctrl, NANDC_IDM_RESET)) {
1493 +               bcmnand_reg_awrite(ctrl, NANDC_IDM_RESET, 0);
1494 +               cpu_relax();
1495 +               usleep_range(100, 150);
1496 +               if (!(retries--)) {
1497 +                       dev_err(dev, "did not come back from reset\n");
1498 +                       return -ETIMEDOUT;
1499 +               }
1500 +       }
1501 +
1502 +       bcmnand_reg_awrite(ctrl, NANDC_IDM_CLOCK_EN, 1);
1503 +       bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 0);
1504 +       udelay(10);
1505 +
1506 +       dev_info(dev, "NAND Controller rev %d.%d\n",
1507 +               bcmnand_reg_read(ctrl, NANDC_REV_MAJOR),
1508 +               bcmnand_reg_read(ctrl, NANDC_REV_MINOR));
1509 +
1510 +       usleep_range(250, 350);
1511 +
1512 +       /* Disable all IRQs */
1513 +       for (irq_off = 0; irq_off < NANDC_IRQ_NUM; irq_off++)
1514 +               bcmnand_reg_awrite(ctrl, NANDC_IDM_IRQ_N_EN(irq_off), 0);
1515 +
1516 +       return 0;
1517 +}
1518 +
1519 +static const char * const part_probes[] = { "ofpart", "bcm47xxpart", NULL };
1520 +
1521 +/*
1522 + * Top-level init function
1523 + */
1524 +static int bcmnand_probe(struct bcma_device *core)
1525 +{
1526 +       struct mtd_part_parser_data parser_data;
1527 +       struct device *dev = &core->dev;
1528 +       struct bcmnand_ctrl *ctrl;
1529 +       int res, i, irq;
1530 +
1531 +       ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
1532 +       if (!ctrl)
1533 +               return -ENOMEM;
1534 +
1535 +       bcma_set_drvdata(core, ctrl);
1536 +
1537 +       ctrl->mtd.dev.parent = &core->dev;
1538 +       ctrl->core = core;
1539 +
1540 +       /* Acquire all interrupt lines */
1541 +       for (i = 0; i < NANDC_IRQ_NUM; i++) {
1542 +               irq = bcma_core_irq(core, i);
1543 +               if (!irq) {
1544 +                       dev_err(dev, "IRQ idx %i not available\n", i);
1545 +                       return -ENOENT;
1546 +               }
1547 +               res = devm_request_irq(dev, irq, bcmnand_isr, 0,
1548 +                                      KBUILD_MODNAME, ctrl);
1549 +               if (res < 0) {
1550 +                       dev_err(dev, "failed to request IRQ %i (idx %i)\n",
1551 +                               irq, i);
1552 +                       return res;
1553 +               }
1554 +       }
1555 +
1556 +       res = bcmnand_idm_init(ctrl);
1557 +       if (res)
1558 +               return res;
1559 +
1560 +       res = bcmnand_ctrl_init(ctrl);
1561 +       if (res)
1562 +               return res;
1563 +
1564 +       parser_data.of_node = dev->of_node;
1565 +       res = mtd_device_parse_register(&ctrl->mtd, part_probes, &parser_data, NULL, 0);
1566 +       if (res) {
1567 +               dev_err(dev, "Failed to register MTD device: %d\n", res);
1568 +               return res;
1569 +       }
1570 +       return 0;
1571 +}
1572 +
1573 +static void bcmnand_remove(struct bcma_device *core)
1574 +{
1575 +       struct bcmnand_ctrl *ctrl = bcma_get_drvdata(core);
1576 +
1577 +       mtd_device_unregister(&ctrl->mtd);
1578 +}
1579 +
1580 +static const struct bcma_device_id bcmnand_bcma_tbl[] = {
1581 +       BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_NAND, BCMA_ANY_REV, BCMA_ANY_CLASS),
1582 +       BCMA_CORETABLE_END
1583 +};
1584 +MODULE_DEVICE_TABLE(bcma, bcmnand_bcma_tbl);
1585 +
1586 +static struct bcma_driver bcmnand_bcma_driver = {
1587 +       .name           = KBUILD_MODNAME,
1588 +       .id_table       = bcmnand_bcma_tbl,
1589 +       .probe          = bcmnand_probe,
1590 +       .remove         = bcmnand_remove,
1591 +};
1592 +
1593 +static int __init bcmnand_init(void)
1594 +{
1595 +       return bcma_driver_register(&bcmnand_bcma_driver);
1596 +}
1597 +
1598 +static void __exit bcmnand_exit(void)
1599 +{
1600 +       bcma_driver_unregister(&bcmnand_bcma_driver);
1601 +}
1602 +
1603 +module_init(bcmnand_init);
1604 +module_exit(bcmnand_exit);
1605 +
1606 +MODULE_LICENSE("GPL");
1607 +MODULE_AUTHOR("Hauke Mehrtens");
1608 +MODULE_DESCRIPTION("Northstar on-chip NAND Flash Controller driver");