1 /* Smedia Glamo 336x/337x driver
2  *
3  * (C) 2007 by Openmoko, Inc.
4  * Author: Harald Welte <laforge@openmoko.org>
5  * All rights reserved.
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public License as
9  * published by the Free Software Foundation; either version 2 of
10  * the License, or (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
20  * MA 02111-1307 USA
21  */
22
23 #include <linux/module.h>
24 #include <linux/kernel.h>
25 #include <linux/errno.h>
26 #include <linux/string.h>
27 #include <linux/mm.h>
28 #include <linux/tty.h>
29 #include <linux/slab.h>
30 #include <linux/delay.h>
31 #include <linux/fb.h>
32 #include <linux/init.h>
33 #include <linux/irq.h>
34 #include <linux/interrupt.h>
35 #include <linux/workqueue.h>
36 #include <linux/wait.h>
37 #include <linux/platform_device.h>
38 #include <linux/kernel_stat.h>
39 #include <linux/spinlock.h>
40 #include <linux/glamofb.h>
41 #include <linux/mmc/mmc.h>
42 #include <linux/mmc/host.h>
43 #include <linux/mfd/core.h>
44
45 #include <asm/io.h>
46 #include <asm/uaccess.h>
47 #include <asm/div64.h>
48
49 #ifdef CONFIG_PM
50 #include <linux/pm.h>
51 #endif
52
53 #include "glamo-regs.h"
54 #include "glamo-core.h"
55
56 #define GLAMO_MEM_REFRESH_COUNT 0x100
57
58 /*
59  * Glamo internal settings
60  *
61  * We run the memory interface from the faster PLLB on 2.6.28 kernels and
62  * above.  A couple of GTA02 users reported trouble with the memory bus
63  * when they upgraded from 2.6.24, so this parameter allows reverting to
64  * the 2.6.24 scheme if their Glamo chip needs it.
65  *
66  * You can override the faster default on the kernel command line using
67  *
68  *   glamo3362.slow_memory=1
69  *
70  * for example
71  */
72
73 static int slow_memory = 0;
74 module_param(slow_memory, int, 0644);
75
76 struct reg_range {
77         int start;
78         int count;
79         char *name;
80         char dump;
81 };
82 struct reg_range reg_range[] = {
83         { 0x0000, 0x76,         "General",      1 },
84         { 0x0200, 0x16,         "Host Bus",     1 },
85         { 0x0300, 0x38,         "Memory",       1 },
86 /*      { 0x0400, 0x100,        "Sensor",       0 }, */
87 /*              { 0x0500, 0x300,        "ISP",          0 }, */
88 /*              { 0x0800, 0x400,        "JPEG",         0 }, */
89 /*              { 0x0c00, 0xcc,         "MPEG",         0 }, */
90         { 0x1100, 0xb2,         "LCD 1",        1 },
91         { 0x1200, 0x64,         "LCD 2",        1 },
92         { 0x1400, 0x40,         "MMC",          1 },
93 /*              { 0x1500, 0x080,        "MPU 0",        0 },
94         { 0x1580, 0x080,        "MPU 1",        0 },
95         { 0x1600, 0x080,        "Cmd Queue",    0 },
96         { 0x1680, 0x080,        "RISC CPU",     0 },
97         { 0x1700, 0x400,        "2D Unit",      0 },
98         { 0x1b00, 0x900,        "3D Unit",      0 }, */
99 };
100
101 static struct glamo_core *glamo_handle;
102
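/*
 * Register accessors: the Glamo exposes its register file as 16-bit MMIO.
 * The __-prefixed helpers below do the raw readw()/writew() and expect the
 * caller to hold glamo->lock where atomicity matters; the unprefixed
 * wrappers (e.g. reg_set_bit_mask()) take the lock themselves.
 */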
103 static inline void __reg_write(struct glamo_core *glamo,
104                                 u_int16_t reg, u_int16_t val)
105 {
106         writew(val, glamo->base + reg);
107 }
108
109 static inline u_int16_t __reg_read(struct glamo_core *glamo,
110                                    u_int16_t reg)
111 {
112         return readw(glamo->base + reg);
113 }
114
115 static void __reg_set_bit_mask(struct glamo_core *glamo,
116                                 u_int16_t reg, u_int16_t mask,
117                                 u_int16_t val)
118 {
119         u_int16_t tmp;
120
121         val &= mask;
122
123         tmp = __reg_read(glamo, reg);
124         tmp &= ~mask;
125         tmp |= val;
126         __reg_write(glamo, reg, tmp);
127 }
128
129 static void reg_set_bit_mask(struct glamo_core *glamo,
130                                 u_int16_t reg, u_int16_t mask,
131                                 u_int16_t val)
132 {
133         spin_lock(&glamo->lock);
134         __reg_set_bit_mask(glamo, reg, mask, val);
135         spin_unlock(&glamo->lock);
136 }
137
138 static inline void __reg_set_bit(struct glamo_core *glamo,
139                                  u_int16_t reg, u_int16_t bit)
140 {
141         __reg_set_bit_mask(glamo, reg, bit, 0xffff);
142 }
143
144 static inline void __reg_clear_bit(struct glamo_core *glamo,
145                                    u_int16_t reg, u_int16_t bit)
146 {
147         __reg_set_bit_mask(glamo, reg, bit, 0);
148 }
149
150 /***********************************************************************
151  * resources of sibling devices
152  ***********************************************************************/
153
154 static struct resource glamo_fb_resources[] = {
155         {
156                 .name   = "glamo-fb-regs",
157                 .start  = GLAMO_REGOFS_LCD,
158                 .end    = GLAMO_REGOFS_MMC - 1,
159                 .flags  = IORESOURCE_MEM,
160         }, {
161                 .name   = "glamo-fb-mem",
162                 .start  = GLAMO_OFFSET_FB,
163                 .end    = GLAMO_OFFSET_FB + GLAMO_FB_SIZE - 1,
164                 .flags  = IORESOURCE_MEM,
165         },
166 };
167
168 static struct resource glamo_mmc_resources[] = {
169         {
170                 .start  = GLAMO_REGOFS_MMC,
171                 .end    = GLAMO_REGOFS_MPROC0 - 1,
172                 .flags  = IORESOURCE_MEM
173         }, {
174                 .start  = IRQ_GLAMO_MMC,
175                 .end    = IRQ_GLAMO_MMC,
176                 .flags  = IORESOURCE_IRQ,
177         }, { /* our data buffer for MMC transfers */
178                 .start  = GLAMO_OFFSET_FB + GLAMO_FB_SIZE,
179                 .end    = GLAMO_OFFSET_FB + GLAMO_FB_SIZE +
180                                   GLAMO_MMC_BUFFER_SIZE - 1,
181                 .flags  = IORESOURCE_MEM
182         },
183 };
184
185 static struct glamo_mci_pdata glamo_mci_def_pdata = {
186         .gpio_detect            = 0,
187         .glamo_can_set_mci_power        = NULL, /* filled in from MFD platform data */
188         .glamo_irq_is_wired     = NULL, /* filled in from MFD platform data */
189         .mci_suspending = NULL, /* filled in from MFD platform data */
190         .mci_all_dependencies_resumed = NULL, /* filled in from MFD platform data */
191 };
192
193 enum glamo_cells {
194         GLAMO_CELL_FB,
195         GLAMO_CELL_MMC,
196         GLAMO_CELL_SPI_GPIO
197 };
198
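/*
 * The per-cell resources above are offsets into the Glamo's own register
 * and memory window; mfd_add_devices() is handed the parent MEM resource in
 * the probe routine below, so the MFD core rebases them into absolute
 * addresses for the child platform devices.
 */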
199 static struct mfd_cell glamo_cells[] = {
200         [GLAMO_CELL_FB] = {
201                 .name = "glamo-fb",
202                 .num_resources = ARRAY_SIZE(glamo_fb_resources),
203                 .resources = glamo_fb_resources,
204         },
205         [GLAMO_CELL_MMC] = {
206                 .name = "glamo-mci",
207                 .num_resources = ARRAY_SIZE(glamo_mmc_resources),
208                 .resources = glamo_mmc_resources,
209         },
210         [GLAMO_CELL_SPI_GPIO] = {
211                 .name = "glamo-spi-gpio",
212         },
213 };
214
215
216 /***********************************************************************
217  * IRQ demultiplexer
218  ***********************************************************************/
219 #define irq2glamo(x)    ((x) - IRQ_GLAMO(0))
220
221 static void glamo_ack_irq(unsigned int irq)
222 {
223         /* clear interrupt source */
224         __reg_write(glamo_handle, GLAMO_REG_IRQ_CLEAR,
225                     1 << irq2glamo(irq));
226 }
227
228 static void glamo_mask_irq(unsigned int irq)
229 {
230         u_int16_t tmp;
231
232         /* clear bit in enable register */
233         tmp = __reg_read(glamo_handle, GLAMO_REG_IRQ_ENABLE);
234         tmp &= ~(1 << irq2glamo(irq));
235         __reg_write(glamo_handle, GLAMO_REG_IRQ_ENABLE, tmp);
236 }
237
238 static void glamo_unmask_irq(unsigned int irq)
239 {
240         u_int16_t tmp;
241
242         /* set bit in enable register */
243         tmp = __reg_read(glamo_handle, GLAMO_REG_IRQ_ENABLE);
244         tmp |= (1 << irq2glamo(irq));
245         __reg_write(glamo_handle, GLAMO_REG_IRQ_ENABLE, tmp);
246 }
247
248 static struct irq_chip glamo_irq_chip = {
249         .ack    = glamo_ack_irq,
250         .mask   = glamo_mask_irq,
251         .unmask = glamo_unmask_irq,
252 };
253
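/*
 * All Glamo engines share a single physical interrupt line.  The demux
 * handler below follows the generic level-IRQ flow: it reads
 * GLAMO_REG_IRQ_STATUS and re-dispatches each set bit (0..8) to the
 * corresponding virtual IRQ_GLAMO(n), looping while further interrupts
 * became pending during handling.
 */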
254 static void glamo_irq_demux_handler(unsigned int irq, struct irq_desc *desc)
255 {
256         desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
257
258         if (unlikely(desc->status & IRQ_INPROGRESS)) {
259                 desc->status |= (IRQ_PENDING | IRQ_MASKED);
260                 desc->chip->mask(irq);
261                 desc->chip->ack(irq);
262                 return;
263         }
264         kstat_incr_irqs_this_cpu(irq, desc);
265
266         desc->chip->ack(irq);
267         desc->status |= IRQ_INPROGRESS;
268
269         do {
270                 u_int16_t irqstatus;
271                 int i;
272
273                 if (unlikely((desc->status &
274                                 (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
275                                 (IRQ_PENDING | IRQ_MASKED))) {
276                         /* dealing with pending IRQ, unmasking */
277                         desc->chip->unmask(irq);
278                         desc->status &= ~IRQ_MASKED;
279                 }
280
281                 desc->status &= ~IRQ_PENDING;
282
283                 /* read IRQ status register */
284                 irqstatus = __reg_read(glamo_handle, GLAMO_REG_IRQ_STATUS);
285                 for (i = 0; i < 9; i++)
286                         if (irqstatus & (1 << i))
287                                 desc_handle_irq(IRQ_GLAMO(i),
288                                     irq_desc+IRQ_GLAMO(i));
289
290         } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
291
292         desc->status &= ~IRQ_INPROGRESS;
293 }
294
295
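/*
 * sysfs "regs" attribute: writing "<reg> <value>" (both parsed by
 * simple_strtoul() in base 10) pokes a single register, while reading dumps
 * every reg_range[] entry whose .dump flag is set.
 */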
296 static ssize_t regs_write(struct device *dev, struct device_attribute *attr,
297                            const char *buf, size_t count)
298 {
299         unsigned long reg = simple_strtoul(buf, NULL, 10);
300         struct glamo_core *glamo = dev_get_drvdata(dev);
301
302         while (*buf && (*buf != ' '))
303                 buf++;
304         if (*buf != ' ')
305                 return -EINVAL;
306         while (*buf && (*buf == ' '))
307                 buf++;
308         if (!*buf)
309                 return -EINVAL;
310
311         printk(KERN_INFO"reg 0x%02lX <-- 0x%04lX\n",
312                reg, simple_strtoul(buf, NULL, 10));
313
314         __reg_write(glamo, reg, simple_strtoul(buf, NULL, 10));
315
316         return count;
317 }
318
319 static ssize_t regs_read(struct device *dev, struct device_attribute *attr,
320                         char *buf)
321 {
322         struct glamo_core *glamo = dev_get_drvdata(dev);
323         int n, n1 = 0, r;
324         char * end = buf;
325
326         spin_lock(&glamo->lock);
327
328         for (r = 0; r < ARRAY_SIZE(reg_range); r++) {
329                 if (!reg_range[r].dump)
330                         continue;
331                 n1 = 0;
332                 end += sprintf(end, "\n%s\n", reg_range[r].name);
333                 for (n = reg_range[r].start;
334                      n < reg_range[r].start + reg_range[r].count; n += 2) {
335                         if (((n1++) & 7) == 0)
336                                 end += sprintf(end, "\n%04X:  ", n);
337                         end += sprintf(end, "%04x ", __reg_read(glamo, n));
338                 }
339                 end += sprintf(end, "\n");
340                 if (!attr) {
341                         printk("%s", buf);
342                         end = buf;
343                 }
344         }
345         spin_unlock(&glamo->lock);
346
347         return end - buf;
348 }
349
350 static DEVICE_ATTR(regs, 0644, regs_read, regs_write);
351 static struct attribute *glamo_sysfs_entries[] = {
352         &dev_attr_regs.attr,
353         NULL
354 };
355 static struct attribute_group glamo_attr_group = {
356         .name   = NULL,
357         .attrs  = glamo_sysfs_entries,
358 };
359
360
361
362 /***********************************************************************
363  * 'engine' support
364  ***********************************************************************/
365
366 int __glamo_engine_enable(struct glamo_core *glamo, enum glamo_engine engine)
367 {
368         switch (engine) {
369         case GLAMO_ENGINE_LCD:
370                 __reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
371                                    GLAMO_HOSTBUS2_MMIO_EN_LCD,
372                                    GLAMO_HOSTBUS2_MMIO_EN_LCD);
373                 __reg_write(glamo, GLAMO_REG_CLOCK_LCD,
374                             GLAMO_CLOCK_LCD_EN_M5CLK |
375                             GLAMO_CLOCK_LCD_EN_DHCLK |
376                             GLAMO_CLOCK_LCD_EN_DMCLK |
377                             GLAMO_CLOCK_LCD_EN_DCLK |
378                             GLAMO_CLOCK_LCD_DG_M5CLK |
379                             GLAMO_CLOCK_LCD_DG_DMCLK);
380                 __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
381                             GLAMO_CLOCK_GEN51_EN_DIV_DHCLK |
382                             GLAMO_CLOCK_GEN51_EN_DIV_DMCLK |
383                             GLAMO_CLOCK_GEN51_EN_DIV_DCLK, 0xffff);
384                 break;
385         case GLAMO_ENGINE_MMC:
386                 __reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
387                                    GLAMO_HOSTBUS2_MMIO_EN_MMC,
388                                    GLAMO_HOSTBUS2_MMIO_EN_MMC);
389                 __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_MMC,
390                                    GLAMO_CLOCK_MMC_EN_M9CLK |
391                                    GLAMO_CLOCK_MMC_EN_TCLK |
392                                    GLAMO_CLOCK_MMC_DG_M9CLK |
393                                    GLAMO_CLOCK_MMC_DG_TCLK, 0xffff);
394                 /* enable the TCLK divider clk input */
395                 __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
396                                    GLAMO_CLOCK_GEN51_EN_DIV_TCLK,
397                                    GLAMO_CLOCK_GEN51_EN_DIV_TCLK);
398                 break;
399         case GLAMO_ENGINE_2D:
400                 __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_2D,
401                                    GLAMO_CLOCK_2D_EN_M7CLK |
402                                    GLAMO_CLOCK_2D_EN_GCLK |
403                                    GLAMO_CLOCK_2D_DG_M7CLK |
404                                    GLAMO_CLOCK_2D_DG_GCLK, 0xffff);
405                 __reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
406                                    GLAMO_HOSTBUS2_MMIO_EN_2D,
407                                    GLAMO_HOSTBUS2_MMIO_EN_2D);
408                 __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
409                                    GLAMO_CLOCK_GEN51_EN_DIV_GCLK,
410                                    0xffff);
411                 break;
412         case GLAMO_ENGINE_CMDQ:
413                 __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_2D,
414                                    GLAMO_CLOCK_2D_EN_M6CLK, 0xffff);
415                 __reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
416                                    GLAMO_HOSTBUS2_MMIO_EN_CQ,
417                                    GLAMO_HOSTBUS2_MMIO_EN_CQ);
418                 __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
419                                    GLAMO_CLOCK_GEN51_EN_DIV_MCLK,
420                                    0xffff);
421                 break;
422         /* FIXME: Implementation */
423         default:
424                 return -EINVAL;
425         }
426
427         glamo->engine_enabled_bitfield |= 1 << engine;
428
429         return 0;
430 }
431
432 int glamo_engine_enable(struct glamo_core *glamo, enum glamo_engine engine)
433 {
434         int ret;
435
436         spin_lock(&glamo->lock);
437
438         ret = __glamo_engine_enable(glamo, engine);
439
440         spin_unlock(&glamo->lock);
441
442         return ret;
443 }
444 EXPORT_SYMBOL_GPL(glamo_engine_enable);
445
446 int __glamo_engine_disable(struct glamo_core *glamo, enum glamo_engine engine)
447 {
448         switch (engine) {
449         case GLAMO_ENGINE_LCD:
450                 /* remove pixel clock to LCM */
451                 __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_LCD,
452                             GLAMO_CLOCK_LCD_EN_DCLK, 0);
453                 __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_LCD,
454                             GLAMO_CLOCK_LCD_EN_DHCLK |
455                             GLAMO_CLOCK_LCD_EN_DMCLK, 0);
456                 /* kill memory clock */
457                 __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_LCD,
458                             GLAMO_CLOCK_LCD_EN_M5CLK, 0);
459                 /* stop dividing the clocks */
460                 __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
461                             GLAMO_CLOCK_GEN51_EN_DIV_DHCLK |
462                             GLAMO_CLOCK_GEN51_EN_DIV_DMCLK |
463                             GLAMO_CLOCK_GEN51_EN_DIV_DCLK, 0);
464                 break;
465
466         case GLAMO_ENGINE_MMC:
467                 __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_MMC,
468                                    GLAMO_CLOCK_MMC_EN_M9CLK |
469                                    GLAMO_CLOCK_MMC_EN_TCLK |
470                                    GLAMO_CLOCK_MMC_DG_M9CLK |
471                                    GLAMO_CLOCK_MMC_DG_TCLK, 0);
472                 /* disable the TCLK divider clk input */
473                 __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
474                                    GLAMO_CLOCK_GEN51_EN_DIV_TCLK, 0);
475                 break;
476         case GLAMO_ENGINE_CMDQ:
477                 __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_2D,
478                                    GLAMO_CLOCK_2D_EN_M6CLK,
479                                    0);
480                 __reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
481                                    GLAMO_HOSTBUS2_MMIO_EN_CQ,
482                                    GLAMO_HOSTBUS2_MMIO_EN_CQ);
483 /*              __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
484                                    GLAMO_CLOCK_GEN51_EN_DIV_MCLK,
485                                    0);*/
486                 break;
487         case GLAMO_ENGINE_2D:
488                 __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_2D,
489                                    GLAMO_CLOCK_2D_EN_M7CLK |
490                                    GLAMO_CLOCK_2D_EN_GCLK |
491                                    GLAMO_CLOCK_2D_DG_M7CLK |
492                                    GLAMO_CLOCK_2D_DG_GCLK,
493                                    0);
494                 __reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
495                                    GLAMO_HOSTBUS2_MMIO_EN_2D,
496                                    GLAMO_HOSTBUS2_MMIO_EN_2D);
497                 __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
498                                    GLAMO_CLOCK_GEN51_EN_DIV_GCLK,
499                                    0);
500                 break;
501         default:
502                 return -EINVAL;
503         }
504
505         glamo->engine_enabled_bitfield &= ~(1 << engine);
506
507         return 0;
508 }
509 int glamo_engine_disable(struct glamo_core *glamo, enum glamo_engine engine)
510 {
511         int ret;
512
513         spin_lock(&glamo->lock);
514
515         ret = __glamo_engine_disable(glamo, engine);
516
517         spin_unlock(&glamo->lock);
518
519         return ret;
520 }
521 EXPORT_SYMBOL_GPL(glamo_engine_disable);
522
523 static const u_int16_t engine_clock_regs[__NUM_GLAMO_ENGINES] = {
524         [GLAMO_ENGINE_LCD]      = GLAMO_REG_CLOCK_LCD,
525         [GLAMO_ENGINE_MMC]      = GLAMO_REG_CLOCK_MMC,
526         [GLAMO_ENGINE_ISP]      = GLAMO_REG_CLOCK_ISP,
527         [GLAMO_ENGINE_JPEG]     = GLAMO_REG_CLOCK_JPEG,
528         [GLAMO_ENGINE_3D]       = GLAMO_REG_CLOCK_3D,
529         [GLAMO_ENGINE_2D]       = GLAMO_REG_CLOCK_2D,
530         [GLAMO_ENGINE_MPEG_ENC] = GLAMO_REG_CLOCK_MPEG,
531         [GLAMO_ENGINE_MPEG_DEC] = GLAMO_REG_CLOCK_MPEG,
532 };
533
534 void glamo_engine_clkreg_set(struct glamo_core *glamo,
535                              enum glamo_engine engine,
536                              u_int16_t mask, u_int16_t val)
537 {
538         reg_set_bit_mask(glamo, engine_clock_regs[engine], mask, val);
539 }
540 EXPORT_SYMBOL_GPL(glamo_engine_clkreg_set);
541
542 u_int16_t glamo_engine_clkreg_get(struct glamo_core *glamo,
543                                   enum glamo_engine engine)
544 {
545         u_int16_t val;
546
547         spin_lock(&glamo->lock);
548         val = __reg_read(glamo, engine_clock_regs[engine]);
549         spin_unlock(&glamo->lock);
550
551         return val;
552 }
553 EXPORT_SYMBOL_GPL(glamo_engine_clkreg_get);
554
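/*
 * Per-engine reset bits, expressed as {reg, bit} pairs: glamo_engine_reset()
 * pulses the bit by setting and then immediately clearing it under the lock.
 */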
555 struct glamo_script reset_regs[] = {
556         [GLAMO_ENGINE_LCD] = {
557                 GLAMO_REG_CLOCK_LCD, GLAMO_CLOCK_LCD_RESET
558         },
559 #if 0
560         [GLAMO_ENGINE_HOST] = {
561                 GLAMO_REG_CLOCK_HOST, GLAMO_CLOCK_HOST_RESET
562         },
563         [GLAMO_ENGINE_MEM] = {
564                 GLAMO_REG_CLOCK_MEM, GLAMO_CLOCK_MEM_RESET
565         },
566 #endif
567         [GLAMO_ENGINE_MMC] = {
568                 GLAMO_REG_CLOCK_MMC, GLAMO_CLOCK_MMC_RESET
569         },
570         [GLAMO_ENGINE_CMDQ] = {
571                 GLAMO_REG_CLOCK_2D, GLAMO_CLOCK_2D_CQ_RESET
572         },
573         [GLAMO_ENGINE_2D] = {
574                 GLAMO_REG_CLOCK_2D, GLAMO_CLOCK_2D_RESET
575         },
576         [GLAMO_ENGINE_JPEG] = {
577                 GLAMO_REG_CLOCK_JPEG, GLAMO_CLOCK_JPEG_RESET
578         },
579 };
580
581 void glamo_engine_reset(struct glamo_core *glamo, enum glamo_engine engine)
582 {
583         struct glamo_script *rst;
584
585         if (engine >= ARRAY_SIZE(reset_regs)) {
586                 dev_warn(&glamo->pdev->dev, "unknown engine %u ", engine);
587                 return;
588         }
589
590         rst = &reset_regs[engine];
591
592         spin_lock(&glamo->lock);
593         __reg_set_bit(glamo, rst->reg, rst->val);
594         __reg_clear_bit(glamo, rst->reg, rst->val);
595         spin_unlock(&glamo->lock);
596 }
597 EXPORT_SYMBOL_GPL(glamo_engine_reset);
598
599 void glamo_lcm_reset(int level)
600 {
601         if (!glamo_handle)
602                 return;
603
604         glamo_gpio_setpin(glamo_handle, GLAMO_GPIO4, level);
605         glamo_gpio_cfgpin(glamo_handle, GLAMO_GPIO4_OUTPUT);
606
607 }
608 EXPORT_SYMBOL_GPL(glamo_lcm_reset);
609
610 enum glamo_pll {
611         GLAMO_PLL1,
612         GLAMO_PLL2,
613 };
614
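/*
 * glamo_pll_rate() derives the PLL output as (osci / div) * multiplier,
 * where the multiplier is read back from GLAMO_REG_PLL_GEN1/GEN3.  With the
 * assumed 32768Hz crystal the divider is 1, so the init script's 0x05db
 * (1499) corresponds to roughly 49MHz and 0x0aba (2746) to roughly 90MHz.
 */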
615 static int glamo_pll_rate(struct glamo_core *glamo,
616                           enum glamo_pll pll)
617 {
618         u_int16_t reg;
619         unsigned int div = 512;
620         /* FIXME: move osci into platform_data */
621         unsigned int osci = 32768;
622
623         if (osci == 32768)
624                 div = 1;
625
626         switch (pll) {
627         case GLAMO_PLL1:
628                 reg = __reg_read(glamo, GLAMO_REG_PLL_GEN1);
629                 break;
630         case GLAMO_PLL2:
631                 reg = __reg_read(glamo, GLAMO_REG_PLL_GEN3);
632                 break;
633         default:
634                 return -EINVAL;
635         }
636         return (osci/div)*reg;
637 }
638
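/*
 * Reclocking takes the requested period in picoseconds: 10^9 / ps is the
 * target rate in kHz, the PLL rate divided by that (and by 1000) gives the
 * divider, and the hardware field holds divider - 1.  Only the LCD engine
 * (low byte of GLAMO_REG_CLOCK_GEN7) is handled so far.
 */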
639 int glamo_engine_reclock(struct glamo_core *glamo,
640                          enum glamo_engine engine,
641                          int ps)
642 {
643         int pll, khz;
644         u_int16_t reg, mask, val = 0;
645
646         if (!ps)
647                 return 0;
648
649         switch (engine) {
650         case GLAMO_ENGINE_LCD:
651                 pll = GLAMO_PLL1;
652                 reg = GLAMO_REG_CLOCK_GEN7;
653                 mask = 0xff;
654                 break;
655         default:
656                 dev_warn(&glamo->pdev->dev,
657                          "reclock of engine 0x%x not supported\n", engine);
658                 return -EINVAL;
659                 break;
660         }
661
662         pll = glamo_pll_rate(glamo, pll);
663         khz = 1000000000UL / ps;
664
665         if (khz)
666                 val = (pll / khz) / 1000;
667
668         dev_dbg(&glamo->pdev->dev,
669                         "PLL %d, kHZ %d, div %d\n", pll, khz, val);
670
671         if (val) {
672                 val--;
673                 reg_set_bit_mask(glamo, reg, mask, val);
674                 mdelay(5); /* wait some time to stabilize */
675
676                 return 0;
677         } else {
678                 return -EINVAL;
679         }
680 }
681 EXPORT_SYMBOL_GPL(glamo_engine_reclock);
682
683 /***********************************************************************
684  * script support
685  ***********************************************************************/
686
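/*
 * A glamo_script is a flat list of {reg, val} pairs with a few magic
 * register values: 0xffff ends the script, 0xfffe is a delay (val in ms,
 * busy-waited at 4x when sleeping is not allowed), 0xfffd spins until both
 * PLLs report lock, and writes to 0x200 are rewritten according to the
 * slow_memory module parameter.
 */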
687 int glamo_run_script(struct glamo_core *glamo, struct glamo_script *script,
688                      int len, int may_sleep)
689 {
690         int i;
691
692         for (i = 0; i < len; i++) {
693                 struct glamo_script *line = &script[i];
694
695                 switch (line->reg) {
696                 case 0xffff:
697                         return 0;
698                 case 0xfffe:
699                         if (may_sleep)
700                                 msleep(line->val);
701                         else
702                                 mdelay(line->val * 4);
703                         break;
704                 case 0xfffd:
705                         /* spin until PLLs lock */
706                         while ((__reg_read(glamo, GLAMO_REG_PLL_GEN5) & 3) != 3)
707                                 ;
708                         break;
709
710                 /*
711                  * A couple of people reported artefacts with the 2.6.28
712                  * changes; this allows reversion to the 2.6.24 settings.
713                  */
714
715                 case 0x200:
716                         switch (slow_memory) {
717                         /* choice 1 is the most conservative */
718                         case 1: /* 3 waits on Async BB R & W, Use PLL 1 for mem bus */
719                                 __reg_write(glamo, script[i].reg, 0xef0);
720                                 break;
721                         case 2: /* 2 waits on Async BB R & W, Use PLL 1 for mem bus */
722                                 __reg_write(glamo, script[i].reg, 0xea0);
723                                 break;
724                         case 3: /* 1 waits on Async BB R & W, Use PLL 1 for mem bus */
725                                 __reg_write(glamo, script[i].reg, 0xe50);
726                                 break;
727                         case 4: /* 0 waits on Async BB R & W, Use PLL 1 for mem bus */
728                                 __reg_write(glamo, script[i].reg, 0xe00);
729                                 break;
730
731                         /* using PLL2 for memory bus increases CPU bandwidth significantly */
732                         case 5: /* 3 waits on Async BB R & W, Use PLL 2 for mem bus */
733                                 __reg_write(glamo, script[i].reg, 0xef3);
734                                 break;
735                         case 6: /* 2 waits on Async BB R & W, Use PLL 2 for mem bus */
736                                 __reg_write(glamo, script[i].reg, 0xea3);
737                                 break;
738                         case 7: /* 1 waits on Async BB R & W, Use PLL 2 for mem bus */
739                                 __reg_write(glamo, script[i].reg, 0xe53);
740                                 break;
741                         /* default of 0 or >7 is fastest */
742                         default: /* 0 waits on Async BB R & W, Use PLL 2 for mem bus */
743                                 __reg_write(glamo, script[i].reg, 0xe03);
744                                 break;
745                         }
746                         break;
747
748                 default:
749                         __reg_write(glamo, script[i].reg, script[i].val);
750                         break;
751                 }
752         }
753
754         return 0;
755 }
756 EXPORT_SYMBOL(glamo_run_script);
757
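/*
 * Canned bring-up sequence: hold every engine in reset, program and lock
 * the two PLLs, enable the engine interrupt sources, then configure the
 * host bus, SDRAM timing/refresh and finally the GPIO pin muxing.  Unused
 * engines are deliberately left in reset (see the #if 0 block) to avoid MMC
 * block read "blackouts".
 */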
758 static struct glamo_script glamo_init_script[] = {
759         { GLAMO_REG_CLOCK_HOST,         0x1000 },
760                 { 0xfffe, 2 },
761         { GLAMO_REG_CLOCK_MEMORY,       0x1000 },
762         { GLAMO_REG_CLOCK_MEMORY,       0x2000 },
763         { GLAMO_REG_CLOCK_LCD,          0x1000 },
764         { GLAMO_REG_CLOCK_MMC,          0x1000 },
765         { GLAMO_REG_CLOCK_ISP,          0x1000 },
766         { GLAMO_REG_CLOCK_ISP,          0x3000 },
767         { GLAMO_REG_CLOCK_JPEG,         0x1000 },
768         { GLAMO_REG_CLOCK_3D,           0x1000 },
769         { GLAMO_REG_CLOCK_3D,           0x3000 },
770         { GLAMO_REG_CLOCK_2D,           0x1000 },
771         { GLAMO_REG_CLOCK_2D,           0x3000 },
772         { GLAMO_REG_CLOCK_RISC1,        0x1000 },
773         { GLAMO_REG_CLOCK_MPEG,         0x3000 },
774         { GLAMO_REG_CLOCK_MPEG,         0x3000 },
775         { GLAMO_REG_CLOCK_MPROC,        0x1000 /*0x100f*/ },
776                 { 0xfffe, 2 },
777         { GLAMO_REG_CLOCK_HOST,         0x0000 },
778         { GLAMO_REG_CLOCK_MEMORY,       0x0000 },
779         { GLAMO_REG_CLOCK_LCD,          0x0000 },
780         { GLAMO_REG_CLOCK_MMC,          0x0000 },
781 #if 0
782 /* unused engines must be left in reset to stop MMC block read "blackouts" */
783         { GLAMO_REG_CLOCK_ISP,          0x0000 },
784         { GLAMO_REG_CLOCK_ISP,          0x0000 },
785         { GLAMO_REG_CLOCK_JPEG,         0x0000 },
786         { GLAMO_REG_CLOCK_3D,           0x0000 },
787         { GLAMO_REG_CLOCK_3D,           0x0000 },
788         { GLAMO_REG_CLOCK_2D,           0x0000 },
789         { GLAMO_REG_CLOCK_2D,           0x0000 },
790         { GLAMO_REG_CLOCK_RISC1,        0x0000 },
791         { GLAMO_REG_CLOCK_MPEG,         0x0000 },
792         { GLAMO_REG_CLOCK_MPEG,         0x0000 },
793 #endif
794         { GLAMO_REG_PLL_GEN1,           0x05db },       /* 48MHz */
795         { GLAMO_REG_PLL_GEN3,           0x0aba },       /* 90MHz */
796         { 0xfffd, 0 },
797         /*
798          * b9 of this register MUST be zero to get any interrupts on INT#
799          * the other set bits enable all the engine interrupt sources
800          */
801         { GLAMO_REG_IRQ_ENABLE,         0x01ff },
802         { GLAMO_REG_CLOCK_GEN6,         0x2000 },
803         { GLAMO_REG_CLOCK_GEN7,         0x0101 },
804         { GLAMO_REG_CLOCK_GEN8,         0x0100 },
805         { GLAMO_REG_CLOCK_HOST,         0x000d },
806         /*
807          * b7..b4 = 0 = no wait states on read or write
808          * b0 = 1 select PLL2 for Host interface, b1 = enable it
809          */
810         { 0x200,        0x0e03 /* this is replaced by script parser */ },
811         { 0x202,        0x07ff },
812         { 0x212,        0x0000 },
813         { 0x214,        0x4000 },
814         { 0x216,        0xf00e },
815
816         /* S-Media recommended "set tiling mode to 512 mode for memory access
817          * more efficiency when 640x480" */
818         { GLAMO_REG_MEM_TYPE,           0x0c74 }, /* 8MB, 16 word pg wr+rd */
819         { GLAMO_REG_MEM_GEN,            0xafaf }, /* 63 grants min + max */
820
821         { GLAMO_REGOFS_HOSTBUS + 2,     0xffff }, /* enable  on MMIO*/
822
823         { GLAMO_REG_MEM_TIMING1,        0x0108 },
824         { GLAMO_REG_MEM_TIMING2,        0x0010 }, /* Taa = 3 MCLK */
825         { GLAMO_REG_MEM_TIMING3,        0x0000 },
826         { GLAMO_REG_MEM_TIMING4,        0x0000 }, /* CE1# delay fall/rise */
827         { GLAMO_REG_MEM_TIMING5,        0x0000 }, /* UB# LB# */
828         { GLAMO_REG_MEM_TIMING6,        0x0000 }, /* OE# */
829         { GLAMO_REG_MEM_TIMING7,        0x0000 }, /* WE# */
830         { GLAMO_REG_MEM_TIMING8,        0x1002 }, /* MCLK delay, was 0x1000 */
831         { GLAMO_REG_MEM_TIMING9,        0x6006 },
832         { GLAMO_REG_MEM_TIMING10,       0x00ff },
833         { GLAMO_REG_MEM_TIMING11,       0x0001 },
834         { GLAMO_REG_MEM_POWER1,         0x0020 },
835         { GLAMO_REG_MEM_POWER2,         0x0000 },
836         { GLAMO_REG_MEM_DRAM1,          0x0000 },
837                 { 0xfffe, 1 },
838         { GLAMO_REG_MEM_DRAM1,          0xc100 },
839                 { 0xfffe, 1 },
840         { GLAMO_REG_MEM_DRAM1,          0xe100 },
841         { GLAMO_REG_MEM_DRAM2,          0x01d6 },
842         { GLAMO_REG_CLOCK_MEMORY,       0x000b },
843         { GLAMO_REG_GPIO_GEN1,          0x000f },
844         { GLAMO_REG_GPIO_GEN2,          0x111e },
845         { GLAMO_REG_GPIO_GEN3,          0xccc3 },
846         { GLAMO_REG_GPIO_GEN4,          0x111e },
847         { GLAMO_REG_GPIO_GEN5,          0x000f },
848 };
849 #if 0
850 static struct glamo_script glamo_resume_script[] = {
851
852         { GLAMO_REG_PLL_GEN1,           0x05db },       /* 48MHz */
853         { GLAMO_REG_PLL_GEN3,           0x0aba },       /* 90MHz */
854         { GLAMO_REG_DFT_GEN6, 1 },
855                 { 0xfffe, 100 },
856                 { 0xfffd, 0 },
857         { 0x200,        0x0e03 },
858
859         /*
860          * b9 of this register MUST be zero to get any interrupts on INT#
861          * the other set bits enable all the engine interrupt sources
862          */
863         { GLAMO_REG_IRQ_ENABLE,         0x01ff },
864         { GLAMO_REG_CLOCK_HOST,         0x0018 },
865         { GLAMO_REG_CLOCK_GEN5_1, 0x18b1 },
866
867         { GLAMO_REG_MEM_DRAM1,          0x0000 },
868                 { 0xfffe, 1 },
869         { GLAMO_REG_MEM_DRAM1,          0xc100 },
870                 { 0xfffe, 1 },
871         { GLAMO_REG_MEM_DRAM1,          0xe100 },
872         { GLAMO_REG_MEM_DRAM2,          0x01d6 },
873         { GLAMO_REG_CLOCK_MEMORY,       0x000b },
874 };
875 #endif
876
877 enum glamo_power {
878         GLAMO_POWER_ON,
879         GLAMO_POWER_SUSPEND,
880 };
881
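/*
 * Suspend path: disable all engine interrupt sources, note which engines
 * were running so resume can be symmetric, shut each engine down, put the
 * SDRAM into self-refresh and then deep power-down, switch the clock tree
 * back to the crystal and finally stop both PLLs.  Resume simply hard-resets
 * the chip via the external reset line and replays glamo_init_script.
 */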
882 static void glamo_power(struct glamo_core *glamo,
883                         enum glamo_power new_state)
884 {
885         int n;
886         unsigned long flags;
887         
888         spin_lock_irqsave(&glamo->lock, flags);
889
890         dev_info(&glamo->pdev->dev, "***** glamo_power -> %d\n", new_state);
891
892         /*
893 Power management
894 static const REG_VALUE_MASK_TYPE reg_powerOn[] =
895 {
896     { REG_GEN_DFT6,     REG_BIT_ALL,    REG_DATA(1u << 0)           },
897     { REG_GEN_PLL3,     0u,             REG_DATA(1u << 13)          },
898     { REG_GEN_MEM_CLK,  REG_BIT_ALL,    REG_BIT_EN_MOCACLK          },
899     { REG_MEM_DRAM2,    0u,             REG_BIT_EN_DEEP_POWER_DOWN  },
900     { REG_MEM_DRAM1,    0u,             REG_BIT_SELF_REFRESH        }
901 };
902
903 static const REG_VALUE_MASK_TYPE reg_powerStandby[] =
904 {
905     { REG_MEM_DRAM1,    REG_BIT_ALL,    REG_BIT_SELF_REFRESH    },
906     { REG_GEN_MEM_CLK,  0u,             REG_BIT_EN_MOCACLK      },
907     { REG_GEN_PLL3,     REG_BIT_ALL,    REG_DATA(1u << 13)      },
908     { REG_GEN_DFT5,     REG_BIT_ALL,    REG_DATA(1u << 0)       }
909 };
910
911 static const REG_VALUE_MASK_TYPE reg_powerSuspend[] =
912 {
913     { REG_MEM_DRAM2,    REG_BIT_ALL,    REG_BIT_EN_DEEP_POWER_DOWN  },
914     { REG_GEN_MEM_CLK,  0u,             REG_BIT_EN_MOCACLK          },
915     { REG_GEN_PLL3,     REG_BIT_ALL,    REG_DATA(1u << 13)          },
916     { REG_GEN_DFT5,     REG_BIT_ALL,    REG_DATA(1u << 0)           }
917 };
918 */
919
920         switch (new_state) {
921         case GLAMO_POWER_ON:
922
923                 /*
924                  * The Glamo state on resume is nondeterministic in some
925                  * fundamental way; it has also been observed that the
926                  * Glamo reset pin can get asserted by, e.g., touching it
927                  * with a scope probe.  So the only answer is to roll with
928                  * it and force an external reset of the Glamo during resume.
929                  */
930
931                 (glamo->pdata->glamo_external_reset)(0);
932                 udelay(10);
933                 (glamo->pdata->glamo_external_reset)(1);
934                 mdelay(5);
935
936                 glamo_run_script(glamo, glamo_init_script,
937                          ARRAY_SIZE(glamo_init_script), 0);
938
939                 break;
940
941         case GLAMO_POWER_SUSPEND:
942
943                 /* nuke interrupts */
944                 __reg_write(glamo, GLAMO_REG_IRQ_ENABLE, 0x200);
945
946                 /* stash a copy of which engines were running */
947                 glamo->engine_enabled_bitfield_suspend =
948                                                  glamo->engine_enabled_bitfield;
949
950                 /* take down each engine before we kill mem and pll */
951                 for (n = 0; n < __NUM_GLAMO_ENGINES; n++)
952                         if (glamo->engine_enabled_bitfield & (1 << n))
953                                 __glamo_engine_disable(glamo, n);
954
955                 /* enable self-refresh */
956
957                 __reg_write(glamo, GLAMO_REG_MEM_DRAM1,
958                                         GLAMO_MEM_DRAM1_EN_DRAM_REFRESH |
959                                         GLAMO_MEM_DRAM1_EN_GATE_CKE |
960                                         GLAMO_MEM_DRAM1_SELF_REFRESH |
961                                         GLAMO_MEM_REFRESH_COUNT);
962                 __reg_write(glamo, GLAMO_REG_MEM_DRAM1,
963                                         GLAMO_MEM_DRAM1_EN_MODEREG_SET |
964                                         GLAMO_MEM_DRAM1_EN_DRAM_REFRESH |
965                                         GLAMO_MEM_DRAM1_EN_GATE_CKE |
966                                         GLAMO_MEM_DRAM1_SELF_REFRESH |
967                                         GLAMO_MEM_REFRESH_COUNT);
968
969                 /* force RAM into deep powerdown */
970
971                 __reg_write(glamo, GLAMO_REG_MEM_DRAM2,
972                                         GLAMO_MEM_DRAM2_DEEP_PWRDOWN |
973                                         (7 << 6) | /* tRC */
974                                         (1 << 4) | /* tRP */
975                                         (1 << 2) | /* tRCD */
976                                         2); /* CAS latency */
977
978                 /* disable clocks to memory */
979                 __reg_write(glamo, GLAMO_REG_CLOCK_MEMORY, 0);
980
981                 /* all dividers from OSCI */
982                 __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1, 0x400, 0x400);
983
984                 /* PLL2 into bypass */
985                 __reg_set_bit_mask(glamo, GLAMO_REG_PLL_GEN3, 1 << 12, 1 << 12);
986
987                 __reg_write(glamo, 0x200, 0x0e00);
988
989
990                 /* kill PLLS 1 then 2 */
991                 __reg_write(glamo, GLAMO_REG_DFT_GEN5, 0x0001);
992                 __reg_set_bit_mask(glamo, GLAMO_REG_PLL_GEN3, 1 << 13, 1 << 13);
993
994                 break;
995         }
996
997         spin_unlock_irqrestore(&glamo->lock, flags);
998 }
999
1000 #if 0
1001 #define MEMDETECT_RETRY 6
1002 static unsigned int detect_memsize(struct glamo_core *glamo)
1003 {
1004         int i;
1005
1006         /*static const u_int16_t pattern[] = {
1007                 0x1111, 0x8a8a, 0x2222, 0x7a7a,
1008                 0x3333, 0x6a6a, 0x4444, 0x5a5a,
1009                 0x5555, 0x4a4a, 0x6666, 0x3a3a,
1010                 0x7777, 0x2a2a, 0x8888, 0x1a1a
1011         }; */
1012
1013         for (i = 0; i < MEMDETECT_RETRY; i++) {
1014                 switch (glamo->type) {
1015                 case 3600:
1016                         __reg_write(glamo, GLAMO_REG_MEM_TYPE, 0x0072);
1017                         __reg_write(glamo, GLAMO_REG_MEM_DRAM1, 0xc100);
1018                         break;
1019                 case 3650:
1020                         switch (glamo->revision) {
1021                         case GLAMO_CORE_REV_A0:
1022                                 if (i & 1)
1023                                         __reg_write(glamo, GLAMO_REG_MEM_TYPE,
1024                                                     0x097a);
1025                                 else
1026                                         __reg_write(glamo, GLAMO_REG_MEM_TYPE,
1027                                                     0x0173);
1028
1029                                 __reg_write(glamo, GLAMO_REG_MEM_DRAM1, 0x0000);
1030                                 msleep(1);
1031                                 __reg_write(glamo, GLAMO_REG_MEM_DRAM1, 0xc100);
1032                                 break;
1033                         default:
1034                                 if (i & 1)
1035                                         __reg_write(glamo, GLAMO_REG_MEM_TYPE,
1036                                                     0x0972);
1037                                 else
1038                                         __reg_write(glamo, GLAMO_REG_MEM_TYPE,
1039                                                     0x0872);
1040
1041                                 __reg_write(glamo, GLAMO_REG_MEM_DRAM1, 0x0000);
1042                                 msleep(1);
1043                                 __reg_write(glamo, GLAMO_REG_MEM_DRAM1, 0xe100);
1044                                 break;
1045                         }
1046                         break;
1047                 case 3700:
1048                         /* FIXME */
1049                 default:
1050                         break;
1051                 }
1052
1053 #if 0
1054                 /* FIXME: finish implementation */
1055                 for (j = 0; j < 8; j++) {
1056                         __
1057 #endif
1058         }
1059
1060         return 0;
1061 }
1062 #endif
1063
1064 /* Find out if we can support this version of the Glamo chip */
1065 static int glamo_supported(struct glamo_core *glamo)
1066 {
1067         u_int16_t dev_id, rev_id; /*, memsize; */
1068
1069         dev_id = __reg_read(glamo, GLAMO_REG_DEVICE_ID);
1070         rev_id = __reg_read(glamo, GLAMO_REG_REVISION_ID);
1071
1072         switch (dev_id) {
1073         case 0x3650:
1074                 switch (rev_id) {
1075                 case GLAMO_CORE_REV_A2:
1076                         break;
1077                 case GLAMO_CORE_REV_A0:
1078                 case GLAMO_CORE_REV_A1:
1079                 case GLAMO_CORE_REV_A3:
1080                         dev_warn(&glamo->pdev->dev, "untested core revision "
1081                                  "%04x, your mileage may vary\n", rev_id);
1082                         break;
1083                 default:
1084                         dev_warn(&glamo->pdev->dev, "unknown glamo revision "
1085                                  "%04x, your mileage may vary\n", rev_id);
1086                         /* maybe should abort ? */
1087                 }
1088                 break;
1089         case 0x3600:
1090         case 0x3700:
1091         default:
1092                 dev_err(&glamo->pdev->dev, "unsupported Glamo device %04x\n",
1093                         dev_id);
1094                 return 0;
1095         }
1096
1097         dev_dbg(&glamo->pdev->dev, "Detected Glamo core %04x Revision %04x "
1098                  "(%uHz CPU / %uHz Memory)\n", dev_id, rev_id,
1099                  glamo_pll_rate(glamo, GLAMO_PLL1),
1100                  glamo_pll_rate(glamo, GLAMO_PLL2));
1101
1102         return 1;
1103 }
1104
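/*
 * Probe order matters: the chip is hard-reset and its register window
 * remapped first, the demux IRQs and sysfs node are set up, the canned init
 * script is run, and only then are the mfd_cell children (FB, MMC, SPI-GPIO)
 * registered with platform data filled in from the core's own pdata, so
 * their probes find a working, clocked core.
 */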
1105 static int __init glamo_probe(struct platform_device *pdev)
1106 {
1107         int rc = 0, irq;
1108         struct glamo_core *glamo;
1109
1110         if (glamo_handle) {
1111                 dev_err(&pdev->dev,
1112                         "This driver supports only one instance\n");
1113                 return -EBUSY;
1114         }
1115
1116         glamo = kzalloc(sizeof(*glamo), GFP_KERNEL);
1117         if (!glamo)
1118                 return -ENOMEM;
1119
1120         spin_lock_init(&glamo->lock);
1121         glamo_handle = glamo;
1122         glamo->pdev = pdev;
1123         glamo->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1124         glamo->irq = platform_get_irq(pdev, 0);
1125         glamo->pdata = pdev->dev.platform_data;
1126         if (!glamo->mem || !glamo->pdata) {
1127                 dev_err(&pdev->dev, "platform device with no MEM/PDATA ?\n");
1128                 rc = -ENOENT;
1129                 goto bail_free;
1130         }
1131
1132         /* register a number of sibling devices whose IOMEM resources
1133          * are siblings of pdev's IOMEM resource */
1134
1135         /* only remap the generic, hostbus and memory controller registers */
1136         glamo->base = ioremap(glamo->mem->start, 0x4000 /*GLAMO_REGOFS_VIDCAP*/);
        if (!glamo->base) {
                dev_err(&pdev->dev, "failed to ioremap() memory region\n");
                rc = -ENOMEM;
                goto bail_free;
        }
1141
1142         platform_set_drvdata(pdev, glamo);
1143
1144         (glamo->pdata->glamo_external_reset)(0);
1145         udelay(10);
1146         (glamo->pdata->glamo_external_reset)(1);
1147         mdelay(10);
1148
1149         /*
1150          * finally set the mfd interrupts up
1151          * can't do them earlier or sibling probes blow up
1152          */
1153
1154         for (irq = IRQ_GLAMO(0); irq <= IRQ_GLAMO(8); irq++) {
1155                 set_irq_chip(irq, &glamo_irq_chip);
1156                 set_irq_handler(irq, handle_level_irq);
1157                 set_irq_flags(irq, IRQF_VALID);
1158         }
1159
1160         if (glamo->pdata->glamo_irq_is_wired &&
1161             !glamo->pdata->glamo_irq_is_wired()) {
1162                 set_irq_chained_handler(glamo->irq, glamo_irq_demux_handler);
1163                 set_irq_type(glamo->irq, IRQ_TYPE_EDGE_FALLING);
1164                 dev_info(&pdev->dev, "Glamo interrupt registered\n");
1165                 glamo->irq_works = 1;
1166         } else {
1167                 dev_err(&pdev->dev, "Glamo interrupt not used\n");
1168                 glamo->irq_works = 0;
1169         }
1170
        /* confirm it isn't an insane version */
        if (!glamo_supported(glamo)) {
                dev_err(&pdev->dev, "This Glamo is not supported\n");
                rc = -ENODEV;
                goto bail_irq;
        }
1176
1177         /* sysfs */
1178         rc = sysfs_create_group(&pdev->dev.kobj, &glamo_attr_group);
1179         if (rc < 0) {
1180                 dev_err(&pdev->dev, "cannot create sysfs group\n");
1181                 goto bail_irq;
1182         }
1183
1184         /* init the chip with canned register set */
1185
1186         dev_dbg(&glamo->pdev->dev, "running init script\n");
1187         glamo_run_script(glamo, glamo_init_script,
1188                          ARRAY_SIZE(glamo_init_script), 1);
1189
1190         dev_info(&glamo->pdev->dev, "Glamo core PLL1: %uHz, PLL2: %uHz\n",
1191                  glamo_pll_rate(glamo, GLAMO_PLL1),
1192                  glamo_pll_rate(glamo, GLAMO_PLL2));
1193
1194         glamo->pdata->glamo = glamo;
1195
1196         /* bring MCI specific stuff over from our MFD platform data */
1197         glamo_mci_def_pdata.glamo_can_set_mci_power =
1198                                         glamo->pdata->glamo_can_set_mci_power;
1199         glamo_mci_def_pdata.glamo_mci_use_slow =
1200                                         glamo->pdata->glamo_mci_use_slow;
1201         glamo_mci_def_pdata.glamo_irq_is_wired =
1202                                         glamo->pdata->glamo_irq_is_wired;
1203         glamo_mci_def_pdata.pglamo = glamo;
1204
1205         /* register siblings */
1206         glamo_cells[GLAMO_CELL_MMC].platform_data = &glamo_mci_def_pdata;
1207         glamo_cells[GLAMO_CELL_MMC].data_size = sizeof(glamo_mci_def_pdata);
1208
1209         glamo_cells[GLAMO_CELL_FB].platform_data = glamo->pdata;
1210         glamo_cells[GLAMO_CELL_FB].data_size = sizeof(struct glamofb_platform_data);
1211
1212         glamo->pdata->spigpio_info->glamo = glamo;
1213         glamo_cells[GLAMO_CELL_SPI_GPIO].platform_data = glamo->pdata->spigpio_info;
1214         glamo_cells[GLAMO_CELL_SPI_GPIO].data_size =
1215                 sizeof(struct glamo_spigpio_info);
1216
1217         mfd_add_devices(&pdev->dev, pdev->id, glamo_cells,
1218                         ARRAY_SIZE(glamo_cells),
1219                         glamo->mem, 0);
1220
1221         /* only request the generic, hostbus and memory controller MMIO */
1222         glamo->mem = request_mem_region(glamo->mem->start,
1223                                         GLAMO_REGOFS_VIDCAP, "glamo-core");
        if (!glamo->mem) {
                dev_err(&pdev->dev, "failed to request memory region\n");
                rc = -EBUSY;
                goto bail_irq;
        }
1228
1229         return 0;
1230
1231 bail_irq:
1232         disable_irq(glamo->irq);
1233         set_irq_chained_handler(glamo->irq, NULL);
1234
1235         for (irq = IRQ_GLAMO(0); irq <= IRQ_GLAMO(8); irq++) {
1236                 set_irq_flags(irq, 0);
1237                 set_irq_chip(irq, NULL);
1238         }
1239
1240         iounmap(glamo->base);
1241 bail_free:
1242         platform_set_drvdata(pdev, NULL);
1243         glamo_handle = NULL;
1244         kfree(glamo);
1245
1246         return rc;
1247 }
1248
1249 static int glamo_remove(struct platform_device *pdev)
1250 {
1251         struct glamo_core *glamo = platform_get_drvdata(pdev);
1252         int irq;
1253
1254         disable_irq(glamo->irq);
1255         set_irq_chained_handler(glamo->irq, NULL);
1256
1257         for (irq = IRQ_GLAMO(0); irq <= IRQ_GLAMO(8); irq++) {
1258                 set_irq_flags(irq, 0);
1259                 set_irq_chip(irq, NULL);
1260         }
1261
1262         platform_set_drvdata(pdev, NULL);
1263         mfd_remove_devices(&pdev->dev);
1264         iounmap(glamo->base);
1265         release_mem_region(glamo->mem->start, GLAMO_REGOFS_VIDCAP);
1266         glamo_handle = NULL;
1267         kfree(glamo);
1268
1269         return 0;
1270 }
1271
1272 #ifdef CONFIG_PM
1273
1274 static int glamo_suspend(struct platform_device *pdev, pm_message_t state)
1275 {
1276         glamo_handle->suspending = 1;
1277         glamo_power(glamo_handle, GLAMO_POWER_SUSPEND);
1278
1279         return 0;
1280 }
1281
1282 static int glamo_resume(struct platform_device *pdev)
1283 {
1284         glamo_power(glamo_handle, GLAMO_POWER_ON);
1285         glamo_handle->suspending = 0;
1286
1287         return 0;
1288 }
1289
1290 #else
1291 #define glamo_suspend NULL
1292 #define glamo_resume  NULL
1293 #endif
1294
1295 static struct platform_driver glamo_driver = {
1296         .probe          = glamo_probe,
1297         .remove         = glamo_remove,
1298         .suspend        = glamo_suspend,
1299         .resume = glamo_resume,
1300         .driver         = {
1301                 .name   = "glamo3362",
1302                 .owner  = THIS_MODULE,
1303         },
1304 };
1305
1306 static int __init glamo_init(void)
1307 {
1308         return platform_driver_register(&glamo_driver);
1309 }
1310
1311 static void __exit glamo_cleanup(void)
1312 {
1313         platform_driver_unregister(&glamo_driver);
1314 }
1315
1316 module_init(glamo_init);
1317 module_exit(glamo_cleanup);
1318
1319 MODULE_AUTHOR("Harald Welte <laforge@openmoko.org>");
1320 MODULE_DESCRIPTION("Smedia Glamo 336x/337x core/resource driver");
1321 MODULE_LICENSE("GPL");