goldfish: R.I.P.
[openwrt.git] / target / linux / s3c24xx / files-2.6.30 / drivers / mfd / glamo / glamo-core.c
1 /* Smedia Glamo 336x/337x driver
2  *
3  * (C) 2007 by Openmoko, Inc.
4  * Author: Harald Welte <laforge@openmoko.org>
5  * All rights reserved.
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public License as
9  * published by the Free Software Foundation; either version 2 of
10  * the License, or (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
20  * MA 02111-1307 USA
21  */
22
23 #include <linux/module.h>
24 #include <linux/kernel.h>
25 #include <linux/errno.h>
26 #include <linux/string.h>
27 #include <linux/mm.h>
28 #include <linux/delay.h>
29 #include <linux/fb.h>
30 #include <linux/init.h>
31 #include <linux/irq.h>
32 #include <linux/interrupt.h>
33 #include <linux/workqueue.h>
34 #include <linux/platform_device.h>
35 #include <linux/kernel_stat.h>
36 #include <linux/spinlock.h>
37 #include <linux/mfd/core.h>
38 #include <linux/mfd/glamo.h>
39 #include <linux/glamofb.h>
40 #include <linux/io.h>
41
42 #include <asm/div64.h>
43
44 #include <linux/pm.h>
45
46 #include "glamo-regs.h"
47 #include "glamo-core.h"
48
49 #define GLAMO_MEM_REFRESH_COUNT 0x100
50
51 /*
52  * Glamo internal settings
53  *
54  * We run the memory interface from the faster PLLB on 2.6.28 kernels and
55  * above.  Couple of GTA02 users report trouble with memory bus when they
56  * upgraded from 2.6.24.  So this parameter allows reversion to 2.6.24
57  * scheme if their Glamo chip needs it.
58  *
59  * you can override the faster default on kernel commandline using
60  *
61  *   glamo3362.slow_memory=1
62  *
63  * for example
64  */
65
66 static int slow_memory = 0;
67 module_param(slow_memory, int, 0644);
68
/* One dumpable window of the Glamo register map, used by the sysfs
 * "regs" attribute (regs_read) to pretty-print register contents. */
struct reg_range {
	int start;	/* first register offset of the window */
	int count;	/* number of bytes covered (registers are 16-bit) */
	char *name;	/* human-readable window name printed as a heading */
	char dump;	/* non-zero: include this window in the dump */
};

/* Register windows of the Glamo 336x/337x.  Entries commented out are
 * engines this driver does not touch (sensor/ISP/JPEG/MPEG/3D etc.);
 * the 2D unit is listed but not dumped by default. */
struct reg_range reg_range[] = {
	{ 0x0000, 0x76,		"General",	1 },
	{ 0x0200, 0x18,		"Host Bus",	1 },
	{ 0x0300, 0x38,		"Memory",	1 },
/*	{ 0x0400, 0x100,	"Sensor",	0 }, */
/*		{ 0x0500, 0x300,	"ISP",		0 }, */
/*		{ 0x0800, 0x400,	"JPEG",		0 }, */
/*		{ 0x0c00, 0xcc,		"MPEG",		0 }, */
	{ 0x1100, 0xb2,		"LCD 1",	1 },
	{ 0x1200, 0x64,		"LCD 2",	1 },
	{ 0x1400, 0x42,		"MMC",		1 },
/*		{ 0x1500, 0x080,	"MPU 0",	0 },
	{ 0x1580, 0x080,	"MPU 1",	0 },
	{ 0x1600, 0x080,	"Cmd Queue",	0 },
	{ 0x1680, 0x080,	"RISC CPU",	0 },*/
	{ 0x1700, 0x400,	"2D Unit",	0 },
/*	{ 0x1b00, 0x900,	"3D Unit",	0 }, */
};
94
/* Raw 16-bit MMIO write of @val to Glamo register offset @reg.
 * No locking: callers serialise via glamo->lock where needed. */
static inline void __reg_write(struct glamo_core *glamo,
				u_int16_t reg, u_int16_t val)
{
	writew(val, glamo->base + reg);
}
100
/* Raw 16-bit MMIO read of Glamo register offset @reg.
 * No locking: callers serialise via glamo->lock where needed. */
static inline u_int16_t __reg_read(struct glamo_core *glamo,
				   u_int16_t reg)
{
	return readw(glamo->base + reg);
}
106
107 static void __reg_set_bit_mask(struct glamo_core *glamo,
108                                 u_int16_t reg, u_int16_t mask,
109                                 u_int16_t val)
110 {
111         u_int16_t tmp;
112
113         val &= mask;
114
115         tmp = __reg_read(glamo, reg);
116         tmp &= ~mask;
117         tmp |= val;
118         __reg_write(glamo, reg, tmp);
119 }
120
/* Locked variant of __reg_set_bit_mask(): updates the masked bits of
 * @reg under glamo->lock. */
static void reg_set_bit_mask(struct glamo_core *glamo,
				u_int16_t reg, u_int16_t mask,
				u_int16_t val)
{
	spin_lock(&glamo->lock);
	__reg_set_bit_mask(glamo, reg, mask, val);
	spin_unlock(&glamo->lock);
}
129
/* Set the bits of @bit in register @reg (lockless read-modify-write). */
static inline void __reg_set_bit(struct glamo_core *glamo,
				 u_int16_t reg, u_int16_t bit)
{
	__reg_set_bit_mask(glamo, reg, bit, 0xffff);
}
135
/* Clear the bits of @bit in register @reg (lockless read-modify-write). */
static inline void __reg_clear_bit(struct glamo_core *glamo,
				   u_int16_t reg, u_int16_t bit)
{
	__reg_set_bit_mask(glamo, reg, bit, 0);
}
141
142 /***********************************************************************
143  * resources of sibling devices
144  ***********************************************************************/
145
/* Resources handed to the glamo-fb MFD cell: the LCD register window
 * and the framebuffer area of Glamo local memory.  Offsets are relative
 * to the parent device's mapping (remapped by mfd_add_devices). */
static struct resource glamo_fb_resources[] = {
	{
		.name	= "glamo-fb-regs",
		.start	= GLAMO_REGOFS_LCD,
		.end	= GLAMO_REGOFS_MMC - 1,
		.flags	= IORESOURCE_MEM,
	}, {
		.name	= "glamo-fb-mem",
		.start	= GLAMO_OFFSET_FB,
		.end	= GLAMO_OFFSET_FB + GLAMO_FB_SIZE - 1,
		.flags	= IORESOURCE_MEM,
	},
};
159
/* Resources handed to the glamo-mci MFD cell: MMC register window, the
 * demuxed MMC interrupt, and a data-buffer area placed in Glamo local
 * memory directly behind the framebuffer. */
static struct resource glamo_mmc_resources[] = {
	{
		.start	= GLAMO_REGOFS_MMC,
		.end	= GLAMO_REGOFS_MPROC0 - 1,
		.flags	= IORESOURCE_MEM
	}, {
		.start	= IRQ_GLAMO_MMC,
		.end	= IRQ_GLAMO_MMC,
		.flags	= IORESOURCE_IRQ,
	}, { /* our data buffer for MMC transfers */
		.start	= GLAMO_OFFSET_FB + GLAMO_FB_SIZE,
		.end	= GLAMO_OFFSET_FB + GLAMO_FB_SIZE +
				  GLAMO_MMC_BUFFER_SIZE - 1,
		.flags	= IORESOURCE_MEM
	},
};
176
/* Indices into glamo_cells[] for the sibling platform devices. */
enum glamo_cells {
	GLAMO_CELL_FB,
	GLAMO_CELL_MMC,
	GLAMO_CELL_GPIO,
};

/* MFD sub-device descriptors registered from the core probe:
 * framebuffer, MMC host and GPIO expander.  The GPIO cell carries no
 * resources of its own. */
static struct mfd_cell glamo_cells[] = {
	[GLAMO_CELL_FB] = {
		.name = "glamo-fb",
		.num_resources = ARRAY_SIZE(glamo_fb_resources),
		.resources = glamo_fb_resources,
	},
	[GLAMO_CELL_MMC] = {
		.name = "glamo-mci",
		.num_resources = ARRAY_SIZE(glamo_mmc_resources),
		.resources = glamo_mmc_resources,
	},
	[GLAMO_CELL_GPIO] = {
		.name = "glamo-gpio",
	},
};
198
199
200 /***********************************************************************
201  * IRQ demultiplexer
202  ***********************************************************************/
203 #define irq2glamo(x)    (x - IRQ_GLAMO(0))
204
205 static void glamo_ack_irq(unsigned int irq)
206 {
207         struct glamo_core *glamo = (struct glamo_core*)get_irq_chip_data(irq);
208         /* clear interrupt source */
209         __reg_write(glamo, GLAMO_REG_IRQ_CLEAR,
210                     1 << irq2glamo(irq));
211 }
212
213 static void glamo_mask_irq(unsigned int irq)
214 {
215         struct glamo_core *glamo = (struct glamo_core*)get_irq_chip_data(irq);
216         u_int16_t tmp;
217
218         /* clear bit in enable register */
219         tmp = __reg_read(glamo, GLAMO_REG_IRQ_ENABLE);
220         tmp &= ~(1 << irq2glamo(irq));
221         __reg_write(glamo, GLAMO_REG_IRQ_ENABLE, tmp);
222 }
223
224 static void glamo_unmask_irq(unsigned int irq)
225 {
226         struct glamo_core *glamo = (struct glamo_core*)get_irq_chip_data(irq);
227         u_int16_t tmp;
228
229         /* set bit in enable register */
230         tmp = __reg_read(glamo, GLAMO_REG_IRQ_ENABLE);
231         tmp |= (1 << irq2glamo(irq));
232         __reg_write(glamo, GLAMO_REG_IRQ_ENABLE, tmp);
233 }
234
/* irq_chip for the virtual IRQs demultiplexed from the Glamo INT# line
 * (pre-2.6.30 irq_chip API: .ack/.mask/.unmask take the irq number). */
static struct irq_chip glamo_irq_chip = {
	.name	= "glamo",
	.ack	= glamo_ack_irq,
	.mask	= glamo_mask_irq,
	.unmask	= glamo_unmask_irq,
};
241
/* Chained handler for the single Glamo INT# line.  Reads the IRQ status
 * register and dispatches every pending source to its virtual IRQ.
 *
 * The IRQ_PENDING / IRQ_MASKED / IRQ_INPROGRESS bookkeeping replicates
 * the generic level-type flow handler of this kernel generation: if the
 * handler re-enters on another CPU, the interrupt is marked pending and
 * masked, and the loop at the bottom replays it once the first pass
 * finishes. */
static void glamo_irq_demux_handler(unsigned int irq, struct irq_desc *desc)
{
	struct glamo_core *glamo = get_irq_desc_chip_data(desc);
	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);

	if (unlikely(desc->status & IRQ_INPROGRESS)) {
		/* already being handled elsewhere: remember and mask */
		desc->status |= (IRQ_PENDING | IRQ_MASKED);
		desc->chip->mask(irq);
		desc->chip->ack(irq);
		return;
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	desc->chip->ack(irq);
	desc->status |= IRQ_INPROGRESS;

	do {
		u_int16_t irqstatus;
		int i;

		if (unlikely((desc->status &
				(IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
				(IRQ_PENDING | IRQ_MASKED))) {
			/* dealing with pending IRQ, unmasking */
			desc->chip->unmask(irq);
			desc->status &= ~IRQ_MASKED;
		}

		desc->status &= ~IRQ_PENDING;

		/* read IRQ status register and fan out the set bits
		 * (9 demuxed sources) to their virtual IRQs */
		irqstatus = __reg_read(glamo, GLAMO_REG_IRQ_STATUS);
		for (i = 0; i < 9; i++)
			if (irqstatus & (1 << i))
				desc_handle_irq(IRQ_GLAMO(i),
				    irq_desc+IRQ_GLAMO(i));

	} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);

	desc->status &= ~IRQ_INPROGRESS;
}
283
284
285 static ssize_t regs_write(struct device *dev, struct device_attribute *attr,
286                            const char *buf, size_t count)
287 {
288         unsigned long reg = simple_strtoul(buf, NULL, 10);
289         struct glamo_core *glamo = dev_get_drvdata(dev);
290
291         while (*buf && (*buf != ' '))
292                 buf++;
293         if (*buf != ' ')
294                 return -EINVAL;
295         while (*buf && (*buf == ' '))
296                 buf++;
297         if (!*buf)
298                 return -EINVAL;
299
300         printk(KERN_INFO"reg 0x%02lX <-- 0x%04lX\n",
301                reg, simple_strtoul(buf, NULL, 10));
302
303         __reg_write(glamo, reg, simple_strtoul(buf, NULL, 10));
304
305         return count;
306 }
307
/* sysfs "regs" show: dump every reg_range[] window with .dump set.
 *
 * When called via sysfs (@attr != NULL) the text accumulates in @buf
 * and its length is returned.
 * NOTE(review): the total output is not bounded against the PAGE_SIZE
 * sysfs buffer — assumed to fit for the enabled windows; verify if
 * more ranges are ever enabled.
 * When called internally with @attr == NULL, each window is printk'd
 * and @buf is reused, so 0 is effectively returned. */
static ssize_t regs_read(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct glamo_core *glamo = dev_get_drvdata(dev);
	int n, n1 = 0, r;
	char * end = buf;

	spin_lock(&glamo->lock);

	for (r = 0; r < ARRAY_SIZE(reg_range); r++) {
		if (!reg_range[r].dump)
			continue;
		n1 = 0;
		end += sprintf(end, "\n%s\n", reg_range[r].name);
		for (n = reg_range[r].start;
		     n < reg_range[r].start + reg_range[r].count; n += 2) {
			/* new line with address label every 8 registers */
			if (((n1++) & 7) == 0)
				end += sprintf(end, "\n%04X:  ", n);
			end += sprintf(end, "%04x ", __reg_read(glamo, n));
		}
		end += sprintf(end, "\n");
		if (!attr) {
			printk("%s", buf);
			end = buf;
		}
	}
	spin_unlock(&glamo->lock);

	return end - buf;
}
338
/* sysfs plumbing: a single "regs" attribute (world-readable,
 * root-writable) grouped for sysfs_create_group() at probe time. */
static DEVICE_ATTR(regs, 0644, regs_read, regs_write);
static struct attribute *glamo_sysfs_entries[] = {
	&dev_attr_regs.attr,
	NULL
};
static struct attribute_group glamo_attr_group = {
	.name	= NULL,
	.attrs	= glamo_sysfs_entries,
};
348
349
350
351 /***********************************************************************
352  * 'engine' support
353  ***********************************************************************/
354
/* Power up one Glamo engine: enable its MMIO decode on the host bus,
 * ungate its clocks and enable the relevant clock dividers, then record
 * it in engine_enabled_bitfield.  Lockless — use glamo_engine_enable()
 * for the locked variant.  Returns 0, or -EINVAL for engines not
 * handled here. */
int __glamo_engine_enable(struct glamo_core *glamo, enum glamo_engine engine)
{
	switch (engine) {
	case GLAMO_ENGINE_LCD:
		/* route the LCD register window onto the host bus */
		__reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
				   GLAMO_HOSTBUS2_MMIO_EN_LCD,
				   GLAMO_HOSTBUS2_MMIO_EN_LCD);
		/* ungate all LCD clocks */
		__reg_write(glamo, GLAMO_REG_CLOCK_LCD,
			    GLAMO_CLOCK_LCD_EN_M5CLK |
			    GLAMO_CLOCK_LCD_EN_DHCLK |
			    GLAMO_CLOCK_LCD_EN_DMCLK |
			    GLAMO_CLOCK_LCD_EN_DCLK |
			    GLAMO_CLOCK_LCD_DG_M5CLK |
			    GLAMO_CLOCK_LCD_DG_DMCLK);
		/* enable the LCD clock dividers */
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
			    GLAMO_CLOCK_GEN51_EN_DIV_DHCLK |
			    GLAMO_CLOCK_GEN51_EN_DIV_DMCLK |
			    GLAMO_CLOCK_GEN51_EN_DIV_DCLK, 0xffff);
		break;
	case GLAMO_ENGINE_MMC:
		__reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
				   GLAMO_HOSTBUS2_MMIO_EN_MMC,
				   GLAMO_HOSTBUS2_MMIO_EN_MMC);
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_MMC,
				   GLAMO_CLOCK_MMC_EN_M9CLK |
				   GLAMO_CLOCK_MMC_EN_TCLK |
				   GLAMO_CLOCK_MMC_DG_M9CLK |
				   GLAMO_CLOCK_MMC_DG_TCLK,
				   0xffff);
		/* enable the TCLK divider clk input */
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
						 GLAMO_CLOCK_GEN51_EN_DIV_TCLK,
						 GLAMO_CLOCK_GEN51_EN_DIV_TCLK);
		break;
	case GLAMO_ENGINE_2D:
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_2D,
				   GLAMO_CLOCK_2D_EN_M7CLK |
				   GLAMO_CLOCK_2D_EN_GCLK |
				   GLAMO_CLOCK_2D_DG_M7CLK |
				   GLAMO_CLOCK_2D_DG_GCLK, 0xffff);
		__reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
				   GLAMO_HOSTBUS2_MMIO_EN_2D,
				   GLAMO_HOSTBUS2_MMIO_EN_2D);
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
				   GLAMO_CLOCK_GEN51_EN_DIV_GCLK,
						   0xffff);
		break;
	case GLAMO_ENGINE_CMDQ:
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_2D,
				   GLAMO_CLOCK_2D_EN_M6CLK, 0xffff);
		__reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
				   GLAMO_HOSTBUS2_MMIO_EN_CQ,
				   GLAMO_HOSTBUS2_MMIO_EN_CQ);
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
				   GLAMO_CLOCK_GEN51_EN_DIV_MCLK,
						   0xffff);
		break;
	/* FIXME: Implementation */
	default:
		return -EINVAL;
	}

	glamo->engine_enabled_bitfield |= 1 << engine;

	return 0;
}
421
/* Locked wrapper around __glamo_engine_enable().  Returns 0 or -EINVAL
 * for unsupported engines. */
int glamo_engine_enable(struct glamo_core *glamo, enum glamo_engine engine)
{
	int ret;

	spin_lock(&glamo->lock);

	ret = __glamo_engine_enable(glamo, engine);

	spin_unlock(&glamo->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(glamo_engine_enable);
435
/* Power down one Glamo engine: gate its clocks and dividers and clear
 * its bit in engine_enabled_bitfield.  Lockless — use
 * glamo_engine_disable() for the locked variant.  Returns 0, or
 * -EINVAL for engines not handled here.
 * NOTE(review): the CMDQ and 2D cases appear to leave (or re-enable)
 * the MMIO decode bit rather than clearing it — kept as-is; confirm
 * against the hardware documentation before changing. */
int __glamo_engine_disable(struct glamo_core *glamo, enum glamo_engine engine)
{
	switch (engine) {
	case GLAMO_ENGINE_LCD:
		/* remove pixel clock to LCM */
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_LCD,
			    GLAMO_CLOCK_LCD_EN_DCLK, 0);
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_LCD,
			    GLAMO_CLOCK_LCD_EN_DHCLK |
			    GLAMO_CLOCK_LCD_EN_DMCLK, 0);
		/* kill memory clock */
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_LCD,
			    GLAMO_CLOCK_LCD_EN_M5CLK, 0);
		/* stop dividing the clocks */
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
			    GLAMO_CLOCK_GEN51_EN_DIV_DHCLK |
			    GLAMO_CLOCK_GEN51_EN_DIV_DMCLK |
			    GLAMO_CLOCK_GEN51_EN_DIV_DCLK, 0);
		break;

	case GLAMO_ENGINE_MMC:
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_MMC,
						   GLAMO_CLOCK_MMC_EN_M9CLK |
						   GLAMO_CLOCK_MMC_EN_TCLK |
						   GLAMO_CLOCK_MMC_DG_M9CLK |
						   GLAMO_CLOCK_MMC_DG_TCLK, 0);
		/* disable the TCLK divider clk input */
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
					GLAMO_CLOCK_GEN51_EN_DIV_TCLK, 0);
		break;
	case GLAMO_ENGINE_CMDQ:
			__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_2D,
					   GLAMO_CLOCK_2D_EN_M6CLK,
							   0);
			__reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
					   GLAMO_HOSTBUS2_MMIO_EN_CQ,
					   GLAMO_HOSTBUS2_MMIO_EN_CQ);
/*			__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
					   GLAMO_CLOCK_GEN51_EN_DIV_MCLK,
							   0);*/
		break;
	case GLAMO_ENGINE_2D:
			__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_2D,
							   GLAMO_CLOCK_2D_EN_M7CLK |
							   GLAMO_CLOCK_2D_EN_GCLK |
							   GLAMO_CLOCK_2D_DG_M7CLK |
							   GLAMO_CLOCK_2D_DG_GCLK,
							   0);
			__reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
					   GLAMO_HOSTBUS2_MMIO_EN_2D,
					   GLAMO_HOSTBUS2_MMIO_EN_2D);
			__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
					   GLAMO_CLOCK_GEN51_EN_DIV_GCLK,
							   0);
		break;
	default:
		return -EINVAL;
	}

	glamo->engine_enabled_bitfield &= ~(1 << engine);

	return 0;
}
/* Locked wrapper around __glamo_engine_disable().  Returns 0 or
 * -EINVAL for unsupported engines. */
int glamo_engine_disable(struct glamo_core *glamo, enum glamo_engine engine)
{
	int ret;

	spin_lock(&glamo->lock);

	ret = __glamo_engine_disable(glamo, engine);

	spin_unlock(&glamo->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(glamo_engine_disable);
512
/* Per-engine clock control register, indexed by enum glamo_engine.
 * MPEG encoder and decoder share one register. */
static const u_int16_t engine_clock_regs[__NUM_GLAMO_ENGINES] = {
	[GLAMO_ENGINE_LCD]	= GLAMO_REG_CLOCK_LCD,
	[GLAMO_ENGINE_MMC]	= GLAMO_REG_CLOCK_MMC,
	[GLAMO_ENGINE_ISP]	= GLAMO_REG_CLOCK_ISP,
	[GLAMO_ENGINE_JPEG]	= GLAMO_REG_CLOCK_JPEG,
	[GLAMO_ENGINE_3D]	= GLAMO_REG_CLOCK_3D,
	[GLAMO_ENGINE_2D]	= GLAMO_REG_CLOCK_2D,
	[GLAMO_ENGINE_MPEG_ENC] = GLAMO_REG_CLOCK_MPEG,
	[GLAMO_ENGINE_MPEG_DEC] = GLAMO_REG_CLOCK_MPEG,
};
523
/* Update the masked bits of @engine's clock register under glamo->lock.
 * No bounds check on @engine — callers must pass a valid engine. */
void glamo_engine_clkreg_set(struct glamo_core *glamo,
			     enum glamo_engine engine,
			     u_int16_t mask, u_int16_t val)
{
	reg_set_bit_mask(glamo, engine_clock_regs[engine], mask, val);
}
EXPORT_SYMBOL_GPL(glamo_engine_clkreg_set);
531
/* Read @engine's clock register under glamo->lock.  No bounds check on
 * @engine — callers must pass a valid engine. */
u_int16_t glamo_engine_clkreg_get(struct glamo_core *glamo,
				  enum glamo_engine engine)
{
	u_int16_t val;

	spin_lock(&glamo->lock);
	val = __reg_read(glamo, engine_clock_regs[engine]);
	spin_unlock(&glamo->lock);

	return val;
}
EXPORT_SYMBOL_GPL(glamo_engine_clkreg_get);
544
/* Per-engine divider-enable location: .reg is the register, .val the
 * enable bit.  Only LCD, MMC and 2D have divider controls; other
 * entries are zero-filled. */
static const struct glamo_script engine_div_regs[__NUM_GLAMO_ENGINES] = {
	[GLAMO_ENGINE_LCD] = {GLAMO_REG_CLOCK_GEN5_1, GLAMO_CLOCK_GEN51_EN_DIV_DCLK},
	[GLAMO_ENGINE_MMC] = {GLAMO_REG_CLOCK_GEN5_1, GLAMO_CLOCK_GEN51_EN_DIV_TCLK},
	[GLAMO_ENGINE_2D]  = {GLAMO_REG_CLOCK_GEN5_1, GLAMO_CLOCK_GEN51_EN_DIV_GCLK},
};
550
551 void glamo_engine_div_enable(struct glamo_core *glamo, enum glamo_engine engine)
552 {
553         uint16_t reg = engine_div_regs[engine].reg;
554         uint16_t bit = engine_div_regs[engine].val;
555         uint16_t val;
556
557         spin_lock(&glamo->lock);
558         val = __reg_read(glamo, reg);
559         __reg_write(glamo, reg, val | bit);
560         spin_unlock(&glamo->lock);
561         mdelay(5);
562 }
563 EXPORT_SYMBOL_GPL(glamo_engine_div_enable);
564
565 void glamo_engine_div_disable(struct glamo_core *glamo, enum glamo_engine engine)
566 {
567         uint16_t reg = engine_div_regs[engine].reg;
568         uint16_t bit = engine_div_regs[engine].val;
569         uint16_t val;
570
571         spin_lock(&glamo->lock);
572         val = __reg_read(glamo, reg);
573         __reg_write(glamo, reg, val & ~bit);
574         spin_unlock(&glamo->lock);
575 }
576 EXPORT_SYMBOL_GPL(glamo_engine_div_disable);
577
/* Per-engine reset location: .reg is the clock register, .val the
 * reset bit pulsed by glamo_engine_reset().  Engines without an entry
 * are zero-filled (no reset support here). */
static const struct glamo_script reset_regs[] = {
	[GLAMO_ENGINE_LCD] = {
		GLAMO_REG_CLOCK_LCD, GLAMO_CLOCK_LCD_RESET
	},
#if 0
	[GLAMO_ENGINE_HOST] = {
		GLAMO_REG_CLOCK_HOST, GLAMO_CLOCK_HOST_RESET
	},
	[GLAMO_ENGINE_MEM] = {
		GLAMO_REG_CLOCK_MEM, GLAMO_CLOCK_MEM_RESET
	},
#endif
	[GLAMO_ENGINE_MMC] = {
		GLAMO_REG_CLOCK_MMC, GLAMO_CLOCK_MMC_RESET
	},
	[GLAMO_ENGINE_CMDQ] = {
		GLAMO_REG_CLOCK_2D, GLAMO_CLOCK_2D_CQ_RESET
	},
	[GLAMO_ENGINE_2D] = {
		GLAMO_REG_CLOCK_2D, GLAMO_CLOCK_2D_RESET
	},
	[GLAMO_ENGINE_JPEG] = {
		GLAMO_REG_CLOCK_JPEG, GLAMO_CLOCK_JPEG_RESET
	},
};
603
604 void glamo_engine_reset(struct glamo_core *glamo, enum glamo_engine engine)
605 {
606         uint16_t reg = reset_regs[engine].reg;
607         uint16_t val = reset_regs[engine].val;
608
609         if (engine >= ARRAY_SIZE(reset_regs)) {
610                 dev_warn(&glamo->pdev->dev, "unknown engine %u ", engine);
611                 return;
612         }
613
614
615         spin_lock(&glamo->lock);
616         __reg_set_bit(glamo, reg, val);
617         __reg_clear_bit(glamo, reg, val);
618         spin_unlock(&glamo->lock);
619 }
620 EXPORT_SYMBOL_GPL(glamo_engine_reset);
621
/* Return the current rate of @pll in Hz: the PLL generator register
 * holds the multiplier applied to the board's oscillator clock
 * (pdata->osci_clock_rate).  Returns -EINVAL for an unknown PLL.
 * Lockless register read. */
int glamo_pll_rate(struct glamo_core *glamo,
			  enum glamo_pll pll)
{
	u_int16_t reg;
	unsigned int osci = glamo->pdata->osci_clock_rate;

	switch (pll) {
	case GLAMO_PLL1:
		reg = __reg_read(glamo, GLAMO_REG_PLL_GEN1);
		break;
	case GLAMO_PLL2:
		reg = __reg_read(glamo, GLAMO_REG_PLL_GEN3);
		break;
	default:
		return -EINVAL;
	}
	return osci*reg;
}
EXPORT_SYMBOL_GPL(glamo_pll_rate);
641
/* Reprogram @engine's clock divider so its clock is as close as
 * possible to (but not above) @hz, derived from the engine's source
 * PLL.  Only LCD and MMC are supported.  Returns the rate actually
 * programmed in Hz, or -EINVAL for hz == 0 / unsupported engines. */
int glamo_engine_reclock(struct glamo_core *glamo,
			 enum glamo_engine engine,
			 int hz)
{
	int pll;
	u_int16_t reg, mask, div;

	if (!hz)
		return -EINVAL;

	switch (engine) {
	case GLAMO_ENGINE_LCD:
		pll = GLAMO_PLL1;
		reg = GLAMO_REG_CLOCK_GEN7;
		mask = 0xff;
		break;
	case GLAMO_ENGINE_MMC:
		pll = GLAMO_PLL1;
		reg = GLAMO_REG_CLOCK_GEN8;
		mask = 0xff;
		break;
	default:
		dev_warn(&glamo->pdev->dev,
			 "reclock of engine 0x%x not supported\n", engine);
		return -EINVAL;
		break;
	}

	/* pll now holds the source PLL rate in Hz */
	pll = glamo_pll_rate(glamo, pll);

	div = pll / hz;

	/* hardware divides by (div + 1): step down so the resulting
	 * rate does not exceed the requested hz */
	if (div != 0 && pll / div <= hz)
		--div;

	if (div > mask)
		div = mask;

	dev_dbg(&glamo->pdev->dev,
			"PLL %d, kHZ %d, div %d\n", pll, hz / 1000, div);

	reg_set_bit_mask(glamo, reg, mask, div);
	mdelay(5); /* wait some time to stabilize */

	return pll / (div + 1);
}
EXPORT_SYMBOL_GPL(glamo_engine_reclock);
689
690 /***********************************************************************
691  * script support
692  ***********************************************************************/
693
/* Execute a register script: an array of {reg, val} pairs written in
 * order, with three magic register values:
 *   0xffff — end of script (early return),
 *   0xfffe — delay: msleep(val) if @may_sleep, else mdelay(val * 4),
 *   0xfffd — busy-wait until both PLL lock bits in PLL_GEN5 are set,
 *   0x0200 — memory-interface setup, where the scripted value is
 *            overridden according to the "slow_memory" module parameter
 *            (see the comment at its definition).
 * Lockless register writes.  Returns 0. */
int glamo_run_script(struct glamo_core *glamo, const struct glamo_script *script,
		     int len, int may_sleep)
{
	int i;
	const struct glamo_script *line = script;

	for (i = 0; i < len; ++i, ++line) {
		switch (line->reg) {
		case 0xffff:
			return 0;
		case 0xfffe:
			if (may_sleep)
				msleep(line->val);
			else
				mdelay(line->val * 4);
			break;
		case 0xfffd:
			/* spin until PLLs lock */
			while ((__reg_read(glamo, GLAMO_REG_PLL_GEN5) & 3) != 3)
				;
			break;

		/*
		 * couple of people reported artefacts with 2.6.28 changes, this
		 * allows reversion to 2.6.24 settings
		 */

		case 0x200:
			switch (slow_memory) {
			/* choice 1 is the most conservative */
			case 1: /* 3 waits on Async BB R & W, Use PLL 1 for mem bus */
				__reg_write(glamo, script[i].reg, 0xef0);
				break;
			case 2: /* 2 waits on Async BB R & W, Use PLL 1 for mem bus */
				__reg_write(glamo, script[i].reg, 0xea0);
				break;
			case 3: /* 1 waits on Async BB R & W, Use PLL 1 for mem bus */
				__reg_write(glamo, script[i].reg, 0xe50);
				break;
			case 4: /* 0 waits on Async BB R & W, Use PLL 1 for mem bus */
				__reg_write(glamo, script[i].reg, 0xe00);
				break;

			/* using PLL2 for memory bus increases CPU bandwidth significantly */
			case 5: /* 3 waits on Async BB R & W, Use PLL 2 for mem bus */
				__reg_write(glamo, script[i].reg, 0xef3);
				break;
			case 6: /* 2 waits on Async BB R & W, Use PLL 2 for mem bus */
				__reg_write(glamo, script[i].reg, 0xea3);
				break;
			case 7: /* 1 waits on Async BB R & W, Use PLL 2 for mem bus */
				__reg_write(glamo, script[i].reg, 0xe53);
				break;
			/* default of 0 or >7 is fastest */
			default: /* 0 waits on Async BB R & W, Use PLL 2 for mem bus */
				__reg_write(glamo, script[i].reg, 0xe03);
				break;
			}
			break;

		default:
			__reg_write(glamo, script[i].reg, script[i].val);
			break;
		}
	}

	return 0;
}
EXPORT_SYMBOL(glamo_run_script);
762 EXPORT_SYMBOL(glamo_run_script);
763
/*
 * Canned register sequence that brings the Glamo out of external reset
 * into a usable state; executed in order by glamo_run_script().
 *
 * Two offsets are magic markers rather than real registers (NOTE(review):
 * inferred from usage pattern — confirm against glamo_run_script()):
 *   0xfffe — presumably "delay <value> ms" between writes
 *   0xfffd — presumably "wait for PLL lock" before continuing
 */
static const struct glamo_script glamo_init_script[] = {
	/* put every engine clock register into its 0x1000/0x3000 reset
	 * state while the chip settles */
	{ GLAMO_REG_CLOCK_HOST,         0x1000 },
		{ 0xfffe, 2 },
	{ GLAMO_REG_CLOCK_MEMORY,       0x1000 },
	{ GLAMO_REG_CLOCK_MEMORY,       0x2000 },
	{ GLAMO_REG_CLOCK_LCD,          0x1000 },
	{ GLAMO_REG_CLOCK_MMC,          0x1000 },
	{ GLAMO_REG_CLOCK_ISP,          0x1000 },
	{ GLAMO_REG_CLOCK_ISP,          0x3000 },
	{ GLAMO_REG_CLOCK_JPEG,         0x1000 },
	{ GLAMO_REG_CLOCK_3D,           0x1000 },
	{ GLAMO_REG_CLOCK_3D,           0x3000 },
	{ GLAMO_REG_CLOCK_2D,           0x1000 },
	{ GLAMO_REG_CLOCK_2D,           0x3000 },
	{ GLAMO_REG_CLOCK_RISC1,        0x1000 },
	/* NOTE(review): MPEG is written twice with the same 0x3000 value
	 * while the other engines get 0x1000 then 0x3000 — possibly
	 * intentional, possibly a typo; left as-is */
	{ GLAMO_REG_CLOCK_MPEG,         0x3000 },
	{ GLAMO_REG_CLOCK_MPEG,         0x3000 },
	{ GLAMO_REG_CLOCK_MPROC,        0x1000 /*0x100f*/ },
		{ 0xfffe, 2 },
	/* release reset only on the engines the core actually uses */
	{ GLAMO_REG_CLOCK_HOST,         0x0000 },
	{ GLAMO_REG_CLOCK_MEMORY,       0x0000 },
	{ GLAMO_REG_CLOCK_LCD,          0x0000 },
	{ GLAMO_REG_CLOCK_MMC,          0x0000 },
#if 0
/* unused engines must be left in reset to stop MMC block read "blackouts" */
	{ GLAMO_REG_CLOCK_ISP,          0x0000 },
	{ GLAMO_REG_CLOCK_ISP,          0x0000 },
	{ GLAMO_REG_CLOCK_JPEG,         0x0000 },
	{ GLAMO_REG_CLOCK_3D,           0x0000 },
	{ GLAMO_REG_CLOCK_3D,           0x0000 },
	{ GLAMO_REG_CLOCK_2D,           0x0000 },
	{ GLAMO_REG_CLOCK_2D,           0x0000 },
	{ GLAMO_REG_CLOCK_RISC1,        0x0000 },
	{ GLAMO_REG_CLOCK_MPEG,         0x0000 },
	{ GLAMO_REG_CLOCK_MPEG,         0x0000 },
#endif
	{ GLAMO_REG_PLL_GEN1,           0x05db },       /* 48MHz */
	{ GLAMO_REG_PLL_GEN3,           0x0aba },       /* 90MHz */
	{ 0xfffd, 0 },
	/*
	 * b9 of this register MUST be zero to get any interrupts on INT#
	 * the other set bits enable all the engine interrupt sources
	 */
	{ GLAMO_REG_IRQ_ENABLE,         0x01ff },
	{ GLAMO_REG_CLOCK_GEN6,         0x2000 },
	{ GLAMO_REG_CLOCK_GEN7,         0x0101 },
	{ GLAMO_REG_CLOCK_GEN8,         0x0100 },
	{ GLAMO_REG_CLOCK_HOST,         0x000d },
	/*
	 * b7..b4 = 0 = no wait states on read or write
	 * b0 = 1 select PLL2 for Host interface, b1 = enable it
	 */
	{ 0x200,        0x0e03 /* this is replaced by script parser */ },
	{ 0x202,        0x07ff },
	{ 0x212,        0x0000 },
	{ 0x214,        0x4000 },
	{ 0x216,        0xf00e },

	/* S-Media recommended "set tiling mode to 512 mode for memory access
	 * more efficiency when 640x480" */
	{ GLAMO_REG_MEM_TYPE,           0x0c74 }, /* 8MB, 16 word pg wr+rd */
	{ GLAMO_REG_MEM_GEN,            0xafaf }, /* 63 grants min + max */

	{ GLAMO_REGOFS_HOSTBUS + 2,     0xffff }, /* enable  on MMIO*/

	{ GLAMO_REG_MEM_TIMING1,        0x0108 },
	{ GLAMO_REG_MEM_TIMING2,        0x0010 }, /* Taa = 3 MCLK */
	{ GLAMO_REG_MEM_TIMING3,        0x0000 },
	{ GLAMO_REG_MEM_TIMING4,        0x0000 }, /* CE1# delay fall/rise */
	{ GLAMO_REG_MEM_TIMING5,        0x0000 }, /* UB# LB# */
	{ GLAMO_REG_MEM_TIMING6,        0x0000 }, /* OE# */
	{ GLAMO_REG_MEM_TIMING7,        0x0000 }, /* WE# */
	{ GLAMO_REG_MEM_TIMING8,        0x1002 }, /* MCLK delay, was 0x1000 */
	{ GLAMO_REG_MEM_TIMING9,        0x6006 },
	{ GLAMO_REG_MEM_TIMING10,       0x00ff },
	{ GLAMO_REG_MEM_TIMING11,       0x0001 },
	{ GLAMO_REG_MEM_POWER1,         0x0020 },
	{ GLAMO_REG_MEM_POWER2,         0x0000 },
	/* DRAM bring-up: write 0, settle, 0xc100, settle, 0xe100 */
	{ GLAMO_REG_MEM_DRAM1,          0x0000 },
		{ 0xfffe, 1 },
	{ GLAMO_REG_MEM_DRAM1,          0xc100 },
		{ 0xfffe, 1 },
	{ GLAMO_REG_MEM_DRAM1,          0xe100 },
	{ GLAMO_REG_MEM_DRAM2,          0x01d6 },
	{ GLAMO_REG_CLOCK_MEMORY,       0x000b },
};
#if 0
/*
 * Disabled: a lighter-weight resume sequence.  Resume now re-runs the
 * full glamo_init_script via glamo_power(GLAMO_POWER_ON) instead (see
 * the comment there about nondeterministic chip state after resume).
 */
static struct glamo_script glamo_resume_script[] = {

	{ GLAMO_REG_PLL_GEN1,           0x05db },       /* 48MHz */
	{ GLAMO_REG_PLL_GEN3,           0x0aba },       /* 90MHz */
	{ GLAMO_REG_DFT_GEN6, 1 },
		{ 0xfffe, 100 },
		{ 0xfffd, 0 },
	{ 0x200,        0x0e03 },

	/*
	 * b9 of this register MUST be zero to get any interrupts on INT#
	 * the other set bits enable all the engine interrupt sources
	 */
	{ GLAMO_REG_IRQ_ENABLE,         0x01ff },
	{ GLAMO_REG_CLOCK_HOST,         0x0018 },
	{ GLAMO_REG_CLOCK_GEN5_1, 0x18b1 },

	{ GLAMO_REG_MEM_DRAM1,          0x0000 },
		{ 0xfffe, 1 },
	{ GLAMO_REG_MEM_DRAM1,          0xc100 },
		{ 0xfffe, 1 },
	{ GLAMO_REG_MEM_DRAM1,          0xe100 },
	{ GLAMO_REG_MEM_DRAM2,          0x01d6 },
	{ GLAMO_REG_CLOCK_MEMORY,       0x000b },
};
#endif
877
/* Power states requested of glamo_power(). */
enum glamo_power {
	GLAMO_POWER_ON,		/* full reset + re-run of the init script */
	GLAMO_POWER_SUSPEND,	/* RAM self-refresh/deep powerdown, PLLs off */
};
882
/*
 * Transition the chip between full operation and suspend.
 *
 * Runs the whole sequence under glamo->lock with interrupts off; the
 * delays used are busy-waits (udelay/mdelay), so this is spinlock-safe
 * but can hold the lock for several milliseconds.  The exact register
 * write ORDER below is hardware-mandated — do not reorder.
 */
static void glamo_power(struct glamo_core *glamo,
			enum glamo_power new_state)
{
	int n;
	unsigned long flags;

	spin_lock_irqsave(&glamo->lock, flags);

	dev_info(&glamo->pdev->dev, "***** glamo_power -> %d\n", new_state);

	/*
Power management
static const REG_VALUE_MASK_TYPE reg_powerOn[] =
{
    { REG_GEN_DFT6,     REG_BIT_ALL,    REG_DATA(1u << 0)           },
    { REG_GEN_PLL3,     0u,             REG_DATA(1u << 13)          },
    { REG_GEN_MEM_CLK,  REG_BIT_ALL,    REG_BIT_EN_MOCACLK          },
    { REG_MEM_DRAM2,    0u,             REG_BIT_EN_DEEP_POWER_DOWN  },
    { REG_MEM_DRAM1,    0u,             REG_BIT_SELF_REFRESH        }
};

static const REG_VALUE_MASK_TYPE reg_powerStandby[] =
{
    { REG_MEM_DRAM1,    REG_BIT_ALL,    REG_BIT_SELF_REFRESH    },
    { REG_GEN_MEM_CLK,  0u,             REG_BIT_EN_MOCACLK      },
    { REG_GEN_PLL3,     REG_BIT_ALL,    REG_DATA(1u << 13)      },
    { REG_GEN_DFT5,     REG_BIT_ALL,    REG_DATA(1u << 0)       }
};

static const REG_VALUE_MASK_TYPE reg_powerSuspend[] =
{
    { REG_MEM_DRAM2,    REG_BIT_ALL,    REG_BIT_EN_DEEP_POWER_DOWN  },
    { REG_GEN_MEM_CLK,  0u,             REG_BIT_EN_MOCACLK          },
    { REG_GEN_PLL3,     REG_BIT_ALL,    REG_DATA(1u << 13)          },
    { REG_GEN_DFT5,     REG_BIT_ALL,    REG_DATA(1u << 0)           }
};
*/

	switch (new_state) {
	case GLAMO_POWER_ON:

		/*
		 * glamo state on resume is nondeterministic in some
		 * fundamental way, it has also been observed that the
		 * Glamo reset pin can get asserted by, eg, touching it with
		 * a scope probe.  So the only answer is to roll with it and
		 * force an external reset on the Glamo during resume.
		 */

		(glamo->pdata->glamo_external_reset)(0);
		udelay(10);
		(glamo->pdata->glamo_external_reset)(1);
		mdelay(5);

		/* last arg 0: non-sleeping variant, we hold a spinlock */
		glamo_run_script(glamo, glamo_init_script,
			 ARRAY_SIZE(glamo_init_script), 0);

		break;

	case GLAMO_POWER_SUSPEND:

		/* nuke interrupts */
		__reg_write(glamo, GLAMO_REG_IRQ_ENABLE, 0x200);

		/* stash a copy of which engines were running */
		glamo->engine_enabled_bitfield_suspend =
						 glamo->engine_enabled_bitfield;

		/* take down each engine before we kill mem and pll */
		for (n = 0; n < __NUM_GLAMO_ENGINES; n++)
			if (glamo->engine_enabled_bitfield & (1 << n))
				__glamo_engine_disable(glamo, n);

		/* enable self-refresh */

		__reg_write(glamo, GLAMO_REG_MEM_DRAM1,
					GLAMO_MEM_DRAM1_EN_DRAM_REFRESH |
					GLAMO_MEM_DRAM1_EN_GATE_CKE |
					GLAMO_MEM_DRAM1_SELF_REFRESH |
					GLAMO_MEM_REFRESH_COUNT);
		__reg_write(glamo, GLAMO_REG_MEM_DRAM1,
					GLAMO_MEM_DRAM1_EN_MODEREG_SET |
					GLAMO_MEM_DRAM1_EN_DRAM_REFRESH |
					GLAMO_MEM_DRAM1_EN_GATE_CKE |
					GLAMO_MEM_DRAM1_SELF_REFRESH |
					GLAMO_MEM_REFRESH_COUNT);

		/* force RAM into deep powerdown */

		__reg_write(glamo, GLAMO_REG_MEM_DRAM2,
					GLAMO_MEM_DRAM2_DEEP_PWRDOWN |
					(7 << 6) | /* tRC */
					(1 << 4) | /* tRP */
					(1 << 2) | /* tRCD */
					2); /* CAS latency */

		/* disable clocks to memory */
		__reg_write(glamo, GLAMO_REG_CLOCK_MEMORY, 0);

		/* all dividers from OSCI */
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1, 0x400, 0x400);

		/* PLL2 into bypass */
		__reg_set_bit_mask(glamo, GLAMO_REG_PLL_GEN3, 1 << 12, 1 << 12);

		/* host interface register: b1..b0 = 0, PLL2 deselected/off
		 * -- NOTE(review): meaning of 0x0e00 inferred from the init
		 * script comment for offset 0x200; confirm */
		__reg_write(glamo, 0x200, 0x0e00);


		/* kill PLLS 1 then 2 */
		__reg_write(glamo, GLAMO_REG_DFT_GEN5, 0x0001);
		__reg_set_bit_mask(glamo, GLAMO_REG_PLL_GEN3, 1 << 13, 1 << 13);

		break;
	}

	spin_unlock_irqrestore(&glamo->lock, flags);
}
1000
#if 0
/*
 * Unfinished memory-size probe, compiled out: the test-pattern table and
 * the verification loop (see the inner "#if 0") were never completed,
 * and the body would not compile if enabled.
 */
#define MEMDETECT_RETRY 6
static unsigned int detect_memsize(struct glamo_core *glamo)
{
	int i;

	/*static const u_int16_t pattern[] = {
		0x1111, 0x8a8a, 0x2222, 0x7a7a,
		0x3333, 0x6a6a, 0x4444, 0x5a5a,
		0x5555, 0x4a4a, 0x6666, 0x3a3a,
		0x7777, 0x2a2a, 0x8888, 0x1a1a
	}; */

	for (i = 0; i < MEMDETECT_RETRY; i++) {
		switch (glamo->type) {
		case 3600:
			__reg_write(glamo, GLAMO_REG_MEM_TYPE, 0x0072);
			__reg_write(glamo, GLAMO_REG_MEM_DRAM1, 0xc100);
			break;
		case 3650:
			switch (glamo->revision) {
			case GLAMO_CORE_REV_A0:
				if (i & 1)
					__reg_write(glamo, GLAMO_REG_MEM_TYPE,
						    0x097a);
				else
					__reg_write(glamo, GLAMO_REG_MEM_TYPE,
						    0x0173);

				__reg_write(glamo, GLAMO_REG_MEM_DRAM1, 0x0000);
				msleep(1);
				__reg_write(glamo, GLAMO_REG_MEM_DRAM1, 0xc100);
				break;
			default:
				if (i & 1)
					__reg_write(glamo, GLAMO_REG_MEM_TYPE,
						    0x0972);
				else
					__reg_write(glamo, GLAMO_REG_MEM_TYPE,
						    0x0872);

				__reg_write(glamo, GLAMO_REG_MEM_DRAM1, 0x0000);
				msleep(1);
				__reg_write(glamo, GLAMO_REG_MEM_DRAM1, 0xe100);
				break;
			}
			break;
		case 3700:
			/* FIXME */
		default:
			break;
		}

#if 0
		/* FIXME: finish implementation */
		for (j = 0; j < 8; j++) {
			__
#endif
	}

	return 0;
}
#endif
1064
1065 /* Find out if we can support this version of the Glamo chip */
1066 static int glamo_supported(struct glamo_core *glamo)
1067 {
1068         u_int16_t dev_id, rev_id; /*, memsize; */
1069
1070         dev_id = __reg_read(glamo, GLAMO_REG_DEVICE_ID);
1071         rev_id = __reg_read(glamo, GLAMO_REG_REVISION_ID);
1072
1073         switch (dev_id) {
1074         case 0x3650:
1075                 switch (rev_id) {
1076                 case GLAMO_CORE_REV_A2:
1077                         break;
1078                 case GLAMO_CORE_REV_A0:
1079                 case GLAMO_CORE_REV_A1:
1080                 case GLAMO_CORE_REV_A3:
1081                         dev_warn(&glamo->pdev->dev, "untested core revision "
1082                                  "%04x, your mileage may vary\n", rev_id);
1083                         break;
1084                 default:
1085                         dev_warn(&glamo->pdev->dev, "unknown glamo revision "
1086                                  "%04x, your mileage may vary\n", rev_id);
1087                         /* maybe should abort ? */
1088                 }
1089                 break;
1090         case 0x3600:
1091         case 0x3700:
1092         default:
1093                 dev_err(&glamo->pdev->dev, "unsupported Glamo device %04x\n",
1094                         dev_id);
1095                 return 0;
1096         }
1097
1098         dev_dbg(&glamo->pdev->dev, "Detected Glamo core %04x Revision %04x "
1099                  "(%uHz CPU / %uHz Memory)\n", dev_id, rev_id,
1100                  glamo_pll_rate(glamo, GLAMO_PLL1),
1101                  glamo_pll_rate(glamo, GLAMO_PLL2));
1102
1103         return 1;
1104 }
1105
1106 static int __init glamo_probe(struct platform_device *pdev)
1107 {
1108         int rc = 0, irq;
1109         struct glamo_core *glamo;
1110
1111         glamo = kmalloc(GFP_KERNEL, sizeof(*glamo));
1112         if (!glamo)
1113                 return -ENOMEM;
1114
1115         spin_lock_init(&glamo->lock);
1116         glamo->pdev = pdev;
1117         glamo->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1118         glamo->irq = platform_get_irq(pdev, 0);
1119         glamo->pdata = pdev->dev.platform_data;
1120         if (!glamo->mem || !glamo->pdata) {
1121                 dev_err(&pdev->dev, "platform device with no MEM/PDATA ?\n");
1122                 rc = -ENOENT;
1123                 goto bail_free;
1124         }
1125
1126         /* register a number of sibling devices whoise IOMEM resources
1127          * are siblings of pdev's IOMEM resource */
1128
1129         /* only remap the generic, hostbus and memory controller registers */
1130         glamo->base = ioremap(glamo->mem->start, 0x4000 /*GLAMO_REGOFS_VIDCAP*/);
1131         if (!glamo->base) {
1132                 dev_err(&pdev->dev, "failed to ioremap() memory region\n");
1133                 goto bail_free;
1134         }
1135
1136         platform_set_drvdata(pdev, glamo);
1137
1138         (glamo->pdata->glamo_external_reset)(0);
1139         udelay(10);
1140         (glamo->pdata->glamo_external_reset)(1);
1141         mdelay(10);
1142
1143         /*
1144          * finally set the mfd interrupts up
1145          * can't do them earlier or sibling probes blow up
1146          */
1147
1148         for (irq = IRQ_GLAMO(0); irq <= IRQ_GLAMO(8); irq++) {
1149                 set_irq_chip_and_handler(irq, &glamo_irq_chip, handle_level_irq);
1150                 set_irq_flags(irq, IRQF_VALID);
1151                 set_irq_chip_data(irq, glamo);
1152         }
1153
1154         if (glamo->pdata->glamo_irq_is_wired &&
1155             !glamo->pdata->glamo_irq_is_wired()) {
1156                 set_irq_chained_handler(glamo->irq, glamo_irq_demux_handler);
1157                 set_irq_type(glamo->irq, IRQ_TYPE_EDGE_FALLING);
1158                 set_irq_chip_data(glamo->irq, glamo);
1159                 dev_info(&pdev->dev, "Glamo interrupt registered\n");
1160                 glamo->irq_works = 1;
1161         } else {
1162                 dev_err(&pdev->dev, "Glamo interrupt not used\n");
1163                 glamo->irq_works = 0;
1164         }
1165
1166         /* confirm it isn't insane version */
1167         if (!glamo_supported(glamo)) {
1168                 dev_err(&pdev->dev, "This Glamo is not supported\n");
1169                 goto bail_irq;
1170         }
1171
1172         /* sysfs */
1173         rc = sysfs_create_group(&pdev->dev.kobj, &glamo_attr_group);
1174         if (rc < 0) {
1175                 dev_err(&pdev->dev, "cannot create sysfs group\n");
1176                 goto bail_irq;
1177         }
1178
1179         /* init the chip with canned register set */
1180
1181         dev_dbg(&glamo->pdev->dev, "running init script\n");
1182         glamo_run_script(glamo, glamo_init_script,
1183                          ARRAY_SIZE(glamo_init_script), 1);
1184
1185         dev_info(&glamo->pdev->dev, "Glamo core PLL1: %uHz, PLL2: %uHz\n",
1186                  glamo_pll_rate(glamo, GLAMO_PLL1),
1187                  glamo_pll_rate(glamo, GLAMO_PLL2));
1188
1189         mfd_add_devices(&pdev->dev, pdev->id, glamo_cells,
1190                               ARRAY_SIZE(glamo_cells),
1191                                                   glamo->mem, 0);
1192
1193         /* only request the generic, hostbus and memory controller MMIO */
1194         glamo->mem = request_mem_region(glamo->mem->start,
1195                                         GLAMO_REGOFS_VIDCAP, "glamo-core");
1196         if (!glamo->mem) {
1197                 dev_err(&pdev->dev, "failed to request memory region\n");
1198                 goto bail_irq;
1199         }
1200
1201         return 0;
1202
1203 bail_irq:
1204         disable_irq(glamo->irq);
1205         set_irq_chained_handler(glamo->irq, NULL);
1206         set_irq_chip_data(glamo->irq, NULL);
1207
1208         for (irq = IRQ_GLAMO(0); irq <= IRQ_GLAMO(8); irq++) {
1209                 set_irq_flags(irq, 0);
1210                 set_irq_chip(irq, NULL);
1211                 set_irq_chip_data(irq, NULL);
1212         }
1213
1214         iounmap(glamo->base);
1215 bail_free:
1216         platform_set_drvdata(pdev, NULL);
1217         kfree(glamo);
1218
1219         return rc;
1220 }
1221
1222 static int glamo_remove(struct platform_device *pdev)
1223 {
1224         struct glamo_core *glamo = platform_get_drvdata(pdev);
1225         int irq;
1226
1227         disable_irq(glamo->irq);
1228         set_irq_chained_handler(glamo->irq, NULL);
1229         set_irq_chip_data(glamo->irq, NULL);
1230
1231         for (irq = IRQ_GLAMO(0); irq <= IRQ_GLAMO(8); irq++) {
1232                 set_irq_flags(irq, 0);
1233                 set_irq_chip(irq, NULL);
1234                 set_irq_chip_data(irq, NULL);
1235         }
1236
1237         platform_set_drvdata(pdev, NULL);
1238         mfd_remove_devices(&pdev->dev);
1239         iounmap(glamo->base);
1240         release_mem_region(glamo->mem->start, GLAMO_REGOFS_VIDCAP);
1241         kfree(glamo);
1242
1243         return 0;
1244 }
1245
#ifdef CONFIG_PM

/*
 * Suspend hook: set the suspending flag first (NOTE(review): presumably
 * checked elsewhere to block engine use during the transition — confirm
 * against users of ->suspending), then power the chip down.
 */
static int glamo_suspend(struct device *dev)
{
	struct glamo_core *glamo = dev_get_drvdata(dev);
	glamo->suspending = 1;
	glamo_power(glamo, GLAMO_POWER_SUSPEND);

	return 0;
}

/*
 * Resume hook: power the chip back up (external reset + full init
 * script, see glamo_power()) before clearing the suspending flag.
 */
static int glamo_resume(struct device *dev)
{
	struct glamo_core *glamo = dev_get_drvdata(dev);
	glamo_power(glamo, GLAMO_POWER_ON);
	glamo->suspending = 0;
	return 0;
}

/* only full system suspend/resume callbacks are provided */
static struct dev_pm_ops glamo_pm_ops = {
	.suspend = glamo_suspend,
	.resume  = glamo_resume,
};

#define GLAMO_PM_OPS (&glamo_pm_ops)

#else
/* !CONFIG_PM: the driver core accepts a NULL .pm pointer */
#define GLAMO_PM_OPS NULL
#endif
1275
/* Platform driver for the Glamo core; MFD children attach separately. */
static struct platform_driver glamo_driver = {
	.probe          = glamo_probe,
	.remove         = glamo_remove,
	.driver         = {
		.name   = "glamo3362",
		.owner  = THIS_MODULE,
		.pm     = GLAMO_PM_OPS, /* NULL when CONFIG_PM is off */
	},
};
1285
1286 static int __devinit glamo_init(void)
1287 {
1288         return platform_driver_register(&glamo_driver);
1289 }
1290
/* Module exit point: unregister the platform driver. */
static void __exit glamo_cleanup(void)
{
	platform_driver_unregister(&glamo_driver);
}

module_init(glamo_init);
module_exit(glamo_cleanup);

MODULE_AUTHOR("Harald Welte <laforge@openmoko.org>");
MODULE_DESCRIPTION("Smedia Glamo 336x/337x core/resource driver");
MODULE_LICENSE("GPL");