/* openwrt/package/linux/kernel-source/arch/mips/brcm-boards/bcm947xx/nvram_linux.c */
/*
 * NVRAM variable manipulation (Linux kernel half)
 *
 * Copyright 2004, Broadcom Corporation
 * All Rights Reserved.
 *
 * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
 * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
 * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
 *
 * $Id$
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/wrapper.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/mtd/mtd.h>
#include <asm/addrspace.h>
#include <asm/io.h>
#include <asm/uaccess.h>

#include <typedefs.h>
#include <bcmendian.h>
#include <bcmnvram.h>
#include <bcmutils.h>
#include <sbconfig.h>
#include <sbchipc.h>
#include <sbutils.h>
#include <sbmips.h>
#include <sflash.h>

/* In BSS to minimize text size and page aligned so it can be mmap()-ed */
static char nvram_buf[NVRAM_SPACE] __attribute__((aligned(PAGE_SIZE)));

#ifdef MODULE

#define early_nvram_get(name) nvram_get(name)

#else /* !MODULE */

/* Global SB handle */
extern void *bcm947xx_sbh;
extern spinlock_t bcm947xx_sbh_lock;

/* Convenience */
#define sbh bcm947xx_sbh
#define sbh_lock bcm947xx_sbh_lock
#define KB * 1024
#define MB * 1024 * 1024

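/*
 * The NVRAM block occupies the last NVRAM_SPACE bytes below a power-of-two
 * flash offset.  The probe below starts at FLASH_MIN, doubles the offset up
 * to the flash limit, and then tries the embedded 4 KB and 1 KB locations
 * before giving up.
 */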
/* Probe for NVRAM header */
static void __init
early_nvram_init(void)
{
        struct nvram_header *header;
        chipcregs_t *cc;
        struct sflash *info = NULL;
        int i;
        uint32 base, off, lim;

        if ((cc = sb_setcore(sbh, SB_CC, 0)) != NULL) {
                base = CC_FLASH_BASE;
                switch (readl(&cc->capabilities) & CAP_FLASH_MASK) {
                case PFLASH:
                        lim = CC_FLASH_MAX;
                        break;

                case SFLASH_ST:
                case SFLASH_AT:
                        if ((info = sflash_init(cc)) == NULL)
                                return;
                        lim = info->size;
                        break;

                case FLASH_NONE:
                default:
                        return;
                }
        } else {
                /* extif assumed, stop at 4 MB */
                base = FLASH_BASE;
                lim = FLASH_MAX;
        }

        off = FLASH_MIN;
        while (off <= lim) {
                /* Windowed flash access */
                header = (struct nvram_header *) KSEG1ADDR(base + off - NVRAM_SPACE);
                if (header->magic == NVRAM_MAGIC) {
                        u32 *src = (u32 *) header;
                        u32 *dst = (u32 *) nvram_buf;
                        for (i = 0; i < sizeof(struct nvram_header); i += 4)
                                *dst++ = *src++;
                        for (; i < header->len && i < NVRAM_SPACE; i += 4)
                                *dst++ = ltoh32(*src++);
                        return;
                }

                /* Try embedded NVRAM at 4 KB and 1 KB as last resorts */
                if (off == 1 KB)
                        break;
                else if (off == 4 KB)
                        off = 1 KB;
                else if (off == lim)
                        off = 4 KB;
                else
                        off <<= 1;
        }
}

/* Early (before mm or mtd) read-only access to NVRAM */
static char * __init
early_nvram_get(const char *name)
{
        char *var, *value, *end, *eq;

        if (!name)
                return NULL;

        if (!nvram_buf[0])
                early_nvram_init();

        /* Look for name=value and return value */
        var = &nvram_buf[sizeof(struct nvram_header)];
        end = nvram_buf + sizeof(nvram_buf) - 2;
        end[0] = end[1] = '\0';
        for (; *var; var = value + strlen(value) + 1) {
                if (!(eq = strchr(var, '=')))
                        break;
                value = eq + 1;
                if ((eq - var) == strlen(name) && strncmp(var, name, (eq - var)) == 0)
                        return value;
        }

        return NULL;
}

#endif /* !MODULE */

extern char * _nvram_get(const char *name);
extern int _nvram_set(const char *name, const char *value);
extern int _nvram_unset(const char *name);
extern int _nvram_getall(char *buf, int count);
extern int _nvram_commit(struct nvram_header *header);
extern int _nvram_init(void);
extern void _nvram_exit(void);

/* Globals */
static spinlock_t nvram_lock = SPIN_LOCK_UNLOCKED;
static struct semaphore nvram_sem;
static unsigned long nvram_offset = 0;
static int nvram_major = -1;
static devfs_handle_t nvram_handle = NULL;
static struct mtd_info *nvram_mtd = NULL;

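/*
 * Read the NVRAM image from the last NVRAM_SPACE bytes of the "nvram" MTD
 * partition.  If the partition is missing or the header is invalid, fall
 * back to the copy captured by early_nvram_init().
 */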
int
_nvram_read(char *buf)
{
        struct nvram_header *header = (struct nvram_header *) buf;
        size_t len;

        if (!nvram_mtd ||
            MTD_READ(nvram_mtd, nvram_mtd->size - NVRAM_SPACE, NVRAM_SPACE, &len, buf) ||
            len != NVRAM_SPACE ||
            header->magic != NVRAM_MAGIC) {
                /* Maybe we can recover some data from early initialization */
                memcpy(buf, nvram_buf, NVRAM_SPACE);
        }

        return 0;
}

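/*
 * Allocate (or update) a name/value tuple.  Names are kmalloc()-ed together
 * with the tuple; values are carved out of nvram_buf so they remain visible
 * through the mmap() interface.
 */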
struct nvram_tuple *
_nvram_realloc(struct nvram_tuple *t, const char *name, const char *value)
{
        if ((nvram_offset + strlen(value) + 1) > NVRAM_SPACE)
                return NULL;

        if (!t) {
                if (!(t = kmalloc(sizeof(struct nvram_tuple) + strlen(name) + 1, GFP_ATOMIC)))
                        return NULL;

                /* Copy name */
                t->name = (char *) &t[1];
                strcpy(t->name, name);

                t->value = NULL;
        }

        /* Copy value */
        if (!t->value || strcmp(t->value, value)) {
                t->value = &nvram_buf[nvram_offset];
                strcpy(t->value, value);
                nvram_offset += strlen(value) + 1;
        }

        return t;
}

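/* Free a tuple; calling with NULL resets the nvram_buf value offset instead */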
void
_nvram_free(struct nvram_tuple *t)
{
        if (!t)
                nvram_offset = 0;
        else
                kfree(t);
}

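/*
 * Set an NVRAM variable.  If the first attempt fails, commit into a scratch
 * header to consolidate free space and retry once.
 */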
int
nvram_set(const char *name, const char *value)
{
        unsigned long flags;
        int ret;
        struct nvram_header *header;

        spin_lock_irqsave(&nvram_lock, flags);
        if ((ret = _nvram_set(name, value))) {
                /* Consolidate space and try again */
                if ((header = kmalloc(NVRAM_SPACE, GFP_ATOMIC))) {
                        if (_nvram_commit(header) == 0)
                                ret = _nvram_set(name, value);
                        kfree(header);
                }
        }
        spin_unlock_irqrestore(&nvram_lock, flags);

        return ret;
}

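/* Look up a variable in the in-memory table under the NVRAM lock */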
char *
real_nvram_get(const char *name)
{
        unsigned long flags;
        char *value;

        spin_lock_irqsave(&nvram_lock, flags);
        value = _nvram_get(name);
        spin_unlock_irqrestore(&nvram_lock, flags);

        return value;
}

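/* Use the normal store once the driver is registered; fall back to the early flash copy before that */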
char *
nvram_get(const char *name)
{
        if (nvram_major >= 0)
                return real_nvram_get(name);
        else
                return early_nvram_get(name);
}

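/* Remove an NVRAM variable */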
int
nvram_unset(const char *name)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&nvram_lock, flags);
        ret = _nvram_unset(name);
        spin_unlock_irqrestore(&nvram_lock, flags);

        return ret;
}

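/* MTD erase completion callback: wake up the thread sleeping in nvram_commit() */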
static void
erase_callback(struct erase_info *done)
{
        wait_queue_head_t *wait_q = (wait_queue_head_t *) done->priv;
        wake_up(wait_q);
}

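/*
 * Write the current variable set back to flash: preserve the part of the
 * last erase block that precedes the NVRAM area, regenerate the NVRAM
 * image, erase and rewrite the affected blocks, then read a few bytes back
 * to return the flash to read mode.
 */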
int
nvram_commit(void)
{
        char *buf;
        size_t erasesize, len;
        unsigned int i;
        int ret;
        struct nvram_header *header;
        unsigned long flags;
        u_int32_t offset;
        DECLARE_WAITQUEUE(wait, current);
        wait_queue_head_t wait_q;
        struct erase_info erase;

        printk("nvram_commit(): init\n");

        if (!nvram_mtd) {
                printk("nvram_commit: NVRAM not found\n");
                return -ENODEV;
        }

        if (in_interrupt()) {
                printk("nvram_commit: not committing in interrupt\n");
                return -EINVAL;
        }

        /* Backup sector blocks to be erased */
        erasesize = ROUNDUP(NVRAM_SPACE, nvram_mtd->erasesize);
        if (!(buf = kmalloc(erasesize, GFP_KERNEL))) {
                printk("nvram_commit: out of memory\n");
                return -ENOMEM;
        }

        down(&nvram_sem);
#if 0
        offset = nvram_mtd->size - erasesize;
        i = erasesize - NVRAM_SPACE;
        ret = MTD_READ(nvram_mtd, offset, i, &len, buf);
        if (ret || len != i) {
                printk("nvram_commit: read error\n");
                ret = -EIO;
                goto done;
#endif
        if ((i = erasesize - NVRAM_SPACE) > 0) {
                offset = nvram_mtd->size - erasesize;
                len = 0;
                ret = MTD_READ(nvram_mtd, offset, i, &len, buf);
                if (ret || len != i) {
                        printk("nvram_commit: read error ret = %d, len = %d/%d\n", ret, len, i);
                        ret = -EIO;
                        goto done;
                }
                header = (struct nvram_header *)(buf + i);
        } else {
                offset = nvram_mtd->size - NVRAM_SPACE;
                header = (struct nvram_header *)buf;
        }

        /* Regenerate NVRAM */
        spin_lock_irqsave(&nvram_lock, flags);
        ret = _nvram_commit(header);
        spin_unlock_irqrestore(&nvram_lock, flags);
        if (ret)
                goto done;

        /* Erase sector blocks */
        init_waitqueue_head(&wait_q);
        for (; offset < nvram_mtd->size - NVRAM_SPACE + header->len; offset += nvram_mtd->erasesize) {
                erase.mtd = nvram_mtd;
                erase.addr = offset;
                erase.len = nvram_mtd->erasesize;
                erase.callback = erase_callback;
                erase.priv = (u_long) &wait_q;

                set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue(&wait_q, &wait);

                /* Unlock sector blocks */
                if (nvram_mtd->unlock)
                        nvram_mtd->unlock(nvram_mtd, offset, nvram_mtd->erasesize);

                if ((ret = MTD_ERASE(nvram_mtd, &erase))) {
                        set_current_state(TASK_RUNNING);
                        remove_wait_queue(&wait_q, &wait);
                        printk("nvram_commit: erase error\n");
                        goto done;
                }

                /* Wait for erase to finish */
                schedule();
                remove_wait_queue(&wait_q, &wait);
        }

        /* Write partition up to end of data area */
        offset = nvram_mtd->size - erasesize;
        i = erasesize - NVRAM_SPACE + header->len;
        ret = MTD_WRITE(nvram_mtd, offset, i, &len, buf);
        if (ret || len != i) {
                printk("nvram_commit: write error\n");
                ret = -EIO;
                goto done;
        }

        /*
         * Reading a few bytes back here will put the device
         * back to the correct mode on certain flashes
         */
        offset = nvram_mtd->size - erasesize;
        ret = MTD_READ(nvram_mtd, offset, 4, &len, buf);

 done:
        up(&nvram_sem);
        kfree(buf);
        printk("nvram_commit(): end\n");
        return ret;
}

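/* Fetch all variables into buf (formatted by _nvram_getall()) under the NVRAM lock */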
int
nvram_getall(char *buf, int count)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&nvram_lock, flags);
        ret = _nvram_getall(buf, count);
        spin_unlock_irqrestore(&nvram_lock, flags);

        return ret;
}

EXPORT_SYMBOL(nvram_get);
EXPORT_SYMBOL(nvram_getall);
EXPORT_SYMBOL(nvram_set);
EXPORT_SYMBOL(nvram_unset);
EXPORT_SYMBOL(nvram_commit);

/* User mode interface below */

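/*
 * read() on /dev/nvram: the caller passes a variable name in the buffer.
 * An empty name returns the whole variable set via nvram_getall();
 * otherwise the variable's offset within the mmap()-ed nvram_buf is
 * returned as an unsigned long.  The buffer is expected to hold a
 * NUL-terminated name.
 */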
static ssize_t
dev_nvram_read(struct file *file, char *buf, size_t count, loff_t *ppos)
{
        char tmp[100], *name = tmp, *value;
        ssize_t ret;
        unsigned long off;

        if (count > sizeof(tmp)) {
                if (!(name = kmalloc(count, GFP_KERNEL)))
                        return -ENOMEM;
        }

        if (copy_from_user(name, buf, count)) {
                ret = -EFAULT;
                goto done;
        }

        if (*name == '\0') {
                /* Get all variables */
                ret = nvram_getall(name, count);
                if (ret == 0) {
                        if (copy_to_user(buf, name, count)) {
                                ret = -EFAULT;
                                goto done;
                        }
                        ret = count;
                }
        } else {
                if (!(value = nvram_get(name))) {
                        ret = 0;
                        goto done;
                }

                /* Provide the offset into mmap() space */
                off = (unsigned long) value - (unsigned long) nvram_buf;

                if (put_user(off, (unsigned long *) buf)) {
                        ret = -EFAULT;
                        goto done;
                }

                ret = sizeof(unsigned long);
        }

        flush_cache_all();

done:
        if (name != tmp)
                kfree(name);

        return ret;
}

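/*
 * write() on /dev/nvram: "name=value" sets a variable, a bare "name"
 * unsets it.  The buffer is expected to hold a NUL-terminated string.
 */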
static ssize_t
dev_nvram_write(struct file *file, const char *buf, size_t count, loff_t *ppos)
{
        char tmp[100], *name = tmp, *value;
        ssize_t ret;

        if (count > sizeof(tmp)) {
                if (!(name = kmalloc(count, GFP_KERNEL)))
                        return -ENOMEM;
        }

        if (copy_from_user(name, buf, count)) {
                ret = -EFAULT;
                goto done;
        }

        value = name;
        name = strsep(&value, "=");
        if (value)
                ret = nvram_set(name, value) ? : count;
        else
                ret = nvram_unset(name) ? : count;

 done:
        if (name != tmp)
                kfree(name);

        return ret;
}

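/* ioctl() on /dev/nvram: the only supported command (NVRAM_MAGIC) commits to flash */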
static int
dev_nvram_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
        if (cmd != NVRAM_MAGIC)
                return -EINVAL;
        return nvram_commit();
}

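/* mmap() on /dev/nvram: map the page-aligned nvram_buf so values can be read in place */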
static int
dev_nvram_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long offset = virt_to_phys(nvram_buf);

        if (remap_page_range(vma->vm_start, offset, vma->vm_end - vma->vm_start,
                             vma->vm_page_prot))
                return -EAGAIN;

        return 0;
}

static int
dev_nvram_open(struct inode *inode, struct file *file)
{
        MOD_INC_USE_COUNT;
        return 0;
}

static int
dev_nvram_release(struct inode *inode, struct file *file)
{
        MOD_DEC_USE_COUNT;
        return 0;
}

static struct file_operations dev_nvram_fops = {
        owner:          THIS_MODULE,
        open:           dev_nvram_open,
        release:        dev_nvram_release,
        read:           dev_nvram_read,
        write:          dev_nvram_write,
        ioctl:          dev_nvram_ioctl,
        mmap:           dev_nvram_mmap,
};

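/* Tear down the char device, release the MTD device and un-reserve the mmap() pages */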
static void
dev_nvram_exit(void)
{
        int order = 0;
        struct page *page, *end;

        if (nvram_handle)
                devfs_unregister(nvram_handle);

        if (nvram_major >= 0)
                devfs_unregister_chrdev(nvram_major, "nvram");

        if (nvram_mtd)
                put_mtd_device(nvram_mtd);

        while ((PAGE_SIZE << order) < NVRAM_SPACE)
                order++;
        end = virt_to_page(nvram_buf + (PAGE_SIZE << order) - 1);
        for (page = virt_to_page(nvram_buf); page <= end; page++)
                mem_map_unreserve(page);

        _nvram_exit();
}

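/*
 * Driver init: reserve the nvram_buf pages for mmap(), locate the "nvram"
 * MTD partition, register the character device, populate the hash table,
 * and store the SDRAM NCDL value if it is not already set.
 */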
static int __init
dev_nvram_init(void)
{
        int order = 0, ret = 0;
        struct page *page, *end;
        unsigned int i;

        /* Allocate and reserve memory to mmap() */
        while ((PAGE_SIZE << order) < NVRAM_SPACE)
                order++;
        end = virt_to_page(nvram_buf + (PAGE_SIZE << order) - 1);
        for (page = virt_to_page(nvram_buf); page <= end; page++)
                mem_map_reserve(page);

#ifdef CONFIG_MTD
        /* Find associated MTD device */
        for (i = 0; i < MAX_MTD_DEVICES; i++) {
                nvram_mtd = get_mtd_device(NULL, i);
                if (nvram_mtd) {
                        if (!strcmp(nvram_mtd->name, "nvram") &&
                            nvram_mtd->size >= NVRAM_SPACE)
                                break;
                        put_mtd_device(nvram_mtd);
                }
        }
        if (i >= MAX_MTD_DEVICES)
                nvram_mtd = NULL;
#endif

        /* Initialize hash table lock */
        spin_lock_init(&nvram_lock);

        /* Initialize commit semaphore */
        init_MUTEX(&nvram_sem);

        /* Register char device */
        if ((nvram_major = devfs_register_chrdev(0, "nvram", &dev_nvram_fops)) < 0) {
                ret = nvram_major;
                goto err;
        }

        /* Initialize hash table */
        _nvram_init();

        /* Create /dev/nvram handle */
        nvram_handle = devfs_register(NULL, "nvram", DEVFS_FL_NONE, nvram_major, 0,
                                      S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, &dev_nvram_fops, NULL);

        /* Set the SDRAM NCDL value into NVRAM if not already done */
        if (getintvar(NULL, "sdram_ncdl") == 0) {
                unsigned int ncdl;
                char buf[] = "0x00000000";

                if ((ncdl = sb_memc_get_ncdl(sbh))) {
                        sprintf(buf, "0x%08x", ncdl);
                        nvram_set("sdram_ncdl", buf);
                        nvram_commit();
                }
        }

        return 0;

 err:
        dev_nvram_exit();
        return ret;
}

module_init(dev_nvram_init);
module_exit(dev_nvram_exit);