1 From 5eac4d66049ab7d14a2b7311610c8cb85a2c1bf1 Mon Sep 17 00:00:00 2001
2 From: Nicolas Thill <nico@openwrt.org>
3 Date: Fri, 20 Mar 2015 00:31:06 +0100
4 Subject: [PATCH] UM: fix make headers_install after UAPI header installation
6 Signed-off-by: Nicolas Thill <nico@openwrt.org>
8 From faec6b6c2cc0219e74569c13f581fc11d8f3fc57 Mon Sep 17 00:00:00 2001
9 From: Florian Fainelli <florian@openwrt.org>
10 Date: Sun, 17 Mar 2013 20:12:10 +0100
11 Subject: [PATCH] UM: fix make headers_install after UAPI header installation
13 Commit 10b63956 (UAPI: Plumb the UAPI Kbuilds into the user
14 header installation and checking) breaks UML make headers_install with
17 $ ARCH=um make headers_install
18 CHK include/generated/uapi/linux/version.h
19 UPD include/generated/uapi/linux/version.h
20 HOSTCC scripts/basic/fixdep
21 WRAP arch/um/include/generated/asm/bug.h
23 WRAP arch/um/include/generated/asm/trace_clock.h
24 SYSHDR arch/x86/syscalls/../include/generated/uapi/asm/unistd_32.h
25 SYSHDR arch/x86/syscalls/../include/generated/uapi/asm/unistd_64.h
26 SYSHDR arch/x86/syscalls/../include/generated/uapi/asm/unistd_x32.h
27 SYSTBL arch/x86/syscalls/../include/generated/asm/syscalls_32.h
28 HOSTCC scripts/unifdef
29 Makefile:912: *** Headers not exportable for the um architecture. Stop.
30 zsh: exit 2 ARCH=um make headers_install
32 The reason for that is because the top-level Makefile does the
34 $(if $(wildcard $(srctree)/arch/$(hdr-arch)/include/uapi/asm/Kbuild),, \
35 $(error Headers not exportable for the $(SRCARCH) architecture))
37 we end up in the else part of the $(if) statement because UML still uses
38 the old path in arch/um/include/asm/Kbuild. This patch fixes the issue
39 by moving the header files to be in arch/um/include/uapi/asm/ thus
40 making headers_install (and other make targets checking for uapi) to
41 work.
43 Signed-off-by: Florian Fainelli <florian@openwrt.org>
45 Richard, this has been broken for 3.7+ onwards, if you want me to send
46 you separate patches for 3.7 and 3.8 let me know. Thanks!
49 --- a/arch/um/include/asm/Kbuild
52 -generic-y += barrier.h
54 -generic-y += clkdev.h
55 -generic-y += cputime.h
56 -generic-y += current.h
58 -generic-y += device.h
59 -generic-y += emergency-restart.h
61 -generic-y += ftrace.h
63 -generic-y += hardirq.h
65 -generic-y += hw_irq.h
67 -generic-y += irq_regs.h
68 -generic-y += irq_work.h
69 -generic-y += kdebug.h
70 -generic-y += mcs_spinlock.h
74 -generic-y += percpu.h
75 -generic-y += preempt.h
76 -generic-y += scatterlist.h
77 -generic-y += sections.h
78 -generic-y += switch_to.h
79 -generic-y += topology.h
80 -generic-y += trace_clock.h
81 -generic-y += word-at-a-time.h
83 --- a/arch/um/include/asm/a.out-core.h
86 -/* a.out coredump register dumper
88 - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
89 - * Written by David Howells (dhowells@redhat.com)
91 - * This program is free software; you can redistribute it and/or
92 - * modify it under the terms of the GNU General Public Licence
93 - * as published by the Free Software Foundation; either version
94 - * 2 of the Licence, or (at your option) any later version.
97 -#ifndef __UM_A_OUT_CORE_H
98 -#define __UM_A_OUT_CORE_H
102 -#include <linux/user.h>
105 - * fill in the user structure for an a.out core dump
107 -static inline void aout_dump_thread(struct pt_regs *regs, struct user *u)
111 -#endif /* __KERNEL__ */
112 -#endif /* __UM_A_OUT_CORE_H */
113 --- a/arch/um/include/asm/bugs.h
119 -void check_bugs(void);
122 --- a/arch/um/include/asm/cache.h
125 -#ifndef __UM_CACHE_H
126 -#define __UM_CACHE_H
129 -#if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
130 -# define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
131 -#elif defined(CONFIG_UML_X86) /* 64-bit */
132 -# define L1_CACHE_SHIFT 6 /* Should be 7 on Intel */
134 -/* XXX: this was taken from x86, now it's completely random. Luckily only
135 - * affects SMP padding. */
136 -# define L1_CACHE_SHIFT 5
139 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
142 --- a/arch/um/include/asm/common.lds.S
145 -#include <asm-generic/vmlinux.lds.h>
147 - .fini : { *(.fini) } =0x9090
149 - PROVIDE (etext = .);
153 - PROVIDE (sdata = .);
157 - .unprotected : { *(.unprotected) }
159 - PROVIDE (_unprotected_end = .);
162 - .note : { *(.note.*) }
167 - .uml.setup.init : {
168 - __uml_setup_start = .;
170 - __uml_setup_end = .;
174 - __uml_help_start = .;
176 - __uml_help_end = .;
179 - .uml.postsetup.init : {
180 - __uml_postsetup_start = .;
181 - *(.uml.postsetup.init)
182 - __uml_postsetup_end = .;
195 - .con_initcall.init : {
199 - .uml.initcall.init : {
200 - __uml_initcall_start = .;
201 - *(.uml.initcall.init)
202 - __uml_initcall_end = .;
208 - __exitcall_begin = .;
210 - __exitcall_end = .;
214 - __uml_exitcall_begin = .;
215 - *(.uml.exitcall.exit)
216 - __uml_exitcall_end = .;
220 - .altinstructions : {
221 - __alt_instructions = .;
222 - *(.altinstructions)
223 - __alt_instructions_end = .;
225 - .altinstr_replacement : { *(.altinstr_replacement) }
226 - /* .exit.text is discard at runtime, not link time, to deal with references
227 - from .altinstructions and .eh_frame */
228 - .exit.text : { *(.exit.text) }
229 - .exit.data : { *(.exit.data) }
232 - __preinit_array_start = .;
234 - __preinit_array_end = .;
237 - __init_array_start = .;
239 - __init_array_end = .;
242 - __fini_array_start = .;
244 - __fini_array_end = .;
252 --- a/arch/um/include/asm/dma.h
260 -extern unsigned long uml_physmem;
262 -#define MAX_DMA_ADDRESS (uml_physmem)
265 --- a/arch/um/include/asm/fixmap.h
268 -#ifndef __UM_FIXMAP_H
269 -#define __UM_FIXMAP_H
271 -#include <asm/processor.h>
272 -#include <asm/kmap_types.h>
273 -#include <asm/archparam.h>
274 -#include <asm/page.h>
275 -#include <linux/threads.h>
278 - * Here we define all the compile-time 'special' virtual
279 - * addresses. The point is to have a constant address at
280 - * compile time, but to set the physical address only
281 - * in the boot process. We allocate these special addresses
282 - * from the end of virtual memory (0xfffff000) backwards.
283 - * Also this lets us do fail-safe vmalloc(), we
284 - * can guarantee that these special addresses and
285 - * vmalloc()-ed addresses never overlap.
287 - * these 'compile-time allocated' memory buffers are
288 - * fixed-size 4k pages. (or larger if used with an increment
289 - * highger than 1) use fixmap_set(idx,phys) to associate
290 - * physical memory with fixmap indices.
292 - * TLB entries of such buffers will not be flushed across
297 - * on UP currently we will have no trace of the fixmap mechanizm,
298 - * no page table allocations, etc. This might change in the
299 - * future, say framebuffers for the console driver(s) could be
302 -enum fixed_addresses {
303 -#ifdef CONFIG_HIGHMEM
304 - FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
305 - FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
307 - __end_of_fixed_addresses
310 -extern void __set_fixmap (enum fixed_addresses idx,
311 - unsigned long phys, pgprot_t flags);
314 - * used by vmalloc.c.
316 - * Leave one empty page between vmalloc'ed areas and
317 - * the start of the fixmap, and leave one page empty
318 - * at the top of mem..
321 -#define FIXADDR_TOP (TASK_SIZE - 2 * PAGE_SIZE)
322 -#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
323 -#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
325 -#include <asm-generic/fixmap.h>
328 --- a/arch/um/include/asm/irq.h
336 -#define CONSOLE_IRQ 2
337 -#define CONSOLE_WRITE_IRQ 3
339 -#define UM_ETH_IRQ 5
341 -#define SSL_WRITE_IRQ 7
342 -#define ACCEPT_IRQ 8
343 -#define MCONSOLE_IRQ 9
344 -#define WINCH_IRQ 10
345 -#define SIGIO_WRITE_IRQ 11
346 -#define TELNETD_IRQ 12
347 -#define XTERM_IRQ 13
348 -#define RANDOM_IRQ 14
350 -#define LAST_IRQ RANDOM_IRQ
351 -#define NR_IRQS (LAST_IRQ + 1)
354 --- a/arch/um/include/asm/irqflags.h
357 -#ifndef __UM_IRQFLAGS_H
358 -#define __UM_IRQFLAGS_H
360 -extern int get_signals(void);
361 -extern int set_signals(int enable);
362 -extern void block_signals(void);
363 -extern void unblock_signals(void);
365 -static inline unsigned long arch_local_save_flags(void)
367 - return get_signals();
370 -static inline void arch_local_irq_restore(unsigned long flags)
372 - set_signals(flags);
375 -static inline void arch_local_irq_enable(void)
380 -static inline void arch_local_irq_disable(void)
385 -static inline unsigned long arch_local_irq_save(void)
387 - unsigned long flags;
388 - flags = arch_local_save_flags();
389 - arch_local_irq_disable();
393 -static inline bool arch_irqs_disabled(void)
395 - return arch_local_save_flags() == 0;
399 --- a/arch/um/include/asm/kmap_types.h
403 - * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
404 - * Licensed under the GPL
407 -#ifndef __UM_KMAP_TYPES_H
408 -#define __UM_KMAP_TYPES_H
410 -/* No more #include "asm/arch/kmap_types.h" ! */
412 -#define KM_TYPE_NR 14
415 --- a/arch/um/include/asm/kvm_para.h
418 -#include <asm-generic/kvm_para.h>
419 --- a/arch/um/include/asm/mmu.h
423 - * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
424 - * Licensed under the GPL
427 -#ifndef __ARCH_UM_MMU_H
428 -#define __ARCH_UM_MMU_H
431 -#include <asm/mm_context.h>
433 -typedef struct mm_context {
435 - struct uml_arch_mm_context arch;
436 - struct page *stub_pages[2];
439 -extern void __switch_mm(struct mm_id * mm_idp);
441 -/* Avoid tangled inclusion with asm/ldt.h */
442 -extern long init_new_ldt(struct mm_context *to_mm, struct mm_context *from_mm);
443 -extern void free_ldt(struct mm_context *mm);
446 --- a/arch/um/include/asm/mmu_context.h
450 - * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
451 - * Licensed under the GPL
454 -#ifndef __UM_MMU_CONTEXT_H
455 -#define __UM_MMU_CONTEXT_H
457 -#include <linux/sched.h>
458 -#include <asm/mmu.h>
460 -extern void uml_setup_stubs(struct mm_struct *mm);
461 -extern void arch_exit_mmap(struct mm_struct *mm);
463 -#define deactivate_mm(tsk,mm) do { } while (0)
465 -extern void force_flush_all(void);
467 -static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
470 - * This is called by fs/exec.c and sys_unshare()
471 - * when the new ->mm is used for the first time.
473 - __switch_mm(&new->context.id);
474 - down_write(&new->mmap_sem);
475 - uml_setup_stubs(new);
476 - up_write(&new->mmap_sem);
479 -static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
480 - struct task_struct *tsk)
482 - unsigned cpu = smp_processor_id();
485 - cpumask_clear_cpu(cpu, mm_cpumask(prev));
486 - cpumask_set_cpu(cpu, mm_cpumask(next));
487 - if(next != &init_mm)
488 - __switch_mm(&next->context.id);
492 -static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
494 - uml_setup_stubs(mm);
497 -static inline void enter_lazy_tlb(struct mm_struct *mm,
498 - struct task_struct *tsk)
502 -extern int init_new_context(struct task_struct *task, struct mm_struct *mm);
504 -extern void destroy_context(struct mm_struct *mm);
507 --- a/arch/um/include/asm/page.h
511 - * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
512 - * Copyright 2003 PathScale, Inc.
513 - * Licensed under the GPL
519 -#include <linux/const.h>
521 -/* PAGE_SHIFT determines the page size */
522 -#define PAGE_SHIFT 12
523 -#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
524 -#define PAGE_MASK (~(PAGE_SIZE-1))
526 -#ifndef __ASSEMBLY__
530 -#include <linux/types.h>
531 -#include <asm/vm-flags.h>
534 - * These are used to make use of C type-checking..
537 -#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
538 -#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
540 -#define clear_user_page(page, vaddr, pg) clear_page(page)
541 -#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
543 -#if defined(CONFIG_3_LEVEL_PGTABLES) && !defined(CONFIG_64BIT)
545 -typedef struct { unsigned long pte_low, pte_high; } pte_t;
546 -typedef struct { unsigned long pmd; } pmd_t;
547 -typedef struct { unsigned long pgd; } pgd_t;
548 -#define pte_val(x) ((x).pte_low | ((unsigned long long) (x).pte_high << 32))
550 -#define pte_get_bits(pte, bits) ((pte).pte_low & (bits))
551 -#define pte_set_bits(pte, bits) ((pte).pte_low |= (bits))
552 -#define pte_clear_bits(pte, bits) ((pte).pte_low &= ~(bits))
553 -#define pte_copy(to, from) ({ (to).pte_high = (from).pte_high; \
555 - (to).pte_low = (from).pte_low; })
556 -#define pte_is_zero(pte) (!((pte).pte_low & ~_PAGE_NEWPAGE) && !(pte).pte_high)
557 -#define pte_set_val(pte, phys, prot) \
558 - ({ (pte).pte_high = (phys) >> 32; \
559 - (pte).pte_low = (phys) | pgprot_val(prot); })
561 -#define pmd_val(x) ((x).pmd)
562 -#define __pmd(x) ((pmd_t) { (x) } )
564 -typedef unsigned long long pfn_t;
565 -typedef unsigned long long phys_t;
569 -typedef struct { unsigned long pte; } pte_t;
570 -typedef struct { unsigned long pgd; } pgd_t;
572 -#ifdef CONFIG_3_LEVEL_PGTABLES
573 -typedef struct { unsigned long pmd; } pmd_t;
574 -#define pmd_val(x) ((x).pmd)
575 -#define __pmd(x) ((pmd_t) { (x) } )
578 -#define pte_val(x) ((x).pte)
581 -#define pte_get_bits(p, bits) ((p).pte & (bits))
582 -#define pte_set_bits(p, bits) ((p).pte |= (bits))
583 -#define pte_clear_bits(p, bits) ((p).pte &= ~(bits))
584 -#define pte_copy(to, from) ((to).pte = (from).pte)
585 -#define pte_is_zero(p) (!((p).pte & ~_PAGE_NEWPAGE))
586 -#define pte_set_val(p, phys, prot) (p).pte = (phys | pgprot_val(prot))
588 -typedef unsigned long pfn_t;
589 -typedef unsigned long phys_t;
593 -typedef struct { unsigned long pgprot; } pgprot_t;
595 -typedef struct page *pgtable_t;
597 -#define pgd_val(x) ((x).pgd)
598 -#define pgprot_val(x) ((x).pgprot)
600 -#define __pte(x) ((pte_t) { (x) } )
601 -#define __pgd(x) ((pgd_t) { (x) } )
602 -#define __pgprot(x) ((pgprot_t) { (x) } )
604 -extern unsigned long uml_physmem;
606 -#define PAGE_OFFSET (uml_physmem)
607 -#define KERNELBASE PAGE_OFFSET
609 -#define __va_space (8*1024*1024)
613 -/* Cast to unsigned long before casting to void * to avoid a warning from
614 - * mmap_kmem about cutting a long long down to a void *. Not sure that
615 - * casting is the right thing, but 32-bit UML can't have 64-bit virtual
618 -#define __pa(virt) to_phys((void *) (unsigned long) (virt))
619 -#define __va(phys) to_virt((unsigned long) (phys))
621 -#define phys_to_pfn(p) ((pfn_t) ((p) >> PAGE_SHIFT))
622 -#define pfn_to_phys(pfn) ((phys_t) ((pfn) << PAGE_SHIFT))
624 -#define pfn_valid(pfn) ((pfn) < max_mapnr)
625 -#define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v)))
627 -#include <asm-generic/memory_model.h>
628 -#include <asm-generic/getorder.h>
630 -#endif /* __ASSEMBLY__ */
632 -#ifdef CONFIG_X86_32
633 -#define __HAVE_ARCH_GATE_AREA 1
636 -#endif /* __UM_PAGE_H */
637 --- a/arch/um/include/asm/pgalloc.h
641 - * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
642 - * Copyright 2003 PathScale, Inc.
643 - * Derived from include/asm-i386/pgalloc.h and include/asm-i386/pgtable.h
644 - * Licensed under the GPL
647 -#ifndef __UM_PGALLOC_H
648 -#define __UM_PGALLOC_H
650 -#include <linux/mm.h>
652 -#define pmd_populate_kernel(mm, pmd, pte) \
653 - set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) __pa(pte)))
655 -#define pmd_populate(mm, pmd, pte) \
656 - set_pmd(pmd, __pmd(_PAGE_TABLE + \
657 - ((unsigned long long)page_to_pfn(pte) << \
658 - (unsigned long long) PAGE_SHIFT)))
659 -#define pmd_pgtable(pmd) pmd_page(pmd)
662 - * Allocate and free page tables.
664 -extern pgd_t *pgd_alloc(struct mm_struct *);
665 -extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
667 -extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
668 -extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
670 -static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
672 - free_page((unsigned long) pte);
675 -static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
677 - pgtable_page_dtor(pte);
681 -#define __pte_free_tlb(tlb,pte, address) \
683 - pgtable_page_dtor(pte); \
684 - tlb_remove_page((tlb),(pte)); \
687 -#ifdef CONFIG_3_LEVEL_PGTABLES
689 -static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
691 - free_page((unsigned long)pmd);
694 -#define __pmd_free_tlb(tlb,x, address) tlb_remove_page((tlb),virt_to_page(x))
697 -#define check_pgt_cache() do { } while (0)
701 --- a/arch/um/include/asm/pgtable-2level.h
705 - * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
706 - * Copyright 2003 PathScale, Inc.
707 - * Derived from include/asm-i386/pgtable.h
708 - * Licensed under the GPL
711 -#ifndef __UM_PGTABLE_2LEVEL_H
712 -#define __UM_PGTABLE_2LEVEL_H
714 -#include <asm-generic/pgtable-nopmd.h>
716 -/* PGDIR_SHIFT determines what a third-level page table entry can map */
718 -#define PGDIR_SHIFT 22
719 -#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
720 -#define PGDIR_MASK (~(PGDIR_SIZE-1))
723 - * entries per page directory level: the i386 is two-level, so
724 - * we don't really have any PMD directory physically.
726 -#define PTRS_PER_PTE 1024
727 -#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
728 -#define PTRS_PER_PGD 1024
729 -#define FIRST_USER_ADDRESS 0
731 -#define pte_ERROR(e) \
732 - printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), \
734 -#define pgd_ERROR(e) \
735 - printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), \
738 -static inline int pgd_newpage(pgd_t pgd) { return 0; }
739 -static inline void pgd_mkuptodate(pgd_t pgd) { }
741 -#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
743 -#define pte_pfn(x) phys_to_pfn(pte_val(x))
744 -#define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot))
745 -#define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot))
748 - * Bits 0 through 4 are taken
750 -#define PTE_FILE_MAX_BITS 27
752 -#define pte_to_pgoff(pte) (pte_val(pte) >> 5)
754 -#define pgoff_to_pte(off) ((pte_t) { ((off) << 5) + _PAGE_FILE })
757 --- a/arch/um/include/asm/pgtable-3level.h
761 - * Copyright 2003 PathScale Inc
762 - * Derived from include/asm-i386/pgtable.h
763 - * Licensed under the GPL
766 -#ifndef __UM_PGTABLE_3LEVEL_H
767 -#define __UM_PGTABLE_3LEVEL_H
769 -#include <asm-generic/pgtable-nopud.h>
771 -/* PGDIR_SHIFT determines what a third-level page table entry can map */
774 -#define PGDIR_SHIFT 30
776 -#define PGDIR_SHIFT 31
778 -#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
779 -#define PGDIR_MASK (~(PGDIR_SIZE-1))
781 -/* PMD_SHIFT determines the size of the area a second-level page table can
785 -#define PMD_SHIFT 21
786 -#define PMD_SIZE (1UL << PMD_SHIFT)
787 -#define PMD_MASK (~(PMD_SIZE-1))
790 - * entries per page directory level
793 -#define PTRS_PER_PTE 512
795 -#define PTRS_PER_PMD 512
796 -#define PTRS_PER_PGD 512
798 -#define PTRS_PER_PMD 1024
799 -#define PTRS_PER_PGD 1024
802 -#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
803 -#define FIRST_USER_ADDRESS 0
805 -#define pte_ERROR(e) \
806 - printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), \
808 -#define pmd_ERROR(e) \
809 - printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
811 -#define pgd_ERROR(e) \
812 - printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
815 -#define pud_none(x) (!(pud_val(x) & ~_PAGE_NEWPAGE))
816 -#define pud_bad(x) ((pud_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
817 -#define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
818 -#define pud_populate(mm, pud, pmd) \
819 - set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
822 -#define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
824 -#define set_pud(pudptr, pudval) (*(pudptr) = (pudval))
827 -static inline int pgd_newpage(pgd_t pgd)
829 - return(pgd_val(pgd) & _PAGE_NEWPAGE);
832 -static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; }
835 -#define set_pmd(pmdptr, pmdval) set_64bit((u64 *) (pmdptr), pmd_val(pmdval))
837 -#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
841 -extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
843 -static inline void pud_clear (pud_t *pud)
845 - set_pud(pud, __pud(_PAGE_NEWPAGE));
848 -#define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK)
849 -#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))
851 -/* Find an entry in the second-level page table.. */
852 -#define pmd_offset(pud, address) ((pmd_t *) pud_page_vaddr(*(pud)) + \
853 - pmd_index(address))
855 -static inline unsigned long pte_pfn(pte_t pte)
857 - return phys_to_pfn(pte_val(pte));
860 -static inline pte_t pfn_pte(pfn_t page_nr, pgprot_t pgprot)
863 - phys_t phys = pfn_to_phys(page_nr);
865 - pte_set_val(pte, phys, pgprot);
869 -static inline pmd_t pfn_pmd(pfn_t page_nr, pgprot_t pgprot)
871 - return __pmd((page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
875 - * Bits 0 through 3 are taken in the low part of the pte,
876 - * put the 32 bits of offset into the high part.
878 -#define PTE_FILE_MAX_BITS 32
882 -#define pte_to_pgoff(p) ((p).pte >> 32)
884 -#define pgoff_to_pte(off) ((pte_t) { ((off) << 32) | _PAGE_FILE })
888 -#define pte_to_pgoff(pte) ((pte).pte_high)
890 -#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
896 --- a/arch/um/include/asm/pgtable.h
900 - * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
901 - * Copyright 2003 PathScale, Inc.
902 - * Derived from include/asm-i386/pgtable.h
903 - * Licensed under the GPL
906 -#ifndef __UM_PGTABLE_H
907 -#define __UM_PGTABLE_H
909 -#include <asm/fixmap.h>
911 -#define _PAGE_PRESENT 0x001
912 -#define _PAGE_NEWPAGE 0x002
913 -#define _PAGE_NEWPROT 0x004
914 -#define _PAGE_RW 0x020
915 -#define _PAGE_USER 0x040
916 -#define _PAGE_ACCESSED 0x080
917 -#define _PAGE_DIRTY 0x100
918 -/* If _PAGE_PRESENT is clear, we use these: */
919 -#define _PAGE_FILE 0x008 /* nonlinear file mapping, saved PTE; unset:swap */
920 -#define _PAGE_PROTNONE 0x010 /* if the user mapped it with PROT_NONE;
921 - pte_present gives true */
923 -#ifdef CONFIG_3_LEVEL_PGTABLES
924 -#include <asm/pgtable-3level.h>
926 -#include <asm/pgtable-2level.h>
929 -extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
931 -/* zero page used for uninitialized stuff */
932 -extern unsigned long *empty_zero_page;
934 -#define pgtable_cache_init() do ; while (0)
936 -/* Just any arbitrary offset to the start of the vmalloc VM area: the
937 - * current 8MB value just means that there will be a 8MB "hole" after the
938 - * physical memory until the kernel virtual memory starts. That means that
939 - * any out-of-bounds memory accesses will hopefully be caught.
940 - * The vmalloc() routines leaves a hole of 4kB between each vmalloced
941 - * area for the same reason. ;)
944 -extern unsigned long end_iomem;
946 -#define VMALLOC_OFFSET (__va_space)
947 -#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
948 -#define PKMAP_BASE ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
949 -#ifdef CONFIG_HIGHMEM
950 -# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
952 -# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
954 -#define MODULES_VADDR VMALLOC_START
955 -#define MODULES_END VMALLOC_END
956 -#define MODULES_LEN (MODULES_VADDR - MODULES_END)
958 -#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
959 -#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
960 -#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
961 -#define __PAGE_KERNEL_EXEC \
962 - (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
963 -#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
964 -#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
965 -#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
966 -#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
967 -#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
968 -#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
971 - * The i386 can't do page protection for execute, and considers that the same
973 - * Also, write permissions imply read permissions. This is the closest we can
976 -#define __P000 PAGE_NONE
977 -#define __P001 PAGE_READONLY
978 -#define __P010 PAGE_COPY
979 -#define __P011 PAGE_COPY
980 -#define __P100 PAGE_READONLY
981 -#define __P101 PAGE_READONLY
982 -#define __P110 PAGE_COPY
983 -#define __P111 PAGE_COPY
985 -#define __S000 PAGE_NONE
986 -#define __S001 PAGE_READONLY
987 -#define __S010 PAGE_SHARED
988 -#define __S011 PAGE_SHARED
989 -#define __S100 PAGE_READONLY
990 -#define __S101 PAGE_READONLY
991 -#define __S110 PAGE_SHARED
992 -#define __S111 PAGE_SHARED
995 - * ZERO_PAGE is a global shared page that is always zero: used
996 - * for zero-mapped memory areas etc..
998 -#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
1000 -#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))
1002 -#define pmd_none(x) (!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
1003 -#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
1005 -#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
1006 -#define pmd_clear(xp) do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)
1008 -#define pmd_newpage(x) (pmd_val(x) & _PAGE_NEWPAGE)
1009 -#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)
1011 -#define pud_newpage(x) (pud_val(x) & _PAGE_NEWPAGE)
1012 -#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)
1014 -#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)
1016 -#define pte_page(x) pfn_to_page(pte_pfn(x))
1018 -#define pte_present(x) pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))
1021 - * =================================
1022 - * Flags checking section.
1023 - * =================================
1026 -static inline int pte_none(pte_t pte)
1028 - return pte_is_zero(pte);
1032 - * The following only work if pte_present() is true.
1033 - * Undefined behaviour if not..
1035 -static inline int pte_read(pte_t pte)
1037 - return((pte_get_bits(pte, _PAGE_USER)) &&
1038 - !(pte_get_bits(pte, _PAGE_PROTNONE)));
1041 -static inline int pte_exec(pte_t pte){
1042 - return((pte_get_bits(pte, _PAGE_USER)) &&
1043 - !(pte_get_bits(pte, _PAGE_PROTNONE)));
1046 -static inline int pte_write(pte_t pte)
1048 - return((pte_get_bits(pte, _PAGE_RW)) &&
1049 - !(pte_get_bits(pte, _PAGE_PROTNONE)));
1053 - * The following only works if pte_present() is not true.
1055 -static inline int pte_file(pte_t pte)
1057 - return pte_get_bits(pte, _PAGE_FILE);
1060 -static inline int pte_dirty(pte_t pte)
1062 - return pte_get_bits(pte, _PAGE_DIRTY);
1065 -static inline int pte_young(pte_t pte)
1067 - return pte_get_bits(pte, _PAGE_ACCESSED);
1070 -static inline int pte_newpage(pte_t pte)
1072 - return pte_get_bits(pte, _PAGE_NEWPAGE);
1075 -static inline int pte_newprot(pte_t pte)
1077 - return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
1080 -static inline int pte_special(pte_t pte)
1086 - * =================================
1087 - * Flags setting section.
1088 - * =================================
1091 -static inline pte_t pte_mknewprot(pte_t pte)
1093 - pte_set_bits(pte, _PAGE_NEWPROT);
1097 -static inline pte_t pte_mkclean(pte_t pte)
1099 - pte_clear_bits(pte, _PAGE_DIRTY);
1103 -static inline pte_t pte_mkold(pte_t pte)
1105 - pte_clear_bits(pte, _PAGE_ACCESSED);
1109 -static inline pte_t pte_wrprotect(pte_t pte)
1111 - pte_clear_bits(pte, _PAGE_RW);
1112 - return(pte_mknewprot(pte));
1115 -static inline pte_t pte_mkread(pte_t pte)
1117 - pte_set_bits(pte, _PAGE_USER);
1118 - return(pte_mknewprot(pte));
1121 -static inline pte_t pte_mkdirty(pte_t pte)
1123 - pte_set_bits(pte, _PAGE_DIRTY);
1127 -static inline pte_t pte_mkyoung(pte_t pte)
1129 - pte_set_bits(pte, _PAGE_ACCESSED);
1133 -static inline pte_t pte_mkwrite(pte_t pte)
1135 - pte_set_bits(pte, _PAGE_RW);
1136 - return(pte_mknewprot(pte));
1139 -static inline pte_t pte_mkuptodate(pte_t pte)
1141 - pte_clear_bits(pte, _PAGE_NEWPAGE);
1142 - if(pte_present(pte))
1143 - pte_clear_bits(pte, _PAGE_NEWPROT);
1147 -static inline pte_t pte_mknewpage(pte_t pte)
1149 - pte_set_bits(pte, _PAGE_NEWPAGE);
1153 -static inline pte_t pte_mkspecial(pte_t pte)
1158 -static inline void set_pte(pte_t *pteptr, pte_t pteval)
1160 - pte_copy(*pteptr, pteval);
1162 - /* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
1163 - * fix_range knows to unmap it. _PAGE_NEWPROT is specific to
1167 - *pteptr = pte_mknewpage(*pteptr);
1168 - if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
1170 -#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
1172 -#define __HAVE_ARCH_PTE_SAME
1173 -static inline int pte_same(pte_t pte_a, pte_t pte_b)
1175 - return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
1179 - * Conversion functions: convert a page and protection to a page entry,
1180 - * and a page entry and page directory to the page they refer to.
1183 -#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
1184 -#define __virt_to_page(virt) phys_to_page(__pa(virt))
1185 -#define page_to_phys(page) pfn_to_phys((pfn_t) page_to_pfn(page))
1186 -#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)
1188 -#define mk_pte(page, pgprot) \
1191 - pte_set_val(pte, page_to_phys(page), (pgprot)); \
1192 - if (pte_present(pte)) \
1193 - pte_mknewprot(pte_mknewpage(pte)); \
1196 -static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
1198 - pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
1203 - * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
1205 - * this macro returns the index of the entry in the pgd page which would
1206 - * control the given virtual address
1208 -#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
1211 - * pgd_offset() returns a (pgd_t *)
1212 - * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
1214 -#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
1217 - * a shortcut which implies the use of the kernel's pgd, instead
1220 -#define pgd_offset_k(address) pgd_offset(&init_mm, address)
1223 - * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
1225 - * this macro returns the index of the entry in the pmd page which would
1226 - * control the given virtual address
1228 -#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
1229 -#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
1231 -#define pmd_page_vaddr(pmd) \
1232 - ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
1235 - * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
1237 - * this macro returns the index of the entry in the pte page which would
1238 - * control the given virtual address
1240 -#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
1241 -#define pte_offset_kernel(dir, address) \
1242 - ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
1243 -#define pte_offset_map(dir, address) \
1244 - ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
1245 -#define pte_unmap(pte) do { } while (0)
1248 -extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
1250 -#define update_mmu_cache(vma,address,ptep) do ; while (0)
1252 -/* Encode and de-code a swap entry */
1253 -#define __swp_type(x) (((x).val >> 5) & 0x1f)
1254 -#define __swp_offset(x) ((x).val >> 11)
1256 -#define __swp_entry(type, offset) \
1257 - ((swp_entry_t) { ((type) << 5) | ((offset) << 11) })
1258 -#define __pte_to_swp_entry(pte) \
1259 - ((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
1260 -#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
1262 -#define kern_addr_valid(addr) (1)
1264 -#include <asm-generic/pgtable.h>
1266 -/* Clear a kernel PTE and flush it from the TLB */
1267 -#define kpte_clear_flush(ptep, vaddr) \
1269 - pte_clear(&init_mm, (vaddr), (ptep)); \
1270 - __flush_tlb_one((vaddr)); \
1274 --- a/arch/um/include/asm/processor-generic.h
1278 - * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
1279 - * Licensed under the GPL
1282 -#ifndef __UM_PROCESSOR_GENERIC_H
1283 -#define __UM_PROCESSOR_GENERIC_H
1287 -struct task_struct;
1289 -#include <asm/ptrace.h>
1290 -#include <registers.h>
1291 -#include <sysdep/archsetjmp.h>
1293 -#include <linux/prefetch.h>
1297 -struct thread_struct {
1298 - struct pt_regs regs;
1299 - struct pt_regs *segv_regs;
1300 - int singlestep_syscall;
1302 - jmp_buf *fault_catcher;
1303 - struct task_struct *prev_sched;
1304 - struct arch_thread arch;
1305 - jmp_buf switch_buf;
1313 - int (*proc)(void *);
1317 - void (*proc)(void *);
1324 -#define INIT_THREAD \
1326 - .regs = EMPTY_REGS, \
1327 - .fault_addr = NULL, \
1328 - .prev_sched = NULL, \
1329 - .arch = INIT_ARCH_THREAD, \
1330 - .request = { 0 } \
1333 -static inline void release_thread(struct task_struct *task)
1337 -extern unsigned long thread_saved_pc(struct task_struct *t);
1339 -static inline void mm_copy_segments(struct mm_struct *from_mm,
1340 - struct mm_struct *new_mm)
1344 -#define init_stack (init_thread_union.stack)
1347 - * User space process size: 3GB (default).
1349 -extern unsigned long task_size;
1351 -#define TASK_SIZE (task_size)
1354 -#undef STACK_TOP_MAX
1356 -extern unsigned long stacksizelim;
1358 -#define STACK_ROOM (stacksizelim)
1359 -#define STACK_TOP (TASK_SIZE - 2 * PAGE_SIZE)
1360 -#define STACK_TOP_MAX STACK_TOP
1362 -/* This decides where the kernel will search for a free chunk of vm
1363 - * space during mmap's.
1365 -#define TASK_UNMAPPED_BASE (0x40000000)
1367 -extern void start_thread(struct pt_regs *regs, unsigned long entry,
1368 - unsigned long stack);
1370 -struct cpuinfo_um {
1371 - unsigned long loops_per_jiffy;
1375 -extern struct cpuinfo_um boot_cpu_data;
1377 -#define my_cpu_data cpu_data[smp_processor_id()]
1380 -extern struct cpuinfo_um cpu_data[];
1381 -#define current_cpu_data cpu_data[smp_processor_id()]
1383 -#define cpu_data (&boot_cpu_data)
1384 -#define current_cpu_data boot_cpu_data
1388 -#define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf)
1389 -extern unsigned long get_wchan(struct task_struct *p);
1392 --- a/arch/um/include/asm/ptrace-generic.h
1396 - * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
1397 - * Licensed under the GPL
1400 -#ifndef __UM_PTRACE_GENERIC_H
1401 -#define __UM_PTRACE_GENERIC_H
1403 -#ifndef __ASSEMBLY__
1405 -#include <asm/ptrace-abi.h>
1406 -#include <sysdep/ptrace.h>
1409 - struct uml_pt_regs regs;
1412 -#define arch_has_single_step() (1)
1414 -#define EMPTY_REGS { .regs = EMPTY_UML_PT_REGS }
1416 -#define PT_REGS_IP(r) UPT_IP(&(r)->regs)
1417 -#define PT_REGS_SP(r) UPT_SP(&(r)->regs)
1419 -#define PT_REGS_RESTART_SYSCALL(r) UPT_RESTART_SYSCALL(&(r)->regs)
1421 -#define PT_REGS_SYSCALL_NR(r) UPT_SYSCALL_NR(&(r)->regs)
1423 -#define instruction_pointer(regs) PT_REGS_IP(regs)
1425 -struct task_struct;
1427 -extern long subarch_ptrace(struct task_struct *child, long request,
1428 - unsigned long addr, unsigned long data);
1429 -extern unsigned long getreg(struct task_struct *child, int regno);
1430 -extern int putreg(struct task_struct *child, int regno, unsigned long value);
1432 -extern int arch_copy_tls(struct task_struct *new);
1433 -extern void clear_flushed_tls(struct task_struct *task);
1434 -extern void syscall_trace_enter(struct pt_regs *regs);
1435 -extern void syscall_trace_leave(struct pt_regs *regs);
1440 --- a/arch/um/include/asm/setup.h
1443 -#ifndef SETUP_H_INCLUDED
1444 -#define SETUP_H_INCLUDED
1446 -/* POSIX mandated with _POSIX_ARG_MAX that we can rely on 4096 chars in the
1447 - * command line, so this choice is ok.
1450 -#define COMMAND_LINE_SIZE 4096
1452 -#endif /* SETUP_H_INCLUDED */
1453 --- a/arch/um/include/asm/smp.h
1461 -#include <linux/bitops.h>
1462 -#include <asm/current.h>
1463 -#include <linux/cpumask.h>
1465 -#define raw_smp_processor_id() (current_thread->cpu)
1467 -#define cpu_logical_map(n) (n)
1468 -#define cpu_number_map(n) (n)
1469 -extern int hard_smp_processor_id(void);
1470 -#define NO_PROC_ID -1
1475 -static inline void smp_cpus_done(unsigned int maxcpus)
1479 -extern struct task_struct *idle_threads[NR_CPUS];
1483 -#define hard_smp_processor_id() 0
1488 --- a/arch/um/include/asm/stacktrace.h
1491 -#ifndef _ASM_UML_STACKTRACE_H
1492 -#define _ASM_UML_STACKTRACE_H
1494 -#include <linux/uaccess.h>
1495 -#include <linux/ptrace.h>
1497 -struct stack_frame {
1498 - struct stack_frame *next_frame;
1499 - unsigned long return_address;
1502 -struct stacktrace_ops {
1503 - void (*address)(void *data, unsigned long address, int reliable);
1506 -#ifdef CONFIG_FRAME_POINTER
1507 -static inline unsigned long
1508 -get_frame_pointer(struct task_struct *task, struct pt_regs *segv_regs)
1510 - if (!task || task == current)
1511 - return segv_regs ? PT_REGS_BP(segv_regs) : current_bp();
1512 - return KSTK_EBP(task);
1515 -static inline unsigned long
1516 -get_frame_pointer(struct task_struct *task, struct pt_regs *segv_regs)
1522 -static inline unsigned long
1523 -*get_stack_pointer(struct task_struct *task, struct pt_regs *segv_regs)
1525 - if (!task || task == current)
1526 - return segv_regs ? (unsigned long *)PT_REGS_SP(segv_regs) : current_sp();
1527 - return (unsigned long *)KSTK_ESP(task);
1530 -void dump_trace(struct task_struct *tsk, const struct stacktrace_ops *ops, void *data);
1532 -#endif /* _ASM_UML_STACKTRACE_H */
1533 --- a/arch/um/include/asm/sysrq.h
1536 -#ifndef __UM_SYSRQ_H
1537 -#define __UM_SYSRQ_H
1539 -struct task_struct;
1540 -extern void show_trace(struct task_struct* task, unsigned long *stack);
1543 --- a/arch/um/include/asm/thread_info.h
1547 - * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
1548 - * Licensed under the GPL
1551 -#ifndef __UM_THREAD_INFO_H
1552 -#define __UM_THREAD_INFO_H
1554 -#ifndef __ASSEMBLY__
1556 -#include <asm/types.h>
1557 -#include <asm/page.h>
1558 -#include <asm/uaccess.h>
1560 -struct thread_info {
1561 - struct task_struct *task; /* main task structure */
1562 - struct exec_domain *exec_domain; /* execution domain */
1563 - unsigned long flags; /* low level flags */
1564 - __u32 cpu; /* current CPU */
1565 - int preempt_count; /* 0 => preemptable,
1567 - mm_segment_t addr_limit; /* thread address space:
1568 - 0-0xBFFFFFFF for user
1569 - 0-0xFFFFFFFF for kernel */
1570 - struct restart_block restart_block;
1571 - struct thread_info *real_thread; /* Points to non-IRQ stack */
1574 -#define INIT_THREAD_INFO(tsk) \
1577 - .exec_domain = &default_exec_domain, \
1580 - .preempt_count = INIT_PREEMPT_COUNT, \
1581 - .addr_limit = KERNEL_DS, \
1582 - .restart_block = { \
1583 - .fn = do_no_restart_syscall, \
1585 - .real_thread = NULL, \
1588 -#define init_thread_info (init_thread_union.thread_info)
1589 -#define init_stack (init_thread_union.stack)
1591 -#define THREAD_SIZE ((1 << CONFIG_KERNEL_STACK_ORDER) * PAGE_SIZE)
1592 -/* how to get the thread information struct from C */
1593 -static inline struct thread_info *current_thread_info(void)
1595 - struct thread_info *ti;
1596 - unsigned long mask = THREAD_SIZE - 1;
1599 - asm volatile ("" : "=r" (p) : "0" (&ti));
1600 - ti = (struct thread_info *) (((unsigned long)p) & ~mask);
1604 -#define THREAD_SIZE_ORDER CONFIG_KERNEL_STACK_ORDER
1608 -#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
1609 -#define TIF_SIGPENDING 1 /* signal pending */
1610 -#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
1611 -#define TIF_RESTART_BLOCK 4
1612 -#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
1613 -#define TIF_SYSCALL_AUDIT 6
1614 -#define TIF_RESTORE_SIGMASK 7
1615 -#define TIF_NOTIFY_RESUME 8
1617 -#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
1618 -#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
1619 -#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
1620 -#define _TIF_MEMDIE (1 << TIF_MEMDIE)
1621 -#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
1624 --- a/arch/um/include/asm/timex.h
1627 -#ifndef __UM_TIMEX_H
1628 -#define __UM_TIMEX_H
1630 -typedef unsigned long cycles_t;
1632 -static inline cycles_t get_cycles (void)
1637 -#define CLOCK_TICK_RATE (HZ)
1640 --- a/arch/um/include/asm/tlb.h
1646 -#include <linux/pagemap.h>
1647 -#include <linux/swap.h>
1648 -#include <asm/percpu.h>
1649 -#include <asm/pgalloc.h>
1650 -#include <asm/tlbflush.h>
1652 -#define tlb_start_vma(tlb, vma) do { } while (0)
1653 -#define tlb_end_vma(tlb, vma) do { } while (0)
1654 -#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
1656 -/* struct mmu_gather is an opaque type used by the mm code for passing around
1657 - * any data needed by arch specific code for tlb_remove_page.
1659 -struct mmu_gather {
1660 - struct mm_struct *mm;
1661 - unsigned int need_flush; /* Really unmapped some ptes? */
1662 - unsigned long start;
1663 - unsigned long end;
1664 - unsigned int fullmm; /* non-zero means full mm flush */
1667 -static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
1668 - unsigned long address)
1670 - if (tlb->start > address)
1671 - tlb->start = address;
1672 - if (tlb->end < address + PAGE_SIZE)
1673 - tlb->end = address + PAGE_SIZE;
1676 -static inline void init_tlb_gather(struct mmu_gather *tlb)
1678 - tlb->need_flush = 0;
1680 - tlb->start = TASK_SIZE;
1683 - if (tlb->fullmm) {
1685 - tlb->end = TASK_SIZE;
1690 -tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
1693 - tlb->start = start;
1695 - tlb->fullmm = !(start | (end+1));
1697 - init_tlb_gather(tlb);
1700 -extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
1701 - unsigned long end);
1704 -tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
1706 - flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
1710 -tlb_flush_mmu_free(struct mmu_gather *tlb)
1712 - init_tlb_gather(tlb);
1716 -tlb_flush_mmu(struct mmu_gather *tlb)
1718 - if (!tlb->need_flush)
1721 - tlb_flush_mmu_tlbonly(tlb);
1722 - tlb_flush_mmu_free(tlb);
1726 - * Called at the end of the shootdown operation to free up any resources
1727 - * that were required.
1730 -tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
1732 - tlb_flush_mmu(tlb);
1734 - /* keep the page table cache within bounds */
1735 - check_pgt_cache();
1739 - * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
1740 - * while handling the additional races in SMP caused by other CPUs
1741 - * caching valid mappings in their TLBs.
1743 -static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
1745 - tlb->need_flush = 1;
1746 - free_page_and_swap_cache(page);
1747 - return 1; /* avoid calling tlb_flush_mmu */
1750 -static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
1752 - __tlb_remove_page(tlb, page);
1756 - * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
1758 - * Record the fact that pte's were really umapped in ->need_flush, so we can
1759 - * later optimise away the tlb invalidate. This helps when userspace is
1760 - * unmapping already-unmapped pages, which happens quite a lot.
1762 -#define tlb_remove_tlb_entry(tlb, ptep, address) \
1764 - tlb->need_flush = 1; \
1765 - __tlb_remove_tlb_entry(tlb, ptep, address); \
1768 -#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
1770 -#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)
1772 -#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
1774 -#define tlb_migrate_finish(mm) do {} while (0)
1777 --- a/arch/um/include/asm/tlbflush.h
1781 - * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
1782 - * Licensed under the GPL
1785 -#ifndef __UM_TLBFLUSH_H
1786 -#define __UM_TLBFLUSH_H
1788 -#include <linux/mm.h>
1793 - * - flush_tlb() flushes the current mm struct TLBs
1794 - * - flush_tlb_all() flushes all processes TLBs
1795 - * - flush_tlb_mm(mm) flushes the specified mm context TLB's
1796 - * - flush_tlb_page(vma, vmaddr) flushes one page
1797 - * - flush_tlb_kernel_vm() flushes the kernel vm area
1798 - * - flush_tlb_range(vma, start, end) flushes a range of pages
1801 -extern void flush_tlb_all(void);
1802 -extern void flush_tlb_mm(struct mm_struct *mm);
1803 -extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
1804 - unsigned long end);
1805 -extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long address);
1806 -extern void flush_tlb_kernel_vm(void);
1807 -extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
1808 -extern void __flush_tlb_one(unsigned long addr);
1811 --- a/arch/um/include/asm/uaccess.h
1815 - * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
1816 - * Licensed under the GPL
1819 -#ifndef __UM_UACCESS_H
1820 -#define __UM_UACCESS_H
1822 -/* thread_info has a mm_segment_t in it, so put the definition up here */
1824 - unsigned long seg;
1827 -#include <linux/thread_info.h>
1828 -#include <linux/errno.h>
1829 -#include <asm/processor.h>
1830 -#include <asm/elf.h>
1832 -#define VERIFY_READ 0
1833 -#define VERIFY_WRITE 1
1836 - * The fs value determines whether argument validity checking should be
1837 - * performed or not. If get_fs() == USER_DS, checking is performed, with
1838 - * get_fs() == KERNEL_DS, checking is bypassed.
1840 - * For historical reasons, these macros are grossly misnamed.
1843 -#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
1845 -#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
1846 -#define USER_DS MAKE_MM_SEG(TASK_SIZE)
1848 -#define get_ds() (KERNEL_DS)
1849 -#define get_fs() (current_thread_info()->addr_limit)
1850 -#define set_fs(x) (current_thread_info()->addr_limit = (x))
1852 -#define segment_eq(a, b) ((a).seg == (b).seg)
1854 -#define __under_task_size(addr, size) \
1855 - (((unsigned long) (addr) < TASK_SIZE) && \
1856 - (((unsigned long) (addr) + (size)) < TASK_SIZE))
1858 -#define __access_ok_vsyscall(type, addr, size) \
1859 - ((type == VERIFY_READ) && \
1860 - ((unsigned long) (addr) >= FIXADDR_USER_START) && \
1861 - ((unsigned long) (addr) + (size) <= FIXADDR_USER_END) && \
1862 - ((unsigned long) (addr) + (size) >= (unsigned long)(addr)))
1864 -#define __addr_range_nowrap(addr, size) \
1865 - ((unsigned long) (addr) <= ((unsigned long) (addr) + (size)))
1867 -#define access_ok(type, addr, size) \
1868 - (__addr_range_nowrap(addr, size) && \
1869 - (__under_task_size(addr, size) || \
1870 - __access_ok_vsyscall(type, addr, size) || \
1871 - segment_eq(get_fs(), KERNEL_DS)))
1873 -extern int copy_from_user(void *to, const void __user *from, int n);
1874 -extern int copy_to_user(void __user *to, const void *from, int n);
1877 - * strncpy_from_user: - Copy a NUL terminated string from userspace.
1878 - * @dst: Destination address, in kernel space. This buffer must be at
1879 - * least @count bytes long.
1880 - * @src: Source address, in user space.
1881 - * @count: Maximum number of bytes to copy, including the trailing NUL.
1883 - * Copies a NUL-terminated string from userspace to kernel space.
1885 - * On success, returns the length of the string (not including the trailing
1888 - * If access to userspace fails, returns -EFAULT (some data may have been
1891 - * If @count is smaller than the length of the string, copies @count bytes
1892 - * and returns @count.
1895 -extern int strncpy_from_user(char *dst, const char __user *src, int count);
1898 - * __clear_user: - Zero a block of memory in user space, with less checking.
1899 - * @to: Destination address, in user space.
1900 - * @n: Number of bytes to zero.
1902 - * Zero a block of memory in user space. Caller must check
1903 - * the specified block with access_ok() before calling this function.
1905 - * Returns number of bytes that could not be cleared.
1906 - * On success, this will be zero.
1908 -extern int __clear_user(void __user *mem, int len);
1911 - * clear_user: - Zero a block of memory in user space.
1912 - * @to: Destination address, in user space.
1913 - * @n: Number of bytes to zero.
1915 - * Zero a block of memory in user space.
1917 - * Returns number of bytes that could not be cleared.
1918 - * On success, this will be zero.
1920 -extern int clear_user(void __user *mem, int len);
1923 - * strlen_user: - Get the size of a string in user space.
1924 - * @str: The string to measure.
1925 - * @n: The maximum valid length
1927 - * Get the size of a NUL-terminated string in user space.
1929 - * Returns the size of the string INCLUDING the terminating NUL.
1930 - * On exception, returns 0.
1931 - * If the string is too long, returns a value greater than @n.
1933 -extern int strnlen_user(const void __user *str, int len);
1935 -#define __copy_from_user(to, from, n) copy_from_user(to, from, n)
1937 -#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
1939 -#define __copy_to_user_inatomic __copy_to_user
1940 -#define __copy_from_user_inatomic __copy_from_user
1942 -#define __get_user(x, ptr) \
1944 - const __typeof__(*(ptr)) __user *__private_ptr = (ptr); \
1945 - __typeof__(x) __private_val; \
1946 - int __private_ret = -EFAULT; \
1947 - (x) = (__typeof__(*(__private_ptr)))0; \
1948 - if (__copy_from_user((__force void *)&__private_val, (__private_ptr),\
1949 - sizeof(*(__private_ptr))) == 0) { \
1950 - (x) = (__typeof__(*(__private_ptr))) __private_val; \
1951 - __private_ret = 0; \
1956 -#define get_user(x, ptr) \
1958 - const __typeof__((*(ptr))) __user *private_ptr = (ptr); \
1959 - (access_ok(VERIFY_READ, private_ptr, sizeof(*private_ptr)) ? \
1960 - __get_user(x, private_ptr) : ((x) = (__typeof__(*ptr))0, -EFAULT)); \
1963 -#define __put_user(x, ptr) \
1965 - __typeof__(*(ptr)) __user *__private_ptr = ptr; \
1966 - __typeof__(*(__private_ptr)) __private_val; \
1967 - int __private_ret = -EFAULT; \
1968 - __private_val = (__typeof__(*(__private_ptr))) (x); \
1969 - if (__copy_to_user((__private_ptr), &__private_val, \
1970 - sizeof(*(__private_ptr))) == 0) { \
1971 - __private_ret = 0; \
1976 -#define put_user(x, ptr) \
1978 - __typeof__(*(ptr)) __user *private_ptr = (ptr); \
1979 - (access_ok(VERIFY_WRITE, private_ptr, sizeof(*private_ptr)) ? \
1980 - __put_user(x, private_ptr) : -EFAULT); \
1983 -#define strlen_user(str) strnlen_user(str, ~0U >> 1)
1985 -struct exception_table_entry
1987 - unsigned long insn;
1988 - unsigned long fixup;
1993 +++ b/arch/um/include/uapi/asm/Kbuild
1995 +generic-y += barrier.h
1997 +generic-y += clkdev.h
1998 +generic-y += cputime.h
1999 +generic-y += current.h
2000 +generic-y += delay.h
2001 +generic-y += device.h
2002 +generic-y += emergency-restart.h
2003 +generic-y += exec.h
2004 +generic-y += ftrace.h
2005 +generic-y += futex.h
2006 +generic-y += hardirq.h
2007 +generic-y += hash.h
2008 +generic-y += hw_irq.h
2010 +generic-y += irq_regs.h
2011 +generic-y += irq_work.h
2012 +generic-y += kdebug.h
2013 +generic-y += mcs_spinlock.h
2014 +generic-y += mutex.h
2015 +generic-y += param.h
2017 +generic-y += percpu.h
2018 +generic-y += preempt.h
2019 +generic-y += scatterlist.h
2020 +generic-y += sections.h
2021 +generic-y += switch_to.h
2022 +generic-y += topology.h
2023 +generic-y += trace_clock.h
2026 +++ b/arch/um/include/uapi/asm/a.out-core.h
2028 +/* a.out coredump register dumper
2030 + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
2031 + * Written by David Howells (dhowells@redhat.com)
2033 + * This program is free software; you can redistribute it and/or
2034 + * modify it under the terms of the GNU General Public Licence
2035 + * as published by the Free Software Foundation; either version
2036 + * 2 of the Licence, or (at your option) any later version.
2039 +#ifndef __UM_A_OUT_CORE_H
2040 +#define __UM_A_OUT_CORE_H
2044 +#include <linux/user.h>
2047 + * fill in the user structure for an a.out core dump
2049 +static inline void aout_dump_thread(struct pt_regs *regs, struct user *u)
2053 +#endif /* __KERNEL__ */
2054 +#endif /* __UM_A_OUT_CORE_H */
2056 +++ b/arch/um/include/uapi/asm/bugs.h
2058 +#ifndef __UM_BUGS_H
2059 +#define __UM_BUGS_H
2061 +void check_bugs(void);
2065 +++ b/arch/um/include/uapi/asm/cache.h
2067 +#ifndef __UM_CACHE_H
2068 +#define __UM_CACHE_H
2071 +#if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
2072 +# define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
2073 +#elif defined(CONFIG_UML_X86) /* 64-bit */
2074 +# define L1_CACHE_SHIFT 6 /* Should be 7 on Intel */
2076 +/* XXX: this was taken from x86, now it's completely random. Luckily only
2077 + * affects SMP padding. */
2078 +# define L1_CACHE_SHIFT 5
2081 +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2085 +++ b/arch/um/include/uapi/asm/common.lds.S
2087 +#include <asm-generic/vmlinux.lds.h>
2089 + .fini : { *(.fini) } =0x9090
2091 + PROVIDE (etext = .);
2095 + PROVIDE (sdata = .);
2099 + .unprotected : { *(.unprotected) }
2101 + PROVIDE (_unprotected_end = .);
2104 + .note : { *(.note.*) }
2105 + EXCEPTION_TABLE(0)
2109 + .uml.setup.init : {
2110 + __uml_setup_start = .;
2111 + *(.uml.setup.init)
2112 + __uml_setup_end = .;
2115 + .uml.help.init : {
2116 + __uml_help_start = .;
2118 + __uml_help_end = .;
2121 + .uml.postsetup.init : {
2122 + __uml_postsetup_start = .;
2123 + *(.uml.postsetup.init)
2124 + __uml_postsetup_end = .;
2131 + PERCPU_SECTION(32)
2133 + .initcall.init : {
2137 + .con_initcall.init : {
2141 + .uml.initcall.init : {
2142 + __uml_initcall_start = .;
2143 + *(.uml.initcall.init)
2144 + __uml_initcall_end = .;
2150 + __exitcall_begin = .;
2152 + __exitcall_end = .;
2156 + __uml_exitcall_begin = .;
2157 + *(.uml.exitcall.exit)
2158 + __uml_exitcall_end = .;
2162 + .altinstructions : {
2163 + __alt_instructions = .;
2164 + *(.altinstructions)
2165 + __alt_instructions_end = .;
2167 + .altinstr_replacement : { *(.altinstr_replacement) }
2168 + /* .exit.text is discard at runtime, not link time, to deal with references
2169 + from .altinstructions and .eh_frame */
2170 + .exit.text : { *(.exit.text) }
2171 + .exit.data : { *(.exit.data) }
2173 + .preinit_array : {
2174 + __preinit_array_start = .;
2176 + __preinit_array_end = .;
2179 + __init_array_start = .;
2181 + __init_array_end = .;
2184 + __fini_array_start = .;
2186 + __fini_array_end = .;
2195 +++ b/arch/um/include/uapi/asm/dma.h
2200 +#include <asm/io.h>
2202 +extern unsigned long uml_physmem;
2204 +#define MAX_DMA_ADDRESS (uml_physmem)
2208 +++ b/arch/um/include/uapi/asm/fixmap.h
2210 +#ifndef __UM_FIXMAP_H
2211 +#define __UM_FIXMAP_H
2213 +#include <asm/processor.h>
2214 +#include <asm/kmap_types.h>
2215 +#include <asm/archparam.h>
2216 +#include <asm/page.h>
2217 +#include <linux/threads.h>
2220 + * Here we define all the compile-time 'special' virtual
2221 + * addresses. The point is to have a constant address at
2222 + * compile time, but to set the physical address only
2223 + * in the boot process. We allocate these special addresses
2224 + * from the end of virtual memory (0xfffff000) backwards.
2225 + * Also this lets us do fail-safe vmalloc(), we
2226 + * can guarantee that these special addresses and
2227 + * vmalloc()-ed addresses never overlap.
2229 + * these 'compile-time allocated' memory buffers are
2230 + * fixed-size 4k pages. (or larger if used with an increment
2231 + * highger than 1) use fixmap_set(idx,phys) to associate
2232 + * physical memory with fixmap indices.
2234 + * TLB entries of such buffers will not be flushed across
2239 + * on UP currently we will have no trace of the fixmap mechanizm,
2240 + * no page table allocations, etc. This might change in the
2241 + * future, say framebuffers for the console driver(s) could be
2244 +enum fixed_addresses {
2245 +#ifdef CONFIG_HIGHMEM
2246 + FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
2247 + FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
2249 + __end_of_fixed_addresses
2252 +extern void __set_fixmap (enum fixed_addresses idx,
2253 + unsigned long phys, pgprot_t flags);
2256 + * used by vmalloc.c.
2258 + * Leave one empty page between vmalloc'ed areas and
2259 + * the start of the fixmap, and leave one page empty
2260 + * at the top of mem..
2263 +#define FIXADDR_TOP (TASK_SIZE - 2 * PAGE_SIZE)
2264 +#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
2265 +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
2267 +#include <asm-generic/fixmap.h>
2271 +++ b/arch/um/include/uapi/asm/irq.h
2276 +#define TIMER_IRQ 0
2278 +#define CONSOLE_IRQ 2
2279 +#define CONSOLE_WRITE_IRQ 3
2281 +#define UM_ETH_IRQ 5
2283 +#define SSL_WRITE_IRQ 7
2284 +#define ACCEPT_IRQ 8
2285 +#define MCONSOLE_IRQ 9
2286 +#define WINCH_IRQ 10
2287 +#define SIGIO_WRITE_IRQ 11
2288 +#define TELNETD_IRQ 12
2289 +#define XTERM_IRQ 13
2290 +#define RANDOM_IRQ 14
2292 +#define LAST_IRQ RANDOM_IRQ
2293 +#define NR_IRQS (LAST_IRQ + 1)
2297 +++ b/arch/um/include/uapi/asm/irqflags.h
2299 +#ifndef __UM_IRQFLAGS_H
2300 +#define __UM_IRQFLAGS_H
2302 +extern int get_signals(void);
2303 +extern int set_signals(int enable);
2304 +extern void block_signals(void);
2305 +extern void unblock_signals(void);
2307 +static inline unsigned long arch_local_save_flags(void)
2309 + return get_signals();
2312 +static inline void arch_local_irq_restore(unsigned long flags)
2314 + set_signals(flags);
2317 +static inline void arch_local_irq_enable(void)
2319 + unblock_signals();
2322 +static inline void arch_local_irq_disable(void)
2327 +static inline unsigned long arch_local_irq_save(void)
2329 + unsigned long flags;
2330 + flags = arch_local_save_flags();
2331 + arch_local_irq_disable();
2335 +static inline bool arch_irqs_disabled(void)
2337 + return arch_local_save_flags() == 0;
2342 +++ b/arch/um/include/uapi/asm/kmap_types.h
2345 + * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
2346 + * Licensed under the GPL
2349 +#ifndef __UM_KMAP_TYPES_H
2350 +#define __UM_KMAP_TYPES_H
2352 +/* No more #include "asm/arch/kmap_types.h" ! */
2354 +#define KM_TYPE_NR 14
2358 +++ b/arch/um/include/uapi/asm/kvm_para.h
2360 +#include <asm-generic/kvm_para.h>
2362 +++ b/arch/um/include/uapi/asm/mmu.h
2365 + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
2366 + * Licensed under the GPL
2369 +#ifndef __ARCH_UM_MMU_H
2370 +#define __ARCH_UM_MMU_H
2373 +#include <asm/mm_context.h>
2375 +typedef struct mm_context {
2377 + struct uml_arch_mm_context arch;
2378 + struct page *stub_pages[2];
2381 +extern void __switch_mm(struct mm_id * mm_idp);
2383 +/* Avoid tangled inclusion with asm/ldt.h */
2384 +extern long init_new_ldt(struct mm_context *to_mm, struct mm_context *from_mm);
2385 +extern void free_ldt(struct mm_context *mm);
2389 +++ b/arch/um/include/uapi/asm/mmu_context.h
2392 + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
2393 + * Licensed under the GPL
2396 +#ifndef __UM_MMU_CONTEXT_H
2397 +#define __UM_MMU_CONTEXT_H
2399 +#include <linux/sched.h>
2400 +#include <asm/mmu.h>
2402 +extern void uml_setup_stubs(struct mm_struct *mm);
2403 +extern void arch_exit_mmap(struct mm_struct *mm);
2405 +#define deactivate_mm(tsk,mm) do { } while (0)
2407 +extern void force_flush_all(void);
2409 +static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
2412 + * This is called by fs/exec.c and sys_unshare()
2413 + * when the new ->mm is used for the first time.
2415 + __switch_mm(&new->context.id);
2416 + down_write(&new->mmap_sem);
2417 + uml_setup_stubs(new);
2418 + up_write(&new->mmap_sem);
2421 +static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
2422 + struct task_struct *tsk)
2424 + unsigned cpu = smp_processor_id();
2427 + cpumask_clear_cpu(cpu, mm_cpumask(prev));
2428 + cpumask_set_cpu(cpu, mm_cpumask(next));
2429 + if(next != &init_mm)
2430 + __switch_mm(&next->context.id);
2434 +static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
2436 + uml_setup_stubs(mm);
2439 +static inline void enter_lazy_tlb(struct mm_struct *mm,
2440 + struct task_struct *tsk)
2444 +extern int init_new_context(struct task_struct *task, struct mm_struct *mm);
2446 +extern void destroy_context(struct mm_struct *mm);
2450 +++ b/arch/um/include/uapi/asm/page.h
2453 + * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
2454 + * Copyright 2003 PathScale, Inc.
2455 + * Licensed under the GPL
2458 +#ifndef __UM_PAGE_H
2459 +#define __UM_PAGE_H
2461 +#include <linux/const.h>
2463 +/* PAGE_SHIFT determines the page size */
2464 +#define PAGE_SHIFT 12
2465 +#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
2466 +#define PAGE_MASK (~(PAGE_SIZE-1))
2468 +#ifndef __ASSEMBLY__
2472 +#include <linux/types.h>
2473 +#include <asm/vm-flags.h>
2476 + * These are used to make use of C type-checking..
2479 +#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
2480 +#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
2482 +#define clear_user_page(page, vaddr, pg) clear_page(page)
2483 +#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
2485 +#if defined(CONFIG_3_LEVEL_PGTABLES) && !defined(CONFIG_64BIT)
2487 +typedef struct { unsigned long pte_low, pte_high; } pte_t;
2488 +typedef struct { unsigned long pmd; } pmd_t;
2489 +typedef struct { unsigned long pgd; } pgd_t;
2490 +#define pte_val(x) ((x).pte_low | ((unsigned long long) (x).pte_high << 32))
2492 +#define pte_get_bits(pte, bits) ((pte).pte_low & (bits))
2493 +#define pte_set_bits(pte, bits) ((pte).pte_low |= (bits))
2494 +#define pte_clear_bits(pte, bits) ((pte).pte_low &= ~(bits))
2495 +#define pte_copy(to, from) ({ (to).pte_high = (from).pte_high; \
2497 + (to).pte_low = (from).pte_low; })
2498 +#define pte_is_zero(pte) (!((pte).pte_low & ~_PAGE_NEWPAGE) && !(pte).pte_high)
2499 +#define pte_set_val(pte, phys, prot) \
2500 + ({ (pte).pte_high = (phys) >> 32; \
2501 + (pte).pte_low = (phys) | pgprot_val(prot); })
2503 +#define pmd_val(x) ((x).pmd)
2504 +#define __pmd(x) ((pmd_t) { (x) } )
2506 +typedef unsigned long long pfn_t;
2507 +typedef unsigned long long phys_t;
2511 +typedef struct { unsigned long pte; } pte_t;
2512 +typedef struct { unsigned long pgd; } pgd_t;
2514 +#ifdef CONFIG_3_LEVEL_PGTABLES
2515 +typedef struct { unsigned long pmd; } pmd_t;
2516 +#define pmd_val(x) ((x).pmd)
2517 +#define __pmd(x) ((pmd_t) { (x) } )
2520 +#define pte_val(x) ((x).pte)
2523 +#define pte_get_bits(p, bits) ((p).pte & (bits))
2524 +#define pte_set_bits(p, bits) ((p).pte |= (bits))
2525 +#define pte_clear_bits(p, bits) ((p).pte &= ~(bits))
2526 +#define pte_copy(to, from) ((to).pte = (from).pte)
2527 +#define pte_is_zero(p) (!((p).pte & ~_PAGE_NEWPAGE))
2528 +#define pte_set_val(p, phys, prot) (p).pte = (phys | pgprot_val(prot))
2530 +typedef unsigned long pfn_t;
2531 +typedef unsigned long phys_t;
2535 +typedef struct { unsigned long pgprot; } pgprot_t;
2537 +typedef struct page *pgtable_t;
2539 +#define pgd_val(x) ((x).pgd)
2540 +#define pgprot_val(x) ((x).pgprot)
2542 +#define __pte(x) ((pte_t) { (x) } )
2543 +#define __pgd(x) ((pgd_t) { (x) } )
2544 +#define __pgprot(x) ((pgprot_t) { (x) } )
2546 +extern unsigned long uml_physmem;
2548 +#define PAGE_OFFSET (uml_physmem)
2549 +#define KERNELBASE PAGE_OFFSET
2551 +#define __va_space (8*1024*1024)
2555 +/* Cast to unsigned long before casting to void * to avoid a warning from
2556 + * mmap_kmem about cutting a long long down to a void *. Not sure that
2557 + * casting is the right thing, but 32-bit UML can't have 64-bit virtual
2560 +#define __pa(virt) to_phys((void *) (unsigned long) (virt))
2561 +#define __va(phys) to_virt((unsigned long) (phys))
2563 +#define phys_to_pfn(p) ((pfn_t) ((p) >> PAGE_SHIFT))
2564 +#define pfn_to_phys(pfn) ((phys_t) ((pfn) << PAGE_SHIFT))
2566 +#define pfn_valid(pfn) ((pfn) < max_mapnr)
2567 +#define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v)))
2569 +#include <asm-generic/memory_model.h>
2570 +#include <asm-generic/getorder.h>
2572 +#endif /* __ASSEMBLY__ */
2574 +#ifdef CONFIG_X86_32
2575 +#define __HAVE_ARCH_GATE_AREA 1
2578 +#endif /* __UM_PAGE_H */
2580 +++ b/arch/um/include/uapi/asm/pgalloc.h
2583 + * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
2584 + * Copyright 2003 PathScale, Inc.
2585 + * Derived from include/asm-i386/pgalloc.h and include/asm-i386/pgtable.h
2586 + * Licensed under the GPL
2589 +#ifndef __UM_PGALLOC_H
2590 +#define __UM_PGALLOC_H
2592 +#include <linux/mm.h>
2594 +#define pmd_populate_kernel(mm, pmd, pte) \
2595 + set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) __pa(pte)))
2597 +#define pmd_populate(mm, pmd, pte) \
2598 + set_pmd(pmd, __pmd(_PAGE_TABLE + \
2599 + ((unsigned long long)page_to_pfn(pte) << \
2600 + (unsigned long long) PAGE_SHIFT)))
2601 +#define pmd_pgtable(pmd) pmd_page(pmd)
2604 + * Allocate and free page tables.
2606 +extern pgd_t *pgd_alloc(struct mm_struct *);
2607 +extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
2609 +extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
2610 +extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
2612 +static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
2614 + free_page((unsigned long) pte);
2617 +static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
2619 + pgtable_page_dtor(pte);
2623 +#define __pte_free_tlb(tlb,pte, address) \
2625 + pgtable_page_dtor(pte); \
2626 + tlb_remove_page((tlb),(pte)); \
2629 +#ifdef CONFIG_3_LEVEL_PGTABLES
2631 +static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
2633 + free_page((unsigned long)pmd);
2636 +#define __pmd_free_tlb(tlb,x, address) tlb_remove_page((tlb),virt_to_page(x))
2639 +#define check_pgt_cache() do { } while (0)
2644 +++ b/arch/um/include/uapi/asm/pgtable-2level.h
2647 + * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
2648 + * Copyright 2003 PathScale, Inc.
2649 + * Derived from include/asm-i386/pgtable.h
2650 + * Licensed under the GPL
2653 +#ifndef __UM_PGTABLE_2LEVEL_H
2654 +#define __UM_PGTABLE_2LEVEL_H
2656 +#include <asm-generic/pgtable-nopmd.h>
2658 +/* PGDIR_SHIFT determines what a third-level page table entry can map */
2660 +#define PGDIR_SHIFT 22
2661 +#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
2662 +#define PGDIR_MASK (~(PGDIR_SIZE-1))
2665 + * entries per page directory level: the i386 is two-level, so
2666 + * we don't really have any PMD directory physically.
2668 +#define PTRS_PER_PTE 1024
2669 +#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
2670 +#define PTRS_PER_PGD 1024
2671 +#define FIRST_USER_ADDRESS 0
2673 +#define pte_ERROR(e) \
2674 + printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), \
2676 +#define pgd_ERROR(e) \
2677 + printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), \
2680 +static inline int pgd_newpage(pgd_t pgd) { return 0; }
2681 +static inline void pgd_mkuptodate(pgd_t pgd) { }
2683 +#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
2685 +#define pte_pfn(x) phys_to_pfn(pte_val(x))
2686 +#define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot))
2687 +#define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot))
2690 + * Bits 0 through 4 are taken
2692 +#define PTE_FILE_MAX_BITS 27
2694 +#define pte_to_pgoff(pte) (pte_val(pte) >> 5)
2696 +#define pgoff_to_pte(off) ((pte_t) { ((off) << 5) + _PAGE_FILE })
2700 +++ b/arch/um/include/uapi/asm/pgtable-3level.h
2703 + * Copyright 2003 PathScale Inc
2704 + * Derived from include/asm-i386/pgtable.h
2705 + * Licensed under the GPL
2708 +#ifndef __UM_PGTABLE_3LEVEL_H
2709 +#define __UM_PGTABLE_3LEVEL_H
2711 +#include <asm-generic/pgtable-nopud.h>
2713 +/* PGDIR_SHIFT determines what a third-level page table entry can map */
2715 +#ifdef CONFIG_64BIT
2716 +#define PGDIR_SHIFT 30
2718 +#define PGDIR_SHIFT 31
2720 +#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
2721 +#define PGDIR_MASK (~(PGDIR_SIZE-1))
2723 +/* PMD_SHIFT determines the size of the area a second-level page table can
2727 +#define PMD_SHIFT 21
2728 +#define PMD_SIZE (1UL << PMD_SHIFT)
2729 +#define PMD_MASK (~(PMD_SIZE-1))
2732 + * entries per page directory level
2735 +#define PTRS_PER_PTE 512
2736 +#ifdef CONFIG_64BIT
2737 +#define PTRS_PER_PMD 512
2738 +#define PTRS_PER_PGD 512
2740 +#define PTRS_PER_PMD 1024
2741 +#define PTRS_PER_PGD 1024
2744 +#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
2745 +#define FIRST_USER_ADDRESS 0
2747 +#define pte_ERROR(e) \
2748 + printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), \
2750 +#define pmd_ERROR(e) \
2751 + printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
2753 +#define pgd_ERROR(e) \
2754 + printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
2757 +#define pud_none(x) (!(pud_val(x) & ~_PAGE_NEWPAGE))
2758 +#define pud_bad(x) ((pud_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
2759 +#define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
2760 +#define pud_populate(mm, pud, pmd) \
2761 + set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
2763 +#ifdef CONFIG_64BIT
2764 +#define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
2766 +#define set_pud(pudptr, pudval) (*(pudptr) = (pudval))
2769 +static inline int pgd_newpage(pgd_t pgd)
2771 + return(pgd_val(pgd) & _PAGE_NEWPAGE);
2774 +static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; }
2776 +#ifdef CONFIG_64BIT
2777 +#define set_pmd(pmdptr, pmdval) set_64bit((u64 *) (pmdptr), pmd_val(pmdval))
2779 +#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
2783 +extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
2785 +static inline void pud_clear (pud_t *pud)
2787 + set_pud(pud, __pud(_PAGE_NEWPAGE));
2790 +#define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK)
2791 +#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))
2793 +/* Find an entry in the second-level page table.. */
2794 +#define pmd_offset(pud, address) ((pmd_t *) pud_page_vaddr(*(pud)) + \
2795 + pmd_index(address))
2797 +static inline unsigned long pte_pfn(pte_t pte)
2799 + return phys_to_pfn(pte_val(pte));
2802 +static inline pte_t pfn_pte(pfn_t page_nr, pgprot_t pgprot)
2805 + phys_t phys = pfn_to_phys(page_nr);
2807 + pte_set_val(pte, phys, pgprot);
2811 +static inline pmd_t pfn_pmd(pfn_t page_nr, pgprot_t pgprot)
2813 + return __pmd((page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
2817 + * Bits 0 through 3 are taken in the low part of the pte,
2818 + * put the 32 bits of offset into the high part.
2820 +#define PTE_FILE_MAX_BITS 32
2822 +#ifdef CONFIG_64BIT
2824 +#define pte_to_pgoff(p) ((p).pte >> 32)
2826 +#define pgoff_to_pte(off) ((pte_t) { ((off) << 32) | _PAGE_FILE })
2830 +#define pte_to_pgoff(pte) ((pte).pte_high)
2832 +#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
2839 +++ b/arch/um/include/uapi/asm/pgtable.h
2842 + * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
2843 + * Copyright 2003 PathScale, Inc.
2844 + * Derived from include/asm-i386/pgtable.h
2845 + * Licensed under the GPL
2848 +#ifndef __UM_PGTABLE_H
2849 +#define __UM_PGTABLE_H
2851 +#include <asm/fixmap.h>
2853 +#define _PAGE_PRESENT 0x001
2854 +#define _PAGE_NEWPAGE 0x002
2855 +#define _PAGE_NEWPROT 0x004
2856 +#define _PAGE_RW 0x020
2857 +#define _PAGE_USER 0x040
2858 +#define _PAGE_ACCESSED 0x080
2859 +#define _PAGE_DIRTY 0x100
2860 +/* If _PAGE_PRESENT is clear, we use these: */
2861 +#define _PAGE_FILE 0x008 /* nonlinear file mapping, saved PTE; unset:swap */
2862 +#define _PAGE_PROTNONE 0x010 /* if the user mapped it with PROT_NONE;
2863 + pte_present gives true */
2865 +#ifdef CONFIG_3_LEVEL_PGTABLES
2866 +#include <asm/pgtable-3level.h>
2868 +#include <asm/pgtable-2level.h>
2871 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
2873 +/* zero page used for uninitialized stuff */
2874 +extern unsigned long *empty_zero_page;
2876 +#define pgtable_cache_init() do ; while (0)
2878 +/* Just any arbitrary offset to the start of the vmalloc VM area: the
2879 + * current 8MB value just means that there will be a 8MB "hole" after the
2880 + * physical memory until the kernel virtual memory starts. That means that
2881 + * any out-of-bounds memory accesses will hopefully be caught.
2882 + * The vmalloc() routines leaves a hole of 4kB between each vmalloced
2883 + * area for the same reason. ;)
2886 +extern unsigned long end_iomem;
2888 +#define VMALLOC_OFFSET (__va_space)
2889 +#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
2890 +#define PKMAP_BASE ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
2891 +#ifdef CONFIG_HIGHMEM
2892 +# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
2894 +# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
2896 +#define MODULES_VADDR VMALLOC_START
2897 +#define MODULES_END VMALLOC_END
2898 +#define MODULES_LEN (MODULES_VADDR - MODULES_END)
2900 +#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
2901 +#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
2902 +#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
2903 +#define __PAGE_KERNEL_EXEC \
2904 + (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
2905 +#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
2906 +#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
2907 +#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
2908 +#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
2909 +#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
2910 +#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
2913 + * The i386 can't do page protection for execute, and considers that the same
2915 + * Also, write permissions imply read permissions. This is the closest we can
2918 +#define __P000 PAGE_NONE
2919 +#define __P001 PAGE_READONLY
2920 +#define __P010 PAGE_COPY
2921 +#define __P011 PAGE_COPY
2922 +#define __P100 PAGE_READONLY
2923 +#define __P101 PAGE_READONLY
2924 +#define __P110 PAGE_COPY
2925 +#define __P111 PAGE_COPY
2927 +#define __S000 PAGE_NONE
2928 +#define __S001 PAGE_READONLY
2929 +#define __S010 PAGE_SHARED
2930 +#define __S011 PAGE_SHARED
2931 +#define __S100 PAGE_READONLY
2932 +#define __S101 PAGE_READONLY
2933 +#define __S110 PAGE_SHARED
2934 +#define __S111 PAGE_SHARED
2937 + * ZERO_PAGE is a global shared page that is always zero: used
2938 + * for zero-mapped memory areas etc..
2940 +#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
2942 +#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))
2944 +#define pmd_none(x) (!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
2945 +#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
2947 +#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
2948 +#define pmd_clear(xp) do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)
2950 +#define pmd_newpage(x) (pmd_val(x) & _PAGE_NEWPAGE)
2951 +#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)
2953 +#define pud_newpage(x) (pud_val(x) & _PAGE_NEWPAGE)
2954 +#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)
2956 +#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)
2958 +#define pte_page(x) pfn_to_page(pte_pfn(x))
2960 +#define pte_present(x) pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))
2963 + * =================================
2964 + * Flags checking section.
2965 + * =================================
2968 +static inline int pte_none(pte_t pte)
2970 + return pte_is_zero(pte);
2974 + * The following only work if pte_present() is true.
2975 + * Undefined behaviour if not..
2977 +static inline int pte_read(pte_t pte)
2979 + return((pte_get_bits(pte, _PAGE_USER)) &&
2980 + !(pte_get_bits(pte, _PAGE_PROTNONE)));
2983 +static inline int pte_exec(pte_t pte){
2984 + return((pte_get_bits(pte, _PAGE_USER)) &&
2985 + !(pte_get_bits(pte, _PAGE_PROTNONE)));
2988 +static inline int pte_write(pte_t pte)
2990 + return((pte_get_bits(pte, _PAGE_RW)) &&
2991 + !(pte_get_bits(pte, _PAGE_PROTNONE)));
2995 + * The following only works if pte_present() is not true.
2997 +static inline int pte_file(pte_t pte)
2999 + return pte_get_bits(pte, _PAGE_FILE);
3002 +static inline int pte_dirty(pte_t pte)
3004 + return pte_get_bits(pte, _PAGE_DIRTY);
3007 +static inline int pte_young(pte_t pte)
3009 + return pte_get_bits(pte, _PAGE_ACCESSED);
3012 +static inline int pte_newpage(pte_t pte)
3014 + return pte_get_bits(pte, _PAGE_NEWPAGE);
3017 +static inline int pte_newprot(pte_t pte)
3019 + return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
3022 +static inline int pte_special(pte_t pte)
3028 + * =================================
3029 + * Flags setting section.
3030 + * =================================
3033 +static inline pte_t pte_mknewprot(pte_t pte)
3035 + pte_set_bits(pte, _PAGE_NEWPROT);
3039 +static inline pte_t pte_mkclean(pte_t pte)
3041 + pte_clear_bits(pte, _PAGE_DIRTY);
3045 +static inline pte_t pte_mkold(pte_t pte)
3047 + pte_clear_bits(pte, _PAGE_ACCESSED);
3051 +static inline pte_t pte_wrprotect(pte_t pte)
3053 + pte_clear_bits(pte, _PAGE_RW);
3054 + return(pte_mknewprot(pte));
3057 +static inline pte_t pte_mkread(pte_t pte)
3059 + pte_set_bits(pte, _PAGE_USER);
3060 + return(pte_mknewprot(pte));
3063 +static inline pte_t pte_mkdirty(pte_t pte)
3065 + pte_set_bits(pte, _PAGE_DIRTY);
3069 +static inline pte_t pte_mkyoung(pte_t pte)
3071 + pte_set_bits(pte, _PAGE_ACCESSED);
3075 +static inline pte_t pte_mkwrite(pte_t pte)
3077 + pte_set_bits(pte, _PAGE_RW);
3078 + return(pte_mknewprot(pte));
3081 +static inline pte_t pte_mkuptodate(pte_t pte)
3083 + pte_clear_bits(pte, _PAGE_NEWPAGE);
3084 + if(pte_present(pte))
3085 + pte_clear_bits(pte, _PAGE_NEWPROT);
3089 +static inline pte_t pte_mknewpage(pte_t pte)
3091 + pte_set_bits(pte, _PAGE_NEWPAGE);
3095 +static inline pte_t pte_mkspecial(pte_t pte)
3100 +static inline void set_pte(pte_t *pteptr, pte_t pteval)
3102 + pte_copy(*pteptr, pteval);
3104 + /* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
3105 + * fix_range knows to unmap it. _PAGE_NEWPROT is specific to
3109 + *pteptr = pte_mknewpage(*pteptr);
3110 + if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
3112 +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
3114 +#define __HAVE_ARCH_PTE_SAME
3115 +static inline int pte_same(pte_t pte_a, pte_t pte_b)
3117 + return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
3121 + * Conversion functions: convert a page and protection to a page entry,
3122 + * and a page entry and page directory to the page they refer to.
3125 +#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
3126 +#define __virt_to_page(virt) phys_to_page(__pa(virt))
3127 +#define page_to_phys(page) pfn_to_phys((pfn_t) page_to_pfn(page))
3128 +#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)
3130 +#define mk_pte(page, pgprot) \
3133 + pte_set_val(pte, page_to_phys(page), (pgprot)); \
3134 + if (pte_present(pte)) \
3135 + pte_mknewprot(pte_mknewpage(pte)); \
3138 +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
3140 + pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
3145 + * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
3147 + * this macro returns the index of the entry in the pgd page which would
3148 + * control the given virtual address
3150 +#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
3153 + * pgd_offset() returns a (pgd_t *)
3154 + * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
3156 +#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
3159 + * a shortcut which implies the use of the kernel's pgd, instead
3162 +#define pgd_offset_k(address) pgd_offset(&init_mm, address)
3165 + * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
3167 + * this macro returns the index of the entry in the pmd page which would
3168 + * control the given virtual address
3170 +#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
3171 +#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
3173 +#define pmd_page_vaddr(pmd) \
3174 + ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
3177 + * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
3179 + * this macro returns the index of the entry in the pte page which would
3180 + * control the given virtual address
3182 +#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
3183 +#define pte_offset_kernel(dir, address) \
3184 + ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
3185 +#define pte_offset_map(dir, address) \
3186 + ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
3187 +#define pte_unmap(pte) do { } while (0)
3190 +extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
3192 +#define update_mmu_cache(vma,address,ptep) do ; while (0)
3194 +/* Encode and de-code a swap entry */
3195 +#define __swp_type(x) (((x).val >> 5) & 0x1f)
3196 +#define __swp_offset(x) ((x).val >> 11)
3198 +#define __swp_entry(type, offset) \
3199 + ((swp_entry_t) { ((type) << 5) | ((offset) << 11) })
3200 +#define __pte_to_swp_entry(pte) \
3201 + ((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
3202 +#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
3204 +#define kern_addr_valid(addr) (1)
3206 +#include <asm-generic/pgtable.h>
3208 +/* Clear a kernel PTE and flush it from the TLB */
3209 +#define kpte_clear_flush(ptep, vaddr) \
3211 + pte_clear(&init_mm, (vaddr), (ptep)); \
3212 + __flush_tlb_one((vaddr)); \
3217 +++ b/arch/um/include/uapi/asm/processor-generic.h
3220 + * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3221 + * Licensed under the GPL
3224 +#ifndef __UM_PROCESSOR_GENERIC_H
3225 +#define __UM_PROCESSOR_GENERIC_H
3229 +struct task_struct;
3231 +#include <asm/ptrace.h>
3232 +#include <registers.h>
3233 +#include <sysdep/archsetjmp.h>
3235 +#include <linux/prefetch.h>
3239 +struct thread_struct {
3240 + struct pt_regs regs;
3241 + struct pt_regs *segv_regs;
3242 + int singlestep_syscall;
3244 + jmp_buf *fault_catcher;
3245 + struct task_struct *prev_sched;
3246 + struct arch_thread arch;
3247 + jmp_buf switch_buf;
3255 + int (*proc)(void *);
3259 + void (*proc)(void *);
3266 +#define INIT_THREAD \
3268 + .regs = EMPTY_REGS, \
3269 + .fault_addr = NULL, \
3270 + .prev_sched = NULL, \
3271 + .arch = INIT_ARCH_THREAD, \
3272 + .request = { 0 } \
3275 +static inline void release_thread(struct task_struct *task)
3279 +extern unsigned long thread_saved_pc(struct task_struct *t);
3281 +static inline void mm_copy_segments(struct mm_struct *from_mm,
3282 + struct mm_struct *new_mm)
3286 +#define init_stack (init_thread_union.stack)
3289 + * User space process size: 3GB (default).
3291 +extern unsigned long task_size;
3293 +#define TASK_SIZE (task_size)
3296 +#undef STACK_TOP_MAX
3298 +extern unsigned long stacksizelim;
3300 +#define STACK_ROOM (stacksizelim)
3301 +#define STACK_TOP (TASK_SIZE - 2 * PAGE_SIZE)
3302 +#define STACK_TOP_MAX STACK_TOP
3304 +/* This decides where the kernel will search for a free chunk of vm
3305 + * space during mmap's.
3307 +#define TASK_UNMAPPED_BASE (0x40000000)
3309 +extern void start_thread(struct pt_regs *regs, unsigned long entry,
3310 + unsigned long stack);
3312 +struct cpuinfo_um {
3313 + unsigned long loops_per_jiffy;
3317 +extern struct cpuinfo_um boot_cpu_data;
3319 +#define my_cpu_data cpu_data[smp_processor_id()]
3322 +extern struct cpuinfo_um cpu_data[];
3323 +#define current_cpu_data cpu_data[smp_processor_id()]
3325 +#define cpu_data (&boot_cpu_data)
3326 +#define current_cpu_data boot_cpu_data
3330 +#define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf)
3331 +extern unsigned long get_wchan(struct task_struct *p);
3335 +++ b/arch/um/include/uapi/asm/ptrace-generic.h
3338 + * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3339 + * Licensed under the GPL
3342 +#ifndef __UM_PTRACE_GENERIC_H
3343 +#define __UM_PTRACE_GENERIC_H
3345 +#ifndef __ASSEMBLY__
3347 +#include <asm/ptrace-abi.h>
3348 +#include <sysdep/ptrace.h>
3351 + struct uml_pt_regs regs;
3354 +#define arch_has_single_step() (1)
3356 +#define EMPTY_REGS { .regs = EMPTY_UML_PT_REGS }
3358 +#define PT_REGS_IP(r) UPT_IP(&(r)->regs)
3359 +#define PT_REGS_SP(r) UPT_SP(&(r)->regs)
3361 +#define PT_REGS_RESTART_SYSCALL(r) UPT_RESTART_SYSCALL(&(r)->regs)
3363 +#define PT_REGS_SYSCALL_NR(r) UPT_SYSCALL_NR(&(r)->regs)
3365 +#define instruction_pointer(regs) PT_REGS_IP(regs)
3367 +struct task_struct;
3369 +extern long subarch_ptrace(struct task_struct *child, long request,
3370 + unsigned long addr, unsigned long data);
3371 +extern unsigned long getreg(struct task_struct *child, int regno);
3372 +extern int putreg(struct task_struct *child, int regno, unsigned long value);
3374 +extern int arch_copy_tls(struct task_struct *new);
3375 +extern void clear_flushed_tls(struct task_struct *task);
3376 +extern void syscall_trace_enter(struct pt_regs *regs);
3377 +extern void syscall_trace_leave(struct pt_regs *regs);
3383 +++ b/arch/um/include/uapi/asm/setup.h
3385 +#ifndef SETUP_H_INCLUDED
3386 +#define SETUP_H_INCLUDED
3388 +/* POSIX mandated with _POSIX_ARG_MAX that we can rely on 4096 chars in the
3389 + * command line, so this choice is ok.
3392 +#define COMMAND_LINE_SIZE 4096
3394 +#endif /* SETUP_H_INCLUDED */
3396 +++ b/arch/um/include/uapi/asm/smp.h
3403 +#include <linux/bitops.h>
3404 +#include <asm/current.h>
3405 +#include <linux/cpumask.h>
3407 +#define raw_smp_processor_id() (current_thread->cpu)
3409 +#define cpu_logical_map(n) (n)
3410 +#define cpu_number_map(n) (n)
3411 +extern int hard_smp_processor_id(void);
3412 +#define NO_PROC_ID -1
3417 +static inline void smp_cpus_done(unsigned int maxcpus)
3421 +extern struct task_struct *idle_threads[NR_CPUS];
3425 +#define hard_smp_processor_id() 0
3431 +++ b/arch/um/include/uapi/asm/stacktrace.h
3433 +#ifndef _ASM_UML_STACKTRACE_H
3434 +#define _ASM_UML_STACKTRACE_H
3436 +#include <linux/uaccess.h>
3437 +#include <linux/ptrace.h>
3439 +struct stack_frame {
3440 + struct stack_frame *next_frame;
3441 + unsigned long return_address;
3444 +struct stacktrace_ops {
3445 + void (*address)(void *data, unsigned long address, int reliable);
3448 +#ifdef CONFIG_FRAME_POINTER
3449 +static inline unsigned long
3450 +get_frame_pointer(struct task_struct *task, struct pt_regs *segv_regs)
3452 + if (!task || task == current)
3453 + return segv_regs ? PT_REGS_BP(segv_regs) : current_bp();
3454 + return KSTK_EBP(task);
3457 +static inline unsigned long
3458 +get_frame_pointer(struct task_struct *task, struct pt_regs *segv_regs)
3464 +static inline unsigned long
3465 +*get_stack_pointer(struct task_struct *task, struct pt_regs *segv_regs)
3467 + if (!task || task == current)
3468 + return segv_regs ? (unsigned long *)PT_REGS_SP(segv_regs) : current_sp();
3469 + return (unsigned long *)KSTK_ESP(task);
3472 +void dump_trace(struct task_struct *tsk, const struct stacktrace_ops *ops, void *data);
3474 +#endif /* _ASM_UML_STACKTRACE_H */
3476 +++ b/arch/um/include/uapi/asm/sysrq.h
3478 +#ifndef __UM_SYSRQ_H
3479 +#define __UM_SYSRQ_H
3481 +struct task_struct;
3482 +extern void show_trace(struct task_struct* task, unsigned long *stack);
3486 +++ b/arch/um/include/uapi/asm/thread_info.h
3489 + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3490 + * Licensed under the GPL
3493 +#ifndef __UM_THREAD_INFO_H
3494 +#define __UM_THREAD_INFO_H
3496 +#ifndef __ASSEMBLY__
3498 +#include <asm/types.h>
3499 +#include <asm/page.h>
3500 +#include <asm/uaccess.h>
3502 +struct thread_info {
3503 + struct task_struct *task; /* main task structure */
3504 + struct exec_domain *exec_domain; /* execution domain */
3505 + unsigned long flags; /* low level flags */
3506 + __u32 cpu; /* current CPU */
3507 + int preempt_count; /* 0 => preemptable,
3509 + mm_segment_t addr_limit; /* thread address space:
3510 + 0-0xBFFFFFFF for user
3511 + 0-0xFFFFFFFF for kernel */
3512 + struct restart_block restart_block;
3513 + struct thread_info *real_thread; /* Points to non-IRQ stack */
3516 +#define INIT_THREAD_INFO(tsk) \
3519 + .exec_domain = &default_exec_domain, \
3522 + .preempt_count = INIT_PREEMPT_COUNT, \
3523 + .addr_limit = KERNEL_DS, \
3524 + .restart_block = { \
3525 + .fn = do_no_restart_syscall, \
3527 + .real_thread = NULL, \
3530 +#define init_thread_info (init_thread_union.thread_info)
3531 +#define init_stack (init_thread_union.stack)
3533 +#define THREAD_SIZE ((1 << CONFIG_KERNEL_STACK_ORDER) * PAGE_SIZE)
3534 +/* how to get the thread information struct from C */
3535 +static inline struct thread_info *current_thread_info(void)
3537 + struct thread_info *ti;
3538 + unsigned long mask = THREAD_SIZE - 1;
3541 + asm volatile ("" : "=r" (p) : "0" (&ti));
3542 + ti = (struct thread_info *) (((unsigned long)p) & ~mask);
3546 +#define THREAD_SIZE_ORDER CONFIG_KERNEL_STACK_ORDER
3550 +#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
3551 +#define TIF_SIGPENDING 1 /* signal pending */
3552 +#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
3553 +#define TIF_RESTART_BLOCK 4
3554 +#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
3555 +#define TIF_SYSCALL_AUDIT 6
3556 +#define TIF_RESTORE_SIGMASK 7
3557 +#define TIF_NOTIFY_RESUME 8
3559 +#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
3560 +#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
3561 +#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
3562 +#define _TIF_MEMDIE (1 << TIF_MEMDIE)
3563 +#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
3567 +++ b/arch/um/include/uapi/asm/timex.h
3569 +#ifndef __UM_TIMEX_H
3570 +#define __UM_TIMEX_H
3572 +typedef unsigned long cycles_t;
3574 +static inline cycles_t get_cycles (void)
3579 +#define CLOCK_TICK_RATE (HZ)
3583 +++ b/arch/um/include/uapi/asm/tlb.h
3588 +#include <linux/pagemap.h>
3589 +#include <linux/swap.h>
3590 +#include <asm/percpu.h>
3591 +#include <asm/pgalloc.h>
3592 +#include <asm/tlbflush.h>
3594 +#define tlb_start_vma(tlb, vma) do { } while (0)
3595 +#define tlb_end_vma(tlb, vma) do { } while (0)
3596 +#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
3598 +/* struct mmu_gather is an opaque type used by the mm code for passing around
3599 + * any data needed by arch specific code for tlb_remove_page.
3601 +struct mmu_gather {
3602 + struct mm_struct *mm;
3603 + unsigned int need_flush; /* Really unmapped some ptes? */
3604 + unsigned long start;
3605 + unsigned long end;
3606 + unsigned int fullmm; /* non-zero means full mm flush */
3609 +static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
3610 + unsigned long address)
3612 + if (tlb->start > address)
3613 + tlb->start = address;
3614 + if (tlb->end < address + PAGE_SIZE)
3615 + tlb->end = address + PAGE_SIZE;
3618 +static inline void init_tlb_gather(struct mmu_gather *tlb)
3620 + tlb->need_flush = 0;
3622 + tlb->start = TASK_SIZE;
3625 + if (tlb->fullmm) {
3627 + tlb->end = TASK_SIZE;
3632 +tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
3635 + tlb->start = start;
3637 + tlb->fullmm = !(start | (end+1));
3639 + init_tlb_gather(tlb);
3642 +extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
3643 + unsigned long end);
3646 +tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
3648 + flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
3652 +tlb_flush_mmu_free(struct mmu_gather *tlb)
3654 + init_tlb_gather(tlb);
3658 +tlb_flush_mmu(struct mmu_gather *tlb)
3660 + if (!tlb->need_flush)
3663 + tlb_flush_mmu_tlbonly(tlb);
3664 + tlb_flush_mmu_free(tlb);
3668 + * Called at the end of the shootdown operation to free up any resources
3669 + * that were required.
3672 +tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
3674 + tlb_flush_mmu(tlb);
3676 + /* keep the page table cache within bounds */
3677 + check_pgt_cache();
3681 + * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
3682 + * while handling the additional races in SMP caused by other CPUs
3683 + * caching valid mappings in their TLBs.
3685 +static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
3687 + tlb->need_flush = 1;
3688 + free_page_and_swap_cache(page);
3689 + return 1; /* avoid calling tlb_flush_mmu */
3692 +static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
3694 + __tlb_remove_page(tlb, page);
3698 + * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
3700 + * Record the fact that pte's were really umapped in ->need_flush, so we can
3701 + * later optimise away the tlb invalidate. This helps when userspace is
3702 + * unmapping already-unmapped pages, which happens quite a lot.
3704 +#define tlb_remove_tlb_entry(tlb, ptep, address) \
3706 + tlb->need_flush = 1; \
3707 + __tlb_remove_tlb_entry(tlb, ptep, address); \
3710 +#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
3712 +#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)
3714 +#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
3716 +#define tlb_migrate_finish(mm) do {} while (0)
3720 +++ b/arch/um/include/uapi/asm/tlbflush.h
3723 + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3724 + * Licensed under the GPL
3727 +#ifndef __UM_TLBFLUSH_H
3728 +#define __UM_TLBFLUSH_H
3730 +#include <linux/mm.h>
3735 + * - flush_tlb() flushes the current mm struct TLBs
3736 + * - flush_tlb_all() flushes all processes TLBs
3737 + * - flush_tlb_mm(mm) flushes the specified mm context TLB's
3738 + * - flush_tlb_page(vma, vmaddr) flushes one page
3739 + * - flush_tlb_kernel_vm() flushes the kernel vm area
3740 + * - flush_tlb_range(vma, start, end) flushes a range of pages
3743 +extern void flush_tlb_all(void);
3744 +extern void flush_tlb_mm(struct mm_struct *mm);
3745 +extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
3746 + unsigned long end);
3747 +extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long address);
3748 +extern void flush_tlb_kernel_vm(void);
3749 +extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
3750 +extern void __flush_tlb_one(unsigned long addr);
3754 +++ b/arch/um/include/uapi/asm/uaccess.h
3757 + * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3758 + * Licensed under the GPL
3761 +#ifndef __UM_UACCESS_H
3762 +#define __UM_UACCESS_H
3764 +/* thread_info has a mm_segment_t in it, so put the definition up here */
3766 + unsigned long seg;
3769 +#include <linux/thread_info.h>
3770 +#include <linux/errno.h>
3771 +#include <asm/processor.h>
3772 +#include <asm/elf.h>
3774 +#define VERIFY_READ 0
3775 +#define VERIFY_WRITE 1
3778 + * The fs value determines whether argument validity checking should be
3779 + * performed or not. If get_fs() == USER_DS, checking is performed, with
3780 + * get_fs() == KERNEL_DS, checking is bypassed.
3782 + * For historical reasons, these macros are grossly misnamed.
3785 +#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
3787 +#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
3788 +#define USER_DS MAKE_MM_SEG(TASK_SIZE)
3790 +#define get_ds() (KERNEL_DS)
3791 +#define get_fs() (current_thread_info()->addr_limit)
3792 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
3794 +#define segment_eq(a, b) ((a).seg == (b).seg)
3796 +#define __under_task_size(addr, size) \
3797 + (((unsigned long) (addr) < TASK_SIZE) && \
3798 + (((unsigned long) (addr) + (size)) < TASK_SIZE))
3800 +#define __access_ok_vsyscall(type, addr, size) \
3801 + ((type == VERIFY_READ) && \
3802 + ((unsigned long) (addr) >= FIXADDR_USER_START) && \
3803 + ((unsigned long) (addr) + (size) <= FIXADDR_USER_END) && \
3804 + ((unsigned long) (addr) + (size) >= (unsigned long)(addr)))
3806 +#define __addr_range_nowrap(addr, size) \
3807 + ((unsigned long) (addr) <= ((unsigned long) (addr) + (size)))
3809 +#define access_ok(type, addr, size) \
3810 + (__addr_range_nowrap(addr, size) && \
3811 + (__under_task_size(addr, size) || \
3812 + __access_ok_vsyscall(type, addr, size) || \
3813 + segment_eq(get_fs(), KERNEL_DS)))
3815 +extern int copy_from_user(void *to, const void __user *from, int n);
3816 +extern int copy_to_user(void __user *to, const void *from, int n);
3819 + * strncpy_from_user: - Copy a NUL terminated string from userspace.
3820 + * @dst: Destination address, in kernel space. This buffer must be at
3821 + * least @count bytes long.
3822 + * @src: Source address, in user space.
3823 + * @count: Maximum number of bytes to copy, including the trailing NUL.
3825 + * Copies a NUL-terminated string from userspace to kernel space.
3827 + * On success, returns the length of the string (not including the trailing
3830 + * If access to userspace fails, returns -EFAULT (some data may have been
3833 + * If @count is smaller than the length of the string, copies @count bytes
3834 + * and returns @count.
3837 +extern int strncpy_from_user(char *dst, const char __user *src, int count);
3840 + * __clear_user: - Zero a block of memory in user space, with less checking.
3841 + * @to: Destination address, in user space.
3842 + * @n: Number of bytes to zero.
3844 + * Zero a block of memory in user space. Caller must check
3845 + * the specified block with access_ok() before calling this function.
3847 + * Returns number of bytes that could not be cleared.
3848 + * On success, this will be zero.
3850 +extern int __clear_user(void __user *mem, int len);
3853 + * clear_user: - Zero a block of memory in user space.
3854 + * @to: Destination address, in user space.
3855 + * @n: Number of bytes to zero.
3857 + * Zero a block of memory in user space.
3859 + * Returns number of bytes that could not be cleared.
3860 + * On success, this will be zero.
3862 +extern int clear_user(void __user *mem, int len);
3865 + * strlen_user: - Get the size of a string in user space.
3866 + * @str: The string to measure.
3867 + * @n: The maximum valid length
3869 + * Get the size of a NUL-terminated string in user space.
3871 + * Returns the size of the string INCLUDING the terminating NUL.
3872 + * On exception, returns 0.
3873 + * If the string is too long, returns a value greater than @n.
3875 +extern int strnlen_user(const void __user *str, int len);
3877 +#define __copy_from_user(to, from, n) copy_from_user(to, from, n)
3879 +#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
3881 +#define __copy_to_user_inatomic __copy_to_user
3882 +#define __copy_from_user_inatomic __copy_from_user
3884 +#define __get_user(x, ptr) \
3886 + const __typeof__(*(ptr)) __user *__private_ptr = (ptr); \
3887 + __typeof__(x) __private_val; \
3888 + int __private_ret = -EFAULT; \
3889 + (x) = (__typeof__(*(__private_ptr)))0; \
3890 + if (__copy_from_user((__force void *)&__private_val, (__private_ptr),\
3891 + sizeof(*(__private_ptr))) == 0) { \
3892 + (x) = (__typeof__(*(__private_ptr))) __private_val; \
3893 + __private_ret = 0; \
3898 +#define get_user(x, ptr) \
3900 + const __typeof__((*(ptr))) __user *private_ptr = (ptr); \
3901 + (access_ok(VERIFY_READ, private_ptr, sizeof(*private_ptr)) ? \
3902 + __get_user(x, private_ptr) : ((x) = (__typeof__(*ptr))0, -EFAULT)); \
3905 +#define __put_user(x, ptr) \
3907 + __typeof__(*(ptr)) __user *__private_ptr = ptr; \
3908 + __typeof__(*(__private_ptr)) __private_val; \
3909 + int __private_ret = -EFAULT; \
3910 + __private_val = (__typeof__(*(__private_ptr))) (x); \
3911 + if (__copy_to_user((__private_ptr), &__private_val, \
3912 + sizeof(*(__private_ptr))) == 0) { \
3913 + __private_ret = 0; \
3918 +#define put_user(x, ptr) \
3920 + __typeof__(*(ptr)) __user *private_ptr = (ptr); \
3921 + (access_ok(VERIFY_WRITE, private_ptr, sizeof(*private_ptr)) ? \
3922 + __put_user(x, private_ptr) : -EFAULT); \
3925 +#define strlen_user(str) strnlen_user(str, ~0U >> 1)
3927 +struct exception_table_entry
3929 + unsigned long insn;
3930 + unsigned long fixup;