1 --- a/arch/mips/Makefile
2 +++ b/arch/mips/Makefile
3 @@ -89,10 +89,12 @@ all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlin
5 cflags-y += -G 0 -mno-abicalls -fno-pic -pipe
6 cflags-y += -msoft-float
7 -LDFLAGS_vmlinux += -G 0 -static -n -nostdlib
8 +LDFLAGS_vmlinux += -G 0 -static -n -nostdlib --gc-sections
9 KBUILD_AFLAGS_MODULE += -mlong-calls
10 KBUILD_CFLAGS_MODULE += -mlong-calls
12 +KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections
14 cflags-y += -ffreestanding
17 --- a/arch/mips/kernel/vmlinux.lds.S
18 +++ b/arch/mips/kernel/vmlinux.lds.S
19 @@ -66,7 +66,7 @@ SECTIONS
20 /* Exception table for data bus errors */
22 __start___dbe_table = .;
23 -	*(__dbe_table)
24 +	KEEP(*(__dbe_table))
25 __stop___dbe_table = .;
28 @@ -111,7 +111,7 @@ SECTIONS
30 .mips.machines.init : AT(ADDR(.mips.machines.init) - LOAD_OFFSET) {
31 __mips_machines_start = .;
32 - *(.mips.machines.init)
33 + KEEP(*(.mips.machines.init))
34 __mips_machines_end = .;
37 --- a/include/asm-generic/vmlinux.lds.h
38 +++ b/include/asm-generic/vmlinux.lds.h
40 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
41 #define MCOUNT_REC() . = ALIGN(8); \
42 VMLINUX_SYMBOL(__start_mcount_loc) = .; \
43 -	*(__mcount_loc) \
44 +	KEEP(*(__mcount_loc)) \
45 VMLINUX_SYMBOL(__stop_mcount_loc) = .;
50 #ifdef CONFIG_TRACE_BRANCH_PROFILING
51 #define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
52 - *(_ftrace_annotated_branch) \
53 + KEEP(*(_ftrace_annotated_branch)) \
54 VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
56 #define LIKELY_PROFILE()
59 #ifdef CONFIG_PROFILE_ALL_BRANCHES
60 #define BRANCH_PROFILE() VMLINUX_SYMBOL(__start_branch_profile) = .; \
61 -	*(_ftrace_branch) \
62 +	KEEP(*(_ftrace_branch)) \
63 VMLINUX_SYMBOL(__stop_branch_profile) = .;
65 #define BRANCH_PROFILE()
67 #ifdef CONFIG_EVENT_TRACING
68 #define FTRACE_EVENTS() . = ALIGN(8); \
69 VMLINUX_SYMBOL(__start_ftrace_events) = .; \
70 -	*(_ftrace_events) \
71 +	KEEP(*(_ftrace_events)) \
72 VMLINUX_SYMBOL(__stop_ftrace_events) = .;
74 #define FTRACE_EVENTS()
78 #define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
79 - *(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \
80 + KEEP(*(__trace_printk_fmt)) /* Trace_printk fmt' pointer */ \
81 VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
83 #define TRACE_PRINTKS()
85 #ifdef CONFIG_FTRACE_SYSCALLS
86 #define TRACE_SYSCALLS() . = ALIGN(8); \
87 VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
88 - *(__syscalls_metadata) \
89 + KEEP(*(__syscalls_metadata)) \
90 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
92 #define TRACE_SYSCALLS()
94 #define KERNEL_DTB() \
96 VMLINUX_SYMBOL(__dtb_start) = .; \
97 - *(.dtb.init.rodata) \
98 + KEEP(*(.dtb.init.rodata)) \
99 VMLINUX_SYMBOL(__dtb_end) = .;
102 @@ -173,15 +173,16 @@
103 /* implement dynamic printk debug */ \
105 VMLINUX_SYMBOL(__start___jump_table) = .; \
106 -	*(__jump_table) \
107 +	KEEP(*(__jump_table)) \
108 VMLINUX_SYMBOL(__stop___jump_table) = .; \
110 VMLINUX_SYMBOL(__start___verbose) = .; \
111 -	*(__verbose) \
112 +	KEEP(*(__verbose)) \
113 VMLINUX_SYMBOL(__stop___verbose) = .; \
118 + *(.data.[a-zA-Z_]*)
121 * Data section helpers
122 @@ -235,39 +236,39 @@
124 .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
125 VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
126 - *(.pci_fixup_early) \
127 + KEEP(*(.pci_fixup_early)) \
128 VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
129 VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
130 - *(.pci_fixup_header) \
131 + KEEP(*(.pci_fixup_header)) \
132 VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
133 VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
134 - *(.pci_fixup_final) \
135 + KEEP(*(.pci_fixup_final)) \
136 VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
137 VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
138 - *(.pci_fixup_enable) \
139 + KEEP(*(.pci_fixup_enable)) \
140 VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
141 VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
142 - *(.pci_fixup_resume) \
143 + KEEP(*(.pci_fixup_resume)) \
144 VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
145 VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
146 - *(.pci_fixup_resume_early) \
147 + KEEP(*(.pci_fixup_resume_early)) \
148 VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
149 VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
150 - *(.pci_fixup_suspend) \
151 + KEEP(*(.pci_fixup_suspend)) \
152 VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
155 /* Built-in firmware blobs */ \
156 .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
157 VMLINUX_SYMBOL(__start_builtin_fw) = .; \
158 -	*(.builtin_fw) \
159 +	KEEP(*(.builtin_fw)) \
160 VMLINUX_SYMBOL(__end_builtin_fw) = .; \
163 /* RapidIO route ops */ \
164 .rio_ops : AT(ADDR(.rio_ops) - LOAD_OFFSET) { \
165 VMLINUX_SYMBOL(__start_rio_switch_ops) = .; \
166 - *(.rio_switch_ops) \
167 + KEEP(*(.rio_switch_ops)) \
168 VMLINUX_SYMBOL(__end_rio_switch_ops) = .; \
171 @@ -276,49 +277,49 @@
172 /* Kernel symbol table: Normal symbols */ \
173 __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
174 VMLINUX_SYMBOL(__start___ksymtab) = .; \
175 - *(SORT(___ksymtab+*)) \
176 + KEEP(*(SORT(___ksymtab+*))) \
177 VMLINUX_SYMBOL(__stop___ksymtab) = .; \
180 /* Kernel symbol table: GPL-only symbols */ \
181 __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
182 VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
183 - *(SORT(___ksymtab_gpl+*)) \
184 + KEEP(*(SORT(___ksymtab_gpl+*))) \
185 VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
188 /* Kernel symbol table: Normal unused symbols */ \
189 __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
190 VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
191 - *(SORT(___ksymtab_unused+*)) \
192 + KEEP(*(SORT(___ksymtab_unused+*))) \
193 VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
196 /* Kernel symbol table: GPL-only unused symbols */ \
197 __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
198 VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
199 - *(SORT(___ksymtab_unused_gpl+*)) \
200 + KEEP(*(SORT(___ksymtab_unused_gpl+*))) \
201 VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
204 /* Kernel symbol table: GPL-future-only symbols */ \
205 __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
206 VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
207 - *(SORT(___ksymtab_gpl_future+*)) \
208 + KEEP(*(SORT(___ksymtab_gpl_future+*))) \
209 VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
212 /* Kernel symbol table: Normal symbols */ \
213 __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
214 VMLINUX_SYMBOL(__start___kcrctab) = .; \
215 - *(SORT(___kcrctab+*)) \
216 + KEEP(*(SORT(___kcrctab+*))) \
217 VMLINUX_SYMBOL(__stop___kcrctab) = .; \
220 /* Kernel symbol table: GPL-only symbols */ \
221 __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
222 VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
223 - *(SORT(___kcrctab_gpl+*)) \
224 + KEEP(*(SORT(___kcrctab_gpl+*))) \
225 VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
228 @@ -332,14 +333,14 @@
229 /* Kernel symbol table: GPL-only unused symbols */ \
230 __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
231 VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
232 - *(SORT(___kcrctab_unused_gpl+*)) \
233 + KEEP(*(SORT(___kcrctab_unused_gpl+*))) \
234 VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
237 /* Kernel symbol table: GPL-future-only symbols */ \
238 __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
239 VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
240 - *(SORT(___kcrctab_gpl_future+*)) \
241 + KEEP(*(SORT(___kcrctab_gpl_future+*))) \
242 VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
245 @@ -362,14 +363,14 @@
246 /* Built-in module parameters. */ \
247 __param : AT(ADDR(__param) - LOAD_OFFSET) { \
248 VMLINUX_SYMBOL(__start___param) = .; \
249 -	*(__param) \
250 +	KEEP(*(__param)) \
251 VMLINUX_SYMBOL(__stop___param) = .; \
254 /* Built-in module versions. */ \
255 __modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
256 VMLINUX_SYMBOL(__start___modver) = .; \
257 -	*(__modver) \
258 +	KEEP(*(__modver)) \
259 VMLINUX_SYMBOL(__stop___modver) = .; \
260 . = ALIGN((align)); \
261 VMLINUX_SYMBOL(__end_rodata) = .; \
265 VMLINUX_SYMBOL(__entry_text_start) = .; \
266 -	*(.entry.text)
267 +	KEEP(*(.entry.text))
268 VMLINUX_SYMBOL(__entry_text_end) = .;
270 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
273 __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
274 VMLINUX_SYMBOL(__start___ex_table) = .; \
275 -	*(__ex_table) \
276 +	KEEP(*(__ex_table)) \
277 VMLINUX_SYMBOL(__stop___ex_table) = .; \
281 #ifdef CONFIG_CONSTRUCTORS
282 #define KERNEL_CTORS() . = ALIGN(8); \
283 VMLINUX_SYMBOL(__ctors_start) = .; \
284 -	*(.ctors) \
285 +	KEEP(*(.ctors)) \
286 VMLINUX_SYMBOL(__ctors_end) = .;
288 #define KERNEL_CTORS()
290 #define SBSS(sbss_align) \
291 . = ALIGN(sbss_align); \
292 .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
300 *(.bss..page_aligned) \
309 __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
310 VMLINUX_SYMBOL(__start___bug_table) = .; \
311 -	*(__bug_table) \
312 +	KEEP(*(__bug_table)) \
313 VMLINUX_SYMBOL(__stop___bug_table) = .; \
318 .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
319 VMLINUX_SYMBOL(__tracedata_start) = .; \
320 -	*(.tracedata) \
321 +	KEEP(*(.tracedata)) \
322 VMLINUX_SYMBOL(__tracedata_end) = .; \
325 @@ -622,17 +623,17 @@
326 #define INIT_SETUP(initsetup_align) \
327 . = ALIGN(initsetup_align); \
328 VMLINUX_SYMBOL(__setup_start) = .; \
329 -	*(.init.setup) \
330 +	KEEP(*(.init.setup)) \
331 VMLINUX_SYMBOL(__setup_end) = .;
333 #define INIT_CALLS_LEVEL(level) \
334 VMLINUX_SYMBOL(__initcall##level##_start) = .; \
335 - *(.initcall##level##.init) \
336 - *(.initcall##level##s.init) \
337 + KEEP(*(.initcall##level##.init)) \
338 + KEEP(*(.initcall##level##s.init)) \
341 VMLINUX_SYMBOL(__initcall_start) = .; \
342 - *(.initcallearly.init) \
343 + KEEP(*(.initcallearly.init)) \
344 INIT_CALLS_LEVEL(0) \
345 INIT_CALLS_LEVEL(1) \
346 INIT_CALLS_LEVEL(2) \
347 @@ -646,21 +647,21 @@
349 #define CON_INITCALL \
350 VMLINUX_SYMBOL(__con_initcall_start) = .; \
351 - *(.con_initcall.init) \
352 + KEEP(*(.con_initcall.init)) \
353 VMLINUX_SYMBOL(__con_initcall_end) = .;
355 #define SECURITY_INITCALL \
356 VMLINUX_SYMBOL(__security_initcall_start) = .; \
357 - *(.security_initcall.init) \
358 + KEEP(*(.security_initcall.init)) \
359 VMLINUX_SYMBOL(__security_initcall_end) = .;
361 #ifdef CONFIG_BLK_DEV_INITRD
362 #define INIT_RAM_FS \
364 VMLINUX_SYMBOL(__initramfs_start) = .; \
365 -	*(.init.ramfs)
366 +	KEEP(*(.init.ramfs))
368 - *(.init.ramfs.info)
369 + KEEP(*(.init.ramfs.info))