220-gc_sections.patch

From: Felix Fietkau <nbd@nbd.name>
use -ffunction-sections, -fdata-sections and --gc-sections

In combination with kernel symbol export stripping this significantly reduces
the kernel image size. Used on both ARM and MIPS architectures.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
Signed-off-by: Jonas Gorski <jogo@openwrt.org>
Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
---
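Background note (not part of the diff): with --gc-sections the linker discards
any input section that is not reachable from the entry point through
relocations. Kernel tables such as initcalls, __ksymtab and the PCI fixup
lists are only referenced via linker-generated start/stop symbols, so they
look unreferenced and must be wrapped in KEEP() to survive, which is why the
hunks below add KEEP() around those table sections. A minimal sketch of the
mechanism outside the kernel (the names demo.c, demo.lds and .my_table are
made up for illustration):

    /* demo.lds -- toy linker script, assuming demo.o was built with
     *   gcc -ffunction-sections -fdata-sections -c demo.c
     * and linked with
     *   ld --gc-sections -e main -T demo.lds demo.o -o demo
     */
    SECTIONS
    {
      .text : {
        *(.text .text.*)      /* per-function sections; unreachable ones are dropped */
      }
      .my_table : {
        __my_table_start = .;
        KEEP(*(.my_table))    /* reached only through the start/stop symbols, so
                                 without KEEP() --gc-sections would discard it */
        __my_table_end = .;
      }
      .data : { *(.data .data.*) }
      .bss  : { *(.bss .bss.*) *(COMMON) }
    }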
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -89,10 +89,14 @@ all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlin
#
cflags-y += -G 0 -mno-abicalls -fno-pic -pipe
cflags-y += -msoft-float
-LDFLAGS_vmlinux += -G 0 -static -n -nostdlib
+LDFLAGS_vmlinux += -G 0 -static -n -nostdlib --gc-sections
KBUILD_AFLAGS_MODULE += -mlong-calls
KBUILD_CFLAGS_MODULE += -mlong-calls
+ifndef CONFIG_FUNCTION_TRACER
+KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections
+endif
+
#
# pass -msoft-float to GAS if it supports it. However on newer binutils
# (specifically newer than 2.24.51.20140728) we then also need to explicitly
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -67,7 +67,7 @@ SECTIONS
/* Exception table for data bus errors */
__dbe_table : {
__start___dbe_table = .;
- *(__dbe_table)
+ KEEP(*(__dbe_table))
__stop___dbe_table = .;
}
@@ -112,7 +112,7 @@ SECTIONS
. = ALIGN(4);
.mips.machines.init : AT(ADDR(.mips.machines.init) - LOAD_OFFSET) {
__mips_machines_start = .;
- *(.mips.machines.init)
+ KEEP(*(.mips.machines.init))
__mips_machines_end = .;
}
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -89,7 +89,7 @@
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_mcount_loc) = .; \
- *(__mcount_loc) \
+ KEEP(*(__mcount_loc)) \
VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
@@ -97,7 +97,7 @@
#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
- *(_ftrace_annotated_branch) \
+ KEEP(*(_ftrace_annotated_branch)) \
VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
@@ -105,7 +105,7 @@
#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE() VMLINUX_SYMBOL(__start_branch_profile) = .; \
- *(_ftrace_branch) \
+ KEEP(*(_ftrace_branch)) \
VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
@@ -114,7 +114,7 @@
#ifdef CONFIG_KPROBES
#define KPROBE_BLACKLIST() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \
- *(_kprobe_blacklist) \
+ KEEP(*(_kprobe_blacklist)) \
VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .;
#else
#define KPROBE_BLACKLIST()
@@ -123,10 +123,10 @@
#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_ftrace_events) = .; \
- *(_ftrace_events) \
+ KEEP(*(_ftrace_events)) \
VMLINUX_SYMBOL(__stop_ftrace_events) = .; \
VMLINUX_SYMBOL(__start_ftrace_enum_maps) = .; \
- *(_ftrace_enum_map) \
+ KEEP(*(_ftrace_enum_map)) \
VMLINUX_SYMBOL(__stop_ftrace_enum_maps) = .;
#else
#define FTRACE_EVENTS()
@@ -134,7 +134,7 @@
#ifdef CONFIG_TRACING
#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
- *(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \
+ KEEP(*(__trace_printk_fmt)) /* Trace_printk fmt' pointer */ \
VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .; \
*(__tracepoint_str) /* Trace_printk fmt' pointer */ \
@@ -147,7 +147,7 @@
#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
- *(__syscalls_metadata) \
+ KEEP(*(__syscalls_metadata)) \
VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
@@ -169,8 +169,8 @@
#define _OF_TABLE_1(name) \
. = ALIGN(8); \
VMLINUX_SYMBOL(__##name##_of_table) = .; \
- *(__##name##_of_table) \
- *(__##name##_of_table_end)
+ KEEP(*(__##name##_of_table)) \
+ KEEP(*(__##name##_of_table_end))
#define CLKSRC_OF_TABLES() OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
@@ -184,7 +184,7 @@
#define KERNEL_DTB() \
STRUCT_ALIGN(); \
VMLINUX_SYMBOL(__dtb_start) = .; \
- *(.dtb.init.rodata) \
+ KEEP(*(.dtb.init.rodata)) \
VMLINUX_SYMBOL(__dtb_end) = .;
/* .data section */
@@ -200,16 +200,17 @@
/* implement dynamic printk debug */ \
. = ALIGN(8); \
VMLINUX_SYMBOL(__start___jump_table) = .; \
- *(__jump_table) \
+ KEEP(*(__jump_table)) \
VMLINUX_SYMBOL(__stop___jump_table) = .; \
. = ALIGN(8); \
VMLINUX_SYMBOL(__start___verbose) = .; \
- *(__verbose) \
+ KEEP(*(__verbose)) \
VMLINUX_SYMBOL(__stop___verbose) = .; \
LIKELY_PROFILE() \
BRANCH_PROFILE() \
TRACE_PRINTKS() \
- TRACEPOINT_STR()
+ TRACEPOINT_STR() \
+ *(.data.[a-zA-Z_]*)
/*
* Data section helpers
@@ -263,35 +264,35 @@
/* PCI quirks */ \
.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
- *(.pci_fixup_early) \
+ KEEP(*(.pci_fixup_early)) \
VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
- *(.pci_fixup_header) \
+ KEEP(*(.pci_fixup_header)) \
VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
- *(.pci_fixup_final) \
+ KEEP(*(.pci_fixup_final)) \
VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
- *(.pci_fixup_enable) \
+ KEEP(*(.pci_fixup_enable)) \
VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
- *(.pci_fixup_resume) \
+ KEEP(*(.pci_fixup_resume)) \
VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
- *(.pci_fixup_resume_early) \
+ KEEP(*(.pci_fixup_resume_early)) \
VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
- *(.pci_fixup_suspend) \
+ KEEP(*(.pci_fixup_suspend)) \
VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_suspend_late) = .; \
- *(.pci_fixup_suspend_late) \
+ KEEP(*(.pci_fixup_suspend_late)) \
VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .; \
} \
\
/* Built-in firmware blobs */ \
.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_builtin_fw) = .; \
- *(.builtin_fw) \
+ KEEP(*(.builtin_fw)) \
VMLINUX_SYMBOL(__end_builtin_fw) = .; \
} \
\
@@ -300,49 +301,49 @@
/* Kernel symbol table: Normal symbols */ \
__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab) = .; \
- *(SORT(___ksymtab+*)) \
+ KEEP(*(SORT(___ksymtab+*))) \
VMLINUX_SYMBOL(__stop___ksymtab) = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
- *(SORT(___ksymtab_gpl+*)) \
+ KEEP(*(SORT(___ksymtab_gpl+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
} \
\
/* Kernel symbol table: Normal unused symbols */ \
__ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
- *(SORT(___ksymtab_unused+*)) \
+ KEEP(*(SORT(___ksymtab_unused+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
} \
\
/* Kernel symbol table: GPL-only unused symbols */ \
__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
- *(SORT(___ksymtab_unused_gpl+*)) \
+ KEEP(*(SORT(___ksymtab_unused_gpl+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
} \
\
/* Kernel symbol table: GPL-future-only symbols */ \
__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
- *(SORT(___ksymtab_gpl_future+*)) \
+ KEEP(*(SORT(___ksymtab_gpl_future+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
} \
\
/* Kernel symbol table: Normal symbols */ \
__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab) = .; \
- *(SORT(___kcrctab+*)) \
+ KEEP(*(SORT(___kcrctab+*))) \
VMLINUX_SYMBOL(__stop___kcrctab) = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
- *(SORT(___kcrctab_gpl+*)) \
+ KEEP(*(SORT(___kcrctab_gpl+*))) \
VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
} \
\
@@ -356,14 +357,14 @@
/* Kernel symbol table: GPL-only unused symbols */ \
__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
- *(SORT(___kcrctab_unused_gpl+*)) \
+ KEEP(*(SORT(___kcrctab_unused_gpl+*))) \
VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
} \
\
/* Kernel symbol table: GPL-future-only symbols */ \
__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
- *(SORT(___kcrctab_gpl_future+*)) \
+ KEEP(*(SORT(___kcrctab_gpl_future+*))) \
VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
} \
\
@@ -382,14 +383,14 @@
/* Built-in module parameters. */ \
__param : AT(ADDR(__param) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___param) = .; \
- *(__param) \
+ KEEP(*(__param)) \
VMLINUX_SYMBOL(__stop___param) = .; \
} \
\
/* Built-in module versions. */ \
__modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___modver) = .; \
- *(__modver) \
+ KEEP(*(__modver)) \
VMLINUX_SYMBOL(__stop___modver) = .; \
. = ALIGN((align)); \
VMLINUX_SYMBOL(__end_rodata) = .; \
@@ -445,7 +446,7 @@
#define ENTRY_TEXT \
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__entry_text_start) = .; \
- *(.entry.text) \
+ KEEP(*(.entry.text)) \
VMLINUX_SYMBOL(__entry_text_end) = .;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -473,7 +474,7 @@
. = ALIGN(align); \
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ex_table) = .; \
- *(__ex_table) \
+ KEEP(*(__ex_table)) \
VMLINUX_SYMBOL(__stop___ex_table) = .; \
}
@@ -489,9 +490,9 @@
#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS() . = ALIGN(8); \
VMLINUX_SYMBOL(__ctors_start) = .; \
- *(.ctors) \
+ KEEP(*(.ctors)) \
*(SORT(.init_array.*)) \
- *(.init_array) \
+ KEEP(*(.init_array)) \
VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
@@ -546,7 +547,7 @@
#define SBSS(sbss_align) \
. = ALIGN(sbss_align); \
.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
- *(.sbss) \
+ *(.sbss .sbss.*) \
*(.scommon) \
}
@@ -564,7 +565,7 @@
BSS_FIRST_SECTIONS \
*(.bss..page_aligned) \
*(.dynbss) \
- *(.bss) \
+ *(.bss .bss.*) \
*(COMMON) \
}
@@ -613,7 +614,7 @@
. = ALIGN(8); \
__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___bug_table) = .; \
- *(__bug_table) \
+ KEEP(*(__bug_table)) \
VMLINUX_SYMBOL(__stop___bug_table) = .; \
}
#else
@@ -625,7 +626,7 @@
. = ALIGN(4); \
.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__tracedata_start) = .; \
- *(.tracedata) \
+ KEEP(*(.tracedata)) \
VMLINUX_SYMBOL(__tracedata_end) = .; \
}
#else
@@ -642,17 +643,17 @@
#define INIT_SETUP(initsetup_align) \
. = ALIGN(initsetup_align); \
VMLINUX_SYMBOL(__setup_start) = .; \
- *(.init.setup) \
+ KEEP(*(.init.setup)) \
VMLINUX_SYMBOL(__setup_end) = .;
#define INIT_CALLS_LEVEL(level) \
VMLINUX_SYMBOL(__initcall##level##_start) = .; \
- *(.initcall##level##.init) \
- *(.initcall##level##s.init) \
+ KEEP(*(.initcall##level##.init)) \
+ KEEP(*(.initcall##level##s.init)) \
#define INIT_CALLS \
VMLINUX_SYMBOL(__initcall_start) = .; \
- *(.initcallearly.init) \
+ KEEP(*(.initcallearly.init)) \
INIT_CALLS_LEVEL(0) \
INIT_CALLS_LEVEL(1) \
INIT_CALLS_LEVEL(2) \
@@ -666,21 +667,21 @@
#define CON_INITCALL \
VMLINUX_SYMBOL(__con_initcall_start) = .; \
- *(.con_initcall.init) \
+ KEEP(*(.con_initcall.init)) \
VMLINUX_SYMBOL(__con_initcall_end) = .;
#define SECURITY_INITCALL \
VMLINUX_SYMBOL(__security_initcall_start) = .; \
- *(.security_initcall.init) \
+ KEEP(*(.security_initcall.init)) \
VMLINUX_SYMBOL(__security_initcall_end) = .;
#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
. = ALIGN(4); \
VMLINUX_SYMBOL(__initramfs_start) = .; \
- *(.init.ramfs) \
+ KEEP(*(.init.ramfs)) \
. = ALIGN(8); \
- *(.init.ramfs.info)
+ KEEP(*(.init.ramfs.info))
#else
#define INIT_RAM_FS
#endif
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -18,11 +18,16 @@ ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
LDFLAGS_vmlinux += --be8
LDFLAGS_MODULE += --be8
endif
+LDFLAGS_vmlinux += --gc-sections
OBJCOPYFLAGS :=-O binary -R .comment -S
GZFLAGS :=-9
#KBUILD_CFLAGS +=-pipe
+ifndef CONFIG_FUNCTION_TRACER
+KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections
+endif
+
# Never generate .eh_frame
KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm)
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -15,13 +15,13 @@
#define PROC_INFO \
. = ALIGN(4); \
VMLINUX_SYMBOL(__proc_info_begin) = .; \
- *(.proc.info.init) \
+ KEEP(*(.proc.info.init)) \
VMLINUX_SYMBOL(__proc_info_end) = .;
#define IDMAP_TEXT \
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__idmap_text_start) = .; \
- *(.idmap.text) \
+ KEEP(*(.idmap.text)) \
VMLINUX_SYMBOL(__idmap_text_end) = .; \
. = ALIGN(PAGE_SIZE); \
VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
@@ -102,7 +102,7 @@ SECTIONS
_stext = .; /* Text and read-only data */
IDMAP_TEXT
__exception_text_start = .;
- *(.exception.text)
+ KEEP(*(.exception.text))
__exception_text_end = .;
IRQENTRY_TEXT
TEXT_TEXT
@@ -126,7 +126,7 @@ SECTIONS
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
__start___ex_table = .;
#ifdef CONFIG_MMU
- *(__ex_table)
+ KEEP(*(__ex_table))
#endif
__stop___ex_table = .;
}
@@ -138,12 +138,12 @@ SECTIONS
. = ALIGN(8);
.ARM.unwind_idx : {
__start_unwind_idx = .;
- *(.ARM.exidx*)
+ KEEP(*(.ARM.exidx*))
__stop_unwind_idx = .;
}
.ARM.unwind_tab : {
__start_unwind_tab = .;
- *(.ARM.extab*)
+ KEEP(*(.ARM.extab*))
__stop_unwind_tab = .;
}
#endif
@@ -166,14 +166,14 @@ SECTIONS
*/
__vectors_start = .;
.vectors 0 : AT(__vectors_start) {
- *(.vectors)
+ KEEP(*(.vectors))
}
. = __vectors_start + SIZEOF(.vectors);
__vectors_end = .;
__stubs_start = .;
.stubs 0x1000 : AT(__stubs_start) {
- *(.stubs)
+ KEEP(*(.stubs))
}
. = __stubs_start + SIZEOF(.stubs);
__stubs_end = .;
@@ -187,24 +187,24 @@ SECTIONS
}
.init.arch.info : {
__arch_info_begin = .;
- *(.arch.info.init)
+ KEEP(*(.arch.info.init))
__arch_info_end = .;
}
.init.tagtable : {
__tagtable_begin = .;
- *(.taglist.init)
+ KEEP(*(.taglist.init))
__tagtable_end = .;
}
#ifdef CONFIG_SMP_ON_UP
.init.smpalt : {
__smpalt_begin = .;
- *(.alt.smp.init)
+ KEEP(*(.alt.smp.init))
__smpalt_end = .;
}
#endif
.init.pv_table : {
__pv_table_begin = .;
- *(.pv_table)
+ KEEP(*(.pv_table))
__pv_table_end = .;
}
.init.data : {
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -107,6 +107,7 @@ ifeq ($(CONFIG_FUNCTION_TRACER),y)
ORIG_CFLAGS := $(KBUILD_CFLAGS)
KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
endif
+KBUILD_CFLAGS_KERNEL := $(patsubst -f%-sections,,$(KBUILD_CFLAGS_KERNEL))
ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
asflags-y := -DZIMAGE