Merge branch 'kbuild' of git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild
Pull kbuild updates from Michal Marek:

 - EXPORT_SYMBOL for asm source by Al Viro.

   This does bring a regression, because genksyms no longer generates
   checksums for these symbols (CONFIG_MODVERSIONS). Nick Piggin is
   working on a patch to fix this.

   Plus, we are talking about functions like strcpy(), which rarely
   change prototypes.

 - Fixes for PPC fallout of the above by Stephen Rothwell and Nick
   Piggin

 - fixdep speedup by Alexey Dobriyan.

 - preparatory work by Nick Piggin to allow architectures to build with
   -ffunction-sections, -fdata-sections and --gc-sections

 - CONFIG_THIN_ARCHIVES support by Stephen Rothwell

 - fix for filenames with colons in the initramfs source by me.

* 'kbuild' of git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild: (22 commits)
  initramfs: Escape colons in depfile
  ppc: there is no clear_pages to export
  powerpc/64: whitelist unresolved modversions CRCs
  kbuild: -ffunction-sections fix for archs with conflicting sections
  kbuild: add arch specific post-link Makefile
  kbuild: allow archs to select link dead code/data elimination
  kbuild: allow architectures to use thin archives instead of ld -r
  kbuild: Regenerate genksyms lexer
  kbuild: genksyms fix for typeof handling
  fixdep: faster CONFIG_ search
  ia64: move exports to definitions
  sparc32: debride memcpy.S a bit
  [sparc] unify 32bit and 64bit string.h
  sparc: move exports to definitions
  ppc: move exports to definitions
  arm: move exports to definitions
  s390: move exports to definitions
  m68k: move exports to definitions
  alpha: move exports to actual definitions
  x86: move exports to actual definitions
  ...
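For readers unfamiliar with the mechanism: Al Viro's series adds an asm-usable EXPORT_SYMBOL() macro, pulled in via <asm/export.h> (backed by asm-generic/export.h), so an assembly routine can be exported right where it is defined instead of through a separate C "ksyms" file. A minimal sketch of the pattern, with a made-up my_memzero routine standing in for the real functions touched in the hunks below:

#include <linux/linkage.h>
#include <asm/export.h>		/* asm-friendly EXPORT_SYMBOL() */

/* my_memzero(void *dst in %rdi, size_t len in %rsi) -- hypothetical,
 * illustration only; the include and export lines are the pattern
 * this series introduces, the body is just a stand-in. */
ENTRY(my_memzero)
	movq	%rsi, %rcx	/* byte count for rep */
	xorl	%eax, %eax	/* %al = 0, the fill byte */
	rep stosb		/* store %al at (%rdi), %rcx times */
	ret
ENDPROC(my_memzero)
EXPORT_SYMBOL(my_memzero)	/* emits the __ksymtab entry the module loader resolves */

Every hunk that follows has this shape: add the include near the top of the file, then place EXPORT_SYMBOL() or EXPORT_SYMBOL_GPL() immediately after the matching ENDPROC().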
@@ -28,6 +28,7 @@
 #include <linux/linkage.h>
 #include <asm/errno.h>
 #include <asm/asm.h>
+#include <asm/export.h>
 
 /*
  * computes a partial checksum, e.g. for TCP/UDP fragments
@@ -251,6 +252,7 @@ ENTRY(csum_partial)
 ENDPROC(csum_partial)
 
 #endif
+EXPORT_SYMBOL(csum_partial)
 
 /*
 unsigned int csum_partial_copy_generic (const char *src, char *dst,
@@ -490,3 +492,4 @@ ENDPROC(csum_partial_copy_generic)
 #undef ROUND1
 
 #endif
+EXPORT_SYMBOL(csum_partial_copy_generic)

@@ -1,6 +1,7 @@
 #include <linux/linkage.h>
 #include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
+#include <asm/export.h>
 
 /*
  * Most CPUs support enhanced REP MOVSB/STOSB instructions. It is
@@ -23,6 +24,7 @@ ENTRY(clear_page)
 	rep stosq
 	ret
 ENDPROC(clear_page)
+EXPORT_SYMBOL(clear_page)
 
 ENTRY(clear_page_orig)

@@ -7,6 +7,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/export.h>
 
 .text
 
@@ -48,3 +49,4 @@ ENTRY(cmpxchg8b_emu)
 	ret
 
 ENDPROC(cmpxchg8b_emu)
+EXPORT_SYMBOL(cmpxchg8b_emu)

@@ -3,6 +3,7 @@
 #include <linux/linkage.h>
 #include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
+#include <asm/export.h>
 
 /*
  * Some CPUs run faster using the string copy instructions (sane microcode).
@@ -17,6 +18,7 @@ ENTRY(copy_page)
 	rep	movsq
 	ret
 ENDPROC(copy_page)
+EXPORT_SYMBOL(copy_page)
 
 ENTRY(copy_page_regs)
 	subq	$2*8, %rsp

@@ -14,6 +14,7 @@
 #include <asm/alternative-asm.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
+#include <asm/export.h>
 
 /* Standard copy_to_user with segment limit checking */
 ENTRY(_copy_to_user)
@@ -29,6 +30,7 @@ ENTRY(_copy_to_user)
 		      "jmp copy_user_enhanced_fast_string",	\
 		      X86_FEATURE_ERMS
 ENDPROC(_copy_to_user)
+EXPORT_SYMBOL(_copy_to_user)
 
 /* Standard copy_from_user with segment limit checking */
 ENTRY(_copy_from_user)
@@ -44,6 +46,8 @@ ENTRY(_copy_from_user)
 		      "jmp copy_user_enhanced_fast_string",	\
 		      X86_FEATURE_ERMS
 ENDPROC(_copy_from_user)
+EXPORT_SYMBOL(_copy_from_user)
+
 
 	.section .fixup,"ax"
 	/* must zero dest */
@@ -155,6 +159,7 @@ ENTRY(copy_user_generic_unrolled)
 	_ASM_EXTABLE(21b,50b)
 	_ASM_EXTABLE(22b,50b)
 ENDPROC(copy_user_generic_unrolled)
+EXPORT_SYMBOL(copy_user_generic_unrolled)
 
 /* Some CPUs run faster using the string copy instructions.
  * This is also a lot simpler. Use them when possible.
@@ -200,6 +205,7 @@ ENTRY(copy_user_generic_string)
 	_ASM_EXTABLE(1b,11b)
 	_ASM_EXTABLE(3b,12b)
 ENDPROC(copy_user_generic_string)
+EXPORT_SYMBOL(copy_user_generic_string)
 
 /*
  * Some CPUs are adding enhanced REP MOVSB/STOSB instructions.
@@ -229,6 +235,7 @@ ENTRY(copy_user_enhanced_fast_string)
 
 	_ASM_EXTABLE(1b,12b)
 ENDPROC(copy_user_enhanced_fast_string)
+EXPORT_SYMBOL(copy_user_enhanced_fast_string)
 
 /*
  * copy_user_nocache - Uncached memory copy with exception handling
@@ -379,3 +386,4 @@ ENTRY(__copy_user_nocache)
 	_ASM_EXTABLE(40b,.L_fixup_1b_copy)
 	_ASM_EXTABLE(41b,.L_fixup_1b_copy)
 ENDPROC(__copy_user_nocache)
+EXPORT_SYMBOL(__copy_user_nocache)

@@ -135,6 +135,7 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
 	return (__force __wsum)add32_with_carry(do_csum(buff, len),
 						(__force u32)sum);
 }
+EXPORT_SYMBOL(csum_partial);
 
 /*
  * this routine is used for miscellaneous IP-like checksums, mainly
@@ -32,6 +32,7 @@
 #include <asm/thread_info.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
+#include <asm/export.h>
 
 	.text
 ENTRY(__get_user_1)
@@ -44,6 +45,7 @@ ENTRY(__get_user_1)
 	ASM_CLAC
 	ret
 ENDPROC(__get_user_1)
+EXPORT_SYMBOL(__get_user_1)
 
 ENTRY(__get_user_2)
 	add $1,%_ASM_AX
@@ -57,6 +59,7 @@ ENTRY(__get_user_2)
 	ASM_CLAC
 	ret
 ENDPROC(__get_user_2)
+EXPORT_SYMBOL(__get_user_2)
 
 ENTRY(__get_user_4)
 	add $3,%_ASM_AX
@@ -70,6 +73,7 @@ ENTRY(__get_user_4)
 	ASM_CLAC
 	ret
 ENDPROC(__get_user_4)
+EXPORT_SYMBOL(__get_user_4)
 
 ENTRY(__get_user_8)
 #ifdef CONFIG_X86_64
@@ -97,6 +101,7 @@ ENTRY(__get_user_8)
 	ret
 #endif
 ENDPROC(__get_user_8)
+EXPORT_SYMBOL(__get_user_8)
 
 
 bad_get_user:

@@ -1,4 +1,5 @@
 #include <linux/linkage.h>
+#include <asm/export.h>
 
 #include <asm/asm.h>
 
@@ -32,6 +33,7 @@ ENTRY(__sw_hweight32)
 	__ASM_SIZE(pop,) %__ASM_REG(dx)
 	ret
 ENDPROC(__sw_hweight32)
+EXPORT_SYMBOL(__sw_hweight32)
 
 ENTRY(__sw_hweight64)
 #ifdef CONFIG_X86_64
@@ -77,3 +79,4 @@ ENTRY(__sw_hweight64)
 	ret
 #endif
 ENDPROC(__sw_hweight64)
+EXPORT_SYMBOL(__sw_hweight64)

@@ -4,6 +4,7 @@
 #include <asm/errno.h>
 #include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
+#include <asm/export.h>
 
 /*
  * We build a jump to memcpy_orig by default which gets NOPped out on
@@ -40,6 +41,8 @@ ENTRY(memcpy)
 	ret
 ENDPROC(memcpy)
 ENDPROC(__memcpy)
+EXPORT_SYMBOL(memcpy)
+EXPORT_SYMBOL(__memcpy)
 
 /*
  * memcpy_erms() - enhanced fast string memcpy. This is faster and
@@ -274,6 +277,7 @@ ENTRY(memcpy_mcsafe_unrolled)
 	xorq %rax, %rax
 	ret
 ENDPROC(memcpy_mcsafe_unrolled)
+EXPORT_SYMBOL_GPL(memcpy_mcsafe_unrolled)
 
 	.section .fixup, "ax"
 	/* Return -EFAULT for any failure */

@@ -8,6 +8,7 @@
 #include <linux/linkage.h>
 #include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
+#include <asm/export.h>
 
 #undef memmove
 
@@ -207,3 +208,5 @@ ENTRY(__memmove)
 	retq
 ENDPROC(__memmove)
 ENDPROC(memmove)
+EXPORT_SYMBOL(__memmove)
+EXPORT_SYMBOL(memmove)

@@ -3,6 +3,7 @@
 #include <linux/linkage.h>
 #include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
+#include <asm/export.h>
 
 .weak memset
 
@@ -43,6 +44,8 @@ ENTRY(__memset)
 	ret
 ENDPROC(memset)
 ENDPROC(__memset)
+EXPORT_SYMBOL(memset)
+EXPORT_SYMBOL(__memset)
 
 /*
  * ISO C memset - set a memory block to a byte value. This function uses
@@ -15,6 +15,7 @@
 #include <asm/errno.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
+#include <asm/export.h>
 
 
 /*
@@ -43,6 +44,7 @@ ENTRY(__put_user_1)
 	xor %eax,%eax
 	EXIT
 ENDPROC(__put_user_1)
+EXPORT_SYMBOL(__put_user_1)
 
 ENTRY(__put_user_2)
 	ENTER
@@ -55,6 +57,7 @@ ENTRY(__put_user_2)
 	xor %eax,%eax
 	EXIT
 ENDPROC(__put_user_2)
+EXPORT_SYMBOL(__put_user_2)
 
 ENTRY(__put_user_4)
 	ENTER
@@ -67,6 +70,7 @@ ENTRY(__put_user_4)
 	xor %eax,%eax
 	EXIT
 ENDPROC(__put_user_4)
+EXPORT_SYMBOL(__put_user_4)
 
 ENTRY(__put_user_8)
 	ENTER
@@ -82,6 +86,7 @@ ENTRY(__put_user_8)
 	xor %eax,%eax
 	EXIT
 ENDPROC(__put_user_8)
+EXPORT_SYMBOL(__put_user_8)
 
 bad_put_user:
 	movl $-EFAULT,%eax

@@ -1,4 +1,5 @@
 #include <linux/string.h>
+#include <linux/export.h>
 
 char *strstr(const char *cs, const char *ct)
 {
@@ -28,4 +29,4 @@ __asm__ __volatile__(
 	: "dx", "di");
 return __res;
 }
-
+EXPORT_SYMBOL(strstr);