
This unifies the vdso mapping code and teaches it how to map special pages at addresses corresponding to symbols in the vdso image. The new code is used for all vdso variants, but so far only the 32-bit variants use the new vvar page position. Signed-off-by: Andy Lutomirski <luto@amacapital.net> Link: http://lkml.kernel.org/r/b6d7858ad7b5ac3fd3c29cab6d6d769bc45d195e.1399317206.git.luto@amacapital.net Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
51 lines · 1.2 KiB · C
#ifndef _ASM_X86_VDSO_H
|
|
#define _ASM_X86_VDSO_H
|
|
|
|
#include <asm/page_types.h>
|
|
#include <linux/linkage.h>
|
|
#include <linux/init.h>
|
|
|
|
#ifndef __ASSEMBLER__
|
|
|
|
/*
 * Describes one prebuilt vdso variant: the raw image bytes plus the
 * offsets (within the eventual user mapping) of the special symbols
 * the kernel needs to locate when it maps the vdso into a process.
 *
 * NOTE: field order is part of the in-kernel ABI between the vdso
 * build machinery and the mapping code — do not reorder.
 */
struct vdso_image {
	void *data;			/* Raw vdso image bytes */
	unsigned long size;		/* Always a multiple of PAGE_SIZE */
	struct page **pages;		/* Big enough for data/size page pointers */

	/* Alternatives-patching region inside the image (offset, length). */
	unsigned long alt, alt_len;

	unsigned long sym_end_mapping;	/* Total size of the mapping */

	/*
	 * Offsets of special symbols within the mapping; 0 presumably
	 * means "not present in this variant" — confirm against the
	 * vdso2c symbol extractor.
	 */
	unsigned long sym_vvar_page;
	unsigned long sym_hpet_page;
	unsigned long sym_VDSO32_NOTE_MASK;
	unsigned long sym___kernel_sigreturn;
	unsigned long sym___kernel_rt_sigreturn;
	unsigned long sym___kernel_vsyscall;
	unsigned long sym_VDSO32_SYSENTER_RETURN;
};

#ifdef CONFIG_X86_64
|
|
extern const struct vdso_image vdso_image_64;
|
|
#endif
|
|
|
|
#ifdef CONFIG_X86_X32
|
|
extern const struct vdso_image vdso_image_x32;
|
|
#endif
|
|
|
|
#if defined CONFIG_X86_32 || defined CONFIG_COMPAT
|
|
extern const struct vdso_image vdso_image_32_int80;
|
|
#ifdef CONFIG_COMPAT
|
|
extern const struct vdso_image vdso_image_32_syscall;
|
|
#endif
|
|
extern const struct vdso_image vdso_image_32_sysenter;
|
|
|
|
extern const struct vdso_image *selected_vdso32;
|
|
#endif
|
|
|
|
extern void __init init_vdso_image(const struct vdso_image *image);
|
|
|
|
#endif /* __ASSEMBLER__ */

#endif /* _ASM_X86_VDSO_H */