Merge tag 'kvm-s390-master-4.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux

KVM: s390: Fixes for 4.19

- Fallout from the hugetlbfs support: pfmf interpretation and locking
- VSIE: fix keywrapping for nested guests
Committed by Radim Krčmář on 2018-09-07 18:30:47 +02:00

243 changed files with 2249 additions and 1653 deletions

arch/x86/include/asm/irqflags.h

@@ -33,7 +33,8 @@ extern inline unsigned long native_save_fl(void)
 	return flags;
 }
 
-static inline void native_restore_fl(unsigned long flags)
+extern inline void native_restore_fl(unsigned long flags);
+extern inline void native_restore_fl(unsigned long flags)
 {
 	asm volatile("push %0 ; popf"
 		     : /* no output */
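
Background for this hunk: under the gnu89/gnu_inline semantics the kernel builds with, an "extern inline" definition is only used for inlining and never emitted as a symbol, so exactly one other translation unit must supply the out-of-line copy (in the kernel an assembly implementation plays that role), which is what lets paravirt code still take the function's address. A minimal two-file user-space sketch of the same pattern, with made-up names, assuming gnu89 inline semantics (e.g. gcc -fgnu89-inline):

/* add_one.h (hypothetical): callers may inline the body, but this
 * header never causes an out-of-line add_one symbol to be emitted. */
extern inline int add_one(int x);
extern inline int add_one(int x)
{
	return x + 1;
}

/* add_one.c (hypothetical, a separate translation unit): provides the
 * single real symbol so &add_one and any non-inlined calls still link. */
int add_one(int x)
{
	return x + 1;
}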

arch/x86/include/asm/pgtable-3level.h

@@ -2,6 +2,8 @@
 #ifndef _ASM_X86_PGTABLE_3LEVEL_H
 #define _ASM_X86_PGTABLE_3LEVEL_H
 
+#include <asm/atomic64_32.h>
+
 /*
  * Intel Physical Address Extension (PAE) Mode - three-level page
  * tables on PPro+ CPUs.
@@ -150,10 +152,7 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
 {
 	pte_t res;
 
-	/* xchg acts as a barrier before the setting of the high bits */
-	res.pte_low = xchg(&ptep->pte_low, 0);
-	res.pte_high = ptep->pte_high;
-	ptep->pte_high = 0;
+	res.pte = (pteval_t)arch_atomic64_xchg((atomic64_t *)ptep, 0);
 
 	return res;
 }
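
A PAE PTE is 64 bits wide, so clearing it as two 32-bit halves leaves a window in which a concurrent page-table walk can observe a torn entry; the single 64-bit atomic exchange closes that window. A minimal user-space sketch of the same idea, with a hypothetical shared "pte" variable and C11 atomics standing in for arch_atomic64_xchg():

#include <stdint.h>
#include <stdatomic.h>

/* Hypothetical stand-in for a 64-bit PAE PTE shared with a concurrent walker. */
static _Atomic uint64_t pte;

/* Clearing the entry as two 32-bit halves (the old pte_low/pte_high
 * sequence) can expose a torn value; one 64-bit atomic exchange cannot. */
static uint64_t get_and_clear_pte(void)
{
	return atomic_exchange(&pte, 0);
}

int main(void)
{
	atomic_store(&pte, 0x8000000000000001ULL);
	return get_and_clear_pte() == 0x8000000000000001ULL ? 0 : 1;
}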

arch/x86/include/asm/processor.h

@@ -132,6 +132,8 @@ struct cpuinfo_x86 {
 	/* Index into per_cpu list: */
 	u16			cpu_index;
 	u32			microcode;
+	/* Address space bits used by the cache internally */
+	u8			x86_cache_bits;
 	unsigned		initialized : 1;
 } __randomize_layout;
 
@@ -183,7 +185,7 @@ extern void cpu_detect(struct cpuinfo_x86 *c);
 
 static inline unsigned long long l1tf_pfn_limit(void)
 {
-	return BIT_ULL(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT);
+	return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
 }
 
 extern void early_cpu_init(void);
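
l1tf_pfn_limit() now derives the safe PFN ceiling from the number of address bits the L1D cache uses internally rather than from the CPU's reported physical address width. A worked example of the arithmetic, assuming an illustrative x86_cache_bits of 44 (the real value is CPU-specific) and the usual PAGE_SHIFT of 12:

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(n)	(1ULL << (n))
#define PAGE_SHIFT	12

int main(void)
{
	unsigned int x86_cache_bits = 44;	/* illustrative, CPU-specific */
	uint64_t limit_pfn = BIT_ULL(x86_cache_bits - 1 - PAGE_SHIFT);

	/* 2^(44-1-12) = 2^31 page frames, i.e. memory below
	 * 2^31 frames * 4 KiB/frame = 2^43 bytes = 8 TiB. */
	printf("l1tf pfn limit: %llu frames (%llu GiB)\n",
	       (unsigned long long)limit_pfn,
	       (unsigned long long)(limit_pfn >> (30 - PAGE_SHIFT)));
	return 0;
}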

arch/x86/include/asm/signal.h

@@ -39,6 +39,7 @@ extern void do_signal(struct pt_regs *regs);
 
 #define __ARCH_HAS_SA_RESTORER
 
+#include <asm/asm.h>
 #include <uapi/asm/sigcontext.h>
 
 #ifdef __i386__
@@ -86,9 +87,9 @@ static inline int __const_sigismember(sigset_t *set, int _sig)
 
 static inline int __gen_sigismember(sigset_t *set, int _sig)
 {
-	unsigned char ret;
-	asm("btl %2,%1\n\tsetc %0"
-	    : "=qm"(ret) : "m"(*set), "Ir"(_sig-1) : "cc");
+	bool ret;
+	asm("btl %2,%1" CC_SET(c)
+	    : CC_OUT(c) (ret) : "m"(*set), "Ir"(_sig-1));
 	return ret;
 }
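
CC_SET()/CC_OUT() let the compiler consume the carry flag directly as an asm output on toolchains that support flag-output constraints, instead of materializing it with setc into a byte register. A stand-alone sketch of the underlying constraint, written without the kernel's macros and assuming a compiler that defines __GCC_ASM_FLAG_OUTPUTS__; the function name is made up for illustration:

#include <stdbool.h>
#include <stdio.h>

/* "=@ccc" reads the result straight from the carry flag set by bt. */
static bool test_bit_cf(unsigned int word, unsigned int nr)
{
	bool ret;

	asm("btl %2, %1" : "=@ccc" (ret) : "r" (word), "Ir" (nr));
	return ret;
}

int main(void)
{
	printf("%d %d\n", test_bit_cf(0x10, 4), test_bit_cf(0x10, 3));	/* 1 0 */
	return 0;
}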

arch/x86/include/asm/stacktrace.h

@@ -111,6 +111,6 @@ static inline unsigned long caller_frame_pointer(void)
 	return (unsigned long)frame;
 }
 
-void show_opcodes(u8 *rip, const char *loglvl);
+void show_opcodes(struct pt_regs *regs, const char *loglvl);
 void show_ip(struct pt_regs *regs, const char *loglvl);
 #endif /* _ASM_X86_STACKTRACE_H */

arch/x86/include/asm/tlbflush.h

@@ -175,8 +175,16 @@ struct tlb_state {
 	 * are on. This means that it may not match current->active_mm,
 	 * which will contain the previous user mm when we're in lazy TLB
 	 * mode even if we've already switched back to swapper_pg_dir.
+	 *
+	 * During switch_mm_irqs_off(), loaded_mm will be set to
+	 * LOADED_MM_SWITCHING during the brief interrupts-off window
+	 * when CR3 and loaded_mm would otherwise be inconsistent. This
+	 * is for nmi_uaccess_okay()'s benefit.
 	 */
 	struct mm_struct *loaded_mm;
+
+#define LOADED_MM_SWITCHING ((struct mm_struct *)1)
+
 	u16 loaded_mm_asid;
 	u16 next_asid;
 	/* last user mm's ctx id */
@@ -246,6 +254,38 @@ struct tlb_state {
 };
 DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
 
+/*
+ * Blindly accessing user memory from NMI context can be dangerous
+ * if we're in the middle of switching the current user task or
+ * switching the loaded mm. It can also be dangerous if we
+ * interrupted some kernel code that was temporarily using a
+ * different mm.
+ */
+static inline bool nmi_uaccess_okay(void)
+{
+	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
+	struct mm_struct *current_mm = current->mm;
+
+	VM_WARN_ON_ONCE(!loaded_mm);
+
+	/*
+	 * The condition we want to check is
+	 * current_mm->pgd == __va(read_cr3_pa()). This may be slow, though,
+	 * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
+	 * is supposed to be reasonably fast.
+	 *
+	 * Instead, we check the almost equivalent but somewhat conservative
+	 * condition below, and we rely on the fact that switch_mm_irqs_off()
+	 * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3.
+	 */
+	if (loaded_mm != current_mm)
+		return false;
+
+	VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));
+
+	return true;
+}
+
 /* Initialize cr4 shadow for this CPU. */
 static inline void cr4_init_shadow(void)
 {
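
The helper exists so that NMI-context code touching user memory can bail out while loaded_mm and CR3 are transiently inconsistent. A rough sketch of the intended call pattern; the wrapper name and shape are made up, while nmi_uaccess_okay() and the uaccess helpers are the real kernel interfaces:

#include <linux/uaccess.h>
#include <asm/tlbflush.h>

/* Illustrative NMI-context reader, not the kernel's implementation. */
static bool read_user_word_in_nmi(const void __user *addr, unsigned long *val)
{
	unsigned long left;

	/* Give up while CR3 and loaded_mm may be mid-switch. */
	if (!nmi_uaccess_okay())
		return false;

	pagefault_disable();
	left = __copy_from_user_inatomic(val, addr, sizeof(*val));
	pagefault_enable();

	return left == 0;
}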

arch/x86/include/asm/vgtod.h

@@ -93,7 +93,7 @@ static inline unsigned int __getcpu(void)
 	 *
 	 * If RDPID is available, use it.
 	 */
-	alternative_io ("lsl %[p],%[seg]",
+	alternative_io ("lsl %[seg],%[p]",
 		".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
 		X86_FEATURE_RDPID,
 		[p] "=a" (p), [seg] "r" (__PER_CPU_SEG));
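
In AT&T syntax the source operand comes first, so loading the segment limit of the per-CPU selector into p must be written "lsl %[seg],%[p]"; the old ordering named p as the source and the read-only seg operand as the destination. A stand-alone sketch of the corrected form; the function name and the 12-bit CPU field in the limit are assumptions based on the vDSO getcpu scheme, not taken from this diff:

/* Hypothetical illustration: read a segment limit and extract a CPU number. */
static inline unsigned int cpu_from_segment_limit(unsigned int selector)
{
	unsigned int p;

	/* AT&T order: lsl <source selector>, <destination register>. */
	asm("lsl %[seg], %[p]" : [p] "=r" (p) : [seg] "r" (selector));

	return p & 0xfff;	/* low 12 bits of the limit assumed to hold the CPU */
}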