Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm updates from Paolo Bonzini:
 "ARM:
   - Move the arch-specific code into arch/arm64/kvm
   - Start the post-32bit cleanup
   - Cherry-pick a few non-invasive pre-NV patches

  x86:
   - Rework of TLB flushing
   - Rework of event injection, especially with respect to nested virtualization
   - Nested AMD event injection facelift, building on the rework of generic code and fixing a lot of corner cases
   - Nested AMD live migration support
   - Optimization for TSC deadline MSR writes and IPIs
   - Various cleanups
   - Asynchronous page fault cleanups (from tglx, common topic branch with tip tree)
   - Interrupt-based delivery of asynchronous "page ready" events (host side)
   - Hyper-V MSRs and hypercalls for guest debugging
   - VMX preemption timer fixes

  s390:
   - Cleanups

  Generic:
   - switch vCPU thread wakeup from swait to rcuwait

  The other architectures, and the guest side of the asynchronous page fault work, will come next week"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (256 commits)
  KVM: selftests: fix rdtsc() for vmx_tsc_adjust_test
  KVM: check userspace_addr for all memslots
  KVM: selftests: update hyperv_cpuid with SynDBG tests
  x86/kvm/hyper-v: Add support for synthetic debugger via hypercalls
  x86/kvm/hyper-v: enable hypercalls regardless of hypercall page
  x86/kvm/hyper-v: Add support for synthetic debugger interface
  x86/hyper-v: Add synthetic debugger definitions
  KVM: selftests: VMX preemption timer migration test
  KVM: nVMX: Fix VMX preemption timer migration
  x86/kvm/hyper-v: Explicitly align hcall param for kvm_hyperv_exit
  KVM: x86/pmu: Support full width counting
  KVM: x86/pmu: Tweak kvm_pmu_get_msr to pass 'struct msr_data' in
  KVM: x86: announce KVM_FEATURE_ASYNC_PF_INT
  KVM: x86: acknowledgment mechanism for async pf page ready notifications
  KVM: x86: interrupt based APF 'page ready' event delivery
  KVM: introduce kvm_read_guest_offset_cached()
  KVM: rename kvm_arch_can_inject_async_page_present() to kvm_arch_can_dequeue_async_page_present()
  KVM: x86: extend struct kvm_vcpu_pv_apf_data with token info
  Revert "KVM: async_pf: Fix #DF due to inject "Page not Present" and "Page Ready" exceptions simultaneously"
  KVM: VMX: Replace zero-length array with flexible-array
  ...
@@ -228,8 +228,9 @@ repeat:
                 goto repeat;
 }
 
-void rcuwait_wake_up(struct rcuwait *w)
+int rcuwait_wake_up(struct rcuwait *w)
 {
+        int ret = 0;
         struct task_struct *task;
 
         rcu_read_lock();
@@ -237,7 +238,7 @@ void rcuwait_wake_up(struct rcuwait *w)
         /*
          * Order condition vs @task, such that everything prior to the load
          * of @task is visible. This is the condition as to why the user called
-         * rcuwait_trywake() in the first place. Pairs with set_current_state()
+         * rcuwait_wake() in the first place. Pairs with set_current_state()
          * barrier (A) in rcuwait_wait_event().
          *
          *        WAIT                WAKE
@@ -249,8 +250,10 @@ void rcuwait_wake_up(struct rcuwait *w)
 
         task = rcu_dereference(w->task);
         if (task)
-                wake_up_process(task);
+                ret = wake_up_process(task);
         rcu_read_unlock();
+
+        return ret;
 }
 EXPORT_SYMBOL_GPL(rcuwait_wake_up);
 
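For context, the hunk above makes rcuwait_wake_up() report whether a task was actually woken (the return value of wake_up_process()), which is useful to callers such as the KVM vCPU wakeup path that this merge switches from swait to rcuwait. Below is a minimal, purely illustrative waiter/waker sketch; struct foo, foo_wait() and foo_complete() are hypothetical, and it assumes the rcuwait API as it exists in this cycle (rcuwait_init(), the three-argument rcuwait_wait_event(), and the new int-returning rcuwait_wake_up()).

#include <linux/rcuwait.h>
#include <linux/sched.h>

/* Hypothetical container; not part of this merge. */
struct foo {
        struct rcuwait  wait;
        bool            done;
};

static void foo_init(struct foo *f)
{
        rcuwait_init(&f->wait);
        f->done = false;
}

/* Waiter: publishes current in w->task and sleeps until @done is observed true. */
static void foo_wait(struct foo *f)
{
        /* set_current_state() inside the macro provides barrier (A) from the comment above. */
        rcuwait_wait_event(&f->wait, READ_ONCE(f->done), TASK_UNINTERRUPTIBLE);
}

/* Waker: publishes the condition, then wakes; returns true if a sleeping task was woken. */
static bool foo_complete(struct foo *f)
{
        WRITE_ONCE(f->done, true);
        /* The barrier (B) inside rcuwait_wake_up() orders the store above against the load of w->task. */
        return rcuwait_wake_up(&f->wait) != 0;
}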
@@ -3616,13 +3616,10 @@ mark_held_locks(struct task_struct *curr, enum lock_usage_bit base_bit)
 /*
  * Hardirqs will be enabled:
  */
-static void __trace_hardirqs_on_caller(unsigned long ip)
+static void __trace_hardirqs_on_caller(void)
 {
         struct task_struct *curr = current;
 
-        /* we'll do an OFF -> ON transition: */
-        curr->hardirqs_enabled = 1;
-
         /*
          * We are going to turn hardirqs on, so set the
          * usage bit for all held locks:
@@ -3635,15 +3632,19 @@ static void __trace_hardirqs_on_caller(unsigned long ip)
          * this bit from being set before)
          */
         if (curr->softirqs_enabled)
-                if (!mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ))
-                        return;
-
-        curr->hardirq_enable_ip = ip;
-        curr->hardirq_enable_event = ++curr->irq_events;
-        debug_atomic_inc(hardirqs_on_events);
+                mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ);
 }
 
-void lockdep_hardirqs_on(unsigned long ip)
+/**
+ * lockdep_hardirqs_on_prepare - Prepare for enabling interrupts
+ * @ip: Caller address
+ *
+ * Invoked before a possible transition to RCU idle from exit to user or
+ * guest mode. This ensures that all RCU operations are done before RCU
+ * stops watching. After the RCU transition lockdep_hardirqs_on() has to be
+ * invoked to set the final state.
+ */
+void lockdep_hardirqs_on_prepare(unsigned long ip)
 {
         if (unlikely(!debug_locks || current->lockdep_recursion))
                 return;
@@ -3679,20 +3680,62 @@ void lockdep_hardirqs_on(unsigned long ip)
         if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
                 return;
 
+        current->hardirq_chain_key = current->curr_chain_key;
+
         current->lockdep_recursion++;
-        __trace_hardirqs_on_caller(ip);
+        __trace_hardirqs_on_caller();
         lockdep_recursion_finish();
 }
-NOKPROBE_SYMBOL(lockdep_hardirqs_on);
+EXPORT_SYMBOL_GPL(lockdep_hardirqs_on_prepare);
+
+void noinstr lockdep_hardirqs_on(unsigned long ip)
+{
+        struct task_struct *curr = current;
+
+        if (unlikely(!debug_locks || curr->lockdep_recursion))
+                return;
+
+        if (curr->hardirqs_enabled) {
+                /*
+                 * Neither irq nor preemption are disabled here
+                 * so this is racy by nature but losing one hit
+                 * in a stat is not a big deal.
+                 */
+                __debug_atomic_inc(redundant_hardirqs_on);
+                return;
+        }
+
+        /*
+         * We're enabling irqs and according to our state above irqs weren't
+         * already enabled, yet we find the hardware thinks they are in fact
+         * enabled.. someone messed up their IRQ state tracing.
+         */
+        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+                return;
+
+        /*
+         * Ensure the lock stack remained unchanged between
+         * lockdep_hardirqs_on_prepare() and lockdep_hardirqs_on().
+         */
+        DEBUG_LOCKS_WARN_ON(current->hardirq_chain_key !=
+                            current->curr_chain_key);
+
+        /* we'll do an OFF -> ON transition: */
+        curr->hardirqs_enabled = 1;
+        curr->hardirq_enable_ip = ip;
+        curr->hardirq_enable_event = ++curr->irq_events;
+        debug_atomic_inc(hardirqs_on_events);
+}
+EXPORT_SYMBOL_GPL(lockdep_hardirqs_on);
 
 /*
  * Hardirqs were disabled:
  */
-void lockdep_hardirqs_off(unsigned long ip)
+void noinstr lockdep_hardirqs_off(unsigned long ip)
 {
         struct task_struct *curr = current;
 
-        if (unlikely(!debug_locks || current->lockdep_recursion))
+        if (unlikely(!debug_locks || curr->lockdep_recursion))
                 return;
 
         /*
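The kernel-doc above spells out the staged approach: the RCU-using half (the prepare step) runs while RCU is still watching, and only the final, instrumentation-free state update runs after the transition. A rough, purely illustrative sketch of how an exit-to-user (or guest-entry) path is expected to sequence the calls; my_exit_to_user_mode() and the RCU-transition placeholder are hypothetical, not code from this merge, and the declarations come from <linux/irqflags.h>.

static void my_exit_to_user_mode(void)        /* hypothetical entry-code helper */
{
        /* Stage 1: RCU is still watching, so tracing and lockdep may use RCU. */
        trace_hardirqs_on_prepare();
        lockdep_hardirqs_on_prepare(CALLER_ADDR0);

        /* ... context tracking switches RCU to idle for user/guest mode ... */

        /*
         * Stage 2: final state update. lockdep_hardirqs_on() is noinstr and
         * avoids RCU, so it is safe after RCU has stopped watching.
         */
        lockdep_hardirqs_on(CALLER_ADDR0);
}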
@@ -3710,10 +3753,11 @@ void lockdep_hardirqs_off(unsigned long ip)
                 curr->hardirq_disable_ip = ip;
                 curr->hardirq_disable_event = ++curr->irq_events;
                 debug_atomic_inc(hardirqs_off_events);
-        } else
+        } else {
                 debug_atomic_inc(redundant_hardirqs_off);
+        }
 }
-NOKPROBE_SYMBOL(lockdep_hardirqs_off);
+EXPORT_SYMBOL_GPL(lockdep_hardirqs_off);
 
 /*
  * Softirqs will be enabled:
@@ -4389,8 +4433,8 @@ static void print_unlock_imbalance_bug(struct task_struct *curr,
         dump_stack();
 }
 
-static int match_held_lock(const struct held_lock *hlock,
-                           const struct lockdep_map *lock)
+static noinstr int match_held_lock(const struct held_lock *hlock,
+                                   const struct lockdep_map *lock)
 {
         if (hlock->instance == lock)
                 return 1;
@@ -4677,7 +4721,7 @@ __lock_release(struct lockdep_map *lock, unsigned long ip)
         return 0;
 }
 
-static nokprobe_inline
+static __always_inline
 int __lock_is_held(const struct lockdep_map *lock, int read)
 {
         struct task_struct *curr = current;
@@ -4937,7 +4981,7 @@ void lock_release(struct lockdep_map *lock, unsigned long ip)
 }
 EXPORT_SYMBOL_GPL(lock_release);
 
-int lock_is_held_type(const struct lockdep_map *lock, int read)
+noinstr int lock_is_held_type(const struct lockdep_map *lock, int read)
 {
         unsigned long flags;
         int ret = 0;
@@ -19,6 +19,24 @@
 /* Per-cpu variable to prevent redundant calls when IRQs already off */
 static DEFINE_PER_CPU(int, tracing_irq_cpu);
 
+/*
+ * Like trace_hardirqs_on() but without the lockdep invocation. This is
+ * used in the low level entry code where the ordering vs. RCU is important
+ * and lockdep uses a staged approach which splits the lockdep hardirq
+ * tracking into a RCU on and a RCU off section.
+ */
+void trace_hardirqs_on_prepare(void)
+{
+        if (this_cpu_read(tracing_irq_cpu)) {
+                if (!in_nmi())
+                        trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);
+                tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
+                this_cpu_write(tracing_irq_cpu, 0);
+        }
+}
+EXPORT_SYMBOL(trace_hardirqs_on_prepare);
+NOKPROBE_SYMBOL(trace_hardirqs_on_prepare);
+
 void trace_hardirqs_on(void)
 {
         if (this_cpu_read(tracing_irq_cpu)) {
@@ -28,11 +46,31 @@ void trace_hardirqs_on(void)
                 this_cpu_write(tracing_irq_cpu, 0);
         }
 
+        lockdep_hardirqs_on_prepare(CALLER_ADDR0);
         lockdep_hardirqs_on(CALLER_ADDR0);
 }
 EXPORT_SYMBOL(trace_hardirqs_on);
 NOKPROBE_SYMBOL(trace_hardirqs_on);
 
+/*
+ * Like trace_hardirqs_off() but without the lockdep invocation. This is
+ * used in the low level entry code where the ordering vs. RCU is important
+ * and lockdep uses a staged approach which splits the lockdep hardirq
+ * tracking into a RCU on and a RCU off section.
+ */
+void trace_hardirqs_off_prepare(void)
+{
+        if (!this_cpu_read(tracing_irq_cpu)) {
+                this_cpu_write(tracing_irq_cpu, 1);
+                tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
+                if (!in_nmi())
+                        trace_irq_disable(CALLER_ADDR0, CALLER_ADDR1);
+        }
+
+}
+EXPORT_SYMBOL(trace_hardirqs_off_prepare);
+NOKPROBE_SYMBOL(trace_hardirqs_off_prepare);
+
 void trace_hardirqs_off(void)
 {
         if (!this_cpu_read(tracing_irq_cpu)) {
@@ -56,6 +94,7 @@ __visible void trace_hardirqs_on_caller(unsigned long caller_addr)
                 this_cpu_write(tracing_irq_cpu, 0);
         }
 
+        lockdep_hardirqs_on_prepare(CALLER_ADDR0);
         lockdep_hardirqs_on(CALLER_ADDR0);
 }
 EXPORT_SYMBOL(trace_hardirqs_on_caller);