Merge 5.10.93 into android12-5.10-lts

Changes in 5.10.93
	kbuild: Add $(KBUILD_HOSTLDFLAGS) to 'has_libelf' test
	devtmpfs regression fix: reconfigure on each mount
	orangefs: Fix the size of a memory allocation in orangefs_bufmap_alloc()
	remoteproc: qcom: pil_info: Don't memcpy_toio more than is provided
	vfs: fs_context: fix up param length parsing in legacy_parse_param
	perf: Protect perf_guest_cbs with RCU
	KVM: x86: Register Processor Trace interrupt hook iff PT enabled in guest
	KVM: s390: Clarify SIGP orders versus STOP/RESTART
	9p: only copy valid iattrs in 9P2000.L setattr implementation
	video: vga16fb: Only probe for EGA and VGA 16 color graphic cards
	media: uvcvideo: fix division by zero at stream start
	rtlwifi: rtl8192cu: Fix WARNING when calling local_irq_restore() with interrupts enabled
	firmware: qemu_fw_cfg: fix sysfs information leak
	firmware: qemu_fw_cfg: fix NULL-pointer deref on duplicate entries
	firmware: qemu_fw_cfg: fix kobject leak in probe error path
	KVM: x86: remove PMU FIXED_CTR3 from msrs_to_save_all
	ALSA: hda/realtek: Add speaker fixup for some Yoga 15ITL5 devices
	ALSA: hda/realtek - Fix silent output on Gigabyte X570 Aorus Master after reboot from Windows
	ALSA: hda: ALC287: Add Lenovo IdeaPad Slim 9i 14ITL5 speaker quirk
	ALSA: hda/realtek: Add quirk for Legion Y9000X 2020
	ALSA: hda/realtek: Re-order quirk entries for Lenovo
	powerpc/pseries: Get entry and uaccess flush required bits from H_GET_CPU_CHARACTERISTICS
	mtd: fixup CFI on ixp4xx
	Linux 5.10.93

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I6913f176d30f4c258f45327bd9bcb50deefcea98
Greg Kroah-Hartman
2022-01-20 09:34:45 +01:00
33 changed files with 267 additions and 75 deletions

View File

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 92
+SUBLEVEL = 93
 EXTRAVERSION =
 NAME = Dare mighty things

View File

@@ -62,9 +62,10 @@ user_backtrace(struct frame_tail __user *tail,
 void
 perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
+	struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
 	struct frame_tail __user *tail;
 
-	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+	if (guest_cbs && guest_cbs->is_in_guest()) {
 		/* We don't support guest os callchain now */
 		return;
 	}
@@ -98,9 +99,10 @@ callchain_trace(struct stackframe *fr,
 void
 perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
+	struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
 	struct stackframe fr;
 
-	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+	if (guest_cbs && guest_cbs->is_in_guest()) {
 		/* We don't support guest os callchain now */
 		return;
 	}
@@ -111,18 +113,21 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
 
 unsigned long perf_instruction_pointer(struct pt_regs *regs)
 {
-	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
-		return perf_guest_cbs->get_guest_ip();
+	struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+
+	if (guest_cbs && guest_cbs->is_in_guest())
+		return guest_cbs->get_guest_ip();
 
 	return instruction_pointer(regs);
 }
 
 unsigned long perf_misc_flags(struct pt_regs *regs)
 {
+	struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
 	int misc = 0;
 
-	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
-		if (perf_guest_cbs->is_user_mode())
+	if (guest_cbs && guest_cbs->is_in_guest()) {
+		if (guest_cbs->is_user_mode())
 			misc |= PERF_RECORD_MISC_GUEST_USER;
 		else
 			misc |= PERF_RECORD_MISC_GUEST_KERNEL;

View File

@@ -102,7 +102,9 @@ compat_user_backtrace(struct compat_frame_tail __user *tail,
 void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 			 struct pt_regs *regs)
 {
-	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+	struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+
+	if (guest_cbs && guest_cbs->is_in_guest()) {
 		/* We don't support guest os callchain now */
 		return;
 	}
@@ -147,9 +149,10 @@ static bool callchain_trace(void *data, unsigned long pc)
 void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 			   struct pt_regs *regs)
 {
+	struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
 	struct stackframe frame;
 
-	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+	if (guest_cbs && guest_cbs->is_in_guest()) {
 		/* We don't support guest os callchain now */
 		return;
 	}
@@ -160,18 +163,21 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 
 unsigned long perf_instruction_pointer(struct pt_regs *regs)
 {
-	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
-		return perf_guest_cbs->get_guest_ip();
+	struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+
+	if (guest_cbs && guest_cbs->is_in_guest())
+		return guest_cbs->get_guest_ip();
 
 	return instruction_pointer(regs);
 }
 
 unsigned long perf_misc_flags(struct pt_regs *regs)
 {
+	struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
 	int misc = 0;
 
-	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
-		if (perf_guest_cbs->is_user_mode())
+	if (guest_cbs && guest_cbs->is_in_guest()) {
+		if (guest_cbs->is_user_mode())
 			misc |= PERF_RECORD_MISC_GUEST_USER;
 		else
 			misc |= PERF_RECORD_MISC_GUEST_KERNEL;

View File

@@ -86,10 +86,11 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
 void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 			 struct pt_regs *regs)
 {
+	struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
 	unsigned long fp = 0;
 
 	/* C-SKY does not support virtualization. */
-	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
+	if (guest_cbs && guest_cbs->is_in_guest())
 		return;
 
 	fp = regs->regs[4];
@@ -110,10 +111,11 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 			   struct pt_regs *regs)
 {
+	struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
 	struct stackframe fr;
 
 	/* C-SKY does not support virtualization. */
-	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+	if (guest_cbs && guest_cbs->is_in_guest()) {
 		pr_warn("C-SKY does not support perf in guest mode!");
 		return;
 	}

View File

@@ -1363,6 +1363,7 @@ void
 perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 		    struct pt_regs *regs)
 {
+	struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
 	unsigned long fp = 0;
 	unsigned long gp = 0;
 	unsigned long lp = 0;
@@ -1371,7 +1372,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 
 	leaf_fp = 0;
 
-	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+	if (guest_cbs && guest_cbs->is_in_guest()) {
 		/* We don't support guest os callchain now */
 		return;
 	}
@@ -1479,9 +1480,10 @@ void
 perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 		      struct pt_regs *regs)
 {
+	struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
 	struct stackframe fr;
 
-	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+	if (guest_cbs && guest_cbs->is_in_guest()) {
 		/* We don't support guest os callchain now */
 		return;
 	}
@@ -1493,20 +1495,23 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 
 unsigned long perf_instruction_pointer(struct pt_regs *regs)
 {
+	struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+
 	/* However, NDS32 does not support virtualization */
-	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
-		return perf_guest_cbs->get_guest_ip();
+	if (guest_cbs && guest_cbs->is_in_guest())
+		return guest_cbs->get_guest_ip();
 
 	return instruction_pointer(regs);
 }
 
 unsigned long perf_misc_flags(struct pt_regs *regs)
 {
+	struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
 	int misc = 0;
 
 	/* However, NDS32 does not support virtualization */
-	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
-		if (perf_guest_cbs->is_user_mode())
+	if (guest_cbs && guest_cbs->is_in_guest()) {
+		if (guest_cbs->is_user_mode())
 			misc |= PERF_RECORD_MISC_GUEST_USER;
 		else
 			misc |= PERF_RECORD_MISC_GUEST_KERNEL;

View File

@@ -382,6 +382,8 @@
 #define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR	(1ull << 61) // IBM bit 2
 #define H_CPU_BEHAV_FLUSH_COUNT_CACHE	(1ull << 58) // IBM bit 5
 #define H_CPU_BEHAV_FLUSH_LINK_STACK	(1ull << 57) // IBM bit 6
+#define H_CPU_BEHAV_NO_L1D_FLUSH_ENTRY	(1ull << 56) // IBM bit 7
+#define H_CPU_BEHAV_NO_L1D_FLUSH_UACCESS (1ull << 55) // IBM bit 8
 
 /* Flag values used in H_REGISTER_PROC_TBL hcall */
 #define PROC_TABLE_OP_MASK	0x18

View File

@@ -538,6 +538,12 @@ static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
 	if (!(result->behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
 		security_ftr_clear(SEC_FTR_L1D_FLUSH_PR);
 
+	if (result->behaviour & H_CPU_BEHAV_NO_L1D_FLUSH_ENTRY)
+		security_ftr_clear(SEC_FTR_L1D_FLUSH_ENTRY);
+
+	if (result->behaviour & H_CPU_BEHAV_NO_L1D_FLUSH_UACCESS)
+		security_ftr_clear(SEC_FTR_L1D_FLUSH_UACCESS);
+
 	if (!(result->behaviour & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR))
 		security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
 }

View File

@@ -60,10 +60,11 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
 void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 			 struct pt_regs *regs)
 {
+	struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
 	unsigned long fp = 0;
 
 	/* RISC-V does not support perf in guest mode. */
-	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
+	if (guest_cbs && guest_cbs->is_in_guest())
 		return;
 
 	fp = regs->s0;
@@ -84,8 +85,10 @@ void notrace walk_stackframe(struct task_struct *task,
 void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 			   struct pt_regs *regs)
 {
+	struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+
 	/* RISC-V does not support perf in guest mode. */
-	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+	if (guest_cbs && guest_cbs->is_in_guest()) {
 		pr_warn("RISC-V does not support perf in guest mode!");
 		return;
 	}

View File

@@ -2115,6 +2115,13 @@ int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
 	return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
 }
 
+int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu)
+{
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+
+	return test_bit(IRQ_PEND_RESTART, &li->pending_irqs);
+}
+
 void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

View File

@@ -4588,10 +4588,15 @@ int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
 		}
 	}
 
-	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
+	/*
+	 * Set the VCPU to STOPPED and THEN clear the interrupt flag,
+	 * now that the SIGP STOP and SIGP STOP AND STORE STATUS orders
+	 * have been fully processed. This will ensure that the VCPU
+	 * is kept BUSY if another VCPU is inquiring with SIGP SENSE.
+	 */
+	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
 	kvm_s390_clear_stop_irq(vcpu);
-	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
 	__disable_ibs_on_vcpu(vcpu);
 
 	for (i = 0; i < online_vcpus; i++) {

View File

@@ -418,6 +418,7 @@ void kvm_s390_destroy_adapters(struct kvm *kvm);
 int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu);
 extern struct kvm_device_ops kvm_flic_ops;
 int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu);
+int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu);
 void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu);
 int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu,
 			   void __user *buf, int len);

View File

@@ -288,6 +288,34 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
 	if (!dst_vcpu)
 		return SIGP_CC_NOT_OPERATIONAL;
 
+	/*
+	 * SIGP RESTART, SIGP STOP, and SIGP STOP AND STORE STATUS orders
+	 * are processed asynchronously. Until the affected VCPU finishes
+	 * its work and calls back into KVM to clear the (RESTART or STOP)
+	 * interrupt, we need to return any new non-reset orders "busy".
+	 *
+	 * This is important because a single VCPU could issue:
+	 *  1) SIGP STOP $DESTINATION
+	 *  2) SIGP SENSE $DESTINATION
+	 *
+	 * If the SIGP SENSE would not be rejected as "busy", it could
+	 * return an incorrect answer as to whether the VCPU is STOPPED
+	 * or OPERATING.
+	 */
+	if (order_code != SIGP_INITIAL_CPU_RESET &&
+	    order_code != SIGP_CPU_RESET) {
+		/*
+		 * Lockless check. Both SIGP STOP and SIGP (RE)START
+		 * properly synchronize everything while processing
+		 * their orders, while the guest cannot observe a
+		 * difference when issuing other orders from two
+		 * different VCPUs.
+		 */
+		if (kvm_s390_is_stop_irq_pending(dst_vcpu) ||
+		    kvm_s390_is_restart_irq_pending(dst_vcpu))
+			return SIGP_CC_BUSY;
+	}
+
 	switch (order_code) {
 	case SIGP_SENSE:
 		vcpu->stat.instruction_sigp_sense++;

View File

@@ -2545,10 +2545,11 @@ static bool perf_hw_regs(struct pt_regs *regs)
 void
 perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
+	struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
 	struct unwind_state state;
 	unsigned long addr;
 
-	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+	if (guest_cbs && guest_cbs->is_in_guest()) {
 		/* TODO: We don't support guest os callchain now */
 		return;
 	}
@@ -2648,10 +2649,11 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
 void
 perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
+	struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
 	struct stack_frame frame;
 	const struct stack_frame __user *fp;
 
-	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+	if (guest_cbs && guest_cbs->is_in_guest()) {
 		/* TODO: We don't support guest os callchain now */
 		return;
 	}
@@ -2728,18 +2730,21 @@ static unsigned long code_segment_base(struct pt_regs *regs)
 
 unsigned long perf_instruction_pointer(struct pt_regs *regs)
 {
-	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
-		return perf_guest_cbs->get_guest_ip();
+	struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+
+	if (guest_cbs && guest_cbs->is_in_guest())
+		return guest_cbs->get_guest_ip();
 
 	return regs->ip + code_segment_base(regs);
 }
 
 unsigned long perf_misc_flags(struct pt_regs *regs)
 {
+	struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
 	int misc = 0;
 
-	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
-		if (perf_guest_cbs->is_user_mode())
+	if (guest_cbs && guest_cbs->is_in_guest()) {
+		if (guest_cbs->is_user_mode())
 			misc |= PERF_RECORD_MISC_GUEST_USER;
 		else
 			misc |= PERF_RECORD_MISC_GUEST_KERNEL;

View File

@@ -2586,6 +2586,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
 {
 	struct perf_sample_data data;
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	struct perf_guest_info_callbacks *guest_cbs;
 	int bit;
 	int handled = 0;
@@ -2651,9 +2652,11 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
 	 */
 	if (__test_and_clear_bit(GLOBAL_STATUS_TRACE_TOPAPMI_BIT, (unsigned long *)&status)) {
 		handled++;
-		if (unlikely(perf_guest_cbs && perf_guest_cbs->is_in_guest() &&
-			perf_guest_cbs->handle_intel_pt_intr))
-			perf_guest_cbs->handle_intel_pt_intr();
+
+		guest_cbs = perf_get_guest_cbs();
+		if (unlikely(guest_cbs && guest_cbs->is_in_guest() &&
+			     guest_cbs->handle_intel_pt_intr))
+			guest_cbs->handle_intel_pt_intr();
 		else
 			intel_pt_interrupt();
 	}

View File

@@ -1306,6 +1306,7 @@ struct kvm_x86_init_ops {
 	int (*disabled_by_bios)(void);
 	int (*check_processor_compatibility)(void);
 	int (*hardware_setup)(void);
+	bool (*intel_pt_intr_in_guest)(void);
 
 	struct kvm_x86_ops *runtime_ops;
 };

View File

@@ -7915,6 +7915,7 @@ static struct kvm_x86_init_ops vmx_init_ops __initdata = {
 	.disabled_by_bios = vmx_disabled_by_bios,
 	.check_processor_compatibility = vmx_check_processor_compat,
 	.hardware_setup = hardware_setup,
+	.intel_pt_intr_in_guest = vmx_pt_mode_is_host_guest,
 
 	.runtime_ops = &vmx_x86_ops,
 };

View File

@@ -1229,7 +1229,7 @@ static const u32 msrs_to_save_all[] = {
 	MSR_IA32_UMWAIT_CONTROL,
 
 	MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1,
-	MSR_ARCH_PERFMON_FIXED_CTR0 + 2, MSR_ARCH_PERFMON_FIXED_CTR0 + 3,
+	MSR_ARCH_PERFMON_FIXED_CTR0 + 2,
 	MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS,
 	MSR_CORE_PERF_GLOBAL_CTRL, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
 	MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1,
@@ -7882,7 +7882,7 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = {
 	.is_in_guest		= kvm_is_in_guest,
 	.is_user_mode		= kvm_is_user_mode,
 	.get_guest_ip		= kvm_get_guest_ip,
-	.handle_intel_pt_intr	= kvm_handle_intel_pt_intr,
+	.handle_intel_pt_intr	= NULL,
 };
 
 #ifdef CONFIG_X86_64
@@ -8005,6 +8005,8 @@ int kvm_arch_init(void *opaque)
 			PT_PRESENT_MASK, 0, sme_me_mask);
 	kvm_timer_init();
 
+	if (ops->intel_pt_intr_in_guest && ops->intel_pt_intr_in_guest())
+		kvm_guest_cbs.handle_intel_pt_intr = kvm_handle_intel_pt_intr;
 	perf_register_guest_info_callbacks(&kvm_guest_cbs);
 
 	if (boot_cpu_has(X86_FEATURE_XSAVE)) {
@@ -8042,6 +8044,7 @@ void kvm_arch_exit(void)
 #endif
 	kvm_lapic_exit();
 	perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
+	kvm_guest_cbs.handle_intel_pt_intr = NULL;
 
 	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
 		cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,

View File

@@ -59,8 +59,15 @@ static struct dentry *public_dev_mount(struct file_system_type *fs_type, int fla
 				       const char *dev_name, void *data)
 {
 	struct super_block *s = mnt->mnt_sb;
+	int err;
+
 	atomic_inc(&s->s_active);
 	down_write(&s->s_umount);
+	err = reconfigure_single(s, flags, data);
+	if (err < 0) {
+		deactivate_locked_super(s);
+		return ERR_PTR(err);
+	}
 	return dget(s->s_root);
 }

View File

@@ -388,9 +388,7 @@ static void fw_cfg_sysfs_cache_cleanup(void)
 	struct fw_cfg_sysfs_entry *entry, *next;
 
 	list_for_each_entry_safe(entry, next, &fw_cfg_entry_cache, list) {
-		/* will end up invoking fw_cfg_sysfs_cache_delist()
-		 * via each object's release() method (i.e. destructor)
-		 */
+		fw_cfg_sysfs_cache_delist(entry);
 		kobject_put(&entry->kobj);
 	}
 }
@@ -448,7 +446,6 @@ static void fw_cfg_sysfs_release_entry(struct kobject *kobj)
 {
 	struct fw_cfg_sysfs_entry *entry = to_entry(kobj);
 
-	fw_cfg_sysfs_cache_delist(entry);
 	kfree(entry);
 }
@@ -601,20 +598,18 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f)
 	/* set file entry information */
 	entry->size = be32_to_cpu(f->size);
 	entry->select = be16_to_cpu(f->select);
-	memcpy(entry->name, f->name, FW_CFG_MAX_FILE_PATH);
+	strscpy(entry->name, f->name, FW_CFG_MAX_FILE_PATH);
 
 	/* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */
 	err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype,
 				   fw_cfg_sel_ko, "%d", entry->select);
-	if (err) {
-		kobject_put(&entry->kobj);
-		return err;
-	}
+	if (err)
+		goto err_put_entry;
 
 	/* add raw binary content access */
 	err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw);
 	if (err)
-		goto err_add_raw;
+		goto err_del_entry;
 
 	/* try adding "/sys/firmware/qemu_fw_cfg/by_name/" symlink */
 	fw_cfg_build_symlink(fw_cfg_fname_kset, &entry->kobj, entry->name);
@@ -623,9 +618,10 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f)
 	fw_cfg_sysfs_cache_enlist(entry);
 	return 0;
 
-err_add_raw:
+err_del_entry:
 	kobject_del(&entry->kobj);
-	kfree(entry);
+err_put_entry:
+	kobject_put(&entry->kobj);
 	return err;
 }

View File

@@ -1915,6 +1915,10 @@ static int uvc_video_start_transfer(struct uvc_streaming *stream,
 		if (ep == NULL)
 			return -EIO;
 
+		/* Reject broken descriptors. */
+		if (usb_endpoint_maxp(&ep->desc) == 0)
+			return -EIO;
+
 		ret = uvc_init_video_bulk(stream, ep, gfp_flags);
 	}

View File

@@ -55,12 +55,14 @@ choice
 	  LITTLE_ENDIAN_BYTE, if the bytes are reversed.
 
 config MTD_CFI_NOSWAP
+	depends on !ARCH_IXP4XX || CPU_BIG_ENDIAN
 	bool "NO"
 
 config MTD_CFI_BE_BYTE_SWAP
 	bool "BIG_ENDIAN_BYTE"
 
 config MTD_CFI_LE_BYTE_SWAP
+	depends on !ARCH_IXP4XX
 	bool "LITTLE_ENDIAN_BYTE"
 
 endchoice

View File

@@ -325,7 +325,7 @@ config MTD_DC21285
 config MTD_IXP4XX
 	tristate "CFI Flash device mapped on Intel IXP4xx based systems"
-	depends on MTD_CFI && MTD_COMPLEX_MAPPINGS && ARCH_IXP4XX
+	depends on MTD_CFI && MTD_COMPLEX_MAPPINGS && ARCH_IXP4XX && MTD_CFI_ADV_OPTIONS
 	help
 	  This enables MTD access to flash devices on platforms based
 	  on Intel's IXP4xx family of network processors such as the

View File

@@ -1000,6 +1000,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
 	_initpabias(hw);
 	rtl92c_dm_init(hw);
 exit:
+	local_irq_disable();
 	local_irq_restore(flags);
 	return err;
 }

View File

@@ -104,7 +104,7 @@ int qcom_pil_info_store(const char *image, phys_addr_t base, size_t size)
 	return -ENOMEM;
 
 found_unused:
-	memcpy_toio(entry, image, PIL_RELOC_NAME_LEN);
+	memcpy_toio(entry, image, strnlen(image, PIL_RELOC_NAME_LEN));
 found_existing:
 	/* Use two writel() as base is only aligned to 4 bytes on odd entries */
 	writel(base, entry + PIL_RELOC_NAME_LEN);

View File

@@ -184,6 +184,25 @@ static inline void setindex(int index)
 	vga_io_w(VGA_GFX_I, index);
 }
 
+/* Check if the video mode is supported by the driver */
+static inline int check_mode_supported(void)
+{
+	/* non-x86 architectures treat orig_video_isVGA as a boolean flag */
+#if defined(CONFIG_X86)
+	/* only EGA and VGA in 16 color graphic mode are supported */
+	if (screen_info.orig_video_isVGA != VIDEO_TYPE_EGAC &&
+	    screen_info.orig_video_isVGA != VIDEO_TYPE_VGAC)
+		return -ENODEV;
+
+	if (screen_info.orig_video_mode != 0x0D &&	/* 320x200/4 (EGA) */
+	    screen_info.orig_video_mode != 0x0E &&	/* 640x200/4 (EGA) */
+	    screen_info.orig_video_mode != 0x10 &&	/* 640x350/4 (EGA) */
+	    screen_info.orig_video_mode != 0x12)	/* 640x480/4 (VGA) */
+		return -ENODEV;
+#endif
+	return 0;
+}
+
 static void vga16fb_pan_var(struct fb_info *info,
 			    struct fb_var_screeninfo *var)
 {
@@ -1422,6 +1441,11 @@ static int __init vga16fb_init(void)
 	vga16fb_setup(option);
 #endif
 
+	ret = check_mode_supported();
+	if (ret)
+		return ret;
+
 	ret = platform_driver_register(&vga16fb_driver);
 
 	if (!ret) {

View File

@@ -541,7 +541,10 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
 {
 	int retval;
 	struct p9_fid *fid = NULL;
-	struct p9_iattr_dotl p9attr;
+	struct p9_iattr_dotl p9attr = {
+		.uid = INVALID_UID,
+		.gid = INVALID_GID,
+	};
 	struct inode *inode = d_inode(dentry);
 
 	p9_debug(P9_DEBUG_VFS, "\n");
@@ -551,14 +554,22 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
 		return retval;
 
 	p9attr.valid = v9fs_mapped_iattr_valid(iattr->ia_valid);
-	p9attr.mode = iattr->ia_mode;
-	p9attr.uid = iattr->ia_uid;
-	p9attr.gid = iattr->ia_gid;
-	p9attr.size = iattr->ia_size;
-	p9attr.atime_sec = iattr->ia_atime.tv_sec;
-	p9attr.atime_nsec = iattr->ia_atime.tv_nsec;
-	p9attr.mtime_sec = iattr->ia_mtime.tv_sec;
-	p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec;
+	if (iattr->ia_valid & ATTR_MODE)
+		p9attr.mode = iattr->ia_mode;
+	if (iattr->ia_valid & ATTR_UID)
+		p9attr.uid = iattr->ia_uid;
+	if (iattr->ia_valid & ATTR_GID)
+		p9attr.gid = iattr->ia_gid;
+	if (iattr->ia_valid & ATTR_SIZE)
+		p9attr.size = iattr->ia_size;
+	if (iattr->ia_valid & ATTR_ATIME_SET) {
+		p9attr.atime_sec = iattr->ia_atime.tv_sec;
+		p9attr.atime_nsec = iattr->ia_atime.tv_nsec;
+	}
+	if (iattr->ia_valid & ATTR_MTIME_SET) {
+		p9attr.mtime_sec = iattr->ia_mtime.tv_sec;
+		p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec;
+	}
 
 	if (iattr->ia_valid & ATTR_FILE) {
 		fid = iattr->ia_file->private_data;

View File

@@ -530,7 +530,7 @@ static int legacy_parse_param(struct fs_context *fc, struct fs_parameter *param)
 			      param->key);
 	}
 
-	if (len > PAGE_SIZE - 2 - size)
+	if (size + len + 2 > PAGE_SIZE)
 		return invalf(fc, "VFS: Legacy: Cumulative options too large");
 	if (strchr(param->key, ',') ||
 	    (param->type == fs_value_is_string &&

View File

@@ -176,7 +176,7 @@ orangefs_bufmap_free(struct orangefs_bufmap *bufmap)
 {
 	kfree(bufmap->page_array);
 	kfree(bufmap->desc_array);
-	kfree(bufmap->buffer_index_array);
+	bitmap_free(bufmap->buffer_index_array);
 	kfree(bufmap);
 }
@@ -226,8 +226,7 @@ orangefs_bufmap_alloc(struct ORANGEFS_dev_map_desc *user_desc)
 	bufmap->desc_size = user_desc->size;
 	bufmap->desc_shift = ilog2(bufmap->desc_size);
 
-	bufmap->buffer_index_array =
-		kzalloc(DIV_ROUND_UP(bufmap->desc_count, BITS_PER_LONG), GFP_KERNEL);
+	bufmap->buffer_index_array = bitmap_zalloc(bufmap->desc_count, GFP_KERNEL);
 	if (!bufmap->buffer_index_array)
 		goto out_free_bufmap;
 
@@ -250,7 +249,7 @@ orangefs_bufmap_alloc(struct ORANGEFS_dev_map_desc *user_desc)
 out_free_desc_array:
 	kfree(bufmap->desc_array);
 out_free_index_array:
-	kfree(bufmap->buffer_index_array);
+	bitmap_free(bufmap->buffer_index_array);
 out_free_bufmap:
 	kfree(bufmap);
 out:

View File

@@ -1472,8 +1472,8 @@ struct dentry *mount_nodev(struct file_system_type *fs_type,
 }
 EXPORT_SYMBOL(mount_nodev);
 
-static int reconfigure_single(struct super_block *s,
-			      int flags, void *data)
+int reconfigure_single(struct super_block *s,
+		       int flags, void *data)
 {
 	struct fs_context *fc;
 	int ret;

View File

@@ -140,6 +140,8 @@ extern int generic_parse_monolithic(struct fs_context *fc, void *data);
 extern int vfs_get_tree(struct fs_context *fc);
 extern void put_fs_context(struct fs_context *fc);
 extern void fc_drop_locked(struct fs_context *fc);
+int reconfigure_single(struct super_block *s,
+		       int flags, void *data);
 
 /*
  * sget() wrappers to be called from the ->get_tree() op.

View File

@@ -1235,7 +1235,18 @@ extern void perf_event_bpf_event(struct bpf_prog *prog,
 				 enum perf_bpf_event_type type,
 				 u16 flags);
 
-extern struct perf_guest_info_callbacks *perf_guest_cbs;
+extern struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
+
+static inline struct perf_guest_info_callbacks *perf_get_guest_cbs(void)
+{
+	/*
+	 * Callbacks are RCU-protected and must be READ_ONCE to avoid reloading
+	 * the callbacks between a !NULL check and dereferences, to ensure
+	 * pending stores/changes to the callback pointers are visible before a
+	 * non-NULL perf_guest_cbs is visible to readers, and to prevent a
+	 * module from unloading callbacks while readers are active.
+	 */
+	return rcu_dereference(perf_guest_cbs);
+}
 extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
 extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

View File

@@ -6396,18 +6396,25 @@ static void perf_pending_event(struct irq_work *entry)
  * Later on, we might change it to a list if there is
  * another virtualization implementation supporting the callbacks.
  */
-struct perf_guest_info_callbacks *perf_guest_cbs;
+struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
 
 int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
 {
-	perf_guest_cbs = cbs;
+	if (WARN_ON_ONCE(rcu_access_pointer(perf_guest_cbs)))
+		return -EBUSY;
+
+	rcu_assign_pointer(perf_guest_cbs, cbs);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
 
 int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
 {
-	perf_guest_cbs = NULL;
+	if (WARN_ON_ONCE(rcu_access_pointer(perf_guest_cbs) != cbs))
+		return -EINVAL;
+
+	rcu_assign_pointer(perf_guest_cbs, NULL);
+	synchronize_rcu();
 	return 0;
 }
 EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);

View File

@@ -1936,6 +1936,7 @@ enum {
 	ALC887_FIXUP_ASUS_BASS,
 	ALC887_FIXUP_BASS_CHMAP,
 	ALC1220_FIXUP_GB_DUAL_CODECS,
+	ALC1220_FIXUP_GB_X570,
 	ALC1220_FIXUP_CLEVO_P950,
 	ALC1220_FIXUP_CLEVO_PB51ED,
 	ALC1220_FIXUP_CLEVO_PB51ED_PINS,
@@ -2125,6 +2126,29 @@ static void alc1220_fixup_gb_dual_codecs(struct hda_codec *codec,
 	}
 }
 
+static void alc1220_fixup_gb_x570(struct hda_codec *codec,
+				     const struct hda_fixup *fix,
+				     int action)
+{
+	static const hda_nid_t conn1[] = { 0x0c };
+	static const struct coef_fw gb_x570_coefs[] = {
+		WRITE_COEF(0x1a, 0x01c1),
+		WRITE_COEF(0x1b, 0x0202),
+		WRITE_COEF(0x43, 0x3005),
+		{}
+	};
+
+	switch (action) {
+	case HDA_FIXUP_ACT_PRE_PROBE:
+		snd_hda_override_conn_list(codec, 0x14, ARRAY_SIZE(conn1), conn1);
+		snd_hda_override_conn_list(codec, 0x1b, ARRAY_SIZE(conn1), conn1);
+		break;
+	case HDA_FIXUP_ACT_INIT:
+		alc_process_coef_fw(codec, gb_x570_coefs);
+		break;
+	}
+}
+
 static void alc1220_fixup_clevo_p950(struct hda_codec *codec,
 				     const struct hda_fixup *fix,
 				     int action)
@@ -2427,6 +2451,10 @@ static const struct hda_fixup alc882_fixups[] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc1220_fixup_gb_dual_codecs,
 	},
+	[ALC1220_FIXUP_GB_X570] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = alc1220_fixup_gb_x570,
+	},
 	[ALC1220_FIXUP_CLEVO_P950] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc1220_fixup_clevo_p950,
@@ -2529,7 +2557,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x13fe, 0x1009, "Advantech MIT-W101", ALC886_FIXUP_EAPD),
 	SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
-	SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950),
+	SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_GB_X570),
 	SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_CLEVO_P950),
 	SND_PCI_QUIRK(0x1462, 0x11f7, "MSI-GE63", ALC1220_FIXUP_CLEVO_P950),
 	SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950),
@@ -6729,6 +6757,8 @@ enum {
 	ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
 	ALC233_FIXUP_NO_AUDIO_JACK,
 	ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME,
+	ALC285_FIXUP_LEGION_Y9000X_SPEAKERS,
+	ALC285_FIXUP_LEGION_Y9000X_AUTOMUTE,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -8319,6 +8349,18 @@ static const struct hda_fixup alc269_fixups[] = {
 		.chained = true,
 		.chain_id = ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
 	},
+	[ALC285_FIXUP_LEGION_Y9000X_SPEAKERS] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = alc285_fixup_ideapad_s740_coef,
+		.chained = true,
+		.chain_id = ALC285_FIXUP_LEGION_Y9000X_AUTOMUTE,
+	},
+	[ALC285_FIXUP_LEGION_Y9000X_AUTOMUTE] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = alc287_fixup_legion_15imhg05_speakers,
+		.chained = true,
+		.chain_id = ALC269_FIXUP_THINKPAD_ACPI,
+	},
 	[ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS] = {
 		.type = HDA_FIXUP_VERBS,
 		//.v.verbs = legion_15imhg05_coefs,
@@ -8857,13 +8899,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
 	SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
 	SND_PCI_QUIRK(0x17aa, 0x31af, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340),
-	SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
-	SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
-	SND_PCI_QUIRK(0x17aa, 0x3843, "Yoga 9i", ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP),
 	SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS),
+	SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
+	SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS),
+	SND_PCI_QUIRK(0x17aa, 0x3824, "Legion Y9000X 2020", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS),
+	SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
+	SND_PCI_QUIRK(0x17aa, 0x3834, "Lenovo IdeaPad Slim 9i 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+	SND_PCI_QUIRK(0x17aa, 0x3843, "Yoga 9i", ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP),
+	SND_PCI_QUIRK(0x17aa, 0x384a, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
 	SND_PCI_QUIRK(0x17aa, 0x3852, "Lenovo Yoga 7 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
 	SND_PCI_QUIRK(0x17aa, 0x3853, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
-	SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS),
 	SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
 	SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
 	SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),