Merge 5.10.96 into android12-5.10-lts

Changes in 5.10.96
	Bluetooth: refactor malicious adv data check
	media: venus: core: Drop second v4l2 device unregister
	net: sfp: ignore disabled SFP node
	net: stmmac: skip only stmmac_ptp_register when resume from suspend
	s390/module: fix loading modules with a lot of relocations
	s390/hypfs: include z/VM guests with access control group set
	bpf: Guard against accessing NULL pt_regs in bpf_get_task_stack()
	scsi: zfcp: Fix failed recovery on gone remote port with non-NPIV FCP devices
	udf: Restore i_lenAlloc when inode expansion fails
	udf: Fix NULL ptr deref when converting from inline format
	efi: runtime: avoid EFIv2 runtime services on Apple x86 machines
	PM: wakeup: simplify the output logic of pm_show_wakelocks()
	tracing/histogram: Fix a potential memory leak for kstrdup()
	tracing: Don't inc err_log entry count if entry allocation fails
	ceph: properly put ceph_string reference after async create attempt
	ceph: set pool_ns in new inode layout for async creates
	fsnotify: fix fsnotify hooks in pseudo filesystems
	Revert "KVM: SVM: avoid infinite loop on NPF from bad address"
	perf/x86/intel/uncore: Fix CAS_COUNT_WRITE issue for ICX
	drm/etnaviv: relax submit size limits
	KVM: x86: Update vCPU's runtime CPUID on write to MSR_IA32_XSS
	arm64: errata: Fix exec handling in erratum 1418040 workaround
	netfilter: nft_payload: do not update layer 4 checksum when mangling fragments
	serial: 8250: of: Fix mapped region size when using reg-offset property
	serial: stm32: fix software flow control transfer
	tty: n_gsm: fix SW flow control encoding/handling
	tty: Add support for Brainboxes UC cards.
	usb-storage: Add unusual-devs entry for VL817 USB-SATA bridge
	usb: xhci-plat: fix crash when suspend if remote wake enable
	usb: common: ulpi: Fix crash in ulpi_match()
	usb: gadget: f_sourcesink: Fix isoc transfer for USB_SPEED_SUPER_PLUS
	USB: core: Fix hang in usb_kill_urb by adding memory barriers
	usb: typec: tcpm: Do not disconnect while receiving VBUS off
	ucsi_ccg: Check DEV_INT bit only when starting CCG4
	jbd2: export jbd2_journal_[grab|put]_journal_head
	ocfs2: fix a deadlock when commit trans
	sched/membarrier: Fix membarrier-rseq fence command missing from query bitmask
	x86/MCE/AMD: Allow thresholding interface updates after init
	powerpc/32s: Allocate one 256k IBAT instead of two consecutives 128k IBATs
	powerpc/32s: Fix kasan_init_region() for KASAN
	powerpc/32: Fix boot failure with GCC latent entropy plugin
	i40e: Increase delay to 1 s after global EMP reset
	i40e: Fix issue when maximum queues is exceeded
	i40e: Fix queues reservation for XDP
	i40e: Fix for failed to init adminq while VF reset
	i40e: fix unsigned stat widths
	usb: roles: fix include/linux/usb/role.h compile issue
	rpmsg: char: Fix race between the release of rpmsg_ctrldev and cdev
	rpmsg: char: Fix race between the release of rpmsg_eptdev and cdev
	scsi: bnx2fc: Flush destroy_work queue before calling bnx2fc_interface_put()
	ipv6_tunnel: Rate limit warning messages
	net: fix information leakage in /proc/net/ptype
	hwmon: (lm90) Mark alert as broken for MAX6646/6647/6649
	hwmon: (lm90) Mark alert as broken for MAX6680
	ping: fix the sk_bound_dev_if match in ping_lookup
	ipv4: avoid using shared IP generator for connected sockets
	hwmon: (lm90) Reduce maximum conversion rate for G781
	NFSv4: Handle case where the lookup of a directory fails
	NFSv4: nfs_atomic_open() can race when looking up a non-regular file
	net-procfs: show net devices bound packet types
	drm/msm: Fix wrong size calculation
	drm/msm/dsi: Fix missing put_device() call in dsi_get_phy
	drm/msm/dsi: invalid parameter check in msm_dsi_phy_enable
	ipv6: annotate accesses to fn->fn_sernum
	NFS: Ensure the server has an up to date ctime before hardlinking
	NFS: Ensure the server has an up to date ctime before renaming
	powerpc64/bpf: Limit 'ldbrx' to processors compliant with ISA v2.06
	netfilter: conntrack: don't increment invalid counter on NF_REPEAT
	kernel: delete repeated words in comments
	perf: Fix perf_event_read_local() time
	sched/pelt: Relax the sync of util_sum with util_avg
	net: phy: broadcom: hook up soft_reset for BCM54616S
	phylib: fix potential use-after-free
	octeontx2-pf: Forward error codes to VF
	rxrpc: Adjust retransmission backoff
	efi/libstub: arm64: Fix image check alignment at entry
	hwmon: (lm90) Mark alert as broken for MAX6654
	powerpc/perf: Fix power_pmu_disable to call clear_pmi_irq_pending only if PMI is pending
	net: ipv4: Move ip_options_fragment() out of loop
	net: ipv4: Fix the warning for dereference
	ipv4: fix ip option filtering for locally generated fragments
	ibmvnic: init ->running_cap_crqs early
	ibmvnic: don't spin in tasklet
	video: hyperv_fb: Fix validation of screen resolution
	drm/msm/hdmi: Fix missing put_device() call in msm_hdmi_get_phy
	drm/msm/dpu: invalid parameter check in dpu_setup_dspp_pcc
	yam: fix a memory leak in yam_siocdevprivate()
	net: cpsw: Properly initialise struct page_pool_params
	net: hns3: handle empty unknown interrupt for VF
	Revert "ipv6: Honor all IPv6 PIO Valid Lifetime values"
	net: bridge: vlan: fix single net device option dumping
	ipv4: raw: lock the socket in raw_bind()
	ipv4: tcp: send zero IPID in SYNACK messages
	ipv4: remove sparse error in ip_neigh_gw4()
	net: bridge: vlan: fix memory leak in __allowed_ingress
	dt-bindings: can: tcan4x5x: fix mram-cfg RX FIFO config
	usr/include/Makefile: add linux/nfc.h to the compile-test coverage
	fsnotify: invalidate dcache before IN_DELETE event
	block: Fix wrong offset in bio_truncate()
	mtd: rawnand: mpc5121: Remove unused variable in ads5121_select_chip()
	Linux 5.10.96

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ie34be06fa082557e93eda246f1a9ebf9f155a138

@@ -31,7 +31,7 @@ tcan4x5x: tcan4x5x@0 {
#address-cells = <1>;
#size-cells = <1>;
spi-max-frequency = <10000000>;
bosch,mram-cfg = <0x0 0 0 32 0 0 1 1>;
bosch,mram-cfg = <0x0 0 0 16 0 0 1 1>;
interrupt-parent = <&gpio1>;
interrupts = <14 IRQ_TYPE_LEVEL_LOW>;
device-state-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;


@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 95
SUBLEVEL = 96
EXTRAVERSION =
NAME = Dare mighty things


@@ -506,34 +506,26 @@ static void entry_task_switch(struct task_struct *next)
/*
* ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT.
* Assuming the virtual counter is enabled at the beginning of times:
*
* - disable access when switching from a 64bit task to a 32bit task
* - enable access when switching from a 32bit task to a 64bit task
* Ensure access is disabled when switching to a 32bit task, ensure
* access is enabled when switching to a 64bit task.
*/
static void erratum_1418040_thread_switch(struct task_struct *prev,
struct task_struct *next)
static void erratum_1418040_thread_switch(struct task_struct *next)
{
bool prev32, next32;
u64 val;
if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040))
if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) ||
!this_cpu_has_cap(ARM64_WORKAROUND_1418040))
return;
prev32 = is_compat_thread(task_thread_info(prev));
next32 = is_compat_thread(task_thread_info(next));
if (prev32 == next32 || !this_cpu_has_cap(ARM64_WORKAROUND_1418040))
return;
val = read_sysreg(cntkctl_el1);
if (!next32)
val |= ARCH_TIMER_USR_VCT_ACCESS_EN;
if (is_compat_thread(task_thread_info(next)))
sysreg_clear_set(cntkctl_el1, ARCH_TIMER_USR_VCT_ACCESS_EN, 0);
else
val &= ~ARCH_TIMER_USR_VCT_ACCESS_EN;
sysreg_clear_set(cntkctl_el1, 0, ARCH_TIMER_USR_VCT_ACCESS_EN);
}
write_sysreg(val, cntkctl_el1);
static void erratum_1418040_new_exec(void)
{
preempt_disable();
erratum_1418040_thread_switch(current);
preempt_enable();
}
/*
@@ -569,7 +561,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
entry_task_switch(next);
uao_thread_switch(next);
ssbs_thread_switch(next);
erratum_1418040_thread_switch(prev, next);
erratum_1418040_thread_switch(next);
ptrauth_thread_switch_user(next);
/*
@@ -660,6 +652,7 @@ void arch_setup_new_exec(void)
current->mm->context.flags = mmflags;
ptrauth_thread_init_user();
mte_thread_init_user();
erratum_1418040_new_exec();
if (task_spec_ssb_noexec(current)) {
arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,


@@ -102,6 +102,8 @@ extern s32 patch__hash_page_B, patch__hash_page_C;
extern s32 patch__flush_hash_A0, patch__flush_hash_A1, patch__flush_hash_A2;
extern s32 patch__flush_hash_B;
int __init find_free_bat(void);
unsigned int bat_block_size(unsigned long base, unsigned long top);
#endif /* !__ASSEMBLY__ */
/* We happily ignore the smaller BATs on 601, we don't actually use


@@ -449,6 +449,7 @@
#define PPC_RAW_LDX(r, base, b) (0x7c00002a | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
#define PPC_RAW_LHZ(r, base, i) (0xa0000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i))
#define PPC_RAW_LHBRX(r, base, b) (0x7c00062c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
#define PPC_RAW_LWBRX(r, base, b) (0x7c00042c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
#define PPC_RAW_LDBRX(r, base, b) (0x7c000428 | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
#define PPC_RAW_STWCX(s, a, b) (0x7c00012d | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_CMPWI(a, i) (0x2c000000 | ___PPC_RA(a) | IMM_L(i))


@@ -11,6 +11,7 @@ CFLAGS_prom_init.o += -fPIC
CFLAGS_btext.o += -fPIC
endif
CFLAGS_early_32.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
CFLAGS_cputable.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
CFLAGS_prom_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)


@@ -19,6 +19,9 @@ CFLAGS_code-patching.o += -DDISABLE_BRANCH_PROFILING
CFLAGS_feature-fixups.o += -DDISABLE_BRANCH_PROFILING
endif
CFLAGS_code-patching.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
CFLAGS_feature-fixups.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
obj-y += alloc.o code-patching.o feature-fixups.o pmem.o inst.o test_code-patching.o
ifndef CONFIG_KASAN


@@ -72,7 +72,7 @@ unsigned long p_block_mapped(phys_addr_t pa)
return 0;
}
static int find_free_bat(void)
int __init find_free_bat(void)
{
int b;
int n = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
@@ -96,7 +96,7 @@ static int find_free_bat(void)
* - block size has to be a power of two. This is calculated by finding the
* highest bit set to 1.
*/
static unsigned int block_size(unsigned long base, unsigned long top)
unsigned int bat_block_size(unsigned long base, unsigned long top)
{
unsigned int max_size = SZ_256M;
unsigned int base_shift = (ffs(base) - 1) & 31;
@@ -141,7 +141,7 @@ static unsigned long __init __mmu_mapin_ram(unsigned long base, unsigned long to
int idx;
while ((idx = find_free_bat()) != -1 && base != top) {
unsigned int size = block_size(base, top);
unsigned int size = bat_block_size(base, top);
if (size < 128 << 10)
break;
@@ -201,18 +201,17 @@ void mmu_mark_initmem_nx(void)
int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
int i;
unsigned long base = (unsigned long)_stext - PAGE_OFFSET;
unsigned long top = (unsigned long)_etext - PAGE_OFFSET;
unsigned long top = ALIGN((unsigned long)_etext - PAGE_OFFSET, SZ_128K);
unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;
unsigned long size;
for (i = 0; i < nb - 1 && base < top && top - base > (128 << 10);) {
size = block_size(base, top);
for (i = 0; i < nb - 1 && base < top;) {
size = bat_block_size(base, top);
setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT);
base += size;
}
if (base < top) {
size = block_size(base, top);
size = max(size, 128UL << 10);
size = bat_block_size(base, top);
if ((top - base) > size) {
size <<= 1;
if (strict_kernel_rwx_enabled() && base + size > border)


@@ -10,48 +10,51 @@ int __init kasan_init_region(void *start, size_t size)
{
unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
unsigned long k_cur = k_start;
int k_size = k_end - k_start;
int k_size_base = 1 << (ffs(k_size) - 1);
unsigned long k_nobat = k_start;
unsigned long k_cur;
phys_addr_t phys;
int ret;
void *block;
block = memblock_alloc(k_size, k_size_base);
while (k_nobat < k_end) {
unsigned int k_size = bat_block_size(k_nobat, k_end);
int idx = find_free_bat();
if (block && k_size_base >= SZ_128K && k_start == ALIGN(k_start, k_size_base)) {
int shift = ffs(k_size - k_size_base);
int k_size_more = shift ? 1 << (shift - 1) : 0;
if (idx == -1)
break;
if (k_size < SZ_128K)
break;
phys = memblock_phys_alloc_range(k_size, k_size, 0,
MEMBLOCK_ALLOC_ANYWHERE);
if (!phys)
break;
setbat(-1, k_start, __pa(block), k_size_base, PAGE_KERNEL);
if (k_size_more >= SZ_128K)
setbat(-1, k_start + k_size_base, __pa(block) + k_size_base,
k_size_more, PAGE_KERNEL);
if (v_block_mapped(k_start))
k_cur = k_start + k_size_base;
if (v_block_mapped(k_start + k_size_base))
k_cur = k_start + k_size_base + k_size_more;
update_bats();
setbat(idx, k_nobat, phys, k_size, PAGE_KERNEL);
k_nobat += k_size;
}
if (k_nobat != k_start)
update_bats();
if (!block)
block = memblock_alloc(k_size, PAGE_SIZE);
if (!block)
return -ENOMEM;
if (k_nobat < k_end) {
phys = memblock_phys_alloc_range(k_end - k_nobat, PAGE_SIZE, 0,
MEMBLOCK_ALLOC_ANYWHERE);
if (!phys)
return -ENOMEM;
}
ret = kasan_init_shadow_page_tables(k_start, k_end);
if (ret)
return ret;
kasan_update_early_region(k_start, k_cur, __pte(0));
kasan_update_early_region(k_start, k_nobat, __pte(0));
for (; k_cur < k_end; k_cur += PAGE_SIZE) {
for (k_cur = k_nobat; k_cur < k_end; k_cur += PAGE_SIZE) {
pmd_t *pmd = pmd_off_k(k_cur);
void *va = block + k_cur - k_start;
pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
pte_t pte = pfn_pte(PHYS_PFN(phys + k_cur - k_nobat), PAGE_KERNEL);
__set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0);
}
flush_tlb_kernel_range(k_start, k_end);
memset(kasan_mem_to_shadow(start), 0, k_end - k_start);
return 0;
}


@@ -651,17 +651,21 @@ bpf_alu32_trunc:
EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1]));
break;
case 64:
/*
* Way easier and faster(?) to store the value
* into stack and then use ldbrx
*
* ctx->seen will be reliable in pass2, but
* the instructions generated will remain the
* same across all passes
*/
/* Store the value to stack and then use byte-reverse loads */
PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx)));
EMIT(PPC_RAW_LDBRX(dst_reg, 0, b2p[TMP_REG_1]));
if (cpu_has_feature(CPU_FTR_ARCH_206)) {
EMIT(PPC_RAW_LDBRX(dst_reg, 0, b2p[TMP_REG_1]));
} else {
EMIT(PPC_RAW_LWBRX(dst_reg, 0, b2p[TMP_REG_1]));
if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32));
EMIT(PPC_RAW_LI(b2p[TMP_REG_2], 4));
EMIT(PPC_RAW_LWBRX(b2p[TMP_REG_2], b2p[TMP_REG_2], b2p[TMP_REG_1]));
if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
EMIT(PPC_RAW_SLDI(b2p[TMP_REG_2], b2p[TMP_REG_2], 32));
EMIT(PPC_RAW_OR(dst_reg, dst_reg, b2p[TMP_REG_2]));
}
break;
}
break;


@@ -1273,9 +1273,20 @@ static void power_pmu_disable(struct pmu *pmu)
* Otherwise provide a warning if there is PMI pending, but
* no counter is found overflown.
*/
if (any_pmc_overflown(cpuhw))
clear_pmi_irq_pending();
else
if (any_pmc_overflown(cpuhw)) {
/*
* Since power_pmu_disable runs under local_irq_save, it
* could happen that code hits a PMC overflow without PMI
* pending in paca. Hence only clear PMI pending if it was
* set.
*
* If a PMI is pending, then MSR[EE] must be disabled (because
* the masked PMI handler disabling EE). So it is safe to
* call clear_pmi_irq_pending().
*/
if (pmi_irq_pending())
clear_pmi_irq_pending();
} else
WARN_ON(pmi_irq_pending());
val = mmcra = cpuhw->mmcr.mmcra;


@@ -20,6 +20,7 @@
static char local_guest[] = " ";
static char all_guests[] = "* ";
static char *all_groups = all_guests;
static char *guest_query;
struct diag2fc_data {
@@ -62,10 +63,11 @@ static int diag2fc(int size, char* query, void *addr)
memcpy(parm_list.userid, query, NAME_LEN);
ASCEBC(parm_list.userid, NAME_LEN);
parm_list.addr = (unsigned long) addr ;
memcpy(parm_list.aci_grp, all_groups, NAME_LEN);
ASCEBC(parm_list.aci_grp, NAME_LEN);
parm_list.addr = (unsigned long)addr;
parm_list.size = size;
parm_list.fmt = 0x02;
memset(parm_list.aci_grp, 0x40, NAME_LEN);
rc = -1;
diag_stat_inc(DIAG_STAT_X2FC);


@@ -30,7 +30,7 @@
#define DEBUGP(fmt , ...)
#endif
#define PLT_ENTRY_SIZE 20
#define PLT_ENTRY_SIZE 22
void *module_alloc(unsigned long size)
{
@@ -330,27 +330,26 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
case R_390_PLTOFF32: /* 32 bit offset from GOT to PLT. */
case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
if (info->plt_initialized == 0) {
unsigned int insn[5];
unsigned int *ip = me->core_layout.base +
me->arch.plt_offset +
info->plt_offset;
unsigned char insn[PLT_ENTRY_SIZE];
char *plt_base;
char *ip;
insn[0] = 0x0d10e310; /* basr 1,0 */
insn[1] = 0x100a0004; /* lg 1,10(1) */
plt_base = me->core_layout.base + me->arch.plt_offset;
ip = plt_base + info->plt_offset;
*(int *)insn = 0x0d10e310; /* basr 1,0 */
*(int *)&insn[4] = 0x100c0004; /* lg 1,12(1) */
if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) {
unsigned int *ij;
ij = me->core_layout.base +
me->arch.plt_offset +
me->arch.plt_size - PLT_ENTRY_SIZE;
insn[2] = 0xa7f40000 + /* j __jump_r1 */
(unsigned int)(u16)
(((unsigned long) ij - 8 -
(unsigned long) ip) / 2);
char *jump_r1;
jump_r1 = plt_base + me->arch.plt_size -
PLT_ENTRY_SIZE;
/* brcl 0xf,__jump_r1 */
*(short *)&insn[8] = 0xc0f4;
*(int *)&insn[10] = (jump_r1 - (ip + 8)) / 2;
} else {
insn[2] = 0x07f10000; /* br %r1 */
*(int *)&insn[8] = 0x07f10000; /* br %r1 */
}
insn[3] = (unsigned int) (val >> 32);
insn[4] = (unsigned int) val;
*(long *)&insn[14] = val;
write(ip, insn, sizeof(insn));
info->plt_initialized = 1;


@@ -5239,7 +5239,7 @@ static struct intel_uncore_type icx_uncore_imc = {
.fixed_ctr_bits = 48,
.fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR,
.fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL,
.event_descs = hswep_uncore_imc_events,
.event_descs = snr_uncore_imc_events,
.perf_ctr = SNR_IMC_MMIO_PMON_CTR0,
.event_ctl = SNR_IMC_MMIO_PMON_CTL0,
.event_mask = SNBEP_PMON_RAW_EVENT_MASK,


@@ -387,7 +387,7 @@ static void threshold_restart_bank(void *_tr)
u32 hi, lo;
/* sysfs write might race against an offline operation */
if (this_cpu_read(threshold_banks))
if (!this_cpu_read(threshold_banks) && !tr->set_lvt_off)
return;
rdmsr(tr->b->address, lo, hi);


@@ -4146,13 +4146,6 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, void *insn, int i
if (likely(!insn || insn_len))
return true;
/*
* If RIP is invalid, go ahead with emulation which will cause an
* internal error exit.
*/
if (!kvm_vcpu_gfn_to_memslot(vcpu, kvm_rip_read(vcpu) >> PAGE_SHIFT))
return true;
cr4 = kvm_read_cr4(vcpu);
smep = cr4 & X86_CR4_SMEP;
smap = cr4 & X86_CR4_SMAP;


@@ -3171,6 +3171,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (data & ~supported_xss)
return 1;
vcpu->arch.ia32_xss = data;
kvm_update_cpuid_runtime(vcpu);
break;
case MSR_SMI_COUNT:
if (!msr_info->host_initiated)


@@ -575,7 +575,8 @@ void bio_truncate(struct bio *bio, unsigned new_size)
offset = new_size - done;
else
offset = 0;
zero_user(bv.bv_page, offset, bv.bv_len - offset);
zero_user(bv.bv_page, bv.bv_offset + offset,
bv.bv_len - offset);
truncated = true;
}
done += bv.bv_len;


@@ -719,6 +719,13 @@ void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
systab_hdr->revision >> 16,
systab_hdr->revision & 0xffff,
vendor);
if (IS_ENABLED(CONFIG_X86_64) &&
systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
!strcmp(vendor, "Apple")) {
pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
}
}
static __initdata char memory_type_name[][13] = {


@@ -119,9 +119,9 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
if (image->image_base != _text)
efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n");
if (!IS_ALIGNED((u64)_text, EFI_KIMG_ALIGN))
efi_err("FIRMWARE BUG: kernel image not aligned on %ldk boundary\n",
EFI_KIMG_ALIGN >> 10);
if (!IS_ALIGNED((u64)_text, SEGMENT_ALIGN))
efi_err("FIRMWARE BUG: kernel image not aligned on %dk boundary\n",
SEGMENT_ALIGN >> 10);
kernel_size = _edata - _text;
kernel_memsize = kernel_size + (_end - _edata);


@@ -469,8 +469,8 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
return -EINVAL;
}
if (args->stream_size > SZ_64K || args->nr_relocs > SZ_64K ||
args->nr_bos > SZ_64K || args->nr_pmrs > 128) {
if (args->stream_size > SZ_128K || args->nr_relocs > SZ_128K ||
args->nr_bos > SZ_128K || args->nr_pmrs > 128) {
DRM_ERROR("submit arguments out of size limits\n");
return -EINVAL;
}


@@ -26,9 +26,16 @@ static void dpu_setup_dspp_pcc(struct dpu_hw_dspp *ctx,
struct dpu_hw_pcc_cfg *cfg)
{
u32 base = ctx->cap->sblk->pcc.base;
u32 base;
if (!ctx || !base) {
if (!ctx) {
DRM_ERROR("invalid ctx %pK\n", ctx);
return;
}
base = ctx->cap->sblk->pcc.base;
if (!base) {
DRM_ERROR("invalid ctx %pK pcc base 0x%x\n", ctx, base);
return;
}


@@ -33,7 +33,12 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
of_node_put(phy_node);
if (!phy_pdev || !msm_dsi->phy) {
if (!phy_pdev) {
DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__);
return -EPROBE_DEFER;
}
if (!msm_dsi->phy) {
put_device(&phy_pdev->dev);
DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__);
return -EPROBE_DEFER;
}


@@ -769,12 +769,14 @@ void __exit msm_dsi_phy_driver_unregister(void)
int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
struct msm_dsi_phy_clk_request *clk_req)
{
struct device *dev = &phy->pdev->dev;
struct device *dev;
int ret;
if (!phy || !phy->cfg->ops.enable)
return -EINVAL;
dev = &phy->pdev->dev;
ret = dsi_phy_enable_resource(phy);
if (ret) {
DRM_DEV_ERROR(dev, "%s: resource enable failed, %d\n",


@@ -97,10 +97,15 @@ static int msm_hdmi_get_phy(struct hdmi *hdmi)
of_node_put(phy_node);
if (!phy_pdev || !hdmi->phy) {
if (!phy_pdev) {
DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n");
return -EPROBE_DEFER;
}
if (!hdmi->phy) {
DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n");
put_device(&phy_pdev->dev);
return -EPROBE_DEFER;
}
hdmi->phy_dev = get_device(&phy_pdev->dev);


@@ -350,7 +350,7 @@ static int msm_init_vram(struct drm_device *dev)
of_node_put(node);
if (ret)
return ret;
size = r.end - r.start;
size = r.end - r.start + 1;
DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
/* if we have no IOMMU, then we need to use carveout allocator.


@@ -373,7 +373,7 @@ static const struct lm90_params lm90_params[] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
| LM90_HAVE_BROKEN_ALERT | LM90_HAVE_CRIT,
.alert_alarms = 0x7c,
.max_convrate = 8,
.max_convrate = 7,
},
[lm86] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
@@ -394,12 +394,13 @@ static const struct lm90_params lm90_params[] = {
.max_convrate = 9,
},
[max6646] = {
.flags = LM90_HAVE_CRIT,
.flags = LM90_HAVE_CRIT | LM90_HAVE_BROKEN_ALERT,
.alert_alarms = 0x7c,
.max_convrate = 6,
.reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
},
[max6654] = {
.flags = LM90_HAVE_BROKEN_ALERT,
.alert_alarms = 0x7c,
.max_convrate = 7,
.reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
@@ -418,7 +419,7 @@ static const struct lm90_params lm90_params[] = {
},
[max6680] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_CRIT
| LM90_HAVE_CRIT_ALRM_SWP,
| LM90_HAVE_CRIT_ALRM_SWP | LM90_HAVE_BROKEN_ALERT,
.alert_alarms = 0x7c,
.max_convrate = 7,
},


@@ -375,8 +375,6 @@ static int venus_remove(struct platform_device *pdev)
hfi_destroy(core);
v4l2_device_unregister(&core->v4l2_dev);
mutex_destroy(&core->pm_lock);
mutex_destroy(&core->lock);
venus_dbgfs_deinit(core);


@@ -291,7 +291,6 @@ static int ads5121_chipselect_init(struct mtd_info *mtd)
/* Control chips select signal on ADS5121 board */
static void ads5121_select_chip(struct nand_chip *nand, int chip)
{
struct mtd_info *mtd = nand_to_mtd(nand);
struct mpc5121_nfc_prv *prv = nand_get_controller_data(nand);
u8 v;


@@ -2382,8 +2382,7 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
break;
}
if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
hclgevf_enable_vector(&hdev->misc_vector, true);
hclgevf_enable_vector(&hdev->misc_vector, true);
return IRQ_HANDLED;
}


@@ -3401,11 +3401,25 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
struct device *dev = &adapter->vdev->dev;
union ibmvnic_crq crq;
int max_entries;
int cap_reqs;
/* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on
* the PROMISC flag). Initialize this count upfront. When the tasklet
* receives a response to all of these, it will send the next protocol
* message (QUERY_IP_OFFLOAD).
*/
if (!(adapter->netdev->flags & IFF_PROMISC) ||
adapter->promisc_supported)
cap_reqs = 7;
else
cap_reqs = 6;
if (!retry) {
/* Sub-CRQ entries are 32 byte long */
int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
atomic_set(&adapter->running_cap_crqs, cap_reqs);
if (adapter->min_tx_entries_per_subcrq > entries_page ||
adapter->min_rx_add_entries_per_subcrq > entries_page) {
dev_err(dev, "Fatal, invalid entries per sub-crq\n");
@@ -3466,44 +3480,45 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
adapter->opt_rx_comp_queues;
adapter->req_rx_add_queues = adapter->max_rx_add_queues;
} else {
atomic_add(cap_reqs, &adapter->running_cap_crqs);
}
memset(&crq, 0, sizeof(crq));
crq.request_capability.first = IBMVNIC_CRQ_CMD;
crq.request_capability.cmd = REQUEST_CAPABILITY;
crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
atomic_inc(&adapter->running_cap_crqs);
cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
atomic_inc(&adapter->running_cap_crqs);
cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
atomic_inc(&adapter->running_cap_crqs);
cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability =
cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
crq.request_capability.number =
cpu_to_be64(adapter->req_tx_entries_per_subcrq);
atomic_inc(&adapter->running_cap_crqs);
cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability =
cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
crq.request_capability.number =
cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
atomic_inc(&adapter->running_cap_crqs);
cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability = cpu_to_be16(REQ_MTU);
crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
atomic_inc(&adapter->running_cap_crqs);
cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
if (adapter->netdev->flags & IFF_PROMISC) {
@@ -3511,16 +3526,21 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
crq.request_capability.capability =
cpu_to_be16(PROMISC_REQUESTED);
crq.request_capability.number = cpu_to_be64(1);
atomic_inc(&adapter->running_cap_crqs);
cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
}
} else {
crq.request_capability.capability =
cpu_to_be16(PROMISC_REQUESTED);
crq.request_capability.number = cpu_to_be64(0);
atomic_inc(&adapter->running_cap_crqs);
cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
}
/* Keep at end to catch any discrepancy between expected and actual
* CRQs sent.
*/
WARN_ON(cap_reqs != 0);
}
static int pending_scrq(struct ibmvnic_adapter *adapter,
@@ -3953,118 +3973,132 @@ static void send_query_map(struct ibmvnic_adapter *adapter)
static void send_query_cap(struct ibmvnic_adapter *adapter)
{
union ibmvnic_crq crq;
int cap_reqs;
/* We send out 25 QUERY_CAPABILITY CRQs below. Initialize this count
* upfront. When the tasklet receives a response to all of these, it
* can send out the next protocol messaage (REQUEST_CAPABILITY).
*/
cap_reqs = 25;
atomic_set(&adapter->running_cap_crqs, cap_reqs);
atomic_set(&adapter->running_cap_crqs, 0);
memset(&crq, 0, sizeof(crq));
crq.query_capability.first = IBMVNIC_CRQ_CMD;
crq.query_capability.cmd = QUERY_CAPABILITY;
crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
cap_reqs--;
crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
cap_reqs--;
crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MIN_MTU);
atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_MTU);
atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
cap_reqs--;
crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
cap_reqs--;
crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
cap_reqs--;
crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
cap_reqs--;
crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
cap_reqs--;
crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
cap_reqs--;
crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
cap_reqs--;
/* Keep at end to catch any discrepancy between expected and actual
* CRQs sent.
*/
WARN_ON(cap_reqs != 0);
}
static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
@@ -4369,6 +4403,8 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
char *name;
atomic_dec(&adapter->running_cap_crqs);
netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n",
atomic_read(&adapter->running_cap_crqs));
switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
case REQ_TX_QUEUES:
req_value = &adapter->req_tx_queues;
@@ -5039,12 +5075,6 @@ static void ibmvnic_tasklet(struct tasklet_struct *t)
ibmvnic_handle_crq(crq, adapter);
crq->generic.first = 0;
}
/* remain in tasklet until all
* capabilities responses are received
*/
if (!adapter->wait_capability)
done = true;
}
/* if capabilities CRQ's were sent in this tasklet, the following
* tasklet must wait until all responses are received


@@ -172,7 +172,6 @@ enum i40e_interrupt_policy {
struct i40e_lump_tracking {
u16 num_entries;
u16 search_hint;
u16 list[0];
#define I40E_PILE_VALID_BIT 0x8000
#define I40E_IWARP_IRQ_PILE_ID (I40E_PILE_VALID_BIT - 2)
@@ -755,12 +754,12 @@ struct i40e_vsi {
struct rtnl_link_stats64 net_stats_offsets;
struct i40e_eth_stats eth_stats;
struct i40e_eth_stats eth_stats_offsets;
u32 tx_restart;
u32 tx_busy;
u64 tx_restart;
u64 tx_busy;
u64 tx_linearize;
u64 tx_force_wb;
u32 rx_buf_failed;
u32 rx_page_failed;
u64 rx_buf_failed;
u64 rx_page_failed;
/* These are containers of ring pointers, allocated at run-time */
struct i40e_ring **rx_rings;


@@ -240,7 +240,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
(unsigned long int)vsi->net_stats_offsets.rx_compressed,
(unsigned long int)vsi->net_stats_offsets.tx_compressed);
dev_info(&pf->pdev->dev,
" tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
" tx_restart = %llu, tx_busy = %llu, rx_buf_failed = %llu, rx_page_failed = %llu\n",
vsi->tx_restart, vsi->tx_busy,
vsi->rx_buf_failed, vsi->rx_page_failed);
rcu_read_lock();


@@ -195,10 +195,6 @@ int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
* @id: an owner id to stick on the items assigned
*
* Returns the base item index of the lump, or negative for error
*
* The search_hint trick and lack of advanced fit-finding only work
* because we're highly likely to have all the same size lump requests.
* Linear search time and any fragmentation should be minimal.
**/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
u16 needed, u16 id)
@@ -213,8 +209,21 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
return -EINVAL;
}
/* start the linear search with an imperfect hint */
i = pile->search_hint;
/* Allocate last queue in the pile for FDIR VSI queue
* so it doesn't fragment the qp_pile
*/
if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) {
if (pile->list[pile->num_entries - 1] & I40E_PILE_VALID_BIT) {
dev_err(&pf->pdev->dev,
"Cannot allocate queue %d for I40E_VSI_FDIR\n",
pile->num_entries - 1);
return -ENOMEM;
}
pile->list[pile->num_entries - 1] = id | I40E_PILE_VALID_BIT;
return pile->num_entries - 1;
}
i = 0;
while (i < pile->num_entries) {
/* skip already allocated entries */
if (pile->list[i] & I40E_PILE_VALID_BIT) {
@@ -233,7 +242,6 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
for (j = 0; j < needed; j++)
pile->list[i+j] = id | I40E_PILE_VALID_BIT;
ret = i;
pile->search_hint = i + j;
break;
}
@@ -256,7 +264,7 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
int valid_id = (id | I40E_PILE_VALID_BIT);
int count = 0;
int i;
u16 i;
if (!pile || index >= pile->num_entries)
return -EINVAL;
@@ -268,8 +276,6 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
count++;
}
if (count && index < pile->search_hint)
pile->search_hint = index;
return count;
}
@@ -771,9 +777,9 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
struct rtnl_link_stats64 *ns; /* netdev stats */
struct i40e_eth_stats *oes;
struct i40e_eth_stats *es; /* device's eth stats */
u32 tx_restart, tx_busy;
u64 tx_restart, tx_busy;
struct i40e_ring *p;
u32 rx_page, rx_buf;
u64 rx_page, rx_buf;
u64 bytes, packets;
unsigned int start;
u64 tx_linearize;
@@ -10130,15 +10136,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
}
i40e_get_oem_version(&pf->hw);
if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) ||
hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) {
/* The following delay is necessary for 4.33 firmware and older
* to recover after EMP reset. 200 ms should suffice but we
* put here 300 ms to be sure that FW is ready to operate
* after reset.
*/
mdelay(300);
if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) {
/* The following delay is necessary for firmware update. */
mdelay(1000);
}
/* re-verify the eeprom if we just had an EMP reset */
@@ -11327,7 +11327,6 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
return -ENOMEM;
pf->irq_pile->num_entries = vectors;
pf->irq_pile->search_hint = 0;
/* track first vector for misc interrupts, ignore return */
(void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
@@ -12130,7 +12129,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
goto sw_init_done;
}
pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
pf->qp_pile->search_hint = 0;
pf->tx_timeout_recovery_level = 1;

View File

@@ -279,6 +279,9 @@
#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT)
#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11


@@ -1323,6 +1323,32 @@ static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
return aq_ret;
}
/**
* i40e_sync_vfr_reset
* @hw: pointer to hw struct
* @vf_id: VF identifier
*
* Before trigger hardware reset, we need to know if no other process has
* reserved the hardware for any reset operations. This check is done by
* examining the status of the RSTAT1 register used to signal the reset.
**/
static int i40e_sync_vfr_reset(struct i40e_hw *hw, int vf_id)
{
u32 reg;
int i;
for (i = 0; i < I40E_VFR_WAIT_COUNT; i++) {
reg = rd32(hw, I40E_VFINT_ICR0_ENA(vf_id)) &
I40E_VFINT_ICR0_ADMINQ_MASK;
if (reg)
return 0;
usleep_range(100, 200);
}
return -EAGAIN;
}
/**
* i40e_trigger_vf_reset
* @vf: pointer to the VF structure
@@ -1337,9 +1363,11 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
u32 reg, reg_idx, bit_idx;
bool vf_active;
u32 radq;
/* warn the VF */
clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
vf_active = test_and_clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
/* Disable VF's configuration API during reset. The flag is re-enabled
* in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
@@ -1353,7 +1381,19 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
* just need to clean up, so don't hit the VFRTRIG register.
*/
if (!flr) {
/* reset VF using VPGEN_VFRTRIG reg */
/* Sync VFR reset before trigger next one */
radq = rd32(hw, I40E_VFINT_ICR0_ENA(vf->vf_id)) &
I40E_VFINT_ICR0_ADMINQ_MASK;
if (vf_active && !radq)
/* waiting for finish reset by virtual driver */
if (i40e_sync_vfr_reset(hw, vf->vf_id))
dev_info(&pf->pdev->dev,
"Reset VF %d never finished\n",
vf->vf_id);
/* Reset VF using VPGEN_VFRTRIG reg. It is also setting
* in progress state in rstat1 register.
*/
reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
@@ -2563,6 +2603,59 @@ error_param:
aq_ret);
}
/**
* i40e_check_enough_queue - find big enough queue number
* @vf: pointer to the VF info
* @needed: the number of items needed
*
* Returns the base item index of the queue, or negative for error
**/
static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed)
{
unsigned int i, cur_queues, more, pool_size;
struct i40e_lump_tracking *pile;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi;
vsi = pf->vsi[vf->lan_vsi_idx];
cur_queues = vsi->alloc_queue_pairs;
/* if current allocated queues are enough for need */
if (cur_queues >= needed)
return vsi->base_queue;
pile = pf->qp_pile;
if (cur_queues > 0) {
/* if the allocated queues are not zero
* just check if there are enough queues for more
* behind the allocated queues.
*/
more = needed - cur_queues;
for (i = vsi->base_queue + cur_queues;
i < pile->num_entries; i++) {
if (pile->list[i] & I40E_PILE_VALID_BIT)
break;
if (more-- == 1)
/* there is enough */
return vsi->base_queue;
}
}
pool_size = 0;
for (i = 0; i < pile->num_entries; i++) {
if (pile->list[i] & I40E_PILE_VALID_BIT) {
pool_size = 0;
continue;
}
if (needed <= ++pool_size)
/* there is enough */
return i;
}
return -ENOMEM;
}
/**
* i40e_vc_request_queues_msg
* @vf: pointer to the VF info
@@ -2597,6 +2690,12 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
req_pairs - cur_pairs,
pf->queues_left);
vfres->num_queue_pairs = pf->queues_left + cur_pairs;
} else if (i40e_check_enough_queue(vf, req_pairs) < 0) {
dev_warn(&pf->pdev->dev,
"VF %d requested %d more queues, but there is not enough for it.\n",
vf->vf_id,
req_pairs - cur_pairs);
vfres->num_queue_pairs = cur_pairs;
} else {
/* successful request */
vf->num_req_queues = req_pairs;


@@ -19,6 +19,7 @@
#define I40E_MAX_VF_PROMISC_FLAGS 3
#define I40E_VF_STATE_WAIT_COUNT 20
#define I40E_VFR_WAIT_COUNT 100
/* Various queue ctrls */
enum i40e_queue_ctrl {


@@ -386,7 +386,12 @@ static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf,
dst_mdev->msg_size = mbox_hdr->msg_size;
dst_mdev->num_msgs = num_msgs;
err = otx2_sync_mbox_msg(dst_mbox);
if (err) {
/* Error code -EIO indicate there is a communication failure
* to the AF. Rest of the error codes indicate that AF processed
* VF messages and set the error codes in response messages
* (if any) so simply forward responses to VF.
*/
if (err == -EIO) {
dev_warn(pf->dev,
"AF not responding to VF%d messages\n", vf);
/* restore PF mbase and exit */


@@ -816,8 +816,6 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
priv->hwts_tx_en = 0;
priv->hwts_rx_en = 0;
stmmac_ptp_register(priv);
return 0;
}
@@ -2691,7 +2689,7 @@ static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
/**
* stmmac_hw_setup - setup mac in a usable state.
* @dev : pointer to the device structure.
* @init_ptp: initialize PTP if set
* @ptp_register: register PTP if set
* Description:
* this is the main function to setup the HW in a usable state because the
* dma engine is reset, the core registers are configured (e.g. AXI,
@@ -2701,7 +2699,7 @@ static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
* 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure.
*/
static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 rx_cnt = priv->plat->rx_queues_to_use;
@@ -2757,13 +2755,13 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
stmmac_mmc_setup(priv);
if (init_ptp) {
ret = stmmac_init_ptp(priv);
if (ret == -EOPNOTSUPP)
netdev_warn(priv->dev, "PTP not supported by HW\n");
else if (ret)
netdev_warn(priv->dev, "PTP init failed\n");
}
ret = stmmac_init_ptp(priv);
if (ret == -EOPNOTSUPP)
netdev_warn(priv->dev, "PTP not supported by HW\n");
else if (ret)
netdev_warn(priv->dev, "PTP init failed\n");
else if (ptp_register)
stmmac_ptp_register(priv);
priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;


@@ -1144,7 +1144,7 @@ int cpsw_fill_rx_channels(struct cpsw_priv *priv)
static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw,
int size)
{
struct page_pool_params pp_params;
struct page_pool_params pp_params = {};
struct page_pool *pool;
pp_params.order = 0;


@@ -951,9 +951,7 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
sizeof(struct yamdrv_ioctl_mcs));
if (IS_ERR(ym))
return PTR_ERR(ym);
if (ym->cmd != SIOCYAMSMCS)
return -EINVAL;
if (ym->bitrate > YAM_MAXBITRATE) {
if (ym->cmd != SIOCYAMSMCS || ym->bitrate > YAM_MAXBITRATE) {
kfree(ym);
return -EINVAL;
}


@@ -789,6 +789,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54616S",
/* PHY_GBIT_FEATURES */
.soft_reset = genphy_soft_reset,
.config_init = bcm54xx_config_init,
.config_aneg = bcm54616s_config_aneg,
.ack_interrupt = bcm_phy_ack_intr,


@@ -1682,6 +1682,9 @@ void phy_detach(struct phy_device *phydev)
phy_driver_is_genphy_10g(phydev))
device_release_driver(&phydev->mdio.dev);
/* Assert the reset signal */
phy_device_reset(phydev, 1);
/*
* The phydev might go away on the put_device() below, so avoid
* a use-after-free bug by reading the underlying bus first.
@@ -1693,9 +1696,6 @@ void phy_detach(struct phy_device *phydev)
ndev_owner = dev->dev.parent->driver->owner;
if (ndev_owner != bus->owner)
module_put(bus->owner);
/* Assert the reset signal */
phy_device_reset(phydev, 1);
}
EXPORT_SYMBOL(phy_detach);


@@ -609,6 +609,11 @@ struct sfp_bus *sfp_bus_find_fwnode(struct fwnode_handle *fwnode)
else if (ret < 0)
return ERR_PTR(ret);
if (!fwnode_device_is_available(ref.fwnode)) {
fwnode_handle_put(ref.fwnode);
return NULL;
}
bus = sfp_bus_get(ref.fwnode);
fwnode_handle_put(ref.fwnode);
if (!bus)


@@ -92,7 +92,7 @@ static int rpmsg_eptdev_destroy(struct device *dev, void *data)
/* wake up any blocked readers */
wake_up_interruptible(&eptdev->readq);
device_del(&eptdev->dev);
cdev_device_del(&eptdev->cdev, &eptdev->dev);
put_device(&eptdev->dev);
return 0;
@@ -332,7 +332,6 @@ static void rpmsg_eptdev_release_device(struct device *dev)
ida_simple_remove(&rpmsg_ept_ida, dev->id);
ida_simple_remove(&rpmsg_minor_ida, MINOR(eptdev->dev.devt));
cdev_del(&eptdev->cdev);
kfree(eptdev);
}
@@ -377,19 +376,13 @@ static int rpmsg_eptdev_create(struct rpmsg_ctrldev *ctrldev,
dev->id = ret;
dev_set_name(dev, "rpmsg%d", ret);
ret = cdev_add(&eptdev->cdev, dev->devt, 1);
ret = cdev_device_add(&eptdev->cdev, &eptdev->dev);
if (ret)
goto free_ept_ida;
/* We can now rely on the release function for cleanup */
dev->release = rpmsg_eptdev_release_device;
ret = device_add(dev);
if (ret) {
dev_err(dev, "device_add failed: %d\n", ret);
put_device(dev);
}
return ret;
free_ept_ida:
@@ -458,7 +451,6 @@ static void rpmsg_ctrldev_release_device(struct device *dev)
ida_simple_remove(&rpmsg_ctrl_ida, dev->id);
ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt));
cdev_del(&ctrldev->cdev);
kfree(ctrldev);
}
@@ -493,19 +485,13 @@ static int rpmsg_chrdev_probe(struct rpmsg_device *rpdev)
dev->id = ret;
dev_set_name(&ctrldev->dev, "rpmsg_ctrl%d", ret);
ret = cdev_add(&ctrldev->cdev, dev->devt, 1);
ret = cdev_device_add(&ctrldev->cdev, &ctrldev->dev);
if (ret)
goto free_ctrl_ida;
/* We can now rely on the release function for cleanup */
dev->release = rpmsg_ctrldev_release_device;
ret = device_add(dev);
if (ret) {
dev_err(&rpdev->dev, "device_add failed: %d\n", ret);
put_device(dev);
}
dev_set_drvdata(&rpdev->dev, ctrldev);
return ret;
@@ -531,7 +517,7 @@ static void rpmsg_chrdev_remove(struct rpmsg_device *rpdev)
if (ret)
dev_warn(&rpdev->dev, "failed to nuke endpoints: %d\n", ret);
device_del(&ctrldev->dev);
cdev_device_del(&ctrldev->cdev, &ctrldev->dev);
put_device(&ctrldev->dev);
}


@@ -521,6 +521,8 @@ static void zfcp_fc_adisc_handler(void *data)
goto out;
}
/* re-init to undo drop from zfcp_fc_adisc() */
port->d_id = ntoh24(adisc_resp->adisc_port_id);
/* port is good, unblock rport without going through erp */
zfcp_scsi_schedule_rport_register(port);
out:
@@ -534,6 +536,7 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
struct zfcp_fc_req *fc_req;
struct zfcp_adapter *adapter = port->adapter;
struct Scsi_Host *shost = adapter->scsi_host;
u32 d_id;
int ret;
fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC);
@@ -558,7 +561,15 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));
ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els,
d_id = port->d_id; /* remember as destination for send els below */
/*
* Force fresh GID_PN lookup on next port recovery.
* Must happen after request setup and before sending request,
* to prevent race with port->d_id re-init in zfcp_fc_adisc_handler().
*/
port->d_id = 0;
ret = zfcp_fsf_send_els(adapter, d_id, &fc_req->ct_els,
ZFCP_FC_CTELS_TMO);
if (ret)
kmem_cache_free(zfcp_fc_req_cache, fc_req);


@@ -80,7 +80,7 @@ static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba);
static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
struct device *parent, int npiv);
static void bnx2fc_destroy_work(struct work_struct *work);
static void bnx2fc_port_destroy(struct fcoe_port *port);
static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev);
static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device
@@ -905,9 +905,6 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
__bnx2fc_destroy(interface);
}
mutex_unlock(&bnx2fc_dev_lock);
/* Ensure ALL destroy work has been completed before return */
flush_workqueue(bnx2fc_wq);
return;
default:
@@ -1213,8 +1210,8 @@ static int bnx2fc_vport_destroy(struct fc_vport *vport)
mutex_unlock(&n_port->lp_mutex);
bnx2fc_free_vport(interface->hba, port->lport);
bnx2fc_port_shutdown(port->lport);
bnx2fc_port_destroy(port);
bnx2fc_interface_put(interface);
queue_work(bnx2fc_wq, &port->destroy_work);
return 0;
}
@@ -1523,7 +1520,6 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
port->lport = lport;
port->priv = interface;
port->get_netdev = bnx2fc_netdev;
INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
/* Configure fcoe_port */
rc = bnx2fc_lport_config(lport);
@@ -1651,8 +1647,8 @@ static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
bnx2fc_interface_cleanup(interface);
bnx2fc_stop(interface);
list_del(&interface->list);
bnx2fc_port_destroy(port);
bnx2fc_interface_put(interface);
queue_work(bnx2fc_wq, &port->destroy_work);
}
/**
@@ -1692,15 +1688,12 @@ netdev_err:
return rc;
}
static void bnx2fc_destroy_work(struct work_struct *work)
static void bnx2fc_port_destroy(struct fcoe_port *port)
{
struct fcoe_port *port;
struct fc_lport *lport;
port = container_of(work, struct fcoe_port, destroy_work);
lport = port->lport;
BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n");
BNX2FC_HBA_DBG(lport, "Entered %s, destroying lport %p\n", __func__, lport);
bnx2fc_if_destroy(lport);
}
@@ -2554,9 +2547,6 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)
__bnx2fc_destroy(interface);
mutex_unlock(&bnx2fc_dev_lock);
/* Ensure ALL destroy work has been completed before return */
flush_workqueue(bnx2fc_wq);
bnx2fc_ulp_stop(hba);
/* unregister cnic device */
if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))


@@ -317,6 +317,7 @@ static struct tty_driver *gsm_tty_driver;
#define GSM1_ESCAPE_BITS 0x20
#define XON 0x11
#define XOFF 0x13
#define ISO_IEC_646_MASK 0x7F
static const struct tty_port_operations gsm_port_ops;
@@ -526,7 +527,8 @@ static int gsm_stuff_frame(const u8 *input, u8 *output, int len)
int olen = 0;
while (len--) {
if (*input == GSM1_SOF || *input == GSM1_ESCAPE
|| *input == XON || *input == XOFF) {
|| (*input & ISO_IEC_646_MASK) == XON
|| (*input & ISO_IEC_646_MASK) == XOFF) {
*output++ = GSM1_ESCAPE;
*output++ = *input++ ^ GSM1_ESCAPE_BITS;
olen++;


@@ -83,8 +83,17 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
port->mapsize = resource_size(&resource);
/* Check for shifted address mapping */
if (of_property_read_u32(np, "reg-offset", &prop) == 0)
if (of_property_read_u32(np, "reg-offset", &prop) == 0) {
if (prop >= port->mapsize) {
dev_warn(&ofdev->dev, "reg-offset %u exceeds region size %pa\n",
prop, &port->mapsize);
ret = -EINVAL;
goto err_unprepare;
}
port->mapbase += prop;
port->mapsize -= prop;
}
port->iotype = UPIO_MEM;
if (of_property_read_u32(np, "reg-io-width", &prop) == 0) {


@@ -5171,8 +5171,30 @@ static const struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS400,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */
pbn_b2_4_115200 },
/* Brainboxes Devices */
/*
* BrainBoxes UC-260
* Brainboxes UC-101
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0BA1,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
/*
* Brainboxes UC-235/246
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0AA1,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_1_115200 },
/*
* Brainboxes UC-257
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0861,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
/*
* Brainboxes UC-260/271/701/756
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0D21,
PCI_ANY_ID, PCI_ANY_ID,
@@ -5180,7 +5202,81 @@ static const struct pci_device_id serial_pci_tbl[] = {
pbn_b2_4_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x0E34,
PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
pbn_b2_4_115200 },
/*
* Brainboxes UC-268
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0841,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_4_115200 },
/*
* Brainboxes UC-275/279
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0881,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_8_115200 },
/*
* Brainboxes UC-302
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x08E1,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
/*
* Brainboxes UC-310
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x08C1,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
/*
* Brainboxes UC-313
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x08A3,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
/*
* Brainboxes UC-320/324
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0A61,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_1_115200 },
/*
* Brainboxes UC-346
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0B02,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_4_115200 },
/*
* Brainboxes UC-357
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0A81,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x0A83,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
/*
* Brainboxes UC-368
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0C41,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_4_115200 },
/*
* Brainboxes UC-420/431
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0921,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_4_115200 },
/*
* Perle PCI-RAS cards


@@ -574,7 +574,7 @@ static void stm32_usart_start_tx(struct uart_port *port)
struct serial_rs485 *rs485conf = &port->rs485;
struct circ_buf *xmit = &port->state->xmit;
if (uart_circ_empty(xmit))
if (uart_circ_empty(xmit) && !port->x_char)
return;
if (rs485conf->flags & SER_RS485_ENABLED) {


@@ -39,8 +39,11 @@ static int ulpi_match(struct device *dev, struct device_driver *driver)
struct ulpi *ulpi = to_ulpi_dev(dev);
const struct ulpi_device_id *id;
/* Some ULPI devices don't have a vendor id so rely on OF match */
if (ulpi->id.vendor == 0)
/*
* Some ULPI devices don't have a vendor id
* or provide an id_table so rely on OF match.
*/
if (ulpi->id.vendor == 0 || !drv->id_table)
return of_driver_match_device(dev, driver);
for (id = drv->id_table; id->vendor; id++)


@@ -1562,6 +1562,13 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
urb->hcpriv = NULL;
INIT_LIST_HEAD(&urb->urb_list);
atomic_dec(&urb->use_count);
/*
* Order the write of urb->use_count above before the read
* of urb->reject below. Pairs with the memory barriers in
* usb_kill_urb() and usb_poison_urb().
*/
smp_mb__after_atomic();
atomic_dec(&urb->dev->urbnum);
if (atomic_read(&urb->reject))
wake_up(&usb_kill_urb_queue);
@@ -1666,6 +1673,13 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
usb_anchor_resume_wakeups(anchor);
atomic_dec(&urb->use_count);
/*
* Order the write of urb->use_count above before the read
* of urb->reject below. Pairs with the memory barriers in
* usb_kill_urb() and usb_poison_urb().
*/
smp_mb__after_atomic();
if (unlikely(atomic_read(&urb->reject)))
wake_up(&usb_kill_urb_queue);
usb_put_urb(urb);

View File

@@ -706,6 +706,12 @@ void usb_kill_urb(struct urb *urb)
if (!(urb && urb->dev && urb->ep))
return;
atomic_inc(&urb->reject);
/*
* Order the write of urb->reject above before the read
* of urb->use_count below. Pairs with the barriers in
* __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
*/
smp_mb__after_atomic();
usb_hcd_unlink_urb(urb, -ENOENT);
wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
@@ -747,6 +753,12 @@ void usb_poison_urb(struct urb *urb)
if (!urb)
return;
atomic_inc(&urb->reject);
/*
* Order the write of urb->reject above before the read
* of urb->use_count below. Pairs with the barriers in
* __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
*/
smp_mb__after_atomic();
if (!urb->dev || !urb->ep)
return;
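
The comments added above describe a classic store/load pairing: usb_kill_urb()/usb_poison_urb() set urb->reject and must then see an up-to-date urb->use_count, while the submit/giveback paths drop urb->use_count and must then see an up-to-date urb->reject; if either side misses the other's store, the killer can sleep on usb_kill_urb_queue with nobody left to wake it. A hedged userspace sketch of that handshake with C11 atomics follows; fake_urb, submit_side_done() and kill_side_may_sleep() are made-up names, and atomic_thread_fence() merely stands in for smp_mb__after_atomic().

/* Hedged userspace sketch of the urb->use_count / urb->reject handshake,
 * using C11 atomics instead of the kernel's atomic_t primitives. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_urb {
	atomic_int use_count;	/* mirrors urb->use_count */
	atomic_int reject;	/* mirrors urb->reject */
};

/* Mirrors usb_hcd_submit_urb()/__usb_hcd_giveback_urb(): drop the
 * reference, then make that store visible before reading ->reject. */
static bool submit_side_done(struct fake_urb *urb)
{
	atomic_fetch_sub(&urb->use_count, 1);
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb__after_atomic() */
	return atomic_load(&urb->reject) != 0;	/* waiter needs a wakeup */
}

/* Mirrors usb_kill_urb()/usb_poison_urb(): set ->reject, then make that
 * store visible before sampling ->use_count for the wait condition. */
static bool kill_side_may_sleep(struct fake_urb *urb)
{
	atomic_fetch_add(&urb->reject, 1);
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb__after_atomic() */
	return atomic_load(&urb->use_count) != 0;	/* URB still in flight? */
}

int main(void)
{
	struct fake_urb urb;

	atomic_init(&urb.use_count, 1);
	atomic_init(&urb.reject, 0);

	/* With both fences in place, at least one side observes the other's
	 * store, so the killer cannot sleep forever while the submitter
	 * skips the wakeup - the hang the patch above addresses. */
	printf("killer must wait: %d\n", kill_side_may_sleep(&urb));
	printf("submitter must wake waiter: %d\n", submit_side_done(&urb));
	return 0;
}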

View File

@@ -583,6 +583,7 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in,
if (is_iso) {
switch (speed) {
case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
size = ss->isoc_maxpacket *
(ss->isoc_mult + 1) *

View File

@@ -478,6 +478,9 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int ret;
if (pm_runtime_suspended(dev))
pm_runtime_resume(dev);
ret = xhci_priv_suspend_quirk(hcd);
if (ret)
return ret;

View File

@@ -2301,6 +2301,16 @@ UNUSUAL_DEV( 0x2027, 0xa001, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
US_FL_SCM_MULT_TARG ),
/*
* Reported by DocMAX <mail@vacharakis.de>
* and Thomas Weißschuh <linux@weissschuh.net>
*/
UNUSUAL_DEV( 0x2109, 0x0715, 0x9999, 0x9999,
"VIA Labs, Inc.",
"VL817 SATA Bridge",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_UAS),
UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
"ST",
"2A",

View File

@@ -5240,8 +5240,9 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
case SNK_TRYWAIT_DEBOUNCE:
break;
case SNK_ATTACH_WAIT:
case SNK_DEBOUNCED:
port->debouncing = false;
tcpm_set_state(port, SNK_UNATTACHED, 0);
/* Do nothing, as TCPM is still waiting for vbus to reach VSAFE5V to connect */
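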
break;
case SNK_NEGOTIATE_CAPABILITIES:

View File

@@ -325,7 +325,7 @@ static int ucsi_ccg_init(struct ucsi_ccg *uc)
if (status < 0)
return status;
if (!data)
if (!(data & DEV_INT))
return 0;
status = ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));

View File

@@ -286,8 +286,6 @@ struct hvfb_par {
static uint screen_width = HVFB_WIDTH;
static uint screen_height = HVFB_HEIGHT;
static uint screen_width_max = HVFB_WIDTH;
static uint screen_height_max = HVFB_HEIGHT;
static uint screen_depth;
static uint screen_fb_size;
static uint dio_fb_size; /* FB size for deferred IO */
@@ -581,7 +579,6 @@ static int synthvid_get_supported_resolution(struct hv_device *hdev)
int ret = 0;
unsigned long t;
u8 index;
int i;
memset(msg, 0, sizeof(struct synthvid_msg));
msg->vid_hdr.type = SYNTHVID_RESOLUTION_REQUEST;
@@ -612,13 +609,6 @@ static int synthvid_get_supported_resolution(struct hv_device *hdev)
goto out;
}
for (i = 0; i < msg->resolution_resp.resolution_count; i++) {
screen_width_max = max_t(unsigned int, screen_width_max,
msg->resolution_resp.supported_resolution[i].width);
screen_height_max = max_t(unsigned int, screen_height_max,
msg->resolution_resp.supported_resolution[i].height);
}
screen_width =
msg->resolution_resp.supported_resolution[index].width;
screen_height =
@@ -940,7 +930,7 @@ static void hvfb_get_option(struct fb_info *info)
if (x < HVFB_WIDTH_MIN || y < HVFB_HEIGHT_MIN ||
(synthvid_ver_ge(par->synthvid_version, SYNTHVID_VERSION_WIN10) &&
(x > screen_width_max || y > screen_height_max)) ||
(x * y * screen_depth / 8 > screen_fb_size)) ||
(par->synthvid_version == SYNTHVID_VERSION_WIN8 &&
x * y * screen_depth / 8 > SYNTHVID_FB_SIZE_WIN8) ||
(par->synthvid_version == SYNTHVID_VERSION_WIN7 &&
@@ -1193,8 +1183,8 @@ static int hvfb_probe(struct hv_device *hdev,
}
hvfb_get_option(info);
pr_info("Screen resolution: %dx%d, Color depth: %d\n",
screen_width, screen_height, screen_depth);
pr_info("Screen resolution: %dx%d, Color depth: %d, Frame buffer size: %d\n",
screen_width, screen_height, screen_depth, screen_fb_size);
ret = hvfb_getmem(hdev, info);
if (ret) {

View File

@@ -3103,10 +3103,8 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
inode_lock(inode);
err = btrfs_delete_subvolume(dir, dentry);
inode_unlock(inode);
if (!err) {
fsnotify_rmdir(dir, dentry);
d_delete(dentry);
}
if (!err)
d_delete_notify(dir, dentry);
out_dput:
dput(dentry);

View File

@@ -577,6 +577,7 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
struct ceph_inode_info *ci = ceph_inode(dir);
struct inode *inode;
struct timespec64 now;
struct ceph_string *pool_ns;
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
struct ceph_vino vino = { .ino = req->r_deleg_ino,
.snap = CEPH_NOSNAP };
@@ -626,6 +627,12 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
in.max_size = cpu_to_le64(lo->stripe_unit);
ceph_file_layout_to_legacy(lo, &in.layout);
/* lo is private, so pool_ns can't change */
pool_ns = rcu_dereference_raw(lo->pool_ns);
if (pool_ns) {
iinfo.pool_ns_len = pool_ns->len;
iinfo.pool_ns_data = pool_ns->str;
}
down_read(&mdsc->snap_rwsem);
ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session,
@@ -743,8 +750,10 @@ retry:
restore_deleg_ino(dir, req->r_deleg_ino);
ceph_mdsc_put_request(req);
try_async = false;
ceph_put_string(rcu_dereference_raw(lo.pool_ns));
goto retry;
}
ceph_put_string(rcu_dereference_raw(lo.pool_ns));
goto out_req;
}
}

View File

@@ -1820,8 +1820,8 @@ void configfs_unregister_group(struct config_group *group)
configfs_detach_group(&group->cg_item);
d_inode(dentry)->i_flags |= S_DEAD;
dont_mount(dentry);
d_drop(dentry);
fsnotify_rmdir(d_inode(parent), dentry);
d_delete(dentry);
inode_unlock(d_inode(parent));
dput(dentry);
@@ -1962,10 +1962,10 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
configfs_detach_group(&group->cg_item);
d_inode(dentry)->i_flags |= S_DEAD;
dont_mount(dentry);
fsnotify_rmdir(d_inode(root), dentry);
inode_unlock(d_inode(dentry));
d_delete(dentry);
d_drop(dentry);
fsnotify_rmdir(d_inode(root), dentry);
inode_unlock(d_inode(root));

View File

@@ -621,8 +621,8 @@ void devpts_pty_kill(struct dentry *dentry)
dentry->d_fsdata = NULL;
drop_nlink(dentry->d_inode);
fsnotify_unlink(d_inode(dentry->d_parent), dentry);
d_drop(dentry);
fsnotify_unlink(d_inode(dentry->d_parent), dentry);
dput(dentry); /* d_alloc_name() in devpts_pty_new() */
}

View File

@@ -2795,6 +2795,7 @@ struct journal_head *jbd2_journal_grab_journal_head(struct buffer_head *bh)
jbd_unlock_bh_journal_head(bh);
return jh;
}
EXPORT_SYMBOL(jbd2_journal_grab_journal_head);
static void __journal_remove_journal_head(struct buffer_head *bh)
{
@@ -2847,6 +2848,7 @@ void jbd2_journal_put_journal_head(struct journal_head *jh)
jbd_unlock_bh_journal_head(bh);
}
}
EXPORT_SYMBOL(jbd2_journal_put_journal_head);
/*
* Initialize jbd inode head

View File

@@ -3793,13 +3793,12 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
dentry->d_inode->i_flags |= S_DEAD;
dont_mount(dentry);
detach_mounts(dentry);
fsnotify_rmdir(dir, dentry);
out:
inode_unlock(dentry->d_inode);
dput(dentry);
if (!error)
d_delete(dentry);
d_delete_notify(dir, dentry);
return error;
}
EXPORT_SYMBOL_NS(vfs_rmdir, ANDROID_GKI_VFS_EXPORT_ONLY);
@@ -3909,7 +3908,6 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry, struct inode **delegate
if (!error) {
dont_mount(dentry);
detach_mounts(dentry);
fsnotify_unlink(dir, dentry);
}
}
}
@@ -3917,9 +3915,11 @@ out:
inode_unlock(target);
/* We don't d_delete() NFS sillyrenamed files--they still exist. */
if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
if (!error && dentry->d_flags & DCACHE_NFSFS_RENAMED) {
fsnotify_unlink(dir, dentry);
} else if (!error) {
fsnotify_link_count(target);
d_delete(dentry);
d_delete_notify(dir, dentry);
}
return error;

View File

@@ -1777,6 +1777,24 @@ out:
no_open:
res = nfs_lookup(dir, dentry, lookup_flags);
if (!res) {
inode = d_inode(dentry);
if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
!S_ISDIR(inode->i_mode))
res = ERR_PTR(-ENOTDIR);
else if (inode && S_ISREG(inode->i_mode))
res = ERR_PTR(-EOPENSTALE);
} else if (!IS_ERR(res)) {
inode = d_inode(res);
if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
!S_ISDIR(inode->i_mode)) {
dput(res);
res = ERR_PTR(-ENOTDIR);
} else if (inode && S_ISREG(inode->i_mode)) {
dput(res);
res = ERR_PTR(-EOPENSTALE);
}
}
if (switched) {
d_lookup_done(dentry);
if (!res)
@@ -2174,6 +2192,8 @@ nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
trace_nfs_link_enter(inode, dir, dentry);
d_drop(dentry);
if (S_ISREG(inode->i_mode))
nfs_sync_inode(inode);
error = NFS_PROTO(dir)->link(inode, dir, &dentry->d_name);
if (error == 0) {
ihold(inode);
@@ -2262,6 +2282,8 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
}
}
if (S_ISREG(old_inode->i_mode))
nfs_sync_inode(old_inode);
task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, NULL);
if (IS_ERR(task)) {
error = PTR_ERR(task);

View File

@@ -1247,7 +1247,8 @@ static void nfsdfs_remove_file(struct inode *dir, struct dentry *dentry)
clear_ncl(d_inode(dentry));
dget(dentry);
ret = simple_unlink(dir, dentry);
d_delete(dentry);
d_drop(dentry);
fsnotify_unlink(dir, dentry);
dput(dentry);
WARN_ON_ONCE(ret);
}
@@ -1336,8 +1337,8 @@ void nfsd_client_rmdir(struct dentry *dentry)
dget(dentry);
ret = simple_rmdir(dir, dentry);
WARN_ON_ONCE(ret);
d_drop(dentry);
fsnotify_rmdir(dir, dentry);
d_delete(dentry);
dput(dentry);
inode_unlock(dir);
}

View File

@@ -1253,26 +1253,23 @@ static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
{
struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
struct journal_head *jh;
int ret = 1;
int ret;
if (ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap))
return 0;
if (!buffer_jbd(bg_bh))
jh = jbd2_journal_grab_journal_head(bg_bh);
if (!jh)
return 1;
jbd_lock_bh_journal_head(bg_bh);
if (buffer_jbd(bg_bh)) {
jh = bh2jh(bg_bh);
spin_lock(&jh->b_state_lock);
bg = (struct ocfs2_group_desc *) jh->b_committed_data;
if (bg)
ret = !ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap);
else
ret = 1;
spin_unlock(&jh->b_state_lock);
}
jbd_unlock_bh_journal_head(bg_bh);
spin_lock(&jh->b_state_lock);
bg = (struct ocfs2_group_desc *) jh->b_committed_data;
if (bg)
ret = !ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap);
else
ret = 1;
spin_unlock(&jh->b_state_lock);
jbd2_journal_put_journal_head(jh);
return ret;
}

View File

@@ -257,10 +257,6 @@ int udf_expand_file_adinicb(struct inode *inode)
char *kaddr;
struct udf_inode_info *iinfo = UDF_I(inode);
int err;
struct writeback_control udf_wbc = {
.sync_mode = WB_SYNC_NONE,
.nr_to_write = 1,
};
WARN_ON_ONCE(!inode_is_locked(inode));
if (!iinfo->i_lenAlloc) {
@@ -304,8 +300,10 @@ int udf_expand_file_adinicb(struct inode *inode)
iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
/* from now on we have normal address_space methods */
inode->i_data.a_ops = &udf_aops;
set_page_dirty(page);
unlock_page(page);
up_write(&iinfo->i_data_sem);
err = inode->i_data.a_ops->writepage(page, &udf_wbc);
err = filemap_fdatawrite(inode->i_mapping);
if (err) {
/* Restore everything back so that we don't lose data... */
lock_page(page);
@@ -316,6 +314,7 @@ int udf_expand_file_adinicb(struct inode *inode)
unlock_page(page);
iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
inode->i_data.a_ops = &udf_adinicb_aops;
iinfo->i_lenAlloc = inode->i_size;
up_write(&iinfo->i_data_sem);
}
put_page(page);

View File

@@ -203,6 +203,42 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode,
fsnotify_name(dir, FS_CREATE, inode, &new_dentry->d_name, 0);
}
/*
* fsnotify_delete - @dentry was unlinked and unhashed
*
* Caller must make sure that dentry->d_name is stable.
*
* Note: unlike fsnotify_unlink(), we have to also pass the unlinked inode,
* as this may be called after d_delete() and the dentry may be negative.
*/
static inline void fsnotify_delete(struct inode *dir, struct inode *inode,
struct dentry *dentry)
{
__u32 mask = FS_DELETE;
if (S_ISDIR(inode->i_mode))
mask |= FS_ISDIR;
fsnotify_name(dir, mask, inode, &dentry->d_name, 0);
}
/**
* d_delete_notify - delete a dentry and call fsnotify_delete()
* @dir: inode of the parent directory
* @dentry: The dentry to delete
*
* This helper is used to guarantee that the unlinked inode cannot be found
* by lookup of this name after the fsnotify_delete() event has been delivered.
*/
static inline void d_delete_notify(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = d_inode(dentry);
ihold(inode);
d_delete(dentry);
fsnotify_delete(dir, inode, dentry);
iput(inode);
}
/*
* fsnotify_unlink - 'name' was unlinked
*
@@ -210,10 +246,10 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode,
*/
static inline void fsnotify_unlink(struct inode *dir, struct dentry *dentry)
{
/* Expected to be called before d_delete() */
WARN_ON_ONCE(d_is_negative(dentry));
if (WARN_ON_ONCE(d_is_negative(dentry)))
return;
fsnotify_dirent(dir, dentry, FS_DELETE);
fsnotify_delete(dir, d_inode(dentry), dentry);
}
/*
@@ -233,10 +269,10 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
*/
static inline void fsnotify_rmdir(struct inode *dir, struct dentry *dentry)
{
/* Expected to be called before d_delete() */
WARN_ON_ONCE(d_is_negative(dentry));
if (WARN_ON_ONCE(d_is_negative(dentry)))
return;
fsnotify_dirent(dir, dentry, FS_DELETE | FS_ISDIR);
fsnotify_delete(dir, d_inode(dentry), dentry);
}
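
The d_delete_notify() helper above pins the inode with ihold() before calling d_delete(), because d_delete() may leave the dentry negative; the pinned inode is then handed to fsnotify_delete() and only released afterwards. Below is a hedged userspace sketch of that pin -> unhash -> notify -> release ordering with a toy refcounted object; toy_inode, iget(), iput_() and notify_delete() are invented names, not kernel APIs.

/* Hedged userspace sketch of the ordering d_delete_notify() enforces. */
#include <stdio.h>
#include <stdlib.h>

struct toy_inode {
	unsigned long ino;
	int refcount;
};

static struct toy_inode *iget(struct toy_inode *inode)
{
	inode->refcount++;		/* models ihold() */
	return inode;
}

static void iput_(struct toy_inode *inode)
{
	if (--inode->refcount == 0) {
		printf("inode %lu freed\n", inode->ino);
		free(inode);
	}
}

static void notify_delete(struct toy_inode *inode)
{
	/* The event needs a valid inode even though the name is gone. */
	printf("FS_DELETE for inode %lu\n", inode->ino);
}

int main(void)
{
	struct toy_inode *inode = malloc(sizeof(*inode));

	inode->ino = 42;
	inode->refcount = 1;		/* reference held via the dentry */

	/* d_delete_notify(): pin the inode first ... */
	iget(inode);
	/* ... d_delete() unhashes the name and drops the dentry's
	 * reference; without the pin above the inode could be gone here. */
	iput_(inode);
	/* ... deliver the event with the pinned inode, then release it. */
	notify_delete(inode);
	iput_(inode);
	return 0;
}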
/*

View File

@@ -2585,6 +2585,7 @@ struct packet_type {
struct net_device *);
bool (*id_match)(struct packet_type *ptype,
struct sock *sk);
struct net *af_packet_net;
void *af_packet_priv;
struct list_head list;

View File

@@ -678,18 +678,6 @@ struct perf_event {
u64 total_time_running;
u64 tstamp;
/*
* timestamp shadows the actual context timing but it can
* be safely used in NMI interrupt context. It reflects the
* context time as it was when the event was last scheduled in,
* or when ctx_sched_in failed to schedule the event because we
* run out of PMC.
*
* ctx_time already accounts for ctx->timestamp. Therefore to
* compute ctx_time for a sample, simply add perf_clock().
*/
u64 shadow_ctx_time;
struct perf_event_attr attr;
u16 header_size;
u16 id_header_size;
@@ -834,6 +822,7 @@ struct perf_event_context {
*/
u64 time;
u64 timestamp;
u64 timeoffset;
/*
* These fields let us detect when two contexts have both
@@ -916,6 +905,8 @@ struct bpf_perf_event_data_kern {
struct perf_cgroup_info {
u64 time;
u64 timestamp;
u64 timeoffset;
int active;
};
struct perf_cgroup {

View File

@@ -90,6 +90,12 @@ fwnode_usb_role_switch_get(struct fwnode_handle *node)
static inline void usb_role_switch_put(struct usb_role_switch *sw) { }
static inline struct usb_role_switch *
usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode)
{
return NULL;
}
static inline struct usb_role_switch *
usb_role_switch_register(struct device *parent,
const struct usb_role_switch_desc *desc)

View File

@@ -6,6 +6,8 @@
#define RTR_SOLICITATION_INTERVAL (4*HZ)
#define RTR_SOLICITATION_MAX_INTERVAL (3600*HZ) /* 1 hour */
#define MIN_VALID_LIFETIME (2*3600) /* 2 hours */
#define TEMP_VALID_LIFETIME (7*86400)
#define TEMP_PREFERRED_LIFETIME (86400)
#define REGEN_MAX_RETRY (3)

View File

@@ -520,19 +520,18 @@ static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
{
struct iphdr *iph = ip_hdr(skb);
/* We had many attacks based on IPID, use the private
* generator as much as we can.
*/
if (sk && inet_sk(sk)->inet_daddr) {
iph->id = htons(inet_sk(sk)->inet_id);
inet_sk(sk)->inet_id += segs;
return;
}
if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
/* This is only to work around buggy Windows95/2000
* VJ compression implementations. If the ID field
* does not change, they drop every other packet in
* a TCP stream using header compression.
*/
if (sk && inet_sk(sk)->inet_daddr) {
iph->id = htons(inet_sk(sk)->inet_id);
inet_sk(sk)->inet_id += segs;
} else {
iph->id = 0;
}
iph->id = 0;
} else {
/* Unfortunately we need the big hammer to get a suitable IPID */
__ip_select_ident(net, iph, segs);
}
}
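
The rewritten ip_select_ident_segs() above prefers the private per-socket counter for connected sockets, keeps a constant zero ID for atomic (DF, non-fragmentable) datagrams, and only falls back to the shared hashed generator when fragmentation is possible. A hedged userspace model of that decision follows; select_ip_id(), mini_sock and mini_iph are invented for illustration, and hashed_ident() merely stands in for __ip_select_ident().

/* Hedged userspace model of the IP-ID policy in the hunk above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mini_sock { bool connected; uint16_t inet_id; };
struct mini_iph  { bool df; bool ignore_df; uint16_t id; };

static uint16_t hashed_ident(void)
{
	static uint16_t fake = 0x1234;	/* placeholder for the hashed generator */
	return fake++;
}

static void select_ip_id(struct mini_sock *sk, struct mini_iph *iph, int segs)
{
	if (sk && sk->connected) {
		/* connected socket: private per-socket counter */
		iph->id = sk->inet_id;
		sk->inet_id += segs;
		return;
	}
	if (iph->df && !iph->ignore_df) {
		/* atomic datagram: no fragmentation, ID can stay 0 */
		iph->id = 0;
	} else {
		/* may be fragmented: fall back to the hashed generator */
		iph->id = hashed_ident();
	}
}

int main(void)
{
	struct mini_sock sk = { .connected = true, .inet_id = 100 };
	struct mini_iph iph = { .df = true };

	select_ip_id(&sk, &iph, 3);
	printf("connected: id=%u, next inet_id=%u\n", iph.id, sk.inet_id);

	select_ip_id(NULL, &iph, 1);
	printf("unconnected, DF set: id=%u\n", iph.id);
	return 0;
}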

View File

@@ -290,7 +290,7 @@ static inline bool fib6_get_cookie_safe(const struct fib6_info *f6i,
fn = rcu_dereference(f6i->fib6_node);
if (fn) {
*cookie = fn->fn_sernum;
*cookie = READ_ONCE(fn->fn_sernum);
/* pairs with smp_wmb() in fib6_update_sernum_upto_root() */
smp_rmb();
status = true;

View File

@@ -369,7 +369,7 @@ static inline struct neighbour *ip_neigh_gw4(struct net_device *dev,
{
struct neighbour *neigh;
neigh = __ipv4_neigh_lookup_noref(dev, daddr);
neigh = __ipv4_neigh_lookup_noref(dev, (__force u32)daddr);
if (unlikely(!neigh))
neigh = __neigh_create(&arp_tbl, &daddr, dev, false);

View File

@@ -664,13 +664,14 @@ BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
u32, size, u64, flags)
{
struct pt_regs *regs;
long res;
long res = -EINVAL;
if (!try_get_task_stack(task))
return -EFAULT;
regs = task_pt_regs(task);
res = __bpf_get_stack(regs, task, NULL, buf, size, flags);
if (regs)
res = __bpf_get_stack(regs, task, NULL, buf, size, flags);
put_task_stack(task);
return res;

View File

@@ -266,7 +266,7 @@ static void event_function_call(struct perf_event *event, event_f func, void *da
if (!event->parent) {
/*
* If this is a !child event, we must hold ctx::mutex to
* stabilize the the event->ctx relation. See
* stabilize the event->ctx relation. See
* perf_event_ctx_lock().
*/
lockdep_assert_held(&ctx->mutex);
@@ -673,6 +673,23 @@ perf_event_set_state(struct perf_event *event, enum perf_event_state state)
WRITE_ONCE(event->state, state);
}
/*
* UP store-release, load-acquire
*/
#define __store_release(ptr, val) \
do { \
barrier(); \
WRITE_ONCE(*(ptr), (val)); \
} while (0)
#define __load_acquire(ptr) \
({ \
__unqual_scalar_typeof(*(ptr)) ___p = READ_ONCE(*(ptr)); \
barrier(); \
___p; \
})
#ifdef CONFIG_CGROUP_PERF
static inline bool
@@ -718,34 +735,51 @@ static inline u64 perf_cgroup_event_time(struct perf_event *event)
return t->time;
}
static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now)
{
struct perf_cgroup_info *info;
u64 now;
struct perf_cgroup_info *t;
now = perf_clock();
info = this_cpu_ptr(cgrp->info);
info->time += now - info->timestamp;
info->timestamp = now;
t = per_cpu_ptr(event->cgrp->info, event->cpu);
if (!__load_acquire(&t->active))
return t->time;
now += READ_ONCE(t->timeoffset);
return now;
}
static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
static inline void __update_cgrp_time(struct perf_cgroup_info *info, u64 now, bool adv)
{
if (adv)
info->time += now - info->timestamp;
info->timestamp = now;
/*
* see update_context_time()
*/
WRITE_ONCE(info->timeoffset, info->time - info->timestamp);
}
static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx, bool final)
{
struct perf_cgroup *cgrp = cpuctx->cgrp;
struct cgroup_subsys_state *css;
struct perf_cgroup_info *info;
if (cgrp) {
u64 now = perf_clock();
for (css = &cgrp->css; css; css = css->parent) {
cgrp = container_of(css, struct perf_cgroup, css);
__update_cgrp_time(cgrp);
info = this_cpu_ptr(cgrp->info);
__update_cgrp_time(info, now, true);
if (final)
__store_release(&info->active, 0);
}
}
}
static inline void update_cgrp_time_from_event(struct perf_event *event)
{
struct perf_cgroup_info *info;
struct perf_cgroup *cgrp;
/*
@@ -759,8 +793,10 @@ static inline void update_cgrp_time_from_event(struct perf_event *event)
/*
* Do not update time when cgroup is not active
*/
if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
__update_cgrp_time(event->cgrp);
if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) {
info = this_cpu_ptr(event->cgrp->info);
__update_cgrp_time(info, perf_clock(), true);
}
}
static inline void
@@ -784,7 +820,8 @@ perf_cgroup_set_timestamp(struct task_struct *task,
for (css = &cgrp->css; css; css = css->parent) {
cgrp = container_of(css, struct perf_cgroup, css);
info = this_cpu_ptr(cgrp->info);
info->timestamp = ctx->timestamp;
__update_cgrp_time(info, ctx->timestamp, false);
__store_release(&info->active, 1);
}
}
@@ -980,14 +1017,6 @@ out:
return ret;
}
static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
struct perf_cgroup_info *t;
t = per_cpu_ptr(event->cgrp->info, event->cpu);
event->shadow_ctx_time = now - t->timestamp;
}
static inline void
perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
{
@@ -1065,7 +1094,8 @@ static inline void update_cgrp_time_from_event(struct perf_event *event)
{
}
static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx,
bool final)
{
}
@@ -1097,12 +1127,12 @@ perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
{
}
static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
return 0;
}
static inline u64 perf_cgroup_event_time(struct perf_event *event)
static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now)
{
return 0;
}
@@ -1300,7 +1330,7 @@ static void put_ctx(struct perf_event_context *ctx)
* life-time rules separate them. That is an exiting task cannot fork, and a
* spawning task cannot (yet) exit.
*
* But remember that that these are parent<->child context relations, and
* But remember that these are parent<->child context relations, and
* migration does not affect children, therefore these two orderings should not
* interact.
*
@@ -1439,7 +1469,7 @@ static u64 primary_event_id(struct perf_event *event)
/*
* Get the perf_event_context for a task and lock it.
*
* This has to cope with with the fact that until it is locked,
* This has to cope with the fact that until it is locked,
* the context could get moved to another task.
*/
static struct perf_event_context *
@@ -1524,22 +1554,59 @@ static void perf_unpin_context(struct perf_event_context *ctx)
/*
* Update the record of the current time in a context.
*/
static void update_context_time(struct perf_event_context *ctx)
static void __update_context_time(struct perf_event_context *ctx, bool adv)
{
u64 now = perf_clock();
ctx->time += now - ctx->timestamp;
if (adv)
ctx->time += now - ctx->timestamp;
ctx->timestamp = now;
/*
* The above: time' = time + (now - timestamp), can be re-arranged
* into: time' = now + (time - timestamp), which gives a single value
* offset to compute future time without locks on.
*
* See perf_event_time_now(), which can be used from NMI context where
* it's (obviously) not possible to acquire ctx->lock in order to read
* both the above values in a consistent manner.
*/
WRITE_ONCE(ctx->timeoffset, ctx->time - ctx->timestamp);
}
static void update_context_time(struct perf_event_context *ctx)
{
__update_context_time(ctx, true);
}
static u64 perf_event_time(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
if (unlikely(!ctx))
return 0;
if (is_cgroup_event(event))
return perf_cgroup_event_time(event);
return ctx ? ctx->time : 0;
return ctx->time;
}
static u64 perf_event_time_now(struct perf_event *event, u64 now)
{
struct perf_event_context *ctx = event->ctx;
if (unlikely(!ctx))
return 0;
if (is_cgroup_event(event))
return perf_cgroup_event_time_now(event, now);
if (!(__load_acquire(&ctx->is_active) & EVENT_TIME))
return ctx->time;
now += READ_ONCE(ctx->timeoffset);
return now;
}
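
The comment in __update_context_time() above rearranges time' = time + (now - timestamp) into time' = now + (time - timestamp), so a single cached timeoffset lets perf_event_time_now() produce an up-to-date context time from NMI context without taking ctx->lock. The short sketch below only verifies that the two forms agree under unsigned wrap-around; the sample timestamps are made up and stand in for perf_clock() readings.

/* Hedged arithmetic check of the timeoffset identity used above. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t time = 500;		/* accumulated ctx->time             */
	uint64_t timestamp = 1000;	/* ctx->timestamp at the last update */
	uint64_t timeoffset = time - timestamp;	/* cached; wraps harmlessly  */
	uint64_t now = 1300;		/* a later perf_clock() reading      */

	/* Locked update path: time' = time + (now - timestamp)             */
	uint64_t locked = time + (now - timestamp);
	/* Lock-free reader:   time' = now + timeoffset                     */
	uint64_t lockless = now + timeoffset;

	assert(locked == lockless);
	printf("ctx time at now=%llu is %llu either way\n",
	       (unsigned long long)now, (unsigned long long)locked);
	return 0;
}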
static enum event_type_t get_event_type(struct perf_event *event)
@@ -2333,7 +2400,7 @@ __perf_remove_from_context(struct perf_event *event,
if (ctx->is_active & EVENT_TIME) {
update_context_time(ctx);
update_cgrp_time_from_cpuctx(cpuctx);
update_cgrp_time_from_cpuctx(cpuctx, false);
}
event_sched_out(event, cpuctx, ctx);
@@ -2342,6 +2409,9 @@ __perf_remove_from_context(struct perf_event *event,
list_del_event(event, ctx);
if (!ctx->nr_events && ctx->is_active) {
if (ctx == &cpuctx->ctx)
update_cgrp_time_from_cpuctx(cpuctx, true);
ctx->is_active = 0;
ctx->rotate_necessary = 0;
if (ctx->task) {
@@ -2467,40 +2537,6 @@ void perf_event_disable_inatomic(struct perf_event *event)
irq_work_queue(&event->pending);
}
static void perf_set_shadow_time(struct perf_event *event,
struct perf_event_context *ctx)
{
/*
* use the correct time source for the time snapshot
*
* We could get by without this by leveraging the
* fact that to get to this function, the caller
* has most likely already called update_context_time()
* and update_cgrp_time_xx() and thus both timestamp
* are identical (or very close). Given that tstamp is,
* already adjusted for cgroup, we could say that:
* tstamp - ctx->timestamp
* is equivalent to
* tstamp - cgrp->timestamp.
*
* Then, in perf_output_read(), the calculation would
* work with no changes because:
* - event is guaranteed scheduled in
* - no scheduled out in between
* - thus the timestamp would be the same
*
* But this is a bit hairy.
*
* So instead, we have an explicit cgroup call to remain
* within the time time source all along. We believe it
* is cleaner and simpler to understand.
*/
if (is_cgroup_event(event))
perf_cgroup_set_shadow_time(event, event->tstamp);
else
event->shadow_ctx_time = event->tstamp - ctx->timestamp;
}
#define MAX_INTERRUPTS (~0ULL)
static void perf_log_throttle(struct perf_event *event, int enable);
@@ -2541,8 +2577,6 @@ event_sched_in(struct perf_event *event,
perf_pmu_disable(event->pmu);
perf_set_shadow_time(event, ctx);
perf_log_itrace_start(event);
if (event->pmu->add(event, PERF_EF_START)) {
@@ -3216,16 +3250,6 @@ static void ctx_sched_out(struct perf_event_context *ctx,
return;
}
ctx->is_active &= ~event_type;
if (!(ctx->is_active & EVENT_ALL))
ctx->is_active = 0;
if (ctx->task) {
WARN_ON_ONCE(cpuctx->task_ctx != ctx);
if (!ctx->is_active)
cpuctx->task_ctx = NULL;
}
/*
* Always update time if it was set; not only when it changes.
* Otherwise we can 'forget' to update time for any but the last
@@ -3239,7 +3263,22 @@ static void ctx_sched_out(struct perf_event_context *ctx,
if (is_active & EVENT_TIME) {
/* update (and stop) ctx time */
update_context_time(ctx);
update_cgrp_time_from_cpuctx(cpuctx);
update_cgrp_time_from_cpuctx(cpuctx, ctx == &cpuctx->ctx);
/*
* CPU-release for the below ->is_active store,
* see __load_acquire() in perf_event_time_now()
*/
barrier();
}
ctx->is_active &= ~event_type;
if (!(ctx->is_active & EVENT_ALL))
ctx->is_active = 0;
if (ctx->task) {
WARN_ON_ONCE(cpuctx->task_ctx != ctx);
if (!ctx->is_active)
cpuctx->task_ctx = NULL;
}
is_active ^= ctx->is_active; /* changed bits */
@@ -3676,13 +3715,19 @@ static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx,
return 0;
}
/*
* Because the userpage is strictly per-event (there is no concept of context,
* so there cannot be a context indirection), every userpage must be updated
* when context time starts :-(
*
* IOW, we must not miss EVENT_TIME edges.
*/
static inline bool event_update_userpage(struct perf_event *event)
{
if (likely(!atomic_read(&event->mmap_count)))
return false;
perf_event_update_time(event);
perf_set_shadow_time(event, event->ctx);
perf_event_update_userpage(event);
return true;
@@ -3766,13 +3811,23 @@ ctx_sched_in(struct perf_event_context *ctx,
struct task_struct *task)
{
int is_active = ctx->is_active;
u64 now;
lockdep_assert_held(&ctx->lock);
if (likely(!ctx->nr_events))
return;
if (is_active ^ EVENT_TIME) {
/* start ctx time */
__update_context_time(ctx, false);
perf_cgroup_set_timestamp(task, ctx);
/*
* CPU-release for the below ->is_active store,
* see __load_acquire() in perf_event_time_now()
*/
barrier();
}
ctx->is_active |= (event_type | EVENT_TIME);
if (ctx->task) {
if (!is_active)
@@ -3783,13 +3838,6 @@ ctx_sched_in(struct perf_event_context *ctx,
is_active ^= ctx->is_active; /* changed bits */
if (is_active & EVENT_TIME) {
/* start ctx time */
now = perf_clock();
ctx->timestamp = now;
perf_cgroup_set_timestamp(task, ctx);
}
/*
* First go through the list and put on any pinned groups
* in order to give them the best chance of going on.
@@ -4325,6 +4373,18 @@ static inline u64 perf_event_count(struct perf_event *event)
return local64_read(&event->count) + atomic64_read(&event->child_count);
}
static void calc_timer_values(struct perf_event *event,
u64 *now,
u64 *enabled,
u64 *running)
{
u64 ctx_time;
*now = perf_clock();
ctx_time = perf_event_time_now(event, *now);
__perf_update_times(event, ctx_time, enabled, running);
}
/*
* NMI-safe method to read a local event, that is an event that
* is:
@@ -4384,10 +4444,9 @@ int perf_event_read_local(struct perf_event *event, u64 *value,
*value = local64_read(&event->count);
if (enabled || running) {
u64 now = event->shadow_ctx_time + perf_clock();
u64 __enabled, __running;
u64 __enabled, __running, __now;
__perf_update_times(event, now, &__enabled, &__running);
calc_timer_values(event, &__now, &__enabled, &__running);
if (enabled)
*enabled = __enabled;
if (running)
@@ -5695,18 +5754,6 @@ static int perf_event_index(struct perf_event *event)
return event->pmu->event_idx(event);
}
static void calc_timer_values(struct perf_event *event,
u64 *now,
u64 *enabled,
u64 *running)
{
u64 ctx_time;
*now = perf_clock();
ctx_time = event->shadow_ctx_time + *now;
__perf_update_times(event, ctx_time, enabled, running);
}
static void perf_event_init_userpage(struct perf_event *event)
{
struct perf_event_mmap_page *userpg;
@@ -6246,7 +6293,6 @@ accounting:
ring_buffer_attach(event, rb);
perf_event_update_time(event);
perf_set_shadow_time(event, event->ctx);
perf_event_init_userpage(event);
perf_event_update_userpage(event);
} else {

View File

@@ -1735,7 +1735,7 @@ void uprobe_free_utask(struct task_struct *t)
}
/*
* Allocate a uprobe_task object for the task if if necessary.
* Allocate a uprobe_task object for the task if necessary.
* Called when the thread hits a breakpoint.
*
* Returns:

View File

@@ -1441,7 +1441,7 @@ rt_mutex_fasttrylock(struct rt_mutex *lock,
}
/*
* Performs the wakeup of the the top-waiter and re-enables preemption.
* Performs the wakeup of the top-waiter and re-enables preemption.
*/
void rt_mutex_postunlock(struct wake_q_head *wake_q)
{
@@ -1835,7 +1835,7 @@ struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
* been started.
* @waiter: the pre-initialized rt_mutex_waiter
*
* Wait for the the lock acquisition started on our behalf by
* Wait for the lock acquisition started on our behalf by
* rt_mutex_start_proxy_lock(). Upon failure, the caller must call
* rt_mutex_cleanup_proxy_lock().
*

View File

@@ -1182,7 +1182,7 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
/*
* If there were already threads queued before us and:
* 1) there are no no active locks, wake the front
* 1) there are no active locks, wake the front
* queued process(es) as the handoff bit might be set.
* 2) there are no active writers and some readers, the lock
* must be read owned; so we try to wake any read lock

View File

@@ -119,7 +119,7 @@ EXPORT_SYMBOL(down_killable);
* @sem: the semaphore to be acquired
*
* Try to acquire the semaphore atomically. Returns 0 if the semaphore has
* been acquired successfully or 1 if it it cannot be acquired.
* been acquired successfully or 1 if it cannot be acquired.
*
* NOTE: This return value is inverted from both spin_trylock and
* mutex_trylock! Be careful about this when converting code.

View File

@@ -39,23 +39,20 @@ ssize_t pm_show_wakelocks(char *buf, bool show_active)
{
struct rb_node *node;
struct wakelock *wl;
char *str = buf;
char *end = buf + PAGE_SIZE;
int len = 0;
mutex_lock(&wakelocks_lock);
for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) {
wl = rb_entry(node, struct wakelock, node);
if (wl->ws->active == show_active)
str += scnprintf(str, end - str, "%s ", wl->name);
len += sysfs_emit_at(buf, len, "%s ", wl->name);
}
if (str > buf)
str--;
str += scnprintf(str, end - str, "\n");
len += sysfs_emit_at(buf, len, "\n");
mutex_unlock(&wakelocks_lock);
return (str - buf);
return len;
}
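
The rewrite above replaces manual scnprintf() pointer arithmetic with sysfs_emit_at(), which appends at a byte offset into the PAGE_SIZE sysfs buffer and returns the number of bytes written, so the caller only accumulates a length. The userspace sketch below models that accumulate-by-offset pattern with vsnprintf(); emit_at() and BUF_SIZE are stand-ins, not the kernel helper.

/* Hedged userspace model of the sysfs_emit_at() accumulation pattern. */
#include <stdarg.h>
#include <stdio.h>

#define BUF_SIZE 4096			/* models the PAGE_SIZE sysfs buffer */

static int emit_at(char *buf, int at, const char *fmt, ...)
{
	va_list ap;
	int n;

	if (at < 0 || at >= BUF_SIZE)
		return 0;		/* never write past the buffer */
	va_start(ap, fmt);
	n = vsnprintf(buf + at, BUF_SIZE - at, fmt, ap);
	va_end(ap);
	if (n < 0)
		return 0;
	return n < BUF_SIZE - at ? n : BUF_SIZE - at - 1;
}

int main(void)
{
	static const char *names[] = { "wl-audio", "wl-modem", "wl-wifi" };
	char buf[BUF_SIZE];
	int len = 0;
	unsigned int i;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		len += emit_at(buf, len, "%s ", names[i]);
	len += emit_at(buf, len, "\n");

	printf("%d bytes: %s", len, buf);
	return 0;
}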
#if CONFIG_PM_WAKELOCKS_LIMIT > 0

View File

@@ -3389,7 +3389,6 @@ void set_task_rq_fair(struct sched_entity *se,
se->avg.last_update_time = n_last_update_time;
}
/*
* When on migration a sched_entity joins/leaves the PELT hierarchy, we need to
* propagate its contribution. The key to this propagation is the invariant
@@ -3457,7 +3456,6 @@ void set_task_rq_fair(struct sched_entity *se,
* XXX: only do this for the part of runnable > running ?
*
*/
static inline void
update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
{
@@ -3688,6 +3686,18 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
r = removed_util;
sub_positive(&sa->util_avg, r);
sub_positive(&sa->util_sum, r * divider);
/*
* Because of rounding, se->util_sum might end up being +1 more than
* cfs->util_sum. Although this is not a problem by itself, detaching
* a lot of tasks with the rounding problem between 2 updates of
* util_avg (~1ms) can make cfs->util_sum become zero whereas
* cfs->util_avg is not.
* Check that util_sum is still above its lower bound for the new
* util_avg. Given that period_contrib might have moved since the last
* sync, we are only sure that util_sum must be above or equal to
* util_avg * minimum possible divider
*/
sa->util_sum = max_t(u32, sa->util_sum, sa->util_avg * PELT_MIN_DIVIDER);
r = removed_runnable;
sub_positive(&sa->runnable_avg, r);
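
The clamp above keeps util_sum from drifting below util_avg * PELT_MIN_DIVIDER when repeated detaches each lose up to one unit to rounding; PELT_MIN_DIVIDER is LOAD_AVG_MAX - 1024, i.e. the divider with period_contrib == 0 (see the pelt.h hunk later in this diff). The arithmetic sketch below uses made-up util_avg/util_sum values; LOAD_AVG_MAX mirrors the kernel's PELT constant.

/* Hedged arithmetic sketch of the util_sum lower bound applied above. */
#include <stdint.h>
#include <stdio.h>

#define LOAD_AVG_MAX		47742
#define PELT_MIN_DIVIDER	(LOAD_AVG_MAX - 1024)

static uint32_t max_u32(uint32_t a, uint32_t b) { return a > b ? a : b; }

int main(void)
{
	uint32_t period_contrib = 512;			/* 0..1023 */
	uint32_t divider = PELT_MIN_DIVIDER + period_contrib; /* get_pelt_divider() */
	uint32_t util_avg = 100;

	/* Many detaches, each rounding util_sum down by ~1, could otherwise
	 * push util_sum far below what util_avg implies (made-up value). */
	uint32_t drifted_util_sum = 3000;

	uint32_t clamped = max_u32(drifted_util_sum,
				   util_avg * PELT_MIN_DIVIDER);

	printf("divider=%u, util_avg*min_divider=%u, clamped util_sum=%u\n",
	       divider, util_avg * PELT_MIN_DIVIDER, clamped);
	return 0;
}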
@@ -5180,7 +5190,7 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
/*
* When a group wakes up we want to make sure that its quota is not already
* expired/exceeded, otherwise it may be allowed to steal additional ticks of
* runtime as update_curr() throttling can not not trigger until it's on-rq.
* runtime as update_curr() throttling can not trigger until it's on-rq.
*/
static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
{

View File

@@ -19,11 +19,11 @@
#endif
#ifdef CONFIG_RSEQ
#define MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ_BITMASK \
#define MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK \
(MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ \
| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ_BITMASK)
| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ)
#else
#define MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ_BITMASK 0
#define MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK 0
#endif
#define MEMBARRIER_CMD_BITMASK \
@@ -31,7 +31,8 @@
| MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED \
| MEMBARRIER_CMD_PRIVATE_EXPEDITED \
| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED \
| MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK)
| MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK \
| MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK)
static void ipi_mb(void *info)
{
@@ -315,7 +316,7 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm)
/*
* For each cpu runqueue, if the task's mm match @mm, ensure that all
* @mm's membarrier state set bits are also set in in the runqueue's
* @mm's membarrier state set bits are also set in the runqueue's
* membarrier state. This ensures that a runqueue scheduling
* between threads which are users of @mm has its membarrier state
* updated.

View File

@@ -37,9 +37,11 @@ update_irq_load_avg(struct rq *rq, u64 running)
}
#endif
#define PELT_MIN_DIVIDER (LOAD_AVG_MAX - 1024)
static inline u32 get_pelt_divider(struct sched_avg *avg)
{
return LOAD_AVG_MAX - 1024 + avg->period_contrib;
return PELT_MIN_DIVIDER + avg->period_contrib;
}
static inline void cfs_se_util_change(struct sched_avg *avg)

View File

@@ -7258,7 +7258,8 @@ static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
err = kzalloc(sizeof(*err), GFP_KERNEL);
if (!err)
err = ERR_PTR(-ENOMEM);
tr->n_err_log_entries++;
else
tr->n_err_log_entries++;
return err;
}

View File

@@ -3506,6 +3506,7 @@ static int trace_action_create(struct hist_trigger_data *hist_data,
var_ref_idx = find_var_ref_idx(hist_data, var_ref);
if (WARN_ON(var_ref_idx < 0)) {
kfree(p);
ret = var_ref_idx;
goto err;
}

View File

@@ -5661,6 +5661,11 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
struct hci_ev_le_advertising_info *ev = ptr;
s8 rssi;
if (ptr > (void *)skb_tail_pointer(skb) - sizeof(*ev)) {
bt_dev_err(hdev, "Malicious advertising data.");
break;
}
if (ev->length <= HCI_MAX_AD_LENGTH &&
ev->data + ev->length <= skb_tail_pointer(skb)) {
rssi = ev->data[ev->length];
@@ -5672,11 +5677,6 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
}
ptr += sizeof(*ev) + ev->length + 1;
if (ptr > (void *) skb_tail_pointer(skb) - sizeof(*ev)) {
bt_dev_err(hdev, "Malicious advertising data. Stopping processing");
break;
}
}
hci_dev_unlock(hdev);
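
The fix above validates that a full hci_ev_le_advertising_info header still fits in the remaining skb before dereferencing it, instead of checking only after ptr has been advanced. Below is a hedged userspace model of the same check-before-read pattern for a length-prefixed record stream; struct record, parse_records() and the truncated sample buffer are invented for illustration.

/* Hedged userspace model of the header-fits-before-dereference check. */
#include <stdint.h>
#include <stdio.h>

struct record {
	uint8_t type;
	uint8_t length;			/* length of data[] that follows */
	uint8_t data[];
};

static void parse_records(const uint8_t *buf, size_t len)
{
	const uint8_t *ptr = buf;
	const uint8_t *tail = buf + len;

	while (ptr < tail) {
		const struct record *rec = (const struct record *)ptr;

		/* Check the fixed header fits BEFORE reading its fields. */
		if ((size_t)(tail - ptr) < sizeof(*rec)) {
			printf("malformed record header, stopping\n");
			break;
		}
		/* Check the advertised payload also fits. */
		if (rec->length > (size_t)(tail - ptr) - sizeof(*rec)) {
			printf("payload overruns buffer, stopping\n");
			break;
		}
		printf("record type %u with %u byte(s) of data\n",
		       rec->type, rec->length);
		ptr += sizeof(*rec) + rec->length;
	}
}

int main(void)
{
	/* Two valid records followed by a truncated one. */
	const uint8_t buf[] = { 0x01, 2, 0xaa, 0xbb, 0x02, 1, 0xcc, 0x03 };

	parse_records(buf, sizeof(buf));
	return 0;
}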

View File

@@ -543,10 +543,10 @@ static bool __allowed_ingress(const struct net_bridge *br,
if (!br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
if (*state == BR_STATE_FORWARDING) {
*state = br_vlan_get_pvid_state(vg);
return br_vlan_state_allowed(*state, true);
} else {
return true;
if (!br_vlan_state_allowed(*state, true))
goto drop;
}
return true;
}
}
v = br_vlan_find(vg, *vid);
@@ -1873,7 +1873,8 @@ static int br_vlan_rtm_dump(struct sk_buff *skb, struct netlink_callback *cb)
goto out_err;
}
err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
if (err && err != -EMSGSIZE)
/* if the dump completed without an error we return 0 here */
if (err != -EMSGSIZE)
goto out_err;
} else {
for_each_netdev_rcu(net, dev) {

View File

@@ -193,12 +193,23 @@ static const struct seq_operations softnet_seq_ops = {
.show = softnet_seq_show,
};
static void *ptype_get_idx(loff_t pos)
static void *ptype_get_idx(struct seq_file *seq, loff_t pos)
{
struct list_head *ptype_list = NULL;
struct packet_type *pt = NULL;
struct net_device *dev;
loff_t i = 0;
int t;
for_each_netdev_rcu(seq_file_net(seq), dev) {
ptype_list = &dev->ptype_all;
list_for_each_entry_rcu(pt, ptype_list, list) {
if (i == pos)
return pt;
++i;
}
}
list_for_each_entry_rcu(pt, &ptype_all, list) {
if (i == pos)
return pt;
@@ -219,22 +230,40 @@ static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
rcu_read_lock();
return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
return *pos ? ptype_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct net_device *dev;
struct packet_type *pt;
struct list_head *nxt;
int hash;
++*pos;
if (v == SEQ_START_TOKEN)
return ptype_get_idx(0);
return ptype_get_idx(seq, 0);
pt = v;
nxt = pt->list.next;
if (pt->dev) {
if (nxt != &pt->dev->ptype_all)
goto found;
dev = pt->dev;
for_each_netdev_continue_rcu(seq_file_net(seq), dev) {
if (!list_empty(&dev->ptype_all)) {
nxt = dev->ptype_all.next;
goto found;
}
}
nxt = ptype_all.next;
goto ptype_all;
}
if (pt->type == htons(ETH_P_ALL)) {
ptype_all:
if (nxt != &ptype_all)
goto found;
hash = 0;
@@ -263,7 +292,8 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
if (v == SEQ_START_TOKEN)
seq_puts(seq, "Type Device Function\n");
else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
else if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) &&
(!pt->dev || net_eq(dev_net(pt->dev), seq_file_net(seq)))) {
if (pt->type == htons(ETH_P_ALL))
seq_puts(seq, "ALL ");
else

View File

@@ -162,12 +162,19 @@ int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
iph->saddr = saddr;
iph->protocol = sk->sk_protocol;
if (ip_dont_fragment(sk, &rt->dst)) {
/* Do not bother generating IPID for small packets (eg SYNACK) */
if (skb->len <= IPV4_MIN_MTU || ip_dont_fragment(sk, &rt->dst)) {
iph->frag_off = htons(IP_DF);
iph->id = 0;
} else {
iph->frag_off = 0;
__ip_select_ident(net, iph, 1);
/* TCP packets here are SYNACK with fat IPv4/TCP options.
* Avoid using the hashed IP ident generator.
*/
if (sk->sk_protocol == IPPROTO_TCP)
iph->id = (__force __be16)prandom_u32();
else
__ip_select_ident(net, iph, 1);
}
if (opt && opt->opt.optlen) {
@@ -614,18 +621,6 @@ void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
}
EXPORT_SYMBOL(ip_fraglist_init);
static void ip_fraglist_ipcb_prepare(struct sk_buff *skb,
struct ip_fraglist_iter *iter)
{
struct sk_buff *to = iter->frag;
/* Copy the flags to each fragment. */
IPCB(to)->flags = IPCB(skb)->flags;
if (iter->offset == 0)
ip_options_fragment(to);
}
void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter)
{
unsigned int hlen = iter->hlen;
@@ -671,7 +666,7 @@ void ip_frag_init(struct sk_buff *skb, unsigned int hlen,
EXPORT_SYMBOL(ip_frag_init);
static void ip_frag_ipcb(struct sk_buff *from, struct sk_buff *to,
bool first_frag, struct ip_frag_state *state)
bool first_frag)
{
/* Copy the flags to each fragment. */
IPCB(to)->flags = IPCB(from)->flags;
@@ -850,8 +845,20 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
/* Prepare header of the next frame,
* before previous one went down. */
if (iter.frag) {
ip_fraglist_ipcb_prepare(skb, &iter);
bool first_frag = (iter.offset == 0);
IPCB(iter.frag)->flags = IPCB(skb)->flags;
ip_fraglist_prepare(skb, &iter);
if (first_frag && IPCB(skb)->opt.optlen) {
/* ipcb->opt is not populated for frags
* coming from __ip_make_skb(),
* ip_options_fragment() needs optlen
*/
IPCB(iter.frag)->opt.optlen =
IPCB(skb)->opt.optlen;
ip_options_fragment(iter.frag);
ip_send_check(iter.iph);
}
}
skb->tstamp = tstamp;
@@ -905,7 +912,7 @@ slow_path:
err = PTR_ERR(skb2);
goto fail;
}
ip_frag_ipcb(skb, skb2, first_frag, &state);
ip_frag_ipcb(skb, skb2, first_frag);
/*
* Put this fragment into the sending queue.

View File

@@ -220,7 +220,8 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
continue;
}
if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif &&
sk->sk_bound_dev_if != inet_sdif(skb))
continue;
sock_hold(sk);

View File

@@ -721,6 +721,7 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
int ret = -EINVAL;
int chk_addr_ret;
lock_sock(sk);
if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in))
goto out;
@@ -740,7 +741,9 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
inet->inet_saddr = 0; /* Use device */
sk_dst_reset(sk);
ret = 0;
out: return ret;
out:
release_sock(sk);
return ret;
}
/*

View File

@@ -2601,7 +2601,7 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
__u32 valid_lft, u32 prefered_lft)
{
struct inet6_ifaddr *ifp = ipv6_get_ifaddr(net, addr, dev, 1);
int create = 0;
int create = 0, update_lft = 0;
if (!ifp && valid_lft) {
int max_addresses = in6_dev->cnf.max_addresses;
@@ -2645,19 +2645,32 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
unsigned long now;
u32 stored_lft;
/* Update lifetime (RFC4862 5.5.3 e)
* We deviate from RFC4862 by honoring all Valid Lifetimes to
* improve the reaction of SLAAC to renumbering events
* (draft-gont-6man-slaac-renum-06, Section 4.2)
*/
/* update lifetime (RFC2462 5.5.3 e) */
spin_lock_bh(&ifp->lock);
now = jiffies;
if (ifp->valid_lft > (now - ifp->tstamp) / HZ)
stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ;
else
stored_lft = 0;
if (!create && stored_lft) {
const u32 minimum_lft = min_t(u32,
stored_lft, MIN_VALID_LIFETIME);
valid_lft = max(valid_lft, minimum_lft);
/* RFC4862 Section 5.5.3e:
* "Note that the preferred lifetime of the
* corresponding address is always reset to
* the Preferred Lifetime in the received
* Prefix Information option, regardless of
* whether the valid lifetime is also reset or
* ignored."
*
* So we should always update prefered_lft here.
*/
update_lft = 1;
}
if (update_lft) {
ifp->valid_lft = valid_lft;
ifp->prefered_lft = prefered_lft;
ifp->tstamp = now;
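
The code above derives the remaining stored lifetime from ifp->valid_lft and the time elapsed since ifp->tstamp, then refuses to lower the valid lifetime below min(stored_lft, MIN_VALID_LIFETIME), where MIN_VALID_LIFETIME is the 2-hour constant added back to addrconf.h earlier in this diff. Below is a hedged arithmetic walkthrough with made-up numbers.

/* Hedged arithmetic walkthrough of the valid-lifetime clamp above. */
#include <stdio.h>

#define MIN_VALID_LIFETIME (2 * 3600)	/* 2 hours, in seconds */

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }
static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }

int main(void)
{
	unsigned int stored_valid_lft = 86400;	/* 1 day left on the address   */
	unsigned int elapsed = 600;		/* 10 minutes since last update */
	unsigned int advertised_lft = 60;	/* hostile/buggy RA offers 1 min */

	unsigned int stored_lft = stored_valid_lft > elapsed ?
				  stored_valid_lft - elapsed : 0;
	unsigned int minimum_lft = min_u(stored_lft, MIN_VALID_LIFETIME);
	unsigned int valid_lft = max_u(advertised_lft, minimum_lft);

	/* A tiny advertised lifetime cannot drop the address below 2 hours
	 * while a long lifetime is still stored. */
	printf("stored=%u advertised=%u -> accepted valid_lft=%u\n",
	       stored_lft, advertised_lft, valid_lft);
	return 0;
}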

View File

@@ -110,7 +110,7 @@ void fib6_update_sernum(struct net *net, struct fib6_info *f6i)
fn = rcu_dereference_protected(f6i->fib6_node,
lockdep_is_held(&f6i->fib6_table->tb6_lock));
if (fn)
fn->fn_sernum = fib6_new_sernum(net);
WRITE_ONCE(fn->fn_sernum, fib6_new_sernum(net));
}
/*
@@ -587,12 +587,13 @@ static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
spin_unlock_bh(&table->tb6_lock);
if (res > 0) {
cb->args[4] = 1;
cb->args[5] = w->root->fn_sernum;
cb->args[5] = READ_ONCE(w->root->fn_sernum);
}
} else {
if (cb->args[5] != w->root->fn_sernum) {
int sernum = READ_ONCE(w->root->fn_sernum);
if (cb->args[5] != sernum) {
/* Begin at the root if the tree changed */
cb->args[5] = w->root->fn_sernum;
cb->args[5] = sernum;
w->state = FWS_INIT;
w->node = w->root;
w->skip = w->count;
@@ -1342,7 +1343,7 @@ static void __fib6_update_sernum_upto_root(struct fib6_info *rt,
/* paired with smp_rmb() in rt6_get_cookie_safe() */
smp_wmb();
while (fn) {
fn->fn_sernum = sernum;
WRITE_ONCE(fn->fn_sernum, sernum);
fn = rcu_dereference_protected(fn->parent,
lockdep_is_held(&rt->fib6_table->tb6_lock));
}
@@ -2171,8 +2172,8 @@ static int fib6_clean_node(struct fib6_walker *w)
};
if (c->sernum != FIB6_NO_SERNUM_CHANGE &&
w->node->fn_sernum != c->sernum)
w->node->fn_sernum = c->sernum;
READ_ONCE(w->node->fn_sernum) != c->sernum)
WRITE_ONCE(w->node->fn_sernum, c->sernum);
if (!c->func) {
WARN_ON_ONCE(c->sernum == FIB6_NO_SERNUM_CHANGE);
@@ -2536,7 +2537,7 @@ static void ipv6_route_seq_setup_walk(struct ipv6_route_iter *iter,
iter->w.state = FWS_INIT;
iter->w.node = iter->w.root;
iter->w.args = iter;
iter->sernum = iter->w.root->fn_sernum;
iter->sernum = READ_ONCE(iter->w.root->fn_sernum);
INIT_LIST_HEAD(&iter->w.lh);
fib6_walker_link(net, &iter->w);
}
@@ -2564,8 +2565,10 @@ static struct fib6_table *ipv6_route_seq_next_table(struct fib6_table *tbl,
static void ipv6_route_check_sernum(struct ipv6_route_iter *iter)
{
if (iter->sernum != iter->w.root->fn_sernum) {
iter->sernum = iter->w.root->fn_sernum;
int sernum = READ_ONCE(iter->w.root->fn_sernum);
if (iter->sernum != sernum) {
iter->sernum = sernum;
iter->w.state = FWS_INIT;
iter->w.node = iter->w.root;
WARN_ON(iter->w.skip);

View File

@@ -1066,14 +1066,14 @@ int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
if (unlikely(!ipv6_chk_addr_and_flags(net, laddr, ldev, false,
0, IFA_F_TENTATIVE)))
pr_warn("%s xmit: Local address not yet configured!\n",
p->name);
pr_warn_ratelimited("%s xmit: Local address not yet configured!\n",
p->name);
else if (!(p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) &&
!ipv6_addr_is_multicast(raddr) &&
unlikely(ipv6_chk_addr_and_flags(net, raddr, ldev,
true, 0, IFA_F_TENTATIVE)))
pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
p->name);
pr_warn_ratelimited("%s xmit: Routing loop! Remote address found on this node!\n",
p->name);
else
ret = 1;
rcu_read_unlock();

View File

@@ -2674,7 +2674,7 @@ static void ip6_link_failure(struct sk_buff *skb)
if (from) {
fn = rcu_dereference(from->fib6_node);
if (fn && (rt->rt6i_flags & RTF_DEFAULT))
fn->fn_sernum = -1;
WRITE_ONCE(fn->fn_sernum, -1);
}
}
rcu_read_unlock();

Some files were not shown because too many files have changed in this diff.