Merge 5.10.152 into android12-5.10-lts
Changes in 5.10.152
	ocfs2: clear dinode links count in case of error
	ocfs2: fix BUG when iput after ocfs2_mknod fails
	selinux: enable use of both GFP_KERNEL and GFP_ATOMIC in convert_context()
	cpufreq: qcom: fix writes in read-only memory region
	i2c: qcom-cci: Fix ordering of pm_runtime_xx and i2c_add_adapter
	x86/microcode/AMD: Apply the patch early on every logical thread
	hwmon/coretemp: Handle large core ID value
	ata: ahci-imx: Fix MODULE_ALIAS
	ata: ahci: Match EM_MAX_SLOTS with SATA_PMP_MAX_PORTS
	cpufreq: qcom: fix memory leak in error path
	kvm: Add support for arch compat vm ioctls
	KVM: arm64: vgic: Fix exit condition in scan_its_table()
	media: mceusb: set timeout to at least timeout provided
	media: venus: dec: Handle the case where find_format fails
	block: wbt: Remove unnecessary invoking of wbt_update_limits in wbt_init
	blk-wbt: call rq_qos_add() after wb_normal is initialized
	arm64: errata: Remove AES hwcap for COMPAT tasks
	r8152: add PID for the Lenovo OneLink+ Dock
	btrfs: fix processing of delayed data refs during backref walking
	btrfs: fix processing of delayed tree block refs during backref walking
	ACPI: extlog: Handle multiple records
	tipc: Fix recognition of trial period
	tipc: fix an information leak in tipc_topsrv_kern_subscr
	i40e: Fix DMA mappings leak
	HID: magicmouse: Do not set BTN_MOUSE on double report
	sfc: Change VF mac via PF as first preference if available.
	net/atm: fix proc_mpc_write incorrect return value
	net: phy: dp83867: Extend RX strap quirk for SGMII mode
	cifs: Fix xid leak in cifs_copy_file_range()
	cifs: Fix xid leak in cifs_flock()
	cifs: Fix xid leak in cifs_ses_add_channel()
	net: hsr: avoid possible NULL deref in skb_clone()
	ionic: catch NULL pointer issue on reconfig
	nvme-hwmon: rework to avoid devm allocation
	nvme-hwmon: Return error code when registration fails
	nvme-hwmon: consistently ignore errors from nvme_hwmon_init
	nvme-hwmon: kmalloc the NVME SMART log buffer
	net: sched: cake: fix null pointer access issue when cake_init() fails
	net: sched: delete duplicate cleanup of backlog and qlen
	net: sched: sfb: fix null pointer access issue when sfb_init() fails
	sfc: include vport_id in filter spec hash and equal()
	net: hns: fix possible memory leak in hnae_ae_register()
	net: sched: fix race condition in qdisc_graft()
	net: phy: dp83822: disable MDI crossover status change interrupt
	iommu/vt-d: Allow NVS regions in arch_rmrr_sanity_check()
	iommu/vt-d: Clean up si_domain in the init_dmars() error path
	drm/virtio: Use appropriate atomic state in virtio_gpu_plane_cleanup_fb()
	dmaengine: mxs-dma: Remove the unused .id_table
	dmaengine: mxs: use platform_driver_register
	tracing: Simplify conditional compilation code in tracing_set_tracer()
	tracing: Do not free snapshot if tracer is on cmdline
	xen: assume XENFEAT_gnttab_map_avail_bits being set for pv guests
	xen/gntdev: Accommodate VMA splitting
	mmc: sdhci-tegra: Use actual clock rate for SW tuning correction
	riscv: Add machine name to kernel boot log and stack dump output
	riscv: always honor the CONFIG_CMDLINE_FORCE when parsing dtb
	perf pmu: Validate raw event with sysfs exported format bits
	perf: Skip and warn on unknown format 'configN' attrs
	fcntl: make F_GETOWN(EX) return 0 on dead owner task
	fcntl: fix potential deadlocks for &fown_struct.lock
	arm64: dts: qcom: sc7180-trogdor: Fixup modem memory region
	arm64: topology: move store_cpu_topology() to shared code
	riscv: topology: fix default topology reporting
	perf/x86/intel/pt: Relax address filter validation
	hv_netvsc: Fix race between VF offering and VF association message from host
	ACPI: video: Force backlight native for more TongFang devices
	x86/Kconfig: Drop check for -mabi=ms for CONFIG_EFI_STUB
	Makefile.debug: re-enable debug info for .S files
	mmc: core: Add SD card quirk for broken discard
	blk-wbt: fix that 'rwb->wc' is always set to 1 in wbt_init()
	mm: /proc/pid/smaps_rollup: fix no vma's null-deref
	udp: Update reuse->has_conns under reuseport_lock.
	Linux 5.10.152

Change-Id: I2c75b6fd3ae205968bcc3133ebf71b82ff2a19b6
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
@@ -76,10 +76,14 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A57      | #1319537        | ARM64_ERRATUM_1319367       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A57      | #1742098        | ARM64_ERRATUM_1742098       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A72      | #853709         | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A72      | #1319367        | ARM64_ERRATUM_1319367       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A72      | #1655431        | ARM64_ERRATUM_1742098       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A73      | #858921         | ARM64_ERRATUM_858921        |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A76      | #1188873,1418040| ARM64_ERRATUM_1418040       |
Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 151
+SUBLEVEL = 152
 EXTRAVERSION =
 NAME = Dare mighty things
 
@@ -860,7 +860,9 @@ else
 DEBUG_CFLAGS	+= -g
 endif
 
-ifneq ($(LLVM_IAS),1)
+ifeq ($(LLVM_IAS),1)
+KBUILD_AFLAGS	+= -g
+else
 KBUILD_AFLAGS	+= -Wa,-gdwarf-2
 endif
 
@@ -493,6 +493,22 @@ config ARM64_ERRATUM_834220
 
 	  If unsure, say Y.
 
+config ARM64_ERRATUM_1742098
+	bool "Cortex-A57/A72: 1742098: ELR recorded incorrectly on interrupt taken between cryptographic instructions in a sequence"
+	depends on COMPAT
+	default y
+	help
+	  This option removes the AES hwcap for aarch32 user-space to
+	  workaround erratum 1742098 on Cortex-A57 and Cortex-A72.
+
+	  Affected parts may corrupt the AES state if an interrupt is
+	  taken between a pair of AES instructions. These instructions
+	  are only present if the cryptography extensions are present.
+	  All software should have a fallback implementation for CPUs
+	  that don't implement the cryptography extensions.
+
+	  If unsure, say Y.
+
 config ARM64_ERRATUM_845719
 	bool "Cortex-A53: 845719: a load might read incorrect data"
 	depends on COMPAT
@@ -9,6 +9,10 @@
 	label = "proximity-wifi-lte";
 };
 
+&mpss_mem {
+	reg = <0x0 0x86000000 0x0 0x8c00000>;
+};
+
 &remoteproc_mpss {
 	firmware-name = "qcom/sc7180-trogdor/modem/mba.mbn",
 			"qcom/sc7180-trogdor/modem/qdsp6sw.mbn";
@@ -39,7 +39,7 @@
 		};
 
 		mpss_mem: memory@86000000 {
-			reg = <0x0 0x86000000 0x0 0x8c00000>;
+			reg = <0x0 0x86000000 0x0 0x2000000>;
 			no-map;
 		};
 
@@ -72,8 +72,9 @@
 #define ARM64_WORKAROUND_TSB_FLUSH_FAILURE	61
 #define ARM64_SPECTRE_BHB			62
 #define ARM64_WORKAROUND_2457168		63
+#define ARM64_WORKAROUND_1742098		64
 
-/* kabi: reserve 64 - 76 for future cpu capabilities */
+/* kabi: reserve 65 - 76 for future cpu capabilities */
 #define ARM64_NCAPS				76
 
 #endif /* __ASM_CPUCAPS_H */
@@ -358,6 +358,14 @@ static const struct midr_range tsb_flush_fail_cpus[] = {
 };
 #endif	/* CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE */
 
+#ifdef CONFIG_ARM64_ERRATUM_1742098
+static struct midr_range broken_aarch32_aes[] = {
+	MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+	{},
+};
+#endif
+
 const struct arm64_cpu_capabilities arm64_errata[] = {
 #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
 	{
@@ -566,6 +574,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		/* Cortex-A510 r0p0-r1p1 */
 		CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1)
 	},
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_1742098
+	{
+		.desc = "ARM erratum 1742098",
+		.capability = ARM64_WORKAROUND_1742098,
+		CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
+		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+	},
+#endif
 	{
 	}
@@ -79,6 +79,7 @@
 #include <asm/cpu_ops.h>
 #include <asm/fpsimd.h>
 #include <asm/kvm_host.h>
+#include <asm/hwcap.h>
 #include <asm/mmu_context.h>
 #include <asm/mte.h>
 #include <asm/processor.h>
@@ -1897,6 +1898,14 @@ static bool is_kvm_protected_mode(const struct arm64_cpu_capabilities *entry, in
 }
 #endif /* CONFIG_KVM */
 
+static void elf_hwcap_fixup(void)
+{
+#ifdef CONFIG_ARM64_ERRATUM_1742098
+	if (cpus_have_const_cap(ARM64_WORKAROUND_1742098))
+		compat_elf_hwcap2 &= ~COMPAT_HWCAP2_AES;
+#endif /* ARM64_ERRATUM_1742098 */
+}
+
 /* Internal helper functions to match cpu capability type */
 static bool
 cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
@@ -2921,8 +2930,10 @@ void __init setup_cpu_features(void)
 	setup_system_capabilities();
 	setup_elf_hwcaps(arm64_elf_hwcaps);
 
-	if (system_supports_32bit_el0())
+	if (system_supports_32bit_el0()) {
 		setup_elf_hwcaps(compat_elf_hwcaps);
+		elf_hwcap_fixup();
+	}
 
 	if (system_uses_ttbr0_pan())
 		pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
@@ -22,46 +22,6 @@
 #include <asm/cputype.h>
 #include <asm/topology.h>
 
-void store_cpu_topology(unsigned int cpuid)
-{
-	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
-	u64 mpidr;
-
-	if (cpuid_topo->package_id != -1)
-		goto topology_populated;
-
-	mpidr = read_cpuid_mpidr();
-
-	/* Uniprocessor systems can rely on default topology values */
-	if (mpidr & MPIDR_UP_BITMASK)
-		return;
-
-	/*
-	 * This would be the place to create cpu topology based on MPIDR.
-	 *
-	 * However, it cannot be trusted to depict the actual topology; some
-	 * pieces of the architecture enforce an artificial cap on Aff0 values
-	 * (e.g. GICv3's ICC_SGI1R_EL1 limits it to 15), leading to an
-	 * artificial cycling of Aff1, Aff2 and Aff3 values. IOW, these end up
-	 * having absolutely no relationship to the actual underlying system
-	 * topology, and cannot be reasonably used as core / package ID.
-	 *
-	 * If the MT bit is set, Aff0 *could* be used to define a thread ID, but
-	 * we still wouldn't be able to obtain a sane core ID. This means we
-	 * need to entirely ignore MPIDR for any topology deduction.
-	 */
-	cpuid_topo->thread_id  = -1;
-	cpuid_topo->core_id    = cpuid;
-	cpuid_topo->package_id = cpu_to_node(cpuid);
-
-	pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
-		 cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
-		 cpuid_topo->thread_id, mpidr);
-
-topology_populated:
-	update_siblings_masks(cpuid);
-}
-
 #ifdef CONFIG_ACPI
 static bool __init acpi_cpu_is_threaded(int cpu)
 {
@@ -2096,7 +2096,7 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
 
 	memset(entry, 0, esz);
 
-	while (len > 0) {
+	while (true) {
 		int next_offset;
 		size_t byte_offset;
 
@@ -2109,6 +2109,9 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
 			return next_offset;
 
 		byte_offset = next_offset * esz;
+		if (byte_offset >= len)
+			break;
+
 		id += next_offset;
 		gpa += byte_offset;
 		len -= byte_offset;
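For reference, the loop change above moves the bounds check to run after the current entry has been handled, so a table whose remaining length is exactly one entry no longer exits before processing it. The following standalone C sketch (illustrative names, not part of the patch) shows the same iteration shape over fixed-size records in a buffer:

/* Standalone sketch of the fixed scan loop: check the remaining
 * length *after* processing the current entry, as scan_its_table()
 * now does, so the last entry is never skipped.
 */
#include <stdio.h>
#include <stddef.h>

static int process_entry(const unsigned char *entry)
{
	printf("entry %u\n", entry[0]);
	return 1;	/* next_offset: advance by one entry */
}

int main(void)
{
	unsigned char table[] = { 1, 2, 3, 4 };
	size_t esz = 1, len = sizeof(table);
	const unsigned char *cur = table;

	while (1) {
		int next_offset = process_entry(cur);
		size_t byte_offset = (size_t)next_offset * esz;

		if (byte_offset >= len)	/* checked after processing */
			break;

		cur += byte_offset;
		len -= byte_offset;
	}
	return 0;
}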
@@ -35,7 +35,7 @@ config RISCV
 	select CLINT_TIMER if !MMU
 	select COMMON_CLK
 	select EDAC_SUPPORT
-	select GENERIC_ARCH_TOPOLOGY if SMP
+	select GENERIC_ARCH_TOPOLOGY
 	select GENERIC_ATOMIC64 if !64BIT
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_EARLY_IOREMAP
@@ -54,10 +54,17 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices);
 static void __init parse_dtb(void)
 {
 	/* Early scan of device tree from init memory */
-	if (early_init_dt_scan(dtb_early_va))
-		return;
+	if (early_init_dt_scan(dtb_early_va)) {
+		const char *name = of_flat_dt_get_machine_name();
+
+		if (name) {
+			pr_info("Machine model: %s\n", name);
+			dump_stack_set_arch_desc("%s (DT)", name);
+		}
+	} else {
+		pr_err("No DTB passed to the kernel\n");
+	}
 
-	pr_err("No DTB passed to the kernel\n");
 #ifdef CONFIG_CMDLINE_FORCE
 	strlcpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
 	pr_info("Forcing kernel command line to: %s\n", boot_command_line);
@@ -46,6 +46,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	int cpuid;
 	int ret;
 
+	store_cpu_topology(smp_processor_id());
+
 	/* This covers non-smp usecase mandated by "nosmp" option */
 	if (max_cpus == 0)
 		return;
@@ -152,8 +154,8 @@ asmlinkage __visible void smp_callin(void)
 	mmgrab(mm);
 	current->active_mm = mm;
 
+	store_cpu_topology(curr_cpuid);
 	notify_cpu_starting(curr_cpuid);
-	update_siblings_masks(curr_cpuid);
 	set_cpu_online(curr_cpuid, 1);
 
 	/*
@@ -1952,7 +1952,6 @@ config EFI
 config EFI_STUB
 	bool "EFI stub support"
 	depends on EFI && !X86_USE_3DNOW
-	depends on $(cc-option,-mabi=ms) || X86_32
 	select RELOCATABLE
 	help
 	  This kernel feature allows a bzImage to be loaded directly
@@ -13,6 +13,8 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/types.h>
+#include <linux/bits.h>
+#include <linux/limits.h>
 #include <linux/slab.h>
 #include <linux/device.h>
 
@@ -1348,11 +1350,37 @@ static void pt_addr_filters_fini(struct perf_event *event)
 	event->hw.addr_filters = NULL;
 }
 
-static inline bool valid_kernel_ip(unsigned long ip)
+#ifdef CONFIG_X86_64
+static u64 canonical_address(u64 vaddr, u8 vaddr_bits)
 {
-	return virt_addr_valid(ip) && kernel_ip(ip);
+	return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
 }
 
+static u64 is_canonical_address(u64 vaddr, u8 vaddr_bits)
+{
+	return canonical_address(vaddr, vaddr_bits) == vaddr;
+}
+
+/* Clamp to a canonical address greater-than-or-equal-to the address given */
+static u64 clamp_to_ge_canonical_addr(u64 vaddr, u8 vaddr_bits)
+{
+	return is_canonical_address(vaddr, vaddr_bits) ?
+	       vaddr :
+	       -BIT_ULL(vaddr_bits - 1);
+}
+
+/* Clamp to a canonical address less-than-or-equal-to the address given */
+static u64 clamp_to_le_canonical_addr(u64 vaddr, u8 vaddr_bits)
+{
+	return is_canonical_address(vaddr, vaddr_bits) ?
+	       vaddr :
+	       BIT_ULL(vaddr_bits - 1) - 1;
+}
+#else
+#define clamp_to_ge_canonical_addr(x, y) (x)
+#define clamp_to_le_canonical_addr(x, y) (x)
+#endif
+
 static int pt_event_addr_filters_validate(struct list_head *filters)
 {
 	struct perf_addr_filter *filter;
@@ -1367,14 +1395,6 @@ static int pt_event_addr_filters_validate(struct list_head *filters)
 		    filter->action == PERF_ADDR_FILTER_ACTION_START)
 			return -EOPNOTSUPP;
 
-		if (!filter->path.dentry) {
-			if (!valid_kernel_ip(filter->offset))
-				return -EINVAL;
-
-			if (!valid_kernel_ip(filter->offset + filter->size))
-				return -EINVAL;
-		}
-
 		if (++range > intel_pt_validate_hw_cap(PT_CAP_num_address_ranges))
 			return -EOPNOTSUPP;
 	}
@@ -1398,9 +1418,26 @@ static void pt_event_addr_filters_sync(struct perf_event *event)
 		if (filter->path.dentry && !fr[range].start) {
 			msr_a = msr_b = 0;
 		} else {
-			/* apply the offset */
-			msr_a = fr[range].start;
-			msr_b = msr_a + fr[range].size - 1;
+			unsigned long n = fr[range].size - 1;
+			unsigned long a = fr[range].start;
+			unsigned long b;
+
+			if (a > ULONG_MAX - n)
+				b = ULONG_MAX;
+			else
+				b = a + n;
+			/*
+			 * Apply the offset. 64-bit addresses written to the
+			 * MSRs must be canonical, but the range can encompass
+			 * non-canonical addresses. Since software cannot
+			 * execute at non-canonical addresses, adjusting to
+			 * canonical addresses does not affect the result of the
+			 * address filter.
+			 */
+			msr_a = clamp_to_ge_canonical_addr(a, boot_cpu_data.x86_virt_bits);
+			msr_b = clamp_to_le_canonical_addr(b, boot_cpu_data.x86_virt_bits);
+			if (msr_b < msr_a)
+				msr_a = msr_b = 0;
 		}
 
 		filters->filter[range].msr_a  = msr_a;
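As a side note, the sign-extension trick in canonical_address() above can be checked in isolation. The standalone C sketch below (illustrative values, not part of the patch) canonicalizes a 48-bit virtual address the same way: it propagates bit 47 into bits 48..63 with an arithmetic shift pair:

/* Standalone sketch: sign-extend bit (vaddr_bits - 1) through bit 63,
 * exactly what canonical_address() in the hunk above computes.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t canonical_address(uint64_t vaddr, uint8_t vaddr_bits)
{
	return (uint64_t)(((int64_t)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits));
}

int main(void)
{
	/* 0x0000800000000000 is the lowest non-canonical 48-bit address:
	 * bit 47 is set, so sign extension fills bits 48..63 with ones.
	 */
	uint64_t va = 0x0000800000000000ULL;

	printf("%#llx -> %#llx\n",
	       (unsigned long long)va,
	       (unsigned long long)canonical_address(va, 48));
	/* prints: 0x800000000000 -> 0xffff800000000000 */
	return 0;
}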
@@ -17,8 +17,10 @@ arch_rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr)
 {
 	u64 start = rmrr->base_address;
 	u64 end = rmrr->end_address + 1;
+	int entry_type;
 
-	if (e820__mapped_all(start, end, E820_TYPE_RESERVED))
+	entry_type = e820__get_entry_type(start, end);
+	if (entry_type == E820_TYPE_RESERVED || entry_type == E820_TYPE_NVS)
 		return 0;
 
 	pr_err(FW_BUG "No firmware reserved region can cover this RMRR [%#018Lx-%#018Lx], contact BIOS vendor for fixes\n",
@@ -441,7 +441,13 @@ apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size, bool save_p
 		return ret;
 
 	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
-	if (rev >= mc->hdr.patch_id)
+
+	/*
+	 * Allow application of the same revision to pick up SMT-specific
+	 * changes even if the revision of the other SMT thread is already
+	 * up-to-date.
+	 */
+	if (rev > mc->hdr.patch_id)
 		return ret;
 
 	if (!__apply_microcode_amd(mc)) {
@@ -523,8 +529,12 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax)
 
 	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
 
-	/* Check whether we have saved a new patch already: */
-	if (*new_rev && rev < mc->hdr.patch_id) {
+	/*
+	 * Check whether a new patch has been saved already. Also, allow application of
+	 * the same revision in order to pick up SMT-thread-specific configuration even
+	 * if the sibling SMT thread already has an up-to-date revision.
+	 */
+	if (*new_rev && rev <= mc->hdr.patch_id) {
 		if (!__apply_microcode_amd(mc)) {
 			*new_rev = mc->hdr.patch_id;
 			return;
@@ -838,9 +838,11 @@ int wbt_init(struct request_queue *q)
 	rwb->last_comp = rwb->last_issue = jiffies;
 	rwb->win_nsec = RWB_WINDOW_NSEC;
 	rwb->enable_state = WBT_STATE_ON_DEFAULT;
-	rwb->wc = 1;
+	rwb->wc = test_bit(QUEUE_FLAG_WC, &q->queue_flags);
 	rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
-	wbt_update_limits(rwb);
+	rwb->min_lat_nsec = wbt_default_latency_nsec(q);
+
+	wbt_queue_depth_changed(&rwb->rqos);
 
 	/*
 	 * Assign rwb and add the stats callback.
@@ -848,10 +850,5 @@ int wbt_init(struct request_queue *q)
 	rq_qos_add(q, &rwb->rqos);
 	blk_stat_add_callback(q, rwb->cb);
 
-	rwb->min_lat_nsec = wbt_default_latency_nsec(q);
-
-	wbt_queue_depth_changed(&rwb->rqos);
-	wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
-
 	return 0;
 }
@@ -12,6 +12,7 @@
 #include <linux/ratelimit.h>
 #include <linux/edac.h>
 #include <linux/ras.h>
+#include <acpi/ghes.h>
 #include <asm/cpu.h>
 #include <asm/mce.h>
 
@@ -138,8 +139,8 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
 	int cpu = mce->extcpu;
 	struct acpi_hest_generic_status *estatus, *tmp;
 	struct acpi_hest_generic_data *gdata;
-	const guid_t *fru_id = &guid_null;
-	char *fru_text = "";
+	const guid_t *fru_id;
+	char *fru_text;
 	guid_t *sec_type;
 	static u32 err_seq;
 
@@ -160,17 +161,23 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
 
 	/* log event via trace */
 	err_seq++;
-	gdata = (struct acpi_hest_generic_data *)(tmp + 1);
-	if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
-		fru_id = (guid_t *)gdata->fru_id;
-	if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
-		fru_text = gdata->fru_text;
-	sec_type = (guid_t *)gdata->section_type;
-	if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
-		struct cper_sec_mem_err *mem = (void *)(gdata + 1);
-
-		if (gdata->error_data_length >= sizeof(*mem))
-			trace_extlog_mem_event(mem, err_seq, fru_id, fru_text,
-					       (u8)gdata->error_severity);
+	apei_estatus_for_each_section(tmp, gdata) {
+		if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
+			fru_id = (guid_t *)gdata->fru_id;
+		else
+			fru_id = &guid_null;
+		if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
+			fru_text = gdata->fru_text;
+		else
+			fru_text = "";
+		sec_type = (guid_t *)gdata->section_type;
+		if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
+			struct cper_sec_mem_err *mem = (void *)(gdata + 1);
+
+			if (gdata->error_data_length >= sizeof(*mem))
+				trace_extlog_mem_event(mem, err_seq, fru_id, fru_text,
+						       (u8)gdata->error_severity);
+		}
 	}
 
 out:
@@ -500,6 +500,70 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
 		DMI_MATCH(DMI_BOARD_NAME, "PF5LUXG"),
 		},
 	},
+	/*
+	 * More Tongfang devices with the same issue as the Clevo NL5xRU and
+	 * NL5xNU/TUXEDO Aura 15 Gen1 and Gen2. See the description above.
+	 */
+	{
+	 .callback = video_detect_force_native,
+	 .ident = "TongFang GKxNRxx",
+	 .matches = {
+		DMI_MATCH(DMI_BOARD_NAME, "GKxNRxx"),
+		},
+	},
+	{
+	 .callback = video_detect_force_native,
+	 .ident = "TongFang GKxNRxx",
+	 .matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+		DMI_MATCH(DMI_BOARD_NAME, "POLARIS1501A1650TI"),
+		},
+	},
+	{
+	 .callback = video_detect_force_native,
+	 .ident = "TongFang GKxNRxx",
+	 .matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+		DMI_MATCH(DMI_BOARD_NAME, "POLARIS1501A2060"),
+		},
+	},
+	{
+	 .callback = video_detect_force_native,
+	 .ident = "TongFang GKxNRxx",
+	 .matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+		DMI_MATCH(DMI_BOARD_NAME, "POLARIS1701A1650TI"),
+		},
+	},
+	{
+	 .callback = video_detect_force_native,
+	 .ident = "TongFang GKxNRxx",
+	 .matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+		DMI_MATCH(DMI_BOARD_NAME, "POLARIS1701A2060"),
+		},
+	},
+	{
+	 .callback = video_detect_force_native,
+	 .ident = "TongFang GMxNGxx",
+	 .matches = {
+		DMI_MATCH(DMI_BOARD_NAME, "GMxNGxx"),
+		},
+	},
+	{
+	 .callback = video_detect_force_native,
+	 .ident = "TongFang GMxZGxx",
+	 .matches = {
+		DMI_MATCH(DMI_BOARD_NAME, "GMxZGxx"),
+		},
+	},
+	{
+	 .callback = video_detect_force_native,
+	 .ident = "TongFang GMxRGxx",
+	 .matches = {
+		DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"),
+		},
+	},
 	/*
 	 * Desktops which falsely report a backlight and which our heuristics
 	 * for this do not catch.
@@ -254,7 +254,7 @@ enum {
 	PCS_7			= 0x94, /* 7+ port PCS (Denverton) */
 
 	/* em constants */
-	EM_MAX_SLOTS		= 8,
+	EM_MAX_SLOTS		= SATA_PMP_MAX_PORTS,
 	EM_MAX_RETRY		= 5,
 
 	/* em_ctl bits */
@@ -1230,4 +1230,4 @@ module_platform_driver(imx_ahci_driver);
 MODULE_DESCRIPTION("Freescale i.MX AHCI SATA platform driver");
 MODULE_AUTHOR("Richard Zhu <Hong-Xing.Zhu@freescale.com>");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("ahci:imx");
+MODULE_ALIAS("platform:" DRV_NAME);
@@ -607,4 +607,23 @@ void __init init_cpu_topology(void)
 	else if (of_have_populated_dt() && parse_dt_topology())
 		reset_cpu_topology();
 }
+
+void store_cpu_topology(unsigned int cpuid)
+{
+	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
+
+	if (cpuid_topo->package_id != -1)
+		goto topology_populated;
+
+	cpuid_topo->thread_id = -1;
+	cpuid_topo->core_id = cpuid;
+	cpuid_topo->package_id = cpu_to_node(cpuid);
+
+	pr_debug("CPU%u: package %d core %d thread %d\n",
+		 cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
+		 cpuid_topo->thread_id);
+
+topology_populated:
+	update_siblings_masks(cpuid);
+}
 #endif
@@ -215,6 +215,7 @@ static int qcom_cpufreq_krait_name_version(struct device *cpu_dev,
 	int speed = 0, pvs = 0, pvs_ver = 0;
 	u8 *speedbin;
 	size_t len;
+	int ret = 0;
 
 	speedbin = nvmem_cell_read(speedbin_nvmem, &len);
 
@@ -232,7 +233,8 @@ static int qcom_cpufreq_krait_name_version(struct device *cpu_dev,
 		break;
 	default:
 		dev_err(cpu_dev, "Unable to read nvmem data. Defaulting to 0!\n");
-		return -ENODEV;
+		ret = -ENODEV;
+		goto len_error;
 	}
 
 	snprintf(*pvs_name, sizeof("speedXX-pvsXX-vXX"), "speed%d-pvs%d-v%d",
@@ -240,8 +242,9 @@ static int qcom_cpufreq_krait_name_version(struct device *cpu_dev,
 
 	drv->versions = (1 << speed);
 
+len_error:
 	kfree(speedbin);
-	return 0;
+	return ret;
 }
 
 static const struct qcom_cpufreq_match_data match_data_kryo = {
@@ -264,7 +267,8 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
 	struct nvmem_cell *speedbin_nvmem;
 	struct device_node *np;
 	struct device *cpu_dev;
-	char *pvs_name = "speedXX-pvsXX-vXX";
+	char pvs_name_buffer[] = "speedXX-pvsXX-vXX";
+	char *pvs_name = pvs_name_buffer;
 	unsigned cpu;
 	const struct of_device_id *match;
 	int ret;
@@ -167,29 +167,11 @@ static struct mxs_dma_type mxs_dma_types[] = {
 	}
 };
 
-static const struct platform_device_id mxs_dma_ids[] = {
-	{
-		.name = "imx23-dma-apbh",
-		.driver_data = (kernel_ulong_t) &mxs_dma_types[0],
-	}, {
-		.name = "imx23-dma-apbx",
-		.driver_data = (kernel_ulong_t) &mxs_dma_types[1],
-	}, {
-		.name = "imx28-dma-apbh",
-		.driver_data = (kernel_ulong_t) &mxs_dma_types[2],
-	}, {
-		.name = "imx28-dma-apbx",
-		.driver_data = (kernel_ulong_t) &mxs_dma_types[3],
-	}, {
-		/* end of list */
-	}
-};
-
 static const struct of_device_id mxs_dma_dt_ids[] = {
-	{ .compatible = "fsl,imx23-dma-apbh", .data = &mxs_dma_ids[0], },
-	{ .compatible = "fsl,imx23-dma-apbx", .data = &mxs_dma_ids[1], },
-	{ .compatible = "fsl,imx28-dma-apbh", .data = &mxs_dma_ids[2], },
-	{ .compatible = "fsl,imx28-dma-apbx", .data = &mxs_dma_ids[3], },
+	{ .compatible = "fsl,imx23-dma-apbh", .data = &mxs_dma_types[0], },
+	{ .compatible = "fsl,imx23-dma-apbx", .data = &mxs_dma_types[1], },
+	{ .compatible = "fsl,imx28-dma-apbh", .data = &mxs_dma_types[2], },
+	{ .compatible = "fsl,imx28-dma-apbx", .data = &mxs_dma_types[3], },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, mxs_dma_dt_ids);
@@ -688,7 +670,7 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
 	return mxs_chan->status;
 }
 
-static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
+static int mxs_dma_init(struct mxs_dma_engine *mxs_dma)
 {
 	int ret;
 
@@ -759,11 +741,9 @@ static struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec,
 				ofdma->of_node);
 }
 
-static int __init mxs_dma_probe(struct platform_device *pdev)
+static int mxs_dma_probe(struct platform_device *pdev)
 {
 	struct device_node *np = pdev->dev.of_node;
-	const struct platform_device_id *id_entry;
-	const struct of_device_id *of_id;
 	const struct mxs_dma_type *dma_type;
 	struct mxs_dma_engine *mxs_dma;
 	struct resource *iores;
@@ -779,13 +759,7 @@ static int mxs_dma_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	of_id = of_match_device(mxs_dma_dt_ids, &pdev->dev);
-	if (of_id)
-		id_entry = of_id->data;
-	else
-		id_entry = platform_get_device_id(pdev);
-
-	dma_type = (struct mxs_dma_type *)id_entry->driver_data;
+	dma_type = (struct mxs_dma_type *)of_device_get_match_data(&pdev->dev);
 	mxs_dma->type = dma_type->type;
 	mxs_dma->dev_id = dma_type->id;
 
@@ -865,11 +839,7 @@ static struct platform_driver mxs_dma_driver = {
 		.name	= "mxs-dma",
 		.of_match_table = mxs_dma_dt_ids,
 	},
-	.id_table	= mxs_dma_ids,
+	.probe = mxs_dma_probe,
 };
 
-static int __init mxs_dma_module_init(void)
-{
-	return platform_driver_probe(&mxs_dma_driver, mxs_dma_probe);
-}
-subsys_initcall(mxs_dma_module_init);
+builtin_platform_driver(mxs_dma_driver);
@@ -259,14 +259,14 @@ static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane,
 }
 
 static void virtio_gpu_cursor_cleanup_fb(struct drm_plane *plane,
-					 struct drm_plane_state *old_state)
+					 struct drm_plane_state *state)
 {
 	struct virtio_gpu_framebuffer *vgfb;
 
-	if (!plane->state->fb)
+	if (!state->fb)
 		return;
 
-	vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+	vgfb = to_virtio_gpu_framebuffer(state->fb);
 	if (vgfb->fence) {
 		dma_fence_put(&vgfb->fence->f);
 		vgfb->fence = NULL;
@@ -387,7 +387,7 @@ static int magicmouse_raw_event(struct hid_device *hdev,
 		magicmouse_raw_event(hdev, report, data + 2, data[1]);
 		magicmouse_raw_event(hdev, report, data + 2 + data[1],
 			size - 2 - data[1]);
-		break;
+		return 0;
 	default:
 		return 0;
 	}
@@ -46,9 +46,6 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
 #define TOTAL_ATTRS		(MAX_CORE_ATTRS + 1)
 #define MAX_CORE_DATA		(NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
 
-#define TO_CORE_ID(cpu)	(cpu_data(cpu).cpu_core_id)
-#define TO_ATTR_NO(cpu)	(TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
-
 #ifdef CONFIG_SMP
 #define for_each_sibling(i, cpu) \
 	for_each_cpu(i, topology_sibling_cpumask(cpu))
@@ -91,6 +88,8 @@ struct temp_data {
 struct platform_data {
 	struct device		*hwmon_dev;
 	u16			pkg_id;
+	u16			cpu_map[NUM_REAL_CORES];
+	struct ida		ida;
 	struct cpumask		cpumask;
 	struct temp_data	*core_data[MAX_CORE_DATA];
 	struct device_attribute name_attr;
@@ -441,7 +440,7 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
 			MSR_IA32_THERM_STATUS;
 	tdata->is_pkg_data = pkg_flag;
 	tdata->cpu = cpu;
-	tdata->cpu_core_id = TO_CORE_ID(cpu);
+	tdata->cpu_core_id = topology_core_id(cpu);
 	tdata->attr_size = MAX_CORE_ATTRS;
 	mutex_init(&tdata->update_lock);
 	return tdata;
@@ -454,7 +453,7 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
 	struct platform_data *pdata = platform_get_drvdata(pdev);
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	u32 eax, edx;
-	int err, attr_no;
+	int err, index, attr_no;
 
 	/*
 	 * Find attr number for sysfs:
@@ -462,14 +461,26 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
 	 * The attr number is always core id + 2
 	 * The Pkgtemp will always show up as temp1_*, if available
 	 */
-	attr_no = pkg_flag ? PKG_SYSFS_ATTR_NO : TO_ATTR_NO(cpu);
+	if (pkg_flag) {
+		attr_no = PKG_SYSFS_ATTR_NO;
+	} else {
+		index = ida_alloc(&pdata->ida, GFP_KERNEL);
+		if (index < 0)
+			return index;
+		pdata->cpu_map[index] = topology_core_id(cpu);
+		attr_no = index + BASE_SYSFS_ATTR_NO;
+	}
 
-	if (attr_no > MAX_CORE_DATA - 1)
-		return -ERANGE;
+	if (attr_no > MAX_CORE_DATA - 1) {
+		err = -ERANGE;
+		goto ida_free;
+	}
 
 	tdata = init_temp_data(cpu, pkg_flag);
-	if (!tdata)
-		return -ENOMEM;
+	if (!tdata) {
+		err = -ENOMEM;
+		goto ida_free;
+	}
 
 	/* Test if we can access the status register */
 	err = rdmsr_safe_on_cpu(cpu, tdata->status_reg, &eax, &edx);
@@ -505,6 +516,9 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
 exit_free:
 	pdata->core_data[attr_no] = NULL;
 	kfree(tdata);
+ida_free:
+	if (!pkg_flag)
+		ida_free(&pdata->ida, index);
 	return err;
 }
 
@@ -524,6 +538,9 @@ static void coretemp_remove_core(struct platform_data *pdata, int indx)
 
 	kfree(pdata->core_data[indx]);
 	pdata->core_data[indx] = NULL;
+
+	if (indx >= BASE_SYSFS_ATTR_NO)
+		ida_free(&pdata->ida, indx - BASE_SYSFS_ATTR_NO);
 }
 
 static int coretemp_probe(struct platform_device *pdev)
@@ -537,6 +554,7 @@ static int coretemp_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	pdata->pkg_id = pdev->id;
+	ida_init(&pdata->ida);
 	platform_set_drvdata(pdev, pdata);
 
 	pdata->hwmon_dev = devm_hwmon_device_register_with_groups(dev, DRVNAME,
@@ -553,6 +571,7 @@ static int coretemp_remove(struct platform_device *pdev)
 		if (pdata->core_data[i])
 			coretemp_remove_core(pdata, i);
 
+	ida_destroy(&pdata->ida);
 	return 0;
 }
 
@@ -647,7 +666,7 @@ static int coretemp_cpu_offline(unsigned int cpu)
 	struct platform_device *pdev = coretemp_get_pdev(cpu);
 	struct platform_data *pd;
 	struct temp_data *tdata;
-	int indx, target;
+	int i, indx = -1, target;
 
 	/*
 	 * Don't execute this on suspend as the device remove locks
@@ -660,12 +679,19 @@ static int coretemp_cpu_offline(unsigned int cpu)
 	if (!pdev)
 		return 0;
 
-	/* The core id is too big, just return */
-	indx = TO_ATTR_NO(cpu);
-	if (indx > MAX_CORE_DATA - 1)
+	pd = platform_get_drvdata(pdev);
+
+	for (i = 0; i < NUM_REAL_CORES; i++) {
+		if (pd->cpu_map[i] == topology_core_id(cpu)) {
+			indx = i + BASE_SYSFS_ATTR_NO;
+			break;
+		}
+	}
+
+	/* Too many cores and this core is not populated, just return */
+	if (indx < 0)
 		return 0;
 
-	pd = platform_get_drvdata(pdev);
 	tdata = pd->core_data[indx];
 
 	cpumask_clear_cpu(cpu, &pd->cpumask);
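The coretemp hunks above replace direct core-ID indexing (which overflows on CPUs with sparse, large core IDs) with slots handed out by an IDA and a slot-to-core-ID map. A minimal kernel-context sketch of that allocation pattern, with illustrative names and sizes (this is not the driver's code, just the idiom):

/* Kernel-context sketch: ida_alloc() hands back the smallest free
 * slot, so indices stay small and dense even when hardware IDs are
 * huge; hw_id_map records the reverse mapping for lookups.
 */
#include <linux/idr.h>

#define MAX_SLOTS 128

struct slots {
	struct ida ida;			/* ida_init() before first use */
	u16 hw_id_map[MAX_SLOTS];	/* slot -> hardware ID */
};

static int slot_add(struct slots *s, u16 hw_id)
{
	int index = ida_alloc(&s->ida, GFP_KERNEL);

	if (index < 0)
		return index;
	if (index >= MAX_SLOTS) {
		ida_free(&s->ida, index);
		return -ERANGE;
	}
	s->hw_id_map[index] = hw_id;
	return index;
}

static void slot_remove(struct slots *s, int index)
{
	ida_free(&s->ida, index);
}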
@@ -638,6 +638,11 @@ static int cci_probe(struct platform_device *pdev)
 	if (ret < 0)
 		goto error;
 
+	pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
+	pm_runtime_use_autosuspend(dev);
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+
 	for (i = 0; i < cci->data->num_masters; i++) {
 		if (!cci->master[i].cci)
 			continue;
@@ -649,14 +654,12 @@ static int cci_probe(struct platform_device *pdev)
 		}
 	}
 
-	pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
-	pm_runtime_use_autosuspend(dev);
-	pm_runtime_set_active(dev);
-	pm_runtime_enable(dev);
-
 	return 0;
 
 error_i2c:
+	pm_runtime_disable(dev);
+	pm_runtime_dont_use_autosuspend(dev);
+
 	for (--i ; i >= 0; i--) {
 		if (cci->master[i].cci) {
 			i2c_del_adapter(&cci->master[i].adap);
@@ -2846,6 +2846,7 @@ static int __init si_domain_init(int hw)
 
 	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
 		domain_exit(si_domain);
+		si_domain = NULL;
 		return -EFAULT;
 	}
 
@@ -3505,6 +3506,10 @@ free_iommu:
 		disable_dmar_iommu(iommu);
 		free_dmar_iommu(iommu);
 	}
+	if (si_domain) {
+		domain_exit(si_domain);
+		si_domain = NULL;
+	}
 
 	kfree(g_iommus);
 
@@ -158,6 +158,8 @@ vdec_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f)
 		else
 			return NULL;
+		fmt = find_format(inst, pixmp->pixelformat, f->type);
+		if (!fmt)
+			return NULL;
 	}
 
 	pixmp->width = clamp(pixmp->width, frame_width_min(inst),
@@ -1077,7 +1077,7 @@ static int mceusb_set_timeout(struct rc_dev *dev, unsigned int timeout)
 	struct mceusb_dev *ir = dev->priv;
 	unsigned int units;
 
-	units = DIV_ROUND_CLOSEST(timeout, MCE_TIME_UNIT);
+	units = DIV_ROUND_UP(timeout, MCE_TIME_UNIT);
 
 	cmdbuf[2] = units >> 8;
 	cmdbuf[3] = units;
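The one-line change above matters because rounding to the nearest hardware tick can produce a timeout shorter than the caller asked for, while rounding up never can. A standalone worked example (macros mirror the kernel's include/linux/math.h; the 50-us unit is illustrative):

/* closest: 124 us -> 2 ticks = 100 us, less than requested.
 * up:      124 us -> 3 ticks = 150 us, at least the requested value.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
#define DIV_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d))

int main(void)
{
	unsigned int unit = 50;		/* one hardware tick, in us */
	unsigned int timeout = 124;	/* requested timeout, in us */

	printf("closest: %u ticks\n", DIV_ROUND_CLOSEST(timeout, unit));
	printf("up:      %u ticks\n", DIV_ROUND_UP(timeout, unit));
	return 0;
}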
@@ -1077,6 +1077,11 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
 	nr = blk_rq_sectors(req);
 
 	do {
+		unsigned int erase_arg = card->erase_arg;
+
+		if (mmc_card_broken_sd_discard(card))
+			erase_arg = SD_ERASE_ARG;
+
 		err = 0;
 		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
 			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@@ -1087,7 +1092,7 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
 					 card->ext_csd.generic_cmd6_time);
 		}
 		if (!err)
-			err = mmc_erase(card, from, nr, card->erase_arg);
+			err = mmc_erase(card, from, nr, erase_arg);
 	} while (err == -EIO && !mmc_blk_reset(md, card->host, type));
 	if (err)
 		status = BLK_STS_IOERR;
@@ -70,6 +70,7 @@ struct mmc_fixup {
 #define EXT_CSD_REV_ANY (-1u)
 
 #define CID_MANFID_SANDISK      0x2
+#define CID_MANFID_SANDISK_SD   0x3
 #define CID_MANFID_ATP          0x9
 #define CID_MANFID_TOSHIBA      0x11
 #define CID_MANFID_MICRON       0x13
@@ -222,4 +223,9 @@ static inline int mmc_card_broken_hpi(const struct mmc_card *c)
 	return c->quirks & MMC_QUIRK_BROKEN_HPI;
 }
 
+static inline int mmc_card_broken_sd_discard(const struct mmc_card *c)
+{
+	return c->quirks & MMC_QUIRK_BROKEN_SD_DISCARD;
+}
+
 #endif
@@ -99,6 +99,12 @@ static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = {
 	MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
 		  MMC_QUIRK_TRIM_BROKEN),
 
+	/*
+	 * Some SD cards reports discard support while they don't
+	 */
+	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_SANDISK_SD, 0x5344, add_quirk_sd,
+		  MMC_QUIRK_BROKEN_SD_DISCARD),
+
 	END_FIXUP
 };
 
@@ -760,7 +760,7 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
 	 */
 	host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
 	clk_set_rate(pltfm_host->clk, host_clk);
-	tegra_host->curr_clk_rate = host_clk;
+	tegra_host->curr_clk_rate = clk_get_rate(pltfm_host->clk);
 	if (tegra_host->ddr_signaling)
 		host->max_clk = host_clk;
 	else
@@ -419,8 +419,10 @@ int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
 	hdev->cls_dev.release = hnae_release;
 	(void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
 	ret = device_register(&hdev->cls_dev);
-	if (ret)
+	if (ret) {
+		put_device(&hdev->cls_dev);
 		return ret;
+	}
 
 	__module_get(THIS_MODULE);
 
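The hnae fix above applies a general driver-model rule: once device_register() has been called, the embedded kobject owns the object's lifetime, so even on failure cleanup must go through put_device() (which invokes the release() callback), never a direct kfree(). A kernel-context sketch of the pattern with illustrative names:

/* Kernel-context sketch (not from the patch): error handling after
 * device_register(). put_device() drops the last reference and frees
 * the object via release(); kfree() here would corrupt kobject state.
 */
#include <linux/device.h>
#include <linux/slab.h>

struct my_dev {
	struct device dev;
};

static void my_release(struct device *dev)
{
	kfree(container_of(dev, struct my_dev, dev));	/* final free here */
}

static int my_register(void)
{
	struct my_dev *md = kzalloc(sizeof(*md), GFP_KERNEL);
	int ret;

	if (!md)
		return -ENOMEM;

	md->dev.release = my_release;
	dev_set_name(&md->dev, "my_dev0");
	ret = device_register(&md->dev);
	if (ret) {
		put_device(&md->dev);	/* not kfree(md)! */
		return ret;
	}
	return 0;
}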
@@ -2081,9 +2081,6 @@ static int i40e_set_ringparam(struct net_device *netdev,
 			 */
 			rx_rings[i].tail = hw->hw_addr + I40E_PRTGEN_STATUS;
 			err = i40e_setup_rx_descriptors(&rx_rings[i]);
 			if (err)
 				goto rx_unwind;
-			err = i40e_alloc_rx_bi(&rx_rings[i]);
-			if (err)
-				goto rx_unwind;
 
@@ -3409,12 +3409,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
 	if (ring->vsi->type == I40E_VSI_MAIN)
 		xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
 
-	kfree(ring->rx_bi);
 	ring->xsk_pool = i40e_xsk_pool(ring);
 	if (ring->xsk_pool) {
-		ret = i40e_alloc_rx_bi_zc(ring);
-		if (ret)
-			return ret;
 		ring->rx_buf_len =
 			xsk_pool_get_rx_frame_size(ring->xsk_pool);
 		/* For AF_XDP ZC, we disallow packets to span on
@@ -3432,9 +3428,6 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
 				 ring->queue_index);
 
 	} else {
-		ret = i40e_alloc_rx_bi(ring);
-		if (ret)
-			return ret;
 		ring->rx_buf_len = vsi->rx_buf_len;
 		if (ring->vsi->type == I40E_VSI_MAIN) {
 			ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
@@ -12684,6 +12677,14 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi,
 		i40e_reset_and_rebuild(pf, true, true);
 	}
 
+	if (!i40e_enabled_xdp_vsi(vsi) && prog) {
+		if (i40e_realloc_rx_bi_zc(vsi, true))
+			return -ENOMEM;
+	} else if (i40e_enabled_xdp_vsi(vsi) && !prog) {
+		if (i40e_realloc_rx_bi_zc(vsi, false))
+			return -ENOMEM;
+	}
+
 	for (i = 0; i < vsi->num_queue_pairs; i++)
 		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
 
@@ -12916,6 +12917,7 @@ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
 
 	i40e_queue_pair_disable_irq(vsi, queue_pair);
 	err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
+	i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
 	i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
 	i40e_queue_pair_clean_rings(vsi, queue_pair);
 	i40e_queue_pair_reset_stats(vsi, queue_pair);
@@ -1305,14 +1305,6 @@ err:
 	return -ENOMEM;
 }
 
-int i40e_alloc_rx_bi(struct i40e_ring *rx_ring)
-{
-	unsigned long sz = sizeof(*rx_ring->rx_bi) * rx_ring->count;
-
-	rx_ring->rx_bi = kzalloc(sz, GFP_KERNEL);
-	return rx_ring->rx_bi ? 0 : -ENOMEM;
-}
-
 static void i40e_clear_rx_bi(struct i40e_ring *rx_ring)
 {
 	memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count);
@@ -1443,6 +1435,11 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
 
 	rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
 
+	rx_ring->rx_bi =
+		kcalloc(rx_ring->count, sizeof(*rx_ring->rx_bi), GFP_KERNEL);
+	if (!rx_ring->rx_bi)
+		return -ENOMEM;
+
 	return 0;
 }
 
@@ -465,7 +465,6 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
 bool __i40e_chk_linearize(struct sk_buff *skb);
 int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 		  u32 flags);
-int i40e_alloc_rx_bi(struct i40e_ring *rx_ring);
 
 /**
  * i40e_get_head - Retrieve head from head writeback
@@ -9,14 +9,6 @@
 #include "i40e_txrx_common.h"
 #include "i40e_xsk.h"
 
-int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring)
-{
-	unsigned long sz = sizeof(*rx_ring->rx_bi_zc) * rx_ring->count;
-
-	rx_ring->rx_bi_zc = kzalloc(sz, GFP_KERNEL);
-	return rx_ring->rx_bi_zc ? 0 : -ENOMEM;
-}
-
 void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring)
 {
 	memset(rx_ring->rx_bi_zc, 0,
@@ -28,6 +20,58 @@ static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
 	return &rx_ring->rx_bi_zc[idx];
 }
 
+/**
+ * i40e_realloc_rx_xdp_bi - reallocate SW ring for either XSK or normal buffer
+ * @rx_ring: Current rx ring
+ * @pool_present: is pool for XSK present
+ *
+ * Try allocating memory and return ENOMEM, if failed to allocate.
+ * If allocation was successful, substitute buffer with allocated one.
+ * Returns 0 on success, negative on failure
+ */
+static int i40e_realloc_rx_xdp_bi(struct i40e_ring *rx_ring, bool pool_present)
+{
+	size_t elem_size = pool_present ? sizeof(*rx_ring->rx_bi_zc) :
+					  sizeof(*rx_ring->rx_bi);
+	void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);
+
+	if (!sw_ring)
+		return -ENOMEM;
+
+	if (pool_present) {
+		kfree(rx_ring->rx_bi);
+		rx_ring->rx_bi = NULL;
+		rx_ring->rx_bi_zc = sw_ring;
+	} else {
+		kfree(rx_ring->rx_bi_zc);
+		rx_ring->rx_bi_zc = NULL;
+		rx_ring->rx_bi = sw_ring;
+	}
+	return 0;
+}
+
+/**
+ * i40e_realloc_rx_bi_zc - reallocate rx SW rings
+ * @vsi: Current VSI
+ * @zc: is zero copy set
+ *
+ * Reallocate buffer for rx_rings that might be used by XSK.
+ * XDP requires more memory, than rx_buf provides.
+ * Returns 0 on success, negative on failure
+ */
+int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc)
+{
+	struct i40e_ring *rx_ring;
+	unsigned long q;
+
+	for_each_set_bit(q, vsi->af_xdp_zc_qps, vsi->alloc_queue_pairs) {
+		rx_ring = vsi->rx_rings[q];
+		if (i40e_realloc_rx_xdp_bi(rx_ring, zc))
+			return -ENOMEM;
+	}
+	return 0;
+}
+
 /**
  * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a
  * certain ring/qid
@@ -68,6 +112,10 @@ static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
 		if (err)
 			return err;
 
+		err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], true);
+		if (err)
+			return err;
+
 		err = i40e_queue_pair_enable(vsi, qid);
 		if (err)
 			return err;
@@ -112,6 +160,9 @@ static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
 	xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);
 
 	if (if_running) {
+		err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], false);
+		if (err)
+			return err;
 		err = i40e_queue_pair_enable(vsi, qid);
 		if (err)
 			return err;
@@ -17,7 +17,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget);
 
 bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring);
 int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
-int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring);
+int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc);
 void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring);
 
 #endif /* _I40E_XSK_H_ */
@@ -2383,11 +2383,15 @@ err_out:
 	 * than the full array, but leave the qcq shells in place
 	 */
 	for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) {
-		lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
-		ionic_qcq_free(lif, lif->txqcqs[i]);
+		if (lif->txqcqs && lif->txqcqs[i]) {
+			lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
+			ionic_qcq_free(lif, lif->txqcqs[i]);
+		}
 
-		lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
-		ionic_qcq_free(lif, lif->rxqcqs[i]);
+		if (lif->rxqcqs && lif->rxqcqs[i]) {
+			lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
+			ionic_qcq_free(lif, lif->rxqcqs[i]);
+		}
 	}
 
 	return err;
@@ -3255,6 +3255,30 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
 	bool was_enabled = efx->port_enabled;
 	int rc;
 
+#ifdef CONFIG_SFC_SRIOV
+	/* If this function is a VF and we have access to the parent PF,
+	 * then use the PF control path to attempt to change the VF MAC address.
+	 */
+	if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
+		struct efx_nic *efx_pf = pci_get_drvdata(efx->pci_dev->physfn);
+		struct efx_ef10_nic_data *nic_data = efx->nic_data;
+		u8 mac[ETH_ALEN];
+
+		/* net_dev->dev_addr can be zeroed by efx_net_stop in
+		 * efx_ef10_sriov_set_vf_mac, so pass in a copy.
+		 */
+		ether_addr_copy(mac, efx->net_dev->dev_addr);
+
+		rc = efx_ef10_sriov_set_vf_mac(efx_pf, nic_data->vf_index, mac);
+		if (!rc)
+			return 0;
+
+		netif_dbg(efx, drv, efx->net_dev,
+			  "Updating VF mac via PF failed (%d), setting directly\n",
+			  rc);
+	}
+#endif
+
 	efx_device_detach_sync(efx);
 	efx_net_stop(efx->net_dev);
 
@@ -3277,40 +3301,6 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
 	efx_net_open(efx->net_dev);
 	efx_device_attach_if_not_resetting(efx);
 
-#ifdef CONFIG_SFC_SRIOV
-	if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
-		struct efx_ef10_nic_data *nic_data = efx->nic_data;
-		struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
-
-		if (rc == -EPERM) {
-			struct efx_nic *efx_pf;
-
-			/* Switch to PF and change MAC address on vport */
-			efx_pf = pci_get_drvdata(pci_dev_pf);
-
-			rc = efx_ef10_sriov_set_vf_mac(efx_pf,
-						       nic_data->vf_index,
-						       efx->net_dev->dev_addr);
-		} else if (!rc) {
-			struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
-			struct efx_ef10_nic_data *nic_data = efx_pf->nic_data;
-			unsigned int i;
-
-			/* MAC address successfully changed by VF (with MAC
-			 * spoofing) so update the parent PF if possible.
-			 */
-			for (i = 0; i < efx_pf->vf_count; ++i) {
-				struct ef10_vf *vf = nic_data->vf + i;
-
-				if (vf->efx == efx) {
-					ether_addr_copy(vf->mac,
-							efx->net_dev->dev_addr);
-					return 0;
-				}
-			}
-		}
-	} else
-#endif
 	if (rc == -EPERM) {
 		netif_err(efx, drv, efx->net_dev,
 			  "Cannot change MAC address; use sfboot to enable"
@@ -157,7 +157,8 @@ struct efx_filter_spec {
 	u32	flags:6;
 	u32	dmaq_id:12;
 	u32	rss_context;
-	__be16	outer_vid __aligned(4); /* allow jhash2() of match values */
+	u32	vport_id;
+	__be16	outer_vid;
 	__be16	inner_vid;
 	u8	loc_mac[ETH_ALEN];
 	u8	rem_mac[ETH_ALEN];
@@ -676,17 +676,17 @@ bool efx_filter_spec_equal(const struct efx_filter_spec *left,
 		     (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
 		return false;
 
-	return memcmp(&left->outer_vid, &right->outer_vid,
+	return memcmp(&left->vport_id, &right->vport_id,
 		      sizeof(struct efx_filter_spec) -
-		      offsetof(struct efx_filter_spec, outer_vid)) == 0;
+		      offsetof(struct efx_filter_spec, vport_id)) == 0;
 }
 
 u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
 {
-	BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
-	return jhash2((const u32 *)&spec->outer_vid,
+	BUILD_BUG_ON(offsetof(struct efx_filter_spec, vport_id) & 3);
+	return jhash2((const u32 *)&spec->vport_id,
 		      (sizeof(struct efx_filter_spec) -
-		       offsetof(struct efx_filter_spec, outer_vid)) / 4,
+		       offsetof(struct efx_filter_spec, vport_id)) / 4,
 		      0);
 }
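The sfc hunks above rely on a hash-the-struct-tail idiom: every match field from a chosen member to the end of the struct is hashed as an array of u32 words, so that member must be 4-byte aligned and the fix works simply by moving vport_id ahead of outer_vid. A standalone C sketch of the idiom, with a simple FNV-1a loop standing in for the kernel's jhash2() and purely illustrative field names:

/* Sketch: hash everything from ->vport_id to the end of the struct.
 * The _Static_assert mirrors the BUILD_BUG_ON in the patch.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct filter_spec {
	uint32_t priority;	/* not part of the match -> not hashed */
	uint32_t vport_id;	/* hash starts here */
	uint16_t outer_vid;
	uint16_t inner_vid;
	uint8_t  loc_mac[6];
	uint8_t  pad[2];	/* keep the hashed tail a multiple of 4 */
};

_Static_assert(offsetof(struct filter_spec, vport_id) % 4 == 0,
	       "hash start must be u32-aligned");

static uint32_t hash_words(const uint32_t *w, size_t n)
{
	uint32_t h = 2166136261u;

	while (n--) {
		h ^= *w++;
		h *= 16777619u;
	}
	return h;
}

int main(void)
{
	struct filter_spec spec = { .vport_id = 7, .outer_vid = 100 };
	size_t n = (sizeof(spec) - offsetof(struct filter_spec, vport_id)) / 4;

	printf("hash=%#x\n", hash_words((const uint32_t *)&spec.vport_id, n));
	return 0;
}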
@@ -977,7 +977,8 @@ struct net_device_context {
 	u32 vf_alloc;
 	/* Serial number of the VF to team with */
 	u32 vf_serial;
 
+	/* completion variable to confirm vf association */
+	struct completion vf_add;
 	/* Is the current data path through the VF NIC? */
 	bool data_path_is_vf;
 
@@ -1327,6 +1327,10 @@ static void netvsc_send_vf(struct net_device *ndev,
 
 	net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
 	net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
+
+	if (net_device_ctx->vf_alloc)
+		complete(&net_device_ctx->vf_add);
+
 	netdev_info(ndev, "VF slot %u %s\n",
 		    net_device_ctx->vf_serial,
 		    net_device_ctx->vf_alloc ? "added" : "removed");
@@ -2290,6 +2290,7 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
 {
 	struct device *parent = vf_netdev->dev.parent;
 	struct net_device_context *ndev_ctx;
+	struct net_device *ndev;
 	struct pci_dev *pdev;
 	u32 serial;
 
@@ -2316,6 +2317,18 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
 			return hv_get_drvdata(ndev_ctx->device_ctx);
 	}
 
+	/* Fallback path to check synthetic vf with
+	 * help of mac addr
+	 */
+	list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
+		ndev = hv_get_drvdata(ndev_ctx->device_ctx);
+		if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr)) {
+			netdev_notice(vf_netdev,
+				      "falling back to mac addr based matching\n");
+			return ndev;
+		}
+	}
+
 	netdev_notice(vf_netdev,
 		      "no netdev found for vf serial:%u\n", serial);
 	return NULL;
@@ -2406,6 +2419,11 @@ static int netvsc_vf_changed(struct net_device *vf_netdev)
 		return NOTIFY_OK;
 	net_device_ctx->data_path_is_vf = vf_is_up;
 
+	if (vf_is_up && !net_device_ctx->vf_alloc) {
+		netdev_info(ndev, "Waiting for the VF association from host\n");
+		wait_for_completion(&net_device_ctx->vf_add);
+	}
+
 	netvsc_switch_datapath(ndev, vf_is_up);
 	netdev_info(ndev, "Data path switched %s VF: %s\n",
 		    vf_is_up ? "to" : "from", vf_netdev->name);
@@ -2429,6 +2447,7 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
 
 	netvsc_vf_setxdp(vf_netdev, NULL);
 
+	reinit_completion(&net_device_ctx->vf_add);
 	netdev_rx_handler_unregister(vf_netdev);
 	netdev_upper_dev_unlink(vf_netdev, ndev);
 	RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
@@ -2466,6 +2485,7 @@ static int netvsc_probe(struct hv_device *dev,
 
 	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
 
+	init_completion(&net_device_ctx->vf_add);
 	spin_lock_init(&net_device_ctx->lock);
 	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
 	INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);
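The netvsc hunks above close the race with a standard completion handshake: the path that needs the VF association blocks until the message handler signals it. A kernel-context sketch of that pattern with illustrative names (not the driver's code):

/* Sketch: completion handshake between a message handler and a
 * notifier path, as added to netvsc above.
 */
#include <linux/completion.h>

struct ctx {
	u32 vf_alloc;
	struct completion vf_add;	/* init_completion() at probe time */
};

/* message handler: runs when the host's association message arrives */
static void handle_vf_assoc(struct ctx *c, u32 allocated)
{
	c->vf_alloc = allocated;
	if (c->vf_alloc)
		complete(&c->vf_add);	/* wake any waiter */
}

/* notifier path: VF came up before the association message */
static void vf_changed_up(struct ctx *c)
{
	if (!c->vf_alloc)
		wait_for_completion(&c->vf_add);	/* sleep until handled */
	/* ... switch the data path to the VF ... */
}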
@@ -268,8 +268,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
 			DP83822_EEE_ERROR_CHANGE_INT_EN);
 
 		if (!dp83822->fx_enabled)
-			misr_status |= DP83822_MDI_XOVER_INT_EN |
-				DP83822_ANEG_ERR_INT_EN |
+			misr_status |= DP83822_ANEG_ERR_INT_EN |
 				DP83822_WOL_PKT_INT_EN;
 
 		err = phy_write(phydev, MII_DP83822_MISR2, misr_status);
@@ -757,6 +757,14 @@ static int dp83867_config_init(struct phy_device *phydev)
 		else
 			val &= ~DP83867_SGMII_TYPE;
 		phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_SGMIICTL, val);
+
+		/* This is a SW workaround for link instability if RX_CTRL is
+		 * not strapped to mode 3 or 4 in HW. This is required for SGMII
+		 * in addition to clearing bit 7, handled above.
+		 */
+		if (dp83867->rxctrl_strap_quirk)
+			phy_set_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
+					 BIT(8));
 	}
 
 	val = phy_read(phydev, DP83867_CFG3);
@@ -763,6 +763,13 @@ static const struct usb_device_id products[] = {
 },
 #endif
 
+/* Lenovo ThinkPad OneLink+ Dock (based on Realtek RTL8153) */
+{
+	USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3054, USB_CLASS_COMM,
+			USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+	.driver_info = 0,
+},
+
 /* ThinkPad USB-C Dock (based on Realtek RTL8153) */
 {
 	USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3062, USB_CLASS_COMM,
@@ -6870,6 +6870,7 @@ static const struct usb_device_id rtl8152_table[] = {
 	{REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927)},
 	{REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
 	{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x304f)},
+	{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x3054)},
 	{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x3062)},
 	{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x3069)},
 	{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x3082)},
@@ -3232,8 +3232,12 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 		return ret;
 
 	if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) {
+		/*
+		 * Do not return errors unless we are in a controller reset,
+		 * the controller works perfectly fine without hwmon.
+		 */
 		ret = nvme_hwmon_init(ctrl);
-		if (ret < 0)
+		if (ret == -EINTR)
 			return ret;
 	}
 
@@ -4485,6 +4489,7 @@ EXPORT_SYMBOL_GPL(nvme_start_ctrl);
 
 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
 {
+	nvme_hwmon_exit(ctrl);
 	nvme_fault_inject_fini(&ctrl->fault_inject);
 	dev_pm_qos_hide_latency_tolerance(ctrl->device);
 	cdev_device_del(&ctrl->cdev, ctrl->device);
@@ -12,7 +12,7 @@
 
 struct nvme_hwmon_data {
 	struct nvme_ctrl *ctrl;
-	struct nvme_smart_log log;
+	struct nvme_smart_log *log;
 	struct mutex read_lock;
 };
 
@@ -60,14 +60,14 @@ static int nvme_set_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under,
 static int nvme_hwmon_get_smart_log(struct nvme_hwmon_data *data)
 {
 	return nvme_get_log(data->ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0,
-			   NVME_CSI_NVM, &data->log, sizeof(data->log), 0);
+			   NVME_CSI_NVM, data->log, sizeof(*data->log), 0);
 }
 
 static int nvme_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
 			   u32 attr, int channel, long *val)
 {
 	struct nvme_hwmon_data *data = dev_get_drvdata(dev);
-	struct nvme_smart_log *log = &data->log;
+	struct nvme_smart_log *log = data->log;
 	int temp;
 	int err;
 
@@ -163,7 +163,7 @@ static umode_t nvme_hwmon_is_visible(const void *_data,
 	case hwmon_temp_max:
 	case hwmon_temp_min:
 		if ((!channel && data->ctrl->wctemp) ||
-		    (channel && data->log.temp_sensor[channel - 1])) {
+		    (channel && data->log->temp_sensor[channel - 1])) {
 			if (data->ctrl->quirks &
 			    NVME_QUIRK_NO_TEMP_THRESH_CHANGE)
 				return 0444;
@@ -176,7 +176,7 @@ static umode_t nvme_hwmon_is_visible(const void *_data,
 		break;
 	case hwmon_temp_input:
 	case hwmon_temp_label:
-		if (!channel || data->log.temp_sensor[channel - 1])
+		if (!channel || data->log->temp_sensor[channel - 1])
 			return 0444;
 		break;
 	default:
@@ -223,33 +223,57 @@ static const struct hwmon_chip_info nvme_hwmon_chip_info = {
 
 int nvme_hwmon_init(struct nvme_ctrl *ctrl)
 {
-	struct device *dev = ctrl->dev;
+	struct device *dev = ctrl->device;
 	struct nvme_hwmon_data *data;
 	struct device *hwmon;
 	int err;
 
-	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
-		return 0;
+		return -ENOMEM;
+
+	data->log = kzalloc(sizeof(*data->log), GFP_KERNEL);
+	if (!data->log) {
+		err = -ENOMEM;
+		goto err_free_data;
+	}
 
 	data->ctrl = ctrl;
 	mutex_init(&data->read_lock);
 
 	err = nvme_hwmon_get_smart_log(data);
 	if (err) {
-		dev_warn(ctrl->device,
-			 "Failed to read smart log (error %d)\n", err);
-		devm_kfree(dev, data);
-		return err;
+		dev_warn(dev, "Failed to read smart log (error %d)\n", err);
+		goto err_free_log;
 	}
 
-	hwmon = devm_hwmon_device_register_with_info(dev, "nvme", data,
-						     &nvme_hwmon_chip_info,
-						     NULL);
+	hwmon = hwmon_device_register_with_info(dev, "nvme",
+						data, &nvme_hwmon_chip_info,
+						NULL);
 	if (IS_ERR(hwmon)) {
 		dev_warn(dev, "Failed to instantiate hwmon device\n");
-		devm_kfree(dev, data);
+		err = PTR_ERR(hwmon);
+		goto err_free_log;
 	}
+
+	ctrl->hwmon_device = hwmon;
 	return 0;
+
+err_free_log:
+	kfree(data->log);
+err_free_data:
+	kfree(data);
+	return err;
 }
+
+void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
+{
+	if (ctrl->hwmon_device) {
+		struct nvme_hwmon_data *data =
+			dev_get_drvdata(ctrl->hwmon_device);
+
+		hwmon_device_unregister(ctrl->hwmon_device);
+		ctrl->hwmon_device = NULL;
+		kfree(data->log);
+		kfree(data);
+	}
+}
@@ -257,6 +257,9 @@ struct nvme_ctrl {
 	struct rw_semaphore namespaces_rwsem;
 	struct device ctrl_device;
 	struct device *device;	/* char device */
+#ifdef CONFIG_NVME_HWMON
+	struct device *hwmon_device;
+#endif
 	struct cdev cdev;
 	struct work_struct reset_work;
 	struct work_struct delete_work;
@@ -876,11 +879,16 @@ static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
 
 #ifdef CONFIG_NVME_HWMON
 int nvme_hwmon_init(struct nvme_ctrl *ctrl);
+void nvme_hwmon_exit(struct nvme_ctrl *ctrl);
 #else
 static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
 {
 	return 0;
 }
+
+static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
+{
+}
 #endif
 
 u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
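One detail worth noting in the hwmon conversion above: when the embedded struct nvme_smart_log became a kmalloc'ed pointer, every sizeof had to follow it, because sizeof(data->log) silently becomes the size of a pointer rather than of the log. A two-line illustration of the pitfall (hypothetical variable, not driver code):

    struct nvme_smart_log *log = kzalloc(sizeof(*log), GFP_KERNEL); /* whole struct */
    /* kzalloc(sizeof(log), ...) would allocate only 8 bytes on 64-bit */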
@@ -44,9 +44,10 @@ struct gntdev_unmap_notify {
 };
 
 struct gntdev_grant_map {
+	atomic_t in_use;
 	struct mmu_interval_notifier notifier;
+	bool notifier_init;
 	struct list_head next;
-	struct vm_area_struct *vma;
 	int index;
 	int count;
 	int flags;
@@ -276,6 +276,9 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
 	 */
 	}
 
+	if (use_ptemod && map->notifier_init)
+		mmu_interval_notifier_remove(&map->notifier);
+
 	if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
 		notify_remote_via_evtchn(map->notify.event);
 		evtchn_put(map->notify.event);
@@ -288,21 +291,14 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
 static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data)
 {
 	struct gntdev_grant_map *map = data;
-	unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
-	int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
+	unsigned int pgnr = (addr - map->pages_vm_start) >> PAGE_SHIFT;
+	int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte |
+		    (1 << _GNTMAP_guest_avail0);
 	u64 pte_maddr;
 
 	BUG_ON(pgnr >= map->count);
 	pte_maddr = arbitrary_virt_to_machine(pte).maddr;
-
-	/*
-	 * Set the PTE as special to force get_user_pages_fast() fall
-	 * back to the slow path. If this is not supported as part of
-	 * the grant map, it will be done afterwards.
-	 */
-	if (xen_feature(XENFEAT_gnttab_map_avail_bits))
-		flags |= (1 << _GNTMAP_guest_avail0);
-
 	gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
 			  map->grants[pgnr].ref,
 			  map->grants[pgnr].domid);
@@ -311,14 +307,6 @@ static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data)
 	return 0;
 }
 
-#ifdef CONFIG_X86
-static int set_grant_ptes_as_special(pte_t *pte, unsigned long addr, void *data)
-{
-	set_pte_at(current->mm, addr, pte, pte_mkspecial(*pte));
-	return 0;
-}
-#endif
-
 int gntdev_map_grant_pages(struct gntdev_grant_map *map)
 {
 	size_t alloced = 0;
@@ -493,11 +481,7 @@ static void gntdev_vma_close(struct vm_area_struct *vma)
 	struct gntdev_priv *priv = file->private_data;
 
 	pr_debug("gntdev_vma_close %p\n", vma);
-	if (use_ptemod) {
-		WARN_ON(map->vma != vma);
-		mmu_interval_notifier_remove(&map->notifier);
-		map->vma = NULL;
-	}
+
 	vma->vm_private_data = NULL;
 	gntdev_put_map(priv, map);
 }
@@ -525,29 +509,30 @@ static bool gntdev_invalidate(struct mmu_interval_notifier *mn,
 	struct gntdev_grant_map *map =
 		container_of(mn, struct gntdev_grant_map, notifier);
 	unsigned long mstart, mend;
+	unsigned long map_start, map_end;
 
 	if (!mmu_notifier_range_blockable(range))
 		return false;
 
+	map_start = map->pages_vm_start;
+	map_end = map->pages_vm_start + (map->count << PAGE_SHIFT);
+
 	/*
 	 * If the VMA is split or otherwise changed the notifier is not
 	 * updated, but we don't want to process VA's outside the modified
 	 * VMA. FIXME: It would be much more understandable to just prevent
 	 * modifying the VMA in the first place.
 	 */
-	if (map->vma->vm_start >= range->end ||
-	    map->vma->vm_end <= range->start)
+	if (map_start >= range->end || map_end <= range->start)
 		return true;
 
-	mstart = max(range->start, map->vma->vm_start);
-	mend = min(range->end, map->vma->vm_end);
+	mstart = max(range->start, map_start);
+	mend = min(range->end, map_end);
 	pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
-			map->index, map->count,
-			map->vma->vm_start, map->vma->vm_end,
-			range->start, range->end, mstart, mend);
-	unmap_grant_pages(map,
-				(mstart - map->vma->vm_start) >> PAGE_SHIFT,
-				(mend - mstart) >> PAGE_SHIFT);
+		 map->index, map->count, map_start, map_end,
+		 range->start, range->end, mstart, mend);
+	unmap_grant_pages(map, (mstart - map_start) >> PAGE_SHIFT,
+			  (mend - mstart) >> PAGE_SHIFT);
 
 	return true;
 }
@@ -1027,18 +1012,15 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 		return -EINVAL;
 
 	pr_debug("map %d+%d at %lx (pgoff %lx)\n",
-			index, count, vma->vm_start, vma->vm_pgoff);
+		 index, count, vma->vm_start, vma->vm_pgoff);
 
 	mutex_lock(&priv->lock);
 	map = gntdev_find_map_index(priv, index, count);
 	if (!map)
 		goto unlock_out;
-	if (use_ptemod && map->vma)
+	if (!atomic_add_unless(&map->in_use, 1, 1))
 		goto unlock_out;
+	if (atomic_read(&map->live_grants)) {
+		err = -EAGAIN;
+		goto unlock_out;
+	}
 
 	refcount_inc(&map->users);
 
 	vma->vm_ops = &gntdev_vmops;
@@ -1059,15 +1041,16 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 		map->flags |= GNTMAP_readonly;
 	}
 
+	map->pages_vm_start = vma->vm_start;
+
 	if (use_ptemod) {
-		map->vma = vma;
 		err = mmu_interval_notifier_insert_locked(
 			&map->notifier, vma->vm_mm, vma->vm_start,
 			vma->vm_end - vma->vm_start, &gntdev_mmu_ops);
-		if (err) {
-			map->vma = NULL;
+		if (err)
 			goto out_unlock_put;
-		}
+
+		map->notifier_init = true;
 	}
 	mutex_unlock(&priv->lock);
 
@@ -1084,7 +1067,6 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 	 */
 	mmu_interval_read_begin(&map->notifier);
 
-	map->pages_vm_start = vma->vm_start;
 	err = apply_to_page_range(vma->vm_mm, vma->vm_start,
 				  vma->vm_end - vma->vm_start,
 				  find_grant_ptes, map);
@@ -1102,23 +1084,6 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 		err = vm_map_pages_zero(vma, map->pages, map->count);
 		if (err)
 			goto out_put_map;
-	} else {
-#ifdef CONFIG_X86
-		/*
-		 * If the PTEs were not made special by the grant map
-		 * hypercall, do so here.
-		 *
-		 * This is racy since the mapping is already visible
-		 * to userspace but userspace should be well-behaved
-		 * enough to not touch it until the mmap() call
-		 * returns.
-		 */
-		if (!xen_feature(XENFEAT_gnttab_map_avail_bits)) {
-			apply_to_page_range(vma->vm_mm, vma->vm_start,
-					vma->vm_end - vma->vm_start,
-					set_grant_ptes_as_special, NULL);
-		}
-#endif
 	}
 
 	return 0;
@@ -1130,13 +1095,8 @@ unlock_out:
 out_unlock_put:
 	mutex_unlock(&priv->lock);
 out_put_map:
-	if (use_ptemod) {
+	if (use_ptemod)
 		unmap_grant_pages(map, 0, map->count);
-		if (map->vma) {
-			mmu_interval_notifier_remove(&map->notifier);
-			map->vma = NULL;
-		}
-	}
	gntdev_put_map(priv, map);
 	return err;
 }
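The atomic_add_unless() test added to gntdev_mmap() above doubles as a one-shot guard: the counter starts at zero and may reach one exactly once, so a second mmap() of the same grant map fails cleanly instead of corrupting state. A minimal sketch of the idiom, with hypothetical names:

    #include <linux/atomic.h>

    static atomic_t in_use = ATOMIC_INIT(0);

    static int claim_once(void)
    {
            /* add 1 unless the value is already 1; returns 0 if it was */
            if (!atomic_add_unless(&in_use, 1, 1))
                    return -EBUSY;  /* somebody already mapped this */
            return 0;
    }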
@@ -137,6 +137,7 @@ struct share_check {
 	u64 root_objectid;
 	u64 inum;
 	int share_count;
+	bool have_delayed_delete_refs;
 };
 
 static inline int extent_is_shared(struct share_check *sc)
@@ -817,16 +818,11 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
 			    struct preftrees *preftrees, struct share_check *sc)
 {
 	struct btrfs_delayed_ref_node *node;
-	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
 	struct btrfs_key key;
-	struct btrfs_key tmp_op_key;
 	struct rb_node *n;
 	int count;
 	int ret = 0;
 
-	if (extent_op && extent_op->update_key)
-		btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);
-
 	spin_lock(&head->lock);
 	for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
 		node = rb_entry(n, struct btrfs_delayed_ref_node,
@@ -852,10 +848,16 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
 		case BTRFS_TREE_BLOCK_REF_KEY: {
 			/* NORMAL INDIRECT METADATA backref */
 			struct btrfs_delayed_tree_ref *ref;
+			struct btrfs_key *key_ptr = NULL;
+
+			if (head->extent_op && head->extent_op->update_key) {
+				btrfs_disk_key_to_cpu(&key, &head->extent_op->key);
+				key_ptr = &key;
+			}
 
 			ref = btrfs_delayed_node_to_tree_ref(node);
 			ret = add_indirect_ref(fs_info, preftrees, ref->root,
-					       &tmp_op_key, ref->level + 1,
+					       key_ptr, ref->level + 1,
 					       node->bytenr, count, sc,
 					       GFP_ATOMIC);
 			break;
@@ -881,13 +883,22 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
 			key.offset = ref->offset;
 
 			/*
-			 * Found a inum that doesn't match our known inum, we
-			 * know it's shared.
+			 * If we have a share check context and a reference for
+			 * another inode, we can't exit immediately. This is
+			 * because even if this is a BTRFS_ADD_DELAYED_REF
+			 * reference we may find next a BTRFS_DROP_DELAYED_REF
+			 * which cancels out this ADD reference.
+			 *
+			 * If this is a DROP reference and there was no previous
+			 * ADD reference, then we need to signal that when we
+			 * process references from the extent tree (through
+			 * add_inline_refs() and add_keyed_refs()), we should
+			 * not exit early if we find a reference for another
+			 * inode, because one of the delayed DROP references
+			 * may cancel that reference in the extent tree.
 			 */
-			if (sc && sc->inum && ref->objectid != sc->inum) {
-				ret = BACKREF_FOUND_SHARED;
-				goto out;
-			}
+			if (sc && count < 0)
+				sc->have_delayed_delete_refs = true;
 
 			ret = add_indirect_ref(fs_info, preftrees, ref->root,
 					       &key, 0, node->bytenr, count, sc,
@@ -917,7 +928,7 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
 	}
 	if (!ret)
 		ret = extent_is_shared(sc);
-out:
+
 	spin_unlock(&head->lock);
 	return ret;
 }
@@ -1020,7 +1031,8 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info,
 			key.type = BTRFS_EXTENT_DATA_KEY;
 			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
 
-			if (sc && sc->inum && key.objectid != sc->inum) {
+			if (sc && sc->inum && key.objectid != sc->inum &&
+			    !sc->have_delayed_delete_refs) {
 				ret = BACKREF_FOUND_SHARED;
 				break;
 			}
@@ -1030,6 +1042,7 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info,
 			ret = add_indirect_ref(fs_info, preftrees, root,
 					       &key, 0, bytenr, count,
 					       sc, GFP_NOFS);
+
 			break;
 		}
 		default:
@@ -1119,7 +1132,8 @@ static int add_keyed_refs(struct btrfs_fs_info *fs_info,
 			key.type = BTRFS_EXTENT_DATA_KEY;
 			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
 
-			if (sc && sc->inum && key.objectid != sc->inum) {
+			if (sc && sc->inum && key.objectid != sc->inum &&
+			    !sc->have_delayed_delete_refs) {
 				ret = BACKREF_FOUND_SHARED;
 				break;
 			}
@@ -1542,6 +1556,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
 		.root_objectid = root->root_key.objectid,
 		.inum = inum,
 		.share_count = 0,
+		.have_delayed_delete_refs = false,
 	};
 
 	ulist_init(roots);
@@ -1576,6 +1591,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
 			break;
 		bytenr = node->val;
 		shared.share_count = 0;
+		shared.have_delayed_delete_refs = false;
 		cond_resched();
 	}
 
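For readers following the have_delayed_delete_refs logic above: delayed refs carry a signed modification count, so an ADD and a later DROP for the same extent cancel out, which is exactly why the walk can no longer bail out on the first reference for a foreign inode. Roughly how the sign is derived earlier in add_delayed_refs() (paraphrased from memory, not a verbatim quote of the function):

    switch (node->action) {
    case BTRFS_ADD_DELAYED_REF:
            count = node->ref_mod;          /* positive: reference added */
            break;
    case BTRFS_DROP_DELAYED_REF:
            count = node->ref_mod * -1;     /* negative: reference dropped */
            break;
    }
    /* a negative count on a data ref is what sets sc->have_delayed_delete_refs */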
@@ -1221,8 +1221,11 @@ static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
 	ssize_t rc;
 	struct cifsFileInfo *cfile = dst_file->private_data;
 
-	if (cfile->swapfile)
-		return -EOPNOTSUPP;
+	if (cfile->swapfile) {
+		rc = -EOPNOTSUPP;
+		free_xid(xid);
+		return rc;
+	}
 
 	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
 					len, flags);
@@ -1735,11 +1735,13 @@ int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
 	struct cifsFileInfo *cfile;
 	__u32 type;
 
-	rc = -EACCES;
 	xid = get_xid();
 
-	if (!(fl->fl_flags & FL_FLOCK))
-		return -ENOLCK;
+	if (!(fl->fl_flags & FL_FLOCK)) {
+		rc = -ENOLCK;
+		free_xid(xid);
+		return rc;
+	}
 
 	cfile = (struct cifsFileInfo *)file->private_data;
 	tcon = tlink_tcon(cfile->tlink);
@@ -1758,8 +1760,9 @@ int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
 		 * if no lock or unlock then nothing to do since we do not
 		 * know what it is
 		 */
+		rc = -EOPNOTSUPP;
 		free_xid(xid);
-		return -EOPNOTSUPP;
+		return rc;
 	}
 
 	rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
@@ -306,6 +306,7 @@ out:
 	cifs_put_tcp_session(chan->server, 0);
 	unload_nls(vol.local_nls);
 
+	free_xid(xid);
 	return rc;
 }
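All three cifs fixes above are the same bug class: get_xid() opens a request context that every exit path must close with free_xid(). One common shape that avoids the leak is a single exit label (the patches instead call free_xid() explicitly before each early return); sketched with a hypothetical operation:

    unsigned int xid = get_xid();
    int rc = -EOPNOTSUPP;

    if (!supported(file))        /* hypothetical early-exit check */
            goto out;            /* do NOT plain 'return' here */

    rc = do_operation(xid, file);
    out:
    free_xid(xid);
    return rc;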
fs/fcntl.c
@@ -148,12 +148,17 @@ void f_delown(struct file *filp)
 
 pid_t f_getown(struct file *filp)
 {
-	pid_t pid;
-	read_lock(&filp->f_owner.lock);
-	pid = pid_vnr(filp->f_owner.pid);
-	if (filp->f_owner.pid_type == PIDTYPE_PGID)
-		pid = -pid;
-	read_unlock(&filp->f_owner.lock);
+	pid_t pid = 0;
+
+	read_lock_irq(&filp->f_owner.lock);
+	rcu_read_lock();
+	if (pid_task(filp->f_owner.pid, filp->f_owner.pid_type)) {
+		pid = pid_vnr(filp->f_owner.pid);
+		if (filp->f_owner.pid_type == PIDTYPE_PGID)
+			pid = -pid;
+	}
+	rcu_read_unlock();
+	read_unlock_irq(&filp->f_owner.lock);
 	return pid;
 }
 
@@ -200,11 +205,14 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
 static int f_getown_ex(struct file *filp, unsigned long arg)
 {
 	struct f_owner_ex __user *owner_p = (void __user *)arg;
-	struct f_owner_ex owner;
+	struct f_owner_ex owner = {};
 	int ret = 0;
 
-	read_lock(&filp->f_owner.lock);
-	owner.pid = pid_vnr(filp->f_owner.pid);
+	read_lock_irq(&filp->f_owner.lock);
+	rcu_read_lock();
+	if (pid_task(filp->f_owner.pid, filp->f_owner.pid_type))
+		owner.pid = pid_vnr(filp->f_owner.pid);
+	rcu_read_unlock();
 	switch (filp->f_owner.pid_type) {
 	case PIDTYPE_PID:
 		owner.type = F_OWNER_TID;
@@ -223,7 +231,7 @@ static int f_getown_ex(struct file *filp, unsigned long arg)
 		ret = -EINVAL;
 		break;
 	}
-	read_unlock(&filp->f_owner.lock);
+	read_unlock_irq(&filp->f_owner.lock);
 
 	if (!ret) {
 		ret = copy_to_user(owner_p, &owner, sizeof(owner));
@@ -241,10 +249,10 @@ static int f_getowner_uids(struct file *filp, unsigned long arg)
 	uid_t src[2];
 	int err;
 
-	read_lock(&filp->f_owner.lock);
+	read_lock_irq(&filp->f_owner.lock);
 	src[0] = from_kuid(user_ns, filp->f_owner.uid);
 	src[1] = from_kuid(user_ns, filp->f_owner.euid);
-	read_unlock(&filp->f_owner.lock);
+	read_unlock_irq(&filp->f_owner.lock);
 
 	err = put_user(src[0], &dst[0]);
 	err |= put_user(src[1], &dst[1]);
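Two independent fcntl fixes meet in these hunks: the pid_task() check under RCU makes F_GETOWN(EX) report 0 once the owner task is dead, and the _irq lock variants address a deadlock risk, since f_owner.lock can also be taken from interrupt context (roughly, via kill_fasync() sending SIGIO), so process-context holders must keep interrupts off. Sketched, the unsafe and safe reader patterns are:

    /* risky if the same rwlock is also taken in IRQ context: */
    read_lock(&filp->f_owner.lock);
    /* ... */
    read_unlock(&filp->f_owner.lock);

    /* safe: interrupts stay disabled while the read lock is held */
    read_lock_irq(&filp->f_owner.lock);
    /* ... */
    read_unlock_irq(&filp->f_owner.lock);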
@@ -231,6 +231,7 @@ static int ocfs2_mknod(struct inode *dir,
 	handle_t *handle = NULL;
 	struct ocfs2_super *osb;
 	struct ocfs2_dinode *dirfe;
+	struct ocfs2_dinode *fe = NULL;
 	struct buffer_head *new_fe_bh = NULL;
 	struct inode *inode = NULL;
 	struct ocfs2_alloc_context *inode_ac = NULL;
@@ -381,6 +382,7 @@ static int ocfs2_mknod(struct inode *dir,
 		goto leave;
 	}
 
+	fe = (struct ocfs2_dinode *) new_fe_bh->b_data;
 	if (S_ISDIR(mode)) {
 		status = ocfs2_fill_new_dir(osb, handle, dir, inode,
 					    new_fe_bh, data_ac, meta_ac);
@@ -453,8 +455,11 @@ roll_back:
 leave:
 	if (status < 0 && did_quota_inode)
 		dquot_free_inode(inode);
-	if (handle)
+	if (handle) {
+		if (status < 0 && fe)
+			ocfs2_set_links_count(fe, 0);
 		ocfs2_commit_trans(osb, handle);
+	}
 
 	ocfs2_inode_unlock(dir, 1);
 	if (did_block_signals)
@@ -631,18 +636,9 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb,
 		return status;
 	}
 
-	status = __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh,
+	return __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh,
 				    parent_fe_bh, handle, inode_ac,
 				    fe_blkno, suballoc_loc, suballoc_bit);
-	if (status < 0) {
-		u64 bg_blkno = ocfs2_which_suballoc_group(fe_blkno, suballoc_bit);
-		int tmp = ocfs2_free_suballoc_bits(handle, inode_ac->ac_inode,
-				inode_ac->ac_bh, suballoc_bit, bg_blkno, 1);
-		if (tmp)
-			mlog_errno(tmp);
-	}
-
-	return status;
 }
 
 static int ocfs2_mkdir(struct inode *dir,
@@ -2023,8 +2019,11 @@ bail:
 					ocfs2_clusters_to_bytes(osb->sb, 1));
 	if (status < 0 && did_quota_inode)
 		dquot_free_inode(inode);
-	if (handle)
+	if (handle) {
+		if (status < 0 && fe)
+			ocfs2_set_links_count(fe, 0);
 		ocfs2_commit_trans(osb, handle);
+	}
 
 	ocfs2_inode_unlock(dir, 1);
 	if (did_block_signals)
@@ -1016,7 +1016,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
 			vma = vma->vm_next;
 		}
 
-		show_vma_header_prefix(m, priv->mm->mmap->vm_start,
+		show_vma_header_prefix(m, priv->mm->mmap ? priv->mm->mmap->vm_start : 0,
 				       last_vma_end, 0, 0, 0, 0);
 		seq_pad(m, ' ');
 		seq_puts(m, "[rollup]\n");
@@ -911,6 +911,8 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
 			    struct kvm_enable_cap *cap);
 long kvm_arch_vm_ioctl(struct file *filp,
 		       unsigned int ioctl, unsigned long arg);
+long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
+			      unsigned long arg);
 
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
@@ -273,6 +273,7 @@ struct mmc_card {
 #define MMC_QUIRK_BROKEN_IRQ_POLLING	(1<<11)	/* Polling SDIO_CCCR_INTx could create a fake interrupt */
 #define MMC_QUIRK_TRIM_BROKEN	(1<<12)		/* Skip trim */
 #define MMC_QUIRK_BROKEN_HPI	(1<<13)		/* Disable broken HPI support */
+#define MMC_QUIRK_BROKEN_SD_DISCARD	(1<<14)	/* Disable broken SD discard support */
 
 	bool			reenable_cmdq;	/* Re-enable Command Queue */
 
@@ -1181,7 +1181,6 @@ static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
 static inline void qdisc_reset_queue(struct Qdisc *sch)
 {
 	__qdisc_reset_queue(&sch->q);
-	sch->qstats.backlog = 0;
 }
 
 static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
@@ -38,21 +38,20 @@ extern struct sock *reuseport_select_sock(struct sock *sk,
 extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog);
 extern int reuseport_detach_prog(struct sock *sk);
 
-static inline bool reuseport_has_conns(struct sock *sk, bool set)
+static inline bool reuseport_has_conns(struct sock *sk)
 {
 	struct sock_reuseport *reuse;
 	bool ret = false;
 
 	rcu_read_lock();
 	reuse = rcu_dereference(sk->sk_reuseport_cb);
-	if (reuse) {
-		if (set)
-			reuse->has_conns = 1;
-		ret = reuse->has_conns;
-	}
+	if (reuse && reuse->has_conns)
+		ret = true;
 	rcu_read_unlock();
 
 	return ret;
 }
 
+void reuseport_has_conns_set(struct sock *sk);
+
 #endif /* _SOCK_REUSEPORT_H */
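The helper split above is the heart of this series: the old reuseport_has_conns(sk, true) form wrote has_conns under nothing but rcu_read_lock(), racing with reuseport_grow() swapping out sk->sk_reuseport_cb. The write now happens under reuseport_lock (in the sock_reuseport.c hunk further down), while the read-only check stays RCU-only. Condensed caller-side view, taken from the later hunks in this diff:

    /* connect() path: mark the group as having connected sockets */
    reuseport_has_conns_set(sk);    /* takes reuseport_lock internally */

    /* lookup fast path: read-only check */
    if (result && !reuseport_has_conns(sk))
            return result;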
@@ -6009,12 +6009,12 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
 	if (tr->current_trace->reset)
 		tr->current_trace->reset(tr);
 
+#ifdef CONFIG_TRACER_MAX_TRACE
+	had_max_tr = tr->current_trace->use_max_tr;
+
 	/* Current trace needs to be nop_trace before synchronize_rcu */
 	tr->current_trace = &nop_trace;
 
-#ifdef CONFIG_TRACER_MAX_TRACE
-	had_max_tr = tr->allocated_snapshot;
-
 	if (had_max_tr && !t->use_max_tr) {
 		/*
 		 * We need to make sure that the update_max_tr sees that
@@ -6026,14 +6026,14 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
 		synchronize_rcu();
 		free_snapshot(tr);
 	}
-#endif
 
-#ifdef CONFIG_TRACER_MAX_TRACE
-	if (t->use_max_tr && !had_max_tr) {
+	if (t->use_max_tr && !tr->allocated_snapshot) {
 		ret = tracing_alloc_snapshot_instance(tr);
 		if (ret < 0)
 			goto out;
 	}
+#else
+	tr->current_trace = &nop_trace;
+#endif
 
 	if (t->init) {
@@ -219,11 +219,12 @@ static ssize_t proc_mpc_write(struct file *file, const char __user *buff,
 	if (!page)
 		return -ENOMEM;
 
-	for (p = page, len = 0; len < nbytes; p++, len++) {
+	for (p = page, len = 0; len < nbytes; p++) {
 		if (get_user(*p, buff++)) {
 			free_page((unsigned long)page);
 			return -EFAULT;
 		}
+		len += 1;
 		if (*p == '\0' || *p == '\n')
 			break;
 	}
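The loop change above is subtler than it looks: with len++ in the for-header, breaking on '\0' or '\n' skipped the final increment, so the returned byte count excluded a character that had in fact been consumed, and the caller would retry writing it. Moving the increment before the break counts every consumed byte. A condensed before/after (p, buff, len as in the hunk):

    for (p = page, len = 0; len < nbytes; p++, len++)   /* old: break skips len++ */
            if (*p == '\0' || *p == '\n') break;

    for (p = page, len = 0; len < nbytes; p++) {        /* new: terminator counted */
            len += 1;
            if (*p == '\0' || *p == '\n') break;
    }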
@@ -18,6 +18,22 @@ DEFINE_SPINLOCK(reuseport_lock);
 
 static DEFINE_IDA(reuseport_ida);
 
+void reuseport_has_conns_set(struct sock *sk)
+{
+	struct sock_reuseport *reuse;
+
+	if (!rcu_access_pointer(sk->sk_reuseport_cb))
+		return;
+
+	spin_lock_bh(&reuseport_lock);
+	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
+					  lockdep_is_held(&reuseport_lock));
+	if (likely(reuse))
+		reuse->has_conns = 1;
+	spin_unlock_bh(&reuseport_lock);
+}
+EXPORT_SYMBOL(reuseport_has_conns_set);
+
 static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
 {
 	unsigned int size = sizeof(struct sock_reuseport) +
@@ -108,15 +108,15 @@ struct sk_buff *hsr_get_untagged_frame(struct hsr_frame_info *frame,
 				       struct hsr_port *port)
 {
 	if (!frame->skb_std) {
-		if (frame->skb_hsr) {
+		if (frame->skb_hsr)
 			frame->skb_std =
 				create_stripped_skb_hsr(frame->skb_hsr, frame);
-		} else {
-			/* Unexpected */
-			WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n",
-				  __FILE__, __LINE__, port->dev->name);
+		else
+			netdev_warn_once(port->dev,
+					 "Unexpected frame received in hsr_get_untagged_frame()\n");
+
+		if (!frame->skb_std)
 			return NULL;
-		}
 	}
 
 	return skb_clone(frame->skb_std, GFP_ATOMIC);
@@ -70,7 +70,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
 	}
 	inet->inet_daddr = fl4->daddr;
 	inet->inet_dport = usin->sin_port;
-	reuseport_has_conns(sk, true);
+	reuseport_has_conns_set(sk);
 	sk->sk_state = TCP_ESTABLISHED;
 	sk_set_txhash(sk);
 	inet->inet_id = prandom_u32();
@@ -447,7 +447,7 @@ static struct sock *udp4_lib_lookup2(struct net *net,
 			result = lookup_reuseport(net, sk, skb,
 						  saddr, sport, daddr, hnum);
 			/* Fall back to scoring if group has connections */
-			if (result && !reuseport_has_conns(sk, false))
+			if (result && !reuseport_has_conns(sk))
 				return result;
 
 			result = result ? : sk;
@@ -256,7 +256,7 @@ ipv4_connected:
 		goto out;
 	}
 
-	reuseport_has_conns(sk, true);
+	reuseport_has_conns_set(sk);
 	sk->sk_state = TCP_ESTABLISHED;
 	sk_set_txhash(sk);
 out:
@@ -179,7 +179,7 @@ static struct sock *udp6_lib_lookup2(struct net *net,
 			result = lookup_reuseport(net, sk, skb,
 						  saddr, sport, daddr, hnum);
 			/* Fall back to scoring if group has connections */
-			if (result && !reuseport_has_conns(sk, false))
+			if (result && !reuseport_has_conns(sk))
 				return result;
 
 			result = result ? : sk;
@@ -1081,12 +1081,13 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 
 skip:
 	if (!ingress) {
-		notify_and_destroy(net, skb, n, classid,
-				   rtnl_dereference(dev->qdisc), new);
+		old = rtnl_dereference(dev->qdisc);
 		if (new && !new->ops->attach)
 			qdisc_refcount_inc(new);
 		rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc);
 
+		notify_and_destroy(net, skb, n, classid, old, new);
+
 		if (new && new->ops->attach)
 			new->ops->attach(new);
 	} else {
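The reordering above is the classic RCU publish-then-destroy discipline: grab a reference to the old object, publish the replacement, and only then notify and destroy the old one, so no concurrent reader can observe a freed qdisc. In generic form (gp, thing_lock, and make_thing are placeholders, not kernel symbols):

    struct thing *old, *new = make_thing();

    spin_lock(&thing_lock);
    old = rcu_dereference_protected(gp, lockdep_is_held(&thing_lock));
    rcu_assign_pointer(gp, new);    /* publish first */
    spin_unlock(&thing_lock);

    synchronize_rcu();              /* wait out readers of old */
    kfree(old);                     /* destroy last */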
@@ -575,7 +575,6 @@ static void atm_tc_reset(struct Qdisc *sch)
 	pr_debug("atm_tc_reset(sch %p,[qdisc %p])\n", sch, p);
 	list_for_each_entry(flow, &p->flows, list)
 		qdisc_reset(flow->q);
-	sch->q.qlen = 0;
 }
 
 static void atm_tc_destroy(struct Qdisc *sch)
@@ -2224,8 +2224,12 @@ retry:
 
 static void cake_reset(struct Qdisc *sch)
 {
+	struct cake_sched_data *q = qdisc_priv(sch);
 	u32 c;
 
+	if (!q->tins)
+		return;
+
 	for (c = 0; c < CAKE_MAX_TINS; c++)
 		cake_clear_tin(sch, c);
 }
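The cake change above and the sfb change later in this diff share one rationale: a qdisc's ->reset() can be invoked even when its ->init() failed partway through, so reset handlers must tolerate fields that were never allocated. The defensive shape, with hypothetical names:

    static void example_reset(struct Qdisc *sch)
    {
            struct example_sched_data *q = qdisc_priv(sch);

            if (!q->table)          /* init() failed before allocating */
                    return;
            /* ... normal cleanup of q->table ... */
    }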
@@ -1053,7 +1053,6 @@ cbq_reset(struct Qdisc *sch)
 			cl->cpriority = cl->priority;
 		}
 	}
-	sch->q.qlen = 0;
 }
 
@@ -315,8 +315,6 @@ static void choke_reset(struct Qdisc *sch)
 		rtnl_qdisc_drop(skb, sch);
 	}
 
-	sch->q.qlen = 0;
-	sch->qstats.backlog = 0;
 	if (q->tab)
 		memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
 	q->head = q->tail = 0;
@@ -443,8 +443,6 @@ static void drr_reset_qdisc(struct Qdisc *sch)
 			qdisc_reset(cl->qdisc);
 		}
 	}
-	sch->qstats.backlog = 0;
-	sch->q.qlen = 0;
 }
 
 static void drr_destroy_qdisc(struct Qdisc *sch)
@@ -408,8 +408,6 @@ static void dsmark_reset(struct Qdisc *sch)
 	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
 	if (p->q)
 		qdisc_reset(p->q);
-	sch->qstats.backlog = 0;
-	sch->q.qlen = 0;
 }
 
 static void dsmark_destroy(struct Qdisc *sch)
@@ -445,9 +445,6 @@ static void etf_reset(struct Qdisc *sch)
 	timesortedlist_clear(sch);
 	__qdisc_reset_queue(&sch->q);
 
-	sch->qstats.backlog = 0;
-	sch->q.qlen = 0;
-
 	q->last = 0;
 }
 
@@ -722,8 +722,6 @@ static void ets_qdisc_reset(struct Qdisc *sch)
 	}
 	for (band = 0; band < q->nbands; band++)
 		qdisc_reset(q->classes[band].qdisc);
-	sch->qstats.backlog = 0;
-	sch->q.qlen = 0;
 }
 
 static void ets_qdisc_destroy(struct Qdisc *sch)
@@ -347,8 +347,6 @@ static void fq_codel_reset(struct Qdisc *sch)
 		codel_vars_init(&flow->cvars);
 	}
 	memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
-	sch->q.qlen = 0;
-	sch->qstats.backlog = 0;
 	q->memory_usage = 0;
 }
 
@@ -521,9 +521,6 @@ static void fq_pie_reset(struct Qdisc *sch)
 		INIT_LIST_HEAD(&flow->flowchain);
 		pie_vars_init(&flow->vars);
 	}
-
-	sch->q.qlen = 0;
-	sch->qstats.backlog = 0;
 }
 
 static void fq_pie_destroy(struct Qdisc *sch)
@@ -1484,8 +1484,6 @@ hfsc_reset_qdisc(struct Qdisc *sch)
 	}
 	q->eligible = RB_ROOT;
 	qdisc_watchdog_cancel(&q->watchdog);
-	sch->qstats.backlog = 0;
-	sch->q.qlen = 0;
 }
 
 static void
@@ -966,8 +966,6 @@ static void htb_reset(struct Qdisc *sch)
 	}
 	qdisc_watchdog_cancel(&q->watchdog);
 	__qdisc_reset_queue(&q->direct_queue);
-	sch->q.qlen = 0;
-	sch->qstats.backlog = 0;
 	memset(q->hlevel, 0, sizeof(q->hlevel));
 	memset(q->row_mask, 0, sizeof(q->row_mask));
 }
@@ -152,7 +152,6 @@ multiq_reset(struct Qdisc *sch)
 
 	for (band = 0; band < q->bands; band++)
 		qdisc_reset(q->queues[band]);
-	sch->q.qlen = 0;
 	q->curband = 0;
 }
 
@@ -135,8 +135,6 @@ prio_reset(struct Qdisc *sch)
 
 	for (prio = 0; prio < q->bands; prio++)
 		qdisc_reset(q->queues[prio]);
-	sch->qstats.backlog = 0;
-	sch->q.qlen = 0;
 }
 
 static int prio_offload(struct Qdisc *sch, struct tc_prio_qopt *qopt)
@@ -1458,8 +1458,6 @@ static void qfq_reset_qdisc(struct Qdisc *sch)
 			qdisc_reset(cl->qdisc);
 		}
 	}
-	sch->qstats.backlog = 0;
-	sch->q.qlen = 0;
 }
 
 static void qfq_destroy_qdisc(struct Qdisc *sch)
@@ -176,8 +176,6 @@ static void red_reset(struct Qdisc *sch)
 	struct red_sched_data *q = qdisc_priv(sch);
 
 	qdisc_reset(q->qdisc);
-	sch->qstats.backlog = 0;
-	sch->q.qlen = 0;
 	red_restart(&q->vars);
 }
 
@@ -455,9 +455,8 @@ static void sfb_reset(struct Qdisc *sch)
 {
 	struct sfb_sched_data *q = qdisc_priv(sch);
 
-	qdisc_reset(q->qdisc);
-	sch->qstats.backlog = 0;
-	sch->q.qlen = 0;
+	if (likely(q->qdisc))
+		qdisc_reset(q->qdisc);
 	q->slot = 0;
 	q->double_buffering = false;
 	sfb_zero_all_buckets(q);
@@ -213,9 +213,6 @@ static void skbprio_reset(struct Qdisc *sch)
 	struct skbprio_sched_data *q = qdisc_priv(sch);
 	int prio;
 
-	sch->qstats.backlog = 0;
-	sch->q.qlen = 0;
-
 	for (prio = 0; prio < SKBPRIO_MAX_PRIORITY; prio++)
 		__skb_queue_purge(&q->qdiscs[prio]);
 
@@ -1626,8 +1626,6 @@ static void taprio_reset(struct Qdisc *sch)
 		if (q->qdiscs[i])
 			qdisc_reset(q->qdiscs[i]);
 	}
-	sch->qstats.backlog = 0;
-	sch->q.qlen = 0;
 }
 
 static void taprio_destroy(struct Qdisc *sch)
@@ -316,8 +316,6 @@ static void tbf_reset(struct Qdisc *sch)
 	struct tbf_sched_data *q = qdisc_priv(sch);
 
 	qdisc_reset(q->qdisc);
-	sch->qstats.backlog = 0;
-	sch->q.qlen = 0;
 	q->t_c = ktime_get_ns();
 	q->tokens = q->buffer;
 	q->ptokens = q->mtu;
@@ -124,7 +124,6 @@ teql_reset(struct Qdisc *sch)
 	struct teql_sched_data *dat = qdisc_priv(sch);
 
 	skb_queue_purge(&dat->q);
-	sch->q.qlen = 0;
 }
 
 static void
Some files were not shown because too many files have changed in this diff.