Merge 5.10.106 into android12-5.10-lts
Changes in 5.10.106:
    ARM: boot: dts: bcm2711: Fix HVS register range
    clk: qcom: gdsc: Add support to update GDSC transition delay
    HID: vivaldi: fix sysfs attributes leak
    arm64: dts: armada-3720-turris-mox: Add missing ethernet0 alias
    tipc: fix kernel panic when enabling bearer
    mISDN: Remove obsolete PIPELINE_DEBUG debugging information
    mISDN: Fix memory leak in dsp_pipeline_build()
    virtio-blk: Don't use MAX_DISCARD_SEGMENTS if max_discard_seg is zero
    isdn: hfcpci: check the return value of dma_set_mask() in setup_hw()
    net: qlogic: check the return value of dma_alloc_coherent() in qed_vf_hw_prepare()
    esp: Fix BEET mode inter address family tunneling on GSO
    qed: return status of qed_iov_get_link
    drm/sun4i: mixer: Fix P010 and P210 format numbers
    net: dsa: mt7530: fix incorrect test in mt753x_phylink_validate()
    ARM: dts: aspeed: Fix AST2600 quad spi group
    i40e: stop disabling VFs due to PF error responses
    ice: stop disabling VFs due to PF error responses
    ice: Align macro names to the specification
    ice: Remove unnecessary checker loop
    ice: Rename a couple of variables
    ice: Fix curr_link_speed advertised speed
    ethernet: Fix error handling in xemaclite_of_probe
    tipc: fix incorrect order of state message data sanity check
    net: ethernet: ti: cpts: Handle error for clk_enable
    net: ethernet: lpc_eth: Handle error for clk_enable
    ax25: Fix NULL pointer dereference in ax25_kill_by_device
    net/mlx5: Fix size field in bufferx_reg struct
    net/mlx5: Fix a race on command flush flow
    net/mlx5e: Lag, Only handle events from highest priority multipath entry
    NFC: port100: fix use-after-free in port100_send_complete
    selftests: pmtu.sh: Kill tcpdump processes launched by subshell.
    gpio: ts4900: Do not set DAT and OE together
    gianfar: ethtool: Fix refcount leak in gfar_get_ts_info
    net: phy: DP83822: clear MISR2 register to disable interrupts
    sctp: fix kernel-infoleak for SCTP sockets
    net: bcmgenet: Don't claim WOL when its not available
    selftests/bpf: Add test for bpf_timer overwriting crash
    spi: rockchip: Fix error in getting num-cs property
    spi: rockchip: terminate dma transmission when slave abort
    net-sysfs: add check for netdevice being present to speed_show
    hwmon: (pmbus) Clear pmbus fault/warning bits after read
    gpio: Return EPROBE_DEFER if gc->to_irq is NULL
    Revert "xen-netback: remove 'hotplug-status' once it has served its purpose"
    Revert "xen-netback: Check for hotplug-status existence before watching"
    ipv6: prevent a possible race condition with lifetimes
    tracing: Ensure trace buffer is at least 4096 bytes large
    selftest/vm: fix map_fixed_noreplace test failure
    selftests/memfd: clean up mapping in mfd_fail_write
    ARM: Spectre-BHB: provide empty stub for non-config
    fuse: fix pipe buffer lifetime for direct_io
    staging: rtl8723bs: Fix access-point mode deadlock
    staging: gdm724x: fix use after free in gdm_lte_rx()
    net: macb: Fix lost RX packet wakeup race in NAPI receive
    mmc: meson: Fix usage of meson_mmc_post_req()
    riscv: Fix auipc+jalr relocation range checks
    arm64: dts: marvell: armada-37xx: Remap IO space to bus address 0x0
    virtio: unexport virtio_finalize_features
    virtio: acknowledge all features before access
    watch_queue, pipe: Free watchqueue state after clearing pipe ring
    watch_queue: Fix to release page in ->release()
    watch_queue: Fix to always request a pow-of-2 pipe ring size
    watch_queue: Fix the alloc bitmap size to reflect notes allocated
    watch_queue: Free the alloc bitmap when the watch_queue is torn down
    watch_queue: Fix lack of barrier/sync/lock between post and read
    watch_queue: Make comment about setting ->defunct more accurate
    x86/boot: Fix memremap of setup_indirect structures
    x86/boot: Add setup_indirect support in early_memremap_is_setup_data()
    x86/traps: Mark do_int3() NOKPROBE_SYMBOL
    ext4: add check to prevent attempting to resize an fs with sparse_super2
    ARM: fix Thumb2 regression with Spectre BHB
    watch_queue: Fix filter limit check
    Linux 5.10.106

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ic7943bdf8c771bff4a95fcf0585ec9c24057cb5b
Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 105
+SUBLEVEL = 106
 EXTRAVERSION =
 NAME = Dare mighty things

@@ -118,7 +118,7 @@
 };

 pinctrl_fwqspid_default: fwqspid_default {
-function = "FWQSPID";
+function = "FWSPID";
 groups = "FWQSPID";
 };

@@ -290,6 +290,7 @@

 hvs: hvs@7e400000 {
 compatible = "brcm,bcm2711-hvs";
+reg = <0x7e400000 0x8000>;
 interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
 };

@@ -18,6 +18,7 @@

 aliases {
 spi0 = &spi0;
+ethernet0 = &eth0;
 ethernet1 = &eth1;
 mmc0 = &sdhci0;
 mmc1 = &sdhci1;
@@ -137,7 +138,9 @@
 /*
 * U-Boot port for Turris Mox has a bug which always expects that "ranges" DT property
 * contains exactly 2 ranges with 3 (child) address cells, 2 (parent) address cells and
-* 2 size cells and also expects that the second range starts at 16 MB offset. If these
+* 2 size cells and also expects that the second range starts at 16 MB offset. Also it
+* expects that first range uses same address for PCI (child) and CPU (parent) cells (so
+* no remapping) and that this address is the lowest from all specified ranges. If these
 * conditions are not met then U-Boot crashes during loading kernel DTB file. PCIe address
 * space is 128 MB long, so the best split between MEM and IO is to use fixed 16 MB window
 * for IO and the rest 112 MB (64+32+16) for MEM, despite that maximal IO size is just 64 kB.
@@ -146,6 +149,9 @@
 * https://source.denx.de/u-boot/u-boot/-/commit/cb2ddb291ee6fcbddd6d8f4ff49089dfe580f5d7
 * https://source.denx.de/u-boot/u-boot/-/commit/c64ac3b3185aeb3846297ad7391fc6df8ecd73bf
 * https://source.denx.de/u-boot/u-boot/-/commit/4a82fca8e330157081fc132a591ebd99ba02ee33
+* Bug related to requirement of same child and parent addresses for first range is fixed
+* in U-Boot version 2022.04 by following commit:
+* https://source.denx.de/u-boot/u-boot/-/commit/1fd54253bca7d43d046bba4853fe5fafd034bc17
 */
 #address-cells = <3>;
 #size-cells = <2>;
@@ -495,7 +495,7 @@
 * (totaling 127 MiB) for MEM.
 */
 ranges = <0x82000000 0 0xe8000000 0 0xe8000000 0 0x07f00000 /* Port 0 MEM */
-0x81000000 0 0xefff0000 0 0xefff0000 0 0x00010000>; /* Port 0 IO */
+0x81000000 0 0x00000000 0 0xefff0000 0 0x00010000>; /* Port 0 IO */
 interrupt-map-mask = <0 0 0 7>;
 interrupt-map = <0 0 0 1 &pcie_intc 0>,
 <0 0 0 2 &pcie_intc 1>,
@@ -13,6 +13,19 @@
 #include <linux/pgtable.h>
 #include <asm/sections.h>

+/*
+ * The auipc+jalr instruction pair can reach any PC-relative offset
+ * in the range [-2^31 - 2^11, 2^31 - 2^11)
+ */
+static bool riscv_insn_valid_32bit_offset(ptrdiff_t val)
+{
+#ifdef CONFIG_32BIT
+return true;
+#else
+return (-(1L << 31) - (1L << 11)) <= val && val < ((1L << 31) - (1L << 11));
+#endif
+}
+
 static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v)
 {
 if (v != (u32)v) {
@@ -95,7 +108,7 @@ static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location,
 ptrdiff_t offset = (void *)v - (void *)location;
 s32 hi20;

-if (offset != (s32)offset) {
+if (!riscv_insn_valid_32bit_offset(offset)) {
 pr_err(
 "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
 me->name, (long long)v, location);
@@ -197,10 +210,9 @@ static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
 Elf_Addr v)
 {
 ptrdiff_t offset = (void *)v - (void *)location;
-s32 fill_v = offset;
 u32 hi20, lo12;

-if (offset != fill_v) {
+if (!riscv_insn_valid_32bit_offset(offset)) {
 /* Only emit the plt entry if offset over 32-bit range */
 if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) {
 offset = module_emit_plt_entry(me, v);
@@ -224,10 +236,9 @@ static int apply_r_riscv_call_rela(struct module *me, u32 *location,
 Elf_Addr v)
 {
 ptrdiff_t offset = (void *)v - (void *)location;
-s32 fill_v = offset;
 u32 hi20, lo12;

-if (offset != fill_v) {
+if (!riscv_insn_valid_32bit_offset(offset)) {
 pr_err(
 "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
 me->name, (long long)v, location);
@@ -995,8 +995,10 @@ early_param("memmap", parse_memmap_opt);
 */
 void __init e820__reserve_setup_data(void)
 {
+struct setup_indirect *indirect;
 struct setup_data *data;
-u64 pa_data;
+u64 pa_data, pa_next;
+u32 len;

 pa_data = boot_params.hdr.setup_data;
 if (!pa_data)
@@ -1004,6 +1006,14 @@ void __init e820__reserve_setup_data(void)

 while (pa_data) {
 data = early_memremap(pa_data, sizeof(*data));
+if (!data) {
+pr_warn("e820: failed to memremap setup_data entry\n");
+return;
+}
+
+len = sizeof(*data);
+pa_next = data->next;
+
 e820__range_update(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);

 /*
@@ -1015,18 +1025,27 @@
 sizeof(*data) + data->len,
 E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);

-if (data->type == SETUP_INDIRECT &&
-((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) {
-e820__range_update(((struct setup_indirect *)data->data)->addr,
-((struct setup_indirect *)data->data)->len,
-E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
-e820__range_update_kexec(((struct setup_indirect *)data->data)->addr,
-((struct setup_indirect *)data->data)->len,
-E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
+if (data->type == SETUP_INDIRECT) {
+len += data->len;
+early_memunmap(data, sizeof(*data));
+data = early_memremap(pa_data, len);
+if (!data) {
+pr_warn("e820: failed to memremap indirect setup_data\n");
+return;
+}
+
+indirect = (struct setup_indirect *)data->data;
+
+if (indirect->type != SETUP_INDIRECT) {
+e820__range_update(indirect->addr, indirect->len,
+E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
+e820__range_update_kexec(indirect->addr, indirect->len,
+E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
+}
 }

-pa_data = data->next;
-early_memunmap(data, sizeof(*data));
+pa_data = pa_next;
+early_memunmap(data, len);
 }

 e820__update_table(e820_table);
@@ -88,11 +88,13 @@ create_setup_data_node(struct dentry *parent, int no,

 static int __init create_setup_data_nodes(struct dentry *parent)
 {
+struct setup_indirect *indirect;
 struct setup_data_node *node;
 struct setup_data *data;
-int error;
+u64 pa_data, pa_next;
 struct dentry *d;
-u64 pa_data;
+int error;
+u32 len;
 int no = 0;

 d = debugfs_create_dir("setup_data", parent);
@@ -112,12 +114,29 @@ static int __init create_setup_data_nodes(struct dentry *parent)
 error = -ENOMEM;
 goto err_dir;
 }
+pa_next = data->next;

-if (data->type == SETUP_INDIRECT &&
-((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) {
-node->paddr = ((struct setup_indirect *)data->data)->addr;
-node->type = ((struct setup_indirect *)data->data)->type;
-node->len = ((struct setup_indirect *)data->data)->len;
+if (data->type == SETUP_INDIRECT) {
+len = sizeof(*data) + data->len;
+memunmap(data);
+data = memremap(pa_data, len, MEMREMAP_WB);
+if (!data) {
+kfree(node);
+error = -ENOMEM;
+goto err_dir;
+}
+
+indirect = (struct setup_indirect *)data->data;
+
+if (indirect->type != SETUP_INDIRECT) {
+node->paddr = indirect->addr;
+node->type = indirect->type;
+node->len = indirect->len;
+} else {
+node->paddr = pa_data;
+node->type = data->type;
+node->len = data->len;
+}
 } else {
 node->paddr = pa_data;
 node->type = data->type;
@@ -125,7 +144,7 @@ static int __init create_setup_data_nodes(struct dentry *parent)
 }

 create_setup_data_node(d, no, node);
-pa_data = data->next;
+pa_data = pa_next;

 memunmap(data);
 no++;
@@ -91,26 +91,41 @@ static int get_setup_data_paddr(int nr, u64 *paddr)

 static int __init get_setup_data_size(int nr, size_t *size)
 {
-int i = 0;
+u64 pa_data = boot_params.hdr.setup_data, pa_next;
+struct setup_indirect *indirect;
 struct setup_data *data;
-u64 pa_data = boot_params.hdr.setup_data;
+int i = 0;
+u32 len;

 while (pa_data) {
 data = memremap(pa_data, sizeof(*data), MEMREMAP_WB);
 if (!data)
 return -ENOMEM;
+pa_next = data->next;
+
 if (nr == i) {
-if (data->type == SETUP_INDIRECT &&
-((struct setup_indirect *)data->data)->type != SETUP_INDIRECT)
-*size = ((struct setup_indirect *)data->data)->len;
-else
+if (data->type == SETUP_INDIRECT) {
+len = sizeof(*data) + data->len;
+memunmap(data);
+data = memremap(pa_data, len, MEMREMAP_WB);
+if (!data)
+return -ENOMEM;
+
+indirect = (struct setup_indirect *)data->data;
+
+if (indirect->type != SETUP_INDIRECT)
+*size = indirect->len;
+else
+*size = data->len;
+} else {
 *size = data->len;
+}

 memunmap(data);
 return 0;
 }

-pa_data = data->next;
+pa_data = pa_next;
 memunmap(data);
 i++;
 }
@@ -120,9 +135,11 @@ static int __init get_setup_data_size(int nr, size_t *size)
 static ssize_t type_show(struct kobject *kobj,
 struct kobj_attribute *attr, char *buf)
 {
+struct setup_indirect *indirect;
+struct setup_data *data;
 int nr, ret;
 u64 paddr;
-struct setup_data *data;
+u32 len;

 ret = kobj_to_setup_data_nr(kobj, &nr);
 if (ret)
@@ -135,10 +152,20 @@ static ssize_t type_show(struct kobject *kobj,
 if (!data)
 return -ENOMEM;

-if (data->type == SETUP_INDIRECT)
-ret = sprintf(buf, "0x%x\n", ((struct setup_indirect *)data->data)->type);
-else
+if (data->type == SETUP_INDIRECT) {
+len = sizeof(*data) + data->len;
+memunmap(data);
+data = memremap(paddr, len, MEMREMAP_WB);
+if (!data)
+return -ENOMEM;
+
+indirect = (struct setup_indirect *)data->data;
+
+ret = sprintf(buf, "0x%x\n", indirect->type);
+} else {
 ret = sprintf(buf, "0x%x\n", data->type);
+}

 memunmap(data);
 return ret;
 }
@@ -149,9 +176,10 @@ static ssize_t setup_data_data_read(struct file *fp,
 char *buf,
 loff_t off, size_t count)
 {
+struct setup_indirect *indirect;
+struct setup_data *data;
 int nr, ret = 0;
 u64 paddr, len;
-struct setup_data *data;
 void *p;

 ret = kobj_to_setup_data_nr(kobj, &nr);
@@ -165,10 +193,27 @@ static ssize_t setup_data_data_read(struct file *fp,
 if (!data)
 return -ENOMEM;

-if (data->type == SETUP_INDIRECT &&
-((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) {
-paddr = ((struct setup_indirect *)data->data)->addr;
-len = ((struct setup_indirect *)data->data)->len;
+if (data->type == SETUP_INDIRECT) {
+len = sizeof(*data) + data->len;
+memunmap(data);
+data = memremap(paddr, len, MEMREMAP_WB);
+if (!data)
+return -ENOMEM;
+
+indirect = (struct setup_indirect *)data->data;
+
+if (indirect->type != SETUP_INDIRECT) {
+paddr = indirect->addr;
+len = indirect->len;
+} else {
+/*
+ * Even though this is technically undefined, return
+ * the data as though it is a normal setup_data struct.
+ * This will at least allow it to be inspected.
+ */
+paddr += sizeof(*data);
+len = data->len;
+}
 } else {
 paddr += sizeof(*data);
 len = data->len;
@@ -371,21 +371,41 @@ static void __init parse_setup_data(void)

 static void __init memblock_x86_reserve_range_setup_data(void)
 {
+struct setup_indirect *indirect;
 struct setup_data *data;
-u64 pa_data;
+u64 pa_data, pa_next;
+u32 len;

 pa_data = boot_params.hdr.setup_data;
 while (pa_data) {
 data = early_memremap(pa_data, sizeof(*data));
+if (!data) {
+pr_warn("setup: failed to memremap setup_data entry\n");
+return;
+}
+
+len = sizeof(*data);
+pa_next = data->next;
+
 memblock_reserve(pa_data, sizeof(*data) + data->len);

-if (data->type == SETUP_INDIRECT &&
-((struct setup_indirect *)data->data)->type != SETUP_INDIRECT)
-memblock_reserve(((struct setup_indirect *)data->data)->addr,
-((struct setup_indirect *)data->data)->len);
+if (data->type == SETUP_INDIRECT) {
+len += data->len;
+early_memunmap(data, sizeof(*data));
+data = early_memremap(pa_data, len);
+if (!data) {
+pr_warn("setup: failed to memremap indirect setup_data\n");
+return;
+}

-pa_data = data->next;
-early_memunmap(data, sizeof(*data));
+indirect = (struct setup_indirect *)data->data;
+
+if (indirect->type != SETUP_INDIRECT)
+memblock_reserve(indirect->addr, indirect->len);
+}
+
+pa_data = pa_next;
+early_memunmap(data, len);
 }
 }

@@ -651,6 +651,7 @@ static bool do_int3(struct pt_regs *regs)

 return res == NOTIFY_STOP;
 }
+NOKPROBE_SYMBOL(do_int3);

 static void do_int3_user(struct pt_regs *regs)
 {
@@ -633,6 +633,7 @@ static bool memremap_is_efi_data(resource_size_t phys_addr,
 static bool memremap_is_setup_data(resource_size_t phys_addr,
 unsigned long size)
 {
+struct setup_indirect *indirect;
 struct setup_data *data;
 u64 paddr, paddr_next;

@@ -645,6 +646,10 @@ static bool memremap_is_setup_data(resource_size_t phys_addr,

 data = memremap(paddr, sizeof(*data),
 MEMREMAP_WB | MEMREMAP_DEC);
+if (!data) {
+pr_warn("failed to memremap setup_data entry\n");
+return false;
+}

 paddr_next = data->next;
 len = data->len;
@@ -654,10 +659,21 @@ static bool memremap_is_setup_data(resource_size_t phys_addr,
 return true;
 }

-if (data->type == SETUP_INDIRECT &&
-((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) {
-paddr = ((struct setup_indirect *)data->data)->addr;
-len = ((struct setup_indirect *)data->data)->len;
+if (data->type == SETUP_INDIRECT) {
+memunmap(data);
+data = memremap(paddr, sizeof(*data) + len,
+MEMREMAP_WB | MEMREMAP_DEC);
+if (!data) {
+pr_warn("failed to memremap indirect setup_data\n");
+return false;
+}
+
+indirect = (struct setup_indirect *)data->data;
+
+if (indirect->type != SETUP_INDIRECT) {
+paddr = indirect->addr;
+len = indirect->len;
+}
 }

 memunmap(data);
@@ -678,22 +694,51 @@ static bool memremap_is_setup_data(resource_size_t phys_addr,
 static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
 unsigned long size)
 {
+struct setup_indirect *indirect;
 struct setup_data *data;
 u64 paddr, paddr_next;

 paddr = boot_params.hdr.setup_data;
 while (paddr) {
-unsigned int len;
+unsigned int len, size;

 if (phys_addr == paddr)
 return true;

 data = early_memremap_decrypted(paddr, sizeof(*data));
+if (!data) {
+pr_warn("failed to early memremap setup_data entry\n");
+return false;
+}
+
+size = sizeof(*data);

 paddr_next = data->next;
 len = data->len;

-early_memunmap(data, sizeof(*data));
+if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
+early_memunmap(data, sizeof(*data));
+return true;
+}
+
+if (data->type == SETUP_INDIRECT) {
+size += len;
+early_memunmap(data, sizeof(*data));
+data = early_memremap_decrypted(paddr, size);
+if (!data) {
+pr_warn("failed to early memremap indirect setup_data\n");
+return false;
+}
+
+indirect = (struct setup_indirect *)data->data;
+
+if (indirect->type != SETUP_INDIRECT) {
+paddr = indirect->addr;
+len = indirect->len;
+}
+}
+
+early_memunmap(data, size);

 if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
 return true;
@@ -871,9 +871,15 @@ static int virtblk_probe(struct virtio_device *vdev)

 virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
 &v);
+
+/*
+ * max_discard_seg == 0 is out of spec but we always
+ * handled it.
+ */
+if (!v)
+v = sg_elems - 2;
 blk_queue_max_discard_segments(q,
-min_not_zero(v,
-MAX_DISCARD_SEGMENTS));
+min(v, MAX_DISCARD_SEGMENTS));

 blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 }
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015, 2017-2018, 2022, The Linux Foundation. All rights reserved.
 */

 #include <linux/bitops.h>
@@ -34,9 +34,14 @@
 #define CFG_GDSCR_OFFSET 0x4

 /* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). */
-#define EN_REST_WAIT_VAL (0x2 << 20)
-#define EN_FEW_WAIT_VAL (0x8 << 16)
-#define CLK_DIS_WAIT_VAL (0x2 << 12)
+#define EN_REST_WAIT_VAL 0x2
+#define EN_FEW_WAIT_VAL 0x8
+#define CLK_DIS_WAIT_VAL 0x2
+
+/* Transition delay shifts */
+#define EN_REST_WAIT_SHIFT 20
+#define EN_FEW_WAIT_SHIFT 16
+#define CLK_DIS_WAIT_SHIFT 12

 #define RETAIN_MEM BIT(14)
 #define RETAIN_PERIPH BIT(13)
@@ -341,7 +346,18 @@ static int gdsc_init(struct gdsc *sc)
 */
 mask = HW_CONTROL_MASK | SW_OVERRIDE_MASK |
 EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK;
-val = EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL;
+
+if (!sc->en_rest_wait_val)
+sc->en_rest_wait_val = EN_REST_WAIT_VAL;
+if (!sc->en_few_wait_val)
+sc->en_few_wait_val = EN_FEW_WAIT_VAL;
+if (!sc->clk_dis_wait_val)
+sc->clk_dis_wait_val = CLK_DIS_WAIT_VAL;
+
+val = sc->en_rest_wait_val << EN_REST_WAIT_SHIFT |
+sc->en_few_wait_val << EN_FEW_WAIT_SHIFT |
+sc->clk_dis_wait_val << CLK_DIS_WAIT_SHIFT;
+
 ret = regmap_update_bits(sc->regmap, sc->gdscr, mask, val);
 if (ret)
 return ret;
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015, 2017-2018, 2022, The Linux Foundation. All rights reserved.
 */

 #ifndef __QCOM_GDSC_H__
@@ -22,6 +22,9 @@ struct reset_controller_dev;
 * @cxcs: offsets of branch registers to toggle mem/periph bits in
 * @cxc_count: number of @cxcs
 * @pwrsts: Possible powerdomain power states
+ * @en_rest_wait_val: transition delay value for receiving enr ack signal
+ * @en_few_wait_val: transition delay value for receiving enf ack signal
+ * @clk_dis_wait_val: transition delay value for halting clock
 * @resets: ids of resets associated with this gdsc
 * @reset_count: number of @resets
 * @rcdev: reset controller
@@ -35,6 +38,9 @@ struct gdsc {
 unsigned int clamp_io_ctrl;
 unsigned int *cxcs;
 unsigned int cxc_count;
+unsigned int en_rest_wait_val;
+unsigned int en_few_wait_val;
+unsigned int clk_dis_wait_val;
 const u8 pwrsts;
 /* Powerdomain allowable state bitfields */
 #define PWRSTS_OFF BIT(0)
@@ -1,7 +1,7 @@
 /*
 * Digital I/O driver for Technologic Systems I2C FPGA Core
 *
- * Copyright (C) 2015 Technologic Systems
+ * Copyright (C) 2015, 2018 Technologic Systems
 * Copyright (C) 2016 Savoir-Faire Linux
 *
 * This program is free software; you can redistribute it and/or
@@ -55,19 +55,33 @@ static int ts4900_gpio_direction_input(struct gpio_chip *chip,
 {
 struct ts4900_gpio_priv *priv = gpiochip_get_data(chip);

-/*
- * This will clear the output enable bit, the other bits are
- * dontcare when this is cleared
+/* Only clear the OE bit here, requires a RMW. Prevents potential issue
+ * with OE and data getting to the physical pin at different times.
 */
-return regmap_write(priv->regmap, offset, 0);
+return regmap_update_bits(priv->regmap, offset, TS4900_GPIO_OE, 0);
 }

 static int ts4900_gpio_direction_output(struct gpio_chip *chip,
 unsigned int offset, int value)
 {
 struct ts4900_gpio_priv *priv = gpiochip_get_data(chip);
+unsigned int reg;
 int ret;

+/* If changing from an input to an output, we need to first set the
+ * proper data bit to what is requested and then set OE bit. This
+ * prevents a glitch that can occur on the IO line
+ */
+regmap_read(priv->regmap, offset, &reg);
+if (!(reg & TS4900_GPIO_OE)) {
+if (value)
+reg |= TS4900_GPIO_OUT;
+else
+reg &= ~TS4900_GPIO_OUT;
+
+regmap_write(priv->regmap, offset, reg);
+}
+
 if (value)
 ret = regmap_write(priv->regmap, offset, TS4900_GPIO_OE |
 TS4900_GPIO_OUT);
@@ -3216,6 +3216,16 @@ int gpiod_to_irq(const struct gpio_desc *desc)

 return retirq;
 }
+#ifdef CONFIG_GPIOLIB_IRQCHIP
+if (gc->irq.chip) {
+/*
+ * Avoid race condition with other code, which tries to lookup
+ * an IRQ before the irqchip has been properly registered,
+ * i.e. while gpiochip is still being brought up.
+ */
+return -EPROBE_DEFER;
+}
+#endif
 return -ENXIO;
 }
 EXPORT_SYMBOL_GPL(gpiod_to_irq);
@@ -113,10 +113,10 @@
 /* format 13 is semi-planar YUV411 VUVU */
 #define SUN8I_MIXER_FBFMT_YUV411 14
 /* format 15 doesn't exist */
-/* format 16 is P010 YVU */
-#define SUN8I_MIXER_FBFMT_P010_YUV 17
-/* format 18 is P210 YVU */
-#define SUN8I_MIXER_FBFMT_P210_YUV 19
+#define SUN8I_MIXER_FBFMT_P010_YUV 16
+/* format 17 is P010 YVU */
+#define SUN8I_MIXER_FBFMT_P210_YUV 18
+/* format 19 is P210 YVU */
 /* format 20 is packed YVU444 10-bit */
 /* format 21 is packed YUV444 10-bit */

@@ -143,7 +143,7 @@ out:
 static int vivaldi_input_configured(struct hid_device *hdev,
 struct hid_input *hidinput)
 {
-return sysfs_create_group(&hdev->dev.kobj, &input_attribute_group);
+return devm_device_add_group(&hdev->dev, &input_attribute_group);
 }

 static const struct hid_device_id vivaldi_table[] = {
@@ -898,6 +898,11 @@ static int pmbus_get_boolean(struct i2c_client *client, struct pmbus_boolean *b,
 pmbus_update_sensor_data(client, s2);

 regval = status & mask;
+if (regval) {
+ret = pmbus_write_byte_data(client, page, reg, regval);
+if (ret)
+goto unlock;
+}
 if (s1 && s2) {
 s64 v1, v2;

@@ -2005,7 +2005,11 @@ setup_hw(struct hfc_pci *hc)
 }
 /* Allocate memory for FIFOS */
 /* the memory needs to be on a 32k boundary within the first 4G */
-dma_set_mask(&hc->pdev->dev, 0xFFFF8000);
+if (dma_set_mask(&hc->pdev->dev, 0xFFFF8000)) {
+printk(KERN_WARNING
+"HFC-PCI: No usable DMA configuration!\n");
+return -EIO;
+}
 buffer = dma_alloc_coherent(&hc->pdev->dev, 0x8000, &hc->hw.dmahandle,
 GFP_KERNEL);
 /* We silently assume the address is okay if nonzero */
@@ -17,9 +17,6 @@
 #include "dsp.h"
 #include "dsp_hwec.h"

-/* uncomment for debugging */
-/*#define PIPELINE_DEBUG*/
-
 struct dsp_pipeline_entry {
 struct mISDN_dsp_element *elem;
 void *p;
@@ -104,10 +101,6 @@ int mISDN_dsp_element_register(struct mISDN_dsp_element *elem)
 }
 }

-#ifdef PIPELINE_DEBUG
-printk(KERN_DEBUG "%s: %s registered\n", __func__, elem->name);
-#endif
-
 return 0;

 err2:
@@ -129,10 +122,6 @@ void mISDN_dsp_element_unregister(struct mISDN_dsp_element *elem)
 list_for_each_entry_safe(entry, n, &dsp_elements, list)
 if (entry->elem == elem) {
 device_unregister(&entry->dev);
-#ifdef PIPELINE_DEBUG
-printk(KERN_DEBUG "%s: %s unregistered\n",
-__func__, elem->name);
-#endif
 return;
 }
 printk(KERN_ERR "%s: element %s not in list.\n", __func__, elem->name);
@@ -145,10 +134,6 @@ int dsp_pipeline_module_init(void)
 if (IS_ERR(elements_class))
 return PTR_ERR(elements_class);

-#ifdef PIPELINE_DEBUG
-printk(KERN_DEBUG "%s: dsp pipeline module initialized\n", __func__);
-#endif
-
 dsp_hwec_init();

 return 0;
@@ -168,10 +153,6 @@ void dsp_pipeline_module_exit(void)
 __func__, entry->elem->name);
 kfree(entry);
 }
-
-#ifdef PIPELINE_DEBUG
-printk(KERN_DEBUG "%s: dsp pipeline module exited\n", __func__);
-#endif
 }

 int dsp_pipeline_init(struct dsp_pipeline *pipeline)
@@ -181,10 +162,6 @@ int dsp_pipeline_init(struct dsp_pipeline *pipeline)

 INIT_LIST_HEAD(&pipeline->list);

-#ifdef PIPELINE_DEBUG
-printk(KERN_DEBUG "%s: dsp pipeline ready\n", __func__);
-#endif
-
 return 0;
 }

@@ -210,16 +187,12 @@ void dsp_pipeline_destroy(struct dsp_pipeline *pipeline)
 return;

 _dsp_pipeline_destroy(pipeline);
-
-#ifdef PIPELINE_DEBUG
-printk(KERN_DEBUG "%s: dsp pipeline destroyed\n", __func__);
-#endif
 }

 int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg)
 {
-int incomplete = 0, found = 0;
+int found = 0;
-char *dup, *tok, *name, *args;
+char *dup, *next, *tok, *name, *args;
 struct dsp_element_entry *entry, *n;
 struct dsp_pipeline_entry *pipeline_entry;
 struct mISDN_dsp_element *elem;
@@ -230,10 +203,10 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg)
 if (!list_empty(&pipeline->list))
 _dsp_pipeline_destroy(pipeline);

-dup = kstrdup(cfg, GFP_ATOMIC);
+dup = next = kstrdup(cfg, GFP_ATOMIC);
 if (!dup)
 return 0;
-while ((tok = strsep(&dup, "|"))) {
+while ((tok = strsep(&next, "|"))) {
 if (!strlen(tok))
 continue;
 name = strsep(&tok, "(");
@@ -251,7 +224,6 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg)
 printk(KERN_ERR "%s: failed to add "
 "entry to pipeline: %s (out of "
 "memory)\n", __func__, elem->name);
-incomplete = 1;
 goto _out;
 }
 pipeline_entry->elem = elem;
@@ -268,20 +240,12 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg)
 if (pipeline_entry->p) {
 list_add_tail(&pipeline_entry->
 list, &pipeline->list);
-#ifdef PIPELINE_DEBUG
-printk(KERN_DEBUG "%s: created "
-"instance of %s%s%s\n",
-__func__, name, args ?
-" with args " : "", args ?
-args : "");
-#endif
 } else {
 printk(KERN_ERR "%s: failed "
 "to add entry to pipeline: "
 "%s (new() returned NULL)\n",
 __func__, elem->name);
 kfree(pipeline_entry);
-incomplete = 1;
 }
 }
 found = 1;
@@ -290,11 +254,9 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg)

 if (found)
 found = 0;
-else {
+else
 printk(KERN_ERR "%s: element not found, skipping: "
 "%s\n", __func__, name);
-incomplete = 1;
-}
 }

 _out:
@@ -303,10 +265,6 @@ _out:
 else
 pipeline->inuse = 0;

-#ifdef PIPELINE_DEBUG
-printk(KERN_DEBUG "%s: dsp pipeline built%s: %s\n",
-__func__, incomplete ? " incomplete" : "", cfg);
-#endif
 kfree(dup);
 return 0;
 }
@@ -173,6 +173,8 @@ struct meson_host {
 int irq;

 bool vqmmc_enabled;
+bool needs_pre_post_req;
+
 };

 #define CMD_CFG_LENGTH_MASK GENMASK(8, 0)
@@ -652,6 +654,8 @@ static void meson_mmc_request_done(struct mmc_host *mmc,
 struct meson_host *host = mmc_priv(mmc);

 host->cmd = NULL;
+if (host->needs_pre_post_req)
+meson_mmc_post_req(mmc, mrq, 0);
 mmc_request_done(host->mmc, mrq);
 }

@@ -869,7 +873,7 @@ static int meson_mmc_validate_dram_access(struct mmc_host *mmc, struct mmc_data
 static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 {
 struct meson_host *host = mmc_priv(mmc);
-bool needs_pre_post_req = mrq->data &&
+host->needs_pre_post_req = mrq->data &&
 !(mrq->data->host_cookie & SD_EMMC_PRE_REQ_DONE);

 /*
@@ -885,22 +889,19 @@ static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 }
 }

-if (needs_pre_post_req) {
+if (host->needs_pre_post_req) {
 meson_mmc_get_transfer_mode(mmc, mrq);
 if (!meson_mmc_desc_chain_mode(mrq->data))
-needs_pre_post_req = false;
+host->needs_pre_post_req = false;
 }

-if (needs_pre_post_req)
+if (host->needs_pre_post_req)
 meson_mmc_pre_req(mmc, mrq);

 /* Stop execution */
 writel(0, host->regs + SD_EMMC_START);

 meson_mmc_start_cmd(mmc, mrq->sbc ?: mrq->cmd);
-
-if (needs_pre_post_req)
-meson_mmc_post_req(mmc, mrq, 0);
 }

 static void meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd)
@@ -2342,7 +2342,7 @@ mt753x_phylink_validate(struct dsa_switch *ds, int port,

 phylink_set_port_modes(mask);

-if (state->interface != PHY_INTERFACE_MODE_TRGMII ||
+if (state->interface != PHY_INTERFACE_MODE_TRGMII &&
 !phy_interface_mode_is_8023z(state->interface)) {
 phylink_set(mask, 10baseT_Half);
 phylink_set(mask, 10baseT_Full);
@@ -41,6 +41,13 @@
 void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 {
 struct bcmgenet_priv *priv = netdev_priv(dev);
+struct device *kdev = &priv->pdev->dev;
+
+if (!device_can_wakeup(kdev)) {
+wol->supported = 0;
+wol->wolopts = 0;
+return;
+}

 wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
 wol->wolopts = priv->wolopts;
@@ -1448,7 +1448,14 @@ static int macb_poll(struct napi_struct *napi, int budget)
 if (work_done < budget) {
 napi_complete_done(napi, work_done);

-/* Packets received while interrupts were disabled */
+/* RSR bits only seem to propagate to raise interrupts when
+ * interrupts are enabled at the time, so if bits are already
+ * set due to packets received while interrupts were disabled,
+ * they will not cause another interrupt to be generated when
+ * interrupts are re-enabled.
+ * Check for this case here. This has been seen to happen
+ * around 30% of the time under heavy network load.
+ */
 status = macb_readl(bp, RSR);
 if (status) {
 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
@@ -1456,6 +1463,22 @@ static int macb_poll(struct napi_struct *napi, int budget)
 napi_reschedule(napi);
 } else {
 queue_writel(queue, IER, bp->rx_intr_mask);
+
+/* In rare cases, packets could have been received in
+ * the window between the check above and re-enabling
+ * interrupts. Therefore, a double-check is required
+ * to avoid losing a wakeup. This can potentially race
+ * with the interrupt handler doing the same actions
+ * if an interrupt is raised just after enabling them,
+ * but this should be harmless.
+ */
+status = macb_readl(bp, RSR);
+if (unlikely(status)) {
+queue_writel(queue, IDR, bp->rx_intr_mask);
+if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+queue_writel(queue, ISR, MACB_BIT(RCOMP));
+napi_schedule(napi);
+}
 }
 }

|
@@ -1456,6 +1456,7 @@ static int gfar_get_ts_info(struct net_device *dev,
|
|||||||
ptp_node = of_find_compatible_node(NULL, NULL, "fsl,etsec-ptp");
|
ptp_node = of_find_compatible_node(NULL, NULL, "fsl,etsec-ptp");
|
||||||
if (ptp_node) {
|
if (ptp_node) {
|
||||||
ptp_dev = of_find_device_by_node(ptp_node);
|
ptp_dev = of_find_device_by_node(ptp_node);
|
||||||
|
of_node_put(ptp_node);
|
||||||
if (ptp_dev)
|
if (ptp_dev)
|
||||||
ptp = platform_get_drvdata(ptp_dev);
|
ptp = platform_get_drvdata(ptp_dev);
|
||||||
}
|
}
|
||||||
|
@@ -742,10 +742,8 @@ static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id)
 vsi = pf->vsi[vf->lan_vsi_idx];
 dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n",
 vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs);
-dev_info(&pf->pdev->dev, " num MDD=%lld, invalid msg=%lld, valid msg=%lld\n",
-vf->num_mdd_events,
-vf->num_invalid_msgs,
-vf->num_valid_msgs);
+dev_info(&pf->pdev->dev, " num MDD=%lld\n",
+vf->num_mdd_events);
 } else {
 dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id);
 }
@@ -1864,19 +1864,17 @@ sriov_configure_out:
 /***********************virtual channel routines******************/

 /**
- * i40e_vc_send_msg_to_vf_ex
+ * i40e_vc_send_msg_to_vf
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
- * @is_quiet: true for not printing unsuccessful return values, false otherwise
 *
 * send msg to VF
 **/
-static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode,
-u32 v_retval, u8 *msg, u16 msglen,
-bool is_quiet)
+static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
+u32 v_retval, u8 *msg, u16 msglen)
 {
 struct i40e_pf *pf;
 struct i40e_hw *hw;
@@ -1891,25 +1889,6 @@ static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode,
 hw = &pf->hw;
 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

-/* single place to detect unsuccessful return values */
-if (v_retval && !is_quiet) {
-vf->num_invalid_msgs++;
-dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
-vf->vf_id, v_opcode, v_retval);
-if (vf->num_invalid_msgs >
-I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
-dev_err(&pf->pdev->dev,
-"Number of invalid messages exceeded for VF %d\n",
-vf->vf_id);
-dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
-set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
-}
-} else {
-vf->num_valid_msgs++;
-/* reset the invalid counter, if a valid message is received. */
-vf->num_invalid_msgs = 0;
-}
-
 aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
 msg, msglen, NULL);
 if (aq_ret) {
@@ -1922,23 +1901,6 @@ static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode,
 return 0;
 }

-/**
- * i40e_vc_send_msg_to_vf
- * @vf: pointer to the VF info
- * @v_opcode: virtual channel opcode
- * @v_retval: virtual channel return value
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- *
- * send msg to VF
- **/
-static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
-u32 v_retval, u8 *msg, u16 msglen)
-{
-return i40e_vc_send_msg_to_vf_ex(vf, v_opcode, v_retval,
-msg, msglen, false);
-}
-
 /**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the VF info
@@ -2759,7 +2721,6 @@ error_param:
 * i40e_check_vf_permission
 * @vf: pointer to the VF info
 * @al: MAC address list from virtchnl
- * @is_quiet: set true for printing msg without opcode info, false otherwise
 *
 * Check that the given list of MAC addresses is allowed. Will return -EPERM
 * if any address in the list is not valid. Checks the following conditions:
@@ -2774,15 +2735,13 @@ error_param:
 * addresses might not be accurate.
 **/
 static inline int i40e_check_vf_permission(struct i40e_vf *vf,
-struct virtchnl_ether_addr_list *al,
-bool *is_quiet)
+struct virtchnl_ether_addr_list *al)
 {
 struct i40e_pf *pf = vf->pf;
 struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
 int mac2add_cnt = 0;
 int i;

-*is_quiet = false;
 for (i = 0; i < al->num_elements; i++) {
 struct i40e_mac_filter *f;
 u8 *addr = al->list[i].addr;
@@ -2806,7 +2765,6 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
 !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
 dev_err(&pf->pdev->dev,
 "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
-*is_quiet = true;
 return -EPERM;
 }

@@ -2843,7 +2801,6 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
 (struct virtchnl_ether_addr_list *)msg;
 struct i40e_pf *pf = vf->pf;
 struct i40e_vsi *vsi = NULL;
-bool is_quiet = false;
 i40e_status ret = 0;
 int i;

@@ -2860,7 +2817,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
 */
 spin_lock_bh(&vsi->mac_filter_hash_lock);

-ret = i40e_check_vf_permission(vf, al, &is_quiet);
+ret = i40e_check_vf_permission(vf, al);
 if (ret) {
 spin_unlock_bh(&vsi->mac_filter_hash_lock);
 goto error_param;
@@ -2898,8 +2855,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)

 error_param:
 /* send the response to the VF */
-return i40e_vc_send_msg_to_vf_ex(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
-ret, NULL, 0, is_quiet);
+return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
+ret, NULL, 0);
 }

 /**
@@ -10,8 +10,6 @@

 #define I40E_VIRTCHNL_SUPPORTED_QTYPES 2

-#define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED 10
-
 #define I40E_VLAN_PRIORITY_SHIFT 13
 #define I40E_VLAN_MASK 0xFFF
 #define I40E_PRIORITY_MASK 0xE000
@@ -92,9 +90,6 @@ struct i40e_vf {
 u8 num_queue_pairs; /* num of qps assigned to VF vsis */
 u8 num_req_queues; /* num of requested qps */
 u64 num_mdd_events; /* num of mdd events detected */
-/* num of continuous malformed or invalid msgs detected */
-u64 num_invalid_msgs;
-u64 num_valid_msgs; /* num of valid msgs detected */

 unsigned long vf_caps; /* vf's adv. capabilities */
 unsigned long vf_states; /* vf's runtime states */
@@ -870,11 +870,11 @@ struct ice_aqc_get_phy_caps {
 * 01b - Report topology capabilities
 * 10b - Report SW configured
 */
 #define ICE_AQC_REPORT_MODE_S 1
 #define ICE_AQC_REPORT_MODE_M (3 << ICE_AQC_REPORT_MODE_S)
-#define ICE_AQC_REPORT_NVM_CAP 0
-#define ICE_AQC_REPORT_TOPO_CAP BIT(1)
-#define ICE_AQC_REPORT_SW_CFG BIT(2)
+#define ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA 0
+#define ICE_AQC_REPORT_TOPO_CAP_MEDIA BIT(1)
+#define ICE_AQC_REPORT_ACTIVE_CFG BIT(2)
 __le32 reserved1;
 __le32 addr_high;
 __le32 addr_low;
@@ -193,7 +193,7 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
 ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n",
 pcaps->module_type[2]);

-if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
+if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
 pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
 pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
 memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
@@ -924,7 +924,8 @@ enum ice_status ice_init_hw(struct ice_hw *hw)

 /* Initialize port_info struct with PHY capabilities */
 status = ice_aq_get_phy_caps(hw->port_info, false,
-ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
+ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
+NULL);
 devm_kfree(ice_hw_to_dev(hw), pcaps);
 if (status)
 goto err_unroll_sched;
@@ -2682,7 +2683,7 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi)
 if (!pcaps)
 return ICE_ERR_NO_MEMORY;

-status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
+status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
 pcaps, NULL);

 devm_kfree(ice_hw_to_dev(hw), pcaps);
@@ -2842,8 +2843,8 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
 return ICE_ERR_NO_MEMORY;

 /* Get the current PHY config */
-status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
-NULL);
+status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
+pcaps, NULL);
 if (status) {
 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
 goto out;
@@ -2989,7 +2990,7 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
 if (!pcaps)
 return ICE_ERR_NO_MEMORY;

-status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
+status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
 NULL);
 if (status)
 goto out;
@@ -1081,7 +1081,7 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
 if (!caps)
 return -ENOMEM;

-status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
+status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
 caps, NULL);
 if (status) {
 err = -EAGAIN;
@@ -1976,7 +1976,7 @@ ice_get_link_ksettings(struct net_device *netdev,
 return -ENOMEM;

 status = ice_aq_get_phy_caps(vsi->port_info, false,
-ICE_AQC_REPORT_SW_CFG, caps, NULL);
+ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
 if (status) {
 err = -EIO;
 goto done;
@@ -2013,7 +2013,7 @@ ice_get_link_ksettings(struct net_device *netdev,
 ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);

 status = ice_aq_get_phy_caps(vsi->port_info, false,
-ICE_AQC_REPORT_TOPO_CAP, caps, NULL);
+ICE_AQC_REPORT_TOPO_CAP_MEDIA, caps, NULL);
 if (status) {
 err = -EIO;
 goto done;
@@ -2187,12 +2187,12 @@ ice_set_link_ksettings(struct net_device *netdev,
 {
 struct ice_netdev_priv *np = netdev_priv(netdev);
 struct ethtool_link_ksettings safe_ks, copy_ks;
-struct ice_aqc_get_phy_caps_data *abilities;
 u8 autoneg, timeout = TEST_SET_BITS_TIMEOUT;
-u16 adv_link_speed, curr_link_speed, idx;
+struct ice_aqc_get_phy_caps_data *phy_caps;
 struct ice_aqc_set_phy_cfg_data config;
+u16 adv_link_speed, curr_link_speed;
 struct ice_pf *pf = np->vsi->back;
-struct ice_port_info *p;
+struct ice_port_info *pi;
 u8 autoneg_changed = 0;
 enum ice_status status;
 u64 phy_type_high = 0;
@@ -2200,33 +2200,25 @@ ice_set_link_ksettings(struct net_device *netdev,
 int err = 0;
 bool linkup;

-p = np->vsi->port_info;
+pi = np->vsi->port_info;

-if (!p)
+if (!pi)
 return -EOPNOTSUPP;

-/* Check if this is LAN VSI */
-ice_for_each_vsi(pf, idx)
-if (pf->vsi[idx]->type == ICE_VSI_PF) {
-if (np->vsi != pf->vsi[idx])
-return -EOPNOTSUPP;
-break;
-}
-
-if (p->phy.media_type != ICE_MEDIA_BASET &&
-p->phy.media_type != ICE_MEDIA_FIBER &&
-p->phy.media_type != ICE_MEDIA_BACKPLANE &&
-p->phy.media_type != ICE_MEDIA_DA &&
-p->phy.link_info.link_info & ICE_AQ_LINK_UP)
+if (pi->phy.media_type != ICE_MEDIA_BASET &&
+pi->phy.media_type != ICE_MEDIA_FIBER &&
+pi->phy.media_type != ICE_MEDIA_BACKPLANE &&
+pi->phy.media_type != ICE_MEDIA_DA &&
+pi->phy.link_info.link_info & ICE_AQ_LINK_UP)
 return -EOPNOTSUPP;

-abilities = kzalloc(sizeof(*abilities), GFP_KERNEL);
-if (!abilities)
+phy_caps = kzalloc(sizeof(*phy_caps), GFP_KERNEL);
+if (!phy_caps)
 return -ENOMEM;

 /* Get the PHY capabilities based on media */
-status = ice_aq_get_phy_caps(p, false, ICE_AQC_REPORT_TOPO_CAP,
-abilities, NULL);
+status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
+phy_caps, NULL);
 if (status) {
 err = -EAGAIN;
 goto done;
@@ -2288,26 +2280,26 @@ ice_set_link_ksettings(struct net_device *netdev,
 * configuration is initialized during probe from PHY capabilities
 * software mode, and updated on set PHY configuration.
 */
-memcpy(&config, &p->phy.curr_user_phy_cfg, sizeof(config));
+memcpy(&config, &pi->phy.curr_user_phy_cfg, sizeof(config));

 config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

 /* Check autoneg */
-err = ice_setup_autoneg(p, &safe_ks, &config, autoneg, &autoneg_changed,
+err = ice_setup_autoneg(pi, &safe_ks, &config, autoneg, &autoneg_changed,
 netdev);

 if (err)
 goto done;

 /* Call to get the current link speed */
-p->phy.get_link_info = true;
-status = ice_get_link_status(p, &linkup);
+pi->phy.get_link_info = true;
+status = ice_get_link_status(pi, &linkup);
 if (status) {
 err = -EAGAIN;
 goto done;
 }

-curr_link_speed = p->phy.link_info.link_speed;
+curr_link_speed = pi->phy.curr_user_speed_req;
 adv_link_speed = ice_ksettings_find_adv_link_speed(ks);

 /* If speed didn't get set, set it to what it currently is.
@@ -2326,7 +2318,7 @@ ice_set_link_ksettings(struct net_device *netdev,
 }

 /* save the requested speeds */
-p->phy.link_info.req_speeds = adv_link_speed;
+pi->phy.link_info.req_speeds = adv_link_speed;

 /* set link and auto negotiation so changes take effect */
 config.caps |= ICE_AQ_PHY_ENA_LINK;
@@ -2342,9 +2334,9 @@ ice_set_link_ksettings(struct net_device *netdev,
 * for set PHY configuration
 */
 config.phy_type_high = cpu_to_le64(phy_type_high) &
-abilities->phy_type_high;
+phy_caps->phy_type_high;
 config.phy_type_low = cpu_to_le64(phy_type_low) &
-abilities->phy_type_low;
+phy_caps->phy_type_low;

 if (!(config.phy_type_high || config.phy_type_low)) {
 /* If there is no intersection and lenient mode is enabled, then
@@ -2364,7 +2356,7 @@ ice_set_link_ksettings(struct net_device *netdev,
 }

 /* If link is up put link down */
-if (p->phy.link_info.link_info & ICE_AQ_LINK_UP) {
+if (pi->phy.link_info.link_info & ICE_AQ_LINK_UP) {
 /* Tell the OS link is going down, the link will go
 * back up when fw says it is ready asynchronously
 */
@@ -2374,7 +2366,7 @@ ice_set_link_ksettings(struct net_device *netdev,
 }

 /* make the aq call */
-status = ice_aq_set_phy_cfg(&pf->hw, p, &config, NULL);
+status = ice_aq_set_phy_cfg(&pf->hw, pi, &config, NULL);
 if (status) {
 netdev_info(netdev, "Set phy config failed,\n");
 err = -EAGAIN;
@@ -2382,9 +2374,9 @@ ice_set_link_ksettings(struct net_device *netdev,
 }

 /* Save speed request */
-p->phy.curr_user_speed_req = adv_link_speed;
+pi->phy.curr_user_speed_req = adv_link_speed;
 done:
-kfree(abilities);
+kfree(phy_caps);
 clear_bit(__ICE_CFG_BUSY, pf->state);

 return err;
@@ -2954,7 +2946,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
 return;

 /* Get current PHY config */
-status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
+status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
 NULL);
 if (status)
 goto out;
@@ -3021,7 +3013,7 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
 return -ENOMEM;

 /* Get current PHY config */
-status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
+status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
 NULL);
 if (status) {
 kfree(pcaps);
@@ -726,7 +726,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
 }

 status = ice_aq_get_phy_caps(vsi->port_info, false,
-ICE_AQC_REPORT_SW_CFG, caps, NULL);
+ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
 if (status)
 netdev_info(vsi->netdev, "Get phy capability failed.\n");

@@ -1645,7 +1645,7 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
 if (!pcaps)
 return -ENOMEM;

-retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
+retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
 NULL);
 if (retcode) {
 dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
@@ -1705,7 +1705,7 @@ static int ice_init_nvm_phy_type(struct ice_port_info *pi)
 if (!pcaps)
 return -ENOMEM;

-status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_NVM_CAP, pcaps,
+status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, pcaps,
 NULL);

 if (status) {
@@ -1821,7 +1821,7 @@ static int ice_init_phy_user_cfg(struct ice_port_info *pi)
 if (!pcaps)
 return -ENOMEM;

-status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
+status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
 NULL);
 if (status) {
 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
@@ -1900,7 +1900,7 @@ static int ice_configure_phy(struct ice_vsi *vsi)
 return -ENOMEM;

 /* Get current PHY config */
-status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
+status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
 NULL);
 if (status) {
 dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n",
@@ -1918,7 +1918,7 @@ static int ice_configure_phy(struct ice_vsi *vsi)

 /* Use PHY topology as baseline for configuration */
 memset(pcaps, 0, sizeof(*pcaps));
-status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
+status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
 NULL);
 if (status) {
 dev_err(dev, "Failed to get PHY topology, VSI %d error %s\n",
@@ -1849,24 +1849,6 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,

 dev = ice_pf_to_dev(pf);

-/* single place to detect unsuccessful return values */
-if (v_retval) {
-vf->num_inval_msgs++;
-dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id,
-v_opcode, v_retval);
-if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
-dev_err(dev, "Number of invalid messages exceeded for VF %d\n",
-vf->vf_id);
-dev_err(dev, "Use PF Control I/F to enable the VF\n");
-set_bit(ICE_VF_STATE_DIS, vf->vf_states);
-return -EIO;
-}
-} else {
-vf->num_valid_msgs++;
-/* reset the invalid counter, if a valid message is received. */
-vf->num_inval_msgs = 0;
-}
-
 aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
 msg, msglen, NULL);
 if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
@@ -13,7 +13,6 @@
 #define ICE_MAX_MACADDR_PER_VF 18

 /* Malicious Driver Detection */
-#define ICE_DFLT_NUM_INVAL_MSGS_ALLOWED 10
 #define ICE_MDD_EVENTS_THRESHOLD 30

 /* Static VF transaction/status register def */
@@ -97,8 +96,6 @@ struct ice_vf {
 unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
 DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */

-u64 num_inval_msgs; /* number of continuous invalid msgs */
-u64 num_valid_msgs; /* number of valid msgs detected */
 unsigned long vf_caps; /* VF's adv. capabilities */
 u8 num_req_qs; /* num of queue pairs requested by VF */
 u16 num_mac;
@@ -130,11 +130,8 @@ static int cmd_alloc_index(struct mlx5_cmd *cmd)

 static void cmd_free_index(struct mlx5_cmd *cmd, int idx)
 {
-unsigned long flags;
+lockdep_assert_held(&cmd->alloc_lock);

-spin_lock_irqsave(&cmd->alloc_lock, flags);
 set_bit(idx, &cmd->bitmask);
-spin_unlock_irqrestore(&cmd->alloc_lock, flags);
 }

 static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)
@@ -144,17 +141,21 @@ static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)

 static void cmd_ent_put(struct mlx5_cmd_work_ent *ent)
 {
+struct mlx5_cmd *cmd = ent->cmd;
+unsigned long flags;
+
+spin_lock_irqsave(&cmd->alloc_lock, flags);
 if (!refcount_dec_and_test(&ent->refcnt))
-return;
+goto out;

 if (ent->idx >= 0) {
-struct mlx5_cmd *cmd = ent->cmd;
-
 cmd_free_index(cmd, ent->idx);
 up(ent->page_queue ? &cmd->pages_sem : &cmd->sem);
 }

 cmd_free_ent(ent);
+out:
+spin_unlock_irqrestore(&cmd->alloc_lock, flags);
 }

 static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
@@ -123,6 +123,10 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
 return;
 }

+/* Handle multipath entry with lower priority value */
+if (mp->mfi && mp->mfi != fi && fi->fib_priority >= mp->mfi->fib_priority)
+return;
+
 /* Handle add/replace event */
 nhs = fib_info_num_path(fi);
 if (nhs == 1) {
@@ -132,12 +136,13 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
 int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev);

 if (i < 0)
-i = MLX5_LAG_NORMAL_AFFINITY;
-else
-++i;
+return;

+i++;
 mlx5_lag_set_port_affinity(ldev, i);
 }
+
+mp->mfi = fi;
 return;
 }

@@ -1468,6 +1468,7 @@ static int lpc_eth_drv_resume(struct platform_device *pdev)
 {
 struct net_device *ndev = platform_get_drvdata(pdev);
 struct netdata_local *pldat;
+int ret;

 if (device_may_wakeup(&pdev->dev))
 disable_irq_wake(ndev->irq);
@@ -1477,7 +1478,9 @@ static int lpc_eth_drv_resume(struct platform_device *pdev)
 pldat = netdev_priv(ndev);

 /* Enable interface clock */
-clk_enable(pldat->clk);
+ret = clk_enable(pldat->clk);
+if (ret)
+return ret;

 /* Reset and initialize */
 __lpc_eth_reset(pldat);
@@ -3778,11 +3778,11 @@ bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
 return found;
 }

-static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
+static int qed_iov_get_link(struct qed_hwfn *p_hwfn,
 u16 vfid,
 struct qed_mcp_link_params *p_params,
 struct qed_mcp_link_state *p_link,
 struct qed_mcp_link_capabilities *p_caps)
 {
 struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
 vfid,
@@ -3790,7 +3790,7 @@ static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
 struct qed_bulletin_content *p_bulletin;

 if (!p_vf)
-return;
+return -EINVAL;

 p_bulletin = p_vf->bulletin.p_virt;

@@ -3800,6 +3800,7 @@ static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
 __qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
 if (p_caps)
 __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
+return 0;
 }

 static int
@@ -4658,6 +4659,7 @@ static int qed_get_vf_config(struct qed_dev *cdev,
 struct qed_public_vf_info *vf_info;
 struct qed_mcp_link_state link;
 u32 tx_rate;
+int ret;

 /* Sanitize request */
 if (IS_VF(cdev))
@@ -4671,7 +4673,9 @@ static int qed_get_vf_config(struct qed_dev *cdev,

 vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);

-qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
+ret = qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
+if (ret)
+return ret;

 /* Fill information about VF */
 ivi->vf = vf_id;
@@ -513,6 +513,9 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
 p_iov->bulletin.size,
 &p_iov->bulletin.phys,
 GFP_KERNEL);
+if (!p_iov->bulletin.p_virt)
+goto free_pf2vf_reply;
+
 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
 "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n",
 p_iov->bulletin.p_virt,
@@ -552,6 +555,10 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)

 return rc;

+free_pf2vf_reply:
+dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+sizeof(union pfvf_tlvs),
+p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);
 free_vf2pf_request:
 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 sizeof(union vfpf_tlvs),
@@ -568,7 +568,9 @@ int cpts_register(struct cpts *cpts)
 for (i = 0; i < CPTS_MAX_EVENTS; i++)
 list_add(&cpts->pool_data[i].list, &cpts->pool);

-clk_enable(cpts->refclk);
+err = clk_enable(cpts->refclk);
+if (err)
+return err;

 cpts_write32(cpts, CPTS_EN, control);
 cpts_write32(cpts, TS_PEND_EN, int_enable);
@@ -1187,7 +1187,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
 if (rc) {
 dev_err(dev,
 "Cannot register network device, aborting\n");
-goto error;
+goto put_node;
 }

 dev_info(dev,
@@ -1195,6 +1195,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
 (unsigned int __force)ndev->mem_start, lp->base_addr, ndev->irq);
 return 0;

+put_node:
+of_node_put(lp->phy_node);
 error:
 free_netdev(ndev);
 return rc;
@@ -289,7 +289,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
 if (err < 0)
 return err;

-err = phy_write(phydev, MII_DP83822_MISR1, 0);
+err = phy_write(phydev, MII_DP83822_MISR2, 0);
 if (err < 0)
 return err;

@@ -256,6 +256,7 @@ static void backend_disconnect(struct backend_info *be)
 unsigned int queue_index;

 xen_unregister_watchers(vif);
+xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status");
 #ifdef CONFIG_DEBUG_FS
 xenvif_debugfs_delif(vif);
 #endif /* CONFIG_DEBUG_FS */
@@ -675,7 +676,6 @@ static void hotplug_status_changed(struct xenbus_watch *watch,

 /* Not interested in this watch anymore. */
 unregister_hotplug_status_watch(be);
-xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status");
 }
 kfree(str);
 }
@@ -824,15 +824,11 @@ static void connect(struct backend_info *be)
 xenvif_carrier_on(be->vif);

 unregister_hotplug_status_watch(be);
-if (xenbus_exists(XBT_NIL, dev->nodename, "hotplug-status")) {
-err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
-NULL, hotplug_status_changed,
-"%s/%s", dev->nodename,
-"hotplug-status");
-if (err)
-goto err;
+err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL,
+hotplug_status_changed,
+"%s/%s", dev->nodename, "hotplug-status");
+if (!err)
 be->have_hotplug_status_watch = 1;
-}

 netif_tx_wake_all_queues(be->vif->dev);

@@ -1609,7 +1609,9 @@ free_nfc_dev:
 nfc_digital_free_device(dev->nfc_digital_dev);

 error:
+usb_kill_urb(dev->in_urb);
 usb_free_urb(dev->in_urb);
+usb_kill_urb(dev->out_urb);
 usb_free_urb(dev->out_urb);
 usb_put_dev(dev->udev);

@@ -567,6 +567,12 @@ static int rockchip_spi_slave_abort(struct spi_controller *ctlr)
 {
 struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);

+if (atomic_read(&rs->state) & RXDMA)
+dmaengine_terminate_sync(ctlr->dma_rx);
+if (atomic_read(&rs->state) & TXDMA)
+dmaengine_terminate_sync(ctlr->dma_tx);
+atomic_set(&rs->state, 0);
+spi_enable_chip(rs, false);
 rs->slave_abort = true;
 complete(&ctlr->xfer_completion);

@@ -636,7 +642,7 @@ static int rockchip_spi_probe(struct platform_device *pdev)
 struct spi_controller *ctlr;
 struct resource *mem;
 struct device_node *np = pdev->dev.of_node;
-u32 rsd_nsecs;
+u32 rsd_nsecs, num_cs;
 bool slave_mode;

 slave_mode = of_property_read_bool(np, "spi-slave");
@@ -744,8 +750,9 @@ static int rockchip_spi_probe(struct platform_device *pdev)
 * rk spi0 has two native cs, spi1..5 one cs only
 * if num-cs is missing in the dts, default to 1
 */
-if (of_property_read_u16(np, "num-cs", &ctlr->num_chipselect))
-ctlr->num_chipselect = 1;
+if (of_property_read_u32(np, "num-cs", &num_cs))
+num_cs = 1;
+ctlr->num_chipselect = num_cs;
 ctlr->use_gpio_descriptors = true;
 }
 ctlr->dev.of_node = pdev->dev.of_node;
@@ -76,14 +76,15 @@ static void tx_complete(void *arg)

 static int gdm_lte_rx(struct sk_buff *skb, struct nic *nic, int nic_type)
 {
-int ret;
+int ret, len;

+len = skb->len + ETH_HLEN;
 ret = netif_rx_ni(skb);
 if (ret == NET_RX_DROP) {
 nic->stats.rx_dropped++;
 } else {
 nic->stats.rx_packets++;
-nic->stats.rx_bytes += skb->len + ETH_HLEN;
+nic->stats.rx_bytes += len;
 }

 return 0;
@@ -6679,6 +6679,7 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf)
 struct sta_info *psta_bmc;
 struct list_head *xmitframe_plist, *xmitframe_phead;
 struct xmit_frame *pxmitframe = NULL;
+struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
 struct sta_priv *pstapriv = &padapter->stapriv;

 /* for BC/MC Frames */
@@ -6689,7 +6690,8 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf)
 if ((pstapriv->tim_bitmap&BIT(0)) && (psta_bmc->sleepq_len > 0)) {
 msleep(10);/* 10ms, ATIM(HIQ) Windows */

-spin_lock_bh(&psta_bmc->sleep_q.lock);
+/* spin_lock_bh(&psta_bmc->sleep_q.lock); */
+spin_lock_bh(&pxmitpriv->lock);

 xmitframe_phead = get_list_head(&psta_bmc->sleep_q);
 xmitframe_plist = get_next(xmitframe_phead);
@@ -6715,7 +6717,8 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf)
 rtw_hal_xmitframe_enqueue(padapter, pxmitframe);
 }

-spin_unlock_bh(&psta_bmc->sleep_q.lock);
+/* spin_unlock_bh(&psta_bmc->sleep_q.lock); */
+spin_unlock_bh(&pxmitpriv->lock);

 /* check hi queue and bmc_sleepq */
 rtw_chk_hi_queue_cmd(padapter);
@@ -1144,8 +1144,10 @@ sint validate_recv_ctrl_frame(struct adapter *padapter, union recv_frame *precv_
 if ((psta->state&WIFI_SLEEP_STATE) && (pstapriv->sta_dz_bitmap&BIT(psta->aid))) {
 struct list_head *xmitframe_plist, *xmitframe_phead;
 struct xmit_frame *pxmitframe = NULL;
+struct xmit_priv *pxmitpriv = &padapter->xmitpriv;

-spin_lock_bh(&psta->sleep_q.lock);
+/* spin_lock_bh(&psta->sleep_q.lock); */
+spin_lock_bh(&pxmitpriv->lock);

 xmitframe_phead = get_list_head(&psta->sleep_q);
 xmitframe_plist = get_next(xmitframe_phead);
@@ -1180,10 +1182,12 @@ sint validate_recv_ctrl_frame(struct adapter *padapter, union recv_frame *precv_
 update_beacon(padapter, _TIM_IE_, NULL, true);
 }

-spin_unlock_bh(&psta->sleep_q.lock);
+/* spin_unlock_bh(&psta->sleep_q.lock); */
+spin_unlock_bh(&pxmitpriv->lock);

 } else {
-spin_unlock_bh(&psta->sleep_q.lock);
+/* spin_unlock_bh(&psta->sleep_q.lock); */
+spin_unlock_bh(&pxmitpriv->lock);

 /* DBG_871X("no buffered packets to xmit\n"); */
 if (pstapriv->tim_bitmap&BIT(psta->aid)) {
@@ -330,48 +330,46 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)

 /* list_del_init(&psta->wakeup_list); */

-spin_lock_bh(&psta->sleep_q.lock);
-rtw_free_xmitframe_queue(pxmitpriv, &psta->sleep_q);
-psta->sleepq_len = 0;
-spin_unlock_bh(&psta->sleep_q.lock);
-
 spin_lock_bh(&pxmitpriv->lock);

+rtw_free_xmitframe_queue(pxmitpriv, &psta->sleep_q);
+psta->sleepq_len = 0;
+
 /* vo */
-spin_lock_bh(&pstaxmitpriv->vo_q.sta_pending.lock);
+/* spin_lock_bh(&(pxmitpriv->vo_pending.lock)); */
 rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vo_q.sta_pending);
 list_del_init(&(pstaxmitpriv->vo_q.tx_pending));
 phwxmit = pxmitpriv->hwxmits;
 phwxmit->accnt -= pstaxmitpriv->vo_q.qcnt;
 pstaxmitpriv->vo_q.qcnt = 0;
-spin_unlock_bh(&pstaxmitpriv->vo_q.sta_pending.lock);
+/* spin_unlock_bh(&(pxmitpriv->vo_pending.lock)); */

 /* vi */
-spin_lock_bh(&pstaxmitpriv->vi_q.sta_pending.lock);
+/* spin_lock_bh(&(pxmitpriv->vi_pending.lock)); */
 rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vi_q.sta_pending);
 list_del_init(&(pstaxmitpriv->vi_q.tx_pending));
 phwxmit = pxmitpriv->hwxmits+1;
 phwxmit->accnt -= pstaxmitpriv->vi_q.qcnt;
 pstaxmitpriv->vi_q.qcnt = 0;
-spin_unlock_bh(&pstaxmitpriv->vi_q.sta_pending.lock);
+/* spin_unlock_bh(&(pxmitpriv->vi_pending.lock)); */

 /* be */
-spin_lock_bh(&pstaxmitpriv->be_q.sta_pending.lock);
+/* spin_lock_bh(&(pxmitpriv->be_pending.lock)); */
 rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->be_q.sta_pending);
 list_del_init(&(pstaxmitpriv->be_q.tx_pending));
 phwxmit = pxmitpriv->hwxmits+2;
 phwxmit->accnt -= pstaxmitpriv->be_q.qcnt;
 pstaxmitpriv->be_q.qcnt = 0;
-spin_unlock_bh(&pstaxmitpriv->be_q.sta_pending.lock);
+/* spin_unlock_bh(&(pxmitpriv->be_pending.lock)); */

 /* bk */
-spin_lock_bh(&pstaxmitpriv->bk_q.sta_pending.lock);
+/* spin_lock_bh(&(pxmitpriv->bk_pending.lock)); */
 rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->bk_q.sta_pending);
 list_del_init(&(pstaxmitpriv->bk_q.tx_pending));
 phwxmit = pxmitpriv->hwxmits+3;
 phwxmit->accnt -= pstaxmitpriv->bk_q.qcnt;
 pstaxmitpriv->bk_q.qcnt = 0;
-spin_unlock_bh(&pstaxmitpriv->bk_q.sta_pending.lock);
+/* spin_unlock_bh(&(pxmitpriv->bk_pending.lock)); */

 spin_unlock_bh(&pxmitpriv->lock);

@@ -1871,6 +1871,8 @@ void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pfram
 struct list_head *plist, *phead;
 struct xmit_frame *pxmitframe;

+spin_lock_bh(&pframequeue->lock);
+
 phead = get_list_head(pframequeue);
 plist = get_next(phead);

@@ -1881,6 +1883,7 @@ void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pfram

 rtw_free_xmitframe(pxmitpriv, pxmitframe);
 }
+spin_unlock_bh(&pframequeue->lock);
 }

 s32 rtw_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitframe)
@@ -1943,7 +1946,6 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
 struct sta_info *psta;
 struct tx_servq *ptxservq;
 struct pkt_attrib *pattrib = &pxmitframe->attrib;
-struct xmit_priv *xmit_priv = &padapter->xmitpriv;
 struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
 sint res = _SUCCESS;

@@ -1972,14 +1974,12 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
 ptxservq = rtw_get_sta_pending(padapter, psta, pattrib->priority, (u8 *)(&ac_index));

-spin_lock_bh(&xmit_priv->lock);
 if (list_empty(&ptxservq->tx_pending))
 list_add_tail(&ptxservq->tx_pending, get_list_head(phwxmits[ac_index].sta_queue));

 list_add_tail(&pxmitframe->list, get_list_head(&ptxservq->sta_pending));
 ptxservq->qcnt++;
 phwxmits[ac_index].accnt++;
-spin_unlock_bh(&xmit_priv->lock);

 exit:

@@ -2397,10 +2397,11 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
 struct list_head *xmitframe_plist, *xmitframe_phead;
 struct xmit_frame *pxmitframe = NULL;
 struct sta_priv *pstapriv = &padapter->stapriv;
+struct xmit_priv *pxmitpriv = &padapter->xmitpriv;

 psta_bmc = rtw_get_bcmc_stainfo(padapter);

-spin_lock_bh(&psta->sleep_q.lock);
+spin_lock_bh(&pxmitpriv->lock);

 xmitframe_phead = get_list_head(&psta->sleep_q);
 xmitframe_plist = get_next(xmitframe_phead);
@@ -2508,7 +2509,7 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)

 _exit:

-spin_unlock_bh(&psta->sleep_q.lock);
+spin_unlock_bh(&pxmitpriv->lock);

 if (update_mask)
 update_beacon(padapter, _TIM_IE_, NULL, true);
@@ -2520,8 +2521,9 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst
 struct list_head *xmitframe_plist, *xmitframe_phead;
 struct xmit_frame *pxmitframe = NULL;
 struct sta_priv *pstapriv = &padapter->stapriv;
+struct xmit_priv *pxmitpriv = &padapter->xmitpriv;

-spin_lock_bh(&psta->sleep_q.lock);
+spin_lock_bh(&pxmitpriv->lock);

 xmitframe_phead = get_list_head(&psta->sleep_q);
 xmitframe_plist = get_next(xmitframe_phead);
@@ -2577,7 +2579,7 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst
 }
 }

-spin_unlock_bh(&psta->sleep_q.lock);
+spin_unlock_bh(&pxmitpriv->lock);
 }

 void enqueue_pending_xmitbuf(
@@ -572,7 +572,9 @@ s32 rtl8723bs_hal_xmit(
 rtw_issue_addbareq_cmd(padapter, pxmitframe);
 }

+spin_lock_bh(&pxmitpriv->lock);
 err = rtw_xmitframe_enqueue(padapter, pxmitframe);
+spin_unlock_bh(&pxmitpriv->lock);
 if (err != _SUCCESS) {
 RT_TRACE(_module_hal_xmit_c_, _drv_err_, ("rtl8723bs_hal_xmit: enqueue xmitframe fail\n"));
 rtw_free_xmitframe(pxmitpriv, pxmitframe);
@@ -167,14 +167,13 @@ void virtio_add_status(struct virtio_device *dev, unsigned int status)
 }
 EXPORT_SYMBOL_GPL(virtio_add_status);

-int virtio_finalize_features(struct virtio_device *dev)
+/* Do some validation, then set FEATURES_OK */
+static int virtio_features_ok(struct virtio_device *dev)
 {
-int ret = dev->config->finalize_features(dev);
 unsigned status;
+int ret;

 might_sleep();
-if (ret)
-return ret;

 ret = arch_has_restricted_virtio_memory_access();
 if (ret) {
@@ -203,7 +202,6 @@ int virtio_finalize_features(struct virtio_device *dev)
 }
 return 0;
 }
-EXPORT_SYMBOL_GPL(virtio_finalize_features);

 static int virtio_dev_probe(struct device *_d)
 {
@@ -240,17 +238,6 @@ static int virtio_dev_probe(struct device *_d)
 driver_features_legacy = driver_features;
 }

-/*
- * Some devices detect legacy solely via F_VERSION_1. Write
- * F_VERSION_1 to force LE config space accesses before FEATURES_OK for
- * these when needed.
- */
-if (drv->validate && !virtio_legacy_is_little_endian()
-&& device_features & BIT_ULL(VIRTIO_F_VERSION_1)) {
-dev->features = BIT_ULL(VIRTIO_F_VERSION_1);
-dev->config->finalize_features(dev);
-}
-
 if (device_features & (1ULL << VIRTIO_F_VERSION_1))
 dev->features = driver_features & device_features;
 else
@@ -261,13 +248,26 @@ static int virtio_dev_probe(struct device *_d)
 if (device_features & (1ULL << i))
 __virtio_set_bit(dev, i);

+err = dev->config->finalize_features(dev);
+if (err)
+goto err;
+
 if (drv->validate) {
+u64 features = dev->features;
+
 err = drv->validate(dev);
 if (err)
 goto err;
+
+/* Did validation change any features? Then write them again. */
+if (features != dev->features) {
+err = dev->config->finalize_features(dev);
+if (err)
+goto err;
+}
 }

-err = virtio_finalize_features(dev);
+err = virtio_features_ok(dev);
 if (err)
 goto err;

@@ -444,7 +444,11 @@ int virtio_device_restore(struct virtio_device *dev)
 /* We have a driver! */
 virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER);

-ret = virtio_finalize_features(dev);
+ret = dev->config->finalize_features(dev);
+if (ret)
+goto err;
+
+ret = virtio_features_ok(dev);
 if (ret)
 goto err;

@@ -74,6 +74,11 @@ int ext4_resize_begin(struct super_block *sb)
 return -EPERM;
 }

+if (ext4_has_feature_sparse_super2(sb)) {
+ext4_msg(sb, KERN_ERR, "Online resizing not supported with sparse_super2");
+return -EOPNOTSUPP;
+}
+
 if (test_and_set_bit_lock(EXT4_FLAGS_RESIZING,
 &EXT4_SB(sb)->s_ext4_flags))
 ret = -EBUSY;
@@ -950,7 +950,17 @@ static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,

 while (count) {
 if (cs->write && cs->pipebufs && page) {
-return fuse_ref_page(cs, page, offset, count);
+/*
+ * Can't control lifetime of pipe buffers, so always
+ * copy user pages.
+ */
+if (cs->req->args->user_pages) {
+err = fuse_copy_fill(cs);
+if (err)
+return err;
+} else {
+return fuse_ref_page(cs, page, offset, count);
+}
 } else if (!cs->len) {
 if (cs->move_pages && page &&
 offset == 0 && count == PAGE_SIZE) {
@@ -1420,6 +1420,7 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
 (PAGE_SIZE - ret) & (PAGE_SIZE - 1);
 }

+ap->args.user_pages = true;
 if (write)
 ap->args.in_pages = true;
 else
@@ -277,6 +277,7 @@ struct fuse_args {
 bool nocreds:1;
 bool in_pages:1;
 bool out_pages:1;
+bool user_pages:1;
 bool out_argvar:1;
 bool page_zeroing:1;
 bool page_replace:1;

fs/pipe.c
@@ -252,7 +252,8 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
 */
 was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
 for (;;) {
-unsigned int head = pipe->head;
+/* Read ->head with a barrier vs post_one_notification() */
+unsigned int head = smp_load_acquire(&pipe->head);
 unsigned int tail = pipe->tail;
 unsigned int mask = pipe->ring_size - 1;

@@ -831,10 +832,8 @@ void free_pipe_info(struct pipe_inode_info *pipe)
 int i;

 #ifdef CONFIG_WATCH_QUEUE
-if (pipe->watch_queue) {
+if (pipe->watch_queue)
 watch_queue_clear(pipe->watch_queue);
-put_watch_queue(pipe->watch_queue);
-}
 #endif

 (void) account_pipe_buffers(pipe->user, pipe->nr_accounted, 0);
@@ -844,6 +843,10 @@ void free_pipe_info(struct pipe_inode_info *pipe)
 if (buf->ops)
 pipe_buf_release(pipe, buf);
 }
+#ifdef CONFIG_WATCH_QUEUE
+if (pipe->watch_queue)
+put_watch_queue(pipe->watch_queue);
+#endif
 if (pipe->tmp_page)
 __free_page(pipe->tmp_page);
 kfree(pipe->bufs);
@@ -9307,8 +9307,8 @@ struct mlx5_ifc_bufferx_reg_bits {
 	u8 reserved_at_0[0x6];
 	u8 lossy[0x1];
 	u8 epsb[0x1];
-	u8 reserved_at_8[0xc];
-	u8 size[0xc];
+	u8 reserved_at_8[0x8];
+	u8 size[0x10];
 
 	u8 xoff_threshold[0x10];
 	u8 xon_threshold[0x10];
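The two changed fields above only redistribute bits inside the same 32-bit dword: the old layout summed 0x6 + 0x1 + 0x1 + 0xc + 0xc = 32 bits, the fixed one sums 0x6 + 0x1 + 0x1 + 0x8 + 0x10 = 32 bits, widening the size field to 16 bits. A minimal compile-time check of that arithmetic, purely illustrative and not part of the patch:

/* Illustrative only: both bufferx_reg layouts keep the first dword at 32 bits;
 * the fix widens the size field from 12 to 16 bits. */
enum {
	OLD_DWORD_BITS = 0x6 + 0x1 + 0x1 + 0xc + 0xc,	/* 32 */
	NEW_DWORD_BITS = 0x6 + 0x1 + 0x1 + 0x8 + 0x10,	/* 32 */
};
_Static_assert(OLD_DWORD_BITS == 32 && NEW_DWORD_BITS == 32,
	       "bufferx_reg first dword must remain 32 bits");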
@@ -134,7 +134,6 @@ void virtio_break_device(struct virtio_device *dev);
 void virtio_config_changed(struct virtio_device *dev);
 void virtio_config_disable(struct virtio_device *dev);
 void virtio_config_enable(struct virtio_device *dev);
-int virtio_finalize_features(struct virtio_device *dev);
 #ifdef CONFIG_PM_SLEEP
 int virtio_device_freeze(struct virtio_device *dev);
 int virtio_device_restore(struct virtio_device *dev);
@@ -62,8 +62,9 @@ struct virtio_shm_region {
  * Returns the first 64 feature bits (all we currently need).
  * @finalize_features: confirm what device features we'll be using.
  *	vdev: the virtio_device
- *	This gives the final feature bits for the device: it can change
+ *	This sends the driver feature bits to the device: it can change
  *	the dev->feature bits if it wants.
+ *	Note: despite the name this can be called any number of times.
  *	Returns 0 on success or error status
  * @bus_name: return the bus name associated with the device (optional)
  *	vdev: the virtio_device
@@ -28,7 +28,8 @@ struct watch_type_filter {
 struct watch_filter {
 	union {
 		struct rcu_head rcu;
-		unsigned long type_filter[2];	/* Bitmask of accepted types */
+		/* Bitmask of accepted types */
+		DECLARE_BITMAP(type_filter, WATCH_TYPE__NR);
 	};
 	u32 nr_filters;		/* Number of filters */
 	struct watch_type_filter filters[];
@@ -1491,10 +1491,12 @@ static int __init set_buf_size(char *str)
 	if (!str)
 		return 0;
 	buf_size = memparse(str, &str);
-	/* nr_entries can not be zero */
-	if (buf_size == 0)
-		return 0;
-	trace_buf_size = buf_size;
+	/*
+	 * nr_entries can not be zero and the startup
+	 * tests require some buffer space. Therefore
+	 * ensure we have at least 4096 bytes of buffer.
+	 */
+	trace_buf_size = max(4096UL, buf_size);
 	return 1;
 }
 __setup("trace_buf_size=", set_buf_size);
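For reference, a small standalone sketch of the clamping behaviour the hunk above introduces: any trace_buf_size= value below 4096 bytes is raised to 4096 so the startup tests still have buffer space. The function and values below are hypothetical userspace code, not kernel code:

#include <stdio.h>

/* Mirror of max(4096UL, buf_size): never accept a buffer smaller than 4 KiB. */
static unsigned long clamp_buf_size(unsigned long requested)
{
	const unsigned long min_size = 4096UL;

	return requested < min_size ? min_size : requested;
}

int main(void)
{
	printf("%lu\n", clamp_buf_size(64));		/* prints 4096 */
	printf("%lu\n", clamp_buf_size(1UL << 20));	/* prints 1048576 */
	return 0;
}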
@@ -54,6 +54,7 @@ static void watch_queue_pipe_buf_release(struct pipe_inode_info *pipe,
 	bit += page->index;
 
 	set_bit(bit, wqueue->notes_bitmap);
+	generic_pipe_buf_release(pipe, buf);
 }
 
 // No try_steal function => no stealing
@@ -112,7 +113,7 @@ static bool post_one_notification(struct watch_queue *wqueue,
 	buf->offset = offset;
 	buf->len = len;
 	buf->flags = PIPE_BUF_FLAG_WHOLE;
-	pipe->head = head + 1;
+	smp_store_release(&pipe->head, head + 1); /* vs pipe_read() */
 
 	if (!test_and_clear_bit(note, wqueue->notes_bitmap)) {
 		spin_unlock_irq(&pipe->rd_wait.lock);
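The smp_store_release() here pairs with the smp_load_acquire() added to pipe_read() above: the producer publishes the new head only after the buffer slot is filled, so a reader that observes the new head also observes the slot contents. A minimal userspace sketch of that pairing, using C11 atomics in place of the kernel barriers (all names illustrative):

#include <stdatomic.h>

struct ring {
	_Atomic unsigned int head;
	unsigned int slots[16];
};

/* Producer: fill the slot first, then publish the new head with release
 * semantics, mirroring smp_store_release(&pipe->head, head + 1). */
static void publish(struct ring *r, unsigned int head, unsigned int value)
{
	r->slots[head & 15] = value;
	atomic_store_explicit(&r->head, head + 1, memory_order_release);
}

/* Consumer: read head with acquire semantics before touching the slots,
 * mirroring smp_load_acquire(&pipe->head) in pipe_read(). */
static int consume(struct ring *r, unsigned int tail, unsigned int *out)
{
	unsigned int head = atomic_load_explicit(&r->head, memory_order_acquire);

	if (tail == head)
		return 0;	/* empty */
	*out = r->slots[tail & 15];
	return 1;
}

int main(void)
{
	struct ring r = { 0 };
	unsigned int v;

	publish(&r, 0, 42);
	return consume(&r, 0, &v) && v == 42 ? 0 : 1;
}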
@@ -243,7 +244,8 @@ long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes)
 		goto error;
 	}
 
-	ret = pipe_resize_ring(pipe, nr_notes);
+	nr_notes = nr_pages * WATCH_QUEUE_NOTES_PER_PAGE;
+	ret = pipe_resize_ring(pipe, roundup_pow_of_two(nr_notes));
 	if (ret < 0)
 		goto error;
 
@@ -268,7 +270,7 @@ long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes)
 	wqueue->notes = pages;
 	wqueue->notes_bitmap = bitmap;
 	wqueue->nr_pages = nr_pages;
-	wqueue->nr_notes = nr_pages * WATCH_QUEUE_NOTES_PER_PAGE;
+	wqueue->nr_notes = nr_notes;
 	return 0;
 
 error_p:
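Taken together, the two hunks above make the sizing consistent: nr_notes is rounded up to whole pages before it is stored, and the pipe ring itself is sized to the next power of two, which the ring code expects. A rough userspace illustration of that sizing logic, with WATCH_QUEUE_NOTES_PER_PAGE stood in by a hypothetical constant:

#include <stdio.h>

#define NOTES_PER_PAGE 8	/* stand-in for WATCH_QUEUE_NOTES_PER_PAGE */

static unsigned int roundup_pow_of_two_u(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int nr_notes = 13;
	unsigned int nr_pages = (nr_notes + NOTES_PER_PAGE - 1) / NOTES_PER_PAGE;

	/* Account for whole pages, as the fixed code does, then size the
	 * ring to a power of two before resizing it. */
	nr_notes = nr_pages * NOTES_PER_PAGE;				/* 16 */
	printf("ring slots: %u\n", roundup_pow_of_two_u(nr_notes));	/* 16 */
	return 0;
}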
@@ -320,7 +322,7 @@ long watch_queue_set_filter(struct pipe_inode_info *pipe,
 		    tf[i].info_mask & WATCH_INFO_LENGTH)
 			goto err_filter;
 		/* Ignore any unknown types */
-		if (tf[i].type >= sizeof(wfilter->type_filter) * 8)
+		if (tf[i].type >= WATCH_TYPE__NR)
 			continue;
 		nr_filter++;
 	}
@@ -336,7 +338,7 @@ long watch_queue_set_filter(struct pipe_inode_info *pipe,
 
 	q = wfilter->filters;
 	for (i = 0; i < filter.nr_filters; i++) {
-		if (tf[i].type >= sizeof(wfilter->type_filter) * BITS_PER_LONG)
+		if (tf[i].type >= WATCH_TYPE__NR)
 			continue;
 
 		q->type = tf[i].type;
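Both filter checks now compare against WATCH_TYPE__NR rather than a size derived from the backing storage; a bitmap declared for N bits is padded out to whole unsigned longs, so sizeof()-based bounds overshoot the number of valid types. A small illustration of the difference, with hypothetical values:

#include <stdio.h>
#include <limits.h>

#define NR_TYPES 3	/* stand-in for WATCH_TYPE__NR */
#define BITS_TO_LONGS(n) (((n) + CHAR_BIT * sizeof(long) - 1) / (CHAR_BIT * sizeof(long)))

int main(void)
{
	unsigned long type_filter[BITS_TO_LONGS(NR_TYPES)];

	/* The array is padded to a whole long, so its bit capacity is far
	 * larger than the number of valid type values. */
	printf("declared bits: %zu, valid types: %d\n",
	       sizeof(type_filter) * CHAR_BIT, NR_TYPES);	/* e.g. 64 vs 3 */
	return 0;
}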
@@ -371,6 +373,7 @@ static void __put_watch_queue(struct kref *kref)
 
 	for (i = 0; i < wqueue->nr_pages; i++)
 		__free_page(wqueue->notes[i]);
+	bitmap_free(wqueue->notes_bitmap);
 
 	wfilter = rcu_access_pointer(wqueue->filter);
 	if (wfilter)
@@ -566,7 +569,7 @@ void watch_queue_clear(struct watch_queue *wqueue)
 	rcu_read_lock();
 	spin_lock_bh(&wqueue->lock);
 
-	/* Prevent new additions and prevent notifications from happening */
+	/* Prevent new notifications from being stored. */
 	wqueue->defunct = true;
 
 	while (!hlist_empty(&wqueue->watches)) {
@@ -87,6 +87,13 @@ again:
 	ax25_for_each(s, &ax25_list) {
 		if (s->ax25_dev == ax25_dev) {
 			sk = s->sk;
+			if (!sk) {
+				spin_unlock_bh(&ax25_list_lock);
+				s->ax25_dev = NULL;
+				ax25_disconnect(s, ENETUNREACH);
+				spin_lock_bh(&ax25_list_lock);
+				goto again;
+			}
 			sock_hold(sk);
 			spin_unlock_bh(&ax25_list_lock);
 			lock_sock(sk);
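The added branch handles listed ax25 sockets whose sk is already gone: the list lock is dropped, the entry is detached from the device and disconnected, then the lock is retaken and the walk restarts from the top because the list may have changed in the meantime. A generic userspace sketch of that drop-lock/redo-walk pattern, pthread-based and with all names hypothetical:

#include <pthread.h>

struct node { struct node *next; int dead; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

/* Work that must not run under the list lock (may sleep in real code). */
static void teardown(struct node *n)
{
	n->dead = 0;
}

static void kill_matching(void)
{
	struct node *n;

	pthread_mutex_lock(&list_lock);
again:
	for (n = head; n; n = n->next) {
		if (n->dead) {
			pthread_mutex_unlock(&list_lock);
			teardown(n);
			pthread_mutex_lock(&list_lock);
			goto again;	/* list may have changed while unlocked */
		}
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	kill_matching();	/* no-op here: the list is empty */
	return 0;
}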
@@ -213,7 +213,7 @@ static ssize_t speed_show(struct device *dev,
 	if (!rtnl_trylock())
 		return restart_syscall();
 
-	if (netif_running(netdev)) {
+	if (netif_running(netdev) && netif_device_present(netdev)) {
 		struct ethtool_link_ksettings cmd;
 
 		if (!__ethtool_get_link_ksettings(netdev, &cmd))
@@ -160,6 +160,9 @@ static struct sk_buff *xfrm4_beet_gso_segment(struct xfrm_state *x,
 			skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
 	}
 
+	if (proto == IPPROTO_IPV6)
+		skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;
+
 	__skb_pull(skb, skb_transport_offset(skb));
 	ops = rcu_dereference(inet_offloads[proto]);
 	if (likely(ops && ops->callbacks.gso_segment))
@@ -5008,6 +5008,7 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
 	    nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
 		goto error;
 
+	spin_lock_bh(&ifa->lock);
 	if (!((ifa->flags&IFA_F_PERMANENT) &&
 	      (ifa->prefered_lft == INFINITY_LIFE_TIME))) {
 		preferred = ifa->prefered_lft;
@@ -5029,6 +5030,7 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
 		preferred = INFINITY_LIFE_TIME;
 		valid = INFINITY_LIFE_TIME;
 	}
+	spin_unlock_bh(&ifa->lock);
 
 	if (!ipv6_addr_any(&ifa->peer_addr)) {
 		if (nla_put_in6_addr(skb, IFA_LOCAL, &ifa->addr) < 0 ||
@@ -198,6 +198,9 @@ static struct sk_buff *xfrm6_beet_gso_segment(struct xfrm_state *x,
 		ipv6_skip_exthdr(skb, 0, &proto, &frag);
 	}
 
+	if (proto == IPPROTO_IPIP)
+		skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;
+
 	__skb_pull(skb, skb_transport_offset(skb));
 	ops = rcu_dereference(inet6_offloads[proto]);
 	if (likely(ops && ops->callbacks.gso_segment))
@@ -61,10 +61,6 @@ static void inet_diag_msg_sctpasoc_fill(struct inet_diag_msg *r,
 		r->idiag_timer = SCTP_EVENT_TIMEOUT_T3_RTX;
 		r->idiag_retrans = asoc->rtx_data_chunks;
 		r->idiag_expires = jiffies_to_msecs(t3_rtx->expires - jiffies);
-	} else {
-		r->idiag_timer = 0;
-		r->idiag_retrans = 0;
-		r->idiag_expires = 0;
 	}
 }
 
@@ -144,13 +140,14 @@ static int inet_sctp_diag_fill(struct sock *sk, struct sctp_association *asoc,
 	r = nlmsg_data(nlh);
 	BUG_ON(!sk_fullsock(sk));
 
+	r->idiag_timer = 0;
+	r->idiag_retrans = 0;
+	r->idiag_expires = 0;
 	if (asoc) {
 		inet_diag_msg_sctpasoc_fill(r, sk, asoc);
 	} else {
 		inet_diag_msg_common_fill(r, sk);
 		r->idiag_state = sk->sk_state;
-		r->idiag_timer = 0;
-		r->idiag_retrans = 0;
 	}
 
 	if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns, net_admin))
@@ -342,16 +342,18 @@ static int tipc_enable_bearer(struct net *net, const char *name,
 		goto rejected;
 	}
 
+	/* Create monitoring data before accepting activate messages */
+	if (tipc_mon_create(net, bearer_id)) {
+		bearer_disable(net, b);
+		kfree_skb(skb);
+		return -ENOMEM;
+	}
+
 	test_and_set_bit_lock(0, &b->up);
 	rcu_assign_pointer(tn->bearer_list[bearer_id], b);
 	if (skb)
 		tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr);
 
-	if (tipc_mon_create(net, bearer_id)) {
-		bearer_disable(net, b);
-		return -ENOMEM;
-	}
-
 	pr_info("Enabled bearer <%s>, priority %u\n", name, prio);
 
 	return res;
@@ -2245,6 +2245,11 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
 		break;
 
 	case STATE_MSG:
+		/* Validate Gap ACK blocks, drop if invalid */
+		glen = tipc_get_gap_ack_blks(&ga, l, hdr, true);
+		if (glen > dlen)
+			break;
+
 		l->rcv_nxt_state = msg_seqno(hdr) + 1;
 
 		/* Update own tolerance if peer indicates a non-zero value */
@@ -2270,10 +2275,6 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
 			break;
 		}
 
-		/* Receive Gap ACK blocks from peer if any */
-		glen = tipc_get_gap_ack_blks(&ga, l, hdr, true);
-		if (glen > dlen)
-			break;
 		tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr,
 			     &l->mon_state, l->bearer_id);
 
tools/testing/selftests/bpf/prog_tests/timer_crash.c (new file)
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include "timer_crash.skel.h"
+
+enum {
+	MODE_ARRAY,
+	MODE_HASH,
+};
+
+static void test_timer_crash_mode(int mode)
+{
+	struct timer_crash *skel;
+
+	skel = timer_crash__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "timer_crash__open_and_load"))
+		return;
+	skel->bss->pid = getpid();
+	skel->bss->crash_map = mode;
+	if (!ASSERT_OK(timer_crash__attach(skel), "timer_crash__attach"))
+		goto end;
+	usleep(1);
+end:
+	timer_crash__destroy(skel);
+}
+
+void test_timer_crash(void)
+{
+	if (test__start_subtest("array"))
+		test_timer_crash_mode(MODE_ARRAY);
+	if (test__start_subtest("hash"))
+		test_timer_crash_mode(MODE_HASH);
+}
tools/testing/selftests/bpf/progs/timer_crash.c (new file)
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+struct map_elem {
+	struct bpf_timer timer;
+	struct bpf_spin_lock lock;
+};
+
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, int);
+	__type(value, struct map_elem);
+} amap SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, 1);
+	__type(key, int);
+	__type(value, struct map_elem);
+} hmap SEC(".maps");
+
+int pid = 0;
+int crash_map = 0; /* 0 for amap, 1 for hmap */
+
+SEC("fentry/do_nanosleep")
+int sys_enter(void *ctx)
+{
+	struct map_elem *e, value = {};
+	void *map = crash_map ? (void *)&hmap : (void *)&amap;
+
+	if (bpf_get_current_task_btf()->tgid != pid)
+		return 0;
+
+	*(void **)&value = (void *)0xdeadcaf3;
+
+	bpf_map_update_elem(map, &(int){0}, &value, 0);
+	/* For array map, doing bpf_map_update_elem will do a
+	 * check_and_free_timer_in_array, which will trigger the crash if timer
+	 * pointer was overwritten, for hmap we need to use bpf_timer_cancel.
+	 */
+	if (crash_map == 1) {
+		e = bpf_map_lookup_elem(map, &(int){0});
+		if (!e)
+			return 0;
+		bpf_timer_cancel(&e->timer);
+	}
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
@@ -455,6 +455,7 @@ static void mfd_fail_write(int fd)
 			printf("mmap()+mprotect() didn't fail as expected\n");
 			abort();
 		}
+		munmap(p, mfd_def_size);
 	}
 
 	/* verify PUNCH_HOLE fails */
@@ -799,7 +799,6 @@ setup_ovs_bridge() {
 setup() {
 	[ "$(id -u)" -ne 0 ] && echo " need to run as root" && return $ksft_skip
 
-	cleanup
 	for arg do
 		eval setup_${arg} || { echo " ${arg} not supported"; return 1; }
 	done
@@ -810,7 +809,7 @@ trace() {
 
 	for arg do
 		[ "${ns_cmd}" = "" ] && ns_cmd="${arg}" && continue
-		${ns_cmd} tcpdump -s 0 -i "${arg}" -w "${name}_${arg}.pcap" 2> /dev/null &
+		${ns_cmd} tcpdump --immediate-mode -s 0 -i "${arg}" -w "${name}_${arg}.pcap" 2> /dev/null &
		tcpdump_pids="${tcpdump_pids} $!"
 		ns_cmd=
 	done
@@ -1636,6 +1635,10 @@ run_test() {
 
 	unset IFS
 
+	# Since cleanup() relies on variables modified by this subshell, it
+	# has to run in this context.
+	trap cleanup EXIT
+
 	if [ "$VERBOSE" = "1" ]; then
 		printf "\n##########################################################################\n\n"
 	fi
@@ -17,9 +17,6 @@
 #define MAP_FIXED_NOREPLACE 0x100000
 #endif
 
-#define BASE_ADDRESS (256ul * 1024 * 1024)
-
-
 static void dump_maps(void)
 {
 	char cmd[32];
@@ -28,18 +25,46 @@ static void dump_maps(void)
 	system(cmd);
 }
 
+static unsigned long find_base_addr(unsigned long size)
+{
+	void *addr;
+	unsigned long flags;
+
+	flags = MAP_PRIVATE | MAP_ANONYMOUS;
+	addr = mmap(NULL, size, PROT_NONE, flags, -1, 0);
+	if (addr == MAP_FAILED) {
+		printf("Error: couldn't map the space we need for the test\n");
+		return 0;
+	}
+
+	if (munmap(addr, size) != 0) {
+		printf("Error: couldn't map the space we need for the test\n");
+		return 0;
+	}
+	return (unsigned long)addr;
+}
+
 int main(void)
 {
+	unsigned long base_addr;
 	unsigned long flags, addr, size, page_size;
 	char *p;
 
 	page_size = sysconf(_SC_PAGE_SIZE);
 
+	//let's find a base addr that is free before we start the tests
+	size = 5 * page_size;
+	base_addr = find_base_addr(size);
+	if (!base_addr) {
+		printf("Error: couldn't map the space we need for the test\n");
+		return 1;
+	}
+
 	flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE;
 
 	// Check we can map all the areas we need below
 	errno = 0;
-	addr = BASE_ADDRESS;
+	addr = base_addr;
 	size = 5 * page_size;
 	p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
 
@@ -60,7 +85,7 @@ int main(void)
 	printf("unmap() successful\n");
 
 	errno = 0;
-	addr = BASE_ADDRESS + page_size;
+	addr = base_addr + page_size;
 	size = 3 * page_size;
 	p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
 	printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
@@ -80,7 +105,7 @@ int main(void)
 	 * +4 | free | new
 	 */
 	errno = 0;
-	addr = BASE_ADDRESS;
+	addr = base_addr;
 	size = 5 * page_size;
 	p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
 	printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
@@ -101,7 +126,7 @@ int main(void)
 	 * +4 | free |
 	 */
 	errno = 0;
-	addr = BASE_ADDRESS + (2 * page_size);
+	addr = base_addr + (2 * page_size);
 	size = page_size;
 	p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
 	printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
@@ -121,7 +146,7 @@ int main(void)
 	 * +4 | free | new
 	 */
 	errno = 0;
-	addr = BASE_ADDRESS + (3 * page_size);
+	addr = base_addr + (3 * page_size);
 	size = 2 * page_size;
 	p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
 	printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
@@ -141,7 +166,7 @@ int main(void)
 	 * +4 | free |
 	 */
 	errno = 0;
-	addr = BASE_ADDRESS;
+	addr = base_addr;
 	size = 2 * page_size;
 	p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
 	printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
@@ -161,7 +186,7 @@ int main(void)
 	 * +4 | free |
 	 */
 	errno = 0;
-	addr = BASE_ADDRESS;
+	addr = base_addr;
 	size = page_size;
 	p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
 	printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
@@ -181,7 +206,7 @@ int main(void)
 	 * +4 | free | new
 	 */
 	errno = 0;
-	addr = BASE_ADDRESS + (4 * page_size);
+	addr = base_addr + (4 * page_size);
 	size = page_size;
 	p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
 	printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
@@ -192,7 +217,7 @@ int main(void)
 		return 1;
 	}
 
-	addr = BASE_ADDRESS;
+	addr = base_addr;
 	size = 5 * page_size;
 	if (munmap((void *)addr, size) != 0) {
 		dump_maps();