Merge 5.10.170 into android12-5.10-lts
Changes in 5.10.170:
    drm/etnaviv: don't truncate physical page address
    wifi: rtl8xxxu: gen2: Turn on the rate control
    clk: mxl: Switch from direct readl/writel based IO to regmap based IO
    clk: mxl: Remove redundant spinlocks
    clk: mxl: Add option to override gate clks
    clk: mxl: Fix a clk entry by adding relevant flags
    powerpc: dts: t208x: Mark MAC1 and MAC2 as 10G
    clk: mxl: syscon_node_to_regmap() returns error pointers
    random: always mix cycle counter in add_latent_entropy()
    KVM: x86: Fail emulation during EMULTYPE_SKIP on any exception
    KVM: SVM: Skip WRMSR fastpath on VM-Exit if next RIP isn't valid
    KVM: VMX: Execute IBPB on emulated VM-exit when guest has IBRS
    can: kvaser_usb: hydra: help gcc-13 to figure out cmd_len
    powerpc: dts: t208x: Disable 10G on MAC1 and MAC2
    drm/i915/gvt: fix double free bug in split_2MB_gtt_entry
    mac80211: mesh: embedd mesh_paths and mpp_paths into ieee80211_if_mesh
    uaccess: Add speculation barrier to copy_from_user()
    Revert "Revert "block: nbd: add sanity check for first_minor""
    nbd: fix max value for 'first_minor'
    nbd: fix possible overflow for 'first_minor' in nbd_dev_add()
    nbd: fix possible overflow on 'first_minor' in nbd_dev_add()
    wifi: mwifiex: Add missing compatible string for SD8787
    audit: update the mailing list in MAINTAINERS
    ext4: Fix function prototype mismatch for ext4_feat_ktype
    Revert "net/sched: taprio: make qdisc_leaf() see the per-netdev-queue pfifo child qdiscs"
    bpf: add missing header file include
    Linux 5.10.170

Change-Id: I9503ddc9b85049ce811a672092ed1f3c1f8e6d83
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
MAINTAINERS
@@ -3001,7 +3001,7 @@ F:  drivers/net/ieee802154/atusb.h
 AUDIT SUBSYSTEM
 M:  Paul Moore <paul@paul-moore.com>
 M:  Eric Paris <eparis@redhat.com>
-L:  linux-audit@redhat.com (moderated for non-subscribers)
+L:  audit@vger.kernel.org
 S:  Supported
 W:  https://github.com/linux-audit
 T:  git git://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/audit.git
Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 169
+SUBLEVEL = 170
 EXTRAVERSION =
 NAME = Dare mighty things
 
arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-2.dtsi (new file)
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * QorIQ FMan v3 10g port #2 device tree stub [ controller @ offset 0x400000 ]
+ *
+ * Copyright 2022 Sean Anderson <sean.anderson@seco.com>
+ * Copyright 2012 - 2015 Freescale Semiconductor Inc.
+ */
+
+fman@400000 {
+    fman0_rx_0x08: port@88000 {
+        cell-index = <0x8>;
+        compatible = "fsl,fman-v3-port-rx";
+        reg = <0x88000 0x1000>;
+        fsl,fman-10g-port;
+    };
+
+    fman0_tx_0x28: port@a8000 {
+        cell-index = <0x28>;
+        compatible = "fsl,fman-v3-port-tx";
+        reg = <0xa8000 0x1000>;
+        fsl,fman-10g-port;
+    };
+
+    ethernet@e0000 {
+        cell-index = <0>;
+        compatible = "fsl,fman-memac";
+        reg = <0xe0000 0x1000>;
+        fsl,fman-ports = <&fman0_rx_0x08 &fman0_tx_0x28>;
+        ptp-timer = <&ptp_timer0>;
+        pcsphy-handle = <&pcsphy0>;
+    };
+
+    mdio@e1000 {
+        #address-cells = <1>;
+        #size-cells = <0>;
+        compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
+        reg = <0xe1000 0x1000>;
+        fsl,erratum-a011043; /* must ignore read errors */
+
+        pcsphy0: ethernet-phy@0 {
+            reg = <0x0>;
+        };
+    };
+};
arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-3.dtsi (new file)
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * QorIQ FMan v3 10g port #3 device tree stub [ controller @ offset 0x400000 ]
+ *
+ * Copyright 2022 Sean Anderson <sean.anderson@seco.com>
+ * Copyright 2012 - 2015 Freescale Semiconductor Inc.
+ */
+
+fman@400000 {
+    fman0_rx_0x09: port@89000 {
+        cell-index = <0x9>;
+        compatible = "fsl,fman-v3-port-rx";
+        reg = <0x89000 0x1000>;
+        fsl,fman-10g-port;
+    };
+
+    fman0_tx_0x29: port@a9000 {
+        cell-index = <0x29>;
+        compatible = "fsl,fman-v3-port-tx";
+        reg = <0xa9000 0x1000>;
+        fsl,fman-10g-port;
+    };
+
+    ethernet@e2000 {
+        cell-index = <1>;
+        compatible = "fsl,fman-memac";
+        reg = <0xe2000 0x1000>;
+        fsl,fman-ports = <&fman0_rx_0x09 &fman0_tx_0x29>;
+        ptp-timer = <&ptp_timer0>;
+        pcsphy-handle = <&pcsphy1>;
+    };
+
+    mdio@e3000 {
+        #address-cells = <1>;
+        #size-cells = <0>;
+        compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
+        reg = <0xe3000 0x1000>;
+        fsl,erratum-a011043; /* must ignore read errors */
+
+        pcsphy1: ethernet-phy@0 {
+            reg = <0x0>;
+        };
+    };
+};
@@ -609,8 +609,8 @@
 /include/ "qoriq-bman1.dtsi"
 
 /include/ "qoriq-fman3-0.dtsi"
-/include/ "qoriq-fman3-0-1g-0.dtsi"
-/include/ "qoriq-fman3-0-1g-1.dtsi"
+/include/ "qoriq-fman3-0-10g-2.dtsi"
+/include/ "qoriq-fman3-0-10g-3.dtsi"
 /include/ "qoriq-fman3-0-1g-2.dtsi"
 /include/ "qoriq-fman3-0-1g-3.dtsi"
 /include/ "qoriq-fman3-0-1g-4.dtsi"
@@ -659,3 +659,19 @@
     interrupts = <16 2 1 9>;
   };
 };
+
+&fman0_rx_0x08 {
+    /delete-property/ fsl,fman-10g-port;
+};
+
+&fman0_tx_0x28 {
+    /delete-property/ fsl,fman-10g-port;
+};
+
+&fman0_rx_0x09 {
+    /delete-property/ fsl,fman-10g-port;
+};
+
+&fman0_tx_0x29 {
+    /delete-property/ fsl,fman-10g-port;
+};
@@ -3480,8 +3480,14 @@ static void svm_cancel_injection(struct kvm_vcpu *vcpu)
 
 static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
 {
-    if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
-        to_svm(vcpu)->vmcb->control.exit_info_1)
+    struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
+
+    /*
+     * Note, the next RIP must be provided as SRCU isn't held, i.e. KVM
+     * can't read guest memory (dereference memslots) to decode the WRMSR.
+     */
+    if (control->exit_code == SVM_EXIT_MSR && control->exit_info_1 &&
+        nrips && control->next_rip)
         return handle_fastpath_set_msr_irqoff(vcpu);
 
     return EXIT_FASTPATH_NONE;
@@ -4556,6 +4556,17 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
 
     vmx_switch_vmcs(vcpu, &vmx->vmcs01);
 
+    /*
+     * If IBRS is advertised to the vCPU, KVM must flush the indirect
+     * branch predictors when transitioning from L2 to L1, as L1 expects
+     * hardware (KVM in this case) to provide separate predictor modes.
+     * Bare metal isolates VMX root (host) from VMX non-root (guest), but
+     * doesn't isolate different VMCSs, i.e. in this case, doesn't provide
+     * separate modes for L2 vs L1.
+     */
+    if (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
+        indirect_branch_prediction_barrier();
+
     /* Update any VMCS fields that might have changed while L2 ran */
     vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
     vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
@@ -1431,8 +1431,10 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
 
     /*
      * No indirect branch prediction barrier needed when switching
-     * the active VMCS within a guest, e.g. on nested VM-Enter.
-     * The L1 VMM can protect itself with retpolines, IBPB or IBRS.
+     * the active VMCS within a vCPU, unless IBRS is advertised to
+     * the vCPU. To minimize the number of IBPBs executed, KVM
+     * performs IBPB on nested VM-Exit (a single nested transition
+     * may switch the active VMCS multiple times).
      */
     if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev))
         indirect_branch_prediction_barrier();
@@ -7534,7 +7534,9 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                           write_fault_to_spt,
                           emulation_type))
             return 1;
-        if (ctxt->have_exception) {
+
+        if (ctxt->have_exception &&
+            !(emulation_type & EMULTYPE_SKIP)) {
             /*
              * #UD should result in just EMULATION_FAILED, and trap-like
              * exception should not be encountered during decode.
@@ -1865,8 +1865,19 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
     if (!netlink_capable(skb, CAP_SYS_ADMIN))
         return -EPERM;
 
-    if (info->attrs[NBD_ATTR_INDEX])
+    if (info->attrs[NBD_ATTR_INDEX]) {
         index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
+
+        /*
+         * Too big first_minor can cause duplicate creation of
+         * sysfs files/links, since index << part_shift might overflow, or
+         * MKDEV() expect that the max bits of first_minor is 20.
+         */
+        if (index < 0 || index > MINORMASK >> part_shift) {
+            printk(KERN_ERR "nbd: illegal input index %d\n", index);
+            return -EINVAL;
+        }
+    }
     if (!info->attrs[NBD_ATTR_SOCKETS]) {
         printk(KERN_ERR "nbd: must specify at least one socket\n");
         return -EINVAL;
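
The bound used above comes straight from how Linux packs device numbers. A standalone sketch of the arithmetic (userspace C; MINORBITS/MINORMASK mirror include/linux/kdev_t.h, the part_shift value is an invented example):

#include <stdio.h>

#define MINORBITS   20
#define MINORMASK   ((1U << MINORBITS) - 1)

/*
 * first_minor is index << part_shift and must fit in the 20-bit minor
 * field that MKDEV() packs, so the largest safe index is
 * MINORMASK >> part_shift.
 */
static int nbd_index_is_valid(int index, unsigned int part_shift)
{
    return index >= 0 && index <= (int)(MINORMASK >> part_shift);
}

int main(void)
{
    unsigned int part_shift = 5;    /* e.g. 32 partitions per device */

    printf("max index: %u\n", MINORMASK >> part_shift);    /* 32767 */
    printf("index 100000 ok? %d\n", nbd_index_is_valid(100000, part_shift));
    return 0;
}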
@@ -1,8 +1,9 @@
 # SPDX-License-Identifier: GPL-2.0-only
 config CLK_LGM_CGU
     depends on OF && HAS_IOMEM && (X86 || COMPILE_TEST)
+    select MFD_SYSCON
     select OF_EARLY_FLATTREE
     bool "Clock driver for Lightning Mountain(LGM) platform"
     help
-      Clock Generation Unit(CGU) driver for Intel Lightning Mountain(LGM)
-      network processor SoC.
+      Clock Generation Unit(CGU) driver for MaxLinear's x86 based
+      Lightning Mountain(LGM) network processor SoC.
@@ -1,8 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
+ * Copyright (C) 2020-2022 MaxLinear, Inc.
  * Copyright (C) 2020 Intel Corporation.
- * Zhu YiXin <yixin.zhu@intel.com>
- * Rahul Tanwar <rahul.tanwar@intel.com>
+ * Zhu Yixin <yzhu@maxlinear.com>
+ * Rahul Tanwar <rtanwar@maxlinear.com>
  */
 
 #include <linux/clk-provider.h>
@@ -40,13 +41,10 @@ static unsigned long lgm_pll_recalc_rate(struct clk_hw *hw, unsigned long prate)
 {
     struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
     unsigned int div, mult, frac;
-    unsigned long flags;
 
-    spin_lock_irqsave(&pll->lock, flags);
     mult = lgm_get_clk_val(pll->membase, PLL_REF_DIV(pll->reg), 0, 12);
     div = lgm_get_clk_val(pll->membase, PLL_REF_DIV(pll->reg), 18, 6);
     frac = lgm_get_clk_val(pll->membase, pll->reg, 2, 24);
-    spin_unlock_irqrestore(&pll->lock, flags);
 
     if (pll->type == TYPE_LJPLL)
         div *= 4;
@@ -57,12 +55,9 @@ static unsigned long lgm_pll_recalc_rate(struct clk_hw *hw, unsigned long prate)
 static int lgm_pll_is_enabled(struct clk_hw *hw)
 {
     struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
-    unsigned long flags;
     unsigned int ret;
 
-    spin_lock_irqsave(&pll->lock, flags);
     ret = lgm_get_clk_val(pll->membase, pll->reg, 0, 1);
-    spin_unlock_irqrestore(&pll->lock, flags);
 
     return ret;
 }
@@ -70,15 +65,13 @@ static int lgm_pll_is_enabled(struct clk_hw *hw)
 static int lgm_pll_enable(struct clk_hw *hw)
 {
     struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
-    unsigned long flags;
     u32 val;
     int ret;
 
-    spin_lock_irqsave(&pll->lock, flags);
     lgm_set_clk_val(pll->membase, pll->reg, 0, 1, 1);
-    ret = readl_poll_timeout_atomic(pll->membase + pll->reg,
+    ret = regmap_read_poll_timeout_atomic(pll->membase, pll->reg,
                                     val, (val & 0x1), 1, 100);
-    spin_unlock_irqrestore(&pll->lock, flags);
 
     return ret;
 }
@@ -86,11 +79,8 @@ static int lgm_pll_enable(struct clk_hw *hw)
 static void lgm_pll_disable(struct clk_hw *hw)
 {
     struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
-    unsigned long flags;
 
-    spin_lock_irqsave(&pll->lock, flags);
     lgm_set_clk_val(pll->membase, pll->reg, 0, 1, 0);
-    spin_unlock_irqrestore(&pll->lock, flags);
 }
 
 static const struct clk_ops lgm_pll_ops = {
@@ -121,7 +111,6 @@ lgm_clk_register_pll(struct lgm_clk_provider *ctx,
         return ERR_PTR(-ENOMEM);
 
     pll->membase = ctx->membase;
-    pll->lock = ctx->lock;
     pll->reg = list->reg;
     pll->flags = list->flags;
     pll->type = list->type;
@@ -1,8 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
+ * Copyright (C) 2020-2022 MaxLinear, Inc.
  * Copyright (C) 2020 Intel Corporation.
- * Zhu YiXin <yixin.zhu@intel.com>
- * Rahul Tanwar <rahul.tanwar@intel.com>
+ * Zhu Yixin <yzhu@maxlinear.com>
+ * Rahul Tanwar <rtanwar@maxlinear.com>
  */
 #include <linux/clk-provider.h>
 #include <linux/device.h>
@@ -24,14 +25,10 @@
 static struct clk_hw *lgm_clk_register_fixed(struct lgm_clk_provider *ctx,
                                              const struct lgm_clk_branch *list)
 {
-    unsigned long flags;
 
-    if (list->div_flags & CLOCK_FLAG_VAL_INIT) {
-        spin_lock_irqsave(&ctx->lock, flags);
+    if (list->div_flags & CLOCK_FLAG_VAL_INIT)
         lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
                         list->div_width, list->div_val);
-        spin_unlock_irqrestore(&ctx->lock, flags);
-    }
 
     return clk_hw_register_fixed_rate(NULL, list->name,
                                       list->parent_data[0].name,
@@ -41,33 +38,27 @@ static struct clk_hw *lgm_clk_register_fixed(struct lgm_clk_provider *ctx,
 static u8 lgm_clk_mux_get_parent(struct clk_hw *hw)
 {
     struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
-    unsigned long flags;
     u32 val;
 
-    spin_lock_irqsave(&mux->lock, flags);
     if (mux->flags & MUX_CLK_SW)
         val = mux->reg;
     else
         val = lgm_get_clk_val(mux->membase, mux->reg, mux->shift,
                               mux->width);
-    spin_unlock_irqrestore(&mux->lock, flags);
     return clk_mux_val_to_index(hw, NULL, mux->flags, val);
 }
 
 static int lgm_clk_mux_set_parent(struct clk_hw *hw, u8 index)
 {
     struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
-    unsigned long flags;
     u32 val;
 
     val = clk_mux_index_to_val(NULL, mux->flags, index);
-    spin_lock_irqsave(&mux->lock, flags);
     if (mux->flags & MUX_CLK_SW)
         mux->reg = val;
     else
         lgm_set_clk_val(mux->membase, mux->reg, mux->shift,
                         mux->width, val);
-    spin_unlock_irqrestore(&mux->lock, flags);
 
     return 0;
 }
@@ -90,7 +81,7 @@ static struct clk_hw *
 lgm_clk_register_mux(struct lgm_clk_provider *ctx,
                      const struct lgm_clk_branch *list)
 {
-    unsigned long flags, cflags = list->mux_flags;
+    unsigned long cflags = list->mux_flags;
     struct device *dev = ctx->dev;
     u8 shift = list->mux_shift;
     u8 width = list->mux_width;
@@ -111,7 +102,6 @@ lgm_clk_register_mux(struct lgm_clk_provider *ctx,
     init.num_parents = list->num_parents;
 
     mux->membase = ctx->membase;
-    mux->lock = ctx->lock;
     mux->reg = reg;
     mux->shift = shift;
     mux->width = width;
@@ -123,11 +113,8 @@ lgm_clk_register_mux(struct lgm_clk_provider *ctx,
     if (ret)
         return ERR_PTR(ret);
 
-    if (cflags & CLOCK_FLAG_VAL_INIT) {
-        spin_lock_irqsave(&mux->lock, flags);
+    if (cflags & CLOCK_FLAG_VAL_INIT)
         lgm_set_clk_val(mux->membase, reg, shift, width, list->mux_val);
-        spin_unlock_irqrestore(&mux->lock, flags);
-    }
 
     return hw;
 }
@@ -136,13 +123,10 @@ static unsigned long
 lgm_clk_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
 {
     struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
-    unsigned long flags;
     unsigned int val;
 
-    spin_lock_irqsave(&divider->lock, flags);
     val = lgm_get_clk_val(divider->membase, divider->reg,
                           divider->shift, divider->width);
-    spin_unlock_irqrestore(&divider->lock, flags);
 
     return divider_recalc_rate(hw, parent_rate, val, divider->table,
                                divider->flags, divider->width);
@@ -163,7 +147,6 @@ lgm_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
                          unsigned long prate)
 {
     struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
-    unsigned long flags;
     int value;
 
     value = divider_get_val(rate, prate, divider->table,
@@ -171,10 +154,8 @@ lgm_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
     if (value < 0)
         return value;
 
-    spin_lock_irqsave(&divider->lock, flags);
     lgm_set_clk_val(divider->membase, divider->reg,
                     divider->shift, divider->width, value);
-    spin_unlock_irqrestore(&divider->lock, flags);
 
     return 0;
 }
@@ -182,12 +163,10 @@ lgm_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
 static int lgm_clk_divider_enable_disable(struct clk_hw *hw, int enable)
 {
     struct lgm_clk_divider *div = to_lgm_clk_divider(hw);
-    unsigned long flags;
 
-    spin_lock_irqsave(&div->lock, flags);
+    if (div->flags != DIV_CLK_NO_MASK)
         lgm_set_clk_val(div->membase, div->reg, div->shift_gate,
                         div->width_gate, enable);
-    spin_unlock_irqrestore(&div->lock, flags);
     return 0;
 }
 
@@ -213,7 +192,7 @@ static struct clk_hw *
 lgm_clk_register_divider(struct lgm_clk_provider *ctx,
                          const struct lgm_clk_branch *list)
 {
-    unsigned long flags, cflags = list->div_flags;
+    unsigned long cflags = list->div_flags;
     struct device *dev = ctx->dev;
     struct lgm_clk_divider *div;
     struct clk_init_data init = {};
@@ -236,7 +215,6 @@ lgm_clk_register_divider(struct lgm_clk_provider *ctx,
     init.num_parents = 1;
 
     div->membase = ctx->membase;
-    div->lock = ctx->lock;
     div->reg = reg;
     div->shift = shift;
     div->width = width;
@@ -251,11 +229,8 @@ lgm_clk_register_divider(struct lgm_clk_provider *ctx,
     if (ret)
         return ERR_PTR(ret);
 
-    if (cflags & CLOCK_FLAG_VAL_INIT) {
-        spin_lock_irqsave(&div->lock, flags);
+    if (cflags & CLOCK_FLAG_VAL_INIT)
         lgm_set_clk_val(div->membase, reg, shift, width, list->div_val);
-        spin_unlock_irqrestore(&div->lock, flags);
-    }
 
     return hw;
 }
@@ -264,7 +239,6 @@ static struct clk_hw *
 lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx,
                               const struct lgm_clk_branch *list)
 {
-    unsigned long flags;
     struct clk_hw *hw;
 
     hw = clk_hw_register_fixed_factor(ctx->dev, list->name,
@@ -273,12 +247,9 @@ lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx,
     if (IS_ERR(hw))
         return ERR_CAST(hw);
 
-    if (list->div_flags & CLOCK_FLAG_VAL_INIT) {
-        spin_lock_irqsave(&ctx->lock, flags);
+    if (list->div_flags & CLOCK_FLAG_VAL_INIT)
         lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
                         list->div_width, list->div_val);
-        spin_unlock_irqrestore(&ctx->lock, flags);
-    }
 
     return hw;
 }
@@ -286,13 +257,10 @@ lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx,
 static int lgm_clk_gate_enable(struct clk_hw *hw)
 {
     struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
-    unsigned long flags;
     unsigned int reg;
 
-    spin_lock_irqsave(&gate->lock, flags);
     reg = GATE_HW_REG_EN(gate->reg);
     lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1);
-    spin_unlock_irqrestore(&gate->lock, flags);
 
     return 0;
 }
@@ -300,25 +268,19 @@ static int lgm_clk_gate_enable(struct clk_hw *hw)
 static void lgm_clk_gate_disable(struct clk_hw *hw)
 {
     struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
-    unsigned long flags;
     unsigned int reg;
 
-    spin_lock_irqsave(&gate->lock, flags);
     reg = GATE_HW_REG_DIS(gate->reg);
     lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1);
-    spin_unlock_irqrestore(&gate->lock, flags);
 }
 
 static int lgm_clk_gate_is_enabled(struct clk_hw *hw)
 {
     struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
     unsigned int reg, ret;
-    unsigned long flags;
 
-    spin_lock_irqsave(&gate->lock, flags);
     reg = GATE_HW_REG_STAT(gate->reg);
     ret = lgm_get_clk_val(gate->membase, reg, gate->shift, 1);
-    spin_unlock_irqrestore(&gate->lock, flags);
 
     return ret;
 }
@@ -333,7 +295,7 @@ static struct clk_hw *
 lgm_clk_register_gate(struct lgm_clk_provider *ctx,
                       const struct lgm_clk_branch *list)
 {
-    unsigned long flags, cflags = list->gate_flags;
+    unsigned long cflags = list->gate_flags;
     const char *pname = list->parent_data[0].name;
     struct device *dev = ctx->dev;
     u8 shift = list->gate_shift;
@@ -354,7 +316,6 @@ lgm_clk_register_gate(struct lgm_clk_provider *ctx,
     init.num_parents = pname ? 1 : 0;
 
     gate->membase = ctx->membase;
-    gate->lock = ctx->lock;
     gate->reg = reg;
     gate->shift = shift;
     gate->flags = cflags;
@@ -366,9 +327,7 @@ lgm_clk_register_gate(struct lgm_clk_provider *ctx,
         return ERR_PTR(ret);
 
     if (cflags & CLOCK_FLAG_VAL_INIT) {
-        spin_lock_irqsave(&gate->lock, flags);
         lgm_set_clk_val(gate->membase, reg, shift, 1, list->gate_val);
-        spin_unlock_irqrestore(&gate->lock, flags);
     }
 
     return hw;
@@ -396,8 +355,22 @@ int lgm_clk_register_branches(struct lgm_clk_provider *ctx,
             hw = lgm_clk_register_fixed_factor(ctx, list);
             break;
         case CLK_TYPE_GATE:
-            hw = lgm_clk_register_gate(ctx, list);
+            if (list->gate_flags & GATE_CLK_HW) {
+                hw = lgm_clk_register_gate(ctx, list);
+            } else {
+                /*
+                 * GATE_CLKs can be controlled either from
+                 * CGU clk driver i.e. this driver or directly
+                 * from power management driver/daemon. It is
+                 * dependent on the power policy/profile requirements
+                 * of the end product. To override control of gate
+                 * clks from this driver, provide NULL for this index
+                 * of gate clk provider.
+                 */
+                hw = NULL;
+            }
             break;
 
         default:
             dev_err(ctx->dev, "invalid clk type\n");
             return -EINVAL;
@@ -443,24 +416,18 @@ lgm_clk_ddiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
 static int lgm_clk_ddiv_enable(struct clk_hw *hw)
 {
     struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
-    unsigned long flags;
 
-    spin_lock_irqsave(&ddiv->lock, flags);
     lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
                     ddiv->width_gate, 1);
-    spin_unlock_irqrestore(&ddiv->lock, flags);
     return 0;
 }
 
 static void lgm_clk_ddiv_disable(struct clk_hw *hw)
 {
     struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
-    unsigned long flags;
 
-    spin_lock_irqsave(&ddiv->lock, flags);
     lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
                     ddiv->width_gate, 0);
-    spin_unlock_irqrestore(&ddiv->lock, flags);
 }
 
 static int
@@ -497,32 +464,25 @@ lgm_clk_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
 {
     struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
     u32 div, ddiv1, ddiv2;
-    unsigned long flags;
 
     div = DIV_ROUND_CLOSEST_ULL((u64)prate, rate);
 
-    spin_lock_irqsave(&ddiv->lock, flags);
     if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
         div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
         div = div * 2;
     }
 
-    if (div <= 0) {
-        spin_unlock_irqrestore(&ddiv->lock, flags);
+    if (div <= 0)
         return -EINVAL;
-    }
 
-    if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2)) {
-        spin_unlock_irqrestore(&ddiv->lock, flags);
+    if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2))
         return -EINVAL;
-    }
 
     lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift0, ddiv->width0,
                     ddiv1 - 1);
 
     lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift1, ddiv->width1,
                     ddiv2 - 1);
-    spin_unlock_irqrestore(&ddiv->lock, flags);
 
     return 0;
 }
@@ -533,18 +493,15 @@ lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
 {
     struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
     u32 div, ddiv1, ddiv2;
-    unsigned long flags;
     u64 rate64;
 
     div = DIV_ROUND_CLOSEST_ULL((u64)*prate, rate);
 
     /* if predivide bit is enabled, modify div by factor of 2.5 */
-    spin_lock_irqsave(&ddiv->lock, flags);
     if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
         div = div * 2;
         div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
     }
-    spin_unlock_irqrestore(&ddiv->lock, flags);
 
     if (div <= 0)
         return *prate;
@@ -558,12 +515,10 @@ lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
     do_div(rate64, ddiv2);
 
     /* if predivide bit is enabled, modify rounded rate by factor of 2.5 */
-    spin_lock_irqsave(&ddiv->lock, flags);
     if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
         rate64 = rate64 * 2;
         rate64 = DIV_ROUND_CLOSEST_ULL(rate64, 5);
     }
-    spin_unlock_irqrestore(&ddiv->lock, flags);
 
     return rate64;
 }
@@ -600,7 +555,6 @@ int lgm_clk_register_ddiv(struct lgm_clk_provider *ctx,
     init.num_parents = 1;
 
     ddiv->membase = ctx->membase;
-    ddiv->lock = ctx->lock;
     ddiv->reg = list->reg;
     ddiv->shift0 = list->shift0;
     ddiv->width0 = list->width0;
@@ -1,28 +1,28 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright(c) 2020 Intel Corporation.
- * Zhu YiXin <yixin.zhu@intel.com>
- * Rahul Tanwar <rahul.tanwar@intel.com>
+ * Copyright (C) 2020-2022 MaxLinear, Inc.
+ * Copyright (C) 2020 Intel Corporation.
+ * Zhu Yixin <yzhu@maxlinear.com>
+ * Rahul Tanwar <rtanwar@maxlinear.com>
  */
 
 #ifndef __CLK_CGU_H
 #define __CLK_CGU_H
 
-#include <linux/io.h>
+#include <linux/regmap.h>
 
 struct lgm_clk_mux {
     struct clk_hw hw;
-    void __iomem *membase;
+    struct regmap *membase;
     unsigned int reg;
     u8 shift;
     u8 width;
     unsigned long flags;
-    spinlock_t lock;
 };
 
 struct lgm_clk_divider {
     struct clk_hw hw;
-    void __iomem *membase;
+    struct regmap *membase;
     unsigned int reg;
     u8 shift;
     u8 width;
@@ -30,12 +30,11 @@ struct lgm_clk_divider {
     u8 width_gate;
     unsigned long flags;
     const struct clk_div_table *table;
-    spinlock_t lock;
 };
 
 struct lgm_clk_ddiv {
     struct clk_hw hw;
-    void __iomem *membase;
+    struct regmap *membase;
     unsigned int reg;
     u8 shift0;
     u8 width0;
@@ -48,16 +47,14 @@ struct lgm_clk_ddiv {
     unsigned int mult;
     unsigned int div;
     unsigned long flags;
-    spinlock_t lock;
 };
 
 struct lgm_clk_gate {
     struct clk_hw hw;
-    void __iomem *membase;
+    struct regmap *membase;
     unsigned int reg;
     u8 shift;
     unsigned long flags;
-    spinlock_t lock;
 };
 
 enum lgm_clk_type {
@@ -77,11 +74,10 @@ enum lgm_clk_type {
  * @clk_data: array of hw clocks and clk number.
  */
 struct lgm_clk_provider {
-    void __iomem *membase;
+    struct regmap *membase;
     struct device_node *np;
     struct device *dev;
     struct clk_hw_onecell_data clk_data;
-    spinlock_t lock;
 };
 
 enum pll_type {
@@ -92,11 +88,10 @@ enum pll_type {
 
 struct lgm_clk_pll {
     struct clk_hw hw;
-    void __iomem *membase;
+    struct regmap *membase;
     unsigned int reg;
     unsigned long flags;
     enum pll_type type;
-    spinlock_t lock;
 };
 
 /**
@@ -202,6 +197,8 @@ struct lgm_clk_branch {
 /* clock flags definition */
 #define CLOCK_FLAG_VAL_INIT    BIT(16)
 #define MUX_CLK_SW             BIT(17)
+#define GATE_CLK_HW            BIT(18)
+#define DIV_CLK_NO_MASK        BIT(19)
 
 #define LGM_MUX(_id, _name, _pdata, _f, _reg,        \
                 _shift, _width, _cf, _v)             \
@@ -300,29 +297,32 @@ struct lgm_clk_branch {
     .div = _d,                                       \
 }
 
-static inline void lgm_set_clk_val(void __iomem *membase, u32 reg,
+static inline void lgm_set_clk_val(struct regmap *membase, u32 reg,
                                    u8 shift, u8 width, u32 set_val)
 {
     u32 mask = (GENMASK(width - 1, 0) << shift);
-    u32 regval;
 
-    regval = readl(membase + reg);
-    regval = (regval & ~mask) | ((set_val << shift) & mask);
-    writel(regval, membase + reg);
+    regmap_update_bits(membase, reg, mask, set_val << shift);
 }
 
-static inline u32 lgm_get_clk_val(void __iomem *membase, u32 reg,
+static inline u32 lgm_get_clk_val(struct regmap *membase, u32 reg,
                                   u8 shift, u8 width)
 {
     u32 mask = (GENMASK(width - 1, 0) << shift);
    u32 val;
 
-    val = readl(membase + reg);
+    if (regmap_read(membase, reg, &val)) {
+        WARN_ONCE(1, "Failed to read clk reg: 0x%x\n", reg);
+        return 0;
+    }
+
     val = (val & mask) >> shift;
 
     return val;
 }
 
 int lgm_clk_register_branches(struct lgm_clk_provider *ctx,
                               const struct lgm_clk_branch *list,
                               unsigned int nr_clk);
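
The lgm_set_clk_val() conversion above leans on regmap_update_bits() doing the same read-modify-write the old readl()/writel() pair spelled out. A plain C model of that equivalence (illustrative only; GENMASK32 is re-derived here for 32-bit values):

#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l)    ((~0U << (l)) & (~0U >> (31 - (h))))

/* models regmap_update_bits(map, reg, mask, set_val << shift) */
static uint32_t update_bits(uint32_t regval, uint8_t shift, uint8_t width,
                            uint32_t set_val)
{
    uint32_t mask = GENMASK32(width - 1, 0) << shift;

    return (regval & ~mask) | ((set_val << shift) & mask);
}

int main(void)
{
    /* write 4 into a 3-bit field at bit 25 (cf. the DCL divider in CGU_PCMCR) */
    printf("0x%08x\n", update_bits(0xffffffff, 25, 3, 4));    /* 0xf9ffffff */
    return 0;
}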
@@ -1,10 +1,12 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
+ * Copyright (C) 2020-2022 MaxLinear, Inc.
  * Copyright (C) 2020 Intel Corporation.
- * Zhu YiXin <yixin.zhu@intel.com>
- * Rahul Tanwar <rahul.tanwar@intel.com>
+ * Zhu Yixin <yzhu@maxlinear.com>
+ * Rahul Tanwar <rtanwar@maxlinear.com>
  */
 #include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <dt-bindings/clock/intel,lgm-clk.h>
@@ -253,8 +255,8 @@ static const struct lgm_clk_branch lgm_branch_clks[] = {
     LGM_FIXED(LGM_CLK_SLIC, "slic", NULL, 0, CGU_IF_CLK1,
               8, 2, CLOCK_FLAG_VAL_INIT, 8192000, 2),
     LGM_FIXED(LGM_CLK_DOCSIS, "v_docsis", NULL, 0, 0, 0, 0, 0, 16000000, 0),
-    LGM_DIV(LGM_CLK_DCL, "dcl", "v_ifclk", 0, CGU_PCMCR,
-            25, 3, 0, 0, 0, 0, dcl_div),
+    LGM_DIV(LGM_CLK_DCL, "dcl", "v_ifclk", CLK_SET_RATE_PARENT, CGU_PCMCR,
+            25, 3, 0, 0, DIV_CLK_NO_MASK, 0, dcl_div),
     LGM_MUX(LGM_CLK_PCM, "pcm", pcm_p, 0, CGU_C55_PCMCR,
             0, 1, CLK_MUX_ROUND_CLOSEST, 0),
     LGM_FIXED_FACTOR(LGM_CLK_DDR_PHY, "ddr_phy", "ddr",
@@ -433,13 +435,15 @@ static int lgm_cgu_probe(struct platform_device *pdev)
 
     ctx->clk_data.num = CLK_NR_CLKS;
 
-    ctx->membase = devm_platform_ioremap_resource(pdev, 0);
-    if (IS_ERR(ctx->membase))
+    ctx->membase = syscon_node_to_regmap(np);
+    if (IS_ERR(ctx->membase)) {
+        dev_err(dev, "Failed to get clk CGU iomem\n");
         return PTR_ERR(ctx->membase);
+    }
 
     ctx->np = np;
     ctx->dev = dev;
-    spin_lock_init(&ctx->lock);
 
     ret = lgm_clk_register_plls(ctx, lgm_pll_clks,
                                 ARRAY_SIZE(lgm_pll_clks));
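
The probe fix above matters because syscon_node_to_regmap() reports failure as an ERR_PTR(), never NULL, so only IS_ERR()/PTR_ERR() catch it. A simplified userspace model of that kernel convention (the constants and the fake getter are illustrative):

#include <stdio.h>

#define MAX_ERRNO    4095
#define ENODEV_ERR   19

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* stand-in for syscon_node_to_regmap(): fails with an error pointer */
static void *get_regmap(int fail)
{
    return fail ? ERR_PTR(-ENODEV_ERR) : (void *)0x1000;
}

int main(void)
{
    void *map = get_regmap(1);

    if (IS_ERR(map)) {    /* a NULL check would sail right past this */
        printf("probe would fail with %ld\n", PTR_ERR(map));
        return 1;
    }
    return 0;
}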
@@ -80,10 +80,10 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
         return -EINVAL;
 
     for_each_sgtable_dma_sg(sgt, sg, i) {
-        u32 pa = sg_dma_address(sg) - sg->offset;
+        phys_addr_t pa = sg_dma_address(sg) - sg->offset;
         size_t bytes = sg_dma_len(sg) + sg->offset;
 
-        VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
+        VERB("map[%d]: %08x %pap(%zx)", i, iova, &pa, bytes);
 
         ret = etnaviv_context_map(context, da, pa, bytes, prot);
         if (ret)
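
The one-liner above fixes a silent narrowing: where DMA addresses exceed 32 bits, storing sg_dma_address() in a u32 drops the high bits and the GPU is mapped to the wrong page. A sketch of the failure mode (values invented; phys_addr_t modeled as 64-bit, as on arm64/x86-64):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;

int main(void)
{
    uint64_t dma_addr = 0x1ffff0000ULL;        /* page above the 4 GiB line */
    uint32_t old_pa = (uint32_t)dma_addr;      /* old: u32 pa = ... */
    phys_addr_t new_pa = dma_addr;             /* new: phys_addr_t pa = ... */

    printf("truncated: 0x%08" PRIx32 "\n", old_pa);    /* 0xffff0000 -- wrong page */
    printf("full:      0x%" PRIx64 "\n", new_pa);      /* 0x1ffff0000 */
    return 0;
}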
@@ -1192,10 +1192,8 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
     for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
         ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
                 start_gfn + sub_index, PAGE_SIZE, &dma_addr);
-        if (ret) {
-            ppgtt_invalidate_spt(spt);
-            return ret;
-        }
+        if (ret)
+            goto err;
         sub_se.val64 = se->val64;
 
         /* Copy the PAT field from PDE. */
@@ -1214,6 +1212,17 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
     ops->set_pfn(se, sub_spt->shadow_page.mfn);
     ppgtt_set_shadow_entry(spt, se, index);
     return 0;
+err:
+    /* Cancel the existing addess mappings of DMA addr. */
+    for_each_present_shadow_entry(sub_spt, &sub_se, sub_index) {
+        gvt_vdbg_mm("invalidate 4K entry\n");
+        ppgtt_invalidate_pte(sub_spt, &sub_se);
+    }
+    /* Release the new allocated spt. */
+    trace_spt_change(sub_spt->vgpu->id, "release", sub_spt,
+                     sub_spt->guest_page.gfn, sub_spt->shadow_page.type);
+    ppgtt_free_spt(sub_spt);
+    return ret;
 }
 
 static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
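
The double free goes away because cleanup now happens once, at a single err: label, instead of inside the loop where the caller could free the same spt again. A generic sketch of that unwind pattern (resource names invented; not the i915 code):

#include <stdlib.h>

#define NR_SLOTS 16

static int setup_all(int n)
{
    void *slots[NR_SLOTS] = { 0 };
    int i, ret = 0;

    for (i = 0; i < n && i < NR_SLOTS; i++) {
        slots[i] = malloc(64);
        if (!slots[i]) {
            ret = -1;
            goto err;    /* one exit path owns all cleanup */
        }
    }
    return 0;
err:
    while (--i >= 0) {    /* release only what was acquired */
        free(slots[i]);
        slots[i] = NULL;
    }
    return ret;
}

int main(void)
{
    return setup_all(4) ? 1 : 0;
}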
@@ -518,6 +518,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev,
|
|||||||
u8 cmd_no, int channel)
|
u8 cmd_no, int channel)
|
||||||
{
|
{
|
||||||
struct kvaser_cmd *cmd;
|
struct kvaser_cmd *cmd;
|
||||||
|
size_t cmd_len;
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
|
cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
|
||||||
@@ -525,6 +526,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev,
|
|||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
cmd->header.cmd_no = cmd_no;
|
cmd->header.cmd_no = cmd_no;
|
||||||
|
cmd_len = kvaser_usb_hydra_cmd_size(cmd);
|
||||||
if (channel < 0) {
|
if (channel < 0) {
|
||||||
kvaser_usb_hydra_set_cmd_dest_he
|
kvaser_usb_hydra_set_cmd_dest_he
|
||||||
(cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL);
|
(cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL);
|
||||||
@@ -541,7 +543,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev,
|
|||||||
kvaser_usb_hydra_set_cmd_transid
|
kvaser_usb_hydra_set_cmd_transid
|
||||||
(cmd, kvaser_usb_hydra_get_next_transid(dev));
|
(cmd, kvaser_usb_hydra_get_next_transid(dev));
|
||||||
|
|
||||||
err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
|
err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
|
||||||
if (err)
|
if (err)
|
||||||
goto end;
|
goto end;
|
||||||
|
|
||||||
@@ -557,6 +559,7 @@ kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv,
|
|||||||
{
|
{
|
||||||
struct kvaser_cmd *cmd;
|
struct kvaser_cmd *cmd;
|
||||||
struct kvaser_usb *dev = priv->dev;
|
struct kvaser_usb *dev = priv->dev;
|
||||||
|
size_t cmd_len;
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC);
|
cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC);
|
||||||
@@ -564,14 +567,14 @@ kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv,
|
|||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
cmd->header.cmd_no = cmd_no;
|
cmd->header.cmd_no = cmd_no;
|
||||||
|
cmd_len = kvaser_usb_hydra_cmd_size(cmd);
|
||||||
|
|
||||||
kvaser_usb_hydra_set_cmd_dest_he
|
kvaser_usb_hydra_set_cmd_dest_he
|
||||||
(cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
|
(cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
|
||||||
kvaser_usb_hydra_set_cmd_transid
|
kvaser_usb_hydra_set_cmd_transid
|
||||||
(cmd, kvaser_usb_hydra_get_next_transid(dev));
|
(cmd, kvaser_usb_hydra_get_next_transid(dev));
|
||||||
|
|
||||||
err = kvaser_usb_send_cmd_async(priv, cmd,
|
err = kvaser_usb_send_cmd_async(priv, cmd, cmd_len);
|
||||||
kvaser_usb_hydra_cmd_size(cmd));
|
|
||||||
if (err)
|
if (err)
|
||||||
kfree(cmd);
|
kfree(cmd);
|
||||||
|
|
||||||
@@ -715,6 +718,7 @@ static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev,
|
|||||||
{
|
{
|
||||||
struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
|
struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
|
||||||
struct kvaser_cmd *cmd;
|
struct kvaser_cmd *cmd;
|
||||||
|
size_t cmd_len;
|
||||||
u32 value = 0;
|
u32 value = 0;
|
||||||
u32 mask = 0;
|
u32 mask = 0;
|
||||||
u16 cap_cmd_res;
|
u16 cap_cmd_res;
|
||||||
@@ -726,13 +730,14 @@ static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev,
|
|||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
cmd->header.cmd_no = CMD_GET_CAPABILITIES_REQ;
|
cmd->header.cmd_no = CMD_GET_CAPABILITIES_REQ;
|
||||||
|
cmd_len = kvaser_usb_hydra_cmd_size(cmd);
|
||||||
cmd->cap_req.cap_cmd = cpu_to_le16(cap_cmd_req);
|
cmd->cap_req.cap_cmd = cpu_to_le16(cap_cmd_req);
|
||||||
|
|
||||||
kvaser_usb_hydra_set_cmd_dest_he(cmd, card_data->hydra.sysdbg_he);
|
kvaser_usb_hydra_set_cmd_dest_he(cmd, card_data->hydra.sysdbg_he);
|
||||||
kvaser_usb_hydra_set_cmd_transid
|
kvaser_usb_hydra_set_cmd_transid
|
||||||
(cmd, kvaser_usb_hydra_get_next_transid(dev));
|
(cmd, kvaser_usb_hydra_get_next_transid(dev));
|
||||||
|
|
||||||
err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
|
err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
|
||||||
if (err)
|
if (err)
|
||||||
goto end;
|
goto end;
|
||||||
|
|
||||||
@@ -1555,6 +1560,7 @@ static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv,
|
|||||||
struct kvaser_usb *dev = priv->dev;
|
struct kvaser_usb *dev = priv->dev;
|
||||||
struct kvaser_usb_net_hydra_priv *hydra = priv->sub_priv;
|
struct kvaser_usb_net_hydra_priv *hydra = priv->sub_priv;
|
||||||
struct kvaser_cmd *cmd;
|
struct kvaser_cmd *cmd;
|
||||||
|
size_t cmd_len;
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
if (!hydra)
|
if (!hydra)
|
||||||
@@ -1565,6 +1571,7 @@ static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv,
|
|||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
cmd->header.cmd_no = CMD_GET_BUSPARAMS_REQ;
|
cmd->header.cmd_no = CMD_GET_BUSPARAMS_REQ;
|
||||||
|
cmd_len = kvaser_usb_hydra_cmd_size(cmd);
|
||||||
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
 	kvaser_usb_hydra_set_cmd_dest_he
 		(cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
 	kvaser_usb_hydra_set_cmd_transid
@@ -1574,7 +1581,7 @@ static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv,

 	reinit_completion(&priv->get_busparams_comp);

-	err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+	err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
 	if (err)
 		return err;

@@ -1601,6 +1608,7 @@ static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev,
 	struct kvaser_cmd *cmd;
 	struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
 	struct kvaser_usb *dev = priv->dev;
+	size_t cmd_len;
 	int err;

 	cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
@@ -1608,6 +1616,7 @@ static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev,
 		return -ENOMEM;

 	cmd->header.cmd_no = CMD_SET_BUSPARAMS_REQ;
+	cmd_len = kvaser_usb_hydra_cmd_size(cmd);
 	memcpy(&cmd->set_busparams_req.busparams_nominal, busparams,
 	       sizeof(cmd->set_busparams_req.busparams_nominal));

@@ -1616,7 +1625,7 @@ static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev,
 	kvaser_usb_hydra_set_cmd_transid
 		(cmd, kvaser_usb_hydra_get_next_transid(dev));

-	err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+	err = kvaser_usb_send_cmd(dev, cmd, cmd_len);

 	kfree(cmd);

@@ -1629,6 +1638,7 @@ static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev,
 	struct kvaser_cmd *cmd;
 	struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
 	struct kvaser_usb *dev = priv->dev;
+	size_t cmd_len;
 	int err;

 	cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
@@ -1636,6 +1646,7 @@ static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev,
 		return -ENOMEM;

 	cmd->header.cmd_no = CMD_SET_BUSPARAMS_FD_REQ;
+	cmd_len = kvaser_usb_hydra_cmd_size(cmd);
 	memcpy(&cmd->set_busparams_req.busparams_data, busparams,
 	       sizeof(cmd->set_busparams_req.busparams_data));

@@ -1653,7 +1664,7 @@ static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev,
 	kvaser_usb_hydra_set_cmd_transid
 		(cmd, kvaser_usb_hydra_get_next_transid(dev));

-	err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+	err = kvaser_usb_send_cmd(dev, cmd, cmd_len);

 	kfree(cmd);

@@ -1781,6 +1792,7 @@ static int kvaser_usb_hydra_get_software_info(struct kvaser_usb *dev)
 static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
 {
 	struct kvaser_cmd *cmd;
+	size_t cmd_len;
 	int err;
 	u32 flags;
 	struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
@@ -1790,6 +1802,7 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
 		return -ENOMEM;

 	cmd->header.cmd_no = CMD_GET_SOFTWARE_DETAILS_REQ;
+	cmd_len = kvaser_usb_hydra_cmd_size(cmd);
 	cmd->sw_detail_req.use_ext_cmd = 1;
 	kvaser_usb_hydra_set_cmd_dest_he
 		(cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL);
@@ -1797,7 +1810,7 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
 	kvaser_usb_hydra_set_cmd_transid
 		(cmd, kvaser_usb_hydra_get_next_transid(dev));

-	err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+	err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
 	if (err)
 		goto end;

@@ -1913,6 +1926,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
 {
 	struct kvaser_usb *dev = priv->dev;
 	struct kvaser_cmd *cmd;
+	size_t cmd_len;
 	int err;

 	if ((priv->can.ctrlmode &
@@ -1928,6 +1942,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
 		return -ENOMEM;

 	cmd->header.cmd_no = CMD_SET_DRIVERMODE_REQ;
+	cmd_len = kvaser_usb_hydra_cmd_size(cmd);
 	kvaser_usb_hydra_set_cmd_dest_he
 		(cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
 	kvaser_usb_hydra_set_cmd_transid
@@ -1937,7 +1952,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
 	else
 		cmd->set_ctrlmode.mode = KVASER_USB_HYDRA_CTRLMODE_NORMAL;

-	err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+	err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
 	kfree(cmd);

 	return err;
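The pattern repeated through these hunks is mechanical: kvaser_usb_hydra_cmd_size() derives the transfer length from cmd->header.cmd_no, and caching the result in a local cmd_len right after cmd_no is assigned makes that derivation visible to the compiler (the stated goal is to help gcc-13's flow analysis) and saves a recomputation at the send site. A minimal user-space sketch of the idea, with invented demo_* names standing in for the driver's real layout:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for the driver's command layout. */
struct demo_cmd {
	struct {
		unsigned char cmd_no;
	} header;
	unsigned char payload[30];
};

/* Stand-in for kvaser_usb_hydra_cmd_size(): length depends on cmd_no. */
static size_t demo_cmd_size(const struct demo_cmd *cmd)
{
	return cmd->header.cmd_no == 0xff ? sizeof(*cmd) : 8;
}

static int demo_send_cmd(const void *buf, size_t len)
{
	(void)buf;
	printf("sending %zu bytes\n", len);
	return 0;
}

int main(void)
{
	struct demo_cmd cmd;
	size_t cmd_len;

	memset(&cmd, 0, sizeof(cmd));
	cmd.header.cmd_no = 0x12;
	/* Compute the length once, immediately after cmd_no is set,
	 * instead of calling demo_cmd_size() at the send site. */
	cmd_len = demo_cmd_size(&cmd);

	return demo_send_cmd(&cmd, cmd_len);
}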
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -485,6 +485,7 @@ static struct memory_type_mapping mem_type_mapping_tbl[] = {
 };

 static const struct of_device_id mwifiex_sdio_of_match_table[] = {
+	{ .compatible = "marvell,sd8787" },
 	{ .compatible = "marvell,sd8897" },
 	{ .compatible = "marvell,sd8997" },
 	{ }
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -4369,12 +4369,9 @@ void rtl8xxxu_gen1_report_connect(struct rtl8xxxu_priv *priv,
 void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
 				  u8 macid, bool connect)
 {
-#ifdef RTL8XXXU_GEN2_REPORT_CONNECT
 	/*
-	 * Barry Day reports this causes issues with 8192eu and 8723bu
-	 * devices reconnecting. The reason for this is unclear, but
-	 * until it is better understood, leave the code in place but
-	 * disabled, so it is not lost.
+	 * The firmware turns on the rate control when it knows it's
+	 * connected to a network.
 	 */
 	struct h2c_cmd h2c;

@@ -4387,7 +4384,6 @@ void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
 	h2c.media_status_rpt.parm &= ~BIT(0);

 	rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.media_status_rpt));
-#endif
 }

 void rtl8xxxu_gen1_init_aggregation(struct rtl8xxxu_priv *priv)
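With the #ifdef gone, gen2 devices now actually send the media-status report on (dis)connect, which is how the firmware learns it is associated and enables its rate control. As a rough illustration of the report-on-connect shape only; the demo_* struct below is invented and differs from the driver's real struct h2c_cmd:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BIT(n) (1u << (n))

/* Invented stand-in for the driver's media-status H2C report. */
struct demo_media_status_rpt {
	uint8_t cmd;	/* H2C command id */
	uint8_t parm;	/* BIT(0): connected */
	uint8_t macid;
};

static void demo_send_h2c(const void *cmd, size_t len)
{
	(void)cmd;
	printf("h2c: %zu bytes\n", len);
}

static void demo_report_connect(uint8_t macid, int connect)
{
	struct demo_media_status_rpt rpt;

	memset(&rpt, 0, sizeof(rpt));
	rpt.macid = macid;
	if (connect)
		rpt.parm |= BIT(0);	/* firmware turns on rate control */
	else
		rpt.parm &= ~BIT(0);

	demo_send_h2c(&rpt, sizeof(rpt));
}

int main(void)
{
	demo_report_connect(0, 1);
	return 0;
}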
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -487,6 +487,11 @@ static void ext4_sb_release(struct kobject *kobj)
 	complete(&sbi->s_kobj_unregister);
 }

+static void ext4_feat_release(struct kobject *kobj)
+{
+	kfree(kobj);
+}
+
 static const struct sysfs_ops ext4_attr_ops = {
 	.show = ext4_attr_show,
 	.store = ext4_attr_store,
@@ -501,7 +506,7 @@ static struct kobj_type ext4_sb_ktype = {
 static struct kobj_type ext4_feat_ktype = {
 	.default_groups = ext4_feat_groups,
 	.sysfs_ops = &ext4_attr_ops,
-	.release = (void (*)(struct kobject *))kfree,
+	.release = ext4_feat_release,
 };

 static struct kobject *ext4_root;
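The ext4 change is about indirect-call type safety: casting kfree to void (*)(struct kobject *) leaves the call site and the callee disagreeing on the prototype, which is undefined behaviour in C and a runtime trap under kernel control-flow integrity (CFI). A small stand-alone sketch of the wrapper pattern, with illustrative demo_* names:

#include <stdlib.h>

struct demo_kobject {
	int refcount;
};

struct demo_ktype {
	/* release must take the object type, not void * */
	void (*release)(struct demo_kobject *kobj);
};

/* Correctly-typed wrapper: the indirect call site and the callee
 * now agree on the prototype, which is exactly what CFI checks. */
static void demo_feat_release(struct demo_kobject *kobj)
{
	free(kobj);
}

static const struct demo_ktype demo_feat_ktype = {
	.release = demo_feat_release,	/* not: (void (*)(struct demo_kobject *))free */
};

int main(void)
{
	struct demo_kobject *kobj = malloc(sizeof(*kobj));

	if (kobj)
		demo_feat_ktype.release(kobj);
	return 0;
}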
--- a/include/linux/nospec.h
+++ b/include/linux/nospec.h
@@ -11,6 +11,10 @@

 struct task_struct;

+#ifndef barrier_nospec
+# define barrier_nospec() do { } while (0)
+#endif
+
 /**
  * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
  * @index: array element index
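The fallback means generic code can call barrier_nospec() unconditionally: an architecture that needs a real speculation fence defines the macro before this header is seen, and everyone else gets a no-op. That is also what allows the kernel/bpf/core.c hunk later in this diff to drop its #ifdef CONFIG_X86 guard. The same default-override idiom, in isolation as a sketch:

#include <stdio.h>

/* An architecture header would define the real barrier first, e.g.
 *   #define barrier_nospec() asm volatile("lfence" ::: "memory")
 * The generic header then only supplies the fallback if nothing did. */
#ifndef barrier_nospec
# define barrier_nospec() do { } while (0)
#endif

int main(void)
{
	/* Callers no longer need per-arch #ifdefs around this. */
	barrier_nospec();
	printf("ok\n");
	return 0;
}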
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -19,14 +19,14 @@ void add_input_randomness(unsigned int type, unsigned int code,
 void add_interrupt_randomness(int irq) __latent_entropy;
 void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);

-#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
 static inline void add_latent_entropy(void)
 {
+#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
 	add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
-}
 #else
-static inline void add_latent_entropy(void) { }
+	add_device_randomness(NULL, 0);
 #endif
+}

 void get_random_bytes(void *buf, int len);
 int __must_check get_random_bytes_arch(void *buf, int len);
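The reshuffle makes add_latent_entropy() call add_device_randomness() in both configurations: with the latent-entropy gcc plugin it mixes the per-build latent_entropy variable, and without it the add_device_randomness(NULL, 0) call is still worthwhile because that function mixes in the cycle counter before it ever looks at the buffer. A loose user-space sketch of that shape; the demo_* names are invented and the arithmetic is purely illustrative, not the kernel's mixing:

#include <stddef.h>
#include <stdio.h>
#include <time.h>

static unsigned long pool;

/* Sketch: always fold in a timestamp, then the buffer if present. */
static void demo_add_device_randomness(const void *buf, size_t len)
{
	pool ^= (unsigned long)clock();	/* stand-in for the cycle counter */
	for (size_t i = 0; i < len; i++)
		pool = pool * 31 + ((const unsigned char *)buf)[i];
}

static void demo_add_latent_entropy(void)
{
#if defined(LATENT_ENTROPY_PLUGIN)
	/* In the kernel, latent_entropy is emitted by the gcc plugin. */
	demo_add_device_randomness(&latent_entropy, sizeof(latent_entropy));
#else
	/* Still useful: mixes the timestamp even with no buffer. */
	demo_add_device_randomness(NULL, 0);
#endif
}

int main(void)
{
	demo_add_latent_entropy();
	printf("pool=%lx\n", pool);
	return 0;
}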
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -32,6 +32,7 @@
 #include <linux/perf_event.h>
 #include <linux/extable.h>
 #include <linux/log2.h>
+#include <linux/nospec.h>

 #include <asm/barrier.h>
 #include <asm/unaligned.h>
@@ -1646,9 +1647,7 @@ out:
 		 * reuse preexisting logic from Spectre v1 mitigation that
 		 * happens to produce the required code on x86 for v4 as well.
 		 */
-#ifdef CONFIG_X86
 		barrier_nospec();
-#endif
 		CONT;
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -3,6 +3,7 @@
 #include <linux/fault-inject-usercopy.h>
 #include <linux/instrumented.h>
 #include <linux/uaccess.h>
+#include <linux/nospec.h>

 /* out-of-line parts */

@@ -12,6 +13,12 @@ unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n
 	unsigned long res = n;
 	might_fault();
 	if (!should_fail_usercopy() && likely(access_ok(from, n))) {
+		/*
+		 * Ensure that bad access_ok() speculation will not
+		 * lead to nasty side effects *after* the copy is
+		 * finished:
+		 */
+		barrier_nospec();
 		instrument_copy_from_user(to, from, n);
 		res = raw_copy_from_user(to, from, n);
 	}
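The barrier's placement is the whole point: it sits between the access_ok() check and the dependent copy so a mispredicted branch cannot speculatively dereference an unchecked user pointer. A stripped-down model of that ordering; the demo_* stubs only mark where the real primitives go, and the empty asm is merely a compiler barrier standing in for a genuine speculation fence:

#include <stdio.h>
#include <string.h>

/* Stubs standing in for the kernel primitives. */
#define demo_access_ok(p, n)	((p) != NULL)	/* bounds check */
#define demo_barrier_nospec()	__asm__ volatile("" ::: "memory")

static unsigned long demo_copy_from_user(void *to, const void *from,
					 unsigned long n)
{
	unsigned long res = n;

	if (demo_access_ok(from, n)) {
		/* Fence *between* the check and the dependent access:
		 * speculation past a mispredicted check stops here. */
		demo_barrier_nospec();
		memcpy(to, from, n);
		res = 0;
	}
	return res;	/* bytes NOT copied, like _copy_from_user() */
}

int main(void)
{
	char src[8] = "abcdefg", dst[8];

	printf("left: %lu\n", demo_copy_from_user(dst, src, sizeof(src)));
	return 0;
}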
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -644,6 +644,26 @@ struct mesh_csa_settings {
 	struct cfg80211_csa_settings settings;
 };

+/**
+ * struct mesh_table
+ *
+ * @known_gates: list of known mesh gates and their mpaths by the station. The
+ * gate's mpath may or may not be resolved and active.
+ * @gates_lock: protects updates to known_gates
+ * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
+ * @walk_head: linked list containing all mesh_path objects
+ * @walk_lock: lock protecting walk_head
+ * @entries: number of entries in the table
+ */
+struct mesh_table {
+	struct hlist_head known_gates;
+	spinlock_t gates_lock;
+	struct rhashtable rhead;
+	struct hlist_head walk_head;
+	spinlock_t walk_lock;
+	atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
+};
+
 struct ieee80211_if_mesh {
 	struct timer_list housekeeping_timer;
 	struct timer_list mesh_path_timer;
@@ -718,8 +738,8 @@ struct ieee80211_if_mesh {
 	/* offset from skb->data while building IE */
 	int meshconf_offset;

-	struct mesh_table *mesh_paths;
-	struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */
+	struct mesh_table mesh_paths;
+	struct mesh_table mpp_paths; /* Store paths for MPP&MAP */
 	int mesh_paths_generation;
 	int mpp_paths_generation;
 };
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -127,26 +127,6 @@ struct mesh_path {
 	u32 path_change_count;
 };

-/**
- * struct mesh_table
- *
- * @known_gates: list of known mesh gates and their mpaths by the station. The
- * gate's mpath may or may not be resolved and active.
- * @gates_lock: protects updates to known_gates
- * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
- * @walk_head: linked list containging all mesh_path objects
- * @walk_lock: lock protecting walk_head
- * @entries: number of entries in the table
- */
-struct mesh_table {
-	struct hlist_head known_gates;
-	spinlock_t gates_lock;
-	struct rhashtable rhead;
-	struct hlist_head walk_head;
-	spinlock_t walk_lock;
-	atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
-};
-
 /* Recent multicast cache */
 /* RMC_BUCKETS must be a power of 2, maximum 256 */
 #define RMC_BUCKETS		256
@@ -308,7 +288,7 @@ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
 void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta);
 void mesh_path_flush_pending(struct mesh_path *mpath);
 void mesh_path_tx_pending(struct mesh_path *mpath);
-int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata);
+void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata);
 void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata);
 int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr);
 void mesh_path_timer(struct timer_list *t);
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -47,32 +47,24 @@ static void mesh_path_rht_free(void *ptr, void *tblptr)
 	mesh_path_free_rcu(tbl, mpath);
 }

-static struct mesh_table *mesh_table_alloc(void)
+static void mesh_table_init(struct mesh_table *tbl)
 {
-	struct mesh_table *newtbl;
+	INIT_HLIST_HEAD(&tbl->known_gates);
+	INIT_HLIST_HEAD(&tbl->walk_head);
+	atomic_set(&tbl->entries, 0);
+	spin_lock_init(&tbl->gates_lock);
+	spin_lock_init(&tbl->walk_lock);

-	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
-	if (!newtbl)
-		return NULL;
-
-	INIT_HLIST_HEAD(&newtbl->known_gates);
-	INIT_HLIST_HEAD(&newtbl->walk_head);
-	atomic_set(&newtbl->entries, 0);
-	spin_lock_init(&newtbl->gates_lock);
-	spin_lock_init(&newtbl->walk_lock);
-	if (rhashtable_init(&newtbl->rhead, &mesh_rht_params)) {
-		kfree(newtbl);
-		return NULL;
-	}
-
-	return newtbl;
+	/* rhashtable_init() may fail only in case of wrong
+	 * mesh_rht_params
+	 */
+	WARN_ON(rhashtable_init(&tbl->rhead, &mesh_rht_params));
 }

 static void mesh_table_free(struct mesh_table *tbl)
 {
 	rhashtable_free_and_destroy(&tbl->rhead,
 				    mesh_path_rht_free, tbl);
-	kfree(tbl);
 }

 /**
@@ -238,13 +230,13 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
 struct mesh_path *
 mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
 {
-	return mpath_lookup(sdata->u.mesh.mesh_paths, dst, sdata);
+	return mpath_lookup(&sdata->u.mesh.mesh_paths, dst, sdata);
 }

 struct mesh_path *
 mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
 {
-	return mpath_lookup(sdata->u.mesh.mpp_paths, dst, sdata);
+	return mpath_lookup(&sdata->u.mesh.mpp_paths, dst, sdata);
 }

 static struct mesh_path *
@@ -281,7 +273,7 @@ __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
 struct mesh_path *
 mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
 {
-	return __mesh_path_lookup_by_idx(sdata->u.mesh.mesh_paths, idx);
+	return __mesh_path_lookup_by_idx(&sdata->u.mesh.mesh_paths, idx);
 }

 /**
@@ -296,7 +288,7 @@ mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
 struct mesh_path *
 mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
 {
-	return __mesh_path_lookup_by_idx(sdata->u.mesh.mpp_paths, idx);
+	return __mesh_path_lookup_by_idx(&sdata->u.mesh.mpp_paths, idx);
 }

 /**
@@ -309,7 +301,7 @@ int mesh_path_add_gate(struct mesh_path *mpath)
 	int err;

 	rcu_read_lock();
-	tbl = mpath->sdata->u.mesh.mesh_paths;
+	tbl = &mpath->sdata->u.mesh.mesh_paths;

 	spin_lock_bh(&mpath->state_lock);
 	if (mpath->is_gate) {
@@ -418,7 +410,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
 	if (!new_mpath)
 		return ERR_PTR(-ENOMEM);

-	tbl = sdata->u.mesh.mesh_paths;
+	tbl = &sdata->u.mesh.mesh_paths;
 	spin_lock_bh(&tbl->walk_lock);
 	mpath = rhashtable_lookup_get_insert_fast(&tbl->rhead,
 						  &new_mpath->rhash,
@@ -460,7 +452,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
 		return -ENOMEM;

 	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
-	tbl = sdata->u.mesh.mpp_paths;
+	tbl = &sdata->u.mesh.mpp_paths;

 	spin_lock_bh(&tbl->walk_lock);
 	ret = rhashtable_lookup_insert_fast(&tbl->rhead,
@@ -489,7 +481,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
 void mesh_plink_broken(struct sta_info *sta)
 {
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
-	struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
+	struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
 	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 	struct mesh_path *mpath;

@@ -548,7 +540,7 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
 void mesh_path_flush_by_nexthop(struct sta_info *sta)
 {
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
-	struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
+	struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
 	struct mesh_path *mpath;
 	struct hlist_node *n;

@@ -563,7 +555,7 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
 static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
 			       const u8 *proxy)
 {
-	struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
+	struct mesh_table *tbl = &sdata->u.mesh.mpp_paths;
 	struct mesh_path *mpath;
 	struct hlist_node *n;

@@ -597,8 +589,8 @@ static void table_flush_by_iface(struct mesh_table *tbl)
  */
 void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
 {
-	table_flush_by_iface(sdata->u.mesh.mesh_paths);
-	table_flush_by_iface(sdata->u.mesh.mpp_paths);
+	table_flush_by_iface(&sdata->u.mesh.mesh_paths);
+	table_flush_by_iface(&sdata->u.mesh.mpp_paths);
 }

 /**
@@ -644,7 +636,7 @@ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
 	/* flush relevant mpp entries first */
 	mpp_flush_by_proxy(sdata, addr);

-	err = table_path_del(sdata->u.mesh.mesh_paths, sdata, addr);
+	err = table_path_del(&sdata->u.mesh.mesh_paths, sdata, addr);
 	sdata->u.mesh.mesh_paths_generation++;
 	return err;
 }
@@ -682,7 +674,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
 	struct mesh_path *gate;
 	bool copy = false;

-	tbl = sdata->u.mesh.mesh_paths;
+	tbl = &sdata->u.mesh.mesh_paths;

 	rcu_read_lock();
 	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
@@ -762,29 +754,10 @@ void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
 	mesh_path_tx_pending(mpath);
 }

-int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
+void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
 {
-	struct mesh_table *tbl_path, *tbl_mpp;
-	int ret;
-
-	tbl_path = mesh_table_alloc();
-	if (!tbl_path)
-		return -ENOMEM;
-
-	tbl_mpp = mesh_table_alloc();
-	if (!tbl_mpp) {
-		ret = -ENOMEM;
-		goto free_path;
-	}
-
-	sdata->u.mesh.mesh_paths = tbl_path;
-	sdata->u.mesh.mpp_paths = tbl_mpp;
-
-	return 0;
-
-free_path:
-	mesh_table_free(tbl_path);
-	return ret;
+	mesh_table_init(&sdata->u.mesh.mesh_paths);
+	mesh_table_init(&sdata->u.mesh.mpp_paths);
 }

 static
@@ -806,12 +779,12 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,

 void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
 {
-	mesh_path_tbl_expire(sdata, sdata->u.mesh.mesh_paths);
-	mesh_path_tbl_expire(sdata, sdata->u.mesh.mpp_paths);
+	mesh_path_tbl_expire(sdata, &sdata->u.mesh.mesh_paths);
+	mesh_path_tbl_expire(sdata, &sdata->u.mesh.mpp_paths);
 }

 void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata)
 {
-	mesh_table_free(sdata->u.mesh.mesh_paths);
-	mesh_table_free(sdata->u.mesh.mpp_paths);
+	mesh_table_free(&sdata->u.mesh.mesh_paths);
+	mesh_table_free(&sdata->u.mesh.mpp_paths);
 }
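The mesh conversion embeds both tables directly in ieee80211_if_mesh, which removes a GFP_ATOMIC allocation that could fail, lets mesh_pathtbl_init() return void, and turns the only remaining failure mode (bad rhashtable parameters) into a WARN_ON. The pointer-to-embedded-member pattern, reduced to a sketch with invented demo_* types:

#include <stdio.h>

/* Before: struct owner { struct table *tbl; };  tbl = table_alloc();
 * After:  the table is embedded, so init cannot fail. */
struct demo_table {
	int entries;
};

struct demo_owner {
	struct demo_table paths;	/* embedded, not a pointer */
	struct demo_table mpp_paths;
};

static void demo_table_init(struct demo_table *tbl)
{
	tbl->entries = 0;	/* no allocation, so no error path */
}

static void demo_pathtbl_init(struct demo_owner *o)
{
	demo_table_init(&o->paths);
	demo_table_init(&o->mpp_paths);
}

int main(void)
{
	struct demo_owner o;

	demo_pathtbl_init(&o);
	/* call sites change from o->paths (pointer) to &o.paths */
	printf("%d\n", o.paths.entries);
	return 0;
}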
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -1906,14 +1906,12 @@ start_error:

 static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
 {
-	struct taprio_sched *q = qdisc_priv(sch);
-	struct net_device *dev = qdisc_dev(sch);
-	unsigned int ntx = cl - 1;
+	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

-	if (ntx >= dev->num_tx_queues)
+	if (!dev_queue)
 		return NULL;

-	return q->qdiscs[ntx];
+	return dev_queue->qdisc_sleeping;
 }

 static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
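After the revert, taprio_leaf() reports whatever qdisc is attached to the underlying netdev TX queue (dev_queue->qdisc_sleeping) instead of taprio's private q->qdiscs[] array. A toy model of the classid-to-queue-to-leaf lookup, with invented demo_* types:

#include <stddef.h>
#include <stdio.h>

struct demo_qdisc { const char *name; };
struct demo_netdev_queue { struct demo_qdisc *qdisc_sleeping; };

struct demo_dev {
	unsigned int num_tx_queues;
	struct demo_netdev_queue queues[4];
};

/* classid cl is 1-based; the TX queue index is cl - 1 */
static struct demo_netdev_queue *demo_queue_get(struct demo_dev *dev,
						unsigned long cl)
{
	unsigned int ntx = cl - 1;

	return ntx < dev->num_tx_queues ? &dev->queues[ntx] : NULL;
}

static struct demo_qdisc *demo_leaf(struct demo_dev *dev, unsigned long cl)
{
	struct demo_netdev_queue *dev_queue = demo_queue_get(dev, cl);

	if (!dev_queue)
		return NULL;
	/* report what is attached to the TX queue, not a private array */
	return dev_queue->qdisc_sleeping;
}

int main(void)
{
	struct demo_qdisc pfifo = { "pfifo" };
	struct demo_dev dev = { .num_tx_queues = 1 };

	dev.queues[0].qdisc_sleeping = &pfifo;
	printf("%s\n", demo_leaf(&dev, 1)->name);
	return 0;
}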