Merge 5.10.118 into android12-5.10-lts

Changes in 5.10.118
	usb: gadget: fix race when gadget driver register via ioctl
	io_uring: always grab file table for deferred statx
	floppy: use a statically allocated error counter
	Revert "drm/i915/opregion: check port number bounds for SWSCI display power state"
	igc: Remove _I_PHY_ID checking
	igc: Remove phy->type checking
	igc: Update I226_K device ID
	rtc: fix use-after-free on device removal
	rtc: pcf2127: fix bug when reading alarm registers
	um: Cleanup syscall_handler_t definition/cast, fix warning
	Input: add bounds checking to input_set_capability()
	Input: stmfts - fix reference leak in stmfts_input_open
	nvme-pci: add quirks for Samsung X5 SSDs
	gfs2: Disable page faults during lockless buffered reads
	rtc: sun6i: Fix time overflow handling
	crypto: stm32 - fix reference leak in stm32_crc_remove
	crypto: x86/chacha20 - Avoid spurious jumps to other functions
	ALSA: hda/realtek: Enable headset mic on Lenovo P360
	s390/pci: improve zpci_dev reference counting
	vhost_vdpa: don't setup irq offloading when irq_num < 0
	tools/virtio: compile with -pthread
	nvme-multipath: fix hang when disk goes live over reconnect
	rtc: mc146818-lib: Fix the AltCentury for AMD platforms
	fs: fix an infinite loop in iomap_fiemap
	MIPS: lantiq: check the return value of kzalloc()
	drbd: remove usage of list iterator variable after loop
	platform/chrome: cros_ec_debugfs: detach log reader wq from devm
	ARM: 9191/1: arm/stacktrace, kasan: Silence KASAN warnings in unwind_frame()
	nilfs2: fix lockdep warnings in page operations for btree nodes
	nilfs2: fix lockdep warnings during disk space reclamation
	Revert "swiotlb: fix info leak with DMA_FROM_DEVICE"
	Reinstate some of "swiotlb: rework "fix info leak with DMA_FROM_DEVICE""
	ALSA: usb-audio: Restore Rane SL-1 quirk
	ALSA: wavefront: Proper check of get_user() error
	ALSA: hda/realtek: Add quirk for TongFang devices with pop noise
	perf: Fix sys_perf_event_open() race against self
	selinux: fix bad cleanup on error in hashtab_duplicate()
	Fix double fget() in vhost_net_set_backend()
	PCI/PM: Avoid putting Elo i2 PCIe Ports in D3cold
	KVM: x86/mmu: Update number of zapped pages even if page list is stable
	arm64: paravirt: Use RCU read locks to guard stolen_time
	arm64: mte: Ensure the cleared tags are visible before setting the PTE
	crypto: qcom-rng - fix infinite loop on requests not multiple of WORD_SZ
	libceph: fix potential use-after-free on linger ping and resends
	drm/dp/mst: fix a possible memory leak in fetch_monitor_name()
	dma-buf: fix use of DMA_BUF_SET_NAME_{A,B} in userspace
	ARM: dts: aspeed-g6: remove FWQSPID group in pinctrl dtsi
	pinctrl: pinctrl-aspeed-g6: remove FWQSPID group in pinctrl
	ARM: dts: aspeed-g6: fix SPI1/SPI2 quad pin group
	net: ipa: record proper RX transaction count
	net: macb: Increment rx bd head after allocating skb and buffer
	net: evaluate net.ipvX.conf.all.disable_policy and disable_xfrm
	xfrm: Add possibility to set the default to block if we have no policy
	net: xfrm: fix shift-out-of-bounce
	xfrm: make user policy API complete
	xfrm: notify default policy on update
	xfrm: fix dflt policy check when there is no policy configured
	xfrm: rework default policy structure
	xfrm: fix "disable_policy" flag use when arriving from different devices
	net/sched: act_pedit: sanitize shift argument before usage
	net: systemport: Fix an error handling path in bcm_sysport_probe()
	net: vmxnet3: fix possible use-after-free bugs in vmxnet3_rq_alloc_rx_buf()
	net: vmxnet3: fix possible NULL pointer dereference in vmxnet3_rq_cleanup()
	ice: fix possible under reporting of ethtool Tx and Rx statistics
	clk: at91: generated: consider range when calculating best rate
	net/qla3xxx: Fix a test in ql_reset_work()
	NFC: nci: fix sleep in atomic context bugs caused by nci_skb_alloc
	net/mlx5e: Properly block LRO when XDP is enabled
	net: af_key: add check for pfkey_broadcast in function pfkey_process
	ARM: 9196/1: spectre-bhb: enable for Cortex-A15
	ARM: 9197/1: spectre-bhb: fix loop8 sequence for Thumb2
	igb: skip phy status check where unavailable
	net: bridge: Clear offload_fwd_mark when passing frame up bridge interface.
	riscv: dts: sifive: fu540-c000: align dma node name with dtschema
	gpio: gpio-vf610: do not touch other bits when set the target bit
	gpio: mvebu/pwm: Refuse requests with inverted polarity
	perf bench numa: Address compiler error on s390
	scsi: qla2xxx: Fix missed DMA unmap for aborted commands
	mac80211: fix rx reordering with non explicit / psmp ack policy
	nl80211: validate S1G channel width
	selftests: add ping test with ping_group_range tuned
	nl80211: fix locking in nl80211_set_tx_bitrate_mask()
	ethernet: tulip: fix missing pci_disable_device() on error in tulip_init_one()
	net: stmmac: fix missing pci_disable_device() on error in stmmac_pci_probe()
	net: atlantic: fix "frag[0] not initialized"
	net: atlantic: reduce scope of is_rsc_complete
	net: atlantic: add check for MAX_SKB_FRAGS
	net: atlantic: verify hw_head_ lies within TX buffer ring
	arm64: Enable repeat tlbi workaround on KRYO4XX gold CPUs
	Input: ili210x - fix reset timing
	dt-bindings: pinctrl: aspeed-g6: remove FWQSPID group
	module: treat exit sections the same as init sections when !CONFIG_MODULE_UNLOAD
	i2c: mt7621: fix missing clk_disable_unprepare() on error in mtk_i2c_probe()
	afs: Fix afs_getattr() to refetch file status if callback break occurred
	include/uapi/linux/xfrm.h: Fix XFRM_MSG_MAPPING ABI breakage
	module: check for exit sections in layout_sections() instead of module_init_section()
	Linux 5.10.118

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I7093fab7ea7d43c42962f2d7fe799df255049a17
Greg Kroah-Hartman
2022-06-06 16:37:12 +02:00
112 changed files with 1001 additions and 498 deletions


@@ -166,6 +166,9 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| Qualcomm Tech. | Kryo4xx Silver  | N/A             | ARM64_ERRATUM_1024718       |
+----------------+-----------------+-----------------+-----------------------------+
| Qualcomm Tech. | Kryo4xx Gold    | N/A             | ARM64_ERRATUM_1286807       |
+----------------+-----------------+-----------------+-----------------------------+
| Fujitsu        | A64FX           | E#010001        | FUJITSU_ERRATUM_010001      |
+----------------+-----------------+-----------------+-----------------------------+


@@ -130,11 +130,3 @@ accesses to DMA buffers in both privileged "supervisor" and unprivileged
subsystem that the buffer is fully accessible at the elevated privilege
level (and ideally inaccessible or at least read-only at the
lesser-privileged levels).
DMA_ATTR_OVERWRITE
------------------
This is a hint to the DMA-mapping subsystem that the device is expected to
overwrite the entire mapped size, thus the caller does not require any of the
previous buffer contents to be preserved. This allows bounce-buffering
implementations to optimise DMA_FROM_DEVICE transfers.


@@ -58,7 +58,7 @@ patternProperties:
$ref: "/schemas/types.yaml#/definitions/string"
enum: [ ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14, ADC15, ADC2,
ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, EMMCG1, EMMCG4,
EMMCG8, ESPI, ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID, FWQSPID, FWSPIWP,
EMMCG8, ESPI, ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID, FWSPIWP,
GPIT0, GPIT1, GPIT2, GPIT3, GPIT4, GPIT5, GPIT6, GPIT7, GPIU0, GPIU1,
GPIU2, GPIU3, GPIU4, GPIU5, GPIU6, GPIU7, HVI3C3, HVI3C4, I2C1, I2C10,
I2C11, I2C12, I2C13, I2C14, I2C15, I2C16, I2C2, I2C3, I2C4, I2C5,


@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 117
SUBLEVEL = 118
EXTRAVERSION =
NAME = Dare mighty things


@@ -117,11 +117,6 @@
groups = "FWSPID";
};
pinctrl_fwqspid_default: fwqspid_default {
function = "FWSPID";
groups = "FWQSPID";
};
pinctrl_fwspiwp_default: fwspiwp_default {
function = "FWSPIWP";
groups = "FWSPIWP";
@@ -653,12 +648,12 @@
};
pinctrl_qspi1_default: qspi1_default {
function = "QSPI1";
function = "SPI1";
groups = "QSPI1";
};
pinctrl_qspi2_default: qspi2_default {
function = "QSPI2";
function = "SPI2";
groups = "QSPI2";
};


@@ -1043,7 +1043,7 @@ vector_bhb_loop8_\name:
@ bhb workaround
mov r0, #8
3: b . + 4
3: W(b) . + 4
subs r0, r0, #1
bne 3b
dsb


@@ -53,17 +53,17 @@ int notrace unwind_frame(struct stackframe *frame)
return -EINVAL;
frame->sp = frame->fp;
frame->fp = *(unsigned long *)(fp);
frame->pc = *(unsigned long *)(fp + 4);
frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 4));
#else
/* check current frame pointer is within bounds */
if (fp < low + 12 || fp > high - 4)
return -EINVAL;
/* restore the registers from the stack frame */
frame->fp = *(unsigned long *)(fp - 12);
frame->sp = *(unsigned long *)(fp - 8);
frame->pc = *(unsigned long *)(fp - 4);
frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 12));
frame->sp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 8));
frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 4));
#endif
return 0;


@@ -288,6 +288,7 @@ void cpu_v7_ca15_ibe(void)
{
if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
cpu_v7_spectre_v2_init();
cpu_v7_spectre_bhb_init();
}
void cpu_v7_bugs_init(void)


@@ -210,6 +210,8 @@ static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_1286807
{
ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
/* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
},
#endif
{},


@@ -67,6 +67,9 @@ void mte_sync_tags(pte_t *ptep, pte_t pte)
if (!test_and_set_bit(PG_mte_tagged, &page->flags))
mte_sync_page_tags(page, ptep, check_swap);
}
/* ensure the tags are visible before the PTE is set */
smp_wmb();
}
int memcmp_pages(struct page *page1, struct page *page2)


@@ -30,7 +30,7 @@ struct paravirt_patch_template pv_ops;
EXPORT_SYMBOL_GPL(pv_ops);
struct pv_time_stolen_time_region {
struct pvclock_vcpu_stolen_time *kaddr;
struct pvclock_vcpu_stolen_time __rcu *kaddr;
};
static DEFINE_PER_CPU(struct pv_time_stolen_time_region, stolen_time_region);
@@ -47,7 +47,9 @@ early_param("no-steal-acc", parse_no_stealacc);
/* return stolen time in ns by asking the hypervisor */
static u64 pv_steal_clock(int cpu)
{
struct pvclock_vcpu_stolen_time *kaddr = NULL;
struct pv_time_stolen_time_region *reg;
u64 ret = 0;
reg = per_cpu_ptr(&stolen_time_region, cpu);
@@ -56,28 +58,37 @@ static u64 pv_steal_clock(int cpu)
* online notification callback runs. Until the callback
* has run we just return zero.
*/
if (!reg->kaddr)
rcu_read_lock();
kaddr = rcu_dereference(reg->kaddr);
if (!kaddr) {
rcu_read_unlock();
return 0;
}
return le64_to_cpu(READ_ONCE(reg->kaddr->stolen_time));
ret = le64_to_cpu(READ_ONCE(kaddr->stolen_time));
rcu_read_unlock();
return ret;
}
static int stolen_time_cpu_down_prepare(unsigned int cpu)
{
struct pvclock_vcpu_stolen_time *kaddr = NULL;
struct pv_time_stolen_time_region *reg;
reg = this_cpu_ptr(&stolen_time_region);
if (!reg->kaddr)
return 0;
memunmap(reg->kaddr);
memset(reg, 0, sizeof(*reg));
kaddr = rcu_replace_pointer(reg->kaddr, NULL, true);
synchronize_rcu();
memunmap(kaddr);
return 0;
}
static int stolen_time_cpu_online(unsigned int cpu)
{
struct pvclock_vcpu_stolen_time *kaddr = NULL;
struct pv_time_stolen_time_region *reg;
struct arm_smccc_res res;
@@ -88,17 +99,19 @@ static int stolen_time_cpu_online(unsigned int cpu)
if (res.a0 == SMCCC_RET_NOT_SUPPORTED)
return -EINVAL;
reg->kaddr = memremap(res.a0,
kaddr = memremap(res.a0,
sizeof(struct pvclock_vcpu_stolen_time),
MEMREMAP_WB);
rcu_assign_pointer(reg->kaddr, kaddr);
if (!reg->kaddr) {
pr_warn("Failed to map stolen time data structure\n");
return -ENOMEM;
}
if (le32_to_cpu(reg->kaddr->revision) != 0 ||
le32_to_cpu(reg->kaddr->attributes) != 0) {
if (le32_to_cpu(kaddr->revision) != 0 ||
le32_to_cpu(kaddr->attributes) != 0) {
pr_warn_once("Unexpected revision or attributes in stolen time data\n");
return -ENXIO;
}


@@ -167,6 +167,8 @@ static inline void clkdev_add_sys(const char *dev, unsigned int module,
{
struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
if (!clk)
return;
clk->cl.dev_id = dev;
clk->cl.con_id = NULL;
clk->cl.clk = clk;


@@ -122,6 +122,8 @@ static inline void clkdev_add_gptu(struct device *dev, const char *con,
{
struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
if (!clk)
return;
clk->cl.dev_id = dev_name(dev);
clk->cl.con_id = con;
clk->cl.clk = clk;


@@ -315,6 +315,8 @@ static void clkdev_add_pmu(const char *dev, const char *con, bool deactivate,
{
struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
if (!clk)
return;
clk->cl.dev_id = dev;
clk->cl.con_id = con;
clk->cl.clk = clk;
@@ -338,6 +340,8 @@ static void clkdev_add_cgu(const char *dev, const char *con,
{
struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
if (!clk)
return;
clk->cl.dev_id = dev;
clk->cl.con_id = con;
clk->cl.clk = clk;
@@ -356,6 +360,7 @@ static void clkdev_add_pci(void)
struct clk *clk_ext = kzalloc(sizeof(struct clk), GFP_KERNEL);
/* main pci clock */
if (clk) {
clk->cl.dev_id = "17000000.pci";
clk->cl.con_id = NULL;
clk->cl.clk = clk;
@@ -366,8 +371,10 @@ static void clkdev_add_pci(void)
clk->module = 0;
clk->bits = PMU_PCI;
clkdev_add(&clk->cl);
}
/* use internal/external bus clock */
if (clk_ext) {
clk_ext->cl.dev_id = "17000000.pci";
clk_ext->cl.con_id = "external";
clk_ext->cl.clk = clk_ext;
@@ -375,6 +382,7 @@ static void clkdev_add_pci(void)
clk_ext->disable = pci_ext_disable;
clkdev_add(&clk_ext->cl);
}
}
/* xway socs can generate clocks on gpio pins */
static unsigned long valid_clkout_rates[4][5] = {
@@ -393,9 +401,15 @@ static void clkdev_add_clkout(void)
char *name;
name = kzalloc(sizeof("clkout0"), GFP_KERNEL);
if (!name)
continue;
sprintf(name, "clkout%d", i);
clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
if (!clk) {
kfree(name);
continue;
}
clk->cl.dev_id = "1f103000.cgu";
clk->cl.con_id = name;
clk->cl.clk = clk;


@@ -166,7 +166,7 @@
clocks = <&prci PRCI_CLK_TLCLK>;
status = "disabled";
};
dma: dma@3000000 {
dma: dma-controller@3000000 {
compatible = "sifive,fu540-c000-pdma";
reg = <0x0 0x3000000 0x0 0x8000>;
interrupt-parent = <&plic0>;


@@ -69,6 +69,7 @@ struct zpci_dev *get_zdev_by_fid(u32 fid)
list_for_each_entry(tmp, &zpci_list, entry) {
if (tmp->fid == fid) {
zdev = tmp;
zpci_zdev_get(zdev);
break;
}
}


@@ -13,6 +13,7 @@ void zpci_bus_device_unregister(struct zpci_dev *zdev);
void zpci_release_device(struct kref *kref);
static inline void zpci_zdev_put(struct zpci_dev *zdev)
{
if (zdev)
kref_put(&zdev->kref, zpci_release_device);
}


@@ -22,6 +22,8 @@
#include <asm/clp.h>
#include <uapi/asm/clp.h>
#include "pci_bus.h"
bool zpci_unique_uid;
void update_uid_checking(bool new)
@@ -372,7 +374,10 @@ static void __clp_add(struct clp_fh_list_entry *entry, void *data)
return;
zdev = get_zdev_by_fid(entry->fid);
if (!zdev)
if (zdev) {
zpci_zdev_put(zdev);
return;
}
zpci_create_device(entry->fid, entry->fh, entry->config_state);
}


@@ -61,10 +61,12 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid);
if (!pdev)
return;
goto no_pdev;
pdev->error_state = pci_channel_io_perm_failure;
pci_dev_put(pdev);
no_pdev:
zpci_zdev_put(zdev);
}
void zpci_event_error(void *data)
@@ -76,6 +78,7 @@ void zpci_event_error(void *data)
static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
{
struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
bool existing_zdev = !!zdev;
enum zpci_state state;
struct pci_dev *pdev;
int ret;
@@ -161,6 +164,8 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
default:
break;
}
if (existing_zdev)
zpci_zdev_put(zdev);
}
void zpci_event_availability(void *data)


@@ -172,7 +172,7 @@ SYM_FUNC_START(chacha_2block_xor_avx512vl)
# xor remaining bytes from partial register into output
mov %rcx,%rax
and $0xf,%rcx
jz .Ldone8
jz .Ldone2
mov %rax,%r9
and $~0xf,%r9
@@ -438,7 +438,7 @@ SYM_FUNC_START(chacha_4block_xor_avx512vl)
# xor remaining bytes from partial register into output
mov %rcx,%rax
and $0xf,%rcx
jz .Ldone8
jz .Ldone4
mov %rax,%r9
and $~0xf,%r9


@@ -5375,6 +5375,7 @@ static void kvm_zap_obsolete_pages(struct kvm *kvm)
{
struct kvm_mmu_page *sp, *node;
int nr_zapped, batch = 0;
bool unstable;
restart:
list_for_each_entry_safe_reverse(sp, node,
@@ -5406,12 +5407,13 @@ restart:
goto restart;
}
if (__kvm_mmu_prepare_zap_page(kvm, sp,
&kvm->arch.zapped_obsolete_pages, &nr_zapped)) {
unstable = __kvm_mmu_prepare_zap_page(kvm, sp,
&kvm->arch.zapped_obsolete_pages, &nr_zapped);
batch += nr_zapped;
if (unstable)
goto restart;
}
}
/*
* Trigger a remote TLB flush before freeing the page tables to ensure

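For context on the KVM hunk above: the restructuring makes the zap count feed the batch counter on every iteration instead of only on the restart path, so progress is accounted even when the page list stays stable. A toy demonstration of how the old shape under-counts (hypothetical prepare_zap(), not kernel code):

#include <stdio.h>

/* Pretends to zap 2 pages per call; reports "unstable" only for i == 0. */
static int prepare_zap(int i, int *nr_zapped) { *nr_zapped = 2; return i == 0; }

int main(void)
{
	int batch_old = 0, batch_new = 0, nr, unstable, i;

	for (i = 0; i < 4; i++) {
		/* Old shape: the count was folded into the condition, so
		 * iterations that kept the list stable were never counted. */
		if (prepare_zap(i, &nr))
			batch_old += nr;

		/* New shape: account first, then decide whether to restart. */
		unstable = prepare_zap(i, &nr);
		batch_new += nr;
		(void)unstable;
	}
	printf("old=%d new=%d\n", batch_old, batch_new);	/* old=2 new=8 */
	return 0;
}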

@@ -10,13 +10,12 @@
#include <linux/msg.h>
#include <linux/shm.h>
typedef long syscall_handler_t(void);
typedef long syscall_handler_t(long, long, long, long, long, long);
extern syscall_handler_t *sys_call_table[];
#define EXECUTE_SYSCALL(syscall, regs) \
(((long (*)(long, long, long, long, long, long)) \
(*sys_call_table[syscall]))(UPT_SYSCALL_ARG1(&regs->regs), \
(((*sys_call_table[syscall]))(UPT_SYSCALL_ARG1(&regs->regs), \
UPT_SYSCALL_ARG2(&regs->regs), \
UPT_SYSCALL_ARG3(&regs->regs), \
UPT_SYSCALL_ARG4(&regs->regs), \

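The um change above gives syscall_handler_t its real six-argument signature, so the long cast at the EXECUTE_SYSCALL call site can go away. A minimal userspace sketch of why the full-signature typedef is the cleaner shape (hypothetical sys_demo):

#include <stdio.h>

/* Before: "typedef long syscall_handler_t(void);" forced a cast per call.
 * Declaring the real arity once lets the table be called directly. */
typedef long syscall_handler_t(long, long, long, long, long, long);

static long sys_demo(long a, long b, long c, long d, long e, long f)
{
	return a + b + c + d + e + f;
}

static syscall_handler_t *sys_call_table[] = { sys_demo };

int main(void)
{
	/* No (long (*)(long, long, long, long, long, long)) cast needed. */
	printf("%ld\n", (*sys_call_table[0])(1, 2, 3, 4, 5, 6));
	return 0;
}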

@@ -184,7 +184,7 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
unsigned int set_size)
{
struct drbd_request *r;
struct drbd_request *req = NULL;
struct drbd_request *req = NULL, *tmp = NULL;
int expect_epoch = 0;
int expect_size = 0;
@@ -238,8 +238,11 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
* to catch requests being barrier-acked "unexpectedly".
* It usually should find the same req again, or some READ preceding it. */
list_for_each_entry(req, &connection->transfer_log, tl_requests)
if (req->epoch == expect_epoch)
if (req->epoch == expect_epoch) {
tmp = req;
break;
}
req = list_prepare_entry(tmp, &connection->transfer_log, tl_requests);
list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
if (req->epoch != expect_epoch)
break;

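For context on the drbd fix above: an iterator that ran list_for_each_entry() to completion no longer points at a real element, so the match has to be captured in a separate variable before it is reused. A simplified, self-contained sketch of the hazard (plain pointers standing in for the kernel list macros):

#include <stdio.h>

struct req { int epoch; struct req *next; };

int main(void)
{
	struct req c = { 3, 0 }, b = { 2, &c }, a = { 1, &b };
	struct req *it, *tmp = NULL;

	for (it = &a; it; it = it->next)
		if (it->epoch == 2) {
			tmp = it;
			break;
		}

	/* Reusing 'it' here would be the bug: when nothing matched, the
	 * iterator is no longer a valid element (NULL in this sketch; a
	 * bogus container_of() of the list head with the kernel macros).
	 * Capturing the match in 'tmp' keeps the not-found case explicit,
	 * which is what the drbd change does before list_prepare_entry(). */
	if (tmp)
		printf("found epoch %d\n", tmp->epoch);
	return 0;
}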

@@ -509,8 +509,8 @@ static unsigned long fdc_busy;
static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
static DECLARE_WAIT_QUEUE_HEAD(command_done);
/* Errors during formatting are counted here. */
static int format_errors;
/* errors encountered on the current (or last) request */
static int floppy_errors;
/* Format request descriptor. */
static struct format_descr format_req;
@@ -530,7 +530,6 @@ static struct format_descr format_req;
static char *floppy_track_buffer;
static int max_buffer_sectors;
static int *errors;
typedef void (*done_f)(int);
static const struct cont_t {
void (*interrupt)(void);
@@ -1455,7 +1454,7 @@ static int interpret_errors(void)
if (drive_params[current_drive].flags & FTD_MSG)
DPRINT("Over/Underrun - retrying\n");
bad = 0;
} else if (*errors >= drive_params[current_drive].max_errors.reporting) {
} else if (floppy_errors >= drive_params[current_drive].max_errors.reporting) {
print_errors();
}
if (reply_buffer[ST2] & ST2_WC || reply_buffer[ST2] & ST2_BC)
@@ -2095,7 +2094,7 @@ static void bad_flp_intr(void)
if (!next_valid_format(current_drive))
return;
}
err_count = ++(*errors);
err_count = ++floppy_errors;
INFBOUND(write_errors[current_drive].badness, err_count);
if (err_count > drive_params[current_drive].max_errors.abort)
cont->done(0);
@@ -2240,9 +2239,8 @@ static int do_format(int drive, struct format_descr *tmp_format_req)
return -EINVAL;
}
format_req = *tmp_format_req;
format_errors = 0;
cont = &format_cont;
errors = &format_errors;
floppy_errors = 0;
ret = wait_til_done(redo_format, true);
if (ret == -EINTR)
return -EINTR;
@@ -2721,7 +2719,7 @@ static int make_raw_rw_request(void)
*/
if (!direct ||
(indirect * 2 > direct * 3 &&
*errors < drive_params[current_drive].max_errors.read_track &&
floppy_errors < drive_params[current_drive].max_errors.read_track &&
((!probing ||
(drive_params[current_drive].read_track & (1 << drive_state[current_drive].probed_format)))))) {
max_size = blk_rq_sectors(current_req);
@@ -2846,10 +2844,11 @@ static int set_next_request(void)
current_req = list_first_entry_or_null(&floppy_reqs, struct request,
queuelist);
if (current_req) {
current_req->error_count = 0;
floppy_errors = 0;
list_del_init(&current_req->queuelist);
return 1;
}
return current_req != NULL;
return 0;
}
/* Starts or continues processing request. Will automatically unlock the
@@ -2908,7 +2907,6 @@ do_request:
_floppy = floppy_type + drive_params[current_drive].autodetect[drive_state[current_drive].probed_format];
} else
probing = 0;
errors = &(current_req->error_count);
tmp = make_raw_rw_request();
if (tmp < 2) {
request_done(tmp);


@@ -106,6 +106,10 @@ static void clk_generated_best_diff(struct clk_rate_request *req,
tmp_rate = parent_rate;
else
tmp_rate = parent_rate / div;
if (tmp_rate < req->min_rate || tmp_rate > req->max_rate)
return;
tmp_diff = abs(req->rate - tmp_rate);
if (*best_diff < 0 || *best_diff >= tmp_diff) {


@@ -65,6 +65,7 @@ static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max)
} else {
/* copy only remaining bytes */
memcpy(data, &val, max - currsize);
break;
}
} while (currsize < max);
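
The added break above is the whole qcom-rng fix: the tail branch copies fewer than WORD_SZ bytes but never advances currsize, so the do/while condition could never become false. A standalone sketch of the loop shape (illustrative only, not the driver code):

#include <stdio.h>
#include <string.h>

#define WORD_SZ 4

static void read_words(unsigned char *data, unsigned int max)
{
	unsigned int currsize = 0;
	unsigned int val = 0x12345678;	/* stands in for a hardware read */

	do {
		if (max - currsize >= WORD_SZ) {
			memcpy(data + currsize, &val, WORD_SZ);
			currsize += WORD_SZ;
		} else {
			/* The tail copy does not advance currsize; without
			 * this break the loop condition stays true forever. */
			memcpy(data + currsize, &val, max - currsize);
			break;
		}
	} while (currsize < max);
}

int main(void)
{
	unsigned char buf[6];	/* 6 is not a multiple of WORD_SZ */
	read_words(buf, sizeof(buf));
	puts("done");
	return 0;
}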


@@ -384,8 +384,10 @@ static int stm32_crc_remove(struct platform_device *pdev)
struct stm32_crc *crc = platform_get_drvdata(pdev);
int ret = pm_runtime_get_sync(crc->dev);
if (ret < 0)
if (ret < 0) {
pm_runtime_put_noidle(crc->dev);
return ret;
}
spin_lock(&crc_list.lock);
list_del(&crc->list);


@@ -700,6 +700,9 @@ static int mvebu_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
unsigned long flags;
unsigned int on, off;
if (state->polarity != PWM_POLARITY_NORMAL)
return -EINVAL;
val = (unsigned long long) mvpwm->clk_rate * state->duty_cycle;
do_div(val, NSEC_PER_SEC);
if (val > UINT_MAX)


@@ -125,9 +125,13 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
{
struct vf610_gpio_port *port = gpiochip_get_data(chip);
unsigned long mask = BIT(gpio);
u32 val;
if (port->sdata && port->sdata->have_paddr)
vf610_gpio_writel(mask, port->gpio_base + GPIO_PDDR);
if (port->sdata && port->sdata->have_paddr) {
val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
val |= mask;
vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
}
vf610_gpio_set(chip, gpio, value);
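
The vf610 fix above swaps a blind register write for a read-modify-write, so configuring one line no longer clears the direction bits of every other line on the port. A generic sketch of the idiom against a fake register:

#include <stdint.h>
#include <stdio.h>

static uint32_t pddr = 0x00000005;	/* fake GPIO_PDDR with bits 0 and 2 set */

static void set_direction_output(unsigned int gpio)
{
	uint32_t mask = 1u << gpio;

	/* Read-modify-write: writing 'mask' alone would zero the
	 * direction bits of all the other lines. */
	uint32_t val = pddr;
	val |= mask;
	pddr = val;
}

int main(void)
{
	set_direction_output(3);
	printf("0x%08x\n", pddr);	/* 0x0000000d: bits 0 and 2 survive */
	return 0;
}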


@@ -4792,6 +4792,7 @@ static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
drm_edid_get_monitor_name(mst_edid, name, namelen);
kfree(mst_edid);
}
/**


@@ -376,21 +376,6 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
return -EINVAL;
}
/*
* The port numbering and mapping here is bizarre. The now-obsolete
* swsci spec supports ports numbered [0..4]. Port E is handled as a
* special case, but port F and beyond are not. The functionality is
* supposed to be obsolete for new platforms. Just bail out if the port
* number is out of bounds after mapping.
*/
if (port > 4) {
drm_dbg_kms(&dev_priv->drm,
"[ENCODER:%d:%s] port %c (index %u) out of bounds for display power state notification\n",
intel_encoder->base.base.id, intel_encoder->base.name,
port_name(intel_encoder->port), port);
return -EINVAL;
}
if (!enable)
parm |= 4 << 8;


@@ -304,7 +304,8 @@ static int mtk_i2c_probe(struct platform_device *pdev)
if (i2c->bus_freq == 0) {
dev_warn(i2c->dev, "clock-frequency 0 not supported\n");
return -EINVAL;
ret = -EINVAL;
goto err_disable_clk;
}
adap = &i2c->adap;
@@ -322,10 +323,15 @@ static int mtk_i2c_probe(struct platform_device *pdev)
ret = i2c_add_adapter(adap);
if (ret < 0)
return ret;
goto err_disable_clk;
dev_info(&pdev->dev, "clock %u kHz\n", i2c->bus_freq / 1000);
return 0;
err_disable_clk:
clk_disable_unprepare(i2c->clk);
return ret;
}
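
Both changed paths in the mt7621 probe now funnel through a single err_disable_clk label, the usual goto-unwind idiom for releasing a resource on failure. A compact userspace sketch of the idiom (hypothetical acquire/configure helpers):

#include <stdio.h>

static int acquire(void) { return 0; }			/* 0 on success */
static void release(void) { puts("released"); }
static int configure(void) { return -1; }		/* forced failure */

static int probe(void)
{
	int ret = acquire();
	if (ret)
		return ret;

	ret = configure();
	if (ret)
		goto err_release;	/* undo acquire() before returning */

	return 0;

err_release:
	release();
	return ret;
}

int main(void) { return probe() ? 1 : 0; }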


@@ -47,6 +47,17 @@ static DEFINE_MUTEX(input_mutex);
static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 };
static const unsigned int input_max_code[EV_CNT] = {
[EV_KEY] = KEY_MAX,
[EV_REL] = REL_MAX,
[EV_ABS] = ABS_MAX,
[EV_MSC] = MSC_MAX,
[EV_SW] = SW_MAX,
[EV_LED] = LED_MAX,
[EV_SND] = SND_MAX,
[EV_FF] = FF_MAX,
};
static inline int is_event_supported(unsigned int code,
unsigned long *bm, unsigned int max)
{
@@ -1976,6 +1987,14 @@ EXPORT_SYMBOL(input_get_timestamp);
*/
void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code)
{
if (type < EV_CNT && input_max_code[type] &&
code > input_max_code[type]) {
pr_err("%s: invalid code %u for type %u\n", __func__, code,
type);
dump_stack();
return;
}
switch (type) {
case EV_KEY:
__set_bit(code, dev->keybit);

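The input change above pairs a per-type table of maximum event codes with an up-front check, so a bad code is rejected before any bitmap is touched. A small standalone sketch of the same table-driven validation (made-up limits, trimmed type list):

#include <stdio.h>

enum { EV_KEY, EV_REL, EV_ABS, EV_CNT };

static const unsigned int input_max_code[EV_CNT] = {
	[EV_KEY] = 0x2ff,
	[EV_REL] = 0x0f,
	[EV_ABS] = 0x3f,
};

static int set_capability(unsigned int type, unsigned int code)
{
	/* Reject out-of-range codes here rather than letting a later
	 * __set_bit() scribble past the per-type bitmap. */
	if (type >= EV_CNT ||
	    (input_max_code[type] && code > input_max_code[type])) {
		fprintf(stderr, "invalid code %u for type %u\n", code, type);
		return -1;
	}
	return 0;	/* the real function sets the matching bitmap bit */
}

int main(void)
{
	printf("%d\n", set_capability(EV_REL, 0x10));	/* rejected */
	printf("%d\n", set_capability(EV_REL, 0x0e));	/* accepted */
	return 0;
}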

@@ -420,9 +420,9 @@ static int ili210x_i2c_probe(struct i2c_client *client,
if (error)
return error;
usleep_range(50, 100);
usleep_range(12000, 15000);
gpiod_set_value_cansleep(reset_gpio, 0);
msleep(100);
msleep(160);
}
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);


@@ -339,11 +339,11 @@ static int stmfts_input_open(struct input_dev *dev)
err = pm_runtime_get_sync(&sdata->client->dev);
if (err < 0)
return err;
goto out;
err = i2c_smbus_write_byte(sdata->client, STMFTS_MS_MT_SENSE_ON);
if (err)
return err;
goto out;
mutex_lock(&sdata->mutex);
sdata->running = true;
@@ -366,7 +366,9 @@ static int stmfts_input_open(struct input_dev *dev)
"failed to enable touchkey\n");
}
return 0;
out:
pm_runtime_put_noidle(&sdata->client->dev);
return err;
}
static void stmfts_input_close(struct input_dev *dev)


@@ -345,7 +345,6 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
int budget)
{
struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
bool is_rsc_completed = true;
int err = 0;
for (; (self->sw_head != self->hw_head) && budget;
@@ -363,12 +362,17 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
continue;
if (!buff->is_eop) {
unsigned int frag_cnt = 0U;
buff_ = buff;
do {
bool is_rsc_completed = true;
if (buff_->next >= self->size) {
err = -EIO;
goto err_exit;
}
frag_cnt++;
next_ = buff_->next,
buff_ = &self->buff_ring[next_];
is_rsc_completed =
@@ -376,18 +380,17 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
next_,
self->hw_head);
if (unlikely(!is_rsc_completed))
break;
if (unlikely(!is_rsc_completed) ||
frag_cnt > MAX_SKB_FRAGS) {
err = 0;
goto err_exit;
}
buff->is_error |= buff_->is_error;
buff->is_cso_err |= buff_->is_cso_err;
} while (!buff_->is_eop);
if (!is_rsc_completed) {
err = 0;
goto err_exit;
}
if (buff->is_error ||
(buff->is_lro && buff->is_cso_err)) {
buff_ = buff;
@@ -445,7 +448,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
ALIGN(hdr_len, sizeof(long)));
if (buff->len - hdr_len > 0) {
skb_add_rx_frag(skb, 0, buff->rxdata.page,
skb_add_rx_frag(skb, i++, buff->rxdata.page,
buff->rxdata.pg_off + hdr_len,
buff->len - hdr_len,
AQ_CFG_RX_FRAME_MAX);
@@ -454,7 +457,6 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
if (!buff->is_eop) {
buff_ = buff;
i = 1U;
do {
next_ = buff_->next;
buff_ = &self->buff_ring[next_];


@@ -889,6 +889,13 @@ int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
err = -ENXIO;
goto err_exit;
}
/* Validate that the new hw_head_ is reasonable. */
if (hw_head_ >= ring->size) {
err = -ENXIO;
goto err_exit;
}
ring->hw_head = hw_head_;
err = aq_hw_err_from_flags(self);
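
The new range check above treats hw_head_ as untrusted input from the device instead of indexing the ring with it blindly. A minimal sketch of validating a device-reported index (hypothetical ring size):

#include <stdio.h>

#define RING_SIZE 64

static int update_head(unsigned int *hw_head, unsigned int reported)
{
	/* Never index the ring with a value the hardware could have
	 * corrupted; reject anything outside [0, RING_SIZE). */
	if (reported >= RING_SIZE)
		return -1;	/* -ENXIO in the driver */

	*hw_head = reported;
	return 0;
}

int main(void)
{
	unsigned int head = 0;
	printf("%d\n", update_head(&head, 63));		/* ok */
	printf("%d\n", update_head(&head, 4096));	/* rejected */
	return 0;
}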


@@ -2592,8 +2592,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
device_set_wakeup_capable(&pdev->dev, 1);
priv->wol_clk = devm_clk_get_optional(&pdev->dev, "sw_sysportwol");
if (IS_ERR(priv->wol_clk))
return PTR_ERR(priv->wol_clk);
if (IS_ERR(priv->wol_clk)) {
ret = PTR_ERR(priv->wol_clk);
goto err_deregister_fixed_link;
}
/* Set the needed headroom once and for all */
BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);


@@ -1092,7 +1092,6 @@ static void gem_rx_refill(struct macb_queue *queue)
/* Make hw descriptor updates visible to CPU */
rmb();
queue->rx_prepared_head++;
desc = macb_rx_desc(queue, entry);
if (!queue->rx_skbuff[entry]) {
@@ -1131,6 +1130,7 @@ static void gem_rx_refill(struct macb_queue *queue)
dma_wmb();
desc->addr &= ~MACB_BIT(RX_USED);
}
queue->rx_prepared_head++;
}
/* Make descriptor updates visible to hardware */


@@ -1396,8 +1396,10 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* alloc_etherdev ensures aligned and zeroed private structures */
dev = alloc_etherdev (sizeof (*tp));
if (!dev)
if (!dev) {
pci_disable_device(pdev);
return -ENOMEM;
}
SET_NETDEV_DEV(dev, &pdev->dev);
if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
@@ -1774,6 +1776,7 @@ err_out_free_res:
err_out_free_netdev:
free_netdev (dev);
pci_disable_device(pdev);
return -ENODEV;
}


@@ -5271,9 +5271,10 @@ static int ice_up_complete(struct ice_vsi *vsi)
netif_carrier_on(vsi->netdev);
}
/* clear this now, and the first stats read will be used as baseline */
vsi->stat_offsets_loaded = false;
/* Perform an initial read of the statistics registers now to
* set the baseline so counters are ready when interface is up
*/
ice_update_eth_stats(vsi);
ice_service_task_schedule(pf);
return 0;


@@ -5499,7 +5499,8 @@ static void igb_watchdog_task(struct work_struct *work)
break;
}
if (adapter->link_speed != SPEED_1000)
if (adapter->link_speed != SPEED_1000 ||
!hw->phy.ops.read_reg)
goto no_wait;
/* wait for Remote receiver status OK */


@@ -187,15 +187,7 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw)
igc_check_for_copper_link(hw);
/* Verify phy id and set remaining function pointers */
switch (phy->id) {
case I225_I_PHY_ID:
phy->type = igc_phy_i225;
break;
default:
ret_val = -IGC_ERR_PHY;
goto out;
}
out:
return ret_val;


@@ -22,6 +22,7 @@
#define IGC_DEV_ID_I220_V 0x15F7
#define IGC_DEV_ID_I225_K 0x3100
#define IGC_DEV_ID_I225_K2 0x3101
#define IGC_DEV_ID_I226_K 0x3102
#define IGC_DEV_ID_I225_LMVP 0x5502
#define IGC_DEV_ID_I225_IT 0x0D9F
#define IGC_DEV_ID_I226_LM 0x125B


@@ -4177,20 +4177,12 @@ bool igc_has_link(struct igc_adapter *adapter)
* false until the igc_check_for_link establishes link
* for copper adapters ONLY
*/
switch (hw->phy.media_type) {
case igc_media_type_copper:
if (!hw->mac.get_link_status)
return true;
hw->mac.ops.check_for_link(hw);
link_active = !hw->mac.get_link_status;
break;
default:
case igc_media_type_unknown:
break;
}
if (hw->mac.type == igc_i225 &&
hw->phy.id == I225_I_PHY_ID) {
if (hw->mac.type == igc_i225) {
if (!netif_carrier_ok(adapter->netdev)) {
adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
} else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) {


@@ -249,8 +249,7 @@ static s32 igc_phy_setup_autoneg(struct igc_hw *hw)
return ret_val;
}
if ((phy->autoneg_mask & ADVERTISE_2500_FULL) &&
hw->phy.id == I225_I_PHY_ID) {
if (phy->autoneg_mask & ADVERTISE_2500_FULL) {
/* Read the MULTI GBT AN Control Register - reg 7.32 */
ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
MMD_DEVADDR_SHIFT) |
@@ -390,8 +389,7 @@ static s32 igc_phy_setup_autoneg(struct igc_hw *hw)
ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL,
mii_1000t_ctrl_reg);
if ((phy->autoneg_mask & ADVERTISE_2500_FULL) &&
hw->phy.id == I225_I_PHY_ID)
if (phy->autoneg_mask & ADVERTISE_2500_FULL)
ret_val = phy->ops.write_reg(hw,
(STANDARD_AN_REG_MASK <<
MMD_DEVADDR_SHIFT) |


@@ -4009,6 +4009,13 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
}
}
if (params->xdp_prog) {
if (features & NETIF_F_LRO) {
netdev_warn(netdev, "LRO is incompatible with XDP\n");
features &= ~NETIF_F_LRO;
}
}
if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
features &= ~NETIF_F_RXHASH;
if (netdev->features & NETIF_F_RXHASH)


@@ -3628,7 +3628,8 @@ static void ql_reset_work(struct work_struct *work)
qdev->mem_map_registers;
unsigned long hw_flags;
if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) {
if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
test_bit(QL_RESET_START, &qdev->flags)) {
clear_bit(QL_LINK_MASTER, &qdev->flags);
/*


@@ -175,7 +175,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
return -ENOMEM;
/* Enable pci device */
ret = pci_enable_device(pdev);
ret = pcim_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
__func__);
@@ -227,8 +227,6 @@ static void stmmac_pci_remove(struct pci_dev *pdev)
pcim_iounmap_regions(pdev, BIT(i));
break;
}
pci_disable_device(pdev);
}
static int __maybe_unused stmmac_pci_suspend(struct device *dev)


@@ -1209,9 +1209,10 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
struct gsi_event *event_done;
struct gsi_event *event;
struct gsi_trans *trans;
u32 trans_count = 0;
u32 byte_count = 0;
u32 old_index;
u32 event_avail;
u32 old_index;
trans_info = &channel->trans_info;
@@ -1232,6 +1233,7 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
do {
trans->len = __le16_to_cpu(event->len);
byte_count += trans->len;
trans_count++;
/* Move on to the next event and transaction */
if (--event_avail)
@@ -1243,7 +1245,7 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
/* We record RX bytes when they are received */
channel->byte_count += byte_count;
channel->trans_count++;
channel->trans_count += trans_count;
}
/* Initialize a ring, including allocating DMA memory for its entries */


@@ -595,6 +595,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
if (dma_mapping_error(&adapter->pdev->dev,
rbi->dma_addr)) {
dev_kfree_skb_any(rbi->skb);
rbi->skb = NULL;
rq->stats.rx_buf_alloc_failure++;
break;
}
@@ -619,6 +620,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
if (dma_mapping_error(&adapter->pdev->dev,
rbi->dma_addr)) {
put_page(rbi->page);
rbi->page = NULL;
rq->stats.rx_buf_alloc_failure++;
break;
}
@@ -1654,6 +1656,10 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
u32 i, ring_idx;
struct Vmxnet3_RxDesc *rxd;
/* ring has already been cleaned up */
if (!rq->rx_ring[0].base)
return;
for (ring_idx = 0; ring_idx < 2; ring_idx++) {
for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
#ifdef __BIG_ENDIAN_BITFIELD


@@ -4420,6 +4420,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
if (ctrl->queue_count > 1) {
nvme_queue_scan(ctrl);
nvme_start_queues(ctrl);
nvme_mpath_update(ctrl);
}
}
EXPORT_SYMBOL_GPL(nvme_start_ctrl);


@@ -484,8 +484,17 @@ static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
ns->ana_grpid = le32_to_cpu(desc->grpid);
ns->ana_state = desc->state;
clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
if (nvme_state_is_live(ns->ana_state))
/*
* nvme_mpath_set_live() will trigger I/O to the multipath path device
* and in turn to this path device. However we cannot accept this I/O
* if the controller is not live. This may deadlock if called from
* nvme_mpath_init_identify() and the ctrl will never complete
* initialization, preventing I/O from completing. For this case we
* will reprocess the ANA log page in nvme_mpath_update() once the
* controller is ready.
*/
if (nvme_state_is_live(ns->ana_state) &&
ns->ctrl->state == NVME_CTRL_LIVE)
nvme_mpath_set_live(ns);
}
@@ -572,6 +581,18 @@ static void nvme_ana_work(struct work_struct *work)
nvme_read_ana_log(ctrl);
}
void nvme_mpath_update(struct nvme_ctrl *ctrl)
{
u32 nr_change_groups = 0;
if (!ctrl->ana_log_buf)
return;
mutex_lock(&ctrl->ana_lock);
nvme_parse_ana_log(ctrl, &nr_change_groups, nvme_update_ana_state);
mutex_unlock(&ctrl->ana_lock);
}
static void nvme_anatt_timeout(struct timer_list *t)
{
struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);


@@ -712,6 +712,7 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_mpath_update(struct nvme_ctrl *ctrl);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
@@ -798,6 +799,9 @@ static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
return 0;
}
static inline void nvme_mpath_update(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
}


@@ -3265,7 +3265,10 @@ static const struct pci_device_id nvme_id_table[] = {
NVME_QUIRK_128_BYTES_SQES |
NVME_QUIRK_SHARED_TAGS |
NVME_QUIRK_SKIP_CID_GEN },
{ PCI_DEVICE(0x144d, 0xa808), /* Samsung X5 */
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY|
NVME_QUIRK_NO_DEEPEST_PS |
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
{ 0, }
};


@@ -2836,6 +2836,16 @@ static const struct dmi_system_id bridge_d3_blacklist[] = {
DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
},
{
/*
* Downstream device is not accessible after putting a root port
* into D3cold and back into D0 on Elo i2.
*/
.ident = "Elo i2",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Elo Touch Solutions"),
DMI_MATCH(DMI_PRODUCT_NAME, "Elo i2"),
DMI_MATCH(DMI_PRODUCT_VERSION, "RevB"),
},
},
#endif
{ }


@@ -1224,18 +1224,12 @@ FUNC_GROUP_DECL(SALT8, AA12);
FUNC_GROUP_DECL(WDTRST4, AA12);
#define AE12 196
SIG_EXPR_LIST_DECL_SEMG(AE12, FWSPIDQ2, FWQSPID, FWSPID,
SIG_DESC_SET(SCU438, 4));
SIG_EXPR_LIST_DECL_SESG(AE12, GPIOY4, GPIOY4);
PIN_DECL_(AE12, SIG_EXPR_LIST_PTR(AE12, FWSPIDQ2),
SIG_EXPR_LIST_PTR(AE12, GPIOY4));
PIN_DECL_(AE12, SIG_EXPR_LIST_PTR(AE12, GPIOY4));
#define AF12 197
SIG_EXPR_LIST_DECL_SEMG(AF12, FWSPIDQ3, FWQSPID, FWSPID,
SIG_DESC_SET(SCU438, 5));
SIG_EXPR_LIST_DECL_SESG(AF12, GPIOY5, GPIOY5);
PIN_DECL_(AF12, SIG_EXPR_LIST_PTR(AF12, FWSPIDQ3),
SIG_EXPR_LIST_PTR(AF12, GPIOY5));
PIN_DECL_(AF12, SIG_EXPR_LIST_PTR(AF12, GPIOY5));
#define AC12 198
SSSF_PIN_DECL(AC12, GPIOY6, FWSPIABR, SIG_DESC_SET(SCU438, 6));
@@ -1508,9 +1502,8 @@ SIG_EXPR_LIST_DECL_SEMG(Y4, EMMCDAT7, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 3));
PIN_DECL_3(Y4, GPIO18E3, FWSPIDMISO, VBMISO, EMMCDAT7);
GROUP_DECL(FWSPID, Y1, Y2, Y3, Y4);
GROUP_DECL(FWQSPID, Y1, Y2, Y3, Y4, AE12, AF12);
GROUP_DECL(EMMCG8, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5, Y1, Y2, Y3, Y4);
FUNC_DECL_2(FWSPID, FWSPID, FWQSPID);
FUNC_DECL_1(FWSPID, FWSPID);
FUNC_GROUP_DECL(VB, Y1, Y2, Y3, Y4);
FUNC_DECL_3(EMMC, EMMCG1, EMMCG4, EMMCG8);
/*
@@ -1906,7 +1899,6 @@ static const struct aspeed_pin_group aspeed_g6_groups[] = {
ASPEED_PINCTRL_GROUP(FSI2),
ASPEED_PINCTRL_GROUP(FWSPIABR),
ASPEED_PINCTRL_GROUP(FWSPID),
ASPEED_PINCTRL_GROUP(FWQSPID),
ASPEED_PINCTRL_GROUP(FWSPIWP),
ASPEED_PINCTRL_GROUP(GPIT0),
ASPEED_PINCTRL_GROUP(GPIT1),


@@ -25,6 +25,9 @@
#define CIRC_ADD(idx, size, value) (((idx) + (value)) & ((size) - 1))
/* waitqueue for log readers */
static DECLARE_WAIT_QUEUE_HEAD(cros_ec_debugfs_log_wq);
/**
* struct cros_ec_debugfs - EC debugging information.
*
@@ -33,7 +36,6 @@
* @log_buffer: circular buffer for console log information
* @read_msg: preallocated EC command and buffer to read console log
* @log_mutex: mutex to protect circular buffer
* @log_wq: waitqueue for log readers
* @log_poll_work: recurring task to poll EC for new console log data
* @panicinfo_blob: panicinfo debugfs blob
*/
@@ -44,7 +46,6 @@ struct cros_ec_debugfs {
struct circ_buf log_buffer;
struct cros_ec_command *read_msg;
struct mutex log_mutex;
wait_queue_head_t log_wq;
struct delayed_work log_poll_work;
/* EC panicinfo */
struct debugfs_blob_wrapper panicinfo_blob;
@@ -107,7 +108,7 @@ static void cros_ec_console_log_work(struct work_struct *__work)
buf_space--;
}
wake_up(&debug_info->log_wq);
wake_up(&cros_ec_debugfs_log_wq);
}
mutex_unlock(&debug_info->log_mutex);
@@ -141,7 +142,7 @@ static ssize_t cros_ec_console_log_read(struct file *file, char __user *buf,
mutex_unlock(&debug_info->log_mutex);
ret = wait_event_interruptible(debug_info->log_wq,
ret = wait_event_interruptible(cros_ec_debugfs_log_wq,
CIRC_CNT(cb->head, cb->tail, LOG_SIZE));
if (ret < 0)
return ret;
@@ -173,7 +174,7 @@ static __poll_t cros_ec_console_log_poll(struct file *file,
struct cros_ec_debugfs *debug_info = file->private_data;
__poll_t mask = 0;
poll_wait(file, &debug_info->log_wq, wait);
poll_wait(file, &cros_ec_debugfs_log_wq, wait);
mutex_lock(&debug_info->log_mutex);
if (CIRC_CNT(debug_info->log_buffer.head,
@@ -377,7 +378,6 @@ static int cros_ec_create_console_log(struct cros_ec_debugfs *debug_info)
debug_info->log_buffer.tail = 0;
mutex_init(&debug_info->log_mutex);
init_waitqueue_head(&debug_info->log_wq);
debugfs_create_file("console_log", S_IFREG | 0444, debug_info->dir,
debug_info, &cros_ec_console_log_fops);


@@ -26,6 +26,15 @@ struct class *rtc_class;
static void rtc_device_release(struct device *dev)
{
struct rtc_device *rtc = to_rtc_device(dev);
struct timerqueue_head *head = &rtc->timerqueue;
struct timerqueue_node *node;
mutex_lock(&rtc->ops_lock);
while ((node = timerqueue_getnext(head)))
timerqueue_del(head, node);
mutex_unlock(&rtc->ops_lock);
cancel_work_sync(&rtc->irqwork);
ida_simple_remove(&rtc_ida, rtc->id);
kfree(rtc);


@@ -99,6 +99,17 @@ unsigned int mc146818_get_time(struct rtc_time *time)
}
EXPORT_SYMBOL_GPL(mc146818_get_time);
/* AMD systems don't allow access to AltCentury with DV1 */
static bool apply_amd_register_a_behavior(void)
{
#ifdef CONFIG_X86
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
return true;
#endif
return false;
}
/* Set the current date and time in the real time clock. */
int mc146818_set_time(struct rtc_time *time)
{
@@ -172,6 +183,9 @@ int mc146818_set_time(struct rtc_time *time)
save_control = CMOS_READ(RTC_CONTROL);
CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
if (apply_amd_register_a_behavior())
CMOS_WRITE((save_freq_select & ~RTC_AMD_BANK_SELECT), RTC_FREQ_SELECT);
else
CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
#ifdef CONFIG_MACH_DECSTATION


@@ -366,7 +366,8 @@ static int pcf2127_watchdog_init(struct device *dev, struct pcf2127 *pcf2127)
static int pcf2127_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
unsigned int buf[5], ctrl2;
u8 buf[5];
unsigned int ctrl2;
int ret;
ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL2, &ctrl2);


@@ -138,7 +138,7 @@ struct sun6i_rtc_dev {
const struct sun6i_rtc_clk_data *data;
void __iomem *base;
int irq;
unsigned long alarm;
time64_t alarm;
struct clk_hw hw;
struct clk_hw *int_osc;
@@ -510,10 +510,8 @@ static int sun6i_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
struct sun6i_rtc_dev *chip = dev_get_drvdata(dev);
struct rtc_time *alrm_tm = &wkalrm->time;
struct rtc_time tm_now;
unsigned long time_now = 0;
unsigned long time_set = 0;
unsigned long time_gap = 0;
int ret = 0;
time64_t time_now, time_set;
int ret;
ret = sun6i_rtc_gettime(dev, &tm_now);
if (ret < 0) {
@@ -528,9 +526,7 @@ static int sun6i_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
return -EINVAL;
}
time_gap = time_set - time_now;
if (time_gap > U32_MAX) {
if ((time_set - time_now) > U32_MAX) {
dev_err(dev, "Date too far in the future\n");
return -EINVAL;
}
@@ -539,7 +535,7 @@ static int sun6i_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
writel(0, chip->base + SUN6I_ALRM_COUNTER);
usleep_range(100, 300);
writel(time_gap, chip->base + SUN6I_ALRM_COUNTER);
writel(time_set - time_now, chip->base + SUN6I_ALRM_COUNTER);
chip->alarm = time_set;
sun6i_rtc_setaie(wkalrm->enabled, chip);
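
With chip->alarm widened to time64_t above, the alarm distance is computed in 64 bits and range-checked before it reaches the 32-bit SUN6I_ALRM_COUNTER register. A standalone sketch of that overflow guard (arbitrary example values):

#include <stdint.h>
#include <stdio.h>

static int program_alarm(int64_t time_set, int64_t time_now, uint32_t *counter)
{
	/* Compute the gap in 64 bits first, then check that it fits the
	 * 32-bit counter; doing the subtraction in 'unsigned long' would
	 * silently truncate on 32-bit machines. */
	if (time_set <= time_now)
		return -1;	/* alarm must lie in the future */
	if (time_set - time_now > UINT32_MAX)
		return -1;	/* "Date too far in the future" */

	*counter = (uint32_t)(time_set - time_now);
	return 0;
}

int main(void)
{
	uint32_t c;
	printf("%d\n", program_alarm(4000000000LL, 1000, &c));	/* fits */
	printf("%d\n", program_alarm(8000000000LL, 1000, &c));	/* too far */
	return 0;
}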


@@ -3773,6 +3773,9 @@ int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
spin_lock_irqsave(&cmd->cmd_lock, flags);
if (cmd->aborted) {
if (cmd->sg_mapped)
qlt_unmap_sg(vha, cmd);
spin_unlock_irqrestore(&cmd->cmd_lock, flags);
/*
* It's normal to see 2 calls in this path:


@@ -144,6 +144,7 @@ enum dev_state {
STATE_DEV_INVALID = 0,
STATE_DEV_OPENED,
STATE_DEV_INITIALIZED,
STATE_DEV_REGISTERING,
STATE_DEV_RUNNING,
STATE_DEV_CLOSED,
STATE_DEV_FAILED
@@ -507,6 +508,7 @@ static int raw_ioctl_run(struct raw_dev *dev, unsigned long value)
ret = -EINVAL;
goto out_unlock;
}
dev->state = STATE_DEV_REGISTERING;
spin_unlock_irqrestore(&dev->lock, flags);
ret = usb_gadget_probe_driver(&dev->driver);


@@ -1450,13 +1450,9 @@ err:
return ERR_PTR(r);
}
static struct ptr_ring *get_tap_ptr_ring(int fd)
static struct ptr_ring *get_tap_ptr_ring(struct file *file)
{
struct ptr_ring *ring;
struct file *file = fget(fd);
if (!file)
return NULL;
ring = tun_get_tx_ring(file);
if (!IS_ERR(ring))
goto out;
@@ -1465,7 +1461,6 @@ static struct ptr_ring *get_tap_ptr_ring(int fd)
goto out;
ring = NULL;
out:
fput(file);
return ring;
}
@@ -1552,8 +1547,12 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
r = vhost_net_enable_vq(n, vq);
if (r)
goto err_used;
if (index == VHOST_NET_VQ_RX)
nvq->rx_ring = get_tap_ptr_ring(fd);
if (index == VHOST_NET_VQ_RX) {
if (sock)
nvq->rx_ring = get_tap_ptr_ring(sock->file);
else
nvq->rx_ring = NULL;
}
oldubufs = nvq->ubufs;
nvq->ubufs = ubufs;


@@ -97,8 +97,11 @@ static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
return;
irq = ops->get_vq_irq(vdpa, qid);
if (irq < 0)
return;
irq_bypass_unregister_producer(&vq->call_ctx.producer);
if (!vq->call_ctx.ctx || irq < 0)
if (!vq->call_ctx.ctx)
return;
vq->call_ctx.producer.token = vq->call_ctx.ctx;


@@ -729,10 +729,22 @@ int afs_getattr(const struct path *path, struct kstat *stat,
{
struct inode *inode = d_inode(path->dentry);
struct afs_vnode *vnode = AFS_FS_I(inode);
int seq = 0;
struct key *key;
int ret, seq = 0;
_enter("{ ino=%lu v=%u }", inode->i_ino, inode->i_generation);
if (!(query_flags & AT_STATX_DONT_SYNC) &&
!test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
key = afs_request_key(vnode->volume->cell);
if (IS_ERR(key))
return PTR_ERR(key);
ret = afs_validate(vnode, key);
key_put(key);
if (ret < 0)
return ret;
}
do {
read_seqbegin_or_lock(&vnode->cb_lock, &seq);
generic_fillattr(inode, stat);


@@ -858,14 +858,16 @@ static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
return ret;
iocb->ki_flags &= ~IOCB_DIRECT;
}
pagefault_disable();
iocb->ki_flags |= IOCB_NOIO;
ret = generic_file_read_iter(iocb, to);
iocb->ki_flags &= ~IOCB_NOIO;
pagefault_enable();
if (ret >= 0) {
if (!iov_iter_count(to))
return ret;
written = ret;
} else {
} else if (ret != -EFAULT) {
if (ret != -EAGAIN)
return ret;
if (iocb->ki_flags & IOCB_NOWAIT)


@@ -4252,12 +4252,8 @@ static int io_statx(struct io_kiocb *req, bool force_nonblock)
struct io_statx *ctx = &req->statx;
int ret;
if (force_nonblock) {
/* only need file table for an actual valid fd */
if (ctx->dfd == -1 || ctx->dfd == AT_FDCWD)
req->flags |= REQ_F_NO_FILE_TABLE;
if (force_nonblock)
return -EAGAIN;
}
ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
ctx->buffer);


@@ -170,7 +170,7 @@ int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo,
if (*len == 0)
return -EINVAL;
if (start > maxbytes)
if (start >= maxbytes)
return -EFBIG;
/*


@@ -20,6 +20,23 @@
#include "page.h"
#include "btnode.h"
/**
* nilfs_init_btnc_inode - initialize B-tree node cache inode
* @btnc_inode: inode to be initialized
*
* nilfs_init_btnc_inode() sets up an inode for B-tree node cache.
*/
void nilfs_init_btnc_inode(struct inode *btnc_inode)
{
struct nilfs_inode_info *ii = NILFS_I(btnc_inode);
btnc_inode->i_mode = S_IFREG;
ii->i_flags = 0;
memset(&ii->i_bmap_data, 0, sizeof(struct nilfs_bmap));
mapping_set_gfp_mask(btnc_inode->i_mapping, GFP_NOFS);
}
void nilfs_btnode_cache_clear(struct address_space *btnc)
{
invalidate_mapping_pages(btnc, 0, -1);
@@ -29,7 +46,7 @@ void nilfs_btnode_cache_clear(struct address_space *btnc)
struct buffer_head *
nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
{
struct inode *inode = NILFS_BTNC_I(btnc);
struct inode *inode = btnc->host;
struct buffer_head *bh;
bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
@@ -57,7 +74,7 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
struct buffer_head **pbh, sector_t *submit_ptr)
{
struct buffer_head *bh;
struct inode *inode = NILFS_BTNC_I(btnc);
struct inode *inode = btnc->host;
struct page *page;
int err;
@@ -157,7 +174,7 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc,
struct nilfs_btnode_chkey_ctxt *ctxt)
{
struct buffer_head *obh, *nbh;
struct inode *inode = NILFS_BTNC_I(btnc);
struct inode *inode = btnc->host;
__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
int err;


@@ -30,6 +30,7 @@ struct nilfs_btnode_chkey_ctxt {
struct buffer_head *newbh;
};
void nilfs_init_btnc_inode(struct inode *btnc_inode);
void nilfs_btnode_cache_clear(struct address_space *);
struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc,
__u64 blocknr);


@@ -58,7 +58,8 @@ static void nilfs_btree_free_path(struct nilfs_btree_path *path)
static int nilfs_btree_get_new_block(const struct nilfs_bmap *btree,
__u64 ptr, struct buffer_head **bhp)
{
struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache;
struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode;
struct address_space *btnc = btnc_inode->i_mapping;
struct buffer_head *bh;
bh = nilfs_btnode_create_block(btnc, ptr);
@@ -470,7 +471,8 @@ static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
struct buffer_head **bhp,
const struct nilfs_btree_readahead_info *ra)
{
struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache;
struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode;
struct address_space *btnc = btnc_inode->i_mapping;
struct buffer_head *bh, *ra_bh;
sector_t submit_ptr = 0;
int ret;
@@ -1742,6 +1744,10 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *btree, __u64 key,
dat = nilfs_bmap_get_dat(btree);
}
ret = nilfs_attach_btree_node_cache(&NILFS_BMAP_I(btree)->vfs_inode);
if (ret < 0)
return ret;
ret = nilfs_bmap_prepare_alloc_ptr(btree, dreq, dat);
if (ret < 0)
return ret;
@@ -1914,7 +1920,7 @@ static int nilfs_btree_prepare_update_v(struct nilfs_bmap *btree,
path[level].bp_ctxt.newkey = path[level].bp_newreq.bpr_ptr;
path[level].bp_ctxt.bh = path[level].bp_bh;
ret = nilfs_btnode_prepare_change_key(
&NILFS_BMAP_I(btree)->i_btnode_cache,
NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
&path[level].bp_ctxt);
if (ret < 0) {
nilfs_dat_abort_update(dat,
@@ -1940,7 +1946,7 @@ static void nilfs_btree_commit_update_v(struct nilfs_bmap *btree,
if (buffer_nilfs_node(path[level].bp_bh)) {
nilfs_btnode_commit_change_key(
&NILFS_BMAP_I(btree)->i_btnode_cache,
NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
&path[level].bp_ctxt);
path[level].bp_bh = path[level].bp_ctxt.bh;
}
@@ -1959,7 +1965,7 @@ static void nilfs_btree_abort_update_v(struct nilfs_bmap *btree,
&path[level].bp_newreq.bpr_req);
if (buffer_nilfs_node(path[level].bp_bh))
nilfs_btnode_abort_change_key(
&NILFS_BMAP_I(btree)->i_btnode_cache,
NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
&path[level].bp_ctxt);
}
@@ -2135,7 +2141,8 @@ static void nilfs_btree_add_dirty_buffer(struct nilfs_bmap *btree,
static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap *btree,
struct list_head *listp)
{
struct address_space *btcache = &NILFS_BMAP_I(btree)->i_btnode_cache;
struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode;
struct address_space *btcache = btnc_inode->i_mapping;
struct list_head lists[NILFS_BTREE_LEVEL_MAX];
struct pagevec pvec;
struct buffer_head *bh, *head;
@@ -2189,12 +2196,12 @@ static int nilfs_btree_assign_p(struct nilfs_bmap *btree,
path[level].bp_ctxt.newkey = blocknr;
path[level].bp_ctxt.bh = *bh;
ret = nilfs_btnode_prepare_change_key(
&NILFS_BMAP_I(btree)->i_btnode_cache,
NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
&path[level].bp_ctxt);
if (ret < 0)
return ret;
nilfs_btnode_commit_change_key(
&NILFS_BMAP_I(btree)->i_btnode_cache,
NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
&path[level].bp_ctxt);
*bh = path[level].bp_ctxt.bh;
}
@@ -2399,6 +2406,10 @@ int nilfs_btree_init(struct nilfs_bmap *bmap)
if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap), bmap->b_inode))
ret = -EIO;
else
ret = nilfs_attach_btree_node_cache(
&NILFS_BMAP_I(bmap)->vfs_inode);
return ret;
}


@@ -497,7 +497,9 @@ int nilfs_dat_read(struct super_block *sb, size_t entry_size,
di = NILFS_DAT_I(dat);
lockdep_set_class(&di->mi.mi_sem, &dat_lock_key);
nilfs_palloc_setup_cache(dat, &di->palloc_cache);
nilfs_mdt_setup_shadow_map(dat, &di->shadow);
err = nilfs_mdt_setup_shadow_map(dat, &di->shadow);
if (err)
goto failed;
err = nilfs_read_inode_common(dat, raw_inode);
if (err)


@@ -126,9 +126,10 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn,
__u64 vbn, struct buffer_head **out_bh)
{
struct inode *btnc_inode = NILFS_I(inode)->i_assoc_inode;
int ret;
ret = nilfs_btnode_submit_block(&NILFS_I(inode)->i_btnode_cache,
ret = nilfs_btnode_submit_block(btnc_inode->i_mapping,
vbn ? : pbn, pbn, REQ_OP_READ, 0,
out_bh, &pbn);
if (ret == -EEXIST) /* internal code (cache hit) */
@@ -170,7 +171,7 @@ int nilfs_init_gcinode(struct inode *inode)
ii->i_flags = 0;
nilfs_bmap_init_gc(ii->i_bmap);
return 0;
return nilfs_attach_btree_node_cache(inode);
}
/**
@@ -185,7 +186,7 @@ void nilfs_remove_all_gcinodes(struct the_nilfs *nilfs)
ii = list_first_entry(head, struct nilfs_inode_info, i_dirty);
list_del_init(&ii->i_dirty);
truncate_inode_pages(&ii->vfs_inode.i_data, 0);
nilfs_btnode_cache_clear(&ii->i_btnode_cache);
nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping);
iput(&ii->vfs_inode);
}
}


@@ -29,12 +29,16 @@
* @cno: checkpoint number
* @root: pointer on NILFS root object (mounted checkpoint)
* @for_gc: inode for GC flag
* @for_btnc: inode for B-tree node cache flag
* @for_shadow: inode for shadowed page cache flag
*/
struct nilfs_iget_args {
u64 ino;
__u64 cno;
struct nilfs_root *root;
int for_gc;
bool for_gc;
bool for_btnc;
bool for_shadow;
};
static int nilfs_iget_test(struct inode *inode, void *opaque);
@@ -314,7 +318,8 @@ static int nilfs_insert_inode_locked(struct inode *inode,
unsigned long ino)
{
struct nilfs_iget_args args = {
.ino = ino, .root = root, .cno = 0, .for_gc = 0
.ino = ino, .root = root, .cno = 0, .for_gc = false,
.for_btnc = false, .for_shadow = false
};
return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
@@ -527,6 +532,19 @@ static int nilfs_iget_test(struct inode *inode, void *opaque)
return 0;
ii = NILFS_I(inode);
if (test_bit(NILFS_I_BTNC, &ii->i_state)) {
if (!args->for_btnc)
return 0;
} else if (args->for_btnc) {
return 0;
}
if (test_bit(NILFS_I_SHADOW, &ii->i_state)) {
if (!args->for_shadow)
return 0;
} else if (args->for_shadow) {
return 0;
}
if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
return !args->for_gc;
@@ -538,15 +556,17 @@ static int nilfs_iget_set(struct inode *inode, void *opaque)
struct nilfs_iget_args *args = opaque;
inode->i_ino = args->ino;
if (args->for_gc) {
NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
NILFS_I(inode)->i_cno = args->cno;
NILFS_I(inode)->i_root = NULL;
} else {
NILFS_I(inode)->i_root = args->root;
if (args->root && args->ino == NILFS_ROOT_INO)
nilfs_get_root(args->root);
NILFS_I(inode)->i_root = args->root;
}
if (args->for_gc)
NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
if (args->for_btnc)
NILFS_I(inode)->i_state |= BIT(NILFS_I_BTNC);
if (args->for_shadow)
NILFS_I(inode)->i_state |= BIT(NILFS_I_SHADOW);
return 0;
}
@@ -554,7 +574,8 @@ struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
unsigned long ino)
{
struct nilfs_iget_args args = {
.ino = ino, .root = root, .cno = 0, .for_gc = 0
.ino = ino, .root = root, .cno = 0, .for_gc = false,
.for_btnc = false, .for_shadow = false
};
return ilookup5(sb, ino, nilfs_iget_test, &args);
@@ -564,7 +585,8 @@ struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
unsigned long ino)
{
struct nilfs_iget_args args = {
.ino = ino, .root = root, .cno = 0, .for_gc = 0
.ino = ino, .root = root, .cno = 0, .for_gc = false,
.for_btnc = false, .for_shadow = false
};
return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
@@ -595,7 +617,8 @@ struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
__u64 cno)
{
struct nilfs_iget_args args = {
- .ino = ino, .root = NULL, .cno = cno, .for_gc = 1
+ .ino = ino, .root = NULL, .cno = cno, .for_gc = true,
+ .for_btnc = false, .for_shadow = false
};
struct inode *inode;
int err;
@@ -615,6 +638,113 @@ struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
return inode;
}
/**
* nilfs_attach_btree_node_cache - attach a B-tree node cache to the inode
* @inode: inode object
*
* nilfs_attach_btree_node_cache() attaches a B-tree node cache to @inode,
* or does nothing if the inode already has it. This function allocates
* an additional inode to maintain the page cache of B-tree nodes one-on-one.
*
* Return Value: On success, 0 is returned. On errors, one of the following
* negative error codes is returned.
*
* %-ENOMEM - Insufficient memory available.
*/
int nilfs_attach_btree_node_cache(struct inode *inode)
{
struct nilfs_inode_info *ii = NILFS_I(inode);
struct inode *btnc_inode;
struct nilfs_iget_args args;
if (ii->i_assoc_inode)
return 0;
args.ino = inode->i_ino;
args.root = ii->i_root;
args.cno = ii->i_cno;
args.for_gc = test_bit(NILFS_I_GCINODE, &ii->i_state) != 0;
args.for_btnc = true;
args.for_shadow = test_bit(NILFS_I_SHADOW, &ii->i_state) != 0;
btnc_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
nilfs_iget_set, &args);
if (unlikely(!btnc_inode))
return -ENOMEM;
if (btnc_inode->i_state & I_NEW) {
nilfs_init_btnc_inode(btnc_inode);
unlock_new_inode(btnc_inode);
}
NILFS_I(btnc_inode)->i_assoc_inode = inode;
NILFS_I(btnc_inode)->i_bmap = ii->i_bmap;
ii->i_assoc_inode = btnc_inode;
return 0;
}
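Hedged illustration: with this change, code that previously took the embedded &ii->i_btnode_cache reaches the B-tree node page cache through the associated inode instead. nilfs_btnc_mapping below is a made-up helper name, not part of the patch:

/* Illustrative only: resolve the B-tree node page cache of @inode. */
static struct address_space *nilfs_btnc_mapping(struct inode *inode)
{
    struct inode *btnc_inode = NILFS_I(inode)->i_assoc_inode;

    return btnc_inode ? btnc_inode->i_mapping : NULL; /* NULL if not attached */
}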
/**
* nilfs_detach_btree_node_cache - detach the B-tree node cache from the inode
* @inode: inode object
*
* nilfs_detach_btree_node_cache() detaches the B-tree node cache and its
* holder inode bound to @inode, or does nothing if @inode doesn't have it.
*/
void nilfs_detach_btree_node_cache(struct inode *inode)
{
struct nilfs_inode_info *ii = NILFS_I(inode);
struct inode *btnc_inode = ii->i_assoc_inode;
if (btnc_inode) {
NILFS_I(btnc_inode)->i_assoc_inode = NULL;
ii->i_assoc_inode = NULL;
iput(btnc_inode);
}
}
/**
* nilfs_iget_for_shadow - obtain inode for shadow mapping
* @inode: inode object that uses shadow mapping
*
* nilfs_iget_for_shadow() allocates a pair of inodes that holds page
* caches for shadow mapping. The page cache for data pages is set up
* in one inode and the one for b-tree node pages is set up in the
* other inode, which is attached to the former inode.
*
* Return Value: On success, a pointer to the inode for data pages is
* returned. On errors, one of the following negative error codes is returned
* in a pointer type.
*
* %-ENOMEM - Insufficient memory available.
*/
struct inode *nilfs_iget_for_shadow(struct inode *inode)
{
struct nilfs_iget_args args = {
.ino = inode->i_ino, .root = NULL, .cno = 0, .for_gc = false,
.for_btnc = false, .for_shadow = true
};
struct inode *s_inode;
int err;
s_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
nilfs_iget_set, &args);
if (unlikely(!s_inode))
return ERR_PTR(-ENOMEM);
if (!(s_inode->i_state & I_NEW))
return inode;
NILFS_I(s_inode)->i_flags = 0;
memset(NILFS_I(s_inode)->i_bmap, 0, sizeof(struct nilfs_bmap));
mapping_set_gfp_mask(s_inode->i_mapping, GFP_NOFS);
err = nilfs_attach_btree_node_cache(s_inode);
if (unlikely(err)) {
iget_failed(s_inode);
return ERR_PTR(err);
}
unlock_new_inode(s_inode);
return s_inode;
}
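A hedged usage sketch, mirroring what nilfs_mdt_setup_shadow_map does further down in this diff (error handling shortened):

    /* Illustrative only: pair a metadata inode with its shadow. */
    struct inode *s_inode = nilfs_iget_for_shadow(inode);

    if (IS_ERR(s_inode))
        return PTR_ERR(s_inode);
    /* Data pages live in s_inode->i_mapping; B-tree node pages in
     * NILFS_I(s_inode)->i_assoc_inode->i_mapping. */
    shadow->inode = s_inode;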
void nilfs_write_inode_common(struct inode *inode,
struct nilfs_inode *raw_inode, int has_bmap)
{
@@ -762,7 +892,8 @@ static void nilfs_clear_inode(struct inode *inode)
if (test_bit(NILFS_I_BMAP, &ii->i_state))
nilfs_bmap_clear(ii->i_bmap);
- nilfs_btnode_cache_clear(&ii->i_btnode_cache);
+ if (!test_bit(NILFS_I_BTNC, &ii->i_state))
+ nilfs_detach_btree_node_cache(inode);
if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
nilfs_put_root(ii->i_root);


@@ -469,9 +469,18 @@ int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz)
void nilfs_mdt_clear(struct inode *inode)
{
struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
struct nilfs_shadow_map *shadow = mdi->mi_shadow;
if (mdi->mi_palloc_cache)
nilfs_palloc_destroy_cache(inode);
if (shadow) {
struct inode *s_inode = shadow->inode;
shadow->inode = NULL;
iput(s_inode);
mdi->mi_shadow = NULL;
}
}
/**
@@ -505,12 +514,15 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode,
struct nilfs_shadow_map *shadow)
{
struct nilfs_mdt_info *mi = NILFS_MDT(inode);
struct inode *s_inode;
INIT_LIST_HEAD(&shadow->frozen_buffers);
address_space_init_once(&shadow->frozen_data);
nilfs_mapping_init(&shadow->frozen_data, inode);
address_space_init_once(&shadow->frozen_btnodes);
nilfs_mapping_init(&shadow->frozen_btnodes, inode);
s_inode = nilfs_iget_for_shadow(inode);
if (IS_ERR(s_inode))
return PTR_ERR(s_inode);
shadow->inode = s_inode;
mi->mi_shadow = shadow;
return 0;
}
@@ -524,14 +536,15 @@ int nilfs_mdt_save_to_shadow_map(struct inode *inode)
struct nilfs_mdt_info *mi = NILFS_MDT(inode);
struct nilfs_inode_info *ii = NILFS_I(inode);
struct nilfs_shadow_map *shadow = mi->mi_shadow;
struct inode *s_inode = shadow->inode;
int ret;
- ret = nilfs_copy_dirty_pages(&shadow->frozen_data, inode->i_mapping);
+ ret = nilfs_copy_dirty_pages(s_inode->i_mapping, inode->i_mapping);
if (ret)
goto out;
- ret = nilfs_copy_dirty_pages(&shadow->frozen_btnodes,
- &ii->i_btnode_cache);
+ ret = nilfs_copy_dirty_pages(NILFS_I(s_inode)->i_assoc_inode->i_mapping,
+ ii->i_assoc_inode->i_mapping);
if (ret)
goto out;
@@ -547,7 +560,7 @@ int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh)
struct page *page;
int blkbits = inode->i_blkbits;
- page = grab_cache_page(&shadow->frozen_data, bh->b_page->index);
+ page = grab_cache_page(shadow->inode->i_mapping, bh->b_page->index);
if (!page)
return -ENOMEM;
@@ -579,7 +592,7 @@ nilfs_mdt_get_frozen_buffer(struct inode *inode, struct buffer_head *bh)
struct page *page;
int n;
- page = find_lock_page(&shadow->frozen_data, bh->b_page->index);
+ page = find_lock_page(shadow->inode->i_mapping, bh->b_page->index);
if (page) {
if (page_has_buffers(page)) {
n = bh_offset(bh) >> inode->i_blkbits;
@@ -620,10 +633,11 @@ void nilfs_mdt_restore_from_shadow_map(struct inode *inode)
nilfs_palloc_clear_cache(inode);
nilfs_clear_dirty_pages(inode->i_mapping, true);
- nilfs_copy_back_pages(inode->i_mapping, &shadow->frozen_data);
+ nilfs_copy_back_pages(inode->i_mapping, shadow->inode->i_mapping);
- nilfs_clear_dirty_pages(&ii->i_btnode_cache, true);
- nilfs_copy_back_pages(&ii->i_btnode_cache, &shadow->frozen_btnodes);
+ nilfs_clear_dirty_pages(ii->i_assoc_inode->i_mapping, true);
+ nilfs_copy_back_pages(ii->i_assoc_inode->i_mapping,
+ NILFS_I(shadow->inode)->i_assoc_inode->i_mapping);
nilfs_bmap_restore(ii->i_bmap, &shadow->bmap_store);
@@ -638,10 +652,11 @@ void nilfs_mdt_clear_shadow_map(struct inode *inode)
{
struct nilfs_mdt_info *mi = NILFS_MDT(inode);
struct nilfs_shadow_map *shadow = mi->mi_shadow;
struct inode *shadow_btnc_inode = NILFS_I(shadow->inode)->i_assoc_inode;
down_write(&mi->mi_sem);
nilfs_release_frozen_buffers(shadow);
- truncate_inode_pages(&shadow->frozen_data, 0);
- truncate_inode_pages(&shadow->frozen_btnodes, 0);
+ truncate_inode_pages(shadow->inode->i_mapping, 0);
+ truncate_inode_pages(shadow_btnc_inode->i_mapping, 0);
up_write(&mi->mi_sem);
}


@@ -18,14 +18,12 @@
/**
* struct nilfs_shadow_map - shadow mapping of meta data file
* @bmap_store: shadow copy of bmap state
- * @frozen_data: shadowed dirty data pages
- * @frozen_btnodes: shadowed dirty b-tree nodes' pages
+ * @inode: holder of page caches used in shadow mapping
* @frozen_buffers: list of frozen buffers
*/
struct nilfs_shadow_map {
struct nilfs_bmap_store bmap_store;
- struct address_space frozen_data;
- struct address_space frozen_btnodes;
+ struct inode *inode;
struct list_head frozen_buffers;
};


@@ -28,7 +28,7 @@
* @i_xattr: <TODO>
* @i_dir_start_lookup: page index of last successful search
* @i_cno: checkpoint number for GC inode
- * @i_btnode_cache: cached pages of b-tree nodes
+ * @i_assoc_inode: associated inode (B-tree node cache holder or back pointer)
* @i_dirty: list for connecting dirty files
* @xattr_sem: semaphore for extended attributes processing
* @i_bh: buffer contains disk inode
@@ -43,7 +43,7 @@ struct nilfs_inode_info {
__u64 i_xattr; /* sector_t ??? */
__u32 i_dir_start_lookup;
__u64 i_cno; /* check point number for GC inode */
- struct address_space i_btnode_cache;
+ struct inode *i_assoc_inode;
struct list_head i_dirty; /* List for connecting dirty files */
#ifdef CONFIG_NILFS_XATTR
@@ -75,13 +75,6 @@ NILFS_BMAP_I(const struct nilfs_bmap *bmap)
return container_of(bmap, struct nilfs_inode_info, i_bmap_data);
}
- static inline struct inode *NILFS_BTNC_I(struct address_space *btnc)
- {
- struct nilfs_inode_info *ii =
- container_of(btnc, struct nilfs_inode_info, i_btnode_cache);
- return &ii->vfs_inode;
- }
/*
* Dynamic state flags of NILFS on-memory inode (i_state)
*/
@@ -98,6 +91,8 @@ enum {
NILFS_I_INODE_SYNC, /* dsync is not allowed for inode */
NILFS_I_BMAP, /* has bmap and btnode_cache */
NILFS_I_GCINODE, /* inode for GC, on memory only */
NILFS_I_BTNC, /* inode for btree node cache */
NILFS_I_SHADOW, /* inode for shadowed page cache */
};
/*
@@ -264,6 +259,9 @@ struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
unsigned long ino);
extern struct inode *nilfs_iget_for_gc(struct super_block *sb,
unsigned long ino, __u64 cno);
int nilfs_attach_btree_node_cache(struct inode *inode);
void nilfs_detach_btree_node_cache(struct inode *inode);
struct inode *nilfs_iget_for_shadow(struct inode *inode);
extern void nilfs_update_inode(struct inode *, struct buffer_head *, int);
extern void nilfs_truncate(struct inode *);
extern void nilfs_evict_inode(struct inode *);


@@ -448,10 +448,9 @@ void nilfs_mapping_init(struct address_space *mapping, struct inode *inode)
/*
* NILFS2 needs clear_page_dirty() in the following two cases:
*
- * 1) For B-tree node pages and data pages of the dat/gcdat, NILFS2 clears
- * page dirty flags when it copies back pages from the shadow cache
- * (gcdat->{i_mapping,i_btnode_cache}) to its original cache
- * (dat->{i_mapping,i_btnode_cache}).
+ * 1) For B-tree node pages and data pages of DAT file, NILFS2 clears dirty
+ * flag of pages when it copies back pages from shadow cache to the
+ * original cache.
*
* 2) Some B-tree operations like insertion or deletion may dispose buffers
* in dirty state, and this needs to cancel the dirty state of their pages.


@@ -738,15 +738,18 @@ static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
struct list_head *listp)
{
struct nilfs_inode_info *ii = NILFS_I(inode);
- struct address_space *mapping = &ii->i_btnode_cache;
+ struct inode *btnc_inode = ii->i_assoc_inode;
struct pagevec pvec;
struct buffer_head *bh, *head;
unsigned int i;
pgoff_t index = 0;
if (!btnc_inode)
return;
pagevec_init(&pvec);
- while (pagevec_lookup_tag(&pvec, mapping, &index,
+ while (pagevec_lookup_tag(&pvec, btnc_inode->i_mapping, &index,
PAGECACHE_TAG_DIRTY)) {
for (i = 0; i < pagevec_count(&pvec); i++) {
bh = head = page_buffers(pvec.pages[i]);
@@ -2415,7 +2418,7 @@ nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
continue;
list_del_init(&ii->i_dirty);
truncate_inode_pages(&ii->vfs_inode.i_data, 0);
- nilfs_btnode_cache_clear(&ii->i_btnode_cache);
+ nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping);
iput(&ii->vfs_inode);
}
}


@@ -158,7 +158,8 @@ struct inode *nilfs_alloc_inode(struct super_block *sb)
ii->i_bh = NULL;
ii->i_state = 0;
ii->i_cno = 0;
- nilfs_mapping_init(&ii->i_btnode_cache, &ii->vfs_inode);
+ ii->i_assoc_inode = NULL;
ii->i_bmap = &ii->i_bmap_data;
return &ii->vfs_inode;
}
@@ -1378,8 +1379,6 @@ static void nilfs_inode_init_once(void *obj)
#ifdef CONFIG_NILFS_XATTR
init_rwsem(&ii->xattr_sem);
#endif
- address_space_init_once(&ii->i_btnode_cache);
ii->i_bmap = &ii->i_bmap_data;
inode_init_once(&ii->vfs_inode);
}


@@ -287,6 +287,9 @@ struct ceph_osd_linger_request {
rados_watcherrcb_t errcb;
void *data;
struct ceph_pagelist *request_pl;
struct page **notify_id_pages;
struct page ***preply_pages;
size_t *preply_len;
};


@@ -61,14 +61,6 @@
*/
#define DMA_ATTR_PRIVILEGED (1UL << 9)
- /*
- * This is a hint to the DMA-mapping subsystem that the device is expected
- * to overwrite the entire mapped size, thus the caller does not require any
- * of the previous buffer contents to be preserved. This allows
- * bounce-buffering implementations to optimise DMA_FROM_DEVICE transfers.
- */
- #define DMA_ATTR_OVERWRITE (1UL << 10)
/*
* DMA_ATTR_SYS_CACHE_ONLY: used to indicate that the buffer should be mapped
* with the correct memory attributes so that it can be cached in the system


@@ -86,6 +86,8 @@ struct cmos_rtc_board_info {
/* 2 values for divider stage reset, others for "testing purposes only" */
# define RTC_DIV_RESET1 0x60
# define RTC_DIV_RESET2 0x70
/* In AMD BKDG bit 5 and 6 are reserved, bit 4 is for select dv0 bank */
# define RTC_AMD_BANK_SELECT 0x10
/* Periodic intr. / Square wave rate select. 0=none, 1=32.8kHz,... 15=2Hz */
# define RTC_RATE_SELECT 0x0F
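The new constant documents bit 4 of register A as the dv0 bank select on AMD hardware. A hedged illustration (not the stable patch itself) of preserving that bit while writing a divider value:

    /* Illustrative only: keep the AMD bank-select bit intact while
     * resetting the divider via register A. */
    unsigned char reg_a = CMOS_READ(RTC_REG_A);

    CMOS_WRITE((reg_a & RTC_AMD_BANK_SELECT) | RTC_DIV_RESET2, RTC_REG_A);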


@@ -57,6 +57,7 @@ struct inet_skb_parm {
#define IPSKB_DOREDIRECT BIT(5)
#define IPSKB_FRAG_PMTU BIT(6)
#define IPSKB_L3SLAVE BIT(7)
#define IPSKB_NOPOLICY BIT(8)
u16 frag_max_size;
};


@@ -65,6 +65,9 @@ struct netns_xfrm {
u32 sysctl_aevent_rseqth;
int sysctl_larval_drop;
u32 sysctl_acq_expires;
u8 policy_default[XFRM_POLICY_MAX];
#ifdef CONFIG_SYSCTL
struct ctl_table_header *sysctl_hdr;
#endif


@@ -1086,6 +1086,27 @@ xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, un
int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb,
unsigned short family);
static inline bool __xfrm_check_nopolicy(struct net *net, struct sk_buff *skb,
int dir)
{
if (!net->xfrm.policy_count[dir] && !secpath_exists(skb))
return net->xfrm.policy_default[dir] == XFRM_USERPOLICY_ACCEPT;
return false;
}
static inline bool __xfrm_check_dev_nopolicy(struct sk_buff *skb,
int dir, unsigned short family)
{
if (dir != XFRM_POLICY_OUT && family == AF_INET) {
/* same dst may be used for traffic originating from
* devices with different policy settings.
*/
return IPCB(skb)->flags & IPSKB_NOPOLICY;
}
return skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY);
}
static inline int __xfrm_policy_check2(struct sock *sk, int dir,
struct sk_buff *skb,
unsigned int family, int reverse)
@@ -1096,8 +1117,8 @@ static inline int __xfrm_policy_check2(struct sock *sk, int dir,
if (sk && sk->sk_policy[XFRM_POLICY_IN])
return __xfrm_policy_check(sk, ndir, skb, family);
- return (!net->xfrm.policy_count[dir] && !secpath_exists(skb)) ||
- (skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY)) ||
+ return __xfrm_check_nopolicy(net, skb, dir) ||
+ __xfrm_check_dev_nopolicy(skb, dir, family) ||
__xfrm_policy_check(sk, ndir, skb, family);
}
@@ -1150,8 +1171,11 @@ static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
struct net *net = dev_net(skb->dev);
- return !net->xfrm.policy_count[XFRM_POLICY_OUT] ||
- (skb_dst(skb)->flags & DST_NOXFRM) ||
+ if (!net->xfrm.policy_count[XFRM_POLICY_OUT] &&
+ net->xfrm.policy_default[XFRM_POLICY_OUT] == XFRM_USERPOLICY_ACCEPT)
+ return true;
+ return (skb_dst(skb)->flags & DST_NOXFRM) ||
__xfrm_route_forward(skb, family);
}
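Condensed, the fast-path decision that __xfrm_check_nopolicy() implements looks like this (a summary sketch, not kernel code; xfrm_default_allows is a made-up name):

/* Sketch: a packet that matched no xfrm state passes the fast path only
 * when the SPD for @dir is empty, the skb carries no secpath, and the
 * per-netns default for @dir is ACCEPT. */
static bool xfrm_default_allows(const struct net *net, int dir, bool has_secpath)
{
    if (net->xfrm.policy_count[dir] || has_secpath)
        return false; /* fall through to the full policy check */
    return net->xfrm.policy_default[dir] == XFRM_USERPOLICY_ACCEPT;
}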


@@ -44,7 +44,7 @@ struct dma_buf_sync {
* between them in actual uapi, they're just different numbers.
*/
#define DMA_BUF_SET_NAME _IOW(DMA_BUF_BASE, 1, const char *)
- #define DMA_BUF_SET_NAME_A _IOW(DMA_BUF_BASE, 1, u32)
- #define DMA_BUF_SET_NAME_B _IOW(DMA_BUF_BASE, 1, u64)
+ #define DMA_BUF_SET_NAME_A _IOW(DMA_BUF_BASE, 1, __u32)
+ #define DMA_BUF_SET_NAME_B _IOW(DMA_BUF_BASE, 1, __u64)
#endif
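For reference, userspace does not normally pick between the _A and _B variants by hand; the kernel accepts either number, so callers just pass the string pointer. A minimal usage sketch:

#include <linux/dma-buf.h>
#include <sys/ioctl.h>

/* Name a dma-buf for debugging; the kernel rejects this once attachments exist. */
static int dmabuf_set_name(int dmabuf_fd, const char *name)
{
    return ioctl(dmabuf_fd, DMA_BUF_SET_NAME, name);
}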


@@ -215,6 +215,11 @@ enum {
XFRM_MSG_MAPPING,
#define XFRM_MSG_MAPPING XFRM_MSG_MAPPING
XFRM_MSG_SETDEFAULT,
#define XFRM_MSG_SETDEFAULT XFRM_MSG_SETDEFAULT
XFRM_MSG_GETDEFAULT,
#define XFRM_MSG_GETDEFAULT XFRM_MSG_GETDEFAULT
__XFRM_MSG_MAX
};
#define XFRM_MSG_MAX (__XFRM_MSG_MAX - 1)
@@ -514,6 +519,15 @@ struct xfrm_user_offload {
#define XFRM_OFFLOAD_IPV6 1
#define XFRM_OFFLOAD_INBOUND 2
struct xfrm_userpolicy_default {
#define XFRM_USERPOLICY_UNSPEC 0
#define XFRM_USERPOLICY_BLOCK 1
#define XFRM_USERPOLICY_ACCEPT 2
__u8 in;
__u8 fwd;
__u8 out;
};
#ifndef __KERNEL__
/* backwards compatibility for userspace */
#define XFRMGRP_ACQUIRE 1
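struct xfrm_userpolicy_default above is the payload of the new XFRM_MSG_SETDEFAULT / XFRM_MSG_GETDEFAULT messages. A minimal userspace sketch (assumptions: a raw NETLINK_XFRM socket suffices and no attributes are needed; error handling trimmed):

#include <linux/netlink.h>
#include <linux/xfrm.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Switch the default policy for all three directions to "block". */
static int xfrm_set_default_block(void)
{
    struct {
        struct nlmsghdr nlh;
        struct xfrm_userpolicy_default up;
    } req;
    int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);

    if (fd < 0)
        return -1;
    memset(&req, 0, sizeof(req));
    req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(req.up));
    req.nlh.nlmsg_type = XFRM_MSG_SETDEFAULT;
    req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
    req.up.in = XFRM_USERPOLICY_BLOCK;
    req.up.fwd = XFRM_USERPOLICY_BLOCK;
    req.up.out = XFRM_USERPOLICY_BLOCK;
    if (send(fd, &req, req.nlh.nlmsg_len, 0) < 0) {
        close(fd);
        return -1;
    }
    close(fd);
    return 0;
}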


@@ -597,9 +597,13 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
io_tlb_orig_addr[index + i] = slot_addr(orig_addr, i);
tlb_addr = slot_addr(io_tlb_start, index) + offset;
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
- (!(attrs & DMA_ATTR_OVERWRITE) || dir == DMA_TO_DEVICE ||
- dir == DMA_BIDIRECTIONAL))
+ /*
+ * When dir == DMA_FROM_DEVICE we could omit the copy from the orig
+ * to the tlb buffer, if we knew for sure the device will
+ * overwrite the entire current content. But we don't. Thus
+ * unconditional bounce may prevent leaking swiotlb content (i.e.
+ * kernel memory) to user-space.
+ */
swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_TO_DEVICE);
return tlb_addr;
}


@@ -11901,6 +11901,9 @@ SYSCALL_DEFINE5(perf_event_open,
* Do not allow to attach to a group in a different task
* or CPU context. If we're moving SW events, we'll fix
* this up later, so allow that.
*
* Racy, not holding group_leader->ctx->mutex, see comment with
* perf_event_ctx_lock().
*/
if (!move_group && group_leader->ctx != ctx)
goto err_context;
@@ -11968,6 +11971,7 @@ SYSCALL_DEFINE5(perf_event_open,
} else {
perf_event_ctx_unlock(group_leader, gctx);
move_group = 0;
goto not_move_group;
}
}
@@ -11984,7 +11988,17 @@ SYSCALL_DEFINE5(perf_event_open,
}
} else {
mutex_lock(&ctx->mutex);
/*
* Now that we hold ctx->lock, (re)validate group_leader->ctx == ctx,
* see the group_leader && !move_group test earlier.
*/
if (group_leader && group_leader->ctx != ctx) {
err = -EINVAL;
goto err_locked;
}
}
not_move_group:
if (ctx->task == TASK_TOMBSTONE) {
err = -ESRCH;


@@ -2297,6 +2297,15 @@ void *__symbol_get(const char *symbol)
}
EXPORT_SYMBOL_GPL(__symbol_get);
static bool module_init_layout_section(const char *sname)
{
#ifndef CONFIG_MODULE_UNLOAD
if (module_exit_section(sname))
return true;
#endif
return module_init_section(sname);
}
/*
* Ensure that an exported symbol [global namespace] does not already exist
* in the kernel or in some other module's exported symbol table.
@@ -2506,7 +2515,7 @@ static void layout_sections(struct module *mod, struct load_info *info)
if ((s->sh_flags & masks[m][0]) != masks[m][0]
|| (s->sh_flags & masks[m][1])
|| s->sh_entsize != ~0UL
- || module_init_section(sname))
+ || module_init_layout_section(sname))
continue;
s->sh_entsize = get_offset(mod, &mod->core_layout.size, s, i);
pr_debug("\t%s\n", sname);
@@ -2539,7 +2548,7 @@ static void layout_sections(struct module *mod, struct load_info *info)
if ((s->sh_flags & masks[m][0]) != masks[m][0]
|| (s->sh_flags & masks[m][1])
|| s->sh_entsize != ~0UL
- || !module_init_section(sname))
+ || !module_init_layout_section(sname))
continue;
s->sh_entsize = (get_offset(mod, &mod->init_layout.size, s, i)
| INIT_OFFSET_MASK);
@@ -3188,11 +3197,6 @@ static int rewrite_section_headers(struct load_info *info, int flags)
temporary image. */
shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;
- #ifndef CONFIG_MODULE_UNLOAD
- /* Don't load .exit sections */
- if (module_exit_section(info->secstrings+shdr->sh_name))
- shdr->sh_flags &= ~(unsigned long)SHF_ALLOC;
- #endif
}
/* Track but don't keep modinfo and version sections. */


@@ -43,6 +43,13 @@ static int br_pass_frame_up(struct sk_buff *skb)
u64_stats_update_end(&brstats->syncp);
vg = br_vlan_group_rcu(br);
/* Reset the offload_fwd_mark because there could be a stacked
* bridge above, and it should not think this bridge is doing
* that bridge's work forwarding out its ports.
*/
br_switchdev_frame_unmark(skb);
/* Bridge is just like any other port. Make sure the
* packet is allowed except in promisc mode when someone
* may be running packet capture.


@@ -537,43 +537,6 @@ static void request_init(struct ceph_osd_request *req)
target_init(&req->r_t);
}
- /*
- * This is ugly, but it allows us to reuse linger registration and ping
- * requests, keeping the structure of the code around send_linger{_ping}()
- * reasonable. Setting up a min_nr=2 mempool for each linger request
- * and dealing with copying ops (this blasts req only, watch op remains
- * intact) isn't any better.
- */
- static void request_reinit(struct ceph_osd_request *req)
- {
- struct ceph_osd_client *osdc = req->r_osdc;
- bool mempool = req->r_mempool;
- unsigned int num_ops = req->r_num_ops;
- u64 snapid = req->r_snapid;
- struct ceph_snap_context *snapc = req->r_snapc;
- bool linger = req->r_linger;
- struct ceph_msg *request_msg = req->r_request;
- struct ceph_msg *reply_msg = req->r_reply;
- dout("%s req %p\n", __func__, req);
- WARN_ON(kref_read(&req->r_kref) != 1);
- request_release_checks(req);
- WARN_ON(kref_read(&request_msg->kref) != 1);
- WARN_ON(kref_read(&reply_msg->kref) != 1);
- target_destroy(&req->r_t);
- request_init(req);
- req->r_osdc = osdc;
- req->r_mempool = mempool;
- req->r_num_ops = num_ops;
- req->r_snapid = snapid;
- req->r_snapc = snapc;
- req->r_linger = linger;
- req->r_request = request_msg;
- req->r_reply = reply_msg;
- }
struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
struct ceph_snap_context *snapc,
unsigned int num_ops,
@@ -918,14 +881,30 @@ EXPORT_SYMBOL(osd_req_op_xattr_init);
* @watch_opcode: CEPH_OSD_WATCH_OP_*
*/
static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
- u64 cookie, u8 watch_opcode)
+ u8 watch_opcode, u64 cookie, u32 gen)
{
struct ceph_osd_req_op *op;
op = osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
op->watch.cookie = cookie;
op->watch.op = watch_opcode;
- op->watch.gen = 0;
+ op->watch.gen = gen;
}
/*
* prot_ver, timeout and notify payload (may be empty) should already be
* encoded in @request_pl
*/
static void osd_req_op_notify_init(struct ceph_osd_request *req, int which,
u64 cookie, struct ceph_pagelist *request_pl)
{
struct ceph_osd_req_op *op;
op = osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
op->notify.cookie = cookie;
ceph_osd_data_pagelist_init(&op->notify.request_data, request_pl);
op->indata_len = request_pl->length;
}
/*
@@ -2727,9 +2706,12 @@ static void linger_release(struct kref *kref)
WARN_ON(!list_empty(&lreq->pending_lworks));
WARN_ON(lreq->osd);
- if (lreq->reg_req)
+ if (lreq->request_pl)
+ ceph_pagelist_release(lreq->request_pl);
+ if (lreq->notify_id_pages)
+ ceph_release_page_vector(lreq->notify_id_pages, 1);
ceph_osdc_put_request(lreq->reg_req);
- if (lreq->ping_req)
ceph_osdc_put_request(lreq->ping_req);
target_destroy(&lreq->t);
kfree(lreq);
@@ -2999,6 +2981,12 @@ static void linger_commit_cb(struct ceph_osd_request *req)
struct ceph_osd_linger_request *lreq = req->r_priv;
mutex_lock(&lreq->lock);
if (req != lreq->reg_req) {
dout("%s lreq %p linger_id %llu unknown req (%p != %p)\n",
__func__, lreq, lreq->linger_id, req, lreq->reg_req);
goto out;
}
dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq,
lreq->linger_id, req->r_result);
linger_reg_commit_complete(lreq, req->r_result);
@@ -3022,6 +3010,7 @@ static void linger_commit_cb(struct ceph_osd_request *req)
}
}
out:
mutex_unlock(&lreq->lock);
linger_put(lreq);
}
@@ -3044,6 +3033,12 @@ static void linger_reconnect_cb(struct ceph_osd_request *req)
struct ceph_osd_linger_request *lreq = req->r_priv;
mutex_lock(&lreq->lock);
if (req != lreq->reg_req) {
dout("%s lreq %p linger_id %llu unknown req (%p != %p)\n",
__func__, lreq, lreq->linger_id, req, lreq->reg_req);
goto out;
}
dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__,
lreq, lreq->linger_id, req->r_result, lreq->last_error);
if (req->r_result < 0) {
@@ -3053,46 +3048,64 @@ static void linger_reconnect_cb(struct ceph_osd_request *req)
}
}
out:
mutex_unlock(&lreq->lock);
linger_put(lreq);
}
static void send_linger(struct ceph_osd_linger_request *lreq)
{
- struct ceph_osd_request *req = lreq->reg_req;
- struct ceph_osd_req_op *op = &req->r_ops[0];
+ struct ceph_osd_client *osdc = lreq->osdc;
+ struct ceph_osd_request *req;
+ int ret;
- verify_osdc_wrlocked(req->r_osdc);
+ verify_osdc_wrlocked(osdc);
+ mutex_lock(&lreq->lock);
dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
- if (req->r_osd)
- cancel_linger_request(req);
+ if (lreq->reg_req) {
+ if (lreq->reg_req->r_osd)
+ cancel_linger_request(lreq->reg_req);
+ ceph_osdc_put_request(lreq->reg_req);
+ }
+ req = ceph_osdc_alloc_request(osdc, NULL, 1, true, GFP_NOIO);
+ BUG_ON(!req);
- request_reinit(req);
target_copy(&req->r_t, &lreq->t);
req->r_mtime = lreq->mtime;
- mutex_lock(&lreq->lock);
if (lreq->is_watch && lreq->committed) {
- WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
- op->watch.cookie != lreq->linger_id);
- op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT;
- op->watch.gen = ++lreq->register_gen;
+ osd_req_op_watch_init(req, 0, CEPH_OSD_WATCH_OP_RECONNECT,
+ lreq->linger_id, ++lreq->register_gen);
dout("lreq %p reconnect register_gen %u\n", lreq,
- op->watch.gen);
+ req->r_ops[0].watch.gen);
req->r_callback = linger_reconnect_cb;
} else {
- if (!lreq->is_watch)
+ if (lreq->is_watch) {
+ osd_req_op_watch_init(req, 0, CEPH_OSD_WATCH_OP_WATCH,
+ lreq->linger_id, 0);
+ } else {
lreq->notify_id = 0;
- else
- WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH);
+ refcount_inc(&lreq->request_pl->refcnt);
+ osd_req_op_notify_init(req, 0, lreq->linger_id,
+ lreq->request_pl);
+ ceph_osd_data_pages_init(
+ osd_req_op_data(req, 0, notify, response_data),
+ lreq->notify_id_pages, PAGE_SIZE, 0, false, false);
+ }
dout("lreq %p register\n", lreq);
req->r_callback = linger_commit_cb;
}
- mutex_unlock(&lreq->lock);
+ ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
+ BUG_ON(ret);
req->r_priv = linger_get(lreq);
req->r_linger = true;
+ lreq->reg_req = req;
+ mutex_unlock(&lreq->lock);
submit_request(req, true);
}
@@ -3102,6 +3115,12 @@ static void linger_ping_cb(struct ceph_osd_request *req)
struct ceph_osd_linger_request *lreq = req->r_priv;
mutex_lock(&lreq->lock);
if (req != lreq->ping_req) {
dout("%s lreq %p linger_id %llu unknown req (%p != %p)\n",
__func__, lreq, lreq->linger_id, req, lreq->ping_req);
goto out;
}
dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n",
__func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
lreq->last_error);
@@ -3117,6 +3136,7 @@ static void linger_ping_cb(struct ceph_osd_request *req)
lreq->register_gen, req->r_ops[0].watch.gen);
}
out:
mutex_unlock(&lreq->lock);
linger_put(lreq);
}
@@ -3124,8 +3144,8 @@ static void linger_ping_cb(struct ceph_osd_request *req)
static void send_linger_ping(struct ceph_osd_linger_request *lreq)
{
struct ceph_osd_client *osdc = lreq->osdc;
- struct ceph_osd_request *req = lreq->ping_req;
- struct ceph_osd_req_op *op = &req->r_ops[0];
+ struct ceph_osd_request *req;
+ int ret;
if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
dout("%s PAUSERD\n", __func__);
@@ -3137,19 +3157,26 @@ static void send_linger_ping(struct ceph_osd_linger_request *lreq)
__func__, lreq, lreq->linger_id, lreq->ping_sent,
lreq->register_gen);
- if (req->r_osd)
- cancel_linger_request(req);
+ if (lreq->ping_req) {
+ if (lreq->ping_req->r_osd)
+ cancel_linger_request(lreq->ping_req);
+ ceph_osdc_put_request(lreq->ping_req);
+ }
+ req = ceph_osdc_alloc_request(osdc, NULL, 1, true, GFP_NOIO);
+ BUG_ON(!req);
- request_reinit(req);
target_copy(&req->r_t, &lreq->t);
- WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
- op->watch.cookie != lreq->linger_id ||
- op->watch.op != CEPH_OSD_WATCH_OP_PING);
- op->watch.gen = lreq->register_gen;
+ osd_req_op_watch_init(req, 0, CEPH_OSD_WATCH_OP_PING, lreq->linger_id,
+ lreq->register_gen);
req->r_callback = linger_ping_cb;
+ ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
+ BUG_ON(ret);
req->r_priv = linger_get(lreq);
req->r_linger = true;
+ lreq->ping_req = req;
ceph_osdc_get_request(req);
account_request(req);
@@ -3165,12 +3192,6 @@ static void linger_submit(struct ceph_osd_linger_request *lreq)
down_write(&osdc->lock);
linger_register(lreq);
- if (lreq->is_watch) {
- lreq->reg_req->r_ops[0].watch.cookie = lreq->linger_id;
- lreq->ping_req->r_ops[0].watch.cookie = lreq->linger_id;
- } else {
- lreq->reg_req->r_ops[0].notify.cookie = lreq->linger_id;
- }
calc_target(osdc, &lreq->t, false);
osd = lookup_create_osd(osdc, lreq->t.osd, true);
@@ -3202,9 +3223,9 @@ static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
*/
static void __linger_cancel(struct ceph_osd_linger_request *lreq)
{
- if (lreq->is_watch && lreq->ping_req->r_osd)
+ if (lreq->ping_req && lreq->ping_req->r_osd)
cancel_linger_request(lreq->ping_req);
- if (lreq->reg_req->r_osd)
+ if (lreq->reg_req && lreq->reg_req->r_osd)
cancel_linger_request(lreq->reg_req);
cancel_linger_map_check(lreq);
unlink_linger(lreq->osd, lreq);
@@ -4651,43 +4672,6 @@ again:
}
EXPORT_SYMBOL(ceph_osdc_sync);
- static struct ceph_osd_request *
- alloc_linger_request(struct ceph_osd_linger_request *lreq)
- {
- struct ceph_osd_request *req;
- req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
- if (!req)
- return NULL;
- ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
- ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
- return req;
- }
- static struct ceph_osd_request *
- alloc_watch_request(struct ceph_osd_linger_request *lreq, u8 watch_opcode)
- {
- struct ceph_osd_request *req;
- req = alloc_linger_request(lreq);
- if (!req)
- return NULL;
- /*
- * Pass 0 for cookie because we don't know it yet, it will be
- * filled in by linger_submit().
- */
- osd_req_op_watch_init(req, 0, 0, watch_opcode);
- if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
- ceph_osdc_put_request(req);
- return NULL;
- }
- return req;
- }
/*
* Returns a handle, caller owns a ref.
*/
@@ -4717,18 +4701,6 @@ ceph_osdc_watch(struct ceph_osd_client *osdc,
lreq->t.flags = CEPH_OSD_FLAG_WRITE;
ktime_get_real_ts64(&lreq->mtime);
- lreq->reg_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_WATCH);
- if (!lreq->reg_req) {
- ret = -ENOMEM;
- goto err_put_lreq;
- }
- lreq->ping_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_PING);
- if (!lreq->ping_req) {
- ret = -ENOMEM;
- goto err_put_lreq;
- }
linger_submit(lreq);
ret = linger_reg_commit_wait(lreq);
if (ret) {
@@ -4766,8 +4738,8 @@ int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
req->r_flags = CEPH_OSD_FLAG_WRITE;
ktime_get_real_ts64(&req->r_mtime);
- osd_req_op_watch_init(req, 0, lreq->linger_id,
- CEPH_OSD_WATCH_OP_UNWATCH);
+ osd_req_op_watch_init(req, 0, CEPH_OSD_WATCH_OP_UNWATCH,
+ lreq->linger_id, 0);
ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
if (ret)
@@ -4853,35 +4825,6 @@ out_put_req:
}
EXPORT_SYMBOL(ceph_osdc_notify_ack);
- static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
- u64 cookie, u32 prot_ver, u32 timeout,
- void *payload, u32 payload_len)
- {
- struct ceph_osd_req_op *op;
- struct ceph_pagelist *pl;
- int ret;
- op = osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
- op->notify.cookie = cookie;
- pl = ceph_pagelist_alloc(GFP_NOIO);
- if (!pl)
- return -ENOMEM;
- ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */
- ret |= ceph_pagelist_encode_32(pl, timeout);
- ret |= ceph_pagelist_encode_32(pl, payload_len);
- ret |= ceph_pagelist_append(pl, payload, payload_len);
- if (ret) {
- ceph_pagelist_release(pl);
- return -ENOMEM;
- }
- ceph_osd_data_pagelist_init(&op->notify.request_data, pl);
- op->indata_len = pl->length;
- return 0;
- }
/*
* @timeout: in seconds
*
@@ -4900,7 +4843,6 @@ int ceph_osdc_notify(struct ceph_osd_client *osdc,
size_t *preply_len)
{
struct ceph_osd_linger_request *lreq;
- struct page **pages;
int ret;
WARN_ON(!timeout);
@@ -4913,6 +4855,29 @@ int ceph_osdc_notify(struct ceph_osd_client *osdc,
if (!lreq)
return -ENOMEM;
lreq->request_pl = ceph_pagelist_alloc(GFP_NOIO);
if (!lreq->request_pl) {
ret = -ENOMEM;
goto out_put_lreq;
}
ret = ceph_pagelist_encode_32(lreq->request_pl, 1); /* prot_ver */
ret |= ceph_pagelist_encode_32(lreq->request_pl, timeout);
ret |= ceph_pagelist_encode_32(lreq->request_pl, payload_len);
ret |= ceph_pagelist_append(lreq->request_pl, payload, payload_len);
if (ret) {
ret = -ENOMEM;
goto out_put_lreq;
}
/* for notify_id */
lreq->notify_id_pages = ceph_alloc_page_vector(1, GFP_NOIO);
if (IS_ERR(lreq->notify_id_pages)) {
ret = PTR_ERR(lreq->notify_id_pages);
lreq->notify_id_pages = NULL;
goto out_put_lreq;
}
lreq->preply_pages = preply_pages;
lreq->preply_len = preply_len;
@@ -4920,35 +4885,6 @@ int ceph_osdc_notify(struct ceph_osd_client *osdc,
ceph_oloc_copy(&lreq->t.base_oloc, oloc);
lreq->t.flags = CEPH_OSD_FLAG_READ;
- lreq->reg_req = alloc_linger_request(lreq);
- if (!lreq->reg_req) {
- ret = -ENOMEM;
- goto out_put_lreq;
- }
- /*
- * Pass 0 for cookie because we don't know it yet, it will be
- * filled in by linger_submit().
- */
- ret = osd_req_op_notify_init(lreq->reg_req, 0, 0, 1, timeout,
- payload, payload_len);
- if (ret)
- goto out_put_lreq;
- /* for notify_id */
- pages = ceph_alloc_page_vector(1, GFP_NOIO);
- if (IS_ERR(pages)) {
- ret = PTR_ERR(pages);
- goto out_put_lreq;
- }
- ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
- response_data),
- pages, PAGE_SIZE, 0, false, true);
- ret = ceph_osdc_alloc_messages(lreq->reg_req, GFP_NOIO);
- if (ret)
- goto out_put_lreq;
linger_submit(lreq);
ret = linger_reg_commit_wait(lreq);
if (!ret)


@@ -1765,6 +1765,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
struct in_device *in_dev = __in_dev_get_rcu(dev);
unsigned int flags = RTCF_MULTICAST;
struct rtable *rth;
bool no_policy;
u32 itag = 0;
int err;
@@ -1775,8 +1776,12 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
if (our)
flags |= RTCF_LOCAL;
+ no_policy = IN_DEV_ORCONF(in_dev, NOPOLICY);
+ if (no_policy)
+ IPCB(skb)->flags |= IPSKB_NOPOLICY;
rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
- IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
+ no_policy, false);
if (!rth)
return -ENOBUFS;
@@ -1835,7 +1840,7 @@ static int __mkroute_input(struct sk_buff *skb,
struct rtable *rth;
int err;
struct in_device *out_dev;
- bool do_cache;
+ bool do_cache, no_policy;
u32 itag = 0;
/* get a working reference to the output device */
@@ -1880,6 +1885,10 @@ static int __mkroute_input(struct sk_buff *skb,
}
}
no_policy = IN_DEV_ORCONF(in_dev, NOPOLICY);
if (no_policy)
IPCB(skb)->flags |= IPSKB_NOPOLICY;
fnhe = find_exception(nhc, daddr);
if (do_cache) {
if (fnhe)
@@ -1892,9 +1901,8 @@ static int __mkroute_input(struct sk_buff *skb,
}
}
- rth = rt_dst_alloc(out_dev->dev, 0, res->type,
- IN_DEV_CONF_GET(in_dev, NOPOLICY),
- IN_DEV_CONF_GET(out_dev, NOXFRM));
+ rth = rt_dst_alloc(out_dev->dev, 0, res->type, no_policy,
+ IN_DEV_ORCONF(out_dev, NOXFRM));
if (!rth) {
err = -ENOBUFS;
goto cleanup;
@@ -2145,6 +2153,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
struct rtable *rth;
struct flowi4 fl4;
bool do_cache = true;
bool no_policy;
/* IP on this device is disabled. */
@@ -2262,6 +2271,10 @@ brd_input:
RT_CACHE_STAT_INC(in_brd);
local_input:
no_policy = IN_DEV_ORCONF(in_dev, NOPOLICY);
if (no_policy)
IPCB(skb)->flags |= IPSKB_NOPOLICY;
do_cache &= res->fi && !itag;
if (do_cache) {
struct fib_nh_common *nhc = FIB_RES_NHC(*res);
@@ -2276,7 +2289,7 @@ local_input:
rth = rt_dst_alloc(ip_rt_get_dev(net, res),
flags | RTCF_LOCAL, res->type,
- IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
+ no_policy, false);
if (!rth)
goto e_nobufs;
@@ -2499,8 +2512,8 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
add:
rth = rt_dst_alloc(dev_out, flags, type,
- IN_DEV_CONF_GET(in_dev, NOPOLICY),
- IN_DEV_CONF_GET(in_dev, NOXFRM));
+ IN_DEV_ORCONF(in_dev, NOPOLICY),
+ IN_DEV_ORCONF(in_dev, NOXFRM));
if (!rth)
return ERR_PTR(-ENOBUFS);


@@ -2830,8 +2830,10 @@ static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb
void *ext_hdrs[SADB_EXT_MAX];
int err;
- pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
+ err = pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
+ if (err)
+ return err;
memset(ext_hdrs, 0, sizeof(ext_hdrs));
err = parse_exthdrs(skb, hdr, ext_hdrs);


@@ -1387,8 +1387,7 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
goto dont_reorder;
/* not part of a BA session */
- if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
- ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
+ if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
goto dont_reorder;
/* new, potentially un-ordered, ampdu frame - process it */


@@ -118,7 +118,7 @@ static int nci_queue_tx_data_frags(struct nci_dev *ndev,
skb_frag = nci_skb_alloc(ndev,
(NCI_DATA_HDR_SIZE + frag_len),
- GFP_KERNEL);
+ GFP_ATOMIC);
if (skb_frag == NULL) {
rc = -ENOMEM;
goto free_exit;


@@ -153,7 +153,7 @@ static int nci_hci_send_data(struct nci_dev *ndev, u8 pipe,
i = 0;
skb = nci_skb_alloc(ndev, conn_info->max_pkt_payload_len +
- NCI_DATA_HDR_SIZE, GFP_KERNEL);
+ NCI_DATA_HDR_SIZE, GFP_ATOMIC);
if (!skb)
return -ENOMEM;
@@ -186,7 +186,7 @@ static int nci_hci_send_data(struct nci_dev *ndev, u8 pipe,
if (i < data_len) {
skb = nci_skb_alloc(ndev,
conn_info->max_pkt_payload_len +
- NCI_DATA_HDR_SIZE, GFP_KERNEL);
+ NCI_DATA_HDR_SIZE, GFP_ATOMIC);
if (!skb)
return -ENOMEM;

Some files were not shown because too many files have changed in this diff.