Merge 5.10.91 into android12-5.10-lts

Changes in 5.10.91
	f2fs: quota: fix potential deadlock
	selftests: x86: fix [-Wstringop-overread] warn in test_process_vm_readv()
	tracing: Fix check for trace_percpu_buffer validity in get_trace_buf()
	tracing: Tag trace_percpu_buffer as a percpu pointer
	ieee802154: atusb: fix uninit value in atusb_set_extended_addr
	i40e: Fix to not show opcode msg on unsuccessful VF MAC change
	iavf: Fix limit of total number of queues to active queues of VF
	RDMA/core: Don't infoleak GRH fields
	netrom: fix copying in user data in nr_setsockopt
	RDMA/uverbs: Check for null return of kmalloc_array
	mac80211: initialize variable have_higher_than_11mbit
	sfc: The RX page_ring is optional
	i40e: fix use-after-free in i40e_sync_filters_subtask()
	i40e: Fix for displaying message regarding NVM version
	i40e: Fix incorrect netdev's real number of RX/TX queues
	ftrace/samples: Add missing prototypes direct functions
	ipv4: Check attribute length for RTA_GATEWAY in multipath route
	ipv4: Check attribute length for RTA_FLOW in multipath route
	ipv6: Check attribute length for RTA_GATEWAY in multipath route
	ipv6: Check attribute length for RTA_GATEWAY when deleting multipath route
	lwtunnel: Validate RTA_ENCAP_TYPE attribute length
	batman-adv: mcast: don't send link-local multicast to mcast routers
	sch_qfq: prevent shift-out-of-bounds in qfq_init_qdisc
	net: ena: Fix undefined state when tx request id is out of bounds
	net: ena: Fix error handling when calculating max IO queues number
	xfs: map unwritten blocks in XFS_IOC_{ALLOC,FREE}SP just like fallocate
	power: supply: core: Break capacity loop
	power: reset: ltc2952: Fix use of floating point literals
	rndis_host: support Hytera digital radios
	phonet: refcount leak in pep_sock_accep
	power: bq25890: Enable continuous conversion for ADC at charging
	ipv6: Continue processing multipath route even if gateway attribute is invalid
	ipv6: Do cleanup if attribute validation fails in multipath route
	usb: mtu3: fix interval value for intr and isoc
	scsi: libiscsi: Fix UAF in iscsi_conn_get_param()/iscsi_conn_teardown()
	ip6_vti: initialize __ip6_tnl_parm struct in vti6_siocdevprivate
	net: udp: fix alignment problem in udp4_seq_show()
	atlantic: Fix buff_ring OOB in aq_ring_rx_clean
	mISDN: change function names to avoid conflicts
	drm/amd/display: Added power down for DCN10
	ipv6: raw: check passed optlen before reading
	ARM: dts: gpio-ranges property is now required
	Input: zinitix - make sure the IRQ is allocated before it gets enabled
	Linux 5.10.91

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Iea6f28f738081085f9c5df020ebdee6583b38dfb
Greg Kroah-Hartman committed 2022-01-11 15:46:18 +01:00

44 changed files with 301 additions and 96 deletions


@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 90
+SUBLEVEL = 91
 EXTRAVERSION =
 NAME = Dare mighty things


@@ -555,6 +555,8 @@
 <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
 <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+gpio-ranges = <&gpio 0 0 58>;
 gpclk0_gpio49: gpclk0_gpio49 {
 pin-gpclk {
 pins = "gpio49";


@@ -126,6 +126,8 @@
 interrupt-controller;
 #interrupt-cells = <2>;
+gpio-ranges = <&gpio 0 0 54>;
 /* Defines common pin muxing groups
 *
 * While each pin can have its mux selected


@@ -77,6 +77,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
 .get_clock = dcn10_get_clock,
 .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
 .calc_vupdate_position = dcn10_calc_vupdate_position,
+.power_down = dce110_power_down,
 .set_backlight_level = dce110_set_backlight_level,
 .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
 .set_pipe = dce110_set_pipe,


@@ -66,7 +66,7 @@ void ib_copy_ah_attr_to_user(struct ib_device *device,
 struct rdma_ah_attr *src = ah_attr;
 struct rdma_ah_attr conv_ah;
-memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
+memset(&dst->grh, 0, sizeof(dst->grh));
 if ((ah_attr->type == RDMA_AH_ATTR_TYPE_OPA) &&
 (rdma_ah_get_dlid(ah_attr) > be16_to_cpu(IB_LID_PERMISSIVE)) &&


@@ -450,6 +450,9 @@ static int uapi_finalize(struct uverbs_api *uapi)
 uapi->num_write_ex = max_write_ex + 1;
 data = kmalloc_array(uapi->num_write + uapi->num_write_ex,
 sizeof(*uapi->write_methods), GFP_KERNEL);
+if (!data)
+return -ENOMEM;
 for (i = 0; i != uapi->num_write + uapi->num_write_ex; i++)
 data[i] = &uapi->notsupp_method;
 uapi->write_methods = data;


@@ -488,6 +488,15 @@ static int zinitix_ts_probe(struct i2c_client *client)
 return error;
 }
+error = devm_request_threaded_irq(&client->dev, client->irq,
+NULL, zinitix_ts_irq_handler,
+IRQF_ONESHOT,
+client->name, bt541);
+if (error) {
+dev_err(&client->dev, "Failed to request IRQ: %d\n", error);
+return error;
+}
 error = zinitix_init_input_dev(bt541);
 if (error) {
 dev_err(&client->dev,
@@ -514,13 +523,6 @@ static int zinitix_ts_probe(struct i2c_client *client)
 }
 irq_set_status_flags(client->irq, IRQ_NOAUTOEN);
-error = devm_request_threaded_irq(&client->dev, client->irq,
-NULL, zinitix_ts_irq_handler,
-IRQF_ONESHOT, client->name, bt541);
-if (error) {
-dev_err(&client->dev, "Failed to request IRQ: %d\n", error);
-return error;
-}
 return 0;
 }


@@ -381,7 +381,7 @@ mISDNInit(void)
 err = mISDN_inittimer(&debug);
 if (err)
 goto error2;
-err = l1_init(&debug);
+err = Isdnl1_Init(&debug);
 if (err)
 goto error3;
 err = Isdnl2_Init(&debug);
@@ -395,7 +395,7 @@ mISDNInit(void)
 error5:
 Isdnl2_cleanup();
 error4:
-l1_cleanup();
+Isdnl1_cleanup();
 error3:
 mISDN_timer_cleanup();
 error2:
@@ -408,7 +408,7 @@ static void mISDN_cleanup(void)
 {
 misdn_sock_cleanup();
 Isdnl2_cleanup();
-l1_cleanup();
+Isdnl1_cleanup();
 mISDN_timer_cleanup();
 class_unregister(&mISDN_class);


@@ -60,8 +60,8 @@ struct Bprotocol *get_Bprotocol4id(u_int);
 extern int mISDN_inittimer(u_int *);
 extern void mISDN_timer_cleanup(void);
-extern int l1_init(u_int *);
-extern void l1_cleanup(void);
+extern int Isdnl1_Init(u_int *);
+extern void Isdnl1_cleanup(void);
 extern int Isdnl2_Init(u_int *);
 extern void Isdnl2_cleanup(void);


@@ -398,7 +398,7 @@ create_l1(struct dchannel *dch, dchannel_l1callback *dcb) {
 EXPORT_SYMBOL(create_l1);
 int
-l1_init(u_int *deb)
+Isdnl1_Init(u_int *deb)
 {
 debug = deb;
 l1fsm_s.state_count = L1S_STATE_COUNT;
@@ -409,7 +409,7 @@ l1_init(u_int *deb)
 }
 void
-l1_cleanup(void)
+Isdnl1_cleanup(void)
 {
 mISDN_FsmFree(&l1fsm_s);
 }


@@ -1199,26 +1199,22 @@ static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
 static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
 {
-struct ena_tx_buffer *tx_info = NULL;
-if (likely(req_id < tx_ring->ring_size)) {
-tx_info = &tx_ring->tx_buffer_info[req_id];
-if (likely(tx_info->skb))
-return 0;
-}
+struct ena_tx_buffer *tx_info;
+tx_info = &tx_ring->tx_buffer_info[req_id];
+if (likely(tx_info->skb))
+return 0;
 return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
 }
 static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)
 {
-struct ena_tx_buffer *tx_info = NULL;
-if (likely(req_id < xdp_ring->ring_size)) {
-tx_info = &xdp_ring->tx_buffer_info[req_id];
-if (likely(tx_info->xdpf))
-return 0;
-}
+struct ena_tx_buffer *tx_info;
+tx_info = &xdp_ring->tx_buffer_info[req_id];
+if (likely(tx_info->xdpf))
+return 0;
 return handle_invalid_req_id(xdp_ring, req_id, tx_info, true);
 }
@@ -1243,9 +1239,14 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
 rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
 &req_id);
-if (rc)
+if (rc) {
+if (unlikely(rc == -EINVAL))
+handle_invalid_req_id(tx_ring, req_id, NULL,
+false);
 break;
+}
+/* validate that the request id points to a valid skb */
 rc = validate_tx_req_id(tx_ring, req_id);
 if (rc)
 break;
@@ -1801,9 +1802,14 @@ static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
 rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq,
 &req_id);
-if (rc)
+if (rc) {
+if (unlikely(rc == -EINVAL))
+handle_invalid_req_id(xdp_ring, req_id, NULL,
+true);
 break;
+}
+/* validate that the request id points to a valid xdp_frame */
 rc = validate_xdp_req_id(xdp_ring, req_id);
 if (rc)
 break;
@@ -3921,10 +3927,6 @@ static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev,
 max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num);
 /* 1 IRQ for for mgmnt and 1 IRQs for each IO direction */
 max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1);
-if (unlikely(!max_num_io_queues)) {
-dev_err(&pdev->dev, "The device doesn't have io queues\n");
-return -EFAULT;
-}
 return max_num_io_queues;
 }


@@ -365,6 +365,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
 if (!buff->is_eop) {
 buff_ = buff;
 do {
+if (buff_->next >= self->size) {
+err = -EIO;
+goto err_exit;
+}
 next_ = buff_->next,
 buff_ = &self->buff_ring[next_];
 is_rsc_completed =
@@ -388,6 +392,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
 (buff->is_lro && buff->is_cso_err)) {
 buff_ = buff;
 do {
+if (buff_->next >= self->size) {
+err = -EIO;
+goto err_exit;
+}
 next_ = buff_->next,
 buff_ = &self->buff_ring[next_];


@@ -98,6 +98,24 @@ MODULE_LICENSE("GPL v2");
 static struct workqueue_struct *i40e_wq;
+static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f,
+struct net_device *netdev, int delta)
+{
+struct netdev_hw_addr *ha;
+if (!f || !netdev)
+return;
+netdev_for_each_mc_addr(ha, netdev) {
+if (ether_addr_equal(ha->addr, f->macaddr)) {
+ha->refcount += delta;
+if (ha->refcount <= 0)
+ha->refcount = 1;
+break;
+}
+}
+}
 /**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
@@ -2035,6 +2053,7 @@ static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
 hlist_for_each_entry_safe(new, h, from, hlist) {
 /* We can simply free the wrapper structure */
 hlist_del(&new->hlist);
+netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
 kfree(new);
 }
 }
@@ -2382,6 +2401,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 &tmp_add_list,
 &tmp_del_list,
 vlan_filters);
+hlist_for_each_entry(new, &tmp_add_list, hlist)
+netdev_hw_addr_refcnt(new->f, vsi->netdev, 1);
 if (retval)
 goto err_no_memory_locked;
@@ -2514,6 +2537,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 if (new->f->state == I40E_FILTER_NEW)
 new->f->state = new->state;
 hlist_del(&new->hlist);
+netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
 kfree(new);
 }
 spin_unlock_bh(&vsi->mac_filter_hash_lock);
@@ -8357,6 +8381,27 @@ int i40e_open(struct net_device *netdev)
 return 0;
 }
+/**
+* i40e_netif_set_realnum_tx_rx_queues - Update number of tx/rx queues
+* @vsi: vsi structure
+*
+* This updates netdev's number of tx/rx queues
+*
+* Returns status of setting tx/rx queues
+**/
+static int i40e_netif_set_realnum_tx_rx_queues(struct i40e_vsi *vsi)
+{
+int ret;
+ret = netif_set_real_num_rx_queues(vsi->netdev,
+vsi->num_queue_pairs);
+if (ret)
+return ret;
+return netif_set_real_num_tx_queues(vsi->netdev,
+vsi->num_queue_pairs);
+}
 /**
 * i40e_vsi_open -
 * @vsi: the VSI to open
@@ -8393,13 +8438,7 @@ int i40e_vsi_open(struct i40e_vsi *vsi)
 goto err_setup_rx;
 /* Notify the stack of the actual queue counts. */
-err = netif_set_real_num_tx_queues(vsi->netdev,
-vsi->num_queue_pairs);
-if (err)
-goto err_set_queues;
-err = netif_set_real_num_rx_queues(vsi->netdev,
-vsi->num_queue_pairs);
+err = i40e_netif_set_realnum_tx_rx_queues(vsi);
 if (err)
 goto err_set_queues;
@@ -13686,6 +13725,9 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
 case I40E_VSI_MAIN:
 case I40E_VSI_VMDQ2:
 ret = i40e_config_netdev(vsi);
+if (ret)
+goto err_netdev;
+ret = i40e_netif_set_realnum_tx_rx_queues(vsi);
 if (ret)
 goto err_netdev;
 ret = register_netdev(vsi->netdev);
@@ -14963,8 +15005,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
 hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
-dev_info(&pdev->dev,
-"The driver for the device detected a newer version of the NVM image v%u.%u than expected v%u.%u. Please install the most recent version of the network driver.\n",
+dev_dbg(&pdev->dev,
+"The driver for the device detected a newer version of the NVM image v%u.%u than v%u.%u.\n",
 hw->aq.api_maj_ver,
 hw->aq.api_min_ver,
 I40E_FW_API_VERSION_MAJOR,


@@ -1824,17 +1824,19 @@ sriov_configure_out:
 /***********************virtual channel routines******************/
 /**
-* i40e_vc_send_msg_to_vf
+* i40e_vc_send_msg_to_vf_ex
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
+* @is_quiet: true for not printing unsuccessful return values, false otherwise
 *
 * send msg to VF
 **/
-static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
-u32 v_retval, u8 *msg, u16 msglen)
+static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode,
+u32 v_retval, u8 *msg, u16 msglen,
+bool is_quiet)
 {
 struct i40e_pf *pf;
 struct i40e_hw *hw;
@@ -1850,7 +1852,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
 /* single place to detect unsuccessful return values */
-if (v_retval) {
+if (v_retval && !is_quiet) {
 vf->num_invalid_msgs++;
 dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
 vf->vf_id, v_opcode, v_retval);
@@ -1880,6 +1882,23 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
 return 0;
 }
+/**
+* i40e_vc_send_msg_to_vf
+* @vf: pointer to the VF info
+* @v_opcode: virtual channel opcode
+* @v_retval: virtual channel return value
+* @msg: pointer to the msg buffer
+* @msglen: msg length
+*
+* send msg to VF
+**/
+static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
+u32 v_retval, u8 *msg, u16 msglen)
+{
+return i40e_vc_send_msg_to_vf_ex(vf, v_opcode, v_retval,
+msg, msglen, false);
+}
 /**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the VF info
@@ -2641,6 +2660,7 @@ error_param:
 * i40e_check_vf_permission
 * @vf: pointer to the VF info
 * @al: MAC address list from virtchnl
+* @is_quiet: set true for printing msg without opcode info, false otherwise
 *
 * Check that the given list of MAC addresses is allowed. Will return -EPERM
 * if any address in the list is not valid. Checks the following conditions:
@@ -2655,13 +2675,15 @@ error_param:
 * addresses might not be accurate.
 **/
 static inline int i40e_check_vf_permission(struct i40e_vf *vf,
-struct virtchnl_ether_addr_list *al)
+struct virtchnl_ether_addr_list *al,
+bool *is_quiet)
 {
 struct i40e_pf *pf = vf->pf;
 struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
 int mac2add_cnt = 0;
 int i;
+*is_quiet = false;
 for (i = 0; i < al->num_elements; i++) {
 struct i40e_mac_filter *f;
 u8 *addr = al->list[i].addr;
@@ -2685,6 +2707,7 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
 !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
 dev_err(&pf->pdev->dev,
 "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
+*is_quiet = true;
 return -EPERM;
 }
@@ -2721,6 +2744,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
 (struct virtchnl_ether_addr_list *)msg;
 struct i40e_pf *pf = vf->pf;
 struct i40e_vsi *vsi = NULL;
+bool is_quiet = false;
 i40e_status ret = 0;
 int i;
@@ -2737,7 +2761,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
 */
 spin_lock_bh(&vsi->mac_filter_hash_lock);
-ret = i40e_check_vf_permission(vf, al);
+ret = i40e_check_vf_permission(vf, al, &is_quiet);
 if (ret) {
 spin_unlock_bh(&vsi->mac_filter_hash_lock);
 goto error_param;
@@ -2775,8 +2799,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
 error_param:
 /* send the response to the VF */
-return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
-ret);
+return i40e_vc_send_msg_to_vf_ex(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
+ret, NULL, 0, is_quiet);
 }
 /**


@@ -2598,8 +2598,11 @@ static int iavf_validate_ch_config(struct iavf_adapter *adapter,
 total_max_rate += tx_rate;
 num_qps += mqprio_qopt->qopt.count[i];
 }
-if (num_qps > IAVF_MAX_REQ_QUEUES)
+if (num_qps > adapter->num_active_queues) {
+dev_err(&adapter->pdev->dev,
+"Cannot support requested number of queues\n");
 return -EINVAL;
+}
 ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
 return ret;


@@ -110,6 +110,8 @@ static struct page *ef4_reuse_page(struct ef4_rx_queue *rx_queue)
 struct ef4_rx_page_state *state;
 unsigned index;
+if (unlikely(!rx_queue->page_ring))
+return NULL;
 index = rx_queue->page_remove & rx_queue->page_ptr_mask;
 page = rx_queue->page_ring[index];
 if (page == NULL)
@@ -293,6 +295,9 @@ static void ef4_recycle_rx_pages(struct ef4_channel *channel,
 {
 struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);
+if (unlikely(!rx_queue->page_ring))
+return;
 do {
 ef4_recycle_rx_page(channel, rx_buf);
 rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);


@@ -45,6 +45,8 @@ static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
 unsigned int index;
 struct page *page;
+if (unlikely(!rx_queue->page_ring))
+return NULL;
 index = rx_queue->page_remove & rx_queue->page_ptr_mask;
 page = rx_queue->page_ring[index];
 if (page == NULL)
@@ -114,6 +116,9 @@ void efx_recycle_rx_pages(struct efx_channel *channel,
 {
 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+if (unlikely(!rx_queue->page_ring))
+return;
 do {
 efx_recycle_rx_page(channel, rx_buf);
 rx_buf = efx_rx_buf_next(rx_queue, rx_buf);


@@ -93,7 +93,9 @@ static int atusb_control_msg(struct atusb *atusb, unsigned int pipe,
 ret = usb_control_msg(usb_dev, pipe, request, requesttype,
 value, index, data, size, timeout);
-if (ret < 0) {
+if (ret < size) {
+ret = ret < 0 ? ret : -ENODATA;
 atusb->err = ret;
 dev_err(&usb_dev->dev,
 "%s: req 0x%02x val 0x%x idx 0x%x, error %d\n",
@@ -861,9 +863,9 @@ static int atusb_get_and_show_build(struct atusb *atusb)
 if (!build)
 return -ENOMEM;
-ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
-ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0,
-build, ATUSB_BUILD_SIZE, 1000);
+/* We cannot call atusb_control_msg() here, since this request may read various length data */
+ret = usb_control_msg(atusb->usb_dev, usb_rcvctrlpipe(usb_dev, 0), ATUSB_BUILD,
+ATUSB_REQ_FROM_DEV, 0, 0, build, ATUSB_BUILD_SIZE, 1000);
 if (ret >= 0) {
 build[ret] = 0;
 dev_info(&usb_dev->dev, "Firmware: build %s\n", build);


@@ -608,6 +608,11 @@ static const struct usb_device_id products [] = {
 USB_DEVICE_AND_INTERFACE_INFO(0x1630, 0x0042,
 USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
 .driver_info = (unsigned long) &rndis_poll_status_info,
+}, {
+/* Hytera Communications DMR radios' "Radio to PC Network" */
+USB_VENDOR_AND_INTERFACE_INFO(0x238b,
+USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
+.driver_info = (unsigned long)&rndis_info,
 }, {
 /* RNDIS is MSFT's un-official variant of CDC ACM */
 USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff),


@@ -159,8 +159,8 @@ static void ltc2952_poweroff_kill(void)
 static void ltc2952_poweroff_default(struct ltc2952_poweroff *data)
 {
-data->wde_interval = 300L * 1E6L;
-data->trigger_delay = ktime_set(2, 500L*1E6L);
+data->wde_interval = 300L * NSEC_PER_MSEC;
+data->trigger_delay = ktime_set(2, 500L * NSEC_PER_MSEC);
 hrtimer_init(&data->timer_trigger, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 data->timer_trigger.function = ltc2952_poweroff_timer_trigger;


@@ -581,12 +581,12 @@ static irqreturn_t __bq25890_handle_irq(struct bq25890_device *bq)
 if (!new_state.online && bq->state.online) { /* power removed */
 /* disable ADC */
-ret = bq25890_field_write(bq, F_CONV_START, 0);
+ret = bq25890_field_write(bq, F_CONV_RATE, 0);
 if (ret < 0)
 goto error;
 } else if (new_state.online && !bq->state.online) { /* power inserted */
 /* enable ADC, to have control of charge current/voltage */
-ret = bq25890_field_write(bq, F_CONV_START, 1);
+ret = bq25890_field_write(bq, F_CONV_RATE, 1);
 if (ret < 0)
 goto error;
 }


@@ -912,6 +912,10 @@ power_supply_find_ocv2cap_table(struct power_supply_battery_info *info,
 return NULL;
 for (i = 0; i < POWER_SUPPLY_OCV_TEMP_MAX; i++) {
+/* Out of capacity tables */
+if (!info->ocv_table[i])
+break;
 temp_diff = abs(info->ocv_temp[i] - temp);
 if (temp_diff < best_temp_diff) {


@@ -2950,6 +2950,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
 {
 struct iscsi_conn *conn = cls_conn->dd_data;
 struct iscsi_session *session = conn->session;
+char *tmp_persistent_address = conn->persistent_address;
+char *tmp_local_ipaddr = conn->local_ipaddr;
 del_timer_sync(&conn->transport_timer);
@@ -2971,8 +2973,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
 spin_lock_bh(&session->frwd_lock);
 free_pages((unsigned long) conn->data,
 get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
-kfree(conn->persistent_address);
-kfree(conn->local_ipaddr);
 /* regular RX path uses back_lock */
 spin_lock_bh(&session->back_lock);
 kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
@@ -2984,6 +2984,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
 mutex_unlock(&session->eh_mutex);
 iscsi_destroy_conn(cls_conn);
+kfree(tmp_persistent_address);
+kfree(tmp_local_ipaddr);
 }
 EXPORT_SYMBOL_GPL(iscsi_conn_teardown);


@@ -77,7 +77,7 @@ static int mtu3_ep_enable(struct mtu3_ep *mep)
 if (usb_endpoint_xfer_int(desc) ||
 usb_endpoint_xfer_isoc(desc)) {
 interval = desc->bInterval;
-interval = clamp_val(interval, 1, 16) - 1;
+interval = clamp_val(interval, 1, 16);
 if (usb_endpoint_xfer_isoc(desc) && comp_desc)
 mult = comp_desc->bmAttributes;
 }
@@ -89,7 +89,7 @@ static int mtu3_ep_enable(struct mtu3_ep *mep)
 if (usb_endpoint_xfer_isoc(desc) ||
 usb_endpoint_xfer_int(desc)) {
 interval = desc->bInterval;
-interval = clamp_val(interval, 1, 16) - 1;
+interval = clamp_val(interval, 1, 16);
 mult = usb_endpoint_maxp_mult(desc) - 1;
 }
 break;


@@ -1148,7 +1148,8 @@ static bool __need_flush_quota(struct f2fs_sb_info *sbi)
 if (!is_journalled_quota(sbi))
 return false;
-down_write(&sbi->quota_sem);
+if (!down_write_trylock(&sbi->quota_sem))
+return true;
 if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)) {
 ret = false;
 } else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR)) {


@@ -686,7 +686,8 @@ xfs_ioc_space(
 if (bf->l_start > XFS_ISIZE(ip)) {
 error = xfs_alloc_file_space(ip, XFS_ISIZE(ip),
-bf->l_start - XFS_ISIZE(ip), 0);
+bf->l_start - XFS_ISIZE(ip),
+XFS_BMAPI_PREALLOC);
 if (error)
 goto out_unlock;
 }


@@ -3135,7 +3135,7 @@ struct trace_buffer_struct {
 char buffer[4][TRACE_BUF_SIZE];
 };
-static struct trace_buffer_struct *trace_percpu_buffer;
+static struct trace_buffer_struct __percpu *trace_percpu_buffer;
 /*
 * Thise allows for lockless recording. If we're nested too deeply, then
@@ -3145,7 +3145,7 @@ static char *get_trace_buf(void)
 {
 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
-if (!buffer || buffer->nesting >= 4)
+if (!trace_percpu_buffer || buffer->nesting >= 4)
 return NULL;
 buffer->nesting++;
@@ -3164,7 +3164,7 @@ static void put_trace_buf(void)
 static int alloc_percpu_trace_buffer(void)
 {
-struct trace_buffer_struct *buffers;
+struct trace_buffer_struct __percpu *buffers;
 if (trace_percpu_buffer)
 return 0;


@@ -1373,6 +1373,7 @@ batadv_mcast_forw_rtr_node_get(struct batadv_priv *bat_priv,
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: The multicast packet to check
 * @orig: an originator to be set to forward the skb to
+* @is_routable: stores whether the destination is routable
 *
 * Return: the forwarding mode as enum batadv_forw_mode and in case of
 * BATADV_FORW_SINGLE set the orig to the single originator the skb
@@ -1380,17 +1381,16 @@ batadv_mcast_forw_rtr_node_get(struct batadv_priv *bat_priv,
 */
 enum batadv_forw_mode
 batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
-struct batadv_orig_node **orig)
+struct batadv_orig_node **orig, int *is_routable)
 {
 int ret, tt_count, ip_count, unsnoop_count, total_count;
 bool is_unsnoopable = false;
 unsigned int mcast_fanout;
 struct ethhdr *ethhdr;
-int is_routable = 0;
 int rtr_count = 0;
 ret = batadv_mcast_forw_mode_check(bat_priv, skb, &is_unsnoopable,
-&is_routable);
+is_routable);
 if (ret == -ENOMEM)
 return BATADV_FORW_NONE;
 else if (ret < 0)
@@ -1403,7 +1403,7 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
 ip_count = batadv_mcast_forw_want_all_ip_count(bat_priv, ethhdr);
 unsnoop_count = !is_unsnoopable ? 0 :
 atomic_read(&bat_priv->mcast.num_want_all_unsnoopables);
-rtr_count = batadv_mcast_forw_rtr_count(bat_priv, is_routable);
+rtr_count = batadv_mcast_forw_rtr_count(bat_priv, *is_routable);
 total_count = tt_count + ip_count + unsnoop_count + rtr_count;
@@ -1723,6 +1723,7 @@ batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv,
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
+* @is_routable: stores whether the destination is routable
 *
 * Sends copies of a frame with multicast destination to any node that signaled
 * interest in it, that is either via the translation table or the according
@@ -1735,7 +1736,7 @@ batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv,
 * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
 */
 int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
-unsigned short vid)
+unsigned short vid, int is_routable)
 {
 int ret;
@@ -1751,12 +1752,16 @@ int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
 return ret;
 }
+if (!is_routable)
+goto skip_mc_router;
 ret = batadv_mcast_forw_want_rtr(bat_priv, skb, vid);
 if (ret != NET_XMIT_SUCCESS) {
 kfree_skb(skb);
 return ret;
 }
+skip_mc_router:
 consume_skb(skb);
 return ret;
 }


@@ -44,7 +44,8 @@ enum batadv_forw_mode {
 enum batadv_forw_mode
 batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
-struct batadv_orig_node **mcast_single_orig);
+struct batadv_orig_node **mcast_single_orig,
+int *is_routable);
 int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
 struct sk_buff *skb,
@@ -52,7 +53,7 @@ int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
 struct batadv_orig_node *orig_node);
 int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
-unsigned short vid);
+unsigned short vid, int is_routable);
 void batadv_mcast_init(struct batadv_priv *bat_priv);
@@ -71,7 +72,8 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig_node);
 static inline enum batadv_forw_mode
 batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
-struct batadv_orig_node **mcast_single_orig)
+struct batadv_orig_node **mcast_single_orig,
+int *is_routable)
 {
 return BATADV_FORW_ALL;
 }
@@ -88,7 +90,7 @@ batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
 static inline int
 batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
-unsigned short vid)
+unsigned short vid, int is_routable)
 {
 kfree_skb(skb);
 return NET_XMIT_DROP;


@@ -200,6 +200,7 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
 int gw_mode;
 enum batadv_forw_mode forw_mode = BATADV_FORW_SINGLE;
 struct batadv_orig_node *mcast_single_orig = NULL;
+int mcast_is_routable = 0;
 int network_offset = ETH_HLEN;
 __be16 proto;
@@ -302,7 +303,8 @@ send:
 send:
 if (do_bcast && !is_broadcast_ether_addr(ethhdr->h_dest)) {
 forw_mode = batadv_mcast_forw_mode(bat_priv, skb,
-&mcast_single_orig);
+&mcast_single_orig,
+&mcast_is_routable);
 if (forw_mode == BATADV_FORW_NONE)
 goto dropped;
@@ -367,7 +369,8 @@ send:
 ret = batadv_mcast_forw_send_orig(bat_priv, skb, vid,
 mcast_single_orig);
 } else if (forw_mode == BATADV_FORW_SOME) {
-ret = batadv_mcast_forw_send(bat_priv, skb, vid);
+ret = batadv_mcast_forw_send(bat_priv, skb, vid,
+mcast_is_routable);
 } else {
 if (batadv_dat_snoop_outgoing_arp_request(bat_priv,
 skb))


@@ -192,6 +192,10 @@ int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining,
 nla_entype = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
 if (nla_entype) {
+if (nla_len(nla_entype) < sizeof(u16)) {
+NL_SET_ERR_MSG(extack, "Invalid RTA_ENCAP_TYPE");
+return -EINVAL;
+}
 encap_type = nla_get_u16(nla_entype);
 if (lwtunnel_valid_encap_type(encap_type,


@@ -663,6 +663,19 @@ static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining,
 return nhs;
 }
+static int fib_gw_from_attr(__be32 *gw, struct nlattr *nla,
+struct netlink_ext_ack *extack)
+{
+if (nla_len(nla) < sizeof(*gw)) {
+NL_SET_ERR_MSG(extack, "Invalid IPv4 address in RTA_GATEWAY");
+return -EINVAL;
+}
+*gw = nla_get_in_addr(nla);
+return 0;
+}
 /* only called when fib_nh is integrated into fib_info */
 static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
 int remaining, struct fib_config *cfg,
@@ -705,7 +718,11 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
 return -EINVAL;
 }
 if (nla) {
-fib_cfg.fc_gw4 = nla_get_in_addr(nla);
+ret = fib_gw_from_attr(&fib_cfg.fc_gw4, nla,
+extack);
+if (ret)
+goto errout;
 if (fib_cfg.fc_gw4)
 fib_cfg.fc_gw_family = AF_INET;
 } else if (nlav) {
@@ -715,10 +732,18 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
 }
 nla = nla_find(attrs, attrlen, RTA_FLOW);
-if (nla)
+if (nla) {
+if (nla_len(nla) < sizeof(u32)) {
+NL_SET_ERR_MSG(extack, "Invalid RTA_FLOW");
+return -EINVAL;
+}
 fib_cfg.fc_flow = nla_get_u32(nla);
+}
 fib_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
+/* RTA_ENCAP_TYPE length checked in
+* lwtunnel_valid_encap_type_attr
+*/
 nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
 if (nla)
 fib_cfg.fc_encap_type = nla_get_u16(nla);
@@ -903,6 +928,7 @@ int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi,
 attrlen = rtnh_attrlen(rtnh);
 if (attrlen > 0) {
 struct nlattr *nla, *nlav, *attrs = rtnh_attrs(rtnh);
+int err;
 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
 nlav = nla_find(attrs, attrlen, RTA_VIA);
@@ -913,12 +939,17 @@ int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi,
 }
 if (nla) {
+__be32 gw;
+err = fib_gw_from_attr(&gw, nla, extack);
+if (err)
+return err;
 if (nh->fib_nh_gw_family != AF_INET ||
-nla_get_in_addr(nla) != nh->fib_nh_gw4)
+gw != nh->fib_nh_gw4)
 return 1;
 } else if (nlav) {
 struct fib_config cfg2;
-int err;
 err = fib_gw_from_via(&cfg2, nlav, extack);
 if (err)
@@ -941,8 +972,14 @@ int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi,
 #ifdef CONFIG_IP_ROUTE_CLASSID
 nla = nla_find(attrs, attrlen, RTA_FLOW);
-if (nla && nla_get_u32(nla) != nh->nh_tclassid)
-return 1;
+if (nla) {
+if (nla_len(nla) < sizeof(u32)) {
+NL_SET_ERR_MSG(extack, "Invalid RTA_FLOW");
+return -EINVAL;
+}
+if (nla_get_u32(nla) != nh->nh_tclassid)
+return 1;
+}
 #endif
 }


@@ -3009,7 +3009,7 @@ int udp4_seq_show(struct seq_file *seq, void *v)
 {
 seq_setwidth(seq, 127);
 if (v == SEQ_START_TOKEN)
-seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
+seq_puts(seq, "   sl  local_address rem_address   st tx_queue "
 "rx_queue tr tm->when retrnsmt uid timeout "
 "inode ref pointer drops");
 else {


@@ -804,6 +804,8 @@ vti6_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 struct net *net = dev_net(dev);
 struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+memset(&p1, 0, sizeof(p1));
 switch (cmd) {
 case SIOCGETTUNNEL:
 if (dev == ip6n->fb_tnl_dev) {


@@ -1020,6 +1020,9 @@ static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
 struct raw6_sock *rp = raw6_sk(sk);
 int val;
+if (optlen < sizeof(val))
+return -EINVAL;
 if (copy_from_sockptr(&val, optval, sizeof(val)))
 return -EFAULT;


@@ -5071,6 +5071,19 @@ out:
 return should_notify;
 }
+static int fib6_gw_from_attr(struct in6_addr *gw, struct nlattr *nla,
+struct netlink_ext_ack *extack)
+{
+if (nla_len(nla) < sizeof(*gw)) {
+NL_SET_ERR_MSG(extack, "Invalid IPv6 address in RTA_GATEWAY");
+return -EINVAL;
+}
+*gw = nla_get_in6_addr(nla);
+return 0;
+}
 static int ip6_route_multipath_add(struct fib6_config *cfg,
 struct netlink_ext_ack *extack)
 {
@@ -5111,10 +5124,18 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
 if (nla) {
-r_cfg.fc_gateway = nla_get_in6_addr(nla);
+err = fib6_gw_from_attr(&r_cfg.fc_gateway, nla,
+extack);
+if (err)
+goto cleanup;
 r_cfg.fc_flags |= RTF_GATEWAY;
 }
 r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
+/* RTA_ENCAP_TYPE length checked in
+* lwtunnel_valid_encap_type_attr
+*/
 nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
 if (nla)
 r_cfg.fc_encap_type = nla_get_u16(nla);
@@ -5281,7 +5302,13 @@ static int ip6_route_multipath_del(struct fib6_config *cfg,
 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
 if (nla) {
-nla_memcpy(&r_cfg.fc_gateway, nla, 16);
+err = fib6_gw_from_attr(&r_cfg.fc_gateway, nla,
+extack);
+if (err) {
+last_err = err;
+goto next_rtnh;
+}
 r_cfg.fc_flags |= RTF_GATEWAY;
 }
 }
@@ -5289,6 +5316,7 @@ static int ip6_route_multipath_del(struct fib6_config *cfg,
 if (err)
 last_err = err;
+next_rtnh:
 rtnh = rtnh_next(rtnh, &remaining);
 }


@@ -5194,7 +5194,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
 */
 if (new_sta) {
 u32 rates = 0, basic_rates = 0;
-bool have_higher_than_11mbit;
+bool have_higher_than_11mbit = false;
 int min_rate = INT_MAX, min_rate_index = -1;
 const struct cfg80211_bss_ies *ies;
 int shift = ieee80211_vif_get_shift(&sdata->vif);


@@ -306,7 +306,7 @@ static int nr_setsockopt(struct socket *sock, int level, int optname,
 if (optlen < sizeof(unsigned int))
 return -EINVAL;
-if (copy_from_sockptr(&opt, optval, sizeof(unsigned int)))
+if (copy_from_sockptr(&opt, optval, sizeof(unsigned long)))
 return -EFAULT;
 switch (optname) {

View File

@@ -868,6 +868,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
 err = pep_accept_conn(newsk, skb);
 if (err) {
+__sock_put(sk);
 sock_put(newsk);
 newsk = NULL;
 goto drop;


@@ -1421,10 +1421,8 @@ static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
 if (err < 0)
 return err;
-if (qdisc_dev(sch)->tx_queue_len + 1 > QFQ_MAX_AGG_CLASSES)
-max_classes = QFQ_MAX_AGG_CLASSES;
-else
-max_classes = qdisc_dev(sch)->tx_queue_len + 1;
+max_classes = min_t(u64, (u64)qdisc_dev(sch)->tx_queue_len + 1,
+QFQ_MAX_AGG_CLASSES);
 /* max_cl_shift = floor(log_2(max_classes)) */
 max_cl_shift = __fls(max_classes);
 q->max_agg_classes = 1<<max_cl_shift;


@@ -3,6 +3,9 @@
 #include <linux/kthread.h>
 #include <linux/ftrace.h>
+extern void my_direct_func1(void);
+extern void my_direct_func2(void);
 void my_direct_func1(void)
 {
 trace_printk("my direct func1\n");


@@ -4,6 +4,9 @@
 #include <linux/mm.h> /* for handle_mm_fault() */
 #include <linux/ftrace.h>
+extern void my_direct_func(struct vm_area_struct *vma,
+unsigned long address, unsigned int flags);
 void my_direct_func(struct vm_area_struct *vma,
 unsigned long address, unsigned int flags)
 {


@@ -4,6 +4,8 @@
 #include <linux/sched.h> /* for wake_up_process() */
 #include <linux/ftrace.h>
+extern void my_direct_func(struct task_struct *p);
 void my_direct_func(struct task_struct *p)
 {
 trace_printk("waking up %s-%d\n", p->comm, p->pid);


@@ -497,7 +497,7 @@ static int test_process_vm_readv(void)
 }
 if (vsyscall_map_r) {
-if (!memcmp(buf, (const void *)0xffffffffff600000, 4096)) {
+if (!memcmp(buf, remote.iov_base, sizeof(buf))) {
 printf("[OK]\tIt worked and read correct data\n");
 } else {
 printf("[FAIL]\tIt worked but returned incorrect data\n");