Merge 5.10.55 into android12-5.10-lts

Changes in 5.10.55
	tools: Allow proper CC/CXX/... override with LLVM=1 in Makefile.include
	io_uring: fix link timeout refs
	KVM: x86: determine if an exception has an error code only when injecting it.
	af_unix: fix garbage collect vs MSG_PEEK
	workqueue: fix UAF in pwq_unbound_release_workfn()
	cgroup1: fix leaked context root causing sporadic NULL deref in LTP
	net/802/mrp: fix memleak in mrp_request_join()
	net/802/garp: fix memleak in garp_request_join()
	net: annotate data race around sk_ll_usec
	sctp: move 198 addresses from unusable to private scope
	rcu-tasks: Don't delete holdouts within trc_inspect_reader()
	rcu-tasks: Don't delete holdouts within trc_wait_for_one_reader()
	ipv6: allocate enough headroom in ip6_finish_output2()
	drm/ttm: add a check against null pointer dereference
	hfs: add missing clean-up in hfs_fill_super
	hfs: fix high memory mapping in hfs_bnode_read
	hfs: add lock nesting notation to hfs_find_init
	firmware: arm_scmi: Fix possible scmi_linux_errmap buffer overflow
	firmware: arm_scmi: Fix range check for the maximum number of pending messages
	cifs: fix the out of range assignment to bit fields in parse_server_interfaces
	iomap: remove the length variable in iomap_seek_data
	iomap: remove the length variable in iomap_seek_hole
	ARM: dts: versatile: Fix up interrupt controller node names
	ipv6: ip6_finish_output2: set sk into newly allocated nskb
	Linux 5.10.55

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I2d673bdde784b3689af73289305091dbd4ead042
Committed by Greg Kroah-Hartman on 2021-07-31 08:51:04 +02:00
27 changed files with 216 additions and 69 deletions

diff --git a/Makefile b/Makefile

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 54
+SUBLEVEL = 55
 EXTRAVERSION =
 NAME = Dare mighty things

diff --git a/arch/arm/boot/dts/versatile-ab.dts b/arch/arm/boot/dts/versatile-ab.dts

@@ -195,16 +195,15 @@
 		#size-cells = <1>;
 		ranges;

-		vic: intc@10140000 {
+		vic: interrupt-controller@10140000 {
 			compatible = "arm,versatile-vic";
 			interrupt-controller;
 			#interrupt-cells = <1>;
 			reg = <0x10140000 0x1000>;
-			clear-mask = <0xffffffff>;
 			valid-mask = <0xffffffff>;
 		};

-		sic: intc@10003000 {
+		sic: interrupt-controller@10003000 {
 			compatible = "arm,versatile-sic";
 			interrupt-controller;
 			#interrupt-cells = <1>;

diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts

@@ -7,7 +7,7 @@
 	amba {
 		/* The Versatile PB is using more SIC IRQ lines than the AB */
-		sic: intc@10003000 {
+		sic: interrupt-controller@10003000 {
 			clear-mask = <0xffffffff>;
 			/*
 			 * Valid interrupt lines mask according to

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c

@@ -541,8 +541,6 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
 	if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
 	queue:
-		if (has_error && !is_protmode(vcpu))
-			has_error = false;
 		if (reinject) {
 			/*
 			 * On vmentry, vcpu->arch.exception.pending is only
@@ -8265,6 +8263,13 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
 	kvm_x86_ops.update_cr8_intercept(vcpu, tpr, max_irr);
 }

+static void kvm_inject_exception(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->arch.exception.error_code && !is_protmode(vcpu))
+		vcpu->arch.exception.error_code = false;
+	kvm_x86_ops.queue_exception(vcpu);
+}
+
 static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit)
 {
 	int r;
@@ -8273,7 +8278,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit
 	/* try to reinject previous events if any */
 	if (vcpu->arch.exception.injected) {
-		kvm_x86_ops.queue_exception(vcpu);
+		kvm_inject_exception(vcpu);
 		can_inject = false;
 	}
 	/*
@@ -8336,7 +8341,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit
 		}
 	}

-	kvm_x86_ops.queue_exception(vcpu);
+	kvm_inject_exception(vcpu);
 	can_inject = false;
 }

diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c

@@ -49,7 +49,6 @@ enum scmi_error_codes {
 	SCMI_ERR_GENERIC = -8,	/* Generic Error */
 	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
 	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
-	SCMI_ERR_MAX
 };

 /* List of all SCMI devices active in system */
@@ -168,8 +167,10 @@ static const int scmi_linux_errmap[] = {

 static inline int scmi_to_linux_errno(int errno)
 {
-	if (errno < SCMI_SUCCESS && errno > SCMI_ERR_MAX)
-		return scmi_linux_errmap[-errno];
+	int err_idx = -errno;
+
+	if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
+		return scmi_linux_errmap[err_idx];
 	return -EIO;
 }

@@ -1029,8 +1030,9 @@ static int __scmi_xfer_info_init(struct scmi_info *sinfo,
 	const struct scmi_desc *desc = sinfo->desc;

 	/* Pre-allocated messages, no more than what hdr.seq can support */
-	if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
-		dev_err(dev, "Maximum message of %d exceeds supported %ld\n",
+	if (WARN_ON(!desc->max_msg || desc->max_msg > MSG_TOKEN_MAX)) {
+		dev_err(dev,
+			"Invalid maximum messages %d, not in range [1 - %lu]\n",
 			desc->max_msg, MSG_TOKEN_MAX);
 		return -EINVAL;
 	}
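
With the SCMI_ERR_MAX sentinel gone, the lookup validates the computed index directly against the table size, so any unexpected firmware code falls back to -EIO instead of risking an out-of-range read. A minimal userspace sketch of that pattern (the short table here is illustrative, not the kernel's full map):

	#include <errno.h>
	#include <stdio.h>

	#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

	static const int errmap[] = {
		0,		/* success */
		-EOPNOTSUPP,	/* "not supported" */
		-EINVAL,	/* "invalid parameters" */
		-EACCES,	/* "access denied" */
	};

	static int scmi_to_linux_errno(int scmi_errno)
	{
		int err_idx = -scmi_errno;

		/* Index the map only inside [0, ARRAY_SIZE(errmap)). */
		if (err_idx >= 0 && err_idx < (int)ARRAY_SIZE(errmap))
			return errmap[err_idx];
		return -EIO;	/* out-of-range firmware codes */
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       scmi_to_linux_errno(-2),	/* -EINVAL */
		       scmi_to_linux_errno(-99),	/* -EIO */
		       scmi_to_linux_errno(7));	/* positive: -EIO */
		return 0;
	}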

diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c

@@ -147,6 +147,9 @@ int ttm_range_man_fini(struct ttm_bo_device *bdev,
 	struct drm_mm *mm = &rman->mm;
 	int ret;

+	if (!man)
+		return 0;
+
 	ttm_resource_manager_set_used(man, false);

 	ret = ttm_resource_manager_force_list_clean(bdev, man);

diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c

@@ -497,8 +497,8 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
 	p = buf;
 	while (bytes_left >= sizeof(*p)) {
 		info->speed = le64_to_cpu(p->LinkSpeed);
-		info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE);
-		info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE);
+		info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0;
+		info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 1 : 0;

 		cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
 		cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);

diff --git a/fs/hfs/bfind.c b/fs/hfs/bfind.c

@@ -25,7 +25,19 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
 	fd->key = ptr + tree->max_key_len + 2;
 	hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n",
 		tree->cnid, __builtin_return_address(0));
-	mutex_lock(&tree->tree_lock);
+	switch (tree->cnid) {
+	case HFS_CAT_CNID:
+		mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX);
+		break;
+	case HFS_EXT_CNID:
+		mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX);
+		break;
+	case HFS_ATTR_CNID:
+		mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX);
+		break;
+	default:
+		return -EINVAL;
+	}
 	return 0;
 }
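
mutex_lock_nested() only changes what lockdep sees: the per-tree tree_lock mutexes are initialized at a single mutex_init() call site, so they share one lock class, and taking the extents tree lock while holding the catalog tree lock looks to lockdep like recursive locking. The subclass constants declare the legitimate nesting order. A kernel-style sketch of the idiom (hypothetical names, not standalone userspace code):

	#include <linux/mutex.h>

	enum demo_lock_classes { DEMO_OUTER, DEMO_INNER };

	static struct mutex demo_locks[2];

	static void demo_init(void)
	{
		int i;

		/* One mutex_init() call site => one lockdep class for both. */
		for (i = 0; i < 2; i++)
			mutex_init(&demo_locks[i]);
	}

	static void demo_take_both(void)
	{
		/* Plain mutex_lock() on both would trip lockdep's
		 * same-class check; the subclass marks distinct levels.
		 */
		mutex_lock_nested(&demo_locks[0], DEMO_OUTER);
		mutex_lock_nested(&demo_locks[1], DEMO_INNER);
		/* ... */
		mutex_unlock(&demo_locks[1]);
		mutex_unlock(&demo_locks[0]);
	}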

diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c

@@ -15,16 +15,31 @@
 #include "btree.h"

-void hfs_bnode_read(struct hfs_bnode *node, void *buf,
-		int off, int len)
+void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
 {
 	struct page *page;
+	int pagenum;
+	int bytes_read;
+	int bytes_to_read;
+	void *vaddr;

 	off += node->page_offset;
-	page = node->page[0];
+	pagenum = off >> PAGE_SHIFT;
+	off &= ~PAGE_MASK; /* compute page offset for the first page */

-	memcpy(buf, kmap(page) + off, len);
-	kunmap(page);
+	for (bytes_read = 0; bytes_read < len; bytes_read += bytes_to_read) {
+		if (pagenum >= node->tree->pages_per_bnode)
+			break;
+		page = node->page[pagenum];
+		bytes_to_read = min_t(int, len - bytes_read, PAGE_SIZE - off);
+
+		vaddr = kmap_atomic(page);
+		memcpy(buf + bytes_read, vaddr + off, bytes_to_read);
+		kunmap_atomic(vaddr);
+
+		pagenum++;
+		off = 0; /* page offset only applies to the first page */
+	}
 }

 u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
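
The old single memcpy() assumed the requested range lived entirely in page[0]; the loop instead walks one page at a time, honoring the byte offset only within the first page. The same chunking logic reduced to a runnable userspace sketch with tiny 4-byte "pages":

	#include <stdio.h>
	#include <string.h>

	#define DEMO_PAGE_SIZE 4

	static void paged_read(char pages[][DEMO_PAGE_SIZE], int npages,
			       char *buf, int off, int len)
	{
		int pagenum = off / DEMO_PAGE_SIZE;
		int bytes_read, bytes_to_read;

		off %= DEMO_PAGE_SIZE; /* offset applies to first page only */
		for (bytes_read = 0; bytes_read < len; bytes_read += bytes_to_read) {
			if (pagenum >= npages)
				break;
			bytes_to_read = len - bytes_read;
			if (bytes_to_read > DEMO_PAGE_SIZE - off)
				bytes_to_read = DEMO_PAGE_SIZE - off;
			memcpy(buf + bytes_read, pages[pagenum] + off, bytes_to_read);
			pagenum++;
			off = 0;
		}
	}

	int main(void)
	{
		char pages[3][DEMO_PAGE_SIZE] = { "abcd", "efgh", "ijkl" };
		char out[8] = { 0 };

		paged_read(pages, 3, out, 2, 6);	/* spans pages 0..1 */
		printf("%.6s\n", out);			/* prints "cdefgh" */
		return 0;
	}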

diff --git a/fs/hfs/btree.h b/fs/hfs/btree.h

@@ -13,6 +13,13 @@ typedef int (*btree_keycmp)(const btree_key *, const btree_key *);

 #define NODE_HASH_SIZE  256

+/* B-tree mutex nested subclasses */
+enum hfs_btree_mutex_classes {
+	CATALOG_BTREE_MUTEX,
+	EXTENTS_BTREE_MUTEX,
+	ATTR_BTREE_MUTEX,
+};
+
 /* A HFS BTree held in memory */
 struct hfs_btree {
 	struct super_block *sb;

diff --git a/fs/hfs/super.c b/fs/hfs/super.c

@@ -420,14 +420,12 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
 	if (!res) {
 		if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) {
 			res = -EIO;
-			goto bail;
+			goto bail_hfs_find;
 		}
 		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength);
 	}
-	if (res) {
-		hfs_find_exit(&fd);
-		goto bail_no_root;
-	}
+	if (res)
+		goto bail_hfs_find;
 	res = -EINVAL;
 	root_inode = hfs_iget(sb, &fd.search_key->cat, &rec);
 	hfs_find_exit(&fd);
@@ -443,6 +441,8 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
 	/* everything's okay */
 	return 0;

+bail_hfs_find:
+	hfs_find_exit(&fd);
 bail_no_root:
 	pr_err("get root inode failed\n");
 bail:

diff --git a/fs/internal.h b/fs/internal.h

@@ -64,7 +64,6 @@ extern void __init chrdev_init(void);
  */
 extern const struct fs_context_operations legacy_fs_context_ops;
 extern int parse_monolithic_mount_data(struct fs_context *, void *);
-extern void fc_drop_locked(struct fs_context *);
 extern void vfs_clean_context(struct fs_context *fc);
 extern int finish_clean_context(struct fs_context *fc);

diff --git a/fs/io_uring.c b/fs/io_uring.c

@@ -6266,7 +6266,6 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
 	if (prev) {
 		io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
 		io_put_req_deferred(prev, 1);
-		io_put_req_deferred(req, 1);
 	} else {
 		io_cqring_add_event(req, -ETIME, 0);
 		io_put_req_deferred(req, 1);

diff --git a/fs/iomap/seek.c b/fs/iomap/seek.c

@@ -140,23 +140,20 @@ loff_t
 iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
 {
 	loff_t size = i_size_read(inode);
-	loff_t length = size - offset;
 	loff_t ret;

 	/* Nothing to be found before or beyond the end of the file. */
 	if (offset < 0 || offset >= size)
 		return -ENXIO;

-	while (length > 0) {
-		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
-				  &offset, iomap_seek_hole_actor);
+	while (offset < size) {
+		ret = iomap_apply(inode, offset, size - offset, IOMAP_REPORT,
+				  ops, &offset, iomap_seek_hole_actor);
 		if (ret < 0)
 			return ret;
 		if (ret == 0)
 			break;
-
 		offset += ret;
-		length -= ret;
 	}

 	return offset;
@@ -186,27 +183,23 @@ loff_t
 iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
 {
 	loff_t size = i_size_read(inode);
-	loff_t length = size - offset;
 	loff_t ret;

 	/* Nothing to be found before or beyond the end of the file. */
 	if (offset < 0 || offset >= size)
 		return -ENXIO;

-	while (length > 0) {
-		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
-				  &offset, iomap_seek_data_actor);
+	while (offset < size) {
+		ret = iomap_apply(inode, offset, size - offset, IOMAP_REPORT,
+				  ops, &offset, iomap_seek_data_actor);
 		if (ret < 0)
 			return ret;
 		if (ret == 0)
-			break;
+			return offset;

 		offset += ret;
-		length -= ret;
 	}

-	if (length <= 0)
-		return -ENXIO;
-	return offset;
+	/* We've reached the end of the file without finding data */
+	return -ENXIO;
 }
 EXPORT_SYMBOL_GPL(iomap_seek_data);
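
Both rewrites track progress purely by comparing offset against i_size instead of carrying a separate length counter, and seek-data returns as soon as the actor reports a hit. A runnable userspace sketch of the rewritten loop shape, with a toy "apply" callback standing in for iomap_apply() (data assumed to live in [40, 60) of a 100-byte file):

	#include <stdio.h>

	#define SIZE 100	/* pretend i_size */

	/* Toy actor: returns 0 when data starts exactly at *offset,
	 * otherwise the number of bytes scanned without finding data.
	 */
	static long apply(long *offset, long count)
	{
		long start = *offset, end = start + count;

		if (start >= 40 && start < 60)
			return 0;		/* data found at *offset */
		if (start < 40 && end > 40)
			return 40 - start;	/* scan up to the data */
		return count;			/* nothing but hole */
	}

	static long seek_data(long offset)
	{
		long ret;

		if (offset < 0 || offset >= SIZE)
			return -1;		/* -ENXIO */
		while (offset < SIZE) {
			ret = apply(&offset, SIZE - offset);
			if (ret == 0)
				return offset;	/* data found */
			offset += ret;
		}
		return -1;			/* EOF, no data: -ENXIO */
	}

	int main(void)
	{
		/* prints "40 50 -1" */
		printf("%ld %ld %ld\n", seek_data(10), seek_data(50), seek_data(70));
		return 0;
	}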

diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h

@@ -139,6 +139,7 @@ extern int vfs_parse_fs_string(struct fs_context *fc, const char *key,
 extern int generic_parse_monolithic(struct fs_context *fc, void *data);
 extern int vfs_get_tree(struct fs_context *fc);
 extern void put_fs_context(struct fs_context *fc);
+extern void fc_drop_locked(struct fs_context *fc);

 /*
  * sget() wrappers to be called from the ->get_tree() op.

diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h

@@ -36,7 +36,7 @@ static inline bool net_busy_loop_on(void)
 static inline bool sk_can_busy_loop(const struct sock *sk)
 {
-	return sk->sk_ll_usec && !signal_pending(current);
+	return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current);
 }

 bool sk_busy_loop_end(void *p, unsigned long start_time);

diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h

@@ -340,8 +340,7 @@ enum {
 #define SCTP_SCOPE_POLICY_MAX	SCTP_SCOPE_POLICY_LINK

 /* Based on IPv4 scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>,
- * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 198.18.0.0/24,
- * 192.88.99.0/24.
+ * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 192.88.99.0/24.
  * Also, RFC 8.4, non-unicast addresses are not considered valid SCTP
  * addresses.
  */
@@ -349,7 +348,6 @@ enum {
 	((htonl(INADDR_BROADCAST) == a) || \
 	 ipv4_is_multicast(a) || \
 	 ipv4_is_zeronet(a) || \
-	 ipv4_is_test_198(a) || \
 	 ipv4_is_anycast_6to4(a))

 /* Flags used for the bind address copy functions. */

diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c

@@ -1228,9 +1228,7 @@ int cgroup1_get_tree(struct fs_context *fc)
 	ret = cgroup_do_get_tree(fc);

 	if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) {
-		struct super_block *sb = fc->root->d_sb;
-		dput(fc->root);
-		deactivate_locked_super(sb);
+		fc_drop_locked(fc);
 		ret = 1;
 	}

diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h

@@ -879,10 +879,9 @@ static bool trc_inspect_reader(struct task_struct *t, void *arg)
 		in_qs = likely(!t->trc_reader_nesting);
 	}

-	// Mark as checked. Because this is called from the grace-period
-	// kthread, also remove the task from the holdout list.
+	// Mark as checked so that the grace-period kthread will
+	// remove it from the holdout list.
 	t->trc_reader_checked = true;
-	trc_del_holdout(t);

 	if (in_qs)
 		return true;  // Already in quiescent state, done!!!
@@ -909,7 +908,6 @@ static void trc_wait_for_one_reader(struct task_struct *t,
 	// The current task had better be in a quiescent state.
 	if (t == current) {
 		t->trc_reader_checked = true;
-		trc_del_holdout(t);
 		WARN_ON_ONCE(t->trc_reader_nesting);
 		return;
 	}

diff --git a/kernel/workqueue.c b/kernel/workqueue.c

@@ -3689,15 +3689,21 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
 						  unbound_release_work);
 	struct workqueue_struct *wq = pwq->wq;
 	struct worker_pool *pool = pwq->pool;
-	bool is_last;
+	bool is_last = false;

-	if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
-		return;
+	/*
+	 * when @pwq is not linked, it doesn't hold any reference to the
+	 * @wq, and @wq is invalid to access.
+	 */
+	if (!list_empty(&pwq->pwqs_node)) {
+		if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
+			return;

-	mutex_lock(&wq->mutex);
-	list_del_rcu(&pwq->pwqs_node);
-	is_last = list_empty(&wq->pwqs);
-	mutex_unlock(&wq->mutex);
+		mutex_lock(&wq->mutex);
+		list_del_rcu(&pwq->pwqs_node);
+		is_last = list_empty(&wq->pwqs);
+		mutex_unlock(&wq->mutex);
+	}

 	mutex_lock(&wq_pool_mutex);
 	put_unbound_pool(pool);

diff --git a/net/802/garp.c b/net/802/garp.c

@@ -203,6 +203,19 @@ static void garp_attr_destroy(struct garp_applicant *app, struct garp_attr *attr
 	kfree(attr);
 }

+static void garp_attr_destroy_all(struct garp_applicant *app)
+{
+	struct rb_node *node, *next;
+	struct garp_attr *attr;
+
+	for (node = rb_first(&app->gid);
+	     next = node ? rb_next(node) : NULL, node != NULL;
+	     node = next) {
+		attr = rb_entry(node, struct garp_attr, node);
+		garp_attr_destroy(app, attr);
+	}
+}
+
 static int garp_pdu_init(struct garp_applicant *app)
 {
 	struct sk_buff *skb;
@@ -609,6 +622,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl

 	spin_lock_bh(&app->lock);
 	garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
+	garp_attr_destroy_all(app);
 	garp_pdu_queue(app);
 	spin_unlock_bh(&app->lock);

diff --git a/net/802/mrp.c b/net/802/mrp.c

@@ -292,6 +292,19 @@ static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr)
 	kfree(attr);
 }

+static void mrp_attr_destroy_all(struct mrp_applicant *app)
+{
+	struct rb_node *node, *next;
+	struct mrp_attr *attr;
+
+	for (node = rb_first(&app->mad);
+	     next = node ? rb_next(node) : NULL, node != NULL;
+	     node = next) {
+		attr = rb_entry(node, struct mrp_attr, node);
+		mrp_attr_destroy(app, attr);
+	}
+}
+
 static int mrp_pdu_init(struct mrp_applicant *app)
 {
 	struct sk_buff *skb;
@@ -895,6 +908,7 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)

 	spin_lock_bh(&app->lock);
 	mrp_mad_event(app, MRP_EVENT_TX);
+	mrp_attr_destroy_all(app);
 	mrp_pdu_queue(app);
 	spin_unlock_bh(&app->lock);
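
The garp and mrp leak fixes are twins: the uninit paths previously left the attribute rb-trees populated, and both new destroy_all helpers use the same teardown idiom of fetching the successor with rb_next() before destroying the current node, since a freed node can no longer be walked. The idiom reduced to a runnable userspace sketch over a linked list:

	#include <stdio.h>
	#include <stdlib.h>

	struct attr {
		struct attr *next;
		int value;
	};

	static void destroy_all(struct attr **head)
	{
		struct attr *node, *next;

		for (node = *head; node != NULL; node = next) {
			next = node->next;	/* read successor before freeing */
			free(node);
		}
		*head = NULL;
	}

	int main(void)
	{
		struct attr *head = NULL;
		int i;

		for (i = 0; i < 3; i++) {
			struct attr *a = malloc(sizeof(*a));

			a->value = i;
			a->next = head;
			head = a;
		}
		destroy_all(&head);
		printf("head is %s\n", head ? "set" : "NULL");
		return 0;
	}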

diff --git a/net/core/sock.c b/net/core/sock.c

@@ -1166,7 +1166,7 @@ set_sndbuf:
 		if (val < 0)
 			ret = -EINVAL;
 		else
-			sk->sk_ll_usec = val;
+			WRITE_ONCE(sk->sk_ll_usec, val);
 	}
 	break;
 #endif
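
The two sk_ll_usec hunks (include/net/busy_poll.h above and this one) form a pair: the field is written under the socket lock but read locklessly from the busy-poll path, so both sides are annotated to guarantee single, untorn accesses and to mark the data race as intentional for KCSAN. A simplified userspace rendering, assuming the usual volatile-cast shape of the macros:

	#include <stdio.h>

	#define READ_ONCE(x)	(*(const volatile __typeof__(x) *)&(x))
	#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))

	static unsigned int sk_ll_usec;

	int main(void)
	{
		/* writer side (sock_setsockopt, under lock in the kernel) */
		WRITE_ONCE(sk_ll_usec, 50);

		/* lockless reader side (sk_can_busy_loop) */
		printf("busy poll budget: %u usec\n", READ_ONCE(sk_ll_usec));
		return 0;
	}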

diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c

@@ -60,10 +60,38 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
 {
 	struct dst_entry *dst = skb_dst(skb);
 	struct net_device *dev = dst->dev;
+	unsigned int hh_len = LL_RESERVED_SPACE(dev);
+	int delta = hh_len - skb_headroom(skb);
 	const struct in6_addr *nexthop;
 	struct neighbour *neigh;
 	int ret;

+	/* Be paranoid, rather than too clever. */
+	if (unlikely(delta > 0) && dev->header_ops) {
+		/* pskb_expand_head() might crash, if skb is shared */
+		if (skb_shared(skb)) {
+			struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
+
+			if (likely(nskb)) {
+				if (skb->sk)
+					skb_set_owner_w(nskb, skb->sk);
+				consume_skb(skb);
+			} else {
+				kfree_skb(skb);
+			}
+			skb = nskb;
+		}
+		if (skb &&
+		    pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
+			kfree_skb(skb);
+			skb = NULL;
+		}
+		if (!skb) {
+			IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
+			return -ENOMEM;
+		}
+	}
+
 	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
 		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c

@@ -397,7 +397,8 @@ static enum sctp_scope sctp_v4_scope(union sctp_addr *addr)
 		retval = SCTP_SCOPE_LINK;
 	} else if (ipv4_is_private_10(addr->v4.sin_addr.s_addr) ||
 		   ipv4_is_private_172(addr->v4.sin_addr.s_addr) ||
-		   ipv4_is_private_192(addr->v4.sin_addr.s_addr)) {
+		   ipv4_is_private_192(addr->v4.sin_addr.s_addr) ||
+		   ipv4_is_test_198(addr->v4.sin_addr.s_addr)) {
 		retval = SCTP_SCOPE_PRIVATE;
 	} else {
 		retval = SCTP_SCOPE_GLOBAL;

diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c

@@ -1521,6 +1521,53 @@ out:
 	return err;
 }

+static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
+{
+	scm->fp = scm_fp_dup(UNIXCB(skb).fp);
+
+	/*
+	 * Garbage collection of unix sockets starts by selecting a set of
+	 * candidate sockets which have reference only from being in flight
+	 * (total_refs == inflight_refs). This condition is checked once during
+	 * the candidate collection phase, and candidates are marked as such, so
+	 * that non-candidates can later be ignored. While inflight_refs is
+	 * protected by unix_gc_lock, total_refs (file count) is not, hence this
+	 * is an instantaneous decision.
+	 *
+	 * Once a candidate, however, the socket must not be reinstalled into a
+	 * file descriptor while the garbage collection is in progress.
+	 *
+	 * If the above conditions are met, then the directed graph of
+	 * candidates (*) does not change while unix_gc_lock is held.
+	 *
+	 * Any operations that changes the file count through file descriptors
+	 * (dup, close, sendmsg) does not change the graph since candidates are
+	 * not installed in fds.
+	 *
+	 * Dequeing a candidate via recvmsg would install it into an fd, but
+	 * that takes unix_gc_lock to decrement the inflight count, so it's
+	 * serialized with garbage collection.
+	 *
+	 * MSG_PEEK is special in that it does not change the inflight count,
+	 * yet does install the socket into an fd. The following lock/unlock
+	 * pair is to ensure serialization with garbage collection. It must be
+	 * done between incrementing the file count and installing the file into
+	 * an fd.
+	 *
+	 * If garbage collection starts after the barrier provided by the
+	 * lock/unlock, then it will see the elevated refcount and not mark this
+	 * as a candidate. If a garbage collection is already in progress
+	 * before the file count was incremented, then the lock/unlock pair will
+	 * ensure that garbage collection is finished before progressing to
+	 * installing the fd.
+	 *
+	 * (*) A -> B where B is on the queue of A or B is on the queue of C
+	 * which is on the queue of listening socket A.
+	 */
+	spin_lock(&unix_gc_lock);
+	spin_unlock(&unix_gc_lock);
+}
+
 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
 {
 	int err = 0;
@@ -2170,7 +2217,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
 		sk_peek_offset_fwd(sk, size);

 		if (UNIXCB(skb).fp)
-			scm.fp = scm_fp_dup(UNIXCB(skb).fp);
+			unix_peek_fds(&scm, skb);
 	}
 	err = (flags & MSG_TRUNC) ? skb->len - skip : size;
@@ -2413,7 +2460,7 @@ unlock:
 			/* It is questionable, see note in unix_dgram_recvmsg.
 			 */
 			if (UNIXCB(skb).fp)
-				scm.fp = scm_fp_dup(UNIXCB(skb).fp);
+				unix_peek_fds(&scm, skb);

 			sk_peek_offset_fwd(sk, chunk);
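
The empty spin_lock()/spin_unlock() pair in unix_peek_fds() is a pure serialization point, not a critical section: it waits out any in-progress garbage collection and guarantees that a collection starting later observes the already-elevated file count. The same barrier idiom in a runnable pthread sketch (names are hypothetical):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t gc_lock = PTHREAD_MUTEX_INITIALIZER;

	static void barrier_against_gc(void)
	{
		/* Empty critical section: blocks until a collector holding
		 * gc_lock finishes; a collector starting afterwards sees
		 * all of our prior writes.
		 */
		pthread_mutex_lock(&gc_lock);
		pthread_mutex_unlock(&gc_lock);
	}

	int main(void)
	{
		barrier_against_gc();
		printf("past the barrier\n");
		return 0;
	}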

diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include

@@ -39,8 +39,6 @@ EXTRA_WARNINGS += -Wundef
 EXTRA_WARNINGS += -Wwrite-strings
 EXTRA_WARNINGS += -Wformat

-CC_NO_CLANG := $(shell $(CC) -dM -E -x c /dev/null | grep -Fq "__clang__"; echo $$?)
-
 # Makefiles suck: This macro sets a default value of $(2) for the
 # variable named by $(1), unless the variable has been set by
 # environment or command line. This is necessary for CC and AR
@@ -52,12 +50,22 @@ define allow-override
   $(eval $(1) = $(2)))
 endef

+ifneq ($(LLVM),)
+$(call allow-override,CC,clang)
+$(call allow-override,AR,llvm-ar)
+$(call allow-override,LD,ld.lld)
+$(call allow-override,CXX,clang++)
+$(call allow-override,STRIP,llvm-strip)
+else
 # Allow setting various cross-compile vars or setting CROSS_COMPILE as a prefix.
 $(call allow-override,CC,$(CROSS_COMPILE)gcc)
 $(call allow-override,AR,$(CROSS_COMPILE)ar)
 $(call allow-override,LD,$(CROSS_COMPILE)ld)
 $(call allow-override,CXX,$(CROSS_COMPILE)g++)
 $(call allow-override,STRIP,$(CROSS_COMPILE)strip)
+endif
+
+CC_NO_CLANG := $(shell $(CC) -dM -E -x c /dev/null | grep -Fq "__clang__"; echo $$?)

 ifneq ($(LLVM),)
 HOSTAR ?= llvm-ar