Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/ethernet/emulex/benet/be.h
	drivers/net/netconsole.c
	net/bridge/br_private.h

Three mostly trivial conflicts.

The net/bridge/br_private.h conflict was a function signature (argument
addition) change overlapping with the extern removals from Joe Perches.

In drivers/net/netconsole.c we had one change adjusting a printk
message whilst another changed "printk(KERN_INFO" into "pr_info(".

Lastly, the emulex change was a new inline function addition overlapping
with Joe Perches's extern removals.

Signed-off-by: David S. Miller <davem@davemloft.net>
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6767,6 +6767,10 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
 	if (ret)
 		return -EFAULT;
 
+	/* disabled for now */
+	if (attr->mmap2)
+		return -EINVAL;
+
 	if (attr->__reserved_1)
 		return -EINVAL;
 
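With this hunk applied the kernel rejects any perf_event_attr that sets the new
mmap2 bit, so a tool that wants MMAP2 records has to probe for support and fall
back to plain MMAP. A minimal probe sketch, not part of this merge; it assumes
userspace headers new enough to define attr.mmap2:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>

/*
 * Feature probe: does the running kernel accept attr.mmap2?
 * Returns 1 = supported, 0 = rejected, -1 = unrelated failure.
 */
static int mmap2_supported(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;
	attr.mmap2 = 1;			/* the bit this hunk rejects */

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return errno == EINVAL ? 0 : -1;
	close(fd);
	return 1;
}

On a kernel carrying this hunk the open fails with EINVAL and the helper
returns 0; once the feature is re-enabled the same probe starts returning 1.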
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -87,10 +87,31 @@ again:
 		goto out;
 
 	/*
-	 * Publish the known good head. Rely on the full barrier implied
-	 * by atomic_dec_and_test() order the rb->head read and this
-	 * write.
+	 * Since the mmap() consumer (userspace) can run on a different CPU:
+	 *
+	 *   kernel				user
+	 *
+	 *   READ ->data_tail			READ ->data_head
+	 *   smp_mb()	(A)			smp_rmb()	(C)
+	 *   WRITE $data			READ $data
+	 *   smp_wmb()	(B)			smp_mb()	(D)
+	 *   STORE ->data_head			WRITE ->data_tail
+	 *
+	 * Where A pairs with D, and B pairs with C.
+	 *
+	 * I don't think A needs to be a full barrier because we won't in fact
+	 * write data until we see the store from userspace. So we simply don't
+	 * issue the data WRITE until we observe it. Be conservative for now.
+	 *
+	 * OTOH, D needs to be a full barrier since it separates the data READ
+	 * from the tail WRITE.
+	 *
+	 * For B a WMB is sufficient since it separates two WRITEs, and for C
+	 * an RMB is sufficient since it separates two READs.
+	 *
+	 * See perf_output_begin().
 	 */
+	smp_wmb();
 	rb->user_page->data_head = head;
 
 	/*
@@ -154,9 +175,11 @@ int perf_output_begin(struct perf_output_handle *handle,
 		 * Userspace could choose to issue a mb() before updating the
 		 * tail pointer. So that all reads will be completed before the
 		 * write is issued.
+		 *
+		 * See perf_output_put_handle().
 		 */
 		tail = ACCESS_ONCE(rb->user_page->data_tail);
-		smp_rmb();
+		smp_mb();
 		offset = head = local_read(&rb->head);
 		head += size;
 		if (unlikely(!perf_output_space(rb, tail, offset, head)))
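The new comment spells out the ordering contract with the mmap() consumer. A
sketch of what the matching userspace side looks like, with barriers (C) and
(D) from the table above; pg, data, mask and consume() are illustrative
assumptions, and wrap-around handling is omitted:

#include <linux/perf_event.h>

/* Placeholder for real record processing. */
static void consume(struct perf_event_header *hdr)
{
	(void)hdr;
}

/*
 * Userspace (consumer) side of the pairing: (C) after reading
 * ->data_head, (D) before publishing ->data_tail.  Assumes "pg" points
 * at the mmap()ed perf_event_mmap_page, "data" at the data pages behind
 * it, and "mask" is the data-area size minus one; records are assumed
 * not to wrap the buffer edge (real code must handle the wrap).
 */
static void drain_ring(volatile struct perf_event_mmap_page *pg,
		       unsigned char *data, unsigned long mask)
{
	__u64 head = pg->data_head;		/* READ ->data_head */
	__u64 tail = pg->data_tail;

	__sync_synchronize();			/* (C): an rmb would suffice */

	while (tail != head) {
		struct perf_event_header *hdr =
			(struct perf_event_header *)(data + (tail & mask));

		consume(hdr);			/* READ $data */
		tail += hdr->size;
	}

	__sync_synchronize();			/* (D): must be a full mb */
	pg->data_tail = tail;			/* WRITE ->data_tail */
}

GCC's __sync_synchronize() stands in for a full barrier here; on the read side
(C) a cheaper read barrier would do, exactly as the comment argues for smp_rmb().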
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -410,7 +410,7 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock,
 static __always_inline int __sched
 __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		    struct lockdep_map *nest_lock, unsigned long ip,
-		    struct ww_acquire_ctx *ww_ctx)
+		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
 {
 	struct task_struct *task = current;
 	struct mutex_waiter waiter;
@@ -450,7 +450,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		struct task_struct *owner;
 		struct mspin_node  node;
 
-		if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
+		if (use_ww_ctx && ww_ctx->acquired > 0) {
 			struct ww_mutex *ww;
 
 			ww = container_of(lock, struct ww_mutex, base);
@@ -480,7 +480,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		if ((atomic_read(&lock->count) == 1) &&
 		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
 			lock_acquired(&lock->dep_map, ip);
-			if (!__builtin_constant_p(ww_ctx == NULL)) {
+			if (use_ww_ctx) {
 				struct ww_mutex *ww;
 				ww = container_of(lock, struct ww_mutex, base);
 
@@ -551,7 +551,7 @@ slowpath:
 			goto err;
 		}
 
-		if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
+		if (use_ww_ctx && ww_ctx->acquired > 0) {
 			ret = __mutex_lock_check_stamp(lock, ww_ctx);
 			if (ret)
 				goto err;
@@ -575,7 +575,7 @@ skip_wait:
 	lock_acquired(&lock->dep_map, ip);
 	mutex_set_owner(lock);
 
-	if (!__builtin_constant_p(ww_ctx == NULL)) {
+	if (use_ww_ctx) {
 		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
 		struct mutex_waiter *cur;
 
@@ -615,7 +615,7 @@ mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
-			    subclass, NULL, _RET_IP_, NULL);
+			    subclass, NULL, _RET_IP_, NULL, 0);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_nested);
@@ -625,7 +625,7 @@ _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
 {
 	might_sleep();
 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
-			    0, nest, _RET_IP_, NULL);
+			    0, nest, _RET_IP_, NULL, 0);
 }
 
 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
@@ -635,7 +635,7 @@ mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
 	return __mutex_lock_common(lock, TASK_KILLABLE,
-				   subclass, NULL, _RET_IP_, NULL);
+				   subclass, NULL, _RET_IP_, NULL, 0);
 }
 EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
 
@@ -644,7 +644,7 @@ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
 	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
-				   subclass, NULL, _RET_IP_, NULL);
+				   subclass, NULL, _RET_IP_, NULL, 0);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -682,7 +682,7 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 
 	might_sleep();
 	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
-				  0, &ctx->dep_map, _RET_IP_, ctx);
+				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
 	if (!ret && ctx->acquired > 1)
 		return ww_mutex_deadlock_injection(lock, ctx);
 
@@ -697,7 +697,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 
 	might_sleep();
 	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
-				  0, &ctx->dep_map, _RET_IP_, ctx);
+				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
 
 	if (!ret && ctx->acquired > 1)
 		return ww_mutex_deadlock_injection(lock, ctx);
@@ -809,28 +809,28 @@ __mutex_lock_slowpath(atomic_t *lock_count)
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 
 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
-			    NULL, _RET_IP_, NULL);
+			    NULL, _RET_IP_, NULL, 0);
 }
 
 static noinline int __sched
 __mutex_lock_killable_slowpath(struct mutex *lock)
 {
 	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
-				   NULL, _RET_IP_, NULL);
+				   NULL, _RET_IP_, NULL, 0);
 }
 
 static noinline int __sched
 __mutex_lock_interruptible_slowpath(struct mutex *lock)
 {
 	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
-				   NULL, _RET_IP_, NULL);
+				   NULL, _RET_IP_, NULL, 0);
 }
 
 static noinline int __sched
 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
 	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
-				   NULL, _RET_IP_, ctx);
+				   NULL, _RET_IP_, ctx, 1);
 }
 
 static noinline int __sched
@@ -838,7 +838,7 @@ __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
 				       struct ww_acquire_ctx *ctx)
 {
 	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
-				   NULL, _RET_IP_, ctx);
+				   NULL, _RET_IP_, ctx, 1);
 }
 
 #endif
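All callers of __mutex_lock_common() now pass a literal 0 or 1 for use_ww_ctx.
Because the function is __always_inline, the constant propagates into each
inlined copy and the dead ww_ctx branches are discarded at compile time, which
is what the old __builtin_constant_p(ww_ctx == NULL) test tried to achieve
without being reliable across gcc versions. The pattern in isolation, with
hypothetical names rather than the kernel code itself:

#include <stdbool.h>
#include <stddef.h>

struct ctx { int acquired; };

/* Each call site passes a literal, so after forced inlining the
 * "if (use_ctx ...)" test folds to a constant and the branch vanishes. */
static inline __attribute__((always_inline)) int
lock_common(struct ctx *ctx, const bool use_ctx)
{
	if (use_ctx && ctx->acquired > 0)
		return -1;
	return 0;
}

int lock_plain(void)             { return lock_common(NULL, 0); }
int lock_with_ctx(struct ctx *c) { return lock_common(c, 1); }

In lock_plain() the branch folds to nothing, so the NULL context is never
dereferenced even without optimizer heroics.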
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -846,7 +846,7 @@ static int software_resume(void)
 		goto Finish;
 }
 
-late_initcall(software_resume);
+late_initcall_sync(software_resume);
 
 
 static const char * const hibernation_modes[] = {
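For context on the one-liner: initcalls run in numbered levels, and the _sync
variant of a level is ordered after all plain entries of that level, so
software_resume() now runs after every other late initcall. Paraphrasing the
era's include/linux/init.h (exact macro spelling varies by kernel version):

/* "7s" sorts after plain "7", so late_initcall_sync() entries run
 * last among the late initcalls. */
#define late_initcall(fn)	__define_initcall("7", fn, 7)
#define late_initcall_sync(fn)	__define_initcall("7s", fn, 7s)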
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -33,6 +33,54 @@ struct ce_unbind {
 	int res;
 };
 
+static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
+			bool ismax)
+{
+	u64 clc = (u64) latch << evt->shift;
+	u64 rnd;
+
+	if (unlikely(!evt->mult)) {
+		evt->mult = 1;
+		WARN_ON(1);
+	}
+	rnd = (u64) evt->mult - 1;
+
+	/*
+	 * Upper bound sanity check. If the backwards conversion is
+	 * not equal latch, we know that the above shift overflowed.
+	 */
+	if ((clc >> evt->shift) != (u64)latch)
+		clc = ~0ULL;
+
+	/*
+	 * Scaled math oddities:
+	 *
+	 * For mult <= (1 << shift) we can safely add mult - 1 to
+	 * prevent integer rounding loss. So the backwards conversion
+	 * from nsec to device ticks will be correct.
+	 *
+	 * For mult > (1 << shift), i.e. device frequency is > 1GHz we
+	 * need to be careful. Adding mult - 1 will result in a value
+	 * which when converted back to device ticks can be larger
+	 * than latch by up to (mult - 1) >> shift. For the min_delta
+	 * calculation we still want to apply this in order to stay
+	 * above the minimum device ticks limit. For the upper limit
+	 * we would end up with a latch value larger than the upper
+	 * limit of the device, so we omit the add to stay below the
+	 * device upper boundary.
+	 *
+	 * Also omit the add if it would overflow the u64 boundary.
+	 */
+	if ((~0ULL - clc > rnd) &&
+	    (!ismax || evt->mult <= (1U << evt->shift)))
+		clc += rnd;
+
+	do_div(clc, evt->mult);
+
+	/* Deltas less than 1usec are pointless noise */
+	return clc > 1000 ? clc : 1000;
+}
+
 /**
  * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds
  * @latch:	value to convert
@@ -42,20 +90,7 @@ struct ce_unbind {
  */
 u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
 {
-	u64 clc = (u64) latch << evt->shift;
-
-	if (unlikely(!evt->mult)) {
-		evt->mult = 1;
-		WARN_ON(1);
-	}
-
-	do_div(clc, evt->mult);
-	if (clc < 1000)
-		clc = 1000;
-	if (clc > KTIME_MAX)
-		clc = KTIME_MAX;
-
-	return clc;
+	return cev_delta2ns(latch, evt, false);
 }
 EXPORT_SYMBOL_GPL(clockevent_delta2ns);
 
@@ -380,8 +415,8 @@ void clockevents_config(struct clock_event_device *dev, u32 freq)
 		sec = 600;
 
 	clockevents_calc_mult_shift(dev, freq, sec);
-	dev->min_delta_ns = clockevent_delta2ns(dev->min_delta_ticks, dev);
-	dev->max_delta_ns = clockevent_delta2ns(dev->max_delta_ticks, dev);
+	dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false);
+	dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);
 }
 
 /**
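The helper's math in one place: a clockevent's mult/shift pair encodes
ticks = ns * mult >> shift, so the reverse conversion is
ns = (latch << shift) / mult, and adding mult - 1 before the divide keeps the
round trip from falling below latch. A stand-alone model of cev_delta2ns(),
with an assumed 32768 Hz configuration of mult = 70369 and shift = 31
(illustrative values; the kernel derives them via clockevents_calc_mult_shift()):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Host-side model of cev_delta2ns(); do_div() replaced by plain
 * 64-bit division, clock_event_device fields passed directly. */
static uint64_t delta2ns(uint64_t latch, uint32_t mult, uint32_t shift,
			 bool ismax)
{
	uint64_t clc = latch << shift;
	uint64_t rnd = (uint64_t)mult - 1;

	if ((clc >> shift) != latch)	/* the shift overflowed 64 bits */
		clc = ~0ULL;

	if ((~0ULL - clc > rnd) && (!ismax || mult <= (1U << shift)))
		clc += rnd;		/* compensate integer rounding loss */

	clc /= mult;
	return clc > 1000 ? clc : 1000;	/* sub-usec deltas are noise */
}

int main(void)
{
	/* One tick of an assumed 32768 Hz device: prints 30518. */
	printf("%llu\n", (unsigned long long)delta2ns(1, 70369, 31, false));
	return 0;
}

With the rounding term one tick converts to 30518 ns, which converts back to
exactly 1 tick; dropping the term yields 30517 ns, which truncates back to 0
device ticks, below the minimum the min_delta path must respect.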