Merge branch 'akpm' (patches from Andrew)

Merge third patchbomb from Andrew Morton:

 - various misc things

 - a couple of lib/ optimisations

 - provide DIV_ROUND_CLOSEST_ULL() (a short semantics sketch follows the commit metadata below)

 - checkpatch updates

 - rtc tree

 - befs, nilfs2, hfs, hfsplus, fatfs, adfs, affs, bfs

 - ptrace fixes

 - fork() fixes

 - seccomp cleanups

 - more mmap_sem hold time reductions from Davidlohr

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (138 commits)
  proc: show locks in /proc/pid/fdinfo/X
  docs: add missing and new /proc/PID/status file entries, fix typos
  drivers/rtc/rtc-at91rm9200.c: make IO endian agnostic
  Documentation/spi/spidev_test.c: fix warning
  drivers/rtc/rtc-s5m.c: allow usage on device type different than main MFD type
  .gitignore: ignore *.tar
  MAINTAINERS: add Mediatek SoC mailing list
  tomoyo: reduce mmap_sem hold for mm->exe_file
  powerpc/oprofile: reduce mmap_sem hold for exe_file
  oprofile: reduce mmap_sem hold for mm->exe_file
  mips: ip32: add platform data hooks to use DS1685 driver
  lib/Kconfig: fix up HAVE_ARCH_BITREVERSE help text
  x86: switch to using asm-generic for seccomp.h
  sparc: switch to using asm-generic for seccomp.h
  powerpc: switch to using asm-generic for seccomp.h
  parisc: switch to using asm-generic for seccomp.h
  mips: switch to using asm-generic for seccomp.h
  microblaze: use asm-generic for seccomp.h
  arm: use asm-generic for seccomp.h
  seccomp: allow COMPAT sigreturn overrides
  ...
Committer: Linus Torvalds
Date:      2015-04-17 09:04:38 -04:00

158 changed files with 2194 additions and 1555 deletions
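One small API addition called out above is DIV_ROUND_CLOSEST_ULL(): unsigned 64-bit division rounded to the nearest integer instead of truncated. A minimal userspace sketch of its semantics (the real macro lives in include/linux/kernel.h and is built on do_div() so the 64-bit division also works on 32-bit architectures):

#include <stdio.h>

/* Sketch only: the kernel macro uses do_div() rather than plain '/'. */
static unsigned long long
div_round_closest_ull(unsigned long long x, unsigned long long d)
{
	/* Adding d/2 before dividing turns truncation into rounding. */
	return (x + d / 2) / d;
}

int main(void)
{
	/* 7/2 = 3.5 rounds to 4; 5/4 = 1.25 rounds to 1 */
	printf("%llu %llu\n", div_round_closest_ull(7, 2),
	       div_round_closest_ull(5, 4));
	return 0;
}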

lib/Kconfig

@@ -18,9 +18,8 @@ config HAVE_ARCH_BITREVERSE
 	default n
 	depends on BITREVERSE
 	help
-	  This option provides an config for the architecture which have instruction
-	  can do bitreverse operation, we use the hardware instruction if the architecture
-	  have this capability.
+	  This option enables the use of hardware bit-reversal instructions on
+	  architectures which support such operations.
 
 config RATIONAL
 	bool

lib/Makefile

@@ -25,7 +25,7 @@ obj-y += lockref.o
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
-	 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
+	 bsearch.o find_bit.o llist.o memweight.o kfifo.o \
 	 percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o

lib/bitmap.c

@@ -42,36 +42,6 @@
  * for the best explanations of this ordering.
  */
 
-int __bitmap_empty(const unsigned long *bitmap, unsigned int bits)
-{
-	unsigned int k, lim = bits/BITS_PER_LONG;
-	for (k = 0; k < lim; ++k)
-		if (bitmap[k])
-			return 0;
-
-	if (bits % BITS_PER_LONG)
-		if (bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
-			return 0;
-
-	return 1;
-}
-EXPORT_SYMBOL(__bitmap_empty);
-
-int __bitmap_full(const unsigned long *bitmap, unsigned int bits)
-{
-	unsigned int k, lim = bits/BITS_PER_LONG;
-	for (k = 0; k < lim; ++k)
-		if (~bitmap[k])
-			return 0;
-
-	if (bits % BITS_PER_LONG)
-		if (~bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
-			return 0;
-
-	return 1;
-}
-EXPORT_SYMBOL(__bitmap_full);
-
 int __bitmap_equal(const unsigned long *bitmap1,
 		const unsigned long *bitmap2, unsigned int bits)
 {
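These two out-of-line helpers can be deleted because bitmap_empty() and bitmap_full() are now static inlines in include/linux/bitmap.h built on the rewritten bit-search routines below. Roughly (a paraphrase that omits the fast path for small constant-sized bitmaps):

static inline int bitmap_empty(const unsigned long *src, unsigned int nbits)
{
	/* empty <=> no set bit is found before the end of the bitmap */
	return find_first_bit(src, nbits) == nbits;
}

static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
{
	/* full <=> no clear bit is found before the end of the bitmap */
	return find_first_zero_bit(src, nbits) == nbits;
}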

lib/cpumask.c

@@ -37,10 +37,11 @@ EXPORT_SYMBOL(__next_cpu_nr);
 int cpumask_next_and(int n, const struct cpumask *src1p,
 		     const struct cpumask *src2p)
 {
-	while ((n = cpumask_next(n, src1p)) < nr_cpu_ids)
-		if (cpumask_test_cpu(n, src2p))
-			break;
-	return n;
+	struct cpumask tmp;
+
+	if (cpumask_and(&tmp, src1p, src2p))
+		return cpumask_next(n, &tmp);
+	return nr_cpu_ids;
 }
 EXPORT_SYMBOL(cpumask_next_and);
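The rewrite replaces the old walk of src1p (one cpumask_next() plus one cpumask_test_cpu() per candidate CPU) with a single cpumask_and() into a temporary followed by one bit search; the tradeoff is a struct cpumask on the kernel stack. A toy single-word model of the two strategies (an illustration only; the real code operates on struct cpumask, and NBITS stands in for nr_cpu_ids):

#include <assert.h>
#include <stdint.h>

#define NBITS 64

static int next_bit(uint64_t mask, int n)	/* first set bit after n */
{
	for (int i = n + 1; i < NBITS; i++)
		if (mask & (1ULL << i))
			return i;
	return NBITS;
}

static int next_and_old(uint64_t src1, uint64_t src2, int n)
{
	while ((n = next_bit(src1, n)) < NBITS)	/* walk src1, test src2 */
		if (src2 & (1ULL << n))
			break;
	return n;
}

static int next_and_new(uint64_t src1, uint64_t src2, int n)
{
	uint64_t tmp = src1 & src2;	/* AND once, then one search */

	return tmp ? next_bit(tmp, n) : NBITS;
}

int main(void)
{
	for (int n = -1; n < NBITS; n++)
		assert(next_and_old(0xf0f0, 0xff00, n) ==
		       next_and_new(0xf0f0, 0xff00, n));
	return 0;
}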

lib/dma-debug.c

@@ -361,7 +361,7 @@ static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
 	unsigned int range = 0;
 
 	while (range <= max_range) {
-		entry = __hash_bucket_find(*bucket, &index, containing_match);
+		entry = __hash_bucket_find(*bucket, ref, containing_match);
 
 		if (entry)
 			return entry;

lib/find_bit.c (new file, 193 lines)

@@ -0,0 +1,193 @@
/* bit search implementation
*
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* Copyright (C) 2008 IBM Corporation
* 'find_last_bit' is written by Rusty Russell <rusty@rustcorp.com.au>
* (Inspired by David Howell's find_next_bit implementation)
*
* Rewritten by Yury Norov <yury.norov@gmail.com> to decrease
* size and improve performance, 2015.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/bitops.h>
#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/kernel.h>
#if !defined(find_next_bit) || !defined(find_next_zero_bit)
/*
* This is a common helper function for find_next_bit and
* find_next_zero_bit. The difference is the "invert" argument, which
* is XORed with each fetched word before searching it for one bits.
*/
static unsigned long _find_next_bit(const unsigned long *addr,
unsigned long nbits, unsigned long start, unsigned long invert)
{
unsigned long tmp;
if (!nbits || start >= nbits)
return nbits;
tmp = addr[start / BITS_PER_LONG] ^ invert;
/* Handle 1st word. */
tmp &= BITMAP_FIRST_WORD_MASK(start);
start = round_down(start, BITS_PER_LONG);
while (!tmp) {
start += BITS_PER_LONG;
if (start >= nbits)
return nbits;
tmp = addr[start / BITS_PER_LONG] ^ invert;
}
return min(start + __ffs(tmp), nbits);
}
#endif
#ifndef find_next_bit
/*
* Find the next set bit in a memory region.
*/
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
return _find_next_bit(addr, size, offset, 0UL);
}
EXPORT_SYMBOL(find_next_bit);
#endif
#ifndef find_next_zero_bit
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
return _find_next_bit(addr, size, offset, ~0UL);
}
EXPORT_SYMBOL(find_next_zero_bit);
#endif
#ifndef find_first_bit
/*
* Find the first set bit in a memory region.
*/
unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
{
unsigned long idx;
for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
if (addr[idx])
return min(idx * BITS_PER_LONG + __ffs(addr[idx]), size);
}
return size;
}
EXPORT_SYMBOL(find_first_bit);
#endif
#ifndef find_first_zero_bit
/*
* Find the first cleared bit in a memory region.
*/
unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
unsigned long idx;
for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
if (addr[idx] != ~0UL)
return min(idx * BITS_PER_LONG + ffz(addr[idx]), size);
}
return size;
}
EXPORT_SYMBOL(find_first_zero_bit);
#endif
#ifndef find_last_bit
unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
{
if (size) {
unsigned long val = BITMAP_LAST_WORD_MASK(size);
unsigned long idx = (size-1) / BITS_PER_LONG;
do {
val &= addr[idx];
if (val)
return idx * BITS_PER_LONG + __fls(val);
val = ~0ul;
} while (idx--);
}
return size;
}
EXPORT_SYMBOL(find_last_bit);
#endif
#ifdef __BIG_ENDIAN
/* include/linux/byteorder does not support "unsigned long" type */
static inline unsigned long ext2_swab(const unsigned long y)
{
#if BITS_PER_LONG == 64
return (unsigned long) __swab64((u64) y);
#elif BITS_PER_LONG == 32
return (unsigned long) __swab32((u32) y);
#else
#error BITS_PER_LONG not defined
#endif
}
#if !defined(find_next_bit_le) || !defined(find_next_zero_bit_le)
static unsigned long _find_next_bit_le(const unsigned long *addr,
unsigned long nbits, unsigned long start, unsigned long invert)
{
unsigned long tmp;
if (!nbits || start >= nbits)
return nbits;
tmp = addr[start / BITS_PER_LONG] ^ invert;
/* Handle 1st word. */
tmp &= ext2_swab(BITMAP_FIRST_WORD_MASK(start));
start = round_down(start, BITS_PER_LONG);
while (!tmp) {
start += BITS_PER_LONG;
if (start >= nbits)
return nbits;
tmp = addr[start / BITS_PER_LONG] ^ invert;
}
return min(start + __ffs(ext2_swab(tmp)), nbits);
}
#endif
#ifndef find_next_zero_bit_le
unsigned long find_next_zero_bit_le(const void *addr, unsigned
long size, unsigned long offset)
{
return _find_next_bit_le(addr, size, offset, ~0UL);
}
EXPORT_SYMBOL(find_next_zero_bit_le);
#endif
#ifndef find_next_bit_le
unsigned long find_next_bit_le(const void *addr, unsigned
long size, unsigned long offset)
{
return _find_next_bit_le(addr, size, offset, 0UL);
}
EXPORT_SYMBOL(find_next_bit_le);
#endif
#endif /* __BIG_ENDIAN */
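The "invert" parameter described in the comment above is what lets one helper serve both find_next_bit() and find_next_zero_bit(): each fetched word is XORed with 0 or ~0UL, so searching for clear bits becomes searching for set bits. A self-contained userspace sketch of the same idea (WORD_BITS, the open-coded first-word mask and __builtin_ctzl() are local stand-ins for the kernel's BITS_PER_LONG, BITMAP_FIRST_WORD_MASK() and __ffs()):

#include <assert.h>

#define WORD_BITS (8UL * sizeof(unsigned long))

static unsigned long find_next(const unsigned long *addr,
		unsigned long nbits, unsigned long start, unsigned long invert)
{
	unsigned long tmp;

	if (!nbits || start >= nbits)
		return nbits;

	tmp = addr[start / WORD_BITS] ^ invert;
	tmp &= ~0UL << (start % WORD_BITS);	/* mask off bits below start */
	start -= start % WORD_BITS;		/* round down to a word */

	while (!tmp) {
		start += WORD_BITS;
		if (start >= nbits)
			return nbits;
		tmp = addr[start / WORD_BITS] ^ invert;
	}
	start += __builtin_ctzl(tmp);		/* index of lowest set bit */
	return start < nbits ? start : nbits;
}

int main(void)
{
	unsigned long map[2] = { 0xf0UL, ~0UL };
	unsigned long nbits = 2 * WORD_BITS;

	assert(find_next(map, nbits, 1, 0UL) == 4);	/* next set bit */
	assert(find_next(map, nbits, 5, ~0UL) == 8);	/* next clear bit */
	return 0;
}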

lib/find_last_bit.c

@@ -4,6 +4,9 @@
  * Written by Rusty Russell <rusty@rustcorp.com.au>
  * (Inspired by David Howell's find_next_bit implementation)
  *
+ * Rewritten by Yury Norov <yury.norov@gmail.com> to decrease
+ * size and improve performance, 2015.
+ *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
@@ -11,37 +14,26 @@
  */
 
 #include <linux/bitops.h>
+#include <linux/bitmap.h>
 #include <linux/export.h>
-#include <asm/types.h>
-#include <asm/byteorder.h>
+#include <linux/kernel.h>
+
+#ifndef find_last_bit
 
 unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
 {
-	unsigned long words;
-	unsigned long tmp;
-
-	/* Start at final word. */
-	words = size / BITS_PER_LONG;
-
-	/* Partial final word? */
-	if (size & (BITS_PER_LONG-1)) {
-		tmp = (addr[words] & (~0UL >> (BITS_PER_LONG
-					 - (size & (BITS_PER_LONG-1)))));
-		if (tmp)
-			goto found;
-	}
-
-	while (words) {
-		tmp = addr[--words];
-		if (tmp) {
- found:
-			return words * BITS_PER_LONG + __fls(tmp);
-		}
-	}
-
-	/* Not found */
+	if (size) {
+		unsigned long val = BITMAP_LAST_WORD_MASK(size);
+		unsigned long idx = (size-1) / BITS_PER_LONG;
+
+		do {
+			val &= addr[idx];
+			if (val)
+				return idx * BITS_PER_LONG + __fls(val);
+
+			val = ~0ul;
+		} while (idx--);
+	}
 	return size;
 }
 EXPORT_SYMBOL(find_last_bit);
+
+#endif

lib/find_next_bit.c (deleted, 285 lines)

@@ -1,285 +0,0 @@
/* find_next_bit.c: fallback find next bit implementation
*
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/bitops.h>
#include <linux/export.h>
#include <asm/types.h>
#include <asm/byteorder.h>
#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
#ifndef find_next_bit
/*
* Find the next set bit in a memory region.
*/
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
const unsigned long *p = addr + BITOP_WORD(offset);
unsigned long result = offset & ~(BITS_PER_LONG-1);
unsigned long tmp;
if (offset >= size)
return size;
size -= result;
offset %= BITS_PER_LONG;
if (offset) {
tmp = *(p++);
tmp &= (~0UL << offset);
if (size < BITS_PER_LONG)
goto found_first;
if (tmp)
goto found_middle;
size -= BITS_PER_LONG;
result += BITS_PER_LONG;
}
while (size & ~(BITS_PER_LONG-1)) {
if ((tmp = *(p++)))
goto found_middle;
result += BITS_PER_LONG;
size -= BITS_PER_LONG;
}
if (!size)
return result;
tmp = *p;
found_first:
tmp &= (~0UL >> (BITS_PER_LONG - size));
if (tmp == 0UL) /* Are any bits set? */
return result + size; /* Nope. */
found_middle:
return result + __ffs(tmp);
}
EXPORT_SYMBOL(find_next_bit);
#endif
#ifndef find_next_zero_bit
/*
* This implementation of find_{first,next}_zero_bit was stolen from
* Linus' asm-alpha/bitops.h.
*/
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
const unsigned long *p = addr + BITOP_WORD(offset);
unsigned long result = offset & ~(BITS_PER_LONG-1);
unsigned long tmp;
if (offset >= size)
return size;
size -= result;
offset %= BITS_PER_LONG;
if (offset) {
tmp = *(p++);
tmp |= ~0UL >> (BITS_PER_LONG - offset);
if (size < BITS_PER_LONG)
goto found_first;
if (~tmp)
goto found_middle;
size -= BITS_PER_LONG;
result += BITS_PER_LONG;
}
while (size & ~(BITS_PER_LONG-1)) {
if (~(tmp = *(p++)))
goto found_middle;
result += BITS_PER_LONG;
size -= BITS_PER_LONG;
}
if (!size)
return result;
tmp = *p;
found_first:
tmp |= ~0UL << size;
if (tmp == ~0UL) /* Are any bits zero? */
return result + size; /* Nope. */
found_middle:
return result + ffz(tmp);
}
EXPORT_SYMBOL(find_next_zero_bit);
#endif
#ifndef find_first_bit
/*
* Find the first set bit in a memory region.
*/
unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
{
const unsigned long *p = addr;
unsigned long result = 0;
unsigned long tmp;
while (size & ~(BITS_PER_LONG-1)) {
if ((tmp = *(p++)))
goto found;
result += BITS_PER_LONG;
size -= BITS_PER_LONG;
}
if (!size)
return result;
tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
if (tmp == 0UL) /* Are any bits set? */
return result + size; /* Nope. */
found:
return result + __ffs(tmp);
}
EXPORT_SYMBOL(find_first_bit);
#endif
#ifndef find_first_zero_bit
/*
* Find the first cleared bit in a memory region.
*/
unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
const unsigned long *p = addr;
unsigned long result = 0;
unsigned long tmp;
while (size & ~(BITS_PER_LONG-1)) {
if (~(tmp = *(p++)))
goto found;
result += BITS_PER_LONG;
size -= BITS_PER_LONG;
}
if (!size)
return result;
tmp = (*p) | (~0UL << size);
if (tmp == ~0UL) /* Are any bits zero? */
return result + size; /* Nope. */
found:
return result + ffz(tmp);
}
EXPORT_SYMBOL(find_first_zero_bit);
#endif
#ifdef __BIG_ENDIAN
/* include/linux/byteorder does not support "unsigned long" type */
static inline unsigned long ext2_swabp(const unsigned long * x)
{
#if BITS_PER_LONG == 64
return (unsigned long) __swab64p((u64 *) x);
#elif BITS_PER_LONG == 32
return (unsigned long) __swab32p((u32 *) x);
#else
#error BITS_PER_LONG not defined
#endif
}
/* include/linux/byteorder doesn't support "unsigned long" type */
static inline unsigned long ext2_swab(const unsigned long y)
{
#if BITS_PER_LONG == 64
return (unsigned long) __swab64((u64) y);
#elif BITS_PER_LONG == 32
return (unsigned long) __swab32((u32) y);
#else
#error BITS_PER_LONG not defined
#endif
}
#ifndef find_next_zero_bit_le
unsigned long find_next_zero_bit_le(const void *addr, unsigned
long size, unsigned long offset)
{
const unsigned long *p = addr;
unsigned long result = offset & ~(BITS_PER_LONG - 1);
unsigned long tmp;
if (offset >= size)
return size;
p += BITOP_WORD(offset);
size -= result;
offset &= (BITS_PER_LONG - 1UL);
if (offset) {
tmp = ext2_swabp(p++);
tmp |= (~0UL >> (BITS_PER_LONG - offset));
if (size < BITS_PER_LONG)
goto found_first;
if (~tmp)
goto found_middle;
size -= BITS_PER_LONG;
result += BITS_PER_LONG;
}
while (size & ~(BITS_PER_LONG - 1)) {
if (~(tmp = *(p++)))
goto found_middle_swap;
result += BITS_PER_LONG;
size -= BITS_PER_LONG;
}
if (!size)
return result;
tmp = ext2_swabp(p);
found_first:
tmp |= ~0UL << size;
if (tmp == ~0UL) /* Are any bits zero? */
return result + size; /* Nope. Skip ffz */
found_middle:
return result + ffz(tmp);
found_middle_swap:
return result + ffz(ext2_swab(tmp));
}
EXPORT_SYMBOL(find_next_zero_bit_le);
#endif
#ifndef find_next_bit_le
unsigned long find_next_bit_le(const void *addr, unsigned
long size, unsigned long offset)
{
const unsigned long *p = addr;
unsigned long result = offset & ~(BITS_PER_LONG - 1);
unsigned long tmp;
if (offset >= size)
return size;
p += BITOP_WORD(offset);
size -= result;
offset &= (BITS_PER_LONG - 1UL);
if (offset) {
tmp = ext2_swabp(p++);
tmp &= (~0UL << offset);
if (size < BITS_PER_LONG)
goto found_first;
if (tmp)
goto found_middle;
size -= BITS_PER_LONG;
result += BITS_PER_LONG;
}
while (size & ~(BITS_PER_LONG - 1)) {
tmp = *(p++);
if (tmp)
goto found_middle_swap;
result += BITS_PER_LONG;
size -= BITS_PER_LONG;
}
if (!size)
return result;
tmp = ext2_swabp(p);
found_first:
tmp &= (~0UL >> (BITS_PER_LONG - size));
if (tmp == 0UL) /* Are any bits set? */
return result + size; /* Nope. */
found_middle:
return result + __ffs(tmp);
found_middle_swap:
return result + __ffs(ext2_swab(tmp));
}
EXPORT_SYMBOL(find_next_bit_le);
#endif
#endif /* __BIG_ENDIAN */

lib/vsprintf.c

@@ -33,6 +33,7 @@
 #include <asm/page.h>		/* for PAGE_SIZE */
 #include <asm/sections.h>	/* for dereference_function_descriptor() */
+#include <asm/byteorder.h>	/* cpu_to_le16 */
 
 #include <linux/string_helpers.h>
 #include "kstrtox.h"
@@ -122,142 +123,145 @@ int skip_atoi(const char **s)
 	return i;
 }
 
-/* Decimal conversion is by far the most typical, and is used
- * for /proc and /sys data. This directly impacts e.g. top performance
- * with many processes running. We optimize it for speed
- * using ideas described at <http://www.cs.uiowa.edu/~jones/bcd/divide.html>
- * (with permission from the author, Douglas W. Jones).
+/*
+ * Decimal conversion is by far the most typical, and is used for
+ * /proc and /sys data. This directly impacts e.g. top performance
+ * with many processes running. We optimize it for speed by emitting
+ * two characters at a time, using a 200 byte lookup table. This
+ * roughly halves the number of multiplications compared to computing
+ * the digits one at a time. Implementation strongly inspired by the
+ * previous version, which in turn used ideas described at
+ * <http://www.cs.uiowa.edu/~jones/bcd/divide.html> (with permission
+ * from the author, Douglas W. Jones).
+ *
+ * It turns out there is precisely one 26 bit fixed-point
+ * approximation a of 64/100 for which x/100 == (x * (u64)a) >> 32
+ * holds for all x in [0, 10^8-1], namely a = 0x28f5c29. The actual
+ * range happens to be somewhat larger (x <= 1073741898), but that's
+ * irrelevant for our purpose.
+ *
+ * For dividing a number in the range [10^4, 10^6-1] by 100, we still
+ * need a 32x32->64 bit multiply, so we simply use the same constant.
+ *
+ * For dividing a number in the range [100, 10^4-1] by 100, there are
+ * several options. The simplest is (x * 0x147b) >> 19, which is valid
+ * for all x <= 43698.
  */
 
-#if BITS_PER_LONG != 32 || BITS_PER_LONG_LONG != 64
-/* Formats correctly any integer in [0, 999999999] */
-static noinline_for_stack
-char *put_dec_full9(char *buf, unsigned q)
-{
-	unsigned r;
-
-	/*
-	 * Possible ways to approx. divide by 10
-	 * (x * 0x1999999a) >> 32 x < 1073741829 (multiply must be 64-bit)
-	 * (x * 0xcccd) >> 19 x < 81920 (x < 262149 when 64-bit mul)
-	 * (x * 0x6667) >> 18 x < 43699
-	 * (x * 0x3334) >> 17 x < 16389
-	 * (x * 0x199a) >> 16 x < 16389
-	 * (x * 0x0ccd) >> 15 x < 16389
-	 * (x * 0x0667) >> 14 x < 2739
-	 * (x * 0x0334) >> 13 x < 1029
-	 * (x * 0x019a) >> 12 x < 1029
-	 * (x * 0x00cd) >> 11 x < 1029 shorter code than * 0x67 (on i386)
-	 * (x * 0x0067) >> 10 x < 179
-	 * (x * 0x0034) >> 9 x < 69 same
-	 * (x * 0x001a) >> 8 x < 69 same
-	 * (x * 0x000d) >> 7 x < 69 same, shortest code (on i386)
-	 * (x * 0x0007) >> 6 x < 19
-	 * See <http://www.cs.uiowa.edu/~jones/bcd/divide.html>
-	 */
-	r = (q * (uint64_t)0x1999999a) >> 32;
-	*buf++ = (q - 10 * r) + '0'; /* 1 */
-	q = (r * (uint64_t)0x1999999a) >> 32;
-	*buf++ = (r - 10 * q) + '0'; /* 2 */
-	r = (q * (uint64_t)0x1999999a) >> 32;
-	*buf++ = (q - 10 * r) + '0'; /* 3 */
-	q = (r * (uint64_t)0x1999999a) >> 32;
-	*buf++ = (r - 10 * q) + '0'; /* 4 */
-	r = (q * (uint64_t)0x1999999a) >> 32;
-	*buf++ = (q - 10 * r) + '0'; /* 5 */
-	/* Now value is under 10000, can avoid 64-bit multiply */
-	q = (r * 0x199a) >> 16;
-	*buf++ = (r - 10 * q) + '0'; /* 6 */
-	r = (q * 0xcd) >> 11;
-	*buf++ = (q - 10 * r) + '0'; /* 7 */
-	q = (r * 0xcd) >> 11;
-	*buf++ = (r - 10 * q) + '0'; /* 8 */
-	*buf++ = q + '0'; /* 9 */
-	return buf;
-}
-#endif
+static const u16 decpair[100] = {
+#define _(x) (__force u16) cpu_to_le16(((x % 10) | ((x / 10) << 8)) + 0x3030)
+	_( 0), _( 1), _( 2), _( 3), _( 4), _( 5), _( 6), _( 7), _( 8), _( 9),
+	_(10), _(11), _(12), _(13), _(14), _(15), _(16), _(17), _(18), _(19),
+	_(20), _(21), _(22), _(23), _(24), _(25), _(26), _(27), _(28), _(29),
+	_(30), _(31), _(32), _(33), _(34), _(35), _(36), _(37), _(38), _(39),
+	_(40), _(41), _(42), _(43), _(44), _(45), _(46), _(47), _(48), _(49),
+	_(50), _(51), _(52), _(53), _(54), _(55), _(56), _(57), _(58), _(59),
+	_(60), _(61), _(62), _(63), _(64), _(65), _(66), _(67), _(68), _(69),
+	_(70), _(71), _(72), _(73), _(74), _(75), _(76), _(77), _(78), _(79),
+	_(80), _(81), _(82), _(83), _(84), _(85), _(86), _(87), _(88), _(89),
+	_(90), _(91), _(92), _(93), _(94), _(95), _(96), _(97), _(98), _(99),
+#undef _
+};
 
-/* Similar to above but do not pad with zeros.
- * Code can be easily arranged to print 9 digits too, but our callers
- * always call put_dec_full9() instead when the number has 9 decimal digits.
- */
+/*
+ * This will print a single '0' even if r == 0, since we would
+ * immediately jump to out_r where two 0s would be written but only
+ * one of them accounted for in buf. This is needed by ip4_string
+ * below. All other callers pass a non-zero value of r.
+ */
 static noinline_for_stack
 char *put_dec_trunc8(char *buf, unsigned r)
 {
 	unsigned q;
 
-	/* Copy of previous function's body with added early returns */
-	while (r >= 10000) {
-		q = r + '0';
-		r = (r * (uint64_t)0x1999999a) >> 32;
-		*buf++ = q - 10*r;
-	}
-
-	q = (r * 0x199a) >> 16; /* r <= 9999 */
-	*buf++ = (r - 10 * q) + '0';
-	if (q == 0)
-		return buf;
-	r = (q * 0xcd) >> 11; /* q <= 999 */
-	*buf++ = (q - 10 * r) + '0';
-	if (r == 0)
-		return buf;
-	q = (r * 0xcd) >> 11; /* r <= 99 */
-	*buf++ = (r - 10 * q) + '0';
-	if (q == 0)
-		return buf;
-	*buf++ = q + '0'; /* q <= 9 */
+	/* 1 <= r < 10^8 */
+	if (r < 100)
+		goto out_r;
+
+	/* 100 <= r < 10^8 */
+	q = (r * (u64)0x28f5c29) >> 32;
+	*((u16 *)buf) = decpair[r - 100*q];
+	buf += 2;
+
+	/* 1 <= q < 10^6 */
+	if (q < 100)
+		goto out_q;
+
+	/* 100 <= q < 10^6 */
+	r = (q * (u64)0x28f5c29) >> 32;
+	*((u16 *)buf) = decpair[q - 100*r];
+	buf += 2;
+
+	/* 1 <= r < 10^4 */
+	if (r < 100)
+		goto out_r;
+
+	/* 100 <= r < 10^4 */
+	q = (r * 0x147b) >> 19;
+	*((u16 *)buf) = decpair[r - 100*q];
+	buf += 2;
+out_q:
+	/* 1 <= q < 100 */
+	r = q;
+out_r:
+	/* 1 <= r < 100 */
+	*((u16 *)buf) = decpair[r];
+	buf += r < 10 ? 1 : 2;
 	return buf;
 }
 
-/* There are two algorithms to print larger numbers.
- * One is generic: divide by 1000000000 and repeatedly print
- * groups of (up to) 9 digits. It's conceptually simple,
- * but requires a (unsigned long long) / 1000000000 division.
- *
- * Second algorithm splits 64-bit unsigned long long into 16-bit chunks,
- * manipulates them cleverly and generates groups of 4 decimal digits.
- * It so happens that it does NOT require long long division.
- *
- * If long is > 32 bits, division of 64-bit values is relatively easy,
- * and we will use the first algorithm.
- * If long long is > 64 bits (strange architecture with VERY large long long),
- * second algorithm can't be used, and we again use the first one.
- *
- * Else (if long is 32 bits and long long is 64 bits) we use second one.
- */
-
-#if BITS_PER_LONG != 32 || BITS_PER_LONG_LONG != 64
-
-/* First algorithm: generic */
-
-static
+#if BITS_PER_LONG == 64 && BITS_PER_LONG_LONG == 64
+static noinline_for_stack
+char *put_dec_full8(char *buf, unsigned r)
+{
+	unsigned q;
+
+	/* 0 <= r < 10^8 */
+	q = (r * (u64)0x28f5c29) >> 32;
+	*((u16 *)buf) = decpair[r - 100*q];
+	buf += 2;
+
+	/* 0 <= q < 10^6 */
+	r = (q * (u64)0x28f5c29) >> 32;
+	*((u16 *)buf) = decpair[q - 100*r];
+	buf += 2;
+
+	/* 0 <= r < 10^4 */
+	q = (r * 0x147b) >> 19;
+	*((u16 *)buf) = decpair[r - 100*q];
+	buf += 2;
+
+	/* 0 <= q < 100 */
+	*((u16 *)buf) = decpair[q];
+	buf += 2;
+	return buf;
+}
+
+static noinline_for_stack
 char *put_dec(char *buf, unsigned long long n)
 {
-	if (n >= 100*1000*1000) {
-		while (n >= 1000*1000*1000)
-			buf = put_dec_full9(buf, do_div(n, 1000*1000*1000));
-		if (n >= 100*1000*1000)
-			return put_dec_full9(buf, n);
-	}
+	if (n >= 100*1000*1000)
+		buf = put_dec_full8(buf, do_div(n, 100*1000*1000));
+	/* 1 <= n <= 1.6e11 */
+	if (n >= 100*1000*1000)
+		buf = put_dec_full8(buf, do_div(n, 100*1000*1000));
+	/* 1 <= n < 1e8 */
 	return put_dec_trunc8(buf, n);
 }
 
-#else
+#elif BITS_PER_LONG == 32 && BITS_PER_LONG_LONG == 64
 
-/* Second algorithm: valid only for 64-bit long longs */
-
-/* See comment in put_dec_full9 for choice of constants */
-static noinline_for_stack
-void put_dec_full4(char *buf, unsigned q)
+static void
+put_dec_full4(char *buf, unsigned r)
 {
-	unsigned r;
-	r = (q * 0xccd) >> 15;
-	buf[0] = (q - 10 * r) + '0';
-	q = (r * 0xcd) >> 11;
-	buf[1] = (r - 10 * q) + '0';
-	r = (q * 0xcd) >> 11;
-	buf[2] = (q - 10 * r) + '0';
-	buf[3] = r + '0';
+	unsigned q;
+
+	/* 0 <= r < 10^4 */
+	q = (r * 0x147b) >> 19;
+	*((u16 *)buf) = decpair[r - 100*q];
+	buf += 2;
+
+	/* 0 <= q < 100 */
+	*((u16 *)buf) = decpair[q];
 }
 
 /*
@@ -265,9 +269,9 @@ void put_dec_full4(char *buf, unsigned q)
  * The approximation x/10000 == (x * 0x346DC5D7) >> 43
  * holds for all x < 1,128,869,999. The largest value this
  * helper will ever be asked to convert is 1,125,520,955.
- * (d1 in the put_dec code, assuming n is all-ones).
+ * (second call in the put_dec code, assuming n is all-ones).
  */
-static
+static noinline_for_stack
 unsigned put_dec_helper4(char *buf, unsigned x)
 {
 	uint32_t q = (x * (uint64_t)0x346DC5D7) >> 43;
@@ -294,6 +298,8 @@ char *put_dec(char *buf, unsigned long long n)
 	d2 = (h      ) & 0xffff;
 	d3 = (h >> 16); /* implicit "& 0xffff" */
 
+	/* n = 2^48 d3 + 2^32 d2 + 2^16 d1 + d0
+	     = 281_4749_7671_0656 d3 + 42_9496_7296 d2 + 6_5536 d1 + d0 */
 	q   = 656 * d3 + 7296 * d2 + 5536 * d1 + ((uint32_t)n & 0xffff);
 	q = put_dec_helper4(buf, q);
@@ -323,7 +329,8 @@ char *put_dec(char *buf, unsigned long long n)
  */
 int num_to_str(char *buf, int size, unsigned long long num)
 {
-	char tmp[sizeof(num) * 3];
+	/* put_dec requires 2-byte alignment of the buffer. */
+	char tmp[sizeof(num) * 3] __aligned(2);
 	int idx, len;
 
 	/* put_dec() may work incorrectly for num = 0 (generate "", not "0") */
@@ -384,7 +391,8 @@ static noinline_for_stack
 char *number(char *buf, char *end, unsigned long long num,
 	     struct printf_spec spec)
 {
-	char tmp[3 * sizeof(num)];
+	/* put_dec requires 2-byte alignment of the buffer. */
+	char tmp[3 * sizeof(num)] __aligned(2);
 	char sign;
 	char locase;
 	int need_pfx = ((spec.flags & SPECIAL) && spec.base != 10);
@@ -944,7 +952,7 @@ char *ip4_string(char *p, const u8 *addr, const char *fmt)
 		break;
 	}
 	for (i = 0; i < 4; i++) {
-		char temp[3];	/* hold each IP quad in reverse order */
+		char temp[4] __aligned(2);	/* hold each IP quad in reverse order */
 		int digits = put_dec_trunc8(temp, addr[index]) - temp;
 		if (leading_zeros) {
 			if (digits < 3)
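The reciprocal constants quoted in the new comments are small enough to verify exhaustively from userspace. A standalone brute-force check of all three claims (a sketch, not kernel code; the last loop takes a few seconds):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* x/100 == (x * 0x28f5c29) >> 32 for all x in [0, 10^8-1] */
	for (uint32_t x = 0; x < 100000000; x++)
		if ((x * (uint64_t)0x28f5c29) >> 32 != x / 100) {
			printf("0x28f5c29 fails at %u\n", x);
			return 1;
		}

	/* x/100 == (x * 0x147b) >> 19 for all x <= 43698 */
	for (uint32_t x = 0; x <= 43698; x++)
		if ((x * 0x147b) >> 19 != x / 100) {
			printf("0x147b fails at %u\n", x);
			return 1;
		}

	/* x/10000 == (x * 0x346DC5D7) >> 43 for all x < 1,128,869,999 */
	for (uint64_t x = 0; x < 1128869999; x++)
		if ((x * UINT64_C(0x346DC5D7)) >> 43 != x / 10000) {
			printf("0x346DC5D7 fails at %llu\n",
			       (unsigned long long)x);
			return 1;
		}

	printf("all three constants check out\n");
	return 0;
}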