[PATCH] Un-inline a bunch of other functions
Remove the "inline" keyword from a bunch of big functions in the kernel with the goal of shrinking it by 30kb to 40kb Signed-off-by: Arjan van de Ven <arjan@infradead.org> Signed-off-by: Ingo Molnar <mingo@elte.hu> Acked-by: Jeff Garzik <jgarzik@pobox.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Committed by: Linus Torvalds
Parent: b0a9499c3d
Commit: 858119e159
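
For context, a minimal sketch of the effect (not taken from this patch; the
helper name and body are made up): a sizeable function marked "static inline"
can be duplicated into every caller, whereas the plain "static" version is
emitted once in .text and reached through a call instruction, which is where
a saving of the order of 30kb to 40kb comes from.

/* Hypothetical example, not part of the patch: a biggish helper. */

/* Before: the compiler is asked to paste this body into every caller,
 * so each call site can carry its own copy of the loop. */
static inline void scrub_buffer_inline(unsigned char *buf, int len)
{
	int i;

	for (i = 0; i < len; i++)
		buf[i] = 0;
}

/* After: one shared out-of-line copy; callers emit a short call
 * instruction instead of the whole body. */
static void scrub_buffer(unsigned char *buf, int len)
{
	int i;

	for (i = 0; i < len; i++)
		buf[i] = 0;
}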
@@ -200,7 +200,7 @@ out:
 /* if page is completely empty, put it back on the free list, or dealloc it */
 /* if page was hijacked, unmark the flag so it might get alloced next time */
 /* Note: lock should be held when calling this */
-static inline void bitmap_checkfree(struct bitmap *bitmap, unsigned long page)
+static void bitmap_checkfree(struct bitmap *bitmap, unsigned long page)
 {
 	char *ptr;
 
@@ -228,7 +228,7 @@ static struct crypt_iv_operations crypt_iv_essiv_ops = {
 };
 
 
-static inline int
+static int
 crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
                           struct scatterlist *in, unsigned int length,
                           int write, sector_t sector)
@@ -598,7 +598,7 @@ static int dev_create(struct dm_ioctl *param, size_t param_size)
 /*
  * Always use UUID for lookups if it's present, otherwise use name or dev.
  */
-static inline struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param)
+static struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param)
 {
 	if (*param->uuid)
 		return __get_uuid_cell(param->uuid);
@@ -608,7 +608,7 @@ static inline struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param)
 	return dm_get_mdptr(huge_decode_dev(param->dev));
 }
 
-static inline struct mapped_device *find_device(struct dm_ioctl *param)
+static struct mapped_device *find_device(struct dm_ioctl *param)
 {
 	struct hash_cell *hc;
 	struct mapped_device *md = NULL;
@@ -691,7 +691,7 @@ static void copy_callback(int read_err, unsigned int write_err, void *context)
 /*
  * Dispatches the copy operation to kcopyd.
  */
-static inline void start_copy(struct pending_exception *pe)
+static void start_copy(struct pending_exception *pe)
 {
 	struct dm_snapshot *s = pe->snap;
 	struct io_region src, dest;
@@ -293,7 +293,7 @@ struct dm_table *dm_get_table(struct mapped_device *md)
  * Decrements the number of outstanding ios that a bio has been
  * cloned into, completing the original io if necc.
  */
-static inline void dec_pending(struct dm_io *io, int error)
+static void dec_pending(struct dm_io *io, int error)
 {
 	if (error)
 		io->error = error;
@@ -176,7 +176,7 @@ static void put_all_bios(conf_t *conf, r1bio_t *r1_bio)
 	}
 }
 
-static inline void free_r1bio(r1bio_t *r1_bio)
+static void free_r1bio(r1bio_t *r1_bio)
 {
 	conf_t *conf = mddev_to_conf(r1_bio->mddev);
 
@@ -190,7 +190,7 @@ static inline void free_r1bio(r1bio_t *r1_bio)
 	mempool_free(r1_bio, conf->r1bio_pool);
 }
 
-static inline void put_buf(r1bio_t *r1_bio)
+static void put_buf(r1bio_t *r1_bio)
 {
 	conf_t *conf = mddev_to_conf(r1_bio->mddev);
 	int i;
@@ -176,7 +176,7 @@ static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)
 	}
 }
 
-static inline void free_r10bio(r10bio_t *r10_bio)
+static void free_r10bio(r10bio_t *r10_bio)
 {
 	conf_t *conf = mddev_to_conf(r10_bio->mddev);
 
@@ -190,7 +190,7 @@ static inline void free_r10bio(r10bio_t *r10_bio)
 	mempool_free(r10_bio, conf->r10bio_pool);
 }
 
-static inline void put_buf(r10bio_t *r10_bio)
+static void put_buf(r10bio_t *r10_bio)
 {
 	conf_t *conf = mddev_to_conf(r10_bio->mddev);
 
@@ -69,7 +69,7 @@
 
 static void print_raid5_conf (raid5_conf_t *conf);
 
-static inline void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
+static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
 {
 	if (atomic_dec_and_test(&sh->count)) {
 		if (!list_empty(&sh->lru))
@@ -118,7 +118,7 @@ static inline void remove_hash(struct stripe_head *sh)
 	hlist_del_init(&sh->hash);
 }
 
-static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
+static void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
 {
 	struct hlist_head *hp = stripe_hash(conf, sh->sector);
 
@@ -178,7 +178,7 @@ static int grow_buffers(struct stripe_head *sh, int num)
 
 static void raid5_build_block (struct stripe_head *sh, int i);
 
-static inline void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx)
+static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx)
 {
 	raid5_conf_t *conf = sh->raid_conf;
 	int disks = conf->raid_disks, i;
@@ -1415,7 +1415,7 @@ static void handle_stripe(struct stripe_head *sh)
 	}
 }
 
-static inline void raid5_activate_delayed(raid5_conf_t *conf)
+static void raid5_activate_delayed(raid5_conf_t *conf)
 {
 	if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
 		while (!list_empty(&conf->delayed_list)) {
@@ -1431,7 +1431,7 @@ static inline void raid5_activate_delayed(raid5_conf_t *conf)
 	}
 }
 
-static inline void activate_bit_delay(raid5_conf_t *conf)
+static void activate_bit_delay(raid5_conf_t *conf)
 {
 	/* device_lock is held */
 	struct list_head head;
@@ -88,7 +88,7 @@ static inline int raid6_next_disk(int disk, int raid_disks)
 
 static void print_raid6_conf (raid6_conf_t *conf);
 
-static inline void __release_stripe(raid6_conf_t *conf, struct stripe_head *sh)
+static void __release_stripe(raid6_conf_t *conf, struct stripe_head *sh)
 {
 	if (atomic_dec_and_test(&sh->count)) {
 		if (!list_empty(&sh->lru))
@@ -197,7 +197,7 @@ static int grow_buffers(struct stripe_head *sh, int num)
 
 static void raid6_build_block (struct stripe_head *sh, int i);
 
-static inline void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx)
+static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx)
 {
 	raid6_conf_t *conf = sh->raid_conf;
 	int disks = conf->raid_disks, i;
@@ -1577,7 +1577,7 @@ static void handle_stripe(struct stripe_head *sh, struct page *tmp_page)
 	}
 }
 
-static inline void raid6_activate_delayed(raid6_conf_t *conf)
+static void raid6_activate_delayed(raid6_conf_t *conf)
 {
 	if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
 		while (!list_empty(&conf->delayed_list)) {
@@ -1593,7 +1593,7 @@ static inline void raid6_activate_delayed(raid6_conf_t *conf)
 	}
 }
 
-static inline void activate_bit_delay(raid6_conf_t *conf)
+static void activate_bit_delay(raid6_conf_t *conf)
 {
 	/* device_lock is held */
 	struct list_head head;