mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
The PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced a *long* time
ago with the promise that one day it would be possible to implement the page
cache with bigger chunks than PAGE_SIZE. This promise never materialized, and
it is unlikely it ever will.

We have many places where PAGE_CACHE_SIZE is assumed to be equal to
PAGE_SIZE, and it's a constant source of confusion whether the PAGE_CACHE_*
or the PAGE_* constant should be used in a particular case, especially on the
border between fs and mm.

A global switch to PAGE_CACHE_SIZE != PAGE_SIZE would cause too much breakage
to be doable. Let's stop pretending that pages in the page cache are special.
They are not.

The changes are pretty straightforward:

 - <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;

 - <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;

 - PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};

 - page_cache_get() -> get_page();

 - page_cache_release() -> put_page();

This patch contains automated changes generated with coccinelle using the
script below. For some reason, coccinelle doesn't patch header files; I've
called spatch for them manually.

The only adjustment after coccinelle is a revert of the changes to the
PAGE_CACHE_ALIGN definition: we are going to drop it later.

There are a few places in the code that coccinelle didn't reach. I'll fix
them manually in a separate patch. Comments and documentation will also be
addressed in a separate patch.

 virtual patch

 @@
 expression E;
 @@
 - E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
 + E

 @@
 expression E;
 @@
 - E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
 + E

 @@
 @@
 - PAGE_CACHE_SHIFT
 + PAGE_SHIFT

 @@
 @@
 - PAGE_CACHE_SIZE
 + PAGE_SIZE

 @@
 @@
 - PAGE_CACHE_MASK
 + PAGE_MASK

 @@
 expression E;
 @@
 - PAGE_CACHE_ALIGN(E)
 + PAGE_ALIGN(E)

 @@
 expression E;
 @@
 - page_cache_get(E)
 + get_page(E)

 @@
 expression E;
 @@
 - page_cache_release(E)
 + put_page(E)

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 09cbfeaf1a
parent c05c2ec96b
committed by Linus Torvalds
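Editor's note (not part of the commit): before this change, the PAGE_CACHE_*
names were already plain aliases for their PAGE_* counterparts, which is why
the conversion below is a mechanical no-op. The old definitions in
include/linux/pagemap.h read, modulo whitespace:

    #define PAGE_CACHE_SHIFT        PAGE_SHIFT
    #define PAGE_CACHE_SIZE         PAGE_SIZE
    #define PAGE_CACHE_MASK         PAGE_MASK
    #define PAGE_CACHE_ALIGN(addr)  (((addr) + PAGE_CACHE_SIZE - 1) & PAGE_CACHE_MASK)

    #define page_cache_get(page)            get_page(page)
    #define page_cache_release(page)        put_page(page)

Since PAGE_CACHE_SHIFT == PAGE_SHIFT, the shift expressions handled by the
first two coccinelle rules, such as index << (PAGE_CACHE_SHIFT - PAGE_SHIFT),
are shifts by zero, so dropping them leaves the generated code unchanged.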
diff --git a/block/bio.c b/block/bio.c
--- a/block/bio.c
+++ b/block/bio.c
@@ -1339,7 +1339,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 		 * release the pages we didn't map into the bio, if any
 		 */
 		while (j < page_limit)
-			page_cache_release(pages[j++]);
+			put_page(pages[j++]);
 	}
 
 	kfree(pages);
@@ -1365,7 +1365,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 	for (j = 0; j < nr_pages; j++) {
 		if (!pages[j])
 			break;
-		page_cache_release(pages[j]);
+		put_page(pages[j]);
 	}
  out:
 	kfree(pages);
@@ -1385,7 +1385,7 @@ static void __bio_unmap_user(struct bio *bio)
 		if (bio_data_dir(bio) == READ)
 			set_page_dirty_lock(bvec->bv_page);
 
-		page_cache_release(bvec->bv_page);
+		put_page(bvec->bv_page);
 	}
 
 	bio_put(bio);
@@ -1658,7 +1658,7 @@ void bio_check_pages_dirty(struct bio *bio)
 		struct page *page = bvec->bv_page;
 
 		if (PageDirty(page) || PageCompound(page)) {
-			page_cache_release(page);
+			put_page(page);
 			bvec->bv_page = NULL;
 		} else {
 			nr_clean_pages++;
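Editor's note: the bio.c hunks above show exactly the confusion the commit
message complains about: these pages come from get_user_pages(), not from the
page cache, yet they were dropped with page_cache_release(). A minimal sketch
of the pairing after the rename; the helper is hypothetical, only
get_user_pages_fast() and put_page() are real APIs of that era:

    #include <linux/mm.h>       /* get_user_pages_fast(), put_page() */

    /* Hypothetical helper: pin one user page for I/O, then drop the
     * reference.  get_page()/put_page() manipulate the struct page
     * refcount directly; nothing here touches the page cache. */
    static int touch_user_page(unsigned long addr)
    {
            struct page *page;

            if (get_user_pages_fast(addr, 1, 1, &page) != 1)
                    return -EFAULT;
            /* ... do I/O against the pinned page ... */
            put_page(page);         /* was: page_cache_release(page) */
            return 0;
    }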
diff --git a/block/blk-core.c b/block/blk-core.c
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -706,7 +706,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 		goto fail_id;
 
 	q->backing_dev_info.ra_pages =
-			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
+			(VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
 	q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK;
 	q->backing_dev_info.name = "block";
 	q->node = node_id;
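Editor's note: a worked instance of the converted expression, assuming the
then-current VM_MAX_READAHEAD of 128 (kbytes) and a 4 KiB page:

    /* (VM_MAX_READAHEAD * 1024) / PAGE_SIZE
     *     = (128 * 1024) / 4096
     *     = 32 pages of default readahead per queue */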
diff --git a/block/blk-settings.c b/block/blk-settings.c
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -239,8 +239,8 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
 	struct queue_limits *limits = &q->limits;
 	unsigned int max_sectors;
 
-	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
-		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
+	if ((max_hw_sectors << 9) < PAGE_SIZE) {
+		max_hw_sectors = 1 << (PAGE_SHIFT - 9);
 		printk(KERN_INFO "%s: set to minimum %d\n",
 		       __func__, max_hw_sectors);
 	}
@@ -329,8 +329,8 @@ EXPORT_SYMBOL(blk_queue_max_segments);
  **/
 void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
 {
-	if (max_size < PAGE_CACHE_SIZE) {
-		max_size = PAGE_CACHE_SIZE;
+	if (max_size < PAGE_SIZE) {
+		max_size = PAGE_SIZE;
 		printk(KERN_INFO "%s: set to minimum %d\n",
 		       __func__, max_size);
 	}
@@ -760,8 +760,8 @@ EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
  **/
 void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
 {
-	if (mask < PAGE_CACHE_SIZE - 1) {
-		mask = PAGE_CACHE_SIZE - 1;
+	if (mask < PAGE_SIZE - 1) {
+		mask = PAGE_SIZE - 1;
 		printk(KERN_INFO "%s: set to minimum %lx\n",
 		       __func__, mask);
 	}
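Editor's note: the 9s in these hunks are sector arithmetic; a block-layer
sector is 512 bytes (1 << 9). Assuming a 4 KiB page (PAGE_SHIFT == 12):

    /* max_hw_sectors << 9      -- sectors to bytes
     * 1 << (PAGE_SHIFT - 9)    -- 1 << 3 = 8 sectors, i.e. one page
     * so blk_queue_max_hw_sectors() clamps the limit to at least
     * one page's worth of 512-byte sectors. */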
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -76,7 +76,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 static ssize_t queue_ra_show(struct request_queue *q, char *page)
 {
 	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
-					(PAGE_CACHE_SHIFT - 10);
+					(PAGE_SHIFT - 10);
 
 	return queue_var_show(ra_kb, (page));
 }
@@ -90,7 +90,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
 	if (ret < 0)
 		return ret;
 
-	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
+	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_SHIFT - 10);
 
 	return ret;
 }
@@ -117,7 +117,7 @@ static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
 	if (blk_queue_cluster(q))
 		return queue_var_show(queue_max_segment_size(q), (page));
 
-	return queue_var_show(PAGE_CACHE_SIZE, (page));
+	return queue_var_show(PAGE_SIZE, (page));
 }
 
 static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
@@ -198,7 +198,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 {
 	unsigned long max_sectors_kb,
 		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
-			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
+			page_kb = 1 << (PAGE_SHIFT - 10);
 	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
 
 	if (ret < 0)
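Editor's note: PAGE_SHIFT - 10 converts between pages and kilobytes, since
1 << 10 is 1 KB. With 4 KiB pages (PAGE_SHIFT == 12) and the 32-page default
from blk-core.c above:

    /* pages -> KB: ra_kb    = ra_pages << (PAGE_SHIFT - 10);  // 32 << 2 = 128 KB
     * KB -> pages: ra_pages = ra_kb    >> (PAGE_SHIFT - 10);  // 128 >> 2 = 32 pages */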
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -4075,7 +4075,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		 * idle timer unplug to continue working.
 		 */
 		if (cfq_cfqq_wait_request(cfqq)) {
-			if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
+			if (blk_rq_bytes(rq) > PAGE_SIZE ||
 			    cfqd->busy_queues > 1) {
 				cfq_del_timer(cfqd, cfqq);
 				cfq_clear_cfqq_wait_request(cfqq);
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -710,7 +710,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 			return -EINVAL;
 		bdi = blk_get_backing_dev_info(bdev);
 		return compat_put_long(arg,
-				       (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
+				       (bdi->ra_pages * PAGE_SIZE) / 512);
 	case BLKROGET: /* compatible */
 		return compat_put_int(arg, bdev_read_only(bdev) != 0);
 	case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */
@@ -729,7 +729,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 		if (!capable(CAP_SYS_ADMIN))
 			return -EACCES;
 		bdi = blk_get_backing_dev_info(bdev);
-		bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
+		bdi->ra_pages = (arg * 512) / PAGE_SIZE;
 		return 0;
 	case BLKGETSIZE:
 		size = i_size_read(bdev->bd_inode);
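Editor's note: BLKRAGET/BLKRASET report and set readahead in 512-byte sectors
while the kernel stores it in pages. Assuming 4 KiB pages and the 32-page
default:

    /* BLKRAGET: (bdi->ra_pages * PAGE_SIZE) / 512 = (32 * 4096) / 512 = 256 sectors
     * BLKRASET: (arg * 512) / PAGE_SIZE           = (256 * 512) / 4096 = 32 pages */

The native ioctl path below performs the same conversion.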
diff --git a/block/ioctl.c b/block/ioctl.c
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -550,7 +550,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 		if (!arg)
 			return -EINVAL;
 		bdi = blk_get_backing_dev_info(bdev);
-		return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
+		return put_long(arg, (bdi->ra_pages * PAGE_SIZE) / 512);
 	case BLKROGET:
 		return put_int(arg, bdev_read_only(bdev) != 0);
 	case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */
@@ -578,7 +578,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 		if(!capable(CAP_SYS_ADMIN))
 			return -EACCES;
 		bdi = blk_get_backing_dev_info(bdev);
-		bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
+		bdi->ra_pages = (arg * 512) / PAGE_SIZE;
 		return 0;
 	case BLKBSZSET:
 		return blkdev_bszset(bdev, mode, argp);
diff --git a/block/partition-generic.c b/block/partition-generic.c
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -566,8 +566,8 @@ static struct page *read_pagecache_sector(struct block_device *bdev, sector_t n)
 {
 	struct address_space *mapping = bdev->bd_inode->i_mapping;
 
-	return read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)),
-				 NULL);
+	return read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_SHIFT-9)),
+				 NULL);
 }
 
 unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
@@ -584,9 +584,9 @@ unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
 		if (PageError(page))
 			goto fail;
 		p->v = page;
-		return (unsigned char *)page_address(page) + ((n & ((1 << (PAGE_CACHE_SHIFT - 9)) - 1)) << 9);
+		return (unsigned char *)page_address(page) + ((n & ((1 << (PAGE_SHIFT - 9)) - 1)) << 9);
 fail:
-		page_cache_release(page);
+		put_page(page);
 	}
 	p->v = NULL;
 	return NULL;
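Editor's note: read_dev_sector() locates a 512-byte sector inside a cached
page. Assuming a 4 KiB page (PAGE_SHIFT == 12, so 1 << (PAGE_SHIFT - 9) = 8
sectors per page), a worked example for sector n = 21:

    /* page index:  n >> (PAGE_SHIFT - 9)              = 21 >> 3  = 2
     * byte offset: (n & ((1 << (PAGE_SHIFT - 9)) - 1)) << 9
     *            = (21 & 7) << 9                      = 5 * 512  = 2560 */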