ANDROID: dm kcopyd: Use reserved memory for the copy buffer
MediaTek platforms need a dedicated reserved-memory region for the kcopyd merge buffer to guarantee data correctness during a snapshot merge. This path is enabled only when the bootarg "mtk_kcopyd_quirk" is defined, so the quirk stays isolated to MediaTek platforms. The bootarg format is:

    mtk_kcopyd_quirk=mediatek,dm_ota

where "dm_ota" is the name of the reserved-memory region.

Bug: 223346425
Change-Id: I2b295ca8c0cea65146077324c58ac17c05fe0099
Signed-off-by: Will Shiu <Will.Shiu@mediatek.com>
Signed-off-by: Stanley Chu <stanley.chu@mediatek.com>
Signed-off-by: Akilesh Kailash <akailash@google.com>
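For context: with the bootarg set, kcopyd_rsm_init() looks up a node compatible with "mediatek,dm_ota" through of_reserved_mem_lookup() and merge jobs take their buffer pages from that region instead of the page allocator. A caller opts into this path by setting the new DM_KCOPYD_SNAP_MERGE flag bit when submitting a copy. The sketch below is illustrative only and mirrors the dm-snapshot change further down; the function name, client, regions and callback are placeholders, not part of this patch:

#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>

/*
 * Illustrative sketch only: issue a copy whose buffer pages come from the
 * reserved region when "mtk_kcopyd_quirk" is set.  The client, regions and
 * completion callback are hypothetical; DM_KCOPYD_SNAP_MERGE is the flag
 * bit introduced by this patch.
 */
static void start_merge_copy(struct dm_kcopyd_client *kc,
                             struct dm_io_region *src,
                             struct dm_io_region *dest,
                             dm_kcopyd_notify_fn merge_done, void *context)
{
        /* One source, one destination, as in the dm-snapshot merge path. */
        dm_kcopyd_copy(kc, src, 1, dest, 1 << DM_KCOPYD_SNAP_MERGE,
                       merge_done, context);
}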
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -17,6 +17,8 @@
 #include <linux/list.h>
 #include <linux/mempool.h>
 #include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
 #include <linux/pagemap.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
@@ -39,6 +41,105 @@ static unsigned kcopyd_subjob_size_kb = DEFAULT_SUB_JOB_SIZE_KB;
 module_param(kcopyd_subjob_size_kb, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(kcopyd_subjob_size_kb, "Sub-job size for dm-kcopyd clients");
 
+static bool rsm_enabled;
+static phys_addr_t rsm_mem_base, rsm_mem_size;
+
+#ifndef MODULE
+static DEFINE_SPINLOCK(rsm_lock);
+static int *rsm_mem;
+static int rsm_page_cnt;
+static int rsm_tbl_idx;
+static struct reserved_mem *rmem;
+
+static void __init kcopyd_rsm_init(void)
+{
+        static struct device_node *rsm_node;
+        int ret = 0;
+
+        if (!rsm_enabled)
+                return;
+
+        rsm_node = of_find_compatible_node(NULL, NULL, "mediatek,dm_ota");
+        if (!rsm_node) {
+                ret = -ENODEV;
+                goto out;
+        }
+
+        rmem = of_reserved_mem_lookup(rsm_node);
+        if (!rmem) {
+                ret = -EINVAL;
+                goto out_put_node;
+        }
+
+        rsm_mem_base = rmem->base;
+        rsm_mem_size = rmem->size;
+        rsm_page_cnt = rsm_mem_size / PAGE_SIZE;
+        rsm_mem = kcalloc(rsm_page_cnt, sizeof(int), GFP_KERNEL);
+        if (!rsm_mem)
+                ret = -ENOMEM;
+
+out_put_node:
+        of_node_put(rsm_node);
+out:
+        if (ret)
+                pr_warn("kcopyd: failed to init rsm: %d", ret);
+}
+
+static int __init kcopyd_rsm_enable(char *str)
+{
+        rsm_enabled = true;
+
+        return 0;
+}
+early_param("mtk_kcopyd_quirk", kcopyd_rsm_enable);
+
+static void kcopyd_rsm_get_page(struct page **p)
+{
+        int i;
+        unsigned long flags;
+
+        *p = NULL;
+        spin_lock_irqsave(&rsm_lock, flags);
+        for (i = 0 ; i < rsm_page_cnt ; i++) {
+                rsm_tbl_idx = (rsm_tbl_idx + 1 == rsm_page_cnt) ? 0 : rsm_tbl_idx + 1;
+
+                if (rsm_mem[rsm_tbl_idx] == 0) {
+                        rsm_mem[rsm_tbl_idx] = 1;
+                        *p = virt_to_page(phys_to_virt(rsm_mem_base + PAGE_SIZE
+                                                       * rsm_tbl_idx));
+                        break;
+                }
+        }
+        spin_unlock_irqrestore(&rsm_lock, flags);
+}
+
+static void kcopyd_rsm_drop_page(struct page **p)
+{
+        u64 off;
+        unsigned long flags;
+
+        if (*p) {
+                off = page_to_phys(*p) - rsm_mem_base;
+                spin_lock_irqsave(&rsm_lock, flags);
+                rsm_mem[off >> PAGE_SHIFT] = 0;
+                spin_unlock_irqrestore(&rsm_lock, flags);
+                *p = NULL;
+        }
+}
+
+static void kcopyd_rsm_destroy(void)
+{
+        if (rsm_enabled)
+                kfree(rsm_mem);
+}
+
+#else
+#define kcopyd_rsm_destroy(...)
+#define kcopyd_rsm_drop_page(...)
+#define kcopyd_rsm_get_page(...)
+#define kcopyd_rsm_init(...)
+#endif
+
 static unsigned dm_get_kcopyd_subjob_size(void)
 {
         unsigned sub_job_size_kb;
@@ -211,7 +312,7 @@ static void wake(struct dm_kcopyd_client *kc)
 /*
  * Obtain one page for the use of kcopyd.
  */
-static struct page_list *alloc_pl(gfp_t gfp)
+static struct page_list *alloc_pl(gfp_t gfp, unsigned long job_flags)
 {
         struct page_list *pl;
 
@@ -219,7 +320,12 @@ static struct page_list *alloc_pl(gfp_t gfp)
         if (!pl)
                 return NULL;
 
-        pl->page = alloc_page(gfp);
+        if (rsm_enabled && test_bit(DM_KCOPYD_SNAP_MERGE, &job_flags)) {
+                kcopyd_rsm_get_page(&pl->page);
+        } else {
+                pl->page = alloc_page(gfp);
+        }
+
         if (!pl->page) {
                 kfree(pl);
                 return NULL;
@@ -230,7 +336,14 @@ static struct page_list *alloc_pl(gfp_t gfp)
 
 static void free_pl(struct page_list *pl)
 {
-        __free_page(pl->page);
+        struct page *p = pl->page;
+        phys_addr_t pa = page_to_phys(p);
+
+        if (rsm_enabled && pa >= rsm_mem_base && pa < rsm_mem_base + rsm_mem_size)
+                kcopyd_rsm_drop_page(&pl->page);
+        else
+                __free_page(pl->page);
+
         kfree(pl);
 }
 
@@ -258,14 +371,15 @@ static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
 }
 
 static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
-                            unsigned int nr, struct page_list **pages)
+                            unsigned int nr, struct page_list **pages,
+                            unsigned long job_flags)
 {
         struct page_list *pl;
 
         *pages = NULL;
 
         do {
-                pl = alloc_pl(__GFP_NOWARN | __GFP_NORETRY | __GFP_KSWAPD_RECLAIM);
+                pl = alloc_pl(__GFP_NOWARN | __GFP_NORETRY | __GFP_KSWAPD_RECLAIM, job_flags);
                 if (unlikely(!pl)) {
                         /* Use reserved pages */
                         pl = kc->pages;
@@ -309,7 +423,7 @@ static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned nr_pages)
         struct page_list *pl = NULL, *next;
 
         for (i = 0; i < nr_pages; i++) {
-                next = alloc_pl(GFP_KERNEL);
+                next = alloc_pl(GFP_KERNEL, 0);
                 if (!next) {
                         if (pl)
                                 drop_pages(pl);
@@ -395,6 +509,8 @@ int __init dm_kcopyd_init(void)
         zero_page_list.next = &zero_page_list;
         zero_page_list.page = ZERO_PAGE(0);
 
+        kcopyd_rsm_init();
+
         return 0;
 }
 
@@ -402,6 +518,7 @@ void dm_kcopyd_exit(void)
 {
         kmem_cache_destroy(_job_cache);
         _job_cache = NULL;
+        kcopyd_rsm_destroy();
 }
 
 /*
@@ -586,7 +703,7 @@ static int run_pages_job(struct kcopyd_job *job)
         int r;
         unsigned nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);
 
-        r = kcopyd_get_pages(job->kc, nr_pages, &job->pages);
+        r = kcopyd_get_pages(job->kc, nr_pages, &job->pages, job->flags);
         if (!r) {
                 /* this job is ready for io */
                 push(&job->kc->io_jobs, job);
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1117,7 +1117,8 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
         for (i = 0; i < linear_chunks; i++)
                 __check_for_conflicting_io(s, old_chunk + i);
 
-        dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
+        dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 1 << DM_KCOPYD_SNAP_MERGE,
+                       merge_callback, s);
         return;
 
 shut:
--- a/include/linux/dm-kcopyd.h
+++ b/include/linux/dm-kcopyd.h
@@ -21,6 +21,7 @@
 
 #define DM_KCOPYD_IGNORE_ERROR 1
 #define DM_KCOPYD_WRITE_SEQ 2
+#define DM_KCOPYD_SNAP_MERGE 3
 
 struct dm_kcopyd_throttle {
         unsigned throttle;