SUNRPC: always treat sv_nrpools==1 as "not pooled"
[ Upstream commit 93aa619eb0b42eec2f3a9b4d9db41f5095390aec ]

Currently 'pooled' services hold a reference on the pool_map, and
'unpooled' services do not.  svc_destroy() uses the presence of
->svo_function (via svc_serv_is_pooled()) to determine if the reference
should be dropped.  There is no direct correlation between being pooled
and the use of svo_function, though in practice, lockd is the only
non-pooled service, and the only one not to use svo_function.

This is untidy and would cause problems if we changed lockd to use
svc_set_num_threads(), which requires the use of ->svo_function.

So change the test for "is the service pooled" to "is sv_nrpools > 1".
This means that when svc_pool_map_get() returns 1, it must NOT take a
reference to the pool.

We discard svc_serv_is_pooled(), and test sv_nrpools directly.

Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
committed by Greg Kroah-Hartman
parent 74a0e37a20
commit deeda24a67
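As a rough illustration of the reference-counting contract described above, here is a minimal userspace model, not kernel code: pool_map, map_get() and map_put() are stand-ins for svc_pool_map, svc_pool_map_get() and svc_pool_map_put(), a pthread mutex replaces svc_pool_map_mutex, and the pool-mode selection is collapsed into a requested_pools argument purely for illustration. The point it demonstrates is the one the patch below makes: when only one pool is in use, get() leaves the reference count untouched and put() becomes a no-op, so a service with sv_nrpools == 1 never pins the map.

/*
 * Userspace model only -- NOT the kernel implementation.
 * map_get()/map_put() mirror the post-patch behaviour of
 * svc_pool_map_get()/svc_pool_map_put().
 */
#include <assert.h>
#include <pthread.h>

struct pool_map {
	pthread_mutex_t lock;   /* stands in for svc_pool_map_mutex */
	int count;              /* users currently holding a reference */
	int npools;             /* decided by the first user */
};

static struct pool_map pool_map = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

/* Returns the number of pools; if it returns 1, no reference was taken. */
static int map_get(struct pool_map *m, int requested_pools)
{
	pthread_mutex_lock(&m->lock);
	if (m->count++) {
		int npools = m->npools;

		pthread_mutex_unlock(&m->lock);
		assert(npools > 1);          /* mirrors the new WARN_ON_ONCE() */
		return npools;
	}
	m->npools = requested_pools > 1 ? requested_pools : 1;
	if (m->npools == 1)
		m->count--;                  /* unpooled: give the reference back */
	pthread_mutex_unlock(&m->lock);
	return m->npools;
}

/* Drop the reference, but only if one was taken (npools > 1). */
static void map_put(struct pool_map *m, int npools)
{
	if (npools <= 1)
		return;
	pthread_mutex_lock(&m->lock);
	if (!--m->count)
		m->npools = 0;               /* stands in for freeing the map */
	pthread_mutex_unlock(&m->lock);
}

int main(void)
{
	/* An "unpooled" service (sv_nrpools == 1) never pins the map ... */
	int n = map_get(&pool_map, 1);
	assert(n == 1 && pool_map.count == 0);
	map_put(&pool_map, n);               /* no-op, nothing to release */

	/* ... while a pooled service takes and releases a real reference. */
	n = map_get(&pool_map, 4);
	assert(n == 4 && pool_map.count == 1);
	map_put(&pool_map, n);
	assert(pool_map.count == 0);
	return 0;
}

Built with cc -pthread, the asserts pass for both the unpooled (lockd-style) and pooled cases.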
net/sunrpc/svc.c
@@ -35,8 +35,6 @@
 
 static void svc_unregister(const struct svc_serv *serv, struct net *net);
 
-#define svc_serv_is_pooled(serv)    ((serv)->sv_ops->svo_function)
-
 #define SVC_POOL_DEFAULT	SVC_POOL_GLOBAL
 
 /*
@@ -238,8 +236,10 @@ svc_pool_map_init_pernode(struct svc_pool_map *m)
 
 /*
  * Add a reference to the global map of cpus to pools (and
- * vice versa).  Initialise the map if we're the first user.
- * Returns the number of pools.
+ * vice versa) if pools are in use.
+ * Initialise the map if we're the first user.
+ * Returns the number of pools. If this is '1', no reference
+ * was taken.
  */
 static unsigned int
 svc_pool_map_get(void)
@@ -251,6 +251,7 @@ svc_pool_map_get(void)
 
 	if (m->count++) {
 		mutex_unlock(&svc_pool_map_mutex);
+		WARN_ON_ONCE(m->npools <= 1);
 		return m->npools;
 	}
 
@@ -266,29 +267,36 @@ svc_pool_map_get(void)
 		break;
 	}
 
-	if (npools < 0) {
+	if (npools <= 0) {
 		/* default, or memory allocation failure */
 		npools = 1;
 		m->mode = SVC_POOL_GLOBAL;
 	}
 	m->npools = npools;
 
+	if (npools == 1)
+		/* service is unpooled, so doesn't hold a reference */
+		m->count--;
+
 	mutex_unlock(&svc_pool_map_mutex);
-	return m->npools;
+	return npools;
 }
 
 /*
- * Drop a reference to the global map of cpus to pools.
+ * Drop a reference to the global map of cpus to pools, if
+ * pools were in use, i.e. if npools > 1.
  * When the last reference is dropped, the map data is
  * freed; this allows the sysadmin to change the pool
  * mode using the pool_mode module option without
  * rebooting or re-loading sunrpc.ko.
  */
 static void
-svc_pool_map_put(void)
+svc_pool_map_put(int npools)
 {
 	struct svc_pool_map *m = &svc_pool_map;
 
+	if (npools <= 1)
+		return;
 	mutex_lock(&svc_pool_map_mutex);
 
 	if (!--m->count) {
@@ -357,12 +365,9 @@ svc_pool_for_cpu(struct svc_serv *serv, int cpu)
 	struct svc_pool_map *m = &svc_pool_map;
 	unsigned int pidx = 0;
 
-	/*
-	 * An uninitialised map happens in a pure client when
-	 * lockd is brought up, so silently treat it the
-	 * same as SVC_POOL_GLOBAL.
-	 */
-	if (svc_serv_is_pooled(serv)) {
+	if (serv->sv_nrpools <= 1)
+		return serv->sv_pools;
+
 		switch (m->mode) {
 		case SVC_POOL_PERCPU:
 			pidx = m->to_pool[cpu];
@@ -371,7 +376,7 @@ svc_pool_for_cpu(struct svc_serv *serv, int cpu)
 			pidx = m->to_pool[cpu_to_node(cpu)];
 			break;
 		}
-	}
+
 	return &serv->sv_pools[pidx % serv->sv_nrpools];
 }
 
@@ -524,7 +529,7 @@ svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
 		goto out_err;
 	return serv;
 out_err:
-	svc_pool_map_put();
+	svc_pool_map_put(npools);
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(svc_create_pooled);
@@ -559,8 +564,7 @@ svc_destroy(struct kref *ref)
 
 	cache_clean_deferred(serv);
 
-	if (svc_serv_is_pooled(serv))
-		svc_pool_map_put();
+	svc_pool_map_put(serv->sv_nrpools);
 
 	kfree(serv->sv_pools);
 	kfree(serv);