Merge branch 'for-linus' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching

Pull livepatching updates from Jiri Kosina:

 - shadow variables support, allowing livepatches to associate new
   "shadow" fields with existing data structures (a usage sketch
   follows the shortlog below), from Joe Lawrence

 - pre/post patch callbacks API, allowing livepatch writers to register
   callbacks to be called before and after patch application and
   removal, from Joe Lawrence

* 'for-linus' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching:
  livepatch: __klp_disable_patch() should never be called for disabled patches
  livepatch: Correctly call klp_post_unpatch_callback() in error paths
  livepatch: add transition notices
  livepatch: move transition "complete" notice into klp_complete_transition()
  livepatch: add (un)patch callbacks
  livepatch: Small shadow variable documentation fixes
  livepatch: __klp_shadow_get_or_alloc() is local to shadow.c
  livepatch: introduce shadow variable API
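
As a quick orientation, here is a minimal usage sketch of the new shadow
variable API (struct parent, SV_COUNTER and the function names are
hypothetical, invented for this example; klp_shadow_alloc/get/free are
the calls introduced below):

#include <linux/livepatch.h>
#include <linux/slab.h>

struct parent;                          /* some pre-existing kernel object */

#define SV_COUNTER 1                    /* hypothetical shadow variable id */

/* Attach a new counter field to an existing, already-allocated object. */
static int attach_counter(struct parent *p)
{
        int start = 0;
        int *cnt;

        cnt = klp_shadow_alloc(p, SV_COUNTER, &start, sizeof(start),
                               GFP_KERNEL);
        if (!cnt)
                return -ENOMEM; /* allocation failure or duplicate <obj, id> */
        return 0;
}

/* Read it back from any patched function... */
static void bump_counter(struct parent *p)
{
        int *cnt = klp_shadow_get(p, SV_COUNTER);

        if (cnt)
                (*cnt)++;
}

/* ...and detach it when the parent object is released. */
static void detach_counter(struct parent *p)
{
        klp_shadow_free(p, SV_COUNTER);
}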
Merged by Linus Torvalds on 2017-11-15 10:21:58 -08:00
17 changed files with 2162 additions and 21 deletions

kernel/livepatch/Makefile

@@ -1,3 +1,3 @@
 obj-$(CONFIG_LIVEPATCH) += livepatch.o
 
-livepatch-objs := core.o patch.o transition.o
+livepatch-objs := core.o patch.o shadow.o transition.o

kernel/livepatch/core.c

@@ -54,11 +54,6 @@ static bool klp_is_module(struct klp_object *obj)
         return obj->name;
 }
 
-static bool klp_is_object_loaded(struct klp_object *obj)
-{
-        return !obj->name || obj->mod;
-}
-
 /* sets obj->mod if object is not vmlinux and module is found */
 static void klp_find_object_module(struct klp_object *obj)
 {
@@ -285,6 +280,11 @@ static int klp_write_object_relocations(struct module *pmod,
 static int __klp_disable_patch(struct klp_patch *patch)
 {
         struct klp_object *obj;
 
+        if (WARN_ON(!patch->enabled))
+                return -EINVAL;
+
         if (klp_transition_patch)
                 return -EBUSY;
@@ -295,6 +295,10 @@ static int __klp_disable_patch(struct klp_patch *patch)
         klp_init_transition(patch, KLP_UNPATCHED);
 
+        klp_for_each_object(patch, obj)
+                if (obj->patched)
+                        klp_pre_unpatch_callback(obj);
+
         /*
          * Enforce the order of the func->transition writes in
          * klp_init_transition() and the TIF_PATCH_PENDING writes in
@@ -388,13 +392,18 @@ static int __klp_enable_patch(struct klp_patch *patch)
                 if (!klp_is_object_loaded(obj))
                         continue;
 
+                ret = klp_pre_patch_callback(obj);
+                if (ret) {
+                        pr_warn("pre-patch callback failed for object '%s'\n",
+                                klp_is_module(obj) ? obj->name : "vmlinux");
+                        goto err;
+                }
+
                 ret = klp_patch_object(obj);
                 if (ret) {
-                        pr_warn("failed to enable patch '%s'\n",
-                                patch->mod->name);
-
-                        klp_cancel_transition();
-                        return ret;
+                        pr_warn("failed to patch object '%s'\n",
+                                klp_is_module(obj) ? obj->name : "vmlinux");
+                        goto err;
                 }
         }
@@ -403,6 +412,11 @@ static int __klp_enable_patch(struct klp_patch *patch)
         patch->enabled = true;
 
         return 0;
+err:
+        pr_warn("failed to enable patch '%s'\n", patch->mod->name);
+
+        klp_cancel_transition();
+        return ret;
 }
 
 /**
@@ -854,9 +868,15 @@ static void klp_cleanup_module_patches_limited(struct module *mod,
                          * is in transition.
                          */
                         if (patch->enabled || patch == klp_transition_patch) {
+
+                                if (patch != klp_transition_patch)
+                                        klp_pre_unpatch_callback(obj);
+
                                 pr_notice("reverting patch '%s' on unloading module '%s'\n",
                                           patch->mod->name, obj->mod->name);
                                 klp_unpatch_object(obj);
+
+                                klp_post_unpatch_callback(obj);
                         }
 
                         klp_free_object_loaded(obj);
@@ -906,13 +926,25 @@ int klp_module_coming(struct module *mod)
                         pr_notice("applying patch '%s' to loading module '%s'\n",
                                   patch->mod->name, obj->mod->name);
 
+                        ret = klp_pre_patch_callback(obj);
+                        if (ret) {
+                                pr_warn("pre-patch callback failed for object '%s'\n",
+                                        obj->name);
+                                goto err;
+                        }
+
                         ret = klp_patch_object(obj);
                         if (ret) {
                                 pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
                                         patch->mod->name, obj->mod->name, ret);
+
+                                klp_post_unpatch_callback(obj);
                                 goto err;
                         }
 
+                        if (patch != klp_transition_patch)
+                                klp_post_patch_callback(obj);
+
                         break;
                 }
         }

kernel/livepatch/core.h

@@ -2,6 +2,46 @@
 #ifndef _LIVEPATCH_CORE_H
 #define _LIVEPATCH_CORE_H
 
+#include <linux/livepatch.h>
+
 extern struct mutex klp_mutex;
 
+static inline bool klp_is_object_loaded(struct klp_object *obj)
+{
+        return !obj->name || obj->mod;
+}
+
+static inline int klp_pre_patch_callback(struct klp_object *obj)
+{
+        int ret = 0;
+
+        if (obj->callbacks.pre_patch)
+                ret = (*obj->callbacks.pre_patch)(obj);
+
+        obj->callbacks.post_unpatch_enabled = !ret;
+
+        return ret;
+}
+
+static inline void klp_post_patch_callback(struct klp_object *obj)
+{
+        if (obj->callbacks.post_patch)
+                (*obj->callbacks.post_patch)(obj);
+}
+
+static inline void klp_pre_unpatch_callback(struct klp_object *obj)
+{
+        if (obj->callbacks.pre_unpatch)
+                (*obj->callbacks.pre_unpatch)(obj);
+}
+
+static inline void klp_post_unpatch_callback(struct klp_object *obj)
+{
+        if (obj->callbacks.post_unpatch_enabled &&
+            obj->callbacks.post_unpatch)
+                (*obj->callbacks.post_unpatch)(obj);
+
+        obj->callbacks.post_unpatch_enabled = false;
+}
+
 #endif /* _LIVEPATCH_CORE_H */
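
For orientation, a sketch of how a livepatch module might wire up these
callbacks. struct klp_callbacks and the klp_object.callbacks member are
introduced by this series in include/linux/livepatch.h; the demo_* names
and the cmdline_proc_show target (borrowed from the livepatch sample)
are illustrative only, and the enclosing struct klp_patch plus module
init boilerplate are elided:

#include <linux/kernel.h>
#include <linux/livepatch.h>
#include <linux/seq_file.h>

static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
{
        seq_printf(m, "%s\n", "this has been live patched");
        return 0;
}

static int demo_pre_patch(struct klp_object *obj)
{
        /* Runs before the object is patched; a nonzero return aborts
         * patching (see klp_pre_patch_callback() above). */
        pr_info("pre-patch callback for '%s'\n",
                obj->name ? obj->name : "vmlinux");
        return 0;
}

static void demo_post_unpatch(struct klp_object *obj)
{
        /* Runs after unpatching, but only if the matching pre-patch
         * callback succeeded (the post_unpatch_enabled flag above). */
        pr_info("post-unpatch callback for '%s'\n",
                obj->name ? obj->name : "vmlinux");
}

static struct klp_func funcs[] = {
        {
                .old_name = "cmdline_proc_show",
                .new_func = livepatch_cmdline_proc_show,
        }, { }
};

static struct klp_object objs[] = {
        {
                .name = NULL,           /* NULL means vmlinux */
                .funcs = funcs,
                .callbacks = {
                        .pre_patch = demo_pre_patch,
                        .post_unpatch = demo_post_unpatch,
                },
        }, { }
};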

kernel/livepatch/patch.c

@@ -28,6 +28,7 @@
 #include <linux/slab.h>
 #include <linux/bug.h>
 #include <linux/printk.h>
+#include "core.h"
 #include "patch.h"
 #include "transition.h"

kernel/livepatch/shadow.c (new file, 277 lines)

@@ -0,0 +1,277 @@
/*
 * shadow.c - Shadow Variables
 *
 * Copyright (C) 2014 Josh Poimboeuf <jpoimboe@redhat.com>
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2017 Joe Lawrence <joe.lawrence@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

/**
 * DOC: Shadow variable API concurrency notes:
 *
 * The shadow variable API provides a simple relationship between an
 * <obj, id> pair and a pointer value. It is the responsibility of the
 * caller to provide any mutual exclusion required of the shadow data.
 *
 * Once a shadow variable is attached to its parent object via the
 * klp_shadow_*alloc() API calls, it is considered live: any subsequent
 * call to klp_shadow_get() may then return the shadow variable's data
 * pointer. Callers of klp_shadow_*alloc() should prepare shadow data
 * accordingly.
 *
 * The klp_shadow_*alloc() API calls may allocate memory for new shadow
 * variable structures. Their implementation does not call kmalloc
 * inside any spinlocks, but API callers should pass GFP flags according
 * to their specific needs.
 *
 * The klp_shadow_hash is an RCU-enabled hashtable and is safe against
 * concurrent klp_shadow_free() and klp_shadow_get() operations.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/hashtable.h>
#include <linux/slab.h>
#include <linux/livepatch.h>

static DEFINE_HASHTABLE(klp_shadow_hash, 12);

/*
 * klp_shadow_lock provides exclusive access to the klp_shadow_hash and
 * the shadow variables it references.
 */
static DEFINE_SPINLOCK(klp_shadow_lock);

/**
 * struct klp_shadow - shadow variable structure
 * @node: klp_shadow_hash hash table node
 * @rcu_head: RCU is used to safely free this structure
 * @obj: pointer to parent object
 * @id: data identifier
 * @data: data area
 */
struct klp_shadow {
        struct hlist_node node;
        struct rcu_head rcu_head;
        void *obj;
        unsigned long id;
        char data[];
};

/**
 * klp_shadow_match() - verify a shadow variable matches given <obj, id>
 * @shadow: shadow variable to match
 * @obj: pointer to parent object
 * @id: data identifier
 *
 * Return: true if the shadow variable matches.
 */
static inline bool klp_shadow_match(struct klp_shadow *shadow, void *obj,
                                    unsigned long id)
{
        return shadow->obj == obj && shadow->id == id;
}

/**
 * klp_shadow_get() - retrieve a shadow variable data pointer
 * @obj: pointer to parent object
 * @id: data identifier
 *
 * Return: the shadow variable data element, NULL on failure.
 */
void *klp_shadow_get(void *obj, unsigned long id)
{
        struct klp_shadow *shadow;

        rcu_read_lock();

        hash_for_each_possible_rcu(klp_shadow_hash, shadow, node,
                                   (unsigned long)obj) {

                if (klp_shadow_match(shadow, obj, id)) {
                        rcu_read_unlock();
                        return shadow->data;
                }
        }

        rcu_read_unlock();

        return NULL;
}
EXPORT_SYMBOL_GPL(klp_shadow_get);

static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data,
                                       size_t size, gfp_t gfp_flags,
                                       bool warn_on_exist)
{
        struct klp_shadow *new_shadow;
        void *shadow_data;
        unsigned long flags;

        /* Check if the shadow variable already exists */
        shadow_data = klp_shadow_get(obj, id);
        if (shadow_data)
                goto exists;

        /* Allocate a new shadow variable for use inside the lock below */
        new_shadow = kzalloc(size + sizeof(*new_shadow), gfp_flags);
        if (!new_shadow)
                return NULL;

        new_shadow->obj = obj;
        new_shadow->id = id;

        /* Initialize the shadow variable if data provided */
        if (data)
                memcpy(new_shadow->data, data, size);

        /* Look for <obj, id> again under the lock */
        spin_lock_irqsave(&klp_shadow_lock, flags);
        shadow_data = klp_shadow_get(obj, id);
        if (unlikely(shadow_data)) {
                /*
                 * Shadow variable was found, throw away speculative
                 * allocation.
                 */
                spin_unlock_irqrestore(&klp_shadow_lock, flags);
                kfree(new_shadow);
                goto exists;
        }

        /* No <obj, id> found, so attach the newly allocated one */
        hash_add_rcu(klp_shadow_hash, &new_shadow->node,
                     (unsigned long)new_shadow->obj);
        spin_unlock_irqrestore(&klp_shadow_lock, flags);

        return new_shadow->data;

exists:
        if (warn_on_exist) {
                WARN(1, "Duplicate shadow variable <%p, %lx>\n", obj, id);
                return NULL;
        }

        return shadow_data;
}

/**
 * klp_shadow_alloc() - allocate and add a new shadow variable
 * @obj: pointer to parent object
 * @id: data identifier
 * @data: pointer to data to attach to parent
 * @size: size of attached data
 * @gfp_flags: GFP mask for allocation
 *
 * Allocates @size bytes for new shadow variable data using @gfp_flags
 * and copies @size bytes from @data into the new shadow variable's own
 * data space. If @data is NULL, @size bytes are still allocated, but
 * no copy is performed. The new shadow variable is then added to the
 * global hashtable.
 *
 * If an existing <obj, id> shadow variable can be found, this routine
 * will issue a WARN, exit early and return NULL.
 *
 * Return: the shadow variable data element, NULL on duplicate or
 * failure.
 */
void *klp_shadow_alloc(void *obj, unsigned long id, void *data,
                       size_t size, gfp_t gfp_flags)
{
        return __klp_shadow_get_or_alloc(obj, id, data, size, gfp_flags, true);
}
EXPORT_SYMBOL_GPL(klp_shadow_alloc);

/**
 * klp_shadow_get_or_alloc() - get existing or allocate a new shadow variable
 * @obj: pointer to parent object
 * @id: data identifier
 * @data: pointer to data to attach to parent
 * @size: size of attached data
 * @gfp_flags: GFP mask for allocation
 *
 * Returns a pointer to existing shadow data if an <obj, id> shadow
 * variable is already present. Otherwise, it creates a new shadow
 * variable like klp_shadow_alloc().
 *
 * This function guarantees that only one shadow variable exists with
 * the given @id for the given @obj. It also guarantees that the shadow
 * variable will be initialized by the given @data only when it did not
 * exist before.
 *
 * Return: the shadow variable data element, NULL on failure.
 */
void *klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data,
                              size_t size, gfp_t gfp_flags)
{
        return __klp_shadow_get_or_alloc(obj, id, data, size, gfp_flags, false);
}
EXPORT_SYMBOL_GPL(klp_shadow_get_or_alloc);

/**
 * klp_shadow_free() - detach and free a <obj, id> shadow variable
 * @obj: pointer to parent object
 * @id: data identifier
 *
 * This function releases the memory for this <obj, id> shadow variable
 * instance; callers should stop referencing it accordingly.
 */
void klp_shadow_free(void *obj, unsigned long id)
{
        struct klp_shadow *shadow;
        unsigned long flags;

        spin_lock_irqsave(&klp_shadow_lock, flags);

        /* Delete <obj, id> from hash */
        hash_for_each_possible(klp_shadow_hash, shadow, node,
                               (unsigned long)obj) {

                if (klp_shadow_match(shadow, obj, id)) {
                        hash_del_rcu(&shadow->node);
                        kfree_rcu(shadow, rcu_head);
                        break;
                }
        }

        spin_unlock_irqrestore(&klp_shadow_lock, flags);
}
EXPORT_SYMBOL_GPL(klp_shadow_free);

/**
 * klp_shadow_free_all() - detach and free all <*, id> shadow variables
 * @id: data identifier
 *
 * This function releases the memory for all <*, id> shadow variable
 * instances; callers should stop referencing them accordingly.
 */
void klp_shadow_free_all(unsigned long id)
{
        struct klp_shadow *shadow;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&klp_shadow_lock, flags);

        /* Delete all <*, id> from hash */
        hash_for_each(klp_shadow_hash, i, shadow, node) {
                if (klp_shadow_match(shadow, shadow->obj, id)) {
                        hash_del_rcu(&shadow->node);
                        kfree_rcu(shadow, rcu_head);
                }
        }

        spin_unlock_irqrestore(&klp_shadow_lock, flags);
}
EXPORT_SYMBOL_GPL(klp_shadow_free_all);
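
And a complementary sketch of the klp_shadow_get_or_alloc() pattern the
comments above describe: a patched function lazily attaches shadow state
to objects that were already in flight when the patch was applied, and a
post-unpatch callback reclaims every instance. Again, struct parent,
SV_STATE and the function names are hypothetical:

#include <linux/livepatch.h>
#include <linux/slab.h>

struct parent;                          /* some pre-existing kernel object */

#define SV_STATE 2                      /* hypothetical shadow variable id */

static void patched_fast_path(struct parent *p)
{
        int initial = 0;
        int *state;

        /*
         * The first caller allocates and attaches <p, SV_STATE>; any
         * concurrent or later caller gets the existing instance.
         * GFP_ATOMIC because this may run in atomic context.
         */
        state = klp_shadow_get_or_alloc(p, SV_STATE, &initial,
                                        sizeof(initial), GFP_ATOMIC);
        if (!state)
                return;                 /* allocation failed, fall back */

        (*state)++;
}

/* Wired up as a post-unpatch callback: drop every <*, SV_STATE> variable. */
static void demo_post_unpatch(struct klp_object *obj)
{
        klp_shadow_free_all(SV_STATE);
}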

kernel/livepatch/transition.c

@@ -82,6 +82,10 @@ static void klp_complete_transition(void)
         unsigned int cpu;
         bool immediate_func = false;
 
+        pr_debug("'%s': completing %s transition\n",
+                 klp_transition_patch->mod->name,
+                 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
+
         if (klp_target_state == KLP_UNPATCHED) {
                 /*
                  * All tasks have transitioned to KLP_UNPATCHED so we can now
@@ -109,9 +113,6 @@ static void klp_complete_transition(void)
                 }
         }
 
-        if (klp_target_state == KLP_UNPATCHED && !immediate_func)
-                module_put(klp_transition_patch->mod);
-
         /* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
         if (klp_target_state == KLP_PATCHED)
                 klp_synchronize_transition();
@@ -130,6 +131,27 @@ static void klp_complete_transition(void)
         }
 
 done:
+        klp_for_each_object(klp_transition_patch, obj) {
+                if (!klp_is_object_loaded(obj))
+                        continue;
+                if (klp_target_state == KLP_PATCHED)
+                        klp_post_patch_callback(obj);
+                else if (klp_target_state == KLP_UNPATCHED)
+                        klp_post_unpatch_callback(obj);
+        }
+
+        pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
+                  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
+
+        /*
+         * See complementary comment in __klp_enable_patch() for why we
+         * keep the module reference for immediate patches.
+         */
+        if (!klp_transition_patch->immediate && !immediate_func &&
+            klp_target_state == KLP_UNPATCHED) {
+                module_put(klp_transition_patch->mod);
+        }
+
         klp_target_state = KLP_UNDEFINED;
         klp_transition_patch = NULL;
 }
@@ -145,6 +167,9 @@ void klp_cancel_transition(void)
         if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
                 return;
 
+        pr_debug("'%s': canceling patching transition, going to unpatch\n",
+                 klp_transition_patch->mod->name);
+
         klp_target_state = KLP_UNPATCHED;
         klp_complete_transition();
 }
@@ -408,9 +433,6 @@ void klp_try_complete_transition(void)
         }
 
 success:
-        pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
-                  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
-
         /* we're done, now cleanup the data structures */
         klp_complete_transition();
 }
@@ -426,7 +448,8 @@ void klp_start_transition(void)
         WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);
 
-        pr_notice("'%s': %s...\n", klp_transition_patch->mod->name,
+        pr_notice("'%s': starting %s transition\n",
+                  klp_transition_patch->mod->name,
                   klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
 
         /*
@@ -482,6 +505,9 @@ void klp_init_transition(struct klp_patch *patch, int state)
          */
         klp_target_state = state;
 
+        pr_debug("'%s': initializing %s transition\n", patch->mod->name,
+                 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
+
         /*
          * If the patch can be applied or reverted immediately, skip the
          * per-task transitions.
@@ -547,6 +573,11 @@ void klp_reverse_transition(void)
         unsigned int cpu;
         struct task_struct *g, *task;
 
+        pr_debug("'%s': reversing transition from %s\n",
+                 klp_transition_patch->mod->name,
+                 klp_target_state == KLP_PATCHED ? "patching to unpatching" :
+                 "unpatching to patching");
+
         klp_transition_patch->enabled = !klp_transition_patch->enabled;
 
         klp_target_state = !klp_target_state;