crypto: engine - Handle the kthread worker using the new API
Use the new API to create and destroy the crypto engine kthread worker. The API hides some implementation details.

In particular, kthread_create_worker() allocates and initializes struct kthread_worker. It runs the kthread the right way and stores the task_struct pointer in the worker structure. kthread_destroy_worker() flushes all pending work items, stops the kthread and frees the structure.

This patch does not change the existing behavior, except that struct kthread_worker is now allocated dynamically and only a pointer to it is stored.

It is compile tested only, because I did not find an easy way to run the code. That said, it should be pretty safe given the nature of the change.

Signed-off-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
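For readers unfamiliar with the two kthread worker APIs being swapped, the following is a minimal sketch of both lifecycles outside the crypto engine. The old_*/new_* names and the wrapper functions are illustrative only and are not part of this patch:

#include <linux/err.h>
#include <linux/kthread.h>

/* Old pattern: the caller embeds struct kthread_worker and manages the task. */
static struct kthread_worker old_worker;
static struct task_struct *old_task;

static int old_style_start(const char *name)
{
	kthread_init_worker(&old_worker);
	old_task = kthread_run(kthread_worker_fn, &old_worker, "%s", name);
	if (IS_ERR(old_task))
		return PTR_ERR(old_task);
	return 0;
}

static void old_style_stop(void)
{
	kthread_flush_worker(&old_worker);
	kthread_stop(old_task);
}

/*
 * New pattern: kthread_create_worker() allocates the worker and starts the
 * kthread; kthread_destroy_worker() flushes pending work, stops the kthread
 * and frees the worker.
 */
static struct kthread_worker *new_worker;

static int new_style_start(const char *name)
{
	new_worker = kthread_create_worker(0, "%s", name);
	if (IS_ERR(new_worker))
		return PTR_ERR(new_worker);
	return 0;
}

static void new_style_stop(void)
{
	kthread_destroy_worker(new_worker);
}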
@@ -47,7 +47,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 
 	/* If another context is idling then defer */
 	if (engine->idling) {
-		kthread_queue_work(&engine->kworker, &engine->pump_requests);
+		kthread_queue_work(engine->kworker, &engine->pump_requests);
 		goto out;
 	}
 
@@ -58,7 +58,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 
 		/* Only do teardown in the thread */
 		if (!in_kthread) {
-			kthread_queue_work(&engine->kworker,
+			kthread_queue_work(engine->kworker,
 					   &engine->pump_requests);
 			goto out;
 		}
@@ -189,7 +189,7 @@ int crypto_transfer_cipher_request(struct crypto_engine *engine,
 	ret = ablkcipher_enqueue_request(&engine->queue, req);
 
 	if (!engine->busy && need_pump)
-		kthread_queue_work(&engine->kworker, &engine->pump_requests);
+		kthread_queue_work(engine->kworker, &engine->pump_requests);
 
 	spin_unlock_irqrestore(&engine->queue_lock, flags);
 	return ret;
@@ -231,7 +231,7 @@ int crypto_transfer_hash_request(struct crypto_engine *engine,
 	ret = ahash_enqueue_request(&engine->queue, req);
 
 	if (!engine->busy && need_pump)
-		kthread_queue_work(&engine->kworker, &engine->pump_requests);
+		kthread_queue_work(engine->kworker, &engine->pump_requests);
 
 	spin_unlock_irqrestore(&engine->queue_lock, flags);
 	return ret;
@@ -284,7 +284,7 @@ void crypto_finalize_cipher_request(struct crypto_engine *engine,
 
 	req->base.complete(&req->base, err);
 
-	kthread_queue_work(&engine->kworker, &engine->pump_requests);
+	kthread_queue_work(engine->kworker, &engine->pump_requests);
 }
 EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);
 
@@ -321,7 +321,7 @@ void crypto_finalize_hash_request(struct crypto_engine *engine,
 
 	req->base.complete(&req->base, err);
 
-	kthread_queue_work(&engine->kworker, &engine->pump_requests);
+	kthread_queue_work(engine->kworker, &engine->pump_requests);
 }
 EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
 
@@ -345,7 +345,7 @@ int crypto_engine_start(struct crypto_engine *engine)
 	engine->running = true;
 	spin_unlock_irqrestore(&engine->queue_lock, flags);
 
-	kthread_queue_work(&engine->kworker, &engine->pump_requests);
+	kthread_queue_work(engine->kworker, &engine->pump_requests);
 
 	return 0;
 }
@@ -422,11 +422,8 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
 	crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
 	spin_lock_init(&engine->queue_lock);
 
-	kthread_init_worker(&engine->kworker);
-	engine->kworker_task = kthread_run(kthread_worker_fn,
-					   &engine->kworker, "%s",
-					   engine->name);
-	if (IS_ERR(engine->kworker_task)) {
+	engine->kworker = kthread_create_worker(0, "%s", engine->name);
+	if (IS_ERR(engine->kworker)) {
 		dev_err(dev, "failed to create crypto request pump task\n");
 		return NULL;
 	}
@@ -434,7 +431,7 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
 
 	if (engine->rt) {
 		dev_info(dev, "will run requests pump with realtime priority\n");
-		sched_setscheduler(engine->kworker_task, SCHED_FIFO, &param);
+		sched_setscheduler(engine->kworker->task, SCHED_FIFO, &param);
 	}
 
 	return engine;
@@ -455,8 +452,7 @@ int crypto_engine_exit(struct crypto_engine *engine)
 	if (ret)
 		return ret;
 
-	kthread_flush_worker(&engine->kworker);
-	kthread_stop(engine->kworker_task);
+	kthread_destroy_worker(engine->kworker);
 
 	return 0;
 }
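The call-site changes above are mechanical: engine->kworker is now a pointer returned by kthread_create_worker(), so it is passed to kthread_queue_work() directly instead of by address. As a usage sketch only (pump_fn and the static variables are illustrative, not taken from the patch), queueing work to such a dynamically created worker looks like this:

#include <linux/kthread.h>

static struct kthread_worker *worker;	/* obtained from kthread_create_worker() */
static struct kthread_work pump_work;

/* Work function, run in the worker's kthread context. */
static void pump_fn(struct kthread_work *work)
{
	/* process pending requests here */
}

static void queue_pump(void)
{
	kthread_init_work(&pump_work, pump_fn);
	/* worker is already a pointer, so no '&' in front of it */
	kthread_queue_work(worker, &pump_work);
}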