Merge branch 'k.o/for-4.13-rc' into k.o/for-next
Merging our (hopefully) final -rc pull branch into our for-next branch
because some of our pending patches won't apply cleanly without having
the -rc patches in our tree.

Signed-off-by: Doug Ledford <dledford@redhat.com>
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -61,6 +61,7 @@ struct addr_req {
 	void (*callback)(int status, struct sockaddr *src_addr,
 			 struct rdma_dev_addr *addr, void *context);
 	unsigned long timeout;
+	struct delayed_work work;
 	int status;
 	u32 seq;
 };
@@ -293,7 +294,7 @@ int rdma_translate_ip(const struct sockaddr *addr,
 }
 EXPORT_SYMBOL(rdma_translate_ip);
 
-static void set_timeout(unsigned long time)
+static void set_timeout(struct delayed_work *delayed_work, unsigned long time)
 {
 	unsigned long delay;
 
@@ -301,7 +302,7 @@ static void set_timeout(unsigned long time)
 	if ((long)delay < 0)
 		delay = 0;
 
-	mod_delayed_work(addr_wq, &work, delay);
+	mod_delayed_work(addr_wq, delayed_work, delay);
 }
 
 static void queue_req(struct addr_req *req)
@@ -316,8 +317,7 @@ static void queue_req(struct addr_req *req)
 
 	list_add(&req->list, &temp_req->list);
 
-	if (req_list.next == &req->list)
-		set_timeout(req->timeout);
+	set_timeout(&req->work, req->timeout);
 	mutex_unlock(&lock);
 }
 
@@ -572,6 +572,37 @@ static int addr_resolve(struct sockaddr *src_in,
 	return ret;
 }
 
+static void process_one_req(struct work_struct *_work)
+{
+	struct addr_req *req;
+	struct sockaddr *src_in, *dst_in;
+
+	mutex_lock(&lock);
+	req = container_of(_work, struct addr_req, work.work);
+
+	if (req->status == -ENODATA) {
+		src_in = (struct sockaddr *)&req->src_addr;
+		dst_in = (struct sockaddr *)&req->dst_addr;
+		req->status = addr_resolve(src_in, dst_in, req->addr,
+					   true, req->seq);
+		if (req->status && time_after_eq(jiffies, req->timeout)) {
+			req->status = -ETIMEDOUT;
+		} else if (req->status == -ENODATA) {
+			/* requeue the work for retrying again */
+			set_timeout(&req->work, req->timeout);
+			mutex_unlock(&lock);
+			return;
+		}
+	}
+	list_del(&req->list);
+	mutex_unlock(&lock);
+
+	req->callback(req->status, (struct sockaddr *)&req->src_addr,
+		req->addr, req->context);
+	put_client(req->client);
+	kfree(req);
+}
+
 static void process_req(struct work_struct *work)
 {
 	struct addr_req *req, *temp_req;
@@ -589,20 +620,23 @@ static void process_req(struct work_struct *work)
 						   true, req->seq);
 			if (req->status && time_after_eq(jiffies, req->timeout))
 				req->status = -ETIMEDOUT;
-			else if (req->status == -ENODATA)
+			else if (req->status == -ENODATA) {
+				set_timeout(&req->work, req->timeout);
 				continue;
+			}
 		}
 		list_move_tail(&req->list, &done_list);
 	}
 
-	if (!list_empty(&req_list)) {
-		req = list_entry(req_list.next, struct addr_req, list);
-		set_timeout(req->timeout);
-	}
 	mutex_unlock(&lock);
 
 	list_for_each_entry_safe(req, temp_req, &done_list, list) {
 		list_del(&req->list);
+		/* It is safe to cancel other work items from this work item
+		 * because at a time there can be only one work item running
+		 * with this single threaded work queue.
+		 */
+		cancel_delayed_work(&req->work);
 		req->callback(req->status, (struct sockaddr *) &req->src_addr,
 			req->addr, req->context);
 		put_client(req->client);
@@ -645,6 +679,7 @@ int rdma_resolve_ip(struct rdma_addr_client *client,
 	req->context = context;
 	req->client = client;
 	atomic_inc(&client->refcount);
+	INIT_DELAYED_WORK(&req->work, process_one_req);
 	req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq);
 
 	req->status = addr_resolve(src_in, dst_in, addr, true, req->seq);
@@ -699,7 +734,7 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
 			req->status = -ECANCELED;
 			req->timeout = jiffies;
 			list_move(&req->list, &req_list);
-			set_timeout(req->timeout);
+			set_timeout(&req->work, req->timeout);
 			break;
 		}
 	}
@@ -805,9 +840,8 @@ static int netevent_callback(struct notifier_block *self, unsigned long event,
 	if (event == NETEVENT_NEIGH_UPDATE) {
 		struct neighbour *neigh = ctx;
 
-		if (neigh->nud_state & NUD_VALID) {
-			set_timeout(jiffies);
-		}
+		if (neigh->nud_state & NUD_VALID)
+			set_timeout(&work, jiffies);
 	}
 	return 0;
 }
@@ -818,7 +852,7 @@ static struct notifier_block nb = {
 
 int addr_init(void)
 {
-	addr_wq = alloc_workqueue("ib_addr", WQ_MEM_RECLAIM, 0);
+	addr_wq = alloc_ordered_workqueue("ib_addr", WQ_MEM_RECLAIM);
 	if (!addr_wq)
 		return -ENOMEM;
 
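The addr.c hunks above all implement one pattern: each struct addr_req now
carries its own delayed_work, queued on an ordered (single-threaded)
workqueue, instead of the whole request list sharing one work item and one
timer. What follows is a minimal, self-contained sketch of that pattern as a
standalone module, illustrative only; the names (my_req, demo_wq, demo_req,
process_one) are hypothetical and not part of this merge.

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/workqueue.h>

/* Hypothetical request type: one delayed_work per request, mirroring
 * the struct addr_req change above. */
struct my_req {
	struct delayed_work work;
	unsigned long timeout;		/* absolute expiry, in jiffies */
};

static struct workqueue_struct *demo_wq;
static struct my_req demo_req;

static void process_one(struct work_struct *_work)
{
	/* The work_struct is embedded at my_req.work.work, so the request
	 * is recovered the same way process_one_req() does above. */
	struct my_req *req = container_of(_work, struct my_req, work.work);

	if (time_after_eq(jiffies, req->timeout)) {
		pr_info("demo request timed out\n");
		return;
	}
	/* Not finished yet: re-arm this request's own timer, which is the
	 * role the reworked set_timeout() plays in the patch. */
	mod_delayed_work(demo_wq, &req->work, HZ / 4);
}

static int __init demo_init(void)
{
	/* Ordered workqueue: at most one work item executes at a time,
	 * the property that makes cancelling other requests' work items
	 * from inside a running item safe. */
	demo_wq = alloc_ordered_workqueue("demo_wq", WQ_MEM_RECLAIM);
	if (!demo_wq)
		return -ENOMEM;

	demo_req.timeout = jiffies + 2 * HZ;
	INIT_DELAYED_WORK(&demo_req.work, process_one);
	mod_delayed_work(demo_wq, &demo_req.work, 0);
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&demo_req.work);
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Because the queue is ordered, only one work item can run at a time; that is
exactly the guarantee the in-tree comment relies on when process_req() calls
cancel_delayed_work() on other requests' work items.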
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -572,10 +572,11 @@ void ib_unregister_device(struct ib_device *device)
 	}
 	up_read(&lists_rwsem);
 
-	mutex_unlock(&device_mutex);
-
 	ib_device_unregister_rdmacg(device);
 	ib_device_unregister_sysfs(device);
+
+	mutex_unlock(&device_mutex);
+
 	ib_cache_cleanup_one(device);
 
 	ib_security_destroy_port_pkey_list(device);
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1158,7 +1158,7 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
 			   int out_len)
 {
 	struct ib_uverbs_resize_cq	cmd;
-	struct ib_uverbs_resize_cq_resp	resp;
+	struct ib_uverbs_resize_cq_resp	resp = {};
 	struct ib_udata			udata;
 	struct ib_cq			*cq;
 	int				ret = -EINVAL;
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -250,6 +250,7 @@ void ib_uverbs_release_file(struct kref *ref)
 	if (atomic_dec_and_test(&file->device->refcount))
 		ib_uverbs_comp_dev(file->device);
 
+	kobject_put(&file->device->kobj);
 	kfree(file);
 }
 
@@ -917,7 +918,6 @@ err:
 static int ib_uverbs_close(struct inode *inode, struct file *filp)
 {
 	struct ib_uverbs_file *file = filp->private_data;
-	struct ib_uverbs_device *dev = file->device;
 
 	mutex_lock(&file->cleanup_mutex);
 	if (file->ucontext) {
@@ -939,7 +939,6 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
 			ib_uverbs_release_async_event_file);
 
 	kref_put(&file->ref, ib_uverbs_release_file);
-	kobject_put(&dev->kobj);
 
 	return 0;
 }
@@ -1154,7 +1153,6 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
 		kref_get(&file->ref);
 		mutex_unlock(&uverbs_dev->lists_mutex);
 
-		ib_uverbs_event_handler(&file->event_handler, &event);
-
 		mutex_lock(&file->cleanup_mutex);
 		ucontext = file->ucontext;
@@ -1171,6 +1169,7 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
 			 * for example due to freeing the resources
 			 * (e.g mmput).
 			 */
+			ib_uverbs_event_handler(&file->event_handler, &event);
 			ib_dev->disassociate_ucontext(ucontext);
 			mutex_lock(&file->cleanup_mutex);
 			ib_uverbs_cleanup_ucontext(file, ucontext, true);
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -895,7 +895,6 @@ static const struct {
 } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
 	[IB_QPS_RESET] = {
 		[IB_QPS_RESET] = { .valid = 1 },
-		[IB_QPS_ERR] =   { .valid = 1 },
 		[IB_QPS_INIT]  = {
 			.valid = 1,
 			.req_param = {