Convert to the much saner new idr interface.
Only compile tested.
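For reference, the conversion boils down to replacing the old preallocate-and-retry
pattern with idr_preload()/idr_alloc(). The snippet below is only an illustrative
sketch with placeholder names (some_idr, some_lock, ptr, start), not one of the
hunks in this patch:

	/* old interface: preallocate, then retry the allocation under the lock */
retry:
	if (!idr_pre_get(&some_idr, GFP_KERNEL))
		return -ENOMEM;
	spin_lock_irqsave(&some_lock, flags);
	ret = idr_get_new_above(&some_idr, ptr, start, &id);
	spin_unlock_irqrestore(&some_lock, flags);
	if (ret == -EAGAIN)
		goto retry;

	/* new interface: preload outside the lock, allocate atomically inside it */
	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&some_lock, flags);
	id = idr_alloc(&some_idr, ptr, start, 0, GFP_NOWAIT);	/* new id or -errno */
	spin_unlock_irqrestore(&some_lock, flags);
	idr_preload_end();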
v2: Mike triggered WARN_ON() in idr_preload() because send_mad(),
which may be used from non-process context, was calling
idr_preload() unconditionally. Preload iff @gfp_mask has
__GFP_WAIT.
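Concretely, the v2 fix makes the preload in send_mad() conditional (see the
sa_query.c hunk below); roughly:

	bool preload = gfp_mask & __GFP_WAIT;

	if (preload)
		idr_preload(gfp_mask);	/* may sleep, so only for sleepable @gfp_mask */
	spin_lock_irqsave(&idr_lock, flags);
	id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);	/* must not sleep here */
	spin_unlock_irqrestore(&idr_lock, flags);
	if (preload)
		idr_preload_end();

Callers without __GFP_WAIT skip the preload entirely and rely on the GFP_NOWAIT
allocation inside idr_alloc() alone.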
Signed-off-by: Tejun Heo <t...@kernel.org>
Reported-by: Marciniszyn, Mike <mike.marcinis...@intel.com>
Cc: Roland Dreier <rol...@kernel.org>
Cc: Sean Hefty <sean.he...@intel.com>
Cc: Hal Rosenstock <hal.rosenst...@gmail.com>
Cc: linux-rdma@vger.kernel.org
---
 drivers/infiniband/core/cm.c         | 22 +++---
 drivers/infiniband/core/cma.c        | 24 +++-
 drivers/infiniband/core/sa_query.c   | 18 ++
 drivers/infiniband/core/ucm.c        | 16
 drivers/infiniband/core/ucma.c       | 32
 drivers/infiniband/core/uverbs_cmd.c | 17 -
6 files changed, 48 insertions(+), 81 deletions(-)
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -382,20 +382,21 @@ static int cm_init_av_by_path(struct ib_
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
unsigned long flags;
- int ret, id;
+ int id;
static int next_id;
- do {
- spin_lock_irqsave(&cm.lock, flags);
- ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
- next_id, &id);
- if (!ret)
- next_id = ((unsigned) id + 1) & MAX_IDR_MASK;
- spin_unlock_irqrestore(&cm.lock, flags);
- } while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );
+ idr_preload(GFP_KERNEL);
+ spin_lock_irqsave(&cm.lock, flags);
+
+ id = idr_alloc(&cm.local_id_table, cm_id_priv, next_id, 0, GFP_NOWAIT);
+ if (id >= 0)
+ next_id = ((unsigned) id + 1) & MAX_IDR_MASK;
+
+ spin_unlock_irqrestore(&cm.lock, flags);
+ idr_preload_end();
cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
- return ret;
+ return id < 0 ? id : 0;
}
static void cm_free_id(__be32 local_id)
@@ -3844,7 +3845,6 @@ static int __init ib_cm_init(void)
cm.remote_sidr_table = RB_ROOT;
idr_init(&cm.local_id_table);
get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
- idr_pre_get(&cm.local_id_table, GFP_KERNEL);
INIT_LIST_HEAD(&cm.timewait_list);
ret = class_register(&cm_class);
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2143,33 +2143,23 @@ static int cma_alloc_port(struct idr *ps
unsigned short snum)
{
struct rdma_bind_list *bind_list;
- int port, ret;
+ int ret;
bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
if (!bind_list)
return -ENOMEM;
- do {
- ret = idr_get_new_above(ps, bind_list, snum, &port);
- } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));
-
- if (ret)
- goto err1;
-
- if (port != snum) {
- ret = -EADDRNOTAVAIL;
- goto err2;
- }
+ ret = idr_alloc(ps, bind_list, snum, snum + 1, GFP_KERNEL);
+ if (ret < 0)
+ goto err;
bind_list->ps = ps;
- bind_list->port = (unsigned short) port;
+ bind_list->port = (unsigned short)ret;
cma_bind_port(bind_list, id_priv);
return 0;
-err2:
- idr_remove(ps, port);
-err1:
+err:
kfree(bind_list);
- return ret;
+ return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
}
static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -611,19 +611,21 @@ static void init_mad(struct ib_sa_mad *m
static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
{
+ bool preload = gfp_mask & __GFP_WAIT;
unsigned long flags;
int ret, id;
-retry:
- if (!idr_pre_get(&query_idr, gfp_mask))
- return -ENOMEM;
+ if (preload)
+ idr_preload(gfp_mask);
spin_lock_irqsave(&idr_lock, flags);
- ret = idr_get_new(&query_idr, query, &id);
+
+ id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);
+
spin_unlock_irqrestore(&idr_lock, flags);
- if (ret == -EAGAIN)
- goto retry;
- if (ret)
- return ret;
+ if (preload)
+ idr_preload_end();
+ if (id < 0)
+ return id;
query->mad_buf->timeout_ms = timeout_ms;
query->mad_buf->context[0] = query;
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -176,7 +176,6 @@ static void ib_ucm_cleanup_events(struct
static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
{
struct ib_ucm_context *ctx;
- int result;