Here's a start at porting the CMA implementation to the modified header.

I didn't get as far as I had hoped, but will continue tomorrow based
on feedback.

I commented out code that I hadn't gotten to yet, which is mostly the event
handling from the IB CM.
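
To make the intended flow concrete, here's roughly how I expect a
consumer to drive the active side once the TODOs are filled in.  This
is a hypothetical caller sketch (device, my_ctx, my_event_handler,
src_ip, dest_ip, qp, and hello are all made-up names), not part of the
patch:

        struct rdma_cma_id *id;
        struct rdma_cma_conn_param param;
        int ret;

        id = rdma_cma_create_id(device, my_ctx, my_event_handler);
        if (IS_ERR(id))
                return PTR_ERR(id);

        ret = rdma_cma_get_route(id, src_ip, dest_ip);
        /* ... wait for route resolution to complete ... */

        memset(&param, 0, sizeof param);
        param.qp = qp;
        param.private_data = hello;
        param.private_data_len = sizeof hello;
        ret = rdma_cma_connect(id, &param);
        /* ... connection events arrive through my_event_handler ... */

        rdma_cma_disconnect(id);
        rdma_cma_destroy_id(id);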

- Sean


/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
 *
 * This Software is licensed under one of the following licenses:
 *
 * 1) under the terms of the "Common Public License 1.0" a copy of which is
 *    available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/cpl.php.
 *
 * 2) under the terms of the "The BSD License" a copy of which is
 *    available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/bsd-license.php.
 *
 * 3) under the terms of the "GNU General Public License (GPL) Version 2" a
 *    copy of which is available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/gpl-license.php.
 *
 * Licensee has the right to choose one of the above licenses.
 *
 * Redistributions of source code must retain the above copyright
 * notice and one of the license notices.
 *
 * Redistributions in binary form must reproduce both the above copyright
 * notice, one of the license notices in the documentation
 * and/or other materials provided with the distribution.
 *
 */
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <rdma/rdma_cma.h>
#include <rdma/ib_cm.h>

MODULE_AUTHOR("Guy German");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define PFX "ib_cma: "

struct cma_id_private {
        struct rdma_cma_id cma_id;
        struct ib_cm_id *cm_id;

        /* TODO: add state if needed */
        /* TODO: might need refcount for route queries */
        /* atomic_t refcount; */
        spinlock_t lock;
        int backlog;
};

struct cma_route_private {
        struct rdma_route route;
        struct ib_sa_path_rec *path_rec;
};
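
/*
 * Placeholder so the file builds; the real translation from IB CM
 * events to RDMA CMA events is still commented out at the end of
 * this file.
 */
static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
        /* TODO: translate IB CM events into RDMA CMA events */
        return 0;
}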

struct rdma_cma_id* rdma_cma_create_id(struct ib_device *device, void *context,
                                       rdma_cma_event_handler event_handler)
{
        struct cma_id_private *cma_id_priv;
        struct ib_cm_id *cm_id;

        cma_id_priv = kzalloc(sizeof *cma_id_priv, GFP_KERNEL);
        if (!cma_id_priv)
                return ERR_PTR(-ENOMEM);

        cma_id_priv->cma_id.device = device;
        cma_id_priv->cma_id.context = context;
        cma_id_priv->cma_id.event_handler = event_handler;
        spin_lock_init(&cma_id_priv->lock);

        cm_id = ib_create_cm_id(device, cma_ib_handler, cma_id_priv);
        if (IS_ERR(cm_id)) {
                kfree(cma_id_priv);
                return ERR_PTR(PTR_ERR(cm_id));
        }

        cma_id_priv->cm_id = cm_id;
        return &cma_id_priv->cma_id;
}
EXPORT_SYMBOL(rdma_cma_create_id);

void rdma_cma_destroy_id(struct rdma_cma_id *cma_id)
{
        struct cma_id_private *cma_id_priv;

        cma_id_priv = container_of(cma_id, struct cma_id_private, cma_id);
        
        /* TODO: cancel route lookup if active */

        ib_destroy_cm_id(cma_id_priv->cm_id);
        kfree(cma_id->route);
        kfree(cma_id_priv);
}
EXPORT_SYMBOL(rdma_cma_destroy_id);

int rdma_cma_listen(struct rdma_cma_id *cma_id, struct sockaddr *address,
                    int backlog)
{
        struct cma_id_private *cma_id_priv;
        struct cma_route_private *route_priv;
        int ret;

        cma_id_priv = container_of(cma_id, struct cma_id_private, cma_id);

        /* Allocate the container so container_of() on cma_id->route works. */
        route_priv = kzalloc(sizeof *route_priv, GFP_KERNEL);
        if (!route_priv)
                return -ENOMEM;

        cma_id->route = &route_priv->route;
        cma_id_priv->backlog = backlog;
        cma_id->route->src_ip = address;

        /* TODO: convert address into a service_id */
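        /*
         * One possibility (an assumption, open for discussion): derive
         * the service ID from the IP port number, e.g. for AF_INET:
         *
         *      struct sockaddr_in *sin = (struct sockaddr_in *) address;
         *      __be64 sid = cpu_to_be64((u64) ntohs(sin->sin_port));
         *
         * and listen with an all-ones service mask to match it exactly.
         */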
        ret = ib_cm_listen(cma_id_priv->cm_id, 0, 0);
        if (ret)
                goto err;

        return 0;
err:
        cma_id->route = NULL;
        kfree(route_priv);
        return ret;
}
EXPORT_SYMBOL(rdma_cma_listen);

/*
static void cma_path_handler(u64 req_id, void *context, int rec_num)
{
        struct cma_context *cma_id = context;
        enum ib_cma_event event;
        int status = 0;

        if (rec_num <= 0) {
                event = IB_CMA_EVENT_UNREACHABLE;
                goto error;
        }

        cma_id->cma_param.primary_path = &cma_id->cma_path;
        cma_id->cma_param.alternate_path = NULL;

        printk(KERN_DEBUG PFX "%s: dlid=%d slid=%d pkey=%d mtu=%d sid=%llx "
                "qpn=%d qpt=%d psn=%d prd=%s respres=%d rcm=%d flc=%d "
                "cmt=%d rtrc=%d rntrtr=%d maxcm=%d \n",__func__,
                cma_id->cma_param.primary_path->dlid ,
                cma_id->cma_param.primary_path->slid ,
                cma_id->cma_param.primary_path->pkey ,
                cma_id->cma_param.primary_path->mtu ,
                cma_id->cma_param.service_id,
                cma_id->cma_param.qp_num,
                cma_id->cma_param.qp_type,
                cma_id->cma_param.starting_psn,
                (char *)cma_id->cma_param.private_data,
                cma_id->cma_param.responder_resources,
                cma_id->cma_param.remote_cm_response_timeout,
                cma_id->cma_param.flow_control,
                cma_id->cma_param.local_cm_response_timeout,
                cma_id->cma_param.retry_count,
                cma_id->cma_param.rnr_retry_count,
                cma_id->cma_param.max_cm_retries);

        status = ib_send_cm_req(cma_id->cm_id, &cma_id->cma_param);
        if (status) {
                printk(KERN_ERR PFX "%s: cm_req failed %d\n",__func__, status);
                event = IB_CMA_EVENT_REJECTED;
                goto error;
        }

        return;

error:
        printk(KERN_ERR PFX "%s: return error %d \n",__func__, status);
        cma_connection_callback(cma_id, event, NULL);
}

static void cma_route_handler(u64 req_id, void *context, int rec_num)
{
        struct cma_context *cma_id = context;
        enum ib_cma_event event;
        int status = 0;
        
        if (rec_num <= 0) {
                event = IB_CMA_EVENT_UNREACHABLE;
                goto error;
        }
        cma_id->ibat_comp.fn = &cma_path_handler;
        cma_id->ibat_comp.context = cma_id;

        status = ib_at_paths_by_route(&cma_id->cma_route, 0,
                                      &cma_id->cma_path, 1,
                                      &cma_id->ibat_comp);

        if (status) {
                event = IB_CMA_EVENT_DISCONNECTED;
                goto error;
        }
        return;

error:
        printk(KERN_ERR PFX "%s: return error %d \n",__func__, status);
        cma_connection_callback(cma_id, event, NULL);
}
*/

int rdma_cma_get_route(struct rdma_cma_id *cma_id,
                       struct sockaddr *src_ip, struct sockaddr *dest_ip)
{
        struct cma_id_private *cma_id_priv;

        cma_id_priv = container_of(cma_id, struct cma_id_private, cma_id);

        /* TODO: Get remote GID from ARP table, query for path record */
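        /*
         * Rough plan (untested sketch): resolve dest_ip to the remote
         * port's GID through the ARP/neighbour tables, fill in an
         * ib_sa_path_rec with the local and remote GIDs, and issue an
         * ib_sa_path_rec_get() query.  The completion callback would
         * stash the result in cma_route_private.path_rec and report
         * success or failure through cma_id->event_handler.
         */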
        return 0;
}
EXPORT_SYMBOL(rdma_cma_get_route);

int rdma_cma_connect(struct rdma_cma_id *cma_id,
                     struct rdma_cma_conn_param *conn_param)
{
        struct cma_id_private *cma_id_priv;
        struct cma_route_private *route;
        struct ib_cm_req_param req;

        cma_id_priv = container_of(cma_id, struct cma_id_private, cma_id);
        route = container_of(cma_id->route, struct cma_route_private, route);

        cma_id->qp = conn_param->qp;

        memset(&req, 0, sizeof req);
        req.primary_path = route->path_rec;
        /* TODO: convert route->route.dest_ip to service id */
        req.service_id = 0;
        req.qp_num = conn_param->qp->qp_num;
        req.qp_type = IB_QPT_RC;
        req.starting_psn = req.qp_num;
        req.private_data = conn_param->private_data;
        req.private_data_len = conn_param->private_data_len;
        /* TODO: Get these values from user - from qp_attr ?
        u8                      responder_resources;
        u8                      initiator_depth;
        u8                      remote_cm_response_timeout;
        u8                      flow_control;
        u8                      local_cm_response_timeout;
        u8                      retry_count;
        u8                      rnr_retry_count;
        u8                      max_cm_retries;
        */
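        /*
         * Until the caller supplies these, one option is to hard-code
         * conservative defaults (values below are placeholders):
         *
         *      req.responder_resources = 1;
         *      req.initiator_depth = 1;
         *      req.flow_control = 1;
         *      req.remote_cm_response_timeout = 20;
         *      req.local_cm_response_timeout = 20;
         *      req.retry_count = 7;
         *      req.rnr_retry_count = 7;
         *      req.max_cm_retries = 15;
         */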
        req.srq = conn_param->qp->srq ? 1 : 0;

        return ib_send_cm_req(cma_id_priv->cm_id, &req);
}
EXPORT_SYMBOL(rdma_cma_connect);

/*
TODO: fix up
int rdma_cma_accept(struct rdma_cma_id *cma_id, struct ib_qp *qp, 
                    const void *private_data, u8 private_data_len)
{
        struct cma_context *cma_id = cma_id;
        struct ib_cm_rep_param passive_params;
        int status;

        printk(KERN_DEBUG PFX "%s: enter >> private_data = %s (len=%d)\n",
                __func__, (char *)private_data, private_data_len);

        if (private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE) {
                status = -EINVAL;
                goto reject;
        }

        memset(&passive_params, 0, sizeof passive_params);
        passive_params.private_data = private_data;
        passive_params.private_data_len = private_data_len;
        passive_params.qp_num = qp->qp_num;
        passive_params.responder_resources = CMA_TARGET_MAX;
        passive_params.initiator_depth = CMA_INITIATOR_DEPTH;
        passive_params.rnr_retry_count = CMA_RNR_RETRY_COUNT;

        status = cma_modify_qp_state(cma_id->cm_id, qp, IB_QPS_RTR, 0);
        if (status)
                goto reject;
        
        cma_id->accept_cb.func = cm_accept_handler;
        cma_id->accept_cb.context = context;
        status = ib_send_cm_rep(cma_id->cm_id, &passive_params);
        if (status)
                goto reject;

        printk(KERN_DEBUG PFX "%s: return success\n", __func__);
        return 0;

reject:
        printk(KERN_ERR PFX "%s: error status %d\n", __func__, status);
        ib_send_cm_rej(cma_id->cm_id, IB_CM_REJ_CONSUMER_DEFINED,
                        NULL, 0, NULL, 0);
        destroy_cma_id(cma_id);
        return status;
}
EXPORT_SYMBOL(rdma_cma_accept);
*/

int rdma_cma_reject(struct rdma_cma_id *cma_id,
                    const void *private_data, u8 private_data_len)
{
        struct cma_id_private *cma_id_priv;

        cma_id_priv = container_of(cma_id, struct cma_id_private, cma_id);

        return ib_send_cm_rej(cma_id_priv->cm_id, IB_CM_REJ_CONSUMER_DEFINED,
                              NULL, 0, private_data, private_data_len);
}
EXPORT_SYMBOL(rdma_cma_reject);

int rdma_cma_disconnect(struct rdma_cma_id *cma_id)
{
        struct cma_id_private *cma_id_priv;
        struct ib_qp_attr qp_attr;
        int qp_attr_mask;
        int ret;

        cma_id_priv = container_of(cma_id, struct cma_id_private, cma_id);

        qp_attr.qp_state = IB_QPS_ERR;
        qp_attr_mask = IB_QP_STATE;
        ret = ib_modify_qp(cma_id_priv->cma_id.qp, &qp_attr, qp_attr_mask);
        if (ret)
                return ret;

        /* We could either be initiating the disconnect or responding to one. */
        ret = ib_send_cm_dreq(cma_id_priv->cm_id, NULL, 0);
        if (ret)
                ib_send_cm_drep(cma_id_priv->cm_id, NULL, 0);
        return 0;
}
EXPORT_SYMBOL(rdma_cma_disconnect);

/*
TODO: fixup
void cma_connection_callback(struct cma_context *cma_id,
                                const enum ib_cma_event event,
                                const void *private_data)
{
        ib_cma_event_handler conn_cb;
        struct ib_qp *qp = cma_id->cma_conn.qp;
        int status;

        conn_cb = cma_id->cma_conn.cma_event_handler;
        
        switch (event) {
        case IB_CMA_EVENT_ESTABLISHED:
                break;
        case IB_CMA_EVENT_DISCONNECTED:
        case IB_CMA_EVENT_REJECTED:
        case IB_CMA_EVENT_UNREACHABLE:
        case IB_CMA_EVENT_NON_PEER_REJECTED:
                status = cma_disconnect(qp, cma_id, CMA_CLOSE_ABRUPT);
                break;
        default:
                printk(KERN_ERR PFX "%s: unknown event !!\n", __func__);
        }

        printk(KERN_DEBUG PFX "%s: event=%d\n", __func__, event);

        conn_cb(event, cma_id->cma_conn.context, private_data);
}

static inline int cma_rep_recv(struct cma_context *cma_id,
                         struct ib_cm_event *rep_cm_event)
{
        int status;

        status = cma_modify_qp_state(cma_id->cm_id, cma_id->cma_conn.qp,
                                     IB_QPS_RTR, 0);
        if (status) {
                printk(KERN_ERR PFX "%s: fail to modify QPS_RTR %d\n", 
                       __func__, status);
                return status;
        }

        status = cma_modify_qp_state(cma_id->cm_id, cma_id->cma_conn.qp,
                                     IB_QPS_RTS, 0);
        if (status) {
                printk(KERN_ERR PFX "%s: fail to modify QPS_RTR %d\n", 
                       __func__, status);
                return status;
        }

        status = ib_send_cm_rtu(cma_id->cm_id, NULL, 0);
        if (status) {
                printk(KERN_ERR PFX "%s: fail to send cm rtu %d\n", 
                       __func__, status);
                return status;
        }

        return 0;
}

int cma_active_cb_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
        int status = 0;
        enum ib_cma_event cma_event = 0;
        struct cma_context *cma_id = cm_id->context;

        printk(KERN_DEBUG PFX "%s: enter >>> cm_id=%p cma_id=%p\n",__func__, 
                cm_id, cma_id);

        switch (event->event) {
        case IB_CM_REQ_ERROR:
                cma_event = IB_CMA_EVENT_UNREACHABLE;
                break;
        case IB_CM_REJ_RECEIVED:
                cma_event = IB_CMA_EVENT_NON_PEER_REJECTED;
                break;
        case IB_CM_DREP_RECEIVED:
        case IB_CM_TIMEWAIT_EXIT:
                cma_event = IB_CMA_EVENT_DISCONNECTED;
                break;
        case IB_CM_REP_RECEIVED:
                status = cma_rep_recv(cma_id, event);
                if (!status)
                        cma_event = IB_CMA_EVENT_ESTABLISHED;
                else
                        cma_event = IB_CMA_EVENT_DISCONNECTED;
                break;
        case IB_CM_DREQ_RECEIVED:
                ib_send_cm_drep(cm_id, NULL, 0);
                cma_event = IB_CMA_EVENT_DISCONNECTED;
                break;
        case IB_CM_DREQ_ERROR:
                break;
        default:
                printk(KERN_WARNING PFX "%s: cm event (%d) not handled\n",
                        __func__, event->event);
                break;
        }

        printk(KERN_WARNING PFX "%s: cm_event=%d cma_event=%d\n",
                __func__, event->event, cma_event);

        if (cma_event)
                cma_connection_callback(cma_id, cma_event, 
                                        event->private_data);

        return status;
}

static int cma_passive_cb_handler(struct ib_cm_id *cm_id,
                                  struct ib_cm_event *event)
{
        struct cma_context *cma_id;
        ib_cma_listen_handler crcb;
        ib_cma_ac_handler accb;
        void *cr_ctx, *ac_ctx; 
        int status = 0;
        
        printk(KERN_DEBUG PFX "%s: enter >>> cm_id=%p\n",__func__, cm_id);
        cma_id = get_cma_id(cm_id, event);
        if (!cma_id)
                return -EINVAL;

        accb = cma_id->accept_cb.func;
        ac_ctx = cma_id->accept_cb.context;
        switch (event->event) {
        case IB_CM_REQ_RECEIVED:
                crcb = cma_id->creq_cma_id->listen_cb.func;
                cr_ctx = cma_id->creq_cma_id->listen_cb.context;
                memcpy(&cma_id->cma_path,
                        ((struct ib_cm_req_event_param *)
                         &event->param)->primary_path, 
                        sizeof cma_id->cma_path);
                crcb(cma_id, cm_id->device, event->private_data, cr_ctx);
                break;
        case IB_CM_REP_ERROR:
                accb(IB_CMA_EVENT_UNREACHABLE, ac_ctx);
                break;
        case IB_CM_REJ_RECEIVED:
                accb(IB_CMA_EVENT_REJECTED, ac_ctx);
                break;
        case IB_CM_RTU_RECEIVED:
                status = cma_modify_qp_state(cma_id->cm_id, 
                                             cma_id->cma_conn.qp,
                                             IB_QPS_RTS, 0);
                if (!status)
                        accb(IB_CMA_EVENT_ESTABLISHED, ac_ctx);
                else {
                        accb(IB_CMA_EVENT_DISCONNECTED, ac_ctx);
                        status = cma_disconnect(cma_id->cma_conn.qp, cma_id, 
                                             CMA_CLOSE_ABRUPT);
                }
                break;
        case IB_CM_DREQ_RECEIVED:
                ib_send_cm_drep(cm_id, NULL, 0);
                break;
        case IB_CM_DREQ_ERROR:
                break;
        case IB_CM_DREP_RECEIVED:
        case IB_CM_TIMEWAIT_EXIT:
                accb(IB_CMA_EVENT_DISCONNECTED, ac_ctx);
                status = cma_disconnect(cma_id->cma_conn.qp, cma_id, 
                                     CMA_CLOSE_ABRUPT);
                break;
        default:
                break;
        }

        destroy_cma_id(cma_id);

        return status;
}
*/

static int cma_init(void)
{
        return 0;
}

static void cma_cleanup(void)
{
}

module_init(cma_init);
module_exit(cma_cleanup);

