On Wed, 2004-11-03 at 23:13, Roland Dreier wrote:
> Can you resend either with a different mailer or as an attachment?
> The patch was pretty line-wrapped.
Sorry. My mailer (Evolution 1.2.2-4) line-wraps in some cases. Not sure if this is
fixed in newer versions or whether it's a configuration thing. Anyhow,
let's try an attachment for now.
I'm sure you know this, but you will want to skip the changes to
openib-candidate, as they have already been applied.
-- Hal
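
In case it helps review: the heart of the reshuffle is that agent.c no longer
registers receive handlers; mad.c's receive path now gives the driver's
process_mad() "right of first refusal" and hands any reply to agent_send(),
which just routes the response to the pre-registered agent for its management
class. A rough standalone sketch of that routing decision (stub types and
names, not the real ib_* structs) would look like:

#include <stdio.h>
#include <stddef.h>

/* Stub class values -- stand-ins for the IB_MGMT_CLASS_* constants */
enum mgmt_class {
        MGMT_CLASS_SUBN_DIRECTED_ROUTE,
        MGMT_CLASS_SUBN_LID_ROUTED,
        MGMT_CLASS_PERF_MGMT,
        MGMT_CLASS_OTHER
};

/* Stand-in for ib_agent_port_private: one pre-registered agent per class */
struct port_agents {
        const char *dr_smp_agent;
        const char *lr_smp_agent;
        const char *perf_mgmt_agent;
};

/* Mirrors the switch in the new agent_send(): pick the agent by class,
 * or return NULL (the real code returns "not handled") for anything else. */
static const char *pick_agent(const struct port_agents *port, enum mgmt_class c)
{
        switch (c) {
        case MGMT_CLASS_SUBN_DIRECTED_ROUTE:
                return port->dr_smp_agent;
        case MGMT_CLASS_SUBN_LID_ROUTED:
                return port->lr_smp_agent;
        case MGMT_CLASS_PERF_MGMT:
                return port->perf_mgmt_agent;
        default:
                return NULL;
        }
}

int main(void)
{
        struct port_agents port = { "dr_smp", "lr_smp", "perf_mgmt" };
        const char *agent = pick_agent(&port, MGMT_CLASS_PERF_MGMT);

        printf("response goes out via %s agent\n", agent ? agent : "(none)");
        return 0;
}

The real version of course returns status codes and posts the send on the
agent's QP; the above is only the routing decision.
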
Index: openib-candidate/src/linux-kernel/infiniband/access/agent.c
===================================================================
--- openib-candidate/src/linux-kernel/infiniband/access/agent.c (revision 1125)
+++ openib-candidate/src/linux-kernel/infiniband/access/agent.c (working copy)
@@ -29,7 +29,6 @@
#include <asm/bug.h>
-
static spinlock_t ib_agent_port_list_lock = SPIN_LOCK_UNLOCKED;
static LIST_HEAD(ib_agent_port_list);
@@ -37,9 +36,9 @@
* Fixup a directed route SMP for sending. Return 0 if the SMP should be
* discarded.
*/
-static int smi_handle_dr_smp_send(struct ib_smp *smp,
- u8 node_type,
- int port_num)
+int smi_handle_dr_smp_send(struct ib_smp *smp,
+ u8 node_type,
+ int port_num)
{
u8 hop_ptr, hop_cnt;
@@ -111,23 +110,6 @@
}
/*
- * Sender side handling of outgoing SMPs. Fixup the SMP as required by
- * the spec. Return 0 if the SMP should be dropped.
- */
-static int smi_handle_smp_send(struct ib_smp *smp,
- u8 node_type,
- int port_num)
-{
- switch (smp->mgmt_class)
- {
- case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
- return smi_handle_dr_smp_send(smp, node_type, port_num);
- default: /* LR SM class */
- return 1;
- }
-}
-
-/*
* Return 1 if the SMP should be handled by the local SMA via process_mad.
*/
static inline int smi_check_local_smp(struct ib_mad_agent *mad_agent,
@@ -145,10 +127,10 @@
* Adjust information for a received SMP. Return 0 if the SMP should be
* dropped.
*/
-static int smi_handle_dr_smp_recv(struct ib_smp *smp,
- u8 node_type,
- int port_num,
- int phys_port_cnt)
+int smi_handle_dr_smp_recv(struct ib_smp *smp,
+ u8 node_type,
+ int port_num,
+ int phys_port_cnt)
{
u8 hop_ptr, hop_cnt;
@@ -221,29 +203,10 @@
}
/*
- * Receive side handling SMPs. Save receive information as required by
- * the spec. Return 0 if the SMP should be dropped.
- */
-static int smi_handle_smp_recv(struct ib_smp *smp,
- u8 node_type,
- int port_num,
- int phys_port_cnt)
-{
- switch (smp->mgmt_class)
- {
- case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
- return smi_handle_dr_smp_recv(smp, node_type,
- port_num, phys_port_cnt);
- default: /* LR SM class */
- return 1;
- }
-}
-
-/*
* Return 1 if the received DR SMP should be forwarded to the send queue.
* Return 0 if the SMP should be completed up the stack.
*/
-static int smi_check_forward_dr_smp(struct ib_smp *smp)
+int smi_check_forward_dr_smp(struct ib_smp *smp)
{
u8 hop_ptr, hop_cnt;
@@ -274,31 +237,6 @@
return 0;
}
-/*
- * Return 1 if the received SMP should be forwarded to the send queue.
- * Return 0 if the SMP should be completed up the stack.
- */
-static int smi_check_forward_smp(struct ib_smp *smp)
-{
- switch (smp->mgmt_class)
- {
- case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
- return smi_check_forward_dr_smp(smp);
- default: /* LR SM class */
- return 1;
- }
-}
-
-static int mad_process_local(struct ib_mad_agent *mad_agent,
- struct ib_mad *mad,
- struct ib_mad *mad_response,
- u16 slid)
-{
- return mad_agent->device->process_mad(mad_agent->device, 0,
- mad_agent->port_num,
- slid, mad, mad_response);
-}
-
static inline struct ib_agent_port_private *
__ib_get_agent_mad(struct ib_device *device, int port_num,
struct ib_mad_agent *mad_agent)
@@ -339,12 +277,28 @@
return entry;
}
-int agent_mad_send(struct ib_mad_agent *mad_agent,
- struct ib_mad *mad,
- struct ib_grh *grh,
- struct ib_mad_recv_wc *mad_recv_wc)
+int smi_check_local_dr_smp(struct ib_smp *smp,
+ struct ib_device *device,
+ int port_num)
{
struct ib_agent_port_private *port_priv;
+
+ port_priv = ib_get_agent_mad(device, port_num, NULL);
+ if (!port_priv) {
+ printk(KERN_DEBUG SPFX "smi_check_local_dr_smp %s port %d not open\n",
+ device->name, port_num);
+ return 1;
+ }
+
+ return smi_check_local_smp(port_priv->dr_smp_agent, smp);
+}
+
+static int agent_mad_send(struct ib_mad_agent *mad_agent,
+ struct ib_mad *mad,
+ struct ib_grh *grh,
+ struct ib_mad_recv_wc *mad_recv_wc)
+{
+ struct ib_agent_port_private *port_priv;
struct ib_agent_send_wr *agent_send_wr;
struct ib_sge gather_list;
struct ib_send_wr send_wr;
@@ -445,114 +399,41 @@
return ret;
}
-int smi_send_smp(struct ib_mad_agent *mad_agent,
- struct ib_smp *smp,
- struct ib_mad_recv_wc *mad_recv_wc,
- u16 slid,
- int phys_port_cnt)
+int agent_send(struct ib_mad *mad,
+ struct ib_grh *grh,
+ struct ib_wc *wc,
+ struct ib_device *device,
+ int port_num)
{
- struct ib_mad *smp_response;
- int ret;
+ struct ib_agent_port_private *port_priv;
+ struct ib_mad_agent *mad_agent;
+ struct ib_mad_recv_wc mad_recv_wc;
- if (!smi_handle_smp_send(smp, mad_agent->device->node_type,
- mad_agent->port_num)) {
- /* SMI failed send */
- return 0;
- }
-
- if (smi_check_local_smp(mad_agent, smp)) {
- smp_response = kmalloc(sizeof(struct ib_mad), GFP_KERNEL);
- if (!smp_response)
- return 0;
-
- ret = mad_process_local(mad_agent, (struct ib_mad *)smp,
- smp_response, slid);
- if (ret & IB_MAD_RESULT_SUCCESS) {
- if (!smi_handle_smp_recv((struct ib_smp *)smp_response,
- mad_agent->device->node_type,
- mad_agent->port_num,
- phys_port_cnt)) {
- /* SMI failed receive */
- kfree(smp_response);
- return 0;
- }
- if (agent_mad_send(mad_agent, smp_response,
- NULL, mad_recv_wc))
- kfree(smp_response);
- } else
- kfree(smp_response);
+ port_priv = ib_get_agent_mad(device, port_num, NULL);
+ if (!port_priv) {
+ printk(KERN_DEBUG SPFX "agent_send %s port %d not open\n",
+ device->name, port_num);
return 1;
}
- /* Post the send on the QP */
- return 1;
-}
-
-int agent_mad_response(struct ib_mad_agent *mad_agent,
- struct ib_mad *mad,
- struct ib_mad_recv_wc *mad_recv_wc,
- u16 slid)
-{
- struct ib_mad *response;
- struct ib_grh *grh;
- int ret;
-
- response = kmalloc(sizeof(struct ib_mad), GFP_KERNEL);
- if (!response)
- return 0;
-
- ret = mad_process_local(mad_agent, mad, response, slid);
- if (ret & IB_MAD_RESULT_SUCCESS) {
- grh = (void *)mad - sizeof(struct ib_grh);
- agent_mad_send(mad_agent, response, grh, mad_recv_wc);
- } else
- kfree(response);
- return 1;
-}
-
-int agent_recv_mad(struct ib_mad_agent *mad_agent,
- struct ib_mad *mad,
- struct ib_mad_recv_wc *mad_recv_wc,
- int phys_port_cnt)
-{
- int port_num;
-
- /* SM Directed Route or LID Routed class */
- if (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ||
- mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) {
- if (mad_agent->device->node_type != IB_NODE_SWITCH)
- port_num = mad_agent->port_num;
- else
- port_num = mad_recv_wc->wc->port_num;
- if (!smi_handle_smp_recv((struct ib_smp *)mad,
- mad_agent->device->node_type,
- port_num, phys_port_cnt)) {
- /* SMI failed receive */
- return 0;
- }
-
- if (smi_check_forward_smp((struct ib_smp *)mad)) {
- smi_send_smp(mad_agent,
- (struct ib_smp *)mad,
- mad_recv_wc,
- mad_recv_wc->wc->slid,
- phys_port_cnt);
- return 0;
- }
-
- } else {
- /* PerfMgmt class */
- if (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) {
- agent_mad_response(mad_agent, mad, mad_recv_wc,
- mad_recv_wc->wc->slid);
- } else {
- printk(KERN_ERR "agent_recv_mad: Unexpected mgmt class 0x%x received\n", mad->mad_hdr.mgmt_class);
- }
- return 0;
+ /* Get mad agent based on mgmt_class in MAD */
+ switch (mad->mad_hdr.mgmt_class) {
+ case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
+ mad_agent = port_priv->dr_smp_agent;
+ break;
+ case IB_MGMT_CLASS_SUBN_LID_ROUTED:
+ mad_agent = port_priv->lr_smp_agent;
+ break;
+ case IB_MGMT_CLASS_PERF_MGMT:
+ mad_agent = port_priv->perf_mgmt_agent;
+ break;
+ default:
+ return 1;
}
- /* Complete receive up stack */
- return 1;
+ /* Other fields don't matter so should change signature to just use wc */
+ mad_recv_wc.wc = wc;
+ return agent_mad_send(mad_agent, mad, grh, &mad_recv_wc);
}
static void agent_send_handler(struct ib_mad_agent *mad_agent,
@@ -603,26 +484,6 @@
kfree(agent_send_wr->mad);
}
-static void agent_recv_handler(struct ib_mad_agent *mad_agent,
- struct ib_mad_recv_wc *mad_recv_wc)
-{
- struct ib_agent_port_private *port_priv;
-
- /* Find matching MAD agent */
- port_priv = ib_get_agent_mad(NULL, 0, mad_agent);
- if (!port_priv) {
- printk(KERN_ERR SPFX "agent_recv_handler: no matching MAD agent %p\n",
- mad_agent);
- } else {
- agent_recv_mad(mad_agent,
- mad_recv_wc->recv_buf->mad,
- mad_recv_wc, port_priv->phys_port_cnt);
- }
-
- /* Free received MAD */
- ib_free_recv_mad(mad_recv_wc);
-}
-
int ib_agent_port_open(struct ib_device *device, int port_num,
int phys_port_cnt)
{
@@ -663,19 +524,12 @@
reg_req.mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
reg_req.mgmt_class_version = 1;
- /* SMA needs to receive Get, Set, and TrapRepress methods */
- bitmap_zero((unsigned long *)&reg_req.method_mask, IB_MGMT_MAX_METHODS);
- set_bit(IB_MGMT_METHOD_GET, (unsigned long *)&reg_req.method_mask);
- set_bit(IB_MGMT_METHOD_SET, (unsigned long *)&reg_req.method_mask);
- set_bit(IB_MGMT_METHOD_TRAP_REPRESS,
- (unsigned long *)&reg_req.method_mask);
-
port_priv->dr_smp_agent = ib_register_mad_agent(device, port_num,
IB_QPT_SMI,
- &reg_req, 0,
+ NULL, 0,
&agent_send_handler,
- &agent_recv_handler,
- NULL);
+ NULL, NULL);
+
if (IS_ERR(port_priv->dr_smp_agent)) {
ret = PTR_ERR(port_priv->dr_smp_agent);
goto error2;
@@ -685,10 +539,9 @@
reg_req.mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
port_priv->lr_smp_agent = ib_register_mad_agent(device, port_num,
IB_QPT_SMI,
- &reg_req, 0,
+ NULL, 0,
&agent_send_handler,
- &agent_recv_handler,
- NULL);
+ NULL, NULL);
if (IS_ERR(port_priv->lr_smp_agent)) {
ret = PTR_ERR(port_priv->lr_smp_agent);
goto error3;
@@ -696,14 +549,11 @@
/* Obtain MAD agent for PerfMgmt class */
reg_req.mgmt_class = IB_MGMT_CLASS_PERF_MGMT;
- clear_bit(IB_MGMT_METHOD_TRAP_REPRESS,
- (unsigned long *)&reg_req.method_mask);
- port_priv->perf_mgmt_agent = ib_register_mad_agent(device, port_num,
+ port_priv->perf_mgmt_agent = ib_register_mad_agent(device, port_num,
IB_QPT_GSI,
- &reg_req, 0,
+ NULL, 0,
&agent_send_handler,
- &agent_recv_handler,
- NULL);
+ NULL, NULL);
if (IS_ERR(port_priv->perf_mgmt_agent)) {
ret = PTR_ERR(port_priv->perf_mgmt_agent);
goto error4;
Index: openib-candidate/src/linux-kernel/infiniband/access/mad.c
===================================================================
--- openib-candidate/src/linux-kernel/infiniband/access/mad.c (revision 1124)
+++ openib-candidate/src/linux-kernel/infiniband/access/mad.c (working copy)
@@ -781,21 +781,13 @@
goto out;
}
version = port_priv->version[mad->mad_hdr.class_version];
- if (!version) {
- printk(KERN_ERR PFX "MAD received on port %d for class "
- "version %d with no client\n",
- port_priv->port_num, mad->mad_hdr.class_version);
+ if (!version)
goto out;
- }
class = version->method_table[convert_mgmt_class(
mad->mad_hdr.mgmt_class)];
if (class)
mad_agent = class->agent[mad->mad_hdr.method &
~IB_MGMT_METHOD_RESP];
- else
- printk(KERN_ERR PFX "MAD received on port %d for class "
- "%d with no client\n",
- port_priv->port_num, mad->mad_hdr.mgmt_class);
}
out:
@@ -808,9 +800,7 @@
"%p on port %d\n",
&mad_agent->agent, port_priv->port_num);
}
- } else
- printk(KERN_NOTICE PFX "No matching mad agent found for "
- "received MAD on port %d\n", port_priv->port_num);
+ }
spin_unlock_irqrestore(&port_priv->reg_lock, flags);
@@ -934,6 +924,23 @@
}
}
+extern int smi_handle_dr_smp_recv(struct ib_smp *smp,
+ u8 node_type,
+ int port_num,
+ int phys_port_cnt);
+extern int smi_check_forward_dr_smp(struct ib_smp *smp);
+extern int smi_handle_dr_smp_send(struct ib_smp *smp,
+ u8 node_type,
+ int port_num);
+extern int smi_check_local_dr_smp(struct ib_smp *smp,
+ struct ib_device *device,
+ int port_num);
+extern int agent_send(struct ib_mad *mad,
+ struct ib_grh *grh,
+ struct ib_wc *wc,
+ struct ib_device *device,
+ int port_num);
+
static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
struct ib_wc *wc)
{
@@ -942,6 +949,7 @@
struct ib_mad_private *recv;
struct ib_mad_list_head *mad_list;
struct ib_mad_agent_private *mad_agent;
+ struct ib_smp *smp;
int solicited;
mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
@@ -968,14 +976,69 @@
if (!validate_mad(recv->header.recv_buf.mad, qp_info->qp->qp_num))
goto out;
- /* Snoop MAD ? */
- if (port_priv->device->snoop_mad)
- if (port_priv->device->snoop_mad(port_priv->device,
- (u8)port_priv->port_num,
- wc->slid,
- recv->header.recv_buf.mad))
+ if (recv->header.recv_buf.mad->mad_hdr.mgmt_class ==
+ IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
+ smp = (struct ib_smp *)recv->header.recv_buf.mad;
+ if (!smi_handle_dr_smp_recv(smp,
+ port_priv->device->node_type,
+ port_priv->port_num,
+ port_priv->phys_port_cnt))
goto out;
+ if (!smi_check_forward_dr_smp(smp))
+ goto out;
+ if (!smi_handle_dr_smp_send(smp,
+ port_priv->device->node_type,
+ port_priv->port_num))
+ goto out;
+ if (!smi_check_local_dr_smp(smp,
+ port_priv->device,
+ port_priv->port_num))
+ goto out;
+ }
+ /* Give driver "right of first refusal" on incoming MAD */
+ if (port_priv->device->process_mad) {
+ struct ib_mad *response;
+ struct ib_grh *grh;
+ int ret;
+
+ response = kmalloc(sizeof(struct ib_mad), GFP_KERNEL);
+ if (!response) {
+ printk(KERN_ERR PFX "No memory for response MAD\n");
+ /* Is it better to assume that it wouldn't be processed ? */
+ goto out;
+ }
+
+ ret = port_priv->device->process_mad(port_priv->device, 0,
+ port_priv->port_num,
+ wc->slid,
+ recv->header.recv_buf.mad,
+ response);
+ if ((ret & IB_MAD_RESULT_SUCCESS) &&
+ (ret & IB_MAD_RESULT_REPLY)) {
+ if (response->mad_hdr.mgmt_class ==
+ IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
+ if (!smi_handle_dr_smp_recv(
+ (struct ib_smp *)response,
+ port_priv->device->node_type,
+ port_priv->port_num,
+ port_priv->phys_port_cnt)) {
+ kfree(response);
+ goto out;
+ }
+ }
+ /* Send response */
+ grh = (void *)recv->header.recv_buf.mad - sizeof(struct ib_grh);
+ if (agent_send(response, grh, wc,
+ port_priv->device,
+ port_priv->port_num)) {
+ kfree(response);
+ goto out;
+ }
+ } else
+ kfree(response);
+ }
+
/* Determine corresponding MAD agent for incoming receive MAD */
solicited = solicited_mad(recv->header.recv_buf.mad);
mad_agent = find_mad_agent(port_priv, recv->header.recv_buf.mad,
@@ -1673,7 +1736,9 @@
* Open the port
* Create the QP, PD, MR, and CQ if needed
*/
-static int ib_mad_port_open(struct ib_device *device, int port_num)
+static int ib_mad_port_open(struct ib_device *device,
+ int port_num,
+ int num_ports)
{
int ret, cq_size;
u64 iova = 0;
@@ -1702,6 +1767,7 @@
memset(port_priv, 0, sizeof *port_priv);
port_priv->device = device;
port_priv->port_num = port_num;
+ port_priv->phys_port_cnt = num_ports;
spin_lock_init(&port_priv->reg_lock);
cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
@@ -1836,7 +1902,7 @@
cur_port = 1;
}
for (i = 0; i < num_ports; i++, cur_port++) {
- ret = ib_mad_port_open(device, cur_port);
+ ret = ib_mad_port_open(device, cur_port, num_ports);
if (ret) {
printk(KERN_ERR PFX "Couldn't open %s port %d\n",
device->name, cur_port);
Index: openib-candidate/src/linux-kernel/infiniband/access/mad_priv.h
===================================================================
--- openib-candidate/src/linux-kernel/infiniband/access/mad_priv.h (revision 1119)
+++ openib-candidate/src/linux-kernel/infiniband/access/mad_priv.h (working copy)
@@ -156,6 +156,7 @@
struct list_head port_list;
struct ib_device *device;
int port_num;
+ int phys_port_cnt;
struct ib_cq *cq;
struct ib_pd *pd;
struct ib_mr *mr;
Index: openib-candidate/src/linux-kernel/infiniband/include/ib_verbs.h
===================================================================
--- openib-candidate/src/linux-kernel/infiniband/include/ib_verbs.h (revision 1108)
+++ openib-candidate/src/linux-kernel/infiniband/include/ib_verbs.h (working copy)
@@ -640,14 +640,10 @@
enum ib_mad_result {
IB_MAD_RESULT_FAILURE = 0, /* (!SUCCESS is the important flag) */
IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully processed */
- IB_MAD_RESULT_REPLY = 1 << 1 /* Reply packet needs to be sent */
+ IB_MAD_RESULT_REPLY = 1 << 1, /* Reply packet needs to be sent */
+ IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */
};
-enum ib_snoop_mad_result {
- IB_SNOOP_MAD_IGNORED,
- IB_SNOOP_MAD_CONSUMED
-};
-
#define IB_DEVICE_NAME_MAX 64
struct ib_device {
Index: roland-merge/src/linux-kernel/infiniband/include/ib_verbs.h
===================================================================
--- roland-merge/src/linux-kernel/infiniband/include/ib_verbs.h (revision 1125)
+++ roland-merge/src/linux-kernel/infiniband/include/ib_verbs.h (working copy)
@@ -656,14 +656,10 @@
enum ib_mad_result {
IB_MAD_RESULT_FAILURE = 0, /* (!SUCCESS is the important flag) */
IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully processed */
- IB_MAD_RESULT_REPLY = 1 << 1 /* Reply packet needs to be sent */
+ IB_MAD_RESULT_REPLY = 1 << 1, /* Reply packet needs to be sent */
+ IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */
};
-enum ib_snoop_mad_result {
- IB_SNOOP_MAD_IGNORED,
- IB_SNOOP_MAD_CONSUMED
-};
-
#define IB_DEVICE_NAME_MAX 64
struct ib_device {
Index: roland-merge/src/linux-kernel/infiniband/core/agent.c
===================================================================
--- roland-merge/src/linux-kernel/infiniband/core/agent.c (revision 1125)
+++ roland-merge/src/linux-kernel/infiniband/core/agent.c (working copy)
@@ -29,7 +29,6 @@
#include <asm/bug.h>
-
static spinlock_t ib_agent_port_list_lock = SPIN_LOCK_UNLOCKED;
static LIST_HEAD(ib_agent_port_list);
@@ -37,9 +36,9 @@
* Fixup a directed route SMP for sending. Return 0 if the SMP should be
* discarded.
*/
-static int smi_handle_dr_smp_send(struct ib_smp *smp,
- u8 node_type,
- int port_num)
+int smi_handle_dr_smp_send(struct ib_smp *smp,
+ u8 node_type,
+ int port_num)
{
u8 hop_ptr, hop_cnt;
@@ -111,23 +110,6 @@
}
/*
- * Sender side handling of outgoing SMPs. Fixup the SMP as required by
- * the spec. Return 0 if the SMP should be dropped.
- */
-static int smi_handle_smp_send(struct ib_smp *smp,
- u8 node_type,
- int port_num)
-{
- switch (smp->mgmt_class)
- {
- case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
- return smi_handle_dr_smp_send(smp, node_type, port_num);
- default: /* LR SM class */
- return 1;
- }
-}
-
-/*
* Return 1 if the SMP should be handled by the local SMA via process_mad.
*/
static inline int smi_check_local_smp(struct ib_mad_agent *mad_agent,
@@ -145,10 +127,10 @@
* Adjust information for a received SMP. Return 0 if the SMP should be
* dropped.
*/
-static int smi_handle_dr_smp_recv(struct ib_smp *smp,
- u8 node_type,
- int port_num,
- int phys_port_cnt)
+int smi_handle_dr_smp_recv(struct ib_smp *smp,
+ u8 node_type,
+ int port_num,
+ int phys_port_cnt)
{
u8 hop_ptr, hop_cnt;
@@ -221,29 +203,10 @@
}
/*
- * Receive side handling SMPs. Save receive information as required by
- * the spec. Return 0 if the SMP should be dropped.
- */
-static int smi_handle_smp_recv(struct ib_smp *smp,
- u8 node_type,
- int port_num,
- int phys_port_cnt)
-{
- switch (smp->mgmt_class)
- {
- case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
- return smi_handle_dr_smp_recv(smp, node_type,
- port_num, phys_port_cnt);
- default: /* LR SM class */
- return 1;
- }
-}
-
-/*
* Return 1 if the received DR SMP should be forwarded to the send queue.
* Return 0 if the SMP should be completed up the stack.
*/
-static int smi_check_forward_dr_smp(struct ib_smp *smp)
+int smi_check_forward_dr_smp(struct ib_smp *smp)
{
u8 hop_ptr, hop_cnt;
@@ -274,31 +237,6 @@
return 0;
}
-/*
- * Return 1 if the received SMP should be forwarded to the send queue.
- * Return 0 if the SMP should be completed up the stack.
- */
-static int smi_check_forward_smp(struct ib_smp *smp)
-{
- switch (smp->mgmt_class)
- {
- case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
- return smi_check_forward_dr_smp(smp);
- default: /* LR SM class */
- return 1;
- }
-}
-
-static int mad_process_local(struct ib_mad_agent *mad_agent,
- struct ib_mad *mad,
- struct ib_mad *mad_response,
- u16 slid)
-{
- return mad_agent->device->process_mad(mad_agent->device, 0,
- mad_agent->port_num,
- slid, mad, mad_response);
-}
-
static inline struct ib_agent_port_private *
__ib_get_agent_mad(struct ib_device *device, int port_num,
struct ib_mad_agent *mad_agent)
@@ -339,30 +277,47 @@
return entry;
}
-void agent_mad_send(struct ib_mad_agent *mad_agent,
- struct ib_mad *mad,
- struct ib_grh *grh,
- struct ib_mad_recv_wc *mad_recv_wc)
+int smi_check_local_dr_smp(struct ib_smp *smp,
+ struct ib_device *device,
+ int port_num)
{
struct ib_agent_port_private *port_priv;
+
+ port_priv = ib_get_agent_mad(device, port_num, NULL);
+ if (!port_priv) {
+ printk(KERN_DEBUG SPFX "smi_check_local_dr_smp %s port %d not open\n",
+ device->name, port_num);
+ return 1;
+ }
+
+ return smi_check_local_smp(port_priv->dr_smp_agent, smp);
+}
+
+static int agent_mad_send(struct ib_mad_agent *mad_agent,
+ struct ib_mad *mad,
+ struct ib_grh *grh,
+ struct ib_mad_recv_wc *mad_recv_wc)
+{
+ struct ib_agent_port_private *port_priv;
struct ib_agent_send_wr *agent_send_wr;
struct ib_sge gather_list;
struct ib_send_wr send_wr;
struct ib_send_wr *bad_send_wr;
struct ib_ah_attr ah_attr;
unsigned long flags;
+ int ret = 1;
/* Find matching MAD agent */
port_priv = ib_get_agent_mad(NULL, 0, mad_agent);
if (!port_priv) {
printk(KERN_ERR SPFX "agent_mad_send: no matching MAD agent %p\n",
mad_agent);
- return;
+ goto out;
}
agent_send_wr = kmalloc(sizeof(*agent_send_wr), GFP_KERNEL);
if (!agent_send_wr)
- return;
+ goto out;
agent_send_wr->mad = mad;
/* PCI mapping */
@@ -406,8 +361,8 @@
agent_send_wr->ah = ib_create_ah(mad_agent->qp->pd, &ah_attr);
if (IS_ERR(agent_send_wr->ah)) {
printk(KERN_ERR SPFX "No memory for address handle\n");
- kfree(mad);
- return;
+ kfree(agent_send_wr);
+ goto out;
}
send_wr.wr.ud.ah = agent_send_wr->ah;
@@ -432,120 +387,53 @@
sizeof(struct ib_mad),
PCI_DMA_TODEVICE);
ib_destroy_ah(agent_send_wr->ah);
+ kfree(agent_send_wr);
} else {
list_add_tail(&agent_send_wr->send_list,
&port_priv->send_posted_list);
spin_unlock_irqrestore(&port_priv->send_list_lock, flags);
+ ret = 0;
}
+
+out:
+ return ret;
}
-int smi_send_smp(struct ib_mad_agent *mad_agent,
- struct ib_smp *smp,
- struct ib_mad_recv_wc *mad_recv_wc,
- u16 slid,
- int phys_port_cnt)
+int agent_send(struct ib_mad *mad,
+ struct ib_grh *grh,
+ struct ib_wc *wc,
+ struct ib_device *device,
+ int port_num)
{
- struct ib_mad *smp_response;
- int ret;
+ struct ib_agent_port_private *port_priv;
+ struct ib_mad_agent *mad_agent;
+ struct ib_mad_recv_wc mad_recv_wc;
- if (!smi_handle_smp_send(smp, mad_agent->device->node_type,
- mad_agent->port_num)) {
- /* SMI failed send */
- return 0;
- }
-
- if (smi_check_local_smp(mad_agent, smp)) {
- smp_response = kmalloc(sizeof(struct ib_mad), GFP_KERNEL);
- if (!smp_response)
- return 0;
-
- ret = mad_process_local(mad_agent, (struct ib_mad *)smp,
- smp_response, slid);
- if (ret & IB_MAD_RESULT_SUCCESS) {
- if (!smi_handle_smp_recv((struct ib_smp *)smp_response,
- mad_agent->device->node_type,
- mad_agent->port_num,
- phys_port_cnt)) {
- /* SMI failed receive */
- kfree(smp_response);
- return 0;
- }
- agent_mad_send(mad_agent, smp_response,
- NULL, mad_recv_wc);
- } else
- kfree(smp_response);
+ port_priv = ib_get_agent_mad(device, port_num, NULL);
+ if (!port_priv) {
+ printk(KERN_DEBUG SPFX "agent_send %s port %d not open\n",
+ device->name, port_num);
return 1;
}
- /* Post the send on the QP */
- return 1;
-}
-
-int agent_mad_response(struct ib_mad_agent *mad_agent,
- struct ib_mad *mad,
- struct ib_mad_recv_wc *mad_recv_wc,
- u16 slid)
-{
- struct ib_mad *response;
- struct ib_grh *grh;
- int ret;
-
- response = kmalloc(sizeof(struct ib_mad), GFP_KERNEL);
- if (!response)
- return 0;
-
- ret = mad_process_local(mad_agent, mad, response, slid);
- if (ret & IB_MAD_RESULT_SUCCESS) {
- grh = (void *)mad - sizeof(struct ib_grh);
- agent_mad_send(mad_agent, response, grh, mad_recv_wc);
- } else
- kfree(response);
- return 1;
-}
-
-int agent_recv_mad(struct ib_mad_agent *mad_agent,
- struct ib_mad *mad,
- struct ib_mad_recv_wc *mad_recv_wc,
- int phys_port_cnt)
-{
- int port_num;
-
- /* SM Directed Route or LID Routed class */
- if (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ||
- mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) {
- if (mad_agent->device->node_type != IB_NODE_SWITCH)
- port_num = mad_agent->port_num;
- else
- port_num = mad_recv_wc->wc->port_num;
- if (!smi_handle_smp_recv((struct ib_smp *)mad,
- mad_agent->device->node_type,
- port_num, phys_port_cnt)) {
- /* SMI failed receive */
- return 0;
- }
-
- if (smi_check_forward_smp((struct ib_smp *)mad)) {
- smi_send_smp(mad_agent,
- (struct ib_smp *)mad,
- mad_recv_wc,
- mad_recv_wc->wc->slid,
- phys_port_cnt);
- return 0;
- }
-
- } else {
- /* PerfMgmt class */
- if (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) {
- agent_mad_response(mad_agent, mad, mad_recv_wc,
- mad_recv_wc->wc->slid);
- } else {
- printk(KERN_ERR "agent_recv_mad: Unexpected mgmt class 0x%x received\n", mad->mad_hdr.mgmt_class);
- }
- return 0;
+ /* Get mad agent based on mgmt_class in MAD */
+ switch (mad->mad_hdr.mgmt_class) {
+ case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
+ mad_agent = port_priv->dr_smp_agent;
+ break;
+ case IB_MGMT_CLASS_SUBN_LID_ROUTED:
+ mad_agent = port_priv->lr_smp_agent;
+ break;
+ case IB_MGMT_CLASS_PERF_MGMT:
+ mad_agent = port_priv->perf_mgmt_agent;
+ break;
+ default:
+ return 1;
}
- /* Complete receive up stack */
- return 1;
+ /* Other fields don't matter so should change signature to just use wc */
+ mad_recv_wc.wc = wc;
+ return agent_mad_send(mad_agent, mad, grh, &mad_recv_wc);
}
static void agent_send_handler(struct ib_mad_agent *mad_agent,
@@ -596,26 +484,6 @@
kfree(agent_send_wr->mad);
}
-static void agent_recv_handler(struct ib_mad_agent *mad_agent,
- struct ib_mad_recv_wc *mad_recv_wc)
-{
- struct ib_agent_port_private *port_priv;
-
- /* Find matching MAD agent */
- port_priv = ib_get_agent_mad(NULL, 0, mad_agent);
- if (!port_priv) {
- printk(KERN_ERR SPFX "agent_recv_handler: no matching MAD agent %p\n",
- mad_agent);
- } else {
- agent_recv_mad(mad_agent,
- mad_recv_wc->recv_buf->mad,
- mad_recv_wc, port_priv->phys_port_cnt);
- }
-
- /* Free received MAD */
- ib_free_recv_mad(mad_recv_wc);
-}
-
int ib_agent_port_open(struct ib_device *device, int port_num,
int phys_port_cnt)
{
@@ -656,19 +524,12 @@
reg_req.mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
reg_req.mgmt_class_version = 1;
- /* SMA needs to receive Get, Set, and TrapRepress methods */
- bitmap_zero((unsigned long *)&reg_req.method_mask, IB_MGMT_MAX_METHODS);
- set_bit(IB_MGMT_METHOD_GET, (unsigned long *)&reg_req.method_mask);
- set_bit(IB_MGMT_METHOD_SET, (unsigned long *)&reg_req.method_mask);
- set_bit(IB_MGMT_METHOD_TRAP_REPRESS,
- (unsigned long *)&reg_req.method_mask);
-
port_priv->dr_smp_agent = ib_register_mad_agent(device, port_num,
IB_QPT_SMI,
- &reg_req, 0,
+ NULL, 0,
&agent_send_handler,
- &agent_recv_handler,
- NULL);
+ NULL, NULL);
+
if (IS_ERR(port_priv->dr_smp_agent)) {
ret = PTR_ERR(port_priv->dr_smp_agent);
goto error2;
@@ -678,10 +539,9 @@
reg_req.mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
port_priv->lr_smp_agent = ib_register_mad_agent(device, port_num,
IB_QPT_SMI,
- &reg_req, 0,
+ NULL, 0,
&agent_send_handler,
- &agent_recv_handler,
- NULL);
+ NULL, NULL);
if (IS_ERR(port_priv->lr_smp_agent)) {
ret = PTR_ERR(port_priv->lr_smp_agent);
goto error3;
@@ -689,14 +549,11 @@
/* Obtain MAD agent for PerfMgmt class */
reg_req.mgmt_class = IB_MGMT_CLASS_PERF_MGMT;
- clear_bit(IB_MGMT_METHOD_TRAP_REPRESS,
- (unsigned long *)&reg_req.method_mask);
- port_priv->perf_mgmt_agent = ib_register_mad_agent(device, port_num,
+ port_priv->perf_mgmt_agent = ib_register_mad_agent(device, port_num,
IB_QPT_GSI,
- &reg_req, 0,
+ NULL, 0,
&agent_send_handler,
- &agent_recv_handler,
- NULL);
+ NULL, NULL);
if (IS_ERR(port_priv->perf_mgmt_agent)) {
ret = PTR_ERR(port_priv->perf_mgmt_agent);
goto error4;
Index: roland-merge/src/linux-kernel/infiniband/core/mad.c
===================================================================
--- roland-merge/src/linux-kernel/infiniband/core/mad.c (revision 1125)
+++ roland-merge/src/linux-kernel/infiniband/core/mad.c (working copy)
@@ -747,13 +747,16 @@
struct ib_mad *mad,
int solicited)
{
- struct ib_mad_agent_private *entry, *mad_agent = NULL;
- struct ib_mad_mgmt_class_table *version;
- struct ib_mad_mgmt_method_table *class;
- u32 hi_tid;
+ struct ib_mad_agent_private *mad_agent = NULL;
+ unsigned long flags;
+ spin_lock_irqsave(&port_priv->reg_lock, flags);
+
/* Whether MAD was solicited determines type of routing to MAD client */
if (solicited) {
+ u32 hi_tid;
+ struct ib_mad_agent_private *entry;
+
/* Routing is based on high 32 bits of transaction ID of MAD */
hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
@@ -762,12 +765,14 @@
break;
}
}
- if (!mad_agent) {
+ if (!mad_agent)
printk(KERN_ERR PFX "No client 0x%x for received MAD "
- "on port %d\n", hi_tid, port_priv->port_num);
- goto out;
- }
+ "on port %d\n",
+ hi_tid, port_priv->port_num);
} else {
+ struct ib_mad_mgmt_class_table *version;
+ struct ib_mad_mgmt_method_table *class;
+
/* Routing is based on version, class, and method */
if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION) {
printk(KERN_ERR PFX "MAD received with unsupported "
@@ -776,32 +781,29 @@
goto out;
}
version = port_priv->version[mad->mad_hdr.class_version];
- if (!version) {
- printk(KERN_ERR PFX "MAD received on port %d for class "
- "version %d with no client\n",
- port_priv->port_num, mad->mad_hdr.class_version);
+ if (!version)
goto out;
- }
class = version->method_table[convert_mgmt_class(
mad->mad_hdr.mgmt_class)];
- if (!class) {
- printk(KERN_ERR PFX "MAD received on port %d for class "
- "%d with no client\n",
- port_priv->port_num, mad->mad_hdr.mgmt_class);
- goto out;
- }
- mad_agent = class->agent[mad->mad_hdr.method &
- ~IB_MGMT_METHOD_RESP];
+ if (class)
+ mad_agent = class->agent[mad->mad_hdr.method &
+ ~IB_MGMT_METHOD_RESP];
}
out:
- if (mad_agent && !mad_agent->agent.recv_handler) {
- printk(KERN_ERR PFX "No receive handler for client "
- "%p on port %d\n",
- &mad_agent->agent, port_priv->port_num);
- mad_agent = NULL;
+ if (mad_agent) {
+ if (mad_agent->agent.recv_handler)
+ atomic_inc(&mad_agent->refcount);
+ else {
+ mad_agent = NULL;
+ printk(KERN_ERR PFX "No receive handler for client "
+ "%p on port %d\n",
+ &mad_agent->agent, port_priv->port_num);
+ }
}
+ spin_unlock_irqrestore(&port_priv->reg_lock, flags);
+
return mad_agent;
}
@@ -922,6 +924,23 @@
}
}
+extern int smi_handle_dr_smp_recv(struct ib_smp *smp,
+ u8 node_type,
+ int port_num,
+ int phys_port_cnt);
+extern int smi_check_forward_dr_smp(struct ib_smp *smp);
+extern int smi_handle_dr_smp_send(struct ib_smp *smp,
+ u8 node_type,
+ int port_num);
+extern int smi_check_local_dr_smp(struct ib_smp *smp,
+ struct ib_device *device,
+ int port_num);
+extern int agent_send(struct ib_mad *mad,
+ struct ib_grh *grh,
+ struct ib_wc *wc,
+ struct ib_device *device,
+ int port_num);
+
static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
struct ib_wc *wc)
{
@@ -929,9 +948,9 @@
struct ib_mad_private_header *mad_priv_hdr;
struct ib_mad_private *recv;
struct ib_mad_list_head *mad_list;
- struct ib_mad_agent_private *mad_agent = NULL;
+ struct ib_mad_agent_private *mad_agent;
+ struct ib_smp *smp;
int solicited;
- unsigned long flags;
mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
qp_info = mad_list->mad_queue->qp_info;
@@ -957,31 +976,80 @@
if (!validate_mad(recv->header.recv_buf.mad, qp_info->qp->qp_num))
goto out;
- /* Snoop MAD ? */
- if (port_priv->device->snoop_mad)
- if (port_priv->device->snoop_mad(port_priv->device,
- (u8)port_priv->port_num,
- wc->slid,
- recv->header.recv_buf.mad))
+ if (recv->header.recv_buf.mad->mad_hdr.mgmt_class ==
+ IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
+ smp = (struct ib_smp *)recv->header.recv_buf.mad;
+ if (!smi_handle_dr_smp_recv(smp,
+ port_priv->device->node_type,
+ port_priv->port_num,
+ port_priv->phys_port_cnt))
goto out;
+ if (!smi_check_forward_dr_smp(smp))
+ goto out;
+ if (!smi_handle_dr_smp_send(smp,
+ port_priv->device->node_type,
+ port_priv->port_num))
+ goto out;
+ if (!smi_check_local_dr_smp(smp,
+ port_priv->device,
+ port_priv->port_num))
+ goto out;
+ }
- spin_lock_irqsave(&port_priv->reg_lock, flags);
+ /* Give driver "right of first refusal" on incoming MAD */
+ if (port_priv->device->process_mad) {
+ struct ib_mad *response;
+ struct ib_grh *grh;
+ int ret;
+
+ response = kmalloc(sizeof(struct ib_mad), GFP_KERNEL);
+ if (!response) {
+ printk(KERN_ERR PFX "No memory for response MAD\n");
+ /* Is it better to assume that it wouldn't be processed ? */
+ goto out;
+ }
+
+ ret = port_priv->device->process_mad(port_priv->device, 0,
+ port_priv->port_num,
+ wc->slid,
+ recv->header.recv_buf.mad,
+ response);
+ if ((ret & IB_MAD_RESULT_SUCCESS) &&
+ (ret & IB_MAD_RESULT_REPLY)) {
+ if (response->mad_hdr.mgmt_class ==
+ IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
+ if (!smi_handle_dr_smp_recv(
+ (struct ib_smp *)response,
+ port_priv->device->node_type,
+ port_priv->port_num,
+ port_priv->phys_port_cnt)) {
+ kfree(response);
+ goto out;
+ }
+ }
+ /* Send response */
+ grh = (void *)recv->header.recv_buf.mad - sizeof(struct ib_grh);
+ if (agent_send(response, grh, wc,
+ port_priv->device,
+ port_priv->port_num)) {
+ kfree(response);
+ goto out;
+ }
+ } else
+ kfree(response);
+ }
+
/* Determine corresponding MAD agent for incoming receive MAD */
solicited = solicited_mad(recv->header.recv_buf.mad);
mad_agent = find_mad_agent(port_priv, recv->header.recv_buf.mad,
solicited);
- if (!mad_agent) {
- spin_unlock_irqrestore(&port_priv->reg_lock, flags);
- printk(KERN_NOTICE PFX "No matching mad agent found for "
- "received MAD on port %d\n", port_priv->port_num);
- } else {
- atomic_inc(&mad_agent->refcount);
- spin_unlock_irqrestore(&port_priv->reg_lock, flags);
+ if (mad_agent) {
ib_mad_complete_recv(mad_agent, recv, solicited);
+ recv = NULL; /* recv is freed up via ib_mad_complete_recv */
}
out:
- if (!mad_agent) {
+ if (recv) {
/* Should this case be optimized ? */
kmem_cache_free(ib_mad_cache, recv);
}
@@ -1668,7 +1736,9 @@
* Open the port
* Create the QP, PD, MR, and CQ if needed
*/
-static int ib_mad_port_open(struct ib_device *device, int port_num)
+static int ib_mad_port_open(struct ib_device *device,
+ int port_num,
+ int num_ports)
{
int ret, cq_size;
u64 iova = 0;
@@ -1697,6 +1767,7 @@
memset(port_priv, 0, sizeof *port_priv);
port_priv->device = device;
port_priv->port_num = port_num;
+ port_priv->phys_port_cnt = num_ports;
spin_lock_init(&port_priv->reg_lock);
cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
@@ -1831,7 +1902,7 @@
cur_port = 1;
}
for (i = 0; i < num_ports; i++, cur_port++) {
- ret = ib_mad_port_open(device, cur_port);
+ ret = ib_mad_port_open(device, cur_port, num_ports);
if (ret) {
printk(KERN_ERR PFX "Couldn't open %s port %d\n",
device->name, cur_port);
Index: roland-merge/src/linux-kernel/infiniband/core/mad_priv.h
===================================================================
--- roland-merge/src/linux-kernel/infiniband/core/mad_priv.h (revision 1125)
+++ roland-merge/src/linux-kernel/infiniband/core/mad_priv.h (working copy)
@@ -156,6 +156,7 @@
struct list_head port_list;
struct ib_device *device;
int port_num;
+ int phys_port_cnt;
struct ib_cq *cq;
struct ib_pd *pd;
struct ib_mr *mr;
Index: roland-merge/src/linux-kernel/infiniband/hw/mthca/mthca_dev.h
===================================================================
--- roland-merge/src/linux-kernel/infiniband/hw/mthca/mthca_dev.h (revision 1125)
+++ roland-merge/src/linux-kernel/infiniband/hw/mthca/mthca_dev.h (working copy)
@@ -349,10 +349,6 @@
u16 slid,
struct ib_mad *in_mad,
struct ib_mad *out_mad);
-enum ib_snoop_mad_result mthca_snoop_mad(struct ib_device *ibdev,
- u8 port_num,
- u16 slid,
- struct ib_mad *mad);
static inline struct mthca_dev *to_mdev(struct ib_device *ibdev)
{
Index: roland-merge/src/linux-kernel/infiniband/hw/mthca/mthca_provider.c
===================================================================
--- roland-merge/src/linux-kernel/infiniband/hw/mthca/mthca_provider.c (revision 1125)
+++ roland-merge/src/linux-kernel/infiniband/hw/mthca/mthca_provider.c (working copy)
@@ -600,7 +600,6 @@
dev->ib_dev.attach_mcast = mthca_multicast_attach;
dev->ib_dev.detach_mcast = mthca_multicast_detach;
dev->ib_dev.process_mad = mthca_process_mad;
- dev->ib_dev.snoop_mad = mthca_snoop_mad;
ret = ib_register_device(&dev->ib_dev);
if (ret)
Index: roland-merge/src/linux-kernel/infiniband/hw/mthca/mthca_mad.c
===================================================================
--- roland-merge/src/linux-kernel/infiniband/hw/mthca/mthca_mad.c (revision 1125)
+++ roland-merge/src/linux-kernel/infiniband/hw/mthca/mthca_mad.c (working copy)
@@ -79,6 +79,16 @@
int err;
u8 status;
+ /* Forward locally generated traps to the SM */
+ if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
+ in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
+ slid == 0) {
+
+ /* XXX: forward locally generated MAD to SM */
+
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+ }
+
/*
* Only handle SM gets, sets and trap represses for SM class
*
@@ -137,21 +147,6 @@
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
-enum ib_snoop_mad_result mthca_snoop_mad(struct ib_device *ibdev,
- u8 port_num,
- u16 slid,
- struct ib_mad *mad)
-{
- if (mad->mad_hdr.method != IB_MGMT_METHOD_TRAP ||
- mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ||
- slid != 0)
- return IB_SNOOP_MAD_IGNORED;
-
- /* XXX: forward locally generated MAD to SM */
-
- return IB_SNOOP_MAD_CONSUMED;
-}
-
/*
* Local Variables:
* c-file-style: "linux"