Re: [PATCH net-next] hv_netvsc: Add NetVSP v6 and v6.1 into version negotiation

2018-04-18 Thread David Miller
From: Haiyang Zhang 
Date: Tue, 17 Apr 2018 15:31:47 -0700

> From: Haiyang Zhang 
> 
> This patch adds the NetVSP v6 and v6.1 message structures and includes
> these versions in the NetVSC/NetVSP version negotiation process.
> 
> Signed-off-by: Haiyang Zhang 

Applied to net-next, thank you.


[PATCH net-next] hv_netvsc: Add NetVSP v6 and v6.1 into version negotiation

2018-04-17 Thread Haiyang Zhang
From: Haiyang Zhang 

This patch adds the NetVSP v6 and v6.1 message structures and includes
these versions in the NetVSC/NetVSP version negotiation process.

Signed-off-by: Haiyang Zhang 
---
 drivers/net/hyperv/hyperv_net.h | 164 ++++++++++++++++++++++++++++++++++++++++
 drivers/net/hyperv/netvsc.c     |   3 +-
 2 files changed, 166 insertions(+), 1 deletion(-)

diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 960f06141472..6ebe39a3dde6 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -237,6 +237,8 @@ void netvsc_switch_datapath(struct net_device *nv_dev, bool vf);
 #define NVSP_PROTOCOL_VERSION_2	0x30002
 #define NVSP_PROTOCOL_VERSION_4	0x40000
 #define NVSP_PROTOCOL_VERSION_5	0x50000
+#define NVSP_PROTOCOL_VERSION_6	0x60000
+#define NVSP_PROTOCOL_VERSION_61	0x60001
 
 enum {
NVSP_MSG_TYPE_NONE = 0,
@@ -308,6 +310,12 @@ enum {
NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE,
 
NVSP_MSG5_MAX = NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE,
+
+   /* Version 6 messages */
+   NVSP_MSG6_TYPE_PD_API,
+   NVSP_MSG6_TYPE_PD_POST_BATCH,
+
+   NVSP_MSG6_MAX = NVSP_MSG6_TYPE_PD_POST_BATCH
 };
 
 enum {
@@ -619,12 +627,168 @@ union nvsp_5_message_uber {
struct nvsp_5_send_indirect_table send_table;
 } __packed;
 
+enum nvsp_6_pd_api_op {
+   PD_API_OP_CONFIG = 1,
+   PD_API_OP_SW_DATAPATH, /* Switch Datapath */
+   PD_API_OP_OPEN_PROVIDER,
+   PD_API_OP_CLOSE_PROVIDER,
+   PD_API_OP_CREATE_QUEUE,
+   PD_API_OP_FLUSH_QUEUE,
+   PD_API_OP_FREE_QUEUE,
+   PD_API_OP_ALLOC_COM_BUF, /* Allocate Common Buffer */
+   PD_API_OP_FREE_COM_BUF, /* Free Common Buffer */
+   PD_API_OP_MAX
+};
+
+struct grp_affinity {
+   u64 mask;
+   u16 grp;
+   u16 reserved[3];
+} __packed;
+
+struct nvsp_6_pd_api_req {
+   u32 op;
+
+   union {
+   /* MMIO information is sent from the VM to VSP */
+   struct __packed {
+   u64 mmio_pa; /* MMIO Physical Address */
+   u32 mmio_len;
+
+   /* Number of PD queues a VM can support */
+   u16 num_subchn;
+   } config;
+
+   /* Switch Datapath */
+   struct __packed {
+   /* Host Datapath Is PacketDirect */
+   u8 host_dpath_is_pd;
+
+   /* Guest PacketDirect Is Enabled */
+   u8 guest_pd_enabled;
+   } sw_dpath;
+
+   /* Open Provider */
+   struct __packed {
+   u32 prov_id; /* Provider id */
+   u32 flag;
+   } open_prov;
+
+   /* Close Provider */
+   struct __packed {
+   u32 prov_id;
+   } cls_prov;
+
+   /* Create Queue */
+   struct __packed {
+   u32 prov_id;
+   u16 q_id;
+   u16 q_size;
+   u8 is_recv_q;
+   u8 is_rss_q;
+   u32 recv_data_len;
+   struct grp_affinity affy;
+   } cr_q;
+
+   /* Delete Queue */
+   struct __packed {
+   u32 prov_id;
+   u16 q_id;
+   } del_q;
+
+   /* Flush Queue */
+   struct __packed {
+   u32 prov_id;
+   u16 q_id;
+   } flush_q;
+
+   /* Allocate Common Buffer */
+   struct __packed {
+   u32 len;
+   u32 pf_node; /* Preferred Node */
+   u16 region_id;
+   } alloc_com_buf;
+
+   /* Free Common Buffer */
+   struct __packed {
+   u32 len;
+   u64 pa; /* Physical Address */
+   u32 pf_node; /* Preferred Node */
+   u16 region_id;
+   u8 cache_type;
+   } free_com_buf;
+   } __packed;
+} __packed;
+
+struct nvsp_6_pd_api_comp {
+   u32 op;
+   u32 status;
+
+   union {
+   struct __packed {
+   /* actual number of PD queues allocated to the VM */
+   u16 num_pd_q;
+
+   /* Num Receive Rss PD Queues */
+   u8 num_rss_q;
+
+   u8 is_supported; /* Is supported by VSP */
+   u8 is_enabled; /* Is enabled by VSP */
+   } config;
+
+   /* Open Provider */
+   struct __packed {
+   u32 prov_id;
+   } open_prov;
+
+   /* Create Queue */
+   struct __packed {
+   u32 prov_id;
+

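For reference on the constants added in the first hunk: the v6 values pack the
major and minor version numbers as (major << 16) | minor, so v6.0 is 0x60000
and v6.1 is (6 << 16) | 1 == 0x60001. A minimal compile-time check, as a
sketch only (not part of the patch):

	/* Sketch, not from the patch: verify the major/minor packing */
	BUILD_BUG_ON(NVSP_PROTOCOL_VERSION_6  != (6 << 16));
	BUILD_BUG_ON(NVSP_PROTOCOL_VERSION_61 != ((6 << 16) | 1));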
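The archived message truncates above; the rest of the hyperv_net.h hunk and
the netvsc.c hunk are not shown. Based on the diffstat (2 insertions and 1
deletion in netvsc.c), the negotiation side of the change is presumably of
the following shape: the new constants join the list of versions the guest
offers to the host, which the driver walks from newest to oldest until one
is accepted. This is a reconstruction sketch, not quoted from the original
mail; the ver_list name and its netvsc_connect_vsp() location are assumptions.

--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
 	/* Sketch: versions offered during NetVSC/NetVSP negotiation,
 	 * tried from newest to oldest (function location assumed).
 	 */
 	static const u32 ver_list[] = {
 		NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
-		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5
+		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5,
+		NVSP_PROTOCOL_VERSION_6, NVSP_PROTOCOL_VERSION_61
 	};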