REF:https://bugzilla.tianocore.org/show_bug.cgi?id=1260

For the PassThru() service of NVM Express Pass Through Protocol, the
current implementation (function NvmExpressPassThru()) will only use the
IO Completion/Submission queues created internally by this driver during
the controller initialization process. Any other IO queues created will
not be consumed.

So there is little value in accepting external IO Completion/Submission
queue creation requests. This commit refines the behavior of function
NvmExpressPassThru(): it will only accept driver-internal IO queue
creation commands and will return "EFI_UNSUPPORTED" for external ones.

Cc: Jiewen Yao <[email protected]>
Cc: Ruiyu Ni <[email protected]>
Cc: Star Zeng <[email protected]>
Contributed-under: TianoCore Contribution Agreement 1.1
Signed-off-by: Hao Wu <[email protected]>
---
 MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpress.h         |  7 +++++-
 MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpressHci.c      |  6 +++++
 MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpressPassthru.c | 25 
+++++++++++++-------
 3 files changed, 29 insertions(+), 9 deletions(-)

diff --git a/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpress.h 
b/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpress.h
index ad0d9b8966..fe7d37c118 100644
--- a/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpress.h
+++ b/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpress.h
@@ -3,7 +3,7 @@
   NVM Express specification.
 
   (C) Copyright 2016 Hewlett Packard Enterprise Development LP<BR>
-  Copyright (c) 2013 - 2017, Intel Corporation. All rights reserved.<BR>
+  Copyright (c) 2013 - 2018, Intel Corporation. All rights reserved.<BR>
   This program and the accompanying materials
   are licensed and made available under the terms and conditions of the BSD 
License
   which accompanies this distribution.  The full text of the license may be 
found at
@@ -147,6 +147,11 @@ struct _NVME_CONTROLLER_PRIVATE_DATA {
   NVME_CQHDBL                         CqHdbl[NVME_MAX_QUEUES];
   UINT16                              AsyncSqHead;
 
+  //
+  // Flag to indicate internal IO queue creation.
+  //
+  BOOLEAN                             CreateIoQueue;
+
   UINT8                               Pt[NVME_MAX_QUEUES];
   UINT16                              Cid[NVME_MAX_QUEUES];
 
diff --git a/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpressHci.c 
b/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpressHci.c
index 421561f16d..4a070f3f13 100644
--- a/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpressHci.c
+++ b/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpressHci.c
@@ -584,6 +584,7 @@ NvmeCreateIoCompletionQueue (
   UINT16                                   QueueSize;
 
   Status = EFI_SUCCESS;
+  Private->CreateIoQueue = TRUE;
 
   for (Index = 1; Index < NVME_MAX_QUEUES; Index++) {
     ZeroMem (&CommandPacket, sizeof(EFI_NVM_EXPRESS_PASS_THRU_COMMAND_PACKET));
@@ -627,6 +628,8 @@ NvmeCreateIoCompletionQueue (
     }
   }
 
+  Private->CreateIoQueue = FALSE;
+
   return Status;
 }
 
@@ -653,6 +656,7 @@ NvmeCreateIoSubmissionQueue (
   UINT16                                   QueueSize;
 
   Status = EFI_SUCCESS;
+  Private->CreateIoQueue = TRUE;
 
   for (Index = 1; Index < NVME_MAX_QUEUES; Index++) {
     ZeroMem (&CommandPacket, sizeof(EFI_NVM_EXPRESS_PASS_THRU_COMMAND_PACKET));
@@ -698,6 +702,8 @@ NvmeCreateIoSubmissionQueue (
     }
   }
 
+  Private->CreateIoQueue = FALSE;
+
   return Status;
 }
 
diff --git a/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpressPassthru.c 
b/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpressPassthru.c
index c52e960771..78464ff422 100644
--- a/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpressPassthru.c
+++ b/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpressPassthru.c
@@ -587,14 +587,23 @@ NvmExpressPassThru (
   }
 
   Sq->Prp[0] = (UINT64)(UINTN)Packet->TransferBuffer;
-  //
-  // If the NVMe cmd has data in or out, then mapping the user buffer to the 
PCI controller specific addresses.
-  // Note here we don't handle data buffer for CreateIOSubmitionQueue and 
CreateIOCompletionQueue cmds because
-  // these two cmds are special which requires their data buffer must support 
simultaneous access by both the
-  // processor and a PCI Bus Master. It's caller's responsbility to ensure 
this.
-  //
-  if (((Sq->Opc & (BIT0 | BIT1)) != 0) &&
-      !((Packet->QueueType == NVME_ADMIN_QUEUE) && ((Sq->Opc == 
NVME_ADMIN_CRIOCQ_CMD) || (Sq->Opc == NVME_ADMIN_CRIOSQ_CMD)))) {
+  if ((Packet->QueueType == NVME_ADMIN_QUEUE) &&
+      ((Sq->Opc == NVME_ADMIN_CRIOCQ_CMD) || (Sq->Opc == 
NVME_ADMIN_CRIOSQ_CMD))) {
+    //
+    // Currently, we only use the IO Completion/Submission queues created 
internally
+    // by this driver during controller initialization. Any other IO queues 
created
+    // will not be consumed here. The value is little to accept external IO 
queue
+    // creation requests, so here we will return EFI_UNSUPPORTED for external 
IO
+    // queue creation request.
+    //
+    if (!Private->CreateIoQueue) {
+      DEBUG ((DEBUG_ERROR, "NvmExpressPassThru: Does not support external IO 
queues creation request.\n"));
+      return EFI_UNSUPPORTED;
+    }
+  } else if ((Sq->Opc & (BIT0 | BIT1)) != 0) {
+    //
+    // If the NVMe cmd has data in or out, then mapping the user buffer to the 
PCI controller specific addresses.
+    //
     if (((Packet->TransferLength != 0) && (Packet->TransferBuffer == NULL)) ||
         ((Packet->TransferLength == 0) && (Packet->TransferBuffer != NULL))) {
       return EFI_INVALID_PARAMETER;
-- 
2.12.0.windows.1

_______________________________________________
edk2-devel mailing list
[email protected]
https://lists.01.org/mailman/listinfo/edk2-devel

Reply via email to