Matthew Poremba has uploaded this change for review. ( https://gem5-review.googlesource.com/8303


Change subject: mem: Use token ports on DRAMCtrl side of system
......................................................................

mem: Use token ports on DRAMCtrl side of system

When interfacing with Ruby, MessageBuffers between AbstractController
and DRAMCtrl do resource checking to ensure finite buffering. Currently
checking of resources is not possible with queued ports, leading to
hidden infinite buffering around Ruby directories and memory. This
reuses the token port class designed for the GPUCoalescer to provide
resource checking at the directory. Currently this supports only resource
checking for requests from the directory to memory.

Change-Id: I3423b819ea4e2d6c33d3bdd11f20b3d7cd7e7e3e
---
M src/mem/dram_ctrl.cc
M src/mem/dram_ctrl.hh
M src/mem/ruby/slicc_interface/AbstractController.cc
M src/mem/ruby/slicc_interface/AbstractController.hh
4 files changed, 78 insertions(+), 41 deletions(-)



diff --git a/src/mem/dram_ctrl.cc b/src/mem/dram_ctrl.cc
index b1fff15..6df9350 100644
--- a/src/mem/dram_ctrl.cc
+++ b/src/mem/dram_ctrl.cc
@@ -66,6 +66,7 @@
     busStateNext(READ),
     nextReqEvent([this]{ processNextReqEvent(); }, name()),
     respondEvent([this]{ processRespondEvent(); }, name()),
+    backendEvent([this]{ processBackendEvent(); }, name()),
     deviceSize(p->device_size),
     deviceBusWidth(p->device_bus_width), burstLength(p->burst_length),
     deviceRowBufferSize(p->device_rowbuffer_size),
@@ -228,6 +229,10 @@
             assert(columnsPerStripe <= columnsPerRowBuffer);
         }
     }
+
+    // Setup the initial token count. Use the size of the unified buffer
+    // as the maximum number of requests the master can send.
+    port.sendTokens(unifiedBufferSize);
 }

 void
@@ -282,12 +287,7 @@
         return;
     }

-    // See how many reads and writes we can issue
-    int read_queue_space = readBufferSize
-                         - (readQueue.size() + respQueue.size());
-    int write_queue_space = writeBufferSize - writeQueue.size();
     int search_count = 0;
-
     for (auto iter = unifiedQueue.begin();
          iter != unifiedQueue.end(); ++iter) {
         // Emulate a maximum number of requests we can search per cycle
@@ -303,21 +303,19 @@
         // Look for as many requests up to the maximum search depth. Note
         // that write merging and read serviings by write queue are not
         // handled here and left to the split read and write queues.
-        if (pkt->isRead() && read_queue_space >= dram_pkt_count) {
+        if (pkt->isRead() && !readQueueFull(dram_pkt_count)) {
             DPRINTF(DRAM, "Read addr %lld moved from unified queue\n",
                     pkt->getAddr());
             inUnifiedQueue.erase(burstAlign(pkt->getAddr()));
             unifiedPending.insert(burstAlign(pkt->getAddr()));
             iter = unifiedQueue.erase(iter);
-            read_queue_space -= dram_pkt_count;
             issueTimingReq(pkt);
-        } else if (pkt->isWrite() && write_queue_space >= dram_pkt_count) {
+        } else if (pkt->isWrite() && !writeQueueFull(dram_pkt_count)) {
             DPRINTF(DRAM, "Write addr %lld moved from unified queue\n",
                     pkt->getAddr());
             inUnifiedQueue.erase(burstAlign(pkt->getAddr()));
             unifiedPending.insert(burstAlign(pkt->getAddr()));
             iter = unifiedQueue.erase(iter);
-            write_queue_space -= dram_pkt_count;
             issueTimingReq(pkt);
         }

@@ -996,6 +994,9 @@
         unifiedPending.erase(burstAlign(pkt->getAddr()));
     }

+    // Done with a request. Send back a token.
+    port.sendTokens(1);
+
     bool needsResponse = pkt->needsResponse();
     // do the actual memory access which also turns the packet into a
     // response
@@ -1016,7 +1017,17 @@

         // queue the packet in the response queue to be sent out after
         // the static latency has passed
-        port.schedTimingResp(pkt, response_time, true);
+        if (response_time == curTick()) {
+            port.sendTimingResp(pkt);
+        } else {
+            backendMap[response_time].push_back(pkt);
+
+            if (!backendEvent.scheduled()) {
+                schedule(backendEvent, response_time);
+            } else if (response_time < backendEvent.when()) {
+                reschedule(backendEvent, response_time);
+            }
+        }
     } else {
         // @todo the packet is going to be deleted, and the DRAMPacket
         // is still having a pointer to it
@@ -1029,6 +1040,25 @@
 }

 void
+DRAMCtrl::processBackendEvent()
+{
+    DPRINTF(DRAM, "Processing backend event\n");
+
+    std::deque<PacketPtr>& backendQueue = backendMap.begin()->second;
+    PacketPtr pkt = backendQueue.front();
+    backendQueue.pop_front();
+    if (backendQueue.empty()) {
+        backendMap.erase(backendMap.begin());
+    }
+
+    port.sendTimingResp(pkt);
+
+    if (!backendMap.empty()) {
+        schedule(backendEvent, backendMap.begin()->first);
+    }
+}
+
+void
 DRAMCtrl::activateBank(Rank& rank_ref, Bank& bank_ref,
                        Tick act_tick, uint32_t row)
 {
@@ -2851,8 +2881,7 @@
 }

DRAMCtrl::MemoryPort::MemoryPort(const std::string& name, DRAMCtrl& _memory)
-    : QueuedSlavePort(name, &_memory, queue), queue(_memory, *this),
-      memory(_memory)
+    : TokenSlavePort(name, &_memory), memory(_memory)
 { }

 AddrRangeList
@@ -2868,12 +2897,7 @@
 {
     pkt->pushLabel(memory.name());

-    if (!queue.checkFunctional(pkt)) {
-        // Default implementation of SimpleTimingPort::recvFunctional()
-        // calls recvAtomic() and throws away the latency; we can save a
-        // little here by just not calculating the latency.
-        memory.recvFunctional(pkt);
-    }
+    memory.recvFunctional(pkt);

     pkt->popLabel();
 }
diff --git a/src/mem/dram_ctrl.hh b/src/mem/dram_ctrl.hh
index dc5be36..0203a26 100644
--- a/src/mem/dram_ctrl.hh
+++ b/src/mem/dram_ctrl.hh
@@ -65,6 +65,7 @@
 #include "enums/PageManage.hh"
 #include "mem/abstract_mem.hh"
 #include "mem/qport.hh"
+#include "mem/token_port.hh"
 #include "params/DRAMCtrl.hh"
 #include "sim/eventq.hh"
 #include "mem/drampower.hh"
@@ -99,12 +100,8 @@

   private:

-    // For now, make use of a queued slave port to avoid dealing with
-    // flow control for the responses being sent back
-    class MemoryPort : public QueuedSlavePort
+    class MemoryPort : public TokenSlavePort
     {
-
-        RespPacketQueue queue;
         DRAMCtrl& memory;

       public:
@@ -713,6 +710,9 @@
     void processRespondEvent();
     EventFunctionWrapper respondEvent;

+    void processBackendEvent();
+    EventFunctionWrapper backendEvent;
+
     /**
      * Check the unified queue for any requests that may be able to be
      * copied into the separated read/write queues.
@@ -920,6 +920,13 @@
     std::deque<DRAMPacket*> respQueue;

     /**
+     * Backend queue is used to delay packet responses until the packet
+     * header delay, payload delay, and any frontend/backend latency have
+     * elapsed.
+     */
+    std::map<Tick, std::deque<PacketPtr>> backendMap;
+
+    /**
      * Vector of ranks
      */
     std::vector<Rank*> ranks;
diff --git a/src/mem/ruby/slicc_interface/AbstractController.cc b/src/mem/ruby/slicc_interface/AbstractController.cc
index 2869f4b..540579e 100644
--- a/src/mem/ruby/slicc_interface/AbstractController.cc
+++ b/src/mem/ruby/slicc_interface/AbstractController.cc
@@ -46,6 +46,7 @@
 #include "mem/ruby/system/GPUCoalescer.hh"
 #include "mem/ruby/system/RubySystem.hh"
 #include "mem/ruby/system/Sequencer.hh"
+#include "mem/token_port.hh"
 #include "sim/system.hh"

 AbstractController::AbstractController(const Params *p)
@@ -63,6 +64,10 @@
         // of this particular type.
         Stats::registerDumpCallback(new StatsCallback(this));
     }
+
+    // Setup token port for flow control
+    memPortTokens = new TokenManager(0);
+    memoryPort.setTokenManager(memPortTokens);
 }

 void
@@ -249,12 +254,18 @@
         // to make more progress. Make sure it wakes up
         scheduleEvent(Cycles(1));
         recvTimingResp(pkt);
-    } else {
+    } else if (memoryPort.haveTokens(1)) {
         getMemReqQueue()->dequeue(clockEdge());
         memoryPort.sendTimingReq(pkt);
+        memoryPort.acquireTokens(1);
         // Since the queue was popped the controller may be able
         // to make more progress. Make sure it wakes up
         scheduleEvent(Cycles(1));
+    } else {
+        // No tokens available: drop the request and free its allocations
+        delete s;
+        delete pkt->req;
+        delete pkt;
     }

     return true;
@@ -304,16 +315,9 @@
 int
 AbstractController::functionalMemoryWrite(PacketPtr pkt)
 {
-    int num_functional_writes = 0;
-
-    // Check the buffer from the controller to the memory.
-    if (memoryPort.checkFunctional(pkt)) {
-        num_functional_writes++;
-    }
-
     // Update memory itself.
     memoryPort.sendFunctional(pkt);
-    return num_functional_writes + 1;
+    return 1;
 }

 void
@@ -373,9 +377,6 @@
 AbstractController::MemoryPort::MemoryPort(const std::string &_name,
                                            AbstractController *_controller,
                                            const std::string &_label)
-    : QueuedMasterPort(_name, _controller, reqQueue, snoopRespQueue),
-      reqQueue(*_controller, *this, _label),
-      snoopRespQueue(*_controller, *this, _label),
-      controller(_controller)
+    : TokenMasterPort(_name, _controller), controller(_controller)
 {
 }
diff --git a/src/mem/ruby/slicc_interface/AbstractController.hh b/src/mem/ruby/slicc_interface/AbstractController.hh
index 0aa6789..0b98fdc 100644
--- a/src/mem/ruby/slicc_interface/AbstractController.hh
+++ b/src/mem/ruby/slicc_interface/AbstractController.hh
@@ -58,6 +58,7 @@
 #include "mem/ruby/common/MachineID.hh"
 #include "mem/ruby/network/MessageBuffer.hh"
 #include "mem/ruby/system/CacheRecorder.hh"
+#include "mem/token_port.hh"
 #include "params/RubyController.hh"

 class Network;
@@ -215,17 +216,17 @@
         void process() {ctr->collateStats();}
     };

+    // Manager for the number of tokens available to this directory to
+    // send memory requests to the memory controller.
+    TokenManager *memPortTokens;
+
     /**
      * Port that forwards requests and receives responses from the
      * memory controller.  It has a queue of packets not yet sent.
      */
-    class MemoryPort : public QueuedMasterPort
+    class MemoryPort : public TokenMasterPort
     {
       private:
- // Packet queues used to store outgoing requests and snoop responses.
-        ReqPacketQueue reqQueue;
-        SnoopRespPacketQueue snoopRespQueue;
-
         // Controller that operates this port.
         AbstractController *controller;

@@ -237,6 +238,10 @@
         // Currently the pkt is handed to the coherence controller
         // associated with this port.
         bool recvTimingResp(PacketPtr pkt);
+
+      protected:
+        virtual void recvReqRetry()
+        { fatal("TokenPort received unexpected retry."); }
     };

     /* Master port to the memory controller. */

--
To view, visit https://gem5-review.googlesource.com/8303
To unsubscribe, or for help writing mail filters, visit https://gem5-review.googlesource.com/settings

Gerrit-Project: public/gem5
Gerrit-Branch: master
Gerrit-MessageType: newchange
Gerrit-Change-Id: I3423b819ea4e2d6c33d3bdd11f20b3d7cd7e7e3e
Gerrit-Change-Number: 8303
Gerrit-PatchSet: 1
Gerrit-Owner: Matthew Poremba <matthew.pore...@amd.com>
_______________________________________________
gem5-dev mailing list
gem5-dev@gem5.org
http://m5sim.org/mailman/listinfo/gem5-dev

Reply via email to