Daniel Carvalho has uploaded this change for review. ( https://gem5-review.googlesource.com/c/public/gem5/+/17530 )

Change subject: mem-cache: Add match function to QueueEntry
......................................................................

mem-cache: Add match function to QueueEntry

Having the caller decide the matching logic is error-prone and
frequently ends up with the secure bit being forgotten. This change
adds a matching function to QueueEntry so that callers no longer have
to reimplement the address-and-secure comparison themselves.

As a side effect, blkAddr and isSecure are now protected members
(exposed through the getAddr() and isSecure() accessors), and a
findPending(const QueueEntry*) overload has been added.
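
The sketch below is illustrative only, not part of the patch; the
variable names mirror the ones used in the base.cc hunk further down:

    // Before: every caller repeats the comparison and can silently
    // drop the secure bit.
    MSHR *conflict_mshr =
        mshrQueue.findPending(wq_entry->blkAddr, wq_entry->isSecure);

    // After: the queue entry owns the comparison, so the secure bit
    // is always taken into account.
    MSHR *conflict_mshr = mshrQueue.findPending(wq_entry);

    // Internally, the queue lookup now defers to QueueEntry::match():
    //     bool match(const Addr addr, const bool is_secure) const {
    //         return (blkAddr == addr) && (_isSecure == is_secure);
    //     }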

Change-Id: I6e494a821c1e6e841ab103ec69632c0e1b269a08
Signed-off-by: Daniel R. Carvalho <[email protected]>
---
M src/mem/cache/base.cc
M src/mem/cache/cache.cc
M src/mem/cache/mshr.cc
M src/mem/cache/queue.hh
M src/mem/cache/queue_entry.hh
M src/mem/cache/write_queue_entry.cc
6 files changed, 57 insertions(+), 32 deletions(-)



diff --git a/src/mem/cache/base.cc b/src/mem/cache/base.cc
index 19655a5..9a8270d 100644
--- a/src/mem/cache/base.cc
+++ b/src/mem/cache/base.cc
@@ -729,9 +729,7 @@
     // full write buffer, otherwise we favour the miss requests
     if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) {
         // need to search MSHR queue for conflicting earlier miss.
-        MSHR *conflict_mshr =
-            mshrQueue.findPending(wq_entry->blkAddr,
-                                  wq_entry->isSecure);
+        MSHR *conflict_mshr = mshrQueue.findPending(wq_entry);

         if (conflict_mshr && conflict_mshr->order < wq_entry->order) {
             // Service misses in order until conflict is cleared.
@@ -744,9 +742,7 @@
         return wq_entry;
     } else if (miss_mshr) {
         // need to check for conflicting earlier writeback
-        WriteQueueEntry *conflict_mshr =
-            writeBuffer.findPending(miss_mshr->blkAddr,
-                                    miss_mshr->isSecure);
+        WriteQueueEntry *conflict_mshr = writeBuffer.findPending(miss_mshr);
         if (conflict_mshr) {
             // not sure why we don't check order here... it was in the
             // original code but commented out.
@@ -1556,7 +1552,7 @@
             // if we are currently write coalescing, hold on the
             // MSHR as many cycles extra as we need to completely
             // write a cache line
-            if (writeAllocator->delay(mshr->blkAddr)) {
+            if (writeAllocator->delay(mshr->getAddr())) {
                 Tick delay = blkSize / tgt_pkt->getSize() * clockPeriod();
                 DPRINTF(CacheVerbose, "Delaying pkt %s %llu ticks to allow "
                         "for write coalescing\n", tgt_pkt->print(), delay);
@@ -1566,11 +1562,11 @@
                 writeAllocator->reset();
             }
         } else {
-            writeAllocator->resetDelay(mshr->blkAddr);
+            writeAllocator->resetDelay(mshr->getAddr());
         }
     }

-    CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);
+    CacheBlk *blk = tags->findBlock(mshr->getAddr(), mshr->isSecure());

     // either a prefetch that is not present upstream, or a normal
     // MSHR request, proceed to get the packet to send downstream
@@ -2451,7 +2447,7 @@
     } else {
         // let our snoop responses go first if there are responses to
         // the same addresses
-        if (checkConflictingSnoop(entry->blkAddr)) {
+        if (checkConflictingSnoop(entry->getAddr())) {
             return;
         }
         waitingOnRetry = entry->sendPacket(cache);
diff --git a/src/mem/cache/cache.cc b/src/mem/cache/cache.cc
index 89e40f3..139c8a1 100644
--- a/src/mem/cache/cache.cc
+++ b/src/mem/cache/cache.cc
@@ -1326,7 +1326,7 @@

         // we should never have hardware prefetches to allocated
         // blocks
-        assert(!tags->findBlock(mshr->blkAddr, mshr->isSecure));
+        assert(!tags->findBlock(mshr->getAddr(), mshr->isSecure()));

         // We need to check the caches above us to verify that
         // they don't have a copy of this block in the dirty state
@@ -1372,7 +1372,7 @@
         if (snoop_pkt.isBlockCached()) {
             DPRINTF(Cache, "Block present, prefetch squashed by cache.  "
                     "Deallocating mshr target %#x.\n",
-                    mshr->blkAddr);
+                    mshr->getAddr());

             // Deallocate the mshr target
             if (mshrQueue.forceDeallocateTarget(mshr)) {
diff --git a/src/mem/cache/mshr.cc b/src/mem/cache/mshr.cc
index 5b93029..479a1b4 100644
--- a/src/mem/cache/mshr.cc
+++ b/src/mem/cache/mshr.cc
@@ -255,7 +255,7 @@
 {
     blkAddr = blk_addr;
     blkSize = blk_size;
-    isSecure = target->isSecure();
+    _isSecure = target->isSecure();
     readyTime = when_ready;
     order = _order;
     assert(target);
@@ -635,7 +635,7 @@
     // For other requests, we iterate over the individual targets
     // since that's where the actual data lies.
     if (pkt->isPrint()) {
-        pkt->trySatisfyFunctional(this, blkAddr, isSecure, blkSize, nullptr);
+        pkt->trySatisfyFunctional(this, blkAddr, _isSecure, blkSize, nullptr);
         return false;
     } else {
         return (targets.trySatisfyFunctional(pkt) ||
@@ -654,7 +654,7 @@
 {
     ccprintf(os, "%s[%#llx:%#llx](%s) %s %s %s state: %s %s %s %s %s %s\n",
              prefix, blkAddr, blkAddr + blkSize - 1,
-             isSecure ? "s" : "ns",
+             isSecure() ? "s" : "ns",
              isForward ? "Forward" : "",
              allocOnFill() ? "AllocOnFill" : "",
              needsWritable() ? "Wrtbl" : "",
diff --git a/src/mem/cache/queue.hh b/src/mem/cache/queue.hh
index 6c8a192..8c853d2 100644
--- a/src/mem/cache/queue.hh
+++ b/src/mem/cache/queue.hh
@@ -174,7 +174,7 @@
             // cacheable accesses being added to an WriteQueueEntry
             // serving an uncacheable access
             if (!(ignore_uncacheable && entry->isUncacheable()) &&
-                entry->blkAddr == blk_addr && entry->isSecure == is_secure) {
+                entry->match(blk_addr, is_secure)) {
                 return entry;
             }
         }
@@ -185,7 +185,8 @@
     {
         pkt->pushLabel(label);
         for (const auto& entry : allocatedList) {
-            if (entry->blkAddr == blk_addr && entry->trySatisfyFunctional(pkt)) {
+            if (entry->match(blk_addr, pkt->isSecure()) &&
+                entry->trySatisfyFunctional(pkt)) {
                 pkt->popLabel();
                 return true;
             }
@@ -196,14 +197,15 @@

     /**
      * Find any pending requests that overlap the given request.
+     *
      * @param blk_addr Block address.
      * @param is_secure True if the target memory space is secure.
-     * @return A pointer to the earliest matching WriteQueueEntry.
+     * @return A pointer to the earliest matching entry.
      */
     Entry* findPending(Addr blk_addr, bool is_secure) const
     {
         for (const auto& entry : readyList) {
-            if (entry->blkAddr == blk_addr && entry->isSecure == is_secure) {
+            if (entry->match(blk_addr, is_secure)) {
                 return entry;
             }
         }
@@ -211,6 +213,18 @@
     }

     /**
+     * Find any pending requests that overlap the given request of a
+     * different queue.
+     *
+     * @param entry The entry to be compared against.
+     * @return A pointer to the earliest matching entry.
+     */
+    Entry* findPending(const QueueEntry* entry) const
+    {
+        return findPending(entry->getAddr(), entry->isSecure());
+    }
+
+    /**
      * Returns the WriteQueueEntry at the head of the readyList.
      * @return The next request to service.
      */
diff --git a/src/mem/cache/queue_entry.hh b/src/mem/cache/queue_entry.hh
index 7ab9e4f..17426e7 100644
--- a/src/mem/cache/queue_entry.hh
+++ b/src/mem/cache/queue_entry.hh
@@ -69,6 +69,12 @@

   protected:

+    /** Block aligned address. */
+    Addr blkAddr;
+
+    /** True if the entry targets the secure memory space. */
+    bool _isSecure;
+
     /** Tick when ready to issue */
     Tick readyTime;

@@ -83,23 +89,32 @@
     /** Order number assigned to disambiguate writes and misses. */
     Counter order;

-    /** Block aligned address. */
-    Addr blkAddr;
-
     /** Block size of the cache. */
     unsigned blkSize;

-    /** True if the entry targets the secure memory space. */
-    bool isSecure;
-
-    QueueEntry() : readyTime(0), _isUncacheable(false),
-                   inService(false), order(0), blkAddr(0), blkSize(0),
-                   isSecure(false)
+    QueueEntry()
+        : blkAddr(0), _isSecure(false), readyTime(0), _isUncacheable(false),
+          inService(false), order(0), blkSize(0)
     {}

+    Addr getAddr() const { return blkAddr; }
+
+    bool isSecure() const { return _isSecure; }
+
     bool isUncacheable() const { return _isUncacheable; }

     /**
+     * Check if entry corresponds to the one being looked for.
+     *
+     * @param addr Address to match against.
+     * @param is_secure Whether the target should be in secure space or not.
+     * @return True if entry matches given information.
+     */
+    bool match(const Addr addr, const bool is_secure) const {
+        return (blkAddr == addr) && (_isSecure == is_secure);
+    }
+
+    /**
      * Send this queue entry as a downstream packet, with the exact
      * behaviour depending on the specific entry type.
      */
diff --git a/src/mem/cache/write_queue_entry.cc b/src/mem/cache/write_queue_entry.cc
index 6c8387b..eab9386 100644
--- a/src/mem/cache/write_queue_entry.cc
+++ b/src/mem/cache/write_queue_entry.cc
@@ -93,7 +93,7 @@
 {
     blkAddr = blk_addr;
     blkSize = blk_size;
-    isSecure = target->isSecure();
+    _isSecure = target->isSecure();
     readyTime = when_ready;
     order = _order;
     assert(target);
@@ -104,7 +104,7 @@
     // writes (writebacks and clean evictions)
     panic_if(!_isUncacheable && !targets.empty(),
              "Write queue entry %#llx should never have more than one "
-             "cacheable target", blkAddr);
+             "cacheable target", blk_addr);
     panic_if(!((target->isWrite() && _isUncacheable) ||
                (target->isEviction() && !_isUncacheable) ||
                target->cmd == MemCmd::WriteClean),
@@ -128,7 +128,7 @@
     // entity. For other requests, we iterate over the individual
     // targets since that's where the actual data lies.
     if (pkt->isPrint()) {
-        pkt->trySatisfyFunctional(this, blkAddr, isSecure, blkSize, nullptr);
+        pkt->trySatisfyFunctional(this, blkAddr, _isSecure, blkSize, nullptr);
         return false;
     } else {
         return targets.trySatisfyFunctional(pkt);
@@ -147,7 +147,7 @@
 {
     ccprintf(os, "%s[%#llx:%#llx](%s) %s %s %s state: %s %s %s %s %s\n",
              prefix, blkAddr, blkAddr + blkSize - 1,
-             isSecure ? "s" : "ns",
+             isSecure() ? "s" : "ns",
              _isUncacheable ? "Unc" : "",
              inService ? "InSvc" : "");


--
To view, visit https://gem5-review.googlesource.com/c/public/gem5/+/17530
To unsubscribe, or for help writing mail filters, visit https://gem5-review.googlesource.com/settings

Gerrit-Project: public/gem5
Gerrit-Branch: master
Gerrit-Change-Id: I6e494a821c1e6e841ab103ec69632c0e1b269a08
Gerrit-Change-Number: 17530
Gerrit-PatchSet: 1
Gerrit-Owner: Daniel Carvalho <[email protected]>
Gerrit-MessageType: newchange