changeset b3926db25371 in /z/repo/gem5
details: http://repo.gem5.org/gem5?cmd=changeset;node=b3926db25371
description:
        mem: Make cache terminology easier to understand

        This patch changes the name of a bunch of packet flags and MSHR member
        functions and variables to make the coherency protocol easier to
        understand. In addition the patch adds and updates lots of
        descriptions, explicitly spelling out assumptions.

        The following name changes are made:

        * the packet memInhibit flag is renamed to cacheResponding

        * the packet sharedAsserted flag is renamed to hasSharers

        * the packet NeedsExclusive attribute is renamed to NeedsWritable

        * the packet isSupplyExclusive is renamed to responderHadWritable

        * the MSHR pendingDirty is renamed to pendingModified

        The cache states — Modified, Owned, Exclusive, and Shared — are also
        called out in the cache and MSHR code to make it easier to understand.

diffstat:

 src/cpu/o3/cpu.cc                   |    4 +-
 src/dev/dma_device.cc               |    4 +-
 src/mem/abstract_mem.cc             |    4 +-
 src/mem/addr_mapper.cc              |    7 +-
 src/mem/bridge.cc                   |    6 +-
 src/mem/cache/base.hh               |    4 +-
 src/mem/cache/blk.hh                |   13 +-
 src/mem/cache/cache.cc              |  435 +++++++++++++++++++----------------
 src/mem/cache/cache.hh              |   13 +-
 src/mem/cache/mshr.cc               |  100 ++++---
 src/mem/cache/mshr.hh               |   45 ++-
 src/mem/cache/mshr_queue.cc         |    4 +-
 src/mem/cache/mshr_queue.hh         |    6 +-
 src/mem/coherent_xbar.cc            |   27 +-
 src/mem/comm_monitor.cc             |    9 +-
 src/mem/dram_ctrl.cc                |    6 +-
 src/mem/dramsim2.cc                 |    6 +-
 src/mem/hmc_controller.cc           |    5 +-
 src/mem/mem_checker_monitor.cc      |   10 +-
 src/mem/noncoherent_xbar.cc         |    5 +-
 src/mem/packet.cc                   |   36 +-
 src/mem/packet.hh                   |  121 ++++++++-
 src/mem/ruby/system/DMASequencer.cc |    5 +-
 src/mem/ruby/system/RubyPort.cc     |    5 +-
 src/mem/serial_link.cc              |    2 +-
 src/mem/simple_mem.cc               |    6 +-
 src/mem/snoop_filter.cc             |   18 +-
 src/mem/tport.cc                    |    8 +-
 28 files changed, 539 insertions(+), 375 deletions(-)

diffs (truncated from 1936 to 300 lines):

diff -r 4cc8b312f026 -r b3926db25371 src/cpu/o3/cpu.cc
--- a/src/cpu/o3/cpu.cc Mon Jul 20 09:15:18 2015 -0500
+++ b/src/cpu/o3/cpu.cc Thu Dec 31 09:32:58 2015 -0500
@@ -92,9 +92,9 @@
 FullO3CPU<Impl>::IcachePort::recvTimingResp(PacketPtr pkt)
 {
     DPRINTF(O3CPU, "Fetch unit received timing\n");
-    // We shouldn't ever get a cacheable block in ownership state
+    // We shouldn't ever get a cacheable block in Modified state
     assert(pkt->req->isUncacheable() ||
-           !(pkt->memInhibitAsserted() && !pkt->sharedAsserted()));
+           !(pkt->cacheResponding() && !pkt->hasSharers()));
     fetch->processCacheCompletion(pkt);
 
     return true;
diff -r 4cc8b312f026 -r b3926db25371 src/dev/dma_device.cc
--- a/src/dev/dma_device.cc     Mon Jul 20 09:15:18 2015 -0500
+++ b/src/dev/dma_device.cc     Thu Dec 31 09:32:58 2015 -0500
@@ -105,9 +105,9 @@
 bool
 DmaPort::recvTimingResp(PacketPtr pkt)
 {
-    // We shouldn't ever get a cacheable block in ownership state
+    // We shouldn't ever get a cacheable block in Modified state
     assert(pkt->req->isUncacheable() ||
-           !(pkt->memInhibitAsserted() && !pkt->sharedAsserted()));
+           !(pkt->cacheResponding() && !pkt->hasSharers()));
 
     handleResp(pkt);
 
diff -r 4cc8b312f026 -r b3926db25371 src/mem/abstract_mem.cc
--- a/src/mem/abstract_mem.cc   Mon Jul 20 09:15:18 2015 -0500
+++ b/src/mem/abstract_mem.cc   Thu Dec 31 09:32:58 2015 -0500
@@ -323,8 +323,8 @@
 void
 AbstractMemory::access(PacketPtr pkt)
 {
-    if (pkt->memInhibitAsserted()) {
-        DPRINTF(MemoryAccess, "mem inhibited on 0x%x: not responding\n",
+    if (pkt->cacheResponding()) {
+        DPRINTF(MemoryAccess, "Cache responding to %#llx: not responding\n",
                 pkt->getAddr());
         return;
     }
diff -r 4cc8b312f026 -r b3926db25371 src/mem/addr_mapper.cc
--- a/src/mem/addr_mapper.cc    Mon Jul 20 09:15:18 2015 -0500
+++ b/src/mem/addr_mapper.cc    Thu Dec 31 09:32:58 2015 -0500
@@ -116,16 +116,15 @@
 {
     Addr orig_addr = pkt->getAddr();
     bool needsResponse = pkt->needsResponse();
-    bool memInhibitAsserted = pkt->memInhibitAsserted();
+    bool cacheResponding = pkt->cacheResponding();
 
-    if (needsResponse && !memInhibitAsserted) {
+    if (needsResponse && !cacheResponding) {
         pkt->pushSenderState(new AddrMapperSenderState(orig_addr));
     }
 
     pkt->setAddr(remapAddr(orig_addr));
 
-    // Attempt to send the packet (always succeeds for inhibited
-    // packets)
+    // Attempt to send the packet
     bool successful = masterPort.sendTimingReq(pkt);
 
     // If not successful, restore the address and sender state
diff -r 4cc8b312f026 -r b3926db25371 src/mem/bridge.cc
--- a/src/mem/bridge.cc Mon Jul 20 09:15:18 2015 -0500
+++ b/src/mem/bridge.cc Thu Dec 31 09:32:58 2015 -0500
@@ -154,9 +154,9 @@
     DPRINTF(Bridge, "recvTimingReq: %s addr 0x%x\n",
             pkt->cmdString(), pkt->getAddr());
 
-    // sink inhibited packets without further action, also discard any
-    // packet that is not a read or a write
-    if (pkt->memInhibitAsserted() ||
+    // if a cache is responding, sink the packet without further
+    // action, also discard any packet that is not a read or a write
+    if (pkt->cacheResponding() ||
         !(pkt->isWrite() || pkt->isRead())) {
         assert(!pkt->needsResponse());
         pendingDelete.reset(pkt);
diff -r 4cc8b312f026 -r b3926db25371 src/mem/cache/base.hh
--- a/src/mem/cache/base.hh     Mon Jul 20 09:15:18 2015 -0500
+++ b/src/mem/cache/base.hh     Thu Dec 31 09:32:58 2015 -0500
@@ -224,11 +224,11 @@
         return mshr;
     }
 
-    void markInServiceInternal(MSHR *mshr, bool pending_dirty_resp)
+    void markInServiceInternal(MSHR *mshr, bool pending_modified_resp)
     {
         MSHRQueue *mq = mshr->queue;
         bool wasFull = mq->isFull();
-        mq->markInService(mshr, pending_dirty_resp);
+        mq->markInService(mshr, pending_modified_resp);
         if (wasFull && !mq->isFull()) {
             clearBlocked((BlockedCause)mq->index);
         }
diff -r 4cc8b312f026 -r b3926db25371 src/mem/cache/blk.hh
--- a/src/mem/cache/blk.hh      Mon Jul 20 09:15:18 2015 -0500
+++ b/src/mem/cache/blk.hh      Thu Dec 31 09:32:58 2015 -0500
@@ -281,7 +281,7 @@
 
     /**
      * Pretty-print a tag, and interpret state bits to readable form
-     * including mapping to a MOESI stat.
+     * including mapping to a MOESI state.
      *
      * @return string with basic state information
      */
@@ -299,6 +299,17 @@
          *  E       1           0       1
          *  S       0           0       1
          *  I       0           0       0
+         *
+         * Note that only one cache ever has a block in Modified or
+         * Owned state, i.e., only one cache owns the block, or
+         * equivalently has the BlkDirty bit set. However, multiple
+         * caches on the same path to memory can have a block in the
+         * Exclusive state (despite the name). Exclusive means this
+         * cache has the only copy at this level of the hierarchy,
+         * i.e., there may be copies in caches above this cache (in
+         * various states), but there are no peers that have copies on
+         * this branch of the hierarchy, and no caches at or above
+         * this level on any other branch have copies either.
          **/
         unsigned state = isWritable() << 2 | isDirty() << 1 | isValid();
         char s = '?';
diff -r 4cc8b312f026 -r b3926db25371 src/mem/cache/cache.cc
--- a/src/mem/cache/cache.cc    Mon Jul 20 09:15:18 2015 -0500
+++ b/src/mem/cache/cache.cc    Thu Dec 31 09:32:58 2015 -0500
@@ -157,7 +157,7 @@
     // can satisfy a following ReadEx anyway since we can rely on the
     // Read requester(s) to have buffered the ReadEx snoop and to
     // invalidate their blocks after receiving them.
-    // assert(!pkt->needsExclusive() || blk->isWritable());
+    // assert(!pkt->needsWritable() || blk->isWritable());
     assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
 
     // Check RMW operations first since both isRead() and
@@ -165,15 +165,19 @@
     if (pkt->cmd == MemCmd::SwapReq) {
         cmpAndSwap(blk, pkt);
     } else if (pkt->isWrite()) {
+        // we have the block in a writable state and can go ahead,
+        // note that the line may be also be considered writable in
+        // downstream caches along the path to memory, but always
+        // Exclusive, and never Modified
         assert(blk->isWritable());
-        // Write or WriteLine at the first cache with block in Exclusive
+        // Write or WriteLine at the first cache with block in writable state
         if (blk->checkWrite(pkt)) {
             pkt->writeDataToBlock(blk->data, blkSize);
         }
-        // Always mark the line as dirty even if we are a failed
-        // StoreCond so we supply data to any snoops that have
-        // appended themselves to this cache before knowing the store
-        // will fail.
+        // Always mark the line as dirty (and thus transition to the
+        // Modified state) even if we are a failed StoreCond so we
+        // supply data to any snoops that have appended themselves to
+        // this cache before knowing the store will fail.
         blk->status |= BlkDirty;
         DPRINTF(Cache, "%s for %s addr %#llx size %d (write)\n", __func__,
                 pkt->cmdString(), pkt->getAddr(), pkt->getSize());
@@ -193,79 +197,80 @@
             assert(pkt->getSize() == blkSize);
             // special handling for coherent block requests from
             // upper-level caches
-            if (pkt->needsExclusive()) {
+            if (pkt->needsWritable()) {
                 // sanity check
                 assert(pkt->cmd == MemCmd::ReadExReq ||
                        pkt->cmd == MemCmd::SCUpgradeFailReq);
 
                 // if we have a dirty copy, make sure the recipient
-                // keeps it marked dirty
+                // keeps it marked dirty (in the modified state)
                 if (blk->isDirty()) {
-                    pkt->assertMemInhibit();
+                    pkt->setCacheResponding();
                 }
                 // on ReadExReq we give up our copy unconditionally,
                 // even if this cache is mostly inclusive, we may want
                 // to revisit this
                 invalidateBlock(blk);
             } else if (blk->isWritable() && !pending_downgrade &&
-                       !pkt->sharedAsserted() &&
+                       !pkt->hasSharers() &&
                        pkt->cmd != MemCmd::ReadCleanReq) {
-                // we can give the requester an exclusive copy (by not
-                // asserting shared line) on a read request if:
-                // - we have an exclusive copy at this level (& below)
+                // we can give the requester a writable copy on a read
+                // request if:
+                // - we have a writable copy at this level (& below)
                 // - we don't have a pending snoop from below
                 //   signaling another read request
                 // - no other cache above has a copy (otherwise it
-                //   would have asseretd shared line on request)
-                // - we are not satisfying an instruction fetch (this
-                //   prevents dirty data in the i-cache)
-
+                //   would have set hasSharers flag when
+                //   snooping the packet)
+                // - the read has explicitly asked for a clean
+                //   copy of the line
                 if (blk->isDirty()) {
                     // special considerations if we're owner:
                     if (!deferred_response) {
-                        // if we are responding immediately and can
-                        // signal that we're transferring ownership
-                        // (inhibit set) along with exclusivity
-                        // (shared not set), do so
-                        pkt->assertMemInhibit();
+                        // respond with the line in Modified state
+                        // (cacheResponding set, hasSharers not set)
+                        pkt->setCacheResponding();
 
-                        // if this cache is mostly inclusive, we keep
-                        // the block as writable (exclusive), and pass
-                        // it upwards as writable and dirty
-                        // (modified), hence we have multiple caches
-                        // considering the same block writable,
-                        // something that we get away with due to the
-                        // fact that: 1) this cache has been
-                        // considered the ordering points and
-                        // responded to all snoops up till now, and 2)
-                        // we always snoop upwards before consulting
-                        // the local cache, both on a normal request
-                        // (snooping done by the crossbar), and on a
-                        // snoop
-                        blk->status &= ~BlkDirty;
+                        if (clusivity == Enums::mostly_excl) {
+                            // if this cache is mostly exclusive with
+                            // respect to the cache above, drop the
+                            // block, no need to first unset the dirty
+                            // bit
+                            invalidateBlock(blk);
+                        } else {
+                            // if this cache is mostly inclusive, we
+                            // keep the block in the Exclusive state,
+                            // and pass it upwards as Modified
+                            // (writable and dirty), hence we have
+                            // multiple caches, all on the same path
+                            // towards memory, all considering the
+                            // same block writable, but only one
+                            // considering it Modified
 
-                        // if this cache is mostly exclusive with
-                        // respect to the cache above, drop the block
-                        if (clusivity == Enums::mostly_excl) {
-                            invalidateBlock(blk);
+                            // we get away with multiple caches (on
+                            // the same path to memory) considering
+                            // the block writeable as we always enter
+                            // the cache hierarchy through a cache,
+                            // and first snoop upwards in all other
+                            // branches
+                            blk->status &= ~BlkDirty;
                         }
                     } else {
                         // if we're responding after our own miss,
                         // there's a window where the recipient didn't
                         // know it was getting ownership and may not
                         // have responded to snoops correctly, so we
-                        // can't pass off ownership *or* exclusivity
-                        pkt->assertShared();
+                        // have to respond with a shared line
+                        pkt->setHasSharers();
                     }
                 }
             } else {
                 // otherwise only respond with a shared copy
-                pkt->assertShared();
+                pkt->setHasSharers();
             }
         }
     } else {
-        // Upgrade or Invalidate, since we have it Exclusively (E or
-        // M), we ack then invalidate.
+        // Upgrade or Invalidate
         assert(pkt->isUpgrade() || pkt->isInvalidate());
 
         // for invalidations we could be looking at the temp block
@@ -285,9 +290,9 @@
 
 
 void
-Cache::markInService(MSHR *mshr, bool pending_dirty_resp)
+Cache::markInService(MSHR *mshr, bool pending_modified_resp)
 {
-    markInServiceInternal(mshr, pending_dirty_resp);
+    markInServiceInternal(mshr, pending_modified_resp);
 }
 
 /////////////////////////////////////////////////////
@@ -420,9 +425,10 @@
_______________________________________________
gem5-dev mailing list
[email protected]
http://m5sim.org/mailman/listinfo/gem5-dev

Reply via email to