changeset 81b1f46061c8 in /z/repo/gem5
details: http://repo.gem5.org/gem5?cmd=changeset;node=81b1f46061c8
description:
        mem: Move cache_impl.hh to cache.cc

        There is no longer any need to keep the implementation in a header.
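
For context: the old cache.cc removed below held only "Cache template
instantiations", pulling in the tag headers and cache_impl.hh so the template
definitions were visible at instantiation time. With Cache now a concrete
class, the definitions can live in a single translation unit. A minimal
sketch of the general pattern, with hypothetical Widget names rather than the
gem5 classes:

    // widget.hh: declaration only, no member definitions needed here
    #ifndef __WIDGET_HH__
    #define __WIDGET_HH__

    class Widget
    {
      public:
        explicit Widget(int v);
        int twice() const; // defined in widget.cc

      private:
        int value;
    };

    #endif // __WIDGET_HH__

    // widget.cc: out-of-line definitions, compiled exactly once
    #include "widget.hh"

    Widget::Widget(int v) : value(v) {}

    int
    Widget::twice() const
    {
        return 2 * value;
    }

Keeping the definitions out of the header shrinks what every includer must
parse and avoids rebuilding all users when only the implementation changes.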

diffstat:

 src/mem/cache/base.hh       |     6 +-
 src/mem/cache/cache.cc      |  2483 ++++++++++++++++++++++++++++++++++++++++++-
 src/mem/cache/cache.hh      |     6 +-
 src/mem/cache/cache_impl.hh |  2514 -------------------------------------------
 4 files changed, 2481 insertions(+), 2528 deletions(-)

diffs (truncated from 5061 to 300 lines):

diff -r 65fc1db5d795 -r 81b1f46061c8 src/mem/cache/base.hh
--- a/src/mem/cache/base.hh     Fri Aug 21 07:03:14 2015 -0400
+++ b/src/mem/cache/base.hh     Fri Aug 21 07:03:20 2015 -0400
@@ -47,8 +47,8 @@
  * Declares a basic cache interface BaseCache.
  */
 
-#ifndef __BASE_CACHE_HH__
-#define __BASE_CACHE_HH__
+#ifndef __MEM_CACHE_BASE_HH__
+#define __MEM_CACHE_BASE_HH__
 
 #include <algorithm>
 #include <list>
@@ -597,4 +597,4 @@
 
 };
 
-#endif //__BASE_CACHE_HH__
+#endif //__MEM_CACHE_BASE_HH__
diff -r 65fc1db5d795 -r 81b1f46061c8 src/mem/cache/cache.cc
--- a/src/mem/cache/cache.cc    Fri Aug 21 07:03:14 2015 -0400
+++ b/src/mem/cache/cache.cc    Fri Aug 21 07:03:20 2015 -0400
@@ -1,5 +1,18 @@
 /*
- * Copyright (c) 2004-2005 The Regents of The University of Michigan
+ * Copyright (c) 2010-2015 ARM Limited
+ * All rights reserved.
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder.  You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Copyright (c) 2002-2005 The Regents of The University of Michigan
+ * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -26,18 +39,2472 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Authors: Erik Hallnor
+ *          Dave Greene
+ *          Nathan Binkert
  *          Steve Reinhardt
- *          Lisa Hsu
- *          Kevin Lim
+ *          Ron Dreslinski
+ *          Andreas Sandberg
  */
 
 /**
  * @file
- * Cache template instantiations.
+ * Cache definitions.
  */
 
-#include "mem/cache/tags/fa_lru.hh"
-#include "mem/cache/tags/lru.hh"
-#include "mem/cache/tags/random_repl.hh"
-#include "mem/cache/cache_impl.hh"
+#include "mem/cache/cache.hh"
 
+#include "base/misc.hh"
+#include "base/types.hh"
+#include "debug/Cache.hh"
+#include "debug/CachePort.hh"
+#include "debug/CacheTags.hh"
+#include "mem/cache/blk.hh"
+#include "mem/cache/mshr.hh"
+#include "mem/cache/prefetch/base.hh"
+#include "sim/sim_exit.hh"
+
+Cache::Cache(const Params *p)
+    : BaseCache(p),
+      tags(p->tags),
+      prefetcher(p->prefetcher),
+      doFastWrites(true),
+      prefetchOnAccess(p->prefetch_on_access)
+{
+    tempBlock = new CacheBlk();
+    tempBlock->data = new uint8_t[blkSize];
+
+    cpuSidePort = new CpuSidePort(p->name + ".cpu_side", this,
+                                  "CpuSidePort");
+    memSidePort = new MemSidePort(p->name + ".mem_side", this,
+                                  "MemSidePort");
+
+    tags->setCache(this);
+    if (prefetcher)
+        prefetcher->setCache(this);
+}
+
+Cache::~Cache()
+{
+    delete [] tempBlock->data;
+    delete tempBlock;
+
+    delete cpuSidePort;
+    delete memSidePort;
+}
+
+void
+Cache::regStats()
+{
+    BaseCache::regStats();
+}
+
+void
+Cache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
+{
+    assert(pkt->isRequest());
+
+    uint64_t overwrite_val;
+    bool overwrite_mem;
+    uint64_t condition_val64;
+    uint32_t condition_val32;
+
+    int offset = tags->extractBlkOffset(pkt->getAddr());
+    uint8_t *blk_data = blk->data + offset;
+
+    assert(sizeof(uint64_t) >= pkt->getSize());
+
+    overwrite_mem = true;
+    // keep a copy of our possible write value, and copy what is at the
+    // memory address into the packet
+    pkt->writeData((uint8_t *)&overwrite_val);
+    pkt->setData(blk_data);
+
+    if (pkt->req->isCondSwap()) {
+        if (pkt->getSize() == sizeof(uint64_t)) {
+            condition_val64 = pkt->req->getExtraData();
+            overwrite_mem = !std::memcmp(&condition_val64, blk_data,
+                                         sizeof(uint64_t));
+        } else if (pkt->getSize() == sizeof(uint32_t)) {
+            condition_val32 = (uint32_t)pkt->req->getExtraData();
+            overwrite_mem = !std::memcmp(&condition_val32, blk_data,
+                                         sizeof(uint32_t));
+        } else
+            panic("Invalid size for conditional read/write\n");
+    }
+
+    if (overwrite_mem) {
+        std::memcpy(blk_data, &overwrite_val, pkt->getSize());
+        blk->status |= BlkDirty;
+    }
+}
+
+
+void
+Cache::satisfyCpuSideRequest(PacketPtr pkt, CacheBlk *blk,
+                             bool deferred_response, bool pending_downgrade)
+{
+    assert(pkt->isRequest());
+
+    assert(blk && blk->isValid());
+    // Occasionally this is not true... if we are a lower-level cache
+    // satisfying a string of Read and ReadEx requests from
+    // upper-level caches, a Read will mark the block as shared but we
+    // can satisfy a following ReadEx anyway since we can rely on the
+    // Read requester(s) to have buffered the ReadEx snoop and to
+    // invalidate their blocks after receiving them.
+    // assert(!pkt->needsExclusive() || blk->isWritable());
+    assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
+
+    // Check RMW operations first since both isRead() and
+    // isWrite() will be true for them
+    if (pkt->cmd == MemCmd::SwapReq) {
+        cmpAndSwap(blk, pkt);
+    } else if (pkt->isWrite()) {
+        assert(blk->isWritable());
+        // Write or WriteLine at the first cache with block in Exclusive
+        if (blk->checkWrite(pkt)) {
+            pkt->writeDataToBlock(blk->data, blkSize);
+        }
+        // Always mark the line as dirty even if we are a failed
+        // StoreCond so we supply data to any snoops that have
+        // appended themselves to this cache before knowing the store
+        // will fail.
+        blk->status |= BlkDirty;
+        DPRINTF(Cache, "%s for %s addr %#llx size %d (write)\n", __func__,
+                pkt->cmdString(), pkt->getAddr(), pkt->getSize());
+    } else if (pkt->isRead()) {
+        if (pkt->isLLSC()) {
+            blk->trackLoadLocked(pkt);
+        }
+        pkt->setDataFromBlock(blk->data, blkSize);
+        // determine if this read is from a (coherent) cache, or not
+        // by looking at the command type; we could potentially add a
+        // packet attribute such as 'FromCache' to make this check a
+        // bit cleaner
+        if (pkt->cmd == MemCmd::ReadExReq ||
+            pkt->cmd == MemCmd::ReadSharedReq ||
+            pkt->cmd == MemCmd::ReadCleanReq ||
+            pkt->cmd == MemCmd::SCUpgradeFailReq) {
+            assert(pkt->getSize() == blkSize);
+            // special handling for coherent block requests from
+            // upper-level caches
+            if (pkt->needsExclusive()) {
+                // sanity check
+                assert(pkt->cmd == MemCmd::ReadExReq ||
+                       pkt->cmd == MemCmd::SCUpgradeFailReq);
+
+                // if we have a dirty copy, make sure the recipient
+                // keeps it marked dirty
+                if (blk->isDirty()) {
+                    pkt->assertMemInhibit();
+                }
+                // on ReadExReq we give up our copy unconditionally
+                if (blk != tempBlock)
+                    tags->invalidate(blk);
+                blk->invalidate();
+            } else if (blk->isWritable() && !pending_downgrade &&
+                       !pkt->sharedAsserted() &&
+                       pkt->cmd != MemCmd::ReadCleanReq) {
+                // we can give the requester an exclusive copy (by not
+                // asserting shared line) on a read request if:
+                // - we have an exclusive copy at this level (& below)
+                // - we don't have a pending snoop from below
+                //   signaling another read request
+                // - no other cache above has a copy (otherwise it
+                //   would have asserted shared line on request)
+                // - we are not satisfying an instruction fetch (this
+                //   prevents dirty data in the i-cache)
+
+                if (blk->isDirty()) {
+                    // special considerations if we're owner:
+                    if (!deferred_response) {
+                        // if we are responding immediately and can
+                        // signal that we're transferring ownership
+                        // along with exclusivity, do so
+                        pkt->assertMemInhibit();
+                        blk->status &= ~BlkDirty;
+                    } else {
+                        // if we're responding after our own miss,
+                        // there's a window where the recipient didn't
+                        // know it was getting ownership and may not
+                        // have responded to snoops correctly, so we
+                        // can't pass off ownership *or* exclusivity
+                        pkt->assertShared();
+                    }
+                }
+            } else {
+                // otherwise only respond with a shared copy
+                pkt->assertShared();
+            }
+        }
+    } else {
+        // Upgrade or Invalidate, since we have it Exclusively (E or
+        // M), we ack then invalidate.
+        assert(pkt->isUpgrade() || pkt->isInvalidate());
+        assert(blk != tempBlock);
+        tags->invalidate(blk);
+        blk->invalidate();
+        DPRINTF(Cache, "%s for %s addr %#llx size %d (invalidation)\n",
+                __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize());
+    }
+}
+
+
+/////////////////////////////////////////////////////
+//
+// MSHR helper functions
+//
+/////////////////////////////////////////////////////
+
+
+void
+Cache::markInService(MSHR *mshr, bool pending_dirty_resp)
+{
+    markInServiceInternal(mshr, pending_dirty_resp);
+}
+
+
+void
+Cache::squash(int threadNum)
+{
+    bool unblock = false;
+    BlockedCause cause = NUM_BLOCKED_CAUSES;
+
+    if (noTargetMSHR && noTargetMSHR->threadNum == threadNum) {
+        noTargetMSHR = NULL;
+        unblock = true;
+        cause = Blocked_NoTargets;
+    }
+    if (mshrQueue.isFull()) {
+        unblock = true;
+        cause = Blocked_NoMSHRs;
+    }
+    mshrQueue.squash(threadNum);
+    if (unblock && !mshrQueue.isFull()) {
+        clearBlocked(cause);
+    }
+}
+
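
A note on the densest function above: cmpAndSwap services SwapReq packets and
implements both an unconditional swap and a conditional swap
(compare-and-swap). It first saves the packet's write value in overwrite_val,
copies the old block contents back into the packet (so the requester always
observes the pre-swap value), and only overwrites memory (and sets BlkDirty)
if the comparison succeeds. The decision logic reduces to the following
sketch, with hypothetical names and raw buffers standing in for gem5's
Packet and CacheBlk:

    #include <cstdint>
    #include <cstring>

    // Sketch of the conditional-swap core of Cache::cmpAndSwap above.
    // Returns true if mem was overwritten; the real code then marks the
    // cache block dirty.
    bool
    condSwap(uint8_t *mem, uint64_t new_val, uint64_t cond_val,
             unsigned size, bool is_conditional)
    {
        bool overwrite = true;
        if (is_conditional) {
            if (size == sizeof(uint64_t)) {
                overwrite = std::memcmp(&cond_val, mem,
                                        sizeof(uint64_t)) == 0;
            } else if (size == sizeof(uint32_t)) {
                // truncate the condition value, as the code above does
                uint32_t cond32 = (uint32_t)cond_val;
                overwrite = std::memcmp(&cond32, mem,
                                        sizeof(uint32_t)) == 0;
            } else {
                return false; // the real code panics on any other size
            }
        }
        if (overwrite)
            std::memcpy(mem, &new_val, size);
        return overwrite;
    }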