changeset f15f02d8c79e in /z/repo/gem5
details: http://repo.gem5.org/gem5?cmd=changeset;node=f15f02d8c79e
description:
        mem: Split the hit_latency into tag_latency and data_latency

        If the cache access mode is parallel, i.e. the "sequential_access"
        parameter is set to "False", tags and data are accessed in parallel.
        Therefore, the hit_latency is the maximum of tag_latency and
        data_latency. On the other hand, if the cache access mode is
        sequential, i.e. the "sequential_access" parameter is set to "True",
        tags and data are accessed sequentially. Therefore, the hit_latency
        is the sum of tag_latency and data_latency.

        Signed-off-by: Jason Lowe-Power <ja...@lowepower.com>

diffstat:

 configs/common/Caches.py              |  12 ++++++++----
 configs/common/O3_ARM_v7a.py          |  12 ++++++++----
 configs/example/arm/devices.py        |  15 ++++++++++-----
 configs/example/memcheck.py           |   5 +++--
 configs/example/memtest.py            |   5 +++--
 configs/learning_gem5/part1/caches.py |   6 ++++--
 src/mem/cache/Cache.py                |   3 ++-
 src/mem/cache/base.cc                 |   7 ++++---
 src/mem/cache/base.hh                 |   6 ++++++
 src/mem/cache/tags/Tags.py            |  15 ++++++++++-----
 src/mem/cache/tags/base.cc            |   6 +++++-
 src/mem/cache/tags/base.hh            |   8 +++++++-
 src/mem/cache/tags/base_set_assoc.hh  |  17 ++++++++++++-----
 src/mem/cache/tags/fa_lru.cc          |  13 ++++++++++++-
 src/mem/cache/tags/fa_lru.hh          |   1 +
 15 files changed, 95 insertions(+), 36 deletions(-)

diffs (truncated from 401 to 300 lines):

diff -r b0853929e223 -r f15f02d8c79e configs/common/Caches.py
--- a/configs/common/Caches.py  Wed Nov 30 17:10:27 2016 -0500
+++ b/configs/common/Caches.py  Wed Nov 30 17:10:27 2016 -0500
@@ -48,7 +48,8 @@
 
 class L1Cache(Cache):
     assoc = 2
-    hit_latency = 2
+    tag_latency = 2
+    data_latency = 2
     response_latency = 2
     mshrs = 4
     tgts_per_mshr = 20
@@ -63,7 +64,8 @@
 
 class L2Cache(Cache):
     assoc = 8
-    hit_latency = 20
+    tag_latency = 20
+    data_latency = 20
     response_latency = 20
     mshrs = 20
     tgts_per_mshr = 12
@@ -71,7 +73,8 @@
 
 class IOCache(Cache):
     assoc = 8
-    hit_latency = 50
+    tag_latency = 50
+    data_latency = 50
     response_latency = 50
     mshrs = 20
     size = '1kB'
@@ -79,7 +82,8 @@
 
 class PageTableWalkerCache(Cache):
     assoc = 2
-    hit_latency = 2
+    tag_latency = 2
+    data_latency = 2
     response_latency = 2
     mshrs = 10
     size = '1kB'
diff -r b0853929e223 -r f15f02d8c79e configs/common/O3_ARM_v7a.py
--- a/configs/common/O3_ARM_v7a.py      Wed Nov 30 17:10:27 2016 -0500
+++ b/configs/common/O3_ARM_v7a.py      Wed Nov 30 17:10:27 2016 -0500
@@ -147,7 +147,8 @@
 
 # Instruction Cache
 class O3_ARM_v7a_ICache(Cache):
-    hit_latency = 1
+    tag_latency = 1
+    data_latency = 1
     response_latency = 1
     mshrs = 2
     tgts_per_mshr = 8
@@ -159,7 +160,8 @@
 
 # Data Cache
 class O3_ARM_v7a_DCache(Cache):
-    hit_latency = 2
+    tag_latency = 2
+    data_latency = 2
     response_latency = 2
     mshrs = 6
     tgts_per_mshr = 8
@@ -172,7 +174,8 @@
 # TLB Cache
 # Use a cache as a L2 TLB
 class O3_ARM_v7aWalkCache(Cache):
-    hit_latency = 4
+    tag_latency = 4
+    data_latency = 4
     response_latency = 4
     mshrs = 6
     tgts_per_mshr = 8
@@ -185,7 +188,8 @@
 
 # L2 Cache
 class O3_ARM_v7aL2(Cache):
-    hit_latency = 12
+    tag_latency = 12
+    data_latency = 12
     response_latency = 12
     mshrs = 16
     tgts_per_mshr = 8
diff -r b0853929e223 -r f15f02d8c79e configs/example/arm/devices.py
--- a/configs/example/arm/devices.py    Wed Nov 30 17:10:27 2016 -0500
+++ b/configs/example/arm/devices.py    Wed Nov 30 17:10:27 2016 -0500
@@ -45,7 +45,8 @@
 from common import CpuConfig
 
 class L1I(L1_ICache):
-    hit_latency = 1
+    tag_latency = 1
+    data_latency = 1
     response_latency = 1
     mshrs = 4
     tgts_per_mshr = 8
@@ -54,7 +55,8 @@
 
 
 class L1D(L1_DCache):
-    hit_latency = 2
+    tag_latency = 2
+    data_latency = 2
     response_latency = 1
     mshrs = 16
     tgts_per_mshr = 16
@@ -64,7 +66,8 @@
 
 
 class WalkCache(PageTableWalkerCache):
-    hit_latency = 4
+    tag_latency = 4
+    data_latency = 4
     response_latency = 4
     mshrs = 6
     tgts_per_mshr = 8
@@ -74,7 +77,8 @@
 
 
 class L2(L2Cache):
-    hit_latency = 12
+    tag_latency = 12
+    data_latency = 12
     response_latency = 5
     mshrs = 32
     tgts_per_mshr = 8
@@ -87,7 +91,8 @@
 class L3(Cache):
     size = '16MB'
     assoc = 16
-    hit_latency = 20
+    tag_latency = 20
+    data_latency = 20
     response_latency = 20
     mshrs = 20
     tgts_per_mshr = 12
diff -r b0853929e223 -r f15f02d8c79e configs/example/memcheck.py
--- a/configs/example/memcheck.py       Wed Nov 30 17:10:27 2016 -0500
+++ b/configs/example/memcheck.py       Wed Nov 30 17:10:27 2016 -0500
@@ -153,7 +153,7 @@
 
 # Define a prototype L1 cache that we scale for all successive levels
 proto_l1 = Cache(size = '32kB', assoc = 4,
-                 hit_latency = 1, response_latency = 1,
+                 tag_latency = 1, data_latency = 1, response_latency = 1,
                  tgts_per_mshr = 8)
 
 if options.blocking:
@@ -175,7 +175,8 @@
      prev = cache_proto[0]
      next = prev()
      next.size = prev.size * scale
-     next.hit_latency = prev.hit_latency * 10
+     next.tag_latency = prev.tag_latency * 10
+     next.data_latency = prev.data_latency * 10
      next.response_latency = prev.response_latency * 10
      next.assoc = prev.assoc * scale
      next.mshrs = prev.mshrs * scale
diff -r b0853929e223 -r f15f02d8c79e configs/example/memtest.py
--- a/configs/example/memtest.py        Wed Nov 30 17:10:27 2016 -0500
+++ b/configs/example/memtest.py        Wed Nov 30 17:10:27 2016 -0500
@@ -176,7 +176,7 @@
 
 # Define a prototype L1 cache that we scale for all successive levels
 proto_l1 = Cache(size = '32kB', assoc = 4,
-                 hit_latency = 1, response_latency = 1,
+                 tag_latency = 1, data_latency = 1, response_latency = 1,
                  tgts_per_mshr = 8, clusivity = 'mostly_incl',
                  writeback_clean = True)
 
@@ -194,7 +194,8 @@
      prev = cache_proto[0]
      next = prev()
      next.size = prev.size * scale
-     next.hit_latency = prev.hit_latency * 10
+     next.tag_latency = prev.tag_latency * 10
+     next.data_latency = prev.data_latency * 10
      next.response_latency = prev.response_latency * 10
      next.assoc = prev.assoc * scale
      next.mshrs = prev.mshrs * scale
diff -r b0853929e223 -r f15f02d8c79e configs/learning_gem5/part1/caches.py
--- a/configs/learning_gem5/part1/caches.py     Wed Nov 30 17:10:27 2016 -0500
+++ b/configs/learning_gem5/part1/caches.py     Wed Nov 30 17:10:27 2016 -0500
@@ -45,7 +45,8 @@
     """Simple L1 Cache with default values"""
 
     assoc = 2
-    hit_latency = 2
+    tag_latency = 2
+    data_latency = 2
     response_latency = 2
     mshrs = 4
     tgts_per_mshr = 20
@@ -107,7 +108,8 @@
     # Default parameters
     size = '256kB'
     assoc = 8
-    hit_latency = 20
+    tag_latency = 20
+    data_latency = 20
     response_latency = 20
     mshrs = 20
     tgts_per_mshr = 12
diff -r b0853929e223 -r f15f02d8c79e src/mem/cache/Cache.py
--- a/src/mem/cache/Cache.py    Wed Nov 30 17:10:27 2016 -0500
+++ b/src/mem/cache/Cache.py    Wed Nov 30 17:10:27 2016 -0500
@@ -53,7 +53,8 @@
     size = Param.MemorySize("Capacity")
     assoc = Param.Unsigned("Associativity")
 
-    hit_latency = Param.Cycles("Hit latency")
+    tag_latency = Param.Cycles("Tag lookup latency")
+    data_latency = Param.Cycles("Data access latency")
     response_latency = Param.Cycles("Latency for the return path on a miss");
 
     max_miss_count = Param.Counter(0,
diff -r b0853929e223 -r f15f02d8c79e src/mem/cache/base.cc
--- a/src/mem/cache/base.cc     Wed Nov 30 17:10:27 2016 -0500
+++ b/src/mem/cache/base.cc     Wed Nov 30 17:10:27 2016 -0500
@@ -72,9 +72,10 @@
       mshrQueue("MSHRs", p->mshrs, 0, p->demand_mshr_reserve), // see below
       writeBuffer("write buffer", p->write_buffers, p->mshrs), // see below
       blkSize(blk_size),
-      lookupLatency(p->hit_latency),
-      forwardLatency(p->hit_latency),
-      fillLatency(p->response_latency),
+      lookupLatency(p->tag_latency),
+      dataLatency(p->data_latency),
+      forwardLatency(p->tag_latency),
+      fillLatency(p->data_latency),
       responseLatency(p->response_latency),
       numTarget(p->tgts_per_mshr),
       forwardSnoops(true),
diff -r b0853929e223 -r f15f02d8c79e src/mem/cache/base.hh
--- a/src/mem/cache/base.hh     Wed Nov 30 17:10:27 2016 -0500
+++ b/src/mem/cache/base.hh     Wed Nov 30 17:10:27 2016 -0500
@@ -265,6 +265,12 @@
     const Cycles lookupLatency;
 
     /**
+     * The latency of data access of a cache. It occurs when there is
+     * an access to the cache.
+     */
+    const Cycles dataLatency;
+
+    /**
      * This is the forward latency of the cache. It occurs when there
      * is a cache miss and a request is forwarded downstream, in
      * particular an outbound miss.
diff -r b0853929e223 -r f15f02d8c79e src/mem/cache/tags/Tags.py
--- a/src/mem/cache/tags/Tags.py        Wed Nov 30 17:10:27 2016 -0500
+++ b/src/mem/cache/tags/Tags.py        Wed Nov 30 17:10:27 2016 -0500
@@ -49,17 +49,22 @@
     # Get the block size from the parent (system)
     block_size = Param.Int(Parent.cache_line_size, "block size in bytes")
 
-    # Get the hit latency from the parent (cache)
-    hit_latency = Param.Cycles(Parent.hit_latency,
-                               "The hit latency for this cache")
+    # Get the tag lookup latency from the parent (cache)
+    tag_latency = Param.Cycles(Parent.tag_latency,
+                               "The tag lookup latency for this cache")
+
+    # Get the RAM access latency from the parent (cache)
+    data_latency = Param.Cycles(Parent.data_latency,
+                               "The data access latency for this cache")
+
+    sequential_access = Param.Bool(Parent.sequential_access,
+        "Whether to access tags and data sequentially")
 
 class BaseSetAssoc(BaseTags):
     type = 'BaseSetAssoc'
     abstract = True
     cxx_header = "mem/cache/tags/base_set_assoc.hh"
     assoc = Param.Int(Parent.assoc, "associativity")
-    sequential_access = Param.Bool(Parent.sequential_access,
-        "Whether to access tags and data sequentially")
 
 class LRU(BaseSetAssoc):
     type = 'LRU'
diff -r b0853929e223 -r f15f02d8c79e src/mem/cache/tags/base.cc
--- a/src/mem/cache/tags/base.cc        Wed Nov 30 17:10:27 2016 -0500
+++ b/src/mem/cache/tags/base.cc        Wed Nov 30 17:10:27 2016 -0500
@@ -56,7 +56,11 @@
 
 BaseTags::BaseTags(const Params *p)
     : ClockedObject(p), blkSize(p->block_size), size(p->size),
-      accessLatency(p->hit_latency), cache(nullptr), warmupBound(0),
+      lookupLatency(p->tag_latency),
+      accessLatency(p->sequential_access ?
+                    p->tag_latency + p->data_latency :
+                    std::max(p->tag_latency, p->data_latency)),
+      cache(nullptr), warmupBound(0),
       warmedUp(false), numBlocks(0)
 {
 }
diff -r b0853929e223 -r f15f02d8c79e src/mem/cache/tags/base.hh
_______________________________________________
gem5-dev mailing list
gem5-dev@gem5.org
http://m5sim.org/mailman/listinfo/gem5-dev

Reply via email to