Nikos Nikoleris has uploaded this change for review. (
https://gem5-review.googlesource.com/c/public/gem5/+/30096 )
Change subject: mem-cache: Add support for blocking the cache on fills
......................................................................
mem-cache: Add support for blocking the cache on fills
Change-Id: I0b15139cf457e4c34d4f11a6b95ca4f6bd64e4ce
Signed-off-by: Nikos Nikoleris <nikos.nikole...@arm.com>
---
M src/mem/cache/Cache.py
M src/mem/cache/base.cc
M src/mem/cache/base.hh
3 files changed, 44 insertions(+), 9 deletions(-)
diff --git a/src/mem/cache/Cache.py b/src/mem/cache/Cache.py
index 4f4e445..a55ed2a 100644
--- a/src/mem/cache/Cache.py
+++ b/src/mem/cache/Cache.py
@@ -79,6 +79,9 @@
tag_latency = Param.Cycles("Tag lookup latency")
data_latency = Param.Cycles("Data access latency")
+ block_on_fills = Param.Bool(False, "Block the cache for further "
+ "read/writes until fill completes")
+ fill_latency = Param.Cycles(Self.data_latency, "Fill latency")
response_latency = Param.Cycles("Latency for the return path on a
miss");
warmup_percentage = Param.Percent(0,
diff --git a/src/mem/cache/base.cc b/src/mem/cache/base.cc
index 0187703..6614d16 100644
--- a/src/mem/cache/base.cc
+++ b/src/mem/cache/base.cc
@@ -92,7 +92,8 @@
lookupLatency(p->tag_latency),
dataLatency(p->data_latency),
forwardLatency(p->tag_latency),
- fillLatency(p->data_latency),
+ blockOnFills(p->block_on_fills),
+ fillLatency(p->fill_latency),
responseLatency(p->response_latency),
sequentialAccess(p->sequential_access),
numTarget(p->tgts_per_mshr),
@@ -104,6 +105,8 @@
noTargetMSHR(nullptr),
missCount(p->max_miss_count),
addrRanges(p->addr_ranges.begin(), p->addr_ranges.end()),
+ dataArrayUnblockEvent([this]{ clearBlocked(Blocked_DataArray); },
+ name()),
system(p->system),
stats(*this)
{
@@ -1171,8 +1174,15 @@
// When the packet metadata arrives, the tag lookup will be done
while
// the payload is arriving. Then the block will be ready to access
as
// soon as the fill is done
- blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
- std::max(cyclesToTicks(tag_latency),
(uint64_t)pkt->payloadDelay));
+ const Tick fill_done_tick = clockEdge(fillLatency) +
pkt->headerDelay +
+ std::max(cyclesToTicks(tag_latency),
(uint64_t)pkt->payloadDelay);
+
+ if (system->isTimingMode() && blockOnFills) {
+ setBlocked(Blocked_DataArray);
+ reschedule(dataArrayUnblockEvent, clockEdge(fillLatency),
true);
+ }
+
+ blk->setWhenReady(fill_done_tick);
return true;
} else if (pkt->cmd == MemCmd::CleanEvict) {
@@ -1245,8 +1255,15 @@
// When the packet metadata arrives, the tag lookup will be done
while
// the payload is arriving. Then the block will be ready to access
as
// soon as the fill is done
- blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
- std::max(cyclesToTicks(tag_latency),
(uint64_t)pkt->payloadDelay));
+ const Tick fill_done_tick = clockEdge(fillLatency) +
pkt->headerDelay +
+ std::max(cyclesToTicks(tag_latency),
(uint64_t)pkt->payloadDelay);
+
+ if (system->isTimingMode() && blockOnFills) {
+ setBlocked(Blocked_DataArray);
+ schedule(dataArrayUnblockEvent, clockEdge(fillLatency));
+ }
+
+ blk->setWhenReady(fill_done_tick);
// If this a write-through packet it will be sent to cache below
return !pkt->writeThrough();
@@ -1383,6 +1400,10 @@
DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n",
addr, is_secure ? "s" : "ns", old_state, blk->print());
+ // The block will be ready when the payload arrives and the fill is
done
+ const Tick fill_done_tick = clockEdge(fillLatency) + pkt->headerDelay +
+ pkt->payloadDelay;
+
// if we got new data, copy it in (checking for a read response
// and a response that has data is the same in the end)
if (pkt->isRead()) {
@@ -1391,10 +1412,12 @@
assert(pkt->getSize() == blkSize);
pkt->writeDataToBlock(blk->data, blkSize);
+ if (system->isTimingMode() && blockOnFills) {
+ setBlocked(Blocked_DataArray);
+ schedule(dataArrayUnblockEvent, clockEdge(fillLatency));
+ }
}
- // The block will be ready when the payload arrives and the fill is
done
- blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
- pkt->payloadDelay);
+ blk->setWhenReady(fill_done_tick);
return blk;
}
diff --git a/src/mem/cache/base.hh b/src/mem/cache/base.hh
index 3efc7c7..1f02c86 100644
--- a/src/mem/cache/base.hh
+++ b/src/mem/cache/base.hh
@@ -105,6 +105,7 @@
Blocked_NoMSHRs = MSHRQueue_MSHRs,
Blocked_NoWBBuffers = MSHRQueue_WriteBuffer,
Blocked_NoTargets,
+ Blocked_DataArray,
NUM_BLOCKED_CAUSES
};
@@ -857,7 +858,13 @@
*/
const Cycles forwardLatency;
- /** The latency to fill a cache block */
+ const bool blockOnFills;
+
+ /**
+ * The latency of filling the data array. It occurs when there is
+ * a fill to the cache and renders the data array inaccessible
+ * until it is completed.
+ */
const Cycles fillLatency;
/**
@@ -916,6 +923,8 @@
* Normally this is all possible memory addresses. */
const AddrRangeList addrRanges;
+ EventFunctionWrapper dataArrayUnblockEvent;
+
public:
/** System we are currently operating in. */
System *system;
--
To view, visit https://gem5-review.googlesource.com/c/public/gem5/+/30096
To unsubscribe, or for help writing mail filters, visit
https://gem5-review.googlesource.com/settings
Gerrit-Project: public/gem5
Gerrit-Branch: develop
Gerrit-Change-Id: I0b15139cf457e4c34d4f11a6b95ca4f6bd64e4ce
Gerrit-Change-Number: 30096
Gerrit-PatchSet: 1
Gerrit-Owner: Nikos Nikoleris <nikos.nikole...@arm.com>
Gerrit-MessageType: newchange
_______________________________________________
gem5-dev mailing list -- gem5-dev@gem5.org
To unsubscribe send an email to gem5-dev-le...@gem5.org
%(web_page_url)slistinfo%(cgiext)s/%(_internal_name)s