Daniel Carvalho has uploaded this change for review. (
https://gem5-review.googlesource.com/c/public/gem5/+/17535
Change subject: mem-cache: Make MSHR and writebuffer unaware of address
......................................................................
mem-cache: Make MSHR and writebuffer unaware of address
Eliminate address awareness in both queues by delegating all
address handling to the packets, since they are the ones that
contain this information.
Change-Id: Ib1a4b2d595461e2c0e9a1eace53bf586a677f9fd
Signed-off-by: Daniel R. Carvalho <[email protected]>
---
M src/mem/cache/base.cc
M src/mem/cache/base.hh
M src/mem/cache/cache.cc
M src/mem/cache/mshr.cc
M src/mem/cache/mshr.hh
M src/mem/cache/mshr_queue.cc
M src/mem/cache/mshr_queue.hh
M src/mem/cache/queue_entry.hh
M src/mem/cache/write_queue.cc
M src/mem/cache/write_queue.hh
M src/mem/cache/write_queue_entry.cc
M src/mem/cache/write_queue_entry.hh
M src/mem/packet_queue.cc
M src/mem/packet_queue.hh
14 files changed, 46 insertions(+), 88 deletions(-)
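
The refactor leans on the packet being able to report which cache block
it belongs to, via Packet::getBlockAddr(blk_size), as the hunks below
show. A minimal standalone sketch of that computation (the Addr alias
and the free-function name are stand-ins; the real method lives on
Packet in src/mem/packet.hh):

    #include <cstdint>

    using Addr = uint64_t;  // gem5's address type

    // Block-align an address by clearing the offset bits within the
    // cache block. Assumes blk_size is a power of two, as gem5's cache
    // block sizes are.
    Addr blockAddr(Addr addr, unsigned blk_size)
    {
        return addr & ~(Addr(blk_size) - 1);
    }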
diff --git a/src/mem/cache/base.cc b/src/mem/cache/base.cc
index e091fe7..09374a9 100644
--- a/src/mem/cache/base.cc
+++ b/src/mem/cache/base.cc
@@ -1540,6 +1540,7 @@
// use request from 1st target
PacketPtr tgt_pkt = mshr->getTarget()->pkt;
+ Addr addr = tgt_pkt->getBlockAddr(blkSize);
DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());
@@ -1552,7 +1553,7 @@
// if we are currently write coalescing, hold on the
// MSHR as many cycles extra as we need to completely
// write a cache line
- if (writeAllocator->delay(mshr->getAddr())) {
+ if (writeAllocator->delay(addr)) {
Tick delay = blkSize / tgt_pkt->getSize() * clockPeriod();
DPRINTF(CacheVerbose, "Delaying pkt %s %llu ticks to allow "
"for write coalescing\n", tgt_pkt->print(), delay);
@@ -1562,11 +1563,11 @@
writeAllocator->reset();
}
} else {
- writeAllocator->resetDelay(mshr->getAddr());
+ writeAllocator->resetDelay(addr);
}
}
- CacheBlk *blk = tags->findBlock(mshr->getAddr(), mshr->isSecure());
+ CacheBlk *blk = tags->findBlock(addr, mshr->isSecure());
// either a prefetch that is not present upstream, or a normal
// MSHR request, proceed to get the packet to send downstream
@@ -2447,7 +2448,7 @@
} else {
// let our snoop responses go first if there are responses to
// the same addresses
- if (checkConflictingSnoop(entry->getAddr())) {
+ if (checkConflictingSnoop(entry->getTarget()->pkt)) {
return;
}
waitingOnRetry = entry->sendPacket(cache);
diff --git a/src/mem/cache/base.hh b/src/mem/cache/base.hh
index a45dcba..66100c8 100644
--- a/src/mem/cache/base.hh
+++ b/src/mem/cache/base.hh
@@ -189,10 +189,12 @@
* send out, and if so simply stall any requests, and schedule
* a send event at the same time as the next snoop response is
* being sent out.
+ *
+ * @param pkt The packet to check for conflicts against.
*/
- bool checkConflictingSnoop(Addr addr)
+ bool checkConflictingSnoop(const PacketPtr pkt)
{
- if (snoopRespQueue.hasAddr(addr)) {
+ if (snoopRespQueue.match(pkt, cache.blkSize)) {
DPRINTF(CachePort, "Waiting for snoop response to be "
"sent\n");
Tick when = snoopRespQueue.deferredPacketReadyTime();
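
For illustration, the stall protocol the comment describes (defer the
request, retry once the conflicting snoop response is due to leave the
queue) can be modeled standalone. This is a toy sketch with assumed
types, not gem5's actual classes:

    #include <cstdint>
    #include <deque>

    using Tick = uint64_t;
    using Addr = uint64_t;

    struct DeferredPacket { Tick tick; Addr blk_addr; };

    // Toy model of the snoop response queue: a time-ordered transmit
    // list of deferred packets.
    struct SnoopRespQueueModel {
        std::deque<DeferredPacket> transmitList;

        bool match(Addr blk_addr) const {
            for (const auto &p : transmitList)
                if (p.blk_addr == blk_addr)
                    return true;
            return false;
        }

        Tick deferredPacketReadyTime() const {
            // ~Tick(0) stands in for gem5's MaxTick.
            return transmitList.empty() ? ~Tick(0)
                                        : transmitList.front().tick;
        }
    };

    // Returns true (and sets retry_at) when the caller must stall until
    // the conflicting snoop response has been sent.
    bool checkConflictingSnoop(const SnoopRespQueueModel &q,
                               Addr blk_addr, Tick &retry_at)
    {
        if (q.match(blk_addr)) {
            retry_at = q.deferredPacketReadyTime();
            return true;
        }
        return false;
    }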
@@ -1045,8 +1047,7 @@
MSHR *allocateMissBuffer(PacketPtr pkt, Tick time, bool sched_send = true)
{
- MSHR *mshr = mshrQueue.allocate(pkt->getBlockAddr(blkSize), blkSize,
- pkt, time, order++,
+ MSHR *mshr = mshrQueue.allocate(blkSize, pkt, time, order++,
allocOnFill(pkt->cmd));
if (mshrQueue.isFull()) {
@@ -1066,15 +1067,13 @@
// should only see writes or clean evicts here
assert(pkt->isWrite() || pkt->cmd == MemCmd::CleanEvict);
- Addr blk_addr = pkt->getBlockAddr(blkSize);
-
WriteQueueEntry *wq_entry =
- writeBuffer.findMatch(blk_addr, pkt->isSecure());
+ writeBuffer.findMatch(pkt->getBlockAddr(blkSize), pkt->isSecure());
if (wq_entry && !wq_entry->inService) {
DPRINTF(Cache, "Potential to merge writeback %s", pkt->print());
}
- writeBuffer.allocate(blk_addr, blkSize, pkt, time, order++);
+ writeBuffer.allocate(blkSize, pkt, time, order++);
if (writeBuffer.isFull()) {
setBlocked((BlockedCause)MSHRQueue_WriteBuffer);
diff --git a/src/mem/cache/cache.cc b/src/mem/cache/cache.cc
index 59e165c..8009496 100644
--- a/src/mem/cache/cache.cc
+++ b/src/mem/cache/cache.cc
@@ -1325,7 +1325,7 @@
// we should never have hardware prefetches to allocated
// blocks
- assert(!tags->findBlock(mshr->getAddr(), mshr->isSecure()));
+ assert(!tags->findBlock(tgt_pkt->getAddr(), mshr->isSecure()));
// We need to check the caches above us to verify that
// they don't have a copy of this block in the dirty state
@@ -1370,8 +1370,7 @@
if (snoop_pkt.isBlockCached()) {
DPRINTF(Cache, "Block present, prefetch squashed by cache. "
- "Deallocating mshr target %#x.\n",
- mshr->getAddr());
+ "Deallocating mshr target %s.\n", tgt_pkt->print());
// Deallocate the mshr target
if (mshrQueue.forceDeallocateTarget(mshr)) {
diff --git a/src/mem/cache/mshr.cc b/src/mem/cache/mshr.cc
index 11a461d..449a18e 100644
--- a/src/mem/cache/mshr.cc
+++ b/src/mem/cache/mshr.cc
@@ -250,7 +250,7 @@
void
-MSHR::allocate(Addr blk_addr, unsigned blk_size, PacketPtr target,
+MSHR::allocate(unsigned blk_size, PacketPtr target,
Tick when_ready, Counter _order, bool alloc_on_fill)
{
blkSize = blk_size;
@@ -264,8 +264,8 @@
inService = false;
downstreamPending = false;
- targets.init(blk_addr, blkSize);
- deferredTargets.init(blk_addr, blkSize);
+ targets.init(blkSize);
+ deferredTargets.init(blkSize);
// Don't know of a case where we would allocate a new MSHR for a
// snoop (mem-side request), so set source according to request here
@@ -489,7 +489,7 @@
MSHR::extractServiceableTargets(PacketPtr pkt)
{
TargetList ready_targets;
- ready_targets.init(getAddr(), blkSize);
+ ready_targets.init(blkSize);
// If the downstream MSHR got an invalidation request then we only
// service the first of the FromCPU targets and any other
// non-FromCPU target. This way the remaining FromCPU targets
@@ -634,8 +634,7 @@
// For other requests, we iterate over the individual targets
// since that's where the actual data lies.
if (pkt->isPrint()) {
- pkt->trySatisfyFunctional(this, getAddr(), _isSecure, blkSize,
- nullptr);
+ print();
return false;
} else {
return (targets.trySatisfyFunctional(pkt) ||
@@ -653,7 +652,7 @@
MSHR::print(std::ostream &os, int verbosity, const std::string &prefix) const
{
- ccprintf(os, "%s[%#llx:%#llx](%s) %s %s %s state: %s %s %s %s %s %s\n",
- prefix, getAddr(), getAddr() + blkSize - 1,
- isSecure() ? "s" : "ns",
+ ccprintf(os, "%s(%s) %s %s %s state: %s %s %s %s %s %s\n",
+ prefix, isSecure() ? "s" : "ns",
isForward ? "Forward" : "",
allocOnFill() ? "AllocOnFill" : "",
diff --git a/src/mem/cache/mshr.hh b/src/mem/cache/mshr.hh
index 8dbc744..19712b5 100644
--- a/src/mem/cache/mshr.hh
+++ b/src/mem/cache/mshr.hh
@@ -191,11 +191,9 @@
/**
* Reset state
*
- * @param blk_addr Address of the cache block
* @param blk_size Size of the cache block
*/
- void init(Addr blk_addr, Addr blk_size) {
- blkAddr = blk_addr;
+ void init(Addr blk_size) {
blkSize = blk_size;
writesBitmap.resize(blk_size);
@@ -303,9 +301,6 @@
}
private:
- /** Address of the cache block for this list of targets. */
- Addr blkAddr;
-
/** Size of the cache block. */
Addr blkSize;
@@ -410,14 +405,13 @@
/**
* Allocate a miss to this MSHR.
- * @param blk_addr The address of the block.
* @param blk_size The number of bytes to request.
* @param pkt The original miss.
* @param when_ready When should the MSHR be ready to act upon.
* @param _order The logical order of this MSHR
* @param alloc_on_fill Should the cache allocate a block on fill
*/
- void allocate(Addr blk_addr, unsigned blk_size, PacketPtr pkt,
+ void allocate(unsigned blk_size, PacketPtr pkt,
Tick when_ready, Counter _order, bool alloc_on_fill);
void markInService(bool pending_modified_resp);
@@ -478,16 +472,6 @@
}
/**
- * The address of any of the MSHR targets is the entry's address.
- * @return The address to which this entry refers.
- */
- Addr getAddr() const override
- {
- assert(hasTargets());
- return targets.front().pkt->getBlockAddr(blkSize);
- }
-
- /**
* Pop first target.
*/
void popTarget()
diff --git a/src/mem/cache/mshr_queue.cc b/src/mem/cache/mshr_queue.cc
index f4b8054..7aad583 100644
--- a/src/mem/cache/mshr_queue.cc
+++ b/src/mem/cache/mshr_queue.cc
@@ -58,7 +58,7 @@
{}
MSHR *
-MSHRQueue::allocate(Addr blk_addr, unsigned blk_size, PacketPtr pkt,
+MSHRQueue::allocate(unsigned blk_size, PacketPtr pkt,
Tick when_ready, Counter order, bool alloc_on_fill)
{
assert(!freeList.empty());
@@ -66,7 +66,7 @@
assert(mshr->getNumTargets() == 0);
freeList.pop_front();
- mshr->allocate(blk_addr, blk_size, pkt, when_ready, order, alloc_on_fill);
+ mshr->allocate(blk_size, pkt, when_ready, order, alloc_on_fill);
mshr->allocIter = allocatedList.insert(allocatedList.end(), mshr);
mshr->readyIter = addToReadyList(mshr);
diff --git a/src/mem/cache/mshr_queue.hh b/src/mem/cache/mshr_queue.hh
index 1e4eaeb..fc83ab4 100644
--- a/src/mem/cache/mshr_queue.hh
+++ b/src/mem/cache/mshr_queue.hh
@@ -85,7 +85,6 @@
* Allocates a new MSHR for the request and size. This places the request
* as the first target in the MSHR.
*
- * @param blk_addr The address of the block.
* @param blk_size The number of bytes to request.
* @param pkt The original miss.
* @param when_ready When should the MSHR be ready to act upon.
@@ -96,7 +95,7 @@
*
* @pre There are free entries.
*/
- MSHR *allocate(Addr blk_addr, unsigned blk_size, PacketPtr pkt,
+ MSHR *allocate(unsigned blk_size, PacketPtr pkt,
Tick when_ready, Counter order, bool alloc_on_fill);
/**
diff --git a/src/mem/cache/queue_entry.hh b/src/mem/cache/queue_entry.hh
index 4d84b8f..3f17f7d 100644
--- a/src/mem/cache/queue_entry.hh
+++ b/src/mem/cache/queue_entry.hh
@@ -120,18 +120,6 @@
inService(false), order(0), blkSize(0)
{}
- /**
- * The address of the block to which this entry refers is not stored in
- * the entry itself, but in the packet that originated it. We do not keep
- * a copy because if the packet is compressed, this may incur in extending
- * compression handling to the queue entry. This should ultimately be
- * removed, and the address(es) must be only known and handled by the
- * packet.
- *
- * @return The address to which this entry refers.
- */
- virtual Addr getAddr() const = 0;
-
bool isSecure() const { return _isSecure; }
bool isUncacheable() const { return _isUncacheable; }
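
With getAddr() removed from the base entry, call sites derive the block
address from the entry's first target packet on demand, as the base.cc
hunk above does with tgt_pkt->getBlockAddr(blkSize). A toy standalone
model of that pattern (names are illustrative, not gem5's):

    #include <cassert>
    #include <cstdint>
    #include <deque>

    using Addr = uint64_t;

    struct PacketModel {
        Addr addr;
        Addr getBlockAddr(unsigned blk_size) const {
            return addr & ~(Addr(blk_size) - 1);
        }
    };

    // The entry no longer caches a blkAddr member; it asks its first
    // target packet whenever the block address is needed.
    struct QueueEntryModel {
        std::deque<PacketModel *> targets;
        unsigned blkSize = 64;

        Addr blockAddr() const {
            assert(!targets.empty());
            return targets.front()->getBlockAddr(blkSize);
        }
    };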
diff --git a/src/mem/cache/write_queue.cc b/src/mem/cache/write_queue.cc
index 11a2620..b60382a 100644
--- a/src/mem/cache/write_queue.cc
+++ b/src/mem/cache/write_queue.cc
@@ -58,7 +58,7 @@
{}
WriteQueueEntry *
-WriteQueue::allocate(Addr blk_addr, unsigned blk_size, PacketPtr pkt,
+WriteQueue::allocate(unsigned blk_size, PacketPtr pkt,
Tick when_ready, Counter order)
{
assert(!freeList.empty());
@@ -66,7 +66,7 @@
assert(entry->getNumTargets() == 0);
freeList.pop_front();
- entry->allocate(blk_addr, blk_size, pkt, when_ready, order);
+ entry->allocate(blk_size, pkt, when_ready, order);
entry->allocIter = allocatedList.insert(allocatedList.end(), entry);
entry->readyIter = addToReadyList(entry);
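
For context, both queues recycle entries through a free list and track
them on allocated/ready lists, as the hunk above shows. A simplified
standalone model of that allocation step (FIFO ready ordering here; the
real addToReadyList orders entries by ready time):

    #include <cassert>
    #include <list>

    struct Entry {
        std::list<Entry *>::iterator allocIter;
        std::list<Entry *>::iterator readyIter;
    };

    struct QueueModel {
        std::list<Entry *> freeList;       // entries available for use
        std::list<Entry *> allocatedList;  // entries currently in use
        std::list<Entry *> readyList;      // entries ready to send

        Entry *allocate() {
            assert(!freeList.empty());
            Entry *entry = freeList.front();
            freeList.pop_front();
            entry->allocIter =
                allocatedList.insert(allocatedList.end(), entry);
            entry->readyIter = readyList.insert(readyList.end(), entry);
            return entry;
        }
    };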
diff --git a/src/mem/cache/write_queue.hh b/src/mem/cache/write_queue.hh
index d9ace0d..e9c15fe 100644
--- a/src/mem/cache/write_queue.hh
+++ b/src/mem/cache/write_queue.hh
@@ -73,7 +73,6 @@
* Allocates a new WriteQueueEntry for the request and size. This
* places the request as the first target in the WriteQueueEntry.
*
- * @param blk_addr The address of the block.
* @param blk_size The number of bytes to request.
* @param pkt The original write.
* @param when_ready When should the WriteQueueEntry be ready to act upon.
@@ -83,7 +82,7 @@
*
* @pre There are free entries.
*/
- WriteQueueEntry *allocate(Addr blk_addr, unsigned blk_size,
+ WriteQueueEntry *allocate(unsigned blk_size,
PacketPtr pkt, Tick when_ready, Counter order);
/**
diff --git a/src/mem/cache/write_queue_entry.cc b/src/mem/cache/write_queue_entry.cc
index 0a02adb..640affd 100644
--- a/src/mem/cache/write_queue_entry.cc
+++ b/src/mem/cache/write_queue_entry.cc
@@ -88,7 +88,7 @@
}
void
-WriteQueueEntry::allocate(Addr blk_addr, unsigned blk_size, PacketPtr target,
+WriteQueueEntry::allocate(unsigned blk_size, PacketPtr target,
Tick when_ready, Counter _order)
{
blkSize = blk_size;
@@ -102,8 +102,8 @@
// we should never have more than a single target for cacheable
// writes (writebacks and clean evictions)
panic_if(!_isUncacheable && !targets.empty(),
- "Write queue entry %#llx should never have more than one "
- "cacheable target", blk_addr);
+ "Write queue entry should never have more than one "
+ "cacheable target: %s", print());
panic_if(!((target->isWrite() && _isUncacheable) ||
(target->isEviction() && !_isUncacheable) ||
target->cmd == MemCmd::WriteClean),
@@ -127,8 +127,7 @@
// entity. For other requests, we iterate over the individual
// targets since that's where the actual data lies.
if (pkt->isPrint()) {
- pkt->trySatisfyFunctional(this, getAddr(), _isSecure, blkSize,
- nullptr);
+ print();
return false;
} else {
return targets.trySatisfyFunctional(pkt);
@@ -181,9 +180,8 @@
WriteQueueEntry::print(std::ostream &os, int verbosity,
const std::string &prefix) const
{
- ccprintf(os, "%s[%#llx:%#llx](%s) %s %s %s state: %s %s %s %s %s\n",
- prefix, getAddr(), getAddr() + blkSize - 1,
- isSecure() ? "s" : "ns",
+ ccprintf(os, "%s(%s) %s %s %s state: %s %s %s %s %s\n",
+ prefix, isSecure() ? "s" : "ns",
_isUncacheable ? "Unc" : "",
inService ? "InSvc" : "");
diff --git a/src/mem/cache/write_queue_entry.hh b/src/mem/cache/write_queue_entry.hh
index a33fc5d..cc4fc2b 100644
--- a/src/mem/cache/write_queue_entry.hh
+++ b/src/mem/cache/write_queue_entry.hh
@@ -111,33 +111,19 @@
/** List of all requests that match the address */
TargetList targets;
- /**
- * Get one of the packets' address and use it as a representative of the
- * entry's address. Should only be used for print calls since it assumes
- * each packet has a single address.
- *
- * @return One of the addresses this entry refers to.
- */
- Addr getAddr() const override
- {
- assert(hasTargets());
- return targets.front().pkt->getBlockAddr(blkSize);
- }
-
public:
/** A simple constructor. */
WriteQueueEntry() {}
/**
- * Allocate a miss to this entry.
- * @param blk_addr The address of the block.
+ * Allocate a write to this entry.
* @param blk_size The number of bytes to request.
* @param pkt The original write.
* @param when_ready When should the write be sent out.
* @param _order The logical order of this write.
*/
- void allocate(Addr blk_addr, unsigned blk_size, PacketPtr pkt,
+ void allocate(unsigned blk_size, PacketPtr pkt,
Tick when_ready, Counter _order);
diff --git a/src/mem/packet_queue.cc b/src/mem/packet_queue.cc
index 7371871..4e1e925 100644
--- a/src/mem/packet_queue.cc
+++ b/src/mem/packet_queue.cc
@@ -72,13 +72,14 @@
}
bool
-PacketQueue::hasAddr(Addr addr) const
+PacketQueue::match(const PacketPtr pkt, const int blk_size) const
{
// caller is responsible for ensuring that all packets have the
// same alignment
for (const auto& p : transmitList) {
- if (p.pkt->getAddr() == addr)
+ if (p.pkt->match(pkt, blk_size)) {
return true;
+ }
}
return false;
}
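
Packet::match itself is not part of this diff. Presumably it declares
two packets to match when they fall in the same cache block and carry
the same secure attribute; a standalone sketch under that assumption
(PacketModel stands in for gem5's Packet):

    #include <cstdint>

    using Addr = uint64_t;

    // Assumed semantics of Packet::match (not shown in this diff).
    struct PacketModel {
        Addr addr;
        bool secure;

        Addr getBlockAddr(unsigned blk_size) const {
            return addr & ~(Addr(blk_size) - 1);
        }

        // Two packets match when they touch the same cache block and
        // have the same security attribute.
        bool match(const PacketModel &other, unsigned blk_size) const {
            return secure == other.secure &&
                   getBlockAddr(blk_size) == other.getBlockAddr(blk_size);
        }
    };

Used as in the loop above: p.pkt->match(pkt, blk_size).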
diff --git a/src/mem/packet_queue.hh b/src/mem/packet_queue.hh
index 4ac4bf3..32cc77f 100644
--- a/src/mem/packet_queue.hh
+++ b/src/mem/packet_queue.hh
@@ -172,9 +172,14 @@
{ return transmitList.empty() ? MaxTick : transmitList.front().tick; }
/**
- * Check if a packets address exists in the queue.
+ * Check if a packet matching the given packet's block address exists
+ * in the queue.
+ *
+ * @param pkt The packet to compare against.
+ * @param blk_size Block size in bytes.
+ * @return Whether a corresponding packet is found.
*/
- bool hasAddr(Addr addr) const;
+ bool match(const PacketPtr pkt, const int blk_size) const;
/** Check the list of buffered packets against the supplied
* functional request. */
--
To view, visit https://gem5-review.googlesource.com/c/public/gem5/+/17535
To unsubscribe, or for help writing mail filters, visit
https://gem5-review.googlesource.com/settings
Gerrit-Project: public/gem5
Gerrit-Branch: master
Gerrit-Change-Id: Ib1a4b2d595461e2c0e9a1eace53bf586a677f9fd
Gerrit-Change-Number: 17535
Gerrit-PatchSet: 1
Gerrit-Owner: Daniel Carvalho <[email protected]>
Gerrit-MessageType: newchange
_______________________________________________
gem5-dev mailing list
[email protected]
http://m5sim.org/mailman/listinfo/gem5-dev