changeset 581fb2484bd6 in /z/repo/gem5
details: http://repo.gem5.org/gem5?cmd=changeset;node=581fb2484bd6
description:
mem: Snoop into caches on uncacheable accesses
This patch takes the final step in fixing issues related to uncacheable
accesses. We do not separate uncacheable memory from uncacheable
devices, and in cases where the target is really memory, there are
valid scenarios where we need to snoop, since we do not (yet) support
cache maintenance instructions. On snooping an uncacheable access we
thus provide data if possible. In essence, this makes uncacheable
accesses IO coherent.
The snoop filter is also queried to steer the snoops, but it is not
updated, since uncacheable accesses do not allocate a cache block.
diffstat:
src/cpu/o3/cpu.cc | 5 +++--
src/dev/dma_device.cc | 5 +++--
src/mem/cache/base.cc | 11 +++++++++++
src/mem/cache/base.hh | 10 ++++++++++
src/mem/cache/cache_impl.hh | 40 +++++++++++++---------------------------
src/mem/cache/mshr.cc | 5 ++++-
src/mem/coherent_xbar.cc | 17 ++++++-----------
src/mem/snoop_filter.cc | 21 +++++++++++++++------
8 files changed, 65 insertions(+), 49 deletions(-)
diffs (truncated from 315 to 300 lines):
diff -r e2a283400c43 -r 581fb2484bd6 src/cpu/o3/cpu.cc
--- a/src/cpu/o3/cpu.cc Tue May 05 03:22:27 2015 -0400
+++ b/src/cpu/o3/cpu.cc Tue May 05 03:22:29 2015 -0400
@@ -92,8 +92,9 @@
FullO3CPU<Impl>::IcachePort::recvTimingResp(PacketPtr pkt)
{
DPRINTF(O3CPU, "Fetch unit received timing\n");
- // We shouldn't ever get a block in ownership state
- assert(!(pkt->memInhibitAsserted() && !pkt->sharedAsserted()));
+ // We shouldn't ever get a cacheable block in ownership state
+ assert(pkt->req->isUncacheable() ||
+ !(pkt->memInhibitAsserted() && !pkt->sharedAsserted()));
fetch->processCacheCompletion(pkt);
return true;
diff -r e2a283400c43 -r 581fb2484bd6 src/dev/dma_device.cc
--- a/src/dev/dma_device.cc Tue May 05 03:22:27 2015 -0400
+++ b/src/dev/dma_device.cc Tue May 05 03:22:29 2015 -0400
@@ -104,8 +104,9 @@
bool
DmaPort::recvTimingResp(PacketPtr pkt)
{
- // We shouldn't ever get a block in ownership state
- assert(!(pkt->memInhibitAsserted() && !pkt->sharedAsserted()));
+ // We shouldn't ever get a cacheable block in ownership state
+ assert(pkt->req->isUncacheable() ||
+ !(pkt->memInhibitAsserted() && !pkt->sharedAsserted()));
handleResp(pkt);
diff -r e2a283400c43 -r 581fb2484bd6 src/mem/cache/base.cc
--- a/src/mem/cache/base.cc Tue May 05 03:22:27 2015 -0400
+++ b/src/mem/cache/base.cc Tue May 05 03:22:29 2015 -0400
@@ -153,6 +153,17 @@
}
}
+bool
+BaseCache::inRange(Addr addr) const
+{
+ for (const auto& r : addrRanges) {
+ if (r.contains(addr)) {
+ return true;
+ }
+ }
+ return false;
+}
+
void
BaseCache::regStats()
{
diff -r e2a283400c43 -r 581fb2484bd6 src/mem/cache/base.hh
--- a/src/mem/cache/base.hh Tue May 05 03:22:27 2015 -0400
+++ b/src/mem/cache/base.hh Tue May 05 03:22:29 2015 -0400
@@ -262,6 +262,16 @@
*/
virtual bool isDirty() const = 0;
+ /**
+ * Determine if an address is in the ranges covered by this
+ * cache. This is useful to filter snoops.
+ *
+ * @param addr Address to check against
+ *
+ * @return If the address in question is in range
+ */
+ bool inRange(Addr addr) const;
+
/** Block size of this cache */
const unsigned blkSize;
diff -r e2a283400c43 -r 581fb2484bd6 src/mem/cache/cache_impl.hh
--- a/src/mem/cache/cache_impl.hh Tue May 05 03:22:27 2015 -0400
+++ b/src/mem/cache/cache_impl.hh Tue May 05 03:22:29 2015 -0400
@@ -475,7 +475,6 @@
// responding to the request
DPRINTF(Cache, "mem inhibited on addr %#llx (%s): not responding\n",
pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
- assert(!pkt->req->isUncacheable());
// if the packet needs exclusive, and the cache that has
// promised to respond (setting the inhibit flag) is not
@@ -856,7 +855,6 @@
promoteWholeLineWrites(pkt);
if (pkt->memInhibitAsserted()) {
- assert(!pkt->req->isUncacheable());
// have to invalidate ourselves and any lower caches even if
// upper cache will be responding
if (pkt->isInvalidate()) {
@@ -1560,7 +1558,8 @@
// responses)
pkt = new Packet(req_pkt, false, req_pkt->isRead());
- assert(req_pkt->isInvalidate() || pkt->sharedAsserted());
+ assert(req_pkt->req->isUncacheable() || req_pkt->isInvalidate() ||
+ pkt->sharedAsserted());
pkt->makeTimingResponse();
if (pkt->isRead()) {
pkt->setDataFromBlock(blk_data, blkSize);
@@ -1676,7 +1675,7 @@
return;
}
- if (pkt->isRead() && !invalidate) {
+ if (!pkt->req->isUncacheable() && pkt->isRead() && !invalidate) {
assert(!needs_exclusive);
pkt->assertShared();
int bits_to_clear = BlkWritable;
@@ -1699,6 +1698,9 @@
// will write it back at a later point
pkt->assertMemInhibit();
if (have_exclusive) {
+ // in the case of an uncacheable request there is no need
+ // to set the exclusive flag, but since the recipient does
+ // not care there is no harm in doing so
pkt->setSupplyExclusive();
}
if (is_timing) {
@@ -1707,7 +1709,9 @@
pkt->makeAtomicResponse();
pkt->setDataFromBlock(blk->data, blkSize);
}
- } else if (is_timing && is_deferred) {
+ }
+
+ if (!respond && is_timing && is_deferred) {
// if it's a deferred timing snoop then we've made a copy of
// the packet, and so if we're not using that copy to respond
// then we need to delete it here.
@@ -1735,25 +1739,8 @@
// Snoops shouldn't happen when bypassing caches
assert(!system->bypassCaches());
- // check if the packet is for an address range covered by this
- // cache, partly to not waste time looking for it, but also to
- // ensure that we only forward the snoop upwards if it is within
- // our address ranges
- bool in_range = false;
- for (AddrRangeList::const_iterator r = addrRanges.begin();
- r != addrRanges.end(); ++r) {
- if (r->contains(pkt->getAddr())) {
- in_range = true;
- break;
- }
- }
-
- // Note that some deferred snoops don't have requests, since the
- // original access may have already completed
- if ((pkt->req && pkt->req->isUncacheable()) ||
- pkt->cmd == MemCmd::Writeback || !in_range) {
- //Can't get a hit on an uncacheable address
- //Revisit this for multi level coherence
+ // no need to snoop writebacks or requests that are not in range
+ if (pkt->cmd == MemCmd::Writeback || !inRange(pkt->getAddr())) {
return;
}
@@ -1843,9 +1830,8 @@
// Snoops shouldn't happen when bypassing caches
assert(!system->bypassCaches());
- if (pkt->req->isUncacheable() || pkt->cmd == MemCmd::Writeback) {
- // Can't get a hit on an uncacheable address
- // Revisit this for multi level coherence
+ // no need to snoop writebacks or requests that are not in range
+ if (pkt->cmd == MemCmd::Writeback || !inRange(pkt->getAddr())) {
return 0;
}
diff -r e2a283400c43 -r 581fb2484bd6 src/mem/cache/mshr.cc
--- a/src/mem/cache/mshr.cc Tue May 05 03:22:27 2015 -0400
+++ b/src/mem/cache/mshr.cc Tue May 05 03:22:29 2015 -0400
@@ -371,6 +371,9 @@
if (isPendingDirty()) {
pkt->assertMemInhibit();
+ // in the case of an uncacheable request there is no need
+ // to set the exclusive flag, but since the recipient does
+ // not care there is no harm in doing so
pkt->setSupplyExclusive();
}
@@ -380,7 +383,7 @@
}
}
- if (!pkt->needsExclusive()) {
+ if (!pkt->needsExclusive() && !pkt->req->isUncacheable()) {
// This transaction will get a read-shared copy, downgrading
// our copy if we had an exclusive one
postDowngrade = true;
diff -r e2a283400c43 -r 581fb2484bd6 src/mem/coherent_xbar.cc
--- a/src/mem/coherent_xbar.cc Tue May 05 03:22:27 2015 -0400
+++ b/src/mem/coherent_xbar.cc Tue May 05 03:22:29 2015 -0400
@@ -180,8 +180,7 @@
// determine how long to be crossbar layer is busy
Tick packetFinishTime = clockEdge(Cycles(1)) + pkt->payloadDelay;
- // uncacheable requests need never be snooped
- if (!pkt->req->isUncacheable() && !system->bypassCaches()) {
+ if (!system->bypassCaches()) {
// the packet is a memory-mapped request and should be
// broadcasted to our snoopers but the source
if (snoopFilter) {
@@ -213,8 +212,7 @@
// since it is a normal request, attempt to send the packet
bool success = masterPorts[master_port_id]->sendTimingReq(pkt);
- if (snoopFilter && !pkt->req->isUncacheable()
- && !system->bypassCaches()) {
+ if (snoopFilter && !system->bypassCaches()) {
// The packet may already be overwritten by the sendTimingReq function.
// The snoop filter needs to see the original request *and* the return
// status of the send operation, so we need to recreate the original
@@ -323,7 +321,7 @@
// determine how long to be crossbar layer is busy
Tick packetFinishTime = clockEdge(Cycles(1)) + pkt->payloadDelay;
- if (snoopFilter && !pkt->req->isUncacheable() && !system->bypassCaches()) {
+ if (snoopFilter && !system->bypassCaches()) {
// let the snoop filter inspect the response and update its state
snoopFilter->updateResponse(pkt, *slavePorts[slave_port_id]);
}
@@ -578,8 +576,7 @@
MemCmd snoop_response_cmd = MemCmd::InvalidCmd;
Tick snoop_response_latency = 0;
- // uncacheable requests need never be snooped
- if (!pkt->req->isUncacheable() && !system->bypassCaches()) {
+ if (!system->bypassCaches()) {
// forward to all snoopers but the source
std::pair<MemCmd, Tick> snoop_result;
if (snoopFilter) {
@@ -613,8 +610,7 @@
Tick response_latency = masterPorts[master_port_id]->sendAtomic(pkt);
// Lower levels have replied, tell the snoop filter
- if (snoopFilter && !pkt->req->isUncacheable() && !system->bypassCaches() &&
- pkt->isResponse()) {
+ if (snoopFilter && !system->bypassCaches() && pkt->isResponse()) {
snoopFilter->updateResponse(pkt, *slavePorts[slave_port_id]);
}
@@ -764,8 +760,7 @@
pkt->cmdString());
}
- // uncacheable requests need never be snooped
- if (!pkt->req->isUncacheable() && !system->bypassCaches()) {
+ if (!system->bypassCaches()) {
// forward to all snoopers but the source
forwardFunctional(pkt, slave_port_id);
}
diff -r e2a283400c43 -r 581fb2484bd6 src/mem/snoop_filter.cc
--- a/src/mem/snoop_filter.cc Tue May 05 03:22:27 2015 -0400
+++ b/src/mem/snoop_filter.cc Tue May 05 03:22:29 2015 -0400
@@ -74,7 +74,7 @@
DPRINTF(SnoopFilter, "%s: SF value %x.%x\n",
__func__, sf_item.requested, sf_item.holder);
- if (cpkt->needsResponse()) {
+ if (!cpkt->req->isUncacheable() && cpkt->needsResponse()) {
if (!cpkt->memInhibitAsserted()) {
// Max one request per address per port
panic_if(sf_item.requested & req_port, "double request :( "\
@@ -104,6 +104,9 @@
DPRINTF(SnoopFilter, "%s: packet src %s addr 0x%x cmd %s\n",
__func__, slave_port.name(), cpkt->getAddr(), cpkt->cmdString());
+ if (cpkt->req->isUncacheable())
+ return;
+
Addr line_addr = cpkt->getAddr() & ~(linesize - 1);
SnoopMask req_port = portToMask(slave_port);
SnoopItem& sf_item = cachedLocations[line_addr];
@@ -195,14 +198,17 @@
__func__, rsp_port.name(), req_port.name(), cpkt->getAddr(),
cpkt->cmdString());
+ assert(cpkt->isResponse());
+ assert(cpkt->memInhibitAsserted());
+
+ if (cpkt->req->isUncacheable())
+ return;
+
Addr line_addr = cpkt->getAddr() & ~(linesize - 1);
SnoopMask rsp_mask = portToMask(rsp_port);
SnoopMask req_mask = portToMask(req_port);
SnoopItem& sf_item = cachedLocations[line_addr];
- assert(cpkt->isResponse());
- assert(cpkt->memInhibitAsserted());
-
DPRINTF(SnoopFilter, "%s: old SF value %x.%x\n",
__func__, sf_item.requested, sf_item.holder);
@@ -270,12 +276,15 @@
DPRINTF(SnoopFilter, "%s: packet src %s addr 0x%x cmd %s\n",
__func__, slave_port.name(), cpkt->getAddr(), cpkt->cmdString());
_______________________________________________
gem5-dev mailing list
[email protected]
http://m5sim.org/mailman/listinfo/gem5-dev