changeset f3facce3d2b4 in /z/repo/gem5
details: http://repo.gem5.org/gem5?cmd=changeset;node=f3facce3d2b4
description:
mem: Use STL deque in favour of list for DRAM queues
This patch changes the data structure used for the DRAM read, write
and response queues from an STL list to a deque. The optimisation is
based on the observation that the queues are small (bounded by a
fixed capacity) and are iterated over linearly far more often than
they change size, an access pattern that favours the deque's chunked,
cache-friendly storage.
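As a rough illustration of the access pattern this targets (a minimal
sketch, not gem5 code; the names and the subsumption condition are
hypothetical stand-ins for what the diff below elides): a small,
bounded queue of packet pointers that is scanned front-to-back far
more often than it grows or shrinks. std::deque keeps its elements in
contiguous chunks, so such a scan touches far fewer cache lines than
chasing std::list node pointers.

    #include <cstdint>
    #include <deque>
    #include <iostream>

    struct DRAMPacket { uint64_t addr; uint64_t size; };

    // Hypothetical stand-in for the write-queue lookup in the first
    // hunk below: is the read at [addr, addr + size) fully covered by
    // a write already queued at the controller?
    bool
    foundInWriteQueue(const std::deque<DRAMPacket*>& writeQueue,
                      uint64_t addr, uint64_t size)
    {
        for (auto i = writeQueue.begin(); i != writeQueue.end(); ++i) {
            if ((*i)->addr <= addr &&
                addr + size <= (*i)->addr + (*i)->size)
                return true;
        }
        return false;
    }

    int
    main()
    {
        DRAMPacket a{0x1000, 64}, b{0x2000, 64};
        std::deque<DRAMPacket*> writeQueue{&a, &b};
        // The read at 0x2010 is subsumed by the queued write at 0x2000.
        std::cout << foundInWriteQueue(writeQueue, 0x2010, 16) << std::endl;
        return 0;
    }

Note that only the container type changes; the iterator interface is
identical, which is why most of the diff below is just loop
declarations switching to auto.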
diffstat:
src/mem/simple_dram.cc | 19 +++++++------------
src/mem/simple_dram.hh | 6 +++---
2 files changed, 10 insertions(+), 15 deletions(-)
diffs (87 lines):
diff -r eaf87dfcdbb9 -r f3facce3d2b4 src/mem/simple_dram.cc
--- a/src/mem/simple_dram.cc Mon Aug 19 03:52:31 2013 -0400
+++ b/src/mem/simple_dram.cc Mon Aug 19 03:52:32 2013 -0400
@@ -310,8 +310,7 @@
// First check write buffer to see if the data is already at
// the controller
bool foundInWrQ = false;
- list<DRAMPacket*>::const_iterator i;
- for (i = writeQueue.begin(); i != writeQueue.end(); ++i) {
+ for (auto i = writeQueue.begin(); i != writeQueue.end(); ++i) {
// check if the read is subsumed in the write entry we are
// looking at
if ((*i)->addr <= addr &&
@@ -662,19 +661,16 @@
void
SimpleDRAM::printQs() const {
-
- list<DRAMPacket*>::const_iterator i;
-
DPRINTF(DRAM, "===READ QUEUE===\n\n");
- for (i = readQueue.begin() ; i != readQueue.end() ; ++i) {
+ for (auto i = readQueue.begin() ; i != readQueue.end() ; ++i) {
DPRINTF(DRAM, "Read %lu\n", (*i)->addr);
}
DPRINTF(DRAM, "\n===RESP QUEUE===\n\n");
- for (i = respQueue.begin() ; i != respQueue.end() ; ++i) {
+ for (auto i = respQueue.begin() ; i != respQueue.end() ; ++i) {
DPRINTF(DRAM, "Response %lu\n", (*i)->addr);
}
DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n");
- for (i = writeQueue.begin() ; i != writeQueue.end() ; ++i) {
+ for (auto i = writeQueue.begin() ; i != writeQueue.end() ; ++i) {
DPRINTF(DRAM, "Write %lu\n", (*i)->addr);
}
}
@@ -829,7 +825,7 @@
if (memSchedPolicy == Enums::fcfs) {
// Do nothing, since the correct request is already head
} else if (memSchedPolicy == Enums::frfcfs) {
- list<DRAMPacket*>::iterator i = writeQueue.begin();
+ auto i = writeQueue.begin();
bool foundRowHit = false;
while (!foundRowHit && i != writeQueue.end()) {
DRAMPacket* dram_pkt = *i;
@@ -870,8 +866,7 @@
// Do nothing, since the request to serve is already the first
// one in the read queue
} else if (memSchedPolicy == Enums::frfcfs) {
- for (list<DRAMPacket*>::iterator i = readQueue.begin();
- i != readQueue.end() ; ++i) {
+ for (auto i = readQueue.begin(); i != readQueue.end() ; ++i) {
DRAMPacket* dram_pkt = *i;
const Bank& bank = dram_pkt->bank_ref;
// Check if it is a row hit
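(Aside on the two frfcfs hunks above: the policy scans the queue for
the first request that hits a currently open row and serves it ahead
of older requests, falling back to plain FCFS order when there is no
row hit. A hedged sketch of that selection step, with simplified
stand-ins for the real Bank bookkeeping:)

    #include <deque>

    struct Bank { unsigned openRow; };
    struct DRAMPacket { Bank* bank_ref; unsigned row; };

    // Move the first row-hitting request, if any, to the head of the
    // queue; otherwise the existing FCFS order is left untouched.
    void
    reorderFRFCFS(std::deque<DRAMPacket*>& queue)
    {
        for (auto i = queue.begin(); i != queue.end(); ++i) {
            DRAMPacket* dram_pkt = *i;
            if (dram_pkt->bank_ref->openRow == dram_pkt->row) {
                queue.erase(i);             // i is invalidated, but we
                queue.push_front(dram_pkt); // leave the loop right away
                break;
            }
        }
    }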
@@ -1153,7 +1148,7 @@
schedule(respondEvent, dram_pkt->readyTime);
} else {
bool done = false;
- list<DRAMPacket*>::iterator i = respQueue.begin();
+ auto i = respQueue.begin();
while (!done && i != respQueue.end()) {
if ((*i)->readyTime > dram_pkt->readyTime) {
respQueue.insert(i, dram_pkt);
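(The hunk above is the one place a deque is modified in the middle:
respQueue is kept sorted by readyTime, and deque::insert shifts
elements towards the nearer end, which stays cheap for a small,
bounded queue. A minimal sketch of that ordered insert, with
hypothetical names:)

    #include <cstdint>
    #include <deque>

    struct DRAMPacket { uint64_t readyTime; };

    // Insert pkt before the first entry with a later readyTime so the
    // queue remains sorted; append if pkt is the latest of all.
    void
    insertByReadyTime(std::deque<DRAMPacket*>& respQueue, DRAMPacket* pkt)
    {
        auto i = respQueue.begin();
        while (i != respQueue.end() && (*i)->readyTime <= pkt->readyTime)
            ++i;
        respQueue.insert(i, pkt);
    }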
diff -r eaf87dfcdbb9 -r f3facce3d2b4 src/mem/simple_dram.hh
--- a/src/mem/simple_dram.hh Mon Aug 19 03:52:31 2013 -0400
+++ b/src/mem/simple_dram.hh Mon Aug 19 03:52:32 2013 -0400
@@ -406,8 +406,8 @@
/**
* The controller's main read and write queues
*/
- std::list<DRAMPacket*> readQueue;
- std::list<DRAMPacket*> writeQueue;
+ std::deque<DRAMPacket*> readQueue;
+ std::deque<DRAMPacket*> writeQueue;
/**
* Response queue where read packets wait after we're done working
@@ -417,7 +417,7 @@
* as sizing the read queue, this and the main read queue need to
* be added together.
*/
- std::list<DRAMPacket*> respQueue;
+ std::deque<DRAMPacket*> respQueue;
/**
* If we need to drain, keep the drain manager around until we're
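(For completeness, a sketch of the resulting member declarations.
std::deque requires the <deque> header; the patch does not touch any
include line, so presumably it is already pulled in, though that is
not visible in the diff shown here:)

    #include <deque>  // assumed to be available to simple_dram.hh

    class SimpleDRAMSketch  // hypothetical skeleton, not the real class
    {
        struct DRAMPacket;  // forward declaration suffices for pointers

        // The controller's main read and write queues.
        std::deque<DRAMPacket*> readQueue;
        std::deque<DRAMPacket*> writeQueue;

        // Read packets wait here after the access completes, until
        // they can be returned to the requester.
        std::deque<DRAMPacket*> respQueue;
    };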