Gabe Black has submitted this change. ( https://gem5-review.googlesource.com/c/public/gem5/+/38385 )

Change subject: cpu: Style fixes in the trace CPU.
......................................................................

cpu: Style fixes in the trace CPU.

Change-Id: I3ef51aa8667926f3c4fab3c11e188102dd4bab3c
Reviewed-on: https://gem5-review.googlesource.com/c/public/gem5/+/38385
Tested-by: kokoro <noreply+kok...@google.com>
Reviewed-by: Giacomo Travaglini <giacomo.travagl...@arm.com>
Maintainer: Bobby R. Bruce <bbr...@ucdavis.edu>
---
M src/cpu/trace/trace_cpu.cc
M src/cpu/trace/trace_cpu.hh
2 files changed, 183 insertions(+), 188 deletions(-)

Approvals:
  Giacomo Travaglini: Looks good to me, approved
  Bobby R. Bruce: Looks good to me, approved
  kokoro: Regressions pass



diff --git a/src/cpu/trace/trace_cpu.cc b/src/cpu/trace/trace_cpu.cc
index 9c39e59..c9b9944 100644
--- a/src/cpu/trace/trace_cpu.cc
+++ b/src/cpu/trace/trace_cpu.cc
@@ -67,21 +67,17 @@

     // Check that the python parameters for sizes of ROB, store buffer and
     // load buffer do not overflow the corresponding C++ variables.
-    fatal_if(params.sizeROB > UINT16_MAX, "ROB size set to %d exceeds the "
-                "max. value of %d.\n", params.sizeROB, UINT16_MAX);
-    fatal_if(params.sizeStoreBuffer > UINT16_MAX, "ROB size set to %d "
-                "exceeds the max. value of %d.\n", params.sizeROB,
-                UINT16_MAX);
-    fatal_if(params.sizeLoadBuffer > UINT16_MAX, "Load buffer size set to"
-                " %d exceeds the max. value of %d.\n",
+    fatal_if(params.sizeROB > UINT16_MAX,
+             "ROB size set to %d exceeds the max. value of %d.",
+             params.sizeROB, UINT16_MAX);
+    fatal_if(params.sizeStoreBuffer > UINT16_MAX,
+             "ROB size set to %d exceeds the max. value of %d.",
+             params.sizeROB, UINT16_MAX);
+    fatal_if(params.sizeLoadBuffer > UINT16_MAX,
+             "Load buffer size set to %d exceeds the max. value of %d.",
                 params.sizeLoadBuffer, UINT16_MAX);
 }

-TraceCPU::~TraceCPU()
-{
-
-}
-
 void
 TraceCPU::updateNumOps(uint64_t rob_num)
 {
@@ -104,8 +100,8 @@
 void
 TraceCPU::init()
 {
-    DPRINTF(TraceCPUInst, "Instruction fetch request trace file is \"%s\"."
-            "\n", instTraceFile);
+ DPRINTF(TraceCPUInst, "Instruction fetch request trace file is \"%s\".\n",
+            instTraceFile);
     DPRINTF(TraceCPUData, "Data memory request trace file is \"%s\".\n",
             dataTraceFile);

@@ -119,7 +115,7 @@

     // Set the trace offset as the minimum of that in both traces
     traceOffset = std::min(first_icache_tick, first_dcache_tick);
-    inform("%s: Time offset (tick) found as min of both traces is %lli.\n",
+    inform("%s: Time offset (tick) found as min of both traces is %lli.",
             name(), traceOffset);

     // Schedule next icache and dcache event by subtracting the offset
@@ -153,8 +149,9 @@
     bool sched_next = icacheGen.tryNext();
     // If packet sent successfully, schedule next event
     if (sched_next) {
-        DPRINTF(TraceCPUInst, "Scheduling next icacheGen event "
-                "at %d.\n", curTick() + icacheGen.tickDelta());
+        DPRINTF(TraceCPUInst,
+                "Scheduling next icacheGen event at %d.\n",
+                curTick() + icacheGen.tickDelta());
         schedule(icacheNextEvent, curTick() + icacheGen.tickDelta());
         ++traceStats.numSchedIcacheEvent;
     } else {
@@ -191,7 +188,7 @@
     } else {
         // Schedule event to indicate execution is complete as both
         // instruction and data access traces have been played back.
-        inform("%s: Execution complete.\n", name());
+        inform("%s: Execution complete.", name());
         // If the replay is configured to exit early, that is when any one
         // execution is complete then exit immediately and return. Otherwise,
         // schedule the counted exit that counts down completion of each Trace
@@ -203,22 +200,25 @@
         }
     }
 }
- TraceCPU::TraceStats::TraceStats(TraceCPU *trace)
-    : Stats::Group(trace),
+
+TraceCPU::TraceStats::TraceStats(TraceCPU *trace) :
+    Stats::Group(trace),
     ADD_STAT(numSchedDcacheEvent,
-     "Number of events scheduled to trigger data request generator"),
+ "Number of events scheduled to trigger data request generator"),
     ADD_STAT(numSchedIcacheEvent,
- "Number of events scheduled to trigger instruction request generator"),
+            "Number of events scheduled to trigger instruction request "
+            "generator"),
     ADD_STAT(numOps, "Number of micro-ops simulated by the Trace CPU"),
     ADD_STAT(cpi, "Cycles per micro-op used as a proxy for CPI",
-     trace->baseStats.numCycles / numOps)
+            trace->baseStats.numCycles / numOps)
 {
-        cpi.precision(6);
+    cpi.precision(6);
 }
+
 TraceCPU::ElasticDataGen::
 ElasticDataGenStatGroup::ElasticDataGenStatGroup(Stats::Group *parent,
-                                                 const std::string& _name)
-    : Stats::Group(parent, _name.c_str()),
+ const std::string& _name) :
+    Stats::Group(parent, _name.c_str()),
     ADD_STAT(maxDependents, "Max number of dependents observed on a node"),
     ADD_STAT(maxReadyListSize, "Max size of the ready list observed"),
ADD_STAT(numSendAttempted, "Number of first attempts to send a request"),
@@ -238,15 +238,15 @@
     DPRINTF(TraceCPUData, "Initializing data memory request generator "
             "DcacheGen: elastic issue with retry.\n");

-    if (!readNextWindow())
- panic("Trace has %d elements. It must have at least %d elements.\n",
-              depGraph.size(), 2 * windowSize);
+    panic_if(!readNextWindow(),
+            "Trace has %d elements. It must have at least %d elements.",
+            depGraph.size(), 2 * windowSize);
     DPRINTF(TraceCPUData, "After 1st read, depGraph size:%d.\n",
             depGraph.size());

-    if (!readNextWindow())
- panic("Trace has %d elements. It must have at least %d elements.\n",
-              depGraph.size(), 2 * windowSize);
+    panic_if(!readNextWindow(),
+            "Trace has %d elements. It must have at least %d elements.",
+            depGraph.size(), 2 * windowSize);
     DPRINTF(TraceCPUData, "After 2st read, depGraph size:%d.\n",
             depGraph.size());

@@ -255,15 +255,17 @@
         printReadyList();
     }
     auto free_itr = readyList.begin();
- DPRINTF(TraceCPUData, "Execute tick of the first dependency free node %lli"
-            " is %d.\n", free_itr->seqNum, free_itr->execTick);
+    DPRINTF(TraceCPUData,
+            "Execute tick of the first dependency free node %lli is %d.\n",
+            free_itr->seqNum, free_itr->execTick);
     // Return the execute tick of the earliest ready node so that an event
     // can be scheduled to call execute()
     return (free_itr->execTick);
 }

 void
-TraceCPU::ElasticDataGen::adjustInitTraceOffset(Tick& offset) {
+TraceCPU::ElasticDataGen::adjustInitTraceOffset(Tick& offset)
+{
     for (auto& free_node : readyList) {
         free_node.execTick -= offset;
     }
@@ -278,7 +280,6 @@
 bool
 TraceCPU::ElasticDataGen::readNextWindow()
 {
-
     // Read and add next window
     DPRINTF(TraceCPUData, "Reading next window from file.\n");

@@ -328,9 +329,10 @@
     return true;
 }

-template<typename T> void
+template<typename T>
+void
 TraceCPU::ElasticDataGen::addDepsOnParent(GraphNode *new_node,
-                                            T& dep_array, uint8_t& num_dep)
+                                          T& dep_array, uint8_t& num_dep)
 {
     for (auto& a_dep : dep_array) {
// The convention is to set the dependencies starting with the first
@@ -380,8 +382,9 @@
     // then issue it, i.e. add the node to readyList.
     while (!depFreeQueue.empty()) {
         if (checkAndIssue(depFreeQueue.front(), false)) {
-            DPRINTF(TraceCPUData, "Removing from depFreeQueue: seq. num "
-                "%lli.\n", (depFreeQueue.front())->seqNum);
+            DPRINTF(TraceCPUData,
+                    "Removing from depFreeQueue: seq. num %lli.\n",
+                    (depFreeQueue.front())->seqNum);
             depFreeQueue.pop();
         } else {
             break;
@@ -434,8 +437,9 @@
         // are based on successful sending of the load as complete.
         if (node_ptr->isLoad() && !node_ptr->isStrictlyOrdered()) {
             // If execute succeeded mark its dependents as complete
-            DPRINTF(TraceCPUData, "Node seq. num %lli sent. Waking up "
-                    "dependents..\n", node_ptr->seqNum);
+            DPRINTF(TraceCPUData,
+                    "Node seq. num %lli sent. Waking up dependents..\n",
+                    node_ptr->seqNum);

             auto child_itr = (node_ptr->dependents).begin();
             while (child_itr != (node_ptr->dependents).end()) {
@@ -556,7 +560,6 @@
 PacketPtr
 TraceCPU::ElasticDataGen::executeMemReq(GraphNode* node_ptr)
 {
-
     DPRINTF(TraceCPUData, "Executing memory request %lli (phys addr %d, "
             "virt addr %d, pc %#x, size %d, flags %d).\n",
             node_ptr->seqNum, node_ptr->physAddr, node_ptr->virtAddr,
@@ -639,32 +642,32 @@
// If this is the first attempt, print a debug message to indicate this.
     if (first) {
DPRINTFR(TraceCPUData, "\t\tseq. num %lli(%s) with rob num %lli is now"
-            " dependency free.\n", node_ptr->seqNum, node_ptr->typeToStr(),
-            node_ptr->robNum);
+ " dependency free.\n", node_ptr->seqNum, node_ptr->typeToStr(),
+                node_ptr->robNum);
     }

     // Check if resources are available to issue the specific node
     if (hwResource.isAvailable(node_ptr)) {
         // If resources are free only then add to readyList
- DPRINTFR(TraceCPUData, "\t\tResources available for seq. num %lli. Adding"
-            " to readyList, occupying resources.\n", node_ptr->seqNum);
+ DPRINTFR(TraceCPUData, "\t\tResources available for seq. num %lli. "
+                "Adding to readyList, occupying resources.\n",
+                node_ptr->seqNum);
// Compute the execute tick by adding the compute delay for the node
         // and add the ready node to the ready list
         addToSortedReadyList(node_ptr->seqNum,
-                                owner.clockEdge() + node_ptr->compDelay);
+                             owner.clockEdge() + node_ptr->compDelay);
         // Account for the resources taken up by this issued node.
         hwResource.occupy(node_ptr);
         return true;
-
     } else {
         if (first) {
             // Although dependencies are complete, resources are not available.
-            DPRINTFR(TraceCPUData, "\t\tResources unavailable for seq. num %lli."
-                " Adding to depFreeQueue.\n", node_ptr->seqNum);
+ DPRINTFR(TraceCPUData, "\t\tResources unavailable for seq. num "
+                    "%lli. Adding to depFreeQueue.\n", node_ptr->seqNum);
             depFreeQueue.push(node_ptr);
         } else {
- DPRINTFR(TraceCPUData, "\t\tResources unavailable for seq. num %lli. "
-                "Still pending issue.\n", node_ptr->seqNum);
+ DPRINTFR(TraceCPUData, "\t\tResources unavailable for seq. num "
+                    "%lli. Still pending issue.\n", node_ptr->seqNum);
         }
         return false;
     }
@@ -739,7 +742,7 @@

 void
 TraceCPU::ElasticDataGen::addToSortedReadyList(NodeSeqNum seq_num,
-                                                    Tick exec_tick)
+                                               Tick exec_tick)
 {
     ReadyNode ready_node;
     ready_node.seqNum = seq_num;
@@ -752,17 +755,19 @@
     // and return
     if (itr == readyList.end()) {
         readyList.insert(itr, ready_node);
-        elasticStats.maxReadyListSize = std::max<double>(readyList.size(),
- elasticStats.maxReadyListSize.value());
+        elasticStats.maxReadyListSize =
+            std::max<double>(readyList.size(),
+                             elasticStats.maxReadyListSize.value());
         return;
     }

// If the new node has its execution tick equal to the first node in the
     // list then go to the next node. If the first node in the list failed
     // to execute, its position as the first is thus maintained.
-    if (retryPkt)
+    if (retryPkt) {
         if (retryPkt->req->getReqInstSeqNum() == itr->seqNum)
             itr++;
+    }

// Increment the iterator and compare the node pointed to by it to the new
     // node till the position to insert the new node is found.
@@ -770,23 +775,24 @@
     while (!found && itr != readyList.end()) {
         // If the execution tick of the new node is less than the node then
         // this is the position to insert
-        if (exec_tick < itr->execTick)
+        if (exec_tick < itr->execTick) {
             found = true;
         // If the execution tick of the new node is equal to the node then
         // sort in ascending order of sequence numbers
-        else if (exec_tick == itr->execTick) {
+        } else if (exec_tick == itr->execTick) {
             // If the sequence number of the new node is less than the node
             // then this is the position to insert
-            if (seq_num < itr->seqNum)
+            if (seq_num < itr->seqNum) {
                 found = true;
             // Else go to next node
-            else
+            } else {
                 itr++;
-        }
- // If the execution tick of the new node is greater than the node then
-        // go to the next node
-        else
+            }
+        } else {
+ // If the execution tick of the new node is greater than the node
+            // then go to the next node.
             itr++;
+        }
     }
     readyList.insert(itr, ready_node);
     // Update the stat for max size reached of the readyList
@@ -795,8 +801,8 @@
 }

 void
-TraceCPU::ElasticDataGen::printReadyList() {
-
+TraceCPU::ElasticDataGen::printReadyList()
+{
     auto itr = readyList.begin();
     if (itr == readyList.end()) {
         DPRINTF(TraceCPUData, "readyList is empty.\n");
@@ -813,8 +819,8 @@
 }

 TraceCPU::ElasticDataGen::HardwareResource::HardwareResource(
-    uint16_t max_rob, uint16_t max_stores, uint16_t max_loads)
-  : sizeROB(max_rob),
+        uint16_t max_rob, uint16_t max_stores, uint16_t max_loads) :
+    sizeROB(max_rob),
     sizeStoreBuffer(max_stores),
     sizeLoadBuffer(max_loads),
     oldestInFlightRobNum(UINT64_MAX),
@@ -845,8 +851,9 @@
TraceCPU::ElasticDataGen::HardwareResource::release(const GraphNode* done_node)
 {
     assert(!inFlightNodes.empty());
- DPRINTFR(TraceCPUData, "\tClearing done seq. num %d from inFlightNodes..\n",
-        done_node->seqNum);
+    DPRINTFR(TraceCPUData,
+            "\tClearing done seq. num %d from inFlightNodes..\n",
+            done_node->seqNum);

     assert(inFlightNodes.find(done_node->seqNum) != inFlightNodes.end());
     inFlightNodes.erase(done_node->seqNum);
@@ -861,9 +868,10 @@
         oldestInFlightRobNum = inFlightNodes.begin()->second;
     }

-    DPRINTFR(TraceCPUData, "\tCleared. inFlightNodes.size() = %d, "
-        "oldestInFlightRobNum = %d\n", inFlightNodes.size(),
-        oldestInFlightRobNum);
+    DPRINTFR(TraceCPUData,
+            "\tCleared. inFlightNodes.size() = %d, "
+            "oldestInFlightRobNum = %d\n", inFlightNodes.size(),
+            oldestInFlightRobNum);

     // A store is considered complete when a request is sent, thus ROB entry is
     // freed. But it occupies an entry in the Store Buffer until its response
@@ -891,21 +899,21 @@

 bool
 TraceCPU::ElasticDataGen::HardwareResource::isAvailable(
-    const GraphNode* new_node) const
+        const GraphNode* new_node) const
 {
     uint16_t num_in_flight_nodes;
     if (inFlightNodes.empty()) {
         num_in_flight_nodes = 0;
DPRINTFR(TraceCPUData, "\t\tChecking resources to issue seq. num %lli:"
-            " #in-flight nodes = 0", new_node->seqNum);
+                " #in-flight nodes = 0", new_node->seqNum);
     } else if (new_node->robNum > oldestInFlightRobNum) {
         // This is the intuitive case where new dep-free node is younger
// instruction than the oldest instruction in-flight. Thus we make sure
         // in_flight_nodes does not overflow.
         num_in_flight_nodes = new_node->robNum - oldestInFlightRobNum;
DPRINTFR(TraceCPUData, "\t\tChecking resources to issue seq. num %lli:"
-            " #in-flight nodes = %d - %d =  %d", new_node->seqNum,
-             new_node->robNum, oldestInFlightRobNum, num_in_flight_nodes);
+                " #in-flight nodes = %d - %d =  %d", new_node->seqNum,
+ new_node->robNum, oldestInFlightRobNum, num_in_flight_nodes);
     } else {
         // This is the case where an instruction older than the oldest in-
         // flight instruction becomes dep-free. Thus we must have already
@@ -914,12 +922,12 @@
         // be updated in occupy(). We simply let this node issue now.
         num_in_flight_nodes = 0;
DPRINTFR(TraceCPUData, "\t\tChecking resources to issue seq. num %lli:"
-            " new oldestInFlightRobNum = %d, #in-flight nodes ignored",
-            new_node->seqNum, new_node->robNum);
+                " new oldestInFlightRobNum = %d, #in-flight nodes ignored",
+                new_node->seqNum, new_node->robNum);
     }
     DPRINTFR(TraceCPUData, ", LQ = %d/%d, SQ  = %d/%d.\n",
-        numInFlightLoads, sizeLoadBuffer,
-        numInFlightStores, sizeStoreBuffer);
+            numInFlightLoads, sizeLoadBuffer,
+            numInFlightStores, sizeStoreBuffer);
     // Check if resources are available to issue the specific node
     if (num_in_flight_nodes >= sizeROB) {
         return false;
@@ -934,23 +942,25 @@
 }

 bool
-TraceCPU::ElasticDataGen::HardwareResource::awaitingResponse() const {
+TraceCPU::ElasticDataGen::HardwareResource::awaitingResponse() const
+{
     // Return true if there is at least one read or write request in flight
     return (numInFlightStores != 0 || numInFlightLoads != 0);
 }

 void
-TraceCPU::ElasticDataGen::HardwareResource::printOccupancy() {
+TraceCPU::ElasticDataGen::HardwareResource::printOccupancy()
+{
     DPRINTFR(TraceCPUData, "oldestInFlightRobNum = %d, "
             "LQ = %d/%d, SQ  = %d/%d.\n",
             oldestInFlightRobNum,
             numInFlightLoads, sizeLoadBuffer,
             numInFlightStores, sizeStoreBuffer);
 }
-TraceCPU::FixedRetryGen::
-FixedRetryGenStatGroup::FixedRetryGenStatGroup(Stats::Group *parent,
-                                               const std::string& _name)
-    : Stats::Group(parent, _name.c_str()),
+
+TraceCPU::FixedRetryGen::FixedRetryGenStatGroup::FixedRetryGenStatGroup(
+        Stats::Group *parent, const std::string& _name) :
+    Stats::Group(parent, _name.c_str()),
ADD_STAT(numSendAttempted, "Number of first attempts to send a request"),
     ADD_STAT(numSendSucceeded, "Number of successful first attempts"),
     ADD_STAT(numSendFailed, "Number of failed first attempts"),
@@ -980,7 +990,6 @@
 {
     // If there is a retry packet, try to send it
     if (retryPkt) {
-
         DPRINTF(TraceCPUInst, "Trying to send retry packet.\n");

         if (!port.sendTimingReq(retryPkt)) {
@@ -990,7 +999,6 @@
         }
         ++fixedStats.numRetrySucceeded;
     } else {
-
         DPRINTF(TraceCPUInst, "Trying to send packet for currElement.\n");

         // try sending current element
@@ -1063,7 +1071,7 @@

 bool
 TraceCPU::FixedRetryGen::send(Addr addr, unsigned size, const MemCmd& cmd,
-              Request::FlagsType flags, Addr pc)
+        Request::FlagsType flags, Addr pc)
 {

     // Create new request
@@ -1172,11 +1180,10 @@
 }

 TraceCPU::ElasticDataGen::InputStream::InputStream(
-    const std::string& filename,
-    const double time_multiplier)
-    : trace(filename),
-      timeMultiplier(time_multiplier),
-      microOpCount(0)
+        const std::string& filename, const double time_multiplier) :
+    trace(filename),
+    timeMultiplier(time_multiplier),
+    microOpCount(0)
 {
     // Create a protobuf message for the header and read it from the stream
     ProtoMessage::InstDepRecordHeader header_msg;
@@ -1284,8 +1291,8 @@
             own_reg_dep = 0;
             assert(numRegDep > 0);
             --numRegDep;
- DPRINTFR(TraceCPUData, "\tFor %lli: Marking register dependency %lli "
-                    "done.\n", seqNum, reg_dep);
+ DPRINTFR(TraceCPUData, "\tFor %lli: Marking register dependency "
+                    "%lli done.\n", seqNum, reg_dep);
             return true;
         }
     }
@@ -1303,8 +1310,9 @@
             own_rob_dep = 0;
             assert(numRobDep > 0);
             --numRobDep;
- DPRINTFR(TraceCPUData, "\tFor %lli: Marking ROB dependency %lli "
-                "done.\n", seqNum, rob_dep);
+            DPRINTFR(TraceCPUData,
+                    "\tFor %lli: Marking ROB dependency %lli done.\n",
+                    seqNum, rob_dep);
             return true;
         }
     }
@@ -1312,7 +1320,8 @@
 }

 void
-TraceCPU::ElasticDataGen::GraphNode::clearRegDep() {
+TraceCPU::ElasticDataGen::GraphNode::clearRegDep()
+{
     for (auto& own_reg_dep : regDep) {
         own_reg_dep = 0;
     }
@@ -1320,7 +1329,8 @@
 }

 void
-TraceCPU::ElasticDataGen::GraphNode::clearRobDep() {
+TraceCPU::ElasticDataGen::GraphNode::clearRobDep()
+{
     for (auto& own_rob_dep : robDep) {
         own_rob_dep = 0;
     }
diff --git a/src/cpu/trace/trace_cpu.hh b/src/cpu/trace/trace_cpu.hh
index 4fb72d2..d0f0e47 100644
--- a/src/cpu/trace/trace_cpu.hh
+++ b/src/cpu/trace/trace_cpu.hh
@@ -142,7 +142,6 @@

   public:
     TraceCPU(const TraceCPUParams &params);
-    ~TraceCPU();

     void init();

@@ -153,10 +152,7 @@
      *
      * @return 0
      */
-    Counter totalInsts() const
-    {
-        return 0;
-    }
+    Counter totalInsts() const { return 0; }

     /**
      * Return totalOps as the number of committed micro-ops plus the
@@ -164,10 +160,7 @@
      *
      * @return number of micro-ops i.e. nodes in the elastic data generator
      */
-    Counter totalOps() const
-    {
-        return traceStats.numOps.value();
-    }
+    Counter totalOps() const { return traceStats.numOps.value(); }

     /*
      * Set the no. of ops when elastic data generator completes executing a
@@ -176,10 +169,7 @@
     void updateNumOps(uint64_t rob_num);

     /* Pure virtual function in BaseCPU. Do nothing. */
-    void wakeup(ThreadID tid = 0)
-    {
-        return;
-    }
+    void wakeup(ThreadID tid=0) { return; }

     /*
* When resuming from checkpoint in FS mode, the TraceCPU takes over from
@@ -225,10 +215,9 @@
     {
       public:
         /** Default constructor. */
-        IcachePort(TraceCPU* _cpu)
-            : RequestPort(_cpu->name() + ".icache_port", _cpu),
-                         owner(_cpu)
-        { }
+        IcachePort(TraceCPU* _cpu) :
+            RequestPort(_cpu->name() + ".icache_port", _cpu), owner(_cpu)
+        {}

       public:
         /**
@@ -246,7 +235,7 @@
          *
          * @param pkt Pointer to packet received
          */
-        void recvTimingSnoopReq(PacketPtr pkt) { }
+        void recvTimingSnoopReq(PacketPtr pkt) {}

         /**
* Handle a retry signalled by the cache if instruction read failed in
@@ -266,10 +255,9 @@

       public:
         /** Default constructor. */
-        DcachePort(TraceCPU* _cpu)
-            : RequestPort(_cpu->name() + ".dcache_port", _cpu),
-                         owner(_cpu)
-        { }
+        DcachePort(TraceCPU* _cpu) :
+            RequestPort(_cpu->name() + ".dcache_port", _cpu), owner(_cpu)
+        {}

       public:

@@ -287,16 +275,14 @@
          *
          * @param pkt Pointer to packet received
          */
-        void recvTimingSnoopReq(PacketPtr pkt)
-        { }
+        void recvTimingSnoopReq(PacketPtr pkt) {}

         /**
          * Required functionally but do nothing.
          *
          * @param pkt Pointer to packet received
          */
-        void recvFunctionalSnoop(PacketPtr pkt)
-        { }
+        void recvFunctionalSnoop(PacketPtr pkt) {}

         /**
* Handle a retry signalled by the cache if data access failed in the
@@ -344,7 +330,8 @@
         /**
          * This struct stores a line in the trace file.
          */
-        struct TraceElement {
+        struct TraceElement
+        {

             /** Specifies if the request is to be a read or a write */
             MemCmd cmd;
@@ -369,16 +356,12 @@
              *
              * @return if this element is valid
              */
-            bool isValid() const {
-                return cmd != MemCmd::InvalidCmd;
-            }
+            bool isValid() const { return cmd != MemCmd::InvalidCmd; }

             /**
              * Make this element invalid.
              */
-            void clear() {
-                cmd = MemCmd::InvalidCmd;
-            }
+            void clear() { cmd = MemCmd::InvalidCmd; }
         };

         /**
@@ -388,14 +371,11 @@
          */
         class InputStream
         {
-
           private:
-
             // Input file stream for the protobuf trace
             ProtoInputStream trace;

           public:
-
             /**
              * Create a trace input stream for a given file name.
              *
@@ -420,19 +400,19 @@
             bool read(TraceElement* element);
         };

-        public:
+      public:
         /* Constructor */
         FixedRetryGen(TraceCPU& _owner, const std::string& _name,
                    RequestPort& _port, RequestorID requestor_id,
-                   const std::string& trace_file)
-            : owner(_owner),
-              port(_port),
-              requestorId(requestor_id),
-              trace(trace_file),
-              genName(owner.name() + ".fixedretry." + _name),
-              retryPkt(nullptr),
-              delta(0),
-              traceComplete(false), fixedStats(&_owner, _name)
+                   const std::string& trace_file) :
+            owner(_owner),
+            port(_port),
+            requestorId(requestor_id),
+            trace(trace_file),
+            genName(owner.name() + ".fixedretry." + _name),
+            retryPkt(nullptr),
+            delta(0),
+            traceComplete(false), fixedStats(&_owner, _name)
         {
         }

@@ -493,9 +473,7 @@

         int64_t tickDelta() { return delta; }

-
       private:
-
         /** Reference of the TraceCPU. */
         TraceCPU& owner;

@@ -558,9 +536,7 @@
      */
     class ElasticDataGen
     {
-
       private:
-
         /** Node sequence number type. */
         typedef uint64_t NodeSeqNum;

@@ -576,8 +552,8 @@
          * the execution and this struct is used to encapsulate the request
          * data as well as pointers to its dependent GraphNodes.
          */
-        class GraphNode {
-
+        class GraphNode
+        {
           public:
             /**
              * The maximum no. of ROB dependencies. There can be at most 2
@@ -598,7 +574,10 @@
             /** ROB occupancy number */
             NodeRobNum robNum;

- /** Type of the node corresponding to the instruction modelled by it */
+           /**
+            * Type of the node corresponding to the instruction modeled by
+            * it.
+            */
             RecordType type;

             /** The address for the request if any */
@@ -666,7 +645,9 @@
             bool removeDepOnInst(NodeSeqNum done_seq_num);

/** Return true if node has a request which is strictly ordered */
-            bool isStrictlyOrdered() const {
+            bool
+            isStrictlyOrdered() const
+            {
                 return (flags.isSet(Request::STRICT_ORDER));
             }
             /**
@@ -741,25 +722,25 @@
              */
             bool awaitingResponse() const;

-            /** Print resource occupancy for debugging */
+            /** Print resource occupancy for debugging. */
             void printOccupancy();

           private:
             /**
- * The size of the ROB used to throttle the max. number of in-flight
-             * nodes.
+             * The size of the ROB used to throttle the max. number of
+             * in-flight nodes.
              */
             const uint16_t sizeROB;

             /**
- * The size of store buffer. This is used to throttle the max. number
-             * of in-flight stores.
+             * The size of store buffer. This is used to throttle the max.
+             * number of in-flight stores.
              */
             const uint16_t sizeStoreBuffer;

             /**
- * The size of load buffer. This is used to throttle the max. number
-             * of in-flight loads.
+             * The size of load buffer. This is used to throttle the max.
+             * number of in-flight loads.
              */
             const uint16_t sizeLoadBuffer;

@@ -778,10 +759,14 @@
             /** The ROB number of the oldest in-flight node */
             NodeRobNum oldestInFlightRobNum;

- /** Number of ready loads for which request may or may not be sent */
+            /** Number of ready loads for which request may or may not be
+             * sent.
+             */
             uint16_t numInFlightLoads;

- /** Number of ready stores for which request may or may not be sent */
+            /** Number of ready stores for which request may or may not be
+             * sent.
+             */
             uint16_t numInFlightStores;
         };

@@ -792,9 +777,7 @@
          */
         class InputStream
         {
-
           private:
-
             /** Input file stream for the protobuf trace */
             ProtoInputStream trace;

@@ -814,8 +797,8 @@
              * trace and used to process the dependency trace
              */
             uint32_t windowSize;
-          public:

+          public:
             /**
              * Create a trace input stream for a given file name.
              *
@@ -853,19 +836,20 @@
         /* Constructor */
         ElasticDataGen(TraceCPU& _owner, const std::string& _name,
                    RequestPort& _port, RequestorID requestor_id,
- const std::string& trace_file, const TraceCPUParams &params)
-            : owner(_owner),
-              port(_port),
-              requestorId(requestor_id),
-              trace(trace_file, 1.0 / params.freqMultiplier),
-              genName(owner.name() + ".elastic." + _name),
-              retryPkt(nullptr),
-              traceComplete(false),
-              nextRead(false),
-              execComplete(false),
-              windowSize(trace.getWindowSize()),
-              hwResource(params.sizeROB, params.sizeStoreBuffer,
- params.sizeLoadBuffer), elasticStats(&_owner, _name)
+                   const std::string& trace_file,
+                   const TraceCPUParams &params) :
+            owner(_owner),
+            port(_port),
+            requestorId(requestor_id),
+            trace(trace_file, 1.0 / params.freqMultiplier),
+            genName(owner.name() + ".elastic." + _name),
+            retryPkt(nullptr),
+            traceComplete(false),
+            nextRead(false),
+            execComplete(false),
+            windowSize(trace.getWindowSize()),
+            hwResource(params.sizeROB, params.sizeStoreBuffer,
+                       params.sizeLoadBuffer), elasticStats(&_owner, _name)
         {
             DPRINTF(TraceCPUData, "Window size in the trace is %d.\n",
                     windowSize);
@@ -912,9 +896,9 @@
          * @param   num_dep     the number of dependencies set in the array
          *                      which may get modified during iteration
          */
-        template<typename T> void addDepsOnParent(GraphNode *new_node,
-                                                    T& dep_array,
-                                                    uint8_t& num_dep);
+        template<typename T>
+        void addDepsOnParent(GraphNode *new_node, T& dep_array,
+                             uint8_t& num_dep);

         /**
          * This is the main execute function which consumes nodes from the
@@ -976,14 +960,12 @@
* @param first true if this is the first attempt to issue this node
          * @return true if node was added to readyList
          */
-        bool checkAndIssue(const GraphNode* node_ptr, bool first = true);
+        bool checkAndIssue(const GraphNode* node_ptr, bool first=true);

         /** Get number of micro-ops modelled in the TraceCPU replay */
uint64_t getMicroOpCount() const { return trace.getMicroOpCount(); }

-
       private:
-
         /** Reference of the TraceCPU. */
         TraceCPU& owner;

@@ -1092,7 +1074,10 @@
     /** Event for the control flow method schedDcacheNext() */
     EventFunctionWrapper dcacheNextEvent;

- /** This is called when either generator finishes executing from the trace */
+    /**
+     * This is called when either generator finishes executing from the
+     * trace.
+     */
     void checkAndSchedExitEvent();

/** Set to true when one of the generators finishes replaying its trace. */



The change was submitted with unreviewed changes in the following files:

--
To view, visit https://gem5-review.googlesource.com/c/public/gem5/+/38385
To unsubscribe, or for help writing mail filters, visit https://gem5-review.googlesource.com/settings

Gerrit-Project: public/gem5
Gerrit-Branch: develop
Gerrit-Change-Id: I3ef51aa8667926f3c4fab3c11e188102dd4bab3c
Gerrit-Change-Number: 38385
Gerrit-PatchSet: 7
Gerrit-Owner: Gabe Black <gabe.bl...@gmail.com>
Gerrit-Reviewer: Andreas Sandberg <andreas.sandb...@arm.com>
Gerrit-Reviewer: Bobby R. Bruce <bbr...@ucdavis.edu>
Gerrit-Reviewer: Gabe Black <gabe.bl...@gmail.com>
Gerrit-Reviewer: Giacomo Travaglini <giacomo.travagl...@arm.com>
Gerrit-Reviewer: Jason Lowe-Power <ja...@lowepower.com>
Gerrit-Reviewer: kokoro <noreply+kok...@google.com>
Gerrit-MessageType: merged
_______________________________________________
gem5-dev mailing list -- gem5-dev@gem5.org
To unsubscribe send an email to gem5-dev-le...@gem5.org
%(web_page_url)slistinfo%(cgiext)s/%(_internal_name)s

Reply via email to