Gabe Black has uploaded this change for review. ( https://gem5-review.googlesource.com/c/public/gem5/+/36976 )

Change subject: cpu: Style fixes in the AtomicSimpleCPU.
......................................................................

cpu: Style fixes in the AtomicSimpleCPU.

Change-Id: I42391e5a75c55022077f1ef78df97c54fa70f198
---
M src/cpu/simple/atomic.cc
M src/cpu/simple/atomic.hh
2 files changed, 53 insertions(+), 73 deletions(-)



diff --git a/src/cpu/simple/atomic.cc b/src/cpu/simple/atomic.cc
index 70162c9..8658353 100644
--- a/src/cpu/simple/atomic.cc
+++ b/src/cpu/simple/atomic.cc
@@ -95,9 +95,8 @@

 AtomicSimpleCPU::~AtomicSimpleCPU()
 {
-    if (tickEvent.scheduled()) {
+    if (tickEvent.scheduled())
         deschedule(tickEvent);
-    }
 }

 DrainState
@@ -130,9 +129,8 @@

     for (ThreadID tid = 0; tid < numThreads; tid++) {
         if (tid != sender) {
-            if (getCpuAddrMonitor(tid)->doMonitor(pkt)) {
+            if (getCpuAddrMonitor(tid)->doMonitor(pkt))
                 wakeup(tid);
-            }

             TheISA::handleLockedSnoop(threadInfo[tid]->thread,
                                       pkt, dcachePort.cacheBlockMask);
@@ -161,9 +159,8 @@
             _status = BaseSimpleCPU::Running;

             // Tick if any threads active
-            if (!tickEvent.scheduled()) {
+            if (!tickEvent.scheduled())
                 schedule(tickEvent, nextCycle());
-            }
         } else {
             threadInfo[tid]->notIdleFraction = 0;
         }
@@ -213,10 +210,9 @@
 void
 AtomicSimpleCPU::verifyMemoryMode() const
 {
-    if (!system->isAtomicMode()) {
-        fatal("The atomic CPU requires the memory system to be in "
-              "'atomic' mode.\n");
-    }
+    fatal_if(!system->isAtomicMode(),
+            "The atomic CPU requires the memory system to be in "
+              "'atomic' mode.");
 }

 void
@@ -236,8 +232,8 @@
         schedule(tickEvent, clockEdge(Cycles(0)));
     }
     _status = BaseSimpleCPU::Running;
-    if (std::find(activeThreads.begin(), activeThreads.end(), thread_num)
-        == activeThreads.end()) {
+    if (std::find(activeThreads.begin(), activeThreads.end(), thread_num) ==
+        activeThreads.end()) {
         activeThreads.push_back(thread_num);
     }

@@ -263,9 +259,8 @@
     if (activeThreads.empty()) {
         _status = Idle;

-        if (tickEvent.scheduled()) {
+        if (tickEvent.scheduled())
             deschedule(tickEvent);
-        }
     }

     BaseCPU::suspendContext(thread_num);
@@ -287,9 +282,8 @@
     AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);

     for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
-        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
+        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt))
             cpu->wakeup(tid);
-        }
     }

     // if snoop invalidates, release any associated locks
@@ -299,9 +293,8 @@
     if (pkt->isInvalidate() || pkt->isWrite()) {
         DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                 pkt->getAddr());
-        for (auto &t_info : cpu->threadInfo) {
+        for (auto &t_info : cpu->threadInfo)
             TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
-        }
     }

     return 0;
@@ -316,18 +309,16 @@
     // X86 ISA: Snooping an invalidation for monitor/mwait
     AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);
     for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
-        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
+        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt))
             cpu->wakeup(tid);
-        }
     }

     // if snoop invalidates, release any associated locks
     if (pkt->isInvalidate()) {
         DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                 pkt->getAddr());
-        for (auto &t_info : cpu->threadInfo) {
+        for (auto &t_info : cpu->threadInfo)
             TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
-        }
     }
 }

@@ -342,7 +333,7 @@

     frag_size = std::min(
         cacheLineSize() - addrBlockOffset(frag_addr, cacheLineSize()),
-        (Addr) size_left);
+        (Addr)size_left);
     size_left -= frag_size;

     // Set up byte-enable mask for the current fragment
@@ -360,12 +351,12 @@
 }

 Fault
-AtomicSimpleCPU::readMem(Addr addr, uint8_t * data, unsigned size,
+AtomicSimpleCPU::readMem(Addr addr, uint8_t *data, unsigned size,
                          Request::Flags flags,
                          const std::vector<bool>& byte_enable)
 {
-    SimpleExecContext& t_info = *threadInfo[curThread];
-    SimpleThread* thread = t_info.thread;
+    SimpleExecContext &t_info = *threadInfo[curThread];
+    SimpleThread *thread = t_info.thread;

     // use the CPU's statically allocated read request and packet objects
     const RequestPtr &req = data_read_req;
@@ -399,28 +390,21 @@
             Packet pkt(req, Packet::makeReadCmd(req));
             pkt.dataStatic(data);

-            if (req->isLocalAccess()) {
+            if (req->isLocalAccess())
                 dcache_latency += req->localAccessor(thread->getTC(), &pkt);
-            } else {
+            else
                 dcache_latency += sendPacket(dcachePort, &pkt);
-            }
             dcache_access = true;

             assert(!pkt.isError());

-            if (req->isLLSC()) {
+            if (req->isLLSC())
                 TheISA::handleLockedRead(thread, req);
-            }
         }

         //If there's a fault, return it
-        if (fault != NoFault) {
-            if (req->isPrefetch()) {
-                return NoFault;
-            } else {
-                return fault;
-            }
-        }
+        if (fault != NoFault)
+            return req->isPrefetch() ? NoFault : fault;

         // If we don't need to access further cache lines, stop now.
         if (size_left == 0) {
@@ -447,7 +431,7 @@
                           const std::vector<bool>& byte_enable)
 {
     SimpleExecContext& t_info = *threadInfo[curThread];
-    SimpleThread* thread = t_info.thread;
+    SimpleThread *thread = t_info.thread;
     static uint8_t zero_array[64] = {};

     if (data == NULL) {
@@ -522,25 +506,19 @@
                 }
             }

-            if (res && !req->isSwap()) {
+            if (res && !req->isSwap())
                 *res = req->getExtraData();
-            }
         }

         //If there's a fault or we don't need to access a second cache line,
         //stop now.
-        if (fault != NoFault || size_left == 0)
-        {
+        if (fault != NoFault || size_left == 0) {
             if (req->isLockedRMW() && fault == NoFault) {
                 assert(!req->isMasked());
                 locked = false;
             }

-            if (fault != NoFault && req->isPrefetch()) {
-                return NoFault;
-            } else {
-                return fault;
-            }
+            return req->isPrefetch() ? NoFault : fault;
         }

         /*
@@ -559,8 +537,8 @@
 AtomicSimpleCPU::amoMem(Addr addr, uint8_t* data, unsigned size,
                         Request::Flags flags, AtomicOpFunctorPtr amo_op)
 {
-    SimpleExecContext& t_info = *threadInfo[curThread];
-    SimpleThread* thread = t_info.thread;
+    SimpleExecContext &t_info = *threadInfo[curThread];
+    SimpleThread *thread = t_info.thread;

     // use the CPU's statically allocated amo request and packet objects
     const RequestPtr &req = data_amo_req;
@@ -579,9 +557,8 @@
     // accesses that cross cache-line boundaries, the cache needs to be
     // modified to support locking both cache lines to guarantee the
     // atomicity.
-    if (secondAddr > addr) {
- panic("AMO request should not access across a cache line boundary\n");
-    }
+    panic_if(secondAddr > addr,
+        "AMO request should not access across a cache line boundary.");

     dcache_latency = 0;

@@ -600,9 +577,9 @@
         Packet pkt(req, Packet::makeWriteCmd(req));
         pkt.dataStatic(data);

-        if (req->isLocalAccess())
+        if (req->isLocalAccess()) {
             dcache_latency += req->localAccessor(thread->getTC(), &pkt);
-        else {
+        } else {
             dcache_latency += sendPacket(dcachePort, &pkt);
         }

@@ -612,9 +589,8 @@
         assert(!req->isLLSC());
     }

-    if (fault != NoFault && req->isPrefetch()) {
+    if (fault != NoFault && req->isPrefetch())
         return NoFault;
-    }

     //If there's a fault and we're not doing prefetch, return it
     return fault;
@@ -628,7 +604,7 @@
     // Change thread if multi-threaded
     swapActiveThread();

-    // Set memroy request ids to current thread
+    // Set memory request ids to current thread
     if (numThreads > 1) {
         ContextID cid = threadContexts[curThread]->contextId();

@@ -638,8 +614,8 @@
         data_amo_req->setContext(cid);
     }

-    SimpleExecContext& t_info = *threadInfo[curThread];
-    SimpleThread* thread = t_info.thread;
+    SimpleExecContext &t_info = *threadInfo[curThread];
+    SimpleThread *thread = t_info.thread;

     Tick latency = 0;

@@ -692,8 +668,8 @@

                     assert(!ifetch_pkt.isError());

-                    // ifetch_req is initialized to read the instruction directly
-                    // into the CPU object's inst field.
+                    // ifetch_req is initialized to read the instruction
+                    // directly into the CPU object's inst field.
                 //}
             }

@@ -724,8 +700,9 @@

             // @todo remove me after debugging with legion done
             if (curStaticInst && (!curStaticInst->isMicroop() ||
-                        curStaticInst->isFirstMicroop()))
+                        curStaticInst->isFirstMicroop())) {
                 instCnt++;
+            }

             if (simulate_inst_stalls && icache_access)
                 stall_ticks += icache_latency;
diff --git a/src/cpu/simple/atomic.hh b/src/cpu/simple/atomic.hh
index 26f4c0c..7c7269a 100644
--- a/src/cpu/simple/atomic.hh
+++ b/src/cpu/simple/atomic.hh
@@ -86,12 +86,12 @@
      * <li>Stay at PC is true.
      * </ul>
      */
-    bool isCpuDrained() const {
+    bool
+    isCpuDrained() const
+    {
         SimpleExecContext &t_info = *threadInfo[curThread];
-
         return t_info.thread->microPC() == 0 &&
-            !locked &&
-            !t_info.stayAtPC;
+            !locked && !t_info.stayAtPC;
     }

     /**
@@ -120,13 +120,14 @@

       protected:

-        bool recvTimingResp(PacketPtr pkt)
+        bool
+        recvTimingResp(PacketPtr pkt)
         {
             panic("Atomic CPU doesn't expect recvTimingResp!\n");
-            return true;
         }

-        void recvReqRetry()
+        void
+        recvReqRetry()
         {
             panic("Atomic CPU doesn't expect recvRetry!\n");
         }
@@ -219,13 +220,15 @@
                   const std::vector<bool>& byte_enable = std::vector<bool>())
         override;

-    Fault initiateHtmCmd(Request::Flags flags) override
+    Fault
+    initiateHtmCmd(Request::Flags flags) override
     {
         panic("initiateHtmCmd() is for timing accesses, and should "
               "never be called on AtomicSimpleCPU.\n");
     }

-    void htmSendAbortSignal(HtmFailureFaultCause cause) override
+    void
+    htmSendAbortSignal(HtmFailureFaultCause cause) override
     {
         panic("htmSendAbortSignal() is for timing accesses, and should "
               "never be called on AtomicSimpleCPU.\n");
@@ -233,7 +236,7 @@

     Fault writeMem(uint8_t *data, unsigned size,
                    Addr addr, Request::Flags flags, uint64_t *res,
-                   const std::vector<bool>& byte_enable = std::vector<bool>())
+                   const std::vector<bool>& byte_enable=std::vector<bool>())
         override;

     Fault amoMem(Addr addr, uint8_t* data, unsigned size,

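Note for reviewers unfamiliar with the conventions being applied: the recurring
pattern in this patch replaces "if (cond) { fatal(...); }" with gem5's
fatal_if()/panic_if() helpers (declared in base/logging.hh), drops braces
around single-statement if bodies, and puts function return types on their own
line. Below is a minimal, standalone sketch of the fatal_if idiom; the
fatal_if macro and isAtomicMode() function here are simplified stand-ins for
illustration, not the real gem5 definitions:

    // Illustrative only: a toy fatal_if; gem5's real macro lives in
    // src/base/logging.hh and takes a printf-style format string.
    #include <cstdio>
    #include <cstdlib>

    #define fatal_if(cond, msg)                              \
        do {                                                 \
            if (cond) {                                      \
                std::fprintf(stderr, "fatal: %s\n", (msg));  \
                std::exit(1);                                \
            }                                                \
        } while (0)

    // Hypothetical stand-in for system->isAtomicMode().
    static bool isAtomicMode() { return true; }

    void
    verifyMemoryMode()
    {
        // Old style:  if (!isAtomicMode()) { fatal("..."); }
        // New style:  a single fatal_if() call, no wrapping if or braces.
        fatal_if(!isAtomicMode(),
                 "The atomic CPU requires the memory system to be in "
                 "'atomic' mode.");
    }

    int
    main()
    {
        verifyMemoryMode();
        return 0;
    }
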
--
To view, visit https://gem5-review.googlesource.com/c/public/gem5/+/36976
To unsubscribe, or for help writing mail filters, visit https://gem5-review.googlesource.com/settings

Gerrit-Project: public/gem5
Gerrit-Branch: develop
Gerrit-Change-Id: I42391e5a75c55022077f1ef78df97c54fa70f198
Gerrit-Change-Number: 36976
Gerrit-PatchSet: 1
Gerrit-Owner: Gabe Black <[email protected]>
Gerrit-MessageType: newchange