changeset a8d64545cda6 in /z/repo/m5
details: http://repo.m5sim.org/m5?cmd=changeset;node=a8d64545cda6
description:
        This patch supports cache flushing in MOESI_hammer

diffstat:

 configs/example/ruby_random_test.py    |    9 +-
 src/cpu/testers/rubytest/Check.cc      |   35 +++
 src/cpu/testers/rubytest/Check.hh      |    1 +
 src/cpu/testers/rubytest/RubyTester.cc |    3 +-
 src/cpu/testers/rubytest/RubyTester.hh |    2 +
 src/cpu/testers/rubytest/RubyTester.py |    1 +
 src/mem/packet.cc                      |    4 +-
 src/mem/packet.hh                      |    4 +
 src/mem/protocol/MOESI_hammer-cache.sm |  312 +++++++++++++++++++++++++++++++-
 src/mem/protocol/MOESI_hammer-dir.sm   |  135 +++++++++++++-
 src/mem/protocol/MOESI_hammer-msg.sm   |    3 +
 src/mem/protocol/RubySlicc_Exports.sm  |    1 +
 src/mem/ruby/system/RubyPort.cc        |   14 +-
 src/mem/ruby/system/Sequencer.cc       |   12 +-
 14 files changed, 508 insertions(+), 28 deletions(-)

diffs (truncated from 1084 to 300 lines):

diff -r 1333bd6cc2eb -r a8d64545cda6 configs/example/ruby_random_test.py
--- a/configs/example/ruby_random_test.py       Mon Mar 28 10:49:36 2011 -0500
+++ b/configs/example/ruby_random_test.py       Mon Mar 28 10:49:45 2011 -0500
@@ -82,7 +82,14 @@
 #
 # Create the ruby random tester
 #
-tester = RubyTester(checks_to_complete = options.checks,
+
+# Check the protocol
+check_flush = False
+if buildEnv['PROTOCOL'] == 'MOESI_hammer':
+    check_flush = True
+
+tester = RubyTester(check_flush = check_flush,
+                    checks_to_complete = options.checks,
                     wakeup_frequency = options.wakeup_freq)
 
 #
diff -r 1333bd6cc2eb -r a8d64545cda6 src/cpu/testers/rubytest/Check.cc
--- a/src/cpu/testers/rubytest/Check.cc Mon Mar 28 10:49:36 2011 -0500
+++ b/src/cpu/testers/rubytest/Check.cc Mon Mar 28 10:49:45 2011 -0500
@@ -59,6 +59,10 @@
         initiatePrefetch(); // Prefetch from random processor
     }
 
+    if (m_tester_ptr->getCheckFlush() && (random() & 0xff) == 0) {
+        initiateFlush(); // issue a Flush request from random processor
+    }
+
     if (m_status == TesterStatus_Idle) {
         initiateAction();
     } else if (m_status == TesterStatus_Ready) {
@@ -124,6 +128,37 @@
 }
 
 void
+Check::initiateFlush()
+{
+
+    DPRINTF(RubyTest, "initiating Flush\n");
+
+    int index = random() % m_num_cpu_sequencers;
+    RubyTester::CpuPort* port =
+        safe_cast<RubyTester::CpuPort*>(m_tester_ptr->getCpuPort(index));
+
+    Request::Flags flags;
+
+    Request *req = new Request(m_address.getAddress(), CHECK_SIZE, flags, curTick(),
+                               m_pc.getAddress());
+
+    Packet::Command cmd;
+
+    cmd = MemCmd::FlushReq;
+
+    PacketPtr pkt = new Packet(req, cmd, port->idx);
+
+    // push the subblock onto the sender state.  The sequencer will
+    // update the subblock on the return
+    pkt->senderState =
+        new SenderState(m_address, req->getSize(), pkt->senderState);
+
+    if (port->sendTiming(pkt)) {
+        DPRINTF(RubyTest, "initiating Flush - successful\n");
+    }
+}
+
+void
 Check::initiateAction()
 {
     DPRINTF(RubyTest, "initiating Action\n");
diff -r 1333bd6cc2eb -r a8d64545cda6 src/cpu/testers/rubytest/Check.hh
--- a/src/cpu/testers/rubytest/Check.hh Mon Mar 28 10:49:36 2011 -0500
+++ b/src/cpu/testers/rubytest/Check.hh Mon Mar 28 10:49:45 2011 -0500
@@ -58,6 +58,7 @@
     void print(std::ostream& out) const;
 
   private:
+    void initiateFlush();
     void initiatePrefetch();
     void initiateAction();
     void initiateCheck();
diff -r 1333bd6cc2eb -r a8d64545cda6 src/cpu/testers/rubytest/RubyTester.cc
--- a/src/cpu/testers/rubytest/RubyTester.cc    Mon Mar 28 10:49:36 2011 -0500
+++ b/src/cpu/testers/rubytest/RubyTester.cc    Mon Mar 28 10:49:45 2011 -0500
@@ -40,7 +40,8 @@
   : MemObject(p), checkStartEvent(this),
     m_checks_to_complete(p->checks_to_complete),
     m_deadlock_threshold(p->deadlock_threshold),
-    m_wakeup_frequency(p->wakeup_frequency)
+    m_wakeup_frequency(p->wakeup_frequency),
+    m_check_flush(p->check_flush)
 {
     m_checks_completed = 0;
 
diff -r 1333bd6cc2eb -r a8d64545cda6 src/cpu/testers/rubytest/RubyTester.hh
--- a/src/cpu/testers/rubytest/RubyTester.hh    Mon Mar 28 10:49:36 2011 -0500
+++ b/src/cpu/testers/rubytest/RubyTester.hh    Mon Mar 28 10:49:45 2011 -0500
@@ -99,6 +99,7 @@
     void printConfig(std::ostream& out) const {}
 
     void print(std::ostream& out) const;
+    bool getCheckFlush() { return m_check_flush; }
 
   protected:
     class CheckStartEvent : public Event
@@ -134,6 +135,7 @@
     int m_deadlock_threshold;
     int m_num_cpu_sequencers;
     int m_wakeup_frequency;
+    bool m_check_flush;
 };
 
 inline std::ostream&
diff -r 1333bd6cc2eb -r a8d64545cda6 src/cpu/testers/rubytest/RubyTester.py
--- a/src/cpu/testers/rubytest/RubyTester.py    Mon Mar 28 10:49:36 2011 -0500
+++ b/src/cpu/testers/rubytest/RubyTester.py    Mon Mar 28 10:49:45 2011 -0500
@@ -36,3 +36,4 @@
     checks_to_complete = Param.Int(100, "checks to complete")
     deadlock_threshold = Param.Int(50000, "how often to check for deadlock")
     wakeup_frequency = Param.Int(10, "number of cycles between wakeups")
+    check_flush = Param.Bool(False, "check cache flushing")
diff -r 1333bd6cc2eb -r a8d64545cda6 src/mem/packet.cc
--- a/src/mem/packet.cc Mon Mar 28 10:49:36 2011 -0500
+++ b/src/mem/packet.cc Mon Mar 28 10:49:45 2011 -0500
@@ -148,7 +148,9 @@
     /* BadAddressError   -- memory address invalid */
     { SET2(IsResponse, IsError), InvalidCmd, "BadAddressError" },
     /* PrintReq */
-    { SET2(IsRequest, IsPrint), InvalidCmd, "PrintReq" }
+    { SET2(IsRequest, IsPrint), InvalidCmd, "PrintReq" },
+    /* Flush Request */
+    { SET3(IsRequest, IsFlush, NeedsExclusive), InvalidCmd, "FlushReq" }
 };
 
 bool
diff -r 1333bd6cc2eb -r a8d64545cda6 src/mem/packet.hh
--- a/src/mem/packet.hh Mon Mar 28 10:49:36 2011 -0500
+++ b/src/mem/packet.hh Mon Mar 28 10:49:45 2011 -0500
@@ -105,6 +105,7 @@
         BadAddressError,   // memory address invalid
         // Fake simulator-only commands
         PrintReq,       // Print state matching address
+        FlushReq,      //request for a cache flush
         NUM_MEM_CMDS
     };
 
@@ -129,6 +130,7 @@
         HasData,        //!< There is an associated payload
         IsError,        //!< Error response
         IsPrint,        //!< Print state matching address (for debugging)
+        IsFlush,        //!< Flush the address from caches
         NUM_COMMAND_ATTRIBUTES
     };
 
@@ -175,6 +177,7 @@
     bool isLLSC() const         { return testCmdAttrib(IsLlsc); }
     bool isError() const        { return testCmdAttrib(IsError); }
     bool isPrint() const        { return testCmdAttrib(IsPrint); }
+    bool isFlush() const        { return testCmdAttrib(IsFlush); }
 
     const Command
     responseCommand() const
@@ -411,6 +414,7 @@
     bool isLLSC() const         { return cmd.isLLSC(); }
     bool isError() const        { return cmd.isError(); }
     bool isPrint() const        { return cmd.isPrint(); }
+    bool isFlush() const        { return cmd.isFlush(); }
 
     // Snoop flags
     void assertMemInhibit()     { flags.set(MEM_INHIBIT); }
diff -r 1333bd6cc2eb -r a8d64545cda6 src/mem/protocol/MOESI_hammer-cache.sm
--- a/src/mem/protocol/MOESI_hammer-cache.sm    Mon Mar 28 10:49:36 2011 -0500
+++ b/src/mem/protocol/MOESI_hammer-cache.sm    Mon Mar 28 10:49:45 2011 -0500
@@ -78,7 +78,16 @@
     ST, AccessPermission:Busy, "ST", desc="S block transferring to L1";
     OT, AccessPermission:Busy, "OT", desc="O block transferring to L1";
     MT, AccessPermission:Busy, "MT", desc="M block transferring to L1";
-    MMT, AccessPermission:Busy, "MMT", desc="MM block transferring to L1";
+    MMT, AccessPermission:Busy, "MMT", desc="MM block transferring to L0";
+
+    //Transition States Related to Flushing
+    MI_F, AccessPermission:Busy, "MI_F", desc="Issued PutX due to a Flush, waiting for ack";
+    MM_F, AccessPermission:Busy, "MM_F", desc="Issued GETF due to a Flush, waiting for ack";
+    IM_F, AccessPermission:Busy, "IM_F", desc="Issued GetX due to a Flush";
+    ISM_F, AccessPermission:Read_Only, "ISM_F", desc="Issued GetX, received data, waiting for all acks";
+    SM_F, AccessPermission:Read_Only, "SM_F", desc="Issued GetX, we still have an old copy of the line";
+    OM_F, AccessPermission:Read_Only, "OM_F", desc="Issued GetX, received data";
+    MM_WF, AccessPermission:Busy, "MM_WF", desc="Issued GetX, received exclusive data";
   }
 
   // EVENTS
@@ -113,6 +122,10 @@
     // Triggers
     All_acks,                  desc="Received all required data and message acks";
     All_acks_no_sharers,        desc="Received all acks and no other processor has a shared copy";
+
+    // For Flush
+    Flush_line,                  desc="flush the cache line from all caches";
+    Block_Ack,                   desc="the directory is blocked and ready for the flush";
   }
 
   // TYPES
@@ -221,6 +234,8 @@
       return Event:Ifetch;
     } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
       return Event:Store;
+    } else if ((type == RubyRequestType:FLUSH)) {
+      return Event:Flush_line;
     } else {
       error("Invalid RubyRequestType");
     }
@@ -318,7 +333,7 @@
         Entry cache_entry := getCacheEntry(in_msg.Address);
         TBE tbe := TBEs[in_msg.Address];
 
-        if (in_msg.Type == CoherenceRequestType:GETX) {
+        if ((in_msg.Type == CoherenceRequestType:GETX) || (in_msg.Type == CoherenceRequestType:GETF)) {
           trigger(Event:Other_GETX, in_msg.Address, cache_entry, tbe);
         } else if (in_msg.Type == CoherenceRequestType:MERGED_GETS) {
           trigger(Event:Merged_GETS, in_msg.Address, cache_entry, tbe);
@@ -342,6 +357,8 @@
           trigger(Event:Writeback_Ack, in_msg.Address, cache_entry, tbe);
         } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
           trigger(Event:Writeback_Nack, in_msg.Address, cache_entry, tbe);
+        } else if (in_msg.Type == CoherenceRequestType:BLOCK_ACK) {
+          trigger(Event:Block_Ack, in_msg.Address, cache_entry, tbe);
         } else {
           error("Unexpected message");
         }
@@ -504,6 +521,19 @@
     }
   }
 
+  action(bf_issueGETF, "bf", desc="Issue GETF") {
+    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
+      assert(is_valid(tbe));
+      out_msg.Address := address;
+      out_msg.Type := CoherenceRequestType:GETF;
+      out_msg.Requestor := machineID;
+      out_msg.Destination.add(map_Address_to_Directory(address));
+      out_msg.MessageSize := MessageSizeType:Request_Control;
+      out_msg.InitialRequestTime := get_time();
+      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
+    }
+  }
+
   action(c_sendExclusiveData, "c", desc="Send exclusive data from cache to requestor") {
     peek(forwardToCache_in, RequestMsg) {
       enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
@@ -527,6 +557,29 @@
     }
   }
 
+  action(ct_sendExclusiveDataFromTBE, "ct", desc="Send exclusive data from tbe to requestor") {
+    peek(forwardToCache_in, RequestMsg) {
+      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
+        assert(is_valid(tbe));
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+        out_msg.Sender := machineID;
+        out_msg.Destination.add(in_msg.Requestor);
+        out_msg.DataBlk := tbe.DataBlk;
+        out_msg.Dirty := tbe.Dirty;
+        if (in_msg.DirectedProbe) {
+          out_msg.Acks := machineCount(MachineType:L1Cache);
+        } else {
+          out_msg.Acks := 2;
+        }
+        out_msg.SilentAcks := in_msg.SilentAcks;
+        out_msg.MessageSize := MessageSizeType:Response_Data;
+        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
+      }
+    }
+  }
+
   action(d_issuePUT, "d", desc="Issue PUT") {
     enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
       out_msg.Address := address;
@@ -537,6 +590,16 @@
     }
   }
 
+  action(df_issuePUTF, "df", desc="Issue PUTF") {
+    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
+      out_msg.Address := address;
+      out_msg.Type := CoherenceRequestType:PUTF;
+      out_msg.Requestor := machineID;
+      out_msg.Destination.add(map_Address_to_Directory(address));
+      out_msg.MessageSize := MessageSizeType:Writeback_Control;
+    }
+  }
+
   action(e_sendData, "e", desc="Send data from cache to requestor") {
     peek(forwardToCache_in, RequestMsg) {
       enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
@@ -583,7 +646,31 @@
       }
     }
_______________________________________________
m5-dev mailing list
m5-dev@m5sim.org
http://m5sim.org/mailman/listinfo/m5-dev

Reply via email to