-----Original Message-----
From: g...@cmdmail.amd.com [mailto:g...@cmdmail.amd.com] 
Sent: Monday, November 09, 2009 10:33 AM
To: Beckmann, Brad
Subject: [PATCH 28 of 31] ruby: Changes necessary to get the hammer
protocol to work in GEM5

# HG changeset patch
# User Brad Beckmann <brad.beckm...@amd.com>
# Date 1257791383 28800
# Node ID c545ad8a3b16ec5d8e76eaf290624dfa65cc0c4b
# Parent  1adee9c6a42f6c8b8c4d9b87d77b63c45f006c33
ruby: Changes necessary to get the hammer protocol to work in GEM5

diff -r 1adee9c6a42f -r c545ad8a3b16
src/mem/protocol/MOESI_hammer-cache.sm
--- a/src/mem/protocol/MOESI_hammer-cache.sm    Mon Nov 09 10:29:43 2009
-0800
+++ b/src/mem/protocol/MOESI_hammer-cache.sm    Mon Nov 09 10:29:43 2009
-0800
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * Copyright (c) 2009 Advanced Micro Devices, Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -24,9 +25,24 @@
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Milo Martin
+ *          Brad Beckmann
  */
 
-machine(L1Cache, "AMD Hammer-like protocol") {
+machine(L1Cache, "AMD Hammer-like protocol") 
+: int cache_response_latency,
+  int issue_latency
+{
+
+  // NETWORK BUFFERS
+  MessageBuffer requestFromCache, network="To", virtual_network="3",
ordered="false";
+  MessageBuffer responseFromCache, network="To", virtual_network="1",
ordered="false";
+  MessageBuffer unblockFromCache, network="To", virtual_network="0",
ordered="false";
+
+  MessageBuffer forwardToCache, network="From", virtual_network="2",
ordered="false";
+  MessageBuffer responseToCache, network="From", virtual_network="1",
ordered="false";
+
 
   // STATES
   enumeration(State, desc="Cache states", default="L1Cache_State_I") {
@@ -82,14 +98,16 @@
 
   // TYPES
 
+  // STRUCTURE DEFINITIONS
+
+  MessageBuffer mandatoryQueue, ordered="false";
+  Sequencer sequencer,
factory='RubySystem::getSequencer(m_cfg["sequencer"])';
+
   // CacheEntry
-  structure(Entry, desc="...") {
-    Address Address,         desc="Address of this block, required by
CacheMemory";
-    Time LastRef,            desc="Last time this block was referenced,
required by CacheMemory";
-    AccessPermission Permission, desc="Access permission for this
block, required by CacheMemory"; 
-    DataBlock DataBlk,       desc="data for the block, required by
CacheMemory";
+  structure(Entry, desc="...", interface="AbstractCacheEntry") {
     State CacheState,        desc="cache state";
     bool Dirty,              desc="Is the data dirty (different than
memory)?";
+    DataBlock DataBlk,       desc="data for the block";
   }
 
   // TBE fields
@@ -101,27 +119,28 @@
     bool Sharers,            desc="On a GetS, did we find any other
sharers in the system";
   }
 
-  external_type(NewCacheMemory) {
+  external_type(CacheMemory) {
     bool cacheAvail(Address);
     Address cacheProbe(Address);
-    void allocate(Address);
+    void allocate(Address, Entry);
     void deallocate(Address);
     Entry lookup(Address);
     void changePermission(Address, AccessPermission);
     bool isTagPresent(Address);
+    void profileMiss(CacheMsg);
   }
 
-  external_type(NewTBETable) {
+  external_type(TBETable) {
     TBE lookup(Address);
     void allocate(Address);
     void deallocate(Address);
     bool isPresent(Address);
   }
 
-  NewTBETable TBEs, template_hack="<L1Cache_TBE>";
-  NewCacheMemory L1IcacheMemory, template_hack="<L1Cache_Entry>",
constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,"L1I"';
-  NewCacheMemory L1DcacheMemory, template_hack="<L1Cache_Entry>",
constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,"L1D"';
-  NewCacheMemory L2cacheMemory, template_hack="<L1Cache_Entry>",
constructor_hack='L2_CACHE_NUM_SETS_BITS,L2_CACHE_ASSOC,"L2"';
+  TBETable TBEs, template_hack="<L1Cache_TBE>";
+  CacheMemory L1IcacheMemory,
factory='RubySystem::getCache(m_cfg["icache"])';
+  CacheMemory L1DcacheMemory,
factory='RubySystem::getCache(m_cfg["dcache"])';
+  CacheMemory L2cacheMemory,
factory='RubySystem::getCache(m_cfg["cache"])';
 
   Entry getCacheEntry(Address addr), return_by_ref="yes" {
     if (L2cacheMemory.isTagPresent(addr)) {
@@ -284,36 +303,36 @@
           // ** INSTRUCTION ACCESS ***
 
           // Check to see if it is in the OTHER L1
-          if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
+          if (L1DcacheMemory.isTagPresent(in_msg.LineAddress)) {
             // The block is in the wrong L1, try to write it to the L2
-            if (L2cacheMemory.cacheAvail(in_msg.Address)) {
-              trigger(Event:L1_to_L2, in_msg.Address);
+            if (L2cacheMemory.cacheAvail(in_msg.LineAddress)) {
+              trigger(Event:L1_to_L2, in_msg.LineAddress);
             } else {
-              trigger(Event:L2_Replacement,
L2cacheMemory.cacheProbe(in_msg.Address));
+              trigger(Event:L2_Replacement,
L2cacheMemory.cacheProbe(in_msg.LineAddress));
             }
           }
 
-          if (L1IcacheMemory.isTagPresent(in_msg.Address)) { 
+          if (L1IcacheMemory.isTagPresent(in_msg.LineAddress)) { 
             // The tag matches for the L1, so the L1 fetches the line.
We know it can't be in the L2 due to exclusion
-            trigger(mandatory_request_type_to_event(in_msg.Type),
in_msg.Address);
+            trigger(mandatory_request_type_to_event(in_msg.Type),
in_msg.LineAddress);
           } else {
-            if (L1IcacheMemory.cacheAvail(in_msg.Address)) {
+            if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
               // L1 does't have the line, but we have space for it in
the L1
-              if (L2cacheMemory.isTagPresent(in_msg.Address)) {
+              if (L2cacheMemory.isTagPresent(in_msg.LineAddress)) {
                 // L2 has it (maybe not with the right permissions)
-                trigger(Event:L2_to_L1I, in_msg.Address);
+                trigger(Event:L2_to_L1I, in_msg.LineAddress);
               } else {
                 // We have room, the L2 doesn't have it, so the L1
fetches the line
-                trigger(mandatory_request_type_to_event(in_msg.Type),
in_msg.Address);
+                trigger(mandatory_request_type_to_event(in_msg.Type),
in_msg.LineAddress);
               }
             } else {
               // No room in the L1, so we need to make room
-              if
(L2cacheMemory.cacheAvail(L1IcacheMemory.cacheProbe(in_msg.Address))) {
+              if
(L2cacheMemory.cacheAvail(L1IcacheMemory.cacheProbe(in_msg.LineAddress))
) {
                 // The L2 has room, so we move the line from the L1 to
the L2
-                trigger(Event:L1_to_L2,
L1IcacheMemory.cacheProbe(in_msg.Address));
+                trigger(Event:L1_to_L2,
L1IcacheMemory.cacheProbe(in_msg.LineAddress));
               } else {
                 // The L2 does not have room, so we replace a line from
the L2
-                trigger(Event:L2_Replacement,
L2cacheMemory.cacheProbe(L1IcacheMemory.cacheProbe(in_msg.Address)));
+                trigger(Event:L2_Replacement,
L2cacheMemory.cacheProbe(L1IcacheMemory.cacheProbe(in_msg.LineAddress)))
;
               }
             }
           }
@@ -321,36 +340,36 @@
           // *** DATA ACCESS ***
 
           // Check to see if it is in the OTHER L1
-          if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
+          if (L1IcacheMemory.isTagPresent(in_msg.LineAddress)) {
             // The block is in the wrong L1, try to write it to the L2
-            if (L2cacheMemory.cacheAvail(in_msg.Address)) {
-              trigger(Event:L1_to_L2, in_msg.Address);
+            if (L2cacheMemory.cacheAvail(in_msg.LineAddress)) {
+              trigger(Event:L1_to_L2, in_msg.LineAddress);
             } else {
-              trigger(Event:L2_Replacement,
L2cacheMemory.cacheProbe(in_msg.Address));
+              trigger(Event:L2_Replacement,
L2cacheMemory.cacheProbe(in_msg.LineAddress));
             }
           }
 
-          if (L1DcacheMemory.isTagPresent(in_msg.Address)) { 
+          if (L1DcacheMemory.isTagPresent(in_msg.LineAddress)) { 
             // The tag matches for the L1, so the L1 fetches the line.
We know it can't be in the L2 due to exclusion
-            trigger(mandatory_request_type_to_event(in_msg.Type),
in_msg.Address);
+            trigger(mandatory_request_type_to_event(in_msg.Type),
in_msg.LineAddress);
           } else {
-            if (L1DcacheMemory.cacheAvail(in_msg.Address)) {
+            if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
               // L1 does't have the line, but we have space for it in
the L1
-              if (L2cacheMemory.isTagPresent(in_msg.Address)) {
+              if (L2cacheMemory.isTagPresent(in_msg.LineAddress)) {
                 // L2 has it (maybe not with the right permissions)
-                trigger(Event:L2_to_L1D, in_msg.Address);
+                trigger(Event:L2_to_L1D, in_msg.LineAddress);
               } else {
                 // We have room, the L2 doesn't have it, so the L1
fetches the line
-                trigger(mandatory_request_type_to_event(in_msg.Type),
in_msg.Address);
+                trigger(mandatory_request_type_to_event(in_msg.Type),
in_msg.LineAddress);
               }
             } else {
               // No room in the L1, so we need to make room
-              if
(L2cacheMemory.cacheAvail(L1DcacheMemory.cacheProbe(in_msg.Address))) {
+              if
(L2cacheMemory.cacheAvail(L1DcacheMemory.cacheProbe(in_msg.LineAddress))
) {
                 // The L2 has room, so we move the line from the L1 to
the L2
-                trigger(Event:L1_to_L2,
L1DcacheMemory.cacheProbe(in_msg.Address));
+                trigger(Event:L1_to_L2,
L1DcacheMemory.cacheProbe(in_msg.LineAddress));
               } else {
                 // The L2 does not have room, so we replace a line from
the L2
-                trigger(Event:L2_Replacement,
L2cacheMemory.cacheProbe(L1DcacheMemory.cacheProbe(in_msg.Address)));
+                trigger(Event:L2_Replacement,
L2cacheMemory.cacheProbe(L1DcacheMemory.cacheProbe(in_msg.LineAddress)))
;
               }
             }
           }
@@ -362,33 +381,33 @@
   // ACTIONS
 
   action(a_issueGETS, "a", desc="Issue GETS") {
-    enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
+    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
       out_msg.Address := address;
       out_msg.Type := CoherenceRequestType:GETS;
-      out_msg.Requestor := id;
-      out_msg.Destination.add(map_address_to_node(address));
+      out_msg.Requestor := machineID;
+      out_msg.Destination.add(map_Address_to_Directory(address));
       out_msg.MessageSize := MessageSizeType:Request_Control;
-      TBEs[address].NumPendingMsgs := numberOfNodes(); // One from each
other processor (n-1) plus the memory (+1)
+      TBEs[address].NumPendingMsgs := getNumberOfLastLevelCaches(); //
One from each other cache (n-1) plus the memory (+1)
     }
   }
 
   action(b_issueGETX, "b", desc="Issue GETX") {
-    enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
+    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
       out_msg.Address := address;
       out_msg.Type := CoherenceRequestType:GETX;
-      out_msg.Requestor := id;
-      out_msg.Destination.add(map_address_to_node(address));
+      out_msg.Requestor := machineID;
+      out_msg.Destination.add(map_Address_to_Directory(address));
       out_msg.MessageSize := MessageSizeType:Request_Control;
-      TBEs[address].NumPendingMsgs := numberOfNodes(); // One from each
other processor (n-1) plus the memory (+1)
+      TBEs[address].NumPendingMsgs := getNumberOfLastLevelCaches(); //
One from each other cache (n-1) plus the memory (+1)
     }
   }
 
   action(c_sendExclusiveData, "c", desc="Send exclusive data from cache
to requestor") {
     peek(forwardToCache_in, RequestMsg) {
-      enqueue(responseNetwork_out, ResponseMsg,
latency="CACHE_LATENCY") {
+      enqueue(responseNetwork_out, ResponseMsg,
latency=cache_response_latency) {
         out_msg.Address := address;
         out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
-        out_msg.Sender := id;
+        out_msg.Sender := machineID;
         out_msg.Destination.add(in_msg.Requestor);
         out_msg.DataBlk := getCacheEntry(address).DataBlk;
         out_msg.Dirty := getCacheEntry(address).Dirty;
@@ -399,21 +418,21 @@
   }
 
   action(d_issuePUT, "d", desc="Issue PUT") {
-    enqueue(requestNetwork_out, RequestMsg, latency="CACHE_LATENCY") {
+    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
       out_msg.Address := address;
       out_msg.Type := CoherenceRequestType:PUT;
-      out_msg.Requestor := id;
-      out_msg.Destination.add(map_address_to_node(address));
+      out_msg.Requestor := machineID;
+      out_msg.Destination.add(map_Address_to_Directory(address));
       out_msg.MessageSize := MessageSizeType:Writeback_Control;
     }
   }
 
   action(e_sendData, "e", desc="Send data from cache to requestor") {
     peek(forwardToCache_in, RequestMsg) {
-      enqueue(responseNetwork_out, ResponseMsg,
latency="CACHE_LATENCY") {
+      enqueue(responseNetwork_out, ResponseMsg,
latency=cache_response_latency) {
         out_msg.Address := address;
         out_msg.Type := CoherenceResponseType:DATA;
-        out_msg.Sender := id;
+        out_msg.Sender := machineID;
         out_msg.Destination.add(in_msg.Requestor);
         out_msg.DataBlk := getCacheEntry(address).DataBlk;
         out_msg.Dirty := getCacheEntry(address).Dirty;
@@ -425,10 +444,10 @@
 
   action(ee_sendDataShared, "\e", desc="Send data from cache to
requestor, keep a shared copy") {
     peek(forwardToCache_in, RequestMsg) {
-      enqueue(responseNetwork_out, ResponseMsg,
latency="CACHE_LATENCY") {
+      enqueue(responseNetwork_out, ResponseMsg,
latency=cache_response_latency) {
         out_msg.Address := address;
         out_msg.Type := CoherenceResponseType:DATA_SHARED;
-        out_msg.Sender := id;
+        out_msg.Sender := machineID;
         out_msg.Destination.add(in_msg.Requestor);
         out_msg.DataBlk := getCacheEntry(address).DataBlk;
         out_msg.Dirty := getCacheEntry(address).Dirty;
@@ -440,10 +459,10 @@
   
   action(f_sendAck, "f", desc="Send ack from cache to requestor") {
     peek(forwardToCache_in, RequestMsg) {
-      enqueue(responseNetwork_out, ResponseMsg,
latency="CACHE_LATENCY") {
+      enqueue(responseNetwork_out, ResponseMsg,
latency=cache_response_latency) {
         out_msg.Address := address;
         out_msg.Type := CoherenceResponseType:ACK;
-        out_msg.Sender := id;
+        out_msg.Sender := machineID;
         out_msg.Destination.add(in_msg.Requestor);
         out_msg.Acks := 1;
         out_msg.MessageSize := MessageSizeType:Response_Control;
@@ -453,10 +472,10 @@
 
   action(ff_sendAckShared, "\f", desc="Send shared ack from cache to
requestor") {
     peek(forwardToCache_in, RequestMsg) {
-      enqueue(responseNetwork_out, ResponseMsg,
latency="CACHE_LATENCY") {
+      enqueue(responseNetwork_out, ResponseMsg,
latency=cache_response_latency) {
         out_msg.Address := address;
         out_msg.Type := CoherenceResponseType:ACK_SHARED;
-        out_msg.Sender := id;
+        out_msg.Sender := machineID;
         out_msg.Destination.add(in_msg.Requestor);
         out_msg.Acks := 1;
         out_msg.MessageSize := MessageSizeType:Response_Control;
@@ -465,11 +484,11 @@
   }
 
   action(g_sendUnblock, "g", desc="Send unblock to memory") {
-    enqueue(unblockNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
+    enqueue(unblockNetwork_out, ResponseMsg,
latency=cache_response_latency) {
       out_msg.Address := address;
       out_msg.Type := CoherenceResponseType:UNBLOCK;
-      out_msg.Sender := id;
-      out_msg.Destination.add(map_address_to_node(address));
+      out_msg.Sender := machineID;
+      out_msg.Destination.add(map_Address_to_Directory(address));
       out_msg.MessageSize := MessageSizeType:Unblock_Control;
     }
   }
@@ -541,10 +560,10 @@
 
   action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to
cache") {
     peek(forwardToCache_in, RequestMsg) {
-      enqueue(responseNetwork_out, ResponseMsg,
latency="CACHE_LATENCY") {
+      enqueue(responseNetwork_out, ResponseMsg,
latency=cache_response_latency) {
         out_msg.Address := address;
         out_msg.Type := CoherenceResponseType:DATA;
-        out_msg.Sender := id;
+        out_msg.Sender := machineID;
         out_msg.Destination.add(in_msg.Requestor);
         out_msg.DataBlk := TBEs[address].DataBlk;
         out_msg.Dirty := TBEs[address].Dirty;
@@ -555,10 +574,10 @@
   }
 
   action(qq_sendDataFromTBEToMemory, "\q", desc="Send data from TBE to
memory") {
-    enqueue(unblockNetwork_out, ResponseMsg, latency="CACHE_LATENCY") {
+    enqueue(unblockNetwork_out, ResponseMsg,
latency=cache_response_latency) {
       out_msg.Address := address;
-      out_msg.Sender := id;
-      out_msg.Destination.add(map_address_to_node(address));
+      out_msg.Sender := machineID;
+      out_msg.Destination.add(map_Address_to_Directory(address));
       out_msg.Dirty := TBEs[address].Dirty;
       if (TBEs[address].Dirty) {
         out_msg.Type := CoherenceResponseType:WB_DIRTY;
@@ -583,10 +602,10 @@
   }
 
   action(t_sendExclusiveDataFromTBEToMemory, "t", desc="Send exclusive
data from TBE to memory") {
-    enqueue(unblockNetwork_out, ResponseMsg, latency="CACHE_LATENCY") {
+    enqueue(unblockNetwork_out, ResponseMsg,
latency=cache_response_latency) {
       out_msg.Address := address;
-      out_msg.Sender := id;
-      out_msg.Destination.add(map_address_to_node(address));
+      out_msg.Sender := machineID;
+      out_msg.Destination.add(map_Address_to_Directory(address));
       out_msg.DataBlk := TBEs[address].DataBlk; 
       out_msg.Dirty := TBEs[address].Dirty;
       if (TBEs[address].Dirty) {
@@ -628,18 +647,18 @@
   
   action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal
to tag of block B.") {
     if (L1DcacheMemory.isTagPresent(address) == false) {
-      L1DcacheMemory.allocate(address);
+      L1DcacheMemory.allocate(address, new Entry);
     }
   }
 
   action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal
to tag of block B.") {
     if (L1IcacheMemory.isTagPresent(address) == false) {
-      L1IcacheMemory.allocate(address);
+      L1IcacheMemory.allocate(address, new Entry);
     }
   }
 
   action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to
tag of block B.") {
-    L2cacheMemory.allocate(address);
+    L2cacheMemory.allocate(address, new Entry);
   }
 
   action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache
block.  Sets the cache to not present, allowing a replacement in
parallel with a fetch.") {
@@ -664,7 +683,13 @@
 
   action(uu_profileMiss, "\u", desc="Profile the demand miss") {
     peek(mandatoryQueue_in, CacheMsg) {
-      profile_miss(in_msg, id);
+      if (L1IcacheMemory.isTagPresent(address)) {
+        L1IcacheMemory.profileMiss(in_msg);
+      } else if (L1DcacheMemory.isTagPresent(address)) {
+        L1DcacheMemory.profileMiss(in_msg);
+      } else {
+        L2cacheMemory.profileMiss(in_msg);
+      }
     }
   }
 
diff -r 1adee9c6a42f -r c545ad8a3b16
src/mem/protocol/MOESI_hammer-dir.sm
--- a/src/mem/protocol/MOESI_hammer-dir.sm      Mon Nov 09 10:29:43 2009
-0800
+++ b/src/mem/protocol/MOESI_hammer-dir.sm      Mon Nov 09 10:29:43 2009
-0800
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * Copyright (c) 2009 Advanced Micro Devices, Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -24,9 +25,23 @@
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Milo Martin
+ *          Brad Beckmann
  */
 
-machine(Directory, "AMD Hammer-like protocol") {
+machine(Directory, "AMD Hammer-like protocol") 
+: int memory_controller_latency,
+  int memory_latency
+{
+
+  MessageBuffer forwardFromDir, network="To", virtual_network="2",
ordered="false";
+  MessageBuffer responseFromDir, network="To", virtual_network="1",
ordered="false";
+//  MessageBuffer dmaRequestFromDir, network="To", virtual_network="4",
ordered="true";
+
+  MessageBuffer requestToDir, network="From", virtual_network="3",
ordered="false";
+  MessageBuffer unblockToDir, network="From", virtual_network="0",
ordered="false";
+//  MessageBuffer dmaRequestToDir, network="From", virtual_network="5",
ordered="true";
 
   // STATES
   enumeration(State, desc="Directory states",
default="Directory_State_E") {
@@ -66,7 +81,7 @@
 
   // ** OBJECTS **
 
-  DirectoryMemory directory;
+  DirectoryMemory directory,
factory='RubySystem::getDirectory(m_cfg["directory_name"])';
 
   State getState(Address addr) {
     return directory[addr].DirectoryState;
@@ -123,7 +138,7 @@
   
   action(a_sendWriteBackAck, "a", desc="Send writeback ack to
requestor") {
     peek(requestQueue_in, RequestMsg) {
-      enqueue(forwardNetwork_out, RequestMsg,
latency="DIRECTORY_LATENCY") {
+      enqueue(forwardNetwork_out, RequestMsg,
latency=memory_controller_latency) {
         out_msg.Address := address;
         out_msg.Type := CoherenceRequestType:WB_ACK;
         out_msg.Requestor := in_msg.Requestor;
@@ -135,7 +150,7 @@
 
   action(b_sendWriteBackNack, "b", desc="Send writeback nack to
requestor") {
     peek(requestQueue_in, RequestMsg) {
-      enqueue(forwardNetwork_out, RequestMsg,
latency="DIRECTORY_LATENCY") {
+      enqueue(forwardNetwork_out, RequestMsg,
latency=memory_controller_latency) {
         out_msg.Address := address;
         out_msg.Type := CoherenceRequestType:WB_NACK;
         out_msg.Requestor := in_msg.Requestor;
@@ -147,10 +162,10 @@
 
   action(d_sendData, "d", desc="Send data to requestor") {
     peek(requestQueue_in, RequestMsg) {
-      enqueue(responseNetwork_out, ResponseMsg,
latency="MEMORY_LATENCY") {
+      enqueue(responseNetwork_out, ResponseMsg, latency=memory_latency)
{
         out_msg.Address := address;
         out_msg.Type := CoherenceResponseType:DATA;
-        out_msg.Sender := id;
+        out_msg.Sender := machineID;
         out_msg.Destination.add(in_msg.Requestor);
         out_msg.DataBlk := directory[in_msg.Address].DataBlk;
         out_msg.Dirty := false; // By definition, the block is now
clean
@@ -162,10 +177,10 @@
 
   action(dd_sendExclusiveData, "\d", desc="Send exclusive data to
requestor") {
     peek(requestQueue_in, RequestMsg) {
-      enqueue(responseNetwork_out, ResponseMsg,
latency="MEMORY_LATENCY") {
+      enqueue(responseNetwork_out, ResponseMsg, latency=memory_latency)
{
         out_msg.Address := address;
         out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
-        out_msg.Sender := id;
+        out_msg.Sender := machineID;
         out_msg.Destination.add(in_msg.Requestor);
         out_msg.DataBlk := directory[in_msg.Address].DataBlk;
         out_msg.Dirty := false; // By definition, the block is now
clean
@@ -176,9 +191,9 @@
   }
 
   action(f_forwardRequest, "f", desc="Forward requests") {
-    if (numberOfNodes() > 1) {
+    if (getNumberOfLastLevelCaches() > 1) {
       peek(requestQueue_in, RequestMsg) {
-        enqueue(forwardNetwork_out, RequestMsg,
latency="DIRECTORY_LATENCY") {
+        enqueue(forwardNetwork_out, RequestMsg,
latency=memory_controller_latency) {
           out_msg.Address := address;
           out_msg.Type := in_msg.Type;
           out_msg.Requestor := in_msg.Requestor;
diff -r 1adee9c6a42f -r c545ad8a3b16
src/mem/protocol/MOESI_hammer-msg.sm
--- a/src/mem/protocol/MOESI_hammer-msg.sm      Mon Nov 09 10:29:43 2009
-0800
+++ b/src/mem/protocol/MOESI_hammer-msg.sm      Mon Nov 09 10:29:43 2009
-0800
@@ -65,7 +65,7 @@
 structure(RequestMsg, desc="...", interface="NetworkMessage") {
   Address Address,             desc="Physical address for this
request";
   CoherenceRequestType Type,   desc="Type of request (GetS, GetX, PutX,
etc)";
-  NodeID Requestor,            desc="Node who initiated the request";
+  MachineID Requestor,            desc="Node who initiated the
request";
   NetDest Destination,             desc="Multicast destination mask";
   MessageSizeType MessageSize, desc="size category of the message";
 }
@@ -74,7 +74,7 @@
 structure(ResponseMsg, desc="...", interface="NetworkMessage") {
   Address Address,             desc="Physical address for this
request";
   CoherenceResponseType Type,  desc="Type of response (Ack, Data,
etc)";
-  NodeID Sender,               desc="Node who sent the data";
+  MachineID Sender,               desc="Node who sent the data";
   NetDest Destination,             desc="Node to whom the data is
sent";
   DataBlock DataBlk,           desc="data for the cache line";
   bool Dirty,                  desc="Is the data dirty (different than
memory)?";
diff -r 1adee9c6a42f -r c545ad8a3b16 src/mem/protocol/MOESI_hammer.slicc
--- a/src/mem/protocol/MOESI_hammer.slicc       Mon Nov 09 10:29:43 2009
-0800
+++ b/src/mem/protocol/MOESI_hammer.slicc       Mon Nov 09 10:29:43 2009
-0800
@@ -1,5 +1,4 @@
-../protocols/MOESI_hammer-msg.sm
-../protocols/hammer-ni.sm 
-../protocols/MOESI_hammer-cache.sm
-../protocols/MOESI_hammer-dir.sm
-../protocols/standard_1level-node.sm
+MOESI_hammer-msg.sm
+MOESI_hammer-cache.sm
+MOESI_hammer-dir.sm
+standard_1level_CMP-protocol.sm
diff -r 1adee9c6a42f -r c545ad8a3b16 src/mem/protocol/SConsopts
--- a/src/mem/protocol/SConsopts        Mon Nov 09 10:29:43 2009 -0800
+++ b/src/mem/protocol/SConsopts        Mon Nov 09 10:29:43 2009 -0800
@@ -47,6 +47,7 @@
     'MOSI_SMP_bcast_m',
     'MOSI_SMP_directory_1level',
     'MSI_MOSI_CMP_directory',
+    'MOESI_hammer',
     ]
 
 opt = EnumVariable('PROTOCOL', 'Coherence Protocol for Ruby',
'MI_example',

_______________________________________________
m5-dev mailing list
m5-dev@m5sim.org
http://m5sim.org/mailman/listinfo/m5-dev

Reply via email to