changeset 5dc128cab5dc in /z/repo/m5
details: http://repo.m5sim.org/m5?cmd=changeset;node=5dc128cab5dc
description:
        ruby: fixed token bugs associated with owner token counts

        This patch fixes several bugs related to previous inconsistent 
assumptions on
        how many tokens the Owner had.  Mike Marty should have fixed these bugs 
years
        ago.  :)

diffstat:

 src/mem/protocol/MOESI_CMP_token-L1cache.sm |  152 ++++++++++++++++++++-------
 src/mem/protocol/MOESI_CMP_token-L2cache.sm |   73 +++++++++---
 2 files changed, 165 insertions(+), 60 deletions(-)

diffs (truncated from 616 to 300 lines):

diff -r ccd55d73c75d -r 5dc128cab5dc src/mem/protocol/MOESI_CMP_token-L1cache.sm
--- a/src/mem/protocol/MOESI_CMP_token-L1cache.sm       Fri Aug 20 11:46:13 
2010 -0700
+++ b/src/mem/protocol/MOESI_CMP_token-L1cache.sm       Fri Aug 20 11:46:13 
2010 -0700
@@ -112,6 +112,7 @@
     // Lock/Unlock for distributed
     Persistent_GETX,     desc="Another processor has priority to read/write";
     Persistent_GETS,     desc="Another processor has priority to read";
+    Persistent_GETS_Last_Token, desc="Another processor has priority to read, 
no more tokens";
     Own_Lock_or_Unlock,  desc="This processor now has priority";
 
     // Triggers
@@ -208,6 +209,7 @@
 
   Entry getCacheEntry(Address addr), return_by_ref="yes" {
     if (L1DcacheMemory.isTagPresent(addr)) {
+      assert(L1IcacheMemory.isTagPresent(addr) == false);
       return static_cast(Entry, L1DcacheMemory[addr]);
     } else {
       return static_cast(Entry, L1IcacheMemory[addr]);
@@ -216,6 +218,7 @@
 
   int getTokens(Address addr) {
     if (L1DcacheMemory.isTagPresent(addr)) {
+      assert(L1IcacheMemory.isTagPresent(addr) == false);
       return static_cast(Entry, L1DcacheMemory[addr]).Tokens;
     } else if (L1IcacheMemory.isTagPresent(addr)) {
       return static_cast(Entry, L1IcacheMemory[addr]).Tokens;
@@ -269,6 +272,7 @@
       // Make sure the token count is in range
       assert(getCacheEntry(addr).Tokens >= 0);
       assert(getCacheEntry(addr).Tokens <= max_tokens());
+      assert(getCacheEntry(addr).Tokens != (max_tokens() / 2));
 
       if ((state == State:I_L) ||
           (state == State:IM_L) ||
@@ -287,6 +291,7 @@
       } else if ((state == State:S_L) ||
                  (state == State:SM_L)) {
         assert(getCacheEntry(addr).Tokens >= 1);
+        assert(getCacheEntry(addr).Tokens < (max_tokens() / 2));
 
         // Make sure the line is locked...
         // assert(persistentTable.isLocked(addr));
@@ -327,8 +332,7 @@
 
       // You have at least half the token in O-like states
       if (state == State:O && state == State:OM) {
-        assert(getCacheEntry(addr).Tokens >= 1); // Must have at least one 
token
-        assert(getCacheEntry(addr).Tokens >= (max_tokens() / 2)); // Only 
mostly true; this might not always hold
+        assert(getCacheEntry(addr).Tokens > (max_tokens() / 2));
       }
 
       getCacheEntry(addr).CacheState := state;
@@ -462,7 +466,12 @@
             trigger(Event:Own_Lock_or_Unlock, in_msg.Address);
           } else {
             if (persistentTable.typeOfSmallest(in_msg.Address) == 
AccessType:Read) {
-              trigger(Event:Persistent_GETS, in_msg.Address);
+              if (getTokens(in_msg.Address) == 1 ||
+                  getTokens(in_msg.Address) == (max_tokens() / 2) + 1) {
+                trigger(Event:Persistent_GETS_Last_Token, in_msg.Address);
+              } else {
+                trigger(Event:Persistent_GETS, in_msg.Address);
+              }
             } else {
               trigger(Event:Persistent_GETX, in_msg.Address);
             }
@@ -489,7 +498,8 @@
             trigger(Event:Transient_GETX, in_msg.Address);
           }
         } else if (in_msg.Type == CoherenceRequestType:GETS) {
-          if ( (L1DcacheMemory.isTagPresent(in_msg.Address) || 
L1IcacheMemory.isTagPresent(in_msg.Address)) && 
getCacheEntry(in_msg.Address).Tokens == 1) {
+          if (getTokens(in_msg.Address) == 1 || 
+              getTokens(in_msg.Address) == (max_tokens() / 2) + 1) {
             if (in_msg.isLocal) {
               trigger(Event:Transient_Local_GETS_Last_Token, in_msg.Address);
             }
@@ -557,16 +567,19 @@
 
         if (getTokens(in_msg.Address) + in_msg.Tokens != max_tokens()) {
           if (in_msg.Type == CoherenceResponseType:ACK) {
+            assert(in_msg.Tokens < (max_tokens() / 2));
             trigger(Event:Ack, in_msg.Address);
           } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
             trigger(Event:Data_Owner, in_msg.Address);
           } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
+            assert(in_msg.Tokens < (max_tokens() / 2));
             trigger(Event:Data_Shared, in_msg.Address);
           } else {
             error("Unexpected message");
           }
         } else {
           if (in_msg.Type == CoherenceResponseType:ACK) {
+            assert(in_msg.Tokens < (max_tokens() / 2));
             trigger(Event:Ack_All_Tokens, in_msg.Address);
           } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER || 
in_msg.Type == CoherenceResponseType:DATA_SHARED) {
             trigger(Event:Data_All_Tokens, in_msg.Address);
@@ -914,10 +927,32 @@
     getCacheEntry(address).Tokens := 0;
   }
 
-  action(cc_sharedReplacement, "\c", desc="Issue dirty writeback") {
+  action(cc_sharedReplacement, "\c", desc="Issue shared writeback") {
 
     // don't send writeback if replacing block with no tokens
-    if (getCacheEntry(address).Tokens != 0) {
+    assert (getCacheEntry(address).Tokens > 0);
+    enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
+        out_msg.Address := address;
+        out_msg.Sender := machineID;
+
+        out_msg.Destination.add(mapAddressToRange(address,
+                                                  MachineType:L2Cache,
+                                                  l2_select_low_bit,
+                                                  l2_select_num_bits));
+
+        out_msg.Tokens := getCacheEntry(address).Tokens;
+        out_msg.DataBlk := getCacheEntry(address).DataBlk;
+        // assert(getCacheEntry(address).Dirty == false);
+        out_msg.Dirty := false;
+
+        out_msg.MessageSize := MessageSizeType:Writeback_Data;
+        out_msg.Type := CoherenceResponseType:WB_SHARED_DATA;
+    }
+    getCacheEntry(address).Tokens := 0;
+  }
+
+  action(tr_tokenReplacement, "tr", desc="Issue token writeback") {
+    if (getCacheEntry(address).Tokens > 0) {
       enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) 
{
         out_msg.Address := address;
         out_msg.Sender := machineID;
@@ -933,16 +968,11 @@
         out_msg.Dirty := false;
 
         // always send the data?
-        if (getCacheEntry(address).Tokens > 1) {
-          out_msg.MessageSize := MessageSizeType:Writeback_Data;
-          out_msg.Type := CoherenceResponseType:WB_SHARED_DATA;
-        } else {
-          out_msg.MessageSize := MessageSizeType:Writeback_Control;
-          out_msg.Type := CoherenceResponseType:WB_TOKENS;
-        }
+        out_msg.MessageSize := MessageSizeType:Writeback_Control;
+        out_msg.Type := CoherenceResponseType:WB_TOKENS;
       }
-      getCacheEntry(address).Tokens := 0;
     }
+    getCacheEntry(address).Tokens := 0;
   }
 
 
@@ -970,7 +1000,7 @@
 
   action(d_sendDataWithNTokenIfAvail, "\dd", desc="Send data and a token from 
cache to requestor") {
     peek(requestNetwork_in, RequestMsg) {
-      if (getCacheEntry(address).Tokens > N_tokens) {
+      if (getCacheEntry(address).Tokens > (N_tokens + (max_tokens() / 2))) {
         enqueue(responseNetwork_out, ResponseMsg, latency = 
l1_response_latency) {
           out_msg.Address := address;
           out_msg.Type := CoherenceResponseType:DATA_SHARED;
@@ -1017,7 +1047,7 @@
         out_msg.Type := CoherenceResponseType:DATA_OWNER;
         out_msg.Sender := machineID;
         out_msg.Destination.add(in_msg.Requestor);
-        assert(getCacheEntry(address).Tokens >= 1);
+        assert(getCacheEntry(address).Tokens > (max_tokens() / 2));
         out_msg.Tokens := getCacheEntry(address).Tokens;
         out_msg.DataBlk := getCacheEntry(address).DataBlk;
         out_msg.Dirty := getCacheEntry(address).Dirty;
@@ -1036,11 +1066,16 @@
     if (getCacheEntry(address).Tokens > 0) {
       enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) 
{
         out_msg.Address := address;
-        out_msg.Type := CoherenceResponseType:ACK;
+        if (getCacheEntry(address).Tokens > (max_tokens() / 2)) {
+          out_msg.Type := CoherenceResponseType:DATA_OWNER;
+        } else {
+          out_msg.Type := CoherenceResponseType:ACK;
+        }
         out_msg.Sender := machineID;
         out_msg.Destination.add(persistentTable.findSmallest(address));
         assert(getCacheEntry(address).Tokens >= 1);
         out_msg.Tokens := getCacheEntry(address).Tokens;
+        out_msg.DataBlk := getCacheEntry(address).DataBlk;
         out_msg.MessageSize := MessageSizeType:Response_Control;
       }
     }
@@ -1055,7 +1090,7 @@
       out_msg.Type := CoherenceResponseType:DATA_OWNER;
       out_msg.Sender := machineID;
       out_msg.Destination.add(persistentTable.findSmallest(address));
-      assert(getCacheEntry(address).Tokens >= 1);
+      assert(getCacheEntry(address).Tokens > (max_tokens() / 2));
       out_msg.Tokens := getCacheEntry(address).Tokens;
       out_msg.DataBlk := getCacheEntry(address).DataBlk;
       out_msg.Dirty := getCacheEntry(address).Dirty;
@@ -1070,7 +1105,11 @@
     if (getCacheEntry(address).Tokens > 1) {
       enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) 
{
         out_msg.Address := address;
-        out_msg.Type := CoherenceResponseType:ACK;
+        if (getCacheEntry(address).Tokens > (max_tokens() / 2)) {
+          out_msg.Type := CoherenceResponseType:DATA_OWNER;
+        } else {
+          out_msg.Type := CoherenceResponseType:ACK;
+        }
         out_msg.Sender := machineID;
         out_msg.Destination.add(persistentTable.findSmallest(address));
         assert(getCacheEntry(address).Tokens >= 1);
@@ -1079,6 +1118,7 @@
         } else {
           out_msg.Tokens := getCacheEntry(address).Tokens - 1;
         }
+        out_msg.DataBlk := getCacheEntry(address).DataBlk;
         out_msg.MessageSize := MessageSizeType:Response_Control;
       }
     }
@@ -1091,29 +1131,43 @@
 
   action(ff_sendDataWithAllButNorOneTokens, "\f", desc="Send data and out 
tokens but one to starver") {
     //assert(persistentTable.findSmallest(address) != id); // Make sure we 
never bounce tokens to ourself
-    assert(getCacheEntry(address).Tokens > 0);
-    if (getCacheEntry(address).Tokens > 1) {
-      enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) 
{
+    assert(getCacheEntry(address).Tokens > ((max_tokens() / 2) + 1));
+    enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
         out_msg.Address := address;
         out_msg.Type := CoherenceResponseType:DATA_OWNER;
         out_msg.Sender := machineID;
         out_msg.Destination.add(persistentTable.findSmallest(address));
-        assert(getCacheEntry(address).Tokens >= 1);
-        if (getCacheEntry(address).Tokens > N_tokens) {
+        if (getCacheEntry(address).Tokens > (N_tokens + (max_tokens() / 2))) {
           out_msg.Tokens := getCacheEntry(address).Tokens - N_tokens;
         } else {
           out_msg.Tokens := getCacheEntry(address).Tokens - 1;
         }
+        assert(out_msg.Tokens > (max_tokens() / 2));
         out_msg.DataBlk := getCacheEntry(address).DataBlk;
         out_msg.Dirty := getCacheEntry(address).Dirty;
         out_msg.MessageSize := MessageSizeType:Response_Data;
-      }
-      if (getCacheEntry(address).Tokens > N_tokens) {
-        getCacheEntry(address).Tokens := N_tokens;
-      } else {
-        getCacheEntry(address).Tokens := 1;
-      }
     }
+    if (getCacheEntry(address).Tokens > (N_tokens + (max_tokens() / 2))) {
+      getCacheEntry(address).Tokens := N_tokens;
+    } else {
+      getCacheEntry(address).Tokens := 1;
+    }
+  }
+
+  action(fo_sendDataWithOwnerToken, "fo", desc="Send data and owner tokens") {
+    assert(getCacheEntry(address).Tokens == ((max_tokens() / 2) + 1));
+    enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
+        out_msg.Address := address;
+        out_msg.Type := CoherenceResponseType:DATA_OWNER;
+        out_msg.Sender := machineID;
+        out_msg.Destination.add(persistentTable.findSmallest(address));
+        out_msg.Tokens := getCacheEntry(address).Tokens;
+        assert(out_msg.Tokens > (max_tokens() / 2));
+        out_msg.DataBlk := getCacheEntry(address).DataBlk;
+        out_msg.Dirty := getCacheEntry(address).Dirty;
+        out_msg.MessageSize := MessageSizeType:Response_Data;
+    }
+    getCacheEntry(address).Tokens := 0;
   }
 
   action(g_bounceResponseToStarver, "g", desc="Redirect response to starving 
processor") {
@@ -1313,11 +1367,16 @@
       peek(requestNetwork_in, RequestMsg) {
         enqueue(responseNetwork_out, ResponseMsg, latency = 
l1_response_latency) {
           out_msg.Address := address;
-          out_msg.Type := CoherenceResponseType:ACK;
+          if (getCacheEntry(address).Tokens > (max_tokens() / 2)) {
+            out_msg.Type := CoherenceResponseType:DATA_OWNER;
+          } else {
+            out_msg.Type := CoherenceResponseType:ACK;
+          }
           out_msg.Sender := machineID;
           out_msg.Destination.add(in_msg.Requestor);
           assert(getCacheEntry(address).Tokens >= 1);
           out_msg.Tokens := getCacheEntry(address).Tokens;
+          out_msg.DataBlk := getCacheEntry(address).DataBlk;
           out_msg.MessageSize := MessageSizeType:Response_Control;
         }
       }
@@ -1336,6 +1395,7 @@
   }
 
   action(gg_deallocateL1CacheBlock, "\g", desc="Deallocate cache block.  Sets 
the cache to invalid, allowing a replacement in parallel with a fetch.") {
+    assert(getTokens(address) == 0);
     if (L1DcacheMemory.isTagPresent(address)) {
       L1DcacheMemory.deallocate(address);
     } else {
@@ -1432,7 +1492,7 @@
_______________________________________________
m5-dev mailing list
[email protected]
http://m5sim.org/mailman/listinfo/m5-dev

Reply via email to