Nilay,

This change breaks the regression builds:

/tmp/gem5.ali/build/ALPHA_SE_MOESI_CMP_directory/mem/ruby/system/PerfectCacheMemory.hh:62:31: error: 'CacheMsg' does not name a type
/tmp/gem5.ali/build/ALPHA_SE_MOESI_CMP_directory/mem/ruby/system/PerfectCacheMemory.hh:62:41: error: ISO C++ forbids declaration of 'msg' with no type
/tmp/gem5.ali/build/ALPHA_SE_MOESI_CMP_directory/mem/ruby/system/PerfectCacheMemory.hh:123:49: error: 'CacheMsg' does not name a type
/tmp/gem5.ali/build/ALPHA_SE_MOESI_CMP_directory/mem/ruby/system/PerfectCacheMemory.hh:123:59: error: ISO C++ forbids declaration of 'msg' with no type
scons: *** [build/ALPHA_SE_MOESI_CMP_directory/mem/protocol/L1Cache_Wakeup.do] Error 1
scons: *** [build/ALPHA_SE_MOESI_CMP_directory/mem/protocol/L2Cache_Controller.do] Error 1
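
Looks like PerfectCacheMemory.hh still names CacheMsg in a couple of member signatures (lines 62 and 123, per the output above) and was picking up the declaration transitively before this change removed it. I haven't opened the file, but the failure is presumably along these lines (the member signature below is a guess from the compiler output, not the actual code):

    // Hypothetical sketch, not the real gem5 header.
    //
    // The forward declaration below used to be reachable through the include
    // chain; with it gone, g++ reports "'CacheMsg' does not name a type" and
    // "ISO C++ forbids declaration of 'msg' with no type" at each use.
    //
    // class CacheMsg;

    template <class ENTRY>
    class PerfectCacheMemory
    {
      public:
        // roughly what lines 62 (declaration) and 123 (definition) refer to
        bool tryCacheAccess(const CacheMsg& msg, bool& block_stc, ENTRY*& entry);
    };

Re-adding the forward declaration (or whatever include used to provide it), or dropping the stale CacheMsg uses from PerfectCacheMemory.hh, should get it compiling again.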


Ali

On Jan 11, 2012, at 1:53 PM, Nilay Vaish wrote:

> changeset 5ca9dd977386 in /z/repo/gem5
> details: http://repo.gem5.org/gem5?cmd=changeset;node=5ca9dd977386
> description:
>       Ruby: Resurrect Cache Warmup Capability
>       This patch resurrects ruby's cache warmup capability. It essentially
>       makes use of all the infrastructure that was added to the controllers,
>       memories and the cache recorder.
> 
> diffstat:
> 
> src/mem/ruby/buffers/MessageBuffer.cc  |    6 +-
> src/mem/ruby/system/DMASequencer.hh    |    3 +
> src/mem/ruby/system/DirectoryMemory.cc |    1 +
> src/mem/ruby/system/RubyPort.cc        |   80 ++++++++++-
> src/mem/ruby/system/RubyPort.hh        |   11 +-
> src/mem/ruby/system/Sequencer.cc       |   17 ++-
> src/mem/ruby/system/Sequencer.hh       |   14 +-
> src/mem/ruby/system/System.cc          |  241 +++++++++++++++++++++++++++++++-
> src/mem/ruby/system/System.hh          |   42 ++++-
> 9 files changed, 390 insertions(+), 25 deletions(-)
> 
> diffs (truncated from 672 to 300 lines):
> 
> diff -r 0b7825ddbb17 -r 5ca9dd977386 src/mem/ruby/buffers/MessageBuffer.cc
> --- a/src/mem/ruby/buffers/MessageBuffer.cc   Wed Jan 11 13:42:00 2012 -0600
> +++ b/src/mem/ruby/buffers/MessageBuffer.cc   Wed Jan 11 13:48:48 2012 -0600
> @@ -198,7 +198,11 @@
>                   m_last_arrival_time * g_eventQueue_ptr->getClock());
>         }
>     }
> -    m_last_arrival_time = arrival_time;
> +
> +    // If running a cache trace, don't worry about the last arrival checks
> +    if (!g_system_ptr->m_warmup_enabled) {
> +        m_last_arrival_time = arrival_time;
> +    }
> 
>     // compute the delay cycles and set enqueue time
>     Message* msg_ptr = message.get();
> diff -r 0b7825ddbb17 -r 5ca9dd977386 src/mem/ruby/system/DMASequencer.hh
> --- a/src/mem/ruby/system/DMASequencer.hh     Wed Jan 11 13:42:00 2012 -0600
> +++ b/src/mem/ruby/system/DMASequencer.hh     Wed Jan 11 13:48:48 2012 -0600
> @@ -55,6 +55,9 @@
>     /* external interface */
>     RequestStatus makeRequest(PacketPtr pkt);
>     bool busy() { return m_is_busy;}
> +    int outstandingCount() const { return (m_is_busy ? 1 : 0); }
> +    bool isDeadlockEventScheduled() const { return false; }
> +    void descheduleDeadlockEvent() {}
> 
>     /* SLICC callback */
>     void dataCallback(const DataBlock & dblk);
> diff -r 0b7825ddbb17 -r 5ca9dd977386 src/mem/ruby/system/DirectoryMemory.cc
> --- a/src/mem/ruby/system/DirectoryMemory.cc  Wed Jan 11 13:42:00 2012 -0600
> +++ b/src/mem/ruby/system/DirectoryMemory.cc  Wed Jan 11 13:48:48 2012 -0600
> @@ -58,6 +58,7 @@
> 
>     if (m_use_map) {
>         m_sparseMemory = new SparseMemory(m_map_levels);
> +        g_system_ptr->registerSparseMemory(m_sparseMemory);
>     } else {
>         m_entries = new AbstractEntry*[m_num_entries];
>         for (int i = 0; i < m_num_entries; i++)
> diff -r 0b7825ddbb17 -r 5ca9dd977386 src/mem/ruby/system/RubyPort.cc
> --- a/src/mem/ruby/system/RubyPort.cc Wed Jan 11 13:42:00 2012 -0600
> +++ b/src/mem/ruby/system/RubyPort.cc Wed Jan 11 13:48:48 2012 -0600
> @@ -27,11 +27,11 @@
>  */
> 
> #include "cpu/testers/rubytest/RubyTester.hh"
> +#include "debug/Config.hh"
> #include "debug/Ruby.hh"
> #include "mem/protocol/AccessPermission.hh"
> #include "mem/ruby/slicc_interface/AbstractController.hh"
> #include "mem/ruby/system/RubyPort.hh"
> -#include "mem/physical.hh"
> 
> RubyPort::RubyPort(const Params *p)
>     : MemObject(p)
> @@ -51,6 +51,8 @@
>     m_usingRubyTester = p->using_ruby_tester;
>     access_phys_mem = p->access_phys_mem;
> 
> +    drainEvent = NULL;
> +
>     ruby_system = p->ruby_system;
>     waitingOnSequencer = false;
> }
> @@ -510,6 +512,82 @@
>             (*i)->sendRetry();
>         }
>     }
> +
> +    testDrainComplete();
> +}
> +
> +void
> +RubyPort::testDrainComplete()
> +{
> +    //If we weren't able to drain before, we might be able to now.
> +    if (drainEvent != NULL) {
> +        unsigned int drainCount = getDrainCount(drainEvent);
> +        DPRINTF(Config, "Drain count: %u\n", drainCount);
> +        if (drainCount == 0) {
> +            drainEvent->process();
> +            // Clear the drain event once we're done with it.
> +            drainEvent = NULL;
> +        }
> +    }
> +}
> +
> +unsigned int
> +RubyPort::getDrainCount(Event *de)
> +{
> +    int count = 0;
> +    //
> +    // If the sequencer is not empty, then requests need to drain.
> +    // The outstandingCount is the number of requests outstanding and thus the
> +    // number of times M5's timing port will process the drain event.
> +    //
> +    count += outstandingCount();
> +
> +    DPRINTF(Config, "outstanding count %d\n", outstandingCount());
> +
> +    // To simplify the draining process, the sequencer's deadlock detection
> +    // event should have been descheduled.
> +    assert(isDeadlockEventScheduled() == false);
> +
> +    if (pio_port != NULL) {
> +        count += pio_port->drain(de);
> +        DPRINTF(Config, "count after pio check %d\n", count);
> +    }
> +    if (physMemPort != NULL) {
> +        count += physMemPort->drain(de);
> +        DPRINTF(Config, "count after physmem check %d\n", count);
> +    }
> +
> +    for (CpuPortIter p_iter = cpu_ports.begin(); p_iter != cpu_ports.end();
> +         p_iter++) {
> +        M5Port* cpu_port = *p_iter;
> +        count += cpu_port->drain(de);
> +        DPRINTF(Config, "count after cpu port check %d\n", count);
> +    }
> +
> +    DPRINTF(Config, "final count %d\n", count);
> +
> +    return count;
> +}
> +
> +unsigned int
> +RubyPort::drain(Event *de)
> +{
> +    if (isDeadlockEventScheduled()) {
> +        descheduleDeadlockEvent();
> +    }
> +
> +    int count = getDrainCount(de);
> +
> +    // Set status
> +    if (count != 0) {
> +        drainEvent = de;
> +
> +        changeState(SimObject::Draining);
> +        return count;
> +    }
> +
> +    changeState(SimObject::Drained);
> +    return 0;
> }
> 
> void
> diff -r 0b7825ddbb17 -r 5ca9dd977386 src/mem/ruby/system/RubyPort.hh
> --- a/src/mem/ruby/system/RubyPort.hh Wed Jan 11 13:42:00 2012 -0600
> +++ b/src/mem/ruby/system/RubyPort.hh Wed Jan 11 13:48:48 2012 -0600
> @@ -33,7 +33,6 @@
> #include <string>
> 
> #include "mem/protocol/RequestStatus.hh"
> -#include "mem/ruby/slicc_interface/RubyRequest.hh"
> #include "mem/ruby/system/System.hh"
> #include "mem/mem_object.hh"
> #include "mem/physical.hh"
> @@ -115,17 +114,23 @@
>     Port *getPort(const std::string &if_name, int idx);
> 
>     virtual RequestStatus makeRequest(PacketPtr pkt) = 0;
> +    virtual int outstandingCount() const = 0;
> +    virtual bool isDeadlockEventScheduled() const = 0;
> +    virtual void descheduleDeadlockEvent() = 0;
> 
>     //
>     // Called by the controller to give the sequencer a pointer.
>     // A pointer to the controller is needed for atomic support.
>     //
>     void setController(AbstractController* _cntrl) { m_controller = _cntrl; }
> +    int getId() { return m_version; }
> +    unsigned int drain(Event *de);
> 
>   protected:
>     const std::string m_name;
>     void ruby_hit_callback(PacketPtr pkt);
>     void hit(PacketPtr pkt);
> +    void testDrainComplete();
> 
>     int m_version;
>     AbstractController* m_controller;
> @@ -143,6 +148,8 @@
>         }
>     }
> 
> +    unsigned int getDrainCount(Event *de);
> +
>     uint16_t m_port_id;
>     uint64_t m_request_cnt;
> 
> @@ -152,6 +159,8 @@
>     typedef std::vector<M5Port*>::iterator CpuPortIter;
>     std::vector<M5Port*> cpu_ports;
> 
> +    Event *drainEvent;
> +
>     PhysicalMemory* physmem;
>     RubySystem* ruby_system;
> 
> diff -r 0b7825ddbb17 -r 5ca9dd977386 src/mem/ruby/system/Sequencer.cc
> --- a/src/mem/ruby/system/Sequencer.cc        Wed Jan 11 13:42:00 2012 -0600
> +++ b/src/mem/ruby/system/Sequencer.cc        Wed Jan 11 13:48:48 2012 -0600
> @@ -519,7 +519,11 @@
>     }
> 
>     // update the data
> -    if (pkt->getPtr<uint8_t>(true) != NULL) {
> +    if (g_system_ptr->m_warmup_enabled) {
> +        assert(pkt->getPtr<uint8_t>(false) != NULL);
> +        data.setData(pkt->getPtr<uint8_t>(false),
> +                     request_address.getOffset(), pkt->getSize());
> +    } else if (pkt->getPtr<uint8_t>(true) != NULL) {
>         if ((type == RubyRequestType_LD) ||
>             (type == RubyRequestType_IFETCH) ||
>             (type == RubyRequestType_RMW_Read) ||
> @@ -551,8 +555,17 @@
>         testerSenderState->subBlock->mergeFrom(data);
>     }
> 
> -    ruby_hit_callback(pkt);
>     delete srequest;
> +
> +    if (g_system_ptr->m_warmup_enabled) {
> +        delete pkt;
> +        g_system_ptr->m_cache_recorder->enqueueNextFetchRequest();
> +    } else if (g_system_ptr->m_cooldown_enabled) {
> +        delete pkt;
> +        g_system_ptr->m_cache_recorder->enqueueNextFlushRequest();
> +    } else {
> +        ruby_hit_callback(pkt);
> +    }
> }
> 
> bool
> diff -r 0b7825ddbb17 -r 5ca9dd977386 src/mem/ruby/system/Sequencer.hh
> --- a/src/mem/ruby/system/Sequencer.hh        Wed Jan 11 13:42:00 2012 -0600
> +++ b/src/mem/ruby/system/Sequencer.hh        Wed Jan 11 13:48:48 2012 -0600
> @@ -39,8 +39,6 @@
> #include "mem/ruby/system/RubyPort.hh"
> 
> class DataBlock;
> -class CacheMsg;
> -class MachineID;
> class CacheMemory;
> 
> class RubySequencerParams;
> @@ -100,6 +98,18 @@
> 
>     RequestStatus makeRequest(PacketPtr pkt);
>     bool empty() const;
> +    int outstandingCount() const { return m_outstanding_count; }
> +    bool
> +    isDeadlockEventScheduled() const
> +    {
> +        return deadlockCheckEvent.scheduled();
> +    }
> +
> +    void
> +    descheduleDeadlockEvent()
> +    {
> +        deschedule(deadlockCheckEvent);
> +    }
> 
>     void print(std::ostream& out) const;
>     void printStats(std::ostream& out) const;
> diff -r 0b7825ddbb17 -r 5ca9dd977386 src/mem/ruby/system/System.cc
> --- a/src/mem/ruby/system/System.cc   Wed Jan 11 13:42:00 2012 -0600
> +++ b/src/mem/ruby/system/System.cc   Wed Jan 11 13:48:48 2012 -0600
> @@ -1,5 +1,5 @@
> /*
> - * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
> + * Copyright (c) 1999-2011 Mark D. Hill and David A. Wood
>  * All rights reserved.
>  *
>  * Redistribution and use in source and binary forms, with or without
> @@ -26,15 +26,19 @@
>  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
>  */
> 
> +#include <fcntl.h>
> +#include <zlib.h>
> +
> +#include <cstdio>
> +
> #include "base/intmath.hh"
> #include "base/output.hh"
> -#include "mem/ruby/buffers/MessageBuffer.hh"
> +#include "debug/RubySystem.hh"
> #include "mem/ruby/common/Address.hh"
> #include "mem/ruby/network/Network.hh"
> #include "mem/ruby/profiler/Profiler.hh"
> -#include "mem/ruby/slicc_interface/AbstractController.hh"
> -#include "mem/ruby/system/MemoryVector.hh"
> #include "mem/ruby/system/System.hh"
> +#include "sim/simulate.hh"
> 
> using namespace std;
> 

_______________________________________________
gem5-dev mailing list
[email protected]
http://m5sim.org/mailman/listinfo/gem5-dev
