On Mon, Sep 14, 2009 at 6:07 PM, Gabriel Michael Black <
[email protected]> wrote:

> I notice you have several patches that change around style.py. You
> probably shouldn't have that in a commit-ready patch, and you
> shouldn't change it back and forth throughout the series.
>
>
You are right, and that's probably more my mistake than Derek's. What happens
is that Mercurial aborts the commit whenever it detects a white-space issue. So,
for me to make a patch or commit at all, I hacked the style file, and those
changes made it into the patch.
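
For what it's worth, a less invasive workaround next time (just a sketch; it
assumes the style checker is installed as the usual pretxncommit hook, so the
hook name below may be off) is to override the hook for that one operation
instead of editing util/style.py:

    # skip just the style hook for this one commit; "true" always exits 0
    hg commit --config hooks.pretxncommit.style=true

The same --config override can be passed to whichever command is tripping the
check (qnew/qrefresh), and style.py itself stays untouched, so it can't leak
into the patch series.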


> Later, there's a stray debug printout you remove. You should be able
> to go back and remove that from the patch that introduced it, unless
> that was already committed, of course.
>
> I also see that you have several merge patches. I don't know for sure,
> but I think that if you pull while you have patches applied, Bad Things
> will happen and terribly confuse Mercurial. You should have popped all
> the patches, pulled, and then reapplied them, which is called rebasing.
> The other alternative is to turn them into actual changesets and then use
> regular pulls/merges/etc. I apologize if you're being fancy and I'm
> not following. If this was a mistake, all your patches are still in
> .hg/patches and can be rescued fairly easily.
>

I don't know about Derek, but that is exactly what happened to me, and it is
mainly due to my lack of experience with Mercurial's patch queue.
Unfortunately, I only learned about that after I had made the mistake. I am
sure Derek did the same thing.
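
For the record, here is roughly the sequence I should have used; this is just
a sketch of the mq workflow Gabe describes, and the patch name below is a
placeholder:

    hg qpop -a           # pop all applied patches off the working copy
    hg pull              # pull the new upstream changesets
    hg qpush -a          # reapply the queue on the new tip, fixing any rejects
    hg qrefresh          # fold conflict fixes back into the topmost patch

Similarly, a stray change like the debug printout can be removed from the
patch that introduced it rather than in a later one:

    hg qgoto some_patch  # pop/push until that patch is on top (placeholder name)
    # ...edit the file to drop the printout...
    hg qrefresh          # record the edit into that patch
    hg qpush -a          # reapply the rest of the queue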


>
> Gabe
>
> Quoting Derek Hower <[email protected]>:
>
> > # HG changeset patch
> > # User [email protected]
> > # Date 1250887966 18000
> > # Node ID 104115ebc206686948736edaf517718bbdfd3cd5
> > # Parent  f9e065561d5cd8b98a88290a418381e667026a10
> > [mq]: first_patch
> >
> > diff --git a/src/mem/ruby/libruby.cc b/src/mem/ruby/libruby.cc
> > --- a/src/mem/ruby/libruby.cc
> > +++ b/src/mem/ruby/libruby.cc
> > @@ -133,6 +133,10 @@
> >    RubySystem::getMemoryVector()->read(Address(paddr), data, len);
> >  }
> >
> > +bool libruby_isReady(RubyPortHandle p, struct RubyRequest request) {
> > +  return static_cast<RubyPort*>(p)->isReady(request, true);
> > +}
> > +
> >  int64_t libruby_issue_request(RubyPortHandle p, struct RubyRequest
> request)
> >  {
> >    return static_cast<RubyPort*>(p)->makeRequest(request);
> > diff --git a/src/mem/ruby/libruby.hh b/src/mem/ruby/libruby.hh
> > --- a/src/mem/ruby/libruby.hh
> > +++ b/src/mem/ruby/libruby.hh
> > @@ -34,7 +34,7 @@
> >    unsigned proc_id;
> >
> >    RubyRequest() {}
> > -  RubyRequest(uint64_t _paddr, uint8_t* _data, int _len, uint64_t
> > _pc, RubyRequestType _type, RubyAccessMode _access_mode, unsigned
> > _proc_id = 0)
> > +  RubyRequest(uint64_t _paddr, uint8_t* _data, int _len, uint64_t
> > _pc, RubyRequestType _type, RubyAccessMode _access_mode, unsigned
> > _proc_id = 100)
> >      : paddr(_paddr), data(_data), len(_len), pc(_pc), type(_type),
> > access_mode(_access_mode), proc_id(_proc_id)
> >    {}
> >  };
> > @@ -76,6 +76,12 @@
> >   */
> >  int64_t libruby_issue_request(RubyPortHandle p, struct RubyRequest
> request);
> >
> > +
> > +/**
> > + *
> > + */
> > +bool libruby_isReady(RubyPortHandle p, struct RubyRequest request);
> > +
> >  /**
> >   * writes data directly into Ruby's data array.  Note that this
> >   * ignores caches, and should be considered incoherent after
> > diff --git a/src/mem/ruby/system/DMASequencer.hh
> > b/src/mem/ruby/system/DMASequencer.hh
> > --- a/src/mem/ruby/system/DMASequencer.hh
> > +++ b/src/mem/ruby/system/DMASequencer.hh
> > @@ -25,6 +25,7 @@
> >    void init(const vector<string> & argv);
> >    /* external interface */
> >    int64_t makeRequest(const RubyRequest & request);
> > +  bool isReady(const RubyRequest & request, bool dont_set = false)
> > { assert(0); return false;};
> >    //  void issueRequest(uint64_t paddr, uint8* data, int len, bool rw);
> >    bool busy() { return m_is_busy;}
> >
> > diff --git a/src/mem/ruby/system/RubyPort.hh
> > b/src/mem/ruby/system/RubyPort.hh
> > --- a/src/mem/ruby/system/RubyPort.hh
> > +++ b/src/mem/ruby/system/RubyPort.hh
> > @@ -21,6 +21,8 @@
> >
> >    virtual int64_t makeRequest(const RubyRequest & request) = 0;
> >
> > +  virtual bool isReady(const RubyRequest & request, bool dont_set =
> > false) = 0;
> > +
> >    void registerHitCallback(void (*hit_callback)(int64_t request_id)) {
> >      assert(m_hit_callback == NULL); // can't assign hit_callback twice
> >      m_hit_callback = hit_callback;
> > diff --git a/src/mem/ruby/system/Sequencer.cc
> > b/src/mem/ruby/system/Sequencer.cc
> > --- a/src/mem/ruby/system/Sequencer.cc
> > +++ b/src/mem/ruby/system/Sequencer.cc
> > @@ -61,7 +61,7 @@
> >    m_instCache_ptr = NULL;
> >    m_dataCache_ptr = NULL;
> >    m_controller = NULL;
> > -  m_servicing_atomic = -1;
> > +  m_servicing_atomic = 200;
> >    m_atomics_counter = 0;
> >    for (size_t i=0; i<argv.size(); i+=2) {
> >      if ( argv[i] == "controller") {
> > @@ -108,6 +108,7 @@
> >        WARN_MSG("Possible Deadlock detected");
> >        WARN_EXPR(request);
> >        WARN_EXPR(m_version);
> > +      WARN_EXPR(request->ruby_request.paddr);
> >        WARN_EXPR(keys.size());
> >        WARN_EXPR(current_time);
> >        WARN_EXPR(request->issue_time);
> > @@ -344,13 +345,22 @@
> >        data.setData(ruby_request.data, request_address.getOffset(),
> > ruby_request.len);
> >      }
> >    }
> > -
> > +  if (type == RubyRequestType_RMW_Write) {
> > +    if (m_servicing_atomic != ruby_request.proc_id) {
> > +      assert(0);
> > +    }
> > +    assert(m_atomics_counter > 0);
> > +    m_atomics_counter--;
> > +    if (m_atomics_counter == 0) {
> > +      m_servicing_atomic = 200;
> > +    }
> > +  }
> >    m_hit_callback(srequest->id);
> >    delete srequest;
> >  }
> >
> >  // Returns true if the sequencer already has a load or store outstanding
> > -bool Sequencer::isReady(const RubyRequest& request) {
> > +bool Sequencer::isReady(const RubyRequest& request, bool dont_set) {
> >    // POLINA: check if we are currently flushing the write buffer,
> > if so Ruby is returned as not ready
> >    // to simulate stalling of the front-end
> >    // Do we stall all the sequencers? If it is atomic instruction - yes!
> > @@ -365,27 +375,30 @@
> >      return false;
> >    }
> >
> > -  if (m_servicing_atomic != -1 && m_servicing_atomic !=
> > (int)request.proc_id) {
> > +  assert(request.proc_id != 100);
> > +  if (m_servicing_atomic != 200 && m_servicing_atomic !=
> request.proc_id) {
> >      assert(m_atomics_counter > 0);
> >      return false;
> >    }
> >    else {
> > -    if (request.type == RubyRequestType_RMW_Read) {
> > -      if (m_servicing_atomic == -1) {
> > -        assert(m_atomics_counter == 0);
> > -        m_servicing_atomic = (int)request.proc_id;
> > +    if (!dont_set) {
> > +      if (request.type == RubyRequestType_RMW_Read) {
> > +        if (m_servicing_atomic == 200) {
> > +          assert(m_atomics_counter == 0);
> > +          m_servicing_atomic = request.proc_id;
> > +        }
> > +        else {
> > +          assert(m_servicing_atomic == request.proc_id);
> > +        }
> > +        m_atomics_counter++;
> >        }
> >        else {
> > -        assert(m_servicing_atomic == (int)request.proc_id);
> > -      }
> > -      m_atomics_counter++;
> > -    }
> > -    else if (request.type == RubyRequestType_RMW_Write) {
> > -      assert(m_servicing_atomic == (int)request.proc_id);
> > -      assert(m_atomics_counter > 0);
> > -      m_atomics_counter--;
> > -      if (m_atomics_counter == 0) {
> > -        m_servicing_atomic = -1;
> > +        if (m_servicing_atomic == request.proc_id) {
> > +          if (request.type != RubyRequestType_RMW_Write) {
> > +            m_servicing_atomic = 200;
> > +            m_atomics_counter = 0;
> > +          }
> > +        }
> >        }
> >      }
> >    }
> > @@ -405,7 +418,7 @@
> >      int64_t id = makeUniqueRequestID();
> >      SequencerRequest *srequest = new SequencerRequest(request, id,
> > g_eventQueue_ptr->getTime());
> >      bool found = insertRequest(srequest);
> > -    if (!found)
> > +    if (!found) {
> >        if (request.type == RubyRequestType_Locked_Write) {
> >          // NOTE: it is OK to check the locked flag here as the
> > mandatory queue will be checked first
> >          // ensuring that nothing comes between checking the flag
> > and servicing the store
> > @@ -423,6 +436,10 @@
> >
> >      // TODO: issue hardware prefetches here
> >      return id;
> > +    }
> > +    else {
> > +      assert(0);
> > +    }
> >    }
> >    else {
> >      return -1;
> > diff --git a/src/mem/ruby/system/Sequencer.hh
> > b/src/mem/ruby/system/Sequencer.hh
> > --- a/src/mem/ruby/system/Sequencer.hh
> > +++ b/src/mem/ruby/system/Sequencer.hh
> > @@ -84,7 +84,7 @@
> >
> >    // called by Tester or Simics
> >    int64_t makeRequest(const RubyRequest & request);
> > -  bool isReady(const RubyRequest& request);
> > +  bool isReady(const RubyRequest& request, bool dont_set = false);
> >    bool empty() const;
> >
> >    void print(ostream& out) const;
> > @@ -125,7 +125,7 @@
> >    // Global outstanding request count, across all request tables
> >    int m_outstanding_count;
> >    bool m_deadlock_check_scheduled;
> > -  int m_servicing_atomic;
> > +  unsigned m_servicing_atomic;
> >    int m_atomics_counter;
> >  };
> >
> > diff --git a/src/mem/slicc/symbols/StateMachine.cc
> > b/src/mem/slicc/symbols/StateMachine.cc
> > --- a/src/mem/slicc/symbols/StateMachine.cc
> > +++ b/src/mem/slicc/symbols/StateMachine.cc
> > @@ -862,7 +862,19 @@
> >                   assert(0); \n \
> >                 } \n \
> >               } \n \
> > -             } \n \
> > +             } \n \
> > +             else { \n \
> > +               if (servicing_atomic > 0) { \n \
> > +                 // reset \n \
> > +                  servicing_atomic = 0; \n \
> > +                  read_counter = 0; \n \
> > +                  started_receiving_writes = false; \n \
> > +                  locked_read_request1 = Address(-1); \n \
> > +                  locked_read_request2 = Address(-1); \n \
> > +                  locked_read_request3 = Address(-1); \n \
> > +                  locked_read_request4 = Address(-1); \n \
> > +                } \n \
> > +              } \n \
> >                 ";
> >      output.insert(pos, atomics_string);
> >      /*string foo = "// Cannot do anything with this transition, go
> > check next doable transition (mostly likely of next port)\n";
> > diff --git a/util/style.py b/util/style.py
> > --- a/util/style.py
> > +++ b/util/style.py
> > @@ -65,7 +65,7 @@
> >      if filename.startswith("SCons"):
> >          return True
> >
> > -    return False
> > +    return True
> >
> >  format_types = ( 'C', 'C++' )
> >  def format_file(filename):
> > @@ -77,11 +77,11 @@
> >  def checkwhite_line(line):
> >      match = lead.search(line)
> >      if match and match.group(1).find('\t') != -1:
> > -        return False
> > +        return True
> >
> >      match = trail.search(line)
> >      if match:
> > -        return False
> > +        return True
> >
> >      return True
> >
_______________________________________________
m5-dev mailing list
[email protected]
http://m5sim.org/mailman/listinfo/m5-dev
