# HG changeset patch
# User Brad Beckmann <[email protected]>
# Date 1261412980 28800
# Node ID 2b2eb8881591bd418466cabcd2c855d9ea31dd7f
# Parent a69fcc6caa1ba560ded1c5cf86d0c6f419467f04
ruby: Cleaned up static members in RubyPort
Removed static members in RubyPort and removed the ruby request unique id.
This patch currently breaks atomic support and libruby. More changes are
needed.
diff -r a69fcc6caa1b -r 2b2eb8881591 src/mem/ruby/libruby.cc
--- a/src/mem/ruby/libruby.cc Mon Dec 21 08:29:40 2009 -0800
+++ b/src/mem/ruby/libruby.cc Mon Dec 21 08:29:40 2009 -0800
@@ -123,7 +123,11 @@
RubyPortHandle libruby_get_port(const char* port_name, void
(*hit_callback)(int64_t access_id))
{
- return static_cast<RubyPortHandle>(RubySystem::getPort(port_name,
hit_callback));
+ //
+ // Fix me: Hit callback is now a non-static member function pointer of
+ // RubyPort and cannot be set to an arbitrary global function
+ //
+ return NULL;//static_cast<RubyPortHandle>(RubySystem::getPort(port_name,
hit_callback));
}
RubyPortHandle libruby_get_port_by_name(const char* port_name)
diff -r a69fcc6caa1b -r 2b2eb8881591 src/mem/ruby/libruby.hh
--- a/src/mem/ruby/libruby.hh Mon Dec 21 08:29:40 2009 -0800
+++ b/src/mem/ruby/libruby.hh Mon Dec 21 08:29:40 2009 -0800
@@ -4,6 +4,7 @@
#include <stdint.h>
#include <ostream>
+#include "mem/packet.hh"
typedef void* RubyPortHandle;
enum RubyRequestType {
@@ -31,11 +32,26 @@
uint64_t pc;
RubyRequestType type;
RubyAccessMode access_mode;
+ PacketPtr pkt;
unsigned proc_id;
RubyRequest() {}
- RubyRequest(uint64_t _paddr, uint8_t* _data, int _len, uint64_t _pc,
RubyRequestType _type, RubyAccessMode _access_mode, unsigned _proc_id = 0)
- : paddr(_paddr), data(_data), len(_len), pc(_pc), type(_type),
access_mode(_access_mode), proc_id(_proc_id)
+ RubyRequest(uint64_t _paddr,
+ uint8_t* _data,
+ int _len,
+ uint64_t _pc,
+ RubyRequestType _type,
+ RubyAccessMode _access_mode,
+ PacketPtr _pkt,
+ unsigned _proc_id = 0)
+ : paddr(_paddr),
+ data(_data),
+ len(_len),
+ pc(_pc),
+ type(_type),
+ access_mode(_access_mode),
+ pkt(_pkt),
+ proc_id(_proc_id)
{}
};
diff -r a69fcc6caa1b -r 2b2eb8881591 src/mem/ruby/recorder/TraceRecord.cc
--- a/src/mem/ruby/recorder/TraceRecord.cc Mon Dec 21 08:29:40 2009 -0800
+++ b/src/mem/ruby/recorder/TraceRecord.cc Mon Dec 21 08:29:40 2009 -0800
@@ -85,7 +85,8 @@
RubySystem::getBlockSizeBytes(),
m_pc_address.getAddress(),
m_type,
- RubyAccessMode_User);
+ RubyAccessMode_User,
+ NULL);
// Clear out the sequencer
while (!m_sequencer_ptr->empty()) {
diff -r a69fcc6caa1b -r 2b2eb8881591 src/mem/ruby/system/DMASequencer.cc
--- a/src/mem/ruby/system/DMASequencer.cc Mon Dec 21 08:29:40 2009 -0800
+++ b/src/mem/ruby/system/DMASequencer.cc Mon Dec 21 08:29:40 2009 -0800
@@ -24,7 +24,7 @@
m_data_block_mask = ~ (~0 << RubySystem::getBlockSizeBits());
}
-int64_t DMASequencer::makeRequest(const RubyRequest & request)
+bool DMASequencer::makeRequest(const RubyRequest & request)
{
uint64_t paddr = request.paddr;
uint8_t* data = request.data;
@@ -56,7 +56,7 @@
active_request.len = len;
active_request.bytes_completed = 0;
active_request.bytes_issued = 0;
- active_request.id = makeUniqueRequestID();
+ active_request.pkt = request.pkt;
SequencerMsg msg;
msg.getPhysicalAddress() = Address(paddr);
@@ -76,7 +76,7 @@
m_mandatory_q_ptr->enqueue(msg);
active_request.bytes_issued += msg.getLen();
- return active_request.id;
+ return true;
}
void DMASequencer::issueNext()
@@ -84,14 +84,14 @@
assert(m_is_busy == true);
active_request.bytes_completed = active_request.bytes_issued;
if (active_request.len == active_request.bytes_completed) {
- ruby_hit_callback(active_request.id);
+ (*this.*m_hit_callback)(active_request.pkt);
m_is_busy = false;
return;
}
SequencerMsg msg;
msg.getPhysicalAddress() = Address(active_request.start_paddr +
- active_request.bytes_completed);
+ active_request.bytes_completed);
assert((msg.getPhysicalAddress().getAddress() & m_data_block_mask) == 0);
msg.getLineAddress() = line_address(msg.getPhysicalAddress());
diff -r a69fcc6caa1b -r 2b2eb8881591 src/mem/ruby/system/DMASequencer.hh
--- a/src/mem/ruby/system/DMASequencer.hh Mon Dec 21 08:29:40 2009 -0800
+++ b/src/mem/ruby/system/DMASequencer.hh Mon Dec 21 08:29:40 2009 -0800
@@ -15,7 +15,7 @@
int bytes_completed;
int bytes_issued;
uint8* data;
- int64_t id;
+ PacketPtr pkt;
};
class DMASequencer :public RubyPort {
@@ -24,7 +24,7 @@
DMASequencer(const Params *);
void init();
/* external interface */
- int64_t makeRequest(const RubyRequest & request);
+ bool makeRequest(const RubyRequest & request);
// void issueRequest(uint64_t paddr, uint8* data, int len, bool rw);
bool busy() { return m_is_busy;}
diff -r a69fcc6caa1b -r 2b2eb8881591 src/mem/ruby/system/RubyPort.cc
--- a/src/mem/ruby/system/RubyPort.cc Mon Dec 21 08:29:40 2009 -0800
+++ b/src/mem/ruby/system/RubyPort.cc Mon Dec 21 08:29:40 2009 -0800
@@ -2,10 +2,6 @@
#include "mem/ruby/system/RubyPort.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
-uint16_t RubyPort::m_num_ports = 0;
-
-RubyPort::RequestMap RubyPort::pending_cpu_requests;
-
RubyPort::RubyPort(const Params *p)
: MemObject(p)
{
@@ -17,12 +13,14 @@
m_controller = NULL;
m_mandatory_q_ptr = NULL;
- m_port_id = m_num_ports++;
m_request_cnt = 0;
- m_hit_callback = NULL;
pio_port = NULL;
physMemPort = NULL;
- assert(m_num_ports <= 2048); // see below for reason
+
+ //
+ // By default, use the ruby_hit_callback function for all responses
+ //
+ m_hit_callback = &RubyPort::ruby_hit_callback;
}
void RubyPort::init()
@@ -133,14 +131,13 @@
RubyPort::M5Port::recvTiming(PacketPtr pkt)
{
DPRINTF(MemoryAccess,
- "Timing access caught for address %#x\n",
+ "Timing access caught for address 0x%#x\n",
pkt->getAddr());
//dsm: based on SimpleTimingPort::recvTiming(pkt);
//
- // After checking for pio responses, the remainder of packets
- // received by ruby should only be M5 requests, which should never
+ // The received packets should only be M5 requests, which should never
// get nacked. There used to be code to handle nacks here, but
// I'm pretty sure it didn't work correctly with the drain code,
// so that would need to be fixed if we ever added it back.
@@ -156,17 +153,20 @@
}
//
+ // Save the port in the sender state object to be used later to
+ // route the response
+ //
+ pkt->senderState = new SenderState(this, pkt->senderState);
+
+ //
// Check for pio requests and directly send them to the dedicated
// pio port.
//
if (!isPhysMemAddress(pkt->getAddr())) {
assert(ruby_port->pio_port != NULL);
-
- //
- // Save the port in the sender state object to be used later to
- // route the response
- //
- pkt->senderState = new SenderState(this, pkt->senderState);
+ DPRINTF(MemoryAccess,
+ "Request for address 0x%#x is assumed to be a pio request\n",
+ pkt->getAddr());
return ruby_port->pio_port->sendTiming(pkt);
}
@@ -187,42 +187,38 @@
} else if (pkt->isWrite()) {
type = RubyRequestType_ST;
} else if (pkt->isReadWrite()) {
+ panic("Atomic read write packets are not yet supported by ruby\n");
// type = RubyRequestType_RMW;
+ } else {
+ panic("Unsupported ruby packet type\n");
}
- RubyRequest ruby_request(pkt->getAddr(), pkt->getPtr<uint8_t>(),
- pkt->getSize(), pc, type,
- RubyAccessMode_Supervisor);
+ RubyRequest ruby_request(pkt->getAddr(),
+ pkt->getPtr<uint8_t>(),
+ pkt->getSize(),
+ pc,
+ type,
+ RubyAccessMode_Supervisor,
+ pkt);
// Submit the ruby request
- int64_t req_id = ruby_port->makeRequest(ruby_request);
- if (req_id == -1) {
- return false;
- }
-
- // Save the request for the callback
- RubyPort::pending_cpu_requests[req_id] = new RequestCookie(pkt, this);
-
- return true;
+ return ruby_port->makeRequest(ruby_request);
}
void
-RubyPort::ruby_hit_callback(int64_t req_id)
+RubyPort::ruby_hit_callback(PacketPtr pkt)
{
//
- // Note: This single fuction can be called by cpu and dma ports,
- // as well as the functional port.
+ // Retrieve the request port from the sender State
//
- RequestMap::iterator i = pending_cpu_requests.find(req_id);
- if (i == pending_cpu_requests.end())
- panic("could not find pending request %d\n", req_id);
-
- RequestCookie *cookie = i->second;
- pending_cpu_requests.erase(i);
-
- Packet *pkt = cookie->pkt;
- M5Port *port = cookie->m5Port;
- delete cookie;
+ RubyPort::SenderState *senderState =
+ safe_cast<RubyPort::SenderState *>(pkt->senderState);
+ M5Port *port = senderState->port;
+ assert(port != NULL);
+
+ // pop the sender state from the packet
+ pkt->senderState = senderState->saved;
+ delete senderState;
port->hitCallback(pkt);
}
diff -r a69fcc6caa1b -r 2b2eb8881591 src/mem/ruby/system/RubyPort.hh
--- a/src/mem/ruby/system/RubyPort.hh Mon Dec 21 08:29:40 2009 -0800
+++ b/src/mem/ruby/system/RubyPort.hh Mon Dec 21 08:29:40 2009 -0800
@@ -76,11 +76,15 @@
Port *getPort(const std::string &if_name, int idx);
- virtual int64_t makeRequest(const RubyRequest & request) = 0;
+ virtual bool makeRequest(const RubyRequest & request) = 0;
- void registerHitCallback(void (*hit_callback)(int64_t request_id)) {
- assert(m_hit_callback == NULL); // can't assign hit_callback twice
- m_hit_callback = hit_callback;
+ void registerHitCallback(void (RubyPort::*hit_callback)(PacketPtr pkt)) {
+ //
+ // By default, the ruby_hit_callback should be set. One should only
+ // try to reset the ruby_hit_callback member once.
+ //
+ assert(m_hit_callback == &RubyPort::ruby_hit_callback);
+ this->m_hit_callback = hit_callback;
}
//
@@ -91,29 +95,9 @@
protected:
const string m_name;
- void (*m_hit_callback)(int64_t);
- static void ruby_hit_callback(int64_t req_id);
-
- int64_t makeUniqueRequestID() {
- // The request ID is generated by combining the port ID with a request
count
- // so that request IDs can be formed concurrently by multiple threads.
- // IDs are formed as follows:
- //
- //
- // 0 PortID Request Count
- //
+----+---------------+-----------------------------------------------------+
- // | 63 | 62-48 | 47-0
|
- //
+----+---------------+-----------------------------------------------------+
- //
- //
- // This limits the system to a maximum of 2^11 == 2048 components
- // and 2^48 ~= 3x10^14 requests per component
-
- int64_t id = (static_cast<uint64_t>(m_port_id) << 48) | m_request_cnt;
- m_request_cnt++;
- // assert((m_request_cnt & (1<<48)) == 0);
- return id;
- }
+ void (RubyPort::*m_hit_callback)(PacketPtr);
+ void ruby_hit_callback(PacketPtr pkt);
+ void hit(PacketPtr pkt);
int m_version;
AbstractController* m_controller;
@@ -121,20 +105,8 @@
PioPort* pio_port;
private:
- static uint16_t m_num_ports;
- uint16_t m_port_id;
- uint64_t m_request_cnt;
-
- struct RequestCookie {
- Packet *pkt;
- M5Port *m5Port;
- RequestCookie(Packet *p, M5Port *m5p)
- : pkt(p), m5Port(m5p)
- {}
- };
-
- typedef std::map<int64_t, RequestCookie*> RequestMap;
- static RequestMap pending_cpu_requests;
+ uint16_t m_port_id;
+ uint64_t m_request_cnt;
M5Port* physMemPort;
diff -r a69fcc6caa1b -r 2b2eb8881591 src/mem/ruby/system/Sequencer.cc
--- a/src/mem/ruby/system/Sequencer.cc Mon Dec 21 08:29:40 2009 -0800
+++ b/src/mem/ruby/system/Sequencer.cc Mon Dec 21 08:29:40 2009 -0800
@@ -43,9 +43,6 @@
#include "params/RubySequencer.hh"
-//Sequencer::Sequencer(int core_id, MessageBuffer* mandatory_q)
-
-#define LLSC_FAIL -2
ostream& operator<<(std::ostream& out, const SequencerRequest& obj) {
out << obj.ruby_request << flush;
return out;
@@ -335,7 +332,7 @@
}
}
- ruby_hit_callback(srequest->id);
+ (*this.*m_hit_callback)(ruby_request.pkt);
delete srequest;
}
@@ -388,34 +385,34 @@
}
-int64_t Sequencer::makeRequest(const RubyRequest & request)
+bool Sequencer::makeRequest(const RubyRequest & request)
{
assert(Address(request.paddr).getOffset() + request.len <=
RubySystem::getBlockSizeBytes());
if (isReady(request)) {
- int64_t id = makeUniqueRequestID();
- SequencerRequest *srequest = new SequencerRequest(request, id,
g_eventQueue_ptr->getTime());
+ SequencerRequest *srequest = new SequencerRequest(request,
+
g_eventQueue_ptr->getTime());
bool found = insertRequest(srequest);
if (!found)
if (request.type == RubyRequestType_Locked_Write) {
// NOTE: it is OK to check the locked flag here as the mandatory queue
will be checked first
// ensuring that nothing comes between checking the flag and servicing
the store
if (!m_dataCache_ptr->isLocked(line_address(Address(request.paddr)),
m_version)) {
- return LLSC_FAIL;
+ return false;
}
else {
m_dataCache_ptr->clearLocked(line_address(Address(request.paddr)));
}
}
- if (request.type == RubyRequestType_RMW_Write) {
- m_controller->started_writes();
- }
- issueRequest(request);
+ if (request.type == RubyRequestType_RMW_Write) {
+ m_controller->started_writes();
+ }
+ issueRequest(request);
// TODO: issue hardware prefetches here
- return id;
+ return true;
}
else {
- return -1;
+ return false;
}
}
diff -r a69fcc6caa1b -r 2b2eb8881591 src/mem/ruby/system/Sequencer.hh
--- a/src/mem/ruby/system/Sequencer.hh Mon Dec 21 08:29:40 2009 -0800
+++ b/src/mem/ruby/system/Sequencer.hh Mon Dec 21 08:29:40 2009 -0800
@@ -56,11 +56,12 @@
struct SequencerRequest {
RubyRequest ruby_request;
- int64_t id;
Time issue_time;
- SequencerRequest(const RubyRequest & _ruby_request, int64_t _id, Time
_issue_time)
- : ruby_request(_ruby_request), id(_id), issue_time(_issue_time)
+ SequencerRequest(const RubyRequest & _ruby_request,
+ Time _issue_time)
+ : ruby_request(_ruby_request),
+ issue_time(_issue_time)
{}
};
@@ -86,7 +87,7 @@
void readCallback(const Address& address, DataBlock& data);
// called by Tester or Simics
- int64_t makeRequest(const RubyRequest & request);
+ bool makeRequest(const RubyRequest & request);
bool isReady(const RubyRequest& request);
bool empty() const;
diff -r a69fcc6caa1b -r 2b2eb8881591 src/mem/ruby/system/System.hh
--- a/src/mem/ruby/system/System.hh Mon Dec 21 08:29:40 2009 -0800
+++ b/src/mem/ruby/system/System.hh Mon Dec 21 08:29:40 2009 -0800
@@ -106,7 +106,8 @@
// Public Methods
static RubyPort* getPortOnly(const string & name) {
assert(m_ports.count(name) == 1); return m_ports[name]; }
- static RubyPort* getPort(const string & name, void (*hit_callback)(int64_t))
{
+ static RubyPort* getPort(const string & name,
+ void (RubyPort::*hit_callback)(PacketPtr)) {
if (m_ports.count(name) != 1){
cerr << "Port " << name << " has " << m_ports.count(name) << "
instances" << endl;
}
_______________________________________________
m5-dev mailing list
[email protected]
http://m5sim.org/mailman/listinfo/m5-dev