Shivani Parekh has uploaded this change for review. (
https://gem5-review.googlesource.com/c/public/gem5/+/33523 )
Change subject: mem-ruby: Update master/slave variables
......................................................................
mem-ruby: Update master/slave variables
master -> mem_side, slave -> cpu_side
update comments
Change-Id: I21ace53f697c4e12d1d7e8d5cbd6fa68017a7f74
---
M src/mem/ruby/network/MessageBuffer.py
M src/mem/ruby/network/Network.py
M src/mem/ruby/slicc_interface/AbstractController.hh
M src/mem/ruby/system/DMASequencer.cc
M src/mem/ruby/system/GPUCoalescer.cc
M src/mem/ruby/system/RubyPort.cc
M src/mem/ruby/system/RubyPort.hh
M src/mem/ruby/system/RubySystem.cc
M src/mem/ruby/system/RubySystem.hh
M src/mem/ruby/system/Sequencer.py
10 files changed, 106 insertions(+), 92 deletions(-)
diff --git a/src/mem/ruby/network/MessageBuffer.py
b/src/mem/ruby/network/MessageBuffer.py
index c796960..712af5f 100644
--- a/src/mem/ruby/network/MessageBuffer.py
+++ b/src/mem/ruby/network/MessageBuffer.py
@@ -40,5 +40,7 @@
random delays if RubySystem \
randomization flag is True)")
- master = RequestPort("Master port to MessageBuffer receiver")
- slave = ResponsePort("Slave port from MessageBuffer sender")
+ out_port = RequestPort("Request port to MessageBuffer receiver")
+ master = DeprecatedParam(out_port, '`master` is now called `out_port`')
+ in_port = ResponsePort("Response port from MessageBuffer sender")
+ slave = DeprecatedParam(in_port, '`slave` is now called `in_port`')
\ No newline at end of file
diff --git a/src/mem/ruby/network/Network.py
b/src/mem/ruby/network/Network.py
index 5acad60..cd5625a 100644
--- a/src/mem/ruby/network/Network.py
+++ b/src/mem/ruby/network/Network.py
@@ -49,5 +49,7 @@
ext_links = VectorParam.BasicExtLink("Links to external nodes")
int_links = VectorParam.BasicIntLink("Links between internal nodes")
- slave = VectorSlavePort("CPU slave port")
- master = VectorMasterPort("CPU master port")
+ in_port = VectorSlavePort("CPU response port")
+ slave = DeprecatedParam(in_port, '`slave` is now called `in_port`')
+ out_port = VectorMasterPort("CPU request port")
+ master = DeprecatedParam(out_port, '`master` is now called `out_port`')
diff --git a/src/mem/ruby/slicc_interface/AbstractController.hh
b/src/mem/ruby/slicc_interface/AbstractController.hh
index daa52da..1d7e5a7 100644
--- a/src/mem/ruby/slicc_interface/AbstractController.hh
+++ b/src/mem/ruby/slicc_interface/AbstractController.hh
@@ -237,7 +237,7 @@
void recvReqRetry();
};
- /* Master port to the memory controller. */
+ /* Request port to the memory controller. */
MemoryPort memoryPort;
// State that is stored in packets sent to the memory controller.
diff --git a/src/mem/ruby/system/DMASequencer.cc
b/src/mem/ruby/system/DMASequencer.cc
index bad49c9..80cbc4c 100644
--- a/src/mem/ruby/system/DMASequencer.cc
+++ b/src/mem/ruby/system/DMASequencer.cc
@@ -57,7 +57,7 @@
RubyPort::init();
m_data_block_mask = mask(RubySystem::getBlockSizeBits());
- for (const auto &s_port : slave_ports)
+ for (const auto &s_port : cpu_side_ports)
s_port->sendRangeChange();
}
diff --git a/src/mem/ruby/system/GPUCoalescer.cc
b/src/mem/ruby/system/GPUCoalescer.cc
index 80bc19a..ce1f149 100644
--- a/src/mem/ruby/system/GPUCoalescer.cc
+++ b/src/mem/ruby/system/GPUCoalescer.cc
@@ -669,7 +669,7 @@
// back the requesting CU when we receive write
// complete callbacks for all issued Ruby requests of this
// instruction.
- RubyPort::MemSlavePort* mem_slave_port = ss->port;
+ RubyPort::MemSlavePort* mem_response_port = ss->port;
GPUDynInstPtr gpuDynInst = nullptr;
@@ -686,7 +686,8 @@
}
PendingWriteInst& inst = pendingWriteInsts[seqNum];
- inst.addPendingReq(mem_slave_port, gpuDynInst,
m_usingRubyTester);
+ inst.addPendingReq(mem_response_port, gpuDynInst,
+ m_usingRubyTester);
}
return true;
diff --git a/src/mem/ruby/system/RubyPort.cc
b/src/mem/ruby/system/RubyPort.cc
index 4510e3a..fa13cf4 100644
--- a/src/mem/ruby/system/RubyPort.cc
+++ b/src/mem/ruby/system/RubyPort.cc
@@ -55,27 +55,28 @@
: ClockedObject(p), m_ruby_system(p->ruby_system),
m_version(p->version),
m_controller(NULL), m_mandatory_q_ptr(NULL),
m_usingRubyTester(p->using_ruby_tester), system(p->system),
- pioMasterPort(csprintf("%s.pio-master-port", name()), this),
- pioSlavePort(csprintf("%s.pio-slave-port", name()), this),
- memMasterPort(csprintf("%s.mem-master-port", name()), this),
- memSlavePort(csprintf("%s-mem-slave-port", name()), this,
+ pioRequestPort(csprintf("%s.pio-request-port", name()), this),
+ pioResponsePort(csprintf("%s.pio-response-port", name()), this),
+ memRequestPort(csprintf("%s.mem-request-port", name()), this),
+ memResponsePort(csprintf("%s-mem-response-port", name()), this,
p->ruby_system->getAccessBackingStore(), -1,
p->no_retry_on_stall),
- gotAddrRanges(p->port_master_connection_count),
+ gotAddrRanges(p->port_mem_side_connection_count),
m_isCPUSequencer(p->is_cpu_sequencer)
{
assert(m_version != -1);
- // create the slave ports based on the number of connected ports
- for (size_t i = 0; i < p->port_slave_connection_count; ++i) {
- slave_ports.push_back(new MemSlavePort(csprintf("%s.slave%d",
name(),
- i), this, p->ruby_system->getAccessBackingStore(),
+ // create the cpu_side ports based on the number of connected ports
+ for (size_t i = 0; i < p->port_cpu_side_connection_count; ++i) {
+ cpu_side_ports.push_back(new MemSlavePort(csprintf
+ ("%s.cpu_side%d", name(), i), this,
+ p->ruby_system->getAccessBackingStore(),
i, p->no_retry_on_stall));
}
- // create the master ports based on the number of connected ports
- for (size_t i = 0; i < p->port_master_connection_count; ++i) {
- master_ports.push_back(new PioMasterPort(csprintf("%s.master%d",
+ // create the mem_side ports based on the number of connected ports
+ for (size_t i = 0; i < p->port_mem_side_connection_count; ++i) {
+ mem_side_ports.push_back(new
PioMasterPort(csprintf("%s.mem_side%d",
name(), i), this));
}
}
@@ -90,30 +91,30 @@
Port &
RubyPort::getPort(const std::string &if_name, PortID idx)
{
- if (if_name == "mem_master_port") {
- return memMasterPort;
- } else if (if_name == "pio_master_port") {
- return pioMasterPort;
- } else if (if_name == "mem_slave_port") {
- return memSlavePort;
- } else if (if_name == "pio_slave_port") {
- return pioSlavePort;
- } else if (if_name == "master") {
+ if (if_name == "mem_request_port") {
+ return memRequestPort;
+ } else if (if_name == "pio_request_port") {
+ return pioRequestPort;
+ } else if (if_name == "mem_response_port") {
+ return memResponsePort;
+ } else if (if_name == "pio_response_port") {
+ return pioResponsePort;
+ } else if (if_name == "mem_side") {
// used by the x86 CPUs to connect the interrupt PIO and interrupt
- // slave port
- if (idx >= static_cast<PortID>(master_ports.size())) {
- panic("RubyPort::getPort master: unknown index %d\n", idx);
+ // response port
+ if (idx >= static_cast<PortID>(mem_side_ports.size())) {
+ panic("RubyPort::getPort mem_side: unknown index %d\n", idx);
}
- return *master_ports[idx];
- } else if (if_name == "slave") {
+ return *mem_side_ports[idx];
+ } else if (if_name == "cpu_side") {
// used by the CPUs to connect the caches to the interconnect, and
- // for the x86 case also the interrupt master
- if (idx >= static_cast<PortID>(slave_ports.size())) {
- panic("RubyPort::getPort slave: unknown index %d\n", idx);
+ // for the x86 case also the interrupt requestor
+ if (idx >= static_cast<PortID>(cpu_side_ports.size())) {
+ panic("RubyPort::getPort cpu_side: unknown index %d\n", idx);
}
- return *slave_ports[idx];
+ return *cpu_side_ports[idx];
}
// pass it along to our super class
@@ -160,7 +161,7 @@
DPRINTF(RubyPort, "Response for address: 0x%#x\n", pkt->getAddr());
// send next cycle
- rp->pioSlavePort.schedTimingResp(
+ rp->pioResponsePort.schedTimingResp(
pkt, curTick() + rp->m_ruby_system->clockPeriod());
return true;
}
@@ -194,14 +195,14 @@
{
RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
- for (size_t i = 0; i < ruby_port->master_ports.size(); ++i) {
- AddrRangeList l = ruby_port->master_ports[i]->getAddrRanges();
+ for (size_t i = 0; i < ruby_port->mem_side_ports.size(); ++i) {
+ AddrRangeList l = ruby_port->mem_side_ports[i]->getAddrRanges();
for (auto it = l.begin(); it != l.end(); ++it) {
if (it->contains(pkt->getAddr())) {
// generally it is not safe to assume success here as
// the port could be blocked
bool M5_VAR_USED success =
- ruby_port->master_ports[i]->sendTimingReq(pkt);
+ ruby_port->mem_side_ports[i]->sendTimingReq(pkt);
assert(success);
return true;
}
@@ -219,11 +220,11 @@
panic("Ruby supports atomic accesses only in noncaching mode\n");
}
- for (size_t i = 0; i < ruby_port->master_ports.size(); ++i) {
- AddrRangeList l = ruby_port->master_ports[i]->getAddrRanges();
+ for (size_t i = 0; i < ruby_port->mem_side_ports.size(); ++i) {
+ AddrRangeList l = ruby_port->mem_side_ports[i]->getAddrRanges();
for (auto it = l.begin(); it != l.end(); ++it) {
if (it->contains(pkt->getAddr())) {
- return ruby_port->master_ports[i]->sendAtomic(pkt);
+ return ruby_port->mem_side_ports[i]->sendAtomic(pkt);
}
}
}
@@ -253,7 +254,7 @@
// pio port.
if (pkt->cmd != MemCmd::MemSyncReq) {
if (!isPhysMemAddress(pkt)) {
- assert(ruby_port->memMasterPort.isConnected());
+ assert(ruby_port->memRequestPort.isConnected());
DPRINTF(RubyPort, "Request address %#x assumed to be a "
"pio address\n", pkt->getAddr());
@@ -263,7 +264,7 @@
// send next cycle
RubySystem *rs = ruby_port->m_ruby_system;
- ruby_port->memMasterPort.schedTimingReq(pkt,
+ ruby_port->memRequestPort.schedTimingReq(pkt,
curTick() + rs->clockPeriod());
return true;
}
@@ -314,7 +315,7 @@
// pio port.
if (pkt->cmd != MemCmd::MemSyncReq) {
if (!isPhysMemAddress(pkt)) {
- assert(ruby_port->memMasterPort.isConnected());
+ assert(ruby_port->memRequestPort.isConnected());
DPRINTF(RubyPort, "Request address %#x assumed to be a "
"pio address\n", pkt->getAddr());
@@ -323,7 +324,7 @@
pkt->pushSenderState(new SenderState(this));
// send next cycle
- Tick req_ticks = ruby_port->memMasterPort.sendAtomic(pkt);
+ Tick req_ticks = ruby_port->memRequestPort.sendAtomic(pkt);
return ruby_port->ticksToCycles(req_ticks);
}
@@ -373,8 +374,8 @@
// pio port.
if (!isPhysMemAddress(pkt)) {
DPRINTF(RubyPort, "Pio Request for address: 0x%#x\n",
pkt->getAddr());
- assert(rp->pioMasterPort.isConnected());
- rp->pioMasterPort.sendFunctional(pkt);
+ assert(rp->pioRequestPort.isConnected());
+ rp->pioRequestPort.sendFunctional(pkt);
return;
}
@@ -450,8 +451,8 @@
RubyPort::trySendRetries()
{
//
- // If we had to stall the MemSlavePorts, wake them up because the
sequencer
- // likely has free resources now.
+ // If we had to stall the MemResponsePorts, wake them up because the
+ // sequencer likely has free resources now.
//
if (!retryList.empty()) {
// Record the current list of ports to retry on a temporary list
@@ -511,7 +512,7 @@
{
bool needsResponse = pkt->needsResponse();
- // Unless specified at configuraiton, all responses except failed SC
+ // Unless specified at configuration, all responses except failed SC
// and Flush operations access M5 physical memory.
bool accessPhysMem = access_backing_store;
@@ -588,9 +589,9 @@
AddrRangeList ranges;
RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
- for (size_t i = 0; i < ruby_port->master_ports.size(); ++i) {
+ for (size_t i = 0; i < ruby_port->mem_side_ports.size(); ++i) {
ranges.splice(ranges.begin(),
- ruby_port->master_ports[i]->getAddrRanges());
+ ruby_port->mem_side_ports[i]->getAddrRanges());
}
for (const auto M5_VAR_USED &r : ranges)
DPRINTF(RubyPort, "%s\n", r.to_string());
@@ -619,8 +620,9 @@
// Use a single packet to signal all snooping ports of the
invalidation.
// This assumes that snooping ports do NOT modify the packet/request
Packet pkt(request, MemCmd::InvalidateReq);
- for (CpuPortIter p = slave_ports.begin(); p != slave_ports.end(); ++p)
{
- // check if the connected master port is snooping
+ for (CpuPortIter p = cpu_side_ports.begin(); p != cpu_side_ports.end();
+ ++p) {
+ // check if the connected request port is snooping
if ((*p)->isSnooping()) {
// send as a snoop request
(*p)->sendTimingSnoopReq(&pkt);
@@ -634,7 +636,7 @@
RubyPort &r = static_cast<RubyPort &>(owner);
r.gotAddrRanges--;
if (r.gotAddrRanges == 0 && FullSystem) {
- r.pioSlavePort.sendRangeChange();
+ r.pioResponsePort.sendRangeChange();
}
}
@@ -643,7 +645,7 @@
RubyPort::functionalWrite(Packet *func_pkt)
{
int num_written = 0;
- for (auto port : slave_ports) {
+ for (auto port : cpu_side_ports) {
if (port->trySatisfyFunctional(func_pkt)) {
num_written += 1;
}
diff --git a/src/mem/ruby/system/RubyPort.hh
b/src/mem/ruby/system/RubyPort.hh
index 1e21090..2642540 100644
--- a/src/mem/ruby/system/RubyPort.hh
+++ b/src/mem/ruby/system/RubyPort.hh
@@ -178,11 +178,11 @@
* Called by the PIO port when receiving a timing response.
*
* @param pkt Response packet
- * @param master_port_id Port id of the PIO port
+ * @param request_port_id Port id of the PIO port
*
* @return Whether successfully sent
*/
- bool recvTimingResp(PacketPtr pkt, PortID master_port_id);
+ bool recvTimingResp(PacketPtr pkt, PortID request_port_id);
RubySystem *m_ruby_system;
uint32_t m_version;
@@ -191,7 +191,7 @@
bool m_usingRubyTester;
System* system;
- std::vector<MemSlavePort *> slave_ports;
+ std::vector<MemSlavePort *> cpu_side_ports;
private:
bool onRetryList(MemSlavePort * port)
@@ -205,15 +205,15 @@
retryList.push_back(port);
}
- PioMasterPort pioMasterPort;
- PioSlavePort pioSlavePort;
- MemMasterPort memMasterPort;
- MemSlavePort memSlavePort;
+ PioMasterPort pioRequestPort;
+ PioSlavePort pioResponsePort;
+ MemMasterPort memRequestPort;
+ MemSlavePort memResponsePort;
unsigned int gotAddrRanges;
/** Vector of M5 Ports attached to this Ruby port. */
typedef std::vector<MemSlavePort *>::iterator CpuPortIter;
- std::vector<PioMasterPort *> master_ports;
+ std::vector<PioMasterPort *> mem_side_ports;
//
// Based on similar code in the M5 bus. Stores pointers to those ports
diff --git a/src/mem/ruby/system/RubySystem.cc
b/src/mem/ruby/system/RubySystem.cc
index c35ab02..9507d25 100644
--- a/src/mem/ruby/system/RubySystem.cc
+++ b/src/mem/ruby/system/RubySystem.cc
@@ -135,11 +135,11 @@
// AbstractControllers are registered in their constructor. This is
done
// in two steps: (1) Add all of the AbstractControllers. Since we don't
// have a mapping of MasterID to MachineID this is the easiest way to
- // filter out AbstractControllers from non-Ruby masters. (2) Go through
+ // filter out AbstractControllers from non-Ruby requestors. (2) Go
through
// the system's list of MasterIDs and add missing MasterIDs to network
0
// (the default).
for (auto& cntrl : m_abs_cntrl_vec) {
- MasterID mid = cntrl->getMasterId();
+ MasterID id = cntrl->getMasterId();
MachineID mach_id = cntrl->getMachineID();
// These are setup in Network constructor and should exist
@@ -148,16 +148,16 @@
MachineIDToString(mach_id).c_str());
auto network_id = machineToNetwork[mach_id];
- masterToNetwork.insert(std::make_pair(mid, network_id));
+ requestorToNetwork.insert(std::make_pair(id, network_id));
// Create helper vectors for each network to iterate over.
netCntrls[network_id].push_back(cntrl);
}
- // Default all other master IDs to network 0
- for (auto mid = 0; mid < params()->system->maxMasters(); ++mid) {
- if (!masterToNetwork.count(mid)) {
- masterToNetwork.insert(std::make_pair(mid, 0));
+ // Default all other requestor IDs to network 0
+ for (auto id = 0; id < params()->system->maxMasters(); ++id) {
+ if (!requestorToNetwork.count(id)) {
+ requestorToNetwork.insert(std::make_pair(id, 0));
}
}
}
@@ -491,9 +491,9 @@
unsigned int num_invalid = 0;
// Only send functional requests within the same network.
- assert(masterToNetwork.count(pkt->masterId()));
- int master_net_id = masterToNetwork[pkt->masterId()];
- assert(netCntrls.count(master_net_id));
+ assert(requestorToNetwork.count(pkt->masterId()));
+ int request_net_id = requestorToNetwork[pkt->masterId()];
+ assert(netCntrls.count(request_net_id));
AbstractController *ctrl_ro = nullptr;
AbstractController *ctrl_rw = nullptr;
@@ -501,7 +501,7 @@
// In this loop we count the number of controllers that have the given
// address in read only, read write and busy states.
- for (auto& cntrl : netCntrls[master_net_id]) {
+ for (auto& cntrl : netCntrls[request_net_id]) {
access_perm = cntrl-> getAccessPermission(line_address);
if (access_perm == AccessPermission_Read_Only){
num_ro++;
@@ -537,7 +537,7 @@
// The reason is because the Backing_Store memory could easily be
stale, if
// there are copies floating around the cache hierarchy, so you want
to read
// it only if it's not in the cache hierarchy at all.
- int num_controllers = netCntrls[master_net_id].size();
+ int num_controllers = netCntrls[request_net_id].size();
if (num_invalid == (num_controllers - 1) && num_backing_store == 1) {
DPRINTF(RubySystem, "only copy in Backing_Store memory, read from
it\n");
ctrl_backing_store->functionalRead(line_address, pkt);
@@ -573,7 +573,7 @@
DPRINTF(RubySystem, "Controllers functionalRead lookup "
"(num_maybe_stale=%d, num_busy = %d)\n",
num_maybe_stale, num_busy);
- for (auto& cntrl : netCntrls[master_net_id]) {
+ for (auto& cntrl : netCntrls[request_net_id]) {
if (cntrl->functionalReadBuffers(pkt))
return true;
}
@@ -605,11 +605,11 @@
uint32_t M5_VAR_USED num_functional_writes = 0;
// Only send functional requests within the same network.
- assert(masterToNetwork.count(pkt->masterId()));
- int master_net_id = masterToNetwork[pkt->masterId()];
- assert(netCntrls.count(master_net_id));
+ assert(requestorToNetwork.count(pkt->masterId()));
+ int request_net_id = requestorToNetwork[pkt->masterId()];
+ assert(netCntrls.count(request_net_id));
- for (auto& cntrl : netCntrls[master_net_id]) {
+ for (auto& cntrl : netCntrls[request_net_id]) {
num_functional_writes += cntrl->functionalWriteBuffers(pkt);
access_perm = cntrl->getAccessPermission(line_addr);
diff --git a/src/mem/ruby/system/RubySystem.hh
b/src/mem/ruby/system/RubySystem.hh
index d14b383..1cf1271 100644
--- a/src/mem/ruby/system/RubySystem.hh
+++ b/src/mem/ruby/system/RubySystem.hh
@@ -142,7 +142,7 @@
Cycles m_start_cycle;
std::unordered_map<MachineID, unsigned> machineToNetwork;
- std::unordered_map<MasterID, unsigned> masterToNetwork;
+ std::unordered_map<MasterID, unsigned> requestorToNetwork;
std::unordered_map<unsigned, std::vector<AbstractController*>>
netCntrls;
public:
diff --git a/src/mem/ruby/system/Sequencer.py
b/src/mem/ruby/system/Sequencer.py
index f97224d..18ebd0e 100644
--- a/src/mem/ruby/system/Sequencer.py
+++ b/src/mem/ruby/system/Sequencer.py
@@ -34,12 +34,19 @@
cxx_header = "mem/ruby/system/RubyPort.hh"
version = Param.Int(0, "")
- slave = VectorSlavePort("CPU slave port")
- master = VectorMasterPort("CPU master port")
- pio_master_port = RequestPort("Ruby mem master port")
- mem_master_port = RequestPort("Ruby mem master port")
- pio_slave_port = ResponsePort("Ruby pio slave port")
- mem_slave_port = ResponsePort("Ruby memory port")
+ cpu_side = VectorSlavePort("CPU response port")
+ slave = DeprecatedParam(cpu_side, '`slave` is now called `cpu_side`')
+ mem_side = VectorMasterPort("CPU request port")
+ master = DeprecatedParam(mem_side, '`master` is now called `mem_side`')
+ pio_request_port = RequestPort("Ruby pio request port")
+ pio_master_port = DeprecatedParam(pio_request_port,
+ '`pio_master_port` is now called
`pio_request_port`')
+ mem_request_port = RequestPort("Ruby mem request port")
+ mem_master_port = DeprecatedParam(mem_request_port,
+ '`mem_master_port` is now called
`mem_request_port`')
+ pio_response_port = ResponsePort("Ruby pio response port")
+ pio_slave_port = DeprecatedParam(pio_response_port,
+ '`pio_slave_port` is now called `pio_response_port`')
+ mem_response_port = ResponsePort("Ruby mem response port")
+ mem_slave_port = DeprecatedParam(mem_response_port,
+ '`mem_slave_port` is now called `mem_response_port`')
using_ruby_tester = Param.Bool(False, "")
no_retry_on_stall = Param.Bool(False, "")
--
To view, visit https://gem5-review.googlesource.com/c/public/gem5/+/33523
To unsubscribe, or for help writing mail filters, visit
https://gem5-review.googlesource.com/settings
Gerrit-Project: public/gem5
Gerrit-Branch: develop
Gerrit-Change-Id: I21ace53f697c4e12d1d7e8d5cbd6fa68017a7f74
Gerrit-Change-Number: 33523
Gerrit-PatchSet: 1
Gerrit-Owner: Shivani Parekh <[email protected]>
Gerrit-MessageType: newchange
_______________________________________________
gem5-dev mailing list -- [email protected]
To unsubscribe send an email to [email protected]
%(web_page_url)slistinfo%(cgiext)s/%(_internal_name)s