Tiago Mück has uploaded this change for review. ( https://gem5-review.googlesource.com/c/public/gem5/+/41864 )
Change subject: mem-ruby: add priorities in SimpleNetwork routing
......................................................................
mem-ruby: add priorities in SimpleNetwork routing
Configurations can specify a routing priority for message buffers.
This priority is used by SimpleNetwork when checking for messages
at the routers' input ports. Higher-priority ports are always checked
first.
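For example, a protocol config could give one of a controller's input
buffers precedence over another (a minimal sketch; the buffer names and
values are hypothetical, and only the routing_priority parameter is
introduced by this change):

    from m5.objects import MessageBuffer

    # Smaller routing_priority values are checked first by the
    # SimpleNetwork routers.
    responseFromDir = MessageBuffer(ordered = True)
    responseFromDir.routing_priority = -1  # serviced before defaults
    requestToDir = MessageBuffer()
    requestToDir.routing_priority = 0      # default priority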
JIRA: https://gem5.atlassian.net/browse/GEM5-920
Change-Id: I7e2b35e2cae63086a76def1145f9b4b56220a2ba
Signed-off-by: Tiago Mück <[email protected]>
---
M src/mem/ruby/network/MessageBuffer.cc
M src/mem/ruby/network/MessageBuffer.hh
M src/mem/ruby/network/MessageBuffer.py
M src/mem/ruby/network/simple/PerfectSwitch.cc
M src/mem/ruby/network/simple/PerfectSwitch.hh
5 files changed, 76 insertions(+), 26 deletions(-)
diff --git a/src/mem/ruby/network/MessageBuffer.cc b/src/mem/ruby/network/MessageBuffer.cc
index 5c8bf59..bde4de7 100644
--- a/src/mem/ruby/network/MessageBuffer.cc
+++ b/src/mem/ruby/network/MessageBuffer.cc
@@ -59,6 +59,7 @@
m_last_arrival_time(0), m_strict_fifo(p.ordered),
m_randomization(p.randomization),
m_allow_zero_latency(p.allow_zero_latency),
+ m_routing_priority(p.routing_priority),
ADD_STAT(m_not_avail_count, "Number of times this buffer did not have "
"N slots available"),
ADD_STAT(m_buf_msgs, "Average number of messages in buffer"),
diff --git a/src/mem/ruby/network/MessageBuffer.hh b/src/mem/ruby/network/MessageBuffer.hh
index d940dcb..8c6ceda 100644
--- a/src/mem/ruby/network/MessageBuffer.hh
+++ b/src/mem/ruby/network/MessageBuffer.hh
@@ -152,6 +152,9 @@
void setIncomingLink(int link_id) { m_input_link_id = link_id; }
void setVnet(int net) { m_vnet_id = net; }
+ int getIncomingLink() const { return m_input_link_id; }
+ int getVnet() const { return m_vnet_id; }
+
Port &
getPort(const std::string &, PortID idx=InvalidPortID) override
{
@@ -181,6 +184,8 @@
return functionalAccess(pkt, true, &mask) == 1;
}
+ int routingPriority() const { return m_routing_priority; }
+
private:
void reanalyzeList(std::list<MsgPtr> &, Tick);
@@ -264,6 +269,8 @@
const MessageRandomization m_randomization;
const bool m_allow_zero_latency;
+ const int m_routing_priority;
+
int m_input_link_id;
int m_vnet_id;
diff --git a/src/mem/ruby/network/MessageBuffer.py b/src/mem/ruby/network/MessageBuffer.py
index cb7f02d..d0161d6 100644
--- a/src/mem/ruby/network/MessageBuffer.py
+++ b/src/mem/ruby/network/MessageBuffer.py
@@ -69,3 +69,6 @@
max_dequeue_rate = Param.Unsigned(0, "Maximum number of messages that can \
                                      be dequeued per cycle \
                                      (0 allows dequeueing all ready \
                                      messages)")
+ routing_priority = Param.Int(0, "Buffer priority when messages are \
+                                  consumed by the network. Smaller value \
+                                  means higher priority")
diff --git a/src/mem/ruby/network/simple/PerfectSwitch.cc b/src/mem/ruby/network/simple/PerfectSwitch.cc
index 201d091..f7a4313 100644
--- a/src/mem/ruby/network/simple/PerfectSwitch.cc
+++ b/src/mem/ruby/network/simple/PerfectSwitch.cc
@@ -82,11 +82,40 @@
in[i]->setConsumer(this);
in[i]->setIncomingLink(port);
in[i]->setVnet(i);
+ updatePriorityGroups(i, in[i]);
}
}
}
void
+PerfectSwitch::updatePriorityGroups(int vnet, MessageBuffer* in_buf)
+{
+ while (m_in_prio.size() <= vnet) {
+ m_in_prio.emplace_back();
+ m_in_prio_groups.emplace_back();
+ }
+
+ m_in_prio[vnet].push_back(in_buf);
+
+ struct MessageBufferSort {
+ bool operator() (const MessageBuffer* i,
+ const MessageBuffer* j)
+ { return i->routingPriority() < j->routingPriority(); }
+ } sortObj;
+ std::sort(m_in_prio[vnet].begin(), m_in_prio[vnet].end(), sortObj);
+
+ // reset groups
+ m_in_prio_groups[vnet].clear();
+ int cur_prio = m_in_prio[vnet].front()->routingPriority();
+ m_in_prio_groups[vnet].emplace_back();
+ for (auto buf : m_in_prio[vnet]) {
+ if (buf->routingPriority() != cur_prio)
+ m_in_prio_groups[vnet].emplace_back();
+ m_in_prio_groups[vnet].back().push_back(buf);
+ }
+}
+
+void
PerfectSwitch::addOutPort(const std::vector<MessageBuffer*>& out,
const NetDest& routing_table_entry,
const PortDirection &dst_inport,
@@ -111,35 +140,38 @@
PerfectSwitch::operateVnet(int vnet)
{
if (m_pending_message_count[vnet] > 0) {
- // first check the port with the oldest message
- unsigned incoming = 0;
- Tick lowest_tick = MaxTick;
- for (int counter = 0; counter < m_in.size(); ++counter) {
- MessageBuffer *buffer = inBuffer(counter, vnet);
- if (buffer == nullptr)
- continue;
- if (buffer->readyTime() < lowest_tick){
- lowest_tick = buffer->readyTime();
- incoming = counter;
+ // iterate over priority levels for this vnet
+ for (auto &in : m_in_prio_groups[vnet]) {
+ // first check the port with the oldest message
+ unsigned incoming = 0;
+ Tick lowest_tick = MaxTick;
+ for (int counter = 0; counter < in.size(); ++counter) {
+ MessageBuffer *buffer = in[counter];
+ if (buffer == nullptr)
+ continue;
+ if (buffer->readyTime() < lowest_tick){
+ lowest_tick = buffer->readyTime();
+ incoming = counter;
+ }
}
- }
- DPRINTF(RubyNetwork, "vnet %d: %d pending msgs. "
- "Checking port %d first\n",
- vnet, m_pending_message_count[vnet], incoming);
- // check all ports starting with the one with the oldest message
- for (int counter = 0; counter < m_in.size();
- ++counter, incoming = (incoming + 1) % m_in.size()) {
- MessageBuffer *buffer = inBuffer(incoming, vnet);
- if (buffer == nullptr)
- continue;
- operateMessageBuffer(buffer, incoming, vnet);
+ DPRINTF(RubyNetwork, "vnet %d: %d pending msgs. "
+ "Checking port %d (%s) first\n",
+ vnet, m_pending_message_count[vnet],
+ in[incoming]->getIncomingLink(), in[incoming]);
+ // check all ports starting with the one with the oldest message
+ for (int counter = 0; counter < in.size();
+ ++counter, incoming = (incoming + 1) % in.size()) {
+ MessageBuffer *buffer = in[incoming];
+ if (buffer == nullptr)
+ continue;
+ operateMessageBuffer(buffer, vnet);
+ }
}
}
}
void
-PerfectSwitch::operateMessageBuffer(MessageBuffer *buffer, int incoming,
- int vnet)
+PerfectSwitch::operateMessageBuffer(MessageBuffer *buffer, int vnet)
{
MsgPtr msg_ptr;
Message *net_msg_ptr = NULL;
@@ -149,7 +181,7 @@
Tick current_time = m_switch->clockEdge();
while (buffer->isReady(current_time)) {
- DPRINTF(RubyNetwork, "incoming: %d\n", incoming);
+ DPRINTF(RubyNetwork, "incoming: %d\n", buffer->getIncomingLink());
// Peek at message
msg_ptr = buffer->peekMsgPtr();
@@ -219,7 +251,7 @@
// Enqueue msg
DPRINTF(RubyNetwork, "Enqueuing net msg from "
"inport[%d][%d] to outport [%d][%d].\n",
- incoming, vnet, outgoing, vnet);
+ buffer->getIncomingLink(), vnet, outgoing, vnet);
m_out[outgoing][vnet]->enqueue(msg_ptr, current_time,
m_out_latencies[outgoing]);
diff --git a/src/mem/ruby/network/simple/PerfectSwitch.hh b/src/mem/ruby/network/simple/PerfectSwitch.hh
index d4f35e3..52af132 100644
--- a/src/mem/ruby/network/simple/PerfectSwitch.hh
+++ b/src/mem/ruby/network/simple/PerfectSwitch.hh
@@ -94,7 +94,7 @@
PerfectSwitch& operator=(const PerfectSwitch& obj);
void operateVnet(int vnet);
- void operateMessageBuffer(MessageBuffer *b, int incoming, int vnet);
+ void operateMessageBuffer(MessageBuffer *b, int vnet);
const SwitchID m_switch_id;
Switch * const m_switch;
@@ -106,6 +106,13 @@
// latency for routing to each out port
std::vector<Tick> m_out_latencies;
+ // input ports ordered by priority; indexed by vnet first
+ std::vector<std::vector<MessageBuffer*> > m_in_prio;
+ // input ports grouped by priority; indexed by [vnet][priority level]
+ std::vector<std::vector<std::vector<MessageBuffer*>>> m_in_prio_groups;
+
+ void updatePriorityGroups(int vnet, MessageBuffer* buf);
+
uint32_t m_virtual_networks;
int m_wakeups_wo_switch;
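For reviewers, the per-vnet arbitration order this patch implements can
be summarized by the following sketch (plain Python, illustrative only;
the buffer attributes are hypothetical stand-ins for the C++ accessors):

    # Buffers are grouped by routing_priority (ascending). Groups are
    # serviced in order; within a group the round-robin scan starts at
    # the buffer holding the oldest ready message.
    def service_order(buffers):
        groups = {}
        for buf in buffers:
            groups.setdefault(buf.routing_priority, []).append(buf)
        order = []
        for prio in sorted(groups):
            group = groups[prio]
            start = min(range(len(group)),
                        key = lambda i: group[i].ready_time)
            order += [group[(start + k) % len(group)]
                      for k in range(len(group))]
        return order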
--
To view, visit https://gem5-review.googlesource.com/c/public/gem5/+/41864
To unsubscribe, or for help writing mail filters, visit
https://gem5-review.googlesource.com/settings
Gerrit-Project: public/gem5
Gerrit-Branch: develop
Gerrit-Change-Id: I7e2b35e2cae63086a76def1145f9b4b56220a2ba
Gerrit-Change-Number: 41864
Gerrit-PatchSet: 1
Gerrit-Owner: Tiago Mück <[email protected]>
Gerrit-MessageType: newchange