Tiago Muck has submitted this change.
( https://gem5-review.googlesource.com/c/public/gem5/+/41864 )

( 3 is the latest approved patch-set.
No files were changed between the latest approved patch-set and the
submitted one. )

Change subject: mem-ruby: add priorities in SimpleNetwork routing
......................................................................
mem-ruby: add priorities in SimpleNetwork routing
Configurations can specify a routing priority for message buffers.
SimpleNetwork uses this priority when checking for messages in the
routers' input ports. Higher-priority ports are always checked first.
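For illustration only (this snippet is not part of the change, and the
buffer names are hypothetical), a Ruby protocol configuration might set
the new parameter like this:

    # Hypothetical configuration sketch. Smaller routing_priority values
    # mean the buffer's input port is checked earlier by the switch.
    from m5.objects import MessageBuffer

    req_buffer = MessageBuffer(ordered = True)
    req_buffer.routing_priority = 0   # checked before priority-1 buffers
    rsp_buffer = MessageBuffer(ordered = True)
    rsp_buffer.routing_priority = 1   # checked after all priority-0 buffers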
JIRA: https://gem5.atlassian.net/browse/GEM5-920
Change-Id: I7e2b35e2cae63086a76def1145f9b4b56220a2ba
Signed-off-by: Tiago Mück <[email protected]>
Reviewed-on: https://gem5-review.googlesource.com/c/public/gem5/+/41864
Reviewed-by: Meatboy 106 <[email protected]>
Maintainer: Jason Lowe-Power <[email protected]>
Tested-by: kokoro <[email protected]>
---
M src/mem/ruby/network/MessageBuffer.cc
M src/mem/ruby/network/MessageBuffer.hh
M src/mem/ruby/network/MessageBuffer.py
M src/mem/ruby/network/simple/PerfectSwitch.cc
M src/mem/ruby/network/simple/PerfectSwitch.hh
5 files changed, 80 insertions(+), 13 deletions(-)
Approvals:
Meatboy 106: Looks good to me, approved
Jason Lowe-Power: Looks good to me, approved
kokoro: Regressions pass
diff --git a/src/mem/ruby/network/MessageBuffer.cc b/src/mem/ruby/network/MessageBuffer.cc
index a891d5a..9a65009 100644
--- a/src/mem/ruby/network/MessageBuffer.cc
+++ b/src/mem/ruby/network/MessageBuffer.cc
@@ -65,6 +65,7 @@
m_last_arrival_time(0), m_strict_fifo(p.ordered),
m_randomization(p.randomization),
m_allow_zero_latency(p.allow_zero_latency),
+ m_routing_priority(p.routing_priority),
ADD_STAT(m_not_avail_count, statistics::units::Count::get(),
"Number of times this buffer did not have N slots available"),
ADD_STAT(m_msg_count, statistics::units::Count::get(),
diff --git a/src/mem/ruby/network/MessageBuffer.hh b/src/mem/ruby/network/MessageBuffer.hh
index 9cabbaf..2795993 100644
--- a/src/mem/ruby/network/MessageBuffer.hh
+++ b/src/mem/ruby/network/MessageBuffer.hh
@@ -158,6 +158,9 @@
void setIncomingLink(int link_id) { m_input_link_id = link_id; }
void setVnet(int net) { m_vnet_id = net; }
+ int getIncomingLink() const { return m_input_link_id; }
+ int getVnet() const { return m_vnet_id; }
+
Port &
getPort(const std::string &, PortID idx=InvalidPortID) override
{
@@ -187,6 +190,8 @@
return functionalAccess(pkt, true, &mask) == 1;
}
+ int routingPriority() const { return m_routing_priority; }
+
private:
void reanalyzeList(std::list<MsgPtr> &, Tick);
@@ -270,6 +275,8 @@
const MessageRandomization m_randomization;
const bool m_allow_zero_latency;
+ const int m_routing_priority;
+
int m_input_link_id;
int m_vnet_id;
diff --git a/src/mem/ruby/network/MessageBuffer.py b/src/mem/ruby/network/MessageBuffer.py
index 80dc872..b776196 100644
--- a/src/mem/ruby/network/MessageBuffer.py
+++ b/src/mem/ruby/network/MessageBuffer.py
@@ -70,3 +70,6 @@
     max_dequeue_rate = Param.Unsigned(0, "Maximum number of messages that can \
                                           be dequeued per cycle \
                                           (0 allows dequeueing all ready messages)")
+    routing_priority = Param.Int(0, "Buffer priority when messages are \
+                                     consumed by the network. Smaller value \
+                                     means higher priority")
diff --git a/src/mem/ruby/network/simple/PerfectSwitch.cc b/src/mem/ruby/network/simple/PerfectSwitch.cc
index 665fd0f..74d78e3 100644
--- a/src/mem/ruby/network/simple/PerfectSwitch.cc
+++ b/src/mem/ruby/network/simple/PerfectSwitch.cc
@@ -88,11 +88,37 @@
in[i]->setConsumer(this);
in[i]->setIncomingLink(port);
in[i]->setVnet(i);
+ updatePriorityGroups(i, in[i]);
}
}
}
void
+PerfectSwitch::updatePriorityGroups(int vnet, MessageBuffer* in_buf)
+{
+ while (m_in_prio.size() <= vnet) {
+ m_in_prio.emplace_back();
+ m_in_prio_groups.emplace_back();
+ }
+
+ m_in_prio[vnet].push_back(in_buf);
+
+ std::sort(m_in_prio[vnet].begin(), m_in_prio[vnet].end(),
+ [](const MessageBuffer* i, const MessageBuffer* j)
+ { return i->routingPriority() < j->routingPriority(); });
+
+ // reset groups
+ m_in_prio_groups[vnet].clear();
+ int cur_prio = m_in_prio[vnet].front()->routingPriority();
+ m_in_prio_groups[vnet].emplace_back();
+ for (auto buf : m_in_prio[vnet]) {
+ if (buf->routingPriority() != cur_prio)
+ m_in_prio_groups[vnet].emplace_back();
+ m_in_prio_groups[vnet].back().push_back(buf);
+ }
+}
+
+void
PerfectSwitch::addOutPort(const std::vector<MessageBuffer*>& out,
const NetDest& routing_table_entry,
const PortDirection &dst_inport,
@@ -126,12 +152,15 @@
void
PerfectSwitch::operateVnet(int vnet)
{
- if (m_pending_message_count[vnet] > 0) {
+ if (m_pending_message_count[vnet] == 0)
+ return;
+
+ for (auto &in : m_in_prio_groups[vnet]) {
// first check the port with the oldest message
unsigned start_in_port = 0;
Tick lowest_tick = MaxTick;
- for (int i = 0; i < m_in.size(); ++i) {
- MessageBuffer *buffer = inBuffer(i, vnet);
+ for (int i = 0; i < in.size(); ++i) {
+ MessageBuffer *buffer = in[i];
if (buffer) {
Tick ready_time = buffer->readyTime();
if (ready_time < lowest_tick){
@@ -141,21 +170,20 @@
}
}
DPRINTF(RubyNetwork, "vnet %d: %d pending msgs. "
- "Checking port %d first\n",
+ "Checking port %d first\n",
vnet, m_pending_message_count[vnet], start_in_port);
// check all ports starting with the one with the oldest message
- for (int i = 0; i < m_in.size(); ++i) {
- int in_port = (i + start_in_port) % m_in.size();
- MessageBuffer *buffer = inBuffer(in_port, vnet);
+ for (int i = 0; i < in.size(); ++i) {
+ int in_port = (i + start_in_port) % in.size();
+ MessageBuffer *buffer = in[in_port];
if (buffer)
- operateMessageBuffer(buffer, in_port, vnet);
+ operateMessageBuffer(buffer, vnet);
}
}
}
void
-PerfectSwitch::operateMessageBuffer(MessageBuffer *buffer, int incoming,
- int vnet)
+PerfectSwitch::operateMessageBuffer(MessageBuffer *buffer, int vnet)
{
MsgPtr msg_ptr;
Message *net_msg_ptr = NULL;
@@ -166,7 +194,7 @@
Tick current_time = m_switch->clockEdge();
while (buffer->isReady(current_time)) {
- DPRINTF(RubyNetwork, "incoming: %d\n", incoming);
+ DPRINTF(RubyNetwork, "incoming: %d\n", buffer->getIncomingLink());
// Peek at message
msg_ptr = buffer->peekMsgPtr();
@@ -237,7 +265,7 @@
// Enqeue msg
DPRINTF(RubyNetwork, "Enqueuing net msg from "
"inport[%d][%d] to outport [%d][%d].\n",
- incoming, vnet, outgoing, vnet);
+ buffer->getIncomingLink(), vnet, outgoing, vnet);
out_port.buffers[vnet]->enqueue(msg_ptr, current_time,
out_port.latency);
diff --git a/src/mem/ruby/network/simple/PerfectSwitch.hh b/src/mem/ruby/network/simple/PerfectSwitch.hh
index 446ae83..589bca1 100644
--- a/src/mem/ruby/network/simple/PerfectSwitch.hh
+++ b/src/mem/ruby/network/simple/PerfectSwitch.hh
@@ -99,7 +99,7 @@
PerfectSwitch& operator=(const PerfectSwitch& obj);
void operateVnet(int vnet);
- void operateMessageBuffer(MessageBuffer *b, int incoming, int vnet);
+ void operateMessageBuffer(MessageBuffer *b, int vnet);
const SwitchID m_switch_id;
Switch * const m_switch;
@@ -115,6 +115,13 @@
};
std::vector<OutputPort> m_out;
+ // input ports ordered by priority; indexed by vnet first
+ std::vector<std::vector<MessageBuffer*> > m_in_prio;
+ // input ports grouped by priority; indexed by [vnet][priority level]
+ std::vector<std::vector<std::vector<MessageBuffer*>>> m_in_prio_groups;
+
+ void updatePriorityGroups(int vnet, MessageBuffer* buf);
+
uint32_t m_virtual_networks;
int m_wakeups_wo_switch;
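
As a reading aid (not part of the change), the grouping performed by
updatePriorityGroups and the per-group scan in operateVnet can be
sketched in a few lines of standalone Python; Buf here is a stand-in
for MessageBuffer:

    # Buffers are sorted by ascending routing_priority and split into
    # groups of equal priority; operateVnet then scans each group
    # before any lower-priority group.
    from itertools import groupby

    class Buf:
        def __init__(self, name, prio):
            self.name = name
            self.routing_priority = prio

    def make_priority_groups(buffers):
        ordered = sorted(buffers, key = lambda b: b.routing_priority)
        return [list(g) for _, g in
                groupby(ordered, key = lambda b: b.routing_priority)]

    groups = make_priority_groups([Buf("rsp", 1), Buf("req", 0), Buf("snp", 0)])
    print([[b.name for b in g] for g in groups])  # [['req', 'snp'], ['rsp']]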
--
To view, visit https://gem5-review.googlesource.com/c/public/gem5/+/41864
To unsubscribe, or for help writing mail filters, visit
https://gem5-review.googlesource.com/settings
Gerrit-Project: public/gem5
Gerrit-Branch: develop
Gerrit-Change-Id: I7e2b35e2cae63086a76def1145f9b4b56220a2ba
Gerrit-Change-Number: 41864
Gerrit-PatchSet: 7
Gerrit-Owner: Tiago Muck <[email protected]>
Gerrit-Reviewer: Jason Lowe-Power <[email protected]>
Gerrit-Reviewer: Jason Lowe-Power <[email protected]>
Gerrit-Reviewer: Meatboy 106 <[email protected]>
Gerrit-Reviewer: Tiago Muck <[email protected]>
Gerrit-Reviewer: kokoro <[email protected]>
Gerrit-MessageType: merged
_______________________________________________
gem5-dev mailing list -- [email protected]
To unsubscribe send an email to [email protected]