mapleFU commented on code in PR #1657:
URL: https://github.com/apache/kvrocks/pull/1657#discussion_r1290159293
##########
src/commands/cmd_pubsub.cc:
##########
@@ -27,20 +27,48 @@ namespace redis {
class CommandPublish : public Commander {
public:
- // mark is_write as false here because slave should be able to execute publish command
Status Execute(Server *svr, Connection *conn, std::string *output) override {
if (!svr->IsSlave()) {
- // Compromise: can't replicate message to sub-replicas in a cascading-like structure.
- // Replication relies on WAL seq, increase the seq on slave will break the replication, hence the compromise
+ // Compromise: can't replicate a message to sub-replicas in a cascading-like structure.
+ // Replication relies on WAL seq; increasing the seq on a replica will break the replication process,
+ // hence the compromise solution
redis::PubSub pubsub_db(svr->storage);
+
auto s = pubsub_db.Publish(args_[1], args_[2]);
if (!s.ok()) {
return {Status::RedisExecErr, s.ToString()};
}
}
int receivers = svr->PublishMessage(args_[1], args_[2]);
+
*output = redis::Integer(receivers);
+
+ return Status::OK();
+ }
+};
+
+class CommandMPublish : public Commander {
+ public:
+ Status Execute(Server *svr, Connection *conn, std::string *output) override {
+ int total_receivers = 0;
+
+ for (size_t i = 2; i < args_.size(); i++) {
+ if (!svr->IsSlave()) {
+ redis::PubSub pubsub_db(svr->storage);
+
+ auto s = pubsub_db.Publish(args_[1], args_[i]);
+ if (!s.ok()) {
+ return {Status::RedisExecErr, s.ToString()};
+ }
+ }
+
+ int receivers = svr->PublishMessage(args_[1], args_[i]);
+ total_receivers += receivers;
+ }
+
+ *output = redis::Integer(total_receivers);
Review Comment:
Oh, this LGTM. That said, the command doesn't feel stable yet. Maybe we can mark "mpublish" as
"experimental" or something, so we keep the option to change its syntax later.