git-hulk commented on code in PR #1855:
URL: https://github.com/apache/kvrocks/pull/1855#discussion_r1375395364
##########
src/server/server.cc:
##########
@@ -1680,6 +1677,64 @@ void Server::AdjustOpenFilesLimit() {
}
}
+void Server::AdjustWorkerThreads() {
+  auto new_worker_threads = static_cast<size_t>(config_->workers);
+  if (new_worker_threads == worker_threads_.size()) {
+    return;
+  }
+  size_t delta = 0;
+  if (new_worker_threads > worker_threads_.size()) {
+    delta = new_worker_threads - worker_threads_.size();
+    increaseWorkerThreads(delta);
+    LOG(INFO) << "[server] Increase worker threads to " << new_worker_threads;
+    return;
+  }
+
+  delta = worker_threads_.size() - new_worker_threads;
+  LOG(INFO) << "[server] Decrease worker threads to " << new_worker_threads;
+  decreaseWorkerThreads(delta);
+}
+
+void Server::increaseWorkerThreads(size_t delta) {
+  for (size_t i = 0; i < delta; i++) {
+    auto worker = std::make_unique<Worker>(this, config_);
+    auto worker_thread = std::make_unique<WorkerThread>(std::move(worker));
+    worker_thread->Start();
+    worker_threads_.emplace_back(std::move(worker_thread));
+  }
+}
+
+void Server::decreaseWorkerThreads(size_t delta) {
+  auto current_worker_threads = worker_threads_.size();
+  auto remain_worker_threads = current_worker_threads - delta;
+  for (size_t i = remain_worker_threads; i < current_worker_threads; i++) {
+    // The unix socket is listening on the first worker, so workers MUST be
+    // removed from the end of the vector. Otherwise, the unix socket would
+    // be closed.
+    auto worker_thread = std::move(worker_threads_.back());
+    worker_threads_.pop_back();
+    // Migrate connections to other workers before stopping this worker;
+    // the target worker is chosen in a round-robin fashion.
+    auto connections = worker_thread->GetWorker()->GetConnections();
+    for (const auto &iter : connections) {
+      auto target_worker = worker_threads_[iter.first % remain_worker_threads]->GetWorker();
+      worker_thread->GetWorker()->MigrateConnection(target_worker, iter.second);
+    }
+    worker_thread->Stop();
+    // Don't join the worker thread here, because it may try to join itself.
+    recycle_worker_threads_.push(std::move(worker_thread));
+  }
+}
+
+void Server::cleanupExitedWorkerThreads() {
+  std::unique_ptr<WorkerThread> worker_thread = nullptr;
Review Comment:
I guess it's fine, since those threads come from decreaseWorkerThreads, which stops each worker before pushing it onto the recycle queue.
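
To spell out why joining here is safe, a minimal, self-contained sketch of the recycle-then-join pattern, assuming a simple mutex-guarded queue. `RecycleQueue` and the lambda body are hypothetical stand-ins, not the actual kvrocks type behind `recycle_worker_threads_`:

```cpp
#include <mutex>
#include <queue>
#include <thread>

// Sketch of the recycle-then-join pattern: stopped worker threads are parked
// on a queue by one thread and joined later from a different thread, so no
// thread ever tries to join itself.
class RecycleQueue {
 public:
  // Called from the shrinking side (cf. decreaseWorkerThreads): the stopped
  // worker's thread is parked here instead of being joined in place.
  void Push(std::thread t) {
    std::lock_guard<std::mutex> lk(mu_);
    q_.push(std::move(t));
  }

  // Called later from another thread (cf. cleanupExitedWorkerThreads),
  // which is why joining is safe.
  void Cleanup() {
    std::lock_guard<std::mutex> lk(mu_);
    while (!q_.empty()) {
      if (q_.front().joinable()) q_.front().join();
      q_.pop();
    }
  }

 private:
  std::mutex mu_;
  std::queue<std::thread> q_;
};

int main() {
  RecycleQueue recycle;
  recycle.Push(std::thread([] { /* worker event loop would exit here */ }));
  recycle.Cleanup();  // safe: we are not the thread being joined
  return 0;
}
```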
##########
src/server/server.cc:
##########
@@ -1680,6 +1677,64 @@ void Server::AdjustOpenFilesLimit() {
}
}
+void Server::AdjustWorkerThreads() {
+  auto new_worker_threads = static_cast<size_t>(config_->workers);
+  if (new_worker_threads == worker_threads_.size()) {
+    return;
+  }
+  size_t delta = 0;
+  if (new_worker_threads > worker_threads_.size()) {
+    delta = new_worker_threads - worker_threads_.size();
+    increaseWorkerThreads(delta);
+    LOG(INFO) << "[server] Increase worker threads to " << new_worker_threads;
+    return;
+  }
+
+  delta = worker_threads_.size() - new_worker_threads;
+  LOG(INFO) << "[server] Decrease worker threads to " << new_worker_threads;
+  decreaseWorkerThreads(delta);
+}
+
+void Server::increaseWorkerThreads(size_t delta) {
+  for (size_t i = 0; i < delta; i++) {
+    auto worker = std::make_unique<Worker>(this, config_);
+    auto worker_thread = std::make_unique<WorkerThread>(std::move(worker));
+    worker_thread->Start();
+    worker_threads_.emplace_back(std::move(worker_thread));
+  }
+}
+
+void Server::decreaseWorkerThreads(size_t delta) {
+  auto current_worker_threads = worker_threads_.size();
+  auto remain_worker_threads = current_worker_threads - delta;
Review Comment:
@mapleFU Do you mean this?
```suggestion
  DCHECK(current_worker_threads > delta);
  auto remain_worker_threads = current_worker_threads - delta;
```
The strict inequality matters: `remain_worker_threads` is used as the modulo divisor when migrating connections, so it must stay positive.
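
To make the failure mode concrete, here is a small, self-contained sketch of the round-robin selection that the check guards (`PickTargetWorker` is a hypothetical stand-in, not kvrocks code): if `delta` equaled the current worker count, `remain_worker_threads` would be zero and the modulo would be undefined behavior.

```cpp
#include <cassert>
#include <cstddef>
#include <iostream>

// Hypothetical stand-in for the round-robin target selection in
// decreaseWorkerThreads: picks a surviving worker index for a connection fd.
size_t PickTargetWorker(size_t conn_fd, size_t remain_worker_threads) {
  // Mirrors the suggested DCHECK: modulo by zero is undefined behavior,
  // so the remaining worker count must stay strictly positive.
  assert(remain_worker_threads > 0);
  return conn_fd % remain_worker_threads;
}

int main() {
  size_t current = 4, delta = 3;    // shrink 4 workers down to 1
  size_t remain = current - delta;  // remain == 1, still a valid divisor
  std::cout << PickTargetWorker(42, remain) << "\n";  // prints 0
  return 0;
}
```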