martinzink commented on code in PR #1606:
URL: https://github.com/apache/nifi-minifi-cpp/pull/1606#discussion_r1293709416


##########
libminifi/test/unit/BackTraceTests.cpp:
##########
@@ -24,76 +24,60 @@
 #include "utils/BackTrace.h"
 #include "utils/Monitors.h"
 #include "utils/ThreadPool.h"
+#include "range/v3/algorithm/any_of.hpp"
 
 using namespace std::literals::chrono_literals;
 
-class WorkerNumberExecutions : public utils::AfterExecute<int> {
- public:
-  explicit WorkerNumberExecutions(int tasks)
-      : tasks(tasks) {
-  }
-
-  WorkerNumberExecutions(WorkerNumberExecutions && other) noexcept
-      : runs(other.runs),
-      tasks(other.tasks) {
-  }
-
-  bool isFinished(const int &result) override {
-    return result <= 0 || ++runs >= tasks;
-  }
-  bool isCancelled(const int& /*result*/) override {
-    return false;
-  }
-
-  std::chrono::steady_clock::duration wait_time() override {
-    return 50ms;
-  }
-
- protected:
-  int runs{0};
-  int tasks;
-};
-
 TEST_CASE("BT1", "[TPT1]") {
   const BackTrace trace = TraceResolver::getResolver().getBackTrace("BT1");
 #ifdef HAS_EXECINFO
-  REQUIRE(!trace.getTraces().empty());
+  CHECK(!trace.getTraces().empty());
 #endif
 }
 
-TEST_CASE("BT2", "[TPT2]") {
-  std::atomic<int> counter = 0;
-  utils::ThreadPool<int> pool(4);
-  pool.start();
-  std::this_thread::sleep_for(std::chrono::milliseconds(150));
-  for (int i = 0; i < 3; i++) {
-    std::unique_ptr<utils::AfterExecute<int>> after_execute = 
std::unique_ptr<utils::AfterExecute<int>>(new WorkerNumberExecutions(5));
-    utils::Worker<int> functor([&counter]() { return ++counter; }, "id", 
std::move(after_execute));
+void inner_function(std::atomic_flag& ready_to_check, std::atomic_flag& 
checking_done) {
+  ready_to_check.test_and_set();
+  ready_to_check.notify_all();
+  checking_done.wait(false);
+}
 
-    std::future<int> fut;
-    pool.execute(std::move(functor), fut);  // NOLINT(bugprone-use-after-move)
-  }
+void outer_function(std::atomic_flag& ready_to_check, std::atomic_flag& 
checking_done) {
+  inner_function(ready_to_check, checking_done);
+}
 
-  std::unique_ptr<utils::AfterExecute<int>> after_execute = 
std::unique_ptr<utils::AfterExecute<int>>(new WorkerNumberExecutions(5));
-  utils::Worker<int> functor([&counter]() { return ++counter; }, "id", 
std::move(after_execute));
 
-  std::future<int> fut;
-  pool.execute(std::move(functor), fut);  // NOLINT(bugprone-use-after-move)
+TEST_CASE("BT2", "[TPT2]") {
+  std::atomic_flag ready_for_checking;
+  std::atomic_flag done_with_checking;
 
-  std::vector<BackTrace> traces = pool.getTraces();
-  for (const auto &trace : traces) {
-    std::cerr << "Thread name: " << trace.getName() << std::endl;
-    const auto &trace_strings = trace.getTraces();
-#ifdef HAS_EXECINFO
-    REQUIRE(trace_strings.size() > 2);
-    for (const auto& trace_string : trace_strings) {
-      std::cerr << " - " << trace_string << std::endl;
-    }
-    if (trace_strings.at(0).find("sleep_for") != std::string::npos) {
-      REQUIRE(trace_strings.at(1).find("counterFunction") != 
std::string::npos);
+  constexpr std::string_view thread_pool_name = "Winnie the pool";
+  constexpr size_t number_of_worker_threads = 3;
+  utils::ThreadPool pool(number_of_worker_threads, nullptr, 
thread_pool_name.data());
+  utils::Worker worker([&]() -> utils::TaskRescheduleInfo {
+    outer_function(ready_for_checking, done_with_checking);
+    return utils::TaskRescheduleInfo::Done();
+  }, "id");
+  std::future<utils::TaskRescheduleInfo> future;
+  pool.execute(std::move(worker), future);
+
+  pool.start();
+  {
+    ready_for_checking.wait(false);
+    std::vector<BackTrace> traces = pool.getTraces();
+    CHECK(traces.size() <= number_of_worker_threads);

Review Comment:
   The size of the traces depends on the number of threads running. When we 
start the pool (configured with 3 threads) it starts 3 threads (but only one 
will have a task to do).
   It might not actually start all of them before we get to this check; we only 
wait until the one with the task sets ready_for_checking. So we can be 
sure that there will be at least one running thread with traces, but if the 
system has spare threads it's more likely that we have three traces when we do 
this CHECK.
   
   We could schedule multiple tasks; I didn't want to delve into this more than 
what's necessary in this PR because it is focused on the thread pool. Previously 
it did schedule more tasks, but the checking was not synced and the check was 
really weak. (Looking at it realistically, it didn't really check anything 
before this PR.)
   
   If you would like, I could expand the test so that it schedules multiple 
tasks (it would increase the complexity of the test a bit due to the additional 
synchronization).



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to