This is an automated email from the ASF dual-hosted git repository.
adebreceni pushed a commit to branch minifi-api-reduced
in repository https://gitbox.apache.org/repos/asf/nifi-minifi-cpp.git
The following commit(s) were added to refs/heads/minifi-api-reduced by this push:
new 1cc35dcb4 Fix llamacpp proc/test
1cc35dcb4 is described below
commit 1cc35dcb4bcff2bf1e81f517b4be928a3722e690
Author: Adam Debreceni <[email protected]>
AuthorDate: Mon Jun 23 10:53:15 2025 +0200
Fix llamacpp proc/test
---
.../llamacpp/processors/RunLlamaCppInference.h | 9 ++++---
.../llamacpp/tests/RunLlamaCppInferenceTests.cpp | 28 +++++++++++++++-------
2 files changed, 23 insertions(+), 14 deletions(-)
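The change below replaces the separate (name, uuid) constructor arguments with a single core::ProcessorMetadata argument and drops the processor-local logger_ member. As a minimal sketch only, assuming ProcessorMetadata aggregates uuid, name, and logger in that order (the field order is inferred from the brace-initializers in the tests; the real definition lives in the minifi core headers):

    // Sketch, not from this diff: assumed shape of the metadata bundle.
    // Field order is an inference from the initializers used in the tests.
    struct ProcessorMetadata {
      utils::Identifier uuid;
      std::string_view name;
      std::shared_ptr<core::logging::Logger> logger;
    };

    // Before: identity passed piecewise, logger created by the subclass.
    // After: one bundle forwarded to core::ProcessorImpl.
    RunLlamaCppInference proc{core::ProcessorMetadata{
        utils::Identifier{}, "RunLlamaCppInference",
        core::logging::LoggerFactory<RunLlamaCppInference>::getLogger()}};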
diff --git a/extensions/llamacpp/processors/RunLlamaCppInference.h b/extensions/llamacpp/processors/RunLlamaCppInference.h
index a9fd90a02..13eafa4e6 100644
--- a/extensions/llamacpp/processors/RunLlamaCppInference.h
+++ b/extensions/llamacpp/processors/RunLlamaCppInference.h
@@ -20,7 +20,7 @@
#include <mutex>
#include <atomic>
-#include "core/Processor.h"
+#include "core/ProcessorImpl.h"
#include "core/logging/LoggerFactory.h"
#include "core/PropertyDefinitionBuilder.h"
#include "LlamaContext.h"
@@ -33,7 +33,7 @@ using LlamaContextProvider =
class RunLlamaCppInferenceMetrics : public core::ProcessorMetricsImpl {
public:
- explicit RunLlamaCppInferenceMetrics(const core::Processor& source_processor)
+ explicit RunLlamaCppInferenceMetrics(const core::ProcessorImpl& source_processor)
: core::ProcessorMetricsImpl(source_processor) {
}
@@ -63,8 +63,8 @@ class RunLlamaCppInferenceMetrics : public core::ProcessorMetricsImpl {
class RunLlamaCppInference : public core::ProcessorImpl {
public:
- explicit RunLlamaCppInference(std::string_view name, const utils::Identifier& uuid = {}, LlamaContextProvider llama_context_provider = {})
- : core::ProcessorImpl(name, uuid),
+ explicit RunLlamaCppInference(core::ProcessorMetadata info, LlamaContextProvider llama_context_provider = {})
+ : core::ProcessorImpl(info),
llama_context_provider_(std::move(llama_context_provider)) {
metrics_ = gsl::make_not_null(std::make_shared<RunLlamaCppInferenceMetrics>(*this));
}
@@ -186,7 +186,6 @@ class RunLlamaCppInference : public core::ProcessorImpl {
private:
void increaseTokensIn(uint64_t token_count);
void increaseTokensOut(uint64_t token_count);
- std::shared_ptr<core::logging::Logger> logger_ = core::logging::LoggerFactory<RunLlamaCppInference>::getLogger(uuid_);
std::string model_path_;
std::string system_prompt_;
diff --git a/extensions/llamacpp/tests/RunLlamaCppInferenceTests.cpp b/extensions/llamacpp/tests/RunLlamaCppInferenceTests.cpp
index 1f8bf2902..797649f22 100644
--- a/extensions/llamacpp/tests/RunLlamaCppInferenceTests.cpp
+++ b/extensions/llamacpp/tests/RunLlamaCppInferenceTests.cpp
@@ -20,6 +20,7 @@
#include "RunLlamaCppInference.h"
#include "unit/SingleProcessorTestController.h"
#include "core/FlowFile.h"
+#include "unit/ProcessorUtils.h"
namespace org::apache::nifi::minifi::extensions::llamacpp::test {
@@ -78,7 +79,8 @@ TEST_CASE("Prompt is generated correctly with default parameters") {
std::filesystem::path test_model_path;
processors::LlamaSamplerParams test_sampler_params;
processors::LlamaContextParams test_context_params;
- minifi::test::SingleProcessorTestController controller(std::make_unique<processors::RunLlamaCppInference>("RunLlamaCppInference", utils::Identifier(),
+ minifi::test::SingleProcessorTestController controller(minifi::test::utils::make_custom_processor<processors::RunLlamaCppInference>(
+ core::ProcessorMetadata{utils::Identifier{}, "RunLlamaCppInference", logging::LoggerFactory<processors::RunLlamaCppInference>::getLogger()},
[&](const std::filesystem::path& model_path, const processors::LlamaSamplerParams& sampler_params, const processors::LlamaContextParams& context_params) {
test_model_path = model_path;
test_sampler_params = sampler_params;
@@ -123,7 +125,8 @@ TEST_CASE("Prompt is generated correctly with custom parameters") {
std::filesystem::path test_model_path;
processors::LlamaSamplerParams test_sampler_params;
processors::LlamaContextParams test_context_params;
- minifi::test::SingleProcessorTestController controller(std::make_unique<processors::RunLlamaCppInference>("RunLlamaCppInference", utils::Identifier(),
+ minifi::test::SingleProcessorTestController controller(minifi::test::utils::make_custom_processor<processors::RunLlamaCppInference>(
+ core::ProcessorMetadata{utils::Identifier{}, "RunLlamaCppInference", logging::LoggerFactory<processors::RunLlamaCppInference>::getLogger()},
[&](const std::filesystem::path& model_path, const processors::LlamaSamplerParams& sampler_params, const processors::LlamaContextParams& context_params) {
test_model_path = model_path;
test_sampler_params = sampler_params;
@@ -174,7 +177,8 @@ TEST_CASE("Prompt is generated correctly with custom parameters") {
TEST_CASE("Empty flow file does not include input data in prompt") {
auto mock_llama_context = std::make_unique<MockLlamaContext>();
auto mock_llama_context_ptr = mock_llama_context.get();
- minifi::test::SingleProcessorTestController controller(std::make_unique<processors::RunLlamaCppInference>("RunLlamaCppInference", utils::Identifier(),
+ minifi::test::SingleProcessorTestController controller(minifi::test::utils::make_custom_processor<processors::RunLlamaCppInference>(
+ core::ProcessorMetadata{utils::Identifier{}, "RunLlamaCppInference", logging::LoggerFactory<processors::RunLlamaCppInference>::getLogger()},
[&](const std::filesystem::path&, const processors::LlamaSamplerParams&, const processors::LlamaContextParams&) {
return std::move(mock_llama_context);
}));
@@ -197,7 +201,8 @@ TEST_CASE("Empty flow file does not include input data in prompt") {
}
TEST_CASE("Invalid values for optional double type properties throw
exception") {
- minifi::test::SingleProcessorTestController controller(std::make_unique<processors::RunLlamaCppInference>("RunLlamaCppInference", utils::Identifier(),
+ minifi::test::SingleProcessorTestController controller(minifi::test::utils::make_custom_processor<processors::RunLlamaCppInference>(
+ core::ProcessorMetadata{utils::Identifier{}, "RunLlamaCppInference", logging::LoggerFactory<processors::RunLlamaCppInference>::getLogger()},
[&](const std::filesystem::path&, const processors::LlamaSamplerParams&, const processors::LlamaContextParams&) {
return std::make_unique<MockLlamaContext>();
}));
@@ -225,7 +230,8 @@ TEST_CASE("Invalid values for optional double type properties throw exception")
TEST_CASE("Top K property empty and invalid values are handled properly") {
std::optional<int32_t> test_top_k = 0;
- minifi::test::SingleProcessorTestController controller(std::make_unique<processors::RunLlamaCppInference>("RunLlamaCppInference", utils::Identifier(),
+ minifi::test::SingleProcessorTestController controller(minifi::test::utils::make_custom_processor<processors::RunLlamaCppInference>(
+ core::ProcessorMetadata{utils::Identifier{}, "RunLlamaCppInference", logging::LoggerFactory<processors::RunLlamaCppInference>::getLogger()},
[&](const std::filesystem::path&, const processors::LlamaSamplerParams& sampler_params, const processors::LlamaContextParams&) {
test_top_k = sampler_params.top_k;
return std::make_unique<MockLlamaContext>();
@@ -256,7 +262,8 @@ TEST_CASE("Error handling during generation and applying template") {
mock_llama_context->setApplyTemplateFailure();
}
- minifi::test::SingleProcessorTestController controller(std::make_unique<processors::RunLlamaCppInference>("RunLlamaCppInference", utils::Identifier(),
+ minifi::test::SingleProcessorTestController controller(minifi::test::utils::make_custom_processor<processors::RunLlamaCppInference>(
+ core::ProcessorMetadata{utils::Identifier{}, "RunLlamaCppInference", logging::LoggerFactory<processors::RunLlamaCppInference>::getLogger()},
[&](const std::filesystem::path&, const processors::LlamaSamplerParams&, const processors::LlamaContextParams&) {
return std::move(mock_llama_context);
}));
@@ -273,7 +280,8 @@ TEST_CASE("Error handling during generation and applying template") {
}
TEST_CASE("Route flow file to failure when prompt and input data is empty") {
- minifi::test::SingleProcessorTestController controller(std::make_unique<processors::RunLlamaCppInference>("RunLlamaCppInference", utils::Identifier(),
+ minifi::test::SingleProcessorTestController controller(minifi::test::utils::make_custom_processor<processors::RunLlamaCppInference>(
+ core::ProcessorMetadata{utils::Identifier{}, "RunLlamaCppInference", logging::LoggerFactory<processors::RunLlamaCppInference>::getLogger()},
[&](const std::filesystem::path&, const processors::LlamaSamplerParams&, const processors::LlamaContextParams&) {
return std::make_unique<MockLlamaContext>();
}));
@@ -292,7 +300,8 @@ TEST_CASE("Route flow file to failure when prompt and input data is empty") {
TEST_CASE("System prompt is optional") {
auto mock_llama_context = std::make_unique<MockLlamaContext>();
auto mock_llama_context_ptr = mock_llama_context.get();
- minifi::test::SingleProcessorTestController controller(std::make_unique<processors::RunLlamaCppInference>("RunLlamaCppInference", utils::Identifier(),
+ minifi::test::SingleProcessorTestController controller(minifi::test::utils::make_custom_processor<processors::RunLlamaCppInference>(
+ core::ProcessorMetadata{utils::Identifier{}, "RunLlamaCppInference", logging::LoggerFactory<processors::RunLlamaCppInference>::getLogger()},
[&](const std::filesystem::path&, const processors::LlamaSamplerParams&, const processors::LlamaContextParams&) {
return std::move(mock_llama_context);
}));
@@ -313,7 +322,8 @@ TEST_CASE("System prompt is optional") {
}
TEST_CASE("Test output metrics") {
- auto processor = std::make_unique<processors::RunLlamaCppInference>("RunLlamaCppInference", utils::Identifier(),
+ auto processor = minifi::test::utils::make_custom_processor<processors::RunLlamaCppInference>(
+ core::ProcessorMetadata{utils::Identifier{}, "RunLlamaCppInference", logging::LoggerFactory<processors::RunLlamaCppInference>::getLogger()},
[&](const std::filesystem::path&, const processors::LlamaSamplerParams&,
const processors::LlamaContextParams&) {
return std::make_unique<MockLlamaContext>();
});
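For reference, the full wiring in the updated tests looks roughly like the following sketch, assuming minifi::test::utils::make_custom_processor simply forwards the metadata and the extra provider argument to the RunLlamaCppInference constructor:

    // Sketch of the new test setup under the forwarding assumption above.
    auto processor = minifi::test::utils::make_custom_processor<processors::RunLlamaCppInference>(
        core::ProcessorMetadata{utils::Identifier{}, "RunLlamaCppInference",
            logging::LoggerFactory<processors::RunLlamaCppInference>::getLogger()},
        [&](const std::filesystem::path&, const processors::LlamaSamplerParams&,
            const processors::LlamaContextParams&) {
          // Inject a mock inference backend instead of a real llama.cpp context.
          return std::make_unique<MockLlamaContext>();
        });
    minifi::test::SingleProcessorTestController controller(std::move(processor));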