This is an automated email from the ASF dual-hosted git repository.
chaokunyang pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/fory.git
The following commit(s) were added to refs/heads/main by this push:
new 7b94fe5a0 feat(c++ ): add cpp benchmark (#2943)
7b94fe5a0 is described below
commit 7b94fe5a0a12eca1e9711bb74c55a623149631e9
Author: Shawn Yang <[email protected]>
AuthorDate: Thu Nov 27 22:30:40 2025 +0800
feat(c++ ): add cpp benchmark (#2943)
## What does this PR do?
add cpp benchmarks
## Related issues
#2906
## Does this PR introduce any user-facing change?
<!--
If any user-facing interface changes, please [open an
issue](https://github.com/apache/fory/issues/new/choose) describing the
need to do so and update the document if necessary.
Delete section if not applicable.
-->
- [ ] Does this PR introduce any public API change?
- [ ] Does this PR introduce any binary protocol compatibility change?
## Benchmark
<!--
When the PR has an impact on performance (if you don't know whether the
PR will have an impact on performance, you can submit the PR first, and
if it will have impact on performance, the code reviewer will explain
it), be sure to attach a benchmark data here.
Delete section if not applicable.
-->
---
benchmarks/cpp_benchmark/CMakeLists.txt | 137 ++++++++
benchmarks/cpp_benchmark/README.md | 175 ++++++++++
benchmarks/cpp_benchmark/benchmark.cc | 363 +++++++++++++++++++++
benchmarks/cpp_benchmark/benchmark_report.py | 340 +++++++++++++++++++
benchmarks/cpp_benchmark/profile.sh | 175 ++++++++++
benchmarks/cpp_benchmark/run.sh | 131 ++++++++
.../src/main => benchmarks}/proto/bench.proto | 0
java/benchmark/pom.xml | 2 +-
8 files changed, 1322 insertions(+), 1 deletion(-)
diff --git a/benchmarks/cpp_benchmark/CMakeLists.txt b/benchmarks/cpp_benchmark/CMakeLists.txt
new file mode 100644
index 000000000..aa34b66e9
--- /dev/null
+++ b/benchmarks/cpp_benchmark/CMakeLists.txt
@@ -0,0 +1,137 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+cmake_minimum_required(VERSION 3.16)
+
+project(fory_cpp_benchmark
+ VERSION 1.0.0
+ DESCRIPTION "C++ Benchmark comparing Fory and Protobuf serialization"
+ LANGUAGES CXX
+)
+
+# C++17 required for Fory
+set(CMAKE_CXX_STANDARD 17)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+set(CMAKE_CXX_EXTENSIONS OFF)
+
+# Set default build type to Release for benchmarks
+if(NOT CMAKE_BUILD_TYPE)
+ set(CMAKE_BUILD_TYPE Release CACHE STRING "Build type" FORCE)
+endif()
+
+# =============================================================================
+# Dependencies via FetchContent
+# =============================================================================
+include(FetchContent)
+
+# Fory C++ library (local source) - this will also fetch Abseil
+FetchContent_Declare(
+ fory
+ SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../../cpp"
+)
+FetchContent_MakeAvailable(fory)
+
+# Protobuf - fetch to ensure Abseil version compatibility
+# Using v25.5 which is compatible with Abseil 20240722.0
+set(protobuf_BUILD_TESTS OFF CACHE BOOL "" FORCE)
+set(protobuf_BUILD_SHARED_LIBS OFF CACHE BOOL "" FORCE)
+set(protobuf_ABSL_PROVIDER "package" CACHE STRING "" FORCE)
+set(protobuf_BUILD_PROTOBUF_BINARIES ON CACHE BOOL "" FORCE)
+set(protobuf_BUILD_PROTOC_BINARIES ON CACHE BOOL "" FORCE)
+FetchContent_Declare(
+ protobuf
+ GIT_REPOSITORY https://github.com/protocolbuffers/protobuf.git
+ GIT_TAG v25.5
+ GIT_SHALLOW TRUE
+)
+FetchContent_MakeAvailable(protobuf)
+
+# Google Benchmark
+FetchContent_Declare(
+ benchmark
+ GIT_REPOSITORY https://github.com/google/benchmark.git
+ GIT_TAG v1.9.1
+ GIT_SHALLOW TRUE
+)
+set(BENCHMARK_ENABLE_TESTING OFF CACHE BOOL "" FORCE)
+set(BENCHMARK_ENABLE_INSTALL OFF CACHE BOOL "" FORCE)
+FetchContent_MakeAvailable(benchmark)
+
+# =============================================================================
+# Proto file compilation
+# =============================================================================
+set(PROTO_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../proto")
+set(PROTO_FILE "${PROTO_DIR}/bench.proto")
+set(PROTO_OUT_DIR "${CMAKE_CURRENT_BINARY_DIR}/generated")
+
+# Create output directory
+file(MAKE_DIRECTORY ${PROTO_OUT_DIR})
+
+# Get protoc executable path
+set(PROTOC_EXECUTABLE $<TARGET_FILE:protobuf::protoc>)
+
+# Generate protobuf sources using custom command
+set(PROTO_SRCS "${PROTO_OUT_DIR}/bench.pb.cc")
+set(PROTO_HDRS "${PROTO_OUT_DIR}/bench.pb.h")
+
+add_custom_command(
+ OUTPUT ${PROTO_SRCS} ${PROTO_HDRS}
+ COMMAND protobuf::protoc
+ --cpp_out=${PROTO_OUT_DIR}
+ --proto_path=${PROTO_DIR}
+ ${PROTO_FILE}
+ DEPENDS ${PROTO_FILE} protobuf::protoc
+ COMMENT "Generating protobuf sources from bench.proto"
+ VERBATIM
+)
+
+# Create a library for the generated proto files
+add_library(bench_proto STATIC ${PROTO_SRCS} ${PROTO_HDRS})
+target_include_directories(bench_proto PUBLIC ${PROTO_OUT_DIR})
+target_link_libraries(bench_proto PUBLIC protobuf::libprotobuf)
+
+# =============================================================================
+# Benchmark executable
+# =============================================================================
+add_executable(fory_benchmark
+ ${CMAKE_CURRENT_SOURCE_DIR}/benchmark.cc
+)
+
+target_include_directories(fory_benchmark PRIVATE
+ ${PROTO_OUT_DIR}
+)
+
+target_link_libraries(fory_benchmark PRIVATE
+ fory::serialization
+ bench_proto
+ benchmark::benchmark
+ benchmark::benchmark_main
+)
+
+# Compiler optimizations for benchmarks
+if(CMAKE_CXX_COMPILER_ID MATCHES "GNU|Clang")
+ target_compile_options(fory_benchmark PRIVATE -O3 -DNDEBUG)
+endif()
+
+# =============================================================================
+# Print configuration
+# =============================================================================
+message(STATUS "")
+message(STATUS "Fory C++ Benchmark Configuration:")
+message(STATUS " Build type: ${CMAKE_BUILD_TYPE}")
+message(STATUS " C++ standard: ${CMAKE_CXX_STANDARD}")
+message(STATUS "")
diff --git a/benchmarks/cpp_benchmark/README.md b/benchmarks/cpp_benchmark/README.md
new file mode 100644
index 000000000..f2f89e27e
--- /dev/null
+++ b/benchmarks/cpp_benchmark/README.md
@@ -0,0 +1,175 @@
+# Fory C++ Benchmark
+
+This benchmark compares serialization/deserialization performance between Apache Fory and Protocol Buffers in C++.
+
+## Prerequisites
+
+- CMake 3.16+
+- C++17 compatible compiler (GCC 8+, Clang 7+, MSVC 2019+)
+- Git (for fetching dependencies)
+
+Note: Protobuf is fetched automatically via CMake FetchContent, so no manual installation is required.
+
+## Quick Start
+
+Run the complete benchmark pipeline (build, run, generate report):
+
+```bash
+cd benchmarks/cpp_benchmark
+./run.sh
+```
+
+## Building
+
+```bash
+cd benchmarks/cpp_benchmark
+mkdir build && cd build
+cmake -DCMAKE_BUILD_TYPE=Release ..
+cmake --build . -j$(nproc)
+```
+
+## Running Benchmarks
+
+```bash
+./fory_benchmark
+```
+
+### Filter specific benchmarks
+
+```bash
+# Run only Struct benchmarks
+./fory_benchmark --benchmark_filter="Struct"
+
+# Run only Fory benchmarks
+./fory_benchmark --benchmark_filter="Fory"
+
+# Run only serialization benchmarks
+./fory_benchmark --benchmark_filter="Serialize"
+```
+
+### Output formats
+
+```bash
+# JSON output
+./fory_benchmark --benchmark_format=json --benchmark_out=results.json
+
+# CSV output
+./fory_benchmark --benchmark_format=csv --benchmark_out=results.csv
+```
+
+## Benchmark Cases
+
+| Benchmark | Description |
+| -------------------------------- | ------------------------------------------------------------------- |
+| `BM_Fory_Struct_Serialize` | Serialize a simple struct with 8 int32 fields using Fory |
+| `BM_Protobuf_Struct_Serialize` | Serialize the same struct using Protobuf |
+| `BM_Fory_Struct_Deserialize` | Deserialize a simple struct using Fory |
+| `BM_Protobuf_Struct_Deserialize` | Deserialize the same struct using Protobuf |
+| `BM_Fory_Sample_Serialize` | Serialize a complex object with various types and arrays using Fory |
+| `BM_Protobuf_Sample_Serialize` | Serialize the same object using Protobuf |
+| `BM_Fory_Sample_Deserialize` | Deserialize a complex object using Fory |
+| `BM_Protobuf_Sample_Deserialize` | Deserialize the same object using Protobuf |
+
+## Data Structures
+
+### Struct (Simple)
+
+A simple structure with 8 int32 fields, useful for measuring baseline serialization overhead.
+
+### Sample (Complex)
+
+A complex structure containing:
+
+- Primitive types (int32, int64, float, double, bool)
+- Multiple arrays (int, long, float, double, short, char, bool)
+- String field
+
+## Proto Definition
+
+The benchmark uses `benchmarks/proto/bench.proto` which is shared with the Java benchmark for consistency.
+
+## Generating Benchmark Report
+
+A Python script is provided to generate visual reports from benchmark results.
+
+### Prerequisites for Report Generation
+
+```bash
+pip install matplotlib numpy psutil
+```
+
+### Generate Report
+
+```bash
+# Run benchmark and save JSON output
+cd build
+./fory_benchmark --benchmark_format=json --benchmark_out=benchmark_results.json
+
+# Generate report
+cd ..
+python benchmark_report.py --json-file build/benchmark_results.json --output-dir report
+```
+
+The script will generate:
+
+- PNG plots comparing Fory vs Protobuf performance
+- A markdown report (`REPORT.md`) with detailed results
+
+### Report Options
+
+```bash
+python benchmark_report.py --help
+
+Options:
+ --json-file Benchmark JSON output file (default: benchmark_results.json)
+ --output-dir Output directory for plots and report
+ --plot-prefix Image path prefix in Markdown report
+```
+
+## Profiling / Flamegraph
+
+Use `profile.sh` to generate flamegraphs for performance analysis:
+
+```bash
+# Profile all benchmarks
+./profile.sh
+
+# Profile specific benchmarks
+./profile.sh --data struct --serializer fory
+
+# Profile with custom duration
+./profile.sh --serializer fory --duration 10
+```
+
+### Profile Options
+
+```bash
+./profile.sh --help
+
+Options:
+ --filter <pattern> Custom benchmark filter (regex pattern)
+ --data <struct|sample> Filter benchmark by data type
+ --serializer <fory|protobuf> Filter benchmark by serializer
+ --duration <seconds> Profiling duration (default: 5)
+ --output-dir <dir> Output directory (default: profile_output)
+```
+
+Example with custom filter:
+
+```bash
+# Profile a specific benchmark
+./profile.sh --filter BM_Fory_Struct_Serialize
+```
+
+### Supported Profiling Tools
+
+The script automatically detects and uses available tools (in order of preference):
+
+1. **samply** (recommended): `cargo install samply`
+2. **perf** (Linux)
+
+For flamegraph SVG generation with `perf`, install FlameGraph tools:
+
+```bash
+git clone https://github.com/brendangregg/FlameGraph.git ~/FlameGraph
+```
diff --git a/benchmarks/cpp_benchmark/benchmark.cc b/benchmarks/cpp_benchmark/benchmark.cc
new file mode 100644
index 000000000..2256651f0
--- /dev/null
+++ b/benchmarks/cpp_benchmark/benchmark.cc
@@ -0,0 +1,363 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include <benchmark/benchmark.h>
+#include <cstdint>
+#include <string>
+#include <vector>
+
+#include "bench.pb.h"
+#include "fory/serialization/fory.h"
+
+// ============================================================================
+// Fory struct definitions (must match proto messages)
+// ============================================================================
+
+struct ForyStruct {
+ int32_t f1;
+ int32_t f2;
+ int32_t f3;
+ int32_t f4;
+ int32_t f5;
+ int32_t f6;
+ int32_t f7;
+ int32_t f8;
+
+ bool operator==(const ForyStruct &other) const {
+ return f1 == other.f1 && f2 == other.f2 && f3 == other.f3 &&
+ f4 == other.f4 && f5 == other.f5 && f6 == other.f6 &&
+ f7 == other.f7 && f8 == other.f8;
+ }
+};
+FORY_STRUCT(ForyStruct, f1, f2, f3, f4, f5, f6, f7, f8);
+
+struct ForySample {
+ int32_t int_value;
+ int64_t long_value;
+ float float_value;
+ double double_value;
+ int32_t short_value;
+ int32_t char_value;
+ bool boolean_value;
+ int32_t int_value_boxed;
+ int64_t long_value_boxed;
+ float float_value_boxed;
+ double double_value_boxed;
+ int32_t short_value_boxed;
+ int32_t char_value_boxed;
+ bool boolean_value_boxed;
+ std::vector<int32_t> int_array;
+ std::vector<int64_t> long_array;
+ std::vector<float> float_array;
+ std::vector<double> double_array;
+ std::vector<int32_t> short_array;
+ std::vector<int32_t> char_array;
+ std::vector<bool> boolean_array;
+ std::string string;
+
+ bool operator==(const ForySample &other) const {
+ return int_value == other.int_value && long_value == other.long_value &&
+ float_value == other.float_value &&
+ double_value == other.double_value &&
+ short_value == other.short_value && char_value == other.char_value &&
+ boolean_value == other.boolean_value &&
+ int_value_boxed == other.int_value_boxed &&
+ long_value_boxed == other.long_value_boxed &&
+ float_value_boxed == other.float_value_boxed &&
+ double_value_boxed == other.double_value_boxed &&
+ short_value_boxed == other.short_value_boxed &&
+ char_value_boxed == other.char_value_boxed &&
+ boolean_value_boxed == other.boolean_value_boxed &&
+ int_array == other.int_array && long_array == other.long_array &&
+ float_array == other.float_array &&
+ double_array == other.double_array &&
+ short_array == other.short_array && char_array == other.char_array &&
+ boolean_array == other.boolean_array && string == other.string;
+ }
+};
+FORY_STRUCT(ForySample, int_value, long_value, float_value, double_value,
+ short_value, char_value, boolean_value, int_value_boxed,
+ long_value_boxed, float_value_boxed, double_value_boxed,
+ short_value_boxed, char_value_boxed, boolean_value_boxed, int_array,
+ long_array, float_array, double_array, short_array, char_array,
+ boolean_array, string);
+
+// ============================================================================
+// Test data creation
+// ============================================================================
+
+ForyStruct CreateForyStruct() { return ForyStruct{1, 2, 3, 4, 5, 6, 7, 8}; }
+
+protobuf::Struct CreateProtoStruct() {
+ protobuf::Struct s;
+ s.set_f1(1);
+ s.set_f2(2);
+ s.set_f3(3);
+ s.set_f4(4);
+ s.set_f5(5);
+ s.set_f6(6);
+ s.set_f7(7);
+ s.set_f8(8);
+ return s;
+}
+
+ForySample CreateForySample() {
+ ForySample sample;
+ sample.int_value = 42;
+ sample.long_value = 1234567890123LL;
+ sample.float_value = 3.14f;
+ sample.double_value = 2.718281828;
+ sample.short_value = 100;
+ sample.char_value = 65;
+ sample.boolean_value = true;
+ sample.int_value_boxed = 42;
+ sample.long_value_boxed = 1234567890123LL;
+ sample.float_value_boxed = 3.14f;
+ sample.double_value_boxed = 2.718281828;
+ sample.short_value_boxed = 100;
+ sample.char_value_boxed = 65;
+ sample.boolean_value_boxed = true;
+ sample.int_array = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+ sample.long_array = {100, 200, 300, 400, 500};
+ sample.float_array = {1.1f, 2.2f, 3.3f, 4.4f, 5.5f};
+ sample.double_array = {1.11, 2.22, 3.33, 4.44, 5.55};
+ sample.short_array = {10, 20, 30, 40, 50};
+ sample.char_array = {65, 66, 67, 68, 69};
+ sample.boolean_array = {true, false, true, false, true};
+ sample.string = "Hello, Fory benchmark!";
+ return sample;
+}
+
+protobuf::Sample CreateProtoSample() {
+ protobuf::Sample sample;
+ sample.set_int_value(42);
+ sample.set_long_value(1234567890123LL);
+ sample.set_float_value(3.14f);
+ sample.set_double_value(2.718281828);
+ sample.set_short_value(100);
+ sample.set_char_value(65);
+ sample.set_boolean_value(true);
+ sample.set_int_value_boxed(42);
+ sample.set_long_value_boxed(1234567890123LL);
+ sample.set_float_value_boxed(3.14f);
+ sample.set_double_value_boxed(2.718281828);
+ sample.set_short_value_boxed(100);
+ sample.set_char_value_boxed(65);
+ sample.set_boolean_value_boxed(true);
+ for (int i = 1; i <= 10; ++i) {
+ sample.add_int_array(i);
+ }
+ for (int64_t v : {100LL, 200LL, 300LL, 400LL, 500LL}) {
+ sample.add_long_array(v);
+ }
+ for (float v : {1.1f, 2.2f, 3.3f, 4.4f, 5.5f}) {
+ sample.add_float_array(v);
+ }
+ for (double v : {1.11, 2.22, 3.33, 4.44, 5.55}) {
+ sample.add_double_array(v);
+ }
+ for (int v : {10, 20, 30, 40, 50}) {
+ sample.add_short_array(v);
+ }
+ for (int v : {65, 66, 67, 68, 69}) {
+ sample.add_char_array(v);
+ }
+ for (bool v : {true, false, true, false, true}) {
+ sample.add_boolean_array(v);
+ }
+ sample.set_string("Hello, Fory benchmark!");
+ return sample;
+}
+
+// ============================================================================
+// Helper to configure Fory instance
+// ============================================================================
+
+void RegisterForyTypes(fory::serialization::Fory &fory) {
+ fory.register_struct<ForyStruct>(1);
+ fory.register_struct<ForySample>(2);
+}
+
+// ============================================================================
+// Struct benchmarks (simple object with 8 int32 fields)
+// ============================================================================
+
+static void BM_Fory_Struct_Serialize(benchmark::State &state) {
+ auto fory =
+ fory::serialization::Fory::builder().xlang(true).track_ref(false).build();
+ RegisterForyTypes(fory);
+ ForyStruct obj = CreateForyStruct();
+
+ for (auto _ : state) {
+ auto result = fory.serialize(obj);
+ benchmark::DoNotOptimize(result);
+ }
+}
+BENCHMARK(BM_Fory_Struct_Serialize);
+
+static void BM_Protobuf_Struct_Serialize(benchmark::State &state) {
+ protobuf::Struct obj = CreateProtoStruct();
+ std::string output;
+
+ for (auto _ : state) {
+ output.clear();
+ obj.SerializeToString(&output);
+ benchmark::DoNotOptimize(output);
+ }
+}
+BENCHMARK(BM_Protobuf_Struct_Serialize);
+
+static void BM_Fory_Struct_Deserialize(benchmark::State &state) {
+ auto fory =
+ fory::serialization::Fory::builder().xlang(true).track_ref(false).build();
+ RegisterForyTypes(fory);
+ ForyStruct obj = CreateForyStruct();
+ auto serialized = fory.serialize(obj);
+ if (!serialized.ok()) {
+ state.SkipWithError("Serialization failed");
+ return;
+ }
+ auto &bytes = serialized.value();
+
+ // Verify deserialization works first
+ auto test_result = fory.deserialize<ForyStruct>(bytes.data(), bytes.size());
+ if (!test_result.ok()) {
+ state.SkipWithError("Deserialization test failed");
+ return;
+ }
+
+ for (auto _ : state) {
+ auto result = fory.deserialize<ForyStruct>(bytes.data(), bytes.size());
+ benchmark::DoNotOptimize(result);
+ }
+}
+BENCHMARK(BM_Fory_Struct_Deserialize);
+
+static void BM_Protobuf_Struct_Deserialize(benchmark::State &state) {
+ protobuf::Struct obj = CreateProtoStruct();
+ std::string serialized;
+ obj.SerializeToString(&serialized);
+
+ for (auto _ : state) {
+ protobuf::Struct result;
+ result.ParseFromString(serialized);
+ benchmark::DoNotOptimize(result);
+ }
+}
+BENCHMARK(BM_Protobuf_Struct_Deserialize);
+
+// ============================================================================
+// Sample benchmarks (complex object with various types and arrays)
+// ============================================================================
+
+static void BM_Fory_Sample_Serialize(benchmark::State &state) {
+ auto fory =
+ fory::serialization::Fory::builder().xlang(true).track_ref(false).build();
+ RegisterForyTypes(fory);
+ ForySample obj = CreateForySample();
+
+ for (auto _ : state) {
+ auto result = fory.serialize(obj);
+ benchmark::DoNotOptimize(result);
+ }
+}
+BENCHMARK(BM_Fory_Sample_Serialize);
+
+static void BM_Protobuf_Sample_Serialize(benchmark::State &state) {
+ protobuf::Sample obj = CreateProtoSample();
+ std::string output;
+
+ for (auto _ : state) {
+ output.clear();
+ obj.SerializeToString(&output);
+ benchmark::DoNotOptimize(output);
+ }
+}
+BENCHMARK(BM_Protobuf_Sample_Serialize);
+
+static void BM_Fory_Sample_Deserialize(benchmark::State &state) {
+ auto fory =
+ fory::serialization::Fory::builder().xlang(true).track_ref(false).build();
+ RegisterForyTypes(fory);
+ ForySample obj = CreateForySample();
+ auto serialized = fory.serialize(obj);
+ if (!serialized.ok()) {
+ state.SkipWithError("Serialization failed");
+ return;
+ }
+ auto &bytes = serialized.value();
+
+ // Verify deserialization works first
+ auto test_result = fory.deserialize<ForySample>(bytes.data(), bytes.size());
+ if (!test_result.ok()) {
+ state.SkipWithError("Deserialization test failed");
+ return;
+ }
+
+ for (auto _ : state) {
+ auto result = fory.deserialize<ForySample>(bytes.data(), bytes.size());
+ benchmark::DoNotOptimize(result);
+ }
+}
+BENCHMARK(BM_Fory_Sample_Deserialize);
+
+static void BM_Protobuf_Sample_Deserialize(benchmark::State &state) {
+ protobuf::Sample obj = CreateProtoSample();
+ std::string serialized;
+ obj.SerializeToString(&serialized);
+
+ for (auto _ : state) {
+ protobuf::Sample result;
+ result.ParseFromString(serialized);
+ benchmark::DoNotOptimize(result);
+ }
+}
+BENCHMARK(BM_Protobuf_Sample_Deserialize);
+
+// ============================================================================
+// Serialized size comparison (printed once at the end)
+// ============================================================================
+
+static void BM_PrintSerializedSizes(benchmark::State &state) {
+ // Fory
+ auto fory =
+ fory::serialization::Fory::builder().xlang(true).track_ref(false).build();
+ RegisterForyTypes(fory);
+ ForyStruct fory_struct = CreateForyStruct();
+ ForySample fory_sample = CreateForySample();
+ auto fory_struct_bytes = fory.serialize(fory_struct).value();
+ auto fory_sample_bytes = fory.serialize(fory_sample).value();
+
+ // Protobuf
+ protobuf::Struct proto_struct = CreateProtoStruct();
+ protobuf::Sample proto_sample = CreateProtoSample();
+ std::string proto_struct_bytes, proto_sample_bytes;
+ proto_struct.SerializeToString(&proto_struct_bytes);
+ proto_sample.SerializeToString(&proto_sample_bytes);
+
+ for (auto _ : state) {
+ // Just run once to print sizes
+ }
+
+ state.counters["fory_struct_size"] = fory_struct_bytes.size();
+ state.counters["proto_struct_size"] = proto_struct_bytes.size();
+ state.counters["fory_sample_size"] = fory_sample_bytes.size();
+ state.counters["proto_sample_size"] = proto_sample_bytes.size();
+}
+BENCHMARK(BM_PrintSerializedSizes)->Iterations(1);
diff --git a/benchmarks/cpp_benchmark/benchmark_report.py b/benchmarks/cpp_benchmark/benchmark_report.py
new file mode 100644
index 000000000..884603268
--- /dev/null
+++ b/benchmarks/cpp_benchmark/benchmark_report.py
@@ -0,0 +1,340 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import json
+import os
+import platform
+import argparse
+import matplotlib.pyplot as plt
+import numpy as np
+from collections import defaultdict
+from datetime import datetime
+
+try:
+ import psutil
+
+ HAS_PSUTIL = True
+except ImportError:
+ HAS_PSUTIL = False
+
+# === Colors ===
+FORY_COLOR = "#FF6f01" # Orange for Fory
+PROTOBUF_COLOR = "#55BCC2" # Teal for Protobuf
+
+# === Parse arguments ===
+parser = argparse.ArgumentParser(
    description="Plot Google Benchmark stats and generate Markdown report for C++ benchmarks"
+)
+parser.add_argument(
    "--json-file", default="benchmark_results.json", help="Benchmark JSON output file"
+)
+parser.add_argument(
+ "--output-dir", default="", help="Output directory for plots and report"
+)
+parser.add_argument(
+ "--plot-prefix", default="", help="Image path prefix in Markdown report"
+)
+args = parser.parse_args()
+
+# === Determine output directory ===
+if args.output_dir.strip():
+ output_dir = args.output_dir
+else:
+ output_dir = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
+
+os.makedirs(output_dir, exist_ok=True)
+
+
+# === Get system info ===
+def get_system_info():
+ try:
+ info = {
+ "OS": f"{platform.system()} {platform.release()}",
+ "Machine": platform.machine(),
+ "Processor": platform.processor() or "Unknown",
+ }
+ if HAS_PSUTIL:
+ info["CPU Cores (Physical)"] = psutil.cpu_count(logical=False)
+ info["CPU Cores (Logical)"] = psutil.cpu_count(logical=True)
+ info["Total RAM (GB)"] = round(psutil.virtual_memory().total / (1024**3), 2)
+ except Exception as e:
+ info = {"Error gathering system info": str(e)}
+ return info
+
+
+# === Parse benchmark name ===
+def parse_benchmark_name(name):
+ """
+ Parse benchmark names like:
+ - BM_Fory_Struct_Serialize
+ - BM_Protobuf_Sample_Deserialize
+ Returns: (library, datatype, operation)
+ """
+ # Remove BM_ prefix
+ if name.startswith("BM_"):
+ name = name[3:]
+
+ parts = name.split("_")
+ if len(parts) >= 3:
+ library = parts[0].lower() # fory or protobuf
+ datatype = parts[1].lower() # struct or sample
+ operation = parts[2].lower() # serialize or deserialize
+ return library, datatype, operation
+ return None, None, None
+
+
+# === Read and parse benchmark JSON ===
+def load_benchmark_data(json_file):
+ with open(json_file, "r", encoding="utf-8") as f:
+ data = json.load(f)
+ return data
+
+
+# === Data storage ===
+# Structure: data[datatype][operation][library] = time_ns
+data = defaultdict(lambda: defaultdict(dict))
+sizes = {} # Store serialized sizes
+
+# === Load and process data ===
+benchmark_data = load_benchmark_data(args.json_file)
+
+# Extract context info
+context = benchmark_data.get("context", {})
+
+# Process benchmarks
+for bench in benchmark_data.get("benchmarks", []):
+ name = bench.get("name", "")
+ # Skip aggregate results and size benchmarks
+ if "/iterations:" in name or "PrintSerializedSizes" in name:
+ # Extract sizes from PrintSerializedSizes
+ if "PrintSerializedSizes" in name:
+ for key in [
+ "fory_struct_size",
+ "proto_struct_size",
+ "fory_sample_size",
+ "proto_sample_size",
+ ]:
+ if key in bench:
+ sizes[key] = int(bench[key])
+ continue
+
+ library, datatype, operation = parse_benchmark_name(name)
+ if library and datatype and operation:
+ # Get time in nanoseconds
+ time_ns = bench.get("real_time", bench.get("cpu_time", 0))
+ time_unit = bench.get("time_unit", "ns")
+
+ # Convert to nanoseconds if needed
+ if time_unit == "us":
+ time_ns *= 1000
+ elif time_unit == "ms":
+ time_ns *= 1000000
+ elif time_unit == "s":
+ time_ns *= 1000000000
+
+ data[datatype][operation][library] = time_ns
+
+# === System info ===
+system_info = get_system_info()
+
+# Add context info from benchmark
+if context:
+ if "date" in context:
+ system_info["Benchmark Date"] = context["date"]
+ if "num_cpus" in context:
+ system_info["CPU Cores (from benchmark)"] = context["num_cpus"]
+
+
+# === Plotting ===
+def plot_datatype(ax, datatype, operation):
+ """Plot a single datatype/operation comparison."""
+ if datatype not in data or operation not in data[datatype]:
+ ax.set_title(f"{datatype} {operation} - No Data")
+ ax.axis("off")
+ return
+
+ libs = sorted(data[datatype][operation].keys())
+ lib_order = [lib for lib in ["fory", "protobuf"] if lib in libs]
+
+ times = [data[datatype][operation].get(lib, 0) for lib in lib_order]
+ colors = [FORY_COLOR if lib == "fory" else PROTOBUF_COLOR for lib in lib_order]
+
+ x = np.arange(len(lib_order))
+ bars = ax.bar(x, times, color=colors, width=0.6)
+
+ ax.set_title(f"{datatype.capitalize()} {operation.capitalize()}")
+ ax.set_xticks(x)
+ ax.set_xticklabels([lib.capitalize() for lib in lib_order])
+ ax.set_ylabel("Time (ns)")
+ ax.grid(True, axis="y", linestyle="--", alpha=0.5)
+
+ # Add value labels on bars
+ for bar, time_val in zip(bars, times):
+ height = bar.get_height()
+ ax.annotate(
+ f"{time_val:.1f}",
+ xy=(bar.get_x() + bar.get_width() / 2, height),
+ xytext=(0, 3),
+ textcoords="offset points",
+ ha="center",
+ va="bottom",
+ fontsize=9,
+ )
+
+
+# === Create plots ===
+plot_images = []
+datatypes = sorted(data.keys())
+operations = ["serialize", "deserialize"]
+
+for datatype in datatypes:
+ fig, axes = plt.subplots(1, 2, figsize=(12, 5))
+ for i, op in enumerate(operations):
+ plot_datatype(axes[i], datatype, op)
+ fig.suptitle(f"{datatype.capitalize()} Benchmark Results", fontsize=14)
+ fig.tight_layout(rect=[0, 0, 1, 0.95])
+ plot_path = os.path.join(output_dir, f"{datatype}.png")
+ plt.savefig(plot_path, dpi=150)
+ plot_images.append((datatype, plot_path))
+ plt.close()
+
+# === Create combined TPS comparison plot ===
+fig, axes = plt.subplots(1, 2, figsize=(14, 6))
+
+for idx, op in enumerate(operations):
+ ax = axes[idx]
+ x = np.arange(len(datatypes))
+ width = 0.35
+
+ fory_times = [data[dt][op].get("fory", 0) for dt in datatypes]
+ proto_times = [data[dt][op].get("protobuf", 0) for dt in datatypes]
+
+ # Convert to TPS (operations per second)
+ fory_tps = [1e9 / t if t > 0 else 0 for t in fory_times]
+ proto_tps = [1e9 / t if t > 0 else 0 for t in proto_times]
+
+ bars1 = ax.bar(x - width / 2, fory_tps, width, label="Fory", color=FORY_COLOR)
+ bars2 = ax.bar(
+ x + width / 2, proto_tps, width, label="Protobuf", color=PROTOBUF_COLOR
+ )
+
+ ax.set_ylabel("Throughput (ops/sec)")
+ ax.set_title(f"{op.capitalize()} Throughput Comparison")
+ ax.set_xticks(x)
+ ax.set_xticklabels([dt.capitalize() for dt in datatypes])
+ ax.legend()
+ ax.grid(True, axis="y", linestyle="--", alpha=0.5)
+
+ # Format y-axis with K/M suffixes
+ ax.ticklabel_format(style="scientific", axis="y", scilimits=(0, 0))
+
+fig.tight_layout()
+combined_plot_path = os.path.join(output_dir, "throughput_comparison.png")
+plt.savefig(combined_plot_path, dpi=150)
+plot_images.append(("throughput_comparison", combined_plot_path))
+plt.close()
+
+# === Markdown report ===
+md_report = [
+ "# C++ Benchmark Performance Report\n\n",
+ f"_Generated on {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}_\n\n",
+ "## How to Generate This Report\n\n",
+ "```bash\n",
+ "cd benchmarks/cpp_benchmark/build\n",
+ "./fory_benchmark --benchmark_format=json --benchmark_out=benchmark_results.json\n",
+ "cd ..\n",
+ "python benchmark_report.py --json-file build/benchmark_results.json --output-dir report\n",
+ "```\n\n",
+ "## Hardware & OS Info\n\n",
+ "| Key | Value |\n",
+ "|-----|-------|\n",
+]
+for k, v in system_info.items():
+ md_report.append(f"| {k} | {v} |\n")
+
+# Plots section
+md_report.append("\n## Benchmark Plots\n")
+for datatype, img in plot_images:
+ img_filename = os.path.basename(img)
+ img_path_report = args.plot_prefix + img_filename
+ md_report.append(f"\n### {datatype.replace('_', ' ').title()}\n\n")
+ md_report.append(
+ f'<p align="center">\n<img src="{img_path_report}" width="90%">\n</p>\n'
+ )
+
+# Results table
+md_report.append("\n## Benchmark Results\n\n")
+md_report.append("### Timing Results (nanoseconds)\n\n")
+md_report.append("| Datatype | Operation | Fory (ns) | Protobuf (ns) | Faster |\n")
+md_report.append("|----------|-----------|-----------|---------------|--------|\n")
+
+for datatype in datatypes:
+ for op in operations:
+ f_time = data[datatype][op].get("fory", 0)
+ p_time = data[datatype][op].get("protobuf", 0)
+ if f_time > 0 and p_time > 0:
+ faster = "Fory" if f_time < p_time else "Protobuf"
+ ratio = max(f_time, p_time) / min(f_time, p_time)
+ faster_str = f"{faster} ({ratio:.1f}x)"
+ else:
+ faster_str = "N/A"
+ md_report.append(
+ f"| {datatype.capitalize()} | {op.capitalize()} | {f_time:.1f} | {p_time:.1f} | {faster_str} |\n"
+ )
+
+# Throughput table
+md_report.append("\n### Throughput Results (ops/sec)\n\n")
+md_report.append("| Datatype | Operation | Fory TPS | Protobuf TPS | Faster |\n")
+md_report.append("|----------|-----------|----------|--------------|--------|\n")
+
+for datatype in datatypes:
+ for op in operations:
+ f_time = data[datatype][op].get("fory", 0)
+ p_time = data[datatype][op].get("protobuf", 0)
+ f_tps = 1e9 / f_time if f_time > 0 else 0
+ p_tps = 1e9 / p_time if p_time > 0 else 0
+ if f_tps > 0 and p_tps > 0:
+ faster = "Fory" if f_tps > p_tps else "Protobuf"
+ ratio = max(f_tps, p_tps) / min(f_tps, p_tps)
+ faster_str = f"{faster} ({ratio:.1f}x)"
+ else:
+ faster_str = "N/A"
+ md_report.append(
+ f"| {datatype.capitalize()} | {op.capitalize()} | {f_tps:,.0f} | {p_tps:,.0f} | {faster_str} |\n"
+ )
+
+# Serialized sizes
+if sizes:
+ md_report.append("\n### Serialized Data Sizes (bytes)\n\n")
+ md_report.append("| Datatype | Fory | Protobuf |\n")
+ md_report.append("|----------|------|----------|\n")
+ if "fory_struct_size" in sizes and "proto_struct_size" in sizes:
+ md_report.append(
+ f"| Struct | {sizes['fory_struct_size']} | {sizes['proto_struct_size']} |\n"
+ )
+ if "fory_sample_size" in sizes and "proto_sample_size" in sizes:
+ md_report.append(
+ f"| Sample | {sizes['fory_sample_size']} | {sizes['proto_sample_size']} |\n"
+ )
+
+# Save Markdown
+report_path = os.path.join(output_dir, "REPORT.md")
+with open(report_path, "w", encoding="utf-8") as f:
+ f.writelines(md_report)
+
+print(f"✅ Plots saved in: {output_dir}")
+print(f"📄 Markdown report generated at: {report_path}")
diff --git a/benchmarks/cpp_benchmark/profile.sh b/benchmarks/cpp_benchmark/profile.sh
new file mode 100755
index 000000000..48accc6b7
--- /dev/null
+++ b/benchmarks/cpp_benchmark/profile.sh
@@ -0,0 +1,175 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
# Abort immediately if any command fails.
set -e

# Resolve the directory containing this script so it works from any CWD.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Default values (each overridable via the command-line options parsed below)
DATA=""                      # --data: struct|sample, narrows the benchmark filter
SERIALIZER=""                # --serializer: fory|protobuf, narrows the benchmark filter
FILTER=""                    # --filter: explicit regex; takes precedence over DATA/SERIALIZER
DURATION=5                   # --duration: seconds passed to --benchmark_min_time
OUTPUT_DIR="profile_output"  # --output-dir: where perf data / flamegraphs are written
+
+# Parse arguments
# Print the help text for this script and exit successfully.
usage() {
  cat <<EOF
Usage: $0 [OPTIONS]

Generate flamegraph/profile for C++ benchmarks

Options:
  --filter <pattern>            Custom benchmark filter (regex pattern)
  --data <struct|sample>        Filter benchmark by data type
  --serializer <fory|protobuf>  Filter benchmark by serializer
  --duration <seconds>          Profiling duration (default: 5)
  --output-dir <dir>            Output directory (default: profile_output)
  --help                        Show this help message

Examples:
  $0                                    # Profile all benchmarks
  $0 --data struct --serializer fory    # Profile Fory Struct benchmarks
  $0 --serializer protobuf --duration 10
  $0 --filter BM_Fory_Struct_Serialize  # Profile specific benchmark

Supported profiling tools (in order of preference):
  - samply (recommended): cargo install samply
  - perf (Linux)
EOF
  exit 0
}
+
# Parse command-line arguments. Each value-taking option consumes two
# positional parameters (the flag and its value), hence "shift 2".
while [[ $# -gt 0 ]]; do
  case $1 in
    --filter)
      FILTER="$2"
      shift 2
      ;;
    --data)
      DATA="$2"
      shift 2
      ;;
    --serializer)
      SERIALIZER="$2"
      shift 2
      ;;
    --duration)
      DURATION="$2"
      shift 2
      ;;
    --output-dir)
      OUTPUT_DIR="$2"
      shift 2
      ;;
    --help|-h)
      usage
      ;;
    *)
      # Any unrecognized flag prints an error plus the usage text (which exits).
      echo -e "${RED}Unknown option: $1${NC}"
      usage
      ;;
  esac
done
+
# Build benchmark filter (only if --filter not provided)
# Benchmark names look like BM_<Serializer>_<Data>_..., so the two pieces are
# combined as "<Ser>_<Data>", or used alone when only one of them is given.
if [[ -z "$FILTER" ]]; then
  # Upper-case the first character of $1, keep the rest unchanged.
  capitalize() {
    printf '%s%s' "$(echo "${1:0:1}" | tr '[:lower:]' '[:upper:]')" "${1:1}"
  }
  if [[ -n "$DATA" ]]; then
    FILTER="$(capitalize "$DATA")"
  fi
  if [[ -n "$SERIALIZER" ]]; then
    if [[ -n "$FILTER" ]]; then
      FILTER="$(capitalize "$SERIALIZER")_${FILTER}"
    else
      FILTER="$(capitalize "$SERIALIZER")"
    fi
  fi
fi
+
# Ensure the benchmark binary has been built (run.sh does the build).
if [[ ! -f "build/fory_benchmark" ]]; then
  echo -e "${RED}Benchmark not found. Run ./run.sh first to build.${NC}"
  exit 1
fi

# Create the output directory and resolve it to an absolute path BEFORE
# cd'ing into build/. The previous "../${OUTPUT_DIR}" construction broke
# when --output-dir was given as an absolute path.
mkdir -p "$OUTPUT_DIR"
OUTPUT_DIR="$(cd "$OUTPUT_DIR" && pwd)"
cd build

# Build the benchmark command as an array so a filter containing spaces or
# shell metacharacters survives word splitting intact.
BENCH_CMD=(./fory_benchmark "--benchmark_min_time=${DURATION}s")
if [[ -n "$FILTER" ]]; then
  BENCH_CMD+=("--benchmark_filter=$FILTER")
fi

echo -e "${GREEN}=== Fory C++ Benchmark Profiler ===${NC}"
echo -e "Filter: ${FILTER:-all}"
echo -e "Duration: ${DURATION}s"
echo -e "Output: ${OUTPUT_DIR}"
echo ""

TIMESTAMP=$(date +%Y%m%d_%H%M%S)

# Try profiling tools in order of preference: samply, then perf.
if command -v samply &> /dev/null; then
  echo -e "${YELLOW}Profiling with samply...${NC}"
  echo -e "Running: samply record ${BENCH_CMD[*]}"
  samply record "${BENCH_CMD[@]}"
  echo -e "${GREEN}Done! Samply should have opened in your browser.${NC}"

elif command -v perf &> /dev/null; then
  echo -e "${YELLOW}Profiling with perf...${NC}"
  PERF_DATA="${OUTPUT_DIR}/perf_${TIMESTAMP}.data"
  FLAMEGRAPH_SVG="${OUTPUT_DIR}/flamegraph_${TIMESTAMP}.svg"

  echo -e "Running: perf record -g --call-graph dwarf -o $PERF_DATA ${BENCH_CMD[*]}"
  perf record -g --call-graph dwarf -o "$PERF_DATA" "${BENCH_CMD[@]}"

  echo -e "${GREEN}Profile saved to: ${PERF_DATA}${NC}"

  # Try to turn the perf data into a flamegraph SVG, preferring a local
  # FlameGraph checkout, then the scripts on PATH, else print instructions.
  if [[ -d "$HOME/FlameGraph" ]]; then
    echo -e "${YELLOW}Generating flamegraph SVG...${NC}"
    perf script -i "$PERF_DATA" | "$HOME/FlameGraph/stackcollapse-perf.pl" | "$HOME/FlameGraph/flamegraph.pl" > "$FLAMEGRAPH_SVG"
    echo -e "${GREEN}Flamegraph saved to: ${FLAMEGRAPH_SVG}${NC}"
  elif command -v stackcollapse-perf.pl &> /dev/null && command -v flamegraph.pl &> /dev/null; then
    echo -e "${YELLOW}Generating flamegraph SVG...${NC}"
    perf script -i "$PERF_DATA" | stackcollapse-perf.pl | flamegraph.pl > "$FLAMEGRAPH_SVG"
    echo -e "${GREEN}Flamegraph saved to: ${FLAMEGRAPH_SVG}${NC}"
  else
    echo -e "${YELLOW}To generate flamegraph SVG:${NC}"
    echo "  git clone https://github.com/brendangregg/FlameGraph.git ~/FlameGraph"
    echo "  perf script -i $PERF_DATA | ~/FlameGraph/stackcollapse-perf.pl | ~/FlameGraph/flamegraph.pl > flamegraph.svg"
  fi

else
  echo -e "${RED}No profiling tool found. Please install one of:${NC}"
  echo "  - samply: cargo install samply (recommended, cross-platform)"
  echo "  - perf (Linux): apt install linux-tools-generic"
  exit 1
fi

echo ""
echo -e "${GREEN}=== Profiling complete! ===${NC}"
diff --git a/benchmarks/cpp_benchmark/run.sh b/benchmarks/cpp_benchmark/run.sh
new file mode 100755
index 000000000..4a1b57da3
--- /dev/null
+++ b/benchmarks/cpp_benchmark/run.sh
@@ -0,0 +1,131 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
# Abort immediately if any command fails.
set -e

# Resolve the directory containing this script so it works from any CWD.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Default values
# Build parallelism: detect the machine's core count instead of hard-coding 16
# (nproc on Linux, sysctl on macOS); fall back to the old default of 16 if
# neither tool is available.
JOBS="$(nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null || echo 16)"
DATA=""        # --data: struct|sample
SERIALIZER=""  # --serializer: fory|protobuf
+
+# Parse arguments
# Print the help text for this script and exit successfully.
usage() {
  cat <<EOF
Usage: $0 [OPTIONS]

Build and run C++ benchmarks

Options:
  --data <struct|sample>        Filter benchmark by data type
  --serializer <fory|protobuf>  Filter benchmark by serializer
  --help                        Show this help message

Examples:
  $0                      # Run all benchmarks
  $0 --data struct        # Run only Struct benchmarks
  $0 --serializer fory    # Run only Fory benchmarks
  $0 --data struct --serializer fory

For profiling/flamegraph, use: ./profile.sh
EOF
  exit 0
}
+
# Parse command-line arguments. Each value-taking option consumes two
# positional parameters (the flag and its value), hence "shift 2".
while [[ $# -gt 0 ]]; do
  case $1 in
    --data)
      DATA="$2"
      shift 2
      ;;
    --serializer)
      SERIALIZER="$2"
      shift 2
      ;;
    --help|-h)
      usage
      ;;
    *)
      # Any unrecognized flag prints an error plus the usage text (which exits).
      echo -e "${RED}Unknown option: $1${NC}"
      usage
      ;;
  esac
done
+
# Build benchmark filter
# Benchmark names look like BM_<Serializer>_<Data>_..., so the two pieces are
# combined as "<Ser>_<Data>", or used alone when only one of them is given.
# Upper-case the first character of $1, keep the rest unchanged.
capitalize() {
  printf '%s%s' "$(echo "${1:0:1}" | tr '[:lower:]' '[:upper:]')" "${1:1}"
}
FILTER=""
if [[ -n "$DATA" ]]; then
  FILTER="$(capitalize "$DATA")"
fi
if [[ -n "$SERIALIZER" ]]; then
  if [[ -n "$FILTER" ]]; then
    FILTER="$(capitalize "$SERIALIZER")_${FILTER}"
  else
    FILTER="$(capitalize "$SERIALIZER")"
  fi
fi
+
echo -e "${GREEN}=== Fory C++ Benchmark ===${NC}"
echo ""

# Step 1: Build
echo -e "${YELLOW}[1/3] Building benchmark...${NC}"
mkdir -p build
cd build
cmake -DCMAKE_BUILD_TYPE=Release ..
cmake --build . -j"$JOBS"
echo -e "${GREEN}Build complete!${NC}"
echo ""

# Step 2: Run benchmark
echo -e "${YELLOW}[2/3] Running benchmark...${NC}"
# Pass arguments as an array so a filter containing spaces or shell
# metacharacters survives word splitting intact.
BENCH_ARGS=(--benchmark_format=json --benchmark_out=benchmark_results.json)
if [[ -n "$FILTER" ]]; then
  BENCH_ARGS+=("--benchmark_filter=$FILTER")
  echo -e "Filter: ${FILTER}"
fi
./fory_benchmark "${BENCH_ARGS[@]}"
echo -e "${GREEN}Benchmark complete!${NC}"
echo ""

# Step 3: Generate report
echo -e "${YELLOW}[3/3] Generating report...${NC}"
cd "$SCRIPT_DIR"

# Check for Python dependencies. The report script needs all three modules,
# so probe all of them — probing only matplotlib (as before) skipped the
# install when matplotlib was present but numpy/psutil were missing, and the
# report step then crashed.
if ! python3 -c "import matplotlib, numpy, psutil" 2>/dev/null; then
  echo -e "${YELLOW}Installing required Python packages...${NC}"
  pip3 install matplotlib numpy psutil
fi

python3 benchmark_report.py --json-file build/benchmark_results.json --output-dir report
echo ""

echo -e "${GREEN}=== All done! ===${NC}"
echo -e "Report generated at: ${SCRIPT_DIR}/report/REPORT.md"
echo -e "Plots saved in: ${SCRIPT_DIR}/report/"
echo ""
echo -e "For profiling/flamegraph, run: ${YELLOW}./profile.sh --help${NC}"
diff --git a/java/benchmark/src/main/proto/bench.proto
b/benchmarks/proto/bench.proto
similarity index 100%
rename from java/benchmark/src/main/proto/bench.proto
rename to benchmarks/proto/bench.proto
diff --git a/java/benchmark/pom.xml b/java/benchmark/pom.xml
index 529a2a5de..ac4eb5024 100644
--- a/java/benchmark/pom.xml
+++ b/java/benchmark/pom.xml
@@ -354,7 +354,7 @@
<protocArtifact>com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier}</protocArtifact>
<pluginId>grpc-java</pluginId>
<pluginArtifact>io.grpc:protoc-gen-grpc-java:1.47.0:exe:${os.detected.classifier}</pluginArtifact>
- <protoSourceRoot>${basedir}/src/main/proto</protoSourceRoot>
+ <protoSourceRoot>${basedir}/../../benchmarks/proto</protoSourceRoot>
</configuration>
<executions>
<execution>
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]