This is an automated email from the ASF dual-hosted git repository.
wusheng pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/skywalking-banyandb.git
The following commit(s) were added to refs/heads/main by this push:
new 0eff0fbe Fix the property repair UT (#825)
0eff0fbe is described below
commit 0eff0fbe60b6df529e9be785d318b57dc1509495
Author: mrproliu <[email protected]>
AuthorDate: Wed Oct 29 20:58:21 2025 +0900
Fix the property repair UT (#825)
---
.github/workflows/property-repair.yml | 74 +++++++++++
.github/workflows/slow-test.yml | 9 --
test/property_repair/base-compose.yml | 2 +-
test/property_repair/full_data/integrated_test.go | 29 +++--
test/property_repair/half_data/integrated_test.go | 29 +++--
test/property_repair/same_data/integrated_test.go | 25 +++-
test/property_repair/shared_utils.go | 144 ++++++++++++++++++----
7 files changed, 255 insertions(+), 57 deletions(-)
diff --git a/.github/workflows/property-repair.yml b/.github/workflows/property-repair.yml
new file mode 100644
index 00000000..b49d4256
--- /dev/null
+++ b/.github/workflows/property-repair.yml
@@ -0,0 +1,74 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+name: Property Repair Test
+
+on:
+ schedule:
+ - cron: '0 * * * *'
+
+jobs:
+ prepare:
+ name: Prepare (Generate)
+ uses: ./.github/workflows/prepare.yml
+
+ property-repair:
+ name: Property Repair Tests
+ runs-on: ubuntu-latest
+ needs: [prepare]
+ timeout-minutes: 480
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ fetch-tags: true
+ - name: Setup build environment
+ uses: ./.github/actions/setup-build-env
+ with:
+ download-artifacts: 'true'
+ setup-docker: 'false'
+ - name: Cache tools
+ uses: actions/cache@v4
+ id: cache-tool
+ with:
+ path: bin
+ key: ${{ runner.os }}-test-tool-${{ hashFiles('**version.mk') }}
+ restore-keys: |
+ ${{ runner.os }}-test-tool-
+ - name: Generate codes
+ run: make generate
+ - name: Build Linux binaries
+ run: |
+ TARGET_OS=linux PLATFORMS=linux/amd64 make release
+ - name: Build docker image
+ run: |
+ make docker.build || make docker.build
+ docker image ls
+ - name: Test integration and banyand
+ run: TEST_CI_OPTS="--cover --covermode atomic --coverprofile=coverage.out --label-filter property_repair -timeout=6h" make test-ci PKG=./test/property_repair/...
+ - if: ${{ failure() }}
+ run: |
+ ls /tmp
+ - name: Sanitize test name for artifact
+ if: ${{ failure() }}
+ id: sanitize-name
+ run: echo "sanitized=$(echo '${{ inputs.test-name }}' | sed 's/[^a-zA-Z0-9._-]/-/g')" >> $GITHUB_OUTPUT
+ - uses: actions/upload-artifact@v4
+ if: ${{ failure() }}
+ name: Upload BanyanDB Data Folder
+ with:
+ name: test-data-${{ steps.sanitize-name.outputs.sanitized }}
+ path: "/tmp/banyandb-test-*"
diff --git a/.github/workflows/slow-test.yml b/.github/workflows/slow-test.yml
index a712cfcc..2e1488b1 100644
--- a/.github/workflows/slow-test.yml
+++ b/.github/workflows/slow-test.yml
@@ -37,15 +37,6 @@ jobs:
options: --label-filter slow
timeout-minutes: 120
- property-repair:
- if: github.repository == 'apache/skywalking-banyandb'
- needs: [prepare]
- uses: ./.github/workflows/test.yml
- with:
- test-name: Property Repair Tests
- options: --label-filter property_repair
- timeout-minutes: 120
-
e2e:
if: github.repository == 'apache/skywalking-banyandb'
needs: [prepare]
diff --git a/test/property_repair/base-compose.yml b/test/property_repair/base-compose.yml
index 49d06416..19f859b7 100644
--- a/test/property_repair/base-compose.yml
+++ b/test/property_repair/base-compose.yml
@@ -91,7 +91,7 @@ services:
--stream-root-path=/tmp/banyandb-data
--measure-root-path=/tmp/banyandb-data
--property-root-path=/tmp/banyandb-data
- --property-repair-trigger-cron="*/10 * * * *"
+ --property-repair-trigger-cron="*/3 * * * *"
data-node-2:
<<: *data_base
diff --git a/test/property_repair/full_data/integrated_test.go b/test/property_repair/full_data/integrated_test.go
index 3a481bea..dcbcebef 100644
--- a/test/property_repair/full_data/integrated_test.go
+++ b/test/property_repair/full_data/integrated_test.go
@@ -38,6 +38,7 @@ import (
var (
composeFile string
+ logDir string
conn *grpc.ClientConn
groupClient databasev1.GroupRegistryServiceClient
propertyClient databasev1.PropertyRegistryServiceClient
@@ -52,6 +53,11 @@ func TestPropertyRepairIntegrated(t *testing.T) {
var _ = ginkgo.BeforeSuite(func() {
fmt.Println("Starting Property Repair Integration Test Suite...")
+ // Create log directory for this test run
+ var err error
+ logDir, err = propertyrepair.CreateLogDir("full_data")
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
// Disable Ryuk reaper to avoid container creation issues
os.Setenv("TESTCONTAINERS_RYUK_DISABLED", "true")
@@ -65,9 +71,16 @@ var _ = ginkgo.AfterSuite(func() {
if conn != nil {
_ = conn.Close()
}
+ if composeFile != "" && logDir != "" {
+ fmt.Println("Exporting docker compose logs...")
+ if exportErr := propertyrepair.ExportDockerComposeLogs(composeFile, logDir); exportErr != nil {
+ fmt.Printf("Warning: failed to export logs: %v\n", exportErr)
+ }
+ fmt.Printf("Logs are available at: %s\n", logDir)
+ }
if composeFile != "" {
fmt.Println("Stopping compose stack...")
- propertyrepair.ExecuteComposeCommand("-f", composeFile, "down")
+ propertyrepair.ExecuteComposeCommand(false, "-f", composeFile, "down")
}
})
@@ -83,11 +96,11 @@ var _ = ginkgo.Describe("Property Repair Full Data Test", ginkgo.Ordered, func()
// Start the docker compose stack without waiting first
fmt.Println("Starting services...")
- err = propertyrepair.ExecuteComposeCommand("-f", composeFile, "up", "-d")
+ err = propertyrepair.ExecuteComposeCommand(true, "-f", composeFile, "up", "-d")
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Simple wait for services to be ready
- time.Sleep(20 * time.Second)
+ time.Sleep(time.Minute)
})
ginkgo.It("Should connect to liaison and setup clients", func()
{
@@ -103,7 +116,7 @@ var _ = ginkgo.Describe("Property Repair Full Data Test", ginkgo.Ordered, func()
propertyServiceClient = propertyv1.NewPropertyServiceClient(conn)
})
- ginkgo.It("Should create group with 1 replica and write 100k properties", func() {
+ ginkgo.It("Should create group with 1 replica and write 5k properties", func() {
ctx := context.Background()
fmt.Println("=== Step 1: Creating group with 1 replica and loading initial data ===")
@@ -115,10 +128,10 @@ var _ = ginkgo.Describe("Property Repair Full Data Test", ginkgo.Ordered, func()
propertyrepair.CreatePropertySchema(ctx, propertyClient)
// Write 100,000 properties
- fmt.Println("Starting to write 100,000 properties...")
+ fmt.Println("Starting to write 5000 properties...")
startTime := time.Now()
- err := propertyrepair.WriteProperties(ctx, propertyServiceClient, 0, 100000)
+ err := propertyrepair.WriteProperties(ctx, propertyServiceClient, 0, 5000)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
duration := time.Since(startTime)
@@ -160,10 +173,10 @@ var _ = ginkgo.Describe("Property Repair Full Data Test", ginkgo.Ordered, func()
}
fmt.Println("\n=== Triggering property repair by waiting for scheduled repair cycle ===")
- fmt.Println("Waiting for property repair to trigger (@every 10 minutes)...")
+ fmt.Println("Waiting for property repair to trigger (@every 5 minutes)...")
gomega.Eventually(func() bool {
- time.Sleep(time.Second * 30)
+ time.Sleep(time.Second * 10)
// Get metrics after repair
fmt.Println("Trying to reading prometheus metrics to check repair status...")
afterMetrics := propertyrepair.GetAllNodeMetrics()
diff --git a/test/property_repair/half_data/integrated_test.go b/test/property_repair/half_data/integrated_test.go
index 119d0b49..28906186 100644
--- a/test/property_repair/half_data/integrated_test.go
+++ b/test/property_repair/half_data/integrated_test.go
@@ -38,6 +38,7 @@ import (
var (
composeFile string
+ logDir string
conn *grpc.ClientConn
groupClient databasev1.GroupRegistryServiceClient
propertyClient databasev1.PropertyRegistryServiceClient
@@ -52,6 +53,11 @@ func TestPropertyRepairHalfData(t *testing.T) {
var _ = ginkgo.BeforeSuite(func() {
fmt.Println("Starting Property Repair Half Data Integration Test Suite...")
+ // Create log directory for this test run
+ var err error
+ logDir, err = propertyrepair.CreateLogDir("half_data")
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
// Disable Ryuk reaper to avoid container creation issues
os.Setenv("TESTCONTAINERS_RYUK_DISABLED", "true")
@@ -65,9 +71,16 @@ var _ = ginkgo.AfterSuite(func() {
if conn != nil {
_ = conn.Close()
}
+ if composeFile != "" && logDir != "" {
+ fmt.Println("Exporting docker compose logs...")
+ if exportErr := propertyrepair.ExportDockerComposeLogs(composeFile, logDir); exportErr != nil {
+ fmt.Printf("Warning: failed to export logs: %v\n", exportErr)
+ }
+ fmt.Printf("Logs are available at: %s\n", logDir)
+ }
if composeFile != "" {
fmt.Println("Stopping compose stack...")
- _ = propertyrepair.ExecuteComposeCommand(composeFile, "down")
+ _ = propertyrepair.ExecuteComposeCommand(false, "-f", composeFile, "down")
}
})
@@ -83,7 +96,7 @@ var _ = ginkgo.Describe("Property Repair Half Data Test", ginkgo.Ordered, func()
// Start the docker compose stack without waiting first
fmt.Println("Starting services...")
- err = propertyrepair.ExecuteComposeCommand("-f", composeFile, "up", "-d")
+ err = propertyrepair.ExecuteComposeCommand(true, "-f", composeFile, "up", "-d")
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Simple wait for services to be ready
@@ -115,10 +128,10 @@ var _ = ginkgo.Describe("Property Repair Half Data Test", ginkgo.Ordered, func()
propertyrepair.CreatePropertySchema(ctx, propertyClient)
// Write 50,000 properties
- fmt.Println("Starting to write 50,000 properties...")
+ fmt.Println("Starting to write 5,000 properties...")
startTime := time.Now()
- err := propertyrepair.WriteProperties(ctx, propertyServiceClient, 0, 50000)
+ err := propertyrepair.WriteProperties(ctx, propertyServiceClient, 0, 5000)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
duration := time.Since(startTime)
@@ -140,14 +153,14 @@ var _ = ginkgo.Describe("Property Repair Half Data Test", ginkgo.Ordered, func()
fmt.Printf("=== Step 2 completed: updated replicas to 1 in %v ===\n", duration)
})
- ginkgo.It("Should write additional 50k properties", func() {
+ ginkgo.It("Should write additional 5k properties", func() {
ctx := context.Background()
fmt.Println("=== Step 3: Writing additional properties ===")
startTime := time.Now()
// Write another 50,000 properties (50000-100000)
- err := propertyrepair.WriteProperties(ctx, propertyServiceClient, 50000, 100000)
+ err := propertyrepair.WriteProperties(ctx, propertyServiceClient, 5000, 10000)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
duration := time.Since(startTime)
@@ -187,10 +200,10 @@ var _ = ginkgo.Describe("Property Repair Half Data Test", ginkgo.Ordered, func()
}
fmt.Println("\n=== Triggering property repair by waiting for scheduled repair cycle ===")
- fmt.Println("Waiting for property repair to trigger (@every 10 minutes)...")
+ fmt.Println("Waiting for property repair to trigger (@every 5 minutes)...")
gomega.Eventually(func() bool {
- time.Sleep(time.Second * 30)
+ time.Sleep(time.Second * 10)
// Get metrics after repair
fmt.Println("Trying to reading prometheus metrics to check repair status...")
afterMetrics := propertyrepair.GetAllNodeMetrics()
diff --git a/test/property_repair/same_data/integrated_test.go b/test/property_repair/same_data/integrated_test.go
index e4674c8c..b83138b2 100644
--- a/test/property_repair/same_data/integrated_test.go
+++ b/test/property_repair/same_data/integrated_test.go
@@ -38,6 +38,7 @@ import (
var (
composeFile string
+ logDir string
conn *grpc.ClientConn
groupClient databasev1.GroupRegistryServiceClient
propertyClient databasev1.PropertyRegistryServiceClient
@@ -52,6 +53,11 @@ func TestPropertyRepairSameData(t *testing.T) {
var _ = ginkgo.BeforeSuite(func() {
fmt.Println("Starting Property Repair Same Data Integration Test Suite...")
+ // Create log directory for this test run
+ var err error
+ logDir, err = propertyrepair.CreateLogDir("same_data")
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
// Disable Ryuk reaper to avoid container creation issues
os.Setenv("TESTCONTAINERS_RYUK_DISABLED", "true")
@@ -65,9 +71,16 @@ var _ = ginkgo.AfterSuite(func() {
if conn != nil {
_ = conn.Close()
}
+ if composeFile != "" && logDir != "" {
+ fmt.Println("Exporting docker compose logs...")
+ if exportErr := propertyrepair.ExportDockerComposeLogs(composeFile, logDir); exportErr != nil {
+ fmt.Printf("Warning: failed to export logs: %v\n", exportErr)
+ }
+ fmt.Printf("Logs are available at: %s\n", logDir)
+ }
if composeFile != "" {
fmt.Println("Stopping compose stack...")
- propertyrepair.ExecuteComposeCommand(composeFile, "down")
+ propertyrepair.ExecuteComposeCommand(false, "-f", composeFile, "down")
}
})
@@ -83,7 +96,7 @@ var _ = ginkgo.Describe("Property Repair Same Data Test", ginkgo.Ordered, func()
// Start the docker compose stack without waiting first
fmt.Println("Starting services...")
- err = propertyrepair.ExecuteComposeCommand("-f", composeFile, "up", "-d")
+ err = propertyrepair.ExecuteComposeCommand(true, "-f", composeFile, "up", "-d")
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Simple wait for services to be ready
@@ -115,10 +128,10 @@ var _ = ginkgo.Describe("Property Repair Same Data Test", ginkgo.Ordered, func()
propertyrepair.CreatePropertySchema(ctx, propertyClient)
// Write 100,000 properties (same amount across all replicas)
- fmt.Println("Starting to write 100,000 properties...")
+ fmt.Println("Starting to write 5,000 properties...")
startTime := time.Now()
- err := propertyrepair.WriteProperties(ctx, propertyServiceClient, 0, 100000)
+ err := propertyrepair.WriteProperties(ctx, propertyServiceClient, 0, 5000)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
duration := time.Since(startTime)
@@ -145,10 +158,10 @@ var _ = ginkgo.Describe("Property Repair Same Data Test", ginkgo.Ordered, func()
}
fmt.Println("\n=== Triggering property repair by waiting for scheduled repair cycle ===")
- fmt.Println("Waiting for property repair to trigger (@every 10 minutes)...")
+ fmt.Println("Waiting for property repair to trigger (@every 5 minutes)...")
gomega.Eventually(func() bool {
- time.Sleep(time.Second * 30)
+ time.Sleep(time.Second * 10)
// Get metrics after repair
fmt.Println("Trying to reading prometheus metrics to check repair status...")
afterMetrics := propertyrepair.GetAllNodeMetrics()
diff --git a/test/property_repair/shared_utils.go b/test/property_repair/shared_utils.go
index 5d490c5c..ee926d30 100644
--- a/test/property_repair/shared_utils.go
+++ b/test/property_repair/shared_utils.go
@@ -19,6 +19,7 @@
package propertyrepair
import (
+ "bytes"
"context"
"crypto/rand"
"errors"
@@ -47,7 +48,7 @@ import (
const (
DataSize = 2048 // 2KB per property
LiaisonAddr = "localhost:17912"
- Concurrency = 6
+ Concurrency = 2
GroupName = "perf-test-group"
PropertyName = "perf-test-property"
)
@@ -246,6 +247,7 @@ func WriteProperties(ctx context.Context, propertyServiceClient propertyv1.Prope
duration := endTime.Sub(startTime)
fmt.Printf("Write completed: %d properties in %s (%s props/sec)\n",
endIdx, FormatDuration(duration),
FormatThroughput(int64(endIdx), duration))
+ time.Sleep(20 * time.Second)
return nil
}
@@ -404,32 +406,25 @@ func PrintMetricsComparison(beforeMetrics, afterMetrics []*NodeMetrics) {
}
// ExecuteComposeCommand executes a docker-compose command, supporting both v1 and v2.
-func ExecuteComposeCommand(args ...string) error {
- // v2
- var executed bool
- if _, err := exec.LookPath("docker"); err == nil {
- check := exec.Command("docker", "compose", "version")
- if out, err := check.CombinedOutput(); err == nil && strings.Contains(string(out), "Docker Compose") {
- composeArgs := append([]string{"compose"}, args...)
- composeArgs = append(composeArgs, "--wait")
- cmd := exec.Command("docker", composeArgs...)
- cmd.Stdout = os.Stdout
- cmd.Stderr = os.Stderr
- if dockerErr := cmd.Run(); dockerErr != nil {
- return dockerErr
- }
- executed = true
- }
+func ExecuteComposeCommand(startCommand bool, args ...string) error {
+ // Detect compose invoker
+ invoker, detectErr := detectComposeInvoker()
+ if detectErr != nil {
+ return fmt.Errorf("failed to detect compose invoker: %w", detectErr)
}
- // v1
- if _, err := exec.LookPath("docker-compose"); err == nil && !executed {
- cmd := exec.Command("docker-compose", args...)
- cmd.Stdout = os.Stdout
- cmd.Stderr = os.Stderr
- if dockerErr := cmd.Run(); dockerErr != nil {
- return dockerErr
- }
+ // Build and execute command
+ cmdArgs := append([]string{}, invoker...)
+ cmdArgs = append(cmdArgs, args...)
+ cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...) // #nosec G204 -- invoker is from trusted detectComposeInvoker function
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ if runErr := cmd.Run(); runErr != nil {
+ return runErr
+ }
+
+ if !startCommand {
+ return nil
}
// wait all the container ready
@@ -494,6 +489,21 @@ func ExecuteComposeCommand(args ...string) error {
}
}
+func detectComposeInvoker() ([]string, error) {
+ // Try v2: docker compose
+ if _, err := exec.LookPath("docker"); err == nil {
+ check := exec.Command("docker", "compose", "version")
+ if out, checkErr := check.CombinedOutput(); checkErr == nil && strings.Contains(string(out), "Docker Compose") {
+ return []string{"docker", "compose"}, nil
+ }
+ }
+ // Try v1: docker-compose
+ if _, err := exec.LookPath("docker-compose"); err == nil {
+ return []string{"docker-compose"}, nil
+ }
+ return nil, errors.New("no docker compose found (neither 'docker compose' nor 'docker-compose')")
+}
+
func listComposeContainerIDs() ([]string, error) {
out, err := exec.Command("docker", "ps", "-q").CombinedOutput()
if err != nil {
@@ -526,3 +536,87 @@ func inspectHealth(containerID string) (name, health, state string, err error) {
state = parts[2]
return
}
+
+// CreateLogDir creates a timestamped log directory for a test scenario.
+func CreateLogDir(testScenario string) (string, error) {
+ timestamp := time.Now().Format("20060102-150405")
+ logDir := fmt.Sprintf("/tmp/banyandb-test-%s-%s", testScenario, timestamp)
+ if createErr := os.MkdirAll(logDir, 0o755); createErr != nil {
+ return "", fmt.Errorf("failed to create log directory: %w", createErr)
+ }
+ fmt.Printf("Created log directory: %s\n", logDir)
+ return logDir, nil
+}
+
+// ExportDockerComposeLogs exports logs from all services in the docker compose stack.
+func ExportDockerComposeLogs(composeFile, logDir string) error {
+ fmt.Printf("Exporting docker compose logs to %s...\n", logDir)
+
+ // Detect compose invoker
+ invoker, detectErr := detectComposeInvoker()
+ if detectErr != nil {
+ return fmt.Errorf("failed to detect compose invoker: %w", detectErr)
+ }
+
+ // Get list of services dynamically from compose file
+ fmt.Println(" Discovering services from compose file...")
+ configArgs := append([]string{}, invoker...)
+ configArgs = append(configArgs, "-f", composeFile, "config", "--services")
+ configCmd := exec.Command(configArgs[0], configArgs[1:]...) // #nosec G204 -- invoker is from trusted detectComposeInvoker function
+
+ // Separate stdout and stderr to filter out warnings
+ var stdout, stderr bytes.Buffer
+ configCmd.Stdout = &stdout
+ configCmd.Stderr = &stderr
+
+ if configErr := configCmd.Run(); configErr != nil {
+ return fmt.Errorf("failed to get services list: %w (stderr: %s)", configErr, stderr.String())
+ }
+
+ servicesOutput := stdout.Bytes()
+
+ // Parse services list and filter out warning lines
+ serviceLines := strings.Split(strings.TrimSpace(string(servicesOutput)), "\n")
+ var services []string
+ for _, line := range serviceLines {
+ serviceName := strings.TrimSpace(line)
+ // Skip empty lines and WARN lines
+ if serviceName != "" && !strings.HasPrefix(serviceName, "WARN[") {
+ services = append(services, serviceName)
+ }
+ }
+
+ if len(services) == 0 {
+ return errors.New("no services found in compose file")
+ }
+
+ fmt.Printf(" Found %d services: %v\n", len(services), services)
+
+ // Export logs for each service
+ for _, service := range services {
+ logFile := fmt.Sprintf("%s/%s.log", logDir, service)
+ fmt.Printf(" Exporting logs for service %s to %s...\n", service, logFile)
+
+ // Build command: docker compose -f <file> logs --no-color <service>
+ logsArgs := append([]string{}, invoker...)
+ logsArgs = append(logsArgs, "-f", composeFile, "logs", "--no-color", service)
+ logsCmd := exec.Command(logsArgs[0], logsArgs[1:]...) // #nosec G204 -- invoker is from trusted detectComposeInvoker function
+
+ // Capture output
+ output, cmdErr := logsCmd.CombinedOutput()
+ if cmdErr != nil {
+ // Service might not exist or have no logs, continue with warning
+ fmt.Printf(" Warning: failed to get logs for %s: %v\n", service, cmdErr)
+ continue
+ }
+
+ // Write to file
+ if writeErr := os.WriteFile(logFile, output, 0o600); writeErr != nil {
+ return fmt.Errorf("failed to write log file for %s: %w", service, writeErr)
+ }
+ fmt.Printf(" Successfully exported %d bytes of logs for %s\n", len(output), service)
+ }
+
+ fmt.Printf("All logs exported to %s\n", logDir)
+ return nil
+}
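For context when reviewing: below is a minimal sketch of how the reworked helpers in test/property_repair/shared_utils.go are meant to be used together, mirroring the BeforeSuite/AfterSuite wiring in the three test suites above. The import path and the surrounding function are assumptions for illustration only; the calls themselves are the ones introduced or changed by this commit.

    package example

    import (
        "fmt"

        propertyrepair "github.com/apache/skywalking-banyandb/test/property_repair"
    )

    func runScenario(composeFile string) error {
        // Timestamped /tmp/banyandb-test-<scenario>-<timestamp> directory; the new
        // property-repair.yml workflow uploads /tmp/banyandb-test-* on failure.
        logDir, err := propertyrepair.CreateLogDir("example_scenario")
        if err != nil {
            return err
        }

        // startCommand=true: run the compose command and then wait for the
        // containers to become ready before returning.
        if upErr := propertyrepair.ExecuteComposeCommand(true, "-f", composeFile, "up", "-d"); upErr != nil {
            return upErr
        }

        // ... run the scenario's assertions against the liaison node here ...

        // Export per-service logs (services discovered via "config --services") before teardown.
        if exportErr := propertyrepair.ExportDockerComposeLogs(composeFile, logDir); exportErr != nil {
            fmt.Printf("Warning: failed to export logs: %v\n", exportErr)
        }

        // startCommand=false: plain "down", no readiness wait.
        return propertyrepair.ExecuteComposeCommand(false, "-f", composeFile, "down")
    }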