This is an automated email from the ASF dual-hosted git repository.

wusheng pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/skywalking-banyandb.git


The following commit(s) were added to refs/heads/main by this push:
     new a94aaa0d Restore the data (#606)
a94aaa0d is described below

commit a94aaa0d2bb89abf99c2d93992794e2f067e1e58
Author: Gao Hongtao <[email protected]>
AuthorDate: Wed Feb 12 14:12:21 2025 +0800

    Restore the data (#606)
---
 CHANGES.md                                         |   1 +
 banyand/measure/snapshot.go                        |  28 ++-
 banyand/property/listener.go                       |   4 +-
 banyand/property/service.go                        |  14 +-
 banyand/stream/snapshot.go                         |  28 ++-
 banyand/stream/snapshot_test.go                    |   3 +
 bydbbackup/Makefile                                |   2 +-
 bydbbackup/cmd/backup/main.go                      |   4 +-
 bydbbackup/cmd/{backup => restore}/main.go         |   6 +-
 bydbbackup/{internal/backup => pkg}/backup.go      |  27 +--
 bydbbackup/{internal/backup => pkg}/backup_test.go |   6 +-
 bydbbackup/pkg/restore.go                          | 242 +++++++++++++++++++++
 bydbbackup/pkg/restore_test.go                     |  91 ++++++++
 bydbbackup/pkg/timedir.go                          | 239 ++++++++++++++++++++
 bydbbackup/pkg/timedir_test.go                     | 145 ++++++++++++
 docs/menu.yml                                      |   2 +
 docs/operation/backup.md                           |   4 +-
 docs/operation/restore.md                          | 135 ++++++++++++
 pkg/test/helpers/context.go                        |   5 +-
 test/cases/backup/all.go                           | 176 +++++++++++++++
 .../{snapshot/snapshot.go => backup/backup.go}     |  72 ++++--
 .../backup_suite_test.go}                          |  13 +-
 .../backup_suite_test.go}                          |  11 +-
 23 files changed, 1194 insertions(+), 64 deletions(-)

diff --git a/CHANGES.md b/CHANGES.md
index 062785e0..ed8b7482 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -27,6 +27,7 @@ Release Notes.
 - Test: Limit the CPU and memory usage of the e2e test.
 - Add taking the snapshot of data files.
 - Add backup command line tool to backup the data files.
+- Add restore command line tool to restore the data files.
 
 ### Bug Fixes
 
diff --git a/banyand/measure/snapshot.go b/banyand/measure/snapshot.go
index a873b19b..ad64e8eb 100644
--- a/banyand/measure/snapshot.go
+++ b/banyand/measure/snapshot.go
@@ -19,6 +19,7 @@ package measure
 
 import (
        "context"
+       "encoding/json"
        "fmt"
        "path/filepath"
        "sync"
@@ -32,6 +33,7 @@ import (
        databasev1 
"github.com/apache/skywalking-banyandb/api/proto/banyandb/database/v1"
        "github.com/apache/skywalking-banyandb/banyand/internal/storage"
        "github.com/apache/skywalking-banyandb/pkg/bus"
+       "github.com/apache/skywalking-banyandb/pkg/logger"
        "github.com/apache/skywalking-banyandb/pkg/schema"
 )
 
@@ -168,11 +170,35 @@ func (tst *tsTable) TakeFileSnapshot(dst string) error {
                        return fmt.Errorf("failed to create snapshot for part 
%d: %w", part.partMetadata.ID, err)
                }
        }
+       tst.createMetadata(dst, snapshot)
        parent := filepath.Dir(dst)
        tst.fileSystem.SyncPath(parent)
        return nil
 }
 
+func (tst *tsTable) createMetadata(dst string, snapshot *snapshot) {
+       var partNames []string
+       for i := range snapshot.parts {
+               partNames = append(partNames, partName(snapshot.parts[i].ID()))
+       }
+       data, err := json.Marshal(partNames)
+       if err != nil {
+               logger.Panicf("cannot marshal partNames to JSON: %s", err)
+       }
+       snapshotPath := filepath.Join(dst, snapshotName(snapshot.epoch))
+       lf, err := tst.fileSystem.CreateFile(snapshotPath, filePermission)
+       if err != nil {
+               logger.Panicf("cannot create lock file %s: %s", snapshotPath, 
err)
+       }
+       n, err := lf.Write(data)
+       if err != nil {
+               logger.Panicf("cannot write snapshot %s: %s", snapshotPath, err)
+       }
+       if n != len(data) {
+               logger.Panicf("unexpected number of bytes written to %s; got 
%d; want %d", snapshotPath, n, len(data))
+       }
+}
+
 func (s *service) takeGroupSnapshot(dstDir string, groupName string) error {
        group, ok := s.schemaRepo.LoadGroup(groupName)
        if !ok {
@@ -228,7 +254,7 @@ func (s *snapshotListener) Rev(ctx context.Context, message 
bus.Message) bus.Mes
                        return 
bus.NewMessage(bus.MessageID(time.Now().UnixNano()), nil)
                default:
                }
-               if errGroup := 
s.s.takeGroupSnapshot(filepath.Join(s.s.snapshotDir, sn), 
g.GetSchema().Metadata.Name); err != nil {
+               if errGroup := 
s.s.takeGroupSnapshot(filepath.Join(s.s.snapshotDir, sn, dataDir), 
g.GetSchema().Metadata.Name); err != nil {
                        s.s.l.Error().Err(errGroup).Str("group", 
g.GetSchema().Metadata.Name).Msg("fail to take group snapshot")
                        err = multierr.Append(err, errGroup)
                        continue
diff --git a/banyand/property/listener.go b/banyand/property/listener.go
index 2aee3a0a..a7025314 100644
--- a/banyand/property/listener.go
+++ b/banyand/property/listener.go
@@ -230,7 +230,7 @@ func (s *snapshotListener) Rev(ctx context.Context, message 
bus.Message) bus.Mes
        }
        s.snapshotMux.Lock()
        defer s.snapshotMux.Unlock()
-       storage.DeleteStaleSnapshots(filepath.Join(s.s.db.location, 
snapshotsDir), s.s.maxFileSnapshotNum, s.s.lfs)
+       storage.DeleteStaleSnapshots(s.s.snapshotDir, s.s.maxFileSnapshotNum, 
s.s.lfs)
        sn := s.snapshotName()
        shardsRef := s.s.db.sLst.Load()
        if shardsRef == nil {
@@ -243,7 +243,7 @@ func (s *snapshotListener) Rev(ctx context.Context, message 
bus.Message) bus.Mes
                        return 
bus.NewMessage(bus.MessageID(time.Now().UnixNano()), nil)
                default:
                }
-               snpDir := path.Join(s.s.db.location, snapshotsDir, sn, 
filepath.Base(shard.location))
+               snpDir := path.Join(s.s.snapshotDir, sn, dataDir, 
filepath.Base(shard.location))
                lfs.MkdirPanicIfExist(snpDir, dirPerm)
                err := shard.store.TakeFileSnapshot(snpDir)
                if err != nil {
diff --git a/banyand/property/service.go b/banyand/property/service.go
index 6afd7198..4932cbae 100644
--- a/banyand/property/service.go
+++ b/banyand/property/service.go
@@ -21,6 +21,7 @@ import (
        "context"
        "errors"
        "path"
+       "path/filepath"
        "time"
 
        "go.uber.org/multierr"
@@ -36,7 +37,10 @@ import (
        "github.com/apache/skywalking-banyandb/pkg/run"
 )
 
-const defaultFlushTimeout = 5 * time.Second
+const (
+       defaultFlushTimeout = 5 * time.Second
+       dataDir             = "data"
+)
 
 var (
        errEmptyRootPath         = errors.New("root path is empty")
@@ -48,11 +52,12 @@ type service struct {
        pipeline            queue.Server
        omr                 observability.MetricsRegistry
        lfs                 fs.FileSystem
-       l                   *logger.Logger
-       db                  *database
        close               chan struct{}
+       db                  *database
+       l                   *logger.Logger
        root                string
        nodeID              string
+       snapshotDir         string
        flushTimeout        time.Duration
        maxDiskUsagePercent int
        maxFileSnapshotNum  int
@@ -92,6 +97,7 @@ func (s *service) PreRun(ctx context.Context) error {
        s.l = logger.GetLogger(s.Name())
        s.lfs = fs.NewLocalFileSystemWithLogger(s.l)
        path := path.Join(s.root, s.Name())
+       s.snapshotDir = filepath.Join(path, snapshotsDir)
        observability.UpdatePath(path)
        val := ctx.Value(common.ContextNodeKey)
        if val == nil {
@@ -101,7 +107,7 @@ func (s *service) PreRun(ctx context.Context) error {
        s.nodeID = node.NodeID
 
        var err error
-       s.db, err = openDB(ctx, path, s.flushTimeout, s.omr)
+       s.db, err = openDB(ctx, filepath.Join(path, dataDir), s.flushTimeout, 
s.omr)
        if err != nil {
                return err
        }
diff --git a/banyand/stream/snapshot.go b/banyand/stream/snapshot.go
index 1cee3f2a..df00698e 100644
--- a/banyand/stream/snapshot.go
+++ b/banyand/stream/snapshot.go
@@ -19,6 +19,7 @@ package stream
 
 import (
        "context"
+       "encoding/json"
        "fmt"
        "path/filepath"
        "sync"
@@ -32,6 +33,7 @@ import (
        databasev1 
"github.com/apache/skywalking-banyandb/api/proto/banyandb/database/v1"
        "github.com/apache/skywalking-banyandb/banyand/internal/storage"
        "github.com/apache/skywalking-banyandb/pkg/bus"
+       "github.com/apache/skywalking-banyandb/pkg/logger"
        "github.com/apache/skywalking-banyandb/pkg/schema"
 )
 
@@ -174,11 +176,35 @@ func (tst *tsTable) TakeFileSnapshot(dst string) error {
                        return fmt.Errorf("failed to create snapshot for part 
%d: %w", part.partMetadata.ID, err)
                }
        }
+       tst.createMetadata(dst, snapshot)
        parent := filepath.Dir(dst)
        tst.fileSystem.SyncPath(parent)
        return nil
 }
 
+func (tst *tsTable) createMetadata(dst string, snapshot *snapshot) {
+       var partNames []string
+       for i := range snapshot.parts {
+               partNames = append(partNames, partName(snapshot.parts[i].ID()))
+       }
+       data, err := json.Marshal(partNames)
+       if err != nil {
+               logger.Panicf("cannot marshal partNames to JSON: %s", err)
+       }
+       snapshotPath := filepath.Join(dst, snapshotName(snapshot.epoch))
+       lf, err := tst.fileSystem.CreateFile(snapshotPath, filePermission)
+       if err != nil {
+               logger.Panicf("cannot create lock file %s: %s", snapshotPath, 
err)
+       }
+       n, err := lf.Write(data)
+       if err != nil {
+               logger.Panicf("cannot write snapshot %s: %s", snapshotPath, err)
+       }
+       if n != len(data) {
+               logger.Panicf("unexpected number of bytes written to %s; got 
%d; want %d", snapshotPath, n, len(data))
+       }
+}
+
 func (s *service) takeGroupSnapshot(dstDir string, groupName string) error {
        group, ok := s.schemaRepo.LoadGroup(groupName)
        if !ok {
@@ -234,7 +260,7 @@ func (s *snapshotListener) Rev(ctx context.Context, message 
bus.Message) bus.Mes
                        return 
bus.NewMessage(bus.MessageID(time.Now().UnixNano()), nil)
                default:
                }
-               if errGroup := 
s.s.takeGroupSnapshot(filepath.Join(s.s.snapshotDir, sn), 
g.GetSchema().Metadata.Name); err != nil {
+               if errGroup := 
s.s.takeGroupSnapshot(filepath.Join(s.s.snapshotDir, sn, dataDir), 
g.GetSchema().Metadata.Name); err != nil {
                        s.s.l.Error().Err(errGroup).Str("group", 
g.GetSchema().Metadata.Name).Msg("fail to take group snapshot")
                        err = multierr.Append(err, errGroup)
                        continue
diff --git a/banyand/stream/snapshot_test.go b/banyand/stream/snapshot_test.go
index 201cc8ad..a61dc039 100644
--- a/banyand/stream/snapshot_test.go
+++ b/banyand/stream/snapshot_test.go
@@ -487,6 +487,9 @@ func TestSnapshotFunctionality(t *testing.T) {
        hasIndex := false
        var partDir string
        for _, entry := range entries {
+               if !entry.IsDir() {
+                       continue
+               }
                if entry.Name() == elementIndexFilename {
                        hasIndex = true
                } else {
diff --git a/bydbbackup/Makefile b/bydbbackup/Makefile
index 699e660a..4bcdf289 100644
--- a/bydbbackup/Makefile
+++ b/bydbbackup/Makefile
@@ -17,7 +17,7 @@
 #
 
 NAME := bydbbackup
-BINARIES := $(NAME)-backup
+BINARIES := $(NAME)-backup $(NAME)-restore
 
 IMG_NAME := skywalking-bydbbackup
 
diff --git a/bydbbackup/cmd/backup/main.go b/bydbbackup/cmd/backup/main.go
index 4c823973..e88a7996 100644
--- a/bydbbackup/cmd/backup/main.go
+++ b/bydbbackup/cmd/backup/main.go
@@ -22,11 +22,11 @@ import (
        "fmt"
        "os"
 
-       "github.com/apache/skywalking-banyandb/bydbbackup/internal/backup"
+       "github.com/apache/skywalking-banyandb/bydbbackup/pkg"
 )
 
 func main() {
-       cmd := backup.NewBackupCommand()
+       cmd := pkg.NewBackupCommand()
        if err := cmd.Execute(); err != nil {
                fmt.Fprintln(os.Stderr, err)
                os.Exit(1)
diff --git a/bydbbackup/cmd/backup/main.go b/bydbbackup/cmd/restore/main.go
similarity index 84%
copy from bydbbackup/cmd/backup/main.go
copy to bydbbackup/cmd/restore/main.go
index 4c823973..333cc7d7 100644
--- a/bydbbackup/cmd/backup/main.go
+++ b/bydbbackup/cmd/restore/main.go
@@ -15,18 +15,18 @@
 // specific language governing permissions and limitations
 // under the License.
 
-// Package main provides main entry for the backup command-line tool.
+// Package main provides main entry for the restore command-line tool.
 package main
 
 import (
        "fmt"
        "os"
 
-       "github.com/apache/skywalking-banyandb/bydbbackup/internal/backup"
+       "github.com/apache/skywalking-banyandb/bydbbackup/pkg"
 )
 
 func main() {
-       cmd := backup.NewBackupCommand()
+       cmd := pkg.NewRestoreCommand()
        if err := cmd.Execute(); err != nil {
                fmt.Fprintln(os.Stderr, err)
                os.Exit(1)
diff --git a/bydbbackup/internal/backup/backup.go b/bydbbackup/pkg/backup.go
similarity index 92%
rename from bydbbackup/internal/backup/backup.go
rename to bydbbackup/pkg/backup.go
index ae1042b9..484ce83a 100644
--- a/bydbbackup/internal/backup/backup.go
+++ b/bydbbackup/pkg/backup.go
@@ -15,8 +15,8 @@
 // specific language governing permissions and limitations
 // under the License.
 
-// Package backup provides the backup command-line tool.
-package backup
+// Package pkg provides the backup command-line tool.
+package pkg
 
 import (
        "context"
@@ -42,6 +42,7 @@ import (
        "github.com/apache/skywalking-banyandb/pkg/grpchelper"
        "github.com/apache/skywalking-banyandb/pkg/logger"
        "github.com/apache/skywalking-banyandb/pkg/timestamp"
+       "github.com/apache/skywalking-banyandb/pkg/version"
 )
 
 const snapshotDir = "snapshots"
@@ -67,8 +68,9 @@ func NewBackupCommand() *cobra.Command {
        )
 
        cmd := &cobra.Command{
-               Use:   "backup",
-               Short: "Backup BanyanDB snapshots to remote storage",
+               Short:             "Backup BanyanDB snapshots to remote 
storage",
+               DisableAutoGenTag: true,
+               Version:           version.Build(),
                RunE: func(_ *cobra.Command, _ []string) error {
                        if scheduleStyle == "" {
                                return backupAction(dest, gRPCAddr, enableTLS, 
insecure, cert,
@@ -147,7 +149,7 @@ func backupAction(dest, gRPCAddr string, enableTLS, 
insecure bool, cert,
                        logger.Warningf("Failed to get snapshot directory for 
%s: %v", snapshot.Name, err)
                        continue
                }
-               multierr.AppendInto(&err, backupSnapshot(fs, snapshotDir, 
snapshot.Name, timeDir))
+               multierr.AppendInto(&err, backupSnapshot(fs, snapshotDir, 
getCatalogName(snapshot.Catalog), timeDir))
        }
        return err
 }
@@ -189,15 +191,14 @@ func getSnapshotDir(snapshot *databasev1.Snapshot, 
streamRoot, measureRoot, prop
        var baseDir string
        switch snapshot.Catalog {
        case commonv1.Catalog_CATALOG_STREAM:
-               baseDir = streamRoot
+               baseDir = getLocalDir(streamRoot, snapshot.Catalog)
        case commonv1.Catalog_CATALOG_MEASURE:
-               baseDir = measureRoot
+               baseDir = getLocalDir(measureRoot, snapshot.Catalog)
        case commonv1.Catalog_CATALOG_PROPERTY:
-               baseDir = propertyRoot
+               baseDir = getLocalDir(propertyRoot, snapshot.Catalog)
        default:
                return "", errors.New("unknown catalog type")
        }
-
        return filepath.Join(baseDir, snapshotDir, snapshot.Name), nil
 }
 
@@ -211,14 +212,14 @@ func getTimeDir(style string) string {
        }
 }
 
-func backupSnapshot(fs remote.FS, snapshotDir, snapshotName, timeDir string) 
error {
+func backupSnapshot(fs remote.FS, snapshotDir, catalog, timeDir string) error {
        localFiles, err := getAllFiles(snapshotDir)
        if err != nil {
                return err
        }
 
        ctx := context.Background()
-       remotePrefix := path.Join(timeDir, snapshotName) + "/"
+       remotePrefix := path.Join(timeDir, catalog) + "/"
 
        remoteFiles, err := fs.List(ctx, remotePrefix)
        if err != nil {
@@ -226,7 +227,7 @@ func backupSnapshot(fs remote.FS, snapshotDir, 
snapshotName, timeDir string) err
        }
 
        for _, relPath := range localFiles {
-               remotePath := path.Join(timeDir, snapshotName, relPath)
+               remotePath := path.Join(timeDir, catalog, relPath)
                if !contains(remoteFiles, remotePath) {
                        if err := uploadFile(ctx, fs, snapshotDir, relPath, 
remotePath); err != nil {
                                return err
@@ -234,7 +235,7 @@ func backupSnapshot(fs remote.FS, snapshotDir, 
snapshotName, timeDir string) err
                }
        }
 
-       deleteOrphanedFiles(ctx, fs, localFiles, remoteFiles, timeDir, 
snapshotName)
+       deleteOrphanedFiles(ctx, fs, localFiles, remoteFiles, timeDir, catalog)
        return nil
 }
 
diff --git a/bydbbackup/internal/backup/backup_test.go 
b/bydbbackup/pkg/backup_test.go
similarity index 98%
rename from bydbbackup/internal/backup/backup_test.go
rename to bydbbackup/pkg/backup_test.go
index ef3a6247..64759955 100644
--- a/bydbbackup/internal/backup/backup_test.go
+++ b/bydbbackup/pkg/backup_test.go
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package backup
+package pkg
 
 import (
        "context"
@@ -63,8 +63,8 @@ func TestGetSnapshotDir(t *testing.T) {
                {
                        "stream catalog",
                        &databasev1.Snapshot{Catalog: 
commonv1.Catalog_CATALOG_STREAM, Name: "test"},
-                       "/stream", "/measure", "/prop",
-                       filepath.Join("/stream", snapshotDir, "test"),
+                       "/tmp", "/tmp", "/tmp",
+                       filepath.Join("/tmp/stream", snapshotDir, "test"),
                        false,
                },
                {
diff --git a/bydbbackup/pkg/restore.go b/bydbbackup/pkg/restore.go
new file mode 100644
index 00000000..141d94bb
--- /dev/null
+++ b/bydbbackup/pkg/restore.go
@@ -0,0 +1,242 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package pkg
+
+import (
+       "context"
+       "errors"
+       "fmt"
+       "io"
+       "os"
+       "path/filepath"
+       "strings"
+
+       "github.com/spf13/cobra"
+       "go.uber.org/multierr"
+
+       commonv1 
"github.com/apache/skywalking-banyandb/api/proto/banyandb/common/v1"
+       "github.com/apache/skywalking-banyandb/pkg/fs/remote"
+       "github.com/apache/skywalking-banyandb/pkg/logger"
+       "github.com/apache/skywalking-banyandb/pkg/version"
+)
+
+const (
+       dirPerm = 0o755
+       dataDir = "data"
+)
+
+// NewRestoreCommand creates a new restore command.
+func NewRestoreCommand() *cobra.Command {
+       rootCmd := &cobra.Command{
+               DisableAutoGenTag: true,
+               Version:           version.Build(),
+               Short:             "Restore BanyanDB data from remote storage",
+       }
+       rootCmd.AddCommand(newRunCommand())
+       rootCmd.AddCommand(NewTimeDirCommand())
+       return rootCmd
+}
+
+func newRunCommand() *cobra.Command {
+       var (
+               source       string
+               streamRoot   string
+               measureRoot  string
+               propertyRoot string
+       )
+       cmd := &cobra.Command{
+               Use:   "run",
+               Short: "Restore BanyanDB data from remote storage",
+               RunE: func(_ *cobra.Command, _ []string) error {
+                       if streamRoot == "" && measureRoot == "" && 
propertyRoot == "" {
+                               return errors.New("at least one of 
stream-root-path, measure-root-path, or property-root-path is required")
+                       }
+                       if source == "" {
+                               return errors.New("source is required")
+                       }
+                       fs, err := newFS(source)
+                       if err != nil {
+                               return err
+                       }
+                       defer fs.Close()
+
+                       var errs error
+
+                       if streamRoot != "" {
+                               timeDirPath := filepath.Join(streamRoot, 
"stream-time-dir")
+                               if data, err := os.ReadFile(timeDirPath); err 
== nil {
+                                       timeDir := 
strings.TrimSpace(string(data))
+                                       if err = restoreCatalog(fs, timeDir, 
streamRoot, commonv1.Catalog_CATALOG_STREAM); err != nil {
+                                               errs = multierr.Append(errs, 
fmt.Errorf("stream restore failed: %w", err))
+                                       } else {
+                                               _ = os.Remove(timeDirPath)
+                                       }
+                               } else if !errors.Is(err, os.ErrNotExist) {
+                                       return err
+                               }
+                       }
+                       if measureRoot != "" {
+                               timeDirPath := filepath.Join(measureRoot, 
"measure-time-dir")
+                               if data, err := os.ReadFile(timeDirPath); err 
== nil {
+                                       timeDir := 
strings.TrimSpace(string(data))
+                                       if err = restoreCatalog(fs, timeDir, 
measureRoot, commonv1.Catalog_CATALOG_MEASURE); err != nil {
+                                               errs = multierr.Append(errs, 
fmt.Errorf("measure restore failed: %w", err))
+                                       } else {
+                                               _ = os.Remove(timeDirPath)
+                                       }
+                               } else if !errors.Is(err, os.ErrNotExist) {
+                                       return err
+                               }
+                       }
+                       if propertyRoot != "" {
+                               timeDirPath := filepath.Join(propertyRoot, 
"property-time-dir")
+                               if data, err := os.ReadFile(timeDirPath); err 
== nil {
+                                       timeDir := 
strings.TrimSpace(string(data))
+                                       if err = restoreCatalog(fs, timeDir, 
propertyRoot, commonv1.Catalog_CATALOG_PROPERTY); err != nil {
+                                               errs = multierr.Append(errs, 
fmt.Errorf("property restore failed: %w", err))
+                                       } else {
+                                               _ = os.Remove(timeDirPath)
+                                       }
+                               } else if !errors.Is(err, os.ErrNotExist) {
+                                       return err
+                               }
+                       }
+
+                       return errs
+               },
+       }
+       cmd.Flags().StringVar(&source, "source", "", "Source URL (e.g., 
file:///backups)")
+       cmd.Flags().StringVar(&streamRoot, "stream-root-path", "/tmp", "Root 
directory for stream catalog")
+       cmd.Flags().StringVar(&measureRoot, "measure-root-path", "/tmp", "Root 
directory for measure catalog")
+       cmd.Flags().StringVar(&propertyRoot, "property-root-path", "/tmp", 
"Root directory for property catalog")
+
+       return cmd
+}
+
+func restoreCatalog(fs remote.FS, timeDir, rootPath string, catalog 
commonv1.Catalog) error {
+       remotePrefix := filepath.Join(timeDir, getCatalogName(catalog), "/")
+
+       remoteFiles, err := fs.List(context.Background(), remotePrefix)
+       if err != nil {
+               return fmt.Errorf("failed to list remote files: %w", err)
+       }
+
+       localDir := filepath.Join(getLocalDir(rootPath, catalog), dataDir)
+       if err = os.MkdirAll(localDir, dirPerm); err != nil {
+               return fmt.Errorf("failed to create local directory %s: %w", 
localDir, err)
+       }
+
+       remoteRelSet := make(map[string]bool)
+       var relPath string
+       for _, remoteFile := range remoteFiles {
+               relPath, err = filepath.Rel(timeDir, remoteFile)
+               if err != nil {
+                       return fmt.Errorf("failed to get relative path for %s: 
%w", remoteFile, err)
+               }
+               remoteRelSet[filepath.ToSlash(relPath)] = true
+       }
+
+       localFiles, err := getAllFiles(localDir)
+       if err != nil {
+               return fmt.Errorf("failed to list local files: %w", err)
+       }
+
+       for _, localRelPath := range localFiles {
+               if !remoteRelSet[localRelPath] {
+                       localPath := filepath.Join(localDir, localRelPath)
+                       if err := os.Remove(localPath); err != nil {
+                               return fmt.Errorf("failed to remove local file 
%s: %w", localPath, err)
+                       }
+                       cleanEmptyDirs(filepath.Dir(localPath), localDir)
+               }
+       }
+
+       for _, remoteFile := range remoteFiles {
+               relPath, err := filepath.Rel(timeDir, remoteFile)
+               if err != nil {
+                       return fmt.Errorf("failed to get relative path for %s: 
%w", remoteFile, err)
+               }
+               relPath = filepath.ToSlash(relPath)
+               localPath := filepath.Join(rootPath, relPath)
+
+               if !contains(localFiles, relPath) {
+                       if err := os.MkdirAll(filepath.Dir(localPath), 
dirPerm); err != nil {
+                               return fmt.Errorf("failed to create directory 
for %s: %w", localPath, err)
+                       }
+
+                       if err := downloadFile(context.Background(), fs, 
remoteFile, localPath); err != nil {
+                               return fmt.Errorf("failed to download %s: %w", 
remoteFile, err)
+                       }
+                       logger.Infof("Downloaded %s to %s", remoteFile, 
localPath)
+               }
+       }
+
+       return nil
+}
+
+func cleanEmptyDirs(dir, stopDir string) {
+       for {
+               if dir == stopDir || dir == "." {
+                       break
+               }
+               entries, err := os.ReadDir(dir)
+               if err != nil || len(entries) > 0 {
+                       break
+               }
+               _ = os.Remove(dir)
+               dir = filepath.Dir(dir)
+       }
+}
+
+func getLocalDir(rootPath string, catalog commonv1.Catalog) string {
+       return filepath.Join(rootPath, getCatalogName(catalog))
+}
+
+func getCatalogName(catalog commonv1.Catalog) string {
+       switch catalog {
+       case commonv1.Catalog_CATALOG_STREAM:
+               return "stream"
+       case commonv1.Catalog_CATALOG_MEASURE:
+               return "measure"
+       case commonv1.Catalog_CATALOG_PROPERTY:
+               return "property"
+       default:
+               logger.Panicf("unknown catalog type: %v", catalog)
+               return ""
+       }
+}
+
+func downloadFile(ctx context.Context, fs remote.FS, remotePath, localPath 
string) error {
+       reader, err := fs.Download(ctx, remotePath)
+       if err != nil {
+               return err
+       }
+       defer reader.Close()
+
+       file, err := os.Create(localPath)
+       if err != nil {
+               return err
+       }
+       defer file.Close()
+
+       if _, err := io.Copy(file, reader); err != nil {
+               return err
+       }
+
+       return nil
+}
diff --git a/bydbbackup/pkg/restore_test.go b/bydbbackup/pkg/restore_test.go
new file mode 100644
index 00000000..2e9cb3ef
--- /dev/null
+++ b/bydbbackup/pkg/restore_test.go
@@ -0,0 +1,91 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package pkg
+
+import (
+       "context"
+       "os"
+       "path/filepath"
+       "strings"
+       "testing"
+
+       commonv1 
"github.com/apache/skywalking-banyandb/api/proto/banyandb/common/v1"
+       "github.com/apache/skywalking-banyandb/pkg/fs/remote/local"
+)
+
+func TestRestoreDownload(t *testing.T) {
+       remoteDir := t.TempDir()
+       localRestoreDir := t.TempDir()
+
+       fs, err := local.NewFS(remoteDir)
+       if err != nil {
+               t.Fatalf("failed to create remote FS: %v", err)
+       }
+
+       timeDir := "2023-10-10"
+       remoteFilePath := filepath.Join(timeDir, 
getCatalogName(commonv1.Catalog_CATALOG_STREAM), dataDir, "test.txt")
+       content := "hello"
+       err = fs.Upload(context.Background(), remoteFilePath, 
strings.NewReader(content))
+       if err != nil {
+               t.Fatalf("failed to upload file: %v", err)
+       }
+
+       err = restoreCatalog(fs, timeDir, localRestoreDir, 
commonv1.Catalog_CATALOG_STREAM)
+       if err != nil {
+               t.Fatalf("restoreCatalog failed: %v", err)
+       }
+
+       localFilePath := filepath.Join(localRestoreDir, 
getCatalogName(commonv1.Catalog_CATALOG_STREAM), dataDir, "test.txt")
+       got, err := os.ReadFile(localFilePath)
+       if err != nil {
+               t.Fatalf("failed to read local file: %v", err)
+       }
+       if string(got) != content {
+               t.Fatalf("expected content %q, got %q", content, string(got))
+       }
+}
+
+func TestRestoreDelete(t *testing.T) {
+       remoteDir := t.TempDir()
+       localRestoreDir := t.TempDir()
+
+       fs, err := local.NewFS(remoteDir)
+       if err != nil {
+               t.Fatalf("failed to create remote FS: %v", err)
+       }
+
+       streamDir := filepath.Join(localRestoreDir, "stream", dataDir)
+       if err = os.MkdirAll(streamDir, dirPerm); err != nil {
+               t.Fatalf("failed to create local stream directory: %v", err)
+       }
+       extraFilePath := filepath.Join(streamDir, "old.txt")
+       extraContent := "stale"
+       if err = os.WriteFile(extraFilePath, []byte(extraContent), 0o600); err 
!= nil {
+               t.Fatalf("failed to write extra local file: %v", err)
+       }
+
+       timeDir := "2023-10-10"
+       err = restoreCatalog(fs, timeDir, localRestoreDir, 
commonv1.Catalog_CATALOG_STREAM)
+       if err != nil {
+               t.Fatalf("restoreCatalog failed: %v", err)
+       }
+
+       if _, err := os.Stat(extraFilePath); !os.IsNotExist(err) {
+               t.Fatalf("expected extra file %q to be deleted", extraFilePath)
+       }
+}
diff --git a/bydbbackup/pkg/timedir.go b/bydbbackup/pkg/timedir.go
new file mode 100644
index 00000000..12e6640c
--- /dev/null
+++ b/bydbbackup/pkg/timedir.go
@@ -0,0 +1,239 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package pkg
+
+import (
+       "context"
+       "errors"
+       "fmt"
+       "os"
+       "path/filepath"
+       "sort"
+       "strings"
+
+       "github.com/spf13/cobra"
+)
+
+const filePerm = 0o600
+
+// NewTimeDirCommand creates a new time-dir command.
+func NewTimeDirCommand() *cobra.Command {
+       rootCmd := &cobra.Command{
+               Use:   "timedir",
+               Short: "Manage 'time-dir' files for backup and restoration",
+       }
+
+       // Register subcommands.
+       rootCmd.AddCommand(newListCmd())
+       rootCmd.AddCommand(newCreateCmd())
+       rootCmd.AddCommand(newReadCmd())
+       rootCmd.AddCommand(newDeleteCmd())
+
+       return rootCmd
+}
+
+func newListCmd() *cobra.Command {
+       var dest string
+       var prefix string
+
+       cmd := &cobra.Command{
+               Use:   "list",
+               Short: "List remote time directories in the remote file system",
+               RunE: func(cmd *cobra.Command, _ []string) error {
+                       if dest == "" {
+                               return errors.New("--dest is required")
+                       }
+
+                       // Create a remote file system client using the 
provided URL.
+                       fs, err := newFS(dest)
+                       if err != nil {
+                               return err
+                       }
+
+                       ctx := context.Background()
+                       // List files starting with an optional prefix.
+                       files, err := fs.List(ctx, prefix)
+                       if err != nil {
+                               return fmt.Errorf("failed to list remote files: 
%w", err)
+                       }
+
+                       // Extract unique top-level directories (which are our 
time directories).
+                       dirSet := make(map[string]bool)
+                       for _, f := range files {
+                               // Normalize to forward-slash separators.
+                               normalized := filepath.ToSlash(f)
+                               parts := strings.SplitN(normalized, "/", 2)
+                               if len(parts) > 0 && parts[0] != "" {
+                                       dirSet[parts[0]] = true
+                               }
+                       }
+                       var dirs []string
+                       for d := range dirSet {
+                               dirs = append(dirs, d)
+                       }
+                       sort.Strings(dirs)
+                       fmt.Fprintln(cmd.OutOrStdout(), "Remote time 
directories:")
+                       for _, d := range dirs {
+                               fmt.Fprintln(cmd.OutOrStdout(), d)
+                       }
+                       return nil
+               },
+       }
+
+       cmd.Flags().StringVar(&dest, "dest", "", "Destination URL of the remote 
file system (e.g., file:///backups)")
+       cmd.Flags().StringVar(&prefix, "prefix", "", "Prefix in the remote file 
system to list")
+       return cmd
+}
+
+func newCreateCmd() *cobra.Command {
+       var catalogs []string
+       var streamRoot, measureRoot, propertyRoot string
+       var timeStyle string
+
+       cmd := &cobra.Command{
+               Use:   "create [time]",
+               Short: "Create local 'time-dir' file(s) in catalog directories",
+               RunE: func(cmd *cobra.Command, args []string) error {
+                       if len(catalogs) == 0 {
+                               catalogs = []string{"stream", "measure", 
"property"}
+                       }
+                       var tValue string
+                       if len(args) > 0 {
+                               tValue = strings.TrimSpace(args[0])
+                       } else {
+                               tValue = getTimeDir(timeStyle)
+                       }
+
+                       for _, cat := range catalogs {
+                               filePath, err := getLocalTimeDirFilePath(cat, 
streamRoot, measureRoot, propertyRoot)
+                               if err != nil {
+                                       fmt.Fprintf(cmd.OutOrStdout(), 
"Skipping unknown catalog '%s': %v\n", cat, err)
+                                       continue
+                               }
+                               err = os.WriteFile(filePath, []byte(tValue), 
filePerm)
+                               if err != nil {
+                                       fmt.Fprintf(cmd.ErrOrStderr(), "Failed 
to create time-dir for catalog '%s' at %s: %v\n", cat, filePath, err)
+                               } else {
+                                       fmt.Fprintf(cmd.OutOrStdout(), "Created 
time-dir for catalog '%s' at %s with content '%s'\n", cat, filePath, tValue)
+                               }
+                       }
+                       return nil
+               },
+       }
+
+       cmd.Flags().StringSliceVar(&catalogs, "catalog", nil, "Catalog(s) to 
create time-dir file (e.g., stream, measure, property). Defaults to all if not 
provided.")
+       cmd.Flags().StringVar(&streamRoot, "stream-root", "/tmp", "Local root 
directory for stream catalog")
+       cmd.Flags().StringVar(&measureRoot, "measure-root", "/tmp", "Local root 
directory for measure catalog")
+       cmd.Flags().StringVar(&propertyRoot, "property-root", "/tmp", "Local 
root directory for property catalog")
+       cmd.Flags().StringVar(&timeStyle, "time-style", "daily", "Time style to 
compute time string (daily or hourly)")
+       return cmd
+}
+
+func newReadCmd() *cobra.Command {
+       var catalogs []string
+       var streamRoot, measureRoot, propertyRoot string
+
+       cmd := &cobra.Command{
+               Use:   "read",
+               Short: "Read local 'time-dir' file(s) from catalog directories",
+               RunE: func(cmd *cobra.Command, _ []string) error {
+                       // If no catalog is specified, process all three.
+                       if len(catalogs) == 0 {
+                               catalogs = []string{"stream", "measure", 
"property"}
+                       }
+
+                       for _, cat := range catalogs {
+                               filePath, err := getLocalTimeDirFilePath(cat, 
streamRoot, measureRoot, propertyRoot)
+                               if err != nil {
+                                       fmt.Fprintf(cmd.OutOrStdout(), 
"Skipping unknown catalog '%s': %v\n", cat, err)
+                                       continue
+                               }
+                               data, err := os.ReadFile(filePath)
+                               if err != nil {
+                                       if os.IsNotExist(err) {
+                                               fmt.Fprintf(cmd.ErrOrStderr(), 
"Catalog '%s': time-dir file not found at %s\n", cat, filePath)
+                                       } else {
+                                               fmt.Fprintf(cmd.ErrOrStderr(), 
"Catalog '%s': failed to read time-dir file at %s: %v\n", cat, filePath, err)
+                                       }
+                               } else {
+                                       fmt.Fprintf(cmd.OutOrStdout(), "Catalog 
'%s': time-dir content: '%s'\n", cat, strings.TrimSpace(string(data)))
+                               }
+                       }
+                       return nil
+               },
+       }
+
+       cmd.Flags().StringSliceVar(&catalogs, "catalog", nil, "Catalog(s) to 
read time-dir file (e.g., stream, measure, property). Defaults to all if not 
provided.")
+       cmd.Flags().StringVar(&streamRoot, "stream-root", "/tmp", "Local root 
directory for stream catalog")
+       cmd.Flags().StringVar(&measureRoot, "measure-root", "/tmp", "Local root 
directory for measure catalog")
+       cmd.Flags().StringVar(&propertyRoot, "property-root", "/tmp", "Local 
root directory for property catalog")
+       return cmd
+}
+
+func newDeleteCmd() *cobra.Command {
+       var catalogs []string
+       var streamRoot, measureRoot, propertyRoot string
+
+       cmd := &cobra.Command{
+               Use:   "delete",
+               Short: "Delete local 'time-dir' file(s) from catalog 
directories",
+               RunE: func(cmd *cobra.Command, _ []string) error {
+                       // If no catalog is specified, process all three.
+                       if len(catalogs) == 0 {
+                               catalogs = []string{"stream", "measure", 
"property"}
+                       }
+                       for _, cat := range catalogs {
+                               filePath, err := getLocalTimeDirFilePath(cat, 
streamRoot, measureRoot, propertyRoot)
+                               if err != nil {
+                                       fmt.Fprintf(cmd.ErrOrStderr(), 
"Skipping unknown catalog '%s': %v\n", cat, err)
+                                       continue
+                               }
+                               err = os.Remove(filePath)
+                               if err != nil {
+                                       if os.IsNotExist(err) {
+                                               fmt.Fprintf(cmd.ErrOrStderr(), 
"Catalog '%s': time-dir file not found at %s\n", cat, filePath)
+                                       } else {
+                                               fmt.Fprintf(cmd.ErrOrStderr(), 
"Failed to delete time-dir for catalog '%s' at %s: %v\n", cat, filePath, err)
+                                       }
+                               } else {
+                                       fmt.Fprintf(cmd.OutOrStdout(), "Deleted 
time-dir for catalog '%s' at %s\n", cat, filePath)
+                               }
+                       }
+                       return nil
+               },
+       }
+
+       cmd.Flags().StringSliceVar(&catalogs, "catalog", nil, "Catalog(s) to 
delete time-dir file (e.g., stream, measure, property). Defaults to all if not 
provided.")
+       cmd.Flags().StringVar(&streamRoot, "stream-root", "/tmp", "Local root 
directory for stream catalog")
+       cmd.Flags().StringVar(&measureRoot, "measure-root", "/tmp", "Local root 
directory for measure catalog")
+       cmd.Flags().StringVar(&propertyRoot, "property-root", "/tmp", "Local 
root directory for property catalog")
+       return cmd
+}
+
+func getLocalTimeDirFilePath(catalog, streamRoot, measureRoot, propertyRoot 
string) (string, error) {
+       switch strings.ToLower(catalog) {
+       case "stream":
+               return filepath.Join(streamRoot, fmt.Sprintf("%s-time-dir", 
"stream")), nil
+       case "measure":
+               return filepath.Join(measureRoot, fmt.Sprintf("%s-time-dir", 
"measure")), nil
+       case "property":
+               return filepath.Join(propertyRoot, fmt.Sprintf("%s-time-dir", 
"property")), nil
+       default:
+               return "", fmt.Errorf("unknown catalog type: %s", catalog)
+       }
+}
diff --git a/bydbbackup/pkg/timedir_test.go b/bydbbackup/pkg/timedir_test.go
new file mode 100644
index 00000000..40bef667
--- /dev/null
+++ b/bydbbackup/pkg/timedir_test.go
@@ -0,0 +1,145 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package pkg
+
+import (
+       "bytes"
+       "fmt"
+       "os"
+       "path/filepath"
+       "strings"
+       "testing"
+)
+
+func TestNewCreateCmd(t *testing.T) {
+       baseDir := t.TempDir()
+       streamDir := filepath.Join(baseDir, "stream")
+       measureDir := filepath.Join(baseDir, "measure")
+       propertyDir := filepath.Join(baseDir, "property")
+       if err := os.MkdirAll(streamDir, dirPerm); err != nil {
+               t.Fatalf("Failed to create stream dir: %v", err)
+       }
+       if err := os.MkdirAll(measureDir, dirPerm); err != nil {
+               t.Fatalf("Failed to create measure dir: %v", err)
+       }
+       if err := os.MkdirAll(propertyDir, dirPerm); err != nil {
+               t.Fatalf("Failed to create property dir: %v", err)
+       }
+
+       content := "2023-10-12"
+       cmd := newCreateCmd()
+       cmd.SetArgs([]string{
+               content,
+               "--stream-root", streamDir,
+               "--measure-root", measureDir,
+               "--property-root", propertyDir,
+       })
+       if err := cmd.Execute(); err != nil {
+               t.Fatalf("newCreateCmd.Execute() error: %v", err)
+       }
+
+       for _, dir := range []struct {
+               name string
+               root string
+       }{
+               {"stream", streamDir},
+               {"measure", measureDir},
+               {"property", propertyDir},
+       } {
+               fp := filepath.Join(dir.root, fmt.Sprintf("%s-time-dir", 
dir.name))
+               data, err := os.ReadFile(fp)
+               if err != nil {
+                       t.Errorf("Catalog '%s': expected file at %s, error: 
%v", dir.name, fp, err)
+                       continue
+               }
+               if strings.TrimSpace(string(data)) != content {
+                       t.Errorf("Catalog '%s': expected content '%s', got 
'%s'", dir.name, content, string(data))
+               }
+       }
+}
+
+func TestNewReadCmd(t *testing.T) {
+       baseDir := t.TempDir()
+       streamDir := filepath.Join(baseDir, "stream")
+       measureDir := filepath.Join(baseDir, "measure")
+       propertyDir := filepath.Join(baseDir, "property")
+       for _, dir := range []string{streamDir, measureDir, propertyDir} {
+               if err := os.MkdirAll(dir, dirPerm); err != nil {
+                       t.Fatalf("Failed to create dir %s: %v", dir, err)
+               }
+               catalog := filepath.Base(dir)
+               fp := filepath.Join(dir, fmt.Sprintf("%s-time-dir", catalog))
+               if err := os.WriteFile(fp, []byte("dummy-time"), filePerm); err 
!= nil {
+                       t.Fatalf("Failed to write file %s: %v", fp, err)
+               }
+       }
+
+       cmd := newReadCmd()
+       var outBuf bytes.Buffer
+       cmd.SetOut(&outBuf)
+       cmd.SetArgs([]string{
+               "--stream-root", streamDir,
+               "--measure-root", measureDir,
+               "--property-root", propertyDir,
+       })
+       if err := cmd.Execute(); err != nil {
+               t.Fatalf("newReadCmd.Execute() error: %v", err)
+       }
+
+       output := outBuf.String()
+       for _, catalog := range []string{"stream", "measure", "property"} {
+               if !strings.Contains(output, "Catalog '"+catalog+"'") {
+                       t.Errorf("Output missing expected catalog '%s'", 
catalog)
+               }
+       }
+}
+
+func TestNewDeleteCmd(t *testing.T) {
+       baseDir := t.TempDir()
+       streamDir := filepath.Join(baseDir, "stream")
+       measureDir := filepath.Join(baseDir, "measure")
+       propertyDir := filepath.Join(baseDir, "property")
+       for _, dir := range []string{streamDir, measureDir, propertyDir} {
+               if err := os.MkdirAll(dir, dirPerm); err != nil {
+                       t.Fatalf("Failed to create dir %s: %v", dir, err)
+               }
+               catalog := filepath.Base(dir)
+               fp := filepath.Join(dir, fmt.Sprintf("%s-time-dir", catalog))
+               if err := os.WriteFile(fp, []byte("to-be-deleted"), 0o600); err 
!= nil {
+                       t.Fatalf("Failed to write file %s: %v", fp, err)
+               }
+       }
+
+       cmd := newDeleteCmd()
+       cmd.SetArgs([]string{
+               "--stream-root", streamDir,
+               "--measure-root", measureDir,
+               "--property-root", propertyDir,
+       })
+       if err := cmd.Execute(); err != nil {
+               t.Fatalf("newDeleteCmd.Execute() error: %v", err)
+       }
+
+       for _, dir := range []string{streamDir, measureDir, propertyDir} {
+               catalog := filepath.Base(dir)
+               fp := filepath.Join(dir, fmt.Sprintf("%s-time-dir", catalog))
+               if _, err := os.Stat(fp); !os.IsNotExist(err) {
+                       t.Errorf("Expected file at %s to be deleted, but it 
exists", fp)
+               }
+       }
+}
diff --git a/docs/menu.yml b/docs/menu.yml
index 230851ee..26a80f29 100644
--- a/docs/menu.yml
+++ b/docs/menu.yml
@@ -129,6 +129,8 @@ catalog:
         path: "/operation/security"
       - name: "Backup"
         path: "/operation/backup"
+      - name: "Restore"
+        path: "/operation/restore"
   - name: "File Format"
     catalog:
       - name: "v1.1.0"
diff --git a/docs/operation/backup.md b/docs/operation/backup.md
index fb5eaf97..b2dd043c 100644
--- a/docs/operation/backup.md
+++ b/docs/operation/backup.md
@@ -34,7 +34,7 @@ To perform a one-time backup, run the backup command with the 
required flags. At
 
 **Example Command:**
 ```bash
-./bydbbackup backup --dest "file:///backups"
+./backup --dest "file:///backups"
 ```
 
 ### Scheduled Backup
@@ -47,7 +47,7 @@ To enable periodic backups, provide the `--schedule` flag 
with a schedule style
 **Example Command:**
 
 ```bash
-./bydbbackup backup --dest "file:///backups" --schedule daily --time-style 
daily
+./backup --dest "file:///backups" --schedule daily --time-style daily
 ```
 
 When a schedule is provided, the tool:
diff --git a/docs/operation/restore.md b/docs/operation/restore.md
new file mode 100644
index 00000000..89ef4c26
--- /dev/null
+++ b/docs/operation/restore.md
@@ -0,0 +1,135 @@
+# Restore BanyanDB Data Using the Restore Tool
+
+This document explains how to use the backup and restore command line tools 
for BanyanDB.
+
+## Overview
+
+- **Backup Tool:**  
+  Backs up snapshot data from the BanyanDB data node to a remote storage 
destination. Files are organized under a time-based directory (e.g., daily or 
hourly) computed via a configurable time style.
+
+- **Restore Tool:**  
+  Reads the *timedir* files from local catalog directories and uses the 
timestamp within to determine which remote backup snapshot should be applied. 
Once restoration succeeds, it removes all *timedir* files so the restore 
cannot be accidentally re-triggered (for example, after an unexpected pod or 
service restart).
+
+- **Timedir Utility:**  
+  Provides commands to create, list, read, and delete *time-dir* marker files 
for each catalog (e.g., _stream_, _measure_, and _property_).
+
+## Restore Workflow
+
+In an on-prem deployment, the BanyanDB data node runs on a dedicated server 
(or virtual machine), and backup/restore operations are performed using the 
provided command line tool binaries. This mode is ideal for traditional 
environments where the tools are run manually or scheduled via system cron jobs.
+
+### Backup Tool
+
+The `backup` binary is used to create backups of BanyanDB snapshots. It 
connects to the gRPC endpoint of the data node to obtain snapshots and then 
uploads the backed-up data (organized by timestamp) to remote storage.
+
+#### Example Backup Command
+
+```sh
+backup run \
+  --grpc-addr 127.0.0.1:17912 \
+  --stream-root-path /data \
+  --measure-root-path /data \
+  --property-root-path /data \
+  --dest file:///backups \
+  --time-style daily
+```
+
+**Notes:**
+
+- `--grpc-addr`: gRPC address for the data node.
+- `--stream-root-path`, `--measure-root-path`, `--property-root-path`: Local 
directories for the respective catalogs.
+- `--dest`: Remote destination URL where backups will be stored (e.g., local 
filesystem path with the `file://` scheme).
+- `--time-style`: Defines the time directory style, such as `daily` or 
`hourly`.
+
+### Timedir Utilities
+
+The `timedir` tool provides commands to create, read, list, and delete marker 
files (time directories) that indicate the backup snapshot’s timestamp. These 
marker files are used by the restore process to identify which snapshot to 
restore.
+
+#### List Remote Timedir Files
+
+Before creating a new local timedir file for restoration, use the timedir list 
command to inspect the remote backup destination. This lets you view all 
available timedir names and select the expected one.
+
+```sh
+restore timedir list \
+  --dest file:///backups
+```
+
+Example output:
+
+```sh
+Remote time directories:
+2025-02-12
+2025-02-13
+2025-02-14
+```
+
+Examine the list and choose the appropriate timedir (for example, 2025-02-12).
+
+#### Creating Timedir Files
+
+Before undertaking a pod or service restart, an administrator may create 
timedir marker files to capture the current backup state:
+
+```sh
+restore timedir create 2025-02-12 \
+  --stream-root /data \
+  --measure-root /data \
+  --property-root /data
+```
+
+This command writes the specified timestamp (e.g., 2025-02-12) into files 
named `stream-time-dir`, `measure-time-dir`, and `property-time-dir` in the 
designated root directories.
+
+#### Verifying Timedir Files
+
+Ensure that the timedir files have been created with the correct content by 
reading them:
+
+```sh
+restore timedir read \
+  --stream-root /data \
+  --measure-root /data \
+  --property-root /data
+```
+
+The output should display the timedir content for each catalog.
+
+### Restore Tool
+
+**Important**: Before triggering a restore, ensure the BanyanDB data node is 
completely stopped.
+
+The `restore` binary restores data from the remote backup storage onto the 
local machine. It examines the timedir marker files to determine which backup 
snapshot to apply, then synchronizes the local data directories accordingly. 
After a successful restore, the tool removes the timedir files to prevent 
unintended repeated restores.
+
+#### Example Restore Command
+
+```sh
+restore run \
+  --source file:///backups \
+  --stream-root-path /data \
+  --measure-root-path /data \
+  --property-root-path /data
+```
+
+**Key Points:**
+
+- The tool reads the timedir files (e.g., `stream-time-dir`) to fetch the 
appropriate timestamp.
+- Local data is compared with the remote backup snapshot; orphaned files in 
local directories are removed.
+- Upon success, the timedir marker files are deleted to ensure a clean 
recovery state.
+
+## Kubernetes Deployment
+
+For environments running BanyanDB in Kubernetes, the backup and restore tools 
can be integrated as sidecar containers. A common pattern is to use an init 
container for restoring data and a sidecar to manage backup and timedir 
operations.
+
+### Kubernetes Sidecar Deployment Model
+
+1. **Sidecar Container:**
+   - The sidecar runs the backup and timedir command binaries alongside the 
main BanyanDB container.
+   - Administrators can attach to the sidecar (using `kubectl exec`) to 
execute timedir operations.
+   - The sidecar is responsible for backing up snapshot data and maintaining 
timedir files.
+
+2. **Init Container for Restore:**
+   - When a pod is (re)created, an init container runs before the main 
container starts.
+   - The init container uses the `restore` binary to read the timedir files 
from shared volumes (mounted from a persistent volume or hostPath).
+   - It restores the data from the remote backup store by processing the 
timedir markers, and then deletes the marker files to avoid accidental 
re-triggering.
+
+3. **Shared Volumes:**
+   - The timedir files and data directories are shared between the init 
container, sidecar, and main container (data node) via Kubernetes volumes.
+   - This shared setup ensures that the restore process can correctly 
synchronize the data state before the server starts.
+
+By following the guidelines and examples provided in this document, 
administrators can choose the approach that best fits their deployment 
environment—whether on-prem or in Kubernetes—to ensure reliable data backup and 
restoration for BanyanDB.
diff --git a/pkg/test/helpers/context.go b/pkg/test/helpers/context.go
index 20d1d37a..c0c4523c 100644
--- a/pkg/test/helpers/context.go
+++ b/pkg/test/helpers/context.go
@@ -71,8 +71,9 @@ func TimeRange(args Args, shardContext SharedContext) 
*modelv1.TimeRange {
        }
 }
 
-// SnapshotSharedContext is the context shared between test cases in the 
snapshot testing.
-type SnapshotSharedContext struct {
+// BackupSharedContext is the context shared between test cases in the 
snapshot testing.
+type BackupSharedContext struct {
+       DataAddr   string
        Connection *grpclib.ClientConn
        RootDir    string
 }
diff --git a/test/cases/backup/all.go b/test/cases/backup/all.go
new file mode 100644
index 00000000..b155c9bd
--- /dev/null
+++ b/test/cases/backup/all.go
@@ -0,0 +1,176 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package backup_test
+
+import (
+       "bytes"
+       "os"
+       "path/filepath"
+       "strings"
+
+       "github.com/onsi/ginkgo/v2"
+       "github.com/onsi/gomega"
+
+       "github.com/apache/skywalking-banyandb/bydbbackup/pkg"
+)
+
// The suite below exercises the full backup -> list -> create-timedir ->
// restore round trip using the command objects from bydbbackup/pkg and the
// shared test context (data-node address and catalog root directory).
var _ = ginkgo.Describe("Backup All", func() {
	_ = ginkgo.Describe("Backup and Restore Integration", func() {
		ginkgo.It("should backup, create timedir and restore data correctly", func() {
			ginkgo.By("Backup data to a remote destination")
			// The "remote" destination is a temporary local directory
			// addressed through the file:// scheme.
			destDir, err := os.MkdirTemp("", "backup-restore-dest")
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
			defer os.RemoveAll(destDir)
			destURL := "file://" + destDir

			backupCmd := pkg.NewBackupCommand()
			backupCmd.SetArgs([]string{
				"--grpc-addr", SharedContext.DataAddr,
				"--stream-root-path", SharedContext.RootDir,
				"--measure-root-path", SharedContext.RootDir,
				"--property-root-path", SharedContext.RootDir,
				"--dest", destURL,
				"--time-style", "daily",
			})
			err = backupCmd.Execute()
			gomega.Expect(err).NotTo(gomega.HaveOccurred())

			// The backup command creates one time-named directory under
			// the destination; capture the first one found.
			var backupTimeDir string
			entries, err := os.ReadDir(destDir)
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
			for _, entry := range entries {
				if entry.IsDir() {
					backupTimeDir = entry.Name()
					break
				}
			}
			gomega.Expect(backupTimeDir).NotTo(gomega.BeEmpty())

			ginkgo.By("List remote time directories")
			// newCatalogDir plays the role of a fresh node to restore into.
			newCatalogDir, err := os.MkdirTemp("", "backup-restore-new-catalog")
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
			defer os.RemoveAll(newCatalogDir)

			listCmd := pkg.NewTimeDirCommand()
			listCmd.SetArgs([]string{"list", "--dest", destURL})
			listOut := &bytes.Buffer{}
			listCmd.SetOut(listOut)
			listCmd.SetErr(listOut)
			err = listCmd.Execute()
			gomega.Expect(err).NotTo(gomega.HaveOccurred())

			// Parse the list command output.
			// Example expected output:
			//   Remote time directories:
			//   2025-02-12
			outputLines := strings.Split(listOut.String(), "\n")
			var latestTimedir string
			for _, line := range outputLines {
				trim := strings.TrimSpace(line)
				if trim != "" && !strings.HasPrefix(trim, "Remote time directories:") {
					latestTimedir = trim
					break
				}
			}
			// The listed directory must match the one the backup created.
			gomega.Expect(latestTimedir).To(gomega.Equal(backupTimeDir))

			ginkgo.By("Create timedir in new catalog's root path")
			createCmd := pkg.NewTimeDirCommand()
			createCmd.SetArgs([]string{
				"create",
				"--stream-root", newCatalogDir,
				"--measure-root", newCatalogDir,
				"--property-root", newCatalogDir,
				latestTimedir,
			})
			createOut := &bytes.Buffer{}
			createCmd.SetOut(createOut)
			createCmd.SetErr(createOut)
			err = createCmd.Execute()
			gomega.Expect(err).NotTo(gomega.HaveOccurred())

			ginkgo.By("Read timedir from new catalog's root path")
			readCmd := pkg.NewTimeDirCommand()
			readCmd.SetArgs([]string{
				"read",
				"--stream-root", newCatalogDir,
				"--measure-root", newCatalogDir,
				"--property-root", newCatalogDir,
			})
			readOut := &bytes.Buffer{}
			readCmd.SetOut(readOut)
			readCmd.SetErr(readOut)
			err = readCmd.Execute()
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
			readResult := readOut.String()
			gomega.Expect(readResult).To(gomega.ContainSubstring(latestTimedir))

			ginkgo.By("Create random files in the data directories of the new catalog")
			// These files are not present in the remote backup, so a
			// correct restore must remove them.
			catalogs := []string{"stream", "measure", "property"}
			for _, cat := range catalogs {
				dataDir := filepath.Join(newCatalogDir, cat, "data")
				err = os.MkdirAll(dataDir, 0o755)
				gomega.Expect(err).NotTo(gomega.HaveOccurred())
				randomFile := filepath.Join(dataDir, "random.txt")
				err = os.WriteFile(randomFile, []byte("some random data"), 0o600)
				gomega.Expect(err).NotTo(gomega.HaveOccurred())
			}

			ginkgo.By("Restore data from the remote destination")
			restoreCmd := pkg.NewRestoreCommand()
			restoreCmd.SetArgs([]string{
				"run",
				"--source", destURL,
				"--stream-root-path", newCatalogDir,
				"--measure-root-path", newCatalogDir,
				"--property-root-path", newCatalogDir,
			})
			restoreOut := &bytes.Buffer{}
			restoreCmd.SetOut(restoreOut)
			restoreCmd.SetErr(restoreOut)
			err = restoreCmd.Execute()
			gomega.Expect(err).NotTo(gomega.HaveOccurred())

			// Verify that the random files are removed and the data from remote backup is restored.
			for _, cat := range catalogs {
				// The extra file should have been removed.
				randomFile := filepath.Join(newCatalogDir, cat, "data", "random.txt")
				_, err = os.Stat(randomFile)
				gomega.Expect(os.IsNotExist(err)).To(gomega.BeTrue())

				// Verify that the restored files exist.
				// The remote backup data for each catalog is under: destDir/<latestTimedir>/<catalog>
				remoteDataDir := filepath.Join(destDir, latestTimedir, cat, "data")
				restoredDataDir := filepath.Join(newCatalogDir, cat, "data")
				var remoteEntries, restoredEntries []os.DirEntry
				remoteEntries, err = os.ReadDir(remoteDataDir)
				gomega.Expect(err).NotTo(gomega.HaveOccurred())
				restoredEntries, err = os.ReadDir(restoredDataDir)
				gomega.Expect(err).NotTo(gomega.HaveOccurred())
				// Entry counts are compared; content equality is assumed to be
				// covered by the restore unit tests — TODO confirm.
				gomega.Expect(len(restoredEntries)).To(gomega.Equal(len(remoteEntries)))
			}

			// Verify that the timedir file is removed from the new catalog's root path (after a successful restore).
			for _, cat := range catalogs {
				timedirFile := filepath.Join(newCatalogDir, cat+"-time-dir")
				_, err = os.Stat(timedirFile)
				gomega.Expect(os.IsNotExist(err)).To(gomega.BeTrue())
			}
		})
	})
})
diff --git a/test/cases/snapshot/snapshot.go b/test/cases/backup/backup.go
similarity index 58%
rename from test/cases/snapshot/snapshot.go
rename to test/cases/backup/backup.go
index bdb0149e..f78a5999 100644
--- a/test/cases/snapshot/snapshot.go
+++ b/test/cases/backup/backup.go
@@ -15,53 +15,61 @@
 // specific language governing permissions and limitations
 // under the License.
 
-// Package snapshot_test contains integration test cases of the taking the snapshots.
-package snapshot_test
+// Package backup_test provides the test cases for the backup command-line tool.
+package backup_test
 
 import (
        "context"
+       "os"
        "path/filepath"
+       "time"
 
        "github.com/onsi/ginkgo/v2"
        "github.com/onsi/gomega"
 
        commonv1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/common/v1"
        databasev1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/database/v1"
+       "github.com/apache/skywalking-banyandb/bydbbackup/pkg"
        "github.com/apache/skywalking-banyandb/pkg/fs"
        "github.com/apache/skywalking-banyandb/pkg/test/helpers"
 )
 
-// SharedContext is the shared context for the snapshot test cases.
+// SharedContext is the shared context for the backup test cases.
-var SharedContext helpers.SnapshotSharedContext
+var SharedContext helpers.BackupSharedContext
 
-var _ = ginkgo.Describe("Snapshot", func() {
+var _ = ginkgo.Describe("Backup", func() {
        lfs := fs.NewLocalFileSystem()
 
+       verifySnapshot := func(snpName string, entries []fs.DirEntry) int {
+               for _, entry := range entries {
+                       if entry.Name() == snpName {
+                               return len(entries)
+                       }
+               }
+               ginkgo.Fail("snapshot not found")
+               return 0
+       }
+
        ginkgo.It("should take a snapshot", func() {
                client := databasev1.NewSnapshotServiceClient(SharedContext.Connection)
                resp, err := client.Snapshot(context.Background(), &databasev1.SnapshotRequest{})
                gomega.Expect(err).NotTo(gomega.HaveOccurred())
                gomega.Expect(resp).NotTo(gomega.BeNil())
                gomega.Expect(resp.Snapshots).To(gomega.HaveLen(3))
+               catalogNumMap := make(map[commonv1.Catalog]int)
                for _, snp := range resp.Snapshots {
+                       var snpDir string
                        if snp.Catalog == commonv1.Catalog_CATALOG_MEASURE {
-                               measureSnapshotDir := filepath.Join(SharedContext.RootDir, "measure", "snapshots")
-                               entries := lfs.ReadDir(measureSnapshotDir)
-                               gomega.Expect(entries).To(gomega.HaveLen(1))
-                               gomega.Expect(entries[0].Name()).To(gomega.Equal(snp.Name))
+                               snpDir = filepath.Join(SharedContext.RootDir, "measure", "snapshots")
                        } else if snp.Catalog == commonv1.Catalog_CATALOG_STREAM {
-                               streamSnapshotDir := filepath.Join(SharedContext.RootDir, "stream", "snapshots")
-                               entries := lfs.ReadDir(streamSnapshotDir)
-                               gomega.Expect(entries).To(gomega.HaveLen(1))
-                               gomega.Expect(entries[0].Name()).To(gomega.Equal(snp.Name))
+                               snpDir = filepath.Join(SharedContext.RootDir, "stream", "snapshots")
                        } else if snp.Catalog == commonv1.Catalog_CATALOG_PROPERTY {
-                               propertySnapshotDir := filepath.Join(SharedContext.RootDir, "property", "snapshots")
-                               entries := lfs.ReadDir(propertySnapshotDir)
-                               gomega.Expect(entries).To(gomega.HaveLen(1))
-                               gomega.Expect(entries[0].Name()).To(gomega.Equal(snp.Name))
+                               snpDir = filepath.Join(SharedContext.RootDir, "property", "snapshots")
                        } else {
                                ginkgo.Fail("unexpected snapshot catalog")
                        }
+                       entries := lfs.ReadDir(snpDir)
+                       catalogNumMap[snp.GetCatalog()] = verifySnapshot(snp.Name, entries)
                }
                resp, err = client.Snapshot(context.Background(), &databasev1.SnapshotRequest{})
                gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -71,18 +79,44 @@ var _ = ginkgo.Describe("Snapshot", func() {
                        if snp.Catalog == commonv1.Catalog_CATALOG_MEASURE {
                                measureSnapshotDir := filepath.Join(SharedContext.RootDir, "measure", "snapshots")
                                entries := lfs.ReadDir(measureSnapshotDir)
-                               gomega.Expect(entries).To(gomega.HaveLen(2))
+                               gomega.Expect(entries).To(gomega.HaveLen(catalogNumMap[snp.GetCatalog()] + 1))
                        } else if snp.Catalog == commonv1.Catalog_CATALOG_STREAM {
                                streamSnapshotDir := filepath.Join(SharedContext.RootDir, "stream", "snapshots")
                                entries := lfs.ReadDir(streamSnapshotDir)
-                               gomega.Expect(entries).To(gomega.HaveLen(2))
+                               gomega.Expect(entries).To(gomega.HaveLen(catalogNumMap[snp.GetCatalog()] + 1))
                        } else if snp.Catalog == commonv1.Catalog_CATALOG_PROPERTY {
                                propertySnapshotDir := filepath.Join(SharedContext.RootDir, "property", "snapshots")
                                entries := lfs.ReadDir(propertySnapshotDir)
-                               gomega.Expect(entries).To(gomega.HaveLen(2))
+                               gomega.Expect(entries).To(gomega.HaveLen(catalogNumMap[snp.GetCatalog()] + 1))
                        } else {
                                ginkgo.Fail("unexpected snapshot catalog")
                        }
                }
        })
+
+       ginkgo.It("should backup direct test files in root paths to remote destination", func() {
+               destDir, err := os.MkdirTemp("", "backup-test")
+               gomega.Expect(err).NotTo(gomega.HaveOccurred())
+               defer os.RemoveAll(destDir)
+               destURL := "file://" + destDir
+
+               backupCmd := pkg.NewBackupCommand()
+               backupCmd.SetArgs([]string{
+                       "--grpc-addr", SharedContext.DataAddr,
+                       "--stream-root-path", SharedContext.RootDir,
+                       "--measure-root-path", SharedContext.RootDir,
+                       "--property-root-path", SharedContext.RootDir,
+                       "--dest", destURL,
+                       "--time-style", "daily",
+               })
+               err = backupCmd.Execute()
+               gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+               timeDir := time.Now().Format("2006-01-02")
+               entries := lfs.ReadDir(filepath.Join(destDir, timeDir))
+               gomega.Expect(entries).To(gomega.HaveLen(3))
+               for _, entry := range entries {
+                       gomega.Expect(entry.Name()).To(gomega.BeElementOf([]string{"stream", "measure", "property"}))
+               }
+       })
 })
diff --git a/test/integration/distributed/snapshot/snapshot_suite_test.go 
b/test/integration/distributed/backup/backup_suite_test.go
similarity index 95%
rename from test/integration/distributed/snapshot/snapshot_suite_test.go
rename to test/integration/distributed/backup/backup_suite_test.go
index bfd4c158..fe2e748a 100644
--- a/test/integration/distributed/snapshot/snapshot_suite_test.go
+++ b/test/integration/distributed/backup/backup_suite_test.go
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package snapshot_test
+package backup_test
 
 import (
        "context"
@@ -48,12 +48,12 @@ import (
        test_stream "github.com/apache/skywalking-banyandb/pkg/test/stream"
        "github.com/apache/skywalking-banyandb/pkg/timestamp"
        test_cases "github.com/apache/skywalking-banyandb/test/cases"
-       casessnapshot "github.com/apache/skywalking-banyandb/test/cases/snapshot"
+       casesbackup "github.com/apache/skywalking-banyandb/test/cases/backup"
 )
 
-func TestSnapshot(t *testing.T) {
+func TestBackup(t *testing.T) {
        RegisterFailHandler(Fail)
-       RunSpecs(t, "Distributed Snapshot Suite")
+       RunSpecs(t, "Distributed Backup Suite")
 }
 
 var (
@@ -61,6 +61,7 @@ var (
        dir        string
        deferFunc  func()
        goods      []gleak.Goroutine
+       dataAddr   string
 )
 
 var _ = SynchronizedBeforeSuite(func() []byte {
@@ -92,7 +93,6 @@ var _ = SynchronizedBeforeSuite(func() []byte {
        test_stream.PreloadSchema(ctx, schemaRegistry)
        test_measure.PreloadSchema(ctx, schemaRegistry)
        By("Starting data node 0")
-       var dataAddr string
        var closeDataNode0 func()
        dataAddr, dir, closeDataNode0 = setup.DataNodeWithAddrAndDir(ep)
        By("Starting liaison node")
@@ -146,7 +146,8 @@ var _ = SynchronizedBeforeSuite(func() []byte {
        connection, err = grpchelper.Conn(string(address), 10*time.Second,
                grpc.WithTransportCredentials(insecure.NewCredentials()))
        Expect(err).NotTo(HaveOccurred())
-       casessnapshot.SharedContext = helpers.SnapshotSharedContext{
+       casesbackup.SharedContext = helpers.BackupSharedContext{
+               DataAddr:   dataAddr,
                Connection: connection,
                RootDir:    dir,
        }
diff --git a/test/integration/standalone/snapshot/snapshot_suite_test.go 
b/test/integration/standalone/backup/backup_suite_test.go
similarity index 93%
rename from test/integration/standalone/snapshot/snapshot_suite_test.go
rename to test/integration/standalone/backup/backup_suite_test.go
index 4d2eb310..c8f05fe7 100644
--- a/test/integration/standalone/snapshot/snapshot_suite_test.go
+++ b/test/integration/standalone/backup/backup_suite_test.go
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package snapshot_test
+package backup_test
 
 import (
        "context"
@@ -42,13 +42,13 @@ import (
        "github.com/apache/skywalking-banyandb/pkg/test/setup"
        "github.com/apache/skywalking-banyandb/pkg/timestamp"
        test_cases "github.com/apache/skywalking-banyandb/test/cases"
-       casessnapshot "github.com/apache/skywalking-banyandb/test/cases/snapshot"
+       casesbackup "github.com/apache/skywalking-banyandb/test/cases/backup"
        integration_standalone "github.com/apache/skywalking-banyandb/test/integration/standalone"
 )
 
-func TestSnapshot(t *testing.T) {
+func TestBackup(t *testing.T) {
        RegisterFailHandler(Fail)
-       RunSpecs(t, "Snapshot Suite", Label(integration_standalone.Labels...))
+       RunSpecs(t, "Backup Suite", Label(integration_standalone.Labels...))
 }
 
 var (
@@ -82,7 +82,8 @@ var _ = SynchronizedBeforeSuite(func() []byte {
        connection, err = grpchelper.Conn(string(address), 10*time.Second,
                grpc.WithTransportCredentials(insecure.NewCredentials()))
        Expect(err).NotTo(HaveOccurred())
-       casessnapshot.SharedContext = helpers.SnapshotSharedContext{
+       casesbackup.SharedContext = helpers.BackupSharedContext{
+               DataAddr:   string(address),
                Connection: connection,
                RootDir:    dir,
        }

Reply via email to