This is an automated email from the ASF dual-hosted git repository.

miaoliyao pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/shardingsphere-on-cloud.git


The following commit(s) were added to refs/heads/main by this push:
     new a413fed  chore(pitr): restore support display progress
     new c29abce  Merge pull request #326 from Xu-Wentao/pitr
a413fed is described below

commit a413fed0b73e9c4fe51bb547e36fd9c9e27db1f1
Author: xuwentao <[email protected]>
AuthorDate: Tue Apr 25 18:46:40 2023 +0800

    chore(pitr): restore support display progress
---
 pitr/agent/internal/handler/restore.go             |   9 +-
 pitr/agent/main.go                                 |   2 +-
 pitr/cli/internal/cmd/backup.go                    |  23 ++--
 pitr/cli/internal/cmd/restore.go                   | 146 +++++++++++++--------
 pitr/cli/internal/cmd/restore_test.go              |  66 ++++++----
 pitr/cli/internal/cmd/root.go                      |   8 +-
 pitr/cli/internal/pkg/model/as_backup.go           |   1 +
 pitr/cli/internal/pkg/model/as_restore.go          |   6 +
 pitr/cli/internal/pkg/model/const.go               |   1 +
 .../pkg/prettyoutput/prettyoutput_suite_test.go    |  30 +++++
 pitr/cli/pkg/prettyoutput/progress_test.go         |  56 ++++++++
 11 files changed, 244 insertions(+), 104 deletions(-)

diff --git a/pitr/agent/internal/handler/restore.go b/pitr/agent/internal/handler/restore.go
index 32d2045..ff1bb33 100644
--- a/pitr/agent/internal/handler/restore.go
+++ b/pitr/agent/internal/handler/restore.go
@@ -63,11 +63,13 @@ func Restore(ctx *fiber.Ctx) (err error) {
                }
        }()
 
+       // move pgdata to temp
        if err = pkg.OG.MvPgDataToTemp(); err != nil {
                err = fmt.Errorf("pkg.OG.MvPgDataToTemp return err=%w", err)
                return
        }
 
+       // restore data from backup
        if err = pkg.OG.Restore(in.DnBackupPath, in.Instance, in.DnBackupID); err != nil {
                efmt := "pkg.OG.Restore failure[path=%s,instance=%s,backupID=%s],err=%w"
                err = fmt.Errorf(efmt, in.DnBackupPath, in.Instance, in.DnBackupID, err)
@@ -78,6 +80,7 @@ func Restore(ctx *fiber.Ctx) (err error) {
                return
        }
 
+       // clean temp
        if err = pkg.OG.CleanPgDataTemp(); err != nil {
                err = fmt.Errorf("pkg.OG.CleanPgDataTemp return err=%w", err)
                return
@@ -88,9 +91,5 @@ func Restore(ctx *fiber.Ctx) (err error) {
                return
        }
 
-       if err = responder.Success(ctx, nil); err != nil {
-               err = fmt.Errorf("responder failure,err=%s,wrap=%w", err, 
cons.Internal)
-               return nil
-       }
-       return
+       return responder.Success(ctx, nil)
 }
diff --git a/pitr/agent/main.go b/pitr/agent/main.go
index 805874b..73bf84f 100644
--- a/pitr/agent/main.go
+++ b/pitr/agent/main.go
@@ -92,7 +92,7 @@ func main() {
                panic(fmt.Errorf("PGDATA:%s the database directory does not 
exist", pgData))
        }
 
-       pgData := strings.Trim(pgData, " ")
+       pgData = strings.Trim(pgData, " ")
        if strings.HasSuffix(pgData, "/") {
                dirs := strings.Split(pgData, "/")
                dirs = dirs[0 : len(dirs)-1]
diff --git a/pitr/cli/internal/cmd/backup.go b/pitr/cli/internal/cmd/backup.go
index 9d57763..1490c38 100644
--- a/pitr/cli/internal/cmd/backup.go
+++ b/pitr/cli/internal/cmd/backup.go
@@ -103,21 +103,22 @@ func init() {
 // 7. Double check backups all finished
 func backup() error {
        var err error
+       var lsBackup *model.LsBackup
        proxy, err := pkg.NewShardingSphereProxy(Username, Password, pkg.DefaultDBName, Host, Port)
        if err != nil {
-               return xerr.NewCliErr("create ss-proxy connect failed")
+               return xerr.NewCliErr("Create ss-proxy connect failed")
        }
 
        ls, err := pkg.NewLocalStorage(pkg.DefaultRootDir())
        if err != nil {
-               return xerr.NewCliErr("create local storage failed")
+               return xerr.NewCliErr("Create local storage failed")
        }
 
        defer func() {
                if err != nil {
-                       logging.Info("try to unlock cluster ...")
+                       logging.Info("Try to unlock cluster ...")
                        if err := proxy.Unlock(); err != nil {
-                               logging.Error(fmt.Sprintf("coz backup failed, 
try to unlock cluster, but still failed, err:%s", err.Error()))
+                               logging.Error(fmt.Sprintf("Coz backup failed, 
try to unlock cluster, but still failed, err:%s", err.Error()))
                        }
                }
        }()
@@ -126,12 +127,12 @@ func backup() error {
        logging.Info("Starting lock cluster ...")
        err = proxy.LockForBackup()
        if err != nil {
-               return xerr.NewCliErr("lock for backup failed")
+               return xerr.NewCliErr("Lock for backup failed")
        }
 
        // Step2. Get cluster info and save local backup info
        logging.Info("Starting export metadata ...")
-       lsBackup, err := exportData(proxy, ls)
+       lsBackup, err = exportData(proxy, ls)
        if err != nil {
                return xerr.NewCliErr(fmt.Sprintf("export backup data failed, 
err:%s", err.Error()))
        }
@@ -140,7 +141,7 @@ func backup() error {
        // Step3. Check agent server status
        logging.Info("Checking agent server status...")
        if available := checkAgentServerStatus(lsBackup); !available {
-               err = xerr.NewCliErr("one or more agent server are not 
available.")
+               err = xerr.NewCliErr("One or more agent server are not 
available.")
                return err
        }
 
@@ -302,6 +303,11 @@ func checkBackupStatus(lsBackup *model.LsBackup) model.BackupStatus {
                dnResult          = make([]*model.DataNode, 0)
        )
 
+       if totalNum == 0 {
+               logging.Info("No data node need to backup")
+               return model.SsBackupStatusCanceled
+       }
+
        for _, dn := range lsBackup.DnList {
                dataNodeMap[dn.IP] = dn
        }
@@ -318,9 +324,6 @@ func checkBackupStatus(lsBackup *model.LsBackup) model.BackupStatus {
        // wait for all data node backup finished
        time.Sleep(time.Millisecond * 100)
        for pw.IsRenderInProgress() {
-               if pw.LengthActive() == 0 {
-                       pw.Stop()
-               }
                time.Sleep(time.Millisecond * 100)
        }
 
diff --git a/pitr/cli/internal/cmd/restore.go b/pitr/cli/internal/cmd/restore.go
index 88d1623..6c7a6a6 100644
--- a/pitr/cli/internal/cmd/restore.go
+++ b/pitr/cli/internal/cmd/restore.go
@@ -19,17 +19,19 @@ package cmd
 
 import (
        "fmt"
+       "os"
        "strings"
-       "sync"
+       "time"
 
        "github.com/apache/shardingsphere-on-cloud/pitr/cli/internal/pkg"
        "github.com/apache/shardingsphere-on-cloud/pitr/cli/internal/pkg/model"
        "github.com/apache/shardingsphere-on-cloud/pitr/cli/internal/pkg/xerr"
-       "github.com/spf13/pflag"
-
-       "github.com/spf13/cobra"
-
        "github.com/apache/shardingsphere-on-cloud/pitr/cli/pkg/logging"
+       "github.com/apache/shardingsphere-on-cloud/pitr/cli/pkg/prettyoutput"
+       "github.com/jedib0t/go-pretty/v6/progress"
+       "github.com/jedib0t/go-pretty/v6/table"
+       "github.com/spf13/cobra"
+       "github.com/spf13/pflag"
 )
 
 var (
@@ -156,82 +158,112 @@ func checkDatabaseExist(proxy pkg.IShardingSphereProxy, bak *model.LsBackup) err
 
        // get user input to confirm
        prompt := fmt.Sprintf(
-               "Detected that the database [%s] already exists in 
shardingsphere-proxy metadata.\n"+
-                       "The logic database will be DROPPED and then insert 
backup's metadata into shardingsphere-proxy after restoring the backup data.\n"+
-                       "PLEASE MAKE SURE OF THIS ACTION, CONTINUE? (Y|N)\n", 
strings.Join(databaseNamesExist, ","))
+               "Detected That The Database [%s] Already Exists In 
ShardingSphere-Proxy Metadata.\n"+
+                       "The Logic Database Will Be DROPPED And Then Insert 
Backup's Metadata Into ShardingSphere-Proxy After Restoring The Backup Data.\n"+
+                       "Are you sure to continue? (Y|N)", 
strings.Join(databaseNamesExist, ","))
        return getUserApproveInTerminal(prompt)
 }
 
+func restoreDataToSSProxy(proxy pkg.IShardingSphereProxy, lsBackup *model.LsBackup) error {
+       // drop database if exists
+       for _, shardingDBName := range databaseNamesExist {
+               logging.Info(fmt.Sprintf("Dropping database: [%s] ...", 
shardingDBName))
+               if err := proxy.DropDatabase(shardingDBName); err != nil {
+                       return xerr.NewCliErr(fmt.Sprintf("drop database 
failed:%s", err.Error()))
+               }
+       }
+
+       // import metadata
+       if err := proxy.ImportMetaData(lsBackup.SsBackup.ClusterInfo); err != nil {
+               return xerr.NewCliErr(fmt.Sprintf("Import metadata to ss-proxy failed:%s", err.Error()))
+       }
+
+       return nil
+}
+
 func execRestore(lsBackup *model.LsBackup) error {
        var (
-               wg           sync.WaitGroup
-               storageNodes = lsBackup.SsBackup.StorageNodes
-               dataNodeMap  = make(map[string]*model.DataNode)
-               failedCh     = make(chan error, len(storageNodes))
+               totalNum           = len(lsBackup.SsBackup.StorageNodes)
+               dataNodeMap        = make(map[string]*model.DataNode)
+               resultCh           = make(chan *model.RestoreResult, totalNum)
+               dnResult           = make([]*model.RestoreResult, 0)
+               restoreFinalStatus = "Completed"
        )
 
        for _, dataNode := range lsBackup.DnList {
                dataNodeMap[dataNode.IP] = dataNode
        }
 
-       for _, storageNode := range storageNodes {
-               wg.Add(1)
-               storageNode := storageNode
-               agentHost := storageNode.IP
-               if agentHost == "127.0.0.1" {
-                       agentHost = Host
-               }
-               as := pkg.NewAgentServer(fmt.Sprintf("%s:%d", agentHost, AgentPort))
-               dataNode, ok := dataNodeMap[storageNode.IP]
-               if !ok {
-                       return xerr.NewCliErr(fmt.Sprintf("data node not 
found:%s", storageNode.IP))
-               }
-               go func() {
-                       defer wg.Done()
-                       _execRestore(as, storageNode, dataNode.BackupID, failedCh)
-               }()
-       }
-       wg.Wait()
-       close(failedCh)
-       if len(failedCh) > 0 {
-               var errMsg string
-               for err := range failedCh {
-                       errMsg += err.Error() + "\n"
+       if totalNum == 0 {
+               return xerr.NewCliErr(fmt.Sprintf("no storage node found, 
please check backup record [%s].", lsBackup.Info.ID))
+       }
+
+       pw := prettyoutput.NewPW(totalNum)
+       go pw.Render()
+       for i := 0; i < totalNum; i++ {
+               sn := lsBackup.SsBackup.StorageNodes[i]
+               dn := dataNodeMap[sn.IP]
+               as := pkg.NewAgentServer(fmt.Sprintf("%s:%d", convertLocalhost(sn.IP), AgentPort))
+               go doRestore(as, sn, dn.BackupID, resultCh, pw)
+       }
+
+       time.Sleep(time.Millisecond * 100)
+       for pw.IsRenderInProgress() {
+               time.Sleep(time.Millisecond * 100)
+       }
+
+       close(resultCh)
+
+       for result := range resultCh {
+               dnResult = append(dnResult, result)
+               if result.Status != "Completed" {
+                       restoreFinalStatus = "Failed"
                }
-               return xerr.NewCliErr(errMsg)
        }
+
+       // print result formatted
+       t := table.NewWriter()
+       t.SetOutputMirror(os.Stdout)
+       t.SetTitle("Restore Task Result: %s", restoreFinalStatus)
+       t.AppendHeader(table.Row{"#", "Data Node IP", "Data Node Port", 
"Result"})
+
+       for i, dn := range dnResult {
+               t.AppendRow([]interface{}{i + 1, dn.IP, dn.Port, dn.Status})
+               t.AppendSeparator()
+       }
+
+       t.Render()
+
        return nil
 }
 
-func _execRestore(as pkg.IAgentServer, node *model.StorageNode, backupID string, failedCh chan error) {
+func doRestore(as pkg.IAgentServer, sn *model.StorageNode, backupID string, resultCh chan *model.RestoreResult, pw progress.Writer) {
+       tracker := &progress.Tracker{Message: fmt.Sprintf("Restore data to openGauss: %s", sn.IP)}
+       result := ""
+
        in := &model.RestoreIn{
-               DBPort:       node.Port,
-               DBName:       node.Database,
-               Username:     node.Username,
-               Password:     node.Password,
+               DBPort:       sn.Port,
+               DBName:       sn.Database,
+               Username:     sn.Username,
+               Password:     sn.Password,
                Instance:     defaultInstance,
                DnBackupPath: BackupPath,
                DnBackupID:   backupID,
        }
 
-       if err := as.Restore(in); err != nil {
-               failedCh <- xerr.NewCliErr(fmt.Sprintf("restore node:[IP:%s] 
failed:%s", node.IP, err.Error()))
-       }
-}
+       pw.AppendTracker(tracker)
 
-func restoreDataToSSProxy(proxy pkg.IShardingSphereProxy, lsBackup *model.LsBackup) error {
-       // drop database if exists
-       for _, shardingDBName := range databaseNamesExist {
-               logging.Info(fmt.Sprintf("Dropping database: [%s] ...", 
shardingDBName))
-               if err := proxy.DropDatabase(shardingDBName); err != nil {
-                       return xerr.NewCliErr(fmt.Sprintf("drop database 
failed:%s", err.Error()))
-               }
+       if err := as.Restore(in); err != nil {
+               tracker.MarkAsErrored()
+               result = "Failed"
+       } else {
+               tracker.MarkAsDone()
+               result = "Completed"
        }
 
-       // import metadata
-       if err := proxy.ImportMetaData(lsBackup.SsBackup.ClusterInfo); err != nil {
-               return xerr.NewCliErr(fmt.Sprintf("Import metadata to ss-proxy failed:%s", err.Error()))
+       resultCh <- &model.RestoreResult{
+               IP:     sn.IP,
+               Port:   sn.Port,
+               Status: result,
        }
-
-       return nil
 }
diff --git a/pitr/cli/internal/cmd/restore_test.go b/pitr/cli/internal/cmd/restore_test.go
index fd1898f..44d4764 100644
--- a/pitr/cli/internal/cmd/restore_test.go
+++ b/pitr/cli/internal/cmd/restore_test.go
@@ -20,6 +20,7 @@ package cmd
 
 import (
        "reflect"
+       "time"
 
        "bou.ke/monkey"
        "github.com/apache/shardingsphere-on-cloud/pitr/cli/internal/pkg"
@@ -59,14 +60,21 @@ var _ = Describe("Restore", func() {
                })
        })
 })
+
 var _ = Describe("test restore", func() {
        var (
                proxy *mock_pkg.MockIShardingSphereProxy
                ls    *mock_pkg.MockILocalStorage
                as    *mock_pkg.MockIAgentServer
                bak   = &model.LsBackup{
-                       Info:   nil,
-                       DnList: nil,
+                       Info: &model.BackupMetaInfo{
+                               ID: "backup-id-1",
+                       },
+                       DnList: []*model.DataNode{
+                               {
+                                       IP: "127.0.0.1",
+                               },
+                       },
                        SsBackup: &model.SsBackup{
                                Status: "",
                                ClusterInfo: &model.ClusterInfo{
@@ -77,17 +85,13 @@ var _ = Describe("test restore", func() {
                                        },
                                        SnapshotInfo: nil,
                                },
-                               StorageNodes: nil,
+                               StorageNodes: []*model.StorageNode{
+                                       {
+                                               IP: "127.0.0.1",
+                                       },
+                               },
                        },
                }
-               sn = &model.StorageNode{
-                       IP:       "127.0.0.1",
-                       Port:     3306,
-                       Username: "",
-                       Password: "",
-                       Database: "",
-                       Remark:   "",
-               }
        )
 
        BeforeEach(func() {
@@ -114,24 +118,16 @@ var _ = Describe("test restore", func() {
                Expect(checkDatabaseExist(proxy, bak)).To(BeNil())
        })
 
-       It("test exec restore", func() {
-               failedCh := make(chan error, 1)
-               as.EXPECT().Restore(gomock.Any()).Return(nil)
-               _execRestore(as, sn, "backup-id", failedCh)
-               close(failedCh)
-               Expect(<-failedCh).To(BeNil())
-       })
-
        It("test exec restore main func", func() {
                // patch ReadByID of mock ls
-               monkey.PatchInstanceMethod(reflect.TypeOf(ls), "ReadByID", func(_ *mock_pkg.MockILocalStorage, _ string) (*model.LsBackup, error) {
-                       return bak, nil
-               })
-               // mock ExportMetaData and return a *ClusterInfo with bak in it
-               proxy.EXPECT().ExportMetaData().Return(bak.SsBackup.ClusterInfo, nil)
-               // mock ImportMetaData and return nil
-               proxy.EXPECT().ImportMetaData(gomock.Any()).Return(nil)
+               monkey.PatchInstanceMethod(reflect.TypeOf(ls), "ReadByID", func(_ *mock_pkg.MockILocalStorage, _ string) (*model.LsBackup, error) { return bak, nil })
+               monkey.Patch(pkg.NewAgentServer, func(_ string) pkg.IAgentServer { return as })
+
                RecordID = "backup-id"
+               proxy.EXPECT().ExportMetaData().Return(&model.ClusterInfo{}, 
nil)
+               proxy.EXPECT().ImportMetaData(gomock.Any()).Return(nil)
+               as.EXPECT().CheckStatus().Return(nil)
+               as.EXPECT().Restore(gomock.Any()).Return(nil)
                Expect(restore()).To(BeNil())
        })
 
@@ -142,7 +138,6 @@ var _ = Describe("test restore", func() {
                        // exec getUserApproveInTerminal
                        Expect(getUserApproveInTerminal("")).To(Equal(xerr.NewCliErr("User abort")))
                })
-               // TODO test user approve, how to patch os.Stdin?
        })
 
        Context("restore data to ss proxy", func() {
@@ -160,4 +155,21 @@ var _ = Describe("test restore", func() {
                })
        })
 
+       Context("test exec restore", func() {
+               It("should be success", func() {
+                       ctrl := gomock.NewController(GinkgoT())
+                       as := mock_pkg.NewMockIAgentServer(ctrl)
+                       monkey.Patch(pkg.NewAgentServer, func(_ string) pkg.IAgentServer {
+                               return as
+                       })
+                       defer func() {
+                               ctrl.Finish()
+                               monkey.UnpatchAll()
+                       }()
+                       as.EXPECT().Restore(gomock.Any()).Do(func(_ *model.RestoreIn) {
+                               time.Sleep(3 * time.Second)
+                       }).Return(nil)
+                       Expect(execRestore(bak)).To(BeNil())
+               })
+       })
 })
diff --git a/pitr/cli/internal/cmd/root.go b/pitr/cli/internal/cmd/root.go
index 3d763d6..6df5fc7 100644
--- a/pitr/cli/internal/cmd/root.go
+++ b/pitr/cli/internal/cmd/root.go
@@ -97,20 +97,20 @@ func checkAgentServerStatus(lsBackup *model.LsBackup) bool {
                sn := node
                as := pkg.NewAgentServer(fmt.Sprintf("%s:%d", convertLocalhost(sn.IP), AgentPort))
                if err := as.CheckStatus(); err != nil {
-                       statusList = append(statusList, &model.AgentServerStatus{IP: sn.IP, Status: "Unavailable"})
+                       statusList = append(statusList, &model.AgentServerStatus{IP: sn.IP, Port: sn.Port, Status: "Unavailable"})
                        available = false
                } else {
-                       statusList = append(statusList, &model.AgentServerStatus{IP: sn.IP, Status: "Available"})
+                       statusList = append(statusList, &model.AgentServerStatus{IP: sn.IP, Port: sn.Port, Status: "Available"})
                }
        }
 
        t := table.NewWriter()
        t.SetOutputMirror(os.Stdout)
        t.SetTitle("Agent Server Status")
-       t.AppendHeader(table.Row{"#", "Agent Server IP", "Status"})
+       t.AppendHeader(table.Row{"#", "Agent Server IP", "Agent Server Port", 
"Status"})
 
        for i, s := range statusList {
-               t.AppendRow([]interface{}{i + 1, s.IP, s.Status})
+               t.AppendRow([]interface{}{i + 1, s.IP, s.Port, s.Status})
                t.AppendSeparator()
        }
 
diff --git a/pitr/cli/internal/pkg/model/as_backup.go b/pitr/cli/internal/pkg/model/as_backup.go
index ee3078f..3ba03aa 100644
--- a/pitr/cli/internal/pkg/model/as_backup.go
+++ b/pitr/cli/internal/pkg/model/as_backup.go
@@ -43,6 +43,7 @@ type (
 
 type AgentServerStatus struct {
        IP     string `json:"ip"`
+       Port   uint16 `json:"port"`
        Status string `json:"status"`
 }
 
diff --git a/pitr/cli/internal/pkg/model/as_restore.go b/pitr/cli/internal/pkg/model/as_restore.go
index 2381de7..3033559 100644
--- a/pitr/cli/internal/pkg/model/as_restore.go
+++ b/pitr/cli/internal/pkg/model/as_restore.go
@@ -32,4 +32,10 @@ type (
                Code int    `json:"code" validate:"required"`
                Msg  string `json:"msg" validate:"required"`
        }
+
+       RestoreResult struct {
+               IP     string `json:"ip"`
+               Port   uint16 `json:"port"`
+               Status string `json:"status"`
+       }
 )
diff --git a/pitr/cli/internal/pkg/model/const.go b/pitr/cli/internal/pkg/model/const.go
index d745929..58452f8 100644
--- a/pitr/cli/internal/pkg/model/const.go
+++ b/pitr/cli/internal/pkg/model/const.go
@@ -26,6 +26,7 @@ const (
        SsBackupStatusCompleted  BackupStatus = "Completed"
        SsBackupStatusFailed     BackupStatus = "Failed"
        SsBackupStatusCheckError BackupStatus = "CheckError"
+       SsBackupStatusCanceled   BackupStatus = "Canceled"
 
        BDBackModeFull   DBBackupMode = "FULL"
        DBBackModePTrack DBBackupMode = "PTRACK"
diff --git a/pitr/cli/pkg/prettyoutput/prettyoutput_suite_test.go b/pitr/cli/pkg/prettyoutput/prettyoutput_suite_test.go
new file mode 100644
index 0000000..d18bc0f
--- /dev/null
+++ b/pitr/cli/pkg/prettyoutput/prettyoutput_suite_test.go
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package prettyoutput_test
+
+import (
+       "testing"
+
+       . "github.com/onsi/ginkgo/v2"
+       . "github.com/onsi/gomega"
+)
+
+func TestPrettyOutput(t *testing.T) {
+       RegisterFailHandler(Fail)
+       RunSpecs(t, "PrettyOutput Suite")
+}
diff --git a/pitr/cli/pkg/prettyoutput/progress_test.go b/pitr/cli/pkg/prettyoutput/progress_test.go
new file mode 100644
index 0000000..b89c21a
--- /dev/null
+++ b/pitr/cli/pkg/prettyoutput/progress_test.go
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package prettyoutput_test
+
+import (
+       "time"
+
+       "github.com/apache/shardingsphere-on-cloud/pitr/cli/pkg/prettyoutput"
+       "github.com/jedib0t/go-pretty/v6/progress"
+       . "github.com/onsi/ginkgo/v2"
+)
+
+var _ = Describe("Progress", func() {
+       It("test style", func() {
+               pw := prettyoutput.NewPW(5)
+               go pw.Render()
+
+               for i := 0; i < 5; i++ {
+                       go func() {
+                               tracker := &progress.Tracker{
+                                       Message: "test",
+                                       Total:   10,
+                                       Units:   progress.UnitsDefault,
+                               }
+
+                               pw.AppendTracker(tracker)
+                               ticker := time.Tick(time.Millisecond * 100)
+                               for !tracker.IsDone() {
+                                       for range ticker {
+                                               tracker.Increment(1)
+                                       }
+                               }
+                       }()
+               }
+
+               time.Sleep(time.Millisecond * 100)
+               for pw.IsRenderInProgress() {
+                       time.Sleep(time.Millisecond * 100)
+               }
+       })
+})
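
prettyoutput.NewPW itself is not part of this diff. Judging from how both the CLI and the new test poll pw.IsRenderInProgress() until every tracker finishes, it is presumably a thin constructor around go-pretty's progress.Writer with auto-stop enabled; a plausible sketch (an assumption, not the committed implementation) would be:

    package prettyoutput

    import (
    	"time"

    	"github.com/jedib0t/go-pretty/v6/progress"
    )

    // NewPW returns a progress writer preconfigured for trackerNum trackers.
    // Guessed shape only; the real implementation lives elsewhere in the repo.
    func NewPW(trackerNum int) progress.Writer {
    	pw := progress.NewWriter()
    	pw.SetNumTrackersExpected(trackerNum)
    	pw.SetAutoStop(true)                          // render loop ends once every tracker is done
    	pw.SetUpdateFrequency(time.Millisecond * 100) // matches the 100ms polling used by callers
    	return pw
    }
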
