This is an automated email from the ASF dual-hosted git repository.
pabloem pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/beam.git
The following commit(s) were added to refs/heads/master by this push:
new 2a45a5b Merge pull request #16826 from [BEAM-13870] [Playground]
Increase test coverage for the cache package
2a45a5b is described below
commit 2a45a5b6c34141dcc595c5cda3d28264e542bdff
Author: Pavel Avilov <[email protected]>
AuthorDate: Thu Feb 24 08:27:02 2022 +0300
Merge pull request #16826 from [BEAM-13870] [Playground] Increase test
coverage for the cache package
* Increase test coverage for the cache package
* Add comments for several tests.
* Edit comments
* Edit comments
* Refactoring code
---
.../internal/cache/local/local_cache_test.go | 115 ++++++++++++++++++---
.../internal/cache/redis/redis_cache_test.go | 68 +++++++++---
2 files changed, 157 insertions(+), 26 deletions(-)
diff --git a/playground/backend/internal/cache/local/local_cache_test.go
b/playground/backend/internal/cache/local/local_cache_test.go
index c4d5a58..34b2654 100644
--- a/playground/backend/internal/cache/local/local_cache_test.go
+++ b/playground/backend/internal/cache/local/local_cache_test.go
@@ -58,6 +58,8 @@ func TestLocalCache_GetValue(t *testing.T) {
preparedItemsMap[preparedId][preparedSubKey] = value
preparedExpMap := make(map[uuid.UUID]time.Time)
preparedExpMap[preparedId] = time.Now().Add(time.Millisecond)
+ endedExpMap := make(map[uuid.UUID]time.Time)
+ endedExpMap[preparedId] = time.Now().Add(-time.Millisecond)
type fields struct {
cleanupInterval time.Duration
items map[uuid.UUID]map[cache.SubKey]interface{}
@@ -76,7 +78,7 @@ func TestLocalCache_GetValue(t *testing.T) {
wantErr bool
}{
{
- name: "Get exist value",
+ name: "Get existing value",
fields: fields{
cleanupInterval: cleanupInterval,
items: preparedItemsMap,
@@ -91,7 +93,7 @@ func TestLocalCache_GetValue(t *testing.T) {
wantErr: false,
},
{
- name: "Get not exist value",
+ name: "Get not existing value",
fields: fields{
cleanupInterval: cleanupInterval,
items:
make(map[uuid.UUID]map[cache.SubKey]interface{}),
@@ -103,6 +105,21 @@ func TestLocalCache_GetValue(t *testing.T) {
want: nil,
wantErr: true,
},
+ {
+ name: "Get an existing value that is expiring",
+ fields: fields{
+ cleanupInterval: cleanupInterval,
+ items: preparedItemsMap,
+ pipelinesExpiration: endedExpMap,
+ },
+ args: args{
+ ctx: context.Background(),
+ pipelineId: preparedId,
+ subKey: preparedSubKey,
+ },
+ want: nil,
+ wantErr: true,
+ },
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -159,6 +176,21 @@ func TestLocalCache_SetValue(t *testing.T) {
},
wantErr: false,
},
+ {
+ name: "Set value for RunOutputIndex subKey",
+ fields: fields{
+ cleanupInterval: cleanupInterval,
+ items:
make(map[uuid.UUID]map[cache.SubKey]interface{}),
+ pipelinesExpiration: preparedExpMap,
+ },
+ args: args{
+ ctx: context.Background(),
+ pipelineId: preparedId,
+ subKey: cache.RunOutputIndex,
+ value: 5,
+ },
+ wantErr: false,
+ },
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -180,6 +212,9 @@ func TestLocalCache_SetValue(t *testing.T) {
func TestLocalCache_SetExpTime(t *testing.T) {
preparedId, _ := uuid.NewUUID()
+ preparedItems := make(map[uuid.UUID]map[cache.SubKey]interface{})
+ preparedItems[preparedId] = make(map[cache.SubKey]interface{})
+ preparedItems[preparedId][cache.Status] = 1
type fields struct {
cleanupInterval time.Duration
items map[uuid.UUID]map[cache.SubKey]interface{}
@@ -191,14 +226,29 @@ func TestLocalCache_SetExpTime(t *testing.T) {
expTime time.Duration
}
tests := []struct {
- name string
- fields fields
- args args
+ name string
+ fields fields
+ args args
+ wantErr bool
}{
{
name: "Set expiration time",
fields: fields{
cleanupInterval: cleanupInterval,
+ items: preparedItems,
+ pipelinesExpiration:
make(map[uuid.UUID]time.Time),
+ },
+ args: args{
+ ctx: context.Background(),
+ pipelineId: preparedId,
+ expTime: time.Minute,
+ },
+ wantErr: false,
+ },
+ {
+ name: "Set expiration time for not existing value in
cache items",
+ fields: fields{
+ cleanupInterval: cleanupInterval,
items:
make(map[uuid.UUID]map[cache.SubKey]interface{}),
pipelinesExpiration:
make(map[uuid.UUID]time.Time),
},
@@ -207,6 +257,7 @@ func TestLocalCache_SetExpTime(t *testing.T) {
pipelineId: preparedId,
expTime: time.Minute,
},
+ wantErr: true,
},
}
for _, tt := range tests {
@@ -216,14 +267,13 @@ func TestLocalCache_SetExpTime(t *testing.T) {
items: tt.fields.items,
pipelinesExpiration:
tt.fields.pipelinesExpiration,
}
- _ = lc.SetValue(tt.args.ctx, preparedId, cache.Status,
1)
err := lc.SetExpTime(tt.args.ctx, tt.args.pipelineId,
tt.args.expTime)
- if err != nil {
- t.Error(err)
- }
- expTime, found :=
lc.pipelinesExpiration[tt.args.pipelineId]
- if expTime.Round(time.Second) !=
time.Now().Add(tt.args.expTime).Round(time.Second) || !found {
- t.Errorf("Expiration time of the pipeline: %s
not set in cache.", tt.args.pipelineId)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("SetExpTime() error = %v, wantErr %v",
err, tt.wantErr)
+ expTime, found :=
lc.pipelinesExpiration[tt.args.pipelineId]
+ if expTime.Round(time.Second) !=
time.Now().Add(tt.args.expTime).Round(time.Second) || !found {
+ t.Errorf("Expiration time of the
pipeline: %s not set in cache.", tt.args.pipelineId)
+ }
}
})
}
@@ -429,6 +479,16 @@ func TestLocalCache_startGC(t *testing.T) {
pipelinesExpiration: preparedExpMap,
},
},
+ {
+ // Test case with calling startGC method with nil cache
items.
+ // As a result, items stay the same.
+ name: "Checking for deleting expired pipelines with nil
cache items",
+ fields: fields{
+ cleanupInterval: time.Microsecond,
+ items: nil,
+ pipelinesExpiration: preparedExpMap,
+ },
+ },
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -540,3 +600,34 @@ func TestLocalCache_clearItems(t *testing.T) {
})
}
}
+
+func TestNew(t *testing.T) {
+ items := make(map[uuid.UUID]map[cache.SubKey]interface{})
+ pipelinesExpiration := make(map[uuid.UUID]time.Time)
+ type args struct {
+ ctx context.Context
+ }
+ tests := []struct {
+ name string
+ args args
+ want *Cache
+ }{
+ {
+ name: "Initialize local cache",
+ args: args{ctx: context.Background()},
+ want: &Cache{
+ cleanupInterval: cleanupInterval,
+ items: items,
+ pipelinesExpiration: pipelinesExpiration,
+ catalog: nil,
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := New(tt.args.ctx); !reflect.DeepEqual(got,
tt.want) {
+ t.Errorf("New() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/playground/backend/internal/cache/redis/redis_cache_test.go
b/playground/backend/internal/cache/redis/redis_cache_test.go
index 02dec43..60a0ecf 100644
--- a/playground/backend/internal/cache/redis/redis_cache_test.go
+++ b/playground/backend/internal/cache/redis/redis_cache_test.go
@@ -54,7 +54,7 @@ func TestRedisCache_GetValue(t *testing.T) {
wantErr bool
}{
{
- name: "error during HGet operation",
+ name: "Error during HGet operation",
mocks: func() {
mock.ExpectHGet(pipelineId.String(),
string(marshSubKey)).SetErr(fmt.Errorf("MOCK_ERROR"))
},
@@ -68,7 +68,7 @@ func TestRedisCache_GetValue(t *testing.T) {
wantErr: true,
},
{
- name: "all success",
+ name: "Get existing value",
mocks: func() {
mock.ExpectHGet(pipelineId.String(),
string(marshSubKey)).SetVal(string(marshValue))
},
@@ -122,7 +122,7 @@ func TestRedisCache_SetExpTime(t *testing.T) {
wantErr bool
}{
{
- name: "error during Exists operation",
+ name: "Error during Exists operation",
mocks: func() {
mock.ExpectExists(pipelineId.String()).SetErr(fmt.Errorf("MOCK_ERROR"))
},
@@ -148,7 +148,7 @@ func TestRedisCache_SetExpTime(t *testing.T) {
wantErr: true,
},
{
- name: "error during Expire operation",
+ name: "Set expiration time with error during Expire
operation",
mocks: func() {
mock.ExpectExists(pipelineId.String()).SetVal(1)
mock.ExpectExpire(pipelineId.String(),
expTime).SetErr(fmt.Errorf("MOCK_ERROR"))
@@ -162,7 +162,7 @@ func TestRedisCache_SetExpTime(t *testing.T) {
wantErr: true,
},
{
- name: "all success",
+ name: "Set expiration time",
mocks: func() {
mock.ExpectExists(pipelineId.String()).SetVal(1)
mock.ExpectExpire(pipelineId.String(),
expTime).SetVal(true)
@@ -215,7 +215,7 @@ func TestRedisCache_SetValue(t *testing.T) {
wantErr bool
}{
{
- name: "error during HSet operation",
+ name: "Error during HSet operation",
mocks: func() {
mock.ExpectHSet(pipelineId.String(),
marshSubKey, marshValue).SetErr(fmt.Errorf("MOCK_ERROR"))
},
@@ -229,7 +229,7 @@ func TestRedisCache_SetValue(t *testing.T) {
wantErr: true,
},
{
- name: "all success",
+ name: "Set correct value",
mocks: func() {
mock.ExpectHSet(pipelineId.String(),
marshSubKey, marshValue).SetVal(1)
mock.ExpectExpire(pipelineId.String(),
time.Minute*15).SetVal(true)
@@ -243,6 +243,21 @@ func TestRedisCache_SetValue(t *testing.T) {
},
wantErr: false,
},
+ {
+ name: "Set incorrect value",
+ mocks: func() {
+ mock.ExpectHSet(pipelineId.String(),
marshSubKey, marshValue).SetVal(1)
+ mock.ExpectExpire(pipelineId.String(),
time.Minute*15).SetVal(true)
+ },
+ fields: fields{client},
+ args: args{
+ ctx: context.Background(),
+ pipelineId: pipelineId,
+ subKey: subKey,
+ value: make(chan int),
+ },
+ wantErr: true,
+ },
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -318,7 +333,7 @@ func TestCache_GetCatalog(t *testing.T) {
wantErr bool
}{
{
- name: "Success",
+ name: "Get existing catalog",
mocks: func() {
mock.ExpectGet(cache.ExamplesCatalog).SetVal(string(catalogMarsh))
},
@@ -416,7 +431,7 @@ func TestCache_SetCatalog(t *testing.T) {
wantErr bool
}{
{
- name: "Success",
+ name: "Set catalog",
mocks: func() {
mock.ExpectSet(cache.ExamplesCatalog,
catalogMarsh, 0).SetVal("")
},
@@ -465,7 +480,7 @@ func Test_newRedisCache(t *testing.T) {
wantErr bool
}{
{
- name: "error during Ping operation",
+ name: "Error during Ping operation",
args: args{
ctx: context.Background(),
addr: address,
@@ -487,6 +502,9 @@ func Test_unmarshalBySubKey(t *testing.T) {
statusValue, _ := json.Marshal(status)
output := "MOCK_OUTPUT"
outputValue, _ := json.Marshal(output)
+ canceledValue, _ := json.Marshal(false)
+ runOutputIndex := 0
+ runOutputIndexValue, _ := json.Marshal(runOutputIndex)
type args struct {
ctx context.Context
subKey cache.SubKey
@@ -499,7 +517,7 @@ func Test_unmarshalBySubKey(t *testing.T) {
wantErr bool
}{
{
- name: "status subKey",
+ name: "Status subKey",
args: args{
ctx: context.Background(),
subKey: cache.Status,
@@ -509,7 +527,7 @@ func Test_unmarshalBySubKey(t *testing.T) {
wantErr: false,
},
{
- name: "runOutput subKey",
+ name: "RunOutput subKey",
args: args{
subKey: cache.RunOutput,
value: string(outputValue),
@@ -518,7 +536,7 @@ func Test_unmarshalBySubKey(t *testing.T) {
wantErr: false,
},
{
- name: "compileOutput subKey",
+ name: "CompileOutput subKey",
args: args{
subKey: cache.CompileOutput,
value: string(outputValue),
@@ -527,7 +545,7 @@ func Test_unmarshalBySubKey(t *testing.T) {
wantErr: false,
},
{
- name: "graph subKey",
+ name: "Graph subKey",
args: args{
subKey: cache.Graph,
value: string(outputValue),
@@ -535,6 +553,28 @@ func Test_unmarshalBySubKey(t *testing.T) {
want: output,
wantErr: false,
},
+ {
+ // Test case with calling unmarshalBySubKey method with
Canceled subKey.
+ // As a result, want to receive false.
+ name: "Canceled subKey",
+ args: args{
+ subKey: cache.Canceled,
+ value: string(canceledValue),
+ },
+ want: false,
+ wantErr: false,
+ },
+ {
+ // Test case with calling unmarshalBySubKey method with
RunOutputIndex subKey.
+ // As a result, want to receive expected runOutputIndex.
+ name: "RunOutputIndex subKey",
+ args: args{
+ subKey: cache.RunOutputIndex,
+ value: string(runOutputIndexValue),
+ },
+ want: float64(runOutputIndex),
+ wantErr: false,
+ },
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {