This is an automated email from the ASF dual-hosted git repository.

robocanic pushed a commit to branch develop
in repository https://gitbox.apache.org/repos/asf/dubbo-admin.git


The following commit(s) were added to refs/heads/develop by this push:
     new 19051903 feat: implement mysql and postgresql store for resources 
(#1360)
19051903 is described below

commit 1905190315bf94ade5be050d25a87a4d369b96e7
Author: Tew <[email protected]>
AuthorDate: Sun Nov 30 23:46:44 2025 +0800

    feat: implement mysql and postgresql store for resources (#1360)
    
    * feat: implement mysql and postgresql store for resources
    
    * fix some issues
    
    * ut: add some test cases
    
    * fix: dynamic table name
---
 go.mod                                     |   14 +-
 go.sum                                     |   28 +-
 pkg/config/store/config.go                 |    4 +-
 pkg/core/bootstrap/init.go                 |    2 +
 pkg/store/dbcommon/connection_pool.go      |  226 +++++
 pkg/store/dbcommon/connection_pool_test.go |  502 ++++++++++
 pkg/store/dbcommon/gorm_store.go           |  579 +++++++++++
 pkg/store/dbcommon/gorm_store_test.go      | 1430 ++++++++++++++++++++++++++++
 pkg/store/dbcommon/index.go                |  254 +++++
 pkg/store/dbcommon/model.go                |  125 +++
 pkg/store/mysql/mysql.go                   |   40 +-
 pkg/store/postgres/postgres.go             |   58 ++
 12 files changed, 3257 insertions(+), 5 deletions(-)

diff --git a/go.mod b/go.mod
index c60caef1..e10d555b 100644
--- a/go.mod
+++ b/go.mod
@@ -20,7 +20,6 @@ go 1.23.0
 require (
        github.com/Masterminds/semver/v3 v3.2.1
        github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2
-       github.com/bakito/go-log-logr-adapter v0.0.2
        github.com/dubbogo/gost v1.14.0
        github.com/duke-git/lancet/v2 v2.3.6
        github.com/emicklei/go-restful/v3 v3.11.0
@@ -55,6 +54,10 @@ require (
        google.golang.org/grpc v1.73.0
        google.golang.org/protobuf v1.36.6
        gopkg.in/natefinch/lumberjack.v2 v2.2.1
+       gorm.io/driver/mysql v1.6.0
+       gorm.io/driver/postgres v1.6.0
+       gorm.io/driver/sqlite v1.5.7
+       gorm.io/gorm v1.30.0
        k8s.io/api v0.32.0
        k8s.io/apimachinery v0.32.0
        k8s.io/client-go v0.32.0
@@ -65,6 +68,7 @@ require (
 
 require (
        cel.dev/expr v0.23.0 // indirect
+       filippo.io/edwards25519 v1.1.0 // indirect
        github.com/beorn7/perks v1.0.1 // indirect
        github.com/bufbuild/protocompile v0.10.0 // indirect
        github.com/bytedance/sonic v1.13.2 // indirect
@@ -85,6 +89,7 @@ require (
        github.com/go-playground/locales v0.14.1 // indirect
        github.com/go-playground/universal-translator v0.18.1 // indirect
        github.com/go-playground/validator/v10 v10.26.0 // indirect
+       github.com/go-sql-driver/mysql v1.8.1 // indirect
        github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
        github.com/goccy/go-json v0.10.5 // indirect
        github.com/gogo/protobuf v1.3.2 // indirect
@@ -95,12 +100,19 @@ require (
        github.com/gorilla/securecookie v1.1.2 // indirect
        github.com/gorilla/sessions v1.4.0 // indirect
        github.com/inconshreveable/mousetrap v1.1.0 // indirect
+       github.com/jackc/pgpassfile v1.0.0 // indirect
+       github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // 
indirect
+       github.com/jackc/pgx/v5 v5.6.0 // indirect
+       github.com/jackc/puddle/v2 v2.2.2 // indirect
+       github.com/jinzhu/inflection v1.0.0 // indirect
+       github.com/jinzhu/now v1.1.5 // indirect
        github.com/josharian/intern v1.0.0 // indirect
        github.com/json-iterator/go v1.1.12 // indirect
        github.com/klauspost/cpuid/v2 v2.2.10 // indirect
        github.com/leodido/go-urn v1.4.0 // indirect
        github.com/mailru/easyjson v0.7.7 // indirect
        github.com/mattn/go-isatty v0.0.20 // indirect
+       github.com/mattn/go-sqlite3 v1.14.22 // indirect
        github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // 
indirect
        github.com/modern-go/reflect2 v1.0.2 // indirect
        github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // 
indirect
diff --git a/go.sum b/go.sum
index 74ff327f..1b52d70d 100644
--- a/go.sum
+++ b/go.sum
@@ -34,6 +34,8 @@ cloud.google.com/go/storage v1.6.0/go.mod 
h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
 cloud.google.com/go/storage v1.8.0/go.mod 
h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
 cloud.google.com/go/storage v1.10.0/go.mod 
h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod 
h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
+filippo.io/edwards25519 v1.1.0/go.mod 
h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
 github.com/BurntSushi/toml v0.3.1/go.mod 
h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod 
h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/Masterminds/semver/v3 v3.2.1 
h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0=
@@ -51,8 +53,6 @@ github.com/armon/go-metrics 
v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmV
 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod 
h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 
h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod 
h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
-github.com/bakito/go-log-logr-adapter v0.0.2 
h1:epK+VaMPkK7dK+Vs78xo0BABqN1lIXD3IXX1VUj4PcM=
-github.com/bakito/go-log-logr-adapter v0.0.2/go.mod 
h1:B2tvB31L1Sxpkfhpj13QkJEisDNNKcC9FoYU8KL87AA=
 github.com/benbjohnson/clock v1.1.0/go.mod 
h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod 
h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod 
h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@@ -180,6 +180,8 @@ github.com/go-playground/universal-translator v0.18.1 
h1:Bcnm0ZwsGyWbCzImXv+pAJn
 github.com/go-playground/universal-translator v0.18.1/go.mod 
h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
 github.com/go-playground/validator/v10 v10.26.0 
h1:SP05Nqhjcvz81uJaRfEV0YBSSSGMc/iMaVtFbr3Sw2k=
 github.com/go-playground/validator/v10 v10.26.0/go.mod 
h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo=
+github.com/go-sql-driver/mysql v1.8.1 
h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
+github.com/go-sql-driver/mysql v1.8.1/go.mod 
h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
 github.com/go-stack/stack v1.8.0/go.mod 
h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/go-task/slim-sprig/v3 v3.0.0 
h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
 github.com/go-task/slim-sprig/v3 v3.0.0/go.mod 
h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
@@ -305,8 +307,20 @@ github.com/ianlancetaylor/demangle 
v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:
 github.com/inconshreveable/mousetrap v1.0.0/go.mod 
h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/inconshreveable/mousetrap v1.1.0 
h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod 
h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/jackc/pgpassfile v1.0.0 
h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
+github.com/jackc/pgpassfile v1.0.0/go.mod 
h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
+github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 
h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
+github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod 
h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
+github.com/jackc/pgx/v5 v5.6.0 h1:SWJzexBzPL5jb0GEsrPMLIsi/3jOo7RHlzTjcAeDrPY=
+github.com/jackc/pgx/v5 v5.6.0/go.mod 
h1:DNZ/vlrUnhWCoFGxHAG8U2ljioxukquj7utPDgtQdTw=
+github.com/jackc/puddle/v2 v2.2.2 
h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
+github.com/jackc/puddle/v2 v2.2.2/go.mod 
h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
 github.com/jhump/protoreflect v1.16.0 
h1:54fZg+49widqXYQ0b+usAFHbMkBGR4PpXrsHc8+TBDg=
 github.com/jhump/protoreflect v1.16.0/go.mod 
h1:oYPd7nPvcBw/5wlDfm/AVmU9zH9BgqGCI469pGxfj/8=
+github.com/jinzhu/inflection v1.0.0 
h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
+github.com/jinzhu/inflection v1.0.0/go.mod 
h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
+github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
+github.com/jinzhu/now v1.1.5/go.mod 
h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod 
h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jonboulle/clockwork v0.1.0/go.mod 
h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
 github.com/jonboulle/clockwork v0.2.2/go.mod 
h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
@@ -361,6 +375,8 @@ github.com/mattn/go-isatty v0.0.3/go.mod 
h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx
 github.com/mattn/go-isatty v0.0.14/go.mod 
h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
 github.com/mattn/go-isatty v0.0.20 
h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
 github.com/mattn/go-isatty v0.0.20/go.mod 
h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-sqlite3 v1.14.22 
h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
+github.com/mattn/go-sqlite3 v1.14.22/go.mod 
h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod 
h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/miekg/dns v1.0.14/go.mod 
h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
 github.com/mitchellh/cli v1.0.0/go.mod 
h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
@@ -917,6 +933,14 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod 
h1:K4uyk7z7BCEPqu6E+C
 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gorm.io/driver/mysql v1.6.0 h1:eNbLmNTpPpTOVZi8MMxCi2aaIm0ZpInbORNXDwyLGvg=
+gorm.io/driver/mysql v1.6.0/go.mod 
h1:D/oCC2GWK3M/dqoLxnOlaNKmXz8WNTfcS9y5ovaSqKo=
+gorm.io/driver/postgres v1.6.0 h1:2dxzU8xJ+ivvqTRph34QX+WrRaJlmfyPqXmoGVjMBa4=
+gorm.io/driver/postgres v1.6.0/go.mod 
h1:vUw0mrGgrTK+uPHEhAdV4sfFELrByKVGnaVRkXDhtWo=
+gorm.io/driver/sqlite v1.5.7 h1:8NvsrhP0ifM7LX9G4zPB97NwovUakUxc+2V2uuf3Z1I=
+gorm.io/driver/sqlite v1.5.7/go.mod 
h1:U+J8craQU6Fzkcvu8oLeAQmi50TkwPEhHDEjQZXDah4=
+gorm.io/gorm v1.30.0 h1:qbT5aPv1UH8gI99OsRlvDToLxW5zR7FzS9acZDOZcgs=
+gorm.io/gorm v1.30.0/go.mod h1:8Z33v652h4//uMA76KjeDH8mJXPm1QNCYrMeatR0DOE=
 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/pkg/config/store/config.go b/pkg/config/store/config.go
index 6797e83d..dbf61799 100644
--- a/pkg/config/store/config.go
+++ b/pkg/config/store/config.go
@@ -26,7 +26,9 @@ var _ config.Config = &Config{}
 type Type = string
 
 const (
-       Memory Type = "memory"
+       Memory   Type = "memory"
+       MySQL    Type = "mysql"
+       Postgres Type = "postgres"
 )
 
 // Config defines the ResourceStore configuration
diff --git a/pkg/core/bootstrap/init.go b/pkg/core/bootstrap/init.go
index 32d94f36..41aee604 100644
--- a/pkg/core/bootstrap/init.go
+++ b/pkg/core/bootstrap/init.go
@@ -28,4 +28,6 @@ import (
        _ "github.com/apache/dubbo-admin/pkg/discovery/mock"
        _ "github.com/apache/dubbo-admin/pkg/engine/kubernetes"
        _ "github.com/apache/dubbo-admin/pkg/store/memory"
+       _ "github.com/apache/dubbo-admin/pkg/store/mysql"
+       _ "github.com/apache/dubbo-admin/pkg/store/postgres"
 )
diff --git a/pkg/store/dbcommon/connection_pool.go 
b/pkg/store/dbcommon/connection_pool.go
new file mode 100644
index 00000000..c9487825
--- /dev/null
+++ b/pkg/store/dbcommon/connection_pool.go
@@ -0,0 +1,226 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package dbcommon
+
+import (
+       "database/sql"
+       "fmt"
+       "sync"
+       "time"
+
+       "gorm.io/gorm"
+
+       storecfg "github.com/apache/dubbo-admin/pkg/config/store"
+       "github.com/apache/dubbo-admin/pkg/core/logger"
+)
+
+var (
+       // pools stores all connection pools indexed by a unique key 
(storeType:address)
+       pools = make(map[string]*ConnectionPool)
+       // poolsMutex protects concurrent access to the pools map
+       poolsMutex sync.RWMutex
+)
+
+// ConnectionPoolConfig defines connection pool configuration
+type ConnectionPoolConfig struct {
+       MaxIdleConns    int           // Maximum number of idle connections
+       MaxOpenConns    int           // Maximum number of open connections
+       ConnMaxLifetime time.Duration // Maximum lifetime of a connection
+       ConnMaxIdleTime time.Duration // Maximum idle time of a connection
+}
+
+// ConnectionPool manages database connections with connection pooling
+type ConnectionPool struct {
+       db        *gorm.DB
+       sqlDB     *sql.DB
+       address   string
+       storeType storecfg.Type
+       mu        sync.RWMutex
+       refCount  int       // Reference counter for the number of stores using 
this pool
+       closeOnce sync.Once // Ensure Close is called only once
+       closed    bool      // Track if the pool is closed
+}
+
+// GetOrCreatePool returns or creates a connection pool for the given store 
type and address
+// It implements a singleton pattern with reference counting to allow pool 
reuse across multiple stores
+// If a pool already exists for the same storeType and address, it increments 
the reference count and returns the existing pool
+// Otherwise, it creates a new pool with the provided dialector
+func GetOrCreatePool(dialector gorm.Dialector, storeType storecfg.Type, 
address string, config *ConnectionPoolConfig) (*ConnectionPool, error) {
+       if storeType == storecfg.Memory {
+               return nil, fmt.Errorf("memory pool store is no need to create 
connection pool")
+       }
+
+       poolKey := fmt.Sprintf("%s:%s", storeType, address)
+
+       poolsMutex.Lock()
+       defer poolsMutex.Unlock()
+
+       // Check if pool already exists
+       if existingPool, exists := pools[poolKey]; exists {
+               // Increment reference count when reusing existing pool
+               existingPool.refCount++
+               logger.Infof("Reusing %s connection pool: address=%s, 
refCount=%d", storeType, address, existingPool.refCount)
+               return existingPool, nil
+       }
+
+       // Create new pool
+       if config == nil {
+               config = DefaultConnectionPoolConfig()
+       }
+
+       pool, err := NewConnectionPool(dialector, storeType, address, config)
+       if err != nil {
+               return nil, err
+       }
+
+       // Store the pool
+       pools[poolKey] = pool
+       logger.Infof("%s connection pool created successfully: address=%s, 
maxIdleConns=%d, maxOpenConns=%d",
+               storeType, address, config.MaxIdleConns, config.MaxOpenConns)
+
+       return pool, nil
+}
+
+// RemovePool removes a pool from the global registry
+// This should only be called when the pool's reference count reaches zero
+func RemovePool(storeType storecfg.Type, address string) {
+       poolKey := fmt.Sprintf("%s:%s", storeType, address)
+
+       poolsMutex.Lock()
+       defer poolsMutex.Unlock()
+
+       delete(pools, poolKey)
+       logger.Infof("Removed %s connection pool from registry: address=%s", 
storeType, address)
+}
+
+// DefaultConnectionPoolConfig returns default connection pool configuration
+func DefaultConnectionPoolConfig() *ConnectionPoolConfig {
+       return &ConnectionPoolConfig{
+               MaxIdleConns:    10,               // Default: 10 idle 
connections
+               MaxOpenConns:    100,              // Default: 100 max open 
connections
+               ConnMaxLifetime: time.Hour,        // Default: 1 hour max 
lifetime
+               ConnMaxIdleTime: 10 * time.Minute, // Default: 10 minutes max 
idle time
+       }
+}
+
+// NewConnectionPool creates a new connection pool
+func NewConnectionPool(dialector gorm.Dialector, storeType storecfg.Type, 
address string, config *ConnectionPoolConfig) (*ConnectionPool, error) {
+       db, err := gorm.Open(dialector, &gorm.Config{})
+       if err != nil {
+               return nil, fmt.Errorf("failed to connect to %s: %w", 
storeType, err)
+       }
+
+       sqlDB, err := db.DB()
+       if err != nil {
+               return nil, fmt.Errorf("failed to get underlying sql.DB: %w", 
err)
+       }
+
+       // Configure connection pool
+       sqlDB.SetMaxIdleConns(config.MaxIdleConns)
+       sqlDB.SetMaxOpenConns(config.MaxOpenConns)
+       sqlDB.SetConnMaxLifetime(config.ConnMaxLifetime)
+       sqlDB.SetConnMaxIdleTime(config.ConnMaxIdleTime)
+
+       return &ConnectionPool{
+               db:        db,
+               sqlDB:     sqlDB,
+               address:   address,
+               storeType: storeType,
+               refCount:  1, // Initial reference count
+       }, nil
+}
+
+// GetDB returns the gorm.DB instance
+func (p *ConnectionPool) GetDB() *gorm.DB {
+       p.mu.RLock()
+       defer p.mu.RUnlock()
+       return p.db
+}
+
+// Address returns the connection address
+func (p *ConnectionPool) Address() string {
+       p.mu.RLock()
+       defer p.mu.RUnlock()
+       return p.address
+}
+
+// RefCount returns the current reference count
+func (p *ConnectionPool) RefCount() int {
+       p.mu.RLock()
+       defer p.mu.RUnlock()
+       return p.refCount
+}
+
+// IncrementRef increments the reference count
+func (p *ConnectionPool) IncrementRef() {
+       p.mu.Lock()
+       defer p.mu.Unlock()
+       p.refCount++
+}
+
+// Close closes the connection pool gracefully with reference counting
+// The pool is only actually closed when refCount reaches 0
+func (p *ConnectionPool) Close() error {
+       p.mu.Lock()
+       defer p.mu.Unlock()
+
+       if p.closed {
+               return nil // Already closed
+       }
+
+       p.refCount--
+       logger.Infof("Decremented %s connection pool refCount: address=%s, 
refCount=%d", p.storeType, p.address, p.refCount)
+
+       // Only close the pool when no stores are using it
+       if p.refCount <= 0 {
+               var closeErr error
+               p.closeOnce.Do(func() {
+                       if p.sqlDB != nil {
+                               logger.Infof("Closing %s connection pool: 
address=%s", p.storeType, p.address)
+                               closeErr = p.sqlDB.Close()
+                               p.closed = true
+                       }
+                       RemovePool(p.storeType, p.address)
+               })
+               return closeErr
+       }
+
+       return nil
+}
+
+// Ping checks if the database connection is alive
+func (p *ConnectionPool) Ping() error {
+       p.mu.RLock()
+       defer p.mu.RUnlock()
+
+       if p.sqlDB != nil {
+               return p.sqlDB.Ping()
+       }
+       return fmt.Errorf("connection pool not initialized")
+}
+
+// Stats returns database connection pool statistics
+func (p *ConnectionPool) Stats() sql.DBStats {
+       p.mu.RLock()
+       defer p.mu.RUnlock()
+
+       if p.sqlDB != nil {
+               return p.sqlDB.Stats()
+       }
+       return sql.DBStats{}
+}
diff --git a/pkg/store/dbcommon/connection_pool_test.go 
b/pkg/store/dbcommon/connection_pool_test.go
new file mode 100644
index 00000000..a9e5c71d
--- /dev/null
+++ b/pkg/store/dbcommon/connection_pool_test.go
@@ -0,0 +1,502 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package dbcommon
+
+import (
+       "sync"
+       "testing"
+       "time"
+
+       "github.com/stretchr/testify/assert"
+       "github.com/stretchr/testify/require"
+       "gorm.io/driver/sqlite"
+
+       storecfg "github.com/apache/dubbo-admin/pkg/config/store"
+)
+
+func TestDefaultConnectionPoolConfig(t *testing.T) {
+       config := DefaultConnectionPoolConfig()
+
+       assert.NotNil(t, config)
+       assert.Equal(t, 10, config.MaxIdleConns)
+       assert.Equal(t, 100, config.MaxOpenConns)
+       assert.Equal(t, time.Hour, config.ConnMaxLifetime)
+       assert.Equal(t, 10*time.Minute, config.ConnMaxIdleTime)
+}
+
+func TestNewConnectionPool(t *testing.T) {
+       dialector := sqlite.Open("file::memory:?cache=shared")
+       config := DefaultConnectionPoolConfig()
+
+       pool, err := NewConnectionPool(dialector, storecfg.MySQL, 
"test-address", config)
+
+       assert.NoError(t, err)
+       assert.NotNil(t, pool)
+       assert.NotNil(t, pool.db)
+       assert.NotNil(t, pool.sqlDB)
+       assert.Equal(t, "test-address", pool.address)
+       assert.Equal(t, storecfg.MySQL, pool.storeType)
+       assert.Equal(t, 1, pool.refCount)
+       assert.False(t, pool.closed)
+
+       // Cleanup
+       if err := pool.Close(); err != nil {
+               return
+       }
+}
+
+func TestConnectionPool_GetDB(t *testing.T) {
+       dialector := sqlite.Open("file::memory:?cache=shared")
+       pool, err := NewConnectionPool(dialector, storecfg.MySQL, 
"test-address", DefaultConnectionPoolConfig())
+       require.NoError(t, err)
+       defer func(pool *ConnectionPool) {
+               err := pool.Close()
+               if err != nil {
+                       return
+               }
+       }(pool)
+
+       db := pool.GetDB()
+       assert.NotNil(t, db)
+}
+
+func TestConnectionPool_Address(t *testing.T) {
+       dialector := sqlite.Open("file::memory:?cache=shared")
+       pool, err := NewConnectionPool(dialector, storecfg.MySQL, 
"test-address-123", DefaultConnectionPoolConfig())
+       require.NoError(t, err)
+       defer func(pool *ConnectionPool) {
+               err := pool.Close()
+               if err != nil {
+                       return
+               }
+       }(pool)
+
+       assert.Equal(t, "test-address-123", pool.Address())
+}
+
+func TestConnectionPool_RefCount(t *testing.T) {
+       dialector := sqlite.Open("file::memory:?cache=shared")
+       pool, err := NewConnectionPool(dialector, storecfg.MySQL, 
"test-address", DefaultConnectionPoolConfig())
+       require.NoError(t, err)
+       defer func(pool *ConnectionPool) {
+               err := pool.Close()
+               if err != nil {
+                       return
+               }
+       }(pool)
+
+       // Initial ref count should be 1
+       assert.Equal(t, 1, pool.RefCount())
+
+       // Increment ref count
+       pool.IncrementRef()
+       assert.Equal(t, 2, pool.RefCount())
+
+       pool.IncrementRef()
+       assert.Equal(t, 3, pool.RefCount())
+}
+
+func TestConnectionPool_IncrementRef(t *testing.T) {
+       dialector := sqlite.Open("file::memory:?cache=shared")
+       pool, err := NewConnectionPool(dialector, storecfg.MySQL, 
"test-address", DefaultConnectionPoolConfig())
+       require.NoError(t, err)
+       defer func(pool *ConnectionPool) {
+               err := pool.Close()
+               if err != nil {
+                       return
+               }
+       }(pool)
+
+       initialRefCount := pool.RefCount()
+       pool.IncrementRef()
+       assert.Equal(t, initialRefCount+1, pool.RefCount())
+}
+
+func TestConnectionPool_Close(t *testing.T) {
+       dialector := sqlite.Open("file::memory:?cache=shared")
+       pool, err := NewConnectionPool(dialector, storecfg.MySQL, 
"test-address", DefaultConnectionPoolConfig())
+       require.NoError(t, err)
+
+       // First close should decrement ref count
+       err = pool.Close()
+       assert.NoError(t, err)
+       assert.Equal(t, 0, pool.refCount)
+       assert.True(t, pool.closed)
+
+       // Second close should be a no-op
+       err = pool.Close()
+       assert.NoError(t, err)
+       assert.Equal(t, 0, pool.refCount)
+}
+
+func TestConnectionPool_CloseWithMultipleReferences(t *testing.T) {
+       dialector := sqlite.Open("file::memory:?cache=shared")
+       pool, err := NewConnectionPool(dialector, storecfg.MySQL, 
"test-address", DefaultConnectionPoolConfig())
+       require.NoError(t, err)
+
+       // Increment ref count
+       pool.IncrementRef()
+       pool.IncrementRef()
+       assert.Equal(t, 3, pool.RefCount())
+
+       // First close should just decrement
+       err = pool.Close()
+       assert.NoError(t, err)
+       assert.Equal(t, 2, pool.RefCount())
+       assert.False(t, pool.closed)
+
+       // Second close should just decrement
+       err = pool.Close()
+       assert.NoError(t, err)
+       assert.Equal(t, 1, pool.RefCount())
+       assert.False(t, pool.closed)
+
+       // Third close should actually close
+       err = pool.Close()
+       assert.NoError(t, err)
+       assert.Equal(t, 0, pool.RefCount())
+       assert.True(t, pool.closed)
+}
+
+func TestConnectionPool_Ping(t *testing.T) {
+       dialector := sqlite.Open("file::memory:?cache=shared")
+       pool, err := NewConnectionPool(dialector, storecfg.MySQL, 
"test-address", DefaultConnectionPoolConfig())
+       require.NoError(t, err)
+       defer func(pool *ConnectionPool) {
+               err := pool.Close()
+               if err != nil {
+                       return
+               }
+       }(pool)
+
+       err = pool.Ping()
+       assert.NoError(t, err)
+}
+
+func TestConnectionPool_Stats(t *testing.T) {
+       dialector := sqlite.Open("file::memory:?cache=shared")
+       config := &ConnectionPoolConfig{
+               MaxIdleConns:    5,
+               MaxOpenConns:    10,
+               ConnMaxLifetime: time.Hour,
+               ConnMaxIdleTime: 10 * time.Minute,
+       }
+       pool, err := NewConnectionPool(dialector, storecfg.MySQL, 
"test-address", config)
+       require.NoError(t, err)
+       defer func(pool *ConnectionPool) {
+               err := pool.Close()
+               if err != nil {
+                       return
+               }
+       }(pool)
+
+       stats := pool.Stats()
+       assert.NotNil(t, stats)
+       // The max values should match configuration
+       assert.Equal(t, 10, stats.MaxOpenConnections)
+}
+
+func TestGetOrCreatePool_CreateNew(t *testing.T) {
+       // Clear the pools map for isolated test
+       poolsMutex.Lock()
+       pools = make(map[string]*ConnectionPool)
+       poolsMutex.Unlock()
+
+       dialector := sqlite.Open("file::memory:?cache=shared")
+       pool, err := GetOrCreatePool(dialector, storecfg.MySQL, "test-address", 
DefaultConnectionPoolConfig())
+
+       assert.NoError(t, err)
+       assert.NotNil(t, pool)
+       assert.Equal(t, 1, pool.RefCount())
+
+       // Cleanup
+       if err := pool.Close(); err != nil {
+               return
+       }
+}
+
+func TestGetOrCreatePool_ReuseExisting(t *testing.T) {
+       // Clear the pools map for isolated test
+       poolsMutex.Lock()
+       pools = make(map[string]*ConnectionPool)
+       poolsMutex.Unlock()
+
+       dialector := sqlite.Open("file::memory:?cache=shared")
+
+       // Create first pool
+       pool1, err := GetOrCreatePool(dialector, storecfg.MySQL, 
"test-address", DefaultConnectionPoolConfig())
+       require.NoError(t, err)
+       assert.Equal(t, 1, pool1.RefCount())
+
+       // Get same pool again
+       pool2, err := GetOrCreatePool(dialector, storecfg.MySQL, 
"test-address", DefaultConnectionPoolConfig())
+       require.NoError(t, err)
+
+       // Should be the same pool with incremented ref count
+       assert.Equal(t, pool1, pool2)
+       assert.Equal(t, 2, pool1.RefCount())
+
+       // Cleanup
+       pool1.Close()
+       pool2.Close()
+}
+
+func TestGetOrCreatePool_DifferentAddresses(t *testing.T) {
+       // Clear the pools map for isolated test
+       poolsMutex.Lock()
+       pools = make(map[string]*ConnectionPool)
+       poolsMutex.Unlock()
+
+       dialector1 := sqlite.Open("file::memory:?cache=shared")
+       dialector2 := sqlite.Open("file::memory:?cache=shared")
+
+       // Create pool for address 1
+       pool1, err := GetOrCreatePool(dialector1, storecfg.MySQL, "address-1", 
DefaultConnectionPoolConfig())
+       require.NoError(t, err)
+
+       // Create pool for address 2
+       pool2, err := GetOrCreatePool(dialector2, storecfg.MySQL, "address-2", 
DefaultConnectionPoolConfig())
+       require.NoError(t, err)
+
+       // Should be different pools
+       assert.NotEqual(t, pool1, pool2)
+       assert.Equal(t, "address-1", pool1.Address())
+       assert.Equal(t, "address-2", pool2.Address())
+
+       // Cleanup
+       pool1.Close()
+       pool2.Close()
+}
+
+func TestGetOrCreatePool_DifferentStoreTypes(t *testing.T) {
+       // Clear the pools map for isolated test
+       poolsMutex.Lock()
+       pools = make(map[string]*ConnectionPool)
+       poolsMutex.Unlock()
+
+       dialector1 := sqlite.Open("file::memory:?cache=shared")
+       dialector2 := sqlite.Open("file::memory:?cache=shared")
+
+       // Create MySQL pool
+       pool1, err := GetOrCreatePool(dialector1, storecfg.MySQL, 
"test-address", DefaultConnectionPoolConfig())
+       require.NoError(t, err)
+
+       // Create Postgres pool with same address
+       pool2, err := GetOrCreatePool(dialector2, storecfg.Postgres, 
"test-address", DefaultConnectionPoolConfig())
+       require.NoError(t, err)
+
+       // Should be different pools (different store types)
+       assert.NotEqual(t, pool1, pool2)
+       assert.Equal(t, storecfg.MySQL, pool1.storeType)
+       assert.Equal(t, storecfg.Postgres, pool2.storeType)
+
+       // Cleanup
+       pool1.Close()
+       pool2.Close()
+}
+
+func TestGetOrCreatePool_MemoryStoreError(t *testing.T) {
+       dialector := sqlite.Open("file::memory:?cache=shared")
+
+       pool, err := GetOrCreatePool(dialector, storecfg.Memory, 
"test-address", DefaultConnectionPoolConfig())
+
+       assert.Error(t, err)
+       assert.Nil(t, pool)
+       assert.Contains(t, err.Error(), "memory pool store is no need to create 
connection pool")
+}
+
// TestRemovePool verifies that closing the last reference of a pool removes
// it from the package-global registry.
func TestRemovePool(t *testing.T) {
	// Clear the pools map for isolated test
	poolsMutex.Lock()
	pools = make(map[string]*ConnectionPool)
	poolsMutex.Unlock()

	dialector := sqlite.Open("file::memory:?cache=shared")
	pool, err := GetOrCreatePool(dialector, storecfg.MySQL, "test-address", DefaultConnectionPoolConfig())
	require.NoError(t, err)

	// Verify pool exists in registry. The key format "<storeType>:<address>"
	// presumably matches what GetOrCreatePool builds — keep in sync if the
	// key scheme changes.
	poolKey := "mysql:test-address"
	poolsMutex.RLock()
	_, exists := pools[poolKey]
	poolsMutex.RUnlock()
	assert.True(t, exists, "Pool should exist in registry after creation")

	// Close pool (which should call RemovePool internally)
	err = pool.Close()
	require.NoError(t, err)

	// Verify pool was removed from registry
	poolsMutex.RLock()
	_, exists = pools[poolKey]
	poolsMutex.RUnlock()
	assert.False(t, exists, "Pool should be removed from registry after close")
}
+
+func TestConnectionPool_ConcurrentAccess(t *testing.T) {
+       dialector := sqlite.Open("file::memory:?cache=shared")
+       pool, err := NewConnectionPool(dialector, storecfg.MySQL, 
"test-address", DefaultConnectionPoolConfig())
+       require.NoError(t, err)
+       defer pool.Close()
+
+       const numGoroutines = 10
+       var wg sync.WaitGroup
+       wg.Add(numGoroutines)
+
+       // Concurrently access pool methods
+       for i := 0; i < numGoroutines; i++ {
+               go func() {
+                       defer wg.Done()
+
+                       // Test concurrent reads
+                       _ = pool.GetDB()
+                       _ = pool.Address()
+                       _ = pool.RefCount()
+                       _ = pool.Stats()
+
+                       // Test concurrent ping
+                       _ = pool.Ping()
+               }()
+       }
+
+       wg.Wait()
+}
+
// TestConnectionPool_ConcurrentIncrement verifies that IncrementRef is safe
// under concurrency and that the final count reflects every increment.
func TestConnectionPool_ConcurrentIncrement(t *testing.T) {
	dialector := sqlite.Open("file::memory:?cache=shared")
	pool, err := NewConnectionPool(dialector, storecfg.MySQL, "test-address", DefaultConnectionPoolConfig())
	require.NoError(t, err)

	const numGoroutines = 100
	var wg sync.WaitGroup
	wg.Add(numGoroutines)

	// Concurrently increment ref count
	for i := 0; i < numGoroutines; i++ {
		go func() {
			defer wg.Done()
			pool.IncrementRef()
		}()
	}

	wg.Wait()

	// Verify ref count (initial 1 + 100 increments)
	assert.Equal(t, 101, pool.RefCount())

	// Cleanup: note the `<=` — numGoroutines+1 Close calls release the
	// initial reference plus each increment, so the pool is fully torn down.
	for i := 0; i <= numGoroutines; i++ {
		pool.Close()
	}
}
+
// TestGetOrCreatePool_ConcurrentCreation verifies that concurrent callers
// racing to create the same (store type, address) pool all end up sharing a
// single pool instance, with one reference taken per caller.
func TestGetOrCreatePool_ConcurrentCreation(t *testing.T) {
	// Clear the pools map for isolated test
	poolsMutex.Lock()
	pools = make(map[string]*ConnectionPool)
	poolsMutex.Unlock()

	const numGoroutines = 10
	var wg sync.WaitGroup
	wg.Add(numGoroutines)

	// One slot per goroutine, so no extra lock is needed around the results.
	createdPools := make([]*ConnectionPool, numGoroutines)

	// Concurrently try to create/get the same pool
	for i := 0; i < numGoroutines; i++ {
		go func(index int) {
			defer wg.Done()
			dialector := sqlite.Open("file::memory:?cache=shared")
			pool, err := GetOrCreatePool(dialector, storecfg.MySQL, "concurrent-address", DefaultConnectionPoolConfig())
			assert.NoError(t, err)
			createdPools[index] = pool
		}(i)
	}

	wg.Wait()

	// All goroutines should get the same pool
	firstPool := createdPools[0]
	for i := 1; i < numGoroutines; i++ {
		assert.Equal(t, firstPool, createdPools[i])
	}

	// Ref count should be numGoroutines — each successful GetOrCreatePool
	// call takes one reference on the shared pool.
	assert.Equal(t, numGoroutines, firstPool.RefCount())

	// Cleanup: one Close per reference taken above.
	for i := 0; i < numGoroutines; i++ {
		firstPool.Close()
	}
}
+
+func TestConnectionPool_CustomConfig(t *testing.T) {
+       dialector := sqlite.Open("file::memory:?cache=shared")
+       config := &ConnectionPoolConfig{
+               MaxIdleConns:    5,
+               MaxOpenConns:    20,
+               ConnMaxLifetime: 30 * time.Minute,
+               ConnMaxIdleTime: 5 * time.Minute,
+       }
+
+       pool, err := NewConnectionPool(dialector, storecfg.MySQL, 
"test-address", config)
+       require.NoError(t, err)
+       defer pool.Close()
+
+       stats := pool.Stats()
+       assert.Equal(t, 20, stats.MaxOpenConnections)
+}
+
+func TestConnectionPool_NilConfig(t *testing.T) {
+       // Clear the pools map for isolated test
+       poolsMutex.Lock()
+       pools = make(map[string]*ConnectionPool)
+       poolsMutex.Unlock()
+
+       dialector := sqlite.Open("file::memory:?cache=shared")
+
+       // Pass nil config - should use default
+       pool, err := GetOrCreatePool(dialector, storecfg.MySQL, "test-address", 
nil)
+       require.NoError(t, err)
+       defer pool.Close()
+
+       assert.NotNil(t, pool)
+       stats := pool.Stats()
+       // Should have default max open connections (100)
+       assert.Equal(t, 100, stats.MaxOpenConnections)
+}
+
+func TestConnectionPool_MultipleCloseIdempotent(t *testing.T) {
+       dialector := sqlite.Open("file::memory:?cache=shared")
+       pool, err := NewConnectionPool(dialector, storecfg.MySQL, 
"test-address", DefaultConnectionPoolConfig())
+       require.NoError(t, err)
+
+       // Close multiple times
+       err = pool.Close()
+       assert.NoError(t, err)
+
+       err = pool.Close()
+       assert.NoError(t, err)
+
+       err = pool.Close()
+       assert.NoError(t, err)
+
+       // Should still be closed
+       assert.True(t, pool.closed)
+       assert.Equal(t, 0, pool.refCount)
+}
diff --git a/pkg/store/dbcommon/gorm_store.go b/pkg/store/dbcommon/gorm_store.go
new file mode 100644
index 00000000..dd7d6078
--- /dev/null
+++ b/pkg/store/dbcommon/gorm_store.go
@@ -0,0 +1,579 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package dbcommon
+
+import (
+       "errors"
+       "fmt"
+       "reflect"
+       "sort"
+
+       "gorm.io/gorm"
+       "k8s.io/client-go/tools/cache"
+
+       "github.com/apache/dubbo-admin/pkg/common/bizerror"
+       "github.com/apache/dubbo-admin/pkg/core/logger"
+       "github.com/apache/dubbo-admin/pkg/core/resource/model"
+       "github.com/apache/dubbo-admin/pkg/core/runtime"
+       "github.com/apache/dubbo-admin/pkg/core/store"
+)
+
// GormStore is a GORM-backed store implementation for Dubbo resources
// It uses GORM for database operations and maintains in-memory indices for fast lookups
// This implementation is database-agnostic and works with any GORM-supported database
type GormStore struct {
	pool    *ConnectionPool    // Shared connection pool with reference counting
	kind    model.ResourceKind // Resource kind this store instance is dedicated to; also selects the table
	address string             // Database address this store was created for
	indices *Index             // In-memory index with thread-safe operations
	stopCh  chan struct{}      // Closed on shutdown to signal in-flight operations
}

// Compile-time assertion that GormStore satisfies store.ManagedResourceStore.
var _ store.ManagedResourceStore = &GormStore{}
+
+// NewGormStore creates a new GORM store for the specified resource kind
+func NewGormStore(kind model.ResourceKind, address string, pool 
*ConnectionPool) *GormStore {
+       return &GormStore{
+               kind:    kind,
+               address: address,
+               pool:    pool,
+               indices: NewIndex(),
+               stopCh:  make(chan struct{}),
+       }
+}
+
+// Init initializes the GORM store by migrating the schema and rebuilding 
indices
+func (gs *GormStore) Init(_ runtime.BuilderContext) error {
+       // Perform table migration
+       db := gs.pool.GetDB()
+       // Use Scopes to set the table name dynamically for migration
+       if err := 
db.Scopes(TableScope(gs.kind.ToString())).AutoMigrate(&ResourceModel{}); err != 
nil {
+               return fmt.Errorf("failed to migrate schema for %s: %w", 
gs.kind.ToString(), err)
+       }
+
+       // Rebuild indices from existing data in the database
+       if err := gs.rebuildIndices(); err != nil {
+               return fmt.Errorf("failed to rebuild indices for %s: %w", 
gs.kind.ToString(), err)
+       }
+
+       logger.Infof("GORM store initialized for resource kind: %s", 
gs.kind.ToString())
+       return nil
+}
+
// Start starts the GORM store and monitors for shutdown signal.
// A background goroutine waits on stopCh; when it fires, the store closes its
// internal stop channel and releases its reference on the shared connection
// pool (pool.Close decrements the ref count; the pool itself shuts down only
// when the last reference is released).
func (gs *GormStore) Start(_ runtime.Runtime, stopCh <-chan struct{}) error {
	logger.Infof("GORM store started for resource kind: %s", gs.kind.ToString())

	// Monitor stop channel for graceful shutdown in a goroutine
	go func() {
		<-stopCh
		logger.Infof("GORM store for %s received stop signal, initiating graceful shutdown", gs.kind.ToString())

		// Close the internal stop channel to signal any ongoing operations
		close(gs.stopCh)

		// Decrement the reference count and potentially close the connection pool
		if gs.pool != nil {
			if err := gs.pool.Close(); err != nil {
				logger.Errorf("Failed to close connection pool for %s: %v", gs.kind.ToString(), err)
			} else {
				logger.Infof("GORM store for %s shutdown completed", gs.kind.ToString())
			}
		}
	}()

	return nil
}
+
// Add inserts a new resource row into the database and, on success, registers
// the resource in the in-memory indices. It returns an assertion error for
// non-Resource inputs, a kind-mismatch error for resources of a foreign kind,
// and ErrorResourceAlreadyExists when a row with the same key is present.
//
// NOTE(review): the Count/Create pair below is not atomic. Two concurrent
// Adds for the same key can both observe count == 0; one Create would then
// fail with a raw driver error rather than ErrorResourceAlreadyExists
// (assuming resource_key carries a unique constraint — confirm in the model).
func (gs *GormStore) Add(obj interface{}) error {
	resource, ok := obj.(model.Resource)
	if !ok {
		return bizerror.NewAssertionError("Resource", reflect.TypeOf(obj).Name())
	}

	if resource.ResourceKind() != gs.kind {
		return fmt.Errorf("resource kind mismatch: expected %s, got %s", gs.kind, resource.ResourceKind())
	}

	// Existence check first so callers get the typed AlreadyExists error.
	var count int64
	db := gs.pool.GetDB()
	err := db.Scopes(TableScope(gs.kind.ToString())).Model(&ResourceModel{}).
		Where("resource_key = ?", resource.ResourceKey()).
		Count(&count).Error
	if err != nil {
		return err
	}
	if count > 0 {
		return store.ErrorResourceAlreadyExists(
			resource.ResourceKind().ToString(),
			resource.ResourceMeta().Name,
			resource.MeshName(),
		)
	}

	m, err := FromResource(resource)
	if err != nil {
		return err
	}

	if err := db.Scopes(TableScope(gs.kind.ToString())).Create(m).Error; err != nil {
		return err
	}

	// Update indices after successful DB operation
	gs.indices.UpdateResource(resource, nil)

	return nil
}
+
+// Update modifies an existing resource in the database
+func (gs *GormStore) Update(obj interface{}) error {
+       resource, ok := obj.(model.Resource)
+       if !ok {
+               return bizerror.NewAssertionError("Resource", 
reflect.TypeOf(obj).Name())
+       }
+
+       if resource.ResourceKind() != gs.kind {
+               return fmt.Errorf("resource kind mismatch: expected %s, got 
%s", gs.kind, resource.ResourceKind())
+       }
+
+       // Get old resource for index update
+       oldResource, exists, err := gs.GetByKey(resource.ResourceKey())
+       if err != nil {
+               return err
+       }
+       if !exists {
+               return store.ErrorResourceNotFound(
+                       resource.ResourceKind().ToString(),
+                       resource.ResourceMeta().Name,
+                       resource.MeshName(),
+               )
+       }
+
+       m, err := FromResource(resource)
+       if err != nil {
+               return err
+       }
+
+       db := gs.pool.GetDB()
+       result := 
db.Scopes(TableScope(gs.kind.ToString())).Model(&ResourceModel{}).
+               Where("resource_key = ?", resource.ResourceKey()).
+               Updates(map[string]interface{}{
+                       "name":       m.Name,
+                       "mesh":       m.Mesh,
+                       "data":       m.Data,
+                       "updated_at": m.UpdatedAt,
+               })
+
+       if result.Error != nil {
+               return result.Error
+       }
+
+       if result.RowsAffected == 0 {
+               return store.ErrorResourceNotFound(
+                       resource.ResourceKind().ToString(),
+                       resource.ResourceMeta().Name,
+                       resource.MeshName(),
+               )
+       }
+
+       // Update indices: remove old and add new
+       gs.indices.UpdateResource(resource, oldResource.(model.Resource))
+
+       return nil
+}
+
+// Delete removes a resource from the database
+func (gs *GormStore) Delete(obj interface{}) error {
+       resource, ok := obj.(model.Resource)
+       if !ok {
+               return bizerror.NewAssertionError("Resource", 
reflect.TypeOf(obj).Name())
+       }
+
+       db := gs.pool.GetDB()
+       result := db.Scopes(TableScope(gs.kind.ToString())).
+               Where("resource_key = ?", resource.ResourceKey()).
+               Delete(&ResourceModel{})
+
+       if result.Error != nil {
+               return result.Error
+       }
+
+       if result.RowsAffected == 0 {
+               return store.ErrorResourceNotFound(
+                       resource.ResourceKind().ToString(),
+                       resource.ResourceMeta().Name,
+                       resource.MeshName(),
+               )
+       }
+
+       // Remove from indices
+       gs.indices.RemoveResource(resource)
+
+       return nil
+}
+
+// List returns all resources of the configured kind from the database
+func (gs *GormStore) List() []interface{} {
+       var models []ResourceModel
+       db := gs.pool.GetDB()
+       if err := 
db.Scopes(TableScope(gs.kind.ToString())).Model(&ResourceModel{}).Find(&models).Error;
 err != nil {
+               logger.Errorf("failed to list resources: %v", err)
+               return []interface{}{}
+       }
+
+       result := make([]interface{}, 0, len(models))
+       for _, m := range models {
+               resource, err := m.ToResource()
+               if err != nil {
+                       logger.Errorf("failed to deserialize resource: %v", err)
+                       continue
+               }
+               result = append(result, resource)
+       }
+       return result
+}
+
+// ListKeys returns all resource keys of the configured kind from the database
+func (gs *GormStore) ListKeys() []string {
+       var keys []string
+       db := gs.pool.GetDB()
+       if err := 
db.Scopes(TableScope(gs.kind.ToString())).Model(&ResourceModel{}).Pluck("resource_key",
 &keys).Error; err != nil {
+               logger.Errorf("failed to list keys: %v", err)
+               return []string{}
+       }
+       return keys
+}
+
+// Get retrieves a resource by its object reference
+func (gs *GormStore) Get(obj interface{}) (item interface{}, exists bool, err 
error) {
+       resource, ok := obj.(model.Resource)
+       if !ok {
+               return nil, false, bizerror.NewAssertionError("Resource", 
reflect.TypeOf(obj).Name())
+       }
+       return gs.GetByKey(resource.ResourceKey())
+}
+
+// GetByKey retrieves a resource by its unique key
+func (gs *GormStore) GetByKey(key string) (item interface{}, exists bool, err 
error) {
+       var m ResourceModel
+       db := gs.pool.GetDB()
+       result := db.Scopes(TableScope(gs.kind.ToString())).
+               Where("resource_key = ?", key).
+               First(&m)
+
+       if result.Error != nil {
+               if errors.Is(result.Error, gorm.ErrRecordNotFound) {
+                       return nil, false, nil
+               }
+               return nil, false, result.Error
+       }
+
+       resource, err := m.ToResource()
+       if err != nil {
+               return nil, false, err
+       }
+
+       return resource, true, nil
+}
+
+// Replace atomically replaces all resources in the database with the provided 
list
+func (gs *GormStore) Replace(list []interface{}, _ string) error {
+       db := gs.pool.GetDB()
+       return db.Transaction(func(tx *gorm.DB) error {
+               // Delete all existing records for this resource kind
+               if err := tx.Scopes(TableScope(gs.kind.ToString())).
+                       Delete(&ResourceModel{}, "1=1").Error; err != nil {
+                       return err
+               }
+
+               // Clear all indices
+               gs.clearIndices()
+
+               // Return early if list is empty
+               if len(list) == 0 {
+                       return nil
+               }
+
+               // Convert all resources to ResourceModel
+               models := make([]*ResourceModel, 0, len(list))
+               resources := make([]model.Resource, 0, len(list))
+               for _, obj := range list {
+                       resource, ok := obj.(model.Resource)
+                       if !ok {
+                               return bizerror.NewAssertionError("Resource", 
reflect.TypeOf(obj).Name())
+                       }
+
+                       m, err := FromResource(resource)
+                       if err != nil {
+                               return err
+                       }
+                       models = append(models, m)
+                       resources = append(resources, resource)
+               }
+
+               // Batch insert all models at once
+               if err := 
tx.Scopes(TableScope(gs.kind.ToString())).CreateInBatches(models, 100).Error; 
err != nil {
+                       return err
+               }
+
+               // Rebuild indices for all resources
+               for _, resource := range resources {
+                       gs.indices.UpdateResource(resource, nil)
+               }
+
+               return nil
+       })
+}
+
// Resync is a no-op for this store: the database is the source of truth and
// the in-memory indices are maintained synchronously on every mutation.
func (gs *GormStore) Resync() error {
	return nil
}
+
+func (gs *GormStore) Index(indexName string, obj interface{}) ([]interface{}, 
error) {
+       if !gs.indices.IndexExists(indexName) {
+               return nil, fmt.Errorf("index %s does not exist", indexName)
+       }
+
+       indexFunc := gs.indices.GetIndexers()[indexName]
+       indexValues, err := indexFunc(obj)
+       if err != nil {
+               return nil, err
+       }
+
+       if len(indexValues) == 0 {
+               return []interface{}{}, nil
+       }
+
+       return gs.findByIndex(indexName, indexValues[0])
+}
+
+func (gs *GormStore) IndexKeys(indexName, indexedValue string) ([]string, 
error) {
+       if !gs.indices.IndexExists(indexName) {
+               return nil, fmt.Errorf("index %s does not exist", indexName)
+       }
+
+       resources, err := gs.findByIndex(indexName, indexedValue)
+       if err != nil {
+               return nil, err
+       }
+
+       keys := make([]string, 0, len(resources))
+       for _, obj := range resources {
+               if resource, ok := obj.(model.Resource); ok {
+                       keys = append(keys, resource.ResourceKey())
+               }
+       }
+
+       return keys, nil
+}
+
+func (gs *GormStore) ListIndexFuncValues(indexName string) []string {
+       if !gs.indices.IndexExists(indexName) {
+               return []string{}
+       }
+
+       return gs.indices.ListIndexFuncValues(indexName)
+}
+
+func (gs *GormStore) ByIndex(indexName, indexedValue string) ([]interface{}, 
error) {
+       if !gs.indices.IndexExists(indexName) {
+               return nil, fmt.Errorf("index %s does not exist", indexName)
+       }
+
+       return gs.findByIndex(indexName, indexedValue)
+}
+
// GetIndexers exposes the index functions registered on this store.
func (gs *GormStore) GetIndexers() cache.Indexers {
	return gs.indices.GetIndexers()
}
+
// AddIndexers registers additional index functions on the underlying Index;
// any error from the Index implementation is returned unchanged.
func (gs *GormStore) AddIndexers(newIndexers cache.Indexers) error {
	return gs.indices.AddIndexers(newIndexers)
}
+
+func (gs *GormStore) GetByKeys(keys []string) ([]model.Resource, error) {
+       if len(keys) == 0 {
+               return []model.Resource{}, nil
+       }
+
+       var models []ResourceModel
+       db := gs.pool.GetDB()
+       err := 
db.Scopes(TableScope(gs.kind.ToString())).Model(&ResourceModel{}).
+               Where("resource_key IN ?", keys).
+               Find(&models).Error
+       if err != nil {
+               return nil, err
+       }
+
+       resources := make([]model.Resource, 0, len(models))
+       for _, m := range models {
+               resource, err := m.ToResource()
+               if err != nil {
+                       return nil, err
+               }
+               resources = append(resources, resource)
+       }
+
+       return resources, nil
+}
+
+func (gs *GormStore) ListByIndexes(indexes map[string]string) 
([]model.Resource, error) {
+       keys, err := gs.getKeysByIndexes(indexes)
+       if err != nil {
+               return nil, err
+       }
+
+       resources, err := gs.GetByKeys(keys)
+       if err != nil {
+               return nil, err
+       }
+
+       sort.Slice(resources, func(i, j int) bool {
+               return resources[i].ResourceKey() < resources[j].ResourceKey()
+       })
+
+       return resources, nil
+}
+
+func (gs *GormStore) PageListByIndexes(indexes map[string]string, pq 
model.PageReq) (*model.PageData[model.Resource], error) {
+       keys, err := gs.getKeysByIndexes(indexes)
+       if err != nil {
+               return nil, err
+       }
+
+       sort.Strings(keys)
+       total := len(keys)
+
+       if pq.PageOffset >= total {
+               return model.NewPageData(total, pq.PageOffset, pq.PageSize, 
[]model.Resource{}), nil
+       }
+
+       end := pq.PageOffset + pq.PageSize
+       if end > total {
+               end = total
+       }
+
+       pageKeys := keys[pq.PageOffset:end]
+       resources, err := gs.GetByKeys(pageKeys)
+       if err != nil {
+               return nil, err
+       }
+
+       return model.NewPageData(total, pq.PageOffset, pq.PageSize, resources), 
nil
+}
+
+func (gs *GormStore) findByIndex(indexName, indexedValue string) 
([]interface{}, error) {
+       if !gs.indices.IndexExists(indexName) {
+               return nil, fmt.Errorf("index %s does not exist", indexName)
+       }
+
+       // Get resource keys from in-memory index
+       keys := gs.indices.GetKeys(indexName, indexedValue)
+
+       if len(keys) == 0 {
+               return []interface{}{}, nil
+       }
+
+       // Fetch resources from DB by keys
+       resources, err := gs.GetByKeys(keys)
+       if err != nil {
+               return nil, err
+       }
+
+       // Convert to []interface{}
+       result := make([]interface{}, len(resources))
+       for i, resource := range resources {
+               result[i] = resource
+       }
+
+       return result, nil
+}
+
+func (gs *GormStore) getKeysByIndexes(indexes map[string]string) ([]string, 
error) {
+       if len(indexes) == 0 {
+               return gs.ListKeys(), nil
+       }
+
+       var keySet map[string]struct{}
+       first := true
+
+       for indexName, indexValue := range indexes {
+               keys, err := gs.IndexKeys(indexName, indexValue)
+               if err != nil {
+                       return nil, err
+               }
+
+               if first {
+                       keySet = make(map[string]struct{}, len(keys))
+                       for _, key := range keys {
+                               keySet[key] = struct{}{}
+                       }
+                       first = false
+               } else {
+                       nextSet := make(map[string]struct{}, len(keys))
+                       for _, key := range keys {
+                               if _, exists := keySet[key]; exists {
+                                       nextSet[key] = struct{}{}
+                               }
+                       }
+                       keySet = nextSet
+               }
+       }
+
+       result := make([]string, 0, len(keySet))
+       for key := range keySet {
+               result = append(result, key)
+       }
+
+       return result, nil
+}
+
// clearIndices drops every entry from the in-memory indices.
// The database rows themselves are untouched.
func (gs *GormStore) clearIndices() {
	gs.indices.Clear()
}
+
+// rebuildIndices rebuilds all in-memory indices from existing database records
+// This is called during initialization to ensure indices are populated with 
existing data
+func (gs *GormStore) rebuildIndices() error {
+       // Clear existing indices first
+       gs.clearIndices()
+
+       // Load all resources from the database
+       var models []ResourceModel
+       db := gs.pool.GetDB()
+       if err := 
db.Scopes(TableScope(gs.kind.ToString())).Model(&ResourceModel{}).Find(&models).Error;
 err != nil {
+               return fmt.Errorf("failed to load resources for index rebuild: 
%w", err)
+       }
+
+       // Rebuild indices for all resources
+       for _, m := range models {
+               resource, err := m.ToResource()
+               if err != nil {
+                       logger.Errorf("failed to deserialize resource during 
index rebuild: %v", err)
+                       continue
+               }
+               // Add resource to indices (nil for oldResource since this is 
initial load)
+               gs.indices.UpdateResource(resource, nil)
+       }
+
+       logger.Infof("Rebuilt indices for %s: loaded %d resources", 
gs.kind.ToString(), len(models))
+       return nil
+}
diff --git a/pkg/store/dbcommon/gorm_store_test.go 
b/pkg/store/dbcommon/gorm_store_test.go
new file mode 100644
index 00000000..7d9d77fb
--- /dev/null
+++ b/pkg/store/dbcommon/gorm_store_test.go
@@ -0,0 +1,1430 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package dbcommon
+
import (
	"encoding/json"
	"fmt"
	"os"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"gorm.io/driver/sqlite"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/tools/cache"

	storecfg "github.com/apache/dubbo-admin/pkg/config/store"
	"github.com/apache/dubbo-admin/pkg/core/resource/model"
)
+
+// mockResource is a mock implementation of model.Resource for testing
+type mockResource struct {
+       Kind      model.ResourceKind `json:"kind"`
+       Key       string             `json:"key"`
+       Mesh      string             `json:"mesh"`
+       Meta      metav1.ObjectMeta  `json:"meta"`
+       Spec      model.ResourceSpec `json:"spec"`
+       ObjectRef runtime.Object     `json:"-"`
+}
+
+func (mr *mockResource) GetObjectKind() schema.ObjectKind {
+       return schema.EmptyObjectKind
+}
+
+func (mr *mockResource) DeepCopyObject() runtime.Object {
+       return mr.ObjectRef
+}
+
+func (mr *mockResource) ResourceKind() model.ResourceKind {
+       return mr.Kind
+}
+
+func (mr *mockResource) ResourceKey() string {
+       return mr.Key
+}
+
+func (mr *mockResource) MeshName() string {
+       return mr.Mesh
+}
+
+func (mr *mockResource) ResourceMeta() metav1.ObjectMeta {
+       return mr.Meta
+}
+
+func (mr *mockResource) ResourceSpec() model.ResourceSpec {
+       return mr.Spec
+}
+
+func (mr *mockResource) String() string {
+       b, err := json.Marshal(mr)
+       if err != nil {
+               return ""
+       }
+       return string(b)
+}
+
+// setupTestStore creates a new GormStore with an in-memory SQLite database 
for testing
+func setupTestStore(t *testing.T) (*GormStore, func()) {
+       // Create temporary SQLite database file for better isolation and 
reliability
+       tmpFile, err := os.CreateTemp("", fmt.Sprintf("test-db-%s-*.db", 
t.Name()))
+       require.NoError(t, err)
+       dbPath := tmpFile.Name()
+       tmpFile.Close()
+
+       dialector := sqlite.Open(dbPath)
+       pool, err := NewConnectionPool(dialector, storecfg.MySQL, t.Name(), 
DefaultConnectionPoolConfig())
+       require.NoError(t, err)
+
+       // Register the mock resource type (only once)
+       kind := model.ResourceKind("TestResource")
+       if _, err := model.ResourceSchemaRegistry().NewResourceFunc(kind); err 
!= nil {
+               model.RegisterResourceSchema(kind, func() model.Resource {
+                       return &mockResource{Kind: kind}
+               })
+       }
+
+       store := NewGormStore(kind, t.Name(), pool)
+
+       // Cleanup function
+       cleanup := func() {
+               pool.Close()
+               os.Remove(dbPath)
+       }
+
+       return store, cleanup
+}
+
+func TestNewGormStore(t *testing.T) {
+       dialector := sqlite.Open("file::memory:?cache=shared")
+       pool, err := NewConnectionPool(dialector, storecfg.MySQL, 
"test-address", DefaultConnectionPoolConfig())
+       require.NoError(t, err)
+       defer func(pool *ConnectionPool) {
+               err := pool.Close()
+               if err != nil {
+                       return
+               }
+       }(pool)
+
+       kind := model.ResourceKind("TestResource")
+       store := NewGormStore(kind, "test-address", pool)
+
+       assert.NotNil(t, store)
+       assert.Equal(t, kind, store.kind)
+       assert.Equal(t, "test-address", store.address)
+       assert.NotNil(t, store.pool)
+       assert.NotNil(t, store.indices)
+       assert.NotNil(t, store.stopCh)
+}
+
+func TestGormStore_Init(t *testing.T) {
+       store, cleanup := setupTestStore(t)
+       defer cleanup()
+
+       err := store.Init(nil)
+       assert.NoError(t, err)
+
+       // Verify table was created by attempting to add a resource
+       mockRes := &mockResource{
+               Kind: "TestResource",
+               Key:  "init-test-key",
+               Mesh: "default",
+               Meta: metav1.ObjectMeta{Name: "init-test"},
+       }
+       err = store.Add(mockRes)
+       assert.NoError(t, err, "Should be able to add resource after Init")
+}
+
+func TestGormStore_AddAndGet(t *testing.T) {
+       store, cleanup := setupTestStore(t)
+       defer cleanup()
+
+       err := store.Init(nil)
+       require.NoError(t, err)
+
+       // Create a mock resource
+       mockRes := &mockResource{
+               Kind: "TestResource",
+               Key:  "test-key",
+               Mesh: "default",
+               Meta: metav1.ObjectMeta{
+                       Name:      "test-resource",
+                       Namespace: "default",
+               },
+       }
+
+       // Add the resource
+       err = store.Add(mockRes)
+       assert.NoError(t, err)
+
+       // Get the resource
+       item, exists, err := store.Get(mockRes)
+       assert.NoError(t, err)
+       assert.True(t, exists)
+       assert.NotNil(t, item)
+
+       // Verify the resource content
+       retrieved := item.(*mockResource)
+       assert.Equal(t, mockRes.ResourceKey(), retrieved.ResourceKey())
+       assert.Equal(t, mockRes.MeshName(), retrieved.MeshName())
+       assert.Equal(t, mockRes.ResourceMeta().Name, 
retrieved.ResourceMeta().Name)
+
+       // Get by key
+       item, exists, err = store.GetByKey("test-key")
+       assert.NoError(t, err)
+       assert.True(t, exists)
+       assert.Equal(t, mockRes.ResourceKey(), 
item.(model.Resource).ResourceKey())
+}
+
+func TestGormStore_AddDuplicate(t *testing.T) {
+       store, cleanup := setupTestStore(t)
+       defer cleanup()
+
+       err := store.Init(nil)
+       require.NoError(t, err)
+
+       mockRes := &mockResource{
+               Kind: "TestResource",
+               Key:  "test-key",
+               Mesh: "default",
+               Meta: metav1.ObjectMeta{
+                       Name:      "test-resource",
+                       Namespace: "default",
+               },
+       }
+
+       // Add the resource
+       err = store.Add(mockRes)
+       assert.NoError(t, err)
+
+       // Try to add the same resource again
+       err = store.Add(mockRes)
+       assert.Error(t, err)
+       assert.Contains(t, err.Error(), "already exists")
+}
+
+func TestGormStore_AddWrongKind(t *testing.T) {
+       store, cleanup := setupTestStore(t)
+       defer cleanup()
+
+       err := store.Init(nil)
+       require.NoError(t, err)
+
+       // Create a resource with different kind
+       mockRes := &mockResource{
+               Kind: "DifferentKind",
+               Key:  "test-key",
+               Mesh: "default",
+               Meta: metav1.ObjectMeta{
+                       Name: "test-resource",
+               },
+       }
+
+       err = store.Add(mockRes)
+       assert.Error(t, err)
+       assert.Contains(t, err.Error(), "resource kind mismatch")
+}
+
+func TestGormStore_Update(t *testing.T) {
+       store, cleanup := setupTestStore(t)
+       defer cleanup()
+
+       err := store.Init(nil)
+       require.NoError(t, err)
+
+       // Create a mock resource
+       mockRes := &mockResource{
+               Kind: "TestResource",
+               Key:  "test-key",
+               Mesh: "default",
+               Meta: metav1.ObjectMeta{
+                       Name:      "test-resource",
+                       Namespace: "default",
+               },
+       }
+
+       // Add the resource
+       err = store.Add(mockRes)
+       require.NoError(t, err)
+
+       // Update the resource
+       updatedRes := &mockResource{
+               Kind: "TestResource",
+               Key:  "test-key",
+               Mesh: "default",
+               Meta: metav1.ObjectMeta{
+                       Name:      "updated-resource",
+                       Namespace: "default",
+               },
+       }
+
+       err = store.Update(updatedRes)
+       assert.NoError(t, err)
+
+       // Get the updated resource
+       item, exists, err := store.Get(updatedRes)
+       assert.NoError(t, err)
+       assert.True(t, exists)
+       assert.Equal(t, "updated-resource", 
item.(*mockResource).ResourceMeta().Name)
+}
+
+func TestGormStore_UpdateNonExistent(t *testing.T) {
+       store, cleanup := setupTestStore(t)
+       defer cleanup()
+
+       err := store.Init(nil)
+       require.NoError(t, err)
+
+       mockRes := &mockResource{
+               Kind: "TestResource",
+               Key:  "non-existent-key",
+               Mesh: "default",
+               Meta: metav1.ObjectMeta{
+                       Name: "test-resource",
+               },
+       }
+
+       err = store.Update(mockRes)
+       assert.Error(t, err)
+       assert.Contains(t, err.Error(), "not found")
+}
+
+func TestGormStore_Delete(t *testing.T) {
+       store, cleanup := setupTestStore(t)
+       defer cleanup()
+
+       err := store.Init(nil)
+       require.NoError(t, err)
+
+       // Create a mock resource
+       mockRes := &mockResource{
+               Kind: "TestResource",
+               Key:  "test-key",
+               Mesh: "default",
+               Meta: metav1.ObjectMeta{
+                       Name:      "test-resource",
+                       Namespace: "default",
+               },
+       }
+
+       // Add the resource
+       err = store.Add(mockRes)
+       require.NoError(t, err)
+
+       // Delete the resource
+       err = store.Delete(mockRes)
+       assert.NoError(t, err)
+
+       // Try to get the deleted resource
+       _, exists, err := store.Get(mockRes)
+       assert.NoError(t, err)
+       assert.False(t, exists)
+}
+
+func TestGormStore_DeleteNonExistent(t *testing.T) {
+       store, cleanup := setupTestStore(t)
+       defer cleanup()
+
+       err := store.Init(nil)
+       require.NoError(t, err)
+
+       mockRes := &mockResource{
+               Kind: "TestResource",
+               Key:  "non-existent-key",
+               Mesh: "default",
+               Meta: metav1.ObjectMeta{
+                       Name: "test-resource",
+               },
+       }
+
+       err = store.Delete(mockRes)
+       assert.Error(t, err)
+       assert.Contains(t, err.Error(), "not found")
+}
+
+func TestGormStore_List(t *testing.T) {
+       store, cleanup := setupTestStore(t)
+       defer cleanup()
+
+       err := store.Init(nil)
+       require.NoError(t, err)
+
+       // Create mock resources
+       mockRes1 := &mockResource{
+               Kind: "TestResource",
+               Key:  "test-key-1",
+               Mesh: "default",
+               Meta: metav1.ObjectMeta{Name: "test-resource-1"},
+       }
+
+       mockRes2 := &mockResource{
+               Kind: "TestResource",
+               Key:  "test-key-2",
+               Mesh: "default",
+               Meta: metav1.ObjectMeta{Name: "test-resource-2"},
+       }
+
+       // Add resources
+       err = store.Add(mockRes1)
+       require.NoError(t, err)
+       err = store.Add(mockRes2)
+       require.NoError(t, err)
+
+       // List resources
+       list := store.List()
+       assert.Len(t, list, 2)
+
+       // Verify resource keys are present
+       keys := make([]string, len(list))
+       for i, item := range list {
+               keys[i] = item.(model.Resource).ResourceKey()
+       }
+       assert.Contains(t, keys, "test-key-1")
+       assert.Contains(t, keys, "test-key-2")
+}
+
+func TestGormStore_ListKeys(t *testing.T) {
+       store, cleanup := setupTestStore(t)
+       defer cleanup()
+
+       err := store.Init(nil)
+       require.NoError(t, err)
+
+       // Create mock resources
+       mockRes1 := &mockResource{
+               Kind: "TestResource",
+               Key:  "test-key-1",
+               Mesh: "default",
+               Meta: metav1.ObjectMeta{Name: "test-resource-1"},
+       }
+
+       mockRes2 := &mockResource{
+               Kind: "TestResource",
+               Key:  "test-key-2",
+               Mesh: "default",
+               Meta: metav1.ObjectMeta{Name: "test-resource-2"},
+       }
+
+       // Add resources
+       err = store.Add(mockRes1)
+       require.NoError(t, err)
+       err = store.Add(mockRes2)
+       require.NoError(t, err)
+
+       // List keys
+       keys := store.ListKeys()
+       assert.Len(t, keys, 2)
+       assert.Contains(t, keys, "test-key-1")
+       assert.Contains(t, keys, "test-key-2")
+}
+
+func TestGormStore_Replace(t *testing.T) {
+       store, cleanup := setupTestStore(t)
+       defer cleanup()
+
+       err := store.Init(nil)
+       require.NoError(t, err)
+
+       // Create initial mock resources
+       mockRes1 := &mockResource{
+               Kind: "TestResource",
+               Key:  "test-key-1",
+               Mesh: "default",
+               Meta: metav1.ObjectMeta{Name: "test-resource-1"},
+       }
+
+       mockRes2 := &mockResource{
+               Kind: "TestResource",
+               Key:  "test-key-2",
+               Mesh: "default",
+               Meta: metav1.ObjectMeta{Name: "test-resource-2"},
+       }
+
+       // Add initial resource
+       err = store.Add(mockRes1)
+       require.NoError(t, err)
+
+       // Replace with new set of resources
+       newResources := []interface{}{mockRes2}
+       err = store.Replace(newResources, "version-1")
+       assert.NoError(t, err)
+
+       // Check that only the new resource exists
+       keys := store.ListKeys()
+       assert.Len(t, keys, 1)
+       assert.Contains(t, keys, "test-key-2")
+       assert.NotContains(t, keys, "test-key-1")
+}
+
+func TestGormStore_ReplaceEmpty(t *testing.T) {
+       store, cleanup := setupTestStore(t)
+       defer cleanup()
+
+       err := store.Init(nil)
+       require.NoError(t, err)
+
+       // Add a resource
+       mockRes := &mockResource{
+               Kind: "TestResource",
+               Key:  "test-key-1",
+               Mesh: "default",
+               Meta: metav1.ObjectMeta{Name: "test-resource-1"},
+       }
+       err = store.Add(mockRes)
+       require.NoError(t, err)
+
+       // Replace with empty list
+       err = store.Replace([]interface{}{}, "version-1")
+       assert.NoError(t, err)
+
+       // Check that all resources are removed
+       keys := store.ListKeys()
+       assert.Len(t, keys, 0)
+}
+
+func TestGormStore_GetByKeys(t *testing.T) {
+       store, cleanup := setupTestStore(t)
+       defer cleanup()
+
+       err := store.Init(nil)
+       require.NoError(t, err)
+
+       // Create mock resources
+       mockRes1 := &mockResource{
+               Kind: "TestResource",
+               Key:  "test-key-1",
+               Mesh: "default",
+               Meta: metav1.ObjectMeta{Name: "test-resource-1"},
+       }
+
+       mockRes2 := &mockResource{
+               Kind: "TestResource",
+               Key:  "test-key-2",
+               Mesh: "default",
+               Meta: metav1.ObjectMeta{Name: "test-resource-2"},
+       }
+
+       // Add resources
+       err = store.Add(mockRes1)
+       require.NoError(t, err)
+       err = store.Add(mockRes2)
+       require.NoError(t, err)
+
+       // Get by multiple keys
+       keys := []string{"test-key-1", "test-key-2", "test-key-3"}
+       resources, err := store.GetByKeys(keys)
+       assert.NoError(t, err)
+       assert.Len(t, resources, 2)
+
+       // Verify resource keys
+       resKeys := make([]string, len(resources))
+       for i, res := range resources {
+               resKeys[i] = res.ResourceKey()
+       }
+       assert.Contains(t, resKeys, "test-key-1")
+       assert.Contains(t, resKeys, "test-key-2")
+}
+
+func TestGormStore_GetByKeysEmpty(t *testing.T) {
+       store, cleanup := setupTestStore(t)
+       defer cleanup()
+
+       err := store.Init(nil)
+       require.NoError(t, err)
+
+       resources, err := store.GetByKeys([]string{})
+       assert.NoError(t, err)
+       assert.Len(t, resources, 0)
+}
+
+func TestGormStore_AddIndexers(t *testing.T) {
+       store, cleanup := setupTestStore(t)
+       defer cleanup()
+
+       err := store.Init(nil)
+       require.NoError(t, err)
+
+       // Add indexers
+       indexers := map[string]cache.IndexFunc{
+               "by-mesh": func(obj interface{}) ([]string, error) {
+                       resource := obj.(model.Resource)
+                       return []string{resource.MeshName()}, nil
+               },
+       }
+       err = store.AddIndexers(indexers)
+       assert.NoError(t, err)
+
+       // Verify indexers were added
+       storedIndexers := store.GetIndexers()
+       assert.Len(t, storedIndexers, 1)
+       assert.Contains(t, storedIndexers, "by-mesh")
+}
+
+func TestGormStore_IndexKeys(t *testing.T) {
+       store, cleanup := setupTestStore(t)
+       defer cleanup()
+
+       err := store.Init(nil)
+       require.NoError(t, err)
+
+       // Add indexers
+       indexers := map[string]cache.IndexFunc{
+               "by-mesh": func(obj interface{}) ([]string, error) {
+                       resource := obj.(model.Resource)
+                       return []string{resource.MeshName()}, nil
+               },
+       }
+       err = store.AddIndexers(indexers)
+       require.NoError(t, err)
+
+       // Create mock resources
+       mockRes1 := &mockResource{
+               Kind: "TestResource",
+               Key:  "mesh1/test-key-1",
+               Mesh: "mesh1",
+               Meta: metav1.ObjectMeta{Name: "test-resource-1"},
+       }
+
+       mockRes2 := &mockResource{
+               Kind: "TestResource",
+               Key:  "mesh1/test-key-2",
+               Mesh: "mesh1",
+               Meta: metav1.ObjectMeta{Name: "test-resource-2"},
+       }
+
+       mockRes3 := &mockResource{
+               Kind: "TestResource",
+               Key:  "mesh2/test-key-3",
+               Mesh: "mesh2",
+               Meta: metav1.ObjectMeta{Name: "test-resource-3"},
+       }
+
+       // Add resources
+       err = store.Add(mockRes1)
+       require.NoError(t, err)
+       err = store.Add(mockRes2)
+       require.NoError(t, err)
+       err = store.Add(mockRes3)
+       require.NoError(t, err)
+
+       // Test IndexKeys method
+       keys, err := store.IndexKeys("by-mesh", "mesh1")
+       assert.NoError(t, err)
+       assert.Len(t, keys, 2)
+       assert.Contains(t, keys, "mesh1/test-key-1")
+       assert.Contains(t, keys, "mesh1/test-key-2")
+
+       keys, err = store.IndexKeys("by-mesh", "mesh2")
+       assert.NoError(t, err)
+       assert.Len(t, keys, 1)
+       assert.Contains(t, keys, "mesh2/test-key-3")
+}
+
+func TestGormStore_ByIndex(t *testing.T) {
+       store, cleanup := setupTestStore(t)
+       defer cleanup()
+
+       err := store.Init(nil)
+       require.NoError(t, err)
+
+       // Add indexers
+       indexers := map[string]cache.IndexFunc{
+               "by-name": func(obj interface{}) ([]string, error) {
+                       resource := obj.(model.Resource)
+                       return []string{resource.ResourceMeta().Name}, nil
+               },
+       }
+       err = store.AddIndexers(indexers)
+       require.NoError(t, err)
+
+       // Create mock resources
+       mockRes1 := &mockResource{
+               Kind: "TestResource",
+               Key:  "test-key-1",
+               Mesh: "default",
+               Meta: metav1.ObjectMeta{Name: "resource-a"},
+       }
+
+       mockRes2 := &mockResource{
+               Kind: "TestResource",
+               Key:  "test-key-2",
+               Mesh: "default",
+               Meta: metav1.ObjectMeta{Name: "resource-b"},
+       }
+
+       // Add resources
+       err = store.Add(mockRes1)
+       require.NoError(t, err)
+       err = store.Add(mockRes2)
+       require.NoError(t, err)
+
+       // Test ByIndex method
+       items, err := store.ByIndex("by-name", "resource-a")
+       assert.NoError(t, err)
+       assert.Len(t, items, 1)
+       assert.Equal(t, "test-key-1", items[0].(model.Resource).ResourceKey())
+
+       items, err = store.ByIndex("by-name", "resource-b")
+       assert.NoError(t, err)
+       assert.Len(t, items, 1)
+       assert.Equal(t, "test-key-2", items[0].(model.Resource).ResourceKey())
+}
+
+func TestGormStore_ListByIndexes(t *testing.T) {
+       store, cleanup := setupTestStore(t)
+       defer cleanup()
+
+       err := store.Init(nil)
+       require.NoError(t, err)
+
+       // Add indexers
+       indexers := map[string]cache.IndexFunc{
+               "by-mesh": func(obj interface{}) ([]string, error) {
+                       resource := obj.(model.Resource)
+                       return []string{resource.MeshName()}, nil
+               },
+       }
+       err = store.AddIndexers(indexers)
+       require.NoError(t, err)
+
+       // Create mock resources
+       mockRes1 := &mockResource{
+               Kind: "TestResource",
+               Key:  "mesh1/test-key-1",
+               Mesh: "mesh1",
+               Meta: metav1.ObjectMeta{Name: "test-resource-1"},
+       }
+
+       mockRes2 := &mockResource{
+               Kind: "TestResource",
+               Key:  "mesh1/test-key-2",
+               Mesh: "mesh1",
+               Meta: metav1.ObjectMeta{Name: "test-resource-2"},
+       }
+
+       mockRes3 := &mockResource{
+               Kind: "TestResource",
+               Key:  "mesh2/test-key-3",
+               Mesh: "mesh2",
+               Meta: metav1.ObjectMeta{Name: "test-resource-3"},
+       }
+
+       // Add resources
+       err = store.Add(mockRes1)
+       require.NoError(t, err)
+       err = store.Add(mockRes2)
+       require.NoError(t, err)
+       err = store.Add(mockRes3)
+       require.NoError(t, err)
+
+       // List by indexes
+       indexes := map[string]string{"by-mesh": "mesh1"}
+       resources, err := store.ListByIndexes(indexes)
+       assert.NoError(t, err)
+       assert.Len(t, resources, 2)
+       // Should be sorted by ResourceKey
+       assert.Equal(t, "mesh1/test-key-1", resources[0].ResourceKey())
+       assert.Equal(t, "mesh1/test-key-2", resources[1].ResourceKey())
+}
+
+func TestGormStore_ListByIndexesEmpty(t *testing.T) {
+       store, cleanup := setupTestStore(t)
+       defer cleanup()
+
+       err := store.Init(nil)
+       require.NoError(t, err)
+
+       // Add some resources
+       mockRes := &mockResource{
+               Kind: "TestResource",
+               Key:  "test-key-1",
+               Mesh: "default",
+               Meta: metav1.ObjectMeta{Name: "test-resource-1"},
+       }
+       err = store.Add(mockRes)
+       require.NoError(t, err)
+
+       // List with empty indexes should return all resources
+       resources, err := store.ListByIndexes(map[string]string{})
+       assert.NoError(t, err)
+       assert.Len(t, resources, 1)
+}
+
+func TestGormStore_PageListByIndexes(t *testing.T) {
+       store, cleanup := setupTestStore(t)
+       defer cleanup()
+
+       err := store.Init(nil)
+       require.NoError(t, err)
+
+       // Add indexers
+       indexers := map[string]cache.IndexFunc{
+               "by-mesh": func(obj interface{}) ([]string, error) {
+                       resource := obj.(model.Resource)
+                       return []string{resource.MeshName()}, nil
+               },
+       }
+       err = store.AddIndexers(indexers)
+       require.NoError(t, err)
+
+       // Create mock resources
+       mockRes1 := &mockResource{
+               Kind: "TestResource",
+               Key:  "mesh1/test-key-1",
+               Mesh: "mesh1",
+               Meta: metav1.ObjectMeta{Name: "test-resource-1"},
+       }
+
+       mockRes2 := &mockResource{
+               Kind: "TestResource",
+               Key:  "mesh1/test-key-2",
+               Mesh: "mesh1",
+               Meta: metav1.ObjectMeta{Name: "test-resource-2"},
+       }
+
+       mockRes3 := &mockResource{
+               Kind: "TestResource",
+               Key:  "mesh1/test-key-3",
+               Mesh: "mesh1",
+               Meta: metav1.ObjectMeta{Name: "test-resource-3"},
+       }
+
+       // Add resources
+       err = store.Add(mockRes1)
+       require.NoError(t, err)
+       err = store.Add(mockRes2)
+       require.NoError(t, err)
+       err = store.Add(mockRes3)
+       require.NoError(t, err)
+
+       // Page list by indexes
+       indexes := map[string]string{"by-mesh": "mesh1"}
+       pageReq := model.PageReq{
+               PageOffset: 0,
+               PageSize:   2,
+       }
+       pageData, err := store.PageListByIndexes(indexes, pageReq)
+       assert.NoError(t, err)
+       // Total 3 resources
+       assert.Equal(t, 3, pageData.Total)
+       // Page offset 0
+       assert.Equal(t, 0, pageData.PageOffset)
+       // Page size 2
+       assert.Equal(t, 2, pageData.PageSize)
+       // 2 items in this page
+       assert.Len(t, pageData.Data, 2)
+       // Sorted by key
+       assert.Equal(t, "mesh1/test-key-1", pageData.Data[0].ResourceKey())
+       assert.Equal(t, "mesh1/test-key-2", pageData.Data[1].ResourceKey())
+
+       // Second page
+       pageReq.PageOffset = 2
+       pageData, err = store.PageListByIndexes(indexes, pageReq)
+       assert.NoError(t, err)
+       // Total still 3
+       assert.Equal(t, 3, pageData.Total)
+       // Page offset 2
+       assert.Equal(t, 2, pageData.PageOffset)
+       // Page size 2
+       assert.Equal(t, 2, pageData.PageSize)
+       // Only 1 item left
+       assert.Len(t, pageData.Data, 1)
+       // Last item
+       assert.Equal(t, "mesh1/test-key-3", pageData.Data[0].ResourceKey())
+}
+
+func TestGormStore_PageListByIndexesOffsetBeyondTotal(t *testing.T) {
+       store, cleanup := setupTestStore(t)
+       defer cleanup()
+
+       err := store.Init(nil)
+       require.NoError(t, err)
+
+       // Add indexers
+       indexers := map[string]cache.IndexFunc{
+               "by-mesh": func(obj interface{}) ([]string, error) {
+                       resource := obj.(model.Resource)
+                       return []string{resource.MeshName()}, nil
+               },
+       }
+       err = store.AddIndexers(indexers)
+       require.NoError(t, err)
+
+       // Add one resource
+       mockRes := &mockResource{
+               Kind: "TestResource",
+               Key:  "test-key-1",
+               Mesh: "default",
+               Meta: metav1.ObjectMeta{Name: "test-resource-1"},
+       }
+       err = store.Add(mockRes)
+       require.NoError(t, err)
+
+       // Request page beyond total
+       indexes := map[string]string{"by-mesh": "default"}
+       pageReq := model.PageReq{
+               PageOffset: 10,
+               PageSize:   2,
+       }
+       pageData, err := store.PageListByIndexes(indexes, pageReq)
+       assert.NoError(t, err)
+       assert.Equal(t, 1, pageData.Total)
+       assert.Len(t, pageData.Data, 0)
+}
+
+func TestGormStore_MultipleIndexes(t *testing.T) {
+       store, cleanup := setupTestStore(t)
+       defer cleanup()
+
+       err := store.Init(nil)
+       require.NoError(t, err)
+
+       // Add indexers for multiple fields
+       indexers := map[string]cache.IndexFunc{
+               "by-mesh": func(obj interface{}) ([]string, error) {
+                       resource := obj.(model.Resource)
+                       return []string{resource.MeshName()}, nil
+               },
+               "by-namespace": func(obj interface{}) ([]string, error) {
+                       resource := obj.(model.Resource)
+                       return []string{resource.ResourceMeta().Namespace}, nil
+               },
+       }
+       err = store.AddIndexers(indexers)
+       require.NoError(t, err)
+
+       // Create mock resources
+       mockRes1 := &mockResource{
+               Kind: "TestResource",
+               Key:  "mesh1/default/app1",
+               Mesh: "mesh1",
+               Meta: metav1.ObjectMeta{
+                       Name:      "app1",
+                       Namespace: "default",
+               },
+       }
+
+       mockRes2 := &mockResource{
+               Kind: "TestResource",
+               Key:  "mesh1/default/app2",
+               Mesh: "mesh1",
+               Meta: metav1.ObjectMeta{
+                       Name:      "app2",
+                       Namespace: "default",
+               },
+       }
+
+       mockRes3 := &mockResource{
+               Kind: "TestResource",
+               Key:  "mesh1/custom/app3",
+               Mesh: "mesh1",
+               Meta: metav1.ObjectMeta{
+                       Name:      "app3",
+                       Namespace: "custom",
+               },
+       }
+
+       mockRes4 := &mockResource{
+               Kind: "TestResource",
+               Key:  "mesh2/default/app4",
+               Mesh: "mesh2",
+               Meta: metav1.ObjectMeta{
+                       Name:      "app4",
+                       Namespace: "default",
+               },
+       }
+
+       // Add resources
+       resources := []model.Resource{mockRes1, mockRes2, mockRes3, mockRes4}
+       for _, res := range resources {
+               err = store.Add(res)
+               require.NoError(t, err)
+       }
+
+       // Test multiple indexes - get all resources in mesh1 and default 
namespace
+       indexes := map[string]string{
+               "by-mesh":      "mesh1",
+               "by-namespace": "default",
+       }
+       result, err := store.ListByIndexes(indexes)
+       assert.NoError(t, err)
+       assert.Len(t, result, 2)
+
+       // Should contain app1 and app2
+       keys := make([]string, len(result))
+       for i, res := range result {
+               keys[i] = res.ResourceKey()
+       }
+       assert.Contains(t, keys, "mesh1/default/app1")
+       assert.Contains(t, keys, "mesh1/default/app2")
+}
+
+func TestGormStore_ListIndexFuncValues(t *testing.T) {
+       store, cleanup := setupTestStore(t)
+       defer cleanup()
+
+       err := store.Init(nil)
+       require.NoError(t, err)
+
+       // Add indexer
+       indexers := map[string]cache.IndexFunc{
+               "by-mesh": func(obj interface{}) ([]string, error) {
+                       resource := obj.(model.Resource)
+                       return []string{resource.MeshName()}, nil
+               },
+       }
+       err = store.AddIndexers(indexers)
+       require.NoError(t, err)
+
+       // Create mock resources with different meshes
+       mockRes1 := &mockResource{
+               Kind: "TestResource",
+               Key:  "mesh1/resource1",
+               Mesh: "mesh1",
+               Meta: metav1.ObjectMeta{Name: "resource1"},
+       }
+
+       mockRes2 := &mockResource{
+               Kind: "TestResource",
+               Key:  "mesh2/resource2",
+               Mesh: "mesh2",
+               Meta: metav1.ObjectMeta{Name: "resource2"},
+       }
+
+       mockRes3 := &mockResource{
+               Kind: "TestResource",
+               Key:  "mesh1/resource3",
+               Mesh: "mesh1",
+               Meta: metav1.ObjectMeta{Name: "resource3"},
+       }
+
+       // Add resources
+       resources := []model.Resource{mockRes1, mockRes2, mockRes3}
+       for _, res := range resources {
+               err = store.Add(res)
+               require.NoError(t, err)
+       }
+
+       // Test ListIndexFuncValues method
+       values := store.ListIndexFuncValues("by-mesh")
+       assert.Len(t, values, 2)
+       assert.Contains(t, values, "mesh1")
+       assert.Contains(t, values, "mesh2")
+}
+
+func TestGormStore_IndexNonExistent(t *testing.T) {
+       store, cleanup := setupTestStore(t)
+       defer cleanup()
+
+       err := store.Init(nil)
+       require.NoError(t, err)
+
+       mockRes := &mockResource{
+               Kind: "TestResource",
+               Key:  "test-key",
+               Mesh: "default",
+               Meta: metav1.ObjectMeta{Name: "test-resource"},
+       }
+
+       // Try to use non-existent index
+       _, err = store.IndexKeys("non-existent-index", "value")
+       assert.Error(t, err)
+       assert.Contains(t, err.Error(), "does not exist")
+
+       _, err = store.ByIndex("non-existent-index", "value")
+       assert.Error(t, err)
+       assert.Contains(t, err.Error(), "does not exist")
+
+       _, err = store.Index("non-existent-index", mockRes)
+       assert.Error(t, err)
+       assert.Contains(t, err.Error(), "does not exist")
+}
+
+func TestGormStore_UpdateIndices(t *testing.T) {
+       store, cleanup := setupTestStore(t)
+       defer cleanup()
+
+       err := store.Init(nil)
+       require.NoError(t, err)
+
+       // Add indexer
+       indexers := map[string]cache.IndexFunc{
+               "by-mesh": func(obj interface{}) ([]string, error) {
+                       resource := obj.(model.Resource)
+                       return []string{resource.MeshName()}, nil
+               },
+       }
+       err = store.AddIndexers(indexers)
+       require.NoError(t, err)
+
+       // Create and add initial resource
+       mockRes := &mockResource{
+               Kind: "TestResource",
+               Key:  "test-key",
+               Mesh: "mesh1",
+               Meta: metav1.ObjectMeta{Name: "test-resource"},
+       }
+       err = store.Add(mockRes)
+       require.NoError(t, err)
+
+       // Verify initial index
+       keys, err := store.IndexKeys("by-mesh", "mesh1")
+       assert.NoError(t, err)
+       assert.Contains(t, keys, "test-key")
+
+       // Update resource with different mesh
+       updatedRes := &mockResource{
+               Kind: "TestResource",
+               Key:  "test-key",
+               Mesh: "mesh2",
+               Meta: metav1.ObjectMeta{Name: "test-resource"},
+       }
+       err = store.Update(updatedRes)
+       require.NoError(t, err)
+
+       // Verify index was updated
+       keys, err = store.IndexKeys("by-mesh", "mesh1")
+       assert.NoError(t, err)
+       assert.NotContains(t, keys, "test-key")
+
+       keys, err = store.IndexKeys("by-mesh", "mesh2")
+       assert.NoError(t, err)
+       assert.Contains(t, keys, "test-key")
+}
+
+func TestGormStore_DeleteFromIndices(t *testing.T) {
+       store, cleanup := setupTestStore(t)
+       defer cleanup()
+
+       err := store.Init(nil)
+       require.NoError(t, err)
+
+       // Add indexer
+       indexers := map[string]cache.IndexFunc{
+               "by-mesh": func(obj interface{}) ([]string, error) {
+                       resource := obj.(model.Resource)
+                       return []string{resource.MeshName()}, nil
+               },
+       }
+       err = store.AddIndexers(indexers)
+       require.NoError(t, err)
+
+       // Create and add resource
+       mockRes := &mockResource{
+               Kind: "TestResource",
+               Key:  "test-key",
+               Mesh: "mesh1",
+               Meta: metav1.ObjectMeta{Name: "test-resource"},
+       }
+       err = store.Add(mockRes)
+       require.NoError(t, err)
+
+       // Verify initial index
+       keys, err := store.IndexKeys("by-mesh", "mesh1")
+       assert.NoError(t, err)
+       assert.Contains(t, keys, "test-key")
+
+       // Delete resource
+       err = store.Delete(mockRes)
+       require.NoError(t, err)
+
+       // Verify index was updated
+       keys, err = store.IndexKeys("by-mesh", "mesh1")
+       assert.NoError(t, err)
+       assert.NotContains(t, keys, "test-key")
+}
+
+func TestGormStore_ReplaceIndices(t *testing.T) {
+       store, cleanup := setupTestStore(t)
+       defer cleanup()
+
+       err := store.Init(nil)
+       require.NoError(t, err)
+
+       // Add indexer
+       indexers := map[string]cache.IndexFunc{
+               "by-mesh": func(obj interface{}) ([]string, error) {
+                       resource := obj.(model.Resource)
+                       return []string{resource.MeshName()}, nil
+               },
+       }
+       err = store.AddIndexers(indexers)
+       require.NoError(t, err)
+
+       // Add initial resources
+       mockRes1 := &mockResource{
+               Kind: "TestResource",
+               Key:  "test-key-1",
+               Mesh: "mesh1",
+               Meta: metav1.ObjectMeta{Name: "test-resource-1"},
+       }
+       err = store.Add(mockRes1)
+       require.NoError(t, err)
+
+       // Replace with new resources
+       mockRes2 := &mockResource{
+               Kind: "TestResource",
+               Key:  "test-key-2",
+               Mesh: "mesh2",
+               Meta: metav1.ObjectMeta{Name: "test-resource-2"},
+       }
+       err = store.Replace([]interface{}{mockRes2}, "version-1")
+       require.NoError(t, err)
+
+       // Verify indices were cleared and rebuilt
+       keys, err := store.IndexKeys("by-mesh", "mesh1")
+       assert.NoError(t, err)
+       assert.Len(t, keys, 0)
+
+       keys, err = store.IndexKeys("by-mesh", "mesh2")
+       assert.NoError(t, err)
+       assert.Contains(t, keys, "test-key-2")
+}
+
// TestGormStore_InitRebuildIndices verifies that indices are rebuilt from
// existing database rows during Init(), simulating a process restart where a
// fresh GormStore is pointed at a database that already contains data.
func TestGormStore_InitRebuildIndices(t *testing.T) {
	// This test verifies that indices are rebuilt from existing data during Init()
	// Simulates the scenario where a GormStore starts with existing data in the database

	// Create store and add data
	store, cleanup := setupTestStore(t)
	defer cleanup()

	err := store.Init(nil)
	require.NoError(t, err)

	// Add indexer before adding data
	indexers := map[string]cache.IndexFunc{
		"by-mesh": func(obj interface{}) ([]string, error) {
			resource := obj.(model.Resource)
			return []string{resource.MeshName()}, nil
		},
	}
	err = store.AddIndexers(indexers)
	require.NoError(t, err)

	// Add some resources to the database
	mockRes1 := &mockResource{
		Kind: "TestResource",
		Key:  "test-key-1",
		Mesh: "mesh1",
		Meta: metav1.ObjectMeta{Name: "test-resource-1"},
	}
	mockRes2 := &mockResource{
		Kind: "TestResource",
		Key:  "test-key-2",
		Mesh: "mesh2",
		Meta: metav1.ObjectMeta{Name: "test-resource-2"},
	}
	err = store.Add(mockRes1)
	require.NoError(t, err)
	err = store.Add(mockRes2)
	require.NoError(t, err)

	// Verify indices are populated
	keys, err := store.IndexKeys("by-mesh", "mesh1")
	assert.NoError(t, err)
	assert.Contains(t, keys, "test-key-1")

	// Now simulate a restart by creating a new store instance with the same pool
	// This simulates the scenario where existing data exists in the database
	pool := store.pool
	// NOTE(review): IncrementRef keeps the shared pool alive while a second
	// store references it — confirm the pool's ref counting matches what
	// cleanup()/Close() decrement, or this test could leak a connection.
	pool.IncrementRef() // Increment ref count since we're creating another store using it

	newStore := NewGormStore("TestResource", pool.Address(), pool)

	// Add indexers BEFORE Init to ensure they're available during index rebuild
	err = newStore.AddIndexers(indexers)
	require.NoError(t, err)

	// Init should rebuild indices from existing database data
	err = newStore.Init(nil)
	require.NoError(t, err)

	// Verify indices were rebuilt with existing data
	keys, err = newStore.IndexKeys("by-mesh", "mesh1")
	assert.NoError(t, err)
	assert.Contains(t, keys, "test-key-1", "Index should contain existing data after Init()")

	keys, err = newStore.IndexKeys("by-mesh", "mesh2")
	assert.NoError(t, err)
	assert.Contains(t, keys, "test-key-2", "Index should contain existing data after Init()")

	// Verify all keys are present
	allKeys := newStore.ListKeys()
	assert.Len(t, allKeys, 2)
}
+
+func TestGormStore_Resync(t *testing.T) {
+       store, cleanup := setupTestStore(t)
+       defer cleanup()
+
+       err := store.Init(nil)
+       require.NoError(t, err)
+
+       // Resync should return nil (no-op for database stores)
+       err = store.Resync()
+       assert.NoError(t, err)
+}
+
+func TestGormStore_TransactionRollback(t *testing.T) {
+       store, cleanup := setupTestStore(t)
+       defer cleanup()
+
+       err := store.Init(nil)
+       require.NoError(t, err)
+
+       // Add initial resource
+       mockRes1 := &mockResource{
+               Kind: "TestResource",
+               Key:  "test-key-1",
+               Mesh: "default",
+               Meta: metav1.ObjectMeta{Name: "test-resource-1"},
+       }
+       err = store.Add(mockRes1)
+       require.NoError(t, err)
+
+       // Try to replace with invalid resource (wrong type)
+       // This should cause transaction rollback
+       invalidList := []interface{}{"not-a-resource"}
+       err = store.Replace(invalidList, "version-1")
+       assert.Error(t, err)
+
+       // Verify original resource still exists (transaction was rolled back)
+       keys := store.ListKeys()
+       assert.Len(t, keys, 1)
+       assert.Contains(t, keys, "test-key-1")
+}
+
+func TestGormStore_ConcurrentOperations(t *testing.T) {
+       // Use WAL mode for better concurrent write support in SQLite
+       dialector := sqlite.Open("file::memory:?cache=shared&_journal_mode=WAL")
+       pool, err := NewConnectionPool(dialector, storecfg.MySQL, 
"test-address", DefaultConnectionPoolConfig())
+       require.NoError(t, err)
+       defer pool.Close()
+
+       // Register the mock resource type
+       kind := model.ResourceKind("TestResource")
+       model.RegisterResourceSchema(kind, func() model.Resource {
+               return &mockResource{Kind: kind}
+       })
+
+       store := NewGormStore(kind, "test-address", pool)
+       err = store.Init(nil)
+       require.NoError(t, err)
+
+       // Add indexer
+       indexers := map[string]cache.IndexFunc{
+               "by-mesh": func(obj interface{}) ([]string, error) {
+                       resource := obj.(model.Resource)
+                       return []string{resource.MeshName()}, nil
+               },
+       }
+       err = store.AddIndexers(indexers)
+       require.NoError(t, err)
+
+       // Perform concurrent Add operations with less concurrency for SQLite
+       const numGoroutines = 5
+       done := make(chan error, numGoroutines)
+
+       for i := 0; i < numGoroutines; i++ {
+               go func(index int) {
+                       mockRes := &mockResource{
+                               Kind: "TestResource",
+                               Key:  string(rune('a' + index)),
+                               Mesh: "default",
+                               Meta: metav1.ObjectMeta{Name: string(rune('a' + 
index))},
+                       }
+                       done <- store.Add(mockRes)
+               }(i)
+       }
+
+       // Wait for all goroutines to complete and check for errors
+       successCount := 0
+       for i := 0; i < numGoroutines; i++ {
+               if err := <-done; err == nil {
+                       successCount++
+               }
+       }
+
+       // Due to SQLite concurrency limitations, at least some operations 
should succeed
+       assert.Greater(t, successCount, 0, "At least some concurrent operations 
should succeed")
+
+       // Verify resources were added
+       keys := store.ListKeys()
+       assert.Greater(t, len(keys), 0, "Should have added some resources")
+       assert.LessOrEqual(t, len(keys), numGoroutines, "Should not have more 
resources than goroutines")
+}
+
+func TestGormStore_GetNonExistent(t *testing.T) {
+       store, cleanup := setupTestStore(t)
+       defer cleanup()
+
+       err := store.Init(nil)
+       require.NoError(t, err)
+
+       mockRes := &mockResource{
+               Kind: "TestResource",
+               Key:  "non-existent-key",
+               Mesh: "default",
+               Meta: metav1.ObjectMeta{Name: "non-existent"},
+       }
+
+       item, exists, err := store.Get(mockRes)
+       assert.NoError(t, err)
+       assert.False(t, exists)
+       assert.Nil(t, item)
+}
+
+func TestGormStore_GetByKeyNonExistent(t *testing.T) {
+       store, cleanup := setupTestStore(t)
+       defer cleanup()
+
+       err := store.Init(nil)
+       require.NoError(t, err)
+
+       item, exists, err := store.GetByKey("non-existent-key")
+       assert.NoError(t, err)
+       assert.False(t, exists)
+       assert.Nil(t, item)
+}
+
+func TestGormStore_InvalidResourceType(t *testing.T) {
+       store, cleanup := setupTestStore(t)
+       defer cleanup()
+
+       err := store.Init(nil)
+       require.NoError(t, err)
+
+       // Try to add invalid type
+       err = store.Add("not-a-resource")
+       assert.Error(t, err)
+
+       // Try to update invalid type
+       err = store.Update("not-a-resource")
+       assert.Error(t, err)
+
+       // Try to delete invalid type
+       err = store.Delete("not-a-resource")
+       assert.Error(t, err)
+
+       // Try to get invalid type
+       _, _, err = store.Get("not-a-resource")
+       assert.Error(t, err)
+}
diff --git a/pkg/store/dbcommon/index.go b/pkg/store/dbcommon/index.go
new file mode 100644
index 00000000..5c535619
--- /dev/null
+++ b/pkg/store/dbcommon/index.go
@@ -0,0 +1,254 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package dbcommon
+
+import (
+       "fmt"
+       "sync"
+
+       set "github.com/duke-git/lancet/v2/datastructure/set"
+       "k8s.io/client-go/tools/cache"
+
+       "github.com/apache/dubbo-admin/pkg/core/resource/model"
+)
+
+// ValueIndex represents the mapping from indexed values to resource keys for 
a single index.
+// Structure: map[indexedValue]set[resourceKey]
+// Example: map["default"]{"resource1", "resource2", "resource3"}
+type ValueIndex struct {
+       values map[string]set.Set[string]
+}
+
+// NewValueIndex creates a new ValueIndex
+func NewValueIndex() *ValueIndex {
+       return &ValueIndex{
+               values: make(map[string]set.Set[string]),
+       }
+}
+
+// Add adds a resource key to the specified indexed value
+func (vi *ValueIndex) Add(indexedValue, resourceKey string) {
+       if vi.values[indexedValue] == nil {
+               vi.values[indexedValue] = set.New[string]()
+       }
+       vi.values[indexedValue].Add(resourceKey)
+}
+
+// Remove removes a resource key from the specified indexed value
+// Returns true if the value entry becomes empty after removal
+func (vi *ValueIndex) Remove(indexedValue, resourceKey string) bool {
+       if vi.values[indexedValue] == nil {
+               return false
+       }
+
+       vi.values[indexedValue].Delete(resourceKey)
+
+       // Check if the set is now empty
+       if vi.values[indexedValue].Size() == 0 {
+               delete(vi.values, indexedValue)
+               return true
+       }
+
+       return false
+}
+
+// GetKeys returns all resource keys for the specified indexed value
+func (vi *ValueIndex) GetKeys(indexedValue string) []string {
+       if vi.values[indexedValue] == nil {
+               return []string{}
+       }
+       return vi.values[indexedValue].ToSlice()
+}
+
+// GetAllValues returns all indexed values in this ValueIndex
+func (vi *ValueIndex) GetAllValues() []string {
+       if len(vi.values) == 0 {
+               return []string{}
+       }
+
+       result := make([]string, 0, len(vi.values))
+       for value := range vi.values {
+               result = append(result, value)
+       }
+       return result
+}
+
+// IsEmpty returns true if the ValueIndex has no entries
+func (vi *ValueIndex) IsEmpty() bool {
+       return len(vi.values) == 0
+}
+
+// Index is a thread-safe in-memory index structure that manages multiple 
named indices.
+// Each index maps values to sets of resource keys.
+//
+// Structure: map[indexName]*ValueIndex
+// Example: map["mesh"]*ValueIndex where ValueIndex contains {"default": 
{"res1", "res2"}}
+type Index struct {
+       mu       sync.RWMutex
+       indices  map[string]*ValueIndex // map[indexName]*ValueIndex
+       indexers cache.Indexers         // Index functions for creating indices
+}
+
+// NewIndex creates a new empty Index instance
+func NewIndex() *Index {
+       return &Index{
+               indices:  make(map[string]*ValueIndex),
+               indexers: cache.Indexers{},
+       }
+}
+
+// AddIndexers adds new indexer functions to the Index
+// Returns an error if an indexer with the same name already exists
+func (idx *Index) AddIndexers(newIndexers cache.Indexers) error {
+       idx.mu.Lock()
+       defer idx.mu.Unlock()
+
+       for name, indexFunc := range newIndexers {
+               if _, exists := idx.indexers[name]; exists {
+                       return fmt.Errorf("indexer %s already exists", name)
+               }
+               idx.indexers[name] = indexFunc
+       }
+
+       return nil
+}
+
+// GetIndexers returns a copy of all registered indexers
+func (idx *Index) GetIndexers() cache.Indexers {
+       idx.mu.RLock()
+       defer idx.mu.RUnlock()
+
+       result := make(cache.Indexers, len(idx.indexers))
+       for k, v := range idx.indexers {
+               result[k] = v
+       }
+       return result
+}
+
+// UpdateResource atomically updates all indices for a resource
+// If oldResource is nil, it's treated as an add operation
+// If oldResource is not nil, it's treated as an update operation (remove old, 
add new)
+// This is a high-level atomic operation that handles all indexers internally
+func (idx *Index) UpdateResource(newResource model.Resource, oldResource 
model.Resource) {
+       idx.mu.Lock()
+       defer idx.mu.Unlock()
+
+       // Remove old resource from indices if this is an update
+       if oldResource != nil {
+               idx.removeResourceUnsafe(oldResource)
+       }
+
+       // Add new resource to indices
+       idx.addResourceUnsafe(newResource)
+}
+
+// RemoveResource atomically removes a resource from all indices
+func (idx *Index) RemoveResource(resource model.Resource) {
+       idx.mu.Lock()
+       defer idx.mu.Unlock()
+
+       idx.removeResourceUnsafe(resource)
+}
+
+// GetKeys returns all resource keys for a given index name and value
+// Returns an empty slice if the index name or value doesn't exist
+func (idx *Index) GetKeys(indexName, indexValue string) []string {
+       idx.mu.RLock()
+       defer idx.mu.RUnlock()
+
+       valueIndex := idx.indices[indexName]
+       if valueIndex == nil {
+               return []string{}
+       }
+
+       return valueIndex.GetKeys(indexValue)
+}
+
+// ListIndexFuncValues returns all indexed values for a given index name
+// This directly retrieves values from the in-memory index without 
recalculating
+func (idx *Index) ListIndexFuncValues(indexName string) []string {
+       idx.mu.RLock()
+       defer idx.mu.RUnlock()
+
+       valueIndex := idx.indices[indexName]
+       if valueIndex == nil {
+               return []string{}
+       }
+
+       return valueIndex.GetAllValues()
+}
+
+// Clear removes all entries from the index
+func (idx *Index) Clear() {
+       idx.mu.Lock()
+       defer idx.mu.Unlock()
+       idx.indices = make(map[string]*ValueIndex)
+}
+
+// IndexExists checks if an indexer with the given name exists
+func (idx *Index) IndexExists(indexName string) bool {
+       idx.mu.RLock()
+       defer idx.mu.RUnlock()
+       _, exists := idx.indexers[indexName]
+       return exists
+}
+
// addResourceUnsafe adds a resource to all indices (must be called with lock held)
// For each registered indexer it computes the indexed values for the resource
// and records the resource key under every value.
func (idx *Index) addResourceUnsafe(resource model.Resource) {
	for indexName, indexFunc := range idx.indexers {
		values, err := indexFunc(resource)
		if err != nil {
			// NOTE(review): index-func errors are silently skipped, so a
			// failing indexer leaves this resource absent from that index.
			continue
		}

		// Ensure the ValueIndex exists for this index name
		if idx.indices[indexName] == nil {
			idx.indices[indexName] = NewValueIndex()
		}

		// Add resource key to each indexed value
		for _, value := range values {
			idx.indices[indexName].Add(value, resource.ResourceKey())
		}
	}
}

// removeResourceUnsafe removes a resource from all indices (must be called with lock held)
// Indexed values are recomputed from the resource being removed, so callers
// must pass the same version of the resource that was originally indexed.
func (idx *Index) removeResourceUnsafe(resource model.Resource) {
	for indexName, indexFunc := range idx.indexers {
		values, err := indexFunc(resource)
		if err != nil {
			// NOTE(review): if the indexer errs here but succeeded on add,
			// stale keys remain in that index — confirm indexers are
			// deterministic for a given resource.
			continue
		}

		valueIndex := idx.indices[indexName]
		if valueIndex == nil {
			continue
		}

		// Remove resource key from each indexed value
		for _, value := range values {
			valueIndex.Remove(value, resource.ResourceKey())
		}

		// Clean up empty ValueIndex
		if valueIndex.IsEmpty() {
			delete(idx.indices, indexName)
		}
	}
}
diff --git a/pkg/store/dbcommon/model.go b/pkg/store/dbcommon/model.go
new file mode 100644
index 00000000..53b54e94
--- /dev/null
+++ b/pkg/store/dbcommon/model.go
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package dbcommon
+
+import (
+       "encoding/json"
+       "strings"
+       "time"
+       "unicode"
+
+       "gorm.io/gorm"
+
+       "github.com/apache/dubbo-admin/pkg/core/resource/model"
+)
+
// ResourceModel is the database model for storing Dubbo resources
// It uses dynamic table naming based on ResourceKind to improve query performance
// Note: TableName() method is intentionally removed as GORM caches it.
// Use TableScope() instead for dynamic table names.
// NOTE(review): the gorm index/uniqueIndex tags only take effect if migration
// is run against each kind's dynamic table — confirm AutoMigrate is invoked
// with the per-kind table name.
type ResourceModel struct {
	ID           uint      `gorm:"primarykey"`           // Auto-incrementing primary key
	ResourceKey  string    `gorm:"uniqueIndex;not null"` // Unique identifier for the resource
	ResourceKind string    `gorm:"not null"`             // Type of resource (e.g., "Application", "ServiceProviderMapping")
	Name         string    `gorm:"index;not null"`       // Resource name, indexed for fast lookups
	Mesh         string    `gorm:"index;not null"`       // Mesh identifier, indexed for filtering by mesh
	Data         []byte    `gorm:"type:text;not null"`   // JSON-encoded resource data
	CreatedAt    time.Time `gorm:"autoCreateTime"`       // Automatically set on creation
	UpdatedAt    time.Time `gorm:"autoUpdateTime"`       // Automatically updated on modification
}
+
+// TableNameForKind returns the table name for a given ResourceKind
+// Uses dynamic table naming: each ResourceKind gets its own table
+// e.g., "Application" -> "resources_application", "ServiceProviderMapping" -> 
"resources_service_provider_mapping"
+func TableNameForKind(kind string) string {
+       if kind == "" {
+               return "resources"
+       }
+       // Convert ResourceKind to snake_case table name with "resources_" 
prefix
+       return "resources_" + toSnakeCase(kind)
+}
+
+// TableScope returns a GORM scope function that sets the table name 
dynamically
+// This is the recommended approach for dynamic table names as TableName() is 
cached by GORM
+// Usage: db.Scopes(TableScope(kind)).Find(&models)
+func TableScope(kind string) func(db *gorm.DB) *gorm.DB {
+       return func(db *gorm.DB) *gorm.DB {
+               return db.Table(TableNameForKind(kind))
+       }
+}
+
// toSnakeCase converts CamelCase (including acronym runs) to snake_case.
// e.g. "ServiceProviderMapping" -> "service_provider_mapping",
// "RPCInstance" -> "rpc_instance".
func toSnakeCase(s string) string {
	rs := []rune(s)
	var b strings.Builder
	b.Grow(len(s) + 4)

	for i, r := range rs {
		if !unicode.IsUpper(r) {
			b.WriteRune(r)
			continue
		}
		// A word boundary exists before an uppercase rune that either follows
		// a lowercase rune, or ends an acronym run (the next rune is
		// lowercase). Never insert an underscore at index 0.
		atBoundary := i > 0 &&
			(unicode.IsLower(rs[i-1]) || (i+1 < len(rs) && unicode.IsLower(rs[i+1])))
		if atBoundary {
			b.WriteRune('_')
		}
		b.WriteRune(unicode.ToLower(r))
	}
	return b.String()
}
+
+// ToResource converts the database model back to a Resource object
+// Unmarshals the JSON data and returns the typed resource
+func (rm *ResourceModel) ToResource() (model.Resource, error) {
+       newFunc, err := 
model.ResourceSchemaRegistry().NewResourceFunc(model.ResourceKind(rm.ResourceKind))
+       if err != nil {
+               return nil, err
+       }
+       resource := newFunc()
+       if err := json.Unmarshal(rm.Data, resource); err != nil {
+               return nil, err
+       }
+       return resource, nil
+}
+
+// FromResource converts a Resource object to a database model
+// Marshals the resource to JSON and populates the model fields
+func FromResource(resource model.Resource) (*ResourceModel, error) {
+       data, err := json.Marshal(resource)
+       if err != nil {
+               return nil, err
+       }
+
+       return &ResourceModel{
+               ResourceKey:  resource.ResourceKey(),
+               ResourceKind: resource.ResourceKind().ToString(),
+               Name:         resource.ResourceMeta().Name,
+               Mesh:         resource.MeshName(),
+               Data:         data,
+       }, nil
+}
diff --git a/pkg/store/mysql/mysql.go b/pkg/store/mysql/mysql.go
index 484ead99..4dff0cb1 100644
--- a/pkg/store/mysql/mysql.go
+++ b/pkg/store/mysql/mysql.go
@@ -17,4 +17,42 @@
 
 package mysql
 
-// TODO implement memory resource store, refer to GORM https://gorm.io/docs/
+import (
+       "gorm.io/driver/mysql"
+
+       storecfg "github.com/apache/dubbo-admin/pkg/config/store"
+       "github.com/apache/dubbo-admin/pkg/core/resource/model"
+       "github.com/apache/dubbo-admin/pkg/core/store"
+       "github.com/apache/dubbo-admin/pkg/store/dbcommon"
+)
+
// init registers the MySQL factory with the global store factory registry so
// it can be selected via configuration at startup.
func init() {
	store.RegisterFactory(&mysqlStoreFactory{})
}

// mysqlStoreFactory is the factory for creating MySQL store instances
type mysqlStoreFactory struct{}

// Compile-time check that mysqlStoreFactory satisfies store.Factory.
var _ store.Factory = &mysqlStoreFactory{}

// Support checks if this factory supports the given store type
// (true only for storecfg.MySQL).
func (f *mysqlStoreFactory) Support(s storecfg.Type) bool {
	return s == storecfg.MySQL
}
+
+// New creates a new MySQL store instance for the specified resource kind
+func (f *mysqlStoreFactory) New(kind model.ResourceKind, cfg *storecfg.Config) 
(store.ManagedResourceStore, error) {
+       // Get or create connection pool with MySQL dialector
+       pool, err := dbcommon.GetOrCreatePool(
+               mysql.Open(cfg.Address),
+               storecfg.MySQL,
+               cfg.Address,
+               dbcommon.DefaultConnectionPoolConfig(),
+       )
+       if err != nil {
+               return nil, err
+       }
+
+       // Create GormStore with the pool
+       return dbcommon.NewGormStore(kind, cfg.Address, pool), nil
+}
diff --git a/pkg/store/postgres/postgres.go b/pkg/store/postgres/postgres.go
new file mode 100644
index 00000000..27874805
--- /dev/null
+++ b/pkg/store/postgres/postgres.go
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package postgres
+
+import (
+       "gorm.io/driver/postgres"
+
+       storecfg "github.com/apache/dubbo-admin/pkg/config/store"
+       "github.com/apache/dubbo-admin/pkg/core/resource/model"
+       "github.com/apache/dubbo-admin/pkg/core/store"
+       "github.com/apache/dubbo-admin/pkg/store/dbcommon"
+)
+
+// init registers the PostgreSQL store factory in the global store registry as
+// a side effect of importing this package (blank-import friendly wiring).
+func init() {
+       store.RegisterFactory(&postgresStoreFactory{})
+}
+
+// postgresStoreFactory creates PostgreSQL-backed resource stores. It is
+// stateless; connection pooling is delegated to dbcommon.GetOrCreatePool in New.
+type postgresStoreFactory struct{}
+
+// Compile-time assertion that postgresStoreFactory implements store.Factory.
+var _ store.Factory = &postgresStoreFactory{}
+
+// Support reports whether this factory handles the given store type;
+// it accepts only storecfg.Postgres.
+func (f *postgresStoreFactory) Support(s storecfg.Type) bool {
+       return s == storecfg.Postgres
+}
+
+// New creates a PostgreSQL store for the specified resource kind. cfg.Address
+// is the connection string handed to the GORM postgres driver. The pool is
+// obtained via dbcommon.GetOrCreatePool — presumably cached per (store type,
+// address) so stores for different kinds share one pool per DSN; confirm in
+// pkg/store/dbcommon.
+// NOTE(review): the signature below is line-wrapped by the mail archiver; it
+// is a single line in the committed source.
+func (f *postgresStoreFactory) New(kind model.ResourceKind, cfg 
*storecfg.Config) (store.ManagedResourceStore, error) {
+       // Get or create connection pool with PostgreSQL dialector
+       pool, err := dbcommon.GetOrCreatePool(
+               postgres.Open(cfg.Address),
+               storecfg.Postgres,
+               cfg.Address,
+               dbcommon.DefaultConnectionPoolConfig(),
+       )
+       if err != nil {
+               return nil, err
+       }
+
+       // Create GormStore with the pool; kind selects the resource table/model.
+       return dbcommon.NewGormStore(kind, cfg.Address, pool), nil
+}

Reply via email to