zyxxoo commented on code in PR #2301:
URL: https://github.com/apache/incubator-hugegraph/pull/2301#discussion_r1313828473


##########
hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/auth/SchemaDefine.java:
##########
@@ -281,6 +290,19 @@ public abstract static class Relationship extends 
AuthElement {
 
         public abstract Id target();
 
+        public void setId() {
+            this.id(IdGenerator.of(this.source().asString() + "->" +
+                                   this.target().asString()));
+        }
+
+        public static <T extends Relationship> T fromMap(Map<String, Object> 
map, T entity) {

Review Comment:
   fromMap名字有点奇怪,要不要改一下?



##########
hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/meta/PdMetaDriver.java:
##########
@@ -0,0 +1,250 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.meta;
+
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Consumer;
+
+import org.apache.hugegraph.HugeException;
+import org.apache.hugegraph.meta.lock.LockResult;
+import org.apache.hugegraph.meta.lock.PdDistributedLock;
+import org.apache.hugegraph.pd.client.KvClient;
+import org.apache.hugegraph.pd.client.PDClient;
+import org.apache.hugegraph.pd.client.PDConfig;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.kv.KResponse;
+import org.apache.hugegraph.pd.grpc.kv.LockResponse;
+import org.apache.hugegraph.pd.grpc.kv.ScanPrefixResponse;
+import org.apache.hugegraph.pd.grpc.kv.TTLResponse;
+import org.apache.hugegraph.pd.grpc.kv.WatchEvent;
+import org.apache.hugegraph.pd.grpc.kv.WatchResponse;
+import org.apache.hugegraph.pd.grpc.kv.WatchType;
+
+import com.google.common.base.Strings;
+
+public class PdMetaDriver implements MetaDriver {
+
+    KvClient<WatchResponse> client = null;
+    PDClient pdClient = null;
+    private PdDistributedLock lock;
+
+    public PdMetaDriver(String pdPeer) {
+        PDConfig pdConfig = PDConfig.of(pdPeer);
+        this.client = new KvClient<>(pdConfig);
+        this.pdClient = PDClient.create(pdConfig);
+        lock = new PdDistributedLock(this.client);
+    }
+
+    public static void main(String[] args) {
+        PDConfig pdConfig = PDConfig.of("127.0.0.1:8686");
+        KvClient<WatchResponse> client = new KvClient<>(pdConfig);
+        ScanPrefixResponse contents;
+        try {
+            contents = client.scanPrefix("HUGEGRAPH/METRICS");
+            Map<String, String> map = contents.getKvsMap();
+            for (Map.Entry<String, String> entry : map.entrySet()) {
+                System.out.println(entry.getKey() + ":" + entry.getValue());
+            }
+            System.out.println(map.size());
+        } catch (PDException e) {
+            e.printStackTrace();
+        }
+        Date dNow = new Date();
+        System.out.println(dNow);
+        SimpleDateFormat ft = new SimpleDateFormat("HHmmss");
+        System.out.println("当前时间为: " + ft.format(dNow));
+    }
+
+    public PDClient pdClient() {
+        return this.pdClient;
+    }
+
+    @Override
+    public void put(String key, String value) {
+        try {
+            this.client.put(key, value);
+        } catch (PDException e) {
+            throw new HugeException("Failed to put '%s:%s' to pd", e, key, 
value);
+        }
+    }
+
+    @Override
+    public String get(String key) {
+        try {
+            KResponse response = this.client.get(key);
+            return response.getValue();
+        } catch (PDException e) {
+            throw new HugeException("Failed to get '%s' from pd", e, key);
+        }
+    }
+
+    @Override
+    public void delete(String key) {
+        try {
+            this.client.delete(key);
+        } catch (PDException e) {
+            throw new HugeException("Failed to delete '%s' from pd", e, key);
+        }
+    }
+
+    @Override
+    public void deleteWithPrefix(String prefix) {
+        try {
+            this.client.deletePrefix(prefix);
+        } catch (PDException e) {
+            throw new HugeException("Failed to deleteWithPrefix '%s' from pd", 
e, prefix);
+        }
+    }
+
+    @Override
+    public Map<String, String> scanWithPrefix(String prefix) {
+        try {
+            ScanPrefixResponse response = this.client.scanPrefix(prefix);
+            return response.getKvsMap();
+        } catch (PDException e) {
+            throw new HugeException("Failed to scanWithPrefix '%s' from pd", 
e, prefix);
+        }
+    }
+
+    @Override
+    public <T> void listen(String key, Consumer<T> consumer) {
+        try {
+            this.client.listen(key, (Consumer<WatchResponse>) consumer);
+        } catch (PDException e) {
+            throw new HugeException("Failed to listen '%s' to pd", e, key);
+        }
+    }
+
+    @Override
+    public <T> void listenPrefix(String prefix, Consumer<T> consumer) {
+        try {
+            this.client.listenPrefix(prefix, (Consumer<WatchResponse>) 
consumer);
+        } catch (PDException e) {
+            throw new HugeException("Failed to listenPrefix '%s' to pd", e, 
prefix);
+        }
+    }
+
+    @Override
+    public <T> List<String> extractValuesFromResponse(T response) {
+        List<String> values = new ArrayList<>();
+        WatchResponse res = (WatchResponse) response;
+        for (WatchEvent event : res.getEventsList()) {
+            // Skip if not PUT event
+            if (!event.getType().equals(WatchType.Put)) {
+                return null;
+            }
+            String value = event.getCurrent().getValue();
+            values.add(value);
+        }
+        return values;
+    }
+
+    @Override
+    public <T> Map<String, String> extractKVFromResponse(T response) {
+        Map<String, String> resultMap = new HashMap<>();
+        WatchResponse res = (WatchResponse) response;
+        for (WatchEvent event : res.getEventsList()) {
+            // Skip if not etcd PUT event
+            if (!event.getType().equals(WatchType.Put)) {
+                continue;
+            }
+
+            String key = event.getCurrent().getKey();
+            String value = event.getCurrent().getValue();
+            if (Strings.isNullOrEmpty(key)) {
+                continue;
+            }
+            resultMap.put(key, value);
+        }
+        return resultMap;
+    }
+
+    @Override
+    public LockResult lock(String key, long ttl) {
+        while (true) {

Review Comment:
   没有次数限制嘛?



##########
hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/StandardHugeGraph.java:
##########
@@ -453,9 +458,25 @@ private void clearVertexCache() {
         }
     }
 
-    private SchemaTransaction openSchemaTransaction() throws HugeException {
+    private boolean isHstore() {
+        return this.storeProvider.isHstore();
+    }
+
+    private void initMetaManager() {
+        this.metaManager.connect("hg", MetaManager.MetaDriverType.PD,

Review Comment:
   这个链接是不是每个新的链接都要调用一次?是否只执行一次就可以呢?



##########
hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/auth/HugeRole.java:
##########
@@ -0,0 +1,240 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.auth;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hugegraph.HugeGraphParams;
+import org.apache.hugegraph.auth.SchemaDefine.Entity;
+import org.apache.hugegraph.backend.id.Id;
+import org.apache.hugegraph.backend.id.IdGenerator;
+import org.apache.hugegraph.schema.VertexLabel;
+import org.apache.hugegraph.util.E;
+import org.apache.tinkerpop.gremlin.structure.Graph.Hidden;
+import org.apache.tinkerpop.gremlin.structure.T;
+
+public class HugeRole extends Entity {
+
+    private static final long serialVersionUID = 2330399818352242686L;
+
+    private String name;
+    private String nickname;
+    private String graphSpace;
+    private String description;
+
+    public HugeRole(Id id, String name, String graphSpace) {
+        this.id = id;
+        this.name = name;
+        this.graphSpace = graphSpace;
+        this.description = null;
+    }
+
+    public HugeRole(String name, String graphSpace) {
+        this(StringUtils.isNotEmpty(name) ? IdGenerator.of(name) : null,
+             name, graphSpace);
+    }
+
+    public HugeRole(Id id, String graphSpace) {
+        this(id, id.asString(), graphSpace);
+    }
+
+    public static HugeRole fromMap(Map<String, Object> map) {
+        HugeRole role = new HugeRole("", "");
+        return fromMap(map, role);
+    }
+
+    public static Schema schema(HugeGraphParams graph) {
+        return new Schema(graph);
+    }
+
+    @Override
+    public ResourceType type() {
+        return ResourceType.GRANT;
+    }
+
+    @Override
+    public String label() {
+        return P.ROLE;
+    }
+
+    @Override
+    public String name() {
+        return this.name;
+    }
+
+    public void name(String name) {
+        this.name = name;
+    }
+
+    public String nickname() {
+        return this.nickname;
+    }
+
+    public void nickname(String nickname) {
+        this.nickname = nickname;
+    }
+
+    public String graphSpace() {
+        return this.graphSpace;
+    }
+
+    public String description() {
+        return this.description;
+    }
+
+    public void description(String description) {
+        this.description = description;
+    }
+
+    @Override
+    public String toString() {
+        return String.format("HugeGroup(%s)", this.id);
+    }
+
+    @Override
+    protected boolean property(String key, Object value) {
+        if (super.property(key, value)) {
+            return true;
+        }
+        switch (key) {
+            case P.GRAPHSPACE:
+                this.graphSpace = (String) value;
+                break;
+            case P.NAME:
+                this.name = (String) value;
+                break;
+            case P.NICKNAME:
+                this.nickname = (String) value;
+                break;
+            case P.DESCRIPTION:
+                this.description = (String) value;
+                break;
+            default:
+                throw new AssertionError("Unsupported key: " + key);
+        }
+        return true;
+    }
+
+    @Override
+    protected Object[] asArray() {
+        E.checkState(this.name != null, "Group name can't be null");
+
+        List<Object> list = new ArrayList<>(12);
+
+        list.add(T.label);
+        list.add(P.ROLE);
+
+        list.add(P.GRAPHSPACE);
+        list.add(this.graphSpace);
+
+        list.add(P.NAME);
+        list.add(this.name);
+
+        if (this.nickname != null) {
+            list.add(P.NICKNAME);
+            list.add(this.nickname);
+        }
+
+        if (this.description != null) {
+            list.add(P.DESCRIPTION);
+            list.add(this.description);
+        }
+
+        return super.asArray(list);
+    }
+
+    @Override
+    public Map<String, Object> asMap() {
+        E.checkState(this.name != null, "Group name can't be null");
+
+        Map<String, Object> map = new HashMap<>();
+
+        map.put(Hidden.unHide(P.NAME), this.name);
+        map.put(Hidden.unHide(P.GRAPHSPACE), this.graphSpace);
+        if (this.description != null) {
+            map.put(Hidden.unHide(P.DESCRIPTION), this.description);
+        }
+
+        if (this.nickname != null) {
+            map.put(Hidden.unHide(P.NICKNAME), this.nickname);
+        }
+
+        return super.asMap(map);
+    }
+
+    public static final class P {
+
+        public static final String ROLE = Hidden.hide("role");
+
+        public static final String ID = T.id.getAccessor();
+        public static final String LABEL = T.label.getAccessor();
+
+        public static final String NAME = "~role_name";
+        public static final String NICKNAME = "~role_nickname";
+        public static final String GRAPHSPACE = "~graphspace";
+        public static final String DESCRIPTION = "~role_description";

Review Comment:
   这里咋不用hide, hide就是加~前缀吧



##########
hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/query/ConditionQuery.java:
##########
@@ -870,4 +876,14 @@ public interface ResultsFilter {
 
         boolean test(HugeElement element);
     }
+
+    public byte[] bytes() {
+        Gson gson = new GsonBuilder()

Review Comment:
   Gson可以做成静态全局变量,不用每次创建了



##########
hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/store/BackendStoreInfo.java:
##########
@@ -41,6 +41,9 @@ public boolean exists() {
     }
 
     public boolean checkVersion() {
+        if (this.storeProvider.isHstore()) { // skip for hstore now

Review Comment:
   hstore暂时不用考虑兼容问题嘛?如果用户滚动升级,节点版本不一致,是否会有风险?
   如果有的话,是否检查下版本比较合适呢?至少保证不兼容的版本不会产生脏数据



##########
hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/query/serializer/AbstractSerializerAdapter.java:
##########
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.backend.query.serializer;
+
+import java.lang.reflect.Type;
+import java.util.Map;
+
+import org.apache.hugegraph.backend.BackendException;
+
+import com.google.gson.JsonDeserializationContext;
+import com.google.gson.JsonDeserializer;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParseException;
+import com.google.gson.JsonPrimitive;
+import com.google.gson.JsonSerializationContext;
+import com.google.gson.JsonSerializer;
+
+public abstract class AbstractSerializerAdapter<T> implements 
JsonSerializer<T>,

Review Comment:
   以后可以考虑优化成二进制协议



##########
hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/query/serializer/AbstractSerializerAdapter.java:
##########
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.backend.query.serializer;
+
+import java.lang.reflect.Type;
+import java.util.Map;
+
+import org.apache.hugegraph.backend.BackendException;
+
+import com.google.gson.JsonDeserializationContext;
+import com.google.gson.JsonDeserializer;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParseException;
+import com.google.gson.JsonPrimitive;
+import com.google.gson.JsonSerializationContext;
+import com.google.gson.JsonSerializer;
+
+public abstract class AbstractSerializerAdapter<T> implements 
JsonSerializer<T>,

Review Comment:
   以后可以考虑优化成二进制协议



##########
hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/query/ConditionQuery.java:
##########
@@ -870,4 +876,14 @@ public interface ResultsFilter {
 
         boolean test(HugeElement element);
     }
+
+    public byte[] bytes() {
+        Gson gson = new GsonBuilder()

Review Comment:
   Gson可以做成静态全局变量,不用每次创建了



##########
hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/store/BackendStoreInfo.java:
##########
@@ -41,6 +41,9 @@ public boolean exists() {
     }
 
     public boolean checkVersion() {
+        if (this.storeProvider.isHstore()) { // skip for hstore now

Review Comment:
   hstore暂时不用考虑兼容问题嘛?如果用户滚动升级,节点版本不一致,是否会有风险?
   如果有的话,是否检查下版本比较合适呢?至少保证不兼容的版本不会产生脏数据



##########
hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/meta/PdMetaDriver.java:
##########
@@ -0,0 +1,250 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.meta;
+
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Consumer;
+
+import org.apache.hugegraph.HugeException;
+import org.apache.hugegraph.meta.lock.LockResult;
+import org.apache.hugegraph.meta.lock.PdDistributedLock;
+import org.apache.hugegraph.pd.client.KvClient;
+import org.apache.hugegraph.pd.client.PDClient;
+import org.apache.hugegraph.pd.client.PDConfig;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.kv.KResponse;
+import org.apache.hugegraph.pd.grpc.kv.LockResponse;
+import org.apache.hugegraph.pd.grpc.kv.ScanPrefixResponse;
+import org.apache.hugegraph.pd.grpc.kv.TTLResponse;
+import org.apache.hugegraph.pd.grpc.kv.WatchEvent;
+import org.apache.hugegraph.pd.grpc.kv.WatchResponse;
+import org.apache.hugegraph.pd.grpc.kv.WatchType;
+
+import com.google.common.base.Strings;
+
+public class PdMetaDriver implements MetaDriver {
+
+    KvClient<WatchResponse> client = null;
+    PDClient pdClient = null;
+    private PdDistributedLock lock;
+
+    public PdMetaDriver(String pdPeer) {
+        PDConfig pdConfig = PDConfig.of(pdPeer);
+        this.client = new KvClient<>(pdConfig);
+        this.pdClient = PDClient.create(pdConfig);
+        lock = new PdDistributedLock(this.client);
+    }
+
+    public static void main(String[] args) {
+        PDConfig pdConfig = PDConfig.of("127.0.0.1:8686");
+        KvClient<WatchResponse> client = new KvClient<>(pdConfig);
+        ScanPrefixResponse contents;
+        try {
+            contents = client.scanPrefix("HUGEGRAPH/METRICS");
+            Map<String, String> map = contents.getKvsMap();
+            for (Map.Entry<String, String> entry : map.entrySet()) {
+                System.out.println(entry.getKey() + ":" + entry.getValue());
+            }
+            System.out.println(map.size());
+        } catch (PDException e) {
+            e.printStackTrace();
+        }
+        Date dNow = new Date();
+        System.out.println(dNow);

Review Comment:
   log



##########
hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/meta/lock/DistributedLock.java:
##########
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.meta.lock;
+
+import java.nio.charset.Charset;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.hugegraph.util.Log;
+import org.slf4j.Logger;
+
+import io.etcd.jetcd.ByteSequence;
+import io.etcd.jetcd.Client;
+import io.etcd.jetcd.KV;
+import io.etcd.jetcd.Lease;
+import io.etcd.jetcd.Lock;
+
+public class DistributedLock extends AbstractDistributedLock {
+
+    protected static final Logger LOG = Log.logger(DistributedLock.class);
+    private static final long UNLIMIT_TIMEOUT = -1L;
+    private final static Object mutex = new Object();
+    private static DistributedLock lockProvider = null;
+    private final KV kvClient;
+    private final Lock lockClient;
+    private final Lease leaseClient;
+
+    private DistributedLock(Client client) {
+        this.kvClient = client.getKVClient();
+        this.lockClient = client.getLockClient();
+        this.leaseClient = client.getLeaseClient();
+    }
+
+    public static DistributedLock getInstance(Client client) {
+        synchronized (mutex) {
+            if (null == lockProvider) {
+                lockProvider = new DistributedLock(client);
+            }
+        }
+        return lockProvider;
+    }
+
+    private static ByteSequence toByteSequence(String content) {
+        return ByteSequence.from(content, Charset.defaultCharset());
+    }
+
+    public LockResult tryLock(String lockName, long ttl, long timeout) {
+        LockResult lockResult = new LockResult();
+        ScheduledExecutorService service =
+                Executors.newSingleThreadScheduledExecutor();

Review Comment:
   这个是不是可以放全局队列来使用,没看错的话,这里的意思是没次lock就开一个线程来keep alive



##########
hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/auth/HugeRole.java:
##########
@@ -0,0 +1,240 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.auth;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hugegraph.HugeGraphParams;
+import org.apache.hugegraph.auth.SchemaDefine.Entity;
+import org.apache.hugegraph.backend.id.Id;
+import org.apache.hugegraph.backend.id.IdGenerator;
+import org.apache.hugegraph.schema.VertexLabel;
+import org.apache.hugegraph.util.E;
+import org.apache.tinkerpop.gremlin.structure.Graph.Hidden;
+import org.apache.tinkerpop.gremlin.structure.T;
+
+public class HugeRole extends Entity {
+
+    private static final long serialVersionUID = 2330399818352242686L;
+
+    private String name;
+    private String nickname;
+    private String graphSpace;
+    private String description;
+
+    public HugeRole(Id id, String name, String graphSpace) {
+        this.id = id;
+        this.name = name;
+        this.graphSpace = graphSpace;
+        this.description = null;
+    }
+
+    public HugeRole(String name, String graphSpace) {
+        this(StringUtils.isNotEmpty(name) ? IdGenerator.of(name) : null,
+             name, graphSpace);
+    }
+
+    public HugeRole(Id id, String graphSpace) {
+        this(id, id.asString(), graphSpace);
+    }
+
+    public static HugeRole fromMap(Map<String, Object> map) {
+        HugeRole role = new HugeRole("", "");
+        return fromMap(map, role);
+    }
+
+    public static Schema schema(HugeGraphParams graph) {
+        return new Schema(graph);
+    }
+
+    @Override
+    public ResourceType type() {
+        return ResourceType.GRANT;
+    }
+
+    @Override
+    public String label() {
+        return P.ROLE;
+    }
+
+    @Override
+    public String name() {
+        return this.name;
+    }
+
+    public void name(String name) {
+        this.name = name;
+    }
+
+    public String nickname() {
+        return this.nickname;
+    }
+
+    public void nickname(String nickname) {
+        this.nickname = nickname;
+    }
+
+    public String graphSpace() {
+        return this.graphSpace;
+    }
+
+    public String description() {
+        return this.description;
+    }
+
+    public void description(String description) {
+        this.description = description;
+    }
+
+    @Override
+    public String toString() {
+        return String.format("HugeGroup(%s)", this.id);
+    }
+
+    @Override
+    protected boolean property(String key, Object value) {
+        if (super.property(key, value)) {
+            return true;
+        }
+        switch (key) {
+            case P.GRAPHSPACE:
+                this.graphSpace = (String) value;
+                break;
+            case P.NAME:
+                this.name = (String) value;
+                break;
+            case P.NICKNAME:
+                this.nickname = (String) value;
+                break;
+            case P.DESCRIPTION:
+                this.description = (String) value;
+                break;
+            default:
+                throw new AssertionError("Unsupported key: " + key);
+        }
+        return true;
+    }
+
+    @Override
+    protected Object[] asArray() {
+        E.checkState(this.name != null, "Group name can't be null");
+
+        List<Object> list = new ArrayList<>(12);
+
+        list.add(T.label);
+        list.add(P.ROLE);
+
+        list.add(P.GRAPHSPACE);
+        list.add(this.graphSpace);
+
+        list.add(P.NAME);
+        list.add(this.name);
+
+        if (this.nickname != null) {
+            list.add(P.NICKNAME);
+            list.add(this.nickname);
+        }
+
+        if (this.description != null) {
+            list.add(P.DESCRIPTION);
+            list.add(this.description);
+        }
+
+        return super.asArray(list);
+    }
+
+    @Override
+    public Map<String, Object> asMap() {
+        E.checkState(this.name != null, "Group name can't be null");
+
+        Map<String, Object> map = new HashMap<>();
+
+        map.put(Hidden.unHide(P.NAME), this.name);
+        map.put(Hidden.unHide(P.GRAPHSPACE), this.graphSpace);
+        if (this.description != null) {
+            map.put(Hidden.unHide(P.DESCRIPTION), this.description);
+        }
+
+        if (this.nickname != null) {
+            map.put(Hidden.unHide(P.NICKNAME), this.nickname);
+        }
+
+        return super.asMap(map);
+    }
+
+    public static final class P {
+
+        public static final String ROLE = Hidden.hide("role");
+
+        public static final String ID = T.id.getAccessor();
+        public static final String LABEL = T.label.getAccessor();
+
+        public static final String NAME = "~role_name";
+        public static final String NICKNAME = "~role_nickname";
+        public static final String GRAPHSPACE = "~graphspace";
+        public static final String DESCRIPTION = "~role_description";

Review Comment:
   这里咋不用hide, hide就是加~前缀吧



##########
hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/query/serializer/QueryIdAdapter.java:
##########
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.backend.query.serializer;
+
+import java.lang.reflect.Type;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hugegraph.backend.id.EdgeId;
+import org.apache.hugegraph.backend.id.Id;
+import org.apache.hugegraph.backend.id.IdGenerator;
+import org.apache.hugegraph.backend.serializer.BinaryBackendEntry;
+
+public class QueryIdAdapter extends AbstractSerializerAdapter<Id> {
+
+    static HashMap<String, Type> cls;
+
+    static {
+        cls = new HashMap() {{

Review Comment:
   同



##########
hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/meta/lock/PdDistributedLock.java:
##########
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.meta.lock;
+
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hugegraph.HugeException;
+import org.apache.hugegraph.pd.client.KvClient;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.kv.LockResponse;
+
+/**
+ * @author zhangyingjie
+ * @date 2022/6/18
+ **/
+public class PdDistributedLock extends AbstractDistributedLock {
+
+    private static int poolSize = 8;
+    private final KvClient client;
+    private ScheduledExecutorService service = new 
ScheduledThreadPoolExecutor(poolSize, r -> {
+        Thread t = new Thread(r);

Review Comment:
   线程最好有个名字,比如这里keepalive-*



##########
hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/query/serializer/QueryAdapter.java:
##########
@@ -0,0 +1,150 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.backend.query.serializer;
+
+import java.lang.reflect.Type;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hugegraph.backend.query.Condition;
+import org.apache.hugegraph.type.define.Directions;
+
+import com.google.gson.JsonDeserializationContext;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParseException;
+import com.google.gson.JsonPrimitive;
+import com.google.gson.JsonSerializationContext;
+import com.google.gson.reflect.TypeToken;
+
+public class QueryAdapter extends AbstractSerializerAdapter<Condition> {
+
+    static HashMap<String, Type> cls;
+
+    static {
+        cls = new HashMap() {{

Review Comment:
   可以用immutableMap嘛?



##########
hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/meta/lock/PdDistributedLock.java:
##########
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.meta.lock;
+
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hugegraph.HugeException;
+import org.apache.hugegraph.pd.client.KvClient;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.kv.LockResponse;
+
+/**
+ * @author zhangyingjie
+ * @date 2022/6/18
+ **/
+public class PdDistributedLock extends AbstractDistributedLock {
+
+    private static int poolSize = 8;
+    private final KvClient client;
+    private ScheduledExecutorService service = new 
ScheduledThreadPoolExecutor(poolSize, r -> {
+        Thread t = new Thread(r);

Review Comment:
   线程最好有个名字,比如这里keepalive-*



##########
hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/meta/lock/DistributedLock.java:
##########
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.meta.lock;
+
+import java.nio.charset.Charset;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.hugegraph.util.Log;
+import org.slf4j.Logger;
+
+import io.etcd.jetcd.ByteSequence;
+import io.etcd.jetcd.Client;
+import io.etcd.jetcd.KV;
+import io.etcd.jetcd.Lease;
+import io.etcd.jetcd.Lock;
+
+public class DistributedLock extends AbstractDistributedLock {
+
+    protected static final Logger LOG = Log.logger(DistributedLock.class);
+    private static final long UNLIMIT_TIMEOUT = -1L;
+    private final static Object mutex = new Object();
+    private static DistributedLock lockProvider = null;
+    private final KV kvClient;
+    private final Lock lockClient;
+    private final Lease leaseClient;
+
+    private DistributedLock(Client client) {
+        this.kvClient = client.getKVClient();
+        this.lockClient = client.getLockClient();
+        this.leaseClient = client.getLeaseClient();
+    }
+
+    public static DistributedLock getInstance(Client client) {
+        synchronized (mutex) {
+            if (null == lockProvider) {
+                lockProvider = new DistributedLock(client);
+            }
+        }
+        return lockProvider;
+    }
+
+    private static ByteSequence toByteSequence(String content) {
+        return ByteSequence.from(content, Charset.defaultCharset());
+    }
+
+    public LockResult tryLock(String lockName, long ttl, long timeout) {
+        LockResult lockResult = new LockResult();
+        ScheduledExecutorService service =
+                Executors.newSingleThreadScheduledExecutor();
+
+        lockResult.lockSuccess(false);
+        lockResult.setService(service);
+
+        Long leaseId;
+
+        try {
+            leaseId = this.leaseClient.grant(ttl).get().getID();
+        } catch (InterruptedException | ExecutionException e) {
+            LOG.warn(String.format("Thread {} failed to create lease for {} " +
+                                   "with ttl {}", 
Thread.currentThread().getName(),
+                                   lockName, ttl),
+                     e);
+            return lockResult;
+        }
+
+        lockResult.setLeaseId(leaseId);
+
+        long period = ttl - ttl / 5;
+        service.scheduleAtFixedRate(new KeepAliveTask(this.leaseClient, 
leaseId),
+                                    period, period, TimeUnit.SECONDS);
+
+        try {
+            if (timeout == UNLIMIT_TIMEOUT) {
+                this.lockClient.lock(toByteSequence(lockName), leaseId).get();
+
+            } else {
+                this.lockClient.lock(toByteSequence(lockName), leaseId)
+                               .get(1, TimeUnit.SECONDS);
+            }
+        } catch (InterruptedException | ExecutionException e) {
+            LOG.warn(String.format("Thread {} failed to lock {}",
+                                   Thread.currentThread().getName(), lockName),
+                     e);
+            service.shutdown();
+            this.revokeLease(leaseId);
+            return lockResult;
+        } catch (TimeoutException e) {
+            // 获取锁超时
+            LOG.warn("Thread {} timeout to lock {}",
+                     Thread.currentThread().getName(), lockName);
+            service.shutdown();
+            this.revokeLease(leaseId);
+            return lockResult;
+        }
+
+        lockResult.lockSuccess(true);
+
+        return lockResult;
+    }
+
+    @Override
+    public LockResult lock(String lockName, long ttl) {
+        return tryLock(lockName, ttl, UNLIMIT_TIMEOUT);
+    }
+
+    @Override
+    public void unLock(String lockName, LockResult lockResult) {
+        LOG.debug("Thread {} start to unlock {}",
+                  Thread.currentThread().getName(), lockName);
+
+        lockResult.getService().shutdown();
+
+        if (lockResult.getLeaseId() != 0L) {
+            this.revokeLease(lockResult.getLeaseId());
+        }
+
+        LOG.debug("Thread {} unlock {} successfully",
+                  Thread.currentThread().getName(), lockName);
+    }
+
+    private void revokeLease(long leaseId) {
+        try {
+            this.leaseClient.revoke(leaseId).get();
+        } catch (InterruptedException | ExecutionException e) {
+            LOG.warn(String.format("Thread %s failed to revoke release %s",
+                                   Thread.currentThread().getName(), leaseId), 
e);
+        }
+    }
+
+    public static class KeepAliveTask implements Runnable {
+
+        private final Lease leaseClient;
+        private final long leaseId;
+
+        KeepAliveTask(Lease leaseClient, long leaseId) {
+            this.leaseClient = leaseClient;
+            this.leaseId = leaseId;
+        }
+
+        @Override
+        public void run() {
+            this.leaseClient.keepAliveOnce(this.leaseId);
+        }

Review Comment:
   这里可以统计下调用的时间间隔



##########
hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/StandardHugeGraph.java:
##########
@@ -453,9 +458,25 @@ private void clearVertexCache() {
         }
     }
 
-    private SchemaTransaction openSchemaTransaction() throws HugeException {
+    private boolean isHstore() {
+        return this.storeProvider.isHstore();
+    }
+
+    private void initMetaManager() {
+        this.metaManager.connect("hg", MetaManager.MetaDriverType.PD,

Review Comment:
   这个链接是不是每个新的链接都要调用一次?是否只执行一次就可以呢?



##########
hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/meta/lock/DistributedLock.java:
##########
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.meta.lock;
+
+import java.nio.charset.Charset;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.hugegraph.util.Log;
+import org.slf4j.Logger;
+
+import io.etcd.jetcd.ByteSequence;
+import io.etcd.jetcd.Client;
+import io.etcd.jetcd.KV;
+import io.etcd.jetcd.Lease;
+import io.etcd.jetcd.Lock;
+
+public class DistributedLock extends AbstractDistributedLock {
+
+    protected static final Logger LOG = Log.logger(DistributedLock.class);
+    private static final long UNLIMIT_TIMEOUT = -1L;
+    private final static Object mutex = new Object();
+    private static DistributedLock lockProvider = null;
+    private final KV kvClient;
+    private final Lock lockClient;
+    private final Lease leaseClient;
+
+    private DistributedLock(Client client) {
+        this.kvClient = client.getKVClient();
+        this.lockClient = client.getLockClient();
+        this.leaseClient = client.getLeaseClient();
+    }
+
+    public static DistributedLock getInstance(Client client) {
+        synchronized (mutex) {
+            if (null == lockProvider) {
+                lockProvider = new DistributedLock(client);
+            }
+        }
+        return lockProvider;
+    }
+
+    private static ByteSequence toByteSequence(String content) {
+        return ByteSequence.from(content, Charset.defaultCharset());
+    }
+
+    public LockResult tryLock(String lockName, long ttl, long timeout) {
+        LockResult lockResult = new LockResult();
+        ScheduledExecutorService service =
+                Executors.newSingleThreadScheduledExecutor();
+
+        lockResult.lockSuccess(false);
+        lockResult.setService(service);
+
+        Long leaseId;
+
+        try {
+            leaseId = this.leaseClient.grant(ttl).get().getID();
+        } catch (InterruptedException | ExecutionException e) {
+            LOG.warn(String.format("Thread {} failed to create lease for {} " +
+                                   "with ttl {}", 
Thread.currentThread().getName(),
+                                   lockName, ttl),
+                     e);
+            return lockResult;
+        }
+
+        lockResult.setLeaseId(leaseId);
+
+        long period = ttl - ttl / 5;
+        service.scheduleAtFixedRate(new KeepAliveTask(this.leaseClient, 
leaseId),
+                                    period, period, TimeUnit.SECONDS);
+
+        try {
+            if (timeout == UNLIMIT_TIMEOUT) {
+                this.lockClient.lock(toByteSequence(lockName), leaseId).get();
+
+            } else {
+                this.lockClient.lock(toByteSequence(lockName), leaseId)
+                               .get(1, TimeUnit.SECONDS);
+            }
+        } catch (InterruptedException | ExecutionException e) {
+            LOG.warn(String.format("Thread {} failed to lock {}",
+                                   Thread.currentThread().getName(), lockName),
+                     e);
+            service.shutdown();
+            this.revokeLease(leaseId);
+            return lockResult;
+        } catch (TimeoutException e) {
+            // 获取锁超时
+            LOG.warn("Thread {} timeout to lock {}",
+                     Thread.currentThread().getName(), lockName);
+            service.shutdown();
+            this.revokeLease(leaseId);
+            return lockResult;
+        }
+
+        lockResult.lockSuccess(true);
+
+        return lockResult;
+    }
+
+    @Override
+    public LockResult lock(String lockName, long ttl) {
+        return tryLock(lockName, ttl, UNLIMIT_TIMEOUT);
+    }
+
+    @Override
+    public void unLock(String lockName, LockResult lockResult) {
+        LOG.debug("Thread {} start to unlock {}",
+                  Thread.currentThread().getName(), lockName);
+
+        lockResult.getService().shutdown();
+
+        if (lockResult.getLeaseId() != 0L) {
+            this.revokeLease(lockResult.getLeaseId());
+        }
+
+        LOG.debug("Thread {} unlock {} successfully",
+                  Thread.currentThread().getName(), lockName);
+    }
+
+    private void revokeLease(long leaseId) {
+        try {
+            this.leaseClient.revoke(leaseId).get();
+        } catch (InterruptedException | ExecutionException e) {
+            LOG.warn(String.format("Thread %s failed to revoke release %s",
+                                   Thread.currentThread().getName(), leaseId), 
e);
+        }
+    }
+
+    public static class KeepAliveTask implements Runnable {
+
+        private final Lease leaseClient;
+        private final long leaseId;
+
+        KeepAliveTask(Lease leaseClient, long leaseId) {
+            this.leaseClient = leaseClient;
+            this.leaseId = leaseId;
+        }
+
+        @Override
+        public void run() {
+            this.leaseClient.keepAliveOnce(this.leaseId);
+        }

Review Comment:
   这里可以统计下调用的时间间隔



##########
hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/auth/SchemaDefine.java:
##########
@@ -281,6 +290,19 @@ public abstract static class Relationship extends 
AuthElement {
 
         public abstract Id target();
 
+        public void setId() {
+            this.id(IdGenerator.of(this.source().asString() + "->" +
+                                   this.target().asString()));
+        }
+
+        public static <T extends Relationship> T fromMap(Map<String, Object> 
map, T entity) {

Review Comment:
   fromMap名字有点奇怪,要不要改一下?



##########
hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/query/serializer/QueryAdapter.java:
##########
@@ -0,0 +1,150 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.backend.query.serializer;
+
+import java.lang.reflect.Type;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hugegraph.backend.query.Condition;
+import org.apache.hugegraph.type.define.Directions;
+
+import com.google.gson.JsonDeserializationContext;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParseException;
+import com.google.gson.JsonPrimitive;
+import com.google.gson.JsonSerializationContext;
+import com.google.gson.reflect.TypeToken;
+
+public class QueryAdapter extends AbstractSerializerAdapter<Condition> {
+
+    static HashMap<String, Type> cls;
+
+    static {
+        cls = new HashMap() {{

Review Comment:
   可以用immutableMap嘛?



##########
hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/query/serializer/QueryIdAdapter.java:
##########
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.backend.query.serializer;
+
+import java.lang.reflect.Type;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hugegraph.backend.id.EdgeId;
+import org.apache.hugegraph.backend.id.Id;
+import org.apache.hugegraph.backend.id.IdGenerator;
+import org.apache.hugegraph.backend.serializer.BinaryBackendEntry;
+
+public class QueryIdAdapter extends AbstractSerializerAdapter<Id> {
+
+    static HashMap<String, Type> cls;
+
+    static {
+        cls = new HashMap() {{

Review Comment:
   同



##########
hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/meta/PdMetaDriver.java:
##########
@@ -0,0 +1,250 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.meta;
+
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Consumer;
+
+import org.apache.hugegraph.HugeException;
+import org.apache.hugegraph.meta.lock.LockResult;
+import org.apache.hugegraph.meta.lock.PdDistributedLock;
+import org.apache.hugegraph.pd.client.KvClient;
+import org.apache.hugegraph.pd.client.PDClient;
+import org.apache.hugegraph.pd.client.PDConfig;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.kv.KResponse;
+import org.apache.hugegraph.pd.grpc.kv.LockResponse;
+import org.apache.hugegraph.pd.grpc.kv.ScanPrefixResponse;
+import org.apache.hugegraph.pd.grpc.kv.TTLResponse;
+import org.apache.hugegraph.pd.grpc.kv.WatchEvent;
+import org.apache.hugegraph.pd.grpc.kv.WatchResponse;
+import org.apache.hugegraph.pd.grpc.kv.WatchType;
+
+import com.google.common.base.Strings;
+
+public class PdMetaDriver implements MetaDriver {
+
+    KvClient<WatchResponse> client = null;
+    PDClient pdClient = null;
+    private PdDistributedLock lock;
+
+    public PdMetaDriver(String pdPeer) {
+        PDConfig pdConfig = PDConfig.of(pdPeer);
+        this.client = new KvClient<>(pdConfig);
+        this.pdClient = PDClient.create(pdConfig);
+        lock = new PdDistributedLock(this.client);
+    }
+
+    public static void main(String[] args) {
+        PDConfig pdConfig = PDConfig.of("127.0.0.1:8686");
+        KvClient<WatchResponse> client = new KvClient<>(pdConfig);
+        ScanPrefixResponse contents;
+        try {
+            contents = client.scanPrefix("HUGEGRAPH/METRICS");
+            Map<String, String> map = contents.getKvsMap();
+            for (Map.Entry<String, String> entry : map.entrySet()) {
+                System.out.println(entry.getKey() + ":" + entry.getValue());
+            }
+            System.out.println(map.size());
+        } catch (PDException e) {
+            e.printStackTrace();
+        }
+        Date dNow = new Date();
+        System.out.println(dNow);
+        SimpleDateFormat ft = new SimpleDateFormat("HHmmss");
+        System.out.println("当前时间为: " + ft.format(dNow));
+    }
+
+    public PDClient pdClient() {
+        return this.pdClient;
+    }
+
+    @Override
+    public void put(String key, String value) {
+        try {
+            this.client.put(key, value);
+        } catch (PDException e) {
+            throw new HugeException("Failed to put '%s:%s' to pd", e, key, 
value);
+        }
+    }
+
+    @Override
+    public String get(String key) {
+        try {
+            KResponse response = this.client.get(key);
+            return response.getValue();
+        } catch (PDException e) {
+            throw new HugeException("Failed to get '%s' from pd", e, key);
+        }
+    }
+
+    @Override
+    public void delete(String key) {
+        try {
+            this.client.delete(key);
+        } catch (PDException e) {
+            throw new HugeException("Failed to delete '%s' from pd", e, key);
+        }
+    }
+
+    @Override
+    public void deleteWithPrefix(String prefix) {
+        try {
+            this.client.deletePrefix(prefix);
+        } catch (PDException e) {
+            throw new HugeException("Failed to deleteWithPrefix '%s' from pd", 
e, prefix);
+        }
+    }
+
+    @Override
+    public Map<String, String> scanWithPrefix(String prefix) {
+        try {
+            ScanPrefixResponse response = this.client.scanPrefix(prefix);
+            return response.getKvsMap();
+        } catch (PDException e) {
+            throw new HugeException("Failed to scanWithPrefix '%s' from pd", 
e, prefix);
+        }
+    }
+
+    @Override
+    public <T> void listen(String key, Consumer<T> consumer) {
+        try {
+            this.client.listen(key, (Consumer<WatchResponse>) consumer);
+        } catch (PDException e) {
+            throw new HugeException("Failed to listen '%s' to pd", e, key);
+        }
+    }
+
+    @Override
+    public <T> void listenPrefix(String prefix, Consumer<T> consumer) {
+        try {
+            this.client.listenPrefix(prefix, (Consumer<WatchResponse>) 
consumer);
+        } catch (PDException e) {
+            throw new HugeException("Failed to listenPrefix '%s' to pd", e, 
prefix);
+        }
+    }
+
+    @Override
+    public <T> List<String> extractValuesFromResponse(T response) {
+        List<String> values = new ArrayList<>();
+        WatchResponse res = (WatchResponse) response;
+        for (WatchEvent event : res.getEventsList()) {
+            // Skip if not PUT event
+            if (!event.getType().equals(WatchType.Put)) {
+                return null;
+            }
+            String value = event.getCurrent().getValue();
+            values.add(value);
+        }
+        return values;
+    }
+
+    @Override
+    public <T> Map<String, String> extractKVFromResponse(T response) {
+        Map<String, String> resultMap = new HashMap<>();
+        WatchResponse res = (WatchResponse) response;
+        for (WatchEvent event : res.getEventsList()) {
+            // Skip if not etcd PUT event
+            if (!event.getType().equals(WatchType.Put)) {
+                continue;
+            }
+
+            String key = event.getCurrent().getKey();
+            String value = event.getCurrent().getValue();
+            if (Strings.isNullOrEmpty(key)) {
+                continue;
+            }
+            resultMap.put(key, value);
+        }
+        return resultMap;
+    }
+
+    @Override
+    public LockResult lock(String key, long ttl) {
+        while (true) {

Review Comment:
   没有次数限制嘛?



##########
hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/meta/lock/DistributedLock.java:
##########
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.meta.lock;
+
+import java.nio.charset.Charset;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.hugegraph.util.Log;
+import org.slf4j.Logger;
+
+import io.etcd.jetcd.ByteSequence;
+import io.etcd.jetcd.Client;
+import io.etcd.jetcd.KV;
+import io.etcd.jetcd.Lease;
+import io.etcd.jetcd.Lock;
+
+public class DistributedLock extends AbstractDistributedLock {
+
+    protected static final Logger LOG = Log.logger(DistributedLock.class);
+    private static final long UNLIMIT_TIMEOUT = -1L;
+    private final static Object mutex = new Object();
+    private static DistributedLock lockProvider = null;
+    private final KV kvClient;
+    private final Lock lockClient;
+    private final Lease leaseClient;
+
+    private DistributedLock(Client client) {
+        this.kvClient = client.getKVClient();
+        this.lockClient = client.getLockClient();
+        this.leaseClient = client.getLeaseClient();
+    }
+
+    public static DistributedLock getInstance(Client client) {
+        synchronized (mutex) {
+            if (null == lockProvider) {
+                lockProvider = new DistributedLock(client);
+            }
+        }
+        return lockProvider;
+    }
+
+    private static ByteSequence toByteSequence(String content) {
+        return ByteSequence.from(content, Charset.defaultCharset());
+    }
+
+    public LockResult tryLock(String lockName, long ttl, long timeout) {
+        LockResult lockResult = new LockResult();
+        ScheduledExecutorService service =
+                Executors.newSingleThreadScheduledExecutor();

Review Comment:
   这个是不是可以放全局队列来使用?没看错的话,这里的意思是每次lock就开一个线程来keep alive



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to