MJY-HUST commented on code in PR #2645:
URL: https://github.com/apache/brpc/pull/2645#discussion_r1612006692


##########
src/bthread/key.cpp:
##########
@@ -204,14 +205,54 @@ class BAIDU_CACHELINE_ALIGNMENT KeyTable {
     SubKeyTable* _subs[KEY_1STLEVEL_SIZE];
 };
 
+class KeyTableList {
+public:
+    KeyTableList() {
+        keytable = NULL;
+    }
+    ~KeyTableList() {
+        bthread::TaskGroup* const g = bthread::tls_task_group;
+        bthread::KeyTable* old_kt = bthread::tls_bls.keytable;
+        while (keytable) {
+            bthread::KeyTable* kt = keytable;
+            keytable = kt->next;
+            bthread::tls_bls.keytable = kt;
+            if (g) {
+                g->current_task()->local_storage.keytable = kt;
+            }
+            delete kt;
+            if (old_kt == kt) {
+                old_kt = NULL;
+            }
+        }
+        bthread::tls_bls.keytable = old_kt;
+        if(g) {
+            g->current_task()->local_storage.keytable = old_kt;
+        }
+    }
+    KeyTable* keytable;
+};
+
 static KeyTable* borrow_keytable(bthread_keytable_pool_t* pool) {
-    if (pool != NULL && pool->free_keytables) {
-        BAIDU_SCOPED_LOCK(pool->mutex);
-        KeyTable* p = (KeyTable*)pool->free_keytables;
+    if (pool != NULL && (pool->list->get()->keytable || pool->free_keytables)) {
+        pthread_rwlock_rdlock(&pool->rwlock);
+        KeyTable* p = pool->list->get()->keytable;
         if (p) {
-            pool->free_keytables = p->next;
+            pool->list->get()->keytable = p->next;
+            pthread_rwlock_unlock(&pool->rwlock);
             return p;
         }
+        pthread_rwlock_unlock(&pool->rwlock);

Review Comment:
   1. Borrowing is allowed only when the pool exists and either the global list or the TLS list is non-empty; otherwise NULL is returned right away so no lock is taken. Inside the if block, the lock is then taken to handle the two ways of obtaining a keytable.
   2. Without the read lock, borrowing a keytable from the TLS list could conflict with bthread_keytable_pool_destroy, which takes the write lock while destroying the TLS lists. So the read lock is needed; in the normal case, when bthread_keytable_pool_destroy is not being called, taking the read lock everywhere does not cause much contention.
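   To make the locking interplay in point 2 concrete, here is a minimal, self-contained sketch; FakePool/FakeKeyTable are simplified stand-ins, not brpc's actual structures, and in the real PR each thread mutates only its own butil::ThreadLocal list, which is why a shared read lock suffices on the borrow path:

```cpp
#include <pthread.h>
#include <cstddef>

// Simplified stand-ins for illustration only.
struct FakeKeyTable { FakeKeyTable* next; };

struct FakePool {
    pthread_rwlock_t rwlock;   // assumed initialized with pthread_rwlock_init()
    // Stands in for pool->list->get()->keytable; in the real code this is a
    // per-thread list, so readers never touch each other's data.
    FakeKeyTable* tls_head;
};

// Borrow path: taking from the (per-thread) list only needs the shared lock;
// it merely has to be excluded against destroy() below.
FakeKeyTable* borrow(FakePool* pool) {
    if (pool == NULL || pool->tls_head == NULL) {
        return NULL;  // nothing to take: return early without locking
    }
    pthread_rwlock_rdlock(&pool->rwlock);
    FakeKeyTable* kt = pool->tls_head;
    if (kt != NULL) {
        pool->tls_head = kt->next;
    }
    pthread_rwlock_unlock(&pool->rwlock);
    return kt;
}

// Destroy path: takes the exclusive lock so it cannot run while any thread
// is borrowing from (or returning to) its list.
void destroy(FakePool* pool) {
    pthread_rwlock_wrlock(&pool->rwlock);
    while (pool->tls_head != NULL) {
        FakeKeyTable* kt = pool->tls_head;
        pool->tls_head = kt->next;
        delete kt;
    }
    pthread_rwlock_unlock(&pool->rwlock);
}
```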



##########
src/bthread/types.h:
##########
@@ -83,8 +83,17 @@ inline std::ostream& operator<<(std::ostream& os, bthread_key_t key) {
 }
 #endif  // __cplusplus
 
+namespace bthread{
+class KeyTableList;
+}
+
+namespace butil {
+template <typename T> class ThreadLocal;
+}
+
 typedef struct {
-    pthread_mutex_t mutex;
+    pthread_rwlock_t rwlock;
+    butil::ThreadLocal<bthread::KeyTableList>* list;
     void* free_keytables;

Review Comment:
   If the template class is not given a concrete type and void* is used instead, then in butil::ThreadLocal's DefaultDtor function, `delete static_cast<T*>(ptr);` becomes a delete of a void* pointer, which appears to be illegal?
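   For what it's worth, a tiny standalone illustration of that point (nothing brpc-specific here): deleting through a void* never runs the destructor and is not valid C++, which is why the template parameter needs the concrete KeyTableList type rather than void*:

```cpp
#include <cstdio>

struct Tracked {
    ~Tracked() { std::puts("~Tracked called"); }
};

int main() {
    void* p = new Tracked;

    // delete p;                      // would be 'delete void*': the compiler
    //                                // warns, ~Tracked() is never called, and
    //                                // the behavior is undefined
    delete static_cast<Tracked*>(p);  // fine: the complete type is known, so
                                      // the destructor runs
    return 0;
}
```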



##########
src/bthread/key.cpp:
##########
@@ -226,14 +267,15 @@ void return_keytable(bthread_keytable_pool_t* pool, KeyTable* kt) {
         delete kt;
         return;
     }
-    std::unique_lock<pthread_mutex_t> mu(pool->mutex);
+    pthread_rwlock_rdlock(&pool->rwlock);
     if (pool->destroyed) {
-        mu.unlock();
+        pthread_rwlock_unlock(&pool->rwlock);
         delete kt;
         return;
     }
-    kt->next = (KeyTable*)pool->free_keytables;
-    pool->free_keytables = kt;
+    kt->next = pool->list->get()->keytable;
+    pool->list->get()->keytable = kt;
+    pthread_rwlock_unlock(&pool->rwlock);
 }
 
 static void cleanup_pthread(void* arg) {

Review Comment:
   Only brpc worker threads have a TLS list, i.e. https://github.com/apache/brpc/blob/master/src/bthread/key.cpp#L468
   borrow_keytable is only called when g != NULL, and only then is the ThreadLocal TLS list initialized. So my understanding is that the check is not needed here; this branch should only be reached when pure pthread code calls into bthread code.



##########
src/bthread/key.cpp:
##########
@@ -226,14 +267,15 @@ void return_keytable(bthread_keytable_pool_t* pool, KeyTable* kt) {
         delete kt;
         return;
     }
-    std::unique_lock<pthread_mutex_t> mu(pool->mutex);
+    pthread_rwlock_rdlock(&pool->rwlock);
     if (pool->destroyed) {
-        mu.unlock();
+        pthread_rwlock_unlock(&pool->rwlock);
         delete kt;
         return;
     }
-    kt->next = (KeyTable*)pool->free_keytables;
-    pool->free_keytables = kt;
+    kt->next = pool->list->get()->keytable;
+    pool->list->get()->keytable = kt;
+    pthread_rwlock_unlock(&pool->rwlock);

Review Comment:
   1. Same answer as above.
   2. We could add an upper bound; once it is exceeded, take the lock and batch-return the keytables to the global list (a rough sketch follows below).
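   A rough sketch of idea 2, under the assumption of a hypothetical cap; the names (kMaxLocalKeyTables, LocalList, GlobalPool) and the mutex protecting the global list are illustrative, not the PR's actual API:

```cpp
#include <pthread.h>
#include <cstddef>

struct KT { KT* next; };

// Hypothetical cap on how many keytables a thread keeps locally.
static const size_t kMaxLocalKeyTables = 32;

struct LocalList {          // stands in for the per-thread KeyTableList
    KT* head;
    size_t size;
};

struct GlobalPool {         // stands in for the pool's global free list
    pthread_mutex_t mutex;  // assumed initialized with pthread_mutex_init()
    KT* free_keytables;
};

// Return a keytable to the thread-local list; once the cap is exceeded,
// splice the whole local chain back into the global list under the lock.
void return_keytable_capped(GlobalPool* pool, LocalList* local, KT* kt) {
    kt->next = local->head;
    local->head = kt;
    ++local->size;
    if (local->size <= kMaxLocalKeyTables) {
        return;                          // common case: no lock taken
    }
    KT* tail = local->head;              // find the tail of the local chain
    while (tail->next != NULL) {
        tail = tail->next;
    }
    pthread_mutex_lock(&pool->mutex);
    tail->next = pool->free_keytables;   // batch-return in one splice
    pool->free_keytables = local->head;
    pthread_mutex_unlock(&pool->mutex);
    local->head = NULL;
    local->size = 0;
}
```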



##########
src/bthread/key.cpp:
##########
@@ -204,14 +205,54 @@ class BAIDU_CACHELINE_ALIGNMENT KeyTable {
     SubKeyTable* _subs[KEY_1STLEVEL_SIZE];
 };
 
+class KeyTableList {
+public:
+    KeyTableList() {
+        keytable = NULL;
+    }
+    ~KeyTableList() {
+        bthread::TaskGroup* const g = bthread::tls_task_group;
+        bthread::KeyTable* old_kt = bthread::tls_bls.keytable;
+        while (keytable) {
+            bthread::KeyTable* kt = keytable;
+            keytable = kt->next;
+            bthread::tls_bls.keytable = kt;
+            if (g) {
+                g->current_task()->local_storage.keytable = kt;
+            }

Review Comment:
   This moves over the deletion logic from the original bthread_keytable_pool_destroy:
   https://github.com/apache/brpc/blob/master/src/bthread/key.cpp#L302-L311
   As I understand it, the purpose is that while the keytable list is being deleted, the destructors of keys registered in kt may access another bthread_local, and a bthread_mutex may also cause the bthread to migrate to another thread; therefore, before destroying a keytable, it must first be assigned to tls_bls.keytable and to the keytable in the task meta.
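   As a concrete (hypothetical) example of the situation being described, user code can register a key whose destructor itself calls bthread_getspecific on another key; for that lookup to resolve correctly while the list is being torn down, the keytable currently being destroyed has to be the one installed in tls_bls.keytable and in the task meta:

```cpp
#include <bthread/bthread.h>  // assumed to declare bthread_key_create() etc.
#include <cstdio>

static bthread_key_t log_key;   // hypothetical "other" bthread-local key
static bthread_key_t data_key;  // key whose destructor uses log_key

// Destructor registered for data_key: it reads another bthread-local value.
// For this lookup to work while the keytable list is being torn down, the
// keytable currently being destroyed must be the one installed in
// tls_bls.keytable (and in the task meta for a running bthread).
static void destroy_data(void* d) {
    void* logger = bthread_getspecific(log_key);
    std::printf("destroying %p, logger=%p\n", d, logger);
    delete static_cast<int*>(d);
}

int main() {
    bthread_key_create(&log_key, NULL);
    bthread_key_create(&data_key, destroy_data);
    bthread_setspecific(data_key, new int(42));
    // When this thread's keytable is eventually destroyed, destroy_data()
    // runs and performs a bthread_getspecific() of its own.
    return 0;
}
```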



##########
src/bthread/key.cpp:
##########
@@ -330,11 +358,12 @@ int bthread_keytable_pool_getstat(bthread_keytable_pool_t* pool,
         LOG(ERROR) << "Param[pool] or Param[stat] is NULL";
         return EINVAL;
     }
-    std::unique_lock<pthread_mutex_t> mu(pool->mutex);
+    pthread_rwlock_wrlock(&pool->rwlock);
     size_t count = 0;
     bthread::KeyTable* p = (bthread::KeyTable*)pool->free_keytables;
     for (; p; p = p->next, ++count) {}
     stat->nfree = count;
+    pthread_rwlock_unlock(&pool->rwlock);

Review Comment:
   Yes.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: dev-unsubscr...@brpc.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

