This is an automated email from the ASF dual-hosted git repository.

jgemignani pushed a commit to branch PG16
in repository https://gitbox.apache.org/repos/asf/age.git


The following commit(s) were added to refs/heads/PG16 by this push:
     new e04bab01 Initial PG16 version (#1237)
e04bab01 is described below

commit e04bab01acf4b84c4686eb5b5734b2b65929f55c
Author: Shoaib <[email protected]>
AuthorDate: Tue Dec 12 20:57:38 2023 +0100

    Initial PG16 version (#1237)
    
    Fixed empty string handling.
    
    Previously, the PG 16 outToken emitted NULL for an empty string, but now it 
emits an empty string "". Consequently, our cypher read function required 
modification to properly decode the empty string as a plain token, thereby 
addressing the comparison issue.
    
    Compared the branches and added the necessary changes so that the query's 
rteperminfos variable doesn't stay NULL.
    
    Fix missing include varatt.h (causing undefined symbol VARDATA_ANY) & 
fix some of the failing test cases
    
    Added missing include varatt.h to fix the undefined symbol while loading 
age into postgresql because usage of VARDATA_ANY needs to import varatt.h in 
PG16
    
    Modified initialisation of ResultRelInfo and removed unnecessary RTEs
    
    Compared the branches and added the necessary changes so that the query's 
rteperminfos variable doesn't stay NULL.
    
    Modified initialisation of ResultRelInfo and removed unnecessary RTEs
    
    One of the problems that we were facing was related to the ResultRelInfo 
pointing at the wrong RTE via its ri_RangeTableIndex. The 
create_entity_result_rel_info() function does not have the capability of 
setting the ri_RootResultRelInfo to the correct ResultRelInfo node because it 
does not have access to the ModifyTableState node. The solution for this was to 
set the third argument in InitResultRelInfo() to be zero instead of 
list_length(estate->es_range_table).
    
    In the update_entity_tuple() function, when we call table_tuple_update() 
and assign the returned value to the result variable, the buffer variable 
receives the value of 0.
    Made a workaround so that the original value isn't lost.
    
    This is a work in progress for the new field that was added to the struct 
Var called varnullingrels. According to the documentation, this field is 
responsible for marking the Vars as nullable, if they are coming from a JOIN, 
either LEFT JOIN, RIGHT JOIN, or FULL OUTER JOIN. The changes were made 
following an "optional match" clause which is being treated as a LEFT JOIN from 
our extension.
    
    A function markRelsAsNulledBy is added because it is internal to Postgres and 
doesn't belong in a header file, therefore it can't be exported. This function 
is added before the creation of the Vars from the make_vertex_expr and 
make_edge_expr, to correctly mark the specific PNSI as nullable, so later in 
the planner stage, the Vars will be correctly nulled.
    
    Fix incorrect typecasting in agtype_to_graphid function.
    
    Fix incorrect returns of the functions _label_name, _ag_build_vertex and 
_ag_build_edge.
    Contributors
    
    Panagiotis Foliadis <[email protected]>
    Matheus Farias <[email protected]>
    Mohamed Mokhtar <[email protected]>
    Hannan Aamir <[email protected]>
    John Gemignani <[email protected]>
    Muhammad Taha Naveed <[email protected]>
    Wendel de Lana <[email protected]>
    ---------
---
 .github/workflows/go-driver.yml       | 12 ++++----
 .github/workflows/installcheck.yaml   | 30 +++++++++----------
 .github/workflows/jdbc-driver.yaml    | 12 ++++----
 .github/workflows/nodejs-driver.yaml  | 12 ++++----
 .github/workflows/python-driver.yaml  | 12 ++++----
 .gitignore                            |  1 +
 src/backend/catalog/ag_catalog.c      |  7 +++++
 src/backend/catalog/ag_graph.c        |  4 +--
 src/backend/catalog/ag_label.c        | 17 ++++++-----
 src/backend/commands/label_commands.c |  5 ++--
 src/backend/executor/cypher_create.c  |  4 +--
 src/backend/executor/cypher_merge.c   |  5 ++--
 src/backend/executor/cypher_set.c     |  7 +++--
 src/backend/executor/cypher_utils.c   |  6 ++--
 src/backend/nodes/cypher_readfuncs.c  |  2 +-
 src/backend/parser/cypher_analyze.c   |  1 +
 src/backend/parser/cypher_clause.c    | 50 ++++++++++++++++++-------------
 src/backend/parser/cypher_expr.c      |  2 +-
 src/backend/parser/cypher_item.c      | 30 +++++++++++--------
 src/backend/parser/cypher_parse_agg.c |  8 ++---
 src/backend/utils/adt/agtype.c        | 56 +++++++++++++++++++++--------------
 src/backend/utils/adt/agtype_gin.c    |  1 +
 src/backend/utils/adt/agtype_ops.c    |  2 +-
 src/backend/utils/adt/agtype_parser.c |  5 +++-
 src/backend/utils/ag_func.c           | 12 ++++----
 src/backend/utils/cache/ag_cache.c    | 12 ++++----
 src/backend/utils/graph_generation.c  | 10 +++----
 27 files changed, 183 insertions(+), 142 deletions(-)

diff --git a/.github/workflows/go-driver.yml b/.github/workflows/go-driver.yml
index adbacc93..10b1abaa 100644
--- a/.github/workflows/go-driver.yml
+++ b/.github/workflows/go-driver.yml
@@ -2,10 +2,10 @@ name: Go Driver Tests
 
 on:
   push:
-    branches: [ "master", "PG15" ]
+    branches: [ "master", "PG16" ]
 
   pull_request:
-    branches: [ "master", "PG15" ]
+    branches: [ "master", "PG16" ]
 
 jobs:
   build:
@@ -26,14 +26,14 @@ jobs:
         if [[ "$GITHUB_EVENT_NAME" == "push" ]]; then
           if [[ "$GITHUB_REF" == "refs/heads/master" ]]; then
             echo "TAG=latest" >> $GITHUB_ENV
-          elif [[ "$GITHUB_REF" == "refs/heads/PG15" ]]; then
-            echo "TAG=PG15_latest" >> $GITHUB_ENV
+          elif [[ "$GITHUB_REF" == "refs/heads/PG16" ]]; then
+            echo "TAG=PG16_latest" >> $GITHUB_ENV
           fi
         elif [[ "$GITHUB_EVENT_NAME" == "pull_request" ]]; then
           if [[ "$GITHUB_BASE_REF" == "master" ]]; then
             echo "TAG=latest" >> $GITHUB_ENV
-          elif [[ "$GITHUB_BASE_REF" == "PG15" ]]; then
-            echo "TAG=PG15_latest" >> $GITHUB_ENV
+          elif [[ "$GITHUB_BASE_REF" == "PG16" ]]; then
+            echo "TAG=PG16_latest" >> $GITHUB_ENV
           fi
         fi
 
diff --git a/.github/workflows/installcheck.yaml 
b/.github/workflows/installcheck.yaml
index 3709c974..a975aeb4 100644
--- a/.github/workflows/installcheck.yaml
+++ b/.github/workflows/installcheck.yaml
@@ -2,32 +2,32 @@ name: Build / Regression
 
 on:
   push:
-    branches: [ 'master', 'PG15' ]
+    branches: [ 'master', 'PG16' ]
   pull_request:
-    branches: [ 'master', 'PG15' ]
+    branches: [ 'master', 'PG16' ]
 
 jobs:
   build:
     runs-on: ubuntu-latest
 
     steps:
-      - name: Get latest commit id of PostgreSQL 15
+      - name: Get latest commit id of PostgreSQL 16
         run: |
-          echo "PG_COMMIT_HASH=$(git ls-remote 
git://git.postgresql.org/git/postgresql.git refs/heads/REL_15_STABLE | awk 
'{print $1}')" >> $GITHUB_ENV
+          echo "PG_COMMIT_HASH=$(git ls-remote 
git://git.postgresql.org/git/postgresql.git refs/heads/REL_16_STABLE | awk 
'{print $1}')" >> $GITHUB_ENV
 
-      - name: Cache PostgreSQL 15
+      - name: Cache PostgreSQL 16
         uses: actions/cache@v3
-        id: pg15cache
+        id: pg16cache
         with:
-          path: ~/pg15
-          key: ${{ runner.os }}-v1-pg15-${{ env.PG_COMMIT_HASH }}
+          path: ~/pg16
+          key: ${{ runner.os }}-v1-pg16-${{ env.PG_COMMIT_HASH }}
 
-      - name: Install PostgreSQL 15
-        if: steps.pg15cache.outputs.cache-hit != 'true'
+      - name: Install PostgreSQL 16
+        if: steps.pg16cache.outputs.cache-hit != 'true'
         run: |
-          git clone --depth 1 --branch REL_15_STABLE 
git://git.postgresql.org/git/postgresql.git ~/pg15source
-          cd ~/pg15source
-          ./configure --prefix=$HOME/pg15 CFLAGS="-std=gnu99 -ggdb -O0" 
--enable-cassert
+          git clone --depth 1 --branch REL_16_STABLE 
git://git.postgresql.org/git/postgresql.git ~/pg16source
+          cd ~/pg16source
+          ./configure --prefix=$HOME/pg16 CFLAGS="-std=gnu99 -ggdb -O0" 
--enable-cassert
           make install -j$(nproc) > /dev/null
 
       - uses: actions/checkout@v3
@@ -35,12 +35,12 @@ jobs:
       - name: Build
         id: build
         run: |
-          make PG_CONFIG=$HOME/pg15/bin/pg_config install -j$(nproc)
+          make PG_CONFIG=$HOME/pg16/bin/pg_config install -j$(nproc)
 
       - name: Regression tests
         id: regression_tests
         run: |
-          make PG_CONFIG=$HOME/pg15/bin/pg_config installcheck
+          make PG_CONFIG=$HOME/pg16/bin/pg_config installcheck
         continue-on-error: true
 
       - name: Dump regression test errors
diff --git a/.github/workflows/jdbc-driver.yaml 
b/.github/workflows/jdbc-driver.yaml
index 7dda51a4..81d2558a 100644
--- a/.github/workflows/jdbc-driver.yaml
+++ b/.github/workflows/jdbc-driver.yaml
@@ -2,10 +2,10 @@ name: JDBC Driver Tests
 
 on:
   push:
-    branches: [ "master", "PG15" ]
+    branches: [ "master", "PG16" ]
 
   pull_request:
-    branches: [ "master", "PG15" ]
+    branches: [ "master", "PG16" ]
 
 jobs:
   build:
@@ -28,14 +28,14 @@ jobs:
         if [[ "$GITHUB_EVENT_NAME" == "push" ]]; then
           if [[ "$GITHUB_REF" == "refs/heads/master" ]]; then
             echo "TAG=latest" >> $GITHUB_ENV
-          elif [[ "$GITHUB_REF" == "refs/heads/PG15" ]]; then
-            echo "TAG=PG15_latest" >> $GITHUB_ENV
+          elif [[ "$GITHUB_REF" == "refs/heads/PG16" ]]; then
+            echo "TAG=PG16_latest" >> $GITHUB_ENV
           fi
         elif [[ "$GITHUB_EVENT_NAME" == "pull_request" ]]; then
           if [[ "$GITHUB_BASE_REF" == "master" ]]; then
             echo "TAG=latest" >> $GITHUB_ENV
-          elif [[ "$GITHUB_BASE_REF" == "PG15" ]]; then
-            echo "TAG=PG15_latest" >> $GITHUB_ENV
+          elif [[ "$GITHUB_BASE_REF" == "PG16" ]]; then
+            echo "TAG=PG16_latest" >> $GITHUB_ENV
           fi
         fi
 
diff --git a/.github/workflows/nodejs-driver.yaml 
b/.github/workflows/nodejs-driver.yaml
index 36356a7c..bc926e6f 100644
--- a/.github/workflows/nodejs-driver.yaml
+++ b/.github/workflows/nodejs-driver.yaml
@@ -2,10 +2,10 @@ name: Nodejs Driver Tests
 
 on:
   push:
-    branches: [ "master", "PG15" ]
+    branches: [ "master", "PG16" ]
 
   pull_request:
-    branches: [ "master", "PG15" ]
+    branches: [ "master", "PG16" ]
 
 jobs:
   build:
@@ -23,14 +23,14 @@ jobs:
         if [[ "$GITHUB_EVENT_NAME" == "push" ]]; then
           if [[ "$GITHUB_REF" == "refs/heads/master" ]]; then
             echo "TAG=latest" >> $GITHUB_ENV
-          elif [[ "$GITHUB_REF" == "refs/heads/PG15" ]]; then
-            echo "TAG=PG15_latest" >> $GITHUB_ENV
+          elif [[ "$GITHUB_REF" == "refs/heads/PG16" ]]; then
+            echo "TAG=PG16_latest" >> $GITHUB_ENV
           fi
         elif [[ "$GITHUB_EVENT_NAME" == "pull_request" ]]; then
           if [[ "$GITHUB_BASE_REF" == "master" ]]; then
             echo "TAG=latest" >> $GITHUB_ENV
-          elif [[ "$GITHUB_BASE_REF" == "PG15" ]]; then
-            echo "TAG=PG15_latest" >> $GITHUB_ENV
+          elif [[ "$GITHUB_BASE_REF" == "PG16" ]]; then
+            echo "TAG=PG16_latest" >> $GITHUB_ENV
           fi
         fi
 
diff --git a/.github/workflows/python-driver.yaml 
b/.github/workflows/python-driver.yaml
index 9a7f3559..3e7f8ee5 100644
--- a/.github/workflows/python-driver.yaml
+++ b/.github/workflows/python-driver.yaml
@@ -2,10 +2,10 @@ name: Python Driver Tests
 
 on:
   push:
-    branches: [ "master", "PG15" ]
+    branches: [ "master", "PG16" ]
 
   pull_request:
-    branches: [ "master", "PG15" ]
+    branches: [ "master", "PG16" ]
 
 jobs:
   build:
@@ -23,14 +23,14 @@ jobs:
         if [[ "$GITHUB_EVENT_NAME" == "push" ]]; then
           if [[ "$GITHUB_REF" == "refs/heads/master" ]]; then
             echo "TAG=latest" >> $GITHUB_ENV
-          elif [[ "$GITHUB_REF" == "refs/heads/PG15" ]]; then
-            echo "TAG=PG15_latest" >> $GITHUB_ENV
+          elif [[ "$GITHUB_REF" == "refs/heads/PG16" ]]; then
+            echo "TAG=PG16_latest" >> $GITHUB_ENV
           fi
         elif [[ "$GITHUB_EVENT_NAME" == "pull_request" ]]; then
           if [[ "$GITHUB_BASE_REF" == "master" ]]; then
             echo "TAG=latest" >> $GITHUB_ENV
-          elif [[ "$GITHUB_BASE_REF" == "PG15" ]]; then
-            echo "TAG=PG15_latest" >> $GITHUB_ENV
+          elif [[ "$GITHUB_BASE_REF" == "PG16" ]]; then
+            echo "TAG=PG16_latest" >> $GITHUB_ENV
           fi
         fi
 
diff --git a/.gitignore b/.gitignore
index 7a75f8cb..d159e2fa 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,3 +7,4 @@ age--*.*.*.sql
 .DS_Store
 *.tokens
 *.interp
+*.dylib
diff --git a/src/backend/catalog/ag_catalog.c b/src/backend/catalog/ag_catalog.c
index 60a576a9..ab747ae6 100644
--- a/src/backend/catalog/ag_catalog.c
+++ b/src/backend/catalog/ag_catalog.c
@@ -97,8 +97,15 @@ void ag_ProcessUtility_hook(PlannedStmt *pstmt, const char 
*queryString, bool re
         (*prev_process_utility_hook) (pstmt, queryString, readOnlyTree, 
context, params,
                                       queryEnv, dest, qc);
     else
+    {
+        Assert(IsA(pstmt, PlannedStmt));
+        Assert(pstmt->commandType == CMD_UTILITY);
+        Assert(queryString != NULL);   /* required as of 8.4 */
+        Assert(qc == NULL || qc->commandTag == CMDTAG_UNKNOWN);
         standard_ProcessUtility(pstmt, queryString, readOnlyTree, context, 
params, queryEnv,
                                 dest, qc);
+    }
+        
 }
 
 static void drop_age_extension(DropStmt *stmt)
diff --git a/src/backend/catalog/ag_graph.c b/src/backend/catalog/ag_graph.c
index f4a0d721..7594eea5 100644
--- a/src/backend/catalog/ag_graph.c
+++ b/src/backend/catalog/ag_graph.c
@@ -49,8 +49,8 @@ void insert_graph(const Name graph_name, const Oid nsp_id)
     HeapTuple tuple;
 
 
-    AssertArg(graph_name);
-    AssertArg(OidIsValid(nsp_id));
+    Assert(graph_name);
+    Assert(OidIsValid(nsp_id));
 
     ag_graph = table_open(ag_graph_relation_id(), RowExclusiveLock);
     values[Anum_ag_graph_oid - 1] = ObjectIdGetDatum(nsp_id);
diff --git a/src/backend/catalog/ag_label.c b/src/backend/catalog/ag_label.c
index 9cd89235..09fba102 100644
--- a/src/backend/catalog/ag_label.c
+++ b/src/backend/catalog/ag_label.c
@@ -63,12 +63,12 @@ void insert_label(const char *label_name, Oid graph_oid, 
int32 label_id,
      * NOTE: Is it better to make use of label_id and label_kind domain types
      *       than to use assert to check label_id and label_kind are valid?
      */
-    AssertArg(label_name);
-    AssertArg(label_id_is_valid(label_id));
-    AssertArg(label_kind == LABEL_KIND_VERTEX ||
+    Assert(label_name);
+    Assert(label_id_is_valid(label_id));
+    Assert(label_kind == LABEL_KIND_VERTEX ||
               label_kind == LABEL_KIND_EDGE);
-    AssertArg(OidIsValid(label_relation));
-    AssertArg(seq_name);
+    Assert(OidIsValid(label_relation));
+    Assert(seq_name);
 
     ag_label = table_open(ag_label_relation_id(), RowExclusiveLock);
 
@@ -188,8 +188,9 @@ Datum _label_name(PG_FUNCTION_ARGS)
     uint32 label_id;
 
     if (PG_ARGISNULL(0) || PG_ARGISNULL(1))
-        ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
-                        errmsg("graph_oid and label_id must not be null")));
+        PG_RETURN_NULL(); 
+        //ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
+        //                errmsg("graph_oid and label_id must not be null")));
 
     graph = PG_GETARG_OID(0);
 
@@ -241,7 +242,7 @@ Datum _extract_label_id(PG_FUNCTION_ARGS)
     }
     graph_oid = AG_GETARG_GRAPHID(0);
 
-    PG_RETURN_INT32(get_graphid_label_id(graph_oid));
+    PG_RETURN_INT64(get_graphid_label_id(graph_oid));
 }
 
 bool label_id_exists(Oid graph_oid, int32 label_id)
diff --git a/src/backend/commands/label_commands.c 
b/src/backend/commands/label_commands.c
index 0f6396e3..a3a9768c 100644
--- a/src/backend/commands/label_commands.c
+++ b/src/backend/commands/label_commands.c
@@ -808,7 +808,7 @@ static void remove_relation(List *qname)
     Oid rel_oid;
     ObjectAddress address;
 
-    AssertArg(list_length(qname) == 2);
+    Assert(list_length(qname) == 2);
 
     // concurrent is false so lockmode is AccessExclusiveLock
 
@@ -868,8 +868,7 @@ static void range_var_callback_for_remove_relation(const 
RangeVar *rel,
 
     // relkind == expected_relkind
 
-    if (!pg_class_ownercheck(rel_oid, GetUserId()) &&
-        !pg_namespace_ownercheck(get_rel_namespace(rel_oid), GetUserId()))
+    if (!object_ownercheck(rel_oid, get_rel_namespace(rel_oid), GetUserId()))
     {
         aclcheck_error(ACLCHECK_NOT_OWNER,
                        get_relkind_objtype(get_rel_relkind(rel_oid)),
diff --git a/src/backend/executor/cypher_create.c 
b/src/backend/executor/cypher_create.c
index be7825bf..bd2b228e 100644
--- a/src/backend/executor/cypher_create.c
+++ b/src/backend/executor/cypher_create.c
@@ -438,7 +438,7 @@ static void create_edge(cypher_create_custom_scan_state 
*css,
 
         result = make_edge(
             id, start_id, end_id, CStringGetDatum(node->label_name),
-            PointerGetDatum(scanTupleSlot->tts_values[node->prop_attr_num]));
+            scanTupleSlot->tts_values[node->prop_attr_num]);
 
         if (CYPHER_TARGET_NODE_IN_PATH(node->flags))
         {
@@ -528,7 +528,7 @@ static Datum create_vertex(cypher_create_custom_scan_state 
*css,
 
             // make the vertex agtype
             result = make_vertex(id, CStringGetDatum(node->label_name),
-                
PointerGetDatum(scanTupleSlot->tts_values[node->prop_attr_num]));
+                scanTupleSlot->tts_values[node->prop_attr_num]);
 
             // append to the path list
             if (CYPHER_TARGET_NODE_IN_PATH(node->flags))
diff --git a/src/backend/executor/cypher_merge.c 
b/src/backend/executor/cypher_merge.c
index dbe98851..cece1eb8 100644
--- a/src/backend/executor/cypher_merge.c
+++ b/src/backend/executor/cypher_merge.c
@@ -472,8 +472,9 @@ static TupleTableSlot *exec_cypher_merge(CustomScanState 
*node)
              * So we will need to create a TupleTableSlot and populate with the
              * information from the newly created path that the query needs.
              */
-            ExprContext *econtext = node->ss.ps.ps_ExprContext;
-            SubqueryScanState *sss = (SubqueryScanState *)node->ss.ps.lefttree;
+            SubqueryScanState *sss = NULL;
+            econtext = node->ss.ps.ps_ExprContext;
+            sss = (SubqueryScanState *)node->ss.ps.lefttree;
 
             /*
              * Our child execution node is always a subquery. If not there
diff --git a/src/backend/executor/cypher_set.c 
b/src/backend/executor/cypher_set.c
index 4f941bf5..fdfceda1 100644
--- a/src/backend/executor/cypher_set.c
+++ b/src/backend/executor/cypher_set.c
@@ -111,7 +111,7 @@ static HeapTuple update_entity_tuple(ResultRelInfo 
*resultRelInfo,
     TM_FailureData hufd;
     TM_Result lock_result;
     Buffer buffer;
-    bool update_indexes;
+    TU_UpdateIndexes update_indexes;
     TM_Result   result;
     CommandId cid = GetCurrentCommandId(true);
     ResultRelInfo **saved_resultRels = estate->es_result_relations;
@@ -167,9 +167,10 @@ static HeapTuple update_entity_tuple(ResultRelInfo 
*resultRelInfo,
         }
 
         // Insert index entries for the tuple
-        if (resultRelInfo->ri_NumIndices > 0 && update_indexes)
+        if (resultRelInfo->ri_NumIndices > 0 && update_indexes != TU_None)
         {
-          ExecInsertIndexTuples(resultRelInfo, elemTupleSlot, estate, false, 
false, NULL, NIL);
+          ExecInsertIndexTuples(resultRelInfo, elemTupleSlot, estate, false, 
false, NULL, NIL,
+                                (update_indexes == TU_Summarizing));
         }
 
         ExecCloseIndices(resultRelInfo);
diff --git a/src/backend/executor/cypher_utils.c 
b/src/backend/executor/cypher_utils.c
index 0bf3430f..baccc028 100644
--- a/src/backend/executor/cypher_utils.c
+++ b/src/backend/executor/cypher_utils.c
@@ -77,7 +77,7 @@ ResultRelInfo *create_entity_result_rel_info(EState *estate, 
char *graph_name,
 
     // initialize the resultRelInfo
     InitResultRelInfo(resultRelInfo, label_relation,
-                      list_length(estate->es_range_table), NULL,
+                      0, NULL,
                       estate->es_instrument);
 
     // open the parse state
@@ -254,8 +254,8 @@ HeapTuple insert_entity_tuple_cid(ResultRelInfo 
*resultRelInfo,
     // Insert index entries for the tuple
     if (resultRelInfo->ri_NumIndices > 0)
     {
-        ExecInsertIndexTuples(resultRelInfo, elemTupleSlot, estate, false,
-                              false, NULL, NIL);
+        ExecInsertIndexTuples(resultRelInfo, elemTupleSlot, estate,
+                              false, false, NULL, NIL, false);
     }
 
     return tuple;
diff --git a/src/backend/nodes/cypher_readfuncs.c 
b/src/backend/nodes/cypher_readfuncs.c
index 89cedd57..1aa763d5 100644
--- a/src/backend/nodes/cypher_readfuncs.c
+++ b/src/backend/nodes/cypher_readfuncs.c
@@ -166,7 +166,7 @@
         ((length) == 0 ? NULL : debackslash(token, length))
 
 #define non_nullable_string(token,length)  \
-        ((length) == 0 ? "" : debackslash(token, length))
+        ((length == 2 && token[0] == '"' && token[1] == '"') ? "" : 
debackslash(token, length))
 
 /*
  * Default read function for cypher nodes. For most nodes, we don't expect
diff --git a/src/backend/parser/cypher_analyze.c 
b/src/backend/parser/cypher_analyze.c
index 9f4836cb..5c0b445e 100644
--- a/src/backend/parser/cypher_analyze.c
+++ b/src/backend/parser/cypher_analyze.c
@@ -868,6 +868,7 @@ static Query *analyze_cypher_and_coerce(List *stmt, 
RangeTblFunction *rtfunc,
     }
 
     query->rtable = pstate->p_rtable;
+    query->rteperminfos = pstate->p_rteperminfos;
     query->jointree = makeFromExpr(pstate->p_joinlist, NULL);
 
     assign_query_collations(pstate, query);
diff --git a/src/backend/parser/cypher_clause.c 
b/src/backend/parser/cypher_clause.c
index cb8f5a17..c4201be2 100644
--- a/src/backend/parser/cypher_clause.c
+++ b/src/backend/parser/cypher_clause.c
@@ -639,6 +639,7 @@ static Query *transform_cypher_union(cypher_parsestate 
*cpstate,
                                               EXPR_KIND_LIMIT, "LIMIT");
 
     qry->rtable = pstate->p_rtable;
+    qry->rteperminfos = pstate->p_rteperminfos;
     qry->jointree = makeFromExpr(pstate->p_joinlist, NULL);
     qry->hasAggs = pstate->p_hasAggs;
 
@@ -1235,6 +1236,7 @@ static Query 
*transform_cypher_call_subquery(cypher_parsestate *cpstate,
     markTargetListOrigins(pstate, query->targetList);
 
     query->rtable = cpstate->pstate.p_rtable;
+    query->rteperminfos = cpstate->pstate.p_rteperminfos;
     query->jointree = makeFromExpr(cpstate->pstate.p_joinlist, (Node 
*)where_qual);
     query->hasAggs = pstate->p_hasAggs;
 
@@ -1307,6 +1309,7 @@ static Query *transform_cypher_delete(cypher_parsestate 
*cpstate,
     query->targetList = lappend(query->targetList, tle);
 
     query->rtable = pstate->p_rtable;
+    query->rteperminfos = pstate->p_rteperminfos;
     query->jointree = makeFromExpr(pstate->p_joinlist, NULL);
 
     return query;
@@ -1384,6 +1387,7 @@ static Query *transform_cypher_unwind(cypher_parsestate 
*cpstate,
 
     query->targetList = lappend(query->targetList, te);
     query->rtable = pstate->p_rtable;
+    query->rteperminfos = pstate->p_rteperminfos;
     query->jointree = makeFromExpr(pstate->p_joinlist, NULL);
     query->hasTargetSRFs = pstate->p_hasTargetSRFs;
 
@@ -1526,6 +1530,7 @@ static Query *transform_cypher_set(cypher_parsestate 
*cpstate,
     query->targetList = lappend(query->targetList, tle);
 
     query->rtable = pstate->p_rtable;
+    query->rteperminfos = pstate->p_rteperminfos;
     query->jointree = makeFromExpr(pstate->p_joinlist, NULL);
 
     return query;
@@ -2121,6 +2126,7 @@ static Query *transform_cypher_return(cypher_parsestate 
*cpstate,
                                                EXPR_KIND_LIMIT, "LIMIT");
 
     query->rtable = pstate->p_rtable;
+    query->rteperminfos = pstate->p_rteperminfos;
     query->jointree = makeFromExpr(pstate->p_joinlist, NULL);
     query->hasAggs = pstate->p_hasAggs;
 
@@ -2344,6 +2350,7 @@ static Query 
*transform_cypher_clause_with_where(cypher_parsestate *cpstate,
         markTargetListOrigins(pstate, query->targetList);
 
         query->rtable = pstate->p_rtable;
+        query->rteperminfos = pstate->p_rteperminfos;
 
         if (!is_ag_node(self, cypher_match))
         {
@@ -2593,6 +2600,7 @@ static Query 
*transform_cypher_match_pattern(cypher_parsestate *cpstate,
 
         query->targetList = make_target_list_from_join(pstate, rte);
         query->rtable = pstate->p_rtable;
+        query->rteperminfos = pstate->p_rteperminfos;
         query->jointree = makeFromExpr(pstate->p_joinlist, NULL);
     }
     else
@@ -2647,7 +2655,7 @@ static List *make_target_list_from_join(ParseState 
*pstate, RangeTblEntry *rte)
     ListCell *lt;
     ListCell *ln;
 
-    AssertArg(rte->rtekind == RTE_JOIN);
+    Assert(rte->rtekind == RTE_JOIN);
 
     forboth(lt, rte->joinaliasvars, ln, rte->eref->colnames)
     {
@@ -2680,7 +2688,7 @@ static List *makeTargetListFromPNSItem(ParseState 
*pstate, ParseNamespaceItem *p
     rte = pnsi->p_rte;
 
     /* right now this is only for subqueries */
-    AssertArg(rte->rtekind == RTE_SUBQUERY);
+    Assert(rte->rtekind == RTE_SUBQUERY);
 
     rtindex = pnsi->p_rtindex;
 
@@ -2759,6 +2767,7 @@ static Query 
*transform_cypher_sub_pattern(cypher_parsestate *cpstate,
     markTargetListOrigins(p_child_parse_state, qry->targetList);
 
     qry->rtable = p_child_parse_state->p_rtable;
+    qry->rteperminfos = p_child_parse_state->p_rteperminfos;
     qry->jointree = makeFromExpr(p_child_parse_state->p_joinlist, NULL);
 
     /* the state will be destroyed so copy the data we need */
@@ -3045,6 +3054,7 @@ static void transform_match_pattern(cypher_parsestate 
*cpstate, Query *query,
     }
 
     query->rtable = cpstate->pstate.p_rtable;
+    query->rteperminfos = cpstate->pstate.p_rteperminfos;
     query->jointree = makeFromExpr(cpstate->pstate.p_joinlist, (Node *)expr);
 }
 
@@ -3213,7 +3223,7 @@ static List 
*make_join_condition_for_edge(cypher_parsestate *cpstate,
             prev_edge != NULL &&
             prev_edge->type == ENT_VLE_EDGE)
         {
-            List *qualified_name, *args;
+            List *qualified_name;
             String *match_qual;
             FuncCall *fc;
 
@@ -5293,6 +5303,7 @@ static Query *transform_cypher_create(cypher_parsestate 
*cpstate,
     query->targetList = lappend(query->targetList, tle);
 
     query->rtable = pstate->p_rtable;
+    query->rteperminfos = pstate->p_rteperminfos;
     query->jointree = makeFromExpr(pstate->p_joinlist, NULL);
 
     return query;
@@ -5444,7 +5455,7 @@ transform_create_cypher_edge(cypher_parsestate *cpstate, 
List **target_list,
     Expr *props;
     Relation label_relation;
     RangeVar *rv;
-    RangeTblEntry *rte;
+    RTEPermissionInfo *rte_pi;
     TargetEntry *te;
     char *alias;
     AttrNumber resno;
@@ -5519,7 +5530,6 @@ transform_create_cypher_edge(cypher_parsestate *cpstate, 
List **target_list,
     if (!label_exists(edge->label, cpstate->graph_oid))
     {
         List *parent;
-        RangeVar *rv;
 
         rv = get_label_range_var(cpstate->graph_name, cpstate->graph_oid,
                                  AG_DEFAULT_LABEL_EDGE);
@@ -5539,8 +5549,9 @@ transform_create_cypher_edge(cypher_parsestate *cpstate, 
List **target_list,
 
     pnsi = addRangeTableEntryForRelation((ParseState *)cpstate, label_relation,
                                         AccessShareLock, NULL, false, false);
-    rte = pnsi->p_rte;
-    rte->requiredPerms = ACL_INSERT;
+
+    rte_pi = pnsi->p_perminfo;
+    rte_pi->requiredPerms = ACL_INSERT;
 
     // Build Id expression, always use the default logic
     rel->id_expr = (Expr *)build_column_default(label_relation,
@@ -5759,7 +5770,7 @@ transform_create_cypher_new_node(cypher_parsestate 
*cpstate,
     cypher_target_node *rel = make_ag_node(cypher_target_node);
     Relation label_relation;
     RangeVar *rv;
-    RangeTblEntry *rte;
+    RTEPermissionInfo *rte_pi;
     TargetEntry *te;
     Expr *props;
     char *alias;
@@ -5789,7 +5800,6 @@ transform_create_cypher_new_node(cypher_parsestate 
*cpstate,
     if (!label_exists(node->label, cpstate->graph_oid))
     {
         List *parent;
-        RangeVar *rv;
 
         rv = get_label_range_var(cpstate->graph_name, cpstate->graph_oid,
                                  AG_DEFAULT_LABEL_VERTEX);
@@ -5810,8 +5820,9 @@ transform_create_cypher_new_node(cypher_parsestate 
*cpstate,
 
     pnsi = addRangeTableEntryForRelation((ParseState *)cpstate, label_relation,
                                         AccessShareLock, NULL, false, false);
-    rte = pnsi->p_rte;
-    rte->requiredPerms = ACL_INSERT;
+
+    rte_pi = pnsi->p_perminfo;
+    rte_pi->requiredPerms = ACL_INSERT;
 
     // id
     rel->id_expr = (Expr *)build_column_default(label_relation,
@@ -6292,6 +6303,7 @@ static Query *transform_cypher_merge(cypher_parsestate 
*cpstate,
     markTargetListOrigins(pstate, query->targetList);
 
     query->rtable = pstate->p_rtable;
+    query->rteperminfos = pstate->p_rteperminfos;
     query->jointree = makeFromExpr(pstate->p_joinlist, NULL);
 
     query->hasSubLinks = pstate->p_hasSubLinks;
@@ -6777,7 +6789,7 @@ transform_merge_cypher_edge(cypher_parsestate *cpstate, 
List **target_list,
     cypher_target_node *rel = make_ag_node(cypher_target_node);
     Relation label_relation;
     RangeVar *rv;
-    RangeTblEntry *rte;
+    RTEPermissionInfo *rte_pi;
     ParseNamespaceItem *pnsi;
 
     if (edge->name != NULL)
@@ -6825,8 +6837,6 @@ transform_merge_cypher_edge(cypher_parsestate *cpstate, 
List **target_list,
     if (edge->label && !label_exists(edge->label, cpstate->graph_oid))
     {
         List *parent;
-        RangeVar *rv;
-
         /*
          * setup the default edge table as the parent table, that we
          * will inherit from.
@@ -6865,8 +6875,8 @@ transform_merge_cypher_edge(cypher_parsestate *cpstate, 
List **target_list,
 
     pnsi = addRangeTableEntryForRelation((ParseState *)cpstate, label_relation,
                                          AccessShareLock, NULL, false, false);
-    rte = pnsi->p_rte;
-    rte->requiredPerms = ACL_INSERT;
+    rte_pi = pnsi->p_perminfo;
+    rte_pi->requiredPerms = ACL_INSERT;
 
     // Build Id expression, always use the default logic
     rel->id_expr = (Expr *)build_column_default(label_relation,
@@ -6892,7 +6902,7 @@ transform_merge_cypher_node(cypher_parsestate *cpstate, 
List **target_list,
     cypher_target_node *rel = make_ag_node(cypher_target_node);
     Relation label_relation;
     RangeVar *rv;
-    RangeTblEntry *rte;
+    RTEPermissionInfo *rte_pi;
     ParseNamespaceItem *pnsi;
 
     if (node->name != NULL)
@@ -6944,7 +6954,6 @@ transform_merge_cypher_node(cypher_parsestate *cpstate, 
List **target_list,
     if (node->label && !label_exists(node->label, cpstate->graph_oid))
     {
         List *parent;
-        RangeVar *rv;
 
         /*
          * setup the default vertex table as the parent table, that we
@@ -6985,8 +6994,9 @@ transform_merge_cypher_node(cypher_parsestate *cpstate, 
List **target_list,
 
     pnsi = addRangeTableEntryForRelation((ParseState *)cpstate, label_relation,
                                          AccessShareLock, NULL, false, false);
-    rte = pnsi->p_rte;
-    rte->requiredPerms = ACL_INSERT;
+
+    rte_pi = pnsi->p_perminfo;
+    rte_pi->requiredPerms = ACL_INSERT;
 
     // id
     rel->id_expr = (Expr *)build_column_default(label_relation,
diff --git a/src/backend/parser/cypher_expr.c b/src/backend/parser/cypher_expr.c
index 0efd4486..d3ced711 100644
--- a/src/backend/parser/cypher_expr.c
+++ b/src/backend/parser/cypher_expr.c
@@ -249,7 +249,7 @@ static Node *transform_A_Const(cypher_parsestate *cpstate, A_Const *ac)
             }
             else
             {
-                float8 f = float8in_internal(n, NULL, "double precision", n);
+                float8 f = float8in_internal(n, NULL, "double precision", n, NULL);
 
                 d = float_to_agtype(f);
             }
diff --git a/src/backend/parser/cypher_item.c b/src/backend/parser/cypher_item.c
index 7d0ad88b..6e489fc5 100644
--- a/src/backend/parser/cypher_item.c
+++ b/src/backend/parser/cypher_item.c
@@ -39,8 +39,8 @@
 #include "parser/cypher_parse_node.h"
 
 static List *ExpandAllTables(ParseState *pstate, int location);
-static List *expand_rel_attrs(ParseState *pstate, RangeTblEntry *rte,
-                              int rtindex, int sublevels_up, int location);
+static List *expand_pnsi_attrs(ParseState *pstate, ParseNamespaceItem *pnsi,
+                              int sublevels_up, bool require_col_privs, int location);
 
 // see transformTargetEntry()
 TargetEntry *transform_cypher_item(cypher_parsestate *cpstate, Node *node,
@@ -161,10 +161,10 @@ static List *ExpandAllTables(ParseState *pstate, int location)
         /* Remember we found a p_cols_visible item */
         found_table = true;
 
-        target = list_concat(target, expand_rel_attrs(pstate,
-                                                      nsitem->p_rte,
-                                                      nsitem->p_rtindex,
-                                                      0, location));
+        target = list_concat(target, expand_pnsi_attrs(pstate,
+                                                      nsitem,
+                                                      0,
+                                                      true, location));
     }
 
     /* Check for "RETURN *;" */
@@ -177,26 +177,32 @@ static List *ExpandAllTables(ParseState *pstate, int location)
 }
 
 /*
- * From PG's expandRelAttrs
+ * From PG's expandNSItemAttrs
  * Modified to exclude hidden variables and aliases in RETURN *
  */
-static List *expand_rel_attrs(ParseState *pstate, RangeTblEntry *rte,
-                              int rtindex, int sublevels_up, int location)
+static List *expand_pnsi_attrs(ParseState *pstate, ParseNamespaceItem *pnsi,
+                              int sublevels_up, bool require_col_privs, int location)
 {
+    RangeTblEntry *rte = pnsi->p_rte;
+    RTEPermissionInfo *perminfo = pnsi->p_perminfo;
     List *names, *vars;
     ListCell *name, *var;
     List *te_list = NIL;
     int var_prefix_len = strlen(AGE_DEFAULT_VARNAME_PREFIX);
     int alias_prefix_len = strlen(AGE_DEFAULT_ALIAS_PREFIX);
-
-    expandRTE(rte, rtindex, sublevels_up, location, false, &names, &vars);
+    
+    vars = expandNSItemVars(pstate, pnsi, sublevels_up, location, &names);
 
     /*
      * Require read access to the table.  This is normally redundant with the
      * markVarForSelectPriv calls below, but not if the table has zero
      * columns.
      */
-    rte->requiredPerms |= ACL_SELECT;
+    if (rte->rtekind == RTE_RELATION)
+     {
+         Assert(perminfo != NULL);
+         perminfo->requiredPerms |= ACL_SELECT;
+     }
 
     /* iterate through the variables */
     forboth(name, names, var, vars)
diff --git a/src/backend/parser/cypher_parse_agg.c b/src/backend/parser/cypher_parse_agg.c
index 47075ef7..284a07e6 100644
--- a/src/backend/parser/cypher_parse_agg.c
+++ b/src/backend/parser/cypher_parse_agg.c
@@ -192,7 +192,7 @@ void parse_check_aggregates(ParseState *pstate, Query *qry)
         root->planner_cxt = CurrentMemoryContext;
         root->hasJoinRTEs = true;
 
-        groupClauses = (List *) flatten_join_alias_vars((Query*)root,
+        groupClauses = (List *) flatten_join_alias_vars(root, qry,
                                                         (Node *) groupClauses);
     }
 
@@ -236,7 +236,7 @@ void parse_check_aggregates(ParseState *pstate, Query *qry)
     finalize_grouping_exprs(clause, pstate, qry, groupClauses, root,
                             have_non_var_grouping);
     if (hasJoinRTEs)
-        clause = flatten_join_alias_vars((Query*)root, clause);
+        clause = flatten_join_alias_vars(root, qry, clause);
     check_ungrouped_columns(clause, pstate, qry, groupClauses,
                             groupClauseCommonVars, have_non_var_grouping,
                             &func_grouped_rels);
@@ -245,7 +245,7 @@ void parse_check_aggregates(ParseState *pstate, Query *qry)
     finalize_grouping_exprs(clause, pstate, qry, groupClauses, root,
                             have_non_var_grouping);
     if (hasJoinRTEs)
-        clause = flatten_join_alias_vars((Query*)root, clause);
+        clause = flatten_join_alias_vars(root, qry, clause);
     check_ungrouped_columns(clause, pstate, qry, groupClauses,
                             groupClauseCommonVars, have_non_var_grouping,
                             &func_grouped_rels);
@@ -562,7 +562,7 @@ static bool finalize_grouping_exprs_walker(Node *node,
                 Index ref = 0;
 
                 if (context->root)
-                    expr = flatten_join_alias_vars((Query*)context->root, expr);
+                    expr = flatten_join_alias_vars(context-> root, (Query*)context->root, expr);
 
                 /*
                  * Each expression must match a grouping entry at the current
diff --git a/src/backend/utils/adt/agtype.c b/src/backend/utils/adt/agtype.c
index e6539e87..7751f230 100644
--- a/src/backend/utils/adt/agtype.c
+++ b/src/backend/utils/adt/agtype.c
@@ -29,6 +29,8 @@
  */
 
 #include "postgres.h"
+#include "varatt.h"
+#include <math.h>
 
 #include <float.h>
 
@@ -971,7 +973,7 @@ static void agtype_in_scalar(void *pstate, char *token,
         Assert(token != NULL);
         v.type = AGTV_FLOAT;
         v.val.float_value = float8in_internal(token, NULL, "double precision",
-                                              token);
+                                              token, NULL);
         break;
     case AGTYPE_TOKEN_NUMERIC:
         Assert(token != NULL);
@@ -1974,7 +1976,7 @@ Datum _agtype_build_path(PG_FUNCTION_ARGS)
      */
     if (nargs >= 1 && nargs <= 3)
     {
-        int i = 0;
+        i = 0;
 
         for (i = 0; i < nargs; i++)
         {
@@ -2191,9 +2193,12 @@ Datum _agtype_build_vertex(PG_FUNCTION_ARGS)
     /* handles null */
     if (fcinfo->args[0].isnull)
     {
+        /*
         ereport(ERROR,
                 (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                  errmsg("_agtype_build_vertex() graphid cannot be NULL")));
+                 */
+        PG_RETURN_NULL();
     }
 
     if (fcinfo->args[1].isnull)
@@ -2207,7 +2212,7 @@ Datum _agtype_build_vertex(PG_FUNCTION_ARGS)
 
     if (fcinfo->args[2].isnull)
     {
-        agtype_build_state *bstate = init_agtype_build_state(0, AGT_FOBJECT);
+        bstate = init_agtype_build_state(0, AGT_FOBJECT);
         properties = build_agtype(bstate);
         pfree_agtype_build_state(bstate);
     }
@@ -2262,9 +2267,12 @@ Datum _agtype_build_edge(PG_FUNCTION_ARGS)
     /* process graph id */
     if (fcinfo->args[0].isnull)
     {
+        PG_RETURN_NULL();
+        /*
         ereport(ERROR,
                 (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                  errmsg("_agtype_build_edge() graphid cannot be NULL")));
+                 */
     }
 
     id = AG_GETARG_GRAPHID(0);
@@ -2303,7 +2311,7 @@ Datum _agtype_build_edge(PG_FUNCTION_ARGS)
     /* if the properties object is null, push an empty object */
     if (fcinfo->args[4].isnull)
     {
-        agtype_build_state *bstate = init_agtype_build_state(0, AGT_FOBJECT);
+        bstate = init_agtype_build_state(0, AGT_FOBJECT);
         properties = build_agtype(bstate);
         pfree_agtype_build_state(bstate);
     }
@@ -2961,7 +2969,7 @@ Datum agtype_to_int2(PG_FUNCTION_ARGS)
 
     PG_FREE_IF_COPY(arg_agt, 0);
 
-    PG_RETURN_INT16(result);
+    PG_RETURN_INT64(result);
 }
 
 PG_FUNCTION_INFO_V1(agtype_to_float8);
@@ -3669,13 +3677,13 @@ Datum agtype_object_field_agtype(PG_FUNCTION_ARGS)
 
     if (key_value->type == AGTV_INTEGER)
     {
-        PG_RETURN_TEXT_P(agtype_array_element_impl(fcinfo, agt,
+        PG_RETURN_TEXT_P((const void*)agtype_array_element_impl(fcinfo, agt,
                                                    key_value->val.int_value,
                                                    false));
     }
     else if (key_value->type == AGTV_STRING)
     {
-        AG_RETURN_AGTYPE_P(agtype_object_field_impl(fcinfo, agt,
+        AG_RETURN_AGTYPE_P((const void*)agtype_object_field_impl(fcinfo, agt,
                                                     key_value->val.string.val,
                                                     key_value->val.string.len,
                                                     false));
@@ -3703,13 +3711,13 @@ Datum agtype_object_field_text_agtype(PG_FUNCTION_ARGS)
 
     if (key_value->type == AGTV_INTEGER)
     {
-        PG_RETURN_TEXT_P(agtype_array_element_impl(fcinfo, agt,
+        PG_RETURN_TEXT_P((const void*)agtype_array_element_impl(fcinfo, agt,
                                                    key_value->val.int_value,
                                                    true));
     }
     else if (key_value->type == AGTV_STRING)
     {
-        AG_RETURN_AGTYPE_P(agtype_object_field_impl(fcinfo, agt,
+        AG_RETURN_AGTYPE_P((const void*)agtype_object_field_impl(fcinfo, agt,
                                                     key_value->val.string.val,
                                                     key_value->val.string.len,
                                                     true));
@@ -3727,7 +3735,7 @@ Datum agtype_object_field(PG_FUNCTION_ARGS)
     agtype *agt = AG_GET_ARG_AGTYPE_P(0);
     text *key = PG_GETARG_TEXT_PP(1);
 
-    AG_RETURN_AGTYPE_P(agtype_object_field_impl(fcinfo, agt, VARDATA_ANY(key),
+    AG_RETURN_AGTYPE_P((const void*)agtype_object_field_impl(fcinfo, agt, VARDATA_ANY(key),
                                                 VARSIZE_ANY_EXHDR(key),
                                                 false));
 }
@@ -3739,7 +3747,7 @@ Datum agtype_object_field_text(PG_FUNCTION_ARGS)
     agtype *agt = AG_GET_ARG_AGTYPE_P(0);
     text *key = PG_GETARG_TEXT_PP(1);
 
-    PG_RETURN_TEXT_P(agtype_object_field_impl(fcinfo, agt, VARDATA_ANY(key),
+    PG_RETURN_TEXT_P((const void*)agtype_object_field_impl(fcinfo, agt, VARDATA_ANY(key),
                                               VARSIZE_ANY_EXHDR(key), true));
 }
 
@@ -3750,7 +3758,8 @@ Datum agtype_array_element(PG_FUNCTION_ARGS)
     agtype *agt = AG_GET_ARG_AGTYPE_P(0);
     int elem = PG_GETARG_INT32(1);
 
-    AG_RETURN_AGTYPE_P(agtype_array_element_impl(fcinfo, agt, elem, false));
+    AG_RETURN_AGTYPE_P((const void*)
+        agtype_array_element_impl(fcinfo, agt, elem, false));
 }
 
 PG_FUNCTION_INFO_V1(agtype_array_element_text);
@@ -3760,7 +3769,8 @@ Datum agtype_array_element_text(PG_FUNCTION_ARGS)
     agtype *agt = AG_GET_ARG_AGTYPE_P(0);
     int elem = PG_GETARG_INT32(1);
 
-    PG_RETURN_TEXT_P(agtype_array_element_impl(fcinfo, agt, elem, true));
+    PG_RETURN_TEXT_P((const void*)
+        agtype_array_element_impl(fcinfo, agt, elem, true));
 }
 
 PG_FUNCTION_INFO_V1(agtype_access_operator);
@@ -4268,7 +4278,7 @@ Datum agtype_hash_cmp(PG_FUNCTION_ARGS)
     uint64 seed = 0xF0F0F0F0;
 
     if (PG_ARGISNULL(0))
-        PG_RETURN_INT16(0);
+        PG_RETURN_INT64(0);
 
     agt = AG_GET_ARG_AGTYPE_P(0);
 
@@ -4291,7 +4301,7 @@ Datum agtype_hash_cmp(PG_FUNCTION_ARGS)
         seed = LEFT_ROTATE(seed, 1);
     }
 
-    PG_RETURN_INT16(hash);
+    PG_RETURN_INT64(hash);
 }
 
 // Comparison function for btree Indexes
@@ -4312,7 +4322,7 @@ Datum agtype_btree_cmp(PG_FUNCTION_ARGS)
     agtype_lhs = AG_GET_ARG_AGTYPE_P(0);
     agtype_rhs = AG_GET_ARG_AGTYPE_P(1);
 
-    PG_RETURN_INT16(compare_agtype_containers_orderability(&agtype_lhs->root,
+    PG_RETURN_INT64(compare_agtype_containers_orderability(&agtype_lhs->root,
                                                      &agtype_rhs->root));
 }
 
@@ -6014,7 +6024,7 @@ Datum age_tointeger(PG_FUNCTION_ARGS)
              */
             result = float8in_internal_null(string, NULL, "double precision",
                                             string, &is_valid);
-
+ 
             if (*endptr != '\0')
             {
                 float8 f;
@@ -6340,7 +6350,7 @@ PG_FUNCTION_INFO_V1(graphid_to_agtype);
 
 Datum graphid_to_agtype(PG_FUNCTION_ARGS)
 {
-    PG_RETURN_POINTER(integer_to_agtype(AG_GETARG_GRAPHID(0)));
+    PG_RETURN_POINTER((const void *) integer_to_agtype(AG_GETARG_GRAPHID(0)));
 }
 
 PG_FUNCTION_INFO_V1(agtype_to_graphid);
@@ -6356,7 +6366,7 @@ Datum agtype_to_graphid(PG_FUNCTION_ARGS)
 
     PG_FREE_IF_COPY(agtype_in, 0);
 
-    PG_RETURN_INT16(agtv.val.int_value);
+    PG_RETURN_INT64(agtv.val.int_value);
 }
 
 PG_FUNCTION_INFO_V1(age_type);
@@ -10179,7 +10189,7 @@ Datum age_percentile_cont_aggfinalfn(PG_FUNCTION_ARGS)
     if (!tuplesort_skiptuples(pgastate->sortstate, first_row, true))
         elog(ERROR, "missing row in percentile_cont");
 
-    if (!tuplesort_getdatum(pgastate->sortstate, true, &first_val, &isnull, NULL))
+    if (!tuplesort_getdatum(pgastate->sortstate, true, false, &first_val, &isnull, NULL))
         elog(ERROR, "missing row in percentile_cont");
     if (isnull)
         PG_RETURN_NULL();
@@ -10190,7 +10200,7 @@ Datum age_percentile_cont_aggfinalfn(PG_FUNCTION_ARGS)
     }
     else
     {
-        if (!tuplesort_getdatum(pgastate->sortstate, true, &second_val, &isnull, NULL))
+        if (!tuplesort_getdatum(pgastate->sortstate, true, false, &second_val, &isnull, NULL))
             elog(ERROR, "missing row in percentile_cont");
 
         if (isnull)
@@ -10256,7 +10266,7 @@ Datum age_percentile_disc_aggfinalfn(PG_FUNCTION_ARGS)
             elog(ERROR, "missing row in percentile_disc");
     }
 
-    if (!tuplesort_getdatum(pgastate->sortstate, true, &val, &isnull, NULL))
+    if (!tuplesort_getdatum(pgastate->sortstate, true, false, &val, &isnull, NULL))
         elog(ERROR, "missing row in percentile_disc");
 
     /* We shouldn't have stored any nulls, but do the right thing anyway */
@@ -11363,5 +11373,5 @@ Datum agtype_volatile_wrapper(PG_FUNCTION_ARGS)
     }
 
     /* otherwise, just pass it through */
-    PG_RETURN_POINTER(PG_GETARG_DATUM(0));
+    PG_RETURN_POINTER((const void*) PG_GETARG_DATUM(0));
 }
diff --git a/src/backend/utils/adt/agtype_gin.c b/src/backend/utils/adt/agtype_gin.c
index e8964c92..d260fd98 100644
--- a/src/backend/utils/adt/agtype_gin.c
+++ b/src/backend/utils/adt/agtype_gin.c
@@ -27,6 +27,7 @@
  */
 
 #include "postgres.h"
+#include "varatt.h"
 
 #include "access/gin.h"
 #include "access/hash.h"
diff --git a/src/backend/utils/adt/agtype_ops.c b/src/backend/utils/adt/agtype_ops.c
index 766b7d89..33a32deb 100644
--- a/src/backend/utils/adt/agtype_ops.c
+++ b/src/backend/utils/adt/agtype_ops.c
@@ -22,7 +22,7 @@
  */
 
 #include "postgres.h"
-
+#include "varatt.h"
 #include <math.h>
 #include <limits.h>
 
diff --git a/src/backend/utils/adt/agtype_parser.c b/src/backend/utils/adt/agtype_parser.c
index fe8203aa..0f0a9273 100644
--- a/src/backend/utils/adt/agtype_parser.c
+++ b/src/backend/utils/adt/agtype_parser.c
@@ -30,15 +30,18 @@
  */
 
 #include "postgres.h"
-
+#include "varatt.h"
 #include "catalog/pg_type.h"
 #include "libpq/pqformat.h"
 #include "miscadmin.h"
 #include "utils/date.h"
 #include "utils/datetime.h"
+#include "utils/varlena.h"
 
+#include "utils/agtype.h"
 #include "utils/agtype_parser.h"
 
+
 /*
  * The context of the parser is maintained by the recursive descent
  * mechanism, but is passed explicitly to the error reporting routine
diff --git a/src/backend/utils/ag_func.c b/src/backend/utils/ag_func.c
index 9c1ba020..3040e44b 100644
--- a/src/backend/utils/ag_func.c
+++ b/src/backend/utils/ag_func.c
@@ -41,8 +41,8 @@ bool is_oid_ag_func(Oid func_oid, const char *func_name)
     Oid nspid;
     const char *nspname;
 
-    AssertArg(OidIsValid(func_oid));
-    AssertArg(func_name);
+    Assert(OidIsValid(func_oid));
+    Assert(func_name);
 
     proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(func_oid));
     Assert(HeapTupleIsValid(proctup));
@@ -70,8 +70,8 @@ Oid get_ag_func_oid(const char *func_name, const int nargs, ...)
     oidvector *arg_types;
     Oid func_oid;
 
-    AssertArg(func_name);
-    AssertArg(nargs >= 0 && nargs <= FUNC_MAX_ARGS);
+    Assert(func_name);
+    Assert(nargs >= 0 && nargs <= FUNC_MAX_ARGS);
 
     va_start(ap, nargs);
     for (i = 0; i < nargs; i++)
@@ -101,8 +101,8 @@ Oid get_pg_func_oid(const char *func_name, const int nargs, ...)
     oidvector *arg_types;
     Oid func_oid;
 
-    AssertArg(func_name);
-    AssertArg(nargs >= 0 && nargs <= FUNC_MAX_ARGS);
+    Assert(func_name);
+    Assert(nargs >= 0 && nargs <= FUNC_MAX_ARGS);
 
     va_start(ap, nargs);
     for (i = 0; i < nargs; i++)
diff --git a/src/backend/utils/cache/ag_cache.c b/src/backend/utils/cache/ag_cache.c
index 4469618c..92f79584 100644
--- a/src/backend/utils/cache/ag_cache.c
+++ b/src/backend/utils/cache/ag_cache.c
@@ -202,7 +202,7 @@ static int name_hash_compare(const void *key1, const void *key2, Size keysize)
     Name name2 = (Name)key2;
 
     // keysize parameter is superfluous here
-    AssertArg(keysize == NAMEDATALEN);
+    Assert(keysize == NAMEDATALEN);
 
     return strncmp(NameStr(*name1), NameStr(*name2), NAMEDATALEN);
 }
@@ -339,7 +339,7 @@ graph_cache_data *search_graph_name_cache(const char *name)
     NameData name_key;
     graph_name_cache_entry *entry;
 
-    AssertArg(name);
+    Assert(name);
 
     initialize_caches();
 
@@ -851,7 +851,7 @@ label_cache_data *search_label_name_graph_cache(const char *name, Oid graph)
     NameData name_key;
     label_name_graph_cache_entry *entry;
 
-    AssertArg(name);
+    Assert(name);
 
     initialize_caches();
 
@@ -932,7 +932,7 @@ label_cache_data *search_label_graph_oid_cache(uint32 graph_oid, int32 id)
 {
     label_graph_oid_cache_entry *entry;
 
-    AssertArg(label_id_is_valid(id));
+    Assert(label_id_is_valid(id));
 
     initialize_caches();
 
@@ -1071,8 +1071,8 @@ label_cache_data *search_label_seq_name_graph_cache(const char *name, Oid graph)
     NameData name_key;
     label_seq_name_graph_cache_entry *entry;
 
-    AssertArg(name);
-    AssertArg(OidIsValid(graph));
+    Assert(name);
+    Assert(OidIsValid(graph));
 
     initialize_caches();
 
diff --git a/src/backend/utils/graph_generation.c b/src/backend/utils/graph_generation.c
index b27b4951..f84031c7 100644
--- a/src/backend/utils/graph_generation.c
+++ b/src/backend/utils/graph_generation.c
@@ -129,7 +129,7 @@ Datum create_complete_graph(PG_FUNCTION_ARGS)
 
     if (!graph_exists(graph_name_str))
     {
-        DirectFunctionCall1(create_graph, CStringGetDatum(graph_name));
+        DirectFunctionCall1(create_graph, CStringGetDatum(graph_name->data));
     }
 
     graph_oid = get_graph_oid(graph_name_str);
@@ -140,16 +140,16 @@ Datum create_complete_graph(PG_FUNCTION_ARGS)
         if (!label_exists(vtx_name_str, graph_oid))
         {
             DirectFunctionCall2(create_vlabel,
-                                CStringGetDatum(graph_name),
-                                CStringGetDatum(vtx_label_name));
+                                CStringGetDatum(graph_name->data),
+                                CStringGetDatum(vtx_label_name->data));
         }
     }
 
     if (!label_exists(edge_name_str, graph_oid))
     {
         DirectFunctionCall2(create_elabel,
-                            CStringGetDatum(graph_name),
-                            CStringGetDatum(edge_label_name));
+                            CStringGetDatum(graph_name->data),
+                            CStringGetDatum(edge_label_name->data));
     }
 
     vtx_label_id = get_label_id(vtx_name_str, graph_oid);

Reply via email to