HAWQ-529. Allocate resource for udf in resource negotiator.

Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/8e38e844
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/8e38e844
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/8e38e844

Branch: refs/heads/HAWQ-459
Commit: 8e38e844f0d2a1c45f7d5cd25a8f5b639c8b6fb7
Parents: 31eb645
Author: hubertzhang <[email protected]>
Authored: Wed Mar 16 10:51:07 2016 +0800
Committer: hubertzhang <[email protected]>
Committed: Wed Mar 16 10:51:07 2016 +0800

----------------------------------------------------------------------
 src/backend/cdb/cdbdatalocality.c    | 194 ++++++++++++++++++++----------
 src/backend/executor/spi.c           |  72 +++++++++--
 src/backend/optimizer/plan/planner.c |  57 ++++++++-
 src/backend/optimizer/util/clauses.c |  11 +-
 src/backend/optimizer/util/walkers.c |   1 +
 src/include/cdb/cdbdatalocality.h    |  19 ++-
 src/include/executor/spi.h           |   7 ++
 src/include/optimizer/planner.h      |   4 +
 8 files changed, 289 insertions(+), 76 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8e38e844/src/backend/cdb/cdbdatalocality.c
----------------------------------------------------------------------
diff --git a/src/backend/cdb/cdbdatalocality.c 
b/src/backend/cdb/cdbdatalocality.c
index c1f829b..d15d6b0 100644
--- a/src/backend/cdb/cdbdatalocality.c
+++ b/src/backend/cdb/cdbdatalocality.c
@@ -33,6 +33,9 @@
 #include "access/filesplit.h"
 #include "access/parquetsegfiles.h"
 #include "catalog/catalog.h"
+#include "catalog/catquery.h"
+#include "catalog/pg_inherits.h"
+#include "catalog/pg_proc.h"
 #include "cdb/cdbdatalocality.h"
 #include "cdb/cdbutil.h"
 #include "cdb/cdbvars.h"
@@ -41,6 +44,7 @@
 #include "utils/tqual.h"
 #include "utils/memutils.h"
 #include "executor/execdesc.h"
+#include "executor/spi.h"
 #include "nodes/nodes.h"
 #include "nodes/parsenodes.h"
 #include "optimizer/walkers.h"
@@ -54,11 +58,7 @@
 #include "catalog/pg_proc.h"
 #include "postgres.h"
 #include "resourcemanager/utils/hashtable.h"
-#include "catalog/pg_inherits.h"
-//#include "utils/misc/guc.c"
 
-#define PRONAME 1
-#define PROISAGG 5
 /* We need to build a mapping from host name to host index */
 
 extern bool            optimizer; /* Enable the optimizer */
@@ -372,7 +372,7 @@ static Block_Host_Index * update_data_dist_stat(
 static HostDataVolumeInfo *search_host_in_stat_context(
                split_to_segment_mapping_context *context, char *hostname);
 
-static bool IsAggFunction(char* funcName);
+static bool IsBuildInFunction(Oid funcOid);
 
 static bool allocate_hash_relation(Relation_Data* rel_data,
                Assignment_Log_Context *log_context, TargetSegmentIDMap* idMap,
@@ -630,39 +630,36 @@ static void collect_range_tables(Query *query, List* 
full_range_table,
 /*
  *
  */
-static bool IsAggFunction(char* funcName) {
-       if (funcName == NULL) {
-               return false;
-       }
-       Relation pg_proc_rel;
-       TupleDesc pg_proc_dsc;
-       HeapTuple tuple;
-       SysScanDesc pg_proc_scan;
-
-       pg_proc_rel = heap_open(ProcedureRelationId, AccessShareLock);
-       pg_proc_dsc = RelationGetDescr(pg_proc_rel);
-       ScanKeyData skey;
+static bool IsBuildInFunction(Oid foid) {
 
-       ScanKeyInit(&skey, PRONAME, BTEqualStrategyNumber,
-       F_NAMEEQ, CStringGetDatum(funcName));
-
-       pg_proc_scan = systable_beginscan(pg_proc_rel, InvalidOid, FALSE,
-                       ActiveSnapshot, 1, &skey);
-       while (HeapTupleIsValid(tuple = systable_getnext(pg_proc_scan))) {
+       cqContext  *pcqCtx;
+       HeapTuple readtup = NULL;
+       HeapTuple       procedureTuple;
+       Form_pg_proc procedureStruct;
 
-               bool isAgg = DatumGetBool(fastgetattr(tuple, PROISAGG, 
pg_proc_dsc, NULL));
-               systable_endscan(pg_proc_scan);
-               heap_close(pg_proc_rel, AccessShareLock);
-               if (isAgg) {
-                       return true;
-               } else {
-                       return false;
-               }
+       /*
+        * get the procedure tuple corresponding to the given function Oid
+        */
+       pcqCtx = caql_beginscan(
+                       NULL,
+                       cql("SELECT * FROM pg_proc "
+                               " WHERE oid = :1 ",
+                               ObjectIdGetDatum(foid)));
+
+       procedureTuple = caql_getnext(pcqCtx);
+
+       if (!HeapTupleIsValid(procedureTuple))
+               elog(ERROR, "cache lookup failed for function %u", foid);
+       procedureStruct = (Form_pg_proc) GETSTRUCT(procedureTuple);
+       caql_endscan(pcqCtx);
+       /* We treat proc namespace = 11 (pg_catalog) as a built-in function. */
+       if (procedureStruct->pronamespace == 11) {
+               return true;
+       } else {
+               return false;
        }
-       systable_endscan(pg_proc_scan);
-       heap_close(pg_proc_rel, AccessShareLock);
-       return true;
 }
+
 /*
  *
  */
@@ -676,25 +673,6 @@ static void 
convert_range_tables_to_oids_and_check_table_functions(List **range_
        foreach(old_lc, *range_tables)
        {
                RangeTblEntry *entry = (RangeTblEntry *) lfirst(old_lc);
-               if (entry->rtekind == RTE_FUNCTION || entry->rtekind == 
RTE_TABLEFUNCTION) {
-                       *isTableFunctionExists = true;
-               }
-               if (entry->rtekind == RTE_SUBQUERY) {
-                       Query* subQuery = entry->subquery;
-                       ListCell *lc;
-                       foreach(lc, subQuery->targetList)
-                       {
-                               TargetEntry *te = (TargetEntry *) lfirst(lc);
-                               bool isAggFunc = IsAggFunction(te->resname);
-                               // if target list of subquery contains non 
aggregate function,
-                               // then we consider the query contains and use 
default_segment_num guc
-                               // as the number of virtual segment
-                               if (!isAggFunc) {
-                                       *isTableFunctionExists = true;
-                               }
-                       }
-
-               }
                if (entry->rtekind != RTE_RELATION) {
                        continue;
                }
@@ -3958,11 +3936,54 @@ static void cleanup_allocation_algorithm(
 }
 
 /*
+ * udf_collector_walker: the routine to find udfs.
+ */
+bool udf_collector_walker(Node *node,
+               udf_collector_context *context) {
+       if (node == NULL) {
+               return false;
+       }
+
+       if (IsA(node, Query)) {
+               return query_tree_walker((Query *) node, udf_collector_walker,
+                               (void *) context,
+                               QTW_EXAMINE_RTES);
+       }
+
+       /* For Aggref, we don't consider it as a udf. */
+
+       if(IsA(node,FuncExpr)){
+               if(!IsBuildInFunction(((FuncExpr *) node)->funcid)){
+                       context->udf_exist = true;
+               }
+               return false;
+       }
+
+       return expression_tree_walker(node, udf_collector_walker,
+                       (void *) context);
+
+       return false;
+}
+
+/*
+ * find_udf: collect all udf, and store them into the udf_collector_context.
+ */
+void find_udf(Query *query, udf_collector_context *context) {
+
+       query_tree_walker(query, udf_collector_walker, (void *) context,
+       QTW_EXAMINE_RTES);
+
+       return;
+}
+
+
+/*
  * calculate_planner_segment_num
+ * fixedVsegNum is used by PBE, since all the executes should use the same 
number of vsegs.
  */
 SplitAllocResult *
 calculate_planner_segment_num(Query *query, QueryResourceLife resourceLife,
-               List *fullRangeTable, GpPolicy *intoPolicy, int sliceNum) {
+               List *fullRangeTable, GpPolicy *intoPolicy, int sliceNum, int 
fixedVsegNum) {
        SplitAllocResult *result = NULL;
        QueryResource *resource = NULL;
        QueryResourceParameters *resource_parameters = NULL;
@@ -3971,14 +3992,14 @@ calculate_planner_segment_num(Query *query, 
QueryResourceLife resourceLife,
        List *alloc_result = NIL;
        split_to_segment_mapping_context context;
 
-       int planner_segments = -1; /*virtual segments number for explain 
statement */
+       int planner_segments = 0; /*virtual segments number for explain 
statement */
 
        result = (SplitAllocResult *) palloc(sizeof(SplitAllocResult));
        result->resource = NULL;
        result->resource_parameters = NULL;
        result->alloc_results = NIL;
        result->relsType = NIL;
-       result->planner_segments = -1;
+       result->planner_segments = 0;
        result->datalocalityInfo = makeStringInfo();
 
        /* fake data locality */
@@ -3995,7 +4016,7 @@ calculate_planner_segment_num(Query *query, 
QueryResourceLife resourceLife,
                result->resource_parameters = NULL;
                result->alloc_results = NIL;
                result->relsType = NIL;
-               result->planner_segments = -1;
+               result->planner_segments = 0;
                return result;
        }
 
@@ -4020,6 +4041,11 @@ calculate_planner_segment_num(Query *query, 
QueryResourceLife resourceLife,
                 * 5 data size of random "from" relation
                 */
 
+               udf_collector_context udf_context;
+               udf_context.udf_exist = false;
+
+               find_udf(query, &udf_context);
+               isTableFunctionExists = udf_context.udf_exist;
                /*convert range table list to oid list and check whether table 
function exists
                 *we keep a full range table list and a range table list 
without result relation separately
                 */
@@ -4046,7 +4072,15 @@ calculate_planner_segment_num(Query *query, 
QueryResourceLife resourceLife,
 
                /*use inherit resource*/
                if (resourceLife == QRL_INHERIT) {
-                       resource = AllocateResource(resourceLife, sliceNum, 0, 
0, 0, NULL, 0);
+
+                       if ( SPI_IsInPrepare() && (GetActiveQueryResource() == 
NULL) )
+                       {
+                               resource = NULL;
+                       }
+                       else
+                       {
+                               resource = AllocateResource(resourceLife, 
sliceNum, 0, 0, 0, NULL, 0);
+                       }
 
                        saveQueryResourceParameters(
                                                        resource_parameters,  
/* resource_parameters */
@@ -4197,6 +4231,11 @@ calculate_planner_segment_num(Query *query, 
QueryResourceLife resourceLife,
                                maxTargetSegmentNumber = 
enforce_virtual_segment_number;
                                minTargetSegmentNumber = 
enforce_virtual_segment_number;
                        }
+                       /* in PBE mode, the execute should use the same vseg 
number. */
+                       if(fixedVsegNum > 0 ){
+                               maxTargetSegmentNumber = fixedVsegNum;
+                               minTargetSegmentNumber = fixedVsegNum;
+                       }
                        uint64_t before_rm_allocate_resource = 
gettime_microsec();
 
                        /* cost is use by RM to balance workload between hosts. 
the cost is at least one block size*/
@@ -4204,9 +4243,40 @@ calculate_planner_segment_num(Query *query, 
QueryResourceLife resourceLife,
                        mincost <<= 20;
                        int64 queryCost = context.total_size < mincost ? 
mincost : context.total_size;
                        if (QRL_NONE != resourceLife) {
-                               resource = AllocateResource(QRL_ONCE, sliceNum, 
queryCost,
-                                               maxTargetSegmentNumber, 
minTargetSegmentNumber,
-                                               
context.host_context.hostnameVolInfos, context.host_context.size);
+
+                               if (SPI_IsInPrepare())
+                               {
+                                       resource = NULL;
+                                       /*
+                                        * prepare need to get resource quota 
from RM
+                                        * and pass quota(planner_segments) to 
Orca or Planner to generate plan
+                                        * the following executes(in PBE) 
should reallocate the same number
+                                        * of resources.
+                                        */
+                                       uint32 seg_num;
+                                       uint32 seg_num_min;
+                                       uint32 seg_memory_mb;
+                                       double seg_core;
+
+                                       GetResourceQuota(maxTargetSegmentNumber,
+                                                        minTargetSegmentNumber,
+                                                        &seg_num,
+                                                        &seg_num_min,
+                                                        &seg_memory_mb,
+                                                        &seg_core);
+
+                                       planner_segments = seg_num;
+                                       minTargetSegmentNumber = 
planner_segments;
+                                       maxTargetSegmentNumber = 
planner_segments;
+                               }
+                               else
+                               {
+                                       resource = AllocateResource(QRL_ONCE, 
sliceNum, queryCost,
+                                                                   
maxTargetSegmentNumber,
+                                                                   
minTargetSegmentNumber,
+                                                                   
context.host_context.hostnameVolInfos,
+                                                                   
context.host_context.size);
+                               }
 
                                saveQueryResourceParameters(
                                                                
resource_parameters,                   /* resource_parameters */
@@ -4236,7 +4306,7 @@ calculate_planner_segment_num(Query *query, 
QueryResourceLife resourceLife,
 
                        if (resource == NULL) {
                                result->resource = NULL;
-                               result->resource_parameters = NULL;
+                               result->resource_parameters = 
resource_parameters;
                                result->alloc_results = NIL;
                                result->relsType = NIL;
                                result->planner_segments = planner_segments;

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8e38e844/src/backend/executor/spi.c
----------------------------------------------------------------------
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index 07812aa..8e7645c 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -46,6 +46,7 @@
 #include "cdb/memquota.h"
 #include "executor/nodeFunctionscan.h"
 #include "nodes/stack.h"
+#include "cdb/cdbdatalocality.h"
 
 extern char *savedSeqServerHost;
 extern int savedSeqServerPort;
@@ -74,6 +75,7 @@ static int    _SPI_curid = -1;
 
 static PGconn *_QD_conn = NULL; /* To call back to the QD for SQL execution */
 static char *_QD_currently_prepared_stmt = NULL;
+static int SPI_prepare_depth = 0;
 
 static void _SPI_prepare_plan(const char *src, SPIPlanPtr plan);
 
@@ -106,6 +108,32 @@ static bool _SPI_checktuples(void);
 
 /* =================== interface functions =================== */
 
+bool SPI_IsInPrepare(void)
+{
+       if (SPI_prepare_depth > 0)
+       {
+               return true;
+       }
+       else if (SPI_prepare_depth < 0)
+       {
+               elog(ERROR, "Invalid SPI_prepare_depth %d while getting SPI 
prepare depth",
+                           SPI_prepare_depth);
+       }
+
+       return false;
+}
+
+void SPI_IncreasePrepareDepth(void)
+{
+       SPI_prepare_depth++;
+}
+
+void SPI_DecreasePrepareDepth(void)
+{
+       SPI_prepare_depth--;
+}
+
+
 int
 SPI_connect(void)
 {
@@ -566,6 +594,8 @@ SPI_prepare(const char *src, int nargs, Oid *argtypes)
        _SPI_plan       plan;
        _SPI_plan  *result;
 
+       SPI_IncreasePrepareDepth();
+
        if (src == NULL || nargs < 0 || (nargs > 0 && argtypes == NULL))
        {
                SPI_result = SPI_ERROR_ARGUMENT;
@@ -591,9 +621,13 @@ SPI_prepare(const char *src, int nargs, Oid *argtypes)
 
                /* copy plan to procedure context */
                result = _SPI_copy_plan(&plan, _SPI_CPLAN_PROCXT);
+
+               SPI_DecreasePrepareDepth();
        }
        PG_CATCH();
        {
+               SPI_DecreasePrepareDepth();
+
                _SPI_end_call(true);
                PG_RE_THROW();
        }
@@ -1819,13 +1853,37 @@ _SPI_execute_plan(_SPI_plan * plan, Datum *Values, 
const char *Nulls,
                                             (stmt->resource == NULL) &&
                                             (stmt->resource_parameters != 
NULL) )
                                        {
-                                               stmt->resource = 
AllocateResource(stmt->resource_parameters->life,
-                                                                       
stmt->resource_parameters->slice_size,
-                                                                       
stmt->resource_parameters->iobytes,
-                                                                       
stmt->resource_parameters->max_target_segment_num,
-                                                                       
stmt->resource_parameters->min_target_segment_num,
-                                                                       
stmt->resource_parameters->vol_info,
-                                                                       
stmt->resource_parameters->vol_info_size);
+                                               SplitAllocResult *allocResult = 
NULL;
+
+                                               /* If this is a parallel plan. 
*/
+                                               if (stmt->planTree->dispatch == 
DISPATCH_PARALLEL)
+                                               {
+                                                       /*
+                                                        * Now, we want to 
allocate resource.
+                                                        */
+                                                       allocResult = 
calculate_planner_segment_num(queryTree,
+                                                                               
                    stmt->resource_parameters->life,
+                                                                               
                    stmt->rtable,
+                                                                               
                    stmt->intoPolicy,
+                                                                               
                    stmt->nMotionNodes + stmt->nInitPlans + 1,
+                                                                               
                    stmt->resource_parameters->min_target_segment_num);
+
+                                                       Assert(allocResult);
+
+                                                       if(stmt->resource 
!=NULL)
+                                                       {
+                                                               
pfree(stmt->resource);
+                                                       }
+                                                       stmt->resource = 
allocResult->resource;
+                                                       
if(stmt->scantable_splits !=NULL)
+                                                       {
+                                                               
list_free_deep(stmt->scantable_splits);
+                                                       }
+                                                       stmt->scantable_splits 
= allocResult->alloc_results;
+                                                       stmt->planner_segments 
= allocResult->planner_segments;
+                                                       stmt->datalocalityInfo 
= allocResult->datalocalityInfo;
+                                                       pfree(allocResult);
+                                               }
                                        }
 
                                        originalStmt->resource = NULL;

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8e38e844/src/backend/optimizer/plan/planner.c
----------------------------------------------------------------------
diff --git a/src/backend/optimizer/plan/planner.c 
b/src/backend/optimizer/plan/planner.c
index be057e0..c6be59a 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -91,6 +91,8 @@ planner_hook_type planner_hook = NULL;
 
 ParamListInfo PlannerBoundParamList = NULL;            /* current boundParams 
*/
 
+static int PlanningDepth = 0;          /* Planning depth */
+
 /* Expression kind codes for preprocess_expression */
 #define EXPRKIND_QUAL                  0
 #define EXPRKIND_TARGET                        1
@@ -165,6 +167,32 @@ static void sort_canonical_gs_list(List *gs, int *p_nsets, 
Bitmapset ***p_sets);
 static Plan *pushdown_preliminary_limit(Plan *plan, Node *limitCount, int64 
count_est, Node *limitOffset, int64 offset_est);
 bool is_dummy_plan(Plan *plan);
 
+
+bool is_in_planning_phase(void)
+{
+       if (PlanningDepth > 0)
+       {
+               return true;
+       }
+       else if (PlanningDepth < 0)
+       {
+               elog(ERROR, "Invalid PlanningDepth %d while getting planning 
phase", PlanningDepth);
+       }
+
+       return false;
+}
+
+void increase_planning_depth(void)
+{
+       PlanningDepth++;
+}
+
+void decrease_planning_depth(void)
+{
+       PlanningDepth--;
+}
+
+
 #ifdef USE_ORCA
 /**
  * Logging of optimization outcome
@@ -285,7 +313,7 @@ planner(Query *parse, int cursorOptions,
        PlannedStmt *result = NULL;
        instr_time      starttime, endtime;
        ResourceNegotiatorResult *ppResult = (ResourceNegotiatorResult *) 
palloc(sizeof(ResourceNegotiatorResult));
-       SplitAllocResult initResult = {NULL, NULL, NIL, -1, NIL, NULL};
+       SplitAllocResult initResult = {NULL, NULL, NIL, 0, NIL, NULL};
        ppResult->saResult = initResult;
        ppResult->stmt = NULL;
        static int plannerLevel = 0;
@@ -300,6 +328,8 @@ planner(Query *parse, int cursorOptions,
         * resource to run this query. After gaining the resource, we can 
perform the
         * actual optimization.
         */
+       increase_planning_depth();
+
        plannerLevel++;
        if (!resourceNegotiateDone)
        {
@@ -309,6 +339,8 @@ planner(Query *parse, int cursorOptions,
       {
         resource_negotiator(parse, cursorOptions, boundParams, resourceLife, 
&ppResult);
 
+               decrease_planning_depth();
+
                if(ppResult->stmt && ppResult->stmt->planTree)
                {
                        isDispatchParallel = ppResult->stmt->planTree->dispatch 
== DISPATCH_PARALLEL;
@@ -318,6 +350,8 @@ planner(Query *parse, int cursorOptions,
          }
          PG_CATCH();
          {
+               decrease_planning_depth();
+
                if ((ppResult != NULL))
                {
                  pfree(ppResult);
@@ -430,7 +464,7 @@ planner(Query *parse, int cursorOptions,
          }
          else
          {
-           gp_segments_for_planner = -1;
+           gp_segments_for_planner = 0;
          }
          SetActiveQueryResource(savedQueryResource);
          if ((ppResult != NULL))
@@ -450,7 +484,7 @@ planner(Query *parse, int cursorOptions,
                }
                else
                {
-                       gp_segments_for_planner = -1;
+                       gp_segments_for_planner = 0;
                }
                SetActiveQueryResource(savedQueryResource);
 
@@ -470,6 +504,7 @@ planner(Query *parse, int cursorOptions,
        return result;
 }
 
+
 /*
  * The new framework for HAWQ 2.0 query optimizer
  */
@@ -480,6 +515,8 @@ resource_negotiator(Query *parse, int cursorOptions, 
ParamListInfo boundParams,
   PlannedStmt *plannedstmt = NULL;
   do
   {
+               udf_collector_context udf_context;
+               udf_context.udf_exist = false;
     SplitAllocResult *allocResult = NULL;
     Query *my_parse = copyObject(parse);
     ParamListInfo my_boundParams = copyParamList(boundParams);
@@ -495,12 +532,24 @@ resource_negotiator(Query *parse, int cursorOptions, 
ParamListInfo boundParams,
        */
       allocResult = calculate_planner_segment_num(my_parse, resourceLife,
                                           plannedstmt->rtable, 
plannedstmt->intoPolicy,
-                                          plannedstmt->nMotionNodes + 
plannedstmt->nInitPlans + 1);
+                                          plannedstmt->nMotionNodes + 
plannedstmt->nInitPlans + 1,
+                                          -1);
 
       Assert(allocResult);
 
       (*result)->saResult = *allocResult;
       pfree(allocResult);
+    }else{
+               find_udf(my_parse, &udf_context);
+               if(udf_context.udf_exist){
+                       if ((resourceLife == QRL_ONCE) || (resourceLife == 
QRL_NONE)) {
+                               int64 mincost = min_cost_for_each_query;
+                               mincost <<= 20;
+                               int avgSliceNum = 3;
+                               (*result)->saResult.resource = 
AllocateResource(QRL_ONCE, avgSliceNum, mincost,
+                                               
GetUserDefinedFunctionVsegNum(),GetUserDefinedFunctionVsegNum(),NULL, 0);
+                       }
+               }
     }
   } while (0);
 }

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8e38e844/src/backend/optimizer/util/clauses.c
----------------------------------------------------------------------
diff --git a/src/backend/optimizer/util/clauses.c 
b/src/backend/optimizer/util/clauses.c
index a80737f..a3d67f2 100644
--- a/src/backend/optimizer/util/clauses.c
+++ b/src/backend/optimizer/util/clauses.c
@@ -2831,8 +2831,15 @@ simplify_function(Oid funcid, Oid result_type, List 
*args,
        if (!HeapTupleIsValid(func_tuple))
                elog(ERROR, "cache lookup failed for function %u", funcid);
 
-       newexpr = evaluate_function(funcid, result_type, args,
-                                                               func_tuple, 
context);
+       if (is_in_planning_phase())
+       {
+               newexpr = NULL;
+       }
+       else
+       {
+               newexpr = evaluate_function(funcid, result_type, args,
+                                           func_tuple, context);
+       }
 
        if (large_const(newexpr, context->max_size))
        {

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8e38e844/src/backend/optimizer/util/walkers.c
----------------------------------------------------------------------
diff --git a/src/backend/optimizer/util/walkers.c 
b/src/backend/optimizer/util/walkers.c
index 6f1ee51..a8222fb 100644
--- a/src/backend/optimizer/util/walkers.c
+++ b/src/backend/optimizer/util/walkers.c
@@ -162,6 +162,7 @@ expression_tree_walker(Node *node,
                case T_PartBoundExpr:
                case T_PartBoundInclusionExpr:
                case T_PartBoundOpenExpr:
+               case T_RangeTblEntry    :
                        /* primitive node types with no expression subnodes */
                        break;
                case T_Aggref:

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8e38e844/src/include/cdb/cdbdatalocality.h
----------------------------------------------------------------------
diff --git a/src/include/cdb/cdbdatalocality.h 
b/src/include/cdb/cdbdatalocality.h
index 95f2bd5..67375d7 100644
--- a/src/include/cdb/cdbdatalocality.h
+++ b/src/include/cdb/cdbdatalocality.h
@@ -49,6 +49,13 @@ typedef struct SplitAllocResult
 } SplitAllocResult;
 
 /*
+ * structure containing all relation range table entries.
+ */
+typedef struct udf_collector_context {
+       bool udf_exist;
+} udf_collector_context;
+
+/*
  * structure containing rel and type when execution
  */
 typedef struct CurrentRelType {
@@ -86,7 +93,17 @@ void saveQueryResourceParameters(
  * we calculate the appropriate planner segment_num.
  */
 SplitAllocResult * calculate_planner_segment_num(Query *query, 
QueryResourceLife resourceLife,
-                                                List *rtable, GpPolicy 
*intoPolicy, int sliceNum);
+                                                List *rtable, GpPolicy 
*intoPolicy, int sliceNum, int fixedVsegNum);
+
+/*
+ * udf_collector_walker: the routine to find udfs.
+ */
+bool udf_collector_walker(Node *node,  udf_collector_context *context);
+
+/*
+ * find_udf: collect all udf, and store them into the udf_collector_context.
+ */
+void find_udf(Query *query, udf_collector_context *context);
 
 FILE *fp;
 FILE *fpaoseg;

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8e38e844/src/include/executor/spi.h
----------------------------------------------------------------------
diff --git a/src/include/executor/spi.h b/src/include/executor/spi.h
index 867daf5..7b4ba1b 100644
--- a/src/include/executor/spi.h
+++ b/src/include/executor/spi.h
@@ -155,4 +155,11 @@ extern uint64 SPI_GetMemoryReservation(void);
 extern void SPI_ReserveMemory(uint64 mem_reserved);
 extern bool SPI_IsMemoryReserved(void);
 
+/**
+ * Query resource related routines.
+ */
+extern bool SPI_IsInPrepare(void);
+extern void SPI_IncreasePrepareDepth(void);
+extern void SPI_DecreasePrepareDepth(void);
+
 #endif   /* SPI_H */

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8e38e844/src/include/optimizer/planner.h
----------------------------------------------------------------------
diff --git a/src/include/optimizer/planner.h b/src/include/optimizer/planner.h
index 48de348..cc2eb7d 100644
--- a/src/include/optimizer/planner.h
+++ b/src/include/optimizer/planner.h
@@ -50,4 +50,8 @@ extern bool choose_hashed_grouping(PlannerInfo *root,
                                                                   double 
dNumGroups, 
                                                                   
AggClauseCounts *agg_counts);
 
+extern bool is_in_planning_phase(void);
+extern void increase_planning_depth(void);
+extern void decrease_planning_depth(void);
+
 #endif   /* PLANNER_H */

Reply via email to