orhankislal commented on code in PR #581:
URL: https://github.com/apache/madlib/pull/581#discussion_r891722782


##########
src/ports/postgres/modules/mxgboost/madlib_xgboost.py_in:
##########
@@ -0,0 +1,549 @@
+# coding=utf-8
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import plpy
+import re
+import pandas as pd
+import xgboost as xgb
+import numpy
+import cPickle as pickle
+import zlib
+import ast
+import collections
+import itertools
+from bisect import bisect_left
+from operator import itemgetter
+
+from sklearn.model_selection import train_test_split
+from sklearn.metrics import precision_recall_fscore_support
+from sklearn.metrics import confusion_matrix
+from sklearn.metrics import roc_auc_score
+from sklearn.metrics import roc_curve
+
+from utilities.utilities import _assert
+from utilities.utilities import add_postfix
+from utilities.utilities import unique_string
+from utilities.validate_args import get_cols
+from utilities.validate_args import table_exists
+
+def serialize_pandas_dframe_as_bytea(schema_madlib, source_table, id_column,
+                                     class_label, features):
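+    """
+    Fetch id_column, features and class_label from source_table into a pandas
+    DataFrame, drop columns that are entirely NULL, and return the frame as a
+    zlib-compressed pickle suitable for storage in a bytea column.
+    """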
+
+    mdl_train_sql = """
+        SELECT
+            {id_column},
+            {features},
+            {class_label}
+        FROM
+            {source_table}
+    """.format(**locals())
+
+    result = plpy.execute(mdl_train_sql)
+    df = pd.DataFrame.from_records(result)
+    df_filtered = df.dropna(axis=1, how='all')
+
+    compressed = zlib.compress(pickle.dumps(df_filtered))
+    return compressed
+
+def print_prec_rec_fscore_support(mat, metric_labels, class_label, class_values):
+    """
+    Pretty-print precision, recall, fscore & support using a pandas DataFrame.
+    """
+    tbl = pd.DataFrame(mat, columns=metric_labels)
+    tbl[class_label] = class_values
+    tbl = tbl[[class_label]+metric_labels]
+    return tbl
+
+def takeClosest(myList, myNumber):
+    """
+    Assumes myList is sorted. Returns closest value to myNumber.
+    If two numbers are equally close, return the smaller number.
+    """
+    pos = bisect_left(myList, myNumber)
+    if pos == 0:
+        return myList[0]
+    if pos == len(myList):
+        return myList[-1]
+    before = myList[pos - 1]
+    after = myList[pos]
+    if after - myNumber < myNumber - before:
+        return after
+    else:
+        return before
+
+def expand_grid(params):
+    #Expand the params to run grid search
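+    #e.g. {'max_depth': (3, 5), 'eta': 0.1} expands to
+    #     [('max_depth=3', 'eta=0.1'), ('max_depth=5', 'eta=0.1')]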
+    params_list = []
+    for key, val  in params.items():
+        #If supplied param is a list of values, expand it out
+        if val and isinstance(val, collections.Iterable):
+            r = ["""{k}={v}""".format(k=key,v=v) for v in val]
+        else:
+            r = ["""{k}={v}""".format(k=key,v=val)]
+        params_list.append(r)
+    params_grid = [l for l in itertools.product(*params_list)]
+    return params_grid
+
+def xgboost_train(schema_madlib, dframe, features_all, class_label, params,
+                  class_weights, train_set_size, id_column, train_set_split_var):
+
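+    #Reconstruct the pandas DataFrame serialized by serialize_pandas_dframe_as_bytea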
+    df = pickle.loads(zlib.decompress(dframe))
+    features_all.append(id_column)
+    features = filter(lambda x: x in df.columns, features_all)
+    X = df[features].as_matrix()
+    y = df[class_label]
+    class_list = numpy.unique(y).tolist()
+
+    if not train_set_split_var or train_set_split_var == 'None':
+        X_train, X_test, y_train, y_test = train_test_split(
+            X, y, test_size=float(1 - train_set_size))
+        #We don't actually want the test set size to change; it should stay
+        #constant as the train set size changes so we can compare apples to
+        #apples. So let's lock it at 20% (less only if the train size is > 80%).
+        test_set_size = min(1 - train_set_size, 0.2)
+        X_test = X_test[range(0, int(len(y) * test_set_size)), ]
+        y_test = y_test.head(int(len(y) * test_set_size))
+    else:
+        split_indx = numpy.where(numpy.array(features) == train_set_split_var)[0]
+        X = numpy.delete(X, split_indx, 1)
+        X_train = X[numpy.array(df[train_set_split_var] == 1), ]
+        X_test = X[numpy.array(df[train_set_split_var] == 0), ]
+        y_train = y[numpy.array(df[train_set_split_var] == 1)]
+        y_test = y[numpy.array(df[train_set_split_var] == 0)]
+    #Save off and remove the id_column for later output. Make sure to get
+    #rid of id_column from features!
+    test_ids = X_test[:, len(features) - 1]
+    X_train = numpy.delete(X_train, len(features) - 1, 1)
+    X_test = numpy.delete(X_test, len(features) - 1, 1)
+    features = features[0:len(features) - 1]
+
+    class_list_y_train = numpy.unique(y_train).tolist()
+    class_list_y_test = numpy.unique(y_test).tolist()
+    if (class_list != class_list_y_train) or (class_list != class_list_y_test):
+        plpy.error("Train test split caused a subset with missing classes.")
+
+    #Compute per-sample weights
+    sample_representation = y_train.value_counts()
+    total_samples = sum(sample_representation)
+    sample_weights = None
+    if not class_weights:
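+        #Default to balanced weights: each sample is weighted inversely to its
+        #class frequency, normalized so the class weights sum to 1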
+        sample_weights = map(
+            lambda s: (total_samples * 1.0 / sample_representation[s]) /
+                sum([total_samples * 1.0 / sample_representation[c]
+                     for c in sample_representation.keys()]),
+            y_train
+        )
+    else:
+        #User-supplied class weights
+        class_weights_dict = ast.literal_eval(re.sub("[\\t]", "", class_weights).strip())
+        sample_weights = map(lambda s: class_weights_dict[s], y_train)
+
+    #Train gradient boosted trees
+    p_list = [p.split('=') for p in ast.literal_eval(re.sub("[\\t]", "", params).strip())]
+    params_dict = dict([(k, ast.literal_eval(v.strip())) for k, v in p_list])
+    gbm = xgb.XGBClassifier(**params_dict)
+
+    #Fit model
+    gbm.fit(
+        X_train,
+        y_train,
+        eval_metric='auc',
+        sample_weight=sample_weights
+    )
+    #Compute and return model metrics score
+    y_pred_train = gbm.predict(X_train)
+    y_pred_test = gbm.predict(X_test)
+    cmat_train = confusion_matrix(y_train, y_pred_train)
+    cmat_test = confusion_matrix(y_test, y_pred_test)
+    scores = numpy.array(precision_recall_fscore_support(y_test, y_pred_test)).transpose()
+
+    metric_labels = ['precision', 'recall', 'fscore', 'support']
+    model_metrics = print_prec_rec_fscore_support(scores, metric_labels, class_label, gbm.classes_)
+
+    #Calculate feature importance scores
+    importance = gbm._Booster.get_fscore()
+    if len(importance) == 0:
+        plpy.error("No importance found for any feature")
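+    #get_fscore() keys importances by feature index ('f0', 'f1', ...);
+    #map them back to column names and sort by importance, descending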
+    fnames_importances = sorted(
+        [(features[int(k.replace('f', ''))], importance[k]) for k in importance],
+        key=itemgetter(1),
+        reverse=True
+    )
+    fnames, f_importance_scores = zip(*fnames_importances)
+    important_features = pd.DataFrame(fnames_importances)
+
+    test_ids = [int(x) for x in test_ids]
+    return (features, pickle.dumps(gbm), params, fnames, f_importance_scores,
+            model_metrics.iloc[:, 1].values.tolist(),
+            model_metrics.iloc[:, 2].values.tolist(),
+            model_metrics.iloc[:, 3].values.tolist(),
+            model_metrics.iloc[:, 4].values.tolist(),
+            test_ids)
+
+def xgboost_grid_search(schema_madlib, source_table, id_column, class_label,
+                        list_of_features, list_of_features_to_exclude,
+                        params_str, grid_search_results_tbl, class_weights,
+                        train_set_size, train_set_split_var):
+
+    _assert(table_exists(source_table), "Input table missing")
+    _assert(not table_exists(grid_search_results_tbl), "Output table exists")
+    grid_search_results_tbl_summary = add_postfix(grid_search_results_tbl, '_summary')
+    _assert(not table_exists(grid_search_results_tbl_summary), "Output summary table exists")
+    params = ast.literal_eval(re.sub("[\\t]","",params_str).strip())
+    params_grid = expand_grid(params)
+    #Save each parameter list in the grid as a row in a distributed table
+    grid_search_params_temp_tbl = unique_string('grid_params')
+    grid_search_params_temp_tbl_df = unique_string('grid_params_df')
+    dist_str = "m4_ifdef(`__POSTGRESQL__', `', `DISTRIBUTED BY (params_index)')"
+    sql = """
+        CREATE TEMP TABLE {grid_search_params_temp_tbl}
+        (
+            params_index int,
+            params text
+        ) {dist_str}
+    """.format(**locals())
+    plpy.execute(sql)
+    sql = """
+        INSERT INTO {grid_search_params_temp_tbl}
+            VALUES ({params_index}, $X${val}$X$);
+    """
+    for indx, val in enumerate(params_grid):
+        plpy.execute(
+            sql.format(
+                val=val,
+                params_index=indx + 1, #postgres indices start from 1, so keep it consistent
+                grid_search_params_temp_tbl=grid_search_params_temp_tbl
+            )
+        )
+
+    if list_of_features.strip() == '*':
+        #Extract feature names from information_schema
+        if list_of_features_to_exclude is None:
+            list_of_features_to_exclude = []
+        discard_features = list_of_features_to_exclude + [class_label, id_column]
+        features = [col for col in get_cols(source_table) if col not in discard_features]
+        list_of_features = ','.join(features)
+    else:
+        features = [f.strip() for f in list_of_features.split(',')]
+
+    grid_size = len(params_grid)
+    sql = """
+        CREATE TEMP TABLE {grid_search_params_temp_tbl_df}

Review Comment:
   Since they are temp tables, drop statements are not strictly necessary, but I will add them just in case.
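
   A minimal sketch of that cleanup, assuming the temp table name variables
   from this diff (illustrative placement at the end of xgboost_grid_search,
   not the committed code):

   ```python
   #Hypothetical cleanup sketch: drop the intermediate temp tables once the
   #grid-search output tables have been materialized
   plpy.execute("DROP TABLE IF EXISTS {0}".format(grid_search_params_temp_tbl))
   plpy.execute("DROP TABLE IF EXISTS {0}".format(grid_search_params_temp_tbl_df))
   ```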


