orhankislal commented on code in PR #581:
URL: https://github.com/apache/madlib/pull/581#discussion_r892788651


##########
src/ports/postgres/modules/mxgboost/madlib_xgboost.py_in:
##########
@@ -0,0 +1,549 @@
+# coding=utf-8
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import plpy
+import re
+import pandas as pd
+import xgboost as xgb
+import numpy
+import cPickle as pickle
+import zlib
+import ast
+import collections
+import itertools
+from bisect import bisect_left
+from operator import itemgetter
+
+from sklearn.model_selection import train_test_split
+from sklearn.metrics import precision_recall_fscore_support
+from sklearn.metrics import confusion_matrix
+from sklearn.metrics import roc_auc_score
+from sklearn.metrics import roc_curve
+
+from utilities.utilities import _assert
+from utilities.utilities import add_postfix
+from utilities.utilities import unique_string
+from utilities.validate_args import get_cols
+from utilities.validate_args import table_exists
+
+def serialize_pandas_dframe_as_bytea(schema_madlib, source_table, id_column,
+                                     class_label, features):
+
+    mdl_train_sql = """
+        SELECT
+            {id_column},
+            {features},
+            {class_label}
+        FROM
+            {source_table}
+    """.format(**locals())
+
+    result = plpy.execute(mdl_train_sql)
+    df = pd.DataFrame.from_records(result)
+    df_filtered = df.dropna(axis=1, how='all')
+
+    compressed = zlib.compress(pickle.dumps(df_filtered))
+    return compressed
+
+def print_prec_rec_fscore_support(mat, metric_labels, class_label, class_values):
+    """
+    pretty print precision, recall, fscore & support using pandas dataframe
+    """
+    tbl = pd.DataFrame(mat, columns=metric_labels)
+    tbl[class_label] = class_values
+    tbl = tbl[[class_label]+metric_labels]
+    return tbl
+
+def takeClosest(myList, myNumber):
+    """
+    Assumes myList is sorted. Returns closest value to myNumber.
+    If two numbers are equally close, return the smallest number.
+    """
+    pos = bisect_left(myList, myNumber)
+    if pos == 0:
+        return myList[0]
+    if pos == len(myList):
+        return myList[-1]
+    before = myList[pos - 1]
+    after = myList[pos]
+    if after - myNumber < myNumber - before:
+        return after
+    else:
+        return before
+
+def expand_grid(params):
+    #Expand the params to run-grid search
+    params_list = []
+    for key, val  in params.items():
+        #If supplied param is a list of values, expand it out
+        if(val and isinstance(val, collections.Iterable)):
+            r = ["""{k}={v}""".format(k=key,v=v) for v in val]
+        else:
+            r = ["""{k}={v}""".format(k=key,v=val)]
+        params_list.append(r)
+    params_grid = [l for l in itertools.product(*params_list)]
+    return params_grid
+
+def xgboost_train(schema_madlib, dframe, features_all, class_label, params,
+                  class_weights, train_set_size, id_column, train_set_split_var):
+
+    df = pickle.loads(zlib.decompress(dframe))
+    features_all.append(id_column)
+    features = filter(lambda x: x in df.columns, features_all)
+    X = df[features].as_matrix()
+    y = df[class_label]
+    class_list = numpy.unique(y).tolist()
+
+    if not train_set_split_var or train_set_split_var == 'None':
+        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=float(1-train_set_size))
+        #We don't actually want the test set size to change. We want it to be constant as we change the train set size so we can compare apples to apples,
+        #so let's lock it at 20% (only less if the train size is > 80%)
+        test_set_size = min((1-train_set_size),0.2)
+        X_test = X_test[range(0,int(len(y)*test_set_size)),]
+        y_test = y_test.head(int(len(y)*test_set_size))
+    else:
+        split_indx = numpy.where(features == train_set_split_var)[0]
+        X = numpy.delete(X,split_indx,1)
+        X_train = X[numpy.array(df[train_set_split_var]==1),]
+        X_test = X[numpy.array(df[train_set_split_var]==0),]
+        y_train = y[numpy.array(df[train_set_split_var]==1)]
+        y_test = y[numpy.array(df[train_set_split_var]==0)]
+    #save off and remove the id_column for later output. Make sure to get rid of id_column from features!
+    test_ids = X_test[:,len(features)-1]
+    X_train = numpy.delete(X_train,len(features)-1,1)
+    X_test = numpy.delete(X_test,len(features)-1,1)
+    features = features[0:len(features)-1]
+
+    class_list_y_train = numpy.unique(y_train).tolist()
+    class_list_y_test = numpy.unique(y_test).tolist()
+    if (class_list != class_list_y_train) or (class_list != class_list_y_test):
+        plpy.error("Train test split caused a subset with missing classes.")
+
+    #run weights
+    sample_representation = y_train.value_counts()
+    total_samples = sum(sample_representation)
+    sample_weights = None
+    if not class_weights:
+        sample_weights = map(
+                lambda s: total_samples*1.0/sample_representation[s]
+                                /
+                sum([total_samples*1.0/sample_representation[c] for c in sample_representation.keys()])
+                ,
+                y_train
+            )
+    else:
+        #User-supplied class-weights
+        class_weights_dict = ast.literal_eval(re.sub("[\\t]","",class_weights).strip())
+        sample_weights = map(lambda s: class_weights_dict[s], y_train)
+
+    #Train gradient boosted trees
+    p_list = [p.split('=') for p in ast.literal_eval(re.sub("[\\t]","",params).strip())]
+    params_dict = dict([(k, ast.literal_eval(v.strip())) for k,v in p_list])

Review Comment:
   I tested xgboost directly and it does not fail if there is an unexpected parameter. I think it is OK to preserve that behavior and mention it in the documentation.
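
   For context, a minimal sketch of the behavior described above (not taken from the PR; the toy data and the 'not_a_real_param' key are made up, and recent xgboost releases may print a warning about unused parameters rather than ignoring them silently):

   ```python
   import numpy as np
   import xgboost as xgb

   # Tiny synthetic binary-classification dataset, for illustration only.
   X = np.random.rand(20, 3)
   y = np.random.randint(0, 2, size=20)
   dtrain = xgb.DMatrix(X, label=y)

   # 'not_a_real_param' is not a recognized xgboost parameter; training
   # still runs to completion instead of raising an error.
   params = {'max_depth': 2, 'eta': 0.1, 'objective': 'binary:logistic',
             'not_a_real_param': 42}
   booster = xgb.train(params, dtrain, num_boost_round=2)
   ```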



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
