This is an automated email from the ASF dual-hosted git repository.

mboehm7 pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/systemds.git


The following commit(s) were added to refs/heads/main by this push:
     new 85164b3de9 [SYSTEMDS-3625] Learned sample selection (initial examples, baselines)
85164b3de9 is described below

commit 85164b3de979aaf1d29a85fd0f9775da1007bf63
Author: Matthias Boehm <[email protected]>
AuthorDate: Sat Oct 7 21:07:01 2023 +0200

    [SYSTEMDS-3625] Learned sample selection (initial examples, baselines)
    
    This patch adds an initial example of a script for simultaneously
    learning a logreg model and a sample selection, along with some
    basic baselines (full data, uniform samples). Future extensions
    should tune the objectives/gradients, and investigate stronger
    baselines such as clustering and tuple selection based on gradient
    (dis)similarity on fixed models.
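    
    For reference, the joint training in 4_CombinedTraining.dml minimizes
    the following objective (a sketch in LaTeX notation; \rho denotes reg,
    \tau denotes reg_thr, and h = sigmoid(X w + icpt)):
    
      \mathcal{L}(w, S) = -\tfrac{1}{n} \sum_{i=1}^{n} \big( y_i \log h_i
          + (1 - y_i) \log(1 - h_i) \big) + \tfrac{\rho}{2} S^\top S
    
    with gradient steps
    
      \nabla_w = X^\top \big( \mathbb{1}[S > \tau] \odot (h - y) \big)
          / \textstyle\sum_i \mathbb{1}[S_i > \tau],
      \qquad \nabla_S = \rho S + 4 \rho \, ( |h - y| - 0.5 ),
    
    where S is clipped to [0, 1] after each update.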
---
 .../staging/learnedSampling/1_Data_Model_Prep.dml  | 23 ++++++++++
 .../learnedSampling/2_Baseline_Sampling.dml        | 27 ++++++++++++
 .../staging/learnedSampling/3_BasicTraining.dml    | 38 ++++++++++++++++
 .../staging/learnedSampling/4_CombinedTraining.dml | 51 ++++++++++++++++++++++
 scripts/staging/learnedSampling/notes.txt          | 12 +++++
 5 files changed, 151 insertions(+)

diff --git a/scripts/staging/learnedSampling/1_Data_Model_Prep.dml b/scripts/staging/learnedSampling/1_Data_Model_Prep.dml
new file mode 100644
index 0000000000..2a6be1a2c0
--- /dev/null
+++ b/scripts/staging/learnedSampling/1_Data_Model_Prep.dml
@@ -0,0 +1,23 @@
+# -- Train: Accuracy (%): 85.07940686145477
+# -- Test: Accuracy (%): 85.04146616156444
+# -- ~83% w/o intercept
+
+F = read("./data/Adult.csv", data_type="frame", format="csv", header=FALSE);
+
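+# encode the Adult frame: recode the label (column 15), dummy-code the categorical features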
+jspec2 = "{ ids:true, recode:[15], dummycode:[2,4,6,7,8,9,10,14]}"
+[X,M] = transformencode(target=F, spec=jspec2);
+y = X[,ncol(X)]; # recoded label (last column)
+X = X[,2:(ncol(X)-1)] # feature columns (drop first column and the label)
+X = scale(X=X) # standardize features
+
+[Xtrain,Xtest,ytrain,ytest] = split(X=X,Y=y,f=0.7,cont=FALSE,seed=7)
+
+# learn model
+B = multiLogReg(X=Xtrain, Y=ytrain, maxii=50, icpt=2, reg=0.001, verbose=TRUE);
+[M,yhat,acc] = multiLogRegPredict(X=Xtrain, B=B, Y=ytrain, verbose=TRUE);
+[M,yhat,acc] = multiLogRegPredict(X=Xtest, B=B, Y=ytest, verbose=TRUE);
+
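+# persist features, labels, and the initial model for scripts 2-4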
+write(X, "data/Adult_X.csv", format="csv")
+write(y, "data/Adult_y.csv", format="csv")
+write(B, "data/Adult_W.csv", format="csv")
+
diff --git a/scripts/staging/learnedSampling/2_Baseline_Sampling.dml b/scripts/staging/learnedSampling/2_Baseline_Sampling.dml
new file mode 100644
index 0000000000..e600cd43e0
--- /dev/null
+++ b/scripts/staging/learnedSampling/2_Baseline_Sampling.dml
@@ -0,0 +1,27 @@
+# -- Baseline sampling with f=0.1, nrow=3256.0
+# ---- Train Accuracy (%): 86.13587457731326
+# ---- Test Accuracy (%): 84.90836490222176
+# -- Baseline sampling with f=0.01, nrow=326.0
+# ---- Train Accuracy (%): 91.7933130699088
+# ---- Test Accuracy (%): 82.24633971536808
+# -- Baseline sampling with f=0.001, nrow=33.0
+# ---- Train Accuracy (%): 100.0
+# ---- Test Accuracy (%): 69.39694890959352
+
+X = read("data/Adult_X.csv")
+y = read("data/Adult_y.csv")
+B = read("data/Adult_W.csv")
+
+[Xtrain,Xtest,ytrain,ytest] = split(X=X,Y=y,f=0.7,cont=FALSE,seed=7)
+
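+# uniform-sampling baselines with decreasing sample fractions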
+sf = matrix("0.1 0.01 0.001", rows=3, cols=1)
+for(i in 1:nrow(sf)) {
+  sfi = as.scalar(sf[i]);
+  print("Baseline uniform sampling with f="+sfi+", nrow="+round(nrow(X)*sfi))
+  [Xtrain2,Xtest2,ytrain2,ytest2] = split(X=X,Y=y,f=sfi,cont=FALSE,seed=7)
+  B = multiLogReg(X=Xtrain2, Y=ytrain2, maxii=50, icpt=2, reg=0.001, verbose=FALSE);
+
+  [M,yhat,acc] = multiLogRegPredict(X=Xtrain2, B=B, Y=ytrain2, verbose=TRUE);
+  [M,yhat,acc] = multiLogRegPredict(X=Xtest, B=B, Y=ytest, verbose=TRUE);
+}
+
diff --git a/scripts/staging/learnedSampling/3_BasicTraining.dml b/scripts/staging/learnedSampling/3_BasicTraining.dml
new file mode 100644
index 0000000000..7b37ca33b3
--- /dev/null
+++ b/scripts/staging/learnedSampling/3_BasicTraining.dml
@@ -0,0 +1,38 @@
+X = read("data/Adult_X.csv")
+y = read("data/Adult_y.csv")
+B = read("data/Adult_W.csv")
+
+[Xtrain,Xtest,ytrain,ytest] = split(X=X,Y=y,f=0.7,cont=FALSE,seed=7)
+
+X = Xtrain;
+y = ytrain;
+
+# vanilla training, either from scratch (random init) or warm-started from the pre-trained model B
+w = rand(rows=ncol(X), cols=1, min=0, max=0.05);
+icpt = 0
+#w = B[1:ncol(X), ];
+#icpt = as.scalar(B[nrow(B),])
+
+maxiter = 200;
+i = 0
+lr = 15
+lrdecay = 1.05
+
+y = y==1; # map labels {1,2} -> {1,0}
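+# batch gradient descent on the binary logistic loss with decaying learning rate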
+while (i < maxiter) {
+  z = X %*% w + icpt
+  ht = sigmoid(z) #yhat
+  obj = -1/nrow(X) * sum(y*log(ht) + (1-y)*log(1-ht)) # binary cross-entropy
+  grad = (t(X) %*% (ht-y)) / nrow(X)
+
+  print("Iteration "+i+": "+obj+" "+sum(grad))
+  w = w - lr * grad;
+  i = i+1
+  lr = lr/lrdecay
+}
+y = y+2*(y==0); # map labels back {1,0} -> {1,2}
+
+
+[M,yhat,acc] = multiLogRegPredict(X=X, B=w, Y=y, verbose=TRUE);
+[M,yhat,acc] = multiLogRegPredict(X=Xtest, B=w, Y=ytest, verbose=TRUE);
+
diff --git a/scripts/staging/learnedSampling/4_CombinedTraining.dml b/scripts/staging/learnedSampling/4_CombinedTraining.dml
new file mode 100644
index 0000000000..807da398e5
--- /dev/null
+++ b/scripts/staging/learnedSampling/4_CombinedTraining.dml
@@ -0,0 +1,51 @@
+X = read("data/Adult_X.csv")
+y = read("data/Adult_y.csv")
+B = read("data/Adult_W.csv")
+
+[Xtrain,Xtest,ytrain,ytest] = split(X=X,Y=y,f=0.7,cont=FALSE,seed=7)
+
+X = Xtrain;
+y = ytrain;
+
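+# per-tuple selection scores in [0,1]; start with all tuples selected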
+S = matrix(1, rows=nrow(X), cols=1)
+w = B[1:ncol(X), ];
+icpt = as.scalar(B[nrow(B),])
+
+maxiter = 500;
+i = 0
+lr = 15
+lrdecay = 1.05
+reg = 0.75
+reg_thr = 0.85
+
+y = y==1; # map labels {1,2} -> {1,0}
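+# joint descent: update weights w every iteration, selection scores S after a warm-up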
+while (i < maxiter) {
+  z = X %*% w + icpt
+  ht = sigmoid(z) #yhat
+  obj = -1/nrow(X) * sum(y*log(ht) + (1-y)*log(1-ht)) + 0.5 * reg * as.scalar(t(S)%*%S)
+  grad_w = (t(X) %*% ((S>reg_thr)*(ht-y))) / sum(S>reg_thr) # weight gradient over selected tuples
+  grad_s = reg*S + 4*reg*(abs(ht-y)-0.5); # selection-score gradient
+
+  print("Iteration "+i+": obj="+obj+" grad="+sum(grad_w)+" S="+sum(S)+" 
"+sum(S>reg_thr))
+  w = w - lr * grad_w;
+  if( i > 100 ) # warm-up: start updating selection scores after 100 iterations
+    S = min(max(S - lr * grad_s, 0), 1); # gradient step on S, clipped to [0,1]
+  i = i+1
+  lr = lr/lrdecay
+}
+y = y+2*(y==0); # map labels back {1,0} -> {1,2}
+
+
+[M,yhat,acc] = multiLogRegPredict(X=X, B=w, Y=y, verbose=TRUE);
+[M,yhat,acc] = multiLogRegPredict(X=Xtest, B=w, Y=ytest, verbose=TRUE);
+
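+# extract the learned sample: tuples whose selection score exceeds reg_thr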
+Xtrain2 = removeEmpty(target=X, margin="rows", select=(S>reg_thr));
+ytrain2 = removeEmpty(target=y, margin="rows", select=(S>reg_thr));
+
+print("Model training on sample "+nrow(Xtrain2))
+print(" -- with labels 1: "+sum(ytrain2==1)+", 2: "+sum(ytrain2==2))
+B = multiLogReg(X=Xtrain2, Y=ytrain2, maxii=50, icpt=0, reg=0.001, verbose=TRUE);
+[M,yhat,acc] = multiLogRegPredict(X=Xtrain2, B=B, Y=ytrain2, verbose=TRUE);
+[M,yhat,acc] = multiLogRegPredict(X=Xtest, B=B, Y=ytest, verbose=TRUE);
+
diff --git a/scripts/staging/learnedSampling/notes.txt b/scripts/staging/learnedSampling/notes.txt
new file mode 100644
index 0000000000..fd02d49c4a
--- /dev/null
+++ b/scripts/staging/learnedSampling/notes.txt
@@ -0,0 +1,12 @@
+Data Preparation
+
+mkdir -p data;
+chmod 755 data;
+curl https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data -o data/Adult.csv;
+sed -i '$d' data/Adult.csv; # remove the empty last line of the file
+
+Run Individual Scripts
+
+java -Xmx4g -Xms4g -cp ./lib/*:./SystemDS.jar org.apache.sysds.api.DMLScript \
+  -f XYZ.dml -debug -stats -explain
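+(XYZ is a placeholder for one of the scripts above, e.g., 1_Data_Model_Prep.dml)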
+
