This is an automated email from the git hooks/post-receive script.

sebastic-guest pushed a commit to branch upstream-master
in repository pktools.

commit a73cc3a791804ba126b42fef1c55b6ef92f3b894
Author: Pieter Kempeneers <kempe...@gmail.com>
Date:   Fri Jan 4 18:01:07 2013 +0100

    cross validation for pkclassify_nn
---
 ChangeLog                        |   4 +
 src/algorithms/FeatureSelector.h | 516 ++++-----------------------------------
 src/algorithms/myfann_cpp.h      | 171 ++++++++++++-
 src/algorithms/svm.cpp           |  56 +++--
 src/algorithms/svm.h             |   1 +
 src/apps/Makefile.am             |   6 +-
 src/apps/Makefile.in             |  45 +++-
 src/apps/pkclassify_nn.cc        | 131 +++++++---
 src/apps/pkclassify_nn.h         |  83 +++++++
 src/apps/pkclassify_svm.cc       |  69 ++++--
 src/apps/pkfs_svm.cc             | 282 +++++++++++----------
 11 files changed, 686 insertions(+), 678 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index 6364779..de06614 100755
--- a/ChangeLog
+++ b/ChangeLog
@@ -62,8 +62,12 @@ version 2.4
  - pkclassify_svm
       in verbose mode, do not output the input file if no input data was defined
        update of header information
+ - pkclassify_nn
+       support for cross validation
  - pkfs_svm
        feature selection tool for svm classification (added)
+ - pkfs_nn
+       feature selection tool for nn classification (added)
  - pkascii2ogr
       tool to create simple vector files from coordinates in an ASCII file (points or polygon)
  - pksensormodel
diff --git a/src/algorithms/FeatureSelector.h b/src/algorithms/FeatureSelector.h
index 503dfac..53cb7d0 100644
--- a/src/algorithms/FeatureSelector.h
+++ b/src/algorithms/FeatureSelector.h
@@ -38,86 +38,19 @@ class FeatureSelector
  public:
   FeatureSelector(){};
   ~FeatureSelector(){};
-  template<class T> double forwardUnivariate(Vector2d<T>& v1, Vector2d<T>& v2, 
double (*getCost)(const Vector2d<T>&, const Vector2d<T>&, double prior1, double 
prior2), list<int>& subset, int maxFeatures, double prior1=0.5, double 
prior2=0.5, bool verbose=false);
-  template<class T> double forwardUnivariate(vector< Vector2d<T> >& v, double 
(*getCost)(const vector< Vector2d<T> >&), list<int>& subset, int maxFeatures, 
bool verbose=false);
-  template<class T> double forwardUnivariate(map<string, Vector2d<T> > &v, 
map<string, vector<T> > &y, double (*getCost)(map<string, Vector2d<T> >&, 
map<string, vector<T> >&), list<int>& subset, int maxFeatures, bool 
verbose=false);
-    template<class T> double forward(Vector2d<T>& v1, Vector2d<T>& v2, double 
(*getCost)(const Vector2d<T>&, const Vector2d<T>&, double prior1, double 
prior2), list<int>& subset, int maxFeatures, double prior1=0.5, double 
prior2=0.5, bool verbose=false);
-    template<class T> double forward(vector< Vector2d<T> >& v, double 
(*getCost)(const vector< Vector2d<T> >&), list<int>& subset, int maxFeatures, 
bool verbose=false);
-  template<class T> double forward(map<string, Vector2d<T> > &v, map<string, 
vector<T> > &y, double (*getCost)(map<string, Vector2d<T> >&, map<string, 
vector<T> >&), list<int>& subset, int maxFeatures, bool verbose=false);
-    template<class T> double backward(vector< Vector2d<T> >& v, double 
(*getCost)(const vector< Vector2d<T> >&), list<int>& subset, int minFeatures, 
bool verbose=false);
-    template<class T> double backward(Vector2d<T>& v1, Vector2d<T>& v2, double 
(*getCost)(const Vector2d<T>&, const Vector2d<T>&, double prior1, double 
prior2), list<int>& subset, int minFeatures, double prior1=0.5, double 
prior2=0.5,bool verbose=false);
-    template<class T> double backward(map<string, Vector2d<T> > &v, 
map<string, vector<T> > &y, double (*getCost)(map<string, Vector2d<T> >&, 
map<string, vector<T> >&), list<int>& subset, int minFeatures, bool 
verbose=false);
-    template<class T> double floating(vector< Vector2d<T> >& v, double 
(*getCost)(const vector< Vector2d<T> >&), list<int>& subset, int maxFeatures=0, 
bool verbose=false);
-    template<class T> double floating(Vector2d<T>& v1, Vector2d<T>& v2, double 
(*getCost)(const Vector2d<T>&, const Vector2d<T>&, double prior1, double 
prior2), list<int>& subset, int maxFeatures=0, double prior1=0.5, double 
prior2=0.5,bool verbose=false);
-    template<class T> double floating(map<string, Vector2d<T> > &v, 
map<string, vector<T> > &y, double (*getCost)(map<string, Vector2d<T> >&, 
map<string, vector<T> >&), list<int>& subset, int maxFeatures, bool 
verbose=false);
-  template<class T> double bruteForce(Vector2d<T>& v1, Vector2d<T>& v2, double 
(*getCost)(const Vector2d<T>&, const Vector2d<T>&, double prior1, double 
prior2), list<int>& subset, int maxFeatures=0, double prior1=0.5, double 
prior2=0.5, bool verbose=false);
+  template<class T> double forwardUnivariate(vector< Vector2d<T> >& v, double 
(*getCost)(const vector< Vector2d<T> >&), list<int>& subset, int maxFeatures, 
short verbose=0);
+  template<class T> double forward(vector< Vector2d<T> >& v, double 
(*getCost)(const vector< Vector2d<T> >&), list<int>& subset, int maxFeatures, 
short verbose=0);
+  template<class T> double backward(vector< Vector2d<T> >& v, double 
(*getCost)(const vector< Vector2d<T> >&), list<int>& subset, int minFeatures, 
short verbose=0);
+  template<class T> double floating(vector< Vector2d<T> >& v, double 
(*getCost)(const vector< Vector2d<T> >&), list<int>& subset, int maxFeatures=0, 
short verbose=0);
+  template<class T> double bruteForce(vector< Vector2d<T> >& v, double 
(*getCost)(const vector< Vector2d<T> >&), list<int>& subset, int maxFeatures=0, 
short verbose=0);
   
   private:
-    template<class T> double addFeature(vector< Vector2d<T> >& v, double 
(*getCost)(const vector< Vector2d<T> >&), list<int>& subset, bool 
verbose=false);
-    template<class T> double addFeature(Vector2d<T>& v1, Vector2d<T>& v2, 
double (*getCost)(const Vector2d<T>&, const Vector2d<T>&, double prior1, double 
prior2), list<int>& subset, double prior1=0.5, double prior2=0.5, bool 
verbose=false);
-    template<class T> double addFeature(map<string, Vector2d<T> > &v, 
map<string, vector<T> > &y, double (*getCost)(map<string, Vector2d<T> >&, 
map<string, vector<T> >&), list<int>& subset, bool verbose=false);
-  template<class T> double removeFeature(vector< Vector2d<T> >& v, double 
(*getCost)(const vector< Vector2d<T> >&), list<int>& subset, int& r, bool 
verbose=false);  
-    template<class T> double removeFeature(Vector2d<T>& v1, Vector2d<T>& v2, 
double (*getCost)(const Vector2d<T>&, const Vector2d<T>&, double prior1, double 
prior2), list<int>& subset,int& r, double prior1=0.5, double prior2=0.5, bool 
verbose=false);
-    template<class T> double removeFeature(map<string, Vector2d<T> > &v, 
map<string, vector<T> > &y, double (*getCost)(map<string, Vector2d<T> >&, 
map<string, vector<T> >&), list<int>& subset,int& r, bool verbose=false);
+    template<class T> double addFeature(vector< Vector2d<T> >& v, double 
(*getCost)(const vector< Vector2d<T> >&), list<int>& subset, short verbose=0);
+  template<class T> double removeFeature(vector< Vector2d<T> >& v, double 
(*getCost)(const vector< Vector2d<T> >&), list<int>& subset, int& r, short 
verbose=0);  
 };
 
 //sequential forward selection Univariate (N single best features)
-template<class T> double FeatureSelector::forwardUnivariate(Vector2d<T>& v1, 
Vector2d<T>& v2, double (*getCost)(const Vector2d<T>&, const Vector2d<T>&, 
double prior1, double prior2), list<int>& subset, int maxFeatures, double 
prior1, double prior2, bool verbose){
-  int maxLevels=v1[0].size();
-  if(!maxFeatures)
-    maxFeatures=maxLevels;
-  int k=subset.size();
-  if(k>=maxFeatures)
-    return -1;
-  vector<PosValue> cost(maxLevels);
-  list<int> tmpset=subset;//temporary set of selected features (levels)
-  Vector2d<T> tmp1(v1.size());
-  Vector2d<T> tmp2(v2.size());
-  for(int ilevel=0;ilevel<maxLevels;++ilevel){
-    if(find(tmpset.begin(),tmpset.end(),ilevel)==tmpset.end()){
-      tmpset.push_back(ilevel);
-      v1.selectCols(tmpset,tmp1);
-      v2.selectCols(tmpset,tmp2);
-      try{
-       PosValue pv;
-       pv.position=ilevel;
-       pv.value=getCost(tmp1,tmp2,prior1,prior2);
-       cost[ilevel]=pv;
-      }
-      catch(...){
-       PosValue pv;
-       pv.position=ilevel;
-       pv.value=-1;
-       cost[ilevel]=pv;
-      }
-      tmpset.pop_back();
-    }
-  }
-  sort(cost.begin(),cost.end(),Compare_PosValue());//decreasing order
-  int ilevel=0;
-  while((subset.size()<maxFeatures)&&(ilevel<maxLevels)){
-    if(cost[ilevel].value>0)
-      subset.push_back(cost[ilevel].position);
-    if(verbose)
-      cout << "feature " << subset.back() << " has cost: " << 
cost[ilevel].value << endl;
-    ++ilevel;
-  }
-  double maxCost=-1;
-  while(subset.size()){
-    v1.selectCols(subset,tmp1);
-    v2.selectCols(subset,tmp2);
-    try{
-      maxCost=getCost(tmp1,tmp2,prior1,prior2);
-    }
-    catch(...){
-      subset.pop_back();
-      continue;
-    }
-    return maxCost;
-  }
-}
-
-template<class T> double FeatureSelector::forwardUnivariate(vector< 
Vector2d<T> >& v, double (*getCost)(const vector< Vector2d<T> >&), list<int>& 
subset, int maxFeatures=0, bool verbose){
+template<class T> double FeatureSelector::forwardUnivariate(vector< 
Vector2d<T> >& v, double (*getCost)(const vector< Vector2d<T> >&), list<int>& 
subset, int maxFeatures=0, short verbose){
   int maxLevels=v[0][0].size();
   if(!maxFeatures)
     maxFeatures=maxLevels;
@@ -176,23 +109,7 @@ template<class T> double 
FeatureSelector::forwardUnivariate(vector< Vector2d<T>
 }
 
 //sequential forward selection Multivariate (Combination of N best features)
-template<class T> double FeatureSelector::forward(Vector2d<T>& v1, 
Vector2d<T>& v2, double (*getCost)(const Vector2d<T>&, const Vector2d<T>&, 
double prior1, double prior2), list<int>& subset, int maxFeatures, double 
prior1, double prior2, bool verbose){
-  //Select feature with the best value (get maximal cost for 1 feature)
-  double maxCost=0;
-  int maxLevels=v1[0].size();
-  if(!maxFeatures)
-    maxFeatures=maxLevels;
-  while(subset.size()<maxFeatures){
-    maxCost=addFeature(v1,v2,*getCost,subset,verbose);
-    if(verbose)
-      cout << "added " << subset.back() << ", " << subset.size() << "/" << 
maxFeatures << " features selected with cost: " << maxCost << endl;
-  }//while
-  return maxCost;
-}
-
-
-//sequential forward selection Multivariate (Combination of N best features)
-template<class T> double FeatureSelector::forward(vector< Vector2d<T> >& v, 
double (*getCost)(const vector< Vector2d<T> >&), list<int>& subset, int 
maxFeatures=0, bool verbose){
+template<class T> double FeatureSelector::forward(vector< Vector2d<T> >& v, 
double (*getCost)(const vector< Vector2d<T> >&), list<int>& subset, int 
maxFeatures=0, short verbose){
   //Select feature with the best value (get maximal cost for 1 feature)
   double maxCost=0;
   int maxLevels=v[0][0].size();
@@ -200,134 +117,41 @@ template<class T> double 
FeatureSelector::forward(vector< Vector2d<T> >& v, doub
     maxFeatures=maxLevels;
   while(subset.size()<maxFeatures){
     maxCost=addFeature(v,*getCost,subset,verbose);
-    if(verbose)
-      cout << "added " << subset.back() << ", " << subset.size() << "/" << 
maxFeatures << " features selected with cost: " << maxCost << endl;
-  }//while
-  return maxCost;
-}
-
-template<class T> double FeatureSelector::forward(map<string, Vector2d<T> > 
&v, map<string, vector<T> > &y, double (*getCost)(map<string, Vector2d<T> >&, 
map<string, vector<T> >&), list<int>& subset, int maxFeatures, bool verbose)
-{
-  //Select feature with the best value (get maximal cost for 1 feature)
-  double maxCost=0;
-  int maxLevels=(v.begin()->second)[0].size();
-  if(!maxFeatures)
-    maxFeatures=maxLevels;
-  while(subset.size()<maxFeatures){
-    maxCost=addFeature(v,y,*getCost,subset,verbose);
-    if(verbose)
-      cout << "added " << subset.back() << ", " << subset.size() << "/" << 
maxFeatures << " features selected with cost: " << maxCost << endl;
-  }//while
-  return maxCost;
-}
-
-//sequential backward selection
-template<class T> double FeatureSelector::backward(Vector2d<T>& v1, 
Vector2d<T>& v2, double (*getCost)(const Vector2d<T>&, const Vector2d<T>&, 
double prior1, double prior2), list<int>& subset, int minFeatures, double 
prior1, double prior2, bool verbose){
-  //Select features with least effect on cost when removed (obtain minFeatures 
eventually)
-  double maxCost=0;
-  int removedFeature;
-  if(subset.empty()){
-    for(int iFeature=0;iFeature<v1[0].size();++iFeature)
-      subset.push_back(iFeature);
-  }//if
-  if(subset.size()==minFeatures)
-    maxCost=getCost(v1,v2,prior1,prior2);
-  while(subset.size()>minFeatures){
-    maxCost=removeFeature(v1,v2,*getCost,subset,removedFeature,verbose);
-    if(verbose)
-      cout << "removed " << removedFeature << ", " << subset.size() << "/" << 
minFeatures << " features remain with cost: " << maxCost << endl;
+    if(verbose){
+      for(list<int>::const_iterator lit=subset.begin();lit!=subset.end();++lit)
+        cout << *lit << " ";
+      cout << endl;
+      // cout << "added " << subset.back() << ", " << subset.size() << "/" << 
maxFeatures << " features selected with cost: " << maxCost << endl;
+    }
   }//while
   return maxCost;
 }
 
 //sequential backward selection
-template<class T> double FeatureSelector::backward(vector< Vector2d<T> >& v, 
double (*getCost)(const vector< Vector2d<T> >&), list<int>& subset, int 
minFeatures, bool verbose){
+template<class T> double FeatureSelector::backward(vector< Vector2d<T> >& v, 
double (*getCost)(const vector< Vector2d<T> >&), list<int>& subset, int 
minFeatures, short verbose){
   //Select features with least effect on cost when removed (obtain minFeatures 
eventually)
   double maxCost=0;
   int removedFeature;
   if(subset.empty()){
     for(int iFeature=0;iFeature<v[0][0].size();++iFeature)
       subset.push_back(iFeature);
-  }//if
+  }
   if(subset.size()==minFeatures)
     maxCost=getCost(v);
   while(subset.size()>minFeatures){
     maxCost=removeFeature(v,*getCost,subset,removedFeature,verbose);
-    if(verbose)
-      cout << "removed " << removedFeature << ", " << subset.size() << "/" << 
minFeatures << " features remain with cost: " << maxCost << endl;
-  }//while
-  return maxCost;
-}
-
-template<class T> double FeatureSelector::backward(map<string, Vector2d<T> > 
&v, map<string, vector<T> > &y, double (*getCost)(map<string, Vector2d<T> >&, 
map<string, vector<T> >&), list<int>& subset, int minFeatures, bool verbose){
-  //Select features with least effect on cost when removed (obtain minFeatures 
eventually)
-  double maxCost=0;
-  int removedFeature;
-  if(subset.empty()){
-    for(int iFeature=0;iFeature<(v.begin()->second)[0].size();++iFeature)
-      subset.push_back(iFeature);
-  }//if
-  if(subset.size()==minFeatures)
-    maxCost=getCost(v,y);
-  while(subset.size()>minFeatures){
-    maxCost=removeFeature(v,y,*getCost,subset,removedFeature,verbose);
-    if(verbose)
-      cout << "removed " << removedFeature << ", " << subset.size() << "/" << 
minFeatures << " features remain with cost: " << maxCost << endl;
+    if(verbose){
+      for(list<int>::const_iterator lit=subset.begin();lit!=subset.end();++lit)
+        cout << *lit << " ";
+      cout << endl;
+      // cout << "removed " << removedFeature << ", " << subset.size() << "/" 
<< minFeatures << " features remain with cost: " << maxCost << endl;
+    }
   }//while
   return maxCost;
 }
 
 //floating search
-template<class T> double FeatureSelector::floating(Vector2d<T>& v1, 
Vector2d<T>& v2, double (*getCost)(const Vector2d<T>&, const Vector2d<T>&, 
double prior1, double prior2), list<int>& subset, int maxFeatures, double 
prior1, double prior2, bool verbose){
-  vector<T> cost;
-  int maxLevels=v1[0].size();
-  if(maxFeatures<1)
-    maxFeatures=maxLevels;
-//   int k=0;
-  int initialK=subset.size();
-  int k=initialK;
-  int stopK=initialK+1;
-  if(initialK>=maxFeatures)
-    return -1;
-  cost.push_back(-1);//init 0 features as cost -1
-  cost.push_back(addFeature(v1,v2,*getCost,subset,verbose));
-  ++k;
-  assert(subset.size()==k);
-  if(verbose)
-    cout << "added " << subset.back() << ", " << subset.size() << "/" << 
maxFeatures << " features selected with cost: " << cost.back() << endl;
-  while(k<maxFeatures){
-    cost.push_back(addFeature(v1,v2,*getCost,subset,verbose));
-    ++k;
-    assert(subset.size()==k);
-    if(verbose)
-    cout << "added " << subset.back() << ", " << subset.size() << "/" << 
maxFeatures << " features selected with cost: " << cost.back() << endl;
-//     while(k>1){
-    while(k>stopK){
-      int x_r;
-      double cost_r=removeFeature(v1,v2,*getCost,subset,x_r,verbose);
-      if(cost_r>cost[k-1]){
-       --k;
-       assert(subset.size()==k);
-       cost[k]=cost_r;
-       cost.pop_back();
-       if(verbose)
-         cout << "removed " << x_r << ", " << subset.size() << "/" << 
maxFeatures << " features remain with cost: " << cost_r << endl;
-       continue;
-      }
-      else if(cost_r>=0){
-       subset.push_back(x_r);
-       break;
-      }
-      else if(verbose)
-       cout << "could not remove any feature" << endl;
-      cost.pop_back();
-    }
-  }
-  return cost.back();
-}
-
-//floating search
-template<class T> double FeatureSelector::floating(vector< Vector2d<T> >& v, 
double (*getCost)(const vector< Vector2d<T> >&), list<int>& subset, int 
maxFeatures, bool verbose){
+template<class T> double FeatureSelector::floating(vector< Vector2d<T> >& v, 
double (*getCost)(const vector< Vector2d<T> >&), list<int>& subset, int 
maxFeatures, short verbose){
   vector<T> cost;
   int maxLevels=v[0][0].size();
   if(maxFeatures<1)
@@ -338,64 +162,38 @@ template<class T> double 
FeatureSelector::floating(vector< Vector2d<T> >& v, dou
   cost.push_back(-1);//init 0 features as cost -1
   cost.push_back(addFeature(v,*getCost,subset,verbose));
   ++k;
-  if(verbose)
+  if(verbose>1)
     cout << "added " << subset.back() << ", " << cost.size()-1 << "/" << 
maxFeatures << " features selected with cost: " << cost.back() << endl;
+  else if(verbose){
+    for(list<int>::const_iterator lit=subset.begin();lit!=subset.end();++lit)
+      cout << *lit << " ";
+    cout << endl;
+  }
   while(k<maxFeatures){
     cost.push_back(addFeature(v,*getCost,subset,verbose));
     ++k;
-    if(verbose)
+    if(verbose>1)
     cout << "added " << subset.back() << ", " << cost.size()-1 << "/" << 
maxFeatures << " features selected with cost: " << cost.back() << endl;
-    while(k>1){
-      int x_r;
-      double cost_r=removeFeature(v,*getCost,subset,x_r,verbose);
-      if(cost_r>cost[k-1]){
-       --k;
-       cost[k]=cost_r;
-       cost.pop_back();
-       if(verbose)
-         cout << "removed " << x_r << ", " << cost.size()-1 << "/" << 
maxFeatures << " features remain with cost: " << cost_r << endl;
-       continue;
-      }
-      else if(cost_r>=0){
-       subset.push_back(x_r);
-       break;
-      }
-      else if(verbose)
-       cout << "could not remove any feature" << endl;
-      cost.pop_back();
+    else if(verbose){
+      for(list<int>::const_iterator lit=subset.begin();lit!=subset.end();++lit)
+        cout << *lit << " ";
+      cout << " (" << cost.back() << ")" << endl;
     }
-  }
-  return cost.back();
-}
 
-//floating search
-template<class T> double FeatureSelector::floating(map<string, Vector2d<T> > 
&v, map<string, vector<T> > &y, double (*getCost)(map<string, Vector2d<T> >&, 
map<string, vector<T> >&), list<int>& subset, int maxFeatures, bool verbose){
-  vector<T> cost;
-  int maxLevels=(v.begin()->second)[0].size();
-  if(maxFeatures<1)
-    maxFeatures=maxLevels;
-  int k=subset.size();
-  if(k>=maxFeatures)
-    return -1;
-  cost.push_back(-1);//init 0 features as cost -1
-  cost.push_back(addFeature(v,y,*getCost,subset,verbose));
-  ++k;
-  if(verbose)
-    cout << "added " << subset.back() << ", " << cost.size()-1 << "/" << 
maxFeatures << " features selected with cost: " << cost.back() << endl;
-  while(k<maxFeatures){
-    cost.push_back(addFeature(v,y,*getCost,subset,verbose));
-    ++k;
-    if(verbose)
-    cout << "added " << subset.back() << ", " << cost.size()-1 << "/" << 
maxFeatures << " features selected with cost: " << cost.back() << endl;
     while(k>1){
       int x_r;
-      double cost_r=removeFeature(v,y,*getCost,subset,x_r,verbose);
+      double cost_r=removeFeature(v,*getCost,subset,x_r,verbose);
       if(cost_r>cost[k-1]){
        --k;
        cost[k]=cost_r;
        cost.pop_back();
-       if(verbose)
+       if(verbose>1)
          cout << "removed " << x_r << ", " << cost.size()-1 << "/" << 
maxFeatures << " features remain with cost: " << cost_r << endl;
+        else if(verbose){
+          for(list<int>::const_iterator 
lit=subset.begin();lit!=subset.end();++lit)
+            cout << *lit << " ";
+          cout << " (" << cost.back() << ")" << endl;
+        }
        continue;
       }
       else if(cost_r>=0){
@@ -411,8 +209,8 @@ template<class T> double 
FeatureSelector::floating(map<string, Vector2d<T> > &v,
 }
 
 //brute force search (search for all possible combinations and select best)
-template<class T> double FeatureSelector::bruteForce(Vector2d<T>& v1, 
Vector2d<T>& v2, double (*getCost)(const Vector2d<T>&, const Vector2d<T>&, 
double prior1, double prior2), list<int>& subset, int maxFeatures, double 
prior1, double prior2, bool verbose){
-  int maxLevels=v1[0].size();
+template<class T> double FeatureSelector::bruteForce(vector< Vector2d<T> >& v, 
double (*getCost)(const vector< Vector2d<T> >&), list<int>& subset, int 
maxFeatures, short verbose){
+  int maxLevels=v[0][0].size();
   if(maxFeatures<1)
     maxFeatures=maxLevels;
   int k=subset.size();
@@ -420,26 +218,24 @@ template<class T> double 
FeatureSelector::bruteForce(Vector2d<T>& v1, Vector2d<T
     return -1;
 //   gslmm::combination c(v1[0].size(),maxFeatures,false);
   gsl_combination *c;
-  c=gsl_combination_calloc(v1[0].size(),maxFeatures);
+  c=gsl_combination_calloc(v[0][0].size(),maxFeatures);
   
   list<int> tmpset;//temporary set of selected features (levels)
-  Vector2d<T> tmp1(v1.size());
-  Vector2d<T> tmp2(v2.size());
+  vector< Vector2d<T> > tmpv(v.size());
   list<int> catchset;//restore set in case of catch all the way to last level 
(no better cost)
   //initialize maxCost with actual cost for current subset (-1 if empty 
subset) 
   double maxCost=-1;
   double cost;
   if(subset.size()>=maxLevels)
     return maxCost;
-//   c.first();
   gsl_combination_next(c);
   do{
     for(int ifeature=0;ifeature<maxFeatures;++ifeature)
       tmpset.push_back(c->data[ifeature]);
-    v1.selectCols(tmpset,tmp1);
-    v2.selectCols(tmpset,tmp2);
+    for(int iclass=0;iclass<v.size();++iclass)
+      v[iclass].selectCols(tmpset,tmpv[iclass]);
     try{
-      cost=getCost(tmp1,tmp2,prior1,prior2);
+      cost=getCost(tmpv);
     }
     catch(...){ //singular matrix encountered
       catchset=tmpset;//this tmpset resulted in failure of getCost
@@ -447,7 +243,6 @@ template<class T> double 
FeatureSelector::bruteForce(Vector2d<T>& v1, Vector2d<T
        cout << "Could not get cost from set: " << endl;
        gsl_combination_fprintf(stdout,c," %u");
        printf("\n");
-//     c.print(stdout);
       }      
       tmpset.clear();
       continue;
@@ -470,61 +265,7 @@ template<class T> double 
FeatureSelector::bruteForce(Vector2d<T>& v1, Vector2d<T
   return maxCost;
 }
 
-template<class T> double FeatureSelector::addFeature(Vector2d<T>& v1, 
Vector2d<T>& v2, double (*getCost)(const Vector2d<T>&, const Vector2d<T>&, 
double prior1, double prior2), list<int>& subset, double prior1, double prior2, 
bool verbose){
-  //select feature with the best value (get maximal cost for 1 feature)
-  list<int> tmpset=subset;//temporary set of selected features (levels)
-  list<int> catchset;//restore set in case of catch all the way to last level 
(no better cost)
-  //initialize maxCost with actual cost for current subset (-1 if empty 
subset) 
-  double maxCost=-1;
-//   if(subset.size()){
-//     try{
-//       v1.selectCols(subset,tmp1);
-//       v2.selectCols(subset,tmp2);
-//       maxCost=getCost(tmp1,tmp2);
-//     }
-//     catch(double determinant){
-//       return -1;
-//     }
-//   }
-  double cost;
-  int maxLevels=v1[0].size();
-  if(subset.size()>=maxLevels)
-    return maxCost;
-  for(int ilevel=0;ilevel<maxLevels;++ilevel){
-    Vector2d<T> tmp1(v1.size());
-    Vector2d<T> tmp2(v2.size());
-    if(find(tmpset.begin(),tmpset.end(),ilevel)!=tmpset.end())
-      continue;
-    tmpset.push_back(ilevel);
-    v1.selectCols(tmpset,tmp1);
-    v2.selectCols(tmpset,tmp2);
-    try{
-      cost=getCost(tmp1,tmp2,prior1,prior2);
-    }
-    catch(...){
-      catchset=tmpset;//this tmpset resulted in singular matrix
-      if(verbose)
-       cout << "Could not add feature " << tmpset.back() << endl;
-      tmpset.pop_back();
-      continue;
-    }
-    if(maxCost<cost){ //level with better cost is found
-      maxCost=cost;
-      subset=tmpset;
-    }
-    tmpset.pop_back();
-  }
-  if(maxCost<0) //no level added to better maxCost than current subset 
(catchset)
-    subset=catchset;
-  //test: assert list contains no duplicate elements
-  for(list<int>::iterator lit=subset.begin();lit!=subset.end();++lit){
-    list<int>::iterator lit2=lit;//start searching from next element
-    assert(find(++lit2,subset.end(),*lit)==subset.end());
-  }
-  return maxCost;
-}
-
-template<class T> double FeatureSelector::addFeature(vector< Vector2d<T> >& v, 
double (*getCost)(const vector< Vector2d<T> >&), list<int>& subset, bool 
verbose){
+template<class T> double FeatureSelector::addFeature(vector< Vector2d<T> >& v, 
double (*getCost)(const vector< Vector2d<T> >&), list<int>& subset, short 
verbose){
   //select feature with the best value (get maximal cost for 1 feature)
   list<int> tmpset=subset;//temporary set of selected features (levels)
   vector< Vector2d<T> > tmp(v.size());
@@ -569,112 +310,7 @@ template<class T> double 
FeatureSelector::addFeature(vector< Vector2d<T> >& v, d
   return maxCost;
 }
 
-template<class T> double FeatureSelector::addFeature(map<string, Vector2d<T> > 
&v, map<string, vector<T> > &y, double (*getCost)(map<string, Vector2d<T> >&, 
map<string, vector<T> >&), list<int>& subset, bool verbose){
-  //select feature with the best value (get maximal cost for 1 feature)
-  list<int> tmpset=subset;//temporary set of selected features (levels)
-  map<string, Vector2d<double> > tmp;
-  list<int> catchset;//restore set in case of catch all the way to last level 
(no better cost)
-  //initialize maxCost with actual cost for current subset (-1 if empty 
subset) 
-  map<string, Vector2d<double> >::const_iterator mit;
-  double maxCost=-1;
-  double cost;
-  int maxLevels=(v.begin()->second)[0].size();
-  if(subset.size()>=maxLevels)
-    return maxCost;
-  for(int ilevel=0;ilevel<maxLevels;++ilevel){
-    if(find(tmpset.begin(),tmpset.end(),ilevel)!=tmpset.end())
-      continue;
-    tmpset.push_back(ilevel);
-    for(mit=v.begin();mit!=v.end();++mit){
-      string theName=mit->first;
-      v[theName].selectCols(tmpset,tmp[theName]);
-    }
-    try{
-      cost=getCost(tmp,y);
-    }
-    catch(...){
-      catchset=tmpset;//this tmpset resulted in singular matrix
-      if(verbose)
-       cout << "Could not add feature " << tmpset.back() << endl;
-      tmpset.pop_back();
-      continue;
-    }
-    if(maxCost<cost){ //level with better cost is found
-      maxCost=cost;
-      subset=tmpset;
-    }
-    tmpset.pop_back();
-  }
-  if(maxCost<0) //no level added to better maxCost than current subset 
(catchset)
-    subset=catchset;
-  //test: assert list contains no duplicate elements
-  for(list<int>::iterator lit=subset.begin();lit!=subset.end();++lit){
-    list<int>::iterator lit2=lit;//start searching from next element
-    assert(find(++lit2,subset.end(),*lit)==subset.end());
-  }
-  return maxCost;
-}
-
-template<class T> double FeatureSelector::removeFeature(Vector2d<T>& v1, 
Vector2d<T>& v2, double (*getCost)(const Vector2d<T>&, const Vector2d<T>&, 
double prior1, double prior2), list<int>& subset, int& r, double prior1, double 
prior2, bool verbose){
-  //find the feature that has the least effect on the cost when it is removed 
from subset
-  list<int> tmpset=subset;//temporary set of selected features (levels)
-  Vector2d<T> tmp1(v1.size());
-  Vector2d<T> tmp2(v2.size());
-  int nFeatures=subset.size();
-  list<int> catchset;//restore set in case of catch all the way to last level 
(no better cost)
-  //initialize maxCost with actual cost for current subset (-1 if empty 
subset) 
-  double maxCost=-1;
-//   if(subset.size()){
-//     try{
-//       v1.selectCols(subset,tmp1);
-//       v2.selectCols(subset,tmp2);
-//       maxCost=getCost(tmp1,tmp2);
-//     }
-//     catch(double determinant){
-//       return -1;
-//     }
-//   }
-  int last;
-  double cost;
-  int maxLevels=v1[0].size();
-  if(subset.size()>maxLevels||subset.empty()){
-    return maxCost;
-  }
-  list<int>::const_iterator it;
-  for(int i=0;i<nFeatures;++i){
-    last=tmpset.back();
-    tmpset.pop_back();
-    v1.selectCols(tmpset,tmp1);
-    v2.selectCols(tmpset,tmp2);
-    try{
-      cost=getCost(tmp1,tmp2,prior1,prior2);
-    }
-    catch(...){
-      catchset=tmpset;//this tmpset resulted in singular matrix
-      if(verbose)
-       cout << "Could not remove feature " << last << endl;
-      tmpset.push_front(last);
-      continue;
-    }
-    if(maxCost<cost){ //level with better cost is found
-      maxCost=cost;
-      subset=tmpset;
-      r=last;
-    }
-    tmpset.push_front(last);
-  }
-  if(maxCost<0){//all levels removed were catched
-    subset=catchset;
-  }
-  //test: assert list contains no duplicate elements
-  for(list<int>::iterator lit=subset.begin();lit!=subset.end();++lit){
-    list<int>::iterator lit2=lit;//start searching from next element
-    assert(find(++lit2,subset.end(),*lit)==subset.end());
-  }
-  return maxCost;
-}
-
-template<class T> double FeatureSelector::removeFeature(vector< Vector2d<T> >& 
v, double (*getCost)(const vector< Vector2d<T> >&), list<int>& subset, int& r, 
bool verbose){
+template<class T> double FeatureSelector::removeFeature(vector< Vector2d<T> >& 
v, double (*getCost)(const vector< Vector2d<T> >&), list<int>& subset, int& r, 
short verbose){
   //find the feature that has the least effect on the cost when it is removed 
from subset
   list<int> tmpset=subset;//temporary set of selected features (levels)
   vector< Vector2d<T> > tmp(v.size());
@@ -724,54 +360,4 @@ template<class T> double 
FeatureSelector::removeFeature(vector< Vector2d<T> >& v
   return maxCost;
 }
 
-template<class T> double FeatureSelector::removeFeature(map<string, 
Vector2d<T> > &v, map<string, vector<T> > &y, double (*getCost)(map<string, 
Vector2d<T> >&, map<string, vector<T> >&), list<int>& subset, int& r, bool 
verbose){
-  //find the feature that has the least effect on the cost when it is removed 
from subset
-  list<int> tmpset=subset;//temporary set of selected features (levels)
-  map<string, Vector2d<double> > tmp;
-  map<string, Vector2d<double> >::const_iterator mit;
-  int nFeatures=subset.size();
-  list<int> catchset;//restore set in case of catch all the way to last level 
(no better cost)
-  //initialize maxCost with actual cost for current subset (-1 if empty 
subset) 
-  double maxCost=-1;
-  int last;
-  double cost;
-  int maxLevels=(v.begin()->second)[0].size();
-  if(subset.size()>maxLevels||subset.empty()){
-    return maxCost;
-  }
-  list<int>::const_iterator it;
-  for(int i=0;i<nFeatures;++i){
-    last=tmpset.back();
-    tmpset.pop_back();
-    for(mit=v.begin();mit!=v.end();++mit){
-      string theName=mit->first;
-      v[theName].selectCols(tmpset,tmp[theName]);
-    }
-    try{
-      cost=getCost(tmp,y);
-    }
-    catch(...){ //singular matrix encountered
-      catchset=tmpset;//this tmpset resulted in singular matrix
-      if(verbose)
-       cout << "Could not remove feature " << last << endl;
-      tmpset.push_front(last);
-      continue;
-    }
-    if(maxCost<cost){ //level with better cost is found
-      maxCost=cost;
-      subset=tmpset;
-      r=last;
-    }
-    tmpset.push_front(last);
-  }
-  if(maxCost<0){//all levels removed were catched
-    subset=catchset;
-  }
-  //test: assert list contains no duplicate elements
-  for(list<int>::iterator lit=subset.begin();lit!=subset.end();++lit){
-    list<int>::iterator lit2=lit;//start searching from next element
-    assert(find(++lit2,subset.end(),*lit)==subset.end());
-  }
-  return maxCost;
-}
 #endif /* _FEATURESELECTOR_H_ */
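
The consolidation above leaves a single cost-callback signature per search
method (forwardUnivariate, forward, backward, floating, bruteForce), all
operating on a vector holding one Vector2d per class. A minimal calling
sketch, assuming a user-supplied cost function; myCost, selectFeatures and
the include path are hypothetical, only the method signatures come from the
header:

    #include <list>
    #include <vector>
    #include "FeatureSelector.h" // include path assumed; Vector2d comes from pktools

    // hypothetical cost callback: score the currently selected columns
    // (one Vector2d per class); higher is better
    double myCost(const std::vector< Vector2d<float> >& v){
      return 0.5; // placeholder, e.g. cross-validated classification accuracy
    }

    void selectFeatures(std::vector< Vector2d<float> >& trainingFeatures){
      FeatureSelector selector;
      std::list<int> subset; // receives the selected feature (column) indices
      // sequential forward selection of at most 3 features, verbose level 1
      double cost=selector.forward(trainingFeatures,&myCost,subset,3,1);
    }
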
diff --git a/src/algorithms/myfann_cpp.h b/src/algorithms/myfann_cpp.h
index 323ed70..6aba87f 100644
--- a/src/algorithms/myfann_cpp.h
+++ b/src/algorithms/myfann_cpp.h
@@ -826,7 +826,7 @@ namespace FANN
             }
             set_train_data(data);
       }
-      
+
 
 private:
         /* Set the training data to the struct fann_training_data pointer.
@@ -1483,6 +1483,127 @@ public:
             }
         }
 
+      
+
+        void train_on_data(const std::vector< Vector2d<fann_type> >& input,
+                           unsigned int num_data,
+                           bool initWeights,
+                           unsigned int max_epochs,
+                           unsigned int epochs_between_reports,
+                           float desired_error)
+        {
+          if ((ann != NULL))
+            {
+              training_data data;
+              data.set_train_data(input,num_data);
+              if(data.train_data != NULL){
+                if(initWeights)
+                  init_weights(data);
+                fann_train_on_data(ann, data.train_data, max_epochs,
+                                   epochs_between_reports, desired_error);
+              }
+            }
+        }
+
+        float cross_validation(std::vector< Vector2d<fann_type> >& 
trainingFeatures,
+                               unsigned int ntraining,
+                               unsigned short cv,
+                               unsigned int max_epochs,
+                               unsigned int epochs_between_reports,
+                               float desired_error,
+                               std::vector<unsigned short>& input,
+                               std::vector<unsigned short>& output,
+                               short verbose=0)
+        {
+          input.clear();
+          output.clear();
+          assert(cv<ntraining);
+          float rmse=0;
+          int nclass=trainingFeatures.size();
+          vector< Vector2d<float> > testFeatures(nclass);
+          int testclass=0;//class to leave out
+          int testsample=0;//sample to leave out
+          int nrun=(cv>1)? cv : ntraining;
+          for(int irun=0;irun<nrun;++irun){
+            if(verbose>1)
+              std::cout << "run " << irun << std::endl;
+            //reset training sample from last run
+            if(verbose>1)
+              std::cout << "reset training sample from last run" << std::endl;
+            for(int iclass=0;iclass<nclass;++iclass){
+              while(testFeatures[iclass].size()){
+                
trainingFeatures[iclass].push_back(testFeatures[iclass].back());
+                testFeatures[iclass].pop_back();
+              }
+              assert(trainingFeatures[iclass].size());
+            }
+            //create test sample
+            if(verbose>1)
+              std::cout << "create test sample" << std::endl;
+            unsigned int nsample=0;
+            int ntest=(cv>1)? ntraining/cv : 1; //n-fold cross validation or 
leave-one-out
+            while(nsample<ntest){
+              // if(index>=trainingFeatures[testclass].size()){
+              //   index=0;
+              // }
+              
testFeatures[testclass].push_back(trainingFeatures[testclass][0]);
+              
trainingFeatures[testclass].erase(trainingFeatures[testclass].begin());
+              if(!trainingFeatures[testclass].size())
+                std::cout << "Error: testclass " << testclass << " has no training samples" << std::endl;
+              assert(trainingFeatures[testclass].size());
+              ++nsample;
+              
if(static_cast<float>(trainingFeatures[testclass].size())/static_cast<float>(testFeatures[testclass].size())<=cv)
+                testclass=(testclass+1)%nclass;
+            }
+            assert(nsample==ntest);
+            //training with left out training set
+            if(verbose>1)
+              cout << endl << "Set training data" << endl;
+            bool initWeights=true;
+            train_on_data(trainingFeatures,ntraining-ntest,initWeights, 
max_epochs,
+                          epochs_between_reports, desired_error);
+            //cross validation with testFeatures
+            if(verbose>1)
+              cout << endl << "Cross validation" << endl;
+
+            //run the network on each test sample and record the predicted class
+            vector<float> result(nclass);
+            int maxClass=-1;
+            for(int iclass=0;iclass<testFeatures.size();++iclass){
+              assert(trainingFeatures[iclass].size());
+              for(int isample=0;isample<testFeatures[iclass].size();++isample){
+                result=run(testFeatures[iclass][isample]);
+                //search class with maximum posterior probability
+                float maxP=-1;
+                for(int ic=0;ic<nclass;++ic){
+                  float pv=(result[ic]+1.0)/2.0;//bring back to scale [0,1]
+                  if(pv>maxP){
+                    maxP=pv;
+                    maxClass=ic;
+                  }
+                }
+                assert(maxP>=0);
+                input.push_back(iclass);
+                output.push_back(maxClass);
+              }
+            }
+
+            // rmse+=test_data(testFeatures,ntest);
+            // if(verbose>1)
+            //   cout << endl << "rmse: " << rmse << endl;
+          }
+          // rmse/=nrun;
+          //reset from very last run
+          for(int iclass=0;iclass<nclass;++iclass){
+            while(testFeatures[iclass].size()){
+              trainingFeatures[iclass].push_back(testFeatures[iclass].back());
+              testFeatures[iclass].pop_back();
+            }
+          }
+          // return(rmse);
+          return 0;
+        }
+
         /* Method: train_on_file
            
            Does the same as <train_on_data>, but reads the training data 
directly from a file.
@@ -1545,6 +1666,54 @@ public:
             return mse;
         }
 
+      float test_data(const std::vector< Vector2d<fann_type> >& input, 
unsigned int num_data)
+      {
+          assert(num_data);
+          assert(input.size());
+          unsigned int num_class=input.size();
+          assert(input[0].size());
+          unsigned int num_input=input[0][0].size();
+          unsigned int num_output=num_class;
+            struct fann_train_data *data =
+                (struct fann_train_data *)malloc(sizeof(struct 
fann_train_data));
+            data->input = (fann_type **)calloc(num_data, sizeof(fann_type *));
+            data->output = (fann_type **)calloc(num_data, sizeof(fann_type *));
+
+            data->num_data = num_data;
+            data->num_input = num_input;
+            data->num_output = num_output;
+
+            fann_type *data_input = (fann_type *)calloc(num_input*num_data, 
sizeof(fann_type));
+            fann_type *data_output = (fann_type *)calloc(num_output*num_data, 
sizeof(fann_type));
+
+            unsigned int isample=0;
+            for(int iclass=0;iclass<num_class;++iclass){
+              for(int csample=0;csample<input[iclass].size();++csample){
+                data->input[isample] = data_input;
+                data_input += num_input;
+                for(int iband=0;iband<input[iclass][csample].size();++iband){
+                  assert(input[iclass][csample].size()==num_input);
+                  data->input[isample][iband] = input[iclass][csample][iband];
+                }
+                data->output[isample] = data_output;
+                data_output += num_output;
+                for(int ic=0;ic<num_output;++ic){
+                  //for single neuron output:
+//                   
data->output[isample][ic]=2.0/(num_class-1)*(iclass-(num_class-1)/2.0);
+                  if(ic==iclass)
+                    data->output[isample][ic] = 1;
+                  else
+                    data->output[isample][ic] = -1;
+                }
+                ++isample;
+              }
+            }
+            FANN::training_data trainingData;
+            trainingData.train_data = data;
+            return test_data(trainingData);
+      }
+
+
         /* Method: get_MSE
            Reads the mean square error from the network.
            
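
For reference, a minimal calling sketch for the new cross_validation entry
point; the network set-up, variable names and numeric values are hypothetical,
only the signature comes from the method added above:

    #include "floatfann.h"  // makes fann_type a float, as in pkclassify_nn.cc
    #include "myfann_cpp.h"

    FANN::neural_net net;
    // ... network creation (layers, activation functions) assumed done elsewhere
    std::vector< Vector2d<float> > trainingFeatures; // one Vector2d per class, filled elsewhere
    unsigned int ntraining=0;                        // total number of samples over all classes
    std::vector<unsigned short> referenceVector;     // true class of each held-out sample
    std::vector<unsigned short> outputVector;        // predicted class of each held-out sample
    net.cross_validation(trainingFeatures, ntraining,
                         2,      // cv folds; a value of 1 falls back to leave-one-out
                         500,    // max_epochs (hypothetical)
                         0,      // epochs_between_reports: 0 disables reports
                         0.0003, // desired_error (hypothetical)
                         referenceVector, outputVector);
    // the two vectors can then be paired off to build a confusion matrix
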
diff --git a/src/algorithms/svm.cpp b/src/algorithms/svm.cpp
index 5e9fd28..32c3cbb 100644
--- a/src/algorithms/svm.cpp
+++ b/src/algorithms/svm.cpp
@@ -407,7 +407,7 @@ public:
 
        void Solve(int l, const QMatrix& Q, const double *p_, const schar *y_,
                   double *alpha_, double Cp, double Cn, double eps,
-                  SolutionInfo* si, int shrinking);
+                  SolutionInfo* si, int shrinking, bool verbose=false);
 protected:
        int active_size;
        schar *y;
@@ -505,7 +505,8 @@ void Solver::reconstruct_gradient()
 
 void Solver::Solve(int l, const QMatrix& Q, const double *p_, const schar *y_,
                   double *alpha_, double Cp, double Cn, double eps,
-                  SolutionInfo* si, int shrinking)
+                  SolutionInfo* si, int shrinking,
+                   bool verbose)//pk
 {
        this->l = l;
        this->Q = &Q;
@@ -571,7 +572,8 @@ void Solver::Solve(int l, const QMatrix& Q, const double 
*p_, const schar *y_,
                {
                        counter = min(l,1000);
                        if(shrinking) do_shrinking();
-                       info(".");
+                        if(verbose)//pk
+                          info(".");
                }
 
                int i,j;
@@ -581,7 +583,8 @@ void Solver::Solve(int l, const QMatrix& Q, const double 
*p_, const schar *y_,
                        reconstruct_gradient();
                        // reset active set size and check
                        active_size = l;
-                       info("*");
+                        if(verbose)//pk
+                          info("*");
                        if(select_working_set(i,j)!=0)
                                break;
                        else
@@ -737,7 +740,8 @@ void Solver::Solve(int l, const QMatrix& Q, const double 
*p_, const schar *y_,
                        // reconstruct the whole gradient to calculate 
objective value
                        reconstruct_gradient();
                        active_size = l;
-                       info("*");
+                        if(verbose)//pk
+                          info("*");
                }
                info("\nWARNING: reaching max number of iterations");
        }
@@ -772,8 +776,9 @@ void Solver::Solve(int l, const QMatrix& Q, const double 
*p_, const schar *y_,
 
        si->upper_bound_p = Cp;
        si->upper_bound_n = Cn;
-
-       info("\noptimization finished, #iter = %d\n",iter);
+        
+        if(verbose)//pk
+          info("\noptimization finished, #iter = %d\n",iter);
 
        delete[] p;
        delete[] y;
@@ -1014,10 +1019,10 @@ public:
        Solver_NU() {}
        void Solve(int l, const QMatrix& Q, const double *p, const schar *y,
                   double *alpha, double Cp, double Cn, double eps,
-                  SolutionInfo* si, int shrinking)
+                  SolutionInfo* si, int shrinking, bool verbose=false)
        {
                this->si = si;
-               Solver::Solve(l,Q,p,y,alpha,Cp,Cn,eps,si,shrinking);
+               Solver::Solve(l,Q,p,y,alpha,Cp,Cn,eps,si,shrinking,verbose);
        }
 private:
        SolutionInfo *si;
@@ -1458,14 +1463,15 @@ static void solve_c_svc(
 
        Solver s;
        s.Solve(l, SVC_Q(*prob,*param,y), minus_ones, y,
-               alpha, Cp, Cn, param->eps, si, param->shrinking);
+               alpha, Cp, Cn, param->eps, si, param->shrinking, 
param->verbose);
 
        double sum_alpha=0;
        for(i=0;i<l;i++)
                sum_alpha += alpha[i];
 
        if (Cp==Cn)
-               info("nu = %f\n", sum_alpha/(Cp*prob->l));
+          if(param->verbose)//pk
+            info("nu = %f\n", sum_alpha/(Cp*prob->l));
 
        for(i=0;i<l;i++)
                alpha[i] *= y[i];
@@ -1512,10 +1518,11 @@ static void solve_nu_svc(
 
        Solver_NU s;
        s.Solve(l, SVC_Q(*prob,*param,y), zeros, y,
-               alpha, 1.0, 1.0, param->eps, si,  param->shrinking);
+               alpha, 1.0, 1.0, param->eps, si,  param->shrinking, 
param->verbose);
        double r = si->r;
-
-       info("C = %f\n",1/r);
+        
+        if(param->verbose)//pk
+          info("C = %f\n",1/r);
 
        for(i=0;i<l;i++)
                alpha[i] *= y[i]/r;
@@ -1555,7 +1562,7 @@ static void solve_one_class(
 
        Solver s;
        s.Solve(l, ONE_CLASS_Q(*prob,*param), zeros, ones,
-               alpha, 1.0, 1.0, param->eps, si, param->shrinking);
+               alpha, 1.0, 1.0, param->eps, si, param->shrinking, 
param->verbose );
 
        delete[] zeros;
        delete[] ones;
@@ -1584,7 +1591,7 @@ static void solve_epsilon_svr(
 
        Solver s;
        s.Solve(2*l, SVR_Q(*prob,*param), linear_term, y,
-               alpha2, param->C, param->C, param->eps, si, param->shrinking);
+               alpha2, param->C, param->C, param->eps, si, param->shrinking, 
param->verbose);
 
        double sum_alpha = 0;
        for(i=0;i<l;i++)
@@ -1592,7 +1599,8 @@ static void solve_epsilon_svr(
                alpha[i] = alpha2[i] - alpha2[i+l];
                sum_alpha += fabs(alpha[i]);
        }
-       info("nu = %f\n",sum_alpha/(param->C*l));
+        if(param->verbose)//pk
+          info("nu = %f\n",sum_alpha/(param->C*l));
 
        delete[] alpha2;
        delete[] linear_term;
@@ -1625,9 +1633,10 @@ static void solve_nu_svr(
 
        Solver_NU s;
        s.Solve(2*l, SVR_Q(*prob,*param), linear_term, y,
-               alpha2, C, C, param->eps, si, param->shrinking);
+               alpha2, C, C, param->eps, si, param->shrinking, param->verbose);
 
-       info("epsilon = %f\n",-si->r);
+        if(param->verbose)//pk
+          info("epsilon = %f\n",-si->r);
 
        for(i=0;i<l;i++)
                alpha[i] = alpha2[i] - alpha2[i+l];
@@ -1671,7 +1680,8 @@ static decision_function svm_train_one(
                        break;
        }
 
-       info("obj = %f, rho = %f\n",si.obj,si.rho);
+        if(param->verbose)//pk
+          info("obj = %f, rho = %f\n",si.obj,si.rho);
 
        // output SVs
 
@@ -1695,7 +1705,8 @@ static decision_function svm_train_one(
                }
        }
 
-       info("nSV = %d, nBSV = %d\n",nSV,nBSV);
+        if(param->verbose)//pk
+          info("nSV = %d, nBSV = %d\n",nSV,nBSV);
 
        decision_function f;
        f.alpha = alpha;
@@ -2252,7 +2263,8 @@ svm_model *svm_train(const svm_problem *prob, const 
svm_parameter *param)
                        nz_count[i] = nSV;
                }
                
-               info("Total nSV = %d\n",total_sv);
+                if(param->verbose)//pk
+                  info("Total nSV = %d\n",total_sv);
 
                model->l = total_sv;
                model->SV = Malloc(svm_node *,total_sv);
diff --git a/src/algorithms/svm.h b/src/algorithms/svm.h
index 2f60a57..bc3805d 100644
--- a/src/algorithms/svm.h
+++ b/src/algorithms/svm.h
@@ -44,6 +44,7 @@ struct svm_parameter
        double p;       /* for EPSILON_SVR */
        int shrinking;  /* use the shrinking heuristics */
        int probability; /* do probability estimates */
+  bool verbose;//pk
 };
 
 //
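
Because svm_parameter gained a field, existing callers must initialize it
before handing the struct to svm_train; a minimal sketch (all numeric values
are hypothetical, and prob is assumed to be filled in elsewhere):

    #include "svm.h" // include path assumed

    struct svm_parameter param;
    param.svm_type    = C_SVC;
    param.kernel_type = RBF;
    param.degree      = 3;      // unused for RBF, set for completeness
    param.gamma       = 1.0;    // hypothetical
    param.coef0       = 0;
    param.C           = 100;    // hypothetical
    param.eps         = 0.001;
    param.cache_size  = 100;
    param.shrinking   = 1;
    param.probability = 0;
    param.nr_weight   = 0;      // no per-class weight overrides
    param.verbose     = false;  // new field: silence libsvm's info() output
    // struct svm_model *model = svm_train(&prob, &param);
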
diff --git a/src/apps/Makefile.am b/src/apps/Makefile.am
index 14d8f95..aa0421f 100644
--- a/src/apps/Makefile.am
+++ b/src/apps/Makefile.am
@@ -8,10 +8,13 @@ LDADD = $(GDAL_LDFLAGS) 
$(top_builddir)/src/algorithms/libalgorithms.a $(top_bui
 # the program to build (the names of the final binaries)
 bin_PROGRAMS = pkinfo pkcrop pkreclass pkgetmask pksetmask pkcreatect 
pkdumpimg pkdumpogr pksieve pkstat pkstatogr pkegcs pkextract pkfillnodata 
pkfilter pkdsm2shadow pkmosaic pkndvi pkpolygonize pkascii2img pkdiff 
pkclassify_svm pkfs_svm pkascii2ogr #pkxcorimg pkgeom
 if USE_FANN
-bin_PROGRAMS += pkclassify_nn
+bin_PROGRAMS += pkclassify_nn pkfs_nn
 pkclassify_nn_SOURCES = $(top_srcdir)/src/algorithms/myfann_cpp.h 
pkclassify_nn.h pkclassify_nn.cc
 pkclassify_nn_CXXFLAGS = -I$(top_srcdir)/src -I$(top_srcdir)/src/base 
$(FANN_CFLAGS) -I$(top_srcdir)/src/algorithms $(AM_CXXFLAGS)
 pkclassify_nn_LDADD = $(FANN_LIBS) $(FANN_CFLAGS) $(AM_LDFLAGS)
+pkfs_nn_SOURCES = $(top_srcdir)/src/algorithms/myfann_cpp.h pkfs_nn.cc
+pkfs_nn_CXXFLAGS = -I$(top_srcdir)/src -I$(top_srcdir)/src/base $(FANN_CFLAGS) 
-I$(top_srcdir)/src/algorithms $(AM_CXXFLAGS)
+pkfs_nn_LDADD = $(GSL_LIBS) $(FANN_LIBS) $(FANN_CFLAGS) $(AM_LDFLAGS)
 endif
 
 if USE_LAS
@@ -50,6 +53,7 @@ pkpolygonize_SOURCES = pkpolygonize.cc
 pkdiff_SOURCES = pkdiff.cc
 pkclassify_svm_SOURCES = $(top_srcdir)/src/algorithms/svm.h 
$(top_srcdir)/src/algorithms/svm.cpp pkclassify_svm.cc
 pkfs_svm_SOURCES = $(top_srcdir)/src/algorithms/svm.h 
$(top_srcdir)/src/algorithms/svm.cpp pkfs_svm.cc
+pkfs_svm_LDADD = $(GSL_LIBS) $(AM_LDFLAGS)
 pkascii2img_SOURCES = pkascii2img.cc
 pkascii2ogr_SOURCES = pkascii2ogr.cc
 #pkxcorimg_SOURCES= pkxcorimg.cc
diff --git a/src/apps/Makefile.in b/src/apps/Makefile.in
index 184c40a..d708c55 100644
--- a/src/apps/Makefile.in
+++ b/src/apps/Makefile.in
@@ -41,7 +41,7 @@ bin_PROGRAMS = pkinfo$(EXEEXT) pkcrop$(EXEEXT) 
pkreclass$(EXEEXT) \
        pkpolygonize$(EXEEXT) pkascii2img$(EXEEXT) pkdiff$(EXEEXT) \
        pkclassify_svm$(EXEEXT) pkfs_svm$(EXEEXT) pkascii2ogr$(EXEEXT) \
        $(am__EXEEXT_1) $(am__EXEEXT_2) $(am__EXEEXT_3)
-@USE_FANN_TRUE@am__append_1 = pkclassify_nn
+@USE_FANN_TRUE@am__append_1 = pkclassify_nn pkfs_nn
 @USE_LAS_TRUE@am__append_2 = pklas2img
 @USE_NLOPT_TRUE@am__append_3 = pksensormodel
 subdir = src/apps
@@ -55,7 +55,7 @@ mkinstalldirs = $(install_sh) -d
 CONFIG_HEADER = $(top_builddir)/config.h
 CONFIG_CLEAN_FILES =
 CONFIG_CLEAN_VPATH_FILES =
-@USE_FANN_TRUE@am__EXEEXT_1 = pkclassify_nn$(EXEEXT)
+@USE_FANN_TRUE@am__EXEEXT_1 = pkclassify_nn$(EXEEXT) pkfs_nn$(EXEEXT)
 @USE_LAS_TRUE@am__EXEEXT_2 = pklas2img$(EXEEXT)
 @USE_NLOPT_TRUE@am__EXEEXT_3 = pksensormodel$(EXEEXT)
 am__installdirs = "$(DESTDIR)$(bindir)"
@@ -149,12 +149,18 @@ pkfilter_LDADD = $(LDADD)
 pkfilter_DEPENDENCIES = $(am__DEPENDENCIES_1) \
        $(top_builddir)/src/algorithms/libalgorithms.a \
        $(top_builddir)/src/imageclasses/libimageClasses.a
+am__pkfs_nn_SOURCES_DIST = $(top_srcdir)/src/algorithms/myfann_cpp.h \
+       pkfs_nn.cc
+@USE_FANN_TRUE@am_pkfs_nn_OBJECTS = pkfs_nn-pkfs_nn.$(OBJEXT)
+pkfs_nn_OBJECTS = $(am_pkfs_nn_OBJECTS)
+@USE_FANN_TRUE@pkfs_nn_DEPENDENCIES = $(am__DEPENDENCIES_1) \
+@USE_FANN_TRUE@        $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \
+@USE_FANN_TRUE@        $(am__DEPENDENCIES_2)
+pkfs_nn_LINK = $(CXXLD) $(pkfs_nn_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \
+       $(LDFLAGS) -o $@
 am_pkfs_svm_OBJECTS = svm.$(OBJEXT) pkfs_svm.$(OBJEXT)
 pkfs_svm_OBJECTS = $(am_pkfs_svm_OBJECTS)
-pkfs_svm_LDADD = $(LDADD)
-pkfs_svm_DEPENDENCIES = $(am__DEPENDENCIES_1) \
-       $(top_builddir)/src/algorithms/libalgorithms.a \
-       $(top_builddir)/src/imageclasses/libimageClasses.a
+pkfs_svm_DEPENDENCIES = $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_2)
 am_pkgetmask_OBJECTS = pkgetmask.$(OBJEXT)
 pkgetmask_OBJECTS = $(am_pkgetmask_OBJECTS)
 pkgetmask_LDADD = $(LDADD)
@@ -241,7 +247,7 @@ SOURCES = $(pkascii2img_SOURCES) $(pkascii2ogr_SOURCES) \
        $(pkcreatect_SOURCES) $(pkcrop_SOURCES) $(pkdiff_SOURCES) \
        $(pkdsm2shadow_SOURCES) $(pkdumpimg_SOURCES) \
        $(pkdumpogr_SOURCES) $(pkegcs_SOURCES) $(pkextract_SOURCES) \
-       $(pkfillnodata_SOURCES) $(pkfilter_SOURCES) \
+       $(pkfillnodata_SOURCES) $(pkfilter_SOURCES) $(pkfs_nn_SOURCES) \
        $(pkfs_svm_SOURCES) $(pkgetmask_SOURCES) $(pkinfo_SOURCES) \
        $(pklas2img_SOURCES) $(pkmosaic_SOURCES) $(pkndvi_SOURCES) \
        $(pkpolygonize_SOURCES) $(pkreclass_SOURCES) \
@@ -253,7 +259,8 @@ DIST_SOURCES = $(pkascii2img_SOURCES) 
$(pkascii2ogr_SOURCES) \
        $(pkdsm2shadow_SOURCES) $(pkdumpimg_SOURCES) \
        $(pkdumpogr_SOURCES) $(pkegcs_SOURCES) $(pkextract_SOURCES) \
        $(pkfillnodata_SOURCES) $(pkfilter_SOURCES) \
-       $(pkfs_svm_SOURCES) $(pkgetmask_SOURCES) $(pkinfo_SOURCES) \
+       $(am__pkfs_nn_SOURCES_DIST) $(pkfs_svm_SOURCES) \
+       $(pkgetmask_SOURCES) $(pkinfo_SOURCES) \
        $(am__pklas2img_SOURCES_DIST) $(pkmosaic_SOURCES) \
        $(pkndvi_SOURCES) $(pkpolygonize_SOURCES) $(pkreclass_SOURCES) \
        $(am__pksensormodel_SOURCES_DIST) $(pksetmask_SOURCES) \
@@ -375,6 +382,9 @@ LDADD = $(GDAL_LDFLAGS) 
$(top_builddir)/src/algorithms/libalgorithms.a $(top_bui
 @USE_FANN_TRUE@pkclassify_nn_SOURCES = 
$(top_srcdir)/src/algorithms/myfann_cpp.h pkclassify_nn.h pkclassify_nn.cc
 @USE_FANN_TRUE@pkclassify_nn_CXXFLAGS = -I$(top_srcdir)/src 
-I$(top_srcdir)/src/base $(FANN_CFLAGS) -I$(top_srcdir)/src/algorithms 
$(AM_CXXFLAGS)
 @USE_FANN_TRUE@pkclassify_nn_LDADD = $(FANN_LIBS) $(FANN_CFLAGS) $(AM_LDFLAGS)
+@USE_FANN_TRUE@pkfs_nn_SOURCES = $(top_srcdir)/src/algorithms/myfann_cpp.h 
pkfs_nn.cc
+@USE_FANN_TRUE@pkfs_nn_CXXFLAGS = -I$(top_srcdir)/src -I$(top_srcdir)/src/base 
$(FANN_CFLAGS) -I$(top_srcdir)/src/algorithms $(AM_CXXFLAGS)
+@USE_FANN_TRUE@pkfs_nn_LDADD = $(GSL_LIBS) $(FANN_LIBS) $(FANN_CFLAGS) 
$(AM_LDFLAGS)
 @USE_LAS_TRUE@pklas2img_SOURCES = pklas2img.cc
 @USE_LAS_TRUE@pklas2img_LDADD = -L$(top_builddir)/src/fileclasses 
-lfileClasses -llas $(AM_LDFLAGS)
 @USE_NLOPT_TRUE@pksensormodel_SOURCES = 
$(top_srcdir)/src/algorithms/SensorModel.h pksensormodel.h pksensormodel.cc
@@ -405,6 +415,7 @@ pkpolygonize_SOURCES = pkpolygonize.cc
 pkdiff_SOURCES = pkdiff.cc
 pkclassify_svm_SOURCES = $(top_srcdir)/src/algorithms/svm.h 
$(top_srcdir)/src/algorithms/svm.cpp pkclassify_svm.cc
 pkfs_svm_SOURCES = $(top_srcdir)/src/algorithms/svm.h 
$(top_srcdir)/src/algorithms/svm.cpp pkfs_svm.cc
+pkfs_svm_LDADD = $(GSL_LIBS) $(AM_LDFLAGS)
 pkascii2img_SOURCES = pkascii2img.cc
 pkascii2ogr_SOURCES = pkascii2ogr.cc
 all: all-am
@@ -520,6 +531,9 @@ pkfillnodata$(EXEEXT): $(pkfillnodata_OBJECTS) 
$(pkfillnodata_DEPENDENCIES)
 pkfilter$(EXEEXT): $(pkfilter_OBJECTS) $(pkfilter_DEPENDENCIES) 
        @rm -f pkfilter$(EXEEXT)
        $(CXXLINK) $(pkfilter_OBJECTS) $(pkfilter_LDADD) $(LIBS)
+pkfs_nn$(EXEEXT): $(pkfs_nn_OBJECTS) $(pkfs_nn_DEPENDENCIES) 
+       @rm -f pkfs_nn$(EXEEXT)
+       $(pkfs_nn_LINK) $(pkfs_nn_OBJECTS) $(pkfs_nn_LDADD) $(LIBS)
 pkfs_svm$(EXEEXT): $(pkfs_svm_OBJECTS) $(pkfs_svm_DEPENDENCIES) 
        @rm -f pkfs_svm$(EXEEXT)
        $(CXXLINK) $(pkfs_svm_OBJECTS) $(pkfs_svm_LDADD) $(LIBS)
@@ -580,6 +594,7 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pkextract.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pkfillnodata.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pkfilter.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pkfs_nn-pkfs_nn.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pkfs_svm.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pkgetmask.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pkinfo.Po@am__quote@
@@ -637,6 +652,20 @@ svm.obj: $(top_srcdir)/src/algorithms/svm.cpp
 @AMDEP_TRUE@@am__fastdepCXX_FALSE@     DEPDIR=$(DEPDIR) $(CXXDEPMODE) 
$(depcomp) @AMDEPBACKSLASH@
 @am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) 
$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o svm.obj `if test -f 
'$(top_srcdir)/src/algorithms/svm.cpp'; then $(CYGPATH_W) 
'$(top_srcdir)/src/algorithms/svm.cpp'; else $(CYGPATH_W) 
'$(srcdir)/$(top_srcdir)/src/algorithms/svm.cpp'; fi`
 
+pkfs_nn-pkfs_nn.o: pkfs_nn.cc
+@am__fastdepCXX_TRUE@  $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) 
$(AM_CPPFLAGS) $(CPPFLAGS) $(pkfs_nn_CXXFLAGS) $(CXXFLAGS) -MT 
pkfs_nn-pkfs_nn.o -MD -MP -MF $(DEPDIR)/pkfs_nn-pkfs_nn.Tpo -c -o 
pkfs_nn-pkfs_nn.o `test -f 'pkfs_nn.cc' || echo '$(srcdir)/'`pkfs_nn.cc
+@am__fastdepCXX_TRUE@  $(am__mv) $(DEPDIR)/pkfs_nn-pkfs_nn.Tpo 
$(DEPDIR)/pkfs_nn-pkfs_nn.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@     source='pkfs_nn.cc' 
object='pkfs_nn-pkfs_nn.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@     DEPDIR=$(DEPDIR) $(CXXDEPMODE) 
$(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) 
$(AM_CPPFLAGS) $(CPPFLAGS) $(pkfs_nn_CXXFLAGS) $(CXXFLAGS) -c -o 
pkfs_nn-pkfs_nn.o `test -f 'pkfs_nn.cc' || echo '$(srcdir)/'`pkfs_nn.cc
+
+pkfs_nn-pkfs_nn.obj: pkfs_nn.cc
+@am__fastdepCXX_TRUE@  $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) 
$(AM_CPPFLAGS) $(CPPFLAGS) $(pkfs_nn_CXXFLAGS) $(CXXFLAGS) -MT 
pkfs_nn-pkfs_nn.obj -MD -MP -MF $(DEPDIR)/pkfs_nn-pkfs_nn.Tpo -c -o 
pkfs_nn-pkfs_nn.obj `if test -f 'pkfs_nn.cc'; then $(CYGPATH_W) 'pkfs_nn.cc'; 
else $(CYGPATH_W) '$(srcdir)/pkfs_nn.cc'; fi`
+@am__fastdepCXX_TRUE@  $(am__mv) $(DEPDIR)/pkfs_nn-pkfs_nn.Tpo 
$(DEPDIR)/pkfs_nn-pkfs_nn.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@     source='pkfs_nn.cc' 
object='pkfs_nn-pkfs_nn.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@     DEPDIR=$(DEPDIR) $(CXXDEPMODE) 
$(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) 
$(AM_CPPFLAGS) $(CPPFLAGS) $(pkfs_nn_CXXFLAGS) $(CXXFLAGS) -c -o 
pkfs_nn-pkfs_nn.obj `if test -f 'pkfs_nn.cc'; then $(CYGPATH_W) 'pkfs_nn.cc'; 
else $(CYGPATH_W) '$(srcdir)/pkfs_nn.cc'; fi`
+
 .cpp.o:
 @am__fastdepCXX_TRUE@  $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o 
$@ $<
 @am__fastdepCXX_TRUE@  $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
diff --git a/src/apps/pkclassify_nn.cc b/src/apps/pkclassify_nn.cc
index 35c887c..983e2c5 100644
--- a/src/apps/pkclassify_nn.cc
+++ b/src/apps/pkclassify_nn.cc
@@ -26,6 +26,7 @@ along with pktools.  If not, see 
<http://www.gnu.org/licenses/>.
 #include "imageclasses/ImgReaderOgr.h"
 #include "imageclasses/ImgWriterOgr.h"
 #include "base/Optionpk.h"
+#include "algorithms/ConfusionMatrix.h"
 #include "floatfann.h"
 #include "myfann_cpp.h"
 
@@ -59,10 +60,12 @@ int main(int argc, char *argv[])
  Optionpk<int> minSize_opt("m", "min", "if number of training pixels is less than min, do not take this class into account (default is 0: consider all classes)", 0);
   Optionpk<double> start_opt("s", "start", "start band sequence number (set to 
0)",0); 
   Optionpk<double> end_opt("e", "end", "end band sequence number (set to 0 for 
all bands)", 0); 
+  Optionpk<short> band_opt("b", "band", "band index (starting from 0); use either this band option or start/end");
   Optionpk<double> offset_opt("\0", "offset", "offset value for each spectral 
band input features: refl[band]=(DN[band]-offset[band])/scale[band]", 0.0);
  Optionpk<double> scale_opt("\0", "scale", "scale value for each spectral band input features: refl=(DN[band]-offset[band])/scale[band] (use 0 to scale min and max in each band to -1.0 and 1.0)", 0.0);
   Optionpk<unsigned short> aggreg_opt("a", "aggreg", "how to combine 
aggregated classifiers, see also rc option (0: sum rule, 1: max rule). Default 
is max rule (1)",1); 
  Optionpk<double> priors_opt("p", "prior", "prior probabilities for each class (e.g., -p 0.3 -p 0.3 -p 0.2; default: equal priors)", 0.0); 
+  Optionpk<unsigned short> cv_opt("cv", "cv", "n-fold cross validation 
mode",0);
  Optionpk<unsigned int> nneuron_opt("\0", "nneuron", "number of neurons in hidden layers in neural network (multiple hidden layers are set by defining multiple numbers of neurons: -n 15 -n 1; default is one hidden layer with 5 neurons)", 5); 
  Optionpk<float> connection_opt("\0", "connection", "connection rate (default: 1.0 for a fully connected network)", 1.0); 
  Optionpk<float> weights_opt("w", "weights", "weights for neural network; applies to a fully connected network only, starting from first input neuron to last output neuron, including the bias neurons (last neuron in each layer but the last)", 0.0); 
@@ -96,10 +99,12 @@ int main(int argc, char *argv[])
   minSize_opt.retrieveOption(argc,argv);
   start_opt.retrieveOption(argc,argv);
   end_opt.retrieveOption(argc,argv);
+  band_opt.retrieveOption(argc,argv);
   offset_opt.retrieveOption(argc,argv);
   scale_opt.retrieveOption(argc,argv);
   aggreg_opt.retrieveOption(argc,argv);
   priors_opt.retrieveOption(argc,argv);
+  cv_opt.retrieveOption(argc,argv);
   nneuron_opt.retrieveOption(argc,argv);
   connection_opt.retrieveOption(argc,argv);
   weights_opt.retrieveOption(argc,argv);
@@ -183,6 +188,9 @@ int main(int argc, char *argv[])
       priors[iclass]/=normPrior;
   }
 
+  //sort bands
+  if(band_opt.size())
+    std::sort(band_opt.begin(),band_opt.end());
   //----------------------------------- Training 
-------------------------------
   vector<string> fields;
   for(int ibag=0;ibag<nbag;++ibag){
@@ -193,18 +201,21 @@ int main(int argc, char *argv[])
       if(verbose_opt[0]>=1)
         cout << "reading imageShape file " << training_opt[0] << endl;
       try{
-        
totalSamples=readDataImageShape(training_opt[ibag],trainingMap,fields,start_opt[0],end_opt[0],label_opt[0],verbose_opt[0]);
+        if(band_opt.size())
+          
totalSamples=readDataImageShape(training_opt[ibag],trainingMap,fields,band_opt,label_opt[0],verbose_opt[0]);
+        else
+          
totalSamples=readDataImageShape(training_opt[ibag],trainingMap,fields,start_opt[0],end_opt[0],label_opt[0],verbose_opt[0]);
         if(trainingMap.size()<2){
           string errorstring="Error: could not read at least two classes from 
training file";
           throw(errorstring);
         }
       }
       catch(string error){
-        cerr << error << endl;
+        cerr << error << std::endl;
         exit(1);
       }
       catch(...){
-        cerr << "error catched" << endl;
+        cerr << "error catched" << std::endl;
         exit(1);
       }
       //delete class 0
@@ -414,8 +425,7 @@ int main(int argc, char *argv[])
       assert(trainingFeatures[iclass].size()==nctraining);
     }
     
-    int nFeatures=0;
-    nFeatures=trainingFeatures[0][0].size();
+    unsigned int nFeatures=trainingFeatures[0][0].size();
     unsigned int ntraining=0;
     for(int iclass=0;iclass<nclass;++iclass){
       if(verbose_opt[0]>=1)
@@ -424,7 +434,7 @@ int main(int argc, char *argv[])
     }
     const unsigned int num_layers = nneuron_opt.size()+2;
     const float desired_error = 0.0003;
-    const unsigned int iterations_between_reports = 
(verbose_opt[0]>=1)?100:maxit_opt[0]+1;
+    const unsigned int iterations_between_reports = (verbose_opt[0])? 
maxit_opt[0]+1:0;
     if(verbose_opt[0]>=1){
       cout << "creating artificial neural network with " << nneuron_opt.size() 
<< " hidden layer, having " << endl;
       for(int ilayer=0;ilayer<nneuron_opt.size();++ilayer)
@@ -475,33 +485,57 @@ int main(int argc, char *argv[])
       net[ibag].print_parameters();
     }
       
-
-    FANN::training_data trainingData;
+    if(cv_opt[0]){
+      if(verbose_opt[0])
+        std::cout << "cross validation" << std::endl;
+      vector<unsigned short> referenceVector;
+      vector<unsigned short> outputVector;
+      float rmse=net[ibag].cross_validation(trainingFeatures,
+                                            ntraining,
+                                            cv_opt[0],
+                                            maxit_opt[0],
+                                            0,
+                                            desired_error,
+                                            referenceVector,
+                                            outputVector,
+                                            verbose_opt[0]);
+      ConfusionMatrix cm(nclass);
+      for(int isample=0;isample<referenceVector.size();++isample)
+        
cm.incrementResult(cm.getClass(referenceVector[isample]),cm.getClass(outputVector[isample]),1);
+      assert(cm.nReference());
+      std::cout << cm << std::endl;
+      std::cout << "Kappa: " << cm.kappa() << std::endl;
+      double se95_oa=0;
+      double doa=0;
+      doa=cm.oa_pct(&se95_oa);
+      std::cout << "Overall Accuracy: " << doa << " (" << se95_oa << ")"  << 
std::endl;
+      std::cout << "rmse cross-validation: " << rmse << std::endl;
+    }
   
     if(verbose_opt[0]>=1)
       cout << endl << "Set training data" << endl;
-    trainingData.set_train_data(trainingFeatures,ntraining);
-    //     trainingData.set_train_data(trainingFeatures,totalSamples);
 
     if(verbose_opt[0]>=1)
       cout << endl << "Training network" << endl;
-    net[ibag].init_weights(trainingData);
     
     if(verbose_opt[0]>=1){
       cout << "Max Epochs " << setw(8) << maxit_opt[0] << ". "
            << "Desired Error: " << left << desired_error << right << endl;
     }
-    //   net.set_callback(print_callback, NULL);
-    if(weights_opt.size()==net[ibag].get_total_connections()){
+    if(weights_opt.size()==net[ibag].get_total_connections()){//no new 
training needed (same training sample)
       vector<fann_connection> convector;
       net[ibag].get_connection_array(convector);
       for(int 
i_connection=0;i_connection<net[ibag].get_total_connections();++i_connection)
         convector[i_connection].weight=weights_opt[i_connection];
       net[ibag].set_weight_array(convector);
     }
-    else
-      net[ibag].train_on_data(trainingData, maxit_opt[0],
+    else{
+      bool initWeights=true;
+      net[ibag].train_on_data(trainingFeatures,ntraining,initWeights, 
maxit_opt[0],
                               iterations_between_reports, desired_error);
+    }
+
+
     if(verbose_opt[0]>=2){
       net[ibag].print_connections();
       vector<fann_connection> convector;
@@ -596,32 +630,64 @@ int main(int argc, char *argv[])
       vector<short> lineMask;
       if(mask_opt[0]!="")
         lineMask.resize(maskReader.nrOfCol());
-      Vector2d<float> hpixel(ncol,nband);
+      Vector2d<float> hpixel(ncol);
       Vector2d<float> fpixel(ncol);
       Vector2d<float> prOut(nreclass,ncol);//posterior prob for each reclass
       Vector2d<char> classBag;//classified line for writing to image file
       if(classBag_opt[0]!="")
         classBag.resize(nbag,ncol);
       //read all bands of all pixels in this line in hline
-      for(int iband=start_opt[0];iband<start_opt[0]+nband;++iband){
-        if(verbose_opt[0]==2)
-          cout << "reading band " << iband << endl;
-        assert(iband>=0);
-        assert(iband<testImage.nrOfBand());
-        try{
-          testImage.readData(buffer,GDT_Float32,iline,iband);
-        }
-        catch(string theError){
-          cerr << "Error reading " << input_opt[0] << ": " << theError << endl;
-          exit(3);
+      try{
+        if(band_opt.size()){
+          for(int iband=0;iband<band_opt.size();++iband){
+            if(verbose_opt[0]==2)
+              std::cout << "reading band " << band_opt[iband] << std::endl;
+            assert(band_opt[iband]>=0);
+            assert(band_opt[iband]<testImage.nrOfBand());
+            testImage.readData(buffer,GDT_Float32,iline,band_opt[iband]);
+            for(int icol=0;icol<ncol;++icol)
+              hpixel[icol].push_back(buffer[icol]);
+          }
         }
-        catch(...){
-          cerr << "error catched" << endl;
-          exit(3);
+        else{
+          for(int iband=start_opt[0];iband<start_opt[0]+nband;++iband){
+            if(verbose_opt[0]==2)
+              std::cout << "reading band " << iband << std::endl;
+            assert(iband>=0);
+            assert(iband<testImage.nrOfBand());
+            testImage.readData(buffer,GDT_Float32,iline,iband);
+            for(int icol=0;icol<ncol;++icol)
+              hpixel[icol].push_back(buffer[icol]);
+          }
         }
-        for(int icol=0;icol<ncol;++icol)
-          hpixel[icol][iband-start_opt[0]]=buffer[icol];
       }
+      catch(string theError){
+        cerr << "Error reading " << input_opt[0] << ": " << theError << 
std::endl;
+        exit(3);
+      }
+      catch(...){
+        cerr << "error catched" << std::endl;
+        exit(3);
+      }
+      // for(int iband=start_opt[0];iband<start_opt[0]+nband;++iband){
+      //   if(verbose_opt[0]==2)
+      //     cout << "reading band " << iband << endl;
+      //   assert(iband>=0);
+      //   assert(iband<testImage.nrOfBand());
+      //   try{
+      //     testImage.readData(buffer,GDT_Float32,iline,iband);
+      //   }
+      //   catch(string theError){
+      //     cerr << "Error reading " << input_opt[0] << ": " << theError << 
endl;
+      //     exit(3);
+      //   }
+      //   catch(...){
+      //     cerr << "error catched" << endl;
+      //     exit(3);
+      //   }
+      //   for(int icol=0;icol<ncol;++icol)
+      //     hpixel[icol][iband-start_opt[0]]=buffer[icol];
+      // }
 
       assert(nband==hpixel[0].size());
       if(verbose_opt[0]==2)
@@ -808,6 +874,7 @@ int main(int argc, char *argv[])
     classImageOut.close();
   }
   else{//classify shape file
+    //notice that fields have already been set by readDataImageShape (taking 
into account appropriate bands)
     for(int ivalidation=0;ivalidation<input_opt.size();++ivalidation){
       assert(output_opt.size()==input_opt.size());
       if(verbose_opt[0])
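
Editorially, the cross-validation block added above scores the held-out predictions with a confusion matrix and reports Cohen's kappa. As a self-contained illustration of what such a kappa computation involves (a minimal sketch of the standard formula; this is not the pktools ConfusionMatrix class itself):

#include <cstddef>
#include <iostream>
#include <vector>

// Cohen's kappa from a square confusion matrix cm[reference][output]:
// (observed agreement - chance agreement) / (1 - chance agreement).
double kappa(const std::vector<std::vector<double> >& cm) {
  std::size_t n = cm.size();
  double total = 0, diag = 0;
  std::vector<double> rowSum(n, 0), colSum(n, 0);
  for (std::size_t i = 0; i < n; ++i) {
    for (std::size_t j = 0; j < n; ++j) {
      total += cm[i][j];
      rowSum[i] += cm[i][j];
      colSum[j] += cm[i][j];
      if (i == j) diag += cm[i][j];
    }
  }
  double po = diag / total;  // observed agreement (overall accuracy)
  double pe = 0;             // agreement expected by chance
  for (std::size_t i = 0; i < n; ++i)
    pe += (rowSum[i] / total) * (colSum[i] / total);
  return (po - pe) / (1 - pe);
}

int main() {
  // two-class toy example: 40+45 correct, 15 confusions
  std::vector<std::vector<double> > cm(2, std::vector<double>(2, 0));
  cm[0][0] = 40; cm[0][1] = 10; cm[1][0] = 5; cm[1][1] = 45;
  std::cout << "kappa: " << kappa(cm) << std::endl;  // prints 0.70
  return 0;
}
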
diff --git a/src/apps/pkclassify_nn.h b/src/apps/pkclassify_nn.h
index 68a547c..583c982 100644
--- a/src/apps/pkclassify_nn.h
+++ b/src/apps/pkclassify_nn.h
@@ -34,6 +34,89 @@ template<typename T> unsigned int readDataImageShape(const 
string &filename,
                                                      const string& label,
                                                      int verbose=false);
 
+template<typename T> unsigned int readDataImageShape(const string &filename,
+                                                     map<int,Vector2d<T> > 
&mapPixels, //[classNr][pixelNr][bandNr],
+                                                     vector<string>& fields,
+                                                     const vector<short>& 
bands,
+                                                     const string& label,
+                                                     int verbose=false);
+
+template<typename T> unsigned int readDataImageShape(const string &filename,
+                                                     map<int,Vector2d<T> > 
&mapPixels, //[classNr][pixelNr][bandNr],
+                                                     vector<string>& fields,
+                                                     const vector<short>& 
bands,
+                                                     const string& label,
+                                                     int verbose)
+{
+  mapPixels.clear();
+  int nsample=0;
+  int totalSamples=0;  
+  int nband=0;
+  if(verbose)
+    cout << "reading shape file " << filename  << endl;
+  ImgReaderOgr imgReaderShape;
+  try{
+    imgReaderShape.open(filename);
+    //only retain bands in fields
+    imgReaderShape.getFields(fields);
+    vector<string>::iterator fit=fields.begin();
+    if(verbose>1)
+      cout << "reading fields: ";
+    while(fit!=fields.end()){
+      if(verbose)
+        cout << *fit << " ";
+      // size_t pos=(*fit).find_first_not_of("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_ ");
+      if(((*fit).substr(0,1)=="B")&&((*fit).substr(1).find_first_not_of("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_ ")!=string::npos)){
+        int theBand=atoi((*fit).substr(1).c_str());
+        if(bands.size()){
+          bool validBand=false;
+          for(int iband=0;iband<bands.size();++iband){
+            if(theBand==bands[iband])
+              validBand=true;
+          }
+          if(validBand)
+            ++fit;
+          else
+            fields.erase(fit);
+        }
+        else
+          ++fit;
+      }
+      else
+        fields.erase(fit);
+    }
+    if(verbose)
+      cout << endl;
+    if(verbose){
+      cout << "fields:";
+      for(vector<string>::iterator fit=fields.begin();fit!=fields.end();++fit)
+        cout << " " << *fit;
+      cout << endl;
+    }
+    if(!nband){
+      if(verbose)
+        cout << "reading data" << endl;
+      
nband=imgReaderShape.readData(mapPixels,OFTReal,fields,label,0,true,verbose==2);
+
+    }
+    else
+      
assert(nband==imgReaderShape.readData(mapPixels,OFTReal,fields,label,0,true,false));
+  }
+  catch(string e){
+    ostringstream estr;
+    estr << e << " " << filename;
+    throw(estr.str());
+  }
+  nsample=imgReaderShape.getFeatureCount();
+  totalSamples+=nsample;
+  if(verbose)
+    cout << ": " << nsample << " samples read with " << nband << " bands" << 
endl;
+  imgReaderShape.close();
+  if(verbose)
+    cout << "total number of samples read " << totalSamples << endl;
+  return totalSamples;
+}
+
 
 template<typename T> unsigned int readDataImageShape(const string &filename,
                                                      map<int,Vector2d<T> > 
&mapPixels, //[classNr][pixelNr][bandNr],
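
The new readDataImageShape overload above keeps only attribute fields named B<number> whose band number occurs in the requested band list. The same filtering step in isolation (a sketch; it tests for a numeric suffix, which is the intent of the character check above, and reassigns the result of erase() since std::vector::erase invalidates the iterator — the loop above should do the same):

#include <algorithm>
#include <cstdlib>
#include <string>
#include <vector>

// Keep only fields of the form B<n> whose band number n occurs in 'bands';
// an empty 'bands' keeps every B<n> field. Illustrative stand-in for the
// field-pruning loop in the overload above.
void filterBandFields(std::vector<std::string>& fields,
                      const std::vector<short>& bands) {
  std::vector<std::string>::iterator fit = fields.begin();
  while (fit != fields.end()) {
    bool keep = false;
    if (fit->size() > 1 && (*fit)[0] == 'B' &&
        fit->find_first_not_of("0123456789", 1) == std::string::npos) {
      short theBand = static_cast<short>(atoi(fit->substr(1).c_str()));
      keep = bands.empty() ||
             std::find(bands.begin(), bands.end(), theBand) != bands.end();
    }
    if (keep)
      ++fit;
    else
      fit = fields.erase(fit);  // erase() returns the next valid iterator
  }
}
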
diff --git a/src/apps/pkclassify_svm.cc b/src/apps/pkclassify_svm.cc
index 6d5e911..c52225a 100644
--- a/src/apps/pkclassify_svm.cc
+++ b/src/apps/pkclassify_svm.cc
@@ -61,6 +61,7 @@ int main(int argc, char *argv[])
  Optionpk<int> minSize_opt("m", "min", "if number of training pixels is less than min, do not take this class into account", 0);
   Optionpk<double> start_opt("s", "start", "start band sequence number (set to 
0)",0); 
   Optionpk<double> end_opt("e", "end", "end band sequence number (set to 0 for 
all bands)", 0); 
+  Optionpk<short> band_opt("b", "band", "band index (starting from 0); use either this band option or start/end");
   Optionpk<double> offset_opt("\0", "offset", "offset value for each spectral 
band input features: refl[band]=(DN[band]-offset[band])/scale[band]", 0.0);
  Optionpk<double> scale_opt("\0", "scale", "scale value for each spectral band input features: refl=(DN[band]-offset[band])/scale[band] (use 0 to scale min and max in each band to -1.0 and 1.0)", 0.0);
   Optionpk<unsigned short> aggreg_opt("a", "aggreg", "how to combine 
aggregated classifiers, see also rc option (0: no aggregation, 1: sum rule, 2: 
max rule).",0);
@@ -80,7 +81,7 @@ int main(int argc, char *argv[])
   Optionpk<bool> shrinking_opt("shrink", "shrink", "whether to use the 
shrinking heuristics",false);
   Optionpk<bool> prob_est_opt("pe", "probest", "whether to train a SVC or SVR 
model for probability estimates",false);
   // Optionpk<bool> weight_opt("wi", "wi", "set the parameter C of class i to 
weight*C, for C-SVC",true);
-  Optionpk<unsigned int> cv_opt("cv", "cv", "n-fold cross validation mode",0);
+  Optionpk<unsigned short> cv_opt("cv", "cv", "n-fold cross validation 
mode",0);
   Optionpk<unsigned short> comb_opt("c", "comb", "how to combine bootstrap 
aggregation classifiers (0: sum rule, 1: product rule, 2: max rule). Also used 
to aggregate classes with rc option.",0); 
   Optionpk<unsigned short> bag_opt("\0", "bag", "Number of bootstrap 
aggregations", 1);
   Optionpk<int> bagSize_opt("\0", "bsize", "Percentage of features used from 
available training features for each bootstrap aggregation", 100);
@@ -108,6 +109,7 @@ int main(int argc, char *argv[])
   minSize_opt.retrieveOption(argc,argv);
   start_opt.retrieveOption(argc,argv);
   end_opt.retrieveOption(argc,argv);
+  band_opt.retrieveOption(argc,argv);
   offset_opt.retrieveOption(argc,argv);
   scale_opt.retrieveOption(argc,argv);
   aggreg_opt.retrieveOption(argc,argv);
@@ -204,6 +206,10 @@ int main(int argc, char *argv[])
       priors[iclass]/=normPrior;
   }
 
+  //sort bands
+  if(band_opt.size())
+    std::sort(band_opt.begin(),band_opt.end());
+
   //----------------------------------- Training 
-------------------------------
   vector<struct svm_problem> prob(nbag);
   vector<struct svm_node *> x_space(nbag);
@@ -217,7 +223,10 @@ int main(int argc, char *argv[])
       if(verbose_opt[0]>=1)
         std::cout << "reading imageShape file " << training_opt[0] << 
std::endl;
       try{
-        
totalSamples=readDataImageShape(training_opt[ibag],trainingMap,fields,start_opt[0],end_opt[0],label_opt[0],verbose_opt[0]);
+        if(band_opt.size())
+          
totalSamples=readDataImageShape(training_opt[ibag],trainingMap,fields,band_opt,label_opt[0],verbose_opt[0]);
+        else
+          
totalSamples=readDataImageShape(training_opt[ibag],trainingMap,fields,start_opt[0],end_opt[0],label_opt[0],verbose_opt[0]);
         if(trainingMap.size()<2){
           string errorstring="Error: could not read at least two classes from 
training file";
           throw(errorstring);
@@ -489,15 +498,16 @@ int main(int argc, char *argv[])
     param[ibag].nr_weight = 0;//not used: I use priors and balancing
     param[ibag].weight_label = NULL;
     param[ibag].weight = NULL;
+    param[ibag].verbose=(verbose_opt[0]>1)? true:false;
 
-    if(verbose_opt[0])
+    if(verbose_opt[0]>1)
       std::cout << "checking parameters" << std::endl;
     svm_check_parameter(&prob[ibag],&param[ibag]);
     if(verbose_opt[0])
       std::cout << "parameters ok, training" << std::endl;
     svm[ibag]=svm_train(&prob[ibag],&param[ibag]);
     
-    if(verbose_opt[0])
+    if(verbose_opt[0]>1)
       std::cout << "SVM is now trained" << std::endl;
     if(cv_opt[0]>0){
       std::cout << "Confusion matrix" << std::endl;
@@ -616,33 +626,45 @@ int main(int argc, char *argv[])
       vector<short> lineMask;
       if(mask_opt.size())
         lineMask.resize(maskReader.nrOfCol());
-      Vector2d<float> hpixel(ncol,nband);
+      Vector2d<float> hpixel(ncol);
       // Vector2d<float> fpixel(ncol);
       Vector2d<float> prOut(nreclass,ncol);//posterior prob for each reclass
       Vector2d<char> classBag;//classified line for writing to image file
       if(classBag_opt.size())
         classBag.resize(nbag,ncol);
       //read all bands of all pixels in this line in hline
-      for(int iband=start_opt[0];iband<start_opt[0]+nband;++iband){
-        if(verbose_opt[0]==2)
-          std::cout << "reading band " << iband << std::endl;
-        assert(iband>=0);
-        assert(iband<testImage.nrOfBand());
-        try{
-          testImage.readData(buffer,GDT_Float32,iline,iband);
-        }
-        catch(string theError){
-          cerr << "Error reading " << input_opt[0] << ": " << theError << 
std::endl;
-          exit(3);
+      try{
+        if(band_opt.size()){
+          for(int iband=0;iband<band_opt.size();++iband){
+            if(verbose_opt[0]==2)
+              std::cout << "reading band " << band_opt[iband] << std::endl;
+            assert(band_opt[iband]>=0);
+            assert(band_opt[iband]<testImage.nrOfBand());
+            testImage.readData(buffer,GDT_Float32,iline,band_opt[iband]);
+            for(int icol=0;icol<ncol;++icol)
+              hpixel[icol].push_back(buffer[icol]);
+          }
         }
-        catch(...){
-          cerr << "error catched" << std::endl;
-          exit(3);
+        else{
+          for(int iband=start_opt[0];iband<start_opt[0]+nband;++iband){
+            if(verbose_opt[0]==2)
+              std::cout << "reading band " << iband << std::endl;
+            assert(iband>=0);
+            assert(iband<testImage.nrOfBand());
+            testImage.readData(buffer,GDT_Float32,iline,iband);
+            for(int icol=0;icol<ncol;++icol)
+              hpixel[icol].push_back(buffer[icol]);
+          }
         }
-        for(int icol=0;icol<ncol;++icol)
-          hpixel[icol][iband-start_opt[0]]=buffer[icol];
       }
-
+      catch(string theError){
+        cerr << "Error reading " << input_opt[0] << ": " << theError << 
std::endl;
+        exit(3);
+      }
+      catch(...){
+        cerr << "error catched" << std::endl;
+        exit(3);
+      }
       assert(nband==hpixel[0].size());
       if(verbose_opt[0]==2)
         std::cout << "used bands: " << nband << std::endl;
@@ -663,7 +685,7 @@ int main(int argc, char *argv[])
     
       //process per pixel
       for(int icol=0;icol<ncol;++icol){
-
+        assert(hpixel[icol].size()==nband);
         bool masked=false;
         if(!lineMask.empty()){
           short theMask=0;
@@ -858,6 +880,7 @@ int main(int argc, char *argv[])
     classImageOut.close();
   }
   else{//classify shape file
+    //notice that fields have already been set by readDataImageShape (taking 
into account appropriate bands)
     for(int ivalidation=0;ivalidation<input_opt.size();++ivalidation){
       assert(output_opt.size()==input_opt.size());
       if(verbose_opt[0])
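
Both classifiers now gather each image line with the same pattern: read one selected band at a time into a line buffer and transpose it into per-pixel feature vectors, so hpixel[icol].size() equals the number of bands actually used (which the added assert checks). The pattern in isolation (readLine is a stand-in for ImgReaderGdal::readData, introduced here only to keep the sketch self-contained):

#include <cstddef>
#include <vector>

// Gather one image line band-by-band into pixel-major feature vectors:
// after the call, hpixel[icol][ib] holds band bands[ib] at column icol.
template <class ReadLineFn>
void gatherLine(ReadLineFn readLine, const std::vector<short>& bands,
                int ncol, std::vector<std::vector<float> >& hpixel) {
  hpixel.assign(ncol, std::vector<float>());
  std::vector<float> buffer(ncol);
  for (std::size_t ib = 0; ib < bands.size(); ++ib) {
    readLine(bands[ib], buffer);             // one band, whole line
    for (int icol = 0; icol < ncol; ++icol)
      hpixel[icol].push_back(buffer[icol]);  // transpose into pixel vectors
  }
}

Because hpixel starts empty and grows by one value per band read, the same code path works whether the bands come from the -b list or from the start/end range.
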
diff --git a/src/apps/pkfs_svm.cc b/src/apps/pkfs_svm.cc
index 03b56c8..adb0421 100644
--- a/src/apps/pkfs_svm.cc
+++ b/src/apps/pkfs_svm.cc
@@ -18,6 +18,7 @@ You should have received a copy of the GNU General Public 
License
 along with pktools.  If not, see <http://www.gnu.org/licenses/>.
 ***********************************************************************/
 #include <vector>
+#include <string>
 #include <map>
 #include <algorithm>
 #include "imageclasses/ImgReaderGdal.h"
@@ -36,76 +37,67 @@ along with pktools.  If not, see 
<http://www.gnu.org/licenses/>.
 
 #define Malloc(type,n) (type *)malloc((n)*sizeof(type))
 
+enum SelectorValue  { NA=0, SFFS=1, SFS=2, SBS=3, BFS=4 };
+
+//global parameters used in cost function getCost
+Optionpk<unsigned short> svm_type_opt("svmt", "svmtype", "type of SVM (0: 
C-SVC, 1: nu-SVC, 2: one-class SVM, 3: epsilon-SVR,  4: nu-SVR)",0);
+Optionpk<unsigned short> kernel_type_opt("kt", "kerneltype", "type of kernel 
function (0: linear: u'*v, 1: polynomial: (gamma*u'*v + coef0)^degree, 2: 
radial basis function: exp(-gamma*|u-v|^2), 3: sigmoid: tanh(gamma*u'*v + 
coef0), 4: precomputed kernel (kernel values in training_set_file)",2);
+Optionpk<unsigned short> kernel_degree_opt("kd", "kd", "degree in kernel 
function",3);
+Optionpk<float> gamma_opt("g", "gamma", "gamma in kernel function",0);
+Optionpk<float> coef0_opt("c0", "coef0", "coef0 in kernel function",0);
+Optionpk<float> ccost_opt("cc", "ccost", "the parameter C of C-SVC, 
epsilon-SVR, and nu-SVR",1);
+Optionpk<float> nu_opt("nu", "nu", "the parameter nu of nu-SVC, one-class SVM, 
and nu-SVR",0.5);
+Optionpk<float> epsilon_loss_opt("eloss", "eloss", "the epsilon in loss 
function of epsilon-SVR",0.1);
+Optionpk<int> cache_opt("cache", "cache", "cache memory size in MB",100);
+Optionpk<float> epsilon_tol_opt("etol", "etol", "the tolerance of termination 
criterion",0.001);
+Optionpk<bool> shrinking_opt("shrink", "shrink", "whether to use the shrinking 
heuristics",false);
+Optionpk<bool> prob_est_opt("pe", "probest", "whether to train a SVC or SVR 
model for probability estimates",false);
+// Optionpk<bool> weight_opt("wi", "wi", "set the parameter C of class i to 
weight*C, for C-SVC",true);
+Optionpk<unsigned short> cv_opt("cv", "cv", "n-fold cross validation mode",2);
+Optionpk<short> verbose_opt("v", "verbose", "set to: 0 (results only), 1 
(confusion matrix), 2 (debug)",0);
+
 double getCost(const vector<Vector2d<float> > &trainingFeatures)
 {
-  //test
-  std::cout << "debug0" << std::endl;
-  bool verbose=true;
+  unsigned short nclass=trainingFeatures.size();
+  unsigned int ntraining=0;
+  for(int iclass=0;iclass<nclass;++iclass)
+    ntraining+=trainingFeatures[iclass].size();
   unsigned short nFeatures=trainingFeatures[0][0].size();
-  //todo: set SVM parameters through command line options
+
   struct svm_parameter param;
-  // param.svm_type = svm_type_opt[0];
-  // param.kernel_type = kernel_type_opt[0];
-  // param.degree = kernel_degree_opt[0];
-  // param.gamma = (gamma_opt[0]>0)? gamma_opt[0] : 1.0/nFeatures;
-  // param.coef0 = coef0_opt[0];
-  // param.nu = nu_opt[0];
-  // param.cache_size = cache_opt[0];
-  // param.C = ccost_opt[0];
-  // param.eps = epsilon_tol_opt[0];
-  // param.p = epsilon_loss_opt[0];
-  // param.shrinking = (shrinking_opt[0])? 1 : 0;
-  // param.probability = (prob_est_opt[0])? 1 : 0;
-  // param.nr_weight = 0;//not used: I use priors and balancing
-  // param.weight_label = NULL;
-  // param.weight = NULL;
-  //todo: make parameter
-  unsigned int cv=2;
-  param.svm_type = 0;
-  param.kernel_type = 2;
-  param.degree = 3;
-  param.gamma = 1.0/nFeatures;
-  param.coef0 = 0;
-  param.nu = 0.5;
-  param.cache_size = 100.0;
-  param.C = 1;
-  param.eps = 0.001;
-  param.p = 0.1;
-  param.shrinking = 0;
-  param.probability = 0;
+  param.svm_type = svm_type_opt[0];
+  param.kernel_type = kernel_type_opt[0];
+  param.degree = kernel_degree_opt[0];
+  param.gamma = (gamma_opt[0]>0)? gamma_opt[0] : 1.0/nFeatures;
+  param.coef0 = coef0_opt[0];
+  param.nu = nu_opt[0];
+  param.cache_size = cache_opt[0];
+  param.C = ccost_opt[0];
+  param.eps = epsilon_tol_opt[0];
+  param.p = epsilon_loss_opt[0];
+  param.shrinking = (shrinking_opt[0])? 1 : 0;
+  param.probability = (prob_est_opt[0])? 1 : 0;
   param.nr_weight = 0;//not used: I use priors and balancing
   param.weight_label = NULL;
   param.weight = NULL;
+  param.verbose=(verbose_opt[0]>1)? true:false;
   struct svm_model* svm;
   struct svm_problem prob;
   struct svm_node* x_space;
-  unsigned int ntraining=0;
-  unsigned short nclass=trainingFeatures.size();
-
-  //test
-  std::cout << "debug1" << std::endl;
-
-  for(int iclass=0;iclass<nclass;++iclass)
-    ntraining+=trainingFeatures[iclass].size();
 
   prob.l=ntraining;
   prob.y = Malloc(double,prob.l);
   prob.x = Malloc(struct svm_node *,prob.l);
-  //test
-  std::cout << "debug2" << std::endl;
   x_space = Malloc(struct svm_node,(nFeatures+1)*ntraining);
   unsigned long int spaceIndex=0;
   int lIndex=0;
   for(int iclass=0;iclass<nclass;++iclass){
     for(int isample=0;isample<trainingFeatures[iclass].size();++isample){
-      //test
-      std::cout << "..." << iclass << std::endl;
       prob.x[lIndex]=&(x_space[spaceIndex]);
       for(int ifeature=0;ifeature<nFeatures;++ifeature){
         x_space[spaceIndex].index=ifeature+1;
         x_space[spaceIndex].value=trainingFeatures[iclass][isample][ifeature];
-        //test
-        std::cout << " " << x_space[spaceIndex].index << ":" << 
x_space[spaceIndex].value;
         ++spaceIndex;
       }
       x_space[spaceIndex++].index=-1;
@@ -113,23 +105,20 @@ double getCost(const vector<Vector2d<float> > 
&trainingFeatures)
       ++lIndex;
     }
   }
-  //test
-  std::cout << "debug3" << std::endl;
 
   assert(lIndex==prob.l);
-  if(verbose)
+  if(verbose_opt[0]>1)
     std::cout << "checking parameters" << std::endl;
   svm_check_parameter(&prob,&param);
-  if(verbose)
+  if(verbose_opt[0]>1)
     std::cout << "parameters ok, training" << std::endl;
   svm=svm_train(&prob,&param);
-  if(verbose)
+  if(verbose_opt[0]>1)
     std::cout << "SVM is now trained" << std::endl;
 
-  std::cout << "Confusion matrix" << std::endl;
   ConfusionMatrix cm(nclass);
   double *target = Malloc(double,prob.l);
-  svm_cross_validation(&prob,&param,cv,target);
+  svm_cross_validation(&prob,&param,cv_opt[0],target);
   assert(param.svm_type != EPSILON_SVR&&param.svm_type != NU_SVR);//only for 
regression
   int total_correct=0;
   for(int i=0;i<prob.l;i++)
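
The rewritten getCost packs the per-class training features into libsvm's sparse svm_node layout before calling svm_cross_validation. That layout in isolation (svm_node and svm_problem are redeclared here only to keep the sketch self-contained; in the tool they come from svm.h):

#include <cstddef>
#include <cstdlib>
#include <vector>

struct svm_node { int index; double value; };            // as in libsvm svm.h
struct svm_problem { int l; double* y; svm_node** x; };  // (subset of fields)

// Lay out all samples as 1-based sparse nodes with an index==-1 terminator
// per sample (libsvm's convention); prob.y carries the class labels.
void packProblem(const std::vector<std::vector<std::vector<float> > >& feat,
                 svm_problem& prob, svm_node*& x_space) {
  std::size_t nclass = feat.size();
  std::size_t nFeatures = feat[0][0].size();
  std::size_t ntraining = 0;
  for (std::size_t ic = 0; ic < nclass; ++ic)
    ntraining += feat[ic].size();
  prob.l = static_cast<int>(ntraining);
  prob.y = (double*)malloc(ntraining * sizeof(double));
  prob.x = (svm_node**)malloc(ntraining * sizeof(svm_node*));
  x_space = (svm_node*)malloc((nFeatures + 1) * ntraining * sizeof(svm_node));
  std::size_t si = 0, l = 0;
  for (std::size_t ic = 0; ic < nclass; ++ic) {
    for (std::size_t is = 0; is < feat[ic].size(); ++is) {
      prob.x[l] = &x_space[si];
      for (std::size_t f = 0; f < nFeatures; ++f) {
        x_space[si].index = static_cast<int>(f) + 1;  // 1-based feature index
        x_space[si].value = feat[ic][is][f];
        ++si;
      }
      x_space[si++].index = -1;                       // end-of-sample marker
      prob.y[l++] = static_cast<double>(ic);
    }
  }
}
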
@@ -157,12 +146,11 @@ int main(int argc, char *argv[])
 {
   map<short,int> reclassMap;
   vector<int> vreclass;
-  vector<double> priors;
+  // vector<double> priors;
   
   //--------------------------- command line options 
------------------------------------
-
   std::string versionString="version ";
-  versionString+=VERSION;
+  versionString+=static_cast<string>(VERSION);
   versionString+=", Copyright (C) 2008-2012 Pieter Kempeneers.\n\
    This program comes with ABSOLUTELY NO WARRANTY; for details type use option 
-h.\n\
    This is free software, and you are welcome to redistribute it\n\
@@ -179,25 +167,13 @@ int main(int argc, char *argv[])
  Optionpk<int> minSize_opt("m", "min", "if number of training pixels is less than min, do not take this class into account", 0);
   Optionpk<double> start_opt("s", "start", "start band sequence number (set to 
0)",0); 
   Optionpk<double> end_opt("e", "end", "end band sequence number (set to 0 for 
all bands)", 0); 
+  Optionpk<short> band_opt("b", "band", "band index (starting from 0); use either this band option or start/end");
   Optionpk<double> offset_opt("\0", "offset", "offset value for each spectral 
band input features: refl[band]=(DN[band]-offset[band])/scale[band]", 0.0);
  Optionpk<double> scale_opt("\0", "scale", "scale value for each spectral band input features: refl=(DN[band]-offset[band])/scale[band] (use 0 to scale min and max in each band to -1.0 and 1.0)", 0.0);
   Optionpk<unsigned short> aggreg_opt("a", "aggreg", "how to combine 
aggregated classifiers, see also rc option (0: no aggregation, 1: sum rule, 2: 
max rule).",0);
-  Optionpk<double> priors_opt("p", "prior", "prior probabilities for each 
class (e.g., -p 0.3 -p 0.3 -p 0.2 )", 0.0); 
-  Optionpk<unsigned short> svm_type_opt("svmt", "svmtype", "type of SVM (0: 
C-SVC, 1: nu-SVC, 2: one-class SVM, 3: epsilon-SVR,        4: nu-SVR)",0);
-  Optionpk<unsigned short> kernel_type_opt("kt", "kerneltype", "type of kernel 
function (0: linear: u'*v, 1: polynomial: (gamma*u'*v + coef0)^degree, 2: 
radial basis function: exp(-gamma*|u-v|^2), 3: sigmoid: tanh(gamma*u'*v + 
coef0), 4: precomputed kernel (kernel values in training_set_file)",2);
-  Optionpk<unsigned short> kernel_degree_opt("kd", "kd", "degree in kernel 
function",3);
-  Optionpk<float> gamma_opt("g", "gamma", "gamma in kernel function",0);
-  Optionpk<float> coef0_opt("c0", "coef0", "coef0 in kernel function",0);
-  Optionpk<float> ccost_opt("cc", "ccost", "the parameter C of C-SVC, 
epsilon-SVR, and nu-SVR",1);
-  Optionpk<float> nu_opt("nu", "nu", "the parameter nu of nu-SVC, one-class 
SVM, and nu-SVR",0.5);
-  Optionpk<float> epsilon_loss_opt("eloss", "eloss", "the epsilon in loss 
function of epsilon-SVR",0.1);
-  Optionpk<int> cache_opt("cache", "cache", "cache memory size in MB",100);
-  Optionpk<float> epsilon_tol_opt("etol", "etol", "the tolerance of 
termination criterion",0.001);
-  Optionpk<bool> shrinking_opt("shrink", "shrink", "whether to use the 
shrinking heuristics",false);
-  Optionpk<bool> prob_est_opt("pe", "probest", "whether to train a SVC or SVR 
model for probability estimates",false);
-  // Optionpk<bool> weight_opt("wi", "wi", "set the parameter C of class i to 
weight*C, for C-SVC",true);
-  Optionpk<unsigned int> cv_opt("cv", "cv", "n-fold cross validation mode",2);
-  Optionpk<short> verbose_opt("v", "verbose", "set to: 0 (results only), 1 
(confusion matrix), 2 (debug)",0);
+  // Optionpk<double> priors_opt("p", "prior", "prior probabilities for each 
class (e.g., -p 0.3 -p 0.3 -p 0.2 )", 0.0);
+  Optionpk<string> selector_opt("sm", "sm", "feature selection method (sffs=sequential floating forward search, sfs=sequential forward search, sbs=sequential backward search, bfs=brute force search)","sffs"); 
+  Optionpk<float> epsilon_cost_opt("ecost", "ecost", "epsilon for stopping 
criterion in cost function to determine optimal number of features",0.001);
 
   version_opt.retrieveOption(argc,argv);
   license_opt.retrieveOption(argc,argv);
@@ -211,10 +187,11 @@ int main(int argc, char *argv[])
   minSize_opt.retrieveOption(argc,argv);
   start_opt.retrieveOption(argc,argv);
   end_opt.retrieveOption(argc,argv);
+  band_opt.retrieveOption(argc,argv);
   offset_opt.retrieveOption(argc,argv);
   scale_opt.retrieveOption(argc,argv);
   aggreg_opt.retrieveOption(argc,argv);
-  priors_opt.retrieveOption(argc,argv);
+  // priors_opt.retrieveOption(argc,argv);
   svm_type_opt.retrieveOption(argc,argv);
   kernel_type_opt.retrieveOption(argc,argv);
   kernel_degree_opt.retrieveOption(argc,argv);
@@ -228,6 +205,8 @@ int main(int argc, char *argv[])
   shrinking_opt.retrieveOption(argc,argv);
   prob_est_opt.retrieveOption(argc,argv);
   cv_opt.retrieveOption(argc,argv);
+  selector_opt.retrieveOption(argc,argv);
+  epsilon_cost_opt.retrieveOption(argc,argv);
   verbose_opt.retrieveOption(argc,argv);
 
   if(version_opt[0]||todo_opt[0]){
@@ -243,6 +222,13 @@ int main(int argc, char *argv[])
     std::cout << "usage: pkfs_svm -t training [OPTIONS]" << std::endl;
     exit(0);
   }
+  
+  static std::map<std::string, SelectorValue> selMap;
+  //initialize selMap
+  selMap["sffs"]=SFFS;
+  selMap["sfs"]=SFS;
+  selMap["sbs"]=SBS;
+  selMap["bfs"]=BFS;
 
   assert(training_opt[0].size());
   if(verbose_opt[0]>=1)
@@ -267,19 +253,21 @@ int main(int argc, char *argv[])
       vreclass[iclass]=reclass_opt[iclass];
     }
   }
-  if(priors_opt.size()>1){//priors from argument list
-    priors.resize(priors_opt.size());
-    double normPrior=0;
-    for(int iclass=0;iclass<priors_opt.size();++iclass){
-      priors[iclass]=priors_opt[iclass];
-      normPrior+=priors[iclass];
-    }
-    //normalize
-    for(int iclass=0;iclass<priors_opt.size();++iclass)
-      priors[iclass]/=normPrior;
-  }
-
+  // if(priors_opt.size()>1){//priors from argument list
+  //   priors.resize(priors_opt.size());
+  //   double normPrior=0;
+  //   for(int iclass=0;iclass<priors_opt.size();++iclass){
+  //     priors[iclass]=priors_opt[iclass];
+  //     normPrior+=priors[iclass];
+  //   }
+  //   //normalize
+  //   for(int iclass=0;iclass<priors_opt.size();++iclass)
+  //     priors[iclass]/=normPrior;
+  // }
 
+  //sort bands
+  if(band_opt.size())
+    std::sort(band_opt.begin(),band_opt.end());
   //----------------------------------- Training 
-------------------------------
   struct svm_problem prob;
   vector<string> fields;
@@ -289,7 +277,10 @@ int main(int argc, char *argv[])
   if(verbose_opt[0]>=1)
     std::cout << "reading imageShape file " << training_opt[0] << std::endl;
   try{
-    
totalSamples=readDataImageShape(training_opt[0],trainingMap,fields,start_opt[0],end_opt[0],label_opt[0],verbose_opt[0]);
+    if(band_opt.size())
+      
totalSamples=readDataImageShape(training_opt[0],trainingMap,fields,band_opt,label_opt[0],verbose_opt[0]);
+    else
+      
totalSamples=readDataImageShape(training_opt[0],trainingMap,fields,start_opt[0],end_opt[0],label_opt[0],verbose_opt[0]);
     if(trainingMap.size()<2){
       string errorstring="Error: could not read at least two classes from 
training file";
       throw(errorstring);
@@ -373,7 +364,7 @@ int main(int argc, char *argv[])
     assert(scale_opt.size()==nband);
   Histogram hist;
   for(int iband=0;iband<nband;++iband){
-    if(verbose_opt[0]>=1)
+    if(verbose_opt[0]>1)
       std::cout << "scaling for band" << iband << std::endl;
     offset[iband]=(offset_opt.size()==1)?offset_opt[0]:offset_opt[iband];
     scale[iband]=(scale_opt.size()==1)?scale_opt[0]:scale_opt[iband];
@@ -391,7 +382,7 @@ int main(int argc, char *argv[])
       }
       offset[iband]=theMin+(theMax-theMin)/2.0;
       scale[iband]=(theMax-theMin)/2.0;
-      if(verbose_opt[0]>=1){
+      if(verbose_opt[0]>1){
         std::cout << "Extreme image values for band " << iband << ": [" << 
theMin << "," << theMax << "]" << std::endl;
         std::cout << "Using offset, scale: " << offset[iband] << ", " << 
scale[iband] << std::endl;
         std::cout << "scaled values for band " << iband << ": [" << 
(theMin-offset[iband])/scale[iband] << "," << 
(theMax-offset[iband])/scale[iband] << "]" << std::endl;
@@ -450,20 +441,20 @@ int main(int argc, char *argv[])
     std::cout << "number of reclasses: " << nreclass << std::endl;
   }
     
-  if(priors_opt.size()==1){//default: equal priors for each class
-    priors.resize(nclass);
-    for(int iclass=0;iclass<nclass;++iclass)
-      priors[iclass]=1.0/nclass;
-  }
-  assert(priors_opt.size()==1||priors_opt.size()==nclass);
+  // if(priors_opt.size()==1){//default: equal priors for each class
+  //   priors.resize(nclass);
+  //   for(int iclass=0;iclass<nclass;++iclass)
+  //     priors[iclass]=1.0/nclass;
+  // }
+  // assert(priors_opt.size()==1||priors_opt.size()==nclass);
     
   if(verbose_opt[0]>=1){
     std::cout << "number of bands: " << nband << std::endl;
     std::cout << "number of classes: " << nclass << std::endl;
-    std::cout << "priors:";
-    for(int iclass=0;iclass<nclass;++iclass)
-      std::cout << " " << priors[iclass];
-    std::cout << std::endl;
+    // std::cout << "priors:";
+    // for(int iclass=0;iclass<nclass;++iclass)
+    //   std::cout << " " << priors[iclass];
+    // std::cout << std::endl;
   }
 
  //Calculate features of training set
@@ -496,45 +487,84 @@ int main(int argc, char *argv[])
     
   unsigned int ntraining=0;
   for(int iclass=0;iclass<nclass;++iclass){
-    std::cout << "..." << iclass << "..." << std::endl;
-    if(verbose_opt[0]>=1)
+    if(verbose_opt[0]>1)
       std::cout << "training sample size for class " << vcode[iclass] << ": " 
<< trainingFeatures[iclass].size() << std::endl;
     ntraining+=trainingFeatures[iclass].size();
   }
 
   int nFeatures=trainingFeatures[0][0].size();
-  int maxFeatures=(maxFeatures_opt[0])? maxFeatures_opt[0] : nFeatures;
+  int maxFeatures=maxFeatures_opt[0];
+  double previousCost=(maxFeatures_opt[0])? 1 : 0;
+  double currentCost=1;
   list<int> subset;//set of selected features (levels) for each class 
combination
   double cost=0;
   FeatureSelector selector;
-  if(maxFeatures==nFeatures){
-    subset.clear();
-    for(int ifeature=0;ifeature<nFeatures;++ifeature)
-      subset.push_back(ifeature);
-    // cost=getCost(trainingFeatures[iclass1],trainingFeatures[iclass2]);
-  }
-  else{
-    try{
-      //test
-      std::cout << "debug3" << std::endl;
-      
cost=selector.floating(trainingFeatures,&getCost,subset,maxFeatures,(verbose_opt[0]>0)
 ? true:false);
+  try{
+    if(maxFeatures==nFeatures){
+      subset.clear();
+      for(int ifeature=0;ifeature<nFeatures;++ifeature)
+        subset.push_back(ifeature);
+      cost=getCost(trainingFeatures);
     }
-    catch(...){
-      std::cout << "catched feature selection" << std::endl;
-      exit(1);
+    else if(!maxFeatures_opt[0]){
+      while(currentCost-previousCost>epsilon_cost_opt[0]){
+        ++maxFeatures;
+        switch(selMap[selector_opt[0]]){
+        case(SFFS):
+          
cost=selector.floating(trainingFeatures,&getCost,subset,maxFeatures,verbose_opt[0]);
+          break;
+        case(SFS):
+          
cost=selector.forward(trainingFeatures,&getCost,subset,maxFeatures,verbose_opt[0]);
+          break;
+        case(SBS):
+          
cost=selector.backward(trainingFeatures,&getCost,subset,maxFeatures,verbose_opt[0]);
+          break;
+        case(BFS):
+          
cost=selector.bruteForce(trainingFeatures,&getCost,subset,maxFeatures,verbose_opt[0]);
+          break;
+        default:
+          std::cout << "Error: selector not supported, please use sffs, sfs, 
sbs or bfs" << std::endl;
+          exit(1);
+          break;
+        }
+        previousCost=currentCost;
+        currentCost=cost;
+      }
     }
-  }
-  if(verbose_opt[0]){
-    cout <<"cost: " << cost << endl;
-    cout << "levels: ";
-    for(list<int>::const_iterator lit=subset.begin();lit!=subset.end();++lit){
-      cout << *lit;
-      if((*(lit))!=subset.back())
-        cout <<",";
-      else
-        cout << endl;
+    else{
+      switch(selMap[selector_opt[0]]){
+      case(SFFS):
+        
cost=selector.floating(trainingFeatures,&getCost,subset,maxFeatures,verbose_opt[0]);
+        break;
+      case(SFS):
+        
cost=selector.forward(trainingFeatures,&getCost,subset,maxFeatures,verbose_opt[0]);
+        break;
+      case(SBS):
+        
cost=selector.backward(trainingFeatures,&getCost,subset,maxFeatures,verbose_opt[0]);
+        break;
+      case(BFS):
+        
cost=selector.bruteForce(trainingFeatures,&getCost,subset,maxFeatures,verbose_opt[0]);
+        break;
+      default:
+        std::cout << "Error: selector not supported, please use sffs, sfs, sbs 
or bfs" << std::endl;
+        exit(1);
+        break;
+      }
     }
   }
+  catch(...){
+    std::cout << "catched feature selection" << std::endl;
+    exit(1);
+  }
+
+  if(verbose_opt[0])
+    cout <<"cost: " << cost << endl;
+  for(list<int>::const_iterator lit=subset.begin();lit!=subset.end();++lit)
+    std::cout << " -b " << *lit;
+  std::cout << std::endl;
 
   // *NOTE* Because svm_model contains pointers to svm_problem, you can
   // not free the memory used by svm_problem if you are still using the

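When -maxf is left at 0, the loop added above is meant to grow the candidate feature count until an extra feature improves the cross-validated cost by less than -ecost. The control flow in generic form (a sketch with initializations adjusted so the loop keeps running while the cost still improves; costOf(k) stands in for one run of selector.floating/forward/backward/bruteForce with maxFeatures=k):

// Grow the feature count until the best-subset cost improves by less
// than 'epsilon' (or every feature is used).
template <class CostFn>
int selectFeatureCount(CostFn costOf, double epsilon, int nFeatures) {
  int k = 0;
  double previousCost = -1, currentCost = 0;  // guarantees one iteration
  while (k < nFeatures && currentCost - previousCost > epsilon) {
    previousCost = currentCost;
    currentCost = costOf(++k);
  }
  return k;
}

With a kappa-style cost in [0,1], the loop stops at the first feature count whose marginal gain drops to epsilon or below, and the selected levels are then printed as a ready-to-paste list of -b options, as in the hunk above.
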
-- 
Alioth's /usr/local/bin/git-commit-notice on 
/srv/git.debian.org/git/pkg-grass/pktools.git
