Hello community,

here is the log from the commit of package armadillo for openSUSE:Factory 
checked in at 2019-01-25 22:45:34
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/armadillo (Old)
 and      /work/SRC/openSUSE:Factory/.armadillo.new.28833 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "armadillo"

Fri Jan 25 22:45:34 2019 rev:123 rq:668655 version:9.200.7

Changes:
--------
--- /work/SRC/openSUSE:Factory/armadillo/armadillo.changes      2018-11-18 
23:32:07.593485987 +0100
+++ /work/SRC/openSUSE:Factory/.armadillo.new.28833/armadillo.changes   
2019-01-25 22:45:35.235064076 +0100
@@ -1,0 +2,6 @@
+Fri Jan 25 11:11:30 UTC 2019 - badshah...@gmail.com
+
+- Update to version 9.200.7:
+  + Misc bug fixes.
+
+-------------------------------------------------------------------

Old:
----
  armadillo-9.200.4.tar.xz

New:
----
  armadillo-9.200.7.tar.xz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ armadillo.spec ++++++
--- /var/tmp/diff_new_pack.DWpiRo/_old  2019-01-25 22:45:36.003063178 +0100
+++ /var/tmp/diff_new_pack.DWpiRo/_new  2019-01-25 22:45:36.007063173 +0100
@@ -1,7 +1,7 @@
 #
 # spec file for package armadillo
 #
-# Copyright (c) 2018 SUSE LINUX GmbH, Nuernberg, Germany.
+# Copyright (c) 2019 SUSE LINUX GmbH, Nuernberg, Germany.
 #
 # All modifications and additions to the file contributed by third parties
 # remain the property of their copyright owners, unless otherwise agreed
@@ -18,7 +18,7 @@
 
 %define soname libarmadillo9
 Name:           armadillo
-Version:        9.200.4
+Version:        9.200.7
 Release:        0
 Summary:        C++ matrix library with interfaces to LAPACK and ATLAS
 License:        Apache-2.0

++++++ armadillo-9.200.4.tar.xz -> armadillo-9.200.7.tar.xz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/armadillo-9.200.4/README.md 
new/armadillo-9.200.7/README.md
--- old/armadillo-9.200.4/README.md     2016-06-16 18:19:14.000000000 +0200
+++ new/armadillo-9.200.7/README.md     2016-06-16 18:19:17.000000000 +0200
@@ -74,9 +74,9 @@
 
 ### 2: Citation Details
 
-Please cite one (or both) of the following papers if you use Armadillo
-in your research and/or software.  Citations are useful for the continued
-development and maintenance of the library.
+Please cite the following papers if you use Armadillo
+in your research and/or software.  Citations are useful
+for the continued development and maintenance of the library.
 
   * Conrad Sanderson and Ryan Curtin.  
     Armadillo: a template-based C++ library for linear algebra.  
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/armadillo-9.200.4/docs.html 
new/armadillo-9.200.7/docs.html
--- old/armadillo-9.200.4/docs.html     2016-06-16 18:19:14.000000000 +0200
+++ new/armadillo-9.200.7/docs.html     2016-06-16 18:19:17.000000000 +0200
@@ -189,7 +189,7 @@
 <td style="text-align: left; vertical-align: top; width: 50%;">
 <ul>
 <li>
-Please cite one (or both) of the following papers if you use Armadillo in your 
research and/or software.
+Please cite the following papers if you use Armadillo in your research and/or 
software.
 <br>
 Citations are useful for the continued development and maintenance of the 
library.
 <br>
@@ -501,7 +501,7 @@
 <tr style="background-color: #F5F5F5;"><td><a 
href="#cor">cor</a></td><td>&nbsp;</td><td>correlation</td></tr>
 <tr><td><a href="#hist">hist</a></td><td>&nbsp;</td><td>histogram of 
counts</td></tr>
 <tr><td><a href="#histc">histc</a></td><td>&nbsp;</td><td>histogram of counts 
with user specified edges</td></tr>
-<tr><td><a href="#princomp">princomp</a></td><td>&nbsp;</td><td>principal 
component analysis</td></tr>
+<tr><td><a href="#princomp">princomp</a></td><td>&nbsp;</td><td>principal 
component analysis (PCA)</td></tr>
 <tr style="background-color: #F5F5F5;"><td><a 
href="#normpdf">normpdf</a></td><td>&nbsp;</td><td>probability density function 
of normal distribution</td></tr>
 <tr style="background-color: #F5F5F5;"><td><a 
href="#normcdf">normcdf</a></td><td>&nbsp;</td><td>cumulative distribution 
function of normal distribution</td></tr>
 <tr style="background-color: #F5F5F5;"><td><a 
href="#mvnrnd">mvnrnd</a></td><td>&nbsp;</td><td>random vectors from 
multivariate normal distribution</td></tr>
@@ -703,7 +703,7 @@
 </li>
 <br>
 <li>
-When using the <i>mat(n_rows, n_cols)</i> or <i>mat(size(X)</i> constructors, 
by default the memory is uninitialised (ie. may contain garbage);
+When using the <i>mat(n_rows, n_cols)</i> or <i>mat(size(X))</i> constructors, 
by default the memory is uninitialised (ie. may contain garbage);
 memory can be explicitly initialised by specifying the <i>fill_type</i>,
 which is one of:
 <i>fill::zeros</i>,
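
A minimal usage sketch of the fill_type behaviour described in the docs.html passage above (illustrative only, not part of this patch):

  #include <armadillo>

  using namespace arma;

  int main()
    {
    mat A(4, 5);                   // memory not initialised; may contain garbage
    mat B(4, 5, fill::zeros);      // explicitly initialised to zeros
    mat C(size(B), fill::ones);    // dimensions taken from B, filled with ones

    return 0;
    }
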
@@ -2495,6 +2495,7 @@
 <li><a href="#element_initialisation">element initialisation</a></li>
 <li><a href="#ind2sub">ind2sub()</a></li>
 <li><a href="#sub2ind">sub2ind()</a></li>
+<li><a 
href="#index_min_and_index_max_member">.index_min()&nbsp;/&nbsp;.index_max()</a></li>
 <li><a href="#submat">submatrix views</a></li>
 <li><a href="#memptr">.memptr()</a></li>
 <li><a href="#transform">.transform()</a></li>
@@ -3542,7 +3543,7 @@
 
X<b>(&nbsp;span(</b>first_row<b>,</b>&nbsp;last_row<b>),&nbsp;span(</b>first_col<b>,</b>&nbsp;last_col<b>)&nbsp;)</b><br>
 <br>
 
X<b>(&nbsp;</b>first_row<b>,</b>&nbsp;first_col<b>,&nbsp;size(</b>n_rows<b>,&nbsp;</b>n_cols<b>)&nbsp;)</b><br>
-X<b>(&nbsp;</b>first_row<b>,</b>&nbsp;first_col<b>,&nbsp;size(</b>Y<b>)&nbsp;)</b>&nbsp;&nbsp;&nbsp;&nbsp;<i>(Y&nbsp;is&nbsp;a&nbsp;mat)</i><br>
+X<b>(&nbsp;</b>first_row<b>,</b>&nbsp;first_col<b>,&nbsp;size(</b>Y<b>)&nbsp;)</b>&nbsp;&nbsp;&nbsp;&nbsp;<i>[&nbsp;Y&nbsp;is&nbsp;a&nbsp;matrix&nbsp;]</i><br>
 <br>
 
X<b>(</b>&nbsp;<b>span(</b>first_row<b>,</b>&nbsp;last_row<b>),</b>&nbsp;col_number&nbsp;<b>)</b><br>
 
X<b>(</b>&nbsp;row_number<b>,</b>&nbsp;<b>span(</b>first_col<b>,</b>&nbsp;last_col<b>)&nbsp;)</b><br>
@@ -3553,7 +3554,7 @@
 X.<b>tail_cols(&nbsp;</b>number_of_cols<b>&nbsp;)</b><br>
 X.<b>tail_rows(&nbsp;</b>number_of_rows<b>&nbsp;)</b><br>
 <br>
-X.<b>unsafe_col(&nbsp;</b>col_number<b>&nbsp;)</b><br>
+X.<b>unsafe_col(&nbsp;</b>col_number<b>&nbsp;)</b>&nbsp;&nbsp;&nbsp;&nbsp;<i>[&nbsp;use&nbsp;with&nbsp;caution&nbsp;]</i><br>
 </ul>
 </li>
 
@@ -3567,7 +3568,7 @@
 V<b>(&nbsp;span(</b>first_index<b>,</b> last_index<b>)&nbsp;)</b><br>
 V.<b>subvec(&nbsp;</b>first_index<b>,</b> last_index<b>&nbsp;)</b><br>
 <br>
-V.<b>subvec(&nbsp;</b>first_index<b>, 
size(</b>W<b>)&nbsp;)</b>&nbsp;&nbsp;&nbsp;&nbsp;<i>(W&nbsp;is&nbsp;a&nbsp;vector)</i><br>
+V.<b>subvec(&nbsp;</b>first_index<b>, 
size(</b>W<b>)&nbsp;)</b>&nbsp;&nbsp;&nbsp;&nbsp;<i>[&nbsp;W&nbsp;is&nbsp;a&nbsp;vector&nbsp;]</i><br>
 <br>
 V.<b>head(&nbsp;</b>number_of_elements<b>&nbsp;)</b><br>
 V.<b>tail(&nbsp;</b>number_of_elements<b>&nbsp;)</b>
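
A short sketch of the contiguous submatrix/subvector syntax listed above (the matrix and vector names are arbitrary):

  #include <armadillo>

  using namespace arma;

  int main()
    {
    mat X(10, 10, fill::randu);

    mat Y = X( span(1,3), span(2,4) );   // rows 1-3, columns 2-4
    mat Z = X( 0, 0, size(3,3) );        // 3x3 block starting at element (0,0)

    vec V(10, fill::randu);
    vec W = V.subvec(2, 5);              // elements 2 to 5

    return 0;
    }
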
@@ -3610,7 +3611,7 @@
 <br>
 <br>
 
-<li>related views (documented separately)
+<li>related matrix views (documented separately)
 <ul>
 <br>
 X.<a href="#diag">diag()</a><br>
@@ -3748,7 +3749,7 @@
 Q<b>(&nbsp;span(</b>first_row<b>,</b> last_row<b>), span(</b>first_col<b>,</b> 
last_col<b>), span(</b>first_slice<b>,</b> last_slice<b>)&nbsp;)</b><br>
 <br>
 Q<b>(&nbsp;</b>first_row<b>,</b> first_col<b>,</b> first_slice<b>, 
size(</b>n_rows<b>,</b> n_cols<b>, </b>n_slices<b>)&nbsp;)</b><br>
-Q<b>(&nbsp;</b>first_row<b>,</b> first_col<b>,</b> first_slice<b>, 
size(</b>R<b>)&nbsp;)</b>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<i>(R is a 
cube)</i><br>
+Q<b>(&nbsp;</b>first_row<b>,</b> first_col<b>,</b> first_slice<b>, 
size(</b>R<b>)&nbsp;)</b>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<i>[&nbsp;R&nbsp;is&nbsp;a&nbsp;cube&nbsp;]</i><br>
 <br>
 Q.<b>head_slices(&nbsp;</b>number_of_slices<b>&nbsp;)</b><br>
 Q.<b>tail_slices(&nbsp;</b>number_of_slices<b>&nbsp;)</b><br>
@@ -3777,7 +3778,7 @@
 </ul>
 </li>
 <br>
-<li>related views (documented separately)
+<li>related cube views (documented separately)
 <ul>
 <br>
 Q.<a href="#each_slice">each_slice()</a><br>
@@ -3885,7 +3886,7 @@
 <br>
 F<b>(&nbsp;span(</b>first_row<b>,</b> last_row<b>), 
span(</b>first_col<b>,</b>&nbsp;last_col<b>) )</b><br>
 <br>
-F<b>(&nbsp;</b>first_row<b>,</b> first_col<b>, size(</b>G<b>)&nbsp;)</b> 
&nbsp;&nbsp;&nbsp;<i>(G is a 2D field)</i><br>
+F<b>(&nbsp;</b>first_row<b>,</b> first_col<b>, size(</b>G<b>)&nbsp;)</b> 
&nbsp;&nbsp;&nbsp;<i>[&nbsp;G&nbsp;is&nbsp;a&nbsp;2D&nbsp;field&nbsp;]</i><br>
 F<b>(&nbsp;</b>first_row<b>,</b> first_col<b>, size(</b>n_rows<b>, 
</b>n_cols<b>)&nbsp;)</b><br>
 </ul>
 <br>
@@ -3900,7 +3901,7 @@
 <br>
 F<b>(&nbsp;span(</b>first_row<b>,</b> last_row<b>), 
span(</b>first_col<b>,</b>&nbsp;last_col<b>), 
span(</b>first_slice<b>,</b>&nbsp;last_slice<b>) )</b><br>
 <br>
-F<b>(&nbsp;</b>first_row<b>,</b> first_col<b>,</b> first_slice<b>, 
size(</b>G<b>)&nbsp;)</b> &nbsp;&nbsp;&nbsp;<i>(G is a 3D field)</i><br>
+F<b>(&nbsp;</b>first_row<b>,</b> first_col<b>,</b> first_slice<b>, 
size(</b>G<b>)&nbsp;)</b> 
&nbsp;&nbsp;&nbsp;<i>[&nbsp;G&nbsp;is&nbsp;a&nbsp;3D&nbsp;field&nbsp;]</i><br>
 F<b>(&nbsp;</b>first_row<b>,</b> first_col<b>,</b> first_slice<b>, 
size(</b>n_rows<b>, </b>n_cols<b>, </b>n_slices<b>)&nbsp;)</b><br>
 </ul>
 </li>
@@ -3964,7 +3965,7 @@
 </li>
 <br>
 <li>
-<b>NOTE:</b> handling of sparse matrix diagonals has changed slightly in 
Armadillo 8.x;
+<b>NOTE:</b> handling of sparse matrix diagonals has changed slightly between 
Armadillo 7.x and 8.x;
 to copy sparse diagonal to dense vector, use:
 <ul>
 <pre>
@@ -5595,7 +5596,7 @@
 See also:
 <ul>
 <li><a href="#index_min_and_index_max_member">.index_min() &amp; 
.index_max()</a></li>
-<li><a href="#min_and_max">min() &amp; max()</a> (standalone functions)</li>
+<li><a href="#min_and_max">min() &amp; max()</a> (standalone functions with 
extended functionality)</li>
 <li><a href="#clamp">clamp()</a>
 <li><a href="#running_stat">running_stat</a></li>
 <li><a href="#running_stat_vec">running_stat_vec</a></li>
@@ -5638,7 +5639,7 @@
 See also:
 <ul>
 <li><a href="#min_and_max_member">.min() &amp; .max()</a></li>
-<li><a href="#index_min_and_index_max_standalone">index_min() &amp; 
index_max()</a> (standalone functions)</li>
+<li><a href="#index_min_and_index_max_standalone">index_min() &amp; 
index_max()</a> (standalone functions with extended functionality)</li>
 <li><a href="#ind2sub">ind2sub()</a></li>
 <li><a href="#sort_index">sort_index()</a></li>
 <li><a href="#find">find()</a></li>
@@ -9528,8 +9529,9 @@
 <li><a href="#index_min_and_index_max_standalone">index_min() &amp; 
index_max()</a>
 <li><a href="#clamp">clamp()</a>
 <li><a href="#stats_fns">statistics functions</a>
-<li><a href="#running_stat">running_stat</a></li>
-<li><a href="#running_stat_vec">running_stat_vec</a></li>
+<li><a href="#running_stat">running_stat</a> - class for running statistics of 
scalars</li>
+<li><a href="#running_stat_vec">running_stat_vec</a> - class for running 
statistics of vectors</li>
+<li><a href="http://ensmallen.org";>ensmallen</a> - library for finding minimum 
of arbitrary function</li>
 </ul>
 </li>
 <br>
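
A minimal sketch of the running_stat class mentioned above, which accumulates statistics of scalars one sample at a time (sample values here are arbitrary):

  #include <armadillo>
  #include <cmath>
  #include <iostream>

  using namespace arma;

  int main()
    {
    running_stat<double> stats;

    for(uword i = 0; i < 1000; ++i)  { stats( std::sin(0.01 * double(i)) ); }   // feed scalar samples one at a time

    std::cout << "mean: " << stats.mean() << "  var: " << stats.var() << std::endl;

    return 0;
    }
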
@@ -11381,6 +11383,7 @@
 <li><a href="#eig_pair">eig_pair()</a></li>
 <li><a href="#svd">svd()</a></li>
 <li><a href="#svd_econ">svd_econ()</a></li>
+<li><a href="#princomp">princomp()</a></li>
 <li><a href="#eigs_sym">eigs_sym()</a></li>
 <li><a href="http://mathworld.wolfram.com/EigenDecomposition.html";>eigen 
decomposition in MathWorld</a></li>
 <li><a 
href="http://en.wikipedia.org/wiki/Eigenvalues_and_eigenvectors";>eigenvalues 
&amp; eigenvectors in Wikipedia</a></li>
@@ -12288,6 +12291,7 @@
 <li><a href="#svd_econ">svd_econ()</a></li>
 <li><a href="#eig_gen">eig_gen()</a></li>
 <li><a href="#eig_sym">eig_sym()</a></li>
+<li><a href="#princomp">princomp()</a></li>
 <li><a href="#svds">svds()</a></li>
 <li><a 
href="http://en.wikipedia.org/wiki/Singular_value_decomposition";>singular value 
decomposition in Wikipedia</a></li>
 <li><a 
href="http://mathworld.wolfram.com/SingularValueDecomposition.html";>singular 
value decomposition in MathWorld</a></li>
@@ -12367,6 +12371,7 @@
 <li><a href="#svd">svd()</a></li>
 <li><a href="#eig_gen">eig_gen()</a></li>
 <li><a href="#eig_sym">eig_sym()</a></li>
+<li><a href="#princomp">princomp()</a></li>
 <li><a href="#svds">svds()</a></li>
 <li><a 
href="http://en.wikipedia.org/wiki/Singular_value_decomposition";>singular value 
decomposition in Wikipedia</a></li>
 <li><a 
href="http://mathworld.wolfram.com/SingularValueDecomposition.html";>singular 
value decomposition in MathWorld</a></li>
@@ -12482,11 +12487,12 @@
 <br>
 <li><b>Caveats:</b>
 <ul>
-<li>there is currently no check whether <i>X</i> is symmetric</li>
+<li>the number of obtained eigenvalues/eigenvectors may be lower than 
requested, depending on the given data</li>
 <li>
 it's more difficult to compute the smallest eigenvalues than the largest 
eigenvalues;
 <br>if the decomposition fails, try increasing <i>k</i> (number of 
eigenvalues) and/or the tolerance
 </li>
+<li>there is currently no check whether <i>X</i> is symmetric</li>
 </ul>
 </li>
 <br>
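
The caveat about obtaining fewer eigenvalues than requested concerns eigs_sym(); a small usage sketch (sizes and density are arbitrary):

  #include <armadillo>

  using namespace arma;

  int main()
    {
    sp_mat A = sprandu<sp_mat>(1000, 1000, 0.01);
    sp_mat B = A.t() * A;                          // construct a symmetric sparse matrix

    vec eigval;
    mat eigvec;

    bool ok = eigs_sym(eigval, eigvec, B, 5);      // request 5 eigenvalues; fewer may be returned

    return ok ? 0 : 1;
    }
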
@@ -12576,9 +12582,14 @@
 </ul>
 </li>
 <br>
-<li><b>Caveat:</b> it's more difficult to compute the smallest eigenvalues 
than the largest eigenvalues;
+<li><b>Caveats:</b>
+<ul>
+<li>the number of obtained eigenvalues/eigenvectors may be lower than 
requested, depending on the given data</li>
+<li>
+it's more difficult to compute the smallest eigenvalues than the largest 
eigenvalues;
 <br>if the decomposition fails, try increasing <i>k</i> (number of 
eigenvalues) and/or the tolerance
 </li>
+</ul>
 <br>
 <li>
 Examples:
@@ -13714,8 +13725,10 @@
 <li>
 See also:
 <ul>
+<li><a href="#eig_sym">eig_sym()</a></li>
+<li><a href="#svd">svd()</a></li>
+<li><a href="#svd_econ">svd_econ()</a></li>
 <li><a 
href="http://en.wikipedia.org/wiki/Principal_component_analysis";>principal 
components analysis in Wikipedia</a></li>
-<li><a 
href="http://mathworld.wolfram.com/PrincipalComponentAnalysis.html";>principal 
components analysis in MathWorld</a></li>
 </ul>
 </li>
 <br>
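
A minimal sketch of calling princomp(), whose see-also list is extended above (data shown here is random and purely illustrative):

  #include <armadillo>

  using namespace arma;

  int main()
    {
    mat X(200, 10, fill::randu);   // each row is a sample

    mat coeff;                     // principal component coefficients
    mat score;                     // projected samples
    vec latent;                    // eigenvalues of the covariance matrix

    princomp(coeff, score, latent, X);

    return 0;
    }
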
@@ -14572,10 +14585,10 @@
 <ul>
 <li><i>n_gaus</i> is the number of Gaussians</li>
 
<li>N(<small>&nbsp;</small><i>x</i><small>&nbsp;</small>|<small>&nbsp;</small><i>m<sub>g</sub></i><small>&nbsp;</small>,&nbsp;<i>C<sub>g</sub></i><small>&nbsp;</small>)
 represents a Gaussian (normal) distribution</li>
-<li>for each Gaussian <i>g</i>:
+<li>each Gaussian <i>g</i> has the following parameters:
 <ul>
 <li><i>h<sub>g</sub></i> is the heft (weight), with constraints 
<i>h<sub>g</sub></i>&nbsp;&ge;&nbsp;0 and &sum;<i>h<sub>g</sub></i> = 1</li>
-<li><i>m<sub>g</sub></i> is the mean (centroid) vector with dimensionality 
<i>n_dims</i></li>
+<li><i>m<sub>g</sub></i> is the mean vector (centroid) with dimensionality 
<i>n_dims</i></li>
 <li><i>C<sub>g</sub></i> is the covariance matrix (either diagonal or 
full)</li>
 </ul>
 </li>
@@ -14596,8 +14609,12 @@
 </li>
 <br>
 <li>
-The classes include parameter estimation (training) algorithms: k-means 
clustering and Expectation-Maximisation (EM);
-the training algorithms will run much quicker on multi-core machines when 
OpenMP is enabled in your compiler (eg. <i>-fopenmp</i> in GCC and clang)
+The <i>gmm_diag</i> and <i>gmm_full</i> classes include dedicated optimisation 
algorithms for learning (training) the model parameters from data:
+<ul>
+<li>k-means clustering, for quick initial estimates</li>
+<li>Expectation-Maximisation (EM), for maximum-likelihood estimates</li>
+</ul>
+The optimisation algorithms are multi-threaded and run much quicker on 
multi-core machines when OpenMP is enabled in your compiler (eg. 
<i>-fopenmp</i> in GCC and clang)
 </li>
 </ul>
 </td>
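
A brief sketch of training a gmm_diag model with the k-means + EM procedure described above (the data and parameter values are arbitrary):

  #include <armadillo>

  using namespace arma;

  int main()
    {
    mat data(5, 1000, fill::randu);   // each column is a 5-dimensional sample

    gmm_diag model;

    // 3 Gaussians; k-means for initial estimates, then EM refinement
    bool ok = model.learn(data, 3, maha_dist, random_subset, 10, 10, 1e-10, false);

    if(ok)  { model.means.print("means:"); }

    return 0;
    }
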
@@ -14612,12 +14629,33 @@
 
 <ul>
 <li>
-Please cite the following paper if you use the <i>gmm_diag</i> or 
<i>gmm_full</i> classes in your research and/or software:
+Please cite one of the following papers if you use the <i>gmm_diag</i> or 
<i>gmm_full</i> classes in your research and/or software:
+<br>
+<br>
+Conrad Sanderson and Ryan Curtin.
+<br><i><a href="arma_gmm_joss_2017.pdf">gmm_diag and gmm_full: C++ classes for 
multi-threaded Gaussian mixture models and Expectation-Maximisation</a></i>.
+<br>Journal of Open Source Software, Vol.&nbsp;2, 2017.
 <br>
 <br>
 Conrad Sanderson and Ryan Curtin.
 <br><i><a href="arma_gmm_spcs_2017.pdf">An Open Source C++ Implementation of 
Multi-Threaded Gaussian Mixture Models, k-Means and Expectation 
Maximisation</a></i>.
 <br>International Conference on Signal Processing and Communication Systems, 
2017.
+<br>
+<br>
+<br>bibtex:
+<br>
+<br>
+<font size=-1>
+<code>
+@article{gmm2017,<br>
+&nbsp;author&nbsp;&nbsp;= {Conrad Sanderson and Ryan Curtin},<br>
+&nbsp;title&nbsp;&nbsp;&nbsp;= {gmm{\tt\char95}diag and gmm{\tt\char95}full: 
C++ classes for multi-threaded Gaussian mixture models and 
Expectation-Maximisation},<br>
+&nbsp;journal&nbsp;= {Journal of Open Source Software},<br>
+&nbsp;volume&nbsp;&nbsp;= {2},<br>
+&nbsp;year&nbsp;&nbsp;&nbsp;&nbsp;= {2017}<br>
+&nbsp;}
+</code>
+</font>
 </li>
 </ul>
 
@@ -15234,7 +15272,7 @@
 <li><a href="http://en.wikipedia.org/wiki/Mixture_model";>mixture model in 
Wikipedia</a></li>
 <li><a href="http://en.wikipedia.org/wiki/K-means_clustering";>k-means 
clustering in Wikipedia</a></li>
 <li><a 
href="http://mathworld.wolfram.com/K-MeansClusteringAlgorithm.html";>k-means 
clustering in MathWorld</a></li>
-<li><a 
href="http://en.wikipedia.org/wiki/Expectation-maximization_algorithm";>expectation
 maximisation algorithm in Wikipedia</a></li>
+<li><a 
href="http://en.wikipedia.org/wiki/Expectation-maximization_algorithm";>Expectation-Maximisation
 algorithm in Wikipedia</a></li>
 <li><a href="http://mathworld.wolfram.com/MaximumLikelihood.html";>maximum 
likelihood in MathWorld</a></li>
 <li><a href="http://en.wikipedia.org/wiki/Vector_quantization";>vector 
quantisation in Wikipedia</a></li>
 <li><a href="http://en.wikipedia.org/wiki/OpenMP";>OpenMP in Wikipedia</a></li>
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/armadillo-9.200.4/examples/example1.cpp 
new/armadillo-9.200.7/examples/example1.cpp
--- old/armadillo-9.200.4/examples/example1.cpp 2016-06-16 18:19:14.000000000 
+0200
+++ new/armadillo-9.200.7/examples/example1.cpp 2016-06-16 18:19:17.000000000 
+0200
@@ -7,6 +7,8 @@
 // Armadillo documentation is available at:
 // http://arma.sourceforge.net/docs.html
 
+// NOTE: the C++11 "auto" keyword is not recommended for use with Armadillo 
objects and functions
+
 int
 main(int argc, char** argv)
   {
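
The note added above warns against the C++11 "auto" keyword because Armadillo expressions are represented by template types that delay evaluation; a brief sketch of the recommended style (illustrative only):

  #include <armadillo>

  using namespace arma;

  int main()
    {
    mat A(3, 3, fill::randu);
    mat B(3, 3, fill::randu);

    mat C = A * B;      // recommended: the explicit type forces evaluation of the expression
    // auto D = A * B;  // not recommended: D would hold an expression-template object, not a matrix

    return 0;
    }
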
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/armadillo-9.200.4/include/armadillo_bits/Col_bones.hpp 
new/armadillo-9.200.7/include/armadillo_bits/Col_bones.hpp
--- old/armadillo-9.200.4/include/armadillo_bits/Col_bones.hpp  2016-06-16 
18:19:14.000000000 +0200
+++ new/armadillo-9.200.7/include/armadillo_bits/Col_bones.hpp  2016-06-16 
18:19:17.000000000 +0200
@@ -183,9 +183,9 @@
   static const bool is_col = true;
   static const bool is_row = false;
   
-  static const uword n_rows = fixed_n_elem;
-  static const uword n_cols = 1;
-  static const uword n_elem = fixed_n_elem;
+  static const uword n_rows;  // value provided below the class definition
+  static const uword n_cols;  // value provided below the class definition
+  static const uword n_elem;  // value provided below the class definition
   
   arma_inline fixed();
   arma_inline fixed(const fixed<fixed_n_elem>& X);
@@ -249,4 +249,21 @@
 
 
 
+// these definitions are outside of the class due to bizarre C++ rules;
+// C++17 has inline variables to address this shortcoming
+
+template<typename eT>
+template<uword fixed_n_elem>
+const uword Col<eT>::fixed<fixed_n_elem>::n_rows = fixed_n_elem;
+
+template<typename eT>
+template<uword fixed_n_elem>
+const uword Col<eT>::fixed<fixed_n_elem>::n_cols = 1u;
+
+template<typename eT>
+template<uword fixed_n_elem>
+const uword Col<eT>::fixed<fixed_n_elem>::n_elem = fixed_n_elem;
+
+
+
 //! @}
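
The comment above refers to C++17 inline variables; a minimal illustration of the difference (generic C++, not part of the Armadillo sources):

  struct fixed_example
    {
    static const unsigned int n_rows;              // pre-C++17: must be defined outside the class
    static inline const unsigned int n_cols = 1;   // C++17: inline variable, defined in place
    };

  const unsigned int fixed_example::n_rows = 3;    // out-of-class definition needed before C++17
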
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/armadillo-9.200.4/include/armadillo_bits/Cube_meat.hpp 
new/armadillo-9.200.7/include/armadillo_bits/Cube_meat.hpp
--- old/armadillo-9.200.4/include/armadillo_bits/Cube_meat.hpp  2016-06-16 
18:19:14.000000000 +0200
+++ new/armadillo-9.200.7/include/armadillo_bits/Cube_meat.hpp  2016-06-16 
18:19:17.000000000 +0200
@@ -973,7 +973,7 @@
   
   arma_debug_check( (in_row >= n_rows), "Cube::row(): index out of bounds" );
   
-  rows(in_row, in_row);
+  return (*this).rows(in_row, in_row);
   }
 
 
@@ -988,7 +988,7 @@
   
   arma_debug_check( (in_row >= n_rows), "Cube::row(): index out of bounds" );
   
-  rows(in_row, in_row);
+  return (*this).rows(in_row, in_row);
   }
 
 
@@ -1003,7 +1003,7 @@
   
   arma_debug_check( (in_col >= n_cols), "Cube::col(): index out of bounds" );
   
-  cols(in_col, in_col);
+  return (*this).cols(in_col, in_col);
   }
 
 
@@ -1018,7 +1018,7 @@
   
   arma_debug_check( (in_col >= n_cols), "Cube::col(): index out of bounds" );
   
-  cols(in_col, in_col);
+  return (*this).cols(in_col, in_col);
   }
 
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/armadillo-9.200.4/include/armadillo_bits/Mat_bones.hpp 
new/armadillo-9.200.7/include/armadillo_bits/Mat_bones.hpp
--- old/armadillo-9.200.4/include/armadillo_bits/Mat_bones.hpp  2016-06-16 
18:19:14.000000000 +0200
+++ new/armadillo-9.200.7/include/armadillo_bits/Mat_bones.hpp  2016-06-16 
18:19:17.000000000 +0200
@@ -475,8 +475,8 @@
   inline const Mat& eye(const uword in_rows, const uword in_cols);
   inline const Mat& eye(const SizeMat& s);
   
-  inline void      reset();
-  inline void soft_reset();
+  inline arma_cold void      reset();
+  inline arma_cold void soft_reset();
   
   
   template<typename T1> inline void set_real(const Base<pod_type,T1>& X);
@@ -773,12 +773,12 @@
   typedef eT                                elem_type;
   typedef typename get_pod_type<eT>::result pod_type;
   
-  static const bool is_col = (fixed_n_cols == 1) ? true : false;
-  static const bool is_row = (fixed_n_rows == 1) ? true : false;
+  static const bool is_col = (fixed_n_cols == 1);
+  static const bool is_row = (fixed_n_rows == 1);
   
-  static const uword n_rows = fixed_n_rows;
-  static const uword n_cols = fixed_n_cols;
-  static const uword n_elem = fixed_n_elem;
+  static const uword n_rows;  // value provided below the class definition
+  static const uword n_cols;  // value provided below the class definition
+  static const uword n_elem;  // value provided below the class definition
   
   arma_inline fixed();
   arma_inline fixed(const fixed<fixed_n_rows, fixed_n_cols>& X);
@@ -843,6 +843,23 @@
 
 
 
+// these definitions are outside of the class due to bizarre C++ rules;
+// C++17 has inline variables to address this shortcoming
+
+template<typename eT>
+template<uword fixed_n_rows, uword fixed_n_cols>
+const uword Mat<eT>::fixed<fixed_n_rows, fixed_n_cols>::n_rows = fixed_n_rows;
+
+template<typename eT>
+template<uword fixed_n_rows, uword fixed_n_cols>
+const uword Mat<eT>::fixed<fixed_n_rows, fixed_n_cols>::n_cols = fixed_n_cols;
+
+template<typename eT>
+template<uword fixed_n_rows, uword fixed_n_cols>
+const uword Mat<eT>::fixed<fixed_n_rows, fixed_n_cols>::n_elem = fixed_n_rows 
* fixed_n_cols;
+
+
+
 class Mat_aux
   {
   public:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/armadillo-9.200.4/include/armadillo_bits/Mat_meat.hpp 
new/armadillo-9.200.7/include/armadillo_bits/Mat_meat.hpp
--- old/armadillo-9.200.4/include/armadillo_bits/Mat_meat.hpp   2016-06-16 
18:19:14.000000000 +0200
+++ new/armadillo-9.200.7/include/armadillo_bits/Mat_meat.hpp   2016-06-16 
18:19:17.000000000 +0200
@@ -6712,6 +6712,7 @@
 
 template<typename eT>
 inline
+arma_cold
 void
 Mat<eT>::reset()
   {
@@ -6737,6 +6738,7 @@
 
 template<typename eT>
 inline
+arma_cold
 void
 Mat<eT>::soft_reset()
   {
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/armadillo-9.200.4/include/armadillo_bits/Row_bones.hpp 
new/armadillo-9.200.7/include/armadillo_bits/Row_bones.hpp
--- old/armadillo-9.200.4/include/armadillo_bits/Row_bones.hpp  2016-06-16 
18:19:14.000000000 +0200
+++ new/armadillo-9.200.7/include/armadillo_bits/Row_bones.hpp  2016-06-16 
18:19:17.000000000 +0200
@@ -183,9 +183,9 @@
   static const bool is_col = false;
   static const bool is_row = true;
   
-  static const uword n_rows = 1;
-  static const uword n_cols = fixed_n_elem;
-  static const uword n_elem = fixed_n_elem;
+  static const uword n_rows;  // value provided below the class definition
+  static const uword n_cols;  // value provided below the class definition
+  static const uword n_elem;  // value provided below the class definition
   
   arma_inline fixed();
   arma_inline fixed(const fixed<fixed_n_elem>& X);
@@ -249,4 +249,21 @@
 
 
 
+// these definitions are outside of the class due to bizarre C++ rules;
+// C++17 has inline variables to address this shortcoming
+
+template<typename eT>
+template<uword fixed_n_elem>
+const uword Row<eT>::fixed<fixed_n_elem>::n_rows = 1u;
+
+template<typename eT>
+template<uword fixed_n_elem>
+const uword Row<eT>::fixed<fixed_n_elem>::n_cols = fixed_n_elem;
+
+template<typename eT>
+template<uword fixed_n_elem>
+const uword Row<eT>::fixed<fixed_n_elem>::n_elem = fixed_n_elem;
+
+
+
 //! @}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/armadillo-9.200.4/include/armadillo_bits/arma_version.hpp 
new/armadillo-9.200.7/include/armadillo_bits/arma_version.hpp
--- old/armadillo-9.200.4/include/armadillo_bits/arma_version.hpp       
2016-06-16 18:19:14.000000000 +0200
+++ new/armadillo-9.200.7/include/armadillo_bits/arma_version.hpp       
2016-06-16 18:19:17.000000000 +0200
@@ -21,7 +21,7 @@
 
 #define ARMA_VERSION_MAJOR 9
 #define ARMA_VERSION_MINOR 200
-#define ARMA_VERSION_PATCH 4
+#define ARMA_VERSION_PATCH 7
 #define ARMA_VERSION_NAME  "Carpe Noctem"
 
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/armadillo-9.200.4/include/armadillo_bits/fn_qz.hpp 
new/armadillo-9.200.7/include/armadillo_bits/fn_qz.hpp
--- old/armadillo-9.200.4/include/armadillo_bits/fn_qz.hpp      2016-06-16 
18:19:14.000000000 +0200
+++ new/armadillo-9.200.7/include/armadillo_bits/fn_qz.hpp      2016-06-16 
18:19:17.000000000 +0200
@@ -49,10 +49,10 @@
   
   if(status == false)
     {
-    AA.reset();
-    BB.reset();
-    Q.reset();
-    Z.reset();
+    AA.soft_reset();
+    BB.soft_reset();
+    Q.soft_reset();
+    Z.soft_reset();
     arma_debug_warn("qz(): decomposition failed");
     }
   
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/armadillo-9.200.4/include/armadillo_bits/fn_trace.hpp 
new/armadillo-9.200.7/include/armadillo_bits/fn_trace.hpp
--- old/armadillo-9.200.4/include/armadillo_bits/fn_trace.hpp   2016-06-16 
18:19:14.000000000 +0200
+++ new/armadillo-9.200.7/include/armadillo_bits/fn_trace.hpp   2016-06-16 
18:19:17.000000000 +0200
@@ -78,11 +78,11 @@
 
 
 
-//! speedup for trace(A*B)
+//! speedup for trace(A*B); non-complex elements
 template<typename T1, typename T2>
 arma_warn_unused
 inline
-typename T1::elem_type
+typename enable_if2< is_cx<typename T1::elem_type>::no, typename 
T1::elem_type>::result
 trace(const Glue<T1, T2, glue_times>& X)
   {
   arma_extra_debug_sigprint();
@@ -198,6 +198,54 @@
   }
 
 
+
+//! speedup for trace(A*B); complex elements
+template<typename T1, typename T2>
+arma_warn_unused
+inline
+typename enable_if2< is_cx<typename T1::elem_type>::yes, typename 
T1::elem_type>::result
+trace(const Glue<T1, T2, glue_times>& X)
+  {
+  arma_extra_debug_sigprint();
+  
+  typedef typename T1::elem_type eT;
+  
+  const quasi_unwrap<T1> UA(X.A);
+  const quasi_unwrap<T2> UB(X.B);
+  
+  const Mat<eT>& A = UA.M;
+  const Mat<eT>& B = UB.M;
+  
+  const uword A_n_rows = A.n_rows;
+  const uword A_n_cols = A.n_cols;
+
+  const uword B_n_rows = B.n_rows;
+  const uword B_n_cols = B.n_cols;
+  
+  arma_debug_assert_mul_size(A_n_rows, A_n_cols, B_n_rows, B_n_cols, "matrix 
multiplication");
+  
+  if( (A.n_elem == 0) || (B.n_elem == 0) )  { return eT(0); }
+  
+  const uword N = (std::min)(A_n_rows, B_n_cols);
+  
+  eT acc = eT(0);
+  
+  for(uword k=0; k < N; ++k)
+    {
+    const eT* B_colptr = B.colptr(k);
+    
+    // condition: A_n_cols = B_n_rows
+    
+    for(uword i=0; i < A_n_cols; ++i)
+      {
+      acc += A.at(k, i) * B_colptr[i];
+      }
+    }
+  
+  return acc;
+  }
+
+
 
 //! trace of sparse object; generic version
 template<typename T1>
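
As background for the specialisation added above, trace(A*B) is computed directly from the two factors without forming the full product; a usage sketch with complex matrices (sizes are arbitrary):

  #include <armadillo>

  using namespace arma;

  int main()
    {
    cx_mat A(100,  50, fill::randu);
    cx_mat B( 50, 100, fill::randu);

    cx_double t = trace(A * B);   // handled by the dedicated trace(Glue) overload; A*B is not explicitly formed

    return 0;
    }
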
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/armadillo-9.200.4/include/armadillo_bits/op_princomp_bones.hpp 
new/armadillo-9.200.7/include/armadillo_bits/op_princomp_bones.hpp
--- old/armadillo-9.200.4/include/armadillo_bits/op_princomp_bones.hpp  
2016-06-16 18:19:14.000000000 +0200
+++ new/armadillo-9.200.7/include/armadillo_bits/op_princomp_bones.hpp  
2016-06-16 18:19:17.000000000 +0200
@@ -23,37 +23,26 @@
   {
   public:
   
-  //
-  // real element versions
-  
-  template<typename T1>
-  inline static bool
-  direct_princomp
-    (
-           Mat<typename T1::elem_type>&     coeff_out,
-    const Base<typename T1::elem_type, T1>& X,
-    const typename arma_not_cx<typename T1::elem_type>::result* junk = 0
-    );
-  
   template<typename T1>
   inline static bool
   direct_princomp
     (
            Mat<typename T1::elem_type>&     coeff_out,
            Mat<typename T1::elem_type>&     score_out,
-    const Base<typename T1::elem_type, T1>& X,
-    const typename arma_not_cx<typename T1::elem_type>::result* junk = 0
+           Col<typename T1::pod_type>&     latent_out,
+           Col<typename T1::elem_type>&     tsquared_out,
+    const Base<typename T1::elem_type, T1>& X
     );
   
+  
   template<typename T1>
   inline static bool
   direct_princomp
     (
            Mat<typename T1::elem_type>&     coeff_out,
            Mat<typename T1::elem_type>&     score_out,
-           Col<typename T1::elem_type>&     latent_out,
-    const Base<typename T1::elem_type, T1>& X,
-    const typename arma_not_cx<typename T1::elem_type>::result* junk = 0
+           Col<typename T1::pod_type>&     latent_out,
+    const Base<typename T1::elem_type, T1>& X
     );
   
   template<typename T1>
@@ -62,63 +51,20 @@
     (
            Mat<typename T1::elem_type>&     coeff_out,
            Mat<typename T1::elem_type>&     score_out,
-           Col<typename T1::elem_type>&     latent_out,
-           Col<typename T1::elem_type>&     tsquared_out,
-    const Base<typename T1::elem_type, T1>& X,
-    const typename arma_not_cx<typename T1::elem_type>::result* junk = 0
-    );
-  
-  
-  //
-  // complex element versions
-  
-  template<typename T1>
-  inline static bool
-  direct_princomp
-    (
-           Mat< std::complex<typename T1::pod_type> >&     coeff_out,
-    const Base< std::complex<typename T1::pod_type>, T1 >& X,
-    const typename arma_cx_only<typename T1::elem_type>::result* junk = 0
+    const Base<typename T1::elem_type, T1>& X
     );
   
   template<typename T1>
   inline static bool
   direct_princomp
     (
-           Mat< std::complex<typename T1::pod_type> >&     coeff_out,
-           Mat< std::complex<typename T1::pod_type> >&     score_out,
-    const Base< std::complex<typename T1::pod_type>, T1 >& X,
-    const typename arma_cx_only<typename T1::elem_type>::result* junk = 0
-    );
-  
-  template<typename T1>
-  inline static bool
-  direct_princomp
-    (
-           Mat< std::complex<typename T1::pod_type> >&     coeff_out,
-           Mat< std::complex<typename T1::pod_type> >&     score_out,
-           Col<              typename T1::pod_type  >&     latent_out,
-    const Base< std::complex<typename T1::pod_type>, T1 >& X,
-    const typename arma_cx_only<typename T1::elem_type>::result* junk = 0
-    );
-  
-  template<typename T1>
-  inline static bool
-  direct_princomp
-    (
-           Mat< std::complex<typename T1::pod_type> >&     coeff_out,
-           Mat< std::complex<typename T1::pod_type> >&     score_out,
-           Col<              typename T1::pod_type  >&     latent_out,
-           Col< std::complex<typename T1::pod_type> >&     tsquared_out,
-    const Base< std::complex<typename T1::pod_type>, T1 >& X,
-    const typename arma_cx_only<typename T1::elem_type>::result* junk = 0
+           Mat<typename T1::elem_type>&     coeff_out,
+    const Base<typename T1::elem_type, T1>& X
     );
   
-  
   template<typename T1>
   inline static void
   apply(Mat<typename T1::elem_type>& out, const Op<T1,op_princomp>& in);
-  
   };
 
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/armadillo-9.200.4/include/armadillo_bits/op_princomp_meat.hpp 
new/armadillo-9.200.7/include/armadillo_bits/op_princomp_meat.hpp
--- old/armadillo-9.200.4/include/armadillo_bits/op_princomp_meat.hpp   
2016-06-16 18:19:14.000000000 +0200
+++ new/armadillo-9.200.7/include/armadillo_bits/op_princomp_meat.hpp   
2016-06-16 18:19:17.000000000 +0200
@@ -33,16 +33,15 @@
   (
          Mat<typename T1::elem_type>&     coeff_out,
          Mat<typename T1::elem_type>&     score_out,
-         Col<typename T1::elem_type>&     latent_out,
+         Col<typename T1::pod_type>&      latent_out,
          Col<typename T1::elem_type>&     tsquared_out,
-  const Base<typename T1::elem_type, T1>& X,
-  const typename arma_not_cx<typename T1::elem_type>::result* junk
+  const Base<typename T1::elem_type, T1>& X
   )
   {
   arma_extra_debug_sigprint();
-  arma_ignore(junk);
   
   typedef typename T1::elem_type eT;
+  typedef typename T1::pod_type   T;
   
   const unwrap_check<T1> Y( X.get_ref(), score_out );
   const Mat<eT>& in    = Y.M;
@@ -57,9 +56,9 @@
     
     // singular value decomposition
     Mat<eT> U;
-    Col<eT> s;
+    Col< T> s;
     
-    const bool svd_ok = svd(U, s, coeff_out, score_out);
+    const bool svd_ok = (n_rows >= n_cols) ? svd_econ(U, s, coeff_out, 
score_out) : svd(U, s, coeff_out, score_out);
     
     if(svd_ok == false)  { return false; }
     
@@ -73,26 +72,25 @@
       {
       score_out.cols(n_rows-1,n_cols-1).zeros();
       
-      //Col<eT> s_tmp = zeros< Col<eT> >(n_cols);
-      Col<eT> s_tmp(n_cols);
-      s_tmp.zeros();
+      Col<T> s_tmp(n_cols, fill::zeros);
       
       s_tmp.rows(0,n_rows-2) = s.rows(0,n_rows-2);
       s = s_tmp;
           
       // compute the Hotelling's T-squared
-      s_tmp.rows(0,n_rows-2) = eT(1) / s_tmp.rows(0,n_rows-2);
+      s_tmp.rows(0,n_rows-2) = T(1) / s_tmp.rows(0,n_rows-2);
       
-      const Mat<eT> S = score_out * diagmat(Col<eT>(s_tmp));   
-      tsquared_out = sum(S%S,1); 
+      const Mat<eT> S = score_out * diagmat(Col<T>(s_tmp));
+      tsquared_out = sum(S%S,1);
       }
     else
       {
-      // compute the Hotelling's T-squared   
-      const Mat<eT> S = score_out * diagmat(Col<eT>( eT(1) / s));
+      // compute the Hotelling's T-squared
+      // TODO: replace with more robust approach
+      const Mat<eT> S = score_out * diagmat(Col<T>( T(1) / s));
       tsquared_out = sum(S%S,1);
       }
-            
+    
     // compute the eigenvalues of the principal vectors
     latent_out = s%s;
     }
@@ -128,15 +126,14 @@
   (
          Mat<typename T1::elem_type>&     coeff_out,
          Mat<typename T1::elem_type>&     score_out,
-         Col<typename T1::elem_type>&     latent_out,
-  const Base<typename T1::elem_type, T1>& X,
-  const typename arma_not_cx<typename T1::elem_type>::result* junk
+         Col<typename T1::pod_type>&      latent_out,
+  const Base<typename T1::elem_type, T1>& X
   )
   {
   arma_extra_debug_sigprint();
-  arma_ignore(junk);
   
   typedef typename T1::elem_type eT;
+  typedef typename T1::pod_type   T;
   
   const unwrap_check<T1> Y( X.get_ref(), score_out );
   const Mat<eT>& in    = Y.M;
@@ -151,9 +148,9 @@
     
     // singular value decomposition
     Mat<eT> U;
-    Col<eT> s;
+    Col< T> s;
     
-    const bool svd_ok = svd(U, s, coeff_out, score_out);
+    const bool svd_ok = (n_rows >= n_cols) ? svd_econ(U, s, coeff_out, 
score_out) : svd(U, s, coeff_out, score_out);
     
     if(svd_ok == false)  { return false; }
     
@@ -167,14 +164,14 @@
       {
       score_out.cols(n_rows-1,n_cols-1).zeros();
       
-      Col<eT> s_tmp = zeros< Col<eT> >(n_cols);
+      Col<T> s_tmp(n_cols, fill::zeros);
+      
       s_tmp.rows(0,n_rows-2) = s.rows(0,n_rows-2);
       s = s_tmp;
       }
-      
+    
     // compute the eigenvalues of the principal vectors
     latent_out = s%s;
-    
     }
   else // 0 or 1 samples
     {
@@ -204,14 +201,13 @@
   (
          Mat<typename T1::elem_type>&     coeff_out,
          Mat<typename T1::elem_type>&     score_out,
-  const Base<typename T1::elem_type, T1>& X,
-  const typename arma_not_cx<typename T1::elem_type>::result* junk
+  const Base<typename T1::elem_type, T1>& X
   )
   {
   arma_extra_debug_sigprint();
-  arma_ignore(junk);
   
   typedef typename T1::elem_type eT;
+  typedef typename T1::pod_type   T;
   
   const unwrap_check<T1> Y( X.get_ref(), score_out );
   const Mat<eT>& in    = Y.M;
@@ -226,25 +222,18 @@
     
     // singular value decomposition
     Mat<eT> U;
-    Col<eT> s;
+    Col< T> s;
     
-    const bool svd_ok = svd(U, s, coeff_out, score_out);
+    const bool svd_ok = (n_rows >= n_cols) ? svd_econ(U, s, coeff_out, 
score_out) : svd(U, s, coeff_out, score_out);
     
     if(svd_ok == false)  { return false; }
     
-    // normalize the eigenvalues
-    s /= std::sqrt( double(n_rows - 1) );
-    
     // project the samples to the principals
     score_out *= coeff_out;
     
     if(n_rows <= n_cols) // number of samples is less than their dimensionality
       {
       score_out.cols(n_rows-1,n_cols-1).zeros();
-      
-      Col<eT> s_tmp = zeros< Col<eT> >(n_cols);
-      s_tmp.rows(0,n_rows-2) = s.rows(0,n_rows-2);
-      s = s_tmp;
       }
     }
   else // 0 or 1 samples
@@ -269,12 +258,10 @@
 op_princomp::direct_princomp
   (
          Mat<typename T1::elem_type>&     coeff_out,
-  const Base<typename T1::elem_type, T1>& X,
-  const typename arma_not_cx<typename T1::elem_type>::result* junk
+  const Base<typename T1::elem_type, T1>& X
   )
   {
   arma_extra_debug_sigprint();
-  arma_ignore(junk);
   
   typedef typename T1::elem_type eT;
   
@@ -289,289 +276,7 @@
     Mat<eT> U;
     Col<eT> s;
     
-    const bool svd_ok = svd(U, s, coeff_out, tmp);
-    
-    if(svd_ok == false)  { return false; }
-    }
-  else
-    {
-    coeff_out.eye(in.n_cols, in.n_cols);
-    }
-  
-  return true;
-  }
-
-
-
-//! \brief
-//! principal component analysis -- 4 arguments complex version
-//! computation is done via singular value decomposition
-//! coeff_out    -> principal component coefficients
-//! score_out    -> projected samples
-//! latent_out   -> eigenvalues of principal vectors
-//! tsquared_out -> Hotelling's T^2 statistic
-template<typename T1>
-inline
-bool
-op_princomp::direct_princomp
-  (
-         Mat< std::complex<typename T1::pod_type> >&     coeff_out,
-         Mat< std::complex<typename T1::pod_type> >&     score_out,
-         Col<              typename T1::pod_type  >&     latent_out,
-         Col< std::complex<typename T1::pod_type> >&     tsquared_out,
-  const Base< std::complex<typename T1::pod_type>, T1 >& X,
-  const typename arma_cx_only<typename T1::elem_type>::result* junk
-  )
-  {
-  arma_extra_debug_sigprint();
-  arma_ignore(junk);
-  
-  typedef typename T1::pod_type     T;
-  typedef          std::complex<T> eT;
-  
-  const unwrap_check<T1> Y( X.get_ref(), score_out );
-  const Mat<eT>& in    = Y.M;
-  
-  const uword n_rows = in.n_rows;
-  const uword n_cols = in.n_cols;
-  
-  if(n_rows > 1) // more than one sample
-    {
-    // subtract the mean - use score_out as temporary matrix
-    score_out = in;  score_out.each_row() -= mean(in);
-         
-    // singular value decomposition
-    Mat<eT> U;
-    Col< T> s;
-    
-    const bool svd_ok = svd(U, s, coeff_out, score_out); 
-    
-    if(svd_ok == false)  { return false; }
-    
-    // normalize the eigenvalues
-    s /= std::sqrt( double(n_rows - 1) );
-    
-    // project the samples to the principals
-    score_out *= coeff_out;
-    
-    if(n_rows <= n_cols) // number of samples is less than their dimensionality
-      {
-      score_out.cols(n_rows-1,n_cols-1).zeros();
-      
-      Col<T> s_tmp = zeros< Col<T> >(n_cols);
-      s_tmp.rows(0,n_rows-2) = s.rows(0,n_rows-2);
-      s = s_tmp;
-          
-      // compute the Hotelling's T-squared   
-      s_tmp.rows(0,n_rows-2) = 1.0 / s_tmp.rows(0,n_rows-2);
-      const Mat<eT> S = score_out * diagmat(Col<T>(s_tmp));                    
 
-      tsquared_out = sum(S%S,1); 
-      }
-    else
-      {
-      // compute the Hotelling's T-squared   
-      const Mat<eT> S = score_out * diagmat(Col<T>(T(1) / s));                 
    
-      tsquared_out = sum(S%S,1);
-      }
-    
-    // compute the eigenvalues of the principal vectors
-    latent_out = s%s;
-    
-    }
-  else // 0 or 1 samples
-    {
-    coeff_out.eye(n_cols, n_cols);
-    
-    score_out.copy_size(in);
-    score_out.zeros();
-      
-    latent_out.set_size(n_cols);
-    latent_out.zeros();
-      
-    tsquared_out.set_size(n_rows);
-    tsquared_out.zeros();
-    }
-  
-  return true;
-  }
-
-
-
-//! \brief
-//! principal component analysis -- 3 arguments complex version
-//! computation is done via singular value decomposition
-//! coeff_out    -> principal component coefficients
-//! score_out    -> projected samples
-//! latent_out   -> eigenvalues of principal vectors
-template<typename T1>
-inline
-bool
-op_princomp::direct_princomp
-  (
-         Mat< std::complex<typename T1::pod_type> >&     coeff_out,
-         Mat< std::complex<typename T1::pod_type> >&     score_out,
-         Col<              typename T1::pod_type  >&     latent_out,
-  const Base< std::complex<typename T1::pod_type>, T1 >& X,
-  const typename arma_cx_only<typename T1::elem_type>::result* junk
-  )
-  {
-  arma_extra_debug_sigprint();
-  arma_ignore(junk);
-  
-  typedef typename T1::pod_type     T;
-  typedef          std::complex<T> eT;
-  
-  const unwrap_check<T1> Y( X.get_ref(), score_out );
-  const Mat<eT>& in    = Y.M;
-  
-  const uword n_rows = in.n_rows;
-  const uword n_cols = in.n_cols;
-  
-  if(n_rows > 1) // more than one sample
-    {
-    // subtract the mean - use score_out as temporary matrix
-    score_out = in;  score_out.each_row() -= mean(in);
-    
-    // singular value decomposition
-    Mat<eT> U;
-    Col< T> s;
-    
-    const bool svd_ok = svd(U, s, coeff_out, score_out);
-    
-    if(svd_ok == false)  { return false; }
-    
-    // normalize the eigenvalues
-    s /= std::sqrt( double(n_rows - 1) );
-    
-    // project the samples to the principals
-    score_out *= coeff_out;
-    
-    if(n_rows <= n_cols) // number of samples is less than their dimensionality
-      {
-      score_out.cols(n_rows-1,n_cols-1).zeros();
-      
-      Col<T> s_tmp = zeros< Col<T> >(n_cols);
-      s_tmp.rows(0,n_rows-2) = s.rows(0,n_rows-2);
-      s = s_tmp;
-      }
-      
-    // compute the eigenvalues of the principal vectors
-    latent_out = s%s;
-    }
-  else // 0 or 1 samples
-    {
-    coeff_out.eye(n_cols, n_cols);
-
-    score_out.copy_size(in);
-    score_out.zeros();
-
-    latent_out.set_size(n_cols);
-    latent_out.zeros();
-    }
-  
-  return true;
-  }
-
-
-
-//! \brief
-//! principal component analysis -- 2 arguments complex version
-//! computation is done via singular value decomposition
-//! coeff_out    -> principal component coefficients
-//! score_out    -> projected samples
-template<typename T1>
-inline
-bool
-op_princomp::direct_princomp
-  (
-         Mat< std::complex<typename T1::pod_type> >&     coeff_out,
-         Mat< std::complex<typename T1::pod_type> >&     score_out,
-  const Base< std::complex<typename T1::pod_type>, T1 >& X,
-  const typename arma_cx_only<typename T1::elem_type>::result* junk
-  )
-  {
-  arma_extra_debug_sigprint();
-  arma_ignore(junk);
-  
-  typedef typename T1::pod_type     T;
-  typedef          std::complex<T> eT;
-  
-  const unwrap_check<T1> Y( X.get_ref(), score_out );
-  const Mat<eT>& in    = Y.M;
-  
-  const uword n_rows = in.n_rows;
-  const uword n_cols = in.n_cols;
-  
-  if(n_rows > 1) // more than one sample
-    {
-    // subtract the mean - use score_out as temporary matrix
-    score_out = in;  score_out.each_row() -= mean(in);
-    
-    // singular value decomposition
-    Mat<eT> U;
-    Col< T> s;
-    
-    const bool svd_ok = svd(U, s, coeff_out, score_out);
-    
-    if(svd_ok == false)  { return false; }
-    
-    // normalize the eigenvalues
-    s /= std::sqrt( double(n_rows - 1) );
-
-    // project the samples to the principals
-    score_out *= coeff_out;
-
-    if(n_rows <= n_cols) // number of samples is less than their dimensionality
-      {
-      score_out.cols(n_rows-1,n_cols-1).zeros();
-      }
-
-    }
-  else // 0 or 1 samples
-    {
-    coeff_out.eye(n_cols, n_cols);
-    
-    score_out.copy_size(in);
-    score_out.zeros();
-    }
-  
-  return true;
-  }
-
-
-
-//! \brief
-//! principal component analysis -- 1 argument complex version
-//! computation is done via singular value decomposition
-//! coeff_out    -> principal component coefficients
-template<typename T1>
-inline
-bool
-op_princomp::direct_princomp
-  (
-         Mat< std::complex<typename T1::pod_type> >&     coeff_out,
-  const Base< std::complex<typename T1::pod_type>, T1 >& X,
-  const typename arma_cx_only<typename T1::elem_type>::result* junk
-  )
-  {
-  arma_extra_debug_sigprint();
-  arma_ignore(junk);
-  
-  typedef typename T1::pod_type     T;
-  typedef          std::complex<T> eT;
-  
-  const unwrap<T1>    Y( X.get_ref() );
-  const Mat<eT>& in = Y.M;
-  
-  if(in.n_elem != 0)
-    {
-    // singular value decomposition
-    Mat<eT> U;
-    Col< T> s;
-    
-    Mat<eT> tmp = in;  tmp.each_row() -= mean(in);
-    
-    const bool svd_ok = svd(U, s, coeff_out, tmp);
+    const bool svd_ok = (in.n_rows >= in.n_cols) ? svd_econ(U, s, coeff_out, 
tmp) : svd(U, s, coeff_out, tmp);
     
     if(svd_ok == false)  { return false; }
     }
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/armadillo-9.200.4/tests/fn_princomp.cpp 
new/armadillo-9.200.7/tests/fn_princomp.cpp
--- old/armadillo-9.200.4/tests/fn_princomp.cpp 1970-01-01 01:00:00.000000000 
+0100
+++ new/armadillo-9.200.7/tests/fn_princomp.cpp 2016-06-16 18:19:17.000000000 
+0200
@@ -0,0 +1,150 @@
+#include <armadillo>
+#include "catch.hpp"
+
+using namespace arma;
+
+namespace
+  {
+  void
+  initMatrix(mat& m)
+    {
+    for(uword ii = 0; ii < m.n_rows; ++ii)
+    for(uword jj = 0; jj < m.n_cols; ++jj)
+      {
+      const int i = int(ii);
+      const int j = int(jj);
+      
+      m(ii, jj) = 5 * (i % 17) + (i + j) % 13 - 7 * ((j + 2) % 5) + 
double(i)/double(m.n_rows);
+      }
+    }
+
+  void checkEigenvectors(const mat& coeff)
+    {
+    // sign of the eigenvectors can be flipped
+    REQUIRE(std::abs(coeff(0,0)) == Approx(2.2366412109e-01));
+    REQUIRE(std::abs(coeff(0,1)) == Approx(3.1197826828e-01));
+    REQUIRE(std::abs(coeff(0,2)) == Approx(5.1847537613e-02));
+    REQUIRE(std::abs(coeff(1,0)) == Approx(2.2419616512e-01));
+    REQUIRE(std::abs(coeff(1,1)) == Approx(2.7564301912e-01));
+    REQUIRE(std::abs(coeff(1,2)) == Approx(1.0953921221e-01));
+    REQUIRE(std::abs(coeff(2,0)) == Approx(2.2427613980e-01));
+    REQUIRE(std::abs(coeff(2,1)) == Approx(1.6088934501e-01));
+    REQUIRE(std::abs(coeff(2,2)) == Approx(2.3660988967e-01));
+    }
+
+  void checkScore(const mat& score)
+    {
+    REQUIRE(score(0,0) == Approx(-1.8538115696e+02));
+    REQUIRE(score(0,1) == Approx(4.6671842099e+00));
+    REQUIRE(score(0,2) == Approx(1.1026881736e+01));
+    REQUIRE(score(1,0) == Approx(-1.6144314244e+02));
+    REQUIRE(score(1,1) == Approx(8.0636602200e+00));
+    REQUIRE(score(1,2) == Approx(8.5129014856e+00));
+    REQUIRE(score(2,0) == Approx(-1.3750123749e+02));
+    REQUIRE(score(2,1) == Approx(1.0312494525e+01));
+    REQUIRE(score(2,2) == Approx(4.5214633042e+00));
+    }
+
+  void checkEigenvalues(const vec& latent)
+    {
+    REQUIRE(latent(0) == Approx(1.1989436021e+04));
+    REQUIRE(latent(1) == Approx(9.2136913098e+01));
+    REQUIRE(latent(2) == Approx(7.8335565832e+01));
+    REQUIRE(latent(3) == Approx(2.4204644513e+01));
+    REQUIRE(latent(4) == Approx(2.1302619671e+01));
+    REQUIRE(latent(5) == Approx(1.1615198930e+01));
+    REQUIRE(latent(6) == Approx(1.1040034957e+01));
+    REQUIRE(latent(7) == Approx(7.7918177707e+00));
+    REQUIRE(latent(8) == Approx(7.2862524567e+00));
+    REQUIRE(latent(9) == Approx(6.5039856845e+00));
+    }
+
+  void checkHotteling(const vec& tsquared)
+    {
+    REQUIRE(tsquared(0) == Approx(7.1983631370e+02));
+    REQUIRE(tsquared(1) == Approx(6.5616053343e+02));
+    REQUIRE(tsquared(2) == Approx(5.6308987454e+02));
+    REQUIRE(tsquared(3) == Approx(3.6908398978e+02));
+    REQUIRE(tsquared(4) == Approx(2.4632493795e+02));
+    REQUIRE(tsquared(5) == Approx(1.3213013367e+02));
+    REQUIRE(tsquared(6) == Approx(5.7414718234e+01));
+    REQUIRE(tsquared(7) == Approx(1.5157746233e+01));
+    REQUIRE(tsquared(8) == Approx(1.7316032365e+01));
+    REQUIRE(tsquared(9) == Approx(2.9290529527e+01));
+    REQUIRE(tsquared(20) == Approx(2.6159738840e+02));
+    }
+  }
+
+TEST_CASE("fn_princomp_1")
+  {
+  mat m(1000, 20);
+  initMatrix(m);
+  mat coeff = princomp(m);
+  checkEigenvectors(coeff);
+  }
+
+TEST_CASE("fn_princomp_2")
+  {
+  mat m(1000, 20);
+  initMatrix(m);
+  mat coeff;
+  princomp(coeff, m);
+  checkEigenvectors(coeff);
+  }
+
+TEST_CASE("fn_princomp_3")
+  {
+  mat m(1000, 20);
+  initMatrix(m);
+  mat coeff;
+  mat score;
+  princomp(coeff, score, m);
+  checkScore(score);
+  checkEigenvectors(coeff);
+  }
+
+TEST_CASE("fn_princomp_4")
+  {
+  mat m(1000, 20);
+  initMatrix(m);
+  mat coeff;
+  mat score;
+  vec latent;
+  princomp(coeff, score, latent, m);
+  checkEigenvectors(coeff);
+  checkScore(score);
+  checkEigenvalues(latent);
+  }
+
+TEST_CASE("fn_princomp_5")
+  {
+  mat m(1000, 20);
+  initMatrix(m);
+  mat coeff;
+  mat score;
+  vec latent;
+  vec tsquared;
+  princomp(coeff, score, latent, tsquared, m);
+  checkEigenvectors(coeff);
+  checkScore(score);
+  checkEigenvalues(latent);
+  // checkHotteling(tsquared);  // TODO
+  }
+
+TEST_CASE("fn_princomp_6")
+  {
+  mat m(5, 20);
+  initMatrix(m);
+  mat coeff = princomp(m);
+  REQUIRE(std::abs(coeff(0,0)) == Approx(2.4288979933e-01));
+  REQUIRE(std::abs(coeff(0,1)) == Approx(3.9409505019e-16));
+  REQUIRE(std::abs(coeff(0,2)) == Approx(1.2516285718e-02));
+  REQUIRE(std::abs(coeff(1,0)) == Approx(2.4288979933e-01));
+  REQUIRE(std::abs(coeff(1,1)) == Approx(2.9190770799e-16));
+  REQUIRE(std::abs(coeff(1,2)) == Approx(1.2516285718e-02));
+  REQUIRE(std::abs(coeff(2,0)) == Approx(2.4288979933e-01));
+  REQUIRE(std::abs(coeff(2,1)) == Approx(3.4719806003e-17));
+  REQUIRE(std::abs(coeff(2,2)) == Approx(1.2516285718e-02));
+  REQUIRE(std::abs(coeff(19,19)) == Approx(9.5528446175e-01).epsilon(0.01));
+  }
+

