On Wednesday 02 June 2010 06:01:24 pm Yoav Zibin wrote:
> Maybe you do the reduce in a static context (so the error is that you refer
> to instance method in a static context) ?
>
> Can you try both:
> v = v_dst.reduce(ArrayD_add, v);
> and
> v = v_dst.reduce(*this.*ArrayD_add, v);
>
> If both don't work, email the full code and I'll look into it.
>
I think the mention of the static context was a hint in the right direction.
Unfortunately, playing around with "this" didn't help. I am appending the
full code (matmul.x10) below.
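For reference, this is the call in question, together with a closure-wrapped
variant I have not tried yet (only a sketch; I am assuming reduce will accept
a closure literal of type (ArrayD,ArrayD)=>ArrayD in place of the bare method
name):

  // The call as it stands, passing the method by name:
  v = v_dst.reduce(ArrayD_add, v);
  // Untested guess: wrap the method in an explicit closure literal,
  // so that reduce receives a value of type (ArrayD,ArrayD)=>ArrayD:
  v = v_dst.reduce((a:ArrayD, b:ArrayD) => ArrayD_add(a, b), v);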
I also tried declaring both functions ArrayD_cp and ArrayD_add "public
static", which made the error at v = v_dst.reduce() disappear. But then the
call to lift became the offender (presumably because it was not declared static).
Since every ArrayD.region() is zero-based (Rail-like), I tried to replace the
call to lift with an ordinary loop - but to no avail. I am appending this version
(matmul_v2.x10) as well.
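For quick reference, the element-wise add appears in two shapes in the listings
below (s1_loc and s2_loc are the local copies produced by ArrayD_cp):

  // One version uses lift for the element-wise add:
  s1_loc.lift(s1_loc, s2_loc, Double.+);
  // The other spells the same thing out as an ordinary loop over
  // the zero-based region, with n = s1.raw().length():
  for (var i:Int = 0; i < n; i++) {
      s1_loc(i) += s2_loc(i);
  }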
I hope one of the two versions can be coerced into a valid X10 program!
Thanks very much for your kind help!
--
Kind regards
Dr. Christoph Pospiech
High Performance & Parallel Computing
Phone: +49-351 86269826
Mobile: +49-171-765 5871
E-Mail: christoph.pospi...@de.ibm.com
import x10.util.Box;
/**
* Class matmul
*/
public class matmul {
global val vsize:Int;
global val debug:Boolean;
static type ArrayD = Array[Double]{rank==1};
global val A: DistArray[Double]{rank==2};
/**
* While the matrix A is distributed,
* we keep the vector as an ordinary Array
* at place Place.FIRST_PLACE.
*/
global val v: ArrayD;
/**
* We need a data structure
* for accumulating the temporary results
* of the local matrix vector multiply.
* The data structure is designed to make
* use of the DistArray.reduce() function.
*/
global val v_src: DistArray[ArrayD]{rank==1};
global val v_dst: DistArray[ArrayD]{rank==1};
/**
* v_src is generated from v by scattering
* the data across the MPI tasks. The following
* two distributed arrays describe which parts
* of v get where.
*/
global val j_min: DistArray[Int]{rank==1};
global val j_len: DistArray[Int]{rank==1};
/**
* This constant ArrayD is used as the
* zero element in the reduce operation.
*/
global val v_NULL: ArrayD;
/**
* special constructor
*/
public def this(n:Int, axis:Int, Dprint: Boolean) {
// populate vsize
vsize = n;
// populate debug
debug = Dprint;
// set up distributions
val D = Dist.makeBlock([0..n-1, 0..n-1], axis);
// the following distributions are no longer used
// and kept here only for reference.
val D_row:Dist = ( axis == 0 ?
Dist.makeConstant([0..n-1]) :
Dist.makeBlock([0..n-1], 0) );
val D_col:Dist = ( axis == 0 ?
Dist.makeBlock([0..n-1], 0) :
Dist.makeConstant([0..n-1]) );
// Finally, we set up a distribution
// with one point per place
val Dv_tmp:Dist = Dist.makeUnique(D.places());
if (debug) {
// This declares a matrix with debug entries
A = DistArray.make[Double](D,
(p(i,j):Point) => { i*1.0 + j*0.01 } );
} else {
// This declares a unit matrix
A = DistArray.make[Double](D,
(p(i,j):Point) => { i==j ? 1.0 : 0.0} );
}
if (debug) {
// For debugging reasons we take a constant vector.
v = new ArrayD(n,
(p(i):Point) => {(1.0) as Double});
} else {
// This will be the final initialization
v = new ArrayD(n,
(p(i):Point) => {(i*1.0) as Double});
}
v_src = DistArray.make[ArrayD](Dv_tmp);
v_dst = DistArray.make[ArrayD](Dv_tmp);
j_min = DistArray.make[Int](Dv_tmp);
j_len = DistArray.make[Int](Dv_tmp);
finish ateach(pt in Dv_tmp ) {
v_src(pt) = new Array[Double](n,
(q:Point) => {(0.0) as Double});
v_dst(pt) = new Array[Double](n,
(q:Point) => {(0.0) as Double});
val p:Place = Dv_tmp(pt);
j_min(pt) = (A|p).region().min(1);
j_len(pt) = (A|p).region().max(1) - j_min(pt) + 1;
}
v_NULL = new ArrayD(n,
(p:Point) => {(0.0) as Double});
}
/**
* Default constructor
*/
public def this() { this(6,0,(1==1)); }
/**
* Methods used for printing
*/
static def format(x:Double, numDecimals:int) {
return String.format("%1."+numDecimals+"f", [x as Box[Double]]);
}
def prettyPrintMatrix(A:DistArray[Double]{rank == 2}) {
finish for (p in A.dist().places() ) {
at (p) {
Console.OUT.println("Matrix A at place "+p.id()
);
for ((i) in (A|p).region.projection(0)) {
for ((j) in (A|p).region.projection(1)) {
val str = A(i,j) + " ";
Console.OUT.print(str);
}
Console.OUT.println();
Console.OUT.flush();
}
}
}
}
global def prettyPrintArrayD(Name:String, w:ArrayD) {
val p = w.home();
at (p) {
Console.OUT.println("Array "+Name+
" at place "+p.id());
for (q in w.region()) {
val str = w(q) + " ";
Console.OUT.print(str);
}
Console.OUT.println();
Console.OUT.flush();
}
}
/**
* Lift the copyFrom function from Rails to special Arrays
*/
public static def ArrayD_cp(s1:Array[Double])
{s1.rail()}: Array[Double]! {
if (s1.home() == here) {
return(s1 as Array[Double]!);
} else {
val n:Int = s1.raw().length();
val p:Place = s1.home();
val R:Rail[Double] = at(p) s1.raw();
val s2:Array[Double] = new Array[Double](n);
s2.raw().copyFrom(0,R,0,n);
return(s2 as Array[Double]!);
}
}
/**
* Function for Adding two arrays of the special ArrayD form
*/
public static def ArrayD_add(s1:Array[Double],
s2:Array[Double])
{s1.region() == s2.region(), s1.rail(), s2.rail()}: Array[Double] {
val s1_loc:Array[Double]! = ArrayD_cp(s1);
val s2_loc:Array[Double]! = ArrayD_cp(s2);
val n:Int = s1.raw().length();
for ( var i:Int = 0; i<n; i++ ) {
s1_loc(i) += s2_loc(i);
}
return(s1_loc);
}
/**
* Method to multiply matrix A with vector v
*/
def ClassicMatrixMultiply() {
// set up a Rail at Place 0 to serve
// as a source for the scatter operation
val r_src:Rail[Double] = v.raw();
finish ateach (pt in v_src ) {
val p:Place = v_src.dist()(pt);
/**
* Initialize the result vector
*/
for ( r in v_dst(pt).region() ) {
v_dst(pt)(r) = 0.0;
}
/**
* Scatter the vector v
*/
v_src(pt).raw().copyFrom(j_min(pt), r_src, j_min(pt), j_len(pt));
/**
* Next do the local part of the
* matrix multiply.
*/
for ( q(i,j) in (A|p) ) {
v_dst(pt)(i) += A(q)*v_src(pt)(j);
}
if (debug) {
val v_src_str = "v_src("+p.id()+")";
prettyPrintArrayD(v_src_str, v_src(pt));
val v_dst_str = "v_dst("+p.id()+")";
prettyPrintArrayD(v_dst_str, v_dst(pt));
}
}
/**
* Initialize v to zero and use it as
* neutral element in the reduce
*/
for ( r in v.region() ) {
v(r) = 0.0;
}
v = v_dst.reduce(ArrayD_add, v);
}
/**
* Main method
*/
public static def main(args:Rail[String]): Void {
//val size = ( args.length > 0 ?
// Int.parseInt(args(0)) : 4 );
val size = 6;
val s = new matmul(size, 1, (1==1));
if ( s.debug) {
s.prettyPrintMatrix(s.A);
s.prettyPrintArrayD("initial v", s.v);
}
s.ClassicMatrixMultiply();
if ( s.debug) {
s.prettyPrintArrayD("final v", s.v);
}
}
}
import x10.util.Box;
/**
* Class matmul
*/
public class matmul {
global val vsize:Int;
global val debug:Boolean;
static type ArrayD = Array[Double]{rank==1};
global val A: DistArray[Double]{rank==2};
/**
* While the matrix A is distributed,
* we keep the vector as an ordinary Array
* at place Place.FIRST_PLACE.
*/
global val v: ArrayD;
/**
* We need a data structure
* for accumulating the temporary results
* of the local matrix vector multiply.
* The data structure is designed to make
* use of the DistArray.reduce() function.
*/
global val v_src: DistArray[ArrayD]{rank==1};
global val v_dst: DistArray[ArrayD]{rank==1};
/**
* v_src is generated from v by scattering
* the data across the MPI tasks. The following
* two distributed arrays describe which parts
* of v get where.
*/
global val j_min: DistArray[Int]{rank==1};
global val j_len: DistArray[Int]{rank==1};
/**
* This constant ArrayD is used as the
* zero element in the reduce operation.
*/
global val v_NULL: ArrayD;
/**
* special constructor
*/
public def this(n:Int, axis:Int, Dprint: Boolean) {
// populate vsize
vsize = n;
// populate debug
debug = Dprint;
// set up distributions
val D = Dist.makeBlock([0..n-1, 0..n-1], axis);
// the following distributions are no longer used
// and kept here only for reference.
val D_row:Dist = ( axis == 0 ?
Dist.makeConstant([0..n-1]) :
Dist.makeBlock([0..n-1], 0) );
val D_col:Dist = ( axis == 0 ?
Dist.makeBlock([0..n-1], 0) :
Dist.makeConstant([0..n-1]) );
// Finally, we set up a distribution
// with one point per place
val Dv_tmp:Dist = Dist.makeUnique(D.places());
if (debug) {
// This declares a matrix with debug entries
A = DistArray.make[Double](D,
(p(i,j):Point) => { i*1.0 + j*0.01 } );
} else {
// This declares a unit matrix
A = DistArray.make[Double](D,
(p(i,j):Point) => { i==j ? 1.0 : 0.0} );
}
if (debug) {
// For debugging reasons we take a constant vector.
v = new ArrayD(n,
(p(i):Point) => {(1.0) as Double});
} else {
// This will be the final initialization
v = new ArrayD(n,
(p(i):Point) => {(i*1.0) as Double});
}
v_src = DistArray.make[ArrayD](Dv_tmp);
v_dst = DistArray.make[ArrayD](Dv_tmp);
j_min = DistArray.make[Int](Dv_tmp);
j_len = DistArray.make[Int](Dv_tmp);
finish ateach(pt in Dv_tmp ) {
v_src(pt) = new Array[Double](n,
(q:Point) => {(0.0) as Double});
v_dst(pt) = new Array[Double](n,
(q:Point) => {(0.0) as Double});
val p:Place = Dv_tmp(pt);
j_min(pt) = (A|p).region().min(1);
j_len(pt) = (A|p).region().max(1) - j_min(pt) + 1;
}
v_NULL = new ArrayD(n,
(p:Point) => {(0.0) as Double});
}
/**
* Default constructor
*/
public def this() { this(6,0,(1==1)); }
/**
* Methods used for printing
*/
static def format(x:Double, numDecimals:int) {
return String.format("%1."+numDecimals+"f", [x as Box[Double]]);
}
def prettyPrintMatrix(A:DistArray[Double]{rank == 2}) {
finish for (p in A.dist().places() ) {
at (p) {
Console.OUT.println("Matrix A at place "+p.id()
);
for ((i) in (A|p).region.projection(0)) {
for ((j) in (A|p).region.projection(1)) {
val str = A(i,j) + " ";
Console.OUT.print(str);
}
Console.OUT.println();
Console.OUT.flush();
}
}
}
}
global def prettyPrintArrayD(Name:String, w:ArrayD) {
val p = w.home();
at (p) {
Console.OUT.println("Array "+Name+
" at place "+p.id());
for (q in w.region()) {
val str = w(q) + " ";
Console.OUT.print(str);
}
Console.OUT.println();
Console.OUT.flush();
}
}
/**
* Lift the copyFrom function from Rails to special Arrays
*/
public def ArrayD_cp(s1:Array[Double])
{s1.rail()}: Array[Double]! {
if (s1.home() == here) {
return(s1 as Array[Double]!);
} else {
val n:Int = s1.raw().length();
val p:Place = s1.home();
val R:Rail[Double] = at(p) s1.raw();
val s2:Array[Double] = new Array[Double](n);
s2.raw().copyFrom(0,R,0,n);
return(s2 as Array[Double]!);
}
}
/**
* Function for Adding two arrays of the special ArrayD form
*/
public def ArrayD_add(s1:Array[Double],
s2:Array[Double])
{s1.region() == s2.region(), s1.rail(), s2.rail()}: Array[Double] {
val s1_loc:Array[Double]! = ArrayD_cp(s1);
val s2_loc:Array[Double]! = ArrayD_cp(s2);
s1_loc.lift(s1_loc, s2_loc, Double.+);
return(s1_loc);
}
/**
* Method to multiply matrix A with vector v
*/
def ClassicMatrixMultiply() {
// set up a Rail at Place 0 to serve
// as a source for the scatter operation
val r_src:Rail[Double] = v.raw();
finish ateach (pt in v_src ) {
val p:Place = v_src.dist()(pt);
/**
* Initialize the result vector
*/
for ( r in v_dst(pt).region() ) {
v_dst(pt)(r) = 0.0;
}
/**
* Scatter the vector v
*/
v_src(pt).raw().copyFrom(j_min(pt), r_src, j_min(pt), j_len(pt));
/**
* Next do the local part of the
* matrix multiply.
*/
for ( q(i,j) in (A|p) ) {
v_dst(pt)(i) += A(q)*v_src(pt)(j);
}
if (debug) {
val v_src_str = "v_src("+p.id()+")";
prettyPrintArrayD(v_src_str, v_src(pt));
val v_dst_str = "v_dst("+p.id()+")";
prettyPrintArrayD(v_dst_str, v_dst(pt));
}
}
/**
* Initialize v to zero and use it as
* neutral element in the reduce
*/
for ( r in v.region() ) {
v(r) = 0.0;
}
// v = v_dst.reduce(ArrayD_add, v);
}
/**
* Main method
*/
public static def main(args:Rail[String]): Void {
//val size = ( args.length > 0 ?
// Int.parseInt(args(0)) : 4 );
val size = 6;
val s = new matmul(size, 1, (1==1));
if ( s.debug) {
s.prettyPrintMatrix(s.A);
s.prettyPrintArrayD("initial v", s.v);
}
s.ClassicMatrixMultiply();
if ( s.debug) {
s.prettyPrintArrayD("final v", s.v);
}
}
}