http://git-wip-us.apache.org/repos/asf/incubator-milagro-crypto/blob/c25f9e5c/version22/go/ECP2.go
----------------------------------------------------------------------
diff --git a/version22/go/ECP2.go b/version22/go/ECP2.go
new file mode 100644
index 0000000..30fe1e4
--- /dev/null
+++ b/version22/go/ECP2.go
@@ -0,0 +1,568 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+*/
+
+/* MiotCL Weierstrass elliptic curve functions over FP2 */
+
+package main
+
+//import "fmt"
+
/* ECP2 is a point on the twisted curve over the quadratic extension
   field FP2, held in Jacobian projective coordinates (x,y,z). */
type ECP2 struct {
	x   *FP2 // projective x coordinate
	y   *FP2 // projective y coordinate
	z   *FP2 // projective z coordinate
	INF bool // true when this is the point at infinity
}
+
+func NewECP2() *ECP2 {
+       E:=new(ECP2)
+       E.x=NewFP2int(0)
+       E.y=NewFP2int(1)
+       E.z=NewFP2int(1)
+       E.INF=true
+       return E
+}
+
+/* Test this=O? */
+func (E *ECP2) is_infinity() bool {
+               return E.INF
+}
+/* copy this=P */
+func (E *ECP2) copy(P *ECP2) {
+       E.x.copy(P.x)
+       E.y.copy(P.y)
+       E.z.copy(P.z)
+       E.INF=P.INF
+}
+/* set this=O */
+func (E *ECP2) inf() {
+       E.INF=true
+       E.x.zero()
+       E.y.zero()
+       E.z.zero()
+}
+
+/* set this=-this */
+func (E *ECP2) neg() {
+       if E.is_infinity() {return}
+       E.y.neg(); E.y.reduce()
+}
+
+/* Conditional move of Q to P dependant on d */
+func (E *ECP2) cmove(Q *ECP2,d int) {
+       E.x.cmove(Q.x,d)
+       E.y.cmove(Q.y,d)
+       E.z.cmove(Q.z,d)
+
+       var bd bool
+       if (d==0) {
+               bd=false
+       } else {bd=true}
+       E.INF=(E.INF!=(E.INF!=Q.INF)&&bd)
+}
+
+/* Constant time select from pre-computed table */
+func (E *ECP2) selector(W []*ECP2,b int32) {
+       MP:=NewECP2() 
+       m:=b>>31
+       babs:=(b^m)-m
+
+       babs=(babs-1)/2
+
+       E.cmove(W[0],teq(babs,0))  // conditional move
+       E.cmove(W[1],teq(babs,1))
+       E.cmove(W[2],teq(babs,2))
+       E.cmove(W[3],teq(babs,3))
+       E.cmove(W[4],teq(babs,4))
+       E.cmove(W[5],teq(babs,5))
+       E.cmove(W[6],teq(babs,6))
+       E.cmove(W[7],teq(babs,7))
+ 
+       MP.copy(E)
+       MP.neg()
+       E.cmove(MP,int(m&1))
+}
+
+/* Test if P == Q */
+func (E *ECP2) equals(Q *ECP2) bool {
+       if E.is_infinity() && Q.is_infinity() {return true}
+       if E.is_infinity() || Q.is_infinity() {return false}
+
+       zs2:=NewFP2copy(E.z); zs2.sqr()
+       zo2:=NewFP2copy(Q.z); zo2.sqr()
+       zs3:=NewFP2copy(zs2); zs3.mul(E.z)
+       zo3:=NewFP2copy(zo2); zo3.mul(Q.z)
+       zs2.mul(Q.x)
+       zo2.mul(E.x)
+       if !zs2.equals(zo2) {return false}
+       zs3.mul(Q.y)
+       zo3.mul(E.y)
+       if !zs3.equals(zo3) {return false}
+
+       return true
+}
+
/* affine converts this point from Jacobian projective coordinates
   (x,y,z) to affine form (x/z^2, y/z^3, 1). No-op for infinity or
   when z is already one. */
func (E *ECP2) affine() {
	if E.is_infinity() {return}
	one:=NewFP2int(1)
	if E.z.equals(one) {return}
	E.z.inverse()

	z2:=NewFP2copy(E.z);
	z2.sqr()
	E.x.mul(z2); E.x.reduce()   // x = x/z^2
	E.y.mul(z2) 
	E.y.mul(E.z);  E.y.reduce() // y = y/z^3
	E.z.copy(one)
}
+
+/* extract affine x as FP2 */
+func (E *ECP2) getX() *FP2 {
+       E.affine()
+       return E.x
+}
+/* extract affine y as FP2 */
+func (E *ECP2) getY() *FP2 {
+       E.affine();
+       return E.y;
+}
+/* extract projective x */
+func (E *ECP2) getx() *FP2 {
+       return E.x
+}
+/* extract projective y */
+func (E *ECP2) gety() *FP2 {
+       return E.y
+}
+/* extract projective z */
+func (E *ECP2) getz() *FP2 {
+       return E.z
+}
+
+/* convert to byte array */
+func (E *ECP2) toBytes(b []byte) {
+       var t [int(MODBYTES)]byte
+       MB:=int(MODBYTES)
+
+       E.affine()
+       E.x.getA().toBytes(t[:])
+       for i:=0;i<MB;i++ { b[i]=t[i]}
+       E.x.getB().toBytes(t[:])
+       for i:=0;i<MB;i++ { b[i+MB]=t[i]}
+
+       E.y.getA().toBytes(t[:])
+       for i:=0;i<MB;i++ {b[i+2*MB]=t[i]}
+       E.y.getB().toBytes(t[:])
+       for i:=0;i<MB;i++ {b[i+3*MB]=t[i]}
+}
+
+/* convert from byte array to point */
+func ECP2_fromBytes(b []byte) *ECP2 {
+       var t [int(MODBYTES)]byte
+       MB:=int(MODBYTES)
+
+       for i:=0;i<MB;i++ {t[i]=b[i]}
+       ra:=fromBytes(t[:])
+       for i:=0;i<MB;i++ {t[i]=b[i+MB]}
+       rb:=fromBytes(t[:])
+       rx:=NewFP2bigs(ra,rb)
+
+       for i:=0;i<MB;i++ {t[i]=b[i+2*MB]}
+       ra=fromBytes(t[:])
+       for i:=0;i<MB;i++ {t[i]=b[i+3*MB]}
+       rb=fromBytes(t[:])
+       ry:=NewFP2bigs(ra,rb)
+
+       return NewECP2fp2s(rx,ry)
+}
+
+/* convert this to hex string */
+func (E *ECP2) toString() string {
+       if E.is_infinity() {return "infinity"}
+       E.affine()
+       return "("+E.x.toString()+","+E.y.toString()+")"
+}
+
+/* Calculate RHS of twisted curve equation x^3+B/i */
+func RHS2(x *FP2) *FP2 {
+       x.norm()
+       r:=NewFP2copy(x)
+       r.sqr()
+       b:=NewFP2big(NewBIGints(CURVE_B))
+       b.div_ip()
+       r.mul(x)
+       r.add(b)
+
+       r.reduce()
+       return r
+}
+
+/* construct this from (x,y) - but set to O if not on curve */
+func NewECP2fp2s(ix *FP2,iy *FP2) *ECP2 {
+       E:=new(ECP2)
+       E.x=NewFP2copy(ix)
+       E.y=NewFP2copy(iy)
+       E.z=NewFP2int(1)
+       rhs:=RHS2(E.x)
+       y2:=NewFP2copy(E.y)
+       y2.sqr()
+       if y2.equals(rhs) {
+               E.INF=false
+       } else {E.x.zero();E.INF=true}
+       return E
+}
+
+/* construct this from x - but set to O if not on curve */
+func NewECP2fp2(ix *FP2) *ECP2 {       
+       E:=new(ECP2)
+       E.x=NewFP2copy(ix)
+       E.y=NewFP2int(1)
+       E.z=NewFP2int(1)
+       rhs:=RHS2(E.x)
+       if rhs.sqrt() {
+                       E.y.copy(rhs)
+                       E.INF=false;
+       } else {E.x.zero();E.INF=true}
+       return E
+}
+
/* dbl doubles this point in place using Jacobian coordinates.
   Returns -1 if the result is infinity, else 1. */
func (E *ECP2) dbl() int {
	if E.INF {return -1}
	// a point of order 2 (y==0) doubles to infinity
	if E.y.iszilch() {
		E.inf()
		return -1
	}

	w1:=NewFP2copy(E.x)
	w2:=NewFP2int(0)
	w3:=NewFP2copy(E.x)
	w8:=NewFP2copy(E.x)

	w1.sqr()
	w8.copy(w1)
	w8.imul(3)              // w8 = 3x^2 (twisted curve has a=0)

	w2.copy(E.y); w2.sqr()  // w2 = y^2
	w3.copy(E.x); w3.mul(w2)
	w3.imul(4)              // w3 = 4xy^2
	w1.copy(w3); w1.neg()
	w1.norm();

	// x' = w8^2 - 2*w3
	E.x.copy(w8); E.x.sqr()
	E.x.add(w1)
	E.x.add(w1)
	E.x.norm()

	// z' = 2yz
	E.z.mul(E.y)
	E.z.add(E.z)

	// y' = w8*(w3 - x') - 8y^4
	w2.add(w2)
	w2.sqr()
	w2.add(w2)
	w3.sub(E.x);
	E.y.copy(w8); E.y.mul(w3)
	//      w2.norm();
	E.y.sub(w2)

	E.y.norm()
	E.z.norm()

	return 1
}
+
/* add adds Q to this point in place using Jacobian addition, with a
   cheaper mixed-addition path when Q is affine (z==1). Returns 0 for
   a general add, 1 if a doubling was performed instead, and -1 if
   either operand was infinity. */
func (E *ECP2) add(Q *ECP2) int {
	if E.INF {
		E.copy(Q)
		return -1
	}
	if Q.INF {return -1}

	aff:=false

	// mixed addition applies when Q.z == 1
	if Q.z.isunity() {aff=true}

	var A,C *FP2
	B:=NewFP2copy(E.z)
	D:=NewFP2copy(E.z)
	if !aff{
		A=NewFP2copy(Q.z)
		C=NewFP2copy(Q.z)

		A.sqr(); B.sqr()
		C.mul(A); D.mul(B)

		A.mul(E.x)  // A = x1*z2^2
		C.mul(E.y)  // C = y1*z2^3
	} else {
		A=NewFP2copy(E.x)
		C=NewFP2copy(E.y)
	
		B.sqr()
		D.mul(B)
	}

	B.mul(Q.x); B.sub(A) // B = x2*z1^2 - x1*z2^2
	D.mul(Q.y); D.sub(C) // D = y2*z1^3 - y1*z2^3

	if B.iszilch() {
		if D.iszilch() {
			// equal points: fall back to doubling
			E.dbl()
			return 1
		} else  {
			// same x, opposite y: result is infinity
			E.INF=true
			return -1
		}
	}

	if !aff {E.z.mul(Q.z)}
	E.z.mul(B)

	e:=NewFP2copy(B); e.sqr()
	B.mul(e)
	A.mul(e)

	// x' = D^2 - (2A + B)
	e.copy(A)
	e.add(A); e.add(B)
	E.x.copy(D); E.x.sqr(); E.x.sub(e)

	// y' = D*(A - x') - C*B
	A.sub(E.x);
	E.y.copy(A); E.y.mul(D)
	C.mul(B); E.y.sub(C)

	E.x.norm()
	E.y.norm()
	E.z.norm()

	return 0
}
+
+/* set this-=Q */
+func (E *ECP2) sub(Q *ECP2) int {
+       Q.neg()
+       D:=E.add(Q)
+       Q.neg()
+       return D
+}
/* frob multiplies this point by the field modulus via the Frobenius
   endomorphism: each coordinate is conjugated, then x is scaled by
   X^2 and y by X^3, where X is a precomputed FP2 constant supplied
   by the caller. */
func (E *ECP2) frob(X *FP2) {
	if E.INF {return}
	X2:=NewFP2copy(X)
	X2.sqr()
	E.x.conj()
	E.y.conj()
	E.z.conj()
	E.z.reduce();
	E.x.mul(X2)
	E.y.mul(X2)
	E.y.mul(X) // y gets a total factor of X^3
}
+
/* multiaffine2 normalises an array of m ECP2 points to affine form
   using Montgomery's simultaneous-inversion trick: all z coordinates
   are inverted with a single field inversion plus O(m) multiplies.
   Allocates a work vector of m FP2s internally. */

func multiaffine2(m int,P []*ECP2) {
	t1:=NewFP2int(0)
	t2:=NewFP2int(0)

	var work []*FP2

	for i:=0;i<m;i++ {
		work=append(work,NewFP2int(0))
	}

	// work[i] accumulates the prefix product z0*z1*...*z(i-1)
	work[0].one()
	work[1].copy(P[0].z)

	for i:=2;i<m;i++ {
		work[i].copy(work[i-1])
		work[i].mul(P[i-1].z)
	}

	t1.copy(work[m-1]); t1.mul(P[m-1].z)

	t1.inverse() // the single field inversion

	t2.copy(P[m-1].z)
	work[m-1].mul(t1)

	// sweep back down, peeling one z inverse off the running product
	for i:=m-2;;i-- {
		if i==0 {
			work[0].copy(t1)
			work[0].mul(t2)
			break
		}
		work[i].mul(t2);
		work[i].mul(t1);
		t2.mul(P[i].z);
	}
/* now work[] contains inverses of all Z coordinates */

	for i:=0;i<m;i++ {
		P[i].z.one();
		t1.copy(work[i]); t1.sqr()
		P[i].x.mul(t1)      // x = x/z^2
		t1.mul(work[i])
		P[i].y.mul(t1)      // y = y/z^3
	}    
}
+
/* mul returns this*e using fixed-size signed 4-bit windows over a
   precomputed odd-multiples table, with constant-time table lookups
   and an odd-exponent correction (side-channel aware). This point is
   converted to affine as a side effect but otherwise unchanged. */
func (E *ECP2) mul(e *BIG) *ECP2 {
/* fixed size windows */
	mt:=NewBIG()
	t:=NewBIG()
	P:=NewECP2()
	Q:=NewECP2()
	C:=NewECP2()

	if E.is_infinity() {return NewECP2()}

	var W []*ECP2
	var w [1+(NLEN*int(BASEBITS)+3)/4]int8

	E.affine()

/* precompute table: W[i] = (2i+1)*E */
	Q.copy(E)
	Q.dbl()

	W=append(W,NewECP2())
	W[0].copy(E);

	for i:=1;i<8;i++ {
		W=append(W,NewECP2())
		W[i].copy(W[i-1])
		W[i].add(Q)
	}

/* convert the table to affine */

	multiaffine2(8,W[:])

/* make exponent odd - add 2P if even, P if odd */
	t.copy(e)
	s:=int(t.parity())
	t.inc(1); t.norm(); ns:=int(t.parity()); mt.copy(t); mt.inc(1); mt.norm()
	t.cmove(mt,s)
	Q.cmove(E,ns)
	C.copy(Q) // correction point, subtracted at the end

	nb:=1+(t.nbits()+3)/4
/* convert exponent to signed 4-bit window */
	for i:=0;i<nb;i++ {
		w[i]=int8(t.lastbits(5)-16)
		t.dec(int(w[i])); t.norm()
		t.fshr(4)
	}
	w[nb]=int8(t.lastbits(5))

	P.copy(W[(w[nb]-1)/2])
	for i:=nb-1;i>=0;i-- {
		Q.selector(W,int32(w[i])) // constant-time table lookup
		P.dbl()
		P.dbl()
		P.dbl()
		P.dbl()
		P.add(Q)
	}
	P.sub(C) // undo the odd-exponent adjustment
	P.affine()
	return P
}
+
/* mul4 computes P = u[0]*Q[0] + u[1]*Q[1] + u[2]*Q[2] + u[3]*Q[3]
   simultaneously, interleaving the four exponents as signed digits
   over a precomputed 8-entry table of sums/differences of the Q[i]. */
func mul4(Q []*ECP2,u []*BIG) *ECP2 {
	var a [4]int8
	T:=NewECP2()
	C:=NewECP2()
	P:=NewECP2()

	var W [] *ECP2

	mt:=NewBIG()
	var t []*BIG

	var w [NLEN*int(BASEBITS)+1]int8

	for i:=0;i<4;i++ {
		t=append(t,NewBIGcopy(u[i]));
		Q[i].affine();
	}

/* precompute table: the 8 combinations Q0 +/- Q1 +/- Q2 +/- Q3 */

	W=append(W,NewECP2()); W[0].copy(Q[0]); W[0].sub(Q[1])
	W=append(W,NewECP2()); W[1].copy(W[0])
	W=append(W,NewECP2()); W[2].copy(W[0])
	W=append(W,NewECP2()); W[3].copy(W[0])
	W=append(W,NewECP2()); W[4].copy(Q[0]); W[4].add(Q[1])
	W=append(W,NewECP2()); W[5].copy(W[4])
	W=append(W,NewECP2()); W[6].copy(W[4])
	W=append(W,NewECP2()); W[7].copy(W[4])

	T.copy(Q[2]); T.sub(Q[3])
	W[1].sub(T)
	W[2].add(T)
	W[5].sub(T)
	W[6].add(T)
	T.copy(Q[2]); T.add(Q[3])
	W[0].sub(T)
	W[3].add(T)
	W[4].sub(T)
	W[7].add(T)

	multiaffine2(8,W[:])

/* if multiplier is even add 1 to multiplier, and add P to correction */
	mt.zero(); C.inf()
	for i:=0;i<4;i++ {
		if t[i].parity()==0 {
			t[i].inc(1); t[i].norm()
			C.add(Q[i])
		}
		mt.add(t[i]); mt.norm()
	}

	nb:=1+mt.nbits();

/* convert exponent to signed 1-bit window */
	for j:=0;j<nb;j++ {
		for i:=0;i<4;i++ {
			a[i]=int8(t[i].lastbits(2)-2)
			t[i].dec(int(a[i])); t[i].norm()
			t[i].fshr(1)
		}
		// pack the four signed digits into one table index
		w[j]=(8*a[0]+4*a[1]+2*a[2]+a[3])
	}

	w[nb]=int8(8*t[0].lastbits(2)+4*t[1].lastbits(2)+2*t[2].lastbits(2)+t[3].lastbits(2))

	P.copy(W[(w[nb]-1)/2])
	for i:=nb-1;i>=0;i-- {
		T.selector(W,int32(w[i])) // constant-time table lookup
		P.dbl()
		P.add(T)
	}
	P.sub(C) /* apply correction */

	P.affine()
	return P
}
+

http://git-wip-us.apache.org/repos/asf/incubator-milagro-crypto/blob/c25f9e5c/version22/go/FF.go
----------------------------------------------------------------------
diff --git a/version22/go/FF.go b/version22/go/FF.go
new file mode 100644
index 0000000..553f7ac
--- /dev/null
+++ b/version22/go/FF.go
@@ -0,0 +1,905 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+*/
+
+package main
+
+//import "fmt"
+//import "os"
+
+//var debug bool = false
+
/* FF is a multi-precision integer held as an array of BIG limbs,
   least significant BIG first. */
type FF struct {
	length int    // number of BIG limbs in v
	v []*BIG      // the limbs; v[0] is least significant
}
+
+/* Constructors */
+func NewFFint(n int) *FF {
+       F:=new(FF)
+       for i:=0;i<n;i++ {
+               F.v=append(F.v,NewBIG())
+       }
+       F.length=n
+       return F
+}
+/*
+func NewFFints(x [][NLEN]int64,n int) *FF {
+       F:=new(FF)
+       for i:=0;i<n;i++ {
+               F.v=append(F.v,NewBIGints(x[i]))
+       }
+       F.length=n
+       return F
+}
+*/
+/* set to zero */
+func (F *FF) zero() {
+       for i:=0;i<F.length;i++ {
+               F.v[i].zero()
+       }
+}
+
+func (F *FF) getlen() int {
+               return F.length
+       }
+
+/* set to integer */
+func (F *FF) set(m int) {
+       F.zero()
+       F.v[0].set(0,Chunk(m))
+}
+
+/* copy from FF b */
+func (F *FF) copy(b *FF) {
+       for i:=0;i<F.length;i++ {
+               F.v[i].copy(b.v[i])
+       }
+}
+
+/* x=y<<n */
+func (F *FF) dsucopy(b *FF) {
+       for i:=0;i<b.length;i++ {
+               F.v[b.length+i].copy(b.v[i])
+               F.v[i].zero()
+       }
+}
+
+/* x=y */
+func (F *FF) dscopy(b *FF) {
+       for i:=0;i<b.length;i++ {
+               F.v[i].copy(b.v[i])
+               F.v[b.length+i].zero()
+       }
+}
+
+/* x=y>>n */
+func (F *FF) sducopy(b *FF) {
+       for i:=0;i<F.length;i++ {
+               F.v[i].copy(b.v[F.length+i])
+       }
+}
+
+func (F *FF) one() {
+       F.v[0].one();
+       for i:=1;i<F.length;i++ {
+               F.v[i].zero()
+       }
+}
+
+/* test equals 0 */
+func (F *FF) iszilch() bool {
+       for i:=0;i<F.length;i++ {
+               if !F.v[i].iszilch() {return false}
+       }
+       return true
+}
+
+/* shift right by BIGBITS-bit words */
+func (F *FF) shrw(n int) {
+       for i:=0;i<n;i++ {
+               F.v[i].copy(F.v[i+n])
+               F.v[i+n].zero()
+       }
+}
+
+/* shift left by BIGBITS-bit words */
+func (F *FF) shlw(n int) {
+       for i:=0;i<n;i++ {
+               F.v[n+i].copy(F.v[i])
+               F.v[i].zero()
+       }
+}
+
+/* extract last bit */
+func (F *FF) parity() int {
+       return F.v[0].parity()
+}
+
+func (F *FF) lastbits(m int) int {
+       return F.v[0].lastbits(m)
+}
+
+/* compare x and y - must be normalised, and of same length */
+func ff_comp(a *FF,b *FF) int {
+       for i:=a.length-1;i>=0;i-- {
+               j:=comp(a.v[i],b.v[i])
+               if j!=0 {return j}
+       }
+       return 0
+}
+
+/* recursive add */
+func (F *FF) radd(vp int,x *FF,xp int,y *FF,yp int,n int) {
+       for i:=0;i<n;i++ {
+               F.v[vp+i].copy(x.v[xp+i])
+               F.v[vp+i].add(y.v[yp+i])
+       }
+}
+
+/* recursive inc */
+func (F *FF) rinc(vp int,y *FF,yp int,n int) {
+       for i:=0;i<n;i++ {
+               F.v[vp+i].add(y.v[yp+i])
+       }
+}
+
+/* recursive sub */
+func (F *FF) rsub(vp int,x *FF,xp int,y *FF,yp int,n int) {
+       for i:=0;i<n;i++ {
+               F.v[vp+i].copy(x.v[xp+i])
+               F.v[vp+i].sub(y.v[yp+i])
+       }
+}
+
+/* recursive dec */
+func (F *FF) rdec(vp int,y *FF,yp int,n int) {
+       for i:=0;i<n;i++ {
+               F.v[vp+i].sub(y.v[yp+i])
+       }
+}
+
+/* simple add */
+func (F *FF) add(b *FF) {
+       for i:=0;i<F.length;i++ {
+               F.v[i].add(b.v[i])
+       }
+}
+
+/* simple sub */
+func (F *FF) sub(b *FF) {
+       for i:=0;i<F.length;i++ {
+               F.v[i].sub(b.v[i])
+       }
+}
+       
+/* reverse sub */
+func (F *FF) revsub(b *FF) {
+       for i:=0;i<F.length;i++ {
+               F.v[i].rsub(b.v[i])
+       }
+}
+
/* rnorm normalises n limbs starting at offset vp, rippling each
   limb's overflow into the next. A negative n requests truncation:
   the final carry is folded back into the top limb rather than kept. */
func (F *FF) rnorm(vp int,n int) {
	trunc:=false
	var carry Chunk
	if n<0 { /* -v n signals to do truncation */
		n=-n
		trunc=true
	}
	for i:=0;i<n-1;i++ {
		carry=F.v[vp+i].norm()
		F.v[vp+i].xortop(carry<<P_TBITS) // strip carry bits off this limb
		F.v[vp+i+1].w[0]+=carry; // inc(carry)
	}
	carry=F.v[vp+n-1].norm()
	if trunc {
		F.v[vp+n-1].xortop(carry<<P_TBITS) // discard the final carry
	}
}
+
+func (F *FF) norm() {
+       F.rnorm(0,F.length)
+}
+
+/* increment/decrement by a small integer */
+func (F *FF) inc(m int) {
+       F.v[0].inc(m)
+       F.norm()
+}
+
+func (F *FF) dec(m int) {
+       F.v[0].dec(m)
+       F.norm()
+}
+
/* shl shifts this FF left by one bit, passing each limb's carry to
   the next limb one iteration later (delayed carry). */
func (F *FF) shl() {
	var delay_carry int=0
	for i:=0;i<F.length-1;i++ {
		carry:=F.v[i].fshl(1)
		F.v[i].inc(delay_carry)                // fold in the previous limb's carry
		F.v[i].xortop(Chunk(carry)<<P_TBITS)   // strip this limb's carry bit
		delay_carry=int(carry)
	}
	F.v[F.length-1].fshl(1)
	F.v[F.length-1].inc(delay_carry)
}
+
+/* shift right by one bit */
+
+func (F *FF) shr() {
+       for i:=F.length-1;i>0;i-- {
+               carry:=F.v[i].fshr(1)
+               F.v[i-1].xortop(Chunk(carry)<<P_TBITS)
+       }
+       F.v[0].fshr(1)
+}
+
+/* Convert to Hex String */
+func (F *FF) toString() string {
+       F.norm()
+       s:=""
+       for i:=F.length-1;i>=0;i-- {
+               s+=F.v[i].toString()
+       }
+       return s
+}
+
+/* Convert FFs to/from byte arrays */
+func (F *FF) toBytes(b []byte) {
+       for i:=0;i<F.length;i++ {
+               F.v[i].tobytearray(b,(F.length-i-1)*int(MODBYTES))
+       }
+}
+
+func ff_fromBytes(x *FF,b []byte) {
+       for i:=0;i<x.length;i++ {
+               x.v[i]=frombytearray(b,(x.length-i-1)*int(MODBYTES))
+       }
+}
+
+/* in-place swapping using xor - side channel resistant - lengths must be the 
same */
+func ff_cswap(a *FF,b *FF,d int) {
+       for i:=0;i<a.length;i++ {
+               a.v[i].cswap(b.v[i],d)
+       }
+}
+
/* karmul computes F[vp..vp+2n) = x[xp..xp+n) * y[yp..yp+n) by
   recursive Karatsuba multiplication; t is scratch space of at least
   2n limbs starting at tp. */
func (F *FF) karmul(vp int,x *FF,xp int,y *FF,yp int,t *FF,tp int,n int) {
	if n==1 {
		// base case: one double-length BIG multiply, split into two limbs
		d:=mul(x.v[xp],y.v[yp])
		F.v[vp+1]=d.split(8*MODBYTES)
		F.v[vp].dcopy(d)
		return
	}
	nd2:=n/2
	// stage (x0+x1) and (y0+y1) in F, multiply them into t
	F.radd(vp,x,xp,x,xp+nd2,nd2)
		F.rnorm(vp,nd2)
	F.radd(vp+nd2,y,yp,y,yp+nd2,nd2)
		F.rnorm(vp+nd2,nd2)
	t.karmul(tp,F,vp,F,vp+nd2,t,tp+n,nd2)
	F.karmul(vp,x,xp,y,yp,t,tp+n,nd2)           // x0*y0 -> low half
	F.karmul(vp+n,x,xp+nd2,y,yp+nd2,t,tp+n,nd2) // x1*y1 -> high half
	// middle term: (x0+x1)(y0+y1) - x0y0 - x1y1
	t.rdec(tp,F,vp,n)
	t.rdec(tp,F,vp+n,n)
	F.rinc(vp+nd2,t,tp,n)
	F.rnorm(vp,2*n)
}
+
/* karsqr computes F[vp..vp+2n) = x[xp..xp+n)^2 by recursive
   Karatsuba squaring; t is scratch space starting at tp. */
func (F *FF) karsqr(vp int,x *FF,xp int,t *FF,tp int,n int) {
	if n==1 {
		// base case: one double-length BIG square, split into two limbs
		d:=sqr(x.v[xp])
		F.v[vp+1].copy(d.split(8*MODBYTES))
		F.v[vp].dcopy(d)
		return
	}	

	nd2:=n/2
	F.karsqr(vp,x,xp,t,tp+n,nd2)          // x0^2 -> low half
	F.karsqr(vp+n,x,xp+nd2,t,tp+n,nd2)    // x1^2 -> high half
	t.karmul(tp,x,xp,x,xp+nd2,t,tp+n,nd2) // x0*x1
	F.rinc(vp+nd2,t,tp,n)                 // added twice: cross term 2*x0*x1
	F.rinc(vp+nd2,t,tp,n)
	F.rnorm(vp+nd2,n)
}
+
/* karmul_lower calculates only the least-significant n limbs of
   x*y (the bottom half of the product); t is scratch space. */
func (F *FF) karmul_lower(vp int,x *FF,xp int,y *FF,yp int,t *FF,tp int,n int) { 
	if n==1 { /* only calculate bottom half of product */
		F.v[vp].copy(smul(x.v[xp],y.v[yp]))
		return
	}
	nd2:=n/2

	F.karmul(vp,x,xp,y,yp,t,tp+n,nd2)           // full x0*y0
	t.karmul_lower(tp,x,xp+nd2,y,yp,t,tp+n,nd2) // low half of x1*y0
	F.rinc(vp+nd2,t,tp,nd2)
	t.karmul_lower(tp,x,xp,y,yp+nd2,t,tp+n,nd2) // low half of x0*y1
	F.rinc(vp+nd2,t,tp,nd2)
	F.rnorm(vp+nd2,-nd2)  /* truncate it */
}
+
/* karmul_upper computes the most-significant half of x*y into the
   top half of F, given that F already holds the low half of the
   product; t is scratch space of at least n limbs. */
func (F *FF) karmul_upper(x *FF,y *FF,t *FF,n int) { 
	nd2:=n/2
	F.radd(n,x,0,x,nd2,nd2)       // stage a0+a1
	F.radd(n+nd2,y,0,y,nd2,nd2)   // stage b0+b1
	F.rnorm(n,nd2)
	F.rnorm(n+nd2,nd2)

	t.karmul(0,F,n+nd2,F,n,t,n,nd2)  /* t = (a0+a1)(b0+b1) */
	F.karmul(n,x,nd2,y,nd2,t,n,nd2) /* z[n]= a1*b1 */

	/* z[0-nd2]=l(a0b0) z[nd2-n]= h(a0b0)+l(t)-l(a0b0)-l(a1b1) */
	t.rdec(0,F,n,n)              /* t=t-a1b1  */

	F.rinc(nd2,F,0,nd2)  /* z[nd2-n]+=l(a0b0) = h(a0b0)+l(t)-l(a1b1)  */
	F.rdec(nd2,t,0,nd2)  /* z[nd2-n]=h(a0b0)+l(t)-l(a1b1)-l(t-a1b1)=h(a0b0) */

	F.rnorm(0,-n)        /* a0b0 now in z - truncate it */

	t.rdec(0,F,0,n)      /* (a0+a1)(b0+b1) - a0b0 */
	F.rinc(nd2,t,0,n)

	F.rnorm(nd2,n)
}
+
+/* z=x*y. Assumes x and y are of same length. */
+func ff_mul(x *FF,y *FF) *FF {
+       n:=x.length
+       z:=NewFFint(2*n)
+       t:=NewFFint(2*n)
+       z.karmul(0,x,0,y,0,t,0,n)
+       return z
+}
+
+/* return low part of product this*y */
+func (F *FF) lmul(y *FF) {
+       n:=F.length
+       t:=NewFFint(2*n)
+       x:=NewFFint(n); x.copy(F)
+       F.karmul_lower(0,x,0,y,0,t,0,n)
+}
+
/* mod reduces this FF modulo c by shift-and-subtract. c is shifted
   up and back down in place but is fully restored on exit. */
func (F *FF) mod(c *FF) {
	var k int=1  

	F.norm()
	if ff_comp(F,c)<0 {return}

	// shift c up until it exceeds F
	c.shl()
	for ff_comp(F,c)>=0 {
		c.shl()
		k++
	}

	// shift c back down, subtracting whenever it still fits
	for k>0 {
		c.shr()
		if ff_comp(F,c)>=0 {
			F.sub(c)
			F.norm()
		}
		k--
	}
}
+
+/* z=x^2 */
+func ff_sqr(x *FF) *FF {
+       n:=x.length
+       z:=NewFFint(2*n)
+       t:=NewFFint(2*n)
+       z.karsqr(0,x,0,t,0,n)
+       return z
+}
+
/* reduce returns this (double length) mod N via fast Karatsuba
   Montgomery reduction. N is the modulus; ND is the Montgomery
   constant, the inverse of N mod 2^(n*BIGBITS) from invmod2m. */
func (F *FF) reduce(N *FF,ND *FF) *FF { /* fast karatsuba Montgomery reduction */
	n:=N.length
	t:=NewFFint(2*n)
	r:=NewFFint(n)
	m:=NewFFint(n)

	r.sducopy(F)                     // r = top half of F
	m.karmul_lower(0,F,0,ND,0,t,0,n) // m = (F*ND) mod 2^(n*BIGBITS)

	F.karmul_upper(N,m,t,n)          // F = high half of N*m, given low half
	
	m.sducopy(F)
	r.add(N)
	r.sub(m)
	r.norm()

	return r

}
+
/* dmod returns this (double length 2n) reduced modulo b (length n),
   by word-aligned shift-and-subtract followed by a final mod. */
func (F *FF) dmod(b *FF) *FF {
	n:=b.length
	m:=NewFFint(2*n)
	x:=NewFFint(2*n)
	r:=NewFFint(n)

	x.copy(F)
	x.norm()
	m.dsucopy(b); k:=BIGBITS*n // m = b << (n words); k bits to shift back

	for ff_comp(x,m)>=0 {
		x.sub(m)
		x.norm()
	}

	// shift m down one bit at a time, subtracting whenever it fits
	for k>0 {	
		m.shr()

		if ff_comp(x,m)>=0 {
			x.sub(m)
			x.norm()
		}
		k--
	}

	r.copy(x)
	r.mod(b)
	return r
}
+
/* invmodp sets this = 1/this mod p using the binary extended
   Euclidean algorithm. Requires this < p on entry. */

func (F *FF) invmodp(p *FF) {
	n:=p.length

	u:=NewFFint(n)
	v:=NewFFint(n)
	x1:=NewFFint(n)
	x2:=NewFFint(n)
	t:=NewFFint(n)
	one:=NewFFint(n)

	one.one()
	u.copy(F)
	v.copy(p)
	x1.copy(one)
	x2.zero()

	// loop invariant: x1*this = u (mod p) and x2*this = v (mod p)
	// reduce n in here as well! 
	for (ff_comp(u,one)!=0 && ff_comp(v,one)!=0) {
		// strip factors of two from u, halving x1 mod p
		for u.parity()==0 {
			u.shr()
			if x1.parity()!=0 {
				x1.add(p)
				x1.norm()
			}
			x1.shr()
		}
		// strip factors of two from v, halving x2 mod p
		for v.parity()==0 {
			v.shr() 
			if x2.parity()!=0 {
				x2.add(p)
				x2.norm()
			}
			x2.shr()
		}
		// subtract smaller from larger, mirroring the step on x1/x2 mod p
		if ff_comp(u,v)>=0 {
			u.sub(v)
			u.norm()
			if ff_comp(x1,x2)>=0 {
				x1.sub(x2)
			} else {
				t.copy(p)
				t.sub(x2)
				x1.add(t)
			}
			x1.norm()
		} else {
			v.sub(u)
			v.norm()
			if ff_comp(x2,x1)>=0 { 
				x2.sub(x1)
			} else {
				t.copy(p)
				t.sub(x1)
				x2.add(t)
			}
			x2.norm()
		}
	}
	// whichever of u,v reached 1 carries the inverse
	if ff_comp(u,one)==0 {
		F.copy(x1)
	} else {
		F.copy(x2)
	}
}
+
/* nres converts this FF to n-residue (Montgomery) form modulo m. */
func (F *FF) nres(m *FF) {
	n:=m.length
	if n==1 {
		// single-limb fast path via a DBIG shift and mod
		d:=NewDBIGscopy(F.v[0])
		d.shl(uint(NLEN)*BASEBITS)
		F.v[0].copy(d.mod(m.v[0]))
	} else {
		d:=NewFFint(2*n)
		d.dsucopy(F) // this << (n words), then reduce
		F.copy(d.dmod(m))
	}
}
+
/* redc converts this FF out of n-residue (Montgomery) form modulo m,
   where ND is the Montgomery constant from invmod2m. */
func (F *FF) redc(m *FF,ND *FF) {
	n:=m.length
	if n==1 {
		// single-limb fast path using BIG Montgomery reduction
		d:=NewDBIGscopy(F.v[0])
		F.v[0].copy(monty(m.v[0],Chunk(1)<<BASEBITS-ND.v[0].w[0],d))
	} else {
		d:=NewFFint(2*n)
		F.mod(m)
		d.dscopy(F)
		F.copy(d.reduce(m,ND))
		F.mod(m)
	}
}
+
+func (F *FF) mod2m(m int) {
+       for i:=m;i<F.length;i++ {
+               F.v[i].zero()
+       }
+}
+
/* invmod2m returns U = 1/this mod 2^(m*BIGBITS) via the Arazi & Qi
   construction: each pass doubles the number of correct low-order
   words of the inverse. This FF is not modified. */
func (F *FF) invmod2m() *FF {
	n:=F.length

	b:=NewFFint(n)
	c:=NewFFint(n)
	U:=NewFFint(n)

	U.zero()
	U.v[0].copy(F.v[0])
	U.v[0].invmod2m() // a single-BIG inverse seeds the iteration

	for i:=1;i<n;i<<=1 {
		b.copy(F); b.mod2m(i)
		t:=ff_mul(U,b); t.shrw(i); b.copy(t)
		c.copy(F); c.shrw(i); c.mod2m(i)
		c.lmul(U); c.mod2m(i)

		b.add(c); b.norm()
		b.lmul(U); b.mod2m(i)

		// U += (2^(i words) - b) << (i words): extend the inverse
		c.one(); c.shlw(i); b.revsub(c); b.norm()
		b.shlw(i)
		U.add(b)
	}
	U.norm()
	return U
}
+
/* random fills this FF with random BIGs from rng, resampling the
   top limb until its high bit is set so the value has full bit
   length. */
func (F *FF) random(rng *RAND) {
	n:=F.length
	for i:=0;i<n;i++ {
		F.v[i].copy(random(rng))
	}
	/* make sure top bit is 1 */
	for (F.v[n-1].nbits()<int(MODBYTES*8)) {
		F.v[n-1].copy(random(rng))
	}
}
+
+/* generate random x less than p */
+func (F *FF) randomnum(p *FF,rng *RAND) {
+       n:=F.length
+       d:=NewFFint(2*n)
+
+       for i:=0;i<2*n;i++ {
+               d.v[i].copy(random(rng))
+       }
+       F.copy(d.dmod(p))
+}
+
/* modmul computes this = this*y mod p, where nd is the Montgomery
   constant p.invmod2m(). A pre-emptive reduction is performed when
   the operands' top limbs are large enough to risk overflow. */
func (F *FF) modmul(y *FF,p *FF,nd *FF) {
	if ff_pexceed(F.v[F.length-1],y.v[y.length-1]) {F.mod(p)}
	n:=p.length
	if n==1 {
		// single-limb fast path using BIG Montgomery reduction
		d:=mul(F.v[0],y.v[0])
		F.v[0].copy(monty(p.v[0],Chunk(1)<<BASEBITS-nd.v[0].w[0],d))
	} else {
		d:=ff_mul(F,y)
		F.copy(d.reduce(p,nd))
	}
}
+
/* modsqr computes this = this^2 mod p, where nd is the Montgomery
   constant p.invmod2m(). Reduces first if the top limb is large
   enough to risk overflow. */
func (F *FF) modsqr(p *FF,nd *FF) {
	if ff_sexceed(F.v[F.length-1]) {F.mod(p)}
	n:=p.length
	if n==1 {
		// single-limb fast path using BIG Montgomery reduction
		d:=sqr(F.v[0])
		F.v[0].copy(monty(p.v[0],Chunk(1)<<BASEBITS-nd.v[0].w[0],d))
	} else {
		d:=ff_sqr(F)
		F.copy(d.reduce(p,nd))
	}
}
+
/* skpow computes this = this^e mod p for a full-length exponent,
   using a Montgomery ladder so the same operation sequence runs for
   every exponent bit (side-channel resistant). */
func (F *FF) skpow(e *FF,p *FF) {
	n:=p.length
	R0:=NewFFint(n)
	R1:=NewFFint(n)
	ND:=p.invmod2m()

	F.mod(p)
	R0.one()
	R1.copy(F)
	R0.nres(p)
	R1.nres(p)

	for i:=int(8*MODBYTES)*n-1;i>=0;i-- {
		b:=int(e.v[i/BIGBITS].bit(i%BIGBITS))
		F.copy(R0)
		F.modmul(R1,p,ND)

		// ladder step: constant-time swap makes work identical for b=0,1
		ff_cswap(R0,R1,b)
		R0.modsqr(p,ND)

		R1.copy(F)
		ff_cswap(R0,R1,b)
	}
	F.copy(R0)
	F.redc(p,ND)
}
+
/* skpows computes this = this^e mod p for a short (single BIG)
   exponent, using the side-channel resistant Montgomery ladder. */
func (F *FF) skpows(e *BIG,p *FF) {
	n:=p.length
	R0:=NewFFint(n)
	R1:=NewFFint(n)
	ND:=p.invmod2m()

	F.mod(p)
	R0.one()
	R1.copy(F)
	R0.nres(p)
	R1.nres(p)

	for i:=int(8*MODBYTES)-1;i>=0;i-- {
		b:=int(e.bit(i))
		F.copy(R0)
		F.modmul(R1,p,ND)

		// ladder step: constant-time swap makes work identical for b=0,1
		ff_cswap(R0,R1,b)
		R0.modsqr(p,ND)

		R1.copy(F)
		ff_cswap(R0,R1,b)
	}
	F.copy(R0)
	F.redc(p,ND)
}
+
/* power computes this = this^e mod p for a small integer exponent,
   by right-to-left square-and-multiply (with a shortcut for e==2). */
func (F *FF) power(e int,p *FF) {
	n:=p.length
	w:=NewFFint(n)
	ND:=p.invmod2m()
	f:=true // true until the accumulator F receives its first factor

	w.copy(F)
	w.nres(p)
//i:=0;
	if e==2 {
		F.copy(w)
		F.modsqr(p,ND)
	} else {
		for (true) {
			if e%2==1 {
				if f {
					F.copy(w)
				} else {F.modmul(w,p,ND)}
				f=false

			}
			e>>=1
			if e==0 {break}
//fmt.Printf("wb= "+w.toString()+"\n");
//debug=true;
			w.modsqr(p,ND)
//debug=false;
//fmt.Printf("wa= "+w.toString()+"\n");
//i+=1;
//os.Exit(0);
		}
	}

	F.redc(p,ND)

}
+
/* pow sets F = F^e mod p; plain left-to-right square-and-multiply.
   Faster than skpow but NOT side-channel resistant. */
func (F *FF) pow(e *FF,p *FF) {
	n:=p.length
	w:=NewFFint(n)
	ND:=p.invmod2m()
	w.copy(F)
	F.one()
	F.nres(p)
	w.nres(p)
	for i:=int(8*MODBYTES)*n-1;i>=0;i-- {
		F.modsqr(p,ND)
		b:=e.v[i/BIGBITS].bit(i%BIGBITS)
		if b==1 {F.modmul(w,p,ND)}	// data-dependent multiply: leaks bits of e
	}
	F.redc(p,ND)
}
+
/* pow2 computes the double exponentiation F = F^e * y^f mod p
   (Shamir's trick: one shared squaring chain for both exponents). */
func (F *FF) pow2(e *BIG,y *FF,f *BIG,p *FF) {
	n:=p.length
	xn:=NewFFint(n)
	yn:=NewFFint(n)
	xy:=NewFFint(n)
	ND:=p.invmod2m()

	xn.copy(F)
	yn.copy(y)
	xn.nres(p)
	yn.nres(p)
	xy.copy(xn); xy.modmul(yn,p,ND)	// precompute x*y for the 1/1 bit pair
	F.one()
	F.nres(p)

	for i:=int(8*MODBYTES)-1;i>=0;i-- {
		eb:=e.bit(i)
		fb:=f.bit(i)
		F.modsqr(p,ND)
		// multiply in x, y or x*y according to the current bit pair
		if eb==1 {
			if fb==1 {
				F.modmul(xy,p,ND)
			} else {F.modmul(xn,p,ND)}
		} else  {
			if fb==1 {F.modmul(yn,p,ND)}
		}
	}
	F.redc(p,ND)
}
+
/* igcd returns the greatest common divisor of the integers x and y,
   computed with Euclid's algorithm. */
func igcd(x int,y int) int {
	for y != 0 {
		x, y = y, x%y
	}
	return x
}
+
/* cfactor reports whether F shares a nontrivial common factor with the
   small odd integer s.  Quick and dirty: used to pre-screen candidates
   before the expensive Miller-Rabin test. */
func (F *FF) cfactor(s int) bool {
	n:=F.length

	x:=NewFFint(n)
	y:=NewFFint(n)

	y.set(s)
	x.copy(F)
	x.norm()

	// Binary-GCD style reduction: repeatedly subtract y and strip factors
	// of two until x <= y.  Since s is odd, stripping twos preserves
	// gcd(x,s) -- presumably why only odd s are ever passed; confirm.
	x.sub(y)
	x.norm()

	for (!x.iszilch() && x.parity()==0) {x.shr()}

	for (ff_comp(x,y)>0) {
		x.sub(y)
		x.norm()
		for (!x.iszilch() && x.parity()==0) {x.shr()}
	}

	g:=int(x.v[0].get(0))	// x <= s now, so it fits in the bottom word
	r:=igcd(s,g)
	if r>1 {return true}
	return false
}
+
/* prime is a probabilistic primality test for p: a cheap trial against
   the small primes 3..19 via cfactor, then 10 rounds of Miller-Rabin
   with random bases drawn from rng.  Returns false iff p is found
   composite; true means "probably prime".  Slow. */
func prime(p *FF,rng *RAND) bool {
	s:=0
	n:=p.length
	d:=NewFFint(n)
	x:=NewFFint(n)
	unity:=NewFFint(n)
	nm1:=NewFFint(n)

	sf:=4849845 /* 3*5*.. *19 */
	p.norm()

	if p.cfactor(sf) {return false}	// divisible by a small odd prime
	unity.one()
	nm1.copy(p)
	nm1.sub(unity)
	nm1.norm()
	d.copy(nm1)

	// Write p-1 = d * 2^s with d odd.
	for d.parity()==0 {
		d.shr()
		s++
	}
	if s==0 {return false}	// p-1 odd => p even => composite

	for i:=0;i<10;i++ {
		x.randomnum(p,rng)
		x.pow(d,p)	// x = base^d mod p

		if (ff_comp(x,unity)==0 || ff_comp(x,nm1)==0) {continue}	// round passed
		loop:=false
		// Square up to s-1 times looking for -1 mod p.
		for j:=1;j<s;j++ {
			x.power(2,p)
			if ff_comp(x,unity)==0 {return false}	// nontrivial sqrt of 1 found
			if ff_comp(x,nm1)==0 {loop=true; break}
		}
		if loop {continue}
		return false	// witness of compositeness
	}

	return true
}
+/*
+func main() {
+
+       var P = [4][5]int64 
{{0xAD19A781670957,0x76A79C00965796,0xDEFCC5FC9A9717,0xF02F2940E20E9,0xBF59E34F},{0x6894F31844C908,0x8DADA70E82C79F,0xFD29F3836046F6,0x8C1D874D314DD0,0x46D077B},{0x3C515217813331,0x56680FD1CE935B,0xE55C53EEA8838E,0x92C2F7E14A4A95,0xD945E5B1},{0xACF673E919F5EF,0x6723E7E7DAB446,0x6B6FA69B36EB1B,0xF7D13920ECA300,0xB5FC2165}}
+
+       fmt.Printf("Testing FF\n")
+       var raw [100]byte
+       rng:=NewRAND()
+
+       rng.Clean()
+       for i:=0;i<100;i++ {
+               raw[i]=byte(i)
+       }
+
+       rng.Seed(100,raw[:])
+
+       n:=4
+
+       x:=NewFFint(n)
+       x.set(3)
+
+       p:=NewFFints(P[:],n)
+
+       if prime(p,rng) {fmt.Printf("p is a prime\n"); fmt.Printf("\n")}
+
+       e:=NewFFint(n)
+       e.copy(p)
+       e.dec(1); e.norm()
+
+       fmt.Printf("e= "+e.toString())
+       fmt.Printf("\n")
+       x.skpow(e,p)
+       fmt.Printf("x= "+x.toString())
+       fmt.Printf("\n")
+}
+*/
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-milagro-crypto/blob/c25f9e5c/version22/go/FP.go
----------------------------------------------------------------------
diff --git a/version22/go/FP.go b/version22/go/FP.go
new file mode 100644
index 0000000..89bcbda
--- /dev/null
+++ b/version22/go/FP.go
@@ -0,0 +1,279 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+*/
+
+/* Finite Field arithmetic */
+/* CLINT mod p functions */
+
+package main
+
+//import "fmt"
+
/* FP is an element of the base field GF(Modulus).  For non-Mersenne
   moduli x is held in Montgomery n-residue form (see nres/redc). */
type FP struct {
	x *BIG
}
+
/* Constructors */

// NewFPint builds the field element for a small integer a,
// converted into n-residue form.
func NewFPint(a int) *FP {
	F:=new(FP)
	F.x=NewBIGint(a)
	F.nres()
	return F
}

// NewFPbig builds a field element from a BIG, converted into
// n-residue form.
func NewFPbig(a *BIG) *FP {
	F:=new(FP)
	F.x=NewBIGcopy(a)
	return F
}

// NewFPcopy clones an existing element.  No nres here: a.x is
// already in residue form.
func NewFPcopy(a *FP) *FP {
	F:=new(FP)
	F.x=NewBIGcopy(a.x)
	return F
}
+
// toString renders the element as hex, converted out of residue form.
func (F *FP) toString() string {
	return F.redc().toString()
}
+
/* convert to Montgomery n-residue form: x = x * 2^(NLEN*BASEBITS) mod p.
   Pseudo/generalised Mersenne moduli reduce cheaply without a residue
   representation, so this is a no-op for them. */
func (F *FP) nres() {
	if MODTYPE!=PSEUDO_MERSENNE && MODTYPE!=GENERALISED_MERSENNE {
		p:=NewBIGints(Modulus);
		d:=NewDBIGscopy(F.x)
		d.shl(uint(NLEN)*BASEBITS)
		F.x.copy(d.mod(p))
	}
}
+
/* redc converts back from n-residue to regular form, returning a fresh
   BIG.  F itself is left unchanged. */
func (F *FP) redc() *BIG {
	if MODTYPE!=PSEUDO_MERSENNE && MODTYPE!=GENERALISED_MERSENNE {
		d:=NewDBIGscopy(F.x)
		return mod(d)	// Montgomery reduction of x widened to a DBIG
	} else {
		r:=NewBIGcopy(F.x)
		return r
	}
}
+
/* reduce fully reduces x mod Modulus (lazy reduction catch-up). */
func (F *FP) reduce() {
	p:=NewBIGints(Modulus)
	F.x.mod(p)
}
+
/* iszilch tests this==0; reduces F as a side effect so the comparison
   is on the canonical representative. */
func (F *FP) iszilch() bool {
	F.reduce()
	return F.x.iszilch()
}
+
/* copy sets this = b. */
func (F *FP) copy(b *FP ) {
	F.x.copy(b.x)
}

/* zero sets this = 0 (zero is the same in residue and regular form). */
func (F *FP) zero() {
	F.x.zero()
}

/* one sets this = 1, converting into residue form. */
func (F *FP) one() {
	F.x.one(); F.nres()
}

/* norm renormalises the limbs of x (no change of value). */
func (F *FP) norm() {
	F.x.norm();
}

/* cswap conditionally swaps this with b, in constant time, if d!=0. */
func (F *FP) cswap(b *FP,d int) {
	F.x.cswap(b.x,d);
}

/* cmove conditionally copies b into this, in constant time, if d!=0. */
func (F *FP) cmove(b *FP,d int) {
	F.x.cmove(b.x,d)
}
+
/* mul sets this *= b mod Modulus. */
func (F *FP) mul(b *FP) {

	F.norm()
	b.norm()
	// If the combined excesses could overflow the lazy-reduction bound,
	// fully reduce one operand first (pexceed checks the product bound).
	if pexceed(F.x,b.x) {F.reduce()}
	d:=mul(F.x,b.x)
	F.x.copy(mod(d))	// Montgomery-reduce the double-length product
}
+
/* logb2 returns the bit length of w plus one, i.e. the number of
   right-shifts needed to clear w, plus one: logb2(0)==1, logb2(1)==2,
   logb2(3)==3.  (Same convention as the shift-count loop in neg.) */
func logb2(w uint32) uint {
	r := uint(1)
	for v := w; v != 0; v >>= 1 {
		r++
	}
	return r
}
+
/* neg sets this = -this mod Modulus.  Rather than fully reducing first,
   x is subtracted from a shifted multiple of the modulus m*2^sb chosen
   (via the excess) to be guaranteed larger than x -- lazy reduction. */
func (F *FP) neg() {
	p:=NewBIGints(Modulus)
	m:=NewBIGcopy(p)
	F.norm()
	// sb = number of doublings of the modulus needed so that m*2^sb > x.
	sb:=logb2(uint32(EXCESS(F.x)))

	m.fshl(sb)
	F.x.rsub(m)	// x = m*2^sb - x  (== -x mod p)

	if EXCESS(F.x)>=FEXCESS {F.reduce()}
}
+
+
/* imul sets this *= c mod Modulus, where c is a small int (may be
   negative). */
func (F *FP) imul(c int) {
	F.norm()
	s:=false
	if (c<0) {	// fold the sign out; negate at the end
		c=-c
		s=true
	}
	// afx bounds the excess of the product x*c.
	afx:=(EXCESS(F.x)+1)*(Chunk(c)+1)+1;
	if (c<NEXCESS && afx<FEXCESS) {
		F.x.imul(c);	// fits in-place: cheapest path
	} else {
		if (afx<FEXCESS) {
			F.x.pmul(c)
		} else {
			// would exceed the lazy-reduction bound:
			// use a double-length product and reduce properly
			p:=NewBIGints(Modulus);
			d:=F.x.pxmul(c)
			F.x.copy(d.mod(p))
		}
	}
	if s {F.neg()}
	F.norm()
}
+
/* sqr sets this = this^2 mod Modulus. */
func (F *FP) sqr() {
	F.norm();
	// Reduce first if the squared excess could overflow (sexceed bound).
	if sexceed(F.x) {F.reduce()}
	d:=sqr(F.x)
	F.x.copy(mod(d))	// Montgomery-reduce the double-length square
}
+
/* add sets this += b (lazy: only reduces when the excess nears the
   overflow bound). */
func (F *FP) add(b *FP) {
	F.x.add(b.x)
	if (EXCESS(F.x)+2>=FEXCESS) {F.reduce()}
}

/* sub sets this -= b, implemented as this + (-b). */
func (F *FP) sub(b *FP) {
	n:=NewFPcopy(b)
	n.neg()
	F.add(n)
}
+
/* div2 sets this /= 2 mod Modulus: shift if even, else add the (odd)
   modulus first to make the shift exact. */
func (F *FP) div2() {
	F.x.norm()
	if (F.x.parity()==0) {
		F.x.fshr(1)
	} else {
		p:=NewBIGints(Modulus);
		F.x.add(p)	// x+p is even, and (x+p)/2 == x/2 mod p
		F.x.norm()
		F.x.fshr(1)
	}
}
+
/* inverse sets this = 1/this mod Modulus, via BIG.invmodp on the
   regular-form value, then back into residue form. */
func (F *FP) inverse() {
	p:=NewBIGints(Modulus);
	r:=F.redc()	// out of residue form
	r.invmodp(p)
	F.x.copy(r)
	F.nres()	// back into residue form
}
+
+/* return TRUE if this==a */
+func (F *FP) equals(a *FP) bool {
+       a.reduce()
+       F.reduce()
+       if (comp(a.x,F.x)==0) {return true}
+       return false
+}
+
+/* return this^e mod Modulus */
+func (F *FP) pow(e *BIG) *FP {
+       r:=NewFPint(1)
+       e.norm()
+       F.x.norm()
+       m:=NewFPcopy(F)
+       for true {
+               bt:=e.parity();
+               e.fshr(1);
+               if bt==1 {r.mul(m)}
+               if e.iszilch() {break}
+               m.sqr();
+       }
+       p:=NewBIGints(Modulus);
+       r.x.mod(p);
+       return r;
+}
+
/* sqrt returns sqrt(this) mod Modulus.  For p=5 mod 8 it uses the
   (2x)^((p-5)/8) method (presumably Atkin's -- confirm against the
   reference); otherwise it assumes p=3 mod 4 and returns
   this^((p+1)/4).  The caller is expected to know this is a QR. */
func (F *FP) sqrt() *FP {
	F.reduce();
	p:=NewBIGints(Modulus);
	b:=NewBIGcopy(p)
	if MOD8==5 {
		b.dec(5); b.norm(); b.shr(3)	// b = (p-5)/8
		i:=NewFPcopy(F); i.x.shl(1)	// i = 2x
		v:=i.pow(b)	// v = (2x)^((p-5)/8)
		i.mul(v); i.mul(v)	// i = 2x*v^2
		i.x.dec(1)	// i = 2x*v^2 - 1
		r:=NewFPcopy(F)
		r.mul(v); r.mul(i)	// r = x*v*(2x*v^2-1)
		r.reduce()
		return r
	} else {
		b.inc(1); b.norm(); b.shr(2)	// b = (p+1)/4
		return F.pow(b);
	}
}
+
/* jacobi returns the Jacobi symbol (this/Modulus): 1 if this is a QR,
   -1 if not, 0 if this==0. */
func (F *FP) jacobi() int {
	w:=F.redc();	// symbol is computed on the regular-form value
	p:=NewBIGints(Modulus);
	return w.jacobi(p)
}

http://git-wip-us.apache.org/repos/asf/incubator-milagro-crypto/blob/c25f9e5c/version22/go/FP12.go
----------------------------------------------------------------------
diff --git a/version22/go/FP12.go b/version22/go/FP12.go
new file mode 100644
index 0000000..88371b2
--- /dev/null
+++ b/version22/go/FP12.go
@@ -0,0 +1,551 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+*/
+
+/* MiotCL Fp^12 functions */
+/* FP12 elements are of the form a+i.b+i^2.c */
+
+package main
+
+//import "fmt"
+
/* FP12 is an element of the extension field Fp^12, stored as three FP4
   coefficients: a + i.b + i^2.c */
type FP12 struct {
	a *FP4
	b *FP4
	c *FP4
}
+
/* Constructors */

// NewFP12fp4 builds the FP12 (d, 0, 0) -- an Fp4 embedded in Fp12.
func NewFP12fp4(d *FP4) *FP12 {
	F:=new(FP12)
	F.a=NewFP4copy(d)
	F.b=NewFP4int(0)
	F.c=NewFP4int(0)
	return F
}

// NewFP12int builds the FP12 (d, 0, 0) for a small integer d.
func NewFP12int(d int) *FP12 {
	F:=new(FP12)
	F.a=NewFP4int(d)
	F.b=NewFP4int(0)
	F.c=NewFP4int(0)
	return F
}

// NewFP12fp4s builds the FP12 (d, e, f) from its three coefficients.
func NewFP12fp4s(d *FP4,e *FP4,f *FP4) *FP12 {
	F:=new(FP12)
	F.a=NewFP4copy(d)
	F.b=NewFP4copy(e)
	F.c=NewFP4copy(f)
	return F
}

// NewFP12copy deep-copies x.
func NewFP12copy(x *FP12) *FP12 {
	F:=new(FP12)
	F.a=NewFP4copy(x.a)
	F.b=NewFP4copy(x.b)
	F.c=NewFP4copy(x.c)
	return F
}
+
/* reduce reduces all components of this mod Modulus. */
func (F *FP12) reduce() {
	F.a.reduce()
	F.b.reduce()
	F.c.reduce()
}
/* norm renormalises all components of this (no change of value). */
func (F *FP12) norm() {
	F.a.norm()
	F.b.norm()
	F.c.norm()
}
/* iszilch tests this==0 (all coefficients zero); reduces as a side
   effect. */
func (F *FP12) iszilch() bool {
	F.reduce()
	return (F.a.iszilch() && F.b.iszilch() && F.c.iszilch())
}
/* isunity tests this==1, i.e. (1,0,0). */
func (F *FP12) isunity() bool {
	one:=NewFP4int(1)
	return (F.a.equals(one) && F.b.iszilch() && F.c.iszilch())
}
/* equals reports componentwise equality with x. */
func (F *FP12) equals(x *FP12) bool {
	return (F.a.equals(x.a) && F.b.equals(x.b) && F.c.equals(x.c))
}
+
/* geta returns the a coefficient.  NOTE: returns the internal pointer,
   not a copy -- mutating the result mutates F. */
func (F *FP12) geta() *FP4 {
	return F.a
}
/* getb returns the b coefficient (internal pointer, not a copy). */
func (F *FP12) getb() *FP4 {
	return F.b
}
/* getc returns the c coefficient (internal pointer, not a copy). */
func (F *FP12) getc() *FP4 {
	return F.c
}
/* copy sets this = x (deep component copy). */
func (F *FP12) copy(x *FP12) {
	F.a.copy(x.a)
	F.b.copy(x.b)
	F.c.copy(x.c)
}
/* one sets this = 1, i.e. (1,0,0). */
func (F *FP12) one() {
	F.a.one()
	F.b.zero()
	F.c.zero()
}
/* conj sets this = conj(this): the odd-power coefficient b gets the
   negated conjugate, a and c the plain conjugate. */
func (F *FP12) conj() {
	F.a.conj()
	F.b.nconj()
	F.c.conj()
}
+
/* usqr squares this using the Granger-Scott formulae, valid only for
   unitary elements (norm 1, e.g. pairing outputs after the final
   exponentiation).  Cheaper than the general sqr. */
func (F *FP12) usqr() {
	A:=NewFP4copy(F.a)
	B:=NewFP4copy(F.c)
	C:=NewFP4copy(F.b)
	D:=NewFP4int(0)

	// a' = 3*a^2 + 2*conj(a)... built up via D as the doubling scratch
	F.a.sqr()
	D.copy(F.a); D.add(F.a)
	F.a.add(D)

	F.a.norm();
	A.nconj()

	A.add(A)
	F.a.add(A)
	B.sqr()
	B.times_i()	// multiply by the quadratic non-residue

	D.copy(B); D.add(B)	// B = 3*c^2*i
	B.add(D)
	B.norm();

	C.sqr()
	D.copy(C); D.add(C)	// C = 3*b^2
	C.add(D)
	C.norm();

	F.b.conj()
	F.b.add(F.b)
	F.c.nconj()

	F.c.add(F.c)
	F.b.add(B)
	F.c.add(C)
	F.reduce()

}
+
/* sqr squares a general FP12 using the Chung-Hasan SQR2 method from
   http://cacr.uwaterloo.ca/techreports/2006/cacr2006-24.pdf */
func (F *FP12)  sqr() {
	A:=NewFP4copy(F.a)
	B:=NewFP4copy(F.b)
	C:=NewFP4copy(F.c)
	D:=NewFP4copy(F.a)

	A.sqr()		// A = a^2
	B.mul(F.c)
	B.add(B)	// B = 2bc
	C.sqr()		// C = c^2
	D.mul(F.b)
	D.add(D)	// D = 2ab

	// c' builds on (a+b+c)^2
	F.c.add(F.a)
	F.c.add(F.b)
	F.c.sqr()

	F.a.copy(A)

	A.add(B)
	A.norm();
	A.add(C)
	A.add(D)
	A.norm();

	A.neg()
	// i-multiplications fold the high-degree terms back down
	B.times_i();
	C.times_i()

	F.a.add(B)

	F.b.copy(C); F.b.add(D)
	F.c.add(A)	// c' = (a+b+c)^2 - (A+B+C+D)
	F.norm()
}
+
/* mul is the full FP12 multiplication this = this*y, Karatsuba-style
   over FP4: six FP4 multiplications for the three-coefficient product. */
func (F *FP12) mul(y *FP12) {
	z0:=NewFP4copy(F.a)
	z1:=NewFP4int(0)
	z2:=NewFP4copy(F.b)
	z3:=NewFP4int(0)
	t0:=NewFP4copy(F.a)
	t1:=NewFP4copy(y.a)

	z0.mul(y.a)	// z0 = a*y.a
	z2.mul(y.b)	// z2 = b*y.b

	t0.add(F.b)	// t0 = a+b
	t1.add(y.b)	// t1 = y.a+y.b

	z1.copy(t0); z1.mul(t1)	// z1 = (a+b)(y.a+y.b)
	t0.copy(F.b); t0.add(F.c)

	t1.copy(y.b); t1.add(y.c)
	z3.copy(t0); z3.mul(t1)	// z3 = (b+c)(y.b+y.c)

	t0.copy(z0); t0.neg()
	t1.copy(z2); t1.neg()

	z1.add(t0)
	z1.norm();
	F.b.copy(z1); F.b.add(t1)	// b' = z1 - z0 - z2 so far

	z3.add(t1)
	z2.add(t0)

	t0.copy(F.a); t0.add(F.c)
	t1.copy(y.a); t1.add(y.c)
	t0.mul(t1)	// t0 = (a+c)(y.a+y.c)
	z2.add(t0)

	t0.copy(F.c); t0.mul(y.c)	// t0 = c*y.c
	t1.copy(t0); t1.neg()

	z2.norm();
	z3.norm();
	F.b.norm();

	F.c.copy(z2); F.c.add(t1)
	z3.add(t1)
	t0.times_i()	// degree-wrap: multiply by the non-residue i
	F.b.add(t0)

	z3.times_i()
	F.a.copy(z0); F.a.add(z3)
	F.norm()
}
+
/* smul is a sparse multiplication this = this*y, exploiting the special
   form of y produced by the ATE pairing line function (y.b has only a
   real part, y.c is zero -- presumably; confirm against the line
   function).  Cheaper than the full mul. */
func (F *FP12) smul(y *FP12) {
	z0:=NewFP4copy(F.a)
	z2:=NewFP4copy(F.b)
	z3:=NewFP4copy(F.b)
	t0:=NewFP4int(0)
	t1:=NewFP4copy(y.a)

	z0.mul(y.a)	// z0 = a*y.a
	z2.pmul(y.b.real());	// z2 = b * real(y.b)
	F.b.add(F.a)
	t1.real().add(y.b.real())

	F.b.mul(t1)	// (a+b)*(y.a + real(y.b))
	z3.add(F.c);
	z3.pmul(y.b.real())	// (b+c)*real(y.b)

	t0.copy(z0); t0.neg()
	t1.copy(z2); t1.neg()

	F.b.add(t0)
	F.b.norm();

	F.b.add(t1)
	z3.add(t1)
	z2.add(t0)

	t0.copy(F.a); t0.add(F.c)
	t0.mul(y.a)	// (a+c)*y.a
	F.c.copy(z2); F.c.add(t0)

	z3.times_i()	// degree wrap via the non-residue
	F.a.copy(z0); F.a.add(z3)

	F.norm()
}
+
/* inverse sets this = 1/this, using the adjoint-over-norm formula for
   cubic extensions: f0,f1,f2 are the adjoint coefficients and f3 the
   norm, inverted once in FP4. */
func (F *FP12) inverse() {
	f0:=NewFP4copy(F.a)
	f1:=NewFP4copy(F.b)
	f2:=NewFP4copy(F.a)
	f3:=NewFP4int(0)

	F.norm()
	f0.sqr()
	f1.mul(F.c)
	f1.times_i()
	f0.sub(f1)	// f0 = a^2 - i*b*c

	f1.copy(F.c); f1.sqr()
	f1.times_i()
	f2.mul(F.b)
	f1.sub(f2)	// f1 = i*c^2 - a*b

	f2.copy(F.b); f2.sqr()
	f3.copy(F.a); f3.mul(F.c)
	f2.sub(f3)	// f2 = b^2 - a*c

	// f3 = norm = a*f0 + i*(b*f2 + c*f1)
	f3.copy(F.b); f3.mul(f2)
	f3.times_i()
	F.a.mul(f0)
	f3.add(F.a)
	F.c.mul(f1)
	F.c.times_i()

	f3.add(F.c)
	f3.inverse()	// one FP4 inversion
	F.a.copy(f0); F.a.mul(f3)
	F.b.copy(f1); F.b.mul(f3)
	F.c.copy(f2); F.c.mul(f3)
}
+
/* frob sets this = this^p using the Frobenius endomorphism; f is the
   precomputed Frobenius constant (an FP2). */
func (F *FP12) frob(f *FP2) {
	f2:=NewFP2copy(f)
	f3:=NewFP2copy(f)

	f2.sqr()	// f^2
	f3.mul(f2)	// f^3

	F.a.frob(f3);
	F.b.frob(f3);
	F.c.frob(f3);

	// twist corrections for the higher coefficients
	F.b.pmul(f);
	F.c.pmul(f2);
}
+
+/* trace function */
+func (F *FP12) trace() *FP4 {
+       t:=NewFP4int(0)
+       t.copy(F.a)
+       t.imul(3)
+       t.reduce()
+       return t;
+}
+
+
+/* convert from byte array to FP12 */
+func FP12_fromBytes(w []byte) *FP12 {
+       var t [int(MODBYTES)]byte
+       MB:=int(MODBYTES)
+
+       for i:=0;i<MB;i++ {t[i]=w[i]}
+       a:=fromBytes(t[:])
+       for i:=0;i<MB;i++ {t[i]=w[i+MB]}
+       b:=fromBytes(t[:])
+       c:=NewFP2bigs(a,b)
+
+       for i:=0;i<MB;i++ {t[i]=w[i+2*MB]}
+       a=fromBytes(t[:])
+       for i:=0;i<MB;i++ {t[i]=w[i+3*MB]}
+       b=fromBytes(t[:])
+       d:=NewFP2bigs(a,b)
+
+       e:=NewFP4fp2s(c,d)
+
+
+       for i:=0;i<MB;i++ {t[i]=w[i+4*MB]}
+       a=fromBytes(t[:])
+       for i:=0;i<MB;i++ {t[i]=w[i+5*MB]}
+       b=fromBytes(t[:])
+       c=NewFP2bigs(a,b)
+
+       for i:=0;i<MB;i++ {t[i]=w[i+6*MB]}
+       a=fromBytes(t[:])
+       for i:=0;i<MB;i++ {t[i]=w[i+7*MB]}
+       b=fromBytes(t[:])
+       d=NewFP2bigs(a,b)
+
+       f:=NewFP4fp2s(c,d)
+
+
+       for i:=0;i<MB;i++ {t[i]=w[i+8*MB]}
+       a=fromBytes(t[:])
+       for i:=0;i<MB;i++ {t[i]=w[i+9*MB]}
+       b=fromBytes(t[:]);
+               
+       c=NewFP2bigs(a,b)
+
+       for i:=0;i<MB;i++ {t[i]=w[i+10*MB]}
+       a=fromBytes(t[:])
+       for i:=0;i<MB;i++ {t[i]=w[i+11*MB]}
+       b=fromBytes(t[:])
+       d=NewFP2bigs(a,b)
+
+       g:=NewFP4fp2s(c,d)
+
+       return NewFP12fp4s(e,f,g)
+}
+
+/* convert this to byte array */
+func (F *FP12) toBytes(w []byte) {
+       var t [int(MODBYTES)]byte
+       MB:=int(MODBYTES)
+       F.a.geta().getA().toBytes(t[:])
+       for i:=0;i<MB;i++ {w[i]=t[i]}
+       F.a.geta().getB().toBytes(t[:])
+       for i:=0;i<MB;i++ {w[i+MB]=t[i]}
+       F.a.getb().getA().toBytes(t[:])
+       for i:=0;i<MB;i++ {w[i+2*MB]=t[i]}
+       F.a.getb().getB().toBytes(t[:])
+       for i:=0;i<MB;i++ {w[i+3*MB]=t[i]}
+
+       F.b.geta().getA().toBytes(t[:])
+       for i:=0;i<MB;i++ {w[i+4*MB]=t[i]}
+       F.b.geta().getB().toBytes(t[:])
+       for i:=0;i<MB;i++ {w[i+5*MB]=t[i]}
+       F.b.getb().getA().toBytes(t[:])
+       for i:=0;i<MB;i++ {w[i+6*MB]=t[i]}
+       F.b.getb().getB().toBytes(t[:])
+       for i:=0;i<MB;i++ {w[i+7*MB]=t[i]}
+
+       F.c.geta().getA().toBytes(t[:])
+       for i:=0;i<MB;i++ {w[i+8*MB]=t[i]}
+       F.c.geta().getB().toBytes(t[:])
+       for i:=0;i<MB;i++ {w[i+9*MB]=t[i]}
+       F.c.getb().getA().toBytes(t[:])
+       for i:=0;i<MB;i++ {w[i+10*MB]=t[i]}
+       F.c.getb().getB().toBytes(t[:])
+       for i:=0;i<MB;i++ {w[i+11*MB]=t[i]}
+}
+
/* toString renders this as "[a,b,c]" in hex. */
func (F *FP12) toString() string {
	return ("["+F.a.toString()+","+F.b.toString()+","+F.c.toString()+"]")
}
+
/* pow returns this^e, right-to-left square-and-multiply.  Works on a
   copy of e, so the caller's exponent survives.  Uses usqr, so F is
   assumed unitary (e.g. a pairing output) -- confirm at call sites. */
func (F *FP12) pow(e *BIG) *FP12 {
	F.norm()
	e.norm()
	w:=NewFP12copy(F)
	z:=NewBIGcopy(e)	// local copy: e is not consumed
	r:=NewFP12int(1)

	for true {
		bt:=z.parity()
		z.fshr(1)
		if bt==1 {r.mul(w)}
		if z.iszilch() {break}
		w.usqr()
	}
	r.reduce();
	return r;
}
+
/* pinpow raises this to the small integer power e (at most bts bits),
   in constant time via a two-entry Montgomery-ladder-style table:
   every step does one mul and one usqr regardless of the bit. */
func (F *FP12) pinpow(e int,bts int) {
	var R []*FP12
	R=append(R,NewFP12int(1))
	R=append(R,NewFP12copy(F))

	for i:=bts-1;i>=0;i-- {
		b:=(e>>uint(i))&1
		R[1-b].mul(R[b])	// indexing by the bit keeps the op sequence fixed
		R[b].usqr()
	}
	F.copy(R[0])
}
+
/* pow4 computes p = q0^u0.q1^u1.q2^u2.q3^u3 by 4-way simultaneous
   exponentiation with signed exponent recoding.
   Timing attack secure (fixed square/multiply pattern), but not cache
   attack secure (table index depends on secret bits). */
func pow4(q []*FP12,u []*BIG) *FP12 {
	var a [4]int8
	var g []*FP12	// 8-entry precomputed table of products q_i^{+-1}
	var s []*FP12	// two scratch elements
	c:=NewFP12int(1)
	p:=NewFP12int(0)
	var w [NLEN*int(BASEBITS)+1]int8	// recoded signed digits
	var t []*BIG	// working copies of the exponents
	mt:=NewBIGint(0)

	for i:=0;i<4;i++ {
		t=append(t,NewBIGcopy(u[i]))
	}

	s=append(s,NewFP12int(0))
	s=append(s,NewFP12int(0))

	// Build the table: g[j] = q0^e0 * q1^e1 * q2^e2 * q3^e3 for the
	// sign patterns indexed by j (conj() gives the inverse of a unitary
	// element, so q^-1 is cheap).
	g=append(g,NewFP12copy(q[0])); s[0].copy(q[1]); s[0].conj(); g[0].mul(s[0])
	g=append(g,NewFP12copy(g[0]))
	g=append(g,NewFP12copy(g[0]))
	g=append(g,NewFP12copy(g[0]))
	g=append(g,NewFP12copy(q[0])); g[4].mul(q[1])
	g=append(g,NewFP12copy(g[4]))
	g=append(g,NewFP12copy(g[4]))
	g=append(g,NewFP12copy(g[4]))

	s[1].copy(q[2]); s[0].copy(q[3]); s[0].conj(); s[1].mul(s[0])
	s[0].copy(s[1]); s[0].conj(); g[1].mul(s[0])
	g[2].mul(s[1])
	g[5].mul(s[0])
	g[6].mul(s[1])
	s[1].copy(q[2]); s[1].mul(q[3])
	s[0].copy(s[1]); s[0].conj(); g[0].mul(s[0])
	g[3].mul(s[1])
	g[4].mul(s[0])
	g[7].mul(s[1])

/* if power is even add 1 to power, and add q to correction */

	for i:=0;i<4;i++ {
		if t[i].parity()==0 {
			t[i].inc(1); t[i].norm()
			c.mul(q[i])	// accumulated correction, applied (inverted) at the end
		}
		mt.add(t[i]); mt.norm()
	}
	c.conj()
	nb:=1+mt.nbits()

/* convert exponent to signed 1-bit window */
	for j:=0;j<nb;j++ {
		for i:=0;i<4;i++ {
			a[i]=int8(t[i].lastbits(2)-2)	// digit in {-2,-1,0,1}; odd t forces +-1
			t[i].dec(int(a[i])); t[i].norm();
			t[i].fshr(1)
		}
		w[j]=(8*a[0]+4*a[1]+2*a[2]+a[3])	// pack the four signed digits
	}
	w[nb]=int8(8*t[0].lastbits(2)+4*t[1].lastbits(2)+2*t[2].lastbits(2)+t[3].lastbits(2))
	p.copy(g[(w[nb]-1)/2])

	for i:=nb-1;i>=0;i-- {
		m:=w[i]>>7	// arithmetic shift: m = -1 if w[i]<0, else 0
		j:=(w[i]^m)-m  /* j=abs(w[i]) */
		j=(j-1)/2
		s[0].copy(g[j]); s[1].copy(g[j]); s[1].conj()	// +-g[j]
		p.usqr()
		p.mul(s[m&1]);	// pick the sign without branching
	}
	p.mul(c)  /* apply correction */
	p.reduce()
	return p;
}
+

http://git-wip-us.apache.org/repos/asf/incubator-milagro-crypto/blob/c25f9e5c/version22/go/FP2.go
----------------------------------------------------------------------
diff --git a/version22/go/FP2.go b/version22/go/FP2.go
new file mode 100644
index 0000000..d4993e2
--- /dev/null
+++ b/version22/go/FP2.go
@@ -0,0 +1,300 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+*/
+
+/* Finite Field arithmetic  Fp^2 functions */
+
+/* FP2 elements are of the form a+ib, where i is sqrt(-1) */
+
+package main
+
+//import "fmt"
+
/* FP2 is an element a+ib of the quadratic extension field Fp^2,
   where i = sqrt(-1). */
type FP2 struct {
	a *FP
	b *FP
}

/* Constructors */

// NewFP2int builds (a, 0) for a small integer a.
func NewFP2int(a int) *FP2 {
	F:=new(FP2)
	F.a=NewFPint(a)
	F.b=NewFPint(0)
	return F
}

// NewFP2copy deep-copies x.
func NewFP2copy(x *FP2) *FP2 {
	F:=new(FP2)
	F.a=NewFPcopy(x.a)
	F.b=NewFPcopy(x.b)
	return F
}

// NewFP2fps builds (c, d) from two FP components.
func NewFP2fps(c *FP,d *FP) *FP2 {
	F:=new(FP2)
	F.a=NewFPcopy(c)
	F.b=NewFPcopy(d)
	return F
}

// NewFP2bigs builds (c, d) from two BIGs (converted via NewFPbig).
func NewFP2bigs(c *BIG,d *BIG) *FP2 {
	F:=new(FP2)
	F.a=NewFPbig(c)
	F.b=NewFPbig(d)
	return F
}

// NewFP2fp builds the real element (c, 0).
func NewFP2fp(c *FP) *FP2 {
	F:=new(FP2)
	F.a=NewFPcopy(c)
	F.b=NewFPint(0)
	return F
}

// NewFP2big builds the real element (c, 0) from a BIG.
func NewFP2big(c *BIG) *FP2 {
	F:=new(FP2)
	F.a=NewFPbig(c)
	F.b=NewFPint(0)
	return F
}
+
/* reduce reduces both components mod Modulus. */
func (F *FP2) reduce() {
	F.a.reduce()
	F.b.reduce()
}

/* norm renormalises both components (no change of value). */
func (F *FP2) norm() {
	F.a.norm()
	F.b.norm()
}

/* iszilch tests this==0; reduces as a side effect. */
func (F *FP2) iszilch() bool {
	F.reduce()
	return (F.a.iszilch() && F.b.iszilch())
}

/* cmove conditionally copies g into this, in constant time, if d!=0. */
func (F *FP2) cmove(g *FP2,d int) {
	F.a.cmove(g.a,d)
	F.b.cmove(g.b,d)
}
+
/* isunity tests this==1, i.e. (1,0). */
func (F *FP2)  isunity() bool {
	one:=NewFPint(1)
	return (F.a.equals(one) && F.b.iszilch())
}

/* equals reports componentwise equality with x. */
func (F *FP2) equals(x *FP2) bool {
	return (F.a.equals(x.a) && F.b.equals(x.b))
}

/* getA extracts the real part as a regular-form BIG (fresh copy). */
func (F *FP2) getA() *BIG {
	return F.a.redc()
}

/* getB extracts the imaginary part as a regular-form BIG (fresh copy). */
func (F *FP2) getB() *BIG {
	return F.b.redc()
}
+
/* copy sets this = x. */
func (F *FP2) copy(x *FP2) {
	F.a.copy(x.a)
	F.b.copy(x.b)
}

/* zero sets this = 0. */
func (F *FP2) zero() {
	F.a.zero()
	F.b.zero()
}

/* one sets this = 1, i.e. (1,0). */
func (F *FP2) one() {
	F.a.one()
	F.b.zero()
}

/* neg sets this = -this.  Uses one shared negation m = -(a+b), from
   which -a = m+b and -b = m+a, saving an FP negation. */
func (F *FP2) neg() {
	F.norm()
	m:=NewFPcopy(F.a)
	t:= NewFPint(0)

	m.add(F.b)
	m.neg()		// m = -(a+b)
	m.norm()
	t.copy(m); t.add(F.b)	// t = -a
	F.b.copy(m)
	F.b.add(F.a)	// b' = -b
	F.a.copy(t)
}

/* conj sets this to the conjugate a-ib. */
func (F *FP2) conj() {
	F.b.neg()
}
+
/* add sets this += x (componentwise). */
func (F *FP2) add(x *FP2) {
	F.a.add(x.a)
	F.b.add(x.b)
}

/* sub sets this -= x, implemented as this + (-x). */
func (F *FP2) sub(x *FP2) {
	m:=NewFP2copy(x)
	m.neg()
	F.add(m)
}

/* pmul scales this by the FP element s. */
func (F *FP2) pmul(s *FP) {
	F.a.mul(s)
	F.b.mul(s)
}

/* imul scales this by the small integer c. */
func (F *FP2) imul(c int) {
	F.a.imul(c)
	F.b.imul(c)
}
+
/* sqr squares this in place using the complex-squaring identity:
   (a+ib)^2 = (a-b)(a+b) + i*2ab -- two FP mults instead of three. */
func (F *FP2) sqr() {
	F.norm()
	w1:=NewFPcopy(F.a)
	w3:=NewFPcopy(F.a)
	mb:=NewFPcopy(F.b)

	w3.mul(F.b)	// w3 = ab
	w1.add(F.b)	// w1 = a+b
	mb.neg()	// mb = -b
	F.a.add(mb)	// a' = a-b
	F.a.mul(w1)	// a' = (a-b)(a+b)
	F.b.copy(w3); F.b.add(w3)	// b' = 2ab

	F.norm()
}
+
/* mul sets this *= y using Karatsuba-style complex multiplication:
   three FP mults -- ac, bd and (a+b)(c+d) -- instead of four. */
func (F *FP2) mul(y *FP2) {
	F.norm();  /* This is needed here as {a,b} is not normed before additions */

	w1:=NewFPcopy(F.a)
	w2:=NewFPcopy(F.b)
	w5:=NewFPcopy(F.a)
	mw:=NewFPint(0)

	w1.mul(y.a)  // w1=a*y.a  - this norms w1 and y.a, NOT a
	w2.mul(y.b)  // w2=b*y.b  - this norms w2 and y.b, NOT b
	w5.add(F.b)    // w5=a+b
	F.b.copy(y.a); F.b.add(y.b) // b=y.a+y.b

	F.b.mul(w5);	// b = (a+b)(y.a+y.b)
	mw.copy(w1); mw.add(w2); mw.neg()	// mw = -(w1+w2)

	F.b.add(mw)	// b' = (a+b)(c+d) - ac - bd = ad+bc
	mw.add(w1)	// mw = -w2
	F.a.copy(w1);	F.a.add(mw)	// a' = ac - bd

	F.norm()
}
+
/* sqrt(a+ib) = sqrt(a+sqrt(a*a-n*b*b)/2)+ib/(2*sqrt(a+sqrt(a*a-n*b*b)/2)) */
/* sqrt replaces this with one of its square roots and returns true,
   or zeroes this and returns false if it is not a QR in Fp^2. */
func (F *FP2) sqrt() bool {
	if F.iszilch() {return true}
	w1:=NewFPcopy(F.b)
	w2:=NewFPcopy(F.a)
	w1.sqr(); w2.sqr(); w1.add(w2)	// w1 = a^2+b^2 = norm (n = -1 here)
	if w1.jacobi()!=1 { F.zero(); return false }	// norm must be a QR in Fp
	w1=w1.sqrt()
	// Try (a+sqrt(norm))/2; if that is not a QR, (a-sqrt(norm))/2 must be.
	w2.copy(F.a); w2.add(w1); w2.div2()
	if w2.jacobi()!=1 {
		w2.copy(F.a); w2.sub(w1); w2.div2()
		if w2.jacobi()!=1 { F.zero(); return false }
	}
	w2=w2.sqrt()
	F.a.copy(w2)	// real part of the root
	w2.add(w2)
	w2.inverse()
	F.b.mul(w2)	// imaginary part: b / (2*real)
	return true
}
+
/* toString renders this as "[a,b]" in hex. */
func (F *FP2) toString() string {
	return ("["+F.a.toString()+","+F.b.toString()+"]")
}
+
/* inverse sets this = 1/this using the conjugate-over-norm identity:
   1/(a+ib) = (a-ib)/(a^2+b^2) -- one FP inversion. */
func (F *FP2) inverse() {
	F.norm()
	w1:=NewFPcopy(F.a)
	w2:=NewFPcopy(F.b)

	w1.sqr()
	w2.sqr()
	w1.add(w2)	// w1 = a^2+b^2
	w1.inverse()
	F.a.mul(w1)	// a' =  a/(a^2+b^2)
	w1.neg()
	F.b.mul(w1)	// b' = -b/(a^2+b^2)
}
+
/* div2 halves both components mod Modulus. */
func (F *FP2) div2() {
	F.a.div2()
	F.b.div2()
}

/* times_i multiplies this by i=sqrt(-1): (a,b) -> (-b,a). */
func (F *FP2) times_i() {
	//	a.norm();
	z:=NewFPcopy(F.a)
	F.a.copy(F.b); F.a.neg()
	F.b.copy(z)
}
+
/* mul_ip multiplies this by (1+i):
   (a+ib)(1+i) = (a-b) + i(a+b).  X^2-(1+i) is irreducible for FP4;
   assumes p=3 mod 8.  Implemented as this = i*this + this. */
func (F *FP2) mul_ip() {
	F.norm()
	t:=NewFP2copy(F)
	z:=NewFPcopy(F.a)
	F.a.copy(F.b)	// these three lines are times_i: (a,b) -> (-b,a)
	F.a.neg()
	F.b.copy(z)
	F.add(t)	// add the original back: result = (1+i)*this
	F.norm()
}

/* div_ip divides this by (1+i):
   (a+ib)/(1+i) = ((a+b) + i(b-a))/2. */
func (F *FP2) div_ip() {
	t:=NewFP2int(0)
	F.norm()
	t.a.copy(F.a); t.a.add(F.b)	// a+b
	t.b.copy(F.b); t.b.sub(F.a);	// b-a
	F.copy(t)
	F.div2()
}

http://git-wip-us.apache.org/repos/asf/incubator-milagro-crypto/blob/c25f9e5c/version22/go/FP4.go
----------------------------------------------------------------------
diff --git a/version22/go/FP4.go b/version22/go/FP4.go
new file mode 100644
index 0000000..649b88c
--- /dev/null
+++ b/version22/go/FP4.go
@@ -0,0 +1,479 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+*/
+
+/* Finite Field arithmetic  Fp^4 functions */
+
+/* FP4 elements are of the form a+ib, where i is sqrt(-1+sqrt(-1)) */
+
+package main
+
+//import "fmt"
+
// FP4 is an element a+ib of the quartic extension field, stored as two
// FP2 halves (see the header comment: i = sqrt(-1+sqrt(-1))).
type FP4 struct {
	a *FP2 // "real" part
	b *FP2 // "imaginary" part
}
+
+/* Constructors */
+func NewFP4int(a int) *FP4 {
+       F:=new(FP4)
+       F.a=NewFP2int(a)
+       F.b=NewFP2int(0)
+       return F
+}
+
+func NewFP4copy(x *FP4) *FP4 {
+       F:=new(FP4)
+       F.a=NewFP2copy(x.a)
+       F.b=NewFP2copy(x.b)
+       return F
+}
+
+func NewFP4fp2s(c *FP2,d *FP2) *FP4 {
+       F:=new(FP4)
+       F.a=NewFP2copy(c)
+       F.b=NewFP2copy(d)
+       return F
+}
+
+func NewFP4fp2(c *FP2) *FP4 {
+       F:=new(FP4)
+       F.a=NewFP2copy(c)
+       F.b=NewFP2int(0)
+       return F
+}
+
+/* reduce all components of this mod Modulus */
+func (F *FP4) reduce() {
+       F.a.reduce()
+       F.b.reduce()
+}
+
+/* normalise all components of this mod Modulus */
+func (F *FP4) norm() {
+       F.a.norm()
+       F.b.norm()
+}
+
+/* test this==0 ? */
+func (F *FP4) iszilch() bool {
+       F.reduce()
+       return F.a.iszilch() && F.b.iszilch()
+}
+
+/* test this==1 ? */
+func (F *FP4) isunity() bool {
+       one:=NewFP2int(1)
+       return F.a.equals(one) && F.b.iszilch()
+}
+
/* test is w real? That is in a+ib test b is zero */
func (F *FP4) isreal() bool {
	return F.b.iszilch()
}
/* extract real part a (returned by reference, not copied) */
func (F *FP4) real() *FP2 {
	return F.a
}
+
// geta returns the real part a (alias of real(); not a copy).
func (F *FP4) geta() *FP2 {
	return F.a
}
/* extract imaginary part b (returned by reference, not copied) */
func (F *FP4) getb() *FP2 {
	return F.b
}
+/* test this=x? */
+func (F *FP4) equals(x *FP4) bool {
+       return (F.a.equals(x.a) && F.b.equals(x.b))
+}
+
+/* copy this=x */
+func (F *FP4) copy(x *FP4) {
+       F.a.copy(x.a)
+       F.b.copy(x.b)
+}
+/* set this=0 */
+func (F *FP4) zero() {
+       F.a.zero()
+       F.b.zero()
+       }
+/* set this=1 */
+func (F *FP4) one() {
+       F.a.one()
+       F.b.zero()
+}
+
/* set this=-this.
   Computed via m=-(a+b): then m+b = -a and m+a = -b, which saves a
   reduction compared with negating each half separately. */
func (F *FP4) neg() {
	m:=NewFP2copy(F.a);
	t:=NewFP2int(0)
	m.add(F.b)
	m.neg()               // m = -(a+b)
	m.norm()
	t.copy(m); t.add(F.b) // t = -a
	F.b.copy(m)
	F.b.add(F.a)          // new b = -(a+b)+a = -b
	F.a.copy(t)           // new a = -a
}
+
/* this=conjugate(this): negate (and normalise) the imaginary part. */
func (F *FP4) conj() {
	F.b.neg(); F.b.norm()
}
+
/* this=-conjugate(this): negate (and normalise) the real part. */
func (F *FP4) nconj() {
	F.a.neg(); F.a.norm()
}
+
+/* this+=x */
+func (F *FP4) add(x *FP4) {
+       F.a.add(x.a)
+       F.b.add(x.b)
+}
/* this-=x, implemented as this += (-x) so the lazy-reduction neg() above
   does the work. x itself is not modified (a copy is negated). */
func (F *FP4) sub(x *FP4) {
	m:=NewFP4copy(x)
	m.neg()
	F.add(m)
}
+
/* this*=s where s is FP2: scales both halves by s.
   NOTE(review): order matters if s aliases F.a (as in xtr_A's r.pmul(F.a)
   on a *different* object) — do not reorder these two calls. */
func (F *FP4) pmul(s *FP2) {
	F.a.mul(s)
	F.b.mul(s)
}
+/* this*=c where c is int */
+func (F *FP4) imul(c int) {
+       F.a.imul(c)
+       F.b.imul(c)
+}
+
/* this*=this, via the complex-squaring identity
   (a+ib)^2 = ((a+b)(a+i^2 b) - (1+i^2)ab) + i(2ab),
   where multiplication by i^2 is performed by FP2.mul_ip(). */
func (F *FP4) sqr() {
	F.norm()

	t1:=NewFP2copy(F.a)
	t2:=NewFP2copy(F.b)
	t3:=NewFP2copy(F.a)

	t3.mul(F.b)  // t3 = a*b
	t1.add(F.b)  // t1 = a+b
	t2.mul_ip()  // t2 = i^2*b

	t2.add(F.a)  // t2 = a + i^2*b
	F.a.copy(t1)

	F.a.mul(t2)  // (a+b)*(a+i^2*b)

	t2.copy(t3)
	t2.mul_ip()
	t2.add(t3)   // t2 = (1+i^2)*a*b
	t2.neg()
	F.a.add(t2)  // real part

	F.b.copy(t3)
	F.b.add(t3)  // imaginary part = 2*a*b

	F.norm()
}
+
/* this*=y, Karatsuba-style with three FP2 multiplications:
   t1=a*ya, t2=b*yb, cross term via (a+b)(ya+yb);
   real = i^2*t2 + t1, imag = (a+b)(ya+yb) - t1 - t2. */
func (F *FP4) mul(y *FP4) {
	F.norm()

	t1:=NewFP2copy(F.a)
	t2:=NewFP2copy(F.b)
	t3:=NewFP2int(0)
	t4:=NewFP2copy(F.b)

	t1.mul(y.a)  // t1 = a*y.a
	t2.mul(y.b)  // t2 = b*y.b
	t3.copy(y.b)
	t3.add(y.a)  // t3 = y.a+y.b
	t4.add(F.a)  // t4 = a+b

	t4.mul(t3)   // (a+b)*(y.a+y.b)
	t4.sub(t1)
	t4.norm();

	F.b.copy(t4)
	F.b.sub(t2)  // imaginary part
	t2.mul_ip()
	F.a.copy(t2)
	F.a.add(t1)  // real part

	F.norm()
}
+
+/* convert this to hex string */
+func (F *FP4) toString() string {
+       return ("["+F.a.toString()+","+F.b.toString()+"]")
+}
+
/* this=1/this, using (a+ib)^-1 = (a-ib)/(a^2 - i^2*b^2), where the i^2
   factor is applied by FP2.mul_ip(). */
func (F *FP4) inverse() {
	F.norm()

	t1:=NewFP2copy(F.a)
	t2:=NewFP2copy(F.b)

	t1.sqr()
	t2.sqr()
	t2.mul_ip()
	t1.sub(t2)   // t1 = a^2 - i^2*b^2 (the norm of F)
	t1.inverse()
	F.a.mul(t1)  // new a =  a/norm
	t1.neg()
	F.b.mul(t1)  // new b = -b/norm
}
+
/* this*=i where i = sqrt(-1+sqrt(-1)).
   (a+ib)*i = i^2*b + i*a; here i^2*b is expanded as b + b*sqrt(-1),
   matching FP2.mul_ip's (1+sqrt(-1)) convention — NOTE(review): the file
   header's "-1+sqrt(-1)" and mul_ip's "1+sqrt(-1)" comments disagree;
   the code consistently follows mul_ip. */
func (F *FP4) times_i() {
	F.norm()
	s:=NewFP2copy(F.b)
	t:=NewFP2copy(F.b)
	s.times_i()    // s = b*sqrt(-1)
	t.add(s)       // t = b*(1+sqrt(-1)) = i^2*b
	t.norm();
	F.b.copy(F.a)  // new b = a
	F.a.copy(t)    // new a = i^2*b
}
+
/* this=this^p using Frobenius: conjugate both halves, then multiply the
   imaginary half by the Frobenius constant f. */
func (F *FP4) frob(f *FP2) {
	F.a.conj()
	F.b.conj()
	F.b.mul(f)
}
+
/* this=this^e, returned as a fresh element.
   Right-to-left binary square-and-multiply.
   Side effects: both this and e are normalised in place. */
func (F *FP4) pow(e *BIG) *FP4 {
	F.norm()
	e.norm()
	w:=NewFP4copy(F)   // running square
	z:=NewBIGcopy(e)   // remaining exponent bits
	r:=NewFP4int(1)    // accumulated result
	for true {
		bt:=z.parity()
		z.fshr(1)
		if bt==1 {r.mul(w)}
		if z.iszilch() {break}
		w.sqr()
	}
	r.reduce()
	return r
}
+
/* XTR xtr_a function (Stam-Lenstra): using this=(a,b) as FP2 coefficients,
   set this = a*(w-y) + i*(b*(w+y)) + z.  Used by the xtr_pow ladders. */
func (F *FP4) xtr_A(w *FP4,y *FP4,z *FP4) {
	r:=NewFP4copy(w)
	t:=NewFP4copy(w)
	r.sub(y);
	r.pmul(F.a)   // a*(w-y)
	t.add(y)
	t.pmul(F.b)   // b*(w+y)
	t.times_i()   // i*(b*(w+y))

	F.copy(r)
	F.add(t)
	F.add(z)

	F.norm()
}
+
/* XTR xtr_d (doubling) function: this = this^2 - 2*conj(this). */
func (F *FP4) xtr_D() {
	w:=NewFP4copy(F)
	F.sqr(); w.conj()
	w.add(w)    // 2*conj(original value)
	F.sub(w)
	F.reduce()
}
+
/* r=x^n using XTR method on traces of FP12s (Stam-Lenstra single
   exponentiation ladder). Maintains (a,b,c) = (tr(x^(k-1)),tr(x^k),
   tr(x^(k+1))) while scanning floor(n/2) from the top bit.
   F is conjugated and conjugated back, so its value is preserved. */
func (F *FP4) xtr_pow(n *BIG) *FP4 {
	a:=NewFP4int(3)   // tr(x^0)
	b:=NewFP4copy(F)  // tr(x^1)
	c:=NewFP4copy(b)
	c.xtr_D()         // tr(x^2)
	t:=NewFP4int(0)
	r:=NewFP4int(0)

	n.norm()
	par:=n.parity()
	v:=NewBIGcopy(n); v.fshr(1)
	if (par==0) {v.dec(1); v.norm()}  // even n: ladder to n/2 - 1, answer in c

	nb:=v.nbits();
	for i:=nb-1;i>=0;i-- {
		if v.bit(i)!=1 {
			t.copy(b)
			F.conj()
			c.conj()
			b.xtr_A(a,F,c)
			F.conj()   // restore F
			c.copy(t)
			c.xtr_D()
			a.xtr_D()
		} else {
			t.copy(a); t.conj()
			a.copy(b)
			a.xtr_D()
			b.xtr_A(c,F,t)
			c.xtr_D()
		}
	}
	if par==0 {
		r.copy(c)
	} else {r.copy(b)}
	r.reduce()
	return r
}
+
/* r=ck^a.cl^n using XTR double exponentiation method on traces of FP12s.
   See Stam thesis. Implements the matrix-free gcd-like reduction of the
   exponent pair (d,e), updating the trace tuple (cu,cv,cumv,cum2v) at each
   step; the case analysis below mirrors Stam's algorithm 2.4.4.
   Side effects: a and b are normalised in place. */
func (F *FP4) xtr_pow2(ck *FP4,ckml *FP4,ckm2l *FP4,a *BIG,b *BIG) *FP4 {
	a.norm(); b.norm()
	e:=NewBIGcopy(a)
	d:=NewBIGcopy(b)
	w:=NewBIGint(0)

	cu:=NewFP4copy(ck)  // can probably be passed in w/o copying
	cv:=NewFP4copy(F);
	cumv:=NewFP4copy(ckml)
	cum2v:=NewFP4copy(ckm2l)
	r:=NewFP4int(0)
	t:=NewFP4int(0)

	// Strip common factors of 2; compensated by f2 doublings at the end.
	f2:=0
	for (d.parity()==0 && e.parity()==0) {
		d.fshr(1)
		e.fshr(1)
		f2++
	}

	for comp(d,e)!=0 {
		if comp(d,e)>0 { // d > e
			w.copy(e); w.imul(4); w.norm()
			if comp(d,w)<=0 { // e < d <= 4e
				w.copy(d); d.copy(e)
				e.rsub(w); e.norm()

				t.copy(cv);
				t.xtr_A(cu,cumv,cum2v)
				cum2v.copy(cumv);
				cum2v.conj()
				cumv.copy(cv)
				cv.copy(cu)
				cu.copy(t)
			} else {
				if (d.parity()==0) { // d even
					d.fshr(1)
					r.copy(cum2v); r.conj()
					t.copy(cumv)
					t.xtr_A(cu,cv,r)
					cum2v.copy(cumv)
					cum2v.xtr_D()
					cumv.copy(t)
					cu.xtr_D()
				} else {
					if (e.parity()==1) { // d odd, e odd
						d.sub(e); d.norm()
						d.fshr(1)
						t.copy(cv)
						t.xtr_A(cu,cumv,cum2v)
						cu.xtr_D()
						cum2v.copy(cv)
						cum2v.xtr_D()
						cum2v.conj()
						cv.copy(t)
					} else { // d odd, e even: swap roles
						w.copy(d)
						d.copy(e); d.fshr(1)
						e.copy(w)
						t.copy(cumv)
						t.xtr_D()
						cumv.copy(cum2v); cumv.conj()
						cum2v.copy(t); cum2v.conj()
						t.copy(cv)
						t.xtr_D()
						cv.copy(cu)
						cu.copy(t)
					}
				}
			}
		}
		if comp(d,e)<0 { // e > d
			w.copy(d); w.imul(4); w.norm()
			if comp(e,w)<=0 { // d < e <= 4d
				e.sub(d); e.norm()
				t.copy(cv)
				t.xtr_A(cu,cumv,cum2v)
				cum2v.copy(cumv)
				cumv.copy(cu)
				cu.copy(t)
			} else {
				if (e.parity()==0) { // e even: swap roles
					w.copy(d)
					d.copy(e); d.fshr(1)
					e.copy(w)
					t.copy(cumv)
					t.xtr_D()
					cumv.copy(cum2v); cumv.conj()
					cum2v.copy(t); cum2v.conj()
					t.copy(cv)
					t.xtr_D()
					cv.copy(cu)
					cu.copy(t)
				} else {
					if (d.parity()==1) { // e odd, d odd
						w.copy(e)
						e.copy(d)
						w.sub(d); w.norm()
						d.copy(w); d.fshr(1)
						t.copy(cv)
						t.xtr_A(cu,cumv,cum2v)
						cumv.conj()
						cum2v.copy(cu)
						cum2v.xtr_D()
						cum2v.conj()
						cu.copy(cv)
						cu.xtr_D()
						cv.copy(t)
					} else { // e odd, d even
						d.fshr(1)
						r.copy(cum2v); r.conj()
						t.copy(cumv)
						t.xtr_A(cu,cv,r)
						cum2v.copy(cumv)
						cum2v.xtr_D()
						cumv.copy(t)
						cu.xtr_D()
					}
				}
			}
		}
	}
	// d==e here; finish with a single A-step and the deferred doublings.
	r.copy(cv)
	r.xtr_A(cu,cumv,cum2v)
	for i:=0;i<f2;i++ {r.xtr_D()}
	r=r.xtr_pow(d)
	return r
}

http://git-wip-us.apache.org/repos/asf/incubator-milagro-crypto/blob/c25f9e5c/version22/go/GCM.go
----------------------------------------------------------------------
diff --git a/version22/go/GCM.go b/version22/go/GCM.go
new file mode 100644
index 0000000..fcd7310
--- /dev/null
+++ b/version22/go/GCM.go
@@ -0,0 +1,337 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+*/
+
+/*
+* Implementation of the AES-GCM Encryption/Authentication
+*
+* Some restrictions..
+* 1. Only for use with AES
+* 2. Returned tag is always 128-bits. Truncate at your own risk.
+* 3. The order of function calls must follow some rules
+*
+* Typical sequence of calls..
+* 1. call GCM_init
+* 2. call GCM_add_header any number of times, as long as length of header is 
multiple of 16 bytes (block size)
+* 3. call GCM_add_header one last time with any length of header
+* 4. call GCM_add_cipher any number of times, as long as length of 
cipher/plaintext is multiple of 16 bytes
+* 5. call GCM_add_cipher one last time with any length of cipher/plaintext
+* 6. call GCM_finish to extract the tag.
+*
+* See http://www.mindspring.com/~dmcgrew/gcm-nist-6.pdf
+*/
+
+
+package main
+
+import
+(
+//     "fmt"
+       "strconv"
+)
+
+const gcm_NB int=4
+const GCM_ACCEPTING_HEADER int=0
+const GCM_ACCEPTING_CIPHER int=1
+const GCM_NOT_ACCEPTING_MORE int=2
+const GCM_FINISHED int=3
+const GCM_ENCRYPTING int=0
+const GCM_DECRYPTING int=1
+
+
// GCM holds the state of one AES-GCM encryption/authentication operation.
type GCM struct {
	table [128][4]uint32 /* 2k bytes */ // Shoup table of H*x^i for GHASH
	stateX [16]byte // GHASH accumulator
	Y_0 [16]byte    // initial counter block; encrypted to mask the tag
	counter int     // NOTE(review): appears unused — locals named counter shadow it
	lenA [2]uint32  // 64-bit header (AAD) byte count as [hi,lo] words
	lenC [2]uint32  // 64-bit cipher/plaintext byte count as [hi,lo] words
	status int      // one of the GCM_* phase constants
	a  *AES         // underlying AES instance in counter mode
}
+
/* gcm_pack packs 4 bytes into a 32-bit word, big-endian. */
func gcm_pack(b [4]byte) uint32 {
	w := uint32(0)
	for _, x := range b {
		w = (w << 8) | uint32(x)
	}
	return w
}
+
/* gcm_unpack splits a 32-bit word into 4 bytes, big-endian. */
func gcm_unpack(a uint32) [4]byte {
	var b [4]byte
	for i := 0; i < 4; i++ {
		b[3-i] = byte(a >> uint(8*i))
	}
	return b
}
+
// precompute builds the 2KB Shoup table table[i] = H*x^i (i=0..127) used by
// gf2mul, from H = E(K,0) supplied as 16 bytes.
func (G *GCM) precompute(H []byte) {
	var b [4]byte
	j:=0
	for i:=0;i<gcm_NB;i++ {
		b[0]=H[j]; b[1]=H[j+1]; b[2]=H[j+2]; b[3]=H[j+3]
		G.table[0][i]=gcm_pack(b);
		j+=4
	}
	// Each further row is the previous row times x: shift the 128-bit value
	// right one bit, reducing by the GHASH polynomial when a bit falls out.
	for i:=1;i<128;i++ {
		c:=uint32(0)
		for j:=0;j<gcm_NB;j++ {G.table[i][j]=c|(G.table[i-1][j])>>1; c=G.table[i-1][j]<<31;}
		if c != 0  {G.table[i][0]^=0xE1000000} /* irreducible polynomial */
	}
}
+
// gf2mul multiplies the GHASH accumulator stateX by H in GF(2^128),
// table-driven: stateX = stateX*H mod the GHASH polynomial.
func (G *GCM) gf2mul() { /* gf2m mul - Z=H*X mod 2^128 */
	var P [4]uint32

	for i:=0;i<4;i++ {P[i]=0}
	j:=uint(8); m:=0
	for i:=0;i<128;i++ {
		j--
		// c becomes an all-ones/all-zeros mask from bit i of stateX,
		// so the table row is conditionally XORed without branching.
		c:=uint32((G.stateX[m]>>j)&1); c=^c+1
		for k:=0;k<gcm_NB;k++ {P[k]^=(G.table[i][k]&c)}
		if j==0 {
			j=8; m++;
			if m==16 {break}
		}
	}
	// Write the 128-bit product back into stateX, big-endian.
	j=0
	for i:=0;i<gcm_NB;i++ {
		b:=gcm_unpack(P[i])
		G.stateX[j]=b[0]; G.stateX[j+1]=b[1]; G.stateX[j+2]=b[2]; G.stateX[j+3]=b[3];
		j+=4
	}
}
+
// wrap finishes GHASH: folds the 64-bit bit-lengths of the header (lenA)
// and cipher data (lenC) into the accumulator and multiplies once more.
func (G *GCM) wrap() { /* Finish off GHASH */
	var F [4]uint32
	var L [16]byte

	/* convert lengths from bytes to bits (x8 = <<3, with carry of the top
	   3 bits of the low word into the high word) */
	F[0]=(G.lenA[0]<<3)|(G.lenA[1]&0xE0000000)>>29
	F[1]=G.lenA[1]<<3
	F[2]=(G.lenC[0]<<3)|(G.lenC[1]&0xE0000000)>>29
	F[3]=G.lenC[1]<<3
	j:=0
	for i:=0;i<gcm_NB;i++ {
		b:=gcm_unpack(F[i]);
		L[j]=b[0]; L[j+1]=b[1]; L[j+2]=b[2]; L[j+3]=b[3]
		j+=4
	}
	for i:=0;i<16;i++ {G.stateX[i]^=L[i]}
	G.gf2mul()
}
+
// ghash absorbs len bytes of plain into the GHASH accumulator, multiplying
// by H after each full 16-byte block, and counts the bytes in lenC.
// Returns false if called in a state that no longer accepts data.
// NOTE(review): the parameter name len shadows the builtin.
func (G *GCM) ghash(plain []byte,len int) bool {
	if G.status==GCM_ACCEPTING_HEADER {G.status=GCM_ACCEPTING_CIPHER}
	if G.status != GCM_ACCEPTING_CIPHER {return false}

	j:=0
	for (j<len) {
		for i:=0;i<16 && j<len;i++ {
			G.stateX[i]^=plain[j]; j++
			G.lenC[1]++; if G.lenC[1]==0 {G.lenC[0]++} // 64-bit count w/ carry
		}
		G.gf2mul();
	}
	// A partial final block means no further data may be absorbed.
	if len%16 != 0 {G.status=GCM_NOT_ACCEPTING_MORE}
	return true;
}
+
/* Initialize GCM mode */
func (G *GCM) Init(nk int,key []byte,niv int,iv []byte) { /* iv size niv is usually 12 bytes (96 bits). AES key size nk can be 16,24 or 32 bytes */
	var H [16]byte

	for i:=0;i<16;i++ {H[i]=0; G.stateX[i]=0}

	G.a=new(AES)

	G.a.Init(aes_ECB,nk,key,iv)
	G.a.ecb_encrypt(H[:])    /* E(K,0) */
	G.precompute(H[:])       // build the GHASH multiplication table

	G.lenA[0]=0;G.lenC[0]=0;G.lenA[1]=0;G.lenC[1]=0
	if niv==12 {
		// 96-bit IV: initial counter block Y0 = IV || 0^31 || 1
		for i:=0;i<12;i++ {G.a.f[i]=iv[i]}
		b:=gcm_unpack(uint32(1))
		G.a.f[12]=b[0]; G.a.f[13]=b[1]; G.a.f[14]=b[2]; G.a.f[15]=b[3];  /* initialise IV */
		for i:=0;i<16;i++ {G.Y_0[i]=G.a.f[i]}
	} else {
		// Other IV lengths: Y0 = GHASH(H, {}, IV)
		G.status=GCM_ACCEPTING_CIPHER;
		G.ghash(iv,niv) /* GHASH(H,0,IV) */
		G.wrap()
		for i:=0;i<16;i++ {G.a.f[i]=G.stateX[i];G.Y_0[i]=G.a.f[i];G.stateX[i]=0}
		// Reset the counts consumed while hashing the IV.
		G.lenA[0]=0;G.lenC[0]=0;G.lenA[1]=0;G.lenC[1]=0
	}
	G.status=GCM_ACCEPTING_HEADER
}
+
/* Add Header data - included but not encrypted */
func (G *GCM) Add_header(header []byte,len int) bool { /* Add some header. Won't be encrypted, but will be authenticated. len is length of header */
	if G.status != GCM_ACCEPTING_HEADER {return false}

	j:=0
	for j<len {
		for i:=0;i<16 && j<len;i++ {
			G.stateX[i]^=header[j]; j++
			G.lenA[1]++; if G.lenA[1]==0 {G.lenA[0]++} // 64-bit AAD byte count w/ carry
		}
		G.gf2mul();
	}
	// A partial block ends the header phase (per the calling rules above).
	if len%16 != 0 {G.status=GCM_ACCEPTING_CIPHER}

	return true;
}
+
/* Add Plaintext - included and encrypted. Returns the ciphertext for these
   len bytes, or nil if called in the wrong state. */
func (G *GCM) Add_plain(plain []byte,len int) []byte {
	var B [16]byte
	var b [4]byte

	cipher:=make([]byte,len)
	var counter uint32=0
	if G.status == GCM_ACCEPTING_HEADER {G.status=GCM_ACCEPTING_CIPHER}
	if G.status != GCM_ACCEPTING_CIPHER {return nil}

	j:=0
	for j<len {
		// Increment the 32-bit big-endian counter in the last 4 IV bytes.
		b[0]=G.a.f[12]; b[1]=G.a.f[13]; b[2]=G.a.f[14]; b[3]=G.a.f[15];
		counter=gcm_pack(b)
		counter++
		b=gcm_unpack(counter)
		G.a.f[12]=b[0]; G.a.f[13]=b[1]; G.a.f[14]=b[2]; G.a.f[15]=b[3] /* increment counter */
		for i:=0;i<16;i++ {B[i]=G.a.f[i]}
		G.a.ecb_encrypt(B[:]);        /* encrypt it  */

		for i:=0;i<16 && j<len;i++ {
			cipher[j]=(plain[j]^B[i])      // CTR-mode keystream XOR
			G.stateX[i]^=cipher[j]; j++    // GHASH runs over the ciphertext
			G.lenC[1]++; if G.lenC[1]==0 {G.lenC[0]++}
		}
		G.gf2mul()
	}
	if len%16 != 0 {G.status=GCM_NOT_ACCEPTING_MORE}
	return cipher
}
+
/* Add Ciphertext - decrypts to plaintext. Returns the plaintext for these
   len bytes, or nil if called in the wrong state. */
func (G *GCM) Add_cipher(cipher []byte,len int) []byte {
	var B [16]byte
	var b [4]byte

	plain:=make([]byte,len)
	var counter uint32=0

	if G.status==GCM_ACCEPTING_HEADER {G.status=GCM_ACCEPTING_CIPHER}
	if G.status != GCM_ACCEPTING_CIPHER {return nil}

	j:=0
	for j<len {
		// Increment the 32-bit big-endian counter in the last 4 IV bytes.
		b[0]=G.a.f[12]; b[1]=G.a.f[13]; b[2]=G.a.f[14]; b[3]=G.a.f[15]
		counter=gcm_pack(b);
		counter++
		b=gcm_unpack(counter)
		G.a.f[12]=b[0]; G.a.f[13]=b[1]; G.a.f[14]=b[2]; G.a.f[15]=b[3]; /* increment counter */
		for i:=0;i<16;i++ {B[i]=G.a.f[i]}
		G.a.ecb_encrypt(B[:])        /* encrypt it  */
		for i:=0;i<16 && j<len;i++ {
			oc:=cipher[j];               // keep ciphertext byte: GHASH input
			plain[j]=(cipher[j]^B[i])
			G.stateX[i]^=oc; j++
			G.lenC[1]++; if G.lenC[1]==0 {G.lenC[0]++}
		}
		G.gf2mul()
	}
	if len%16 != 0 {G.status=GCM_NOT_ACCEPTING_MORE}
	return plain
}
+
/* Finish and extract Tag. The 128-bit tag is T = E(K,Y0) XOR GHASH state;
   pass extract=false to finish without computing it (tag returns as zeros). */
func (G *GCM) Finish(extract bool) [16]byte { /* Finish off GHASH and extract tag (MAC) */
	var tag [16]byte

	G.wrap()
	/* extract tag */
	if extract {
		G.a.ecb_encrypt(G.Y_0[:]);        /* E(K,Y0) */
		for i:=0;i<16;i++ {G.Y_0[i]^=G.stateX[i]}
		// Copy out the tag and scrub the working state.
		for i:=0;i<16;i++ {tag[i]=G.Y_0[i];G.Y_0[i]=0;G.stateX[i]=0}
	}
	G.status=GCM_FINISHED
	G.a.End()
	return tag
}
+
// hex2bytes decodes a hex string into bytes. A trailing unpaired nibble is
// ignored; an invalid hex pair decodes as 0 (the parse error is deliberately
// discarded, preserving the original best-effort behaviour).
func hex2bytes(s string) []byte {
	lgh := len(s)
	data := make([]byte, lgh/2)

	// Bound the loop by i+1<lgh: the original "i<lgh" sliced s[i:i+2] and
	// indexed data[i/2] past the end for odd-length input, panicking.
	for i := 0; i+1 < lgh; i += 2 {
		a, _ := strconv.ParseInt(s[i:i+2], 16, 32) // a==0 on bad input
		data[i/2] = byte(a)
	}
	return data
}
+
+/*
+func main() {
+
+       KT:="feffe9928665731c6d6a8f9467308308"
+       
MT:="d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b39"
+       HT:="feedfacedeadbeeffeedfacedeadbeefabaddad2"
+
+       
NT:="9313225df88406e555909c5aff5269aa6a7a9538534f7da1e4c303d2a318a728c3c0c95156809539fcf0e2429a6b525416aedbf5a0de6a57a637b39b";
+// Tag should be 619cc5aefffe0bfa462af43c1699d050
+
+       g:=new(GCM)
+
+       M:=hex2bytes(MT)
+       H:=hex2bytes(HT)
+       N:=hex2bytes(NT)
+       K:=hex2bytes(KT)
+
+       lenM:=len(M)
+       lenH:=len(H)
+       lenK:=len(K)
+       lenIV:=len(N)
+
+       fmt.Printf("Plaintext=\n");
+       for i:=0;i<lenM;i++ {fmt.Printf("%02x",M[i])}
+       fmt.Printf("\n")
+
+       g.Init(lenK,K,lenIV,N)
+       g.Add_header(H,lenH)
+       C:=g.Add_plain(M,lenM)
+       T:=g.Finish(true)
+
+       fmt.Printf("Ciphertext=\n")
+       for i:=0;i<lenM;i++ {fmt.Printf("%02x",C[i])}
+       fmt.Printf("\n")
+        
+       fmt.Printf("Tag=\n")
+       for i:=0;i<16;i++ {fmt.Printf("%02x",T[i])}
+       fmt.Printf("\n")
+
+       g.Init(lenK,K,lenIV,N)
+       g.Add_header(H,lenH)
+       P:=g.Add_cipher(C,lenM)
+       T=g.Finish(true)
+
+       fmt.Printf("Plaintext=\n");
+       for i:=0;i<lenM;i++ {fmt.Printf("%02x",P[i])}
+       fmt.Printf("\n")
+
+       fmt.Printf("Tag=\n");
+       for i:=0;i<16;i++ {fmt.Printf("%02x",T[i])}
+       fmt.Printf("\n")
+}
+*/

http://git-wip-us.apache.org/repos/asf/incubator-milagro-crypto/blob/c25f9e5c/version22/go/HASH256.go
----------------------------------------------------------------------
diff --git a/version22/go/HASH256.go b/version22/go/HASH256.go
new file mode 100644
index 0000000..e6d30c8
--- /dev/null
+++ b/version22/go/HASH256.go
@@ -0,0 +1,192 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+*/
+
+/*
+ * Implementation of the Secure Hashing Algorithm (SHA-256)
+ *
+ * Generates a 256 bit message digest. It should be impossible to come
+ * come up with two messages that hash to the same value ("collision free").
+ *
+ * For use with byte-oriented messages only. 
+ */
+
+
+package main
+
+//import "fmt"
+
+const hash256_H0 uint32=0x6A09E667
+const hash256_H1 uint32=0xBB67AE85
+const hash256_H2 uint32=0x3C6EF372
+const hash256_H3 uint32=0xA54FF53A
+const hash256_H4 uint32=0x510E527F
+const hash256_H5 uint32=0x9B05688C
+const hash256_H6 uint32=0x1F83D9AB
+const hash256_H7 uint32=0x5BE0CD19
+
+var hash256_K = [...]uint32 {
+       
0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
+       
0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174,
+       
0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
+       
0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967,
+       
0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
+       
0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,0xd192e819,0xd6990624,0xf40e3585,0x106aa070,
+       
0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
+       
0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2}
+
+
// HASH256 holds the incremental state of one SHA-256 computation.
type HASH256 struct {
	length [2]uint32 // message bit count: [0] low word, [1] high word
	h [8]uint32      // chaining state H0..H7
	w [64]uint32     // message schedule; w[0..15] buffer the current block

}
+
/* functions */
// hash256_S rotates x right by n bits (ROTR^n, 0 < n < 32).
func hash256_S(n uint32, x uint32) uint32 {
	return (x >> n) | (x << (32 - n))
}
+
// hash256_R shifts x right by n bits (SHR^n).
func hash256_R(n uint32, x uint32) uint32 {
	return x >> n
}
+
// hash256_Ch is the SHA-256 choice function: for each bit, x selects
// between y and z. Written in the classic one-AND form z^(x&(y^z)),
// equivalent to (x&y)^(^x&z).
func hash256_Ch(x, y, z uint32) uint32 {
	return z ^ (x & (y ^ z))
}
+
// hash256_Maj is the SHA-256 majority function: each result bit is set when
// at least two of x, y, z have it set. (x&(y|z))|(y&z) is the standard
// equivalent of (x&y)^(x&z)^(y&z).
func hash256_Maj(x, y, z uint32) uint32 {
	return (x & (y | z)) | (y & z)
}
+
+func hash256_Sig0(x uint32) uint32 {
+       return (hash256_S(2,x)^hash256_S(13,x)^hash256_S(22,x))
+}
+
+func hash256_Sig1(x uint32) uint32 {
+       return (hash256_S(6,x)^hash256_S(11,x)^hash256_S(25,x))
+}
+
+func hash256_theta0(x uint32) uint32 {
+       return (hash256_S(7,x)^hash256_S(18,x)^hash256_R(3,x));
+}
+
+func hash256_theta1(x uint32) uint32 {
+               return (hash256_S(17,x)^hash256_S(19,x)^hash256_R(10,x))
+}
+
// transform runs the SHA-256 compression function over the current 16-word
// block buffered in w[0..15], updating the chaining state h.
func (H *HASH256) transform() { /* basic transformation step */
	// Message schedule: expand the 16 block words to 64.
	for j:=16;j<64;j++ {
		H.w[j]=hash256_theta1(H.w[j-2])+H.w[j-7]+hash256_theta0(H.w[j-15])+H.w[j-16]
	}
	// Working variables; hh is the eighth (h would shadow the field name).
	a:=H.h[0]; b:=H.h[1]; c:=H.h[2]; d:=H.h[3]
	e:=H.h[4]; f:=H.h[5]; g:=H.h[6]; hh:=H.h[7]
	for j:=0;j<64;j++ { /* 64 times - mush it up */
		t1:=hh+hash256_Sig1(e)+hash256_Ch(e,f,g)+hash256_K[j]+H.w[j]
		t2:=hash256_Sig0(a)+hash256_Maj(a,b,c)
		hh=g; g=f; f=e
		e=d+t1
		d=c
		c=b
		b=a
		a=t1+t2
	}
	// Fold the block result into the chaining value.
	H.h[0]+=a; H.h[1]+=b; H.h[2]+=c; H.h[3]+=d
	H.h[4]+=e; H.h[5]+=f; H.h[6]+=g; H.h[7]+=hh
}
+
+/* Initialise Hash function */
+func (H *HASH256) Init() { /* initialise */
+       for i:=0;i<64;i++ {H.w[i]=0}
+       H.length[0]=0; H.length[1]=0
+       H.h[0]=hash256_H0
+       H.h[1]=hash256_H1
+       H.h[2]=hash256_H2
+       H.h[3]=hash256_H3
+       H.h[4]=hash256_H4
+       H.h[5]=hash256_H5
+       H.h[6]=hash256_H6
+       H.h[7]=hash256_H7
+}
+
+func NewHASH256() *HASH256 {
+       H:= new(HASH256)
+       H.Init()
+       return H
+}
+
/* process a single byte */
// Process absorbs one message byte into the current block, compressing
// whenever 64 bytes (512 bits) have accumulated.
func (H *HASH256) Process(byt byte) { /* process the next message byte */
	cnt:=(H.length[0]/32)%16; // word index within the current block

	H.w[cnt]<<=8;
	H.w[cnt]|=uint32(byt&0xFF);
	H.length[0]+=8; // running bit count, low word
	// Carry into the high word when the 32-bit counter wraps to zero.
	if H.length[0]==0 {H.length[1]++; H.length[0]=0}
	if (H.length[0]%512)==0 {H.transform()}
}
+
+/* process an array of bytes */        
+func (H *HASH256) Process_array(b []byte) {
+       for i:=0;i<len(b);i++ {H.Process((b[i]))}
+}
+
+/* process a 32-bit integer */
+func (H *HASH256) Process_num(n int32) {
+       H.Process(byte((n>>24)&0xff));
+       H.Process(byte((n>>16)&0xff));
+       H.Process(byte((n>>8)&0xff));
+       H.Process(byte(n&0xff));
+}
+
/* Generate 32-byte Hash */
// Hash applies SHA-256 padding (0x80, zeros, then the 64-bit bit length),
// runs the final compression, returns the digest, and re-initialises the
// state for reuse.
func (H *HASH256) Hash() []byte { /* pad message and finish - supply digest */
	var digest [32]byte
	len0:=H.length[0] // snapshot length before padding alters it
	len1:=H.length[1]
	H.Process(0x80);
	for (H.length[0]%512)!=448 {H.Process(0)}
	// Install the original bit length as the last two words of the block.
	H.w[14]=len1;
	H.w[15]=len0;
	H.transform();
	for i:=0;i<32;i++ { /* convert to bytes */
		digest[i]=byte((H.h[i/4]>>uint(8*(3-i%4))) & 0xff);
	}
	H.Init()
	return digest[0:32]
}
+
+/* test program: should produce digest */
+
+//248d6a61 d20638b8 e5c02693 0c3e6039 a33ce459 64ff2167 f6ecedd4 19db06c1
+/*
+func main() {
+
+       test := 
[]byte("abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq")
+       sh:=NewHASH256()
+
+       for i:=0;i<len(test);i++ {
+               sh.Process(test[i])
+       }
+               
+       digest:=sh.Hash()    
+       for i:=0;i<32;i++ {fmt.Printf("%02x",digest[i])}
+
+} */
+

http://git-wip-us.apache.org/repos/asf/incubator-milagro-crypto/blob/c25f9e5c/version22/go/HASH384.go
----------------------------------------------------------------------
diff --git a/version22/go/HASH384.go b/version22/go/HASH384.go
new file mode 100644
index 0000000..ee3e535
--- /dev/null
+++ b/version22/go/HASH384.go
@@ -0,0 +1,204 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+*/
+
+/*
+ * Implementation of the Secure Hashing Algorithm (SHA-384)
+ *
+ * Generates a 384-bit message digest. It should be impossible to
+ * come up with two messages that hash to the same value ("collision free").
+ *
+ * For use with byte-oriented messages only.
+ */
+
+
+package main
+
+//import "fmt"
+
/* SHA-384 initial hash values (FIPS 180-4, section 5.3.4). */
const hash384_H0 uint64 = 0xcbbb9d5dc1059ed8
const hash384_H1 uint64 = 0x629a292a367cd507
const hash384_H2 uint64 = 0x9159015a3070dd17
const hash384_H3 uint64 = 0x152fecd8f70e5939
const hash384_H4 uint64 = 0x67332667ffc00b31
const hash384_H5 uint64 = 0x8eb44a8768581511
const hash384_H6 uint64 = 0xdb0c2e0d64f98fa7
const hash384_H7 uint64 = 0x47b5481dbefa4fa4

/* SHA-384/512 round constants (FIPS 180-4, section 4.2.3). */
var hash384_K = [...]uint64{
	0x428a2f98d728ae22, 0x7137449123ef65cd, 0xb5c0fbcfec4d3b2f, 0xe9b5dba58189dbbc,
	0x3956c25bf348b538, 0x59f111f1b605d019, 0x923f82a4af194f9b, 0xab1c5ed5da6d8118,
	0xd807aa98a3030242, 0x12835b0145706fbe, 0x243185be4ee4b28c, 0x550c7dc3d5ffb4e2,
	0x72be5d74f27b896f, 0x80deb1fe3b1696b1, 0x9bdc06a725c71235, 0xc19bf174cf692694,
	0xe49b69c19ef14ad2, 0xefbe4786384f25e3, 0x0fc19dc68b8cd5b5, 0x240ca1cc77ac9c65,
	0x2de92c6f592b0275, 0x4a7484aa6ea6e483, 0x5cb0a9dcbd41fbd4, 0x76f988da831153b5,
	0x983e5152ee66dfab, 0xa831c66d2db43210, 0xb00327c898fb213f, 0xbf597fc7beef0ee4,
	0xc6e00bf33da88fc2, 0xd5a79147930aa725, 0x06ca6351e003826f, 0x142929670a0e6e70,
	0x27b70a8546d22ffc, 0x2e1b21385c26c926, 0x4d2c6dfc5ac42aed, 0x53380d139d95b3df,
	0x650a73548baf63de, 0x766a0abb3c77b2a8, 0x81c2c92e47edaee6, 0x92722c851482353b,
	0xa2bfe8a14cf10364, 0xa81a664bbc423001, 0xc24b8b70d0f89791, 0xc76c51a30654be30,
	0xd192e819d6ef5218, 0xd69906245565a910, 0xf40e35855771202a, 0x106aa07032bbd1b8,
	0x19a4c116b8d2d0c8, 0x1e376c085141ab53, 0x2748774cdf8eeb99, 0x34b0bcb5e19b48a8,
	0x391c0cb3c5c95a63, 0x4ed8aa4ae3418acb, 0x5b9cca4f7763e373, 0x682e6ff3d6b2b8a3,
	0x748f82ee5defb2fc, 0x78a5636f43172f60, 0x84c87814a1f0ab72, 0x8cc702081a6439ec,
	0x90befffa23631e28, 0xa4506cebde82bde9, 0xbef9a3f7b2c67915, 0xc67178f2e372532b,
	0xca273eceea26619c, 0xd186b8c721c0c207, 0xeada7dd6cde0eb1e, 0xf57d4f7fee6ed178,
	0x06f067aa72176fba, 0x0a637dc5a2c898a6, 0x113f9804bef90dae, 0x1b710b35131c471b,
	0x28db77f523047d84, 0x32caab7b40c72493, 0x3c9ebe0a15c9bebc, 0x431d67c49c100d4c,
	0x4cc5d4becb3e42b6, 0x597f299cfc657e2a, 0x5fcb6fab3ad6faec, 0x6c44198c4a475817}

/* HASH384 holds the rolling state of a SHA-384 computation:
   length is the processed message bit-count (128 bits, as two 64-bit words,
   low word first), h is the chaining state, w the 80-word message schedule. */
type HASH384 struct {
	length [2]uint64
	h      [8]uint64
	w      [80]uint64
}

/* circular right rotation by n bits */
func hash384_S(n uint64, x uint64) uint64 {
	return (x >> n) | (x << (64 - n))
}

/* logical right shift by n bits */
func hash384_R(n uint64, x uint64) uint64 {
	return x >> n
}

/* "choose": for each bit, x selects between y and z */
func hash384_Ch(x, y, z uint64) uint64 {
	return (x & y) ^ (^x & z)
}

/* "majority": for each bit, the value held by at least two of x, y, z */
func hash384_Maj(x, y, z uint64) uint64 {
	return (x & y) ^ (x & z) ^ (y & z)
}

func hash384_Sig0(x uint64) uint64 {
	return hash384_S(28, x) ^ hash384_S(34, x) ^ hash384_S(39, x)
}

func hash384_Sig1(x uint64) uint64 {
	return hash384_S(14, x) ^ hash384_S(18, x) ^ hash384_S(41, x)
}

func hash384_theta0(x uint64) uint64 {
	return hash384_S(1, x) ^ hash384_S(8, x) ^ hash384_R(7, x)
}

func hash384_theta1(x uint64) uint64 {
	return hash384_S(19, x) ^ hash384_S(61, x) ^ hash384_R(6, x)
}

/* transform mixes one full 1024-bit block into the hash state. */
func (H *HASH384) transform() {
	// expand the 16 loaded words into the full 80-word schedule
	for j := 16; j < 80; j++ {
		H.w[j] = hash384_theta1(H.w[j-2]) + H.w[j-7] + hash384_theta0(H.w[j-15]) + H.w[j-16]
	}
	a, b, c, d := H.h[0], H.h[1], H.h[2], H.h[3]
	e, f, g, hh := H.h[4], H.h[5], H.h[6], H.h[7]
	for j := 0; j < 80; j++ { // 80 compression rounds
		t1 := hh + hash384_Sig1(e) + hash384_Ch(e, f, g) + hash384_K[j] + H.w[j]
		t2 := hash384_Sig0(a) + hash384_Maj(a, b, c)
		hh = g
		g = f
		f = e
		e = d + t1
		d = c
		c = b
		b = a
		a = t1 + t2
	}
	H.h[0] += a
	H.h[1] += b
	H.h[2] += c
	H.h[3] += d
	H.h[4] += e
	H.h[5] += f
	H.h[6] += g
	H.h[7] += hh
}

/* Init resets the hash to its SHA-384 initial state. */
func (H *HASH384) Init() {
	for i := range H.w {
		H.w[i] = 0
	}
	H.length[0] = 0
	H.length[1] = 0
	H.h[0] = hash384_H0
	H.h[1] = hash384_H1
	H.h[2] = hash384_H2
	H.h[3] = hash384_H3
	H.h[4] = hash384_H4
	H.h[5] = hash384_H5
	H.h[6] = hash384_H6
	H.h[7] = hash384_H7
}

/* NewHASH384 allocates and initialises a fresh SHA-384 instance. */
func NewHASH384() *HASH384 {
	H := new(HASH384)
	H.Init()
	return H
}

/* Process absorbs the next message byte into the current 1024-bit block. */
func (H *HASH384) Process(byt byte) {
	slot := (H.length[0] / 64) % 16 // which 64-bit schedule word this byte lands in
	H.w[slot] = (H.w[slot] << 8) | uint64(byt)
	H.length[0] += 8
	if H.length[0] == 0 {
		// low bit-count word wrapped around: carry into the high word
		H.length[1]++
	}
	if H.length[0]%1024 == 0 {
		H.transform() // a full 128-byte block has been loaded
	}
}

/* Process_array absorbs every byte of b, in order. */
func (H *HASH384) Process_array(b []byte) {
	for _, x := range b {
		H.Process(x)
	}
}

/* Process_num absorbs a 32-bit integer, most significant byte first. */
func (H *HASH384) Process_num(n int32) {
	H.Process(byte((n >> 24) & 0xff))
	H.Process(byte((n >> 16) & 0xff))
	H.Process(byte((n >> 8) & 0xff))
	H.Process(byte(n & 0xff))
}

/* Hash pads the message and returns the 48-byte digest, then re-initialises
   the state so the instance can be reused.
   (The original comment said "32-byte"; SHA-384 produces 48 bytes.) */
func (H *HASH384) Hash() []byte {
	var digest [48]byte
	len0 := H.length[0]
	len1 := H.length[1]
	H.Process(0x80) // mandatory 1-bit marker (plus seven zero bits)
	for H.length[0]%1024 != 896 {
		H.Process(0) // zero-pad up to the length field
	}
	// the final two schedule words hold the 128-bit message length in bits
	H.w[14] = len1
	H.w[15] = len0
	H.transform()
	for i := 0; i < 48; i++ { // big-endian byte extraction of the state words
		digest[i] = byte(H.h[i/8] >> uint(8*(7-i%8)))
	}
	H.Init()
	return digest[0:48]
}
+
+/* test program: should produce digest */
+
+//09330c33f71147e8 3d192fc782cd1b47 53111b173b3b05d2 2fa08086e3b0f712 
fcc7c71a557e2db9 66c3e9fa91746039
+/*
+func main() {
+
+       test := 
[]byte("abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu")
+       sh:=NewHASH384()
+
+       for i:=0;i<len(test);i++ {
+               sh.Process(test[i])
+       }
+               
+       digest:=sh.Hash()    
+       for i:=0;i<48;i++ {fmt.Printf("%02x",digest[i])}
+
+}
+*/

http://git-wip-us.apache.org/repos/asf/incubator-milagro-crypto/blob/c25f9e5c/version22/go/HASH512.go
----------------------------------------------------------------------
diff --git a/version22/go/HASH512.go b/version22/go/HASH512.go
new file mode 100644
index 0000000..be274f3
--- /dev/null
+++ b/version22/go/HASH512.go
@@ -0,0 +1,204 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+*/
+
+/*
+ * Implementation of the Secure Hashing Algorithm (SHA-512)
+ *
+ * Generates a 512-bit message digest. It should be impossible to
+ * come up with two messages that hash to the same value ("collision free").
+ *
+ * For use with byte-oriented messages only.
+ */
+
+
+package main
+
+//import "fmt"
+
/* SHA-512 initial hash values (FIPS 180-4, section 5.3.5). */
const hash512_H0 uint64 = 0x6a09e667f3bcc908
const hash512_H1 uint64 = 0xbb67ae8584caa73b
const hash512_H2 uint64 = 0x3c6ef372fe94f82b
const hash512_H3 uint64 = 0xa54ff53a5f1d36f1
const hash512_H4 uint64 = 0x510e527fade682d1
const hash512_H5 uint64 = 0x9b05688c2b3e6c1f
const hash512_H6 uint64 = 0x1f83d9abfb41bd6b
const hash512_H7 uint64 = 0x5be0cd19137e2179

/* SHA-384/512 round constants (FIPS 180-4, section 4.2.3). */
var hash512_K = [...]uint64{
	0x428a2f98d728ae22, 0x7137449123ef65cd, 0xb5c0fbcfec4d3b2f, 0xe9b5dba58189dbbc,
	0x3956c25bf348b538, 0x59f111f1b605d019, 0x923f82a4af194f9b, 0xab1c5ed5da6d8118,
	0xd807aa98a3030242, 0x12835b0145706fbe, 0x243185be4ee4b28c, 0x550c7dc3d5ffb4e2,
	0x72be5d74f27b896f, 0x80deb1fe3b1696b1, 0x9bdc06a725c71235, 0xc19bf174cf692694,
	0xe49b69c19ef14ad2, 0xefbe4786384f25e3, 0x0fc19dc68b8cd5b5, 0x240ca1cc77ac9c65,
	0x2de92c6f592b0275, 0x4a7484aa6ea6e483, 0x5cb0a9dcbd41fbd4, 0x76f988da831153b5,
	0x983e5152ee66dfab, 0xa831c66d2db43210, 0xb00327c898fb213f, 0xbf597fc7beef0ee4,
	0xc6e00bf33da88fc2, 0xd5a79147930aa725, 0x06ca6351e003826f, 0x142929670a0e6e70,
	0x27b70a8546d22ffc, 0x2e1b21385c26c926, 0x4d2c6dfc5ac42aed, 0x53380d139d95b3df,
	0x650a73548baf63de, 0x766a0abb3c77b2a8, 0x81c2c92e47edaee6, 0x92722c851482353b,
	0xa2bfe8a14cf10364, 0xa81a664bbc423001, 0xc24b8b70d0f89791, 0xc76c51a30654be30,
	0xd192e819d6ef5218, 0xd69906245565a910, 0xf40e35855771202a, 0x106aa07032bbd1b8,
	0x19a4c116b8d2d0c8, 0x1e376c085141ab53, 0x2748774cdf8eeb99, 0x34b0bcb5e19b48a8,
	0x391c0cb3c5c95a63, 0x4ed8aa4ae3418acb, 0x5b9cca4f7763e373, 0x682e6ff3d6b2b8a3,
	0x748f82ee5defb2fc, 0x78a5636f43172f60, 0x84c87814a1f0ab72, 0x8cc702081a6439ec,
	0x90befffa23631e28, 0xa4506cebde82bde9, 0xbef9a3f7b2c67915, 0xc67178f2e372532b,
	0xca273eceea26619c, 0xd186b8c721c0c207, 0xeada7dd6cde0eb1e, 0xf57d4f7fee6ed178,
	0x06f067aa72176fba, 0x0a637dc5a2c898a6, 0x113f9804bef90dae, 0x1b710b35131c471b,
	0x28db77f523047d84, 0x32caab7b40c72493, 0x3c9ebe0a15c9bebc, 0x431d67c49c100d4c,
	0x4cc5d4becb3e42b6, 0x597f299cfc657e2a, 0x5fcb6fab3ad6faec, 0x6c44198c4a475817}

/* HASH512 holds the rolling state of a SHA-512 computation:
   length is the processed message bit-count (128 bits, as two 64-bit words,
   low word first), h is the chaining state, w the 80-word message schedule. */
type HASH512 struct {
	length [2]uint64
	h      [8]uint64
	w      [80]uint64
}

/* circular right rotation by n bits */
func hash512_S(n uint64, x uint64) uint64 {
	return (x >> n) | (x << (64 - n))
}

/* logical right shift by n bits */
func hash512_R(n uint64, x uint64) uint64 {
	return x >> n
}

/* "choose": for each bit, x selects between y and z */
func hash512_Ch(x, y, z uint64) uint64 {
	return (x & y) ^ (^x & z)
}

/* "majority": for each bit, the value held by at least two of x, y, z */
func hash512_Maj(x, y, z uint64) uint64 {
	return (x & y) ^ (x & z) ^ (y & z)
}

func hash512_Sig0(x uint64) uint64 {
	return hash512_S(28, x) ^ hash512_S(34, x) ^ hash512_S(39, x)
}

func hash512_Sig1(x uint64) uint64 {
	return hash512_S(14, x) ^ hash512_S(18, x) ^ hash512_S(41, x)
}

func hash512_theta0(x uint64) uint64 {
	return hash512_S(1, x) ^ hash512_S(8, x) ^ hash512_R(7, x)
}

func hash512_theta1(x uint64) uint64 {
	return hash512_S(19, x) ^ hash512_S(61, x) ^ hash512_R(6, x)
}

/* transform mixes one full 1024-bit block into the hash state. */
func (H *HASH512) transform() {
	// expand the 16 loaded words into the full 80-word schedule
	for j := 16; j < 80; j++ {
		H.w[j] = hash512_theta1(H.w[j-2]) + H.w[j-7] + hash512_theta0(H.w[j-15]) + H.w[j-16]
	}
	a, b, c, d := H.h[0], H.h[1], H.h[2], H.h[3]
	e, f, g, hh := H.h[4], H.h[5], H.h[6], H.h[7]
	for j := 0; j < 80; j++ { // 80 compression rounds
		t1 := hh + hash512_Sig1(e) + hash512_Ch(e, f, g) + hash512_K[j] + H.w[j]
		t2 := hash512_Sig0(a) + hash512_Maj(a, b, c)
		hh = g
		g = f
		f = e
		e = d + t1
		d = c
		c = b
		b = a
		a = t1 + t2
	}
	H.h[0] += a
	H.h[1] += b
	H.h[2] += c
	H.h[3] += d
	H.h[4] += e
	H.h[5] += f
	H.h[6] += g
	H.h[7] += hh
}

/* Init resets the hash to its SHA-512 initial state. */
func (H *HASH512) Init() {
	for i := range H.w {
		H.w[i] = 0
	}
	H.length[0] = 0
	H.length[1] = 0
	H.h[0] = hash512_H0
	H.h[1] = hash512_H1
	H.h[2] = hash512_H2
	H.h[3] = hash512_H3
	H.h[4] = hash512_H4
	H.h[5] = hash512_H5
	H.h[6] = hash512_H6
	H.h[7] = hash512_H7
}

/* NewHASH512 allocates and initialises a fresh SHA-512 instance. */
func NewHASH512() *HASH512 {
	H := new(HASH512)
	H.Init()
	return H
}

/* Process absorbs the next message byte into the current 1024-bit block. */
func (H *HASH512) Process(byt byte) {
	slot := (H.length[0] / 64) % 16 // which 64-bit schedule word this byte lands in
	H.w[slot] = (H.w[slot] << 8) | uint64(byt)
	H.length[0] += 8
	if H.length[0] == 0 {
		// low bit-count word wrapped around: carry into the high word
		H.length[1]++
	}
	if H.length[0]%1024 == 0 {
		H.transform() // a full 128-byte block has been loaded
	}
}

/* Process_array absorbs every byte of b, in order. */
func (H *HASH512) Process_array(b []byte) {
	for _, x := range b {
		H.Process(x)
	}
}

/* Process_num absorbs a 32-bit integer, most significant byte first. */
func (H *HASH512) Process_num(n int32) {
	H.Process(byte((n >> 24) & 0xff))
	H.Process(byte((n >> 16) & 0xff))
	H.Process(byte((n >> 8) & 0xff))
	H.Process(byte(n & 0xff))
}

/* Hash pads the message and returns the 64-byte digest, then re-initialises
   the state so the instance can be reused.
   (The original comment said "32-byte"; SHA-512 produces 64 bytes.) */
func (H *HASH512) Hash() []byte {
	var digest [64]byte
	len0 := H.length[0]
	len1 := H.length[1]
	H.Process(0x80) // mandatory 1-bit marker (plus seven zero bits)
	for H.length[0]%1024 != 896 {
		H.Process(0) // zero-pad up to the length field
	}
	// the final two schedule words hold the 128-bit message length in bits
	H.w[14] = len1
	H.w[15] = len0
	H.transform()
	for i := 0; i < 64; i++ { // big-endian byte extraction of the state words
		digest[i] = byte(H.h[i/8] >> uint(8*(7-i%8)))
	}
	H.Init()
	return digest[0:64]
}
+
+/* test program: should produce digest */
+
+//8e959b75dae313da 8cf4f72814fc143f 8f7779c6eb9f7fa1 7299aeadb6889018 
501d289e4900f7e4 331b99dec4b5433a c7d329eeb6dd2654 5e96e55b874be909
+/*
+func main() {
+
+       test := 
[]byte("abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu")
+       sh:=NewHASH512()
+
+       for i:=0;i<len(test);i++ {
+               sh.Process(test[i])
+       }
+               
+       digest:=sh.Hash()    
+       for i:=0;i<64;i++ {fmt.Printf("%02x",digest[i])}
+
+}
+*/


Reply via email to