#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from time import time

from sklearn import random_projection
from sklearn import mixture
from sklearn.decomposition import PCA  # used by the commented-out PCA alternative below
from sklearn.feature_extraction.text import TfidfVectorizer

import jieba  # only needed if the corpus has to be segmented at read time
lineNo2content_dict = dict()  # 1-based line number -> original line content

def extract_data_features(component_number=0):
    def my_tokenizer(s):
        # the corpus is assumed to be pre-segmented: tokens separated by spaces
        return s.split()
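    # Alternative tokenizer (a sketch, not used as written): if test.dat ever
    # holds raw, unsegmented Chinese text, jieba could segment it here instead
    # of relying on pre-split tokens.
    #def jieba_tokenizer(s):
    #    return [w for w in jieba.cut(s) if w.strip()]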

    ################
    # token_pattern=None silences scikit-learn's warning that the default
    # pattern is ignored when a custom tokenizer is supplied
    vectorizer = TfidfVectorizer(tokenizer=my_tokenizer, token_pattern=None, min_df=1)
    ################
    corpus = []
    with open('test.dat', 'r', encoding='utf-8') as fi:
        idx = 0
        for line in fi:
            line = line.rstrip()
            if not line:
                continue
            idx += 1
            lineNo2content_dict[idx] = line
            corpus.append(line)

    print('corpus len:', len(corpus))
    #####start fit#########
    X = vectorizer.fit_transform(corpus)
    print('tf-idf matrix shape:', X.shape)
    print('component_number:', component_number)
    # project the sparse tf-idf matrix down to component_number dense dimensions
    data = random_projection.GaussianRandomProjection(n_components=component_number).fit_transform(X)
    # alternative: let the projection pick the dimension from the Johnson-Lindenstrauss bound
    #data = random_projection.GaussianRandomProjection(n_components='auto', eps=0.2).fit_transform(X)
    # alternative: sparse random projection with dense output
    #data = random_projection.SparseRandomProjection(n_components=component_number, dense_output=True).fit_transform(X)
    # alternative: PCA on the densified matrix (ProbabilisticPCA was removed from
    # scikit-learn; plain PCA now covers the probabilistic model)
    #data = PCA(n_components=component_number).fit_transform(X.toarray())
    print('extract_data_features output shape:', data.shape)
    return data
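
# Alternative reducer (a sketch, not part of the original script): TruncatedSVD
# performs LSA directly on the sparse tf-idf matrix, avoiding the X.toarray()
# densification that the PCA variant above would need.
def svd_reduce(X, component_number):
    from sklearn.decomposition import TruncatedSVD
    # a fixed random_state makes the randomized solver reproducible
    return TruncatedSVD(n_components=component_number, random_state=0).fit_transform(X)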

def calc(alpha, beta, component_number=0):
    print('*********** new test ***************')
    print('alpha/beta (%s/%s)' % (alpha, beta))
    # NOTE: alpha and beta are only logged; the mixture below uses its own
    # hard-coded concentration prior
    print('Loading data...')
    train = extract_data_features(component_number)
    print('train shape:', train.shape)


    # Train a model...
    print('Training model...')
    # VBGMM and DPGMM were removed from scikit-learn; BayesianGaussianMixture is
    # the current equivalent ('dirichlet_distribution' matches the old VBGMM,
    # 'dirichlet_process' the old DPGMM)
    clf = mixture.BayesianGaussianMixture(
        n_components=component_number,
        covariance_type='diag',
        reg_covar=1e-12,
        weight_concentration_prior_type='dirichlet_distribution',
        weight_concentration_prior=0.1,
        max_iter=1000,
    )
    clf.fit(train)
    y = clf.predict(train)
    print(y)
    # labels can be mapped back to the original lines; see report_clusters below
    #print('params:', clf.converged_, clf.means_, clf.weights_)
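
# Reporting helper (a sketch, not in the original script): group the predicted
# labels back onto the source lines via lineNo2content_dict, which the script
# fills while reading test.dat but never reads back.
def report_clusters(y):
    clusters = {}
    for i, label in enumerate(y):
        # corpus order matches the 1-based keys of lineNo2content_dict
        clusters.setdefault(int(label), []).append(lineNo2content_dict[i + 1])
    for label, lines in sorted(clusters.items()):
        print('cluster %d (%d docs):' % (label, len(lines)))
        for line in lines[:5]:  # a few sample documents per cluster
            print('   ', line)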

if __name__ == '__main__':
    if len(sys.argv) < 2:
        sys.exit('usage: %s <component_number>' % sys.argv[0])
    component_number = int(sys.argv[1])
    start = time()
    calc(1.3, 0.7, component_number)
    print('time: %.2fs' % (time() - start))
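
# Example run (script name hypothetical): python cluster_script.py 50
# test.dat is expected in the working directory, one document per line with
# tokens separated by spaces.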
