This is an automated email from the git hooks/post-receive script.

malex-guest pushed a commit to branch master
in repository pycorrfit.

commit ce9c6482da1a21e6ffd49ae488e60421308f9a0f
Author: Alexandre Mestiashvili <[email protected]>
Date:   Fri Nov 6 13:55:12 2015 +0100

    Imported Upstream version 0.9.2+dfsg
---
 ChangeLog.txt                                      |    8 +
 pycorrfit/doc.py                                   |    3 +-
 pycorrfit/fcs_data_set.py                          |    5 +-
 pycorrfit/readfiles/read_FCS_Confocor3.py          |   67 +-
 pycorrfit/readfiles/read_pt3_PicoQuant.py          |   42 +-
 pycorrfit/readfiles/read_pt3_scripts/README        |   14 +
 pycorrfit/readfiles/read_pt3_scripts/__init__.py   |    1 +
 .../readfiles/read_pt3_scripts/__version__.py      |    2 +
 .../read_pt3_scripts/correlation_methods.py        |   14 +-
 .../read_pt3_scripts/correlation_objects.py        | 1352 ++++++++++++--------
 .../readfiles/read_pt3_scripts/fitting_methods.py  |  349 +++++
 .../readfiles/read_pt3_scripts/import_methods.py   |   32 +
 pycorrfit/readfiles/util.py                        |   48 +
 pycorrfit/tools/parmrange.py                       |    6 +-
 pycorrfit/tools/simulation.py                      |   32 +-
 pycorrfit/tools/trace.py                           |    7 +-
 16 files changed, 1379 insertions(+), 603 deletions(-)

diff --git a/ChangeLog.txt b/ChangeLog.txt
index 5b9dabd..1f71d1f 100644
--- a/ChangeLog.txt
+++ b/ChangeLog.txt
@@ -1,3 +1,11 @@
+0.9.2
+- Bugfixes:
+  - "Slider Simulation"/"Parm Range" broken (#133)
+  - Computation of average intensity did not work
+    correctly for unequally spaced traces
+- Update .pt3 reader to version 8399ff7401
+- Import traces of .pt3 files (#118)
+  Warning: Absolute values for intensity might be wrong
 0.9.1
 - Tool 'Overlay curves': improve UI (#117)
 - Tool 'Statistics view': improve UI (#113)
diff --git a/pycorrfit/doc.py b/pycorrfit/doc.py
index e9e1e43..93ce575 100755
--- a/pycorrfit/doc.py
+++ b/pycorrfit/doc.py
@@ -41,6 +41,7 @@ import wx
 import yaml
 
 import readfiles
+from readfiles import read_pt3_scripts
 
 
 def GetLocationOfFile(filename):
@@ -155,7 +156,7 @@ def SoftwareUsed():
            "\n - wxPython "+wx.__version__
     # Other software
     text += "\n\nOther software:"+\
-            "\n - FCS_point_correlator (9311a5c15e)" +\
+            "\n - FCS_point_correlator ({})".format(read_pt3_scripts.version) 
+\
             "\n    PicoQuant file format for Python by Dominic Waithe"
     if hasattr(sys, 'frozen'):
         pyinst = "\n\nThis executable has been created using PyInstaller."
diff --git a/pycorrfit/fcs_data_set.py b/pycorrfit/fcs_data_set.py
index 0fe981b..4c301b3 100644
--- a/pycorrfit/fcs_data_set.py
+++ b/pycorrfit/fcs_data_set.py
@@ -7,6 +7,7 @@ from __future__ import print_function, division
 
 import hashlib
 import numpy as np
+import scipy.integrate as spintg
 import scipy.interpolate as spintp
 import scipy.optimize as spopt
 import warnings
@@ -61,7 +62,9 @@ class Trace(object):
     @property
     def countrate(self):
         if self._countrate is None:
-            self._countrate = np.average(self._trace[:,1])
+            #self._countrate = np.average(self._trace[:,1])
+            # Take into account traces that have arbitrary sampling
+            self._countrate = spintg.simps(self._trace[:,1], self._trace[:,0]) 
/ self.duration
         return self._countrate
     
     @countrate.setter
diff --git a/pycorrfit/readfiles/read_FCS_Confocor3.py 
b/pycorrfit/readfiles/read_FCS_Confocor3.py
index 7a83044..3e5658e 100644
--- a/pycorrfit/readfiles/read_FCS_Confocor3.py
+++ b/pycorrfit/readfiles/read_FCS_Confocor3.py
@@ -7,6 +7,7 @@ import csv
 import numpy as np
 import warnings
 
+from . import util
 
 def openFCS(dirname, filename):
     """ 
@@ -128,37 +129,8 @@ def openFCS_Multiple(dirname, filename):
                         trace.append( (np.float(row[3])*1000,
                                        np.float(row[4])/1000) )
                     trace = np.array(trace)
-                    # The trace is too big. Wee need to bin it.
-                    if len(trace) >= 500:
-                        # We want about 500 bins
-                        # We need to sum over intervals of length *teiler*
-                        teiler = int(len(trace)/500)
-                        newlength = len(trace)/teiler
-                        newsignal = np.zeros(newlength)
-                        # Simultaneously sum over all intervals
-                        for j in np.arange(teiler):
-                            newsignal = \
-                                 
newsignal+trace[j:newlength*teiler:teiler][:,1]
-                        newsignal = 1.* newsignal / teiler
-                        newtimes = trace[teiler-1:newlength*teiler:teiler][:,0]
-                        if len(trace)%teiler != 0:
-                            # We have a rest signal
-                            # We average it and add it to the trace
-                            rest = trace[newlength*teiler:][:,1]
-                            lrest = len(rest)
-                            rest = np.array([sum(rest)/lrest])
-                            newsignal = np.concatenate((newsignal, rest),
-                                                       axis=0)
-                            timerest = np.array([trace[-1][0]])
-                            newtimes = np.concatenate((newtimes, timerest),
-                                                      axis=0)
-                        newtrace=np.zeros((len(newtimes),2))
-                        newtrace[:,0] = newtimes
-                        newtrace[:,1] = newsignal
-                    else:
-                        # Declare newtrace -
-                        # otherwise we have a problem down three lines ;)
-                        newtrace = trace
+                    # If the trace is too big. Wee need to bin it.
+                    newtrace = util.downsample_trace(trace)
                     # Finally add the trace to the list
                     traces.append(newtrace)
                     if FoundType[:2] != "AC":
@@ -370,37 +342,8 @@ def openFCS_Single(dirname, filename):
                         # So we need to put some factors here
                         trace.append( (np.float(row[0])*1000, 
np.float(row[1])) )
                     trace = np.array(trace)
-                    # The trace is too big. Wee need to bin it.
-                    if len(trace) >= 500:
-                        # We want about 500 bins
-                        # We need to sum over intervals of length *teiler*
-                        teiler = int(len(trace)/500)
-                        newlength = len(trace)/teiler
-                        newsignal = np.zeros(newlength)
-                        # Simultaneously sum over all intervals
-                        for j in np.arange(teiler):
-                            newsignal = \
-                                 
newsignal+trace[j:newlength*teiler:teiler][:,1]
-                        newsignal = 1.* newsignal / teiler
-                        newtimes = trace[teiler-1:newlength*teiler:teiler][:,0]
-                        if len(trace)%teiler != 0:
-                            # We have a rest signal
-                            # We average it and add it to the trace
-                            rest = trace[newlength*teiler:][:,1]
-                            lrest = len(rest)
-                            rest = np.array([sum(rest)/lrest])
-                            newsignal = np.concatenate((newsignal, rest),
-                                                       axis=0)
-                            timerest = np.array([trace[-1][0]])
-                            newtimes = np.concatenate((newtimes, timerest),
-                                                      axis=0)
-                        newtrace=np.zeros((len(newtimes),2))
-                        newtrace[:,0] = newtimes
-                        newtrace[:,1] = newsignal
-                    else:
-                        # Declare newtrace -
-                        # otherwise we have a problem down three lines ;)
-                        newtrace = trace
+                    # If the trace is too big. Wee need to bin it.
+                    newtrace = util.downsample_trace(trace)
                 tracecurve = False
         if fcscurve == True:
             if Alldata[i].partition("=")[0].strip() == "##NPOINTS":
diff --git a/pycorrfit/readfiles/read_pt3_PicoQuant.py 
b/pycorrfit/readfiles/read_pt3_PicoQuant.py
index 8e7efad..b603b42 100644
--- a/pycorrfit/readfiles/read_pt3_PicoQuant.py
+++ b/pycorrfit/readfiles/read_pt3_PicoQuant.py
@@ -8,6 +8,7 @@ import numpy as np
 import os
 from .read_pt3_scripts.correlation_objects import picoObject
 
+from . import util
 
 class ParameterClass():
     """Stores parameters for correlation """
@@ -25,6 +26,39 @@ class ParameterClass():
         self.photonCountBin = 25
         
 
+def getTrace(picoObject, number):
+    """
+    Extracts trace `number` from a `picoObject`.
+    
+    Parameters
+    ----------
+    picoObject: instance of picoObject
+        The data retreived from a pt3 file
+    number:
+        The id of the trace, can be 1 or 2.
+    """
+    
+    attrint = "timeSeries{}".format(number)
+    attrtime = "timeSeriesScale{}".format(number)
+
+    # binned photon counts
+    intensity = np.array(getattr(picoObject, attrint))
+    # Time in ms for each bin
+    time = np.array(getattr(picoObject, attrtime))
+    # time delta
+    deltat = np.abs(time[2]-time[1])
+    
+    trace = np.zeros((intensity.shape[0],2))
+    trace[:,0] = time # ms
+    trace[:,1] = intensity / deltat # kHz
+    
+    # If the trace is too big. Wee need to bin it.
+    newtrace = util.downsample_trace(trace)
+    
+    return newtrace
+    
+
+
 def openPT3(dirname, filename):
     """ Retreive correlation curves from PicoQuant data files 
     
@@ -43,7 +77,7 @@ def openPT3(dirname, filename):
 
     corrlist = list()
     typelist = list()
-    
+    tracelist = list()
     # Some data points are zero for some reason
     id1 = np.where(autotime!=0)
 
@@ -55,6 +89,7 @@ def openPT3(dirname, filename):
         # autotime,auto[:,0,0]
         corrlist.append(np.hstack( (autotime[id1].reshape(-1,1),
                                     corrac0[id1].reshape(-1,1)) ))
+        tracelist.append([getTrace(po, 1)])
     
     # AC1 - autocorrelation CH1
     corrac1 = auto[:,1,1]
@@ -63,6 +98,7 @@ def openPT3(dirname, filename):
         # autotime,auto[:,1,1]
         corrlist.append(np.hstack( (autotime[id1].reshape(-1,1),
                                     corrac1[id1].reshape(-1,1)) ))
+        tracelist.append([getTrace(po, 2)])
     
     # CC01 - Cross-Correlation CH0-CH1
     corrcc01 = auto[:,0,1]
@@ -71,6 +107,7 @@ def openPT3(dirname, filename):
         # autotime,auto[:,0,1]
         corrlist.append(np.hstack( (autotime[id1].reshape(-1,1),
                                     corrcc01[id1].reshape(-1,1)) ))
+        tracelist.append([getTrace(po, 1), getTrace(po, 2)])
     
     # CC10 - Cross-Correlation CH1-CH0
     corrcc10 = auto[:,1,0]
@@ -79,10 +116,9 @@ def openPT3(dirname, filename):
         # autotime,auto[:,1,0]
         corrlist.append(np.hstack( (autotime[id1].reshape(-1,1),
                                     corrcc10[id1].reshape(-1,1)) ))
-
+        tracelist.append([getTrace(po, 1), getTrace(po, 2)])
 
     filelist = [filename] * len(typelist)
-    tracelist = [None] * len(typelist)
 
     dictionary = dict()
     dictionary["Correlation"] = corrlist
diff --git a/pycorrfit/readfiles/read_pt3_scripts/README 
b/pycorrfit/readfiles/read_pt3_scripts/README
new file mode 100644
index 0000000..a09248a
--- /dev/null
+++ b/pycorrfit/readfiles/read_pt3_scripts/README
@@ -0,0 +1,14 @@
+The following files were copied from the repository 
+https://github.com/dwaithe/FCS_point_correlator
+
+- correlation_methods.py
+- correlation_objects.py
+- fitting_methods.py
+- import_methods.py
+
+The following changes were performed:
+- fib4.pyx
+  A doc string was inserted.
+- correlation_objects.py
+  Line 7 was commented out:
+  #from lmfit import minimize, Parameters,report_fit,report_errors, fit_report
diff --git a/pycorrfit/readfiles/read_pt3_scripts/__init__.py 
b/pycorrfit/readfiles/read_pt3_scripts/__init__.py
index e69de29..c1bb8a7 100644
--- a/pycorrfit/readfiles/read_pt3_scripts/__init__.py
+++ b/pycorrfit/readfiles/read_pt3_scripts/__init__.py
@@ -0,0 +1 @@
+from .__version__ import version
\ No newline at end of file
diff --git a/pycorrfit/readfiles/read_pt3_scripts/__version__.py 
b/pycorrfit/readfiles/read_pt3_scripts/__version__.py
new file mode 100644
index 0000000..f3d2133
--- /dev/null
+++ b/pycorrfit/readfiles/read_pt3_scripts/__version__.py
@@ -0,0 +1,2 @@
+# This file contains the GitHub hash of the file versions
+version = "8399ff7401"
\ No newline at end of file
diff --git a/pycorrfit/readfiles/read_pt3_scripts/correlation_methods.py 
b/pycorrfit/readfiles/read_pt3_scripts/correlation_methods.py
index 7ede9d1..a905526 100644
--- a/pycorrfit/readfiles/read_pt3_scripts/correlation_methods.py
+++ b/pycorrfit/readfiles/read_pt3_scripts/correlation_methods.py
@@ -1,6 +1,7 @@
 import numpy as np
-from . import fib4
-
+import fib4
+import time
+import thread
 
 """FCS Bulk Correlation Software
 
@@ -86,8 +87,12 @@ def tttr2xfcs (y,num,NcascStart,NcascEnd, Nsub):
                 
                 #New method, cython
                 i1,i2 = fib4.dividAndConquer(y, y+lag,y.shape[0])
+
+                #If the weights (num) are one as in the first Ncasc round, 
then the correlation is equal to np.sum(i1)
                 i1 = i1.astype(np.bool);
                 i2 = i2.astype(np.bool);
+
+                #Now we want to weight each photon corectly.
                 #Faster dot product method, faster than converting to matrix.
                 auto[(k+(j)*Nsub),:,:] = np.dot((num[i1,:]).T,num[i2,:])/delta 
   
             
@@ -100,6 +105,11 @@ def tttr2xfcs (y,num,NcascStart,NcascEnd, Nsub):
     for j in range(0, auto.shape[0]):
         auto[j,:,:] = auto[j,:,:]*dt/(dt-autotime[j])
     autotime = autotime/1000000
+
+
+    #Removes the trailing zeros.
+    autotime = autotime[autotime != 0]
+    auto = auto[autotime != 0,:,:]
     return auto, autotime
 
 
diff --git a/pycorrfit/readfiles/read_pt3_scripts/correlation_objects.py 
b/pycorrfit/readfiles/read_pt3_scripts/correlation_objects.py
index 1d6ed35..fd1ccd4 100644
--- a/pycorrfit/readfiles/read_pt3_scripts/correlation_objects.py
+++ b/pycorrfit/readfiles/read_pt3_scripts/correlation_objects.py
@@ -1,525 +1,871 @@
 import numpy as np
 import os, sys
-#from correlation_methods import *
-#from import_methods import *
+from correlation_methods import *
+from import_methods import *
 import time
-#from fitting_methods import equation_
+from fitting_methods import equation_
 #from lmfit import minimize, Parameters,report_fit,report_errors, fit_report
-
-from .correlation_methods import *
-from .import_methods import *
+import csv
+import copy
 
 
 """FCS Bulk Correlation Software
 
-    Copyright (C) 2015  Dominic Waithe
+       Copyright (C) 2015  Dominic Waithe
 
-    This program is free software; you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation; either version 2 of the License, or
-    any later version.
+       This program is free software; you can redistribute it and/or modify
+       it under the terms of the GNU General Public License as published by
+       the Free Software Foundation; either version 2 of the License, or
+       any later version.
 
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
+       This program is distributed in the hope that it will be useful,
+       but WITHOUT ANY WARRANTY; without even the implied warranty of
+       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+       GNU General Public License for more details.
 
-    You should have received a copy of the GNU General Public License along
-    with this program; if not, write to the Free Software Foundation, Inc.,
-    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+       You should have received a copy of the GNU General Public License along
+       with this program; if not, write to the Free Software Foundation, Inc.,
+       51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 """
 
 class picoObject():
-    #This is the class which holds the .pt3 data and parameters
-    def __init__(self,filepath, par_obj,fit_obj):
-    
-        #parameter object and fit object. If 
-        self.par_obj = par_obj
-        self.fit_obj = fit_obj
-        self.type = 'mainObject'
-        
-        #self.PIE = 0
-        self.filepath = str(filepath)
-        self.nameAndExt = os.path.basename(self.filepath).split('.')
-        self.name = self.nameAndExt[0]
-        self.par_obj.data.append(filepath);
-        self.par_obj.objectRef.append(self)
-        
-        #Imports pt3 file format to object.
-        self.unqID = self.par_obj.numOfLoaded
-        
-        #For fitting.
-        self.objId1 = None
-        self.objId2 = None
-        self.objId3 = None
-        self.objId4 = None
-        self.processData();
-        
-        self.plotOn = True;
-
-
-    def processData(self):
-
-        self.NcascStart = self.par_obj.NcascStart
-        self.NcascEnd = self.par_obj.NcascEnd
-        self.Nsub = self.par_obj.Nsub
-        self.winInt = self.par_obj.winInt
-        self.photonCountBin = self.par_obj.photonCountBin
-        
-        #File import 
-        self.subChanArr, self.trueTimeArr, self.dTimeArr,self.resolution = 
pt3import(self.filepath)
-        
-        #Colour assigned to file.
-        self.color = self.par_obj.colors[self.unqID % len(self.par_obj.colors)]
-
-        #How many channels there are in the files.
-        self.numOfCH =  np.unique(np.array(self.subChanArr)).__len__()-1 
#Minus 1 because not interested in channel 15.
-        #TODO Generates the interleaved excitation channel if required. 
-        #if (self.aug == 'PIE'):
-            #self.pulsedInterleavedExcitation()
-        
-        #Finds the numbers which address the channels.
-        self.ch_present = np.unique(np.array(self.subChanArr[0:100]))
-
-        #Calculates decay function for both channels.
-        self.photonDecayCh1,self.decayScale1 = 
delayTime2bin(np.array(self.dTimeArr),np.array(self.subChanArr),self.ch_present[0],self.winInt)
-        
-        if self.numOfCH ==  2:
-            self.photonDecayCh2,self.decayScale2 = 
delayTime2bin(np.array(self.dTimeArr),np.array(self.subChanArr),self.ch_present[1],self.winInt)
-
-        #Time series of photon counts. For visualisation.
-        self.timeSeries1,self.timeSeriesScale1 = 
delayTime2bin(np.array(self.trueTimeArr)/1000000,np.array(self.subChanArr),self.ch_present[0],self.photonCountBin)
-        if self.numOfCH ==  2:
-            self.timeSeries2,self.timeSeriesScale2 = 
delayTime2bin(np.array(self.trueTimeArr)/1000000,np.array(self.subChanArr),self.ch_present[1],self.photonCountBin)
-
-        
-        #Calculates the Auto and Cross-correlation functions.
-        self.crossAndAuto(np.array(self.trueTimeArr),np.array(self.subChanArr))
-        
-        
-           
-       
-        
-        if self.fit_obj != None:
-            #If fit object provided then creates fit objects.
-            if self.objId1 == None:
-                corrObj= corrObject(self.filepath,self.fit_obj);
-                self.objId1 = corrObj.objId
-                self.fit_obj.objIdArr.append(corrObj.objId)
-                self.objId1.name = self.name+'_CH0_Auto_Corr'
-                self.objId1.ch_type = 0 #channel 0 Auto
-                self.objId1.prepare_for_fit()
-            self.objId1.autoNorm = np.array(self.autoNorm[:,0,0]).reshape(-1)
-            self.objId1.autotime = np.array(self.autotime).reshape(-1)
-            self.objId1.param = self.fit_obj.def_param
-            
-            
-            if self.numOfCH ==  2:
-                if self.objId3 == None:
-                    corrObj= corrObject(self.filepath,self.fit_obj);
-                    self.objId3 = corrObj.objId
-                    self.fit_obj.objIdArr.append(corrObj.objId)
-                    self.objId3.name = self.name+'_CH1_Auto_Corr'
-                    self.objId3.ch_type = 1 #channel 1 Auto
-                    self.objId3.prepare_for_fit()
-                self.objId3.autoNorm = 
np.array(self.autoNorm[:,1,1]).reshape(-1)
-                self.objId3.autotime = np.array(self.autotime).reshape(-1)
-                self.objId3.param = self.fit_obj.def_param
-                
-                if self.objId2 == None:
-                    corrObj= corrObject(self.filepath,self.fit_obj);
-                    self.objId2 = corrObj.objId
-                    self.fit_obj.objIdArr.append(corrObj.objId)
-                    self.objId2.name = self.name+'_CH01_Cross_Corr'
-                    self.objId2.ch_type = 2 #01cross
-                    self.objId2.prepare_for_fit()
-                self.objId2.autoNorm = 
np.array(self.autoNorm[:,0,1]).reshape(-1)
-                self.objId2.autotime = np.array(self.autotime).reshape(-1)
-                self.objId2.param = self.fit_obj.def_param
-                
-
-                if self.objId4 == None:
-                    corrObj= corrObject(self.filepath,self.fit_obj);
-                    self.objId4 = corrObj.objId
-                    self.fit_obj.objIdArr.append(corrObj.objId)
-                    self.objId4.name = self.name+'_CH10_Cross_Corr'
-                    self.objId4.ch_type = 3 #10cross
-                    self.objId4.prepare_for_fit()
-                self.objId4.autoNorm = 
np.array(self.autoNorm[:,1,0]).reshape(-1)
-                self.objId4.autotime = np.array(self.autotime).reshape(-1)
-                self.objId4.param = self.fit_obj.def_param
-                
-            self.fit_obj.fill_series_list()
-        self.dTimeMin = 0
-        self.dTimeMax = np.max(self.dTimeArr)
-        self.subDTimeMin = self.dTimeMin
-        self.subDTimeMax = self.dTimeMax
-        del self.subChanArr 
-        del self.trueTimeArr 
-        del self.dTimeArr
-    def crossAndAuto(self,trueTimeArr,subChanArr):
-        #For each channel we loop through and find only those in the correct 
time gate.
-        #We only want photons in channel 1 or two.
-        y = trueTimeArr[subChanArr < 3]
-        validPhotons = subChanArr[subChanArr < 3 ]
-
-
-        #Creates boolean for photon events in either channel.
-        num = np.zeros((validPhotons.shape[0],2))
-        num[:,0] = (np.array([np.array(validPhotons) 
==self.ch_present[0]])).astype(np.int32)
-        if self.numOfCH ==2:
-            num[:,1] = (np.array([np.array(validPhotons) 
==self.ch_present[1]])).astype(np.int32)
-
-
-        self.count0 = np.sum(num[:,0]) 
-        self.count1 = np.sum(num[:,1])
-
-        t1 = time.time()
-        auto, self.autotime = tttr2xfcs(y,num,self.NcascStart,self.NcascEnd, 
self.Nsub)
-        t2 = time.time()
-        print 'timing',t2-t1
-        
-
-        #Normalisation of the TCSPC data:
-        maxY = np.ceil(max(self.trueTimeArr))
-        self.autoNorm = np.zeros((auto.shape))
-        self.autoNorm[:,0,0] = ((auto[:,0,0]*maxY)/(self.count0*self.count0))-1
-        
-        if self.numOfCH ==  2:
-            self.autoNorm[:,1,1] = 
((auto[:,1,1]*maxY)/(self.count1*self.count1))-1
-            self.autoNorm[:,1,0] = 
((auto[:,1,0]*maxY)/(self.count1*self.count0))-1
-            self.autoNorm[:,0,1] = 
((auto[:,0,1]*maxY)/(self.count0*self.count1))-1
-            
-
-        #Normalisaation of the decay functions.
-        self.photonDecayCh1Min = 
self.photonDecayCh1-np.min(self.photonDecayCh1)
-        self.photonDecayCh1Norm = 
self.photonDecayCh1Min/np.max(self.photonDecayCh1Min)
-        
-        
-        if self.numOfCH ==  2:
-            self.photonDecayCh2Min = 
self.photonDecayCh2-np.min(self.photonDecayCh2)
-            self.photonDecayCh2Norm = 
self.photonDecayCh2Min/np.max(self.photonDecayCh2Min)
-        
-        return 
+       #This is the class which holds the .pt3 data and parameters
+       def __init__(self,filepath, par_obj,fit_obj):
+       
+               #parameter object and fit object. If 
+               self.par_obj = par_obj
+               self.fit_obj = fit_obj
+               self.type = 'mainObject'
+               
+               #self.PIE = 0
+               self.filepath = str(filepath)
+               self.nameAndExt = os.path.basename(self.filepath).split('.')
+               self.name = self.nameAndExt[0]
+               self.ext = self.nameAndExt[-1]
+
+               self.par_obj.data.append(filepath);
+               self.par_obj.objectRef.append(self)
+               
+               #Imports pt3 file format to object.
+               self.unqID = self.par_obj.numOfLoaded
+               
+               #For fitting.
+               self.objId1 = None
+               self.objId2 = None
+               self.objId3 = None
+               self.objId4 = None
+               self.processData();
+               
+               self.plotOn = True;
+
+
+       def processData(self):
+
+               self.NcascStart = self.par_obj.NcascStart
+               self.NcascEnd = self.par_obj.NcascEnd
+               self.Nsub = self.par_obj.Nsub
+               self.winInt = self.par_obj.winInt
+               self.photonCountBin = self.par_obj.photonCountBin
+               
+               #File import
+               
+               if self.ext == 'pt3':
+                       self.subChanArr, self.trueTimeArr, 
self.dTimeArr,self.resolution = pt3import(self.filepath)
+               if self.ext == 'csv':
+                       self.subChanArr, self.trueTimeArr, 
self.dTimeArr,self.resolution = csvimport(self.filepath)
+                       #If the file is empty.
+                       if self.subChanArr == None:
+                               #Undoes any preparation of resource.
+                               self.par_obj.data.pop(-1);
+                               self.par_obj.objectRef.pop(-1)
+                               self.exit = True
+                               
self.par_obj.image_status_text.showMessage("Your sample is not in the correct 
format.")
+                               self.par_obj.fit_obj.app.processEvents()
+                               return
+
+                                       
+               
+               #Colour assigned to file.
+               self.color = self.par_obj.colors[self.unqID % 
len(self.par_obj.colors)]
+
+               #How many channels there are in the files.
+               self.numOfCH =  
np.unique(np.array(self.subChanArr)).__len__()-1 #Minus 1 because not 
interested in channel 15.
+               
+               #Finds the numbers which address the channels.
+               self.ch_present = np.unique(np.array(self.subChanArr[0:100]))
+
+               #Calculates decay function for both channels.
+               self.photonDecayCh1,self.decayScale1 = 
delayTime2bin(np.array(self.dTimeArr),np.array(self.subChanArr),self.ch_present[0],self.winInt)
+               
+               if self.numOfCH ==  2:
+                       self.photonDecayCh2,self.decayScale2 = 
delayTime2bin(np.array(self.dTimeArr),np.array(self.subChanArr),self.ch_present[1],self.winInt)
+
+               #Time series of photon counts. For visualisation.
+               self.timeSeries1,self.timeSeriesScale1 = 
delayTime2bin(np.array(self.trueTimeArr)/1000000,np.array(self.subChanArr),self.ch_present[0],self.photonCountBin)
+               
+               unit = self.timeSeriesScale1[-1]/self.timeSeriesScale1.__len__()
+               
+               #Converts to counts per 
+               self.kcount_CH1 = np.average(self.timeSeries1)
+
+               raw_count = np.average(self.timeSeries1) #This is the 
unnormalised intensity count for int_time duration (the first moment)
+               var_count = np.var(self.timeSeries1)
+
+               self.brightnessNandBCH0=(((var_count 
-raw_count)/(raw_count))/(float(unit)))
+               if (var_count-raw_count) == 0:
+                       self.numberNandBCH0 =0
+               else:
+                       self.numberNandBCH0 = 
(raw_count**2/(var_count-raw_count))
+               
+
+
+               if self.numOfCH ==  2:
+
+                       self.timeSeries2,self.timeSeriesScale2 = 
delayTime2bin(np.array(self.trueTimeArr)/1000000,np.array(self.subChanArr),self.ch_present[1],self.photonCountBin)
+                       unit = 
self.timeSeriesScale2[-1]/self.timeSeriesScale2.__len__()
+                       self.kcount_CH2 = np.average(self.timeSeries2)
+                       raw_count = np.average(self.timeSeries2) #This is the 
unnormalised intensity count for int_time duration (the first moment)
+                       var_count = np.var(self.timeSeries2)
+                       self.brightnessNandBCH1= (((var_count 
-raw_count)/(raw_count))/(float(unit)))
+                       if (var_count-raw_count) == 0:
+                               self.numberNandBCH1 =0
+                       else:
+                               self.numberNandBCH1 = 
(raw_count**2/(var_count-raw_count))
+
+
+               
+               #Calculates the Auto and Cross-correlation functions.
+               
self.crossAndAuto(np.array(self.trueTimeArr),np.array(self.subChanArr))
+               
+               if self.fit_obj != None:
+                       #If fit object provided then creates fit objects.
+                       if self.objId1 == None:
+                               corrObj= corrObject(self.filepath,self.fit_obj);
+                               self.objId1 = corrObj.objId
+                               self.fit_obj.objIdArr.append(corrObj.objId)
+                               self.objId1.param = 
copy.deepcopy(self.fit_obj.def_param)
+                               self.objId1.name = self.name+'_CH0_Auto_Corr'
+                               self.objId1.ch_type = 0 #channel 0 Auto
+                               self.objId1.siblings = None
+                               self.objId1.prepare_for_fit()
+                               self.objId1.kcount = self.kcount_CH1
+                               
+                       self.objId1.autoNorm = 
np.array(self.autoNorm[:,0,0]).reshape(-1)
+                       self.objId1.autotime = 
np.array(self.autotime).reshape(-1)
+                       self.objId1.param = 
copy.deepcopy(self.fit_obj.def_param)
+                       
+                       
+                       if self.numOfCH ==  2:
+                               if self.objId3 == None:
+                                       corrObj= 
corrObject(self.filepath,self.fit_obj);
+                                       self.objId3 = corrObj.objId
+                                       self.objId3.param = 
copy.deepcopy(self.fit_obj.def_param)
+                                       
self.fit_obj.objIdArr.append(corrObj.objId)
+                                       self.objId3.name = 
self.name+'_CH1_Auto_Corr'
+                                       self.objId3.ch_type = 1 #channel 1 Auto
+                                       self.objId3.siblings = None
+                                       self.objId3.prepare_for_fit()
+                                       self.objId3.kcount = self.kcount_CH2
+                                       
+                               self.objId3.autoNorm = 
np.array(self.autoNorm[:,1,1]).reshape(-1)
+                               self.objId3.autotime = 
np.array(self.autotime).reshape(-1)
+                               self.objId3.param = 
copy.deepcopy(self.fit_obj.def_param)
+                               
+                               if self.objId2 == None:
+                                       corrObj= 
corrObject(self.filepath,self.fit_obj);
+                                       self.objId2 = corrObj.objId
+                                       self.objId2.param = 
copy.deepcopy(self.fit_obj.def_param)
+                                       
self.fit_obj.objIdArr.append(corrObj.objId)
+                                       self.objId2.name = 
self.name+'_CH01_Cross_Corr'
+                                       self.objId2.ch_type = 2 #01cross
+                                       self.objId2.siblings = None
+                                       self.objId2.prepare_for_fit()
+                                       
+                               self.objId2.autoNorm = 
np.array(self.autoNorm[:,0,1]).reshape(-1)
+                               self.objId2.autotime = 
np.array(self.autotime).reshape(-1)
+                               self.objId2.param = 
copy.deepcopy(self.fit_obj.def_param)
+                               
+
+                               if self.objId4 == None:
+                                       corrObj= 
corrObject(self.filepath,self.fit_obj);
+                                       self.objId4 = corrObj.objId
+                                       self.objId4.param = 
copy.deepcopy(self.fit_obj.def_param)
+                                       
self.fit_obj.objIdArr.append(corrObj.objId)
+                                       self.objId4.name = 
self.name+'_CH10_Cross_Corr'
+                                       self.objId4.ch_type = 3 #10cross
+                                       self.objId4.siblings = None
+                                       self.objId4.prepare_for_fit()
+                                       
+                               self.objId4.autoNorm = 
np.array(self.autoNorm[:,1,0]).reshape(-1)
+                               self.objId4.autotime = 
np.array(self.autotime).reshape(-1)
+                               self.objId4.param = 
copy.deepcopy(self.fit_obj.def_param)
+                               
+                       self.fit_obj.fill_series_list()
+               self.dTimeMin = 0
+               self.dTimeMax = np.max(self.dTimeArr)
+               self.subDTimeMin = self.dTimeMin
+               self.subDTimeMax = self.dTimeMax
+               self.exit = False
+               del self.subChanArr 
+               del self.trueTimeArr 
+               del self.dTimeArr
+       def crossAndAuto(self,trueTimeArr,subChanArr):
+               #For each channel we loop through and find only those in the 
correct time gate.
+               #We only want photons in channel 1 or 2.
+               y = trueTimeArr[subChanArr < 3]
+               validPhotons = subChanArr[subChanArr < 3 ]
+
+
+               #Creates boolean for photon events in either channel.
+               num = np.zeros((validPhotons.shape[0],2))
+               num[:,0] = (np.array([np.array(validPhotons) 
==self.ch_present[0]])).astype(np.int32)
+               if self.numOfCH ==2:
+                       num[:,1] = (np.array([np.array(validPhotons) 
==self.ch_present[1]])).astype(np.int32)
+
+
+               self.count0 = np.sum(num[:,0]) 
+               self.count1 = np.sum(num[:,1])
+
+               t1 = time.time()
+               auto, self.autotime = 
tttr2xfcs(y,num,self.NcascStart,self.NcascEnd, self.Nsub)
+               t2 = time.time()
+               
+               
+
+               #Normalisation of the TCSPC data:
+               maxY = np.ceil(max(self.trueTimeArr))
+               self.autoNorm = np.zeros((auto.shape))
+               self.autoNorm[:,0,0] = 
((auto[:,0,0]*maxY)/(self.count0*self.count0))-1
+               
+               if self.numOfCH ==  2:
+                       self.autoNorm[:,1,1] = 
((auto[:,1,1]*maxY)/(self.count1*self.count1))-1
+                       self.autoNorm[:,1,0] = 
((auto[:,1,0]*maxY)/(self.count1*self.count0))-1
+                       self.autoNorm[:,0,1] = 
((auto[:,0,1]*maxY)/(self.count0*self.count1))-1
+                       
+
+               #Normalisation of the decay functions.
+               self.photonDecayCh1Min = 
self.photonDecayCh1-np.min(self.photonDecayCh1)
+               self.photonDecayCh1Norm = 
self.photonDecayCh1Min/np.max(self.photonDecayCh1Min)
+               
+               
+               if self.numOfCH ==  2:
+                       self.photonDecayCh2Min = 
self.photonDecayCh2-np.min(self.photonDecayCh2)
+                       self.photonDecayCh2Norm = 
self.photonDecayCh2Min/np.max(self.photonDecayCh2Min)
+               
+               return 
    
 
-    
-    
+       
+       
 class subPicoObject():
-    def __init__(self,parentId,xmin,xmax,TGid,par_obj):
-        #Binning window for decay function
-        self.TGid = TGid
-        #Parameters for auto-correlation and cross-correlation.
-        self.parentId = parentId
-        self.par_obj = par_obj
-        self.NcascStart = self.parentId.NcascStart
-        self.NcascEnd = self.parentId.NcascEnd
-        self.Nsub = self.parentId.Nsub
-        self.fit_obj = self.parentId.fit_obj
-        
-        self.type = 'subObject'
-        #Appends the object to the subObject register.
-        self.par_obj.subObjectRef.append(self)
-        self.unqID = self.par_obj.subNum
-        self.parentUnqID = self.parentId.unqID
-        #self.chanArr = parentObj.chanArr
-        #self.trueTimeArr = self.parentId.trueTimeArr
-        #self.dTimeArr = self.parentId.dTimeArr
-        self.color = self.parentId.color
-        self.numOfCH = self.parentId.numOfCH
-        self.ch_present = self.parentId.ch_present
-
-        self.filepath = str(self.parentId.filepath)
-        self.xmin = xmin
-        self.xmax = xmax
-
-        self.nameAndExt = os.path.basename(self.filepath).split('.')
-        self.name = 
'TG-'+str(self.unqID)+'-xmin_'+str(round(xmin,0))+'-xmax_'+str(round(xmax,0))+'-'+self.nameAndExt[0]
-
-        self.objId1 = None
-        self.objId2 = None
-        self.objId3 = None
-        self.objId4 = None
-        self.processData();
-        self.plotOn = True
-        
-        
-    def processData(self):
-        self.NcascStart= self.par_obj.NcascStart
-        self.NcascEnd= self.par_obj.NcascEnd
-        self.Nsub = self.par_obj.Nsub
-        self.winInt = self.par_obj.winInt
-        
-        
-        self.subChanArr, self.trueTimeArr, self.dTimeArr,self.resolution = 
pt3import(self.filepath)
-        
-        
-        
-        self.subArrayGeneration(self.xmin,self.xmax,np.array(self.subChanArr))
-        
-        
-
-
-        self.dTimeMin = self.parentId.dTimeMin
-        self.dTimeMax = self.parentId.dTimeMax
-        self.subDTimeMin = self.dTimeMin
-        self.subDTimeMax = self.dTimeMax
-        
-
-        
-        #Adds names to the fit function for later fitting.
-        if self.objId1 == None:
-            corrObj= corrObject(self.filepath,self.fit_obj);
-            self.objId1 = corrObj.objId
-            self.fit_obj.objIdArr.append(corrObj.objId)
-            self.objId1.name = self.name+'_CH0_Auto_Corr'
-            self.objId1.ch_type = 0 #channel 0 Auto
-            self.objId1.prepare_for_fit()
-        self.objId1.autoNorm = np.array(self.autoNorm[:,0,0]).reshape(-1)
-        self.objId1.autotime = np.array(self.autotime).reshape(-1)
-        self.objId1.param = self.fit_obj.def_param
-        
-        
-        if self.numOfCH ==2:
-            if self.objId3 == None:
-                corrObj= corrObject(self.filepath,self.fit_obj);
-                self.objId3 = corrObj.objId
-                self.fit_obj.objIdArr.append(corrObj.objId)
-                self.objId3.name = self.name+'_CH1_Auto_Corr'
-                self.objId3.ch_type = 1 #channel 1 Auto
-                self.objId3.prepare_for_fit()
-            self.objId3.autoNorm = np.array(self.autoNorm[:,1,1]).reshape(-1)
-            self.objId3.autotime = np.array(self.autotime).reshape(-1)
-            self.objId3.param = self.fit_obj.def_param
-            if self.objId2 == None:
-                corrObj= corrObject(self.filepath,self.fit_obj);
-                self.objId2 = corrObj.objId
-                self.fit_obj.objIdArr.append(corrObj.objId)
-                self.objId2.name = self.name+'_CH01_Cross_Corr'
-                self.objId2.ch_type = 2 #channel 01 Cross
-                self.objId2.prepare_for_fit()
-            self.objId2.autoNorm = np.array(self.autoNorm[:,0,1]).reshape(-1)
-            self.objId2.autotime = np.array(self.autotime).reshape(-1)
-            self.objId2.param = self.fit_obj.def_param
-            if self.objId4 == None:
-                corrObj= corrObject(self.filepath,self.fit_obj);
-                self.objId4 = corrObj.objId
-                self.fit_obj.objIdArr.append(corrObj.objId)
-                self.objId4.name = self.name+'_CH10_Cross_Corr'
-                self.objId4.ch_type = 3 #channel 10 Cross
-                self.objId4.prepare_for_fit()
-            self.objId4.autoNorm = np.array(self.autoNorm[:,1,0]).reshape(-1)
-            self.objId4.autotime = np.array(self.autotime).reshape(-1)
-            self.objId4.param = self.fit_obj.def_param
-            
-        
-        self.fit_obj.fill_series_list()  
-        del self.subChanArr 
-        del self.trueTimeArr 
-        del self.dTimeArr 
-    
-
-
-    def subArrayGeneration(self,xmin,xmax,subChanArr):
-        if(xmax<xmin):
-            xmin1 = xmin
-            xmin = xmax
-            xmax = xmin1
-        #self.subChanArr = np.array(self.chanArr)
-        #Finds those photons which arrive above certain time or below certain 
time.
-        photonInd = np.logical_and(self.dTimeArr>=xmin, 
self.dTimeArr<=xmax).astype(np.bool)
-        
-        subChanArr[np.invert(photonInd).astype(np.bool)] = 16
-        
-        self.crossAndAuto(subChanArr)
-
-        return
-    def crossAndAuto(self,subChanArr):
-        #We only want photons in channel 1 or two.
-        validPhotons = subChanArr[subChanArr < 3]
-        y = self.trueTimeArr[subChanArr < 3]
-        #Creates boolean for photon events in either channel.
-        num = np.zeros((validPhotons.shape[0],2))
-        num[:,0] = (np.array([np.array(validPhotons) 
==self.ch_present[0]])).astype(np.int)
-        if self.numOfCH == 2:
-            num[:,1] = (np.array([np.array(validPhotons) 
==self.ch_present[1]])).astype(np.int)
-
-        self.count0 = np.sum(num[:,0]) 
-        self.count1 = np.sum(num[:,1]) 
-        #Function which calculates auto-correlation and cross-correlation.
-
-
-
-        auto, self.autotime = tttr2xfcs(y,num,self.NcascStart,self.NcascEnd, 
self.Nsub)
-
-        maxY = np.ceil(max(self.trueTimeArr))
-        self.autoNorm = np.zeros((auto.shape))
-        self.autoNorm[:,0,0] = ((auto[:,0,0]*maxY)/(self.count0*self.count0))-1
-        if self.numOfCH ==2:
-            self.autoNorm[:,1,1] = 
((auto[:,1,1]*maxY)/(self.count1*self.count1))-1
-            self.autoNorm[:,1,0] = 
((auto[:,1,0]*maxY)/(self.count1*self.count0))-1
-            self.autoNorm[:,0,1] = 
((auto[:,0,1]*maxY)/(self.count0*self.count1))-1
-
-        return 
+       def __init__(self,parentId,xmin,xmax,TGid,par_obj):
+               #Binning window for decay function
+               self.TGid = TGid
+               #Parameters for auto-correlation and cross-correlation.
+               self.parentId = parentId
+               self.par_obj = par_obj
+               self.NcascStart = self.parentId.NcascStart
+               self.NcascEnd = self.parentId.NcascEnd
+               self.Nsub = self.parentId.Nsub
+               self.fit_obj = self.parentId.fit_obj
+               self.ext = self.parentId.ext
+               
+               self.type = 'subObject'
+               #Appends the object to the subObject register.
+               self.par_obj.subObjectRef.append(self)
+               self.unqID = self.par_obj.subNum
+               self.parentUnqID = self.parentId.unqID
+               #self.chanArr = parentObj.chanArr
+               #self.trueTimeArr = self.parentId.trueTimeArr
+               #self.dTimeArr = self.parentId.dTimeArr
+               self.color = self.parentId.color
+               self.numOfCH = self.parentId.numOfCH
+               self.ch_present = self.parentId.ch_present
+               self.photonCountBin = self.par_obj.photonCountBin
+
+               self.filepath = str(self.parentId.filepath)
+               self.xmin = xmin
+               self.xmax = xmax
+
+               self.nameAndExt = os.path.basename(self.filepath).split('.')
+               self.name = 
'TG-'+str(self.unqID)+'-xmin_'+str(round(xmin,0))+'-xmax_'+str(round(xmax,0))+'-'+self.nameAndExt[0]
+
+               self.objId1 = None
+               self.objId2 = None
+               self.objId3 = None
+               self.objId4 = None
+               self.processData();
+               self.plotOn = True
+               
+               
+       def processData(self):
+               self.NcascStart= self.par_obj.NcascStart
+               self.NcascEnd= self.par_obj.NcascEnd
+               self.Nsub = self.par_obj.Nsub
+               self.winInt = self.par_obj.winInt
+               
+               
+               #self.subChanArr, self.trueTimeArr, 
self.dTimeArr,self.resolution = pt3import(self.filepath)
+               if self.ext == 'pt3':
+                       self.subChanArr, self.trueTimeArr, 
self.dTimeArr,self.resolution = pt3import(self.filepath)
+               if self.ext == 'csv':
+                       self.subChanArr, self.trueTimeArr, 
self.dTimeArr,self.resolution = csvimport(self.filepath)
+                       #If the file is empty.
+                       #if self.subChanArr == None:
+                               #Undoes any preparation of resource.
+                       #    self.par_obj.subObjectRef.pop(-1)
+                               #self.exit = True
+                       #    return
+
+
+               self.subArrayGeneration(self.xmin,self.xmax)
+               
+               self.dTimeMin = self.parentId.dTimeMin
+               self.dTimeMax = self.parentId.dTimeMax
+               self.subDTimeMin = self.dTimeMin
+               self.subDTimeMax = self.dTimeMax
+
+          #Time series of photon counts. For visualisation.
+               self.timeSeries1,self.timeSeriesScale1 = 
delayTime2bin(np.array(self.trueTimeArr)/1000000,np.array(self.subChanArr),self.ch_present[0],self.photonCountBin)
+               
+               unit = self.timeSeriesScale1[-1]/self.timeSeriesScale1.__len__()
+               self.kcount_CH1 = np.average(self.timeSeries1)
+               if self.numOfCH ==  2:
+
+                       self.timeSeries2,self.timeSeriesScale2 = 
delayTime2bin(np.array(self.trueTimeArr)/1000000,np.array(self.subChanArr),self.ch_present[1],self.photonCountBin)
+                       unit = 
self.timeSeriesScale2[-1]/self.timeSeriesScale2.__len__()
+                       self.kcount_CH2 = np.average(self.timeSeries2)
+               
+
+               
+               #Adds names to the fit function for later fitting.
+               if self.objId1 == None:
+                       corrObj= corrObject(self.filepath,self.fit_obj);
+                       self.objId1 = corrObj.objId
+                       self.fit_obj.objIdArr.append(corrObj.objId)
+                       self.objId1.param = 
copy.deepcopy(self.fit_obj.def_param)
+                       self.objId1.name = self.name+'_CH0_Auto_Corr'
+                       self.objId1.ch_type = 0 #channel 0 Auto
+                       self.objId1.siblings = None
+                       self.objId1.prepare_for_fit()
+                       
+                       self.objId1.kcount = self.kcount_CH1
+               self.objId1.autoNorm = 
np.array(self.autoNorm[:,0,0]).reshape(-1)
+               self.objId1.autotime = np.array(self.autotime).reshape(-1)
+               self.objId1.param = copy.deepcopy(self.fit_obj.def_param)
+               
+               
+               if self.numOfCH == 2:
+                       if self.objId3 == None:
+                               corrObj= corrObject(self.filepath,self.fit_obj);
+                               self.objId3 = corrObj.objId
+                               self.fit_obj.objIdArr.append(corrObj.objId)
+                               self.objId3.param = 
copy.deepcopy(self.fit_obj.def_param)
+                               self.objId3.name = self.name+'_CH1_Auto_Corr'
+                               self.objId3.ch_type = 1 #channel 1 Auto
+                               self.objId3.siblings = None
+                               self.objId3.prepare_for_fit()
+                               self.objId3.kcount = self.kcount_CH2
+                               
+                       self.objId3.autoNorm = 
np.array(self.autoNorm[:,1,1]).reshape(-1)
+                       self.objId3.autotime = 
np.array(self.autotime).reshape(-1)
+                       self.objId3.param = 
copy.deepcopy(self.fit_obj.def_param)
+                       if self.objId2 == None:
+                               corrObj= corrObject(self.filepath,self.fit_obj);
+                               self.objId2 = corrObj.objId
+                               self.objId2.param = 
copy.deepcopy(self.fit_obj.def_param)
+                               self.fit_obj.objIdArr.append(corrObj.objId)
+                               self.objId2.name = self.name+'_CH01_Cross_Corr'
+                               self.objId2.ch_type = 2 #channel 01 Cross
+                               self.objId2.siblings = None
+                               self.objId2.prepare_for_fit()
+                               
+                       self.objId2.autoNorm = 
np.array(self.autoNorm[:,0,1]).reshape(-1)
+                       self.objId2.autotime = 
np.array(self.autotime).reshape(-1)
+                       self.objId2.param = 
copy.deepcopy(self.fit_obj.def_param)
+                       if self.objId4 == None:
+                               corrObj= corrObject(self.filepath,self.fit_obj);
+                               self.objId4 = corrObj.objId
+                               self.objId4.param = 
copy.deepcopy(self.fit_obj.def_param)
+                               self.fit_obj.objIdArr.append(corrObj.objId)
+                               self.objId4.name = self.name+'_CH10_Cross_Corr'
+                               self.objId4.ch_type = 3 #channel 10 Cross
+                               self.objId4.siblings = None
+                               self.objId4.prepare_for_fit()
+                               
+                       self.objId4.autoNorm = 
np.array(self.autoNorm[:,1,0]).reshape(-1)
+                       self.objId4.autotime = 
np.array(self.autotime).reshape(-1)
+                       
+                       
+               
+               self.fit_obj.fill_series_list()  
+               del self.subChanArr 
+               del self.trueTimeArr 
+               del self.dTimeArr 
+       
+
+
+       def subArrayGeneration(self,xmin,xmax):
+               if(xmax<xmin):
+                       xmin1 = xmin
+                       xmin = xmax
+                       xmax = xmin1
+               #self.subChanArr = np.array(self.chanArr)
+               #Finds those photons which arrive above certain time or below 
certain time.
+               photonInd = np.logical_and(self.dTimeArr>=xmin, 
self.dTimeArr<=xmax).astype(np.bool)
+               
+               self.subChanArr[np.invert(photonInd).astype(np.bool)] = 16
+               
+               self.crossAndAuto()
+
+               return
+       def crossAndAuto(self):
+               #We only want photons in channel 1 or 2.
+               validPhotons = self.subChanArr[self.subChanArr < 3]
+               y = self.trueTimeArr[self.subChanArr < 3]
+               #Creates boolean for photon events in either channel.
+               num = np.zeros((validPhotons.shape[0],2))
+               num[:,0] = (np.array([np.array(validPhotons) 
==self.ch_present[0]])).astype(np.int)
+               if self.numOfCH == 2:
+                       num[:,1] = (np.array([np.array(validPhotons) 
==self.ch_present[1]])).astype(np.int)
+
+               self.count0 = np.sum(num[:,0]) 
+               self.count1 = np.sum(num[:,1]) 
+               #Function which calculates auto-correlation and 
cross-correlation.
+
+
+
+               auto, self.autotime = 
tttr2xfcs(y,num,self.NcascStart,self.NcascEnd, self.Nsub)
+
+               maxY = np.ceil(max(self.trueTimeArr))
+               self.autoNorm = np.zeros((auto.shape))
+               self.autoNorm[:,0,0] = 
((auto[:,0,0]*maxY)/(self.count0*self.count0))-1
+               if self.numOfCH ==2:
+                       self.autoNorm[:,1,1] = 
((auto[:,1,1]*maxY)/(self.count1*self.count1))-1
+                       self.autoNorm[:,1,0] = 
((auto[:,1,0]*maxY)/(self.count1*self.count0))-1
+                       self.autoNorm[:,0,1] = 
((auto[:,0,1]*maxY)/(self.count0*self.count1))-1
+
+               return 
 
 class corrObject():
-    def __init__(self,filepath,parentFn):
-        #the container for the object.
-        self.parentFn = parentFn
-        self.type = 'corrObject'
-        self.filepath = str(filepath)
-        self.nameAndExt = os.path.basename(self.filepath).split('.')
-        self.name = self.nameAndExt[0]
-        self.ext = self.nameAndExt[-1]
-        self.autoNorm=[]
-        self.autotime=[]
-        self.model_autoNorm =[]
-        self.model_autotime = []
-        self.datalen= []
-        self.objId = self;
-        self.param = []
-        self.goodFit = True
-        self.fitted = False
-        self.checked = False
-        self.toFit = False
-       
-        #main.data.append(filepath);
-        #The master data object reference 
-        #main.corrObjectRef.append(self)
-        #The id in terms of how many things are loaded.
-        #self.unqID = main.label.numOfLoaded;
-        #main.label.numOfLoaded = main.label.numOfLoaded+1
-    def prepare_for_fit(self):
-        if self.parentFn.ch_check_ch0.isChecked() == True and self.ch_type == 
0:
-            self.toFit = True
-        if self.parentFn.ch_check_ch1.isChecked() == True and self.ch_type == 
1:
-            self.toFit = True
-            
-        if self.parentFn.ch_check_ch01.isChecked() == True and self.ch_type == 
2:
-            self.toFit = True
-        if self.parentFn.ch_check_ch10.isChecked() == True and self.ch_type == 
3:
-            self.toFit = True
-        #self.parentFn.modelFitSel.clear()
-        #for objId in self.parentFn.objIdArr:
-         #   if objId.toFit == True:
-          #      self.parentFn.modelFitSel.addItem(objId.name)
-        self.parentFn.updateFitList()
-    def residual(self, param, x, data,options):
-    
-        A = equation_(param, x,options)
-        residuals = data-A
-        return residuals
-    def fitToParameters(self):
-        self.parentFn.updateParamFirst()
-        self.parentFn.updateTableFirst()
-        self.parentFn.updateParamFirst()
-        
-
-        #convert line coordinate
-        
-        #Find the index of the nearest point in the scale.
-        
-        data = np.array(self.autoNorm).astype(np.float64).reshape(-1)
-        scale = np.array(self.autotime).astype(np.float64).reshape(-1)
-        indx_L = int(np.argmin(np.abs(scale -  self.parentFn.dr.xpos)))
-        indx_R = int(np.argmin(np.abs(scale -  self.parentFn.dr1.xpos)))
-        
-
-        res = minimize(self.residual, self.param, 
args=(scale[indx_L:indx_R+1],data[indx_L:indx_R+1], self.parentFn.def_options))
-        self.residualVar = res.residual
-        output = fit_report(self.param)
-        print 'residual',res.chisqr
-        if(res.chisqr>0.05):
-            print 'CAUTION DATA DID NOT FIT WELL CHI^2 >0.05',res.chisqr
-            self.goodFit = False
-        else:
-            self.goodFit = True
-        self.fitted = True
-        self.chisqr = res.chisqr
-        rowArray =[];
-        localTime = time.asctime( time.localtime(time.time()) )
-        rowArray.append(str(self.name))  
-        rowArray.append(str(localTime))
-        rowArray.append(str(self.parentFn.diffModEqSel.currentText()))
-        rowArray.append(str(self.parentFn.def_options['Diff_species']))
-        rowArray.append(str(self.parentFn.tripModEqSel.currentText()))
-        rowArray.append(str(self.parentFn.def_options['Triplet_species']))
-        rowArray.append(str(self.parentFn.dimenModSel.currentText()))
-        rowArray.append(str(scale[indx_L]))
-        rowArray.append(str(scale[indx_R]))
-
-        for key, value in self.param.iteritems() :
-            rowArray.append(str(value.value))
-            rowArray.append(str(value.stderr))
-            if key =='GN0':
-                try:
-                    rowArray.append(str(1/value.value))
-                except:
-                    rowArray.append(str(0))
-        
-        self.rowText = rowArray
-        
-        self.parentFn.updateTableFirst();
-        self.model_autoNorm = equation_(self.param, 
scale[indx_L:indx_R+1],self.parentFn.def_options)
-        self.model_autotime = scale[indx_L:indx_R+1]
-        self.parentFn.on_show()
-
-        #self.parentFn.axes.plot(model_autotime,model_autoNorm, 'o-')
-        #self.parentFn.canvas.draw();
-    
-    def load_from_file(self,channel):
-        tscale = [];
-        tdata = [];
-        if self.ext == 'SIN':
-            self.parentFn.objIdArr.append(self.objId)
-            proceed = False
-            
-            for line in csv.reader(open(self.filepath, 'rb'),delimiter='\t'):
-                
-                if proceed ==True:
-                    if line ==[]:
-                        break;
-                    
-                    
-                    tscale.append(float(line[0]))
-                    tdata.append(float(line[channel+1]))
-                else:
-                  
-                  if (str(line)  == "[\'[CorrelationFunction]\']"):
-                    proceed = True;
-            
-
-            self.autoNorm= np.array(tdata).astype(np.float64).reshape(-1)
-            self.autotime= np.array(tscale).astype(np.float64).reshape(-1)*1000
-            self.name = self.name+'-CH'+str(channel)
-            self.ch_type = channel;
-            self.prepare_for_fit()
-
-
-            self.param = self.parentFn.def_param
-            self.parentFn.fill_series_list()
-            
-        
-                    #Where we add the names.
-
-
-        if self.ext == 'csv':
-            
-            self.parentFn.objIdArr.append(self)
-            
-            c = 0
-            
-            for line in csv.reader(open(self.filepath, 'rb')):
-                if (c >0):
-                    tscale.append(line[0])
-                    tdata.append(line[1])
-                c +=1;
-
-            self.autoNorm= np.array(tdata).astype(np.float64).reshape(-1)
-            self.autotime= np.array(tscale).astype(np.float64).reshape(-1)
-            self.ch_type = 0
-            self.datalen= len(tdata)
-            self.objId.prepare_for_fit()
+       def __init__(self,filepath,parentFn):
+               #the container for the object.
+               self.parentFn = parentFn
+               self.type = 'corrObject'
+               self.filepath = str(filepath)
+               self.nameAndExt = os.path.basename(self.filepath).split('.')
+               self.name = self.nameAndExt[0]
+               self.ext = self.nameAndExt[-1]
+               self.autoNorm=[]
+               self.autotime=[]
+               self.model_autoNorm =[]
+               self.model_autotime = []
+               self.datalen= []
+               self.objId = self;
+               self.param = []
+               self.goodFit = True
+               self.fitted = False
+               self.checked = False
+               self.clicked = False
+               self.toFit = False
+               self.kcount = None
+               self.filter = False
+          
+       def prepare_for_fit(self):
+               if self.parentFn.ch_check_ch0.isChecked() == True and 
self.ch_type == 0:
+                       self.toFit = True
+               if self.parentFn.ch_check_ch1.isChecked() == True and 
self.ch_type == 1:
+                       self.toFit = True
+                       
+               if self.parentFn.ch_check_ch01.isChecked() == True and 
self.ch_type == 2:
+                       self.toFit = True
+               if self.parentFn.ch_check_ch10.isChecked() == True and 
self.ch_type == 3:
+                       self.toFit = True
+               #self.parentFn.modelFitSel.clear()
+               #for objId in self.parentFn.objIdArr:
+                #   if objId.toFit == True:
+                 #      self.parentFn.modelFitSel.addItem(objId.name)
+               self.parentFn.updateFitList()
+       def residual(self, param, x, data,options):
+       
+               A = equation_(param, x,options)
+               residuals = data-A
+               return residuals
+       def fitToParameters(self):
+               #self.parentFn.updateParamFirst()
+               #self.parentFn.updateTableFirst()
+               #self.parentFn.updateParamFirst()
+               
+
+               #Populate param for lmfit.
+               param = Parameters()
+               #self.def_param.add('A1', value=1.0, min=0,max=1.0, vary=False)
+               for art in self.param:
+                       
+                       if self.param[art]['to_show'] == True:
+                               
+                               param.add(art, 
value=float(self.param[art]['value']), min=float(self.param[art]['minv']) 
,max=float(self.param[art]['maxv']), vary=self.param[art]['vary']);
+                               
+               
+               #Find the index of the nearest point in the scale.
+               
+               data = np.array(self.autoNorm).astype(np.float64).reshape(-1)
+               scale = np.array(self.autotime).astype(np.float64).reshape(-1)
+               self.indx_L = int(np.argmin(np.abs(scale -  
self.parentFn.dr.xpos)))
+               self.indx_R = int(np.argmin(np.abs(scale -  
self.parentFn.dr1.xpos)))
+               
+               #Run the fitting.
+               res = minimize(self.residual, param, 
args=(scale[self.indx_L:self.indx_R+1],data[self.indx_L:self.indx_R+1], 
self.parentFn.def_options))
+
+               #Repopulate the parameter object.
+               for art in self.param:
+                       if self.param[art]['to_show'] == True and 
self.param[art]['calc'] == False:
+                               self.param[art]['value'] = param[art].value
+                               self.param[art]['stderr'] = 
float(param[art].stderr)
+                               
+               #Extra parameters, which are not fit or inherited.
+               #self.param['N_FCS']['value'] = 
np.round(1/self.param['GN0']['value'],4)
+
+
+               self.residualVar = res.residual
+               output = fit_report(param)
+               print 'residual',res.chisqr
+               if(res.chisqr>0.05):
+                       print 'CAUTION DATA DID NOT FIT WELL CHI^2 
>0.05',res.chisqr
+                       self.goodFit = False
+               else:
+                       self.goodFit = True
+               self.fitted = True
+               self.chisqr = res.chisqr
+               
+               self.localTime =  time.asctime( time.localtime(time.time()) )
+               
+               
+               
+               #self.parentFn.updateTableFirst();
+               self.model_autoNorm = equation_(param, 
scale[self.indx_L:self.indx_R+1],self.parentFn.def_options)
+               self.model_autotime = scale[self.indx_L:self.indx_R+1]
+               #self.parentFn.on_show()
+
+               #self.parentFn.axes.plot(model_autotime,model_autoNorm, 'o-')
+               #self.parentFn.canvas.draw();
+       
    def load_from_file(self,channel):
        """Load correlation data for *channel* from self.filepath into this object.

        Dispatches on the file extension ``self.ext``:

        * ``'fcs'``  -- tab-separated export that may contain several
          correlation arrays; each array becomes a new corrObject registered
          with the parent.
        * ``'SIN'``  -- sectioned text format; fills this object and derives
          the average count rate (kcount, kHz) from the intensity history.
        * ``'csv'``/``'CSV'`` -- this software's own export, version 1
          (single channel) or version 2 (one or two channels plus metadata).

        NOTE(review): the parsing below is Python 2 style (``r_obj.next()``,
        ``print`` statements elsewhere, text CSV opened ``'rb'``) -- confirm
        the target interpreter before porting.
        """
        # Correlation curve buffers (tscale/tdata) and intensity-trace
        # buffers (int_tscale/int_tdata), filled by the branch that matches.
        tscale = [];
        tdata = [];
        int_tscale =[];
        int_tdata=[];

        if self.ext == 'fcs':
            corrObj = self
            text =[0]
            r_obj = csv.reader(open(corrObj.filepath, 'rb'),delimiter='\t')
            # Skip the title block; the third row carries "<key> = <name>".
            title = r_obj.next()
            line = r_obj.next()
            line = r_obj.next()
            name = line[1].split(' = ')[1]
            read = True
            # One pass per correlation array in the file; each pass creates
            # its own corrObject and appends it to the parent's object list.
            while  read == True:
                corrObj = corrObject(self.filepath,self.parentFn);
                self.parentFn.objIdArr.append(corrObj)

                corrObj.name = name

                line = r_obj.next()
                text =[]
                for part in line:
                    if part != '':
                            text.append(part)

                #Reads to first correlation array text.
                # NOTE(review): the right-hand comparison to int(0) can never
                # be true for a string token (in Python 2, '0' != 0), so that
                # half of the condition is dead -- '0' was probably intended;
                # confirm against sample .fcs files.
                while  text[0].split(' = ')[0] != 'CorrelationArray' or text[0].split(' = ')[1] == int(0):
                    line = r_obj.next()
                    text =[]
                    for part in line:
                        if part != '':
                            text.append(part)
                line = r_obj.next()
                tdata = []
                tscale = []

                # Collect "<time> <value>" rows until the photon-count
                # histogram header; running off the end of the file ends the
                # outer while loop as well.
                while  text[0].split(' = ')[0] != 'PhotonCountHistogramArraySize':
                    try:
                        line = r_obj.next()
                    except:
                        # Presumably StopIteration at end of file -- stop
                        # reading after this array.
                        read = False
                        break;
                    text =[]
                    for part in line:
                        if part != '':
                            text.append(part)
                    if text.__len__() >1:
                        tscale.append(float(text[0]))
                        tdata.append(float(text[1]))

                if tdata.__len__() == 0:
                    # Empty array: discard the object registered above.
                    corrObj = []
                    self.parentFn.objIdArr.pop(-1)
                    break;

                # NOTE(review): the caller's channel argument is ignored in
                # this branch; .fcs data is always filed as channel 0 --
                # confirm that is intended.
                channel = 0
                corrObj.siblings = None
                corrObj.autoNorm= np.array(tdata).astype(np.float64).reshape(-1)
                # Lag axis multiplied by 1000 -- presumably s -> ms; confirm units.
                corrObj.autotime= np.array(tscale).astype(np.float64).reshape(-1)*1000
                corrObj.name = corrObj.name+'-CH'+str(channel)
                corrObj.ch_type = channel;
                corrObj.param = copy.deepcopy(self.parentFn.def_param)
                self.parentFn.fill_series_list()

        if self.ext == 'SIN':
            # NOTE(review): self.objId is not assigned anywhere in this
            # method -- confirm it is set on the instance before this branch
            # runs, otherwise this raises AttributeError.
            self.parentFn.objIdArr.append(self.objId)
            # Simple state machine: proceed is False / 'correlated' /
            # 'intensity', switched by the section header rows below and
            # reset to False by a blank row.
            proceed = False

            for line in csv.reader(open(self.filepath, 'rb'),delimiter='\t'):

                if proceed =='correlated':
                    if line ==[]:
                        proceed =False;
                    else:
                        tscale.append(float(line[0]))
                        tdata.append(float(line[channel+1]))
                if proceed =='intensity':

                    if line ==[]:
                        proceed=False;
                    elif line.__len__()> 1:

                        int_tscale.append(float(line[0]))
                        int_tdata.append(float(line[channel+1]))
                if (str(line)  == "[\'[CorrelationFunction]\']"):
                    proceed = 'correlated';
                elif (str(line)  == "[\'[IntensityHistory]\']"):
                    proceed = 'intensity';

            self.siblings = None
            self.autoNorm= np.array(tdata).astype(np.float64).reshape(-1)
            # Lag axis multiplied by 1000 -- presumably s -> ms; confirm units.
            self.autotime= np.array(tscale).astype(np.float64).reshape(-1)*1000
            self.name = self.name+'-CH'+str(channel)
            self.ch_type = channel;
            #self.prepare_for_fit()

            #Average counts per bin. For it to be seconds (Hz), must divide by duration.
            # NOTE(review): raises IndexError/ZeroDivisionError when the file
            # has no (or a single) IntensityHistory row -- confirm inputs.
            unit = int_tscale[-1]/(int_tscale.__len__()-1)
            #And to be in kHz we divide by 1000.
            self.kcount = np.average(np.array(int_tdata)/unit)/1000
            self.param = copy.deepcopy(self.parentFn.def_param)
            self.parentFn.fill_series_list()

        if self.ext == 'csv' or self.ext =='CSV':
            r_obj = csv.reader(open(self.filepath, 'rb'))
            # A first row with more than one column is a versioned header;
            # otherwise this is the original single-channel v1 layout.
            line_one = r_obj.next()
            if line_one.__len__()>1:
                    if float(line_one[1]) == 2:
                        version = 2
                    else:
                        # NOTE(review): for an unknown version, `version`
                        # stays unbound and the comparisons below raise
                        # NameError -- confirm this path cannot occur.
                        print 'version not known:',line_one[1]
            else:
                version = 1

            if version == 1:
                self.parentFn.objIdArr.append(self)

                c = 0
                # Re-read the file, skipping the header row (c == 0);
                # remaining rows are time, value pairs.
                for line in csv.reader(open(self.filepath, 'rb')):
                    if (c >0):
                        tscale.append(line[0])
                        tdata.append(line[1])
                    c +=1;

                self.autoNorm= np.array(tdata).astype(np.float64).reshape(-1)
                self.autotime= np.array(tscale).astype(np.float64).reshape(-1)
                self.name = self.name+'-CH'+str(0)
                self.ch_type = 0
                self.datalen= len(tdata)

                self.param = copy.deepcopy(self.parentFn.def_param)
                self.parentFn.fill_series_list()
            if version >= 2:
                # Second header row carries the number of channels stored.
                numOfCH = float(r_obj.next()[1])

                if numOfCH == 1:
                    self.parentFn.objIdArr.append(self)
                    self.type =str(r_obj.next()[1])
                    self.ch_type = int(r_obj.next()[1])
                    self.name = self.name+'-CH'+str(self.ch_type)

                    line = r_obj.next()

                    # Optional metadata rows until the 'Time (ns)' header;
                    # each recognised key is copied onto the object.
                    while  line[0] != 'Time (ns)':
                        if line[0] == 'kcount':
                            self.kcount = float(line[1])
                        if line[0] == 'numberNandB':
                            self.numberNandB = float(line[1])
                        if line[0] == 'brightnessNandB':
                            self.brightnessNandB =  float(line[1])
                        if line[0] == 'CV':
                            self.CV =  float(line[1])
                        if line[0] == 'carpet pos':
                            carpet = int(line[1])
                        if line[0] == 'pc':
                            pc_text = int(line[1])
                        if line[0] == 'pbc_f0':
                            self.pbc_f0 = float(line[1])
                        if line[0] == 'pbc_tb':
                            self.pbc_tb = float(line[1])

                        line = r_obj.next()

                    # NOTE(review): pc_text is only bound when a 'pc' row was
                    # present above; otherwise this raises NameError --
                    # confirm every v2 file carries a 'pc' row.
                    if pc_text != False:
                        self.name = self.name +'_pc_m'+str(pc_text)

                    # Correlation curve rows until the 'end' sentinel; one
                    # blank/header row is consumed first.
                    tscale = []
                    tdata = []
                    null = r_obj.next()
                    line = r_obj.next()
                    while  line[0] != 'end':

                        tscale.append(line[0])
                        tdata.append(line[1])
                        line = r_obj.next()

                    self.autoNorm= np.array(tdata).astype(np.float64).reshape(-1)
                    self.autotime= np.array(tscale).astype(np.float64).reshape(-1)
                    self.siblings = None
                    self.param = copy.deepcopy(self.parentFn.def_param)
                    self.parentFn.fill_series_list()

                if numOfCH == 2:
                    # Two-channel layout: three curves (columns 1-3 of the
                    # data rows) sharing one time axis; this object takes
                    # column 1, corrObj2 column 2, corrObj3 column 3.
                    corrObj2 = corrObject(self.filepath,self.parentFn);
                    corrObj3 = corrObject(self.filepath,self.parentFn);

                    self.parentFn.objIdArr.append(self)
                    self.parentFn.objIdArr.append(corrObj2)
                    self.parentFn.objIdArr.append(corrObj3)

                    line_type = r_obj.next()
                    self.type = str(line_type[1])
                    corrObj2.type = str(line_type[1])
                    corrObj3.type = str(line_type[1])

                    line_ch = r_obj.next()
                    self.ch_type = int(line_ch[1])
                    corrObj2.ch_type = int(line_ch[2])
                    corrObj3.ch_type = int(line_ch[3])

                    self.name = self.name+'-CH'+str(self.ch_type)
                    corrObj2.name = corrObj2.name+'-CH'+str(corrObj2.ch_type)
                    corrObj3.name = corrObj3.name+'-CH'+str(corrObj3.ch_type)

                    line = r_obj.next()
                    # Metadata rows: columns 1/2 feed the first two objects;
                    # corrObj3 only receives a CV value.
                    while  line[0] != 'Time (ns)':
                        if line[0] == 'kcount':
                            self.kcount = float(line[1])
                            corrObj2.kcount = float(line[2])
                        if line[0] == 'numberNandB':
                            self.numberNandB = float(line[1])
                            corrObj2.numberNandB =  float(line[2])
                        if line[0] == 'brightnessNandB':
                            self.brightnessNandB =  float(line[1])
                            corrObj2.brightnessNandB =  float(line[2])
                        if line[0] == 'CV':
                            self.CV =  float(line[1])
                            corrObj2.CV = float(line[2])
                            corrObj3.CV = float(line[3])
                        if line[0] == 'carpet pos':
                            self.carpet_position = int(line[1])
                        if line[0] == 'pc':
                            pc_text = int(line[1])
                        if line[0] == 'pbc_f0':
                            self.pbc_f0 = float(line[1])
                            corrObj2.pbc_f0 = float(line[2])
                        if line[0] == 'pbc_tb':
                            self.pbc_tb = float(line[1])
                            corrObj2.pbc_tb = float(line[2])

                        line = r_obj.next()

                    # NOTE(review): same possibly-unbound pc_text risk as the
                    # single-channel branch above.
                    if pc_text != False:
                        self.name = self.name +'_pc_m'+str(pc_text)
                        corrObj2.name = corrObj2.name +'_pc_m'+str(pc_text)
                        corrObj3.name = corrObj3.name +'_pc_m'+str(pc_text)

                    null = r_obj.next()
                    line = r_obj.next()
                    tscale = []
                    tdata0 = []
                    tdata1 = []
                    tdata2 = []
                    # Data rows: shared time axis plus one column per curve,
                    # terminated by the 'end' sentinel.
                    while  line[0] != 'end':

                        tscale.append(line[0])
                        tdata0.append(line[1])
                        tdata1.append(line[2])
                        tdata2.append(line[3])
                        line = r_obj.next()

                    self.autotime= np.array(tscale).astype(np.float64).reshape(-1)
                    corrObj2.autotime= np.array(tscale).astype(np.float64).reshape(-1)
                    corrObj3.autotime= np.array(tscale).astype(np.float64).reshape(-1)

                    self.autoNorm= np.array(tdata0).astype(np.float64).reshape(-1)
                    corrObj2.autoNorm= np.array(tdata1).astype(np.float64).reshape(-1)
                    corrObj3.autoNorm= np.array(tdata2).astype(np.float64).reshape(-1)

                    # Each of the three objects knows its two companions.
                    self.siblings = [corrObj2,corrObj3]
                    corrObj2.siblings = [self,corrObj3]
                    corrObj3.siblings = [self,corrObj2]

                    self.param = copy.deepcopy(self.parentFn.def_param)
                    corrObj2.param = copy.deepcopy(self.parentFn.def_param)
                    corrObj3.param = copy.deepcopy(self.parentFn.def_param)
                    self.parentFn.fill_series_list()
+                               
 
 
 
diff --git a/pycorrfit/readfiles/read_pt3_scripts/fitting_methods.py 
b/pycorrfit/readfiles/read_pt3_scripts/fitting_methods.py
new file mode 100644
index 0000000..a65359d
--- /dev/null
+++ b/pycorrfit/readfiles/read_pt3_scripts/fitting_methods.py
@@ -0,0 +1,349 @@
+import numpy as np
+import copy
+"""FCS Bulk Correlation Software
+
+    Copyright (C) 2015  Dominic Waithe
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+"""
def initialise_fcs(int_obj):
    """Attach the default fit options and parameter table to *int_obj*.

    Creates three attributes on *int_obj*:

    * ``def_options`` -- default model choices (diffusion equation and
      species count, triplet equation and species count, dimensionality).
    * ``def_param``   -- one record per fit parameter with its display
      alias, starting value, bounds (``minv``/``maxv``), ``vary`` flag,
      ``to_show`` visibility and whether it is a derived (``calc``)
      quantity.
    * ``order_list``  -- the display order of those parameters.
    """

    # Default model selections.
    int_obj.def_options = {}
    int_obj.def_options['Diff_eq'] = 1
    int_obj.def_options['Diff_species'] = 1
    int_obj.def_options['Triplet_eq'] = 1
    int_obj.def_options['Triplet_species'] = 1
    int_obj.def_options['Dimen'] = 1

    def _rec(alias, value, minv, maxv, vary=True, calc=False, **extra):
        # Build one parameter record; every record carries the same base
        # keys so the table code can treat them uniformly.
        rec = {'alias': alias, 'value': value, 'minv': minv, 'maxv': maxv,
               'vary': vary, 'to_show': True, 'calc': calc}
        rec.update(extra)
        return rec

    p = int_obj.def_param = {}

    # Relative amplitudes of the diffusing species (fixed by default).
    # A1 historically carries an extra 'stdev' key; preserved for
    # backward compatibility with code that reads it.
    p['A1'] = _rec('A1', 1.0, 0.0, 1.0, vary=False, stdev=False)
    p['A2'] = _rec('A2', 1.0, 0.0, 1.0, vary=False)
    p['A3'] = _rec('A3', 1.0, 0.0, 1.0, vary=False)
    # Baseline offset of the correlation curve.
    p['offset'] = _rec('offset', 0.01, -1.0, 1.0)
    # Amplitude G(0); the particle number N is derived as 1/GN0.
    p['GN0'] = _rec('GN0', 1, 0.001, 1.0)
    for i in ('1', '2', '3'):
        # Lateral (xy) diffusion times.
        p['txy' + i] = _rec('txy' + i, 0.01, 0.001, 2000.0)
        # Anomalous-diffusion exponents.
        p['alpha' + i] = _rec('alpha' + i, 1.0, 0.0, 2.0)
        # Axial (z) diffusion times.
        p['tz' + i] = _rec('tz' + i, 1.0, 0.0, 1.0)
        # Axial ratio coefficients.
        p['AR' + i] = _rec('AR' + i, 1.0, 0.001, 1000.0)
        # Triplet-state parameters for the two alternative equations.
        p['B' + i] = _rec('B' + i, 1.0, 0.001, 1000.0)
        p['T' + i] = _rec('T' + i, 1.0, 0.0, 1000.0)
    # Triplet correlation times (note the smaller default for species 3).
    p['tauT1'] = _rec('tauT1', 0.055, 0.001, 1000.0)
    p['tauT2'] = _rec('tauT2', 0.055, 0.001, 1000.0)
    p['tauT3'] = _rec('tauT3', 0.005, 0.001, 1000.0)

    # Derived, read-only quantities (calc=True), filled in after fitting
    # or from file metadata.
    p['N_FCS'] = _rec('N (FCS)', 0.0, 0.001, 1000.0, calc=True)
    p['cpm'] = _rec('cpm (kHz)', 0.0, 0.001, 1000.0, calc=True)
    p['N_mom'] = _rec('N (mom)', 0.0, 0.001, 1000.0, calc=True)
    p['bri'] = _rec('bri (kHz)', 0.0, 0.001, 1000.0, calc=True)
    p['CV'] = _rec('Coincidence', 0.0, 0.001, 1000.0, calc=True)
    p['f0'] = _rec('PBC f0', 0.0, 0.001, 1000.0, calc=True)
    p['overtb'] = _rec('PBC tb', 0.0, 0.001, 1000.0, calc=True)
    p['ACAC'] = _rec('ACAC', 0.0, 0.001, 1000.0, calc=True)
    p['ACCC'] = _rec('ACCC', 0.0, 0.001, 1000.0, calc=True)

    # Display order used by the parameter table.
    int_obj.order_list = ['offset', 'GN0', 'N_FCS', 'cpm',
                          'A1', 'A2', 'A3', 'txy1', 'txy2', 'txy3',
                          'tz1', 'tz2', 'tz3', 'alpha1', 'alpha2', 'alpha3',
                          'AR1', 'AR2', 'AR3', 'B1', 'B2', 'B3',
                          'T1', 'T2', 'T3', 'tauT1', 'tauT2', 'tauT3',
                          'N_mom', 'bri', 'CV', 'f0', 'overtb',
                          'ACAC', 'ACCC']
+
+
def decide_which_to_show(int_obj):
    """Set the 'to_show' flag on the selected object's parameters.

    Hides every currently visible parameter, then re-shows the subset
    implied by the interface choices: number of diffusing species,
    dimensionality, the diffusion equation and the triplet-state equation.
    Finally refreshes the derived (calculated) parameters.
    """
    for art in int_obj.objId_sel.param:
        if int_obj.objId_sel.param[art]['to_show'] == True:
            int_obj.objId_sel.param[art]['to_show'] = False

        # NOTE(review): everything below sits INSIDE the hide loop, so the
        # show logic runs once per parameter key.  The final flag state is
        # the same as running it once after the loop (each pass re-asserts
        # the full chosen set), but this looks like an indentation slip --
        # confirm against upstream before restructuring.
        # Offset and amplitude are always shown.
        int_obj.objId_sel.param[ 'offset']['to_show'] = True
        int_obj.objId_sel.param[ 'GN0']['to_show'] = True

        # Species counts are read straight from the spin-box widgets.
        int_obj.def_options['Diff_species'] = int_obj.diffNumSpecSpin.value()
        int_obj.def_options['Triplet_species'] =int_obj.tripNumSpecSpin.value()

        #Optional parameters
        for i in range(1,int_obj.def_options['Diff_species']+1):
            int_obj.objId_sel.param['A'+str(i)]['to_show'] = True
            int_obj.objId_sel.param['txy'+str(i)]['to_show'] = True
            int_obj.objId_sel.param['alpha'+str(i)]['to_show'] = True
            #2 in this case corresponds to 3D:
            if int_obj.def_options['Dimen'] == 2:
                if int_obj.def_options['Diff_eq'] == 1:
                    int_obj.objId_sel.param['tz'+str(i)]['to_show'] = True

                if int_obj.def_options['Diff_eq'] == 2:
                    int_obj.objId_sel.param[ 'AR'+str(i)]['to_show'] = True

        if int_obj.def_options['Triplet_eq'] == 2:
                #Triplet State equation1
                for i in range(1,int_obj.tripNumSpecSpin.value()+1):
                    int_obj.objId_sel.param['B'+str(i)]['to_show'] = True
                    int_obj.objId_sel.param['tauT'+str(i)]['to_show'] = True

        if int_obj.def_options['Triplet_eq'] == 3:
                #Triplet State equation2
                for i in range(1,int_obj.tripNumSpecSpin.value()+1):
                    int_obj.objId_sel.param['T'+str(i)]['to_show'] = True
                    int_obj.objId_sel.param['tauT'+str(i)]['to_show'] = True
    # Recompute derived quantities once the visibility flags settle.
    calc_param_fcs(int_obj,objId=int_obj.objId_sel)
+
def update_each(int_obj, text):
    """Populate parameter *text* from its interface widgets.

    Reads the spin boxes and checkbox named ``<text>_value``,
    ``<text>_min``, ``<text>_max`` and ``<text>_vary`` on *int_obj* and
    copies their state into the selected object's parameter record.  If
    anything is missing -- e.g. a newly added option with no widgets yet --
    the parameter falls back to a deep copy of the default.
    """
    try:
        # getattr() instead of the original exec() string evaluation: same
        # attribute lookup, no code-injection surface, and it also works on
        # Python 3 (where exec cannot rebind function locals).
        valueV = getattr(int_obj, text + "_value").value()
        minV = getattr(int_obj, text + "_min").value()
        maxV = getattr(int_obj, text + "_max").value()
        varyV = getattr(int_obj, text + "_vary").isChecked()

        int_obj.objId_sel.param[text]['value'] = valueV
        int_obj.objId_sel.param[text]['minv'] = minV
        int_obj.objId_sel.param[text]['maxv'] = maxV
        int_obj.objId_sel.param[text]['vary'] = varyV
    except Exception:
        # Best-effort by design: any missing widget or table entry means
        # "new option" -> reset the record from the defaults.
        int_obj.objId_sel.param[text] = copy.deepcopy(int_obj.def_param[text])
def update_param_fcs(int_obj):
    """Refresh the selected data set's parameters from the menu options.

    Decides which parameters are visible, pulls each visible parameter's
    value/bounds/vary state from the interface widgets, then recomputes
    the derived (calculated) parameters.  No-op when nothing is selected.
    """
    # PEP 8: identity test for None instead of the original `== None`.
    if int_obj.objId_sel is None:
        return
    # Work out which parameters the current model options expose.
    decide_which_to_show(int_obj)
    # Pull every visible parameter from its interface widgets.
    for art in int_obj.objId_sel.param:
        if int_obj.objId_sel.param[art]['to_show']:
            update_each(int_obj, art)
    # Recompute derived quantities (N, cpm, ...) for the selection.
    calc_param_fcs(int_obj, objId=int_obj.objId_sel)
def calc_param_fcs(int_obj,objId):
    """Recompute the derived (read-only) parameters of *objId*.

    Each quantity is computed best-effort: when its source attribute is
    missing (file did not provide it / fit not yet run) the try block
    fails and the parameter is reset and/or hidden.

    NOTE(review): the bare excepts below are deliberate best-effort guards
    (missing attributes, GN0 of zero), but they also swallow genuine
    errors -- confirm intent before narrowing them.
    """
    #Calculated parameters.
    # Particle number N = 1/G(0).
    try:
        objId.param['N_FCS']['value'] = 1/objId.param['GN0']['value']
        objId.param['N_FCS']['to_show'] = True
    except:
        objId.param['N_FCS']['value'] = 1
        objId.param['N_FCS']['to_show'] = False

    # Counts per molecule: kcount/(1/GN0), i.e. kcount * GN0.
    try:
        objId.param['cpm']['value'] = float(objId.kcount)/(1/objId.param['GN0']['value'])
        objId.param['cpm']['to_show'] = True
    except:
        objId.param['cpm']['value'] = 1
        objId.param['cpm']['to_show'] = False
    # Number & brightness (moment analysis) values, copied when present.
    try:
        objId.param['N_mom']['value'] =  float(objId.numberNandB)
        objId.param['N_mom']['to_show'] = True
    except:
        objId.param['N_mom']['value'] =  1
        objId.param['N_mom']['to_show'] = False
    try:
        objId.param['bri']['value'] = float(objId.brightnessNandB)
        objId.param['bri']['to_show'] = True
    except:
        objId.param['bri']['value'] = 1
        objId.param['bri']['to_show'] = False
    # Coincidence value, shown only when available.
    try:
        objId.param['CV']['value'] = float(objId.CV)
        objId.param['CV']['to_show'] = True
    except:
        pass
    # pbc_f0/pbc_tb metadata, shown only when available.
    try:
        objId.param['f0']['value'] = float(objId.pbc_f0)
        objId.param['f0']['to_show'] = True
        objId.param['overtb']['value'] = float(objId.pbc_tb)
        objId.param['overtb']['to_show'] = True
    except:
        pass

    # Amplitude ratios against the sibling curves, only attempted once the
    # first sibling has been fitted and this object is not ch_type 2
    # (presumably the cross-correlation channel -- confirm).
    if int_obj.objIdArr != [] and objId.siblings !=None and objId.ch_type != 2:
                if objId.siblings[0].fitted == True:
                    objId.param['ACAC']['value'] = float(objId.param['GN0']['value'])/float(objId.siblings[0].param['GN0']['value'])
                    objId.param['ACAC']['to_show'] = True
                    objId.param['ACCC']['value'] = float(objId.param['GN0']['value'])/float(objId.siblings[1].param['GN0']['value'])
                    objId.param['ACCC']['to_show'] = True
+
def equation_(param, tc, options):
    """Correlation model G(tc) used for FCS curve fitting.

    The model is  G(tc) = offset + GN0 * GDiff(tc) * GT(tc)  where GDiff
    is an (anomalous) diffusion term for up to three species and GT is an
    optional triplet/dark-state correction for up to three dark states.

    Parameters
    ----------
    param : mapping of str to objects with a ``.value`` attribute
        Fit parameters (e.g. ``lmfit.Parameters``).  For multi-species
        models the amplitudes 'A2' (and 'A3') are overwritten in place so
        that all amplitudes sum to one.
    tc : float or numpy array
        Lag time(s) tau at which the model is evaluated.
    options : dict
        Model switches:
          'Dimen'           2 = 3D diffusion term, 1 = 2D term
          'Diff_eq'         3D only: 1 = explicit z diffusion time tz,
                            2 = z time expressed via aspect ratio AR
          'Diff_species'    number of diffusing species (1, 2 or 3)
          'Triplet_eq'      1 = no triplet,
                            2 = additive form 1 + sum B_i exp(-tc/tauT_i),
                            3 = normalized form
                                1 - sum T_i + sum T_i exp(-tc/tauT_i)
          'Triplet_species' number of dark states

    Returns
    -------
    float or numpy array
        The model evaluated at ``tc``.

    Raises
    ------
    ValueError
        If the option combination does not select a valid model (the
        original code raised NameError from an undefined GDiff/GT here).
    """
    offset = param['offset'].value
    GN0 = param['GN0'].value
    dimen = options['Dimen']
    diff_eq = options['Diff_eq']

    def _diff_term(i):
        """Single-species diffusion factor for species number *i*."""
        txy = param['txy%d' % i].value
        alpha = param['alpha%d' % i].value
        # Lateral (xy) anomalous-diffusion factor.
        gxy = (1 + (tc / txy) ** alpha) ** -1
        if dimen == 1:
            # Pure 2D model: no axial contribution.
            return gxy
        if diff_eq == 1:
            # Axial factor with an explicit z diffusion time.
            tz = param['tz%d' % i].value
            return gxy * (1 + tc / tz) ** -0.5
        # diff_eq == 2: axial time expressed through the aspect ratio AR.
        AR = param['AR%d' % i].value
        return gxy * (1 + tc / (AR ** 2 * txy)) ** -0.5

    if dimen not in (1, 2) or (dimen == 2 and diff_eq not in (1, 2)):
        raise ValueError("invalid 'Dimen'/'Diff_eq' combination")

    nspecies = options['Diff_species']
    A1 = param['A1'].value
    if nspecies == 1:
        # One diffusing species.
        GDiff = A1 * _diff_term(1)
    elif nspecies == 2:
        # Two diffusing species; constrain amplitudes to sum to one
        # (A2 is overwritten in param, as in the original code).
        param['A2'].value = 1.0 - A1
        A2 = param['A2'].value
        GDiff = A1 * _diff_term(1) + A2 * _diff_term(2)
    elif nspecies == 3:
        # Three diffusing species.
        A3 = param['A3'].value
        param['A2'].value = 1.0 - A1 - A3
        A2 = param['A2'].value
        # Reproduces the original normalization; it leaves A3 unchanged
        # because 1 - (1 - A1 - A3) - A1 == A3.
        param['A3'].value = 1.0 - A2 - A1
        A3 = param['A3'].value
        GDiff = (A1 * _diff_term(1) + A2 * _diff_term(2)
                 + A3 * _diff_term(3))
    else:
        raise ValueError("invalid 'Diff_species': %r" % (nspecies,))

    triplet_eq = options['Triplet_eq']
    ntriplet = options['Triplet_species']
    if triplet_eq == 1:
        # No dark states.
        GT = 1
    elif triplet_eq == 2:
        # Additive form:  GT = 1 + sum_i B_i * exp(-tc / tauT_i)
        GT = 1
        for i in range(1, ntriplet + 1):
            B = param['B%d' % i].value
            tauT = param['tauT%d' % i].value
            GT = GT + B * np.exp(-tc / tauT)
    elif triplet_eq == 3:
        # Normalized form:
        #   GT = 1 - sum_i T_i + sum_i T_i * exp(-tc / tauT_i)
        # BUGFIX: the original assigned T2/T3 and tauT2/tauT3 to the wrong
        # names (always T1/tauT1), so GT raised NameError for more than
        # one dark state.
        GT = 1
        for i in range(1, ntriplet + 1):
            T = param['T%d' % i].value
            tauT = param['tauT%d' % i].value
            GT = GT - T + T * np.exp(-tc / tauT)
    else:
        raise ValueError("invalid 'Triplet_eq': %r" % (triplet_eq,))

    return offset + (GN0 * GDiff * GT)
\ No newline at end of file
diff --git a/pycorrfit/readfiles/read_pt3_scripts/import_methods.py 
b/pycorrfit/readfiles/read_pt3_scripts/import_methods.py
index 71b1abb..4736a8f 100644
--- a/pycorrfit/readfiles/read_pt3_scripts/import_methods.py
+++ b/pycorrfit/readfiles/read_pt3_scripts/import_methods.py
@@ -1,5 +1,6 @@
 import struct
 import numpy as np
+import csv
 
 
 """FCS Bulk Correlation Software
@@ -20,6 +21,36 @@ import numpy as np
     with this program; if not, write to the Free Software Foundation, Inc.,
     51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 """
def csvimport(filepath):
    """Import photon data from a CSV file in the "pt uncorrelated" format.

    Expected layout::

        version,2
        type,pt uncorrelated
        resolution,<float>
        <channel>,<true time>,<micro time>
        ...
        end

    Parameters
    ----------
    filepath : str
        Path to the CSV file.

    Returns
    -------
    tuple
        ``(chanArr, trueTimeArr, dTimeArr, resolution)`` - numpy arrays of
        channel numbers (int), macroscopic arrival times (float) and micro
        times (int), plus the time resolution (float).  Returns
        ``(None, None, None, None)`` when the version or data type is not
        recognized.
    """
    # 'with' guarantees the handle is closed (the original leaked it).
    # Text mode 'r' is used; the original's 'rb' only works on Python 2.
    with open(filepath, 'r') as fobj:
        reader = csv.reader(fobj)
        header = next(reader)
        # BUGFIX: 'version' was undefined (NameError) when the header was
        # short or carried an unknown version number.
        version = None
        if len(header) > 1:
            if float(header[1]) == 2:
                version = 2
            else:
                print('version not known: %s' % header[1])
        if version != 2:
            return None, None, None, None
        # Renamed from 'type', which shadowed the builtin.
        ftype = str(next(reader)[1])
        if ftype != "pt uncorrelated":
            print('type not recognised')
            return None, None, None, None
        resolution = float(next(reader)[1])
        chan_arr = []
        true_time_arr = []
        d_time_arr = []
        line = next(reader)
        # Rows are (channel, true time, micro time) until the 'end' marker.
        while line[0] != 'end':
            chan_arr.append(int(line[0]))
            true_time_arr.append(float(line[1]))
            d_time_arr.append(int(line[2]))
            line = next(reader)
        return (np.array(chan_arr), np.array(true_time_arr),
                np.array(d_time_arr), resolution)
 
 def pt3import(filepath):
     """The file import for the .pt3 file"""
@@ -181,6 +212,7 @@ def pt3import(filepath):
         truetime = (truensync * syncperiod) + (dtime*Resolution);
         trueTimeArr[b] = truetime
         dTimeArr[b] = dtime
+        
         #f1.write(str(truensync)+" "+str(truetime)+"\n")
     f.close();
     #f1.close();
diff --git a/pycorrfit/readfiles/util.py b/pycorrfit/readfiles/util.py
new file mode 100644
index 0000000..9bc6c3e
--- /dev/null
+++ b/pycorrfit/readfiles/util.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+"""
+utility functions for reading data
+"""
+from __future__ import division
+
+import numpy as np
+
def downsample_trace(trace, bestlength=500):
    """
    Reduce the length of a trace so that there is no undersampling on a
    regular computer screen and the data size is not too large.

    Consecutive groups of ``teiler`` intensity values are averaged into
    one bin; the time stamp of a bin is the last original time stamp in
    that group.  Any leftover points that do not fill a whole group are
    averaged into one final bin.

    Parameters
    ----------
    trace : 2d ndarray of shape (N, 2)
        Columns are (time, intensity).
    bestlength : int
        Approximate number of points in the returned trace.

    Returns
    -------
    2d ndarray
        The binned trace (about ``bestlength`` rows).  The input array is
        returned unchanged if it is already shorter than ``bestlength``.
    """
    if len(trace) < bestlength:
        # Already small enough - nothing to do.
        return trace
    # Number of original points averaged into each output point.
    teiler = int(np.floor(len(trace) / bestlength))
    newlength = int(np.floor(len(trace) / teiler))
    nused = newlength * teiler
    # Vectorized binning (replaces the original Python loop over teiler):
    # average the intensity over each group and keep its last time stamp.
    newsignal = trace[:nused, 1].reshape(newlength, teiler).mean(axis=1)
    newtimes = trace[teiler - 1:nused:teiler, 0]
    if len(trace) % teiler != 0:
        # Average the remaining points into one extra bin.
        rest = trace[nused:, 1]
        newsignal = np.concatenate((newsignal, [rest.mean()]))
        newtimes = np.concatenate((newtimes, [trace[-1, 0]]))
    newtrace = np.empty((len(newtimes), 2))
    newtrace[:, 0] = newtimes
    newtrace[:, 1] = newsignal
    return newtrace
\ No newline at end of file
diff --git a/pycorrfit/tools/parmrange.py b/pycorrfit/tools/parmrange.py
index c1d45d3..d0df4f1 100644
--- a/pycorrfit/tools/parmrange.py
+++ b/pycorrfit/tools/parmrange.py
@@ -8,8 +8,10 @@ Select the range in which the parameter should reside for 
fitting.
 
 
 import wx
+from wx.lib.agw import floatspin
 import numpy as np
 
+
 from .. import edclasses  # edited floatspin
 from .. import models as mdls
 
@@ -55,8 +57,8 @@ class RangeSelector(wx.Frame):
         # = wx.BoxSizer(wx.VERTICAL)
         self.WXboxsizer = wx.FlexGridSizer(rows=len(labels), cols=4, vgap=5, 
hgap=5)
         for i in range(len(labels)):
-            left = edclasses.FloatSpin(self.panel, digits=7, increment=.1)
-            right = edclasses.FloatSpin(self.panel, digits=7, increment=.1)
+            left = floatspin.FloatSpin(self.panel, digits=7, increment=.1)
+            right = floatspin.FloatSpin(self.panel, digits=7, increment=.1)
             left.SetValue(self.parameter_range[i][0])
             right.SetValue(self.parameter_range[i][1])
             left.Bind(wx.EVT_SPINCTRL, self.OnSetParmRange)
diff --git a/pycorrfit/tools/simulation.py b/pycorrfit/tools/simulation.py
index 79c75f9..b6c37ca 100644
--- a/pycorrfit/tools/simulation.py
+++ b/pycorrfit/tools/simulation.py
@@ -9,9 +9,9 @@ Might be useful for better understanding model functions.
 
 
 import wx
+from wx.lib.agw import floatspin
 import numpy as np
 
-from .. import edclasses  # edited floatspin
 from .. import models as mdls
 
 # Menu entry name
@@ -76,45 +76,39 @@ class Slide(wx.Frame):
         slidesizer = wx.FlexGridSizer(rows=3, cols=5, vgap=5, hgap=5)
         self.textstartA = wx.StaticText(self.panel, label=self.labelA)
         slidesizer.Add(self.textstartA)
-        self.startspinA = edclasses.FloatSpin(self.panel, digits=7,
-                                            increment=.1)
+        self.startspinA = floatspin.FloatSpin(self.panel, digits=7)
         slidesizer.Add(self.startspinA)
         self.sliderA = wx.Slider(self.panel, -1, self.slidestart, 0,
                                  self.slidemax, wx.DefaultPosition, (250, -1),
                                  wx.SL_HORIZONTAL)
         slidesizer.Add(self.sliderA)
-        self.endspinA = edclasses.FloatSpin(self.panel, digits=7,
-                                            increment=.1)
+        self.endspinA = floatspin.FloatSpin(self.panel, digits=7)
         slidesizer.Add(self.endspinA)
         self.textvalueA = wx.StaticText(self.panel, label= "%.5e" % 
self.valueA)
         slidesizer.Add(self.textvalueA)
         # Parameter B
         self.textstartB = wx.StaticText(self.panel, label=self.labelB)
         slidesizer.Add(self.textstartB)
-        self.startspinB = edclasses.FloatSpin(self.panel, digits=7,
-                                            increment=.1)
+        self.startspinB = floatspin.FloatSpin(self.panel, digits=7)
         slidesizer.Add(self.startspinB)
         self.sliderB = wx.Slider(self.panel, -1, self.slidestart, 0,
                                  self.slidemax, wx.DefaultPosition, (250, -1),
                                  wx.SL_HORIZONTAL)
         slidesizer.Add(self.sliderB)
-        self.endspinB = edclasses.FloatSpin(self.panel, digits=7,
-                                            increment=.1)
+        self.endspinB = floatspin.FloatSpin(self.panel, digits=7)
         slidesizer.Add(self.endspinB)
         self.textvalueB = wx.StaticText(self.panel, label= "%.5e" % 
self.valueB)
         slidesizer.Add(self.textvalueB)
         # Result of operation
         self.textstartOp = wx.StaticText(self.panel, label=self.labelOp)
         slidesizer.Add(self.textstartOp)
-        self.startspinOp = edclasses.FloatSpin(self.panel, digits=7,
-                                            increment=.1)
+        self.startspinOp = floatspin.FloatSpin(self.panel, digits=7)
         slidesizer.Add(self.startspinOp)
         self.sliderOp = wx.Slider(self.panel, -1, self.slidestart, 0,
                                   self.slidemax, wx.DefaultPosition, (250, -1),
                                   wx.SL_HORIZONTAL)
         slidesizer.Add(self.sliderOp)
-        self.endspinOp = edclasses.FloatSpin(self.panel, digits=7,
-                                        increment=.1)
+        self.endspinOp = floatspin.FloatSpin(self.panel, digits=7)
         slidesizer.Add(self.endspinOp)
         self.textvalueOp = wx.StaticText(self.panel,
                                          label= "%.5e" % self.valueOp)
@@ -182,16 +176,6 @@ class Slide(wx.Frame):
                 return B, C
 
 
-    def Increment(self):
-        # Set the correct increment for each spinctrl
-        self.startspinA.increment()
-        self.startspinB.increment()
-        self.startspinOp.increment()
-        self.endspinA.increment()
-        self.endspinB.increment()
-        self.endspinOp.increment()
-
-
     def FillOpDict(self):
         # Dictionaries: [Calculate C, Calculate B)
         self.opdict["A/B"] = [lambda A,B: A/B, lambda A,C: A/C]
@@ -330,7 +314,6 @@ class Slide(wx.Frame):
                                                  self.valueOp)
         self.textvalueB.SetLabel( "%.5e" % self.valueB)
         self.textvalueOp.SetLabel( "%.5e" % self.valueOp)
-        self.Increment()
         self.SetResult()
         self.OnSize()
 
@@ -442,6 +425,5 @@ class Slide(wx.Frame):
         self.textvalueA.SetLabel( "%.5e" % self.valueA)
         self.textvalueB.SetLabel( "%.5e" % self.valueB)
         self.textvalueOp.SetLabel( "%.5e" % self.valueOp)
-        self.Increment()
         self.SetResult()
 
diff --git a/pycorrfit/tools/trace.py b/pycorrfit/tools/trace.py
index 21ca6f3..51b8cc3 100644
--- a/pycorrfit/tools/trace.py
+++ b/pycorrfit/tools/trace.py
@@ -53,6 +53,7 @@ class ShowTrace(wx.Frame):
 
     def OnDraw(self):
         traces = self.Page.corr.traces
+        self.canvas.SetEnableLegend(True)
         if len(traces) == 1:
             self.trace = 1*traces[0].trace
             # We want to have the trace in [s] here.
@@ -61,7 +62,6 @@ class ShowTrace(wx.Frame):
                     legend='{:.2f}kHz'.format(traces[0].countrate),
                     colour='blue', width=1)
             lines = [line]
-            self.canvas.SetEnableLegend(False)
             xmax = np.max(self.trace[:,0])
             xmin = np.min(self.trace[:,0])
             ymax = np.max(self.trace[:,1])
@@ -73,13 +73,12 @@ class ShowTrace(wx.Frame):
             self.traceb = 1*traces[1].trace
             self.traceb[:,0] = self.traceb[:,0]/1000
             linea = plot.PolyLine(self.tracea,
-                    legend='channel 1\n{:.2f}kHz'.format(traces[0].countrate), 
+                    legend='channel 1 {:.2f}kHz'.format(traces[0].countrate), 
                     colour='blue', width=1)
             lineb = plot.PolyLine(self.traceb, 
-                    legend='channel 2\n{:.2f}kHz'.format(traces[1].countrate), 
+                    legend='channel 2 {:.2f}kHz'.format(traces[1].countrate), 
                     colour='red', width=1)
             lines = [linea, lineb]
-            self.canvas.SetEnableLegend(True)
             xmax = max(np.max(self.tracea[:,0]), np.max(self.traceb[:,0]))
             xmin = min(np.min(self.tracea[:,0]), np.min(self.traceb[:,0]))
             ymax = max(np.max(self.tracea[:,1]), np.max(self.traceb[:,1]))

-- 
Alioth's /usr/local/bin/git-commit-notice on 
/srv/git.debian.org/git/debian-med/pycorrfit.git

_______________________________________________
debian-med-commit mailing list
[email protected]
http://lists.alioth.debian.org/cgi-bin/mailman/listinfo/debian-med-commit

Reply via email to