Revision: 6270
          http://matplotlib.svn.sourceforge.net/matplotlib/?rev=6270&view=rev
Author:   fer_perez
Date:     2008-10-19 07:53:47 +0000 (Sun, 19 Oct 2008)
Log Message: ----------- Cleanup and updates to many examples. Added new tool to create skeletons and solutions from a single source script. Modified Paths: -------------- trunk/py4science/examples/bessel.py trunk/py4science/examples/butter_filter.py trunk/py4science/examples/fft_imdenoise.py trunk/py4science/examples/filtilt_demo.py trunk/py4science/examples/fitting.py trunk/py4science/examples/montecarlo_pi.py trunk/py4science/examples/qsort.py trunk/py4science/examples/quad_newton.py trunk/py4science/examples/recarray_demo.py trunk/py4science/examples/trapezoid.py Added Paths: ----------- trunk/py4science/examples/mkskel.py trunk/py4science/examples/skel/fortran_wrap/test_example.py trunk/py4science/examples/skel/fortran_wrap/test_fib.py Modified: trunk/py4science/examples/bessel.py =================================================================== --- trunk/py4science/examples/bessel.py 2008-10-19 07:48:51 UTC (rev 6269) +++ trunk/py4science/examples/bessel.py 2008-10-19 07:53:47 UTC (rev 6270) @@ -1,45 +1,46 @@ #!/usr/bin/env python """Plot some Bessel functions of integer order, using Scipy and pylab""" -import scipy.special +from scipy import special -import numpy as N -import scipy as S -import pylab as P +import numpy as np +import matplotlib.pyplot as plt -# shorthand -special = S.special - def jn_asym(n,x): """Asymptotic form of jn(x) for x>>n""" + + #@ The asymptotic formula is: + #@ j_n(x) ~ sqrt(2.0/pi/x)*cos(x-(n*pi/2.0+pi/4.0)) - return S.sqrt(2.0/S.pi/x)*S.cos(x-(n*S.pi/2.0+S.pi/4.0)) + return np.sqrt(2.0/np.pi/x)*np.cos(x-(n*np.pi/2.0+np.pi/4.0)) #@ # build a range of values to plot in -x = N.linspace(0,30,400) +x = np.linspace(0,30,400) # Start by plotting the well-known j0 and j1 -P.figure() -P.plot(x,special.j0(x),label='$J_0$') -P.plot(x,special.j1(x),label='$J_1$') +plt.figure() +plt.plot(x,special.j0(x),label='$J_0$') #@ +plt.plot(x,special.j1(x),label='$J_1$') #@ # Show a higher-order Bessel function n = 5 -P.plot(x,special.jn(n,x),label='$J_%s$' % n) +plt.plot(x,special.jn(n,x),label='$J_%s$' % n) # and compute its asymptotic form (valid for x>>n, where n is the order). We -# must first find the valid range of x where at least x>n: -x_asym = S.compress(x>n,x) -P.plot(x_asym,jn_asym(n,x_asym),label='$J_%s$ (asymptotic)' % n) +# must first find the valid range of x where at least x>n. +#@ Find where x>n and evaluate the asymptotic relation only there +x_asym = x[x>n] #@ +plt.plot(x_asym,jn_asym(n,x_asym),label='$J_%s$ (asymptotic)' % n) #@ # Finish off the plot -P.legend() -P.title('Bessel Functions') +plt.legend() +plt.title('Bessel Functions') # horizontal line at 0 to show x-axis, but after the legend -P.axhline(0) +plt.axhline(0) -# EXERCISE: redo the above, for the asymptotic range 0<x<<n. The asymptotic -# form in this regime is +# Extra credit: redo the above, for the asymptotic range 0<x<<n. 
The +# asymptotic form in this regime is: +# # J(n,x) = (1/gamma(n+1))(x/2)^n # Now, let's verify numerically the recursion relation @@ -47,17 +48,25 @@ jn = special.jn # just a shorthand # Be careful to check only for x!=0, to avoid divisions by zero -xp = S.compress(x>0.0,x) # positive x +#@ xp contains the positive values of x +xp = x[x>0.0] #@ # construct both sides of the recursion relation, these should be equal +#@ Define j_np1 to hold j_(n+1) evaluated at the points xp j_np1 = jn(n+1,xp) +#@ Define j_np1_rec to express j_(n+1) via a recursion relation, at points xp j_np1_rec = (2.0*n/xp)*jn(n,xp)-jn(n-1,xp) # Now make a nice error plot of the difference, in a new figure -P.figure() -P.semilogy(xp,abs(j_np1-j_np1_rec),'r+-') -P.title('Error in recursion for $J_%s$' % n) -P.grid() +plt.figure() +#@ We now plot the difference between the two formulas above. Note that to +#@ properly display the errors, we want to use a logarithmic y scale. Search +#@ the matplotlib docs for the proper calls. +plt.semilogy(xp,abs(j_np1-j_np1_rec),'r+-') #@ + +plt.title('Error in recursion for $J_%s$' % n) +plt.grid() + # Don't forget a show() call at the end of the script -P.show() +plt.show() Modified: trunk/py4science/examples/butter_filter.py =================================================================== --- trunk/py4science/examples/butter_filter.py 2008-10-19 07:48:51 UTC (rev 6269) +++ trunk/py4science/examples/butter_filter.py 2008-10-19 07:53:47 UTC (rev 6270) @@ -1,13 +1,13 @@ -import numpy as n +import numpy as np import scipy.signal as signal -from pylab import figure, show +from matplotlib.pyplot import figure, show dt = 0.01 -t = n.arange(0, 2, dt) -s = n.sin(2*n.pi*t) +t = np.arange(0, 2, dt) +s = np.sin(2*np.pi*t) # sine corrupted wih gaussian white noise -sn = s + 0.1*n.random.randn(len(s)) # noisy sine +sn = s + 0.1*np.random.randn(len(s)) # noisy sine # the nyquist frequency 1/(2dt) is the maximum frequency in a sampled # signal Modified: trunk/py4science/examples/fft_imdenoise.py =================================================================== --- trunk/py4science/examples/fft_imdenoise.py 2008-10-19 07:48:51 UTC (rev 6269) +++ trunk/py4science/examples/fft_imdenoise.py 2008-10-19 07:53:47 UTC (rev 6270) @@ -1,62 +1,86 @@ #!/usr/bin/env python -"""Image denoising example using 2-dimensional FFT.""" +"""Simple image denoising example using 2-dimensional FFT.""" import sys import numpy as np from matplotlib import pyplot as plt -def mag_phase(F): - """Return magnitude and phase components of spectrum F.""" - - return (np.absolute(F), np.angle(F)) - def plot_spectrum(F, amplify=1000): """Normalise, amplify and plot an amplitude spectrum.""" - M, Phase = mag_phase(F) - M *= amplify/M.max() - M[M > 1] = 1 - print M.shape, M.dtype - plt.imshow(M, plt.cm.Blues) + # Note: the problem here is that we have a spectrum whose histogram is + # *very* sharply peaked at small values. To get a meaningful display, a + # simple strategy to improve the display quality consists of simply + # amplifying the values in the array and then clipping. + # Compute the magnitude of the input F (call it mag). Then, rescale mag by + # amplify/maximum_of_mag. Numpy arrays can be scaled in-place with ARR *= + # number. For the max of an array, look for its max method. + mag = abs(F) #@ + mag *= amplify/mag.max() #@ + + # Next, clip all values larger than one to one. You can set all elements + # of an array which satisfy a given condition with array indexing syntax: + # ARR[ARR<VALUE] = NEWVALUE, for example. 
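As an aside, the scale-and-clip display trick described in the hints above fits in a few standalone lines; this is only a sketch, with a random array standing in for a real spectrum:

import numpy as np

# Stand-in for a 2-d complex spectrum, e.g. F = np.fft.fft2(im)
F = np.fft.fft2(np.random.rand(64, 64))
amplify = 1000

mag = np.abs(F)              # magnitude of the complex spectrum
mag *= amplify / mag.max()   # rescale in place so the peak equals amplify
mag[mag > 1] = 1             # boolean indexing clips everything above 1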
+ mag[mag > 1] = 1 #@ + # Display: this one already works, if you did everything right with mag + plt.imshow(mag, plt.cm.Blues) + if __name__ == '__main__': try: # Read in original image, convert to floating point for further # manipulation; imread returns a MxNx4 RGBA image. Since the image is - # grayscale, just extrac the 1st channel - im = plt.imread('data/moonlanding.png').astype(float)[:,:,0] + # grayscale, just extract the 1st channel + #@ Hints: + #@ - use plt.imread() to load the file + #@ - convert to a float array with the .astype() method + #@ - extract all rows, all columns, 0-th plane to get the first + #@ channel + #@ - the resulting array should have 2 dimensions only + im = plt.imread('data/moonlanding.png').astype(float)[:,:,0] #@ + print "Image shape:",im.shape except: print "Could not open image." sys.exit(-1) # Compute the 2d FFT of the input image - F = np.fft.fft2(im) + #@ Hint: Look for a 2-d FFT in np.fft. + #@ Note: call this variable 'F', which is the name we'll be using below. + F = np.fft.fft2(im) #@ - # Now, make a copy of the original spectrum and truncate coefficients. + # In the lines following, we'll make a copy of the original spectrum and + # truncate coefficients. NO immediate code is to be written right here. + + # Define the fraction of coefficients (in each direction) we keep keep_fraction = 0.1 # Call ff a copy of the original transform. Numpy arrays have a copy # method for this purpose. - ff = F.copy() + ff = F.copy() #@ - # Set r and c to be the number of rows and columns of the array. - r,c = ff.shape + # Set r and c to be the number of rows and columns of the array. + #@ Hint: use the array's shape attribute. + r,c = ff.shape #@ # Set to zero all rows with indices between r*keep_fraction and # r*(1-keep_fraction): - ff[r*keep_fraction:r*(1-keep_fraction)] = 0 + ff[r*keep_fraction:r*(1-keep_fraction)] = 0 #@ # Similarly with the columns: - ff[:, c*keep_fraction:c*(1-keep_fraction)] = 0 + ff[:, c*keep_fraction:c*(1-keep_fraction)] = 0 #@ # Reconstruct the denoised image from the filtered spectrum, keep only the - # real part for display - im_new = np.fft.ifft2(ff).real - + # real part for display. + #@ Hint: There's an inverse 2d fft in the np.fft module as well (don't + #@ forget that you only want the real part). + #@ Call the result im_new, + im_new = np.fft.ifft2(ff).real #@ + # Show the results + #@ The code below already works, if you did everything above right. plt.figure() plt.subplot(221) @@ -75,9 +99,6 @@ plt.title('Reconstructed Image') plt.imshow(im_new, plt.cm.gray) - plt.savefig('fft_imdenoise.png', dpi=150) - plt.savefig('fft_imdenoise.eps') - # Adjust the spacing between subplots for readability plt.subplots_adjust(hspace=0.32) plt.show() Modified: trunk/py4science/examples/filtilt_demo.py =================================================================== --- trunk/py4science/examples/filtilt_demo.py 2008-10-19 07:48:51 UTC (rev 6269) +++ trunk/py4science/examples/filtilt_demo.py 2008-10-19 07:53:47 UTC (rev 6270) @@ -7,10 +7,11 @@ order. The function also computes the initial filter parameters in order to provide a more stable response (via lfilter_zi). The following code has been tested with Python 2.4.4 and Scipy 0.5.1. 
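Stepping back to the fft_imdenoise.py recipe shown above, the whole low-pass-by-truncation idea can be summarized in a short standalone sketch; the random array below is only a stand-in for the real image, and the int() casts on the slice bounds are an editorial choice:

import numpy as np

im = np.random.rand(128, 128)   # stand-in for the moonlanding image
keep_fraction = 0.1

F = np.fft.fft2(im)
ff = F.copy()
r, c = ff.shape

# fft2 puts the low frequencies at the corners, so zeroing the central block
# of rows and columns removes the high-frequency content.
ff[int(r*keep_fraction):int(r*(1-keep_fraction))] = 0
ff[:, int(c*keep_fraction):int(c*(1-keep_fraction))] = 0

im_new = np.fft.ifft2(ff).real  # keep only the real part for display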
+""" -""" -from numpy import vstack, hstack, eye, ones, zeros, linalg, \ -newaxis, r_, flipud, convolve, matrix, array +from numpy import (vstack, hstack, eye, ones, zeros, linalg, + newaxis, r_, flipud, convolve, matrix, array) + from scipy.signal import lfilter def lfilter_zi(b,a): @@ -38,19 +39,18 @@ return array(zi_return) - - def filtfilt(b,a,x): #For now only accepting 1d arrays ntaps=max(len(a),len(b)) edge=ntaps*3 if x.ndim != 1: - raise ValueError, "Filiflit is only accepting 1 dimension arrays." + raise ValueError("Filiflit is only accepting 1 dimension arrays.") - #x must be bigger than edge + # x must be bigger than edge if x.size < edge: - raise ValueError, "Input vector needs to be bigger than 3 * max(len(a),len(b)." + e="Input vector needs to be bigger than 3 * max(len(a),len(b)." + raise ValueError(e) if len(a) < ntaps: a=r_[a,zeros(len(b)-len(a))] Modified: trunk/py4science/examples/fitting.py =================================================================== --- trunk/py4science/examples/fitting.py 2008-10-19 07:48:51 UTC (rev 6269) +++ trunk/py4science/examples/fitting.py 2008-10-19 07:53:47 UTC (rev 6270) @@ -7,10 +7,11 @@ from scipy.optimize import leastsq from scipy.interpolate import splrep,splev -import numpy as N -import scipy as S -import pylab as P +import numpy as np +import matplotlib as mpl +import matplotlib.pyplot as plt + def func(pars): a, alpha, k = pars return a*exp(alpha*x_vals) + k @@ -35,7 +36,7 @@ print 'Least-squares fit to the data' print 'true', pars_true print 'best', best -print '|err|_l2 =',P.l2norm(pars_true-best) +print '|err|_l2 =',np.linalg.norm(pars_true-best) # scipy's splrep uses FITPACK's curfit (B-spline interpolation) print @@ -48,27 +49,27 @@ def plot_polyfit(x,y,n,fignum=None): """ """ if fignum is None: - fignum = P.figure().number - P.plot(x,y,label='Data') + fignum = plt.figure().number + plt.plot(x,y,label='Data') - fit_coefs = N.polyfit(x,y,n) - fit_val = N.polyval(fit_coefs,x) - P.plot(x,fit_val,label='Polynomial fit, $n=%d$' % n) - P.legend() + fit_coefs = np.polyfit(x,y,n) + fit_val = np.polyval(fit_coefs,x) + plt.plot(x,fit_val,label='Polynomial fit, $n=%d$' % n) + plt.legend() return fignum # Now use pylab to plot -P.figure() -P.plot(x_vals,y_noisy,label='Noisy data') -P.plot(x_vals,func(best),lw=2,label='Least-squares fit') -P.legend() -P.figure() -P.plot(x_vals,y_noisy,label='Noisy data') -P.plot(x_vals,smooth,lw=2,label='Spline-smoothing') -P.legend() +plt.figure() +plt.plot(x_vals,y_noisy,label='Noisy data') +plt.plot(x_vals,func(best),lw=2,label='Least-squares fit') +plt.legend() +plt.figure() +plt.plot(x_vals,y_noisy,label='Noisy data') +plt.plot(x_vals,smooth,lw=2,label='Spline-smoothing') +plt.legend() fignum = plot_polyfit(x_vals,y_noisy,1) plot_polyfit(x_vals,y_noisy,2,fignum) plot_polyfit(x_vals,y_noisy,3,fignum) -P.show() +plt.show() Added: trunk/py4science/examples/mkskel.py =================================================================== --- trunk/py4science/examples/mkskel.py (rev 0) +++ trunk/py4science/examples/mkskel.py 2008-10-19 07:53:47 UTC (rev 6270) @@ -0,0 +1,325 @@ +#!/usr/bin/env python +"""Make skeletons out of Python scripts. + +Usage: + + mkskel [--test] file1.py file2.py .... + +If --test is given, the test suite is run instead. + +For each input filename f.py, a pair of output files is generated, f_soln.py +and f_skel.py. + +Source markup is very simple. 
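The fitting.py example above relies on scipy.optimize.leastsq, whose actual call falls outside the changed lines; here is a minimal sketch of the residual-function pattern it uses, with synthetic data and made-up parameter values for illustration only:

import numpy as np
from scipy.optimize import leastsq

# Synthetic data: a*exp(alpha*x) + k plus a little noise
x = np.linspace(0, 1, 100)
a_true, alpha_true, k_true = 2.0, -1.5, 0.5
y = a_true*np.exp(alpha_true*x) + k_true + 0.05*np.random.randn(len(x))

def residuals(pars):
    """Difference between the data and the model for parameters pars."""
    a, alpha, k = pars
    return y - (a*np.exp(alpha*x) + k)

p0 = (1.0, -1.0, 0.0)             # initial guess
best, ier = leastsq(residuals, p0)
print 'best-fit parameters:', best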
The tests in the file show precisely how it +works, but in summary: + +- Pure comment lines with the special marker (#@) are left in the skeleton + (only the marker is stripped, but they remain as valid comments). These are + typically used for hints. + +- Code lines terminated with the marker are: + + - In the skeleton, replaced by a NotImplementedError call. Consecutive lines + are replaced by a single call. + + - In the solution, kept as is but the marker is removed. +""" + +from __future__ import with_statement + +#----------------------------------------------------------------------------- +# Stdlib imports +import os +import re +import shutil +import sys + +# Third-party imports +import nose + +# Constants +MARKER = '#@' +DEL_RE = re.compile(r'''^((\s*)(.*?))\s*%s\s*$''' % MARKER) +HINT_RE = re.compile(r'''^(?P<space>\s*)%s\s+(?P<hint>.*)$''' % MARKER) + + +#----------------------------------------------------------------------------- +# Main code begins + +def src2soln(src): + """Remove markers from input source, leaving all else intact. + + Inputs: + src : sequence of lines (file-like objects work out of the box) + """ + + out = [] + addline = out.append + for line in src: + # Check for lines to delete and with hints + mdel = DEL_RE.match(line) + mhint = HINT_RE.match(line) + + # All hints are unconditionally removed + if mhint is None: + if mdel: + # if marker is matched in code, strip it and leave the code + line = mdel.group(1)+'\n' + addline(line) + + return ''.join(out) + + +def src2skel(src): + """Remove markers from input source, replacing marked lines. + + Marked lines are replaced with "raise NotImplementedError" calls that + summarize the total number of deleted lines. + + Inputs: + src : sequence of lines (file-like objects work out of the box) + """ + + def flush_buffers(normal_lines,del_lines=0): + """Local function to reuse some common code""" + + if state_cur == normal: + # add the normal lines + out.extend(normal_lines) + normal_lines[:] = [] + else: + # Add the summary of 'raise' lines + + # flush counter of code (disabled, we report static summary) + #msg = '1 line' if del_lines==1 else ('%s lines' % del_lines) + #exc = exc_tpl % msg + exc = exc_tpl + + # Use the last value of 'space' + line = '%s%s' % (spaces[0],exc) + out.append(line) + del_lines = 0 + spaces[:] = [] + + return del_lines + + # used to report actual # of lines removed - disabled + #exc_tpl = "raise NotImplementedError('%s missing')\n" + exc_tpl = "raise NotImplementedError('insert missing code here')\n" + + # states for state machine and other initialization + normal,delete = 0,1 + state_cur = normal + del_lines = 0 # counter, in case we want to report # of deletions + spaces = [] + normal_lines = [] + out = [] + + # To remove multiple consecutive lines of input marked for deletion, we + # need a small state machine. 
+ for line in src: + # Check for lines to delete and with hints + mdel = DEL_RE.match(line) + mhint = HINT_RE.match(line) + + if mhint: + state_new = normal + hint = mhint.group('space')+'# ' + mhint.group('hint') +'\n' + normal_lines.append(hint) + else: + if mdel is None: + state_new = normal + normal_lines.append(line) + else: + state_new = delete + del_lines += 1 + spaces.append(mdel.group(2)) + + # Flush output only when there's a change of state + if state_new != state_cur: + del_lines = flush_buffers(normal_lines,del_lines) + + # Update state machine + state_cur = state_new + + # Final buffer flush is unconditional + flush_buffers(normal_lines) + + return ''.join(out) + + +def transform_file(fpath,fname_skel,fname_soln): + """Run the cleanup routines for a given input, creating skel and soln. + """ + + # get the mode of the input so that we can create the output files with the + # same mode + fmode = os.stat(fpath).st_mode + + with open(fpath) as infile: + # Generate the skeleton + skel = src2skel(infile) + with open(fname_skel,'w') as fskel: + fskel.write(skel) + os.chmod(fname_skel,fmode) + + # Reset the input file pointer and generate the solution + infile.seek(0) + soln = src2soln(infile) + with open(fname_soln,'w') as fsoln: + fsoln.write(soln) + os.chmod(fname_soln,fmode) + +#----------------------------------------------------------------------------- +# Main execution routines + +def copyforce(src,dest): + """Forceful file link/copy that overwrites destination files.""" + try: + copy = os.link + except AttributeError: + copy = shutil.copy + if os.path.isfile(dest): + os.remove(dest) + copy(src,dest) + + +def mvforce(src,dest): + """Forceful file copy that overwrites destination files.""" + if os.path.isfile(dest): + os.remove(dest) + shutil.move(src,dest) + + +def main(argv=None): + """Main entry point as a command line script for normal execution""" + + if argv is None: + argv = sys.argv + + # If there are subdirs called skel and soln, we populate them by moving the + # generated files there, otherwise they're left in the current dir. + skel_dir = 'skel' + soln_dir = 'soln' + has_skel_dir = os.path.isdir(skel_dir) + has_soln_dir = os.path.isdir(soln_dir) + + # First, check that all files are present and abort immediately if any of + # them isn't there. 
+ for fpath in argv[1:]: + if not os.path.isfile(fpath): + raise OSError("file %r not found" % fpath) + + # If all files are there, then go ahead and process them unconditionally + for fpath in argv[1:]: + basename, ext = os.path.splitext(fpath) + fname_skel = basename + '_skel' + ext + fname_soln = basename + '_soln' + ext + transform_file(fpath,fname_skel,fname_soln) + # Move files over to final dirs if present + if has_skel_dir: + mvforce(fname_skel,os.path.join(skel_dir,fname_skel)) + if has_soln_dir: + mvforce(fname_soln,os.path.join(soln_dir,fname_soln)) + +#----------------------------------------------------------------------------- +# Tests + +def str_match(s1,s2): + """Check that two strings are equal ignoring trailing whitespace.""" + #print '***S1\n',s1,'\n***S2\n',s2 # dbg + nose.tools.assert_equal(s1.rstrip(),s2.rstrip()) + + +def test_simple(): + src = """ + first line + del line #@ + second line + """ + srclines = src.splitlines(True) + + clean = """ + first line + raise NotImplementedError('insert missing code here') + second line + """ + + cleaned = src2skel(srclines) + yield str_match,cleaned,clean + + clean = """ + first line + del line + second line + """ + cleaned = src2soln(src.splitlines(True)) + yield str_match,cleaned,clean + + +def test_multi(): + src = """ + first line + #@ Hint: remember that + #@ idea we discussed before... + del line #@ + del line2 #@ + del line3 #@ + second line: + del line4 #@ + del line5 #@ + third line + + some indented code: #@ + with more... #@ + """ + srclines = src.splitlines(True) + + clean = """ + first line + # Hint: remember that + # idea we discussed before... + raise NotImplementedError('insert missing code here') + second line: + raise NotImplementedError('insert missing code here') + third line + + raise NotImplementedError('insert missing code here') + """ + cleaned = src2skel(srclines) + yield str_match,cleaned,clean + + clean = """ + first line + del line + del line2 + del line3 + second line: + del line4 + del line5 + third line + + some indented code: + with more... + """ + cleaned = src2soln(srclines) + yield str_match,cleaned,clean + + [EMAIL PROTECTED] +def test(): + """Simple self-contained test runner.""" + nose.runmodule(__name__,exit=False, + argv=['--doctests', + #'-s', + #'--pdb-failures', + ]) + +#----------------------------------------------------------------------------- +# Execution from the command line. 
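To see the new mkskel markup in action without running the whole script, the two transformation functions can be exercised directly; this sketch assumes mkskel.py is importable from the current directory:

from mkskel import src2skel, src2soln

src = ("#@ Hint: look at np.linspace\n"
       "x = np.linspace(0, 30, 400)  #@\n")
lines = src.splitlines(True)

print src2skel(lines)
# -> # Hint: look at np.linspace
#    raise NotImplementedError('insert missing code here')

print src2soln(lines)
# -> x = np.linspace(0, 30, 400)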
+ +if __name__ == "__main__": + if '--test' in sys.argv: + test() + else: + main(sys.argv) Property changes on: trunk/py4science/examples/mkskel.py ___________________________________________________________________ Added: svn:executable + * Modified: trunk/py4science/examples/montecarlo_pi.py =================================================================== --- trunk/py4science/examples/montecarlo_pi.py 2008-10-19 07:48:51 UTC (rev 6269) +++ trunk/py4science/examples/montecarlo_pi.py 2008-10-19 07:53:47 UTC (rev 6270) @@ -15,7 +15,7 @@ import math import random -import numpy as N +import numpy as np from scipy import weave from scipy.weave import inline,converters @@ -110,7 +110,7 @@ print 'pi - weave :',v2() # make a simple 10x10 array - a = N.arange(100) + a = np.arange(100) a.shape = 10,10 # Print it using our printer Modified: trunk/py4science/examples/qsort.py =================================================================== --- trunk/py4science/examples/qsort.py 2008-10-19 07:48:51 UTC (rev 6269) +++ trunk/py4science/examples/qsort.py 2008-10-19 07:53:47 UTC (rev 6270) @@ -1,29 +1,60 @@ -""" -Simple quicksort implementation. +"""Simple quicksort implementation. -See http://en.wikipedia.org/wiki/Quicksort for algorithm, pseudocode -and C implementation for comparison +From http://en.wikipedia.org/wiki/Quicksort we have this pseudocode (see also +the C implementation for comparison). + +Note: what follows is NOT python code, it's meant to only illustrate the +algorithm for you. Below you'll need to actually implement it in real Python. +You may be surprised at how close a working Python implementation can be to +this pseudocode. + + +function quicksort(array) + var list less, greater + if length(array) <= 1 + return array + select and remove a pivot value pivot from array + for each x in array + if x <= pivot then append x to less + else append x to greater + return concatenate(quicksort(less), pivot, quicksort(greater)) """ def qsort(lst): - """Return a sorted copy of the input list.""" + """Return a sorted copy of the input list. - if len(lst) <= 1: - return lst + Input: - # Select pivot and apply recursively - pivot, rest = lst[0],lst[1:] - less_than = [ lt for lt in rest if lt < pivot ] - greater_equal = [ ge for ge in rest if ge >= pivot ] - - return qsort(less_than) + [pivot] + qsort(greater_equal) + lst : a list of elements which can be compared. 
+ Examples: + >>> qsort([]) + [] + + >>> qsort([3,2,5]) + [2, 3, 5] + """ + + #@ Hint: remember that all recursive functions need an exit condition + if len(lst) <= 1: #@ + return lst #@ + + #@ Select pivot and apply recursively + pivot, rest = lst[0],lst[1:] #@ + less_than = [ lt for lt in rest if lt < pivot ] #@ + greater_equal = [ ge for ge in rest if ge >= pivot ] #@ + + #@ Upon return, make sure to properly concatenate the output lists + return qsort(less_than) + [pivot] + qsort(greater_equal) #@ + + #----------------------------------------------------------------------------- # Tests #----------------------------------------------------------------------------- import random +import nose import nose, nose.tools as nt def test_sorted(): Modified: trunk/py4science/examples/quad_newton.py =================================================================== --- trunk/py4science/examples/quad_newton.py 2008-10-19 07:48:51 UTC (rev 6269) +++ trunk/py4science/examples/quad_newton.py 2008-10-19 07:53:47 UTC (rev 6270) @@ -4,35 +4,35 @@ from math import sin -import scipy, scipy.integrate, scipy.optimize +from scipy.integrate import quad +from scipy.optimize import newton -quad = scipy.integrate.quad -newton = scipy.optimize.newton - # test input function def f(t): - return t*(sin(t))**2 + #@ f(t): t * sin^2(t) + return t*(sin(t))**2 #@ -# exact \int_0^t f(s) ds - u def g(t): + "Exact form for g by integrating f(t)" u = 0.25 return .25*(t**2-t*sin(2*t)+(sin(t))**2)-u -# now let's construct g(t) via numerical integration def gn(t): + "g(t) obtained by numerical integration" u = 0.25 - return quad(f,0.0,t)[0] - u + #@ Hint: use quad, see its return value carefully. + return quad(f,0.0,t)[0] - u #@ # main tguess = 10.0 print '"Exact" solution (knowing the analytical form of the integral)' -t0 = newton(g,tguess,f) +t0 = newton(g,tguess,f) #@ print "t0, g(t0) =",t0,g(t0) print print "Solution using the numerical integration technique" -t1 = newton(gn,tguess,f) +t1 = newton(gn,tguess,f) #@ print "t1, g(t1) =",t1,g(t1) print Modified: trunk/py4science/examples/recarray_demo.py =================================================================== --- trunk/py4science/examples/recarray_demo.py 2008-10-19 07:48:51 UTC (rev 6269) +++ trunk/py4science/examples/recarray_demo.py 2008-10-19 07:53:47 UTC (rev 6270) @@ -1,67 +1,53 @@ """ parse and load ge.csv into a record array """ -import time, datetime, csv -import dateutil.parser -import numpy -if 0: - # create a csv reader to parse the file - fh = file('data/ge.csv') - reader = csv.reader(fh) - header = reader.next() +import datetime - # iterate over the remaining rows and convert the data to date - # objects, ints, or floats as approriate - rows = [] - for date, open, high, low, close, volume, adjclose in reader: - date = dateutil.parser.parse(date).date() - volume = int(volume) - open, high, low, close, adjclose = map(float, ( - open, high, low, close, adjclose)) - rows.append((date, open, high, low, close, volume, adjclose)) +import numpy as np - fh.close() +import matplotlib +from matplotlib import pyplot as plt - # this is how you can use the function matplotlib.mlab.load to do the same - #rows = load('data/ge.csv', skiprows=1, converters={0:datestr2num}, delimiter=',') +# Disable latex support just in case, so we can rotate text +matplotlib.rcParams['text.usetex'] = False - r = numpy.rec.fromrecords(rows, names='date,open,high,low,close,volume,adjclose') +# Load a data file into a record array using the matplotlib csv2rec utility +r = 
matplotlib.mlab.csv2rec('data/ge.csv') +r.sort() #sort by date, the first column # compute the average approximate dollars traded over the last 10 days # hint: closing price * volume trades approx equals dollars trades recent = r[-10:] -dollars = numpy.mean(recent.close * recent.volume) -print '$%1.2fM'%(dollars/1e6) +dollars = np.mean(recent.close * recent.volume) +print 'Total traded over last 10 days: $%1.2fM'%(dollars/1e6) -# plot the adjusted closing price vs time since 2003 - hint, you must -# use date2num to convert the date to a float for mpl. Make two axes, -# one for price and one for volume. Use a bar chart for volume -import matplotlib -matplotlib.rcParams['usetex'] = False -from matplotlib.dates import date2num -import pylab -mask = r.date>datetime.date(2003,1,1) -date = date2num(r.date[mask]) -price = r.adjclose[mask] +# plot the adjusted closing price vs time since 2003. Make two axes, one for +# price and one for volume. Use a bar chart for volume + +# Record arrays are like 'mini-spreadsheets' +mask = r.date > datetime.date(2003,1,1) +date = r.date[mask] +price = r.adj_close[mask] volume = r.volume[mask] -fig = pylab.figure() +# Plotting +fig = plt.figure() fig.subplots_adjust(hspace=0) ax1 = fig.add_subplot(211) -ax1.plot(date, price, '-'); -ax1.xaxis_date() +ax1.plot(date, price, '-') + ax1.grid() for label in ax1.get_xticklabels(): label.set_visible(False) ax2 = fig.add_subplot(212, sharex=ax1) ax2.bar(date, volume); -ax2.xaxis_date() + ax2.grid() for label in ax2.get_xticklabels(): label.set_rotation(30) label.set_horizontalalignment('right') - -pylab.show() +fig.autofmt_xdate() +plt.show() Added: trunk/py4science/examples/skel/fortran_wrap/test_example.py =================================================================== --- trunk/py4science/examples/skel/fortran_wrap/test_example.py (rev 0) +++ trunk/py4science/examples/skel/fortran_wrap/test_example.py 2008-10-19 07:53:47 UTC (rev 6270) @@ -0,0 +1,9 @@ +import example +import numpy + +x = numpy.arange(10.) + +yf = example.cumsum(x) +yn = numpy.cumsum(x) + +numpy.testing.assert_array_almost_equal(yf, yn) Added: trunk/py4science/examples/skel/fortran_wrap/test_fib.py =================================================================== --- trunk/py4science/examples/skel/fortran_wrap/test_fib.py (rev 0) +++ trunk/py4science/examples/skel/fortran_wrap/test_fib.py 2008-10-19 07:53:47 UTC (rev 6270) @@ -0,0 +1,13 @@ +import numpy as N + +import example + +n = 10 +fn = example.fib(n) + +print 'Fibonacci numbers:' +print fn + +# Check validity +assert N.alltrue(fn[2:]== fn[1:-1]+fn[:-2]), "Fibonacci mismatch" + Modified: trunk/py4science/examples/trapezoid.py =================================================================== --- trunk/py4science/examples/trapezoid.py 2008-10-19 07:48:51 UTC (rev 6269) +++ trunk/py4science/examples/trapezoid.py 2008-10-19 07:53:47 UTC (rev 6270) @@ -7,7 +7,8 @@ """Simple trapezoid integrator for sequence-based innput. Inputs: - - x,y: arrays of the same length. + - x,y: arrays of the same length (and more than one element). If the two + inputs have different lengths, a ValueError exception is raised. Output: - The result of applying the trapezoid rule to the input, assuming that @@ -15,14 +16,20 @@ Minimally modified from matplotlib.mlab.""" - # Sanity checks - if len(x)!=len(y): - raise ValueError('x and y must have the same length') - if len(x)<2: - raise ValueError('x and y must have > 1 element') + # Sanity checks. 
+ #@ + #@ Hint: if the two inputs have mismatched lengths or less than 2 + #@ elements, we raise ValueError with an explanatory message. + if len(x) != len(y): #@ + raise ValueError('x and y must have the same length') #@ + if len(x) < 2: #@ + raise ValueError('x and y must have > 1 element') #@ # Efficient application of trapezoid rule via numpy - return 0.5*((x[1:]-x[:-1])*(y[1:]+y[:-1])).sum() + #@ + #@ Hint: think of using numpy slicing to compute the moving difference in + #@ the basic trapezoid formula. + return 0.5*((x[1:]-x[:-1])*(y[1:]+y[:-1])).sum() #@ def trapzf(f,a,b,npts=100): """Simple trapezoid-based integrator. @@ -39,15 +46,25 @@ Output: - The value of the trapezoid-rule approximation to the integral.""" - # Generate an equally spaced grid to sample the function at - x = np.linspace(a,b,npts) + #@ Hint: you will need to apply the function f to easch element of the + #@ vector x. What are several ways to do this? Can you profile them to see + #@ what differences in timings result for long vectors x? + + # Generate an equally spaced grid to sample the function. + x = np.linspace(a,b,npts)#@ + # For an equispaced grid, the x spacing can just be read off from the first # two points and factored out of the summation. - dx = x[1]-x[0] + dx = x[1]-x[0]#@ + # Sample the input function at all values of x - y = np.array(map(f,x)) + #@ + #@ Hint: you need to make an array out of the evaluations, and the python + #@ builtin 'map' function can come in handy. + y = np.array(map(f,x))#@ + # Compute the trapezoid rule sum for the final result - return 0.5*dx*(y[1:]+y[:-1]).sum() + return 0.5*dx*(y[1:]+y[:-1]).sum()#@ #----------------------------------------------------------------------------- @@ -56,20 +73,25 @@ import nose, nose.tools as nt import numpy.testing as nptest +# A simple function for testing def square(x): return x**2 def test_err(): + """Test that mismatched inputs raise a ValueError exception.""" nt.assert_raises(ValueError,trapz,range(2),range(3)) def test_call(): + "Test a direct call with equally spaced samples. " x = np.linspace(0,1,100) y = np.array(map(square,x)) nptest.assert_almost_equal(trapz(x,y),1./3,4) def test_square(): + "Test integrating the square() function." nptest.assert_almost_equal(trapzf(square,0,1),1./3,4) def test_square2(): + "Another test integrating the square() function." nptest.assert_almost_equal(trapzf(square,0,3,350),9.0,4)
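Finally, the hint in trapzf about applying f to each element of x has several possible answers; a small sketch of the usual options (square() is just the test function from above, and the names are illustrative only):

import numpy as np

def square(x): return x**2

x = np.linspace(0, 1, 101)

# Option 1: generic -- works for any scalar Python function
y_map = np.array(map(square, x))

# Option 2: if f is written with numpy operations, it vectorizes directly
y_vec = square(x)

# Option 3: np.vectorize is a convenience wrapper (still a Python-level loop)
y_vfun = np.vectorize(square)(x)

assert np.allclose(y_map, y_vec) and np.allclose(y_map, y_vfun)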