%pylab inline
import Image
import re
import FPI
import glob
from lmfit import Parameters
import logging
from optparse import OptionParser
import MySQLdb as mdb
import datetime
import numpy as np
import os
import BoltwoodSensor
import X300Sensor
import FPIDisplay
import pytz
import fpiinfo
from FPI import *
# Make default figure size bigger
matplotlib.rcParams['savefig.dpi'] = 150
matplotlib.rcParams['figure.figsize'] = (4,3)
SIGMA_V_THRESH = 30. # [m/s] only trust lamc to center the next grid search if sigma_v is below this
Populating the interactive namespace from numpy and matplotlib
#fn = '/home/bhardin2/Laser_20150209_2147_3sec.tif'
fn = '/rdata/airglow/fpi/minime05/uao/2015/20150214/laser_X1.tif'
d = Image.open(fn)
img = array(d)
imshow(img, vmin = prctile(img,1), vmax = prctile(img,99))
colorbar();
instrument = fpiinfo.get_instr_info('minime05')
ESTIMATE_BLUR = True
# Initialize values
output = []
last_chi = -999
laser_times = []
laser_redchi = []
last_t = instrument['nominal_t']
lam_laser = instrument['lam_laser']
cx,cy = FPI.FindCenter(img)
N = 512
N0 = 0
N1 = N
annuli = FPI.FindEqualAreas(img,cx,cy,N)
# Perform annular summation
laser_spectra, laser_sigma = AnnularSum(img,annuli,0)
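# (The annular summation collapses the 2-D fringe image into a 1-D spectrum of
# detector counts versus equal-area radial bin, with a per-bin uncertainty estimate.)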
if not np.isnan(laser_spectra).any(): # Do the inversion. Otherwise, ignore it.

    ####### Find good initial guesses for the parameters ######

    # Magnification parameter by using geometry (assuming square binning)
    alpha = instrument['pix_size']/instrument['focal_length'] * 2
    # Intensity and background by looking at fringes
    I = laser_spectra.max() - laser_spectra.min()
    B = laser_spectra.min()

    laser_params = Parameters()
    laser_params.add('n',     value = 1.0,       vary = False)
    laser_params.add('t',     value = None,      vary = False)
    laser_params.add('lam',   value = lam_laser, vary = False)
    laser_params.add('R',     value = 0.5,       vary = False)
    laser_params.add('alpha', value = alpha,     vary = False)
    laser_params.add('I',     value = I,         vary = False)
    laser_params.add('B',     value = B,         vary = False)
    laser_params.add('a1',    value = -0.1,      min = -5.0,  max = 5.0,  vary = False)
    laser_params.add('a2',    value = 0.0,       min = -5.0,  max = 5.0,  vary = False)
    laser_params.add('b0',    value = 0.5,       min = -10.0, max = 10.0, vary = False)
    laser_params.add('b1',    value = 0.0,       min = -10.0, max = 10.0, vary = False)
    laser_params.add('b2',    value = 0.0,       min = -10.0, max = 10.0, vary = False)

    # To find a good initial guess for "t", we'll need to do a grid search. Search
    # over 1 FSR around the last solved-for value (or the nominal value, if this is the first trial).
    # TODO: make this grid search a separate, general function.
    def goodness(t):
        # Return the correlation between the fringe with this t and the data
        laser_params['t'].value = t
        fringe = Laser_FringeModel(laser_params, annuli['r'])
        return np.dot(fringe, laser_spectra)/(norm(fringe)*norm(laser_spectra))

    t_candidates = linspace(last_t - lam_laser/4, last_t + lam_laser/4, 50)
    corrvec = np.array([goodness(t) for t in t_candidates])
    best_t = t_candidates[corrvec.argmax()]
    laser_params['t'].value = best_t
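    # Note: the fringe pattern repeats when the etalon gap changes by lam_laser/2
    # (one interference order), so scanning t over +/- lam_laser/4 covers exactly
    # one free spectral range in gap around the previous solution.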
    ####### Inversion of laser image ##########

    # Now do the least-squares fit, but in stages, varying only certain parameters at a time, according to:
    order = [
        ['alpha'],
        ['t','alpha'],
        ['B','I'],
        ['R'],
        ['t','alpha','B','I','R','a1','a2'],
    ]
    if ESTIMATE_BLUR:
        order.append(['t','alpha','B','I','R','a1','a2','b0','b1','b2'])

    for group in order: # Set all the params in this group to "vary=True", then run the inversion
        for param in laser_params.keys():
            if param in group:
                laser_params[param].vary = True
            else:
                laser_params[param].vary = False
        # Set falloff terms to False if this instrument only has a couple of fringes
        if not instrument['many_fringes']:
            for param in ['a2', 'b1', 'b2']:
                laser_params[param].vary = False
                laser_params[param].value = 0.0

        laser_fit = Minimizer(Laser_Residual, laser_params,
                              fcn_args=(annuli['r'][N0:N1],),
                              fcn_kws={'data': laser_spectra[N0:N1], 'sigma': laser_sigma[N0:N1]},
                              scale_covar = True)
        laser_fit.leastsq()

    output.append(laser_fit)
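# To sanity-check the staged inversion, the converged values, standard errors, and
# parameter correlations can be printed with lmfit's reporting helper (a minimal
# sketch; assumes report_fit is exposed by the installed lmfit version):
from lmfit import report_fit
report_fit(laser_params)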
i = 0
# Make default figure size shorter and fatter
figure(figsize=(8,3))
subplot(2,1,1)
M0 = 0
M1 = N1-N0
spectra = laser_spectra[N0:N1]
sigma = laser_sigma[N0:N1]
model = spectra + sigma*laser_fit.residual
resid = spectra-model
plot(spectra[M0:M1],'k.', label='data')
plot(model[M0:M1],'r',label='model')
xlabel('Radial Bin')
ylabel('Detector Counts')
legend(loc='best', prop={'size':8})
subplots_adjust(bottom=0.25) # or else the xlabel will be cut off, for some reason
#title(lasers[i])
print(fn)
figure(figsize=(8,1.5))
subplot(2,1,2)
plot(resid,'k.', label='residual')
xlabel('Radial Bin')
ylabel('Detector Counts')
legend(loc='best', prop={'size':8})
subplots_adjust(bottom=0.0) # or else the xlabel will be cut off, for some reason
#fl = instrument['pix_size']*d.info['XBinning']/o.params['alpha'].value
#print fl
/rdata/airglow/fpi/minime05/uao/2015/20150214/laser_X1.tif
print '%.2f m/s error from laser calibration' % (laser_params['t'].stderr*20/1e-9)
2.98 m/s error from laser calibration
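The conversion factor 20/1e-9 follows from the etalon equation: at the fringe center, m*lam = 2*t, so a gap error dt produces a fractional wavelength error dt/t and hence a velocity error dv = c*dt/t. With the fitted gap t of about 1.5 cm (see the fitted parameters below), c/t is about 2e10 s^-1, i.e. 20 m/s per nanometer of gap uncertainty, so the 2.98 m/s figure corresponds to a gap uncertainty of roughly 0.15 nm.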
for param in laser_params:
    vals = [o.params[param].value for o in output]
    print '%06s: %.3e' % (param, median(vals))
     n: 1.000e+00
     t: 1.500e-02
   lam: 6.328e-07
     R: 8.720e-01
 alpha: 8.849e-05
     I: 2.045e+04
     B: 6.819e+02
    a1: -4.465e-02
    a2: -5.884e-03
    b0: 9.349e-01
    b1: 2.351e-01
    b2: -3.389e-02
######### Spline fit to the parameters ###########
# Grab the seconds from the first laser observation (required for spline interpolation)
dt = []
for x in laser_times:
    diff = (x - laser_times[0])
    dt.append(diff.seconds + diff.days*86400.)
dt = array(dt)
# Spline fit to the parameters.
# (This will work for both the fitted and constant params)
for param in laser_fit.params:
    # Grab the estimates and the associated (normalized) uncertainty
    p = []
    ep = []
    for o in output:
        p.append(o.params[param].value)
        ep.append(o.params[param].stderr)
    p = np.array(p)
    ep = np.array(ep)
    m = len(ep)
    s = m # Expected value of chi^2

    if param == 't': # force 't' to fit more smoothly
        # i.e., we don't trust those small error bars, so make them bigger
        ep = ep * 3 # TODO: optimal value? 1 seems too small, 10 seems too big

    # Set weights
    if laser_fit.params[param].vary:
        # Use the error bars from the inversion, but replace the values that are
        # infinite (i.e., where the error bars couldn't be calculated) with very
        # large numbers.
        w = [1/stderr if stderr > 0 else 1/(1e6*paramval) for paramval, stderr in zip(p, ep)]
    else:
        # Use arbitrary error bars (doesn't matter - it will fit exactly)
        w = np.ones(shape(ep))

    # Try spline, then linear, then zeroth-order interpolation
    try: # spline interpolation
        #sfit = interpolate.UnivariateSpline(np.array(dt), p, w=w, s=s)
        sfit = interpolate.interp1d(np.array(dt), p)
    except: # Try linear.
        notify_the_humans = True
        try: # If there's only one laser image, then even linear won't work.
            print 'Spline interpolation failed for laser param "%s". Defaulting to linear interpolation.\n' % (param)
            sfit = interpolate.interp1d(np.array(dt), p)
        except: # All else failed. There's probably only one laser image.
            print 'Spline and linear interpolation both failed for laser param ' + \
                  '"%s". Defaulting to zeroth-order interpolation.\n' % (param)
            sfit = lambda t: p[0] # Whatever the input, just return the parameter.
    laser_fit.params[param].spfit = sfit
Spline interpolation failed for laser param "n". Defaulting to linear interpolation.
Spline and linear interpolation both failed for laser param "n". Defaulting to zeroth-order interpolation.
(The same pair of messages repeats for "t", "lam", "R", "alpha", "I", "B", "a1", "a2", "b0", "b1", and "b2": with only a single laser image, every parameter falls back to a constant, zeroth-order value.)
#fn = '/rdata/airglow/fpi/minime05/uao/2015/20150207/UAO_L_20150208_100608_138.img'
fn = '/rdata/airglow/fpi/minime05/uao/2015/20150214/180sec_X1.tif'
d = Image.open(fn)
img = array(d)
imshow(img, vmin = prctile(img,2), vmax = prctile(img,98))
colorbar()
<matplotlib.colorbar.Colorbar instance at 0x7f6fbbca7368>
#fn = '/rdata/airglow/fpi/minime05/uao/2015/20150207/UAO_L_20150208_100608_138.img'
fn = '/rdata/airglow/fpi/minime05/uao/2015/20150214/10sec_X1.tif'
d = Image.open(fn)
img = array(d)
imshow(img, vmin = prctile(img,2), vmax = prctile(img,98))
colorbar()
<matplotlib.colorbar.Colorbar instance at 0x7f6fbb90cdd0>
#fn = '/rdata/airglow/fpi/minime05/uao/2015/20150207/UAO_L_20150208_100608_138.img'
fn = '/rdata/airglow/fpi/minime05/uao/2015/20150214/2sec_X1.tif'
d = Image.open(fn)
img = array(d)
imshow(img, vmin = prctile(img,2), vmax = prctile(img,98))
colorbar()
<matplotlib.colorbar.Colorbar instance at 0x7f6fba09ca70>
sky = glob.glob('/rdata/airglow/fpi/minime05/uao/2015/20150214/[0-9]*.tif')
sky.sort()
sky = sky[:]
sky
['/rdata/airglow/fpi/minime05/uao/2015/20150214/10sec_X1.tif', '/rdata/airglow/fpi/minime05/uao/2015/20150214/15sec_X1.tif', '/rdata/airglow/fpi/minime05/uao/2015/20150214/180sec_X1.tif', '/rdata/airglow/fpi/minime05/uao/2015/20150214/1sec_X1.tif', '/rdata/airglow/fpi/minime05/uao/2015/20150214/20sec_X1.tif', '/rdata/airglow/fpi/minime05/uao/2015/20150214/25sec_X1.tif', '/rdata/airglow/fpi/minime05/uao/2015/20150214/2sec_X1.tif', '/rdata/airglow/fpi/minime05/uao/2015/20150214/30sec_X1.tif', '/rdata/airglow/fpi/minime05/uao/2015/20150214/3sec_X1.tif', '/rdata/airglow/fpi/minime05/uao/2015/20150214/40sec_X1.tif', '/rdata/airglow/fpi/minime05/uao/2015/20150214/4sec_X1.tif', '/rdata/airglow/fpi/minime05/uao/2015/20150214/50sec_X1.tif', '/rdata/airglow/fpi/minime05/uao/2015/20150214/5sec_X1.tif', '/rdata/airglow/fpi/minime05/uao/2015/20150214/60sec_X1.tif', '/rdata/airglow/fpi/minime05/uao/2015/20150214/7.5sec_X1.tif', '/rdata/airglow/fpi/minime05/uao/2015/20150214/75sec_X1.tif', '/rdata/airglow/fpi/minime05/uao/2015/20150214/90sec_X1.tif']
def ReadIMG(fname): # Override FPI behavior
    d = Image.open(fname)
    return d
# Constants
c = 299792458.        # speed of light [m/s]
k = 1.3806503e-23     # Boltzmann constant [J/K]
m = 16/6.0221367e26   # mass of atomic oxygen [kg] (16 amu)
# Initialize data holder
sky_times = []
az = []
ze = []
LOSwind = []
sigma_LOSwind = []
T = []
sigma_T = []
direction = []
sky_redchi = []
skyI = []
skyB = []
ccdB = []
sigma_skyI = []
sigma_skyB = []
sigma_ccdB = []
sky_out = []
sky_intT = []
sky_temperature = []
lam0 = instrument['lam0']
last_lamc = lam0 # nominal value of line center
lamc_start_set = False # Turns true when a reasonable search interval center is found
my_params = dict()
for i, fname in enumerate(sky):

    # Read in the sky image
    d = ReadIMG(fname)
    img = np.asarray(d)

    # Calculate the annuli to use for this time
    annuli = FindEqualAreas(img, cx, cy, N)

    # Perform the annular summation
    sky_spectra, sky_sigma = AnnularSum(img, annuli, 0)

    # See if there are any NaNs in the spectra. If so, skip this one.
    if np.isnan(sky_spectra).sum() == 0:

        # Append the time of the sky image (placeholders are used here)
        sky_times.append(nan)
        sky_intT.append(nan)
        sky_temperature.append(nan)

        # Set the fitting parameter guesses
        sky_params = Parameters()

        # Use the spline-interpolated values for the instrument function at this time
        for param in laser_fit.params:
            # Hack to just use the last laser image:
            sky_params.add(param, value = laser_fit.params[param].value, vary = False)

        # But change the sky wavelength to lam0
        sky_params['lam'].value = lam0

        # Set up the forward model
        L = 301
        A_1D, lamvec = get_conv_matrix_1D(sky_params, annuli['r'][N0:N1], L, lam0)

        # Come up with good initial guesses for the sky parameters
        skyI_guess = sky_spectra[N0:N1].max() - sky_spectra[N0:N1].min()
        skyB_guess = 0.0
        ccdB_guess = sky_spectra[N0:N1].min()

        sky_params.add('lamc', value = lam0,       vary = True) # We'll do a grid search to find a good value for lamc
        sky_params.add('T',    value = 1000,       vary = True, min = 0.1, max = 5000.0)
        sky_params.add('skyI', value = skyI_guess, vary = True, min = 0.0)
        sky_params.add('skyB', value = skyB_guess, vary = True)
        sky_params.add('skym', value = 0.0,        vary = False) # Don't try to estimate skym (or should we?)
        sky_params.add('ccdB', value = ccdB_guess, vary = True)
        sky_params.add('lam0', value = lam0,       vary = False) # This is determined by chemistry, so it can't change.

        # Do a grid search to find a good starting value for lamc.
        # The instrument bias might mean that lam0 is a really bad value to start with.
        # Search over one FSR around the last solved-for value (or the nominal value, if it's the first run).
        # It's not really important to sort by direction, since the instrument bias dominates.
        def goodness(lamc):
            # Return the correlation between the fringe with this lamc and the data
            sky_params['lamc'].value = lamc
            fringe = Sky_FringeModel(sky_params, annuli['r'][N0:N1], lamvec, A_1D)
            return np.dot(fringe, sky_spectra[N0:N1])/(norm(fringe)*norm(sky_spectra[N0:N1]))

        FSR = lam0**2/(2*sky_params['t'].value)
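        # (For adjacent interference orders at a fixed gap t, the wavelength spacing
        # is approximately lam0**2/(2*t) - the free spectral range used to bound the search.)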
        lamc_candidates = linspace(last_lamc - FSR/2, last_lamc + FSR/2, 50)
        corrvec = np.array([goodness(lamc) for lamc in lamc_candidates])
        best_lamc = lamc_candidates[corrvec.argmax()]
        sky_params['lamc'].value = best_lamc

        # Make a copy for future debugging
        sky_params_init = Parameters()
        for p in sky_params:
            sky_params_init.add(p, value = sky_params[p].value)

        # Take the inversion in steps
        order = [
            ['skyI'],
            ['skyI','ccdB'],
            ['skyI','ccdB','skyB'],
            ['skyI','ccdB','skyB','lamc','T'],
        ]
        for group in order: # Set all the params in this group to "vary=True", then run the inversion
            for param in sky_params.keys():
                if param in group:
                    sky_params[param].vary = True
                else:
                    sky_params[param].vary = False
            sky_fit = Minimizer(Sky_Residual, sky_params,
                                fcn_args=(annuli['r'][N0:N1], lamvec, A_1D),
                                fcn_kws={'data': sky_spectra[N0:N1], 'sigma': sky_sigma[N0:N1]},
                                scale_covar=True)
            sky_fit.prepare_fit()
            sky_fit.leastsq()

        # If the inversion failed, replace values with NaNs and set error bars to inf
        if not sky_fit.success or not sky_fit.errorbars:
            for p in sky_fit.params:
                sky_fit.params[p].value = np.nan
                sky_fit.params[p].stderr = np.inf

        sky_redchi.append(sky_fit.redchi)
        sky_out.append(sky_fit)

        # Transform from lamc to v and collect all parameters
        lamc = sky_params['lamc'].value
        sigma_lamc = sky_params['lamc'].stderr
        v = c * (lamc/lam0 - 1)
        sigma_v = c * sigma_lamc / lam0
        LOSwind.append(v) # negative sign
        sigma_LOSwind.append(sigma_v)
        T.append(sky_fit.params['T'].value)
        sigma_T.append(sky_fit.params['T'].stderr)
        skyI.append(sky_params['skyI'].value)
        sigma_skyI.append(sky_params['skyI'].stderr)
        skyB.append(sky_params['skyB'].value)
        sigma_skyB.append(sky_params['skyB'].stderr)

        #logfile.write(datetime.datetime.now().strftime('%m/%d/%Y %H:%M:%S %p: ') + '%s reduced chisqr: %.4f\n' % (fname, sky_fit.redchi))
        print('%s reduced chisqr: %.4f. Message:"%s"' % (fname, sky_fit.redchi, sky_fit.message))

        # Use the lamc value to center the next grid search, if the error bars are small enough
        if sigma_v < SIGMA_V_THRESH and sigma_v > 0 and not lamc_start_set: # sigma_v == 0 means it didn't converge
            last_lamc = lamc # save for next time
            lamc_start_set = True

    else:
        #logfile.write(datetime.datetime.now().strftime('%m/%d/%Y %H:%M:%S %p: ') + '%s is invalid and not analyzed\n' % fname)
        print '%s is invalid and not analyzed' % fname
# Convert sky_params to array
n = len(sky_out)
sky_value = {}
sky_stderr = {}
for p in sky_out[-1].params:
    sky_value[p] = np.zeros(n)
    sky_stderr[p] = np.zeros(n)
    for i in range(0, n):
        sky_value[p][i] = sky_out[i].params[p].value
        sky_stderr[p][i] = sky_out[i].params[p].stderr
/rdata/airglow/fpi/minime05/uao/2015/20150214/10sec_X1.tif reduced chisqr: 9.4501. Message:"Tolerance seems to be too small."
/rdata/airglow/fpi/minime05/uao/2015/20150214/15sec_X1.tif reduced chisqr: 3.8875. Message:"Tolerance seems to be too small."
/rdata/airglow/fpi/minime05/uao/2015/20150214/180sec_X1.tif reduced chisqr: 2.0485. Message:"Tolerance seems to be too small."
/rdata/airglow/fpi/minime05/uao/2015/20150214/1sec_X1.tif reduced chisqr: 1.3123. Message:"Tolerance seems to be too small."
/rdata/airglow/fpi/minime05/uao/2015/20150214/20sec_X1.tif reduced chisqr: 2.6414. Message:"Tolerance seems to be too small."
/rdata/airglow/fpi/minime05/uao/2015/20150214/25sec_X1.tif reduced chisqr: 2.3226. Message:"Tolerance seems to be too small."
/rdata/airglow/fpi/minime05/uao/2015/20150214/2sec_X1.tif reduced chisqr: 1.3746. Message:"Tolerance seems to be too small."
/rdata/airglow/fpi/minime05/uao/2015/20150214/30sec_X1.tif reduced chisqr: 1.9606. Message:"Tolerance seems to be too small."
/rdata/airglow/fpi/minime05/uao/2015/20150214/3sec_X1.tif reduced chisqr: 3.8010. Message:"Tolerance seems to be too small."
/rdata/airglow/fpi/minime05/uao/2015/20150214/40sec_X1.tif reduced chisqr: 1.8167. Message:"Tolerance seems to be too small."
/rdata/airglow/fpi/minime05/uao/2015/20150214/4sec_X1.tif reduced chisqr: 8.9006. Message:"Tolerance seems to be too small."
/rdata/airglow/fpi/minime05/uao/2015/20150214/50sec_X1.tif reduced chisqr: 1.7377. Message:"Tolerance seems to be too small."
/rdata/airglow/fpi/minime05/uao/2015/20150214/5sec_X1.tif reduced chisqr: 13.5832. Message:"Tolerance seems to be too small."
/rdata/airglow/fpi/minime05/uao/2015/20150214/60sec_X1.tif reduced chisqr: 1.7307. Message:"Tolerance seems to be too small."
/rdata/airglow/fpi/minime05/uao/2015/20150214/7.5sec_X1.tif reduced chisqr: 16.0159. Message:"Tolerance seems to be too small."
/rdata/airglow/fpi/minime05/uao/2015/20150214/75sec_X1.tif reduced chisqr: 1.8871. Message:"Tolerance seems to be too small."
/rdata/airglow/fpi/minime05/uao/2015/20150214/90sec_X1.tif reduced chisqr: 2.1319. Message:"Tolerance seems to be too small."
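# As a quick sanity check on the lamc-to-v transform used above (illustrative only;
# this reuses the lam0 and c defined in earlier cells):
v_test = 30.0                  # example line-of-sight wind [m/s]
dlam = lam0 * v_test / c       # corresponding Doppler shift of the line center [m]
print('A %.0f m/s LOS wind shifts lamc by %.2e m' % (v_test, dlam))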
i = 2
M0 = N0
M1 = N1
# Make default figure size shorter and fatter
figure(figsize=(6,2.5))
subplot(2,1,1)
o = sky_out[i]
spectra = o.userkws['data']
sigma = o.userkws['sigma']
model = spectra + sigma*o.residual
plot(spectra[M0:M1],'k.', label='data')
plot(model[M0:M1],'r',label='model')
xlabel('Radial Bin')
ylabel('Detector Counts')
legend(loc='best', prop={'size':8},ncol=2)
subplots_adjust(bottom=0.25) # or else the xlabel will be cut off, for some reason
plt.title('%s' % sky[i].split('/')[-1])
xlim((0,N1))
subplot(2,1,2)
plot(o.residual[M0:M1],'k.', label='residual')
xlabel('Radial Bin')
ylabel('Detector Counts')
legend(loc='best', prop={'size':8})
xlim((0,N1))
#subplots_adjust(bottom=0.25) # or else the xlabel will be cut off, for some reason
i = i + 1
i = 0
i = 6
M0 = N0
M1 = N1
# Make default figure size shorter and fatter
figure(figsize=(6,2.5))
subplot(2,1,1)
o = sky_out[i]
spectra = o.userkws['data']
sigma = o.userkws['sigma']
model = spectra + sigma*o.residual
plot(spectra[M0:M1],'k.', label='data')
plot(model[M0:M1],'r',label='model')
xlabel('Radial Bin')
ylabel('Detector Counts')
legend(loc='best', prop={'size':8},ncol=2)
subplots_adjust(bottom=0.25) # or else the xlabel will be cut off, for some reason
plt.title('%s' % sky[i].split('/')[-1])
xlim((0,N1))
subplot(2,1,2)
plot(o.residual[M0:M1],'k.', label='residual')
xlabel('Radial Bin')
ylabel('Detector Counts')
legend(loc='best', prop={'size':8})
xlim((0,N1))
#subplots_adjust(bottom=0.25) # or else the xlabel will be cut off, for some reason
i = i + 1
exp_t = zeros(len(sky))
for i in range(len(sky)):
    exp_t[i] = float(sky[i].split('/')[-1].split('sec')[0])
dref = mean(LOSwind)
LOSwindref = LOSwind - dref
figure(figsize=(5,5))
subplot(2,1,1)
errorbar(exp_t,LOSwindref, yerr=sigma_LOSwind,fmt='k.')
grid()
xscale('log')
xlim((0.9,200))
#xlabel('Exposure Time [sec]')
ylabel('Vertical Wind [m/s]')
subplot(2,1,2)
plot(exp_t, sigma_LOSwind,'k.',markersize=5)
xscale('log')
yscale('log')
xlim((0.9,200))
xlabel('Exposure Time [sec]')
ylabel('Wind Uncertainty [m/s]')
grid()
This comparison is not apples-to-apples, since the exposures were taken on different nights. The red dots were taken from the historical database with the following requirements:
import FPIprocess
def get_exp_time_samples(instr_name, year, doy):
    '''
    Return exposure times and wind error bars for zenith samples taken under
    clear skies (cloud indicator < -25) during the 20:00 hour.
    '''
    conv_exp_t = []
    conv_sigma_LOSwind = []
    r = FPIprocess.load_level0(instr_name, year, doy)
    fpir = r['FPI_Results']
    t = fpir['sky_times']
    d = fpir['direction']
    c = fpir['Clouds']['mean']
    sw = fpir['sigma_LOSwind']
    et = fpir['sky_intT']
    for i in range(len(t)):
        if d[i] == 'Zenith' and c[i] < -25 and t[i].hour in [20]:
            conv_exp_t.append(et[i])
            conv_sigma_LOSwind.append(sw[i])
            #print d[i], c[i], t[i].hour, et[i], sw[i]
    return conv_exp_t, conv_sigma_LOSwind
print '((Click here for code))'
((Click here for code))
instr_name = 'minime05'
years = [2012, 2013, 2014,2015]
doys = arange(30,60)
conv_exp_t = []
conv_sigma_LOSwind = []
for year in years:
    for doy in doys:
        try:
            t, s = get_exp_time_samples(instr_name, year, doy)
            conv_exp_t.extend(t)
            conv_sigma_LOSwind.extend(s)
        except Exception as e:
            print year, doy, e
conv_exp_t = array(conv_exp_t)
conv_sigma_LOSwind = array(conv_sigma_LOSwind)
2012 30 cannot concatenate 'str' and 'NoneType' objects
(... the same error repeats for 2012, days 31 through 59 ...)
2014 38 [Errno 2] No such file or directory: '/rdata/airglow/fpi/results/minime05_uao_20140207.npz'
2015 40 [Errno 2] No such file or directory: '/rdata/airglow/fpi/results/minime05_uao_20150209.npz'
(... the same missing-file error repeats for 2015, days 41 through 59, i.e. 20150210 through 20150228 ...)
plot(conv_exp_t, conv_sigma_LOSwind,'rx',markersize=5,label='Conventional CCD')
plot(exp_t, sigma_LOSwind,'k.',markersize=5,label='EMCCD')
xscale('log')
yscale('log')
xlim((0.9,200))
#xlim((20,400))
xlabel('Exposure Time [sec]')
ylabel('Wind Uncertainty [m/s]')
legend(loc='best',prop={'size':8},numpoints=1)
grid()
t2 = list(set(conv_exp_t))
e2 = zeros(len(t2))
for i in range(len(t2)):
    t = t2[i]
    idx = conv_exp_t == t
    e2[i] = mean(conv_sigma_LOSwind[idx])
plot(t2, e2,'r.',markersize=5,label='Conventional CCD')
plot(exp_t, sigma_LOSwind,'k.',markersize=5,label='EMCCD')
xscale('log')
yscale('log')
xlim((0.9,200))
#xlim((20,400))
xlabel('Exposure Time [sec]')
ylabel('Wind Uncertainty [m/s]')
legend(loc='best',prop={'size':8},numpoints=1)
grid()
200-second exposure
#fn = '/rdata/airglow/fpi/minime05/uao/2015/20150207/UAO_L_20150208_100608_138.img'
fn = '/rdata/airglow/fpi/minime05/uao/2015/20150214/IR_732_200sec_X1.tif'
d = Image.open(fn)
img = array(d)
figure(figsize=(3,3))
imshow(img, vmin = prctile(img,2), vmax = prctile(img,98))
colorbar();
# Calculate the annuli to use for this time
annuli = FindEqualAreas(img,cx,cy,N)
# Perform the annular summation
sky_spectra, sky_sigma = AnnularSum(img,annuli,0)
figure(figsize=(6,1))
plot(sky_spectra,'k.')
xlim((0,N1))
grid()
import subprocess
import shutil
nb_name = 'EMCCD_ExpTime_Sweep.ipynb' # There's probably a way to get this programmatically
cmd = ['ipython', 'nbconvert', '--to', 'slides', nb_name,
       '--reveal-prefix', '"http://cdn.jsdelivr.net/reveal.js/2.6.2"',
       '--template', 'output_toggle']
# The --template option points to a custom template which opens and closes the input code cells with a click
subprocess.call(cmd)
html_name = '%s.slides.html' % (nb_name.split('.')[0])
shutil.copy(html_name, '/home/bhardin2/public_html/slideshows/')