This article collects typical usage examples of the Python DataStructures class, drawn from real projects. If you are wondering what the DataStructures class does and how to use it, the hand-picked examples below should help.
15 code examples of the DataStructures class are shown below, sorted by popularity by default.
Example 1: SmoothData
def SmoothData(self, numiters=10, lowreject=2, highreject=2):
    done = False
    data = self.smoothing_data.copy()
    iterations = 0
    while not done and iterations < numiters:
        iterations += 1
        done = True
        smoother = UnivariateSpline(data.x, data.y / data.cont, s=self.smoothing_factor)
        smoothed = DataStructures.xypoint(x=data.x)
        smoothed.y = smoother(smoothed.x)
        resid = data.y / data.cont - smoothed.y
        std = np.std(resid)
        badindices = np.where(np.logical_or(resid < -lowreject * std, resid > highreject * std))[0]
        # plt.figure(2)
        # plt.plot(data.x, resid, 'ro')
        # plt.plot(data.x, -lowreject*std*np.ones(data.x.size), 'b-')
        # plt.plot(data.x, highreject*std*np.ones(data.x.size), 'b-')
        # plt.show()
        if badindices.size > 0 and data.size() - badindices.size > 10:
            done = False
            data.x = np.delete(data.x, badindices)
            data.y = np.delete(data.y, badindices)
            data.cont = np.delete(data.cont, badindices)
    return DataStructures.xypoint(x=self.smoothing_data.x, y=smoother(self.smoothing_data.x))
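The pattern above (fit a smoothing spline, clip points more than lowreject/highreject standard deviations from the fit, then refit) also works without the DataStructures container. A minimal standalone sketch, with synthetic data and a smoothing factor chosen purely for illustration:
import numpy as np
from scipy.interpolate import UnivariateSpline

def smooth_with_rejection(x, y, s=1.0, numiters=10, lowreject=2, highreject=2):
    """Iteratively fit a smoothing spline, clipping outliers on each pass."""
    xfit, yfit = x.copy(), y.copy()
    for _ in range(numiters):
        smoother = UnivariateSpline(xfit, yfit, s=s)
        resid = yfit - smoother(xfit)
        std = np.std(resid)
        bad = np.where((resid < -lowreject * std) | (resid > highreject * std))[0]
        # Stop once nothing is rejected, or too few points would remain.
        if bad.size == 0 or xfit.size - bad.size <= 10:
            break
        xfit = np.delete(xfit, bad)
        yfit = np.delete(yfit, bad)
    return smoother(x)  # evaluate the final spline on the original grid

x = np.linspace(0, 10, 500)
y = np.sin(x) + np.random.normal(0, 0.1, x.size)
y[::50] += 2.0  # inject a few outliers
smoothed = smooth_with_rejection(x, y, s=5.0)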
Example 2: GetSpectrum
def GetSpectrum(self, T, metal, logg, vmicro):
    # Make sure it is not already read in
    if self.read_dict[T][metal][logg][vmicro]:
        if self.debug:
            print("Model already read. Skipping...")
        return self.models[T][metal][logg][vmicro]
    elif self.model_dict[T][metal][logg][vmicro] == '':
        if self.debug:
            print("No model found with: \n\tT=%f\n\t[Fe/H]=%f\n\tlog(g)=%f\n\tvmicro=%f" % (T, metal, logg, vmicro))
        raise ValueError
    else:
        # If we get here, the model does exist and has not yet been read in.
        if self.debug:
            print("Reading model with: \n\tT=%f\n\t[Fe/H]=%f\n\tlog(g)=%f\n\tvmicro=%f" % (T, metal, logg, vmicro))
        fluxfile = self.model_dict[T][metal][logg][vmicro]
        contfile = fluxfile.replace("vis.7", "vis.17")
        x, y = np.loadtxt(fluxfile, usecols=(0, 1), unpack=True)
        x2, c = np.loadtxt(contfile, usecols=(0, 1), unpack=True)
        flux = DataStructures.xypoint(x=x * units.angstrom.to(units.nm), y=y)
        cont = DataStructures.xypoint(x=x2 * units.angstrom.to(units.nm), y=c)
        flux = RebinData(flux, self.xgrid)
        cont = RebinData(cont, self.xgrid)
        model = DataStructures.xypoint(x=self.xgrid, y=flux.y, cont=cont.y)
        self.read_dict[T][metal][logg][vmicro] = True
        self.models[T][metal][logg][vmicro] = model
        return model
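At its core this method is a lazy cache: each model file is read from disk at most once and then served from memory. The same pattern in isolation (the loader callable and tuple key are assumptions, not the original class's API):
class ModelCache(object):
    """Cache expensive file reads, keyed by model parameters."""
    def __init__(self, loader):
        self.loader = loader  # callable mapping a parameter tuple to a model
        self.models = {}

    def get(self, T, metal, logg, vmicro):
        key = (T, metal, logg, vmicro)
        if key not in self.models:
            self.models[key] = self.loader(key)  # first request: hit the disk
        return self.models[key]

cache = ModelCache(lambda key: "model for %s" % (key,))
model = cache.get(8000, 0.0, 4.0, 2.0)  # loader runs once; repeat calls use the cache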
Example 3: MakeXYpoints
def MakeXYpoints(datafile, extensions=False, x=None, y=None, cont=None, errors=None):
    print("Reading in file %s: " % datafile)
    orders = []
    if extensions:
        # This means the data is in fits extensions, with one order per extension.
        # At least x and y should be given (as strings identifying the field in the table record array).
        if not isinstance(x, str):
            x = input("Give name of the field which contains the x array: ")
        if not isinstance(y, str):
            y = input("Give name of the field which contains the y array: ")
        hdulist = pyfits.open(datafile)
        if cont is None:
            if errors is None:
                for i in range(1, len(hdulist)):
                    data = hdulist[i].data
                    xypt = DataStructures.xypoint(x=data.field(x), y=data.field(y))
                    orders.append(xypt)
            else:
                if not isinstance(errors, str):
                    errors = input("Give name of the field which contains the errors/sigma array: ")
                for i in range(1, len(hdulist)):
                    data = hdulist[i].data
                    xypt = DataStructures.xypoint(x=data.field(x), y=data.field(y), err=data.field(errors))
                    orders.append(xypt)
        elif isinstance(cont, str):
            if errors is None:
                for i in range(1, len(hdulist)):
                    data = hdulist[i].data
                    xypt = DataStructures.xypoint(x=data.field(x), y=data.field(y), cont=data.field(cont))
                    orders.append(xypt)
            else:
                if not isinstance(errors, str):
                    errors = input("Give name of the field which contains the errors/sigma array: ")
                for i in range(1, len(hdulist)):
                    data = hdulist[i].data
                    xypt = DataStructures.xypoint(x=data.field(x), y=data.field(y), cont=data.field(cont),
                                                  err=data.field(errors))
                    orders.append(xypt)
        hdulist.close()
    else:
        # Data is in a big array.
        hdulist = pyfits.open(datafile)
        data = hdulist[0].data
        hdulist.close()
        numorders = data.shape[0]
        for i in range(numorders):
            wave = data[i, :, 0]
            flux = data[i, :, 1]
            error = np.ones(wave.size) * 1e9
            error[flux > 0] = np.sqrt(flux[flux > 0])
            orders.append(DataStructures.xypoint(x=wave, y=flux, err=error))
    return orders
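pyfits has since been absorbed into astropy as astropy.io.fits, which keeps the same open()/field() interface used above. A short sketch of the extensions=True branch; the file name and column names here are hypothetical:
from astropy.io import fits  # drop-in successor to pyfits

orders = []
with fits.open("spectrum.fits") as hdulist:  # hypothetical file
    for hdu in hdulist[1:]:  # one echelle order per table extension
        data = hdu.data
        orders.append((data.field("wavelength"), data.field("flux")))  # hypothetical fields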
Example 4: OptimalSmooth
def OptimalSmooth(order, normalize=True):
    """
    Determine the best window size with cross-validation
    """
    # Flatten the spectrum
    order.y /= order.cont / order.cont.mean()
    order.err /= order.cont / order.cont.mean()

    # Remove outliers (telluric residuals)
    smoothed = SmoothData(order, windowsize=41, normalize=False)
    temp = smoothed.copy()
    temp.y = order.y / smoothed.y
    temp.cont = FittingUtilities.Continuum(temp.x, temp.y, lowreject=2, highreject=2, fitorder=3)
    outliers = HelperFunctions.FindOutliers(temp, numsiglow=6, numsighigh=6, expand=10)
    data = order.copy()
    if len(outliers) > 0:
        # order.y[outliers] = order.cont[outliers]
        order.y[outliers] = smoothed.y[outliers]
        order.err[outliers] = 9e9

    # Make cross-validation sets
    inp = np.transpose((order.x, order.err, order.cont))
    X_train, X_test, y_train, y_test = cross_validation.train_test_split(inp, order.y, test_size=0.2)
    X_train = X_train.transpose()
    X_test = X_test.transpose()
    sorter_train = np.argsort(X_train[0])
    sorter_test = np.argsort(X_test[0])
    training = DataStructures.xypoint(x=X_train[0][sorter_train], y=y_train[sorter_train],
                                      err=X_train[1][sorter_train], cont=X_train[2][sorter_train])
    validation = DataStructures.xypoint(x=X_test[0][sorter_test], y=y_test[sorter_test],
                                        err=X_test[1][sorter_test], cont=X_test[2][sorter_test])

    """
    # Try each smoothing parameter
    s_array = np.logspace(-3, 1, 100)
    chisq = []
    for s in s_array:
        fcn = smooth(training.x, training.y, w=1.0/training.err, s=s)
        prediction = fcn(validation.x)
        chisq.append(cost(validation.y, prediction, validation.err))
        print s, chisq[-1]
    idx = np.argmin(np.array(chisq) - 1.0)
    s = s_array[idx]
    """
    s = 0.9 * order.size()
    smoothed = order.copy()
    fcn = smooth(smoothed.x, smoothed.y, w=1.0 / smoothed.err, s=s)
    smoothed.y = fcn(smoothed.x)

    plt.plot(order.x, order.y)
    plt.plot(smoothed.x, smoothed.y)
    plt.show()
    return smoothed, s
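The commented-out block is the interesting part: pick the spline smoothing parameter s by minimizing the validation error. A self-contained version of that idea, using a plain numpy split in place of the long-deprecated sklearn.cross_validation module (the data and the grid of s values are assumptions):
import numpy as np
from scipy.interpolate import UnivariateSpline

x = np.linspace(0, 10, 1000)
y = np.sin(x) + np.random.normal(0, 0.1, x.size)

# 80/20 train/validation split; sort so the spline sees increasing x.
idx = np.random.permutation(x.size)
train, test = np.sort(idx[:800]), np.sort(idx[800:])

best_s, best_cost = None, np.inf
for s in np.logspace(-3, 1, 20):
    fcn = UnivariateSpline(x[train], y[train], s=s * train.size)
    cost = np.mean((y[test] - fcn(x[test])) ** 2)  # validation mean squared error
    if cost < best_cost:
        best_s, best_cost = s, cost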
Example 5: Interpolate_Old
def Interpolate_Old(self, dictionary, SpT):
    # First, we must convert the relations above into a monotonically increasing system
    # Just add ten when we get to each new spectral type
    relation = DataStructures.xypoint(len(dictionary))

    # Strip the spectral type of the luminosity class information
    SpT = re.search('[A-Z]([0-9]\.?[0-9]*)', SpT).group()

    # Convert each key to a number
    xpoints = []
    ypoints = []
    for key in dictionary:
        xpoints.append(self.SpT_To_Number(key))
        ypoints.append(dictionary[key])

    sorting_indices = [i[0] for i in sorted(enumerate(xpoints), key=lambda x: x[1])]
    for index in range(len(dictionary)):
        i = sorting_indices[index]
        relation.x[index] = xpoints[i]
        relation.y[index] = ypoints[i]

    RELATION = UnivariateSpline(relation.x, relation.y, s=0)

    spnum = self.SpT_To_Number(SpT)
    if spnum > 0:
        return RELATION(spnum)
    else:
        return np.nan
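The same idea in isolation: map spectral types onto a monotonic numeric scale, then interpolate the tabulated quantity with an exact (s=0) spline. The toy type-to-number conversion and temperature table below are assumptions for illustration:
import numpy as np
from scipy.interpolate import UnivariateSpline

def spt_to_number(spt):
    """Toy scale: O=0, B=10, A=20, ... plus the numeric subclass."""
    return "OBAFGKM".index(spt[0]) * 10 + float(spt[1:])

table = {"B0": 30000.0, "A0": 9700.0, "F0": 7200.0, "G0": 6000.0, "K0": 5300.0}
pairs = sorted((spt_to_number(k), v) for k, v in table.items())
xp = [p[0] for p in pairs]
yp = [p[1] for p in pairs]
relation = UnivariateSpline(xp, yp, s=0)  # s=0 -> interpolating spline
print(relation(spt_to_number("A5")))  # estimated value for an A5 star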
Example 6: fix_chip_wavelength
def fix_chip_wavelength(model_orders, data_orders, band_cutoff=1870):
    """ Adjust the wavelength in data_orders to be self-consistent
    """
    # H band
    model_orders_H = [o.copy() for o in model_orders if o.x[-1] < band_cutoff]
    data_orders_H = [o.copy() for o in data_orders if o.x[-1] < band_cutoff]
    ordernums_H = 121.0 - np.arange(len(model_orders_H))
    p_H = fit_wavelength(model_orders_H, ordernums_H, first_order=3, last_order=len(ordernums_H) - 4)

    # K band
    model_orders_K = [o.copy() for o in model_orders if o.x[-1] > band_cutoff]
    data_orders_K = [o.copy() for o in data_orders if o.x[-1] > band_cutoff]
    ordernums_K = 92.0 - np.arange(len(model_orders_K))
    p_K = fit_wavelength(model_orders_K, ordernums_K, first_order=7, last_order=len(ordernums_K) - 4)

    new_orders = []
    for i, order in enumerate(data_orders):
        pixels = np.arange(order.size(), dtype=float)
        if order.x[-1] < band_cutoff:
            # H band
            ordernum = ordernums_H[i] * np.ones_like(pixels)
            wave = p_H(pixels, ordernum) / ordernum
        else:
            # K band
            ordernum = ordernums_K[i - len(ordernums_H)] * np.ones_like(pixels)
            wave = p_K(pixels, ordernum) / ordernum
        new_orders.append(DataStructures.xypoint(x=wave, y=order.y, cont=order.cont, err=order.err))
    return new_orders
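The p_H(pixels, ordernum) / ordernum construction exploits the grating equation: the product of order number m and wavelength varies smoothly with pixel and m, so a single 2-D polynomial can recalibrate every order at once. fit_wavelength itself is not shown in this example, so the sketch below is an assumption about that idea rather than its actual implementation:
import numpy as np
from numpy.polynomial import polynomial as P

def fit_wavelength_2d(pixels, orders, m_lambda, deg=(4, 2)):
    """Least-squares fit of m*lambda as a 2-D polynomial in (pixel, order)."""
    V = P.polyvander2d(pixels, orders, deg)
    coef = np.linalg.lstsq(V, m_lambda, rcond=None)[0]
    return coef.reshape(deg[0] + 1, deg[1] + 1)

# Hypothetical calibration data: 5 orders of 2048 pixels with a toy dispersion.
pix = np.tile(np.arange(2048.0), 5)
m = np.repeat(np.arange(100.0, 105.0), 2048)
wave = 1.0e6 / m + 0.001 * pix
coef = fit_wavelength_2d(pix, m, m * wave)
new_wave = P.polyval2d(pix, m, coef) / m  # recalibrated wavelength for every pixel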
Example 7: CombineIntervals
def CombineIntervals(intervals, overlap=0):
    iteration = 0
    print("\n\n")
    for interval in intervals:
        lastindex = interval.x.size - overlap
        if iteration == 0:
            firstindex = 0
            master_x = interval.x[firstindex:lastindex]
            master_y = interval.y[firstindex:lastindex]
            master_cont = interval.cont[firstindex:lastindex]
        else:
            firstindex = np.searchsorted(interval.x, master_x[-1]) + 1
            master_x = np.append(master_x, interval.x[firstindex:lastindex])
            master_y = np.append(master_y, interval.y[firstindex:lastindex])
            master_cont = np.append(master_cont, interval.cont[firstindex:lastindex])
        iteration += 1

    output = DataStructures.xypoint(master_x.size)
    output.x = master_x.copy()
    output.y = master_y.copy()
    output.cont = master_cont.copy()

    # Scale continuum so the highest normalized flux = 1.0
    maxindex = np.argmax(output.y / output.cont)
    factor = output.y[maxindex] / output.cont[maxindex]
    output.cont *= factor
    return output
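Stripped of the xypoint container, the stitching logic is: trim `overlap` points off the end of each interval, then start each new chunk just past the last wavelength already kept, so the master arrays stay sorted and duplicate-free. A sketch with plain arrays (the synthetic intervals are assumptions):
import numpy as np

def combine_intervals(intervals, overlap=0):
    """intervals: list of (x, y) pairs with ascending, overlapping x grids."""
    master_x = master_y = None
    for x, y in intervals:
        last = x.size - overlap
        if master_x is None:
            master_x, master_y = x[:last], y[:last]
        else:
            first = np.searchsorted(x, master_x[-1]) + 1  # skip what is already covered
            master_x = np.append(master_x, x[first:last])
            master_y = np.append(master_y, y[first:last])
    return master_x, master_y

a = np.linspace(500.0, 510.0, 100)
b = np.linspace(508.0, 518.0, 100)
x, y = combine_intervals([(a, np.ones_like(a)), (b, np.ones_like(b))], overlap=5)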
Example 8: ConvolveSmooth
def ConvolveSmooth(self, numiters=10, lowreject=2, highreject=3):
    done = False
    data = self.smoothing_data.copy()
    # data.y /= data.cont
    iterations = 0
    if self.window_size % 2 == 0:
        self.window_size += 1
    while not done and iterations < numiters:
        iterations += 1
        done = True
        y = FittingUtilities.savitzky_golay(data.y, self.window_size, 5)
        # s = np.r_[data.y[self.window_size/2:0:-1], data.y, data.y[-1:-self.window_size/2:-1]]
        # y = np.convolve(window/window.sum(), s, mode='valid')
        reduced = data.y / y
        sigma = np.std(reduced)
        mean = np.mean(reduced)
        badindices = np.where(np.logical_or((reduced - mean) / sigma < -lowreject,
                                            (reduced - mean) / sigma > highreject))[0]
        if badindices.size > 0:
            done = False
            data.y[badindices] = y[badindices]
    return DataStructures.xypoint(x=self.smoothing_data.x, y=y / self.smoothing_data.cont)
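FittingUtilities.savitzky_golay is the classic Savitzky-Golay convolution; SciPy provides an equivalent as scipy.signal.savgol_filter. A standalone sketch of the same clip-and-replace loop, with synthetic data and a window size chosen as assumptions:
import numpy as np
from scipy.signal import savgol_filter

y = np.sin(np.linspace(0, 10, 500)) + 2.0
y[::40] *= 0.5  # sharp dips to be rejected
window, polyorder = 41, 5  # window length must be odd
for _ in range(10):
    smooth = savgol_filter(y, window, polyorder)
    reduced = y / smooth
    z = (reduced - reduced.mean()) / reduced.std()
    bad = np.where((z < -2) | (z > 3))[0]  # lowreject=2, highreject=3
    if bad.size == 0:
        break
    y[bad] = smooth[bad]  # replace outliers with the smoothed value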
Example 9: main2
def main2():
    vsini = 250
    fname = "/Users/kgulliks/t15000g40a00p00_w300_700_r1.dat"
    x, y, c = np.loadtxt(fname, usecols=(0, 1, 2), unpack=True)

    import DataStructures
    import Broaden

    order = DataStructures.xypoint(x=x, y=y, cont=c)
    order = Broaden.RotBroad(order, vsini * units.km.to(units.cm))
    left = np.searchsorted(order.x, 4780)
    right = np.searchsorted(order.x, 4960)
    arr = order[left:right].toarray(norm=True)
    syn = s4.synthesis.Synplot(15000,
                               4.0,
                               idl="/Applications/itt/idl/bin/idl",
                               wstart=order.x[left],
                               wend=order.x[right],
                               observ=arr,
                               relative=1,
                               vrot=vsini,
                               rv=83)
    syn.run()
    syn.plot()

    plt.figure(2)
    # spec = syn.spectrum
    # plt.plot(spec[:,0], spec[:,1])
    # plt.plot(arr[:,0], arr[:,1])
    plt.show()
Example 10: onclick
def onclick(self, event):
    print(event.xdata, event.ydata)
    self.clicks.append((event.xdata, event.ydata))
    if len(self.clicks) < 2:
        return
    else:
        # Perform fit. Try just fitting splines?
        x1, y1 = self.clicks[0]  # Left-hand continuum
        x2, y2 = self.clicks[1]  # Right-hand continuum
        # x3, y3 = self.clicks[2]  # Line depth
        self.clicks = []
        left = np.searchsorted(self.current_order.x, x1)
        right = np.searchsorted(self.current_order.x, x2)
        y1 = np.median(self.current_order.y[max(0, left - 2):min(self.current_order.size(), left + 2)])
        y2 = np.median(self.current_order.y[max(0, right - 2):min(self.current_order.size(), right + 2)])
        cont = np.poly1d(np.polyfit((x1, x2), (y1, y2), 1))
        self.smoothing_data = DataStructures.xypoint(x=self.current_order.x[left:right],
                                                     y=self.current_order.y[left:right],
                                                     cont=cont(self.current_order.x[left:right]))
        self.smoothing_factor *= self.smoothing_data.size()
        smoothed = self.SmoothData()
        # smoothed = UnivariateSpline(data.x, data.y/data.cont, s=6e-4)  # np.median(data.y)/10000.0
        # mean = data.x.mean()
        mean = 0.0
        # smoothed = np.poly1d(np.polyfit(data.x - mean, data.y/data.cont, 7))
        self.PlotArrays(((self.smoothing_data.x, self.smoothing_data.y / self.smoothing_data.cont, "Data"),
                         (smoothed.x, smoothed.y, "Smoothed")), self.fitaxis)
        # plt.show()
        return
Example 11: __getitem__
def __getitem__(self, item):
    if item not in self.valid_keys:
        raise KeyError('{} not a valid item for CCFContainer!'.format(item))

    if item == 'ml' and self.ml is not None:
        return DataStructures.xypoint(x=self.x, y=self.ml)
    elif item == 'dc' and self.dc is not None:
        return DataStructures.xypoint(x=self.x, y=self.dc)
    elif item == 'simple' and self.simple is not None:
        return DataStructures.xypoint(x=self.x, y=self.simple)
    elif item == 'weighted' and self.weighted is not None:
        return DataStructures.xypoint(x=self.x, y=self.weighted)
    elif item == 'simple-weighted' and self.simple_weighted is not None:
        return DataStructures.xypoint(x=self.x, y=self.simple_weighted)

    return None  # Reached only if the requested CCF was never computed.
Example 12: GetSpectrum
def GetSpectrum(self, T, logg, metal):
    retval = self.ReadFile(T, logg, metal)
    if retval == 0:
        wave = self.read_dict[T][metal][0]
        flux = self.read_dict[T][metal][1][logg]
        return DataStructures.xypoint(x=wave, y=flux)
    else:
        return retval
Example 13: Broaden2
def Broaden2(model, vsini, intervalsize=50.0, epsilon=0.5, linear=False, findcont=False):
    """
    model: input filename of the spectrum. The continuum data is assumed to be in filename[:-1]+".17".
        model can also be a DataStructures.xypoint containing the already-read model (must include continuum!)
    vsini: the velocity (times sin(i)) of the star
    intervalsize: the size (in nm) of the interval to use for broadening. Since the kernel width
        depends on wavelength, you don't want to do it all at once
    epsilon: linear limb darkening. I(u) = 1 - epsilon + epsilon*u
    linear: flag for whether the x-spacing is already linear. If True, we don't need to
        make UnivariateSplines and linearize
    findcont: flag to decide if the continuum needs to be found
    """
    if isinstance(model, str):
        model = ReadFile(model)

    if not findcont:
        cont_fcn = UnivariateSpline(model.x, model.cont, s=0)
    if not linear:
        model_fcn = UnivariateSpline(model.x, model.y, s=0)
        x = np.linspace(model.x[0], model.x[-1], model.size())
        model = DataStructures.xypoint(x=x, y=model_fcn(x))
        if not findcont:
            model.cont = cont_fcn(model.x)
        else:
            model.cont = FittingUtilities.Continuum(model.x, model.y, lowreject=1.5, highreject=10)
    elif findcont:
        model.cont = FittingUtilities.Continuum(model.x, model.y, lowreject=1.5, highreject=10)

    # Convert to velocity space
    wave0 = model.x[model.size() // 2]
    model.x = constants.c.cgs.value * (model.x - wave0) / wave0

    # Make broadening profile
    left = np.searchsorted(model.x, -2 * vsini)
    right = np.searchsorted(model.x, 2 * vsini)
    profile = model[left:right]
    profile.y = np.zeros(profile.size())
    dv = profile.x / vsini
    indices = np.where(np.abs(dv) < 1.0)[0]
    profile.y[indices] = 1.0 / (vsini * (1 - epsilon / 3.0)) * (
        2 * (1 - epsilon) / np.pi * np.sqrt(1 - dv[indices] ** 2) + epsilon / 2.0 * (1 - dv[indices] ** 2))

    # Extend the interval to reduce edge effects (basically turn convolve into circular convolution)
    before = model.y[-profile.size():]
    after = model.y[:profile.size()]
    extended = np.append(np.append(before, model.y), after)

    if profile.size() % 2 == 0:
        left, right = int(profile.size() * 1.5), int(profile.size() * 1.5) - 1
    else:
        left, right = int(profile.size() * 1.5), int(profile.size() * 1.5)
    model.y = np.convolve(extended, profile.y / profile.y.sum(), mode="full")[left:-right]

    # Return back to wavelength space
    model.x = wave0 * (1 + model.x / constants.c.cgs.value)
    return model
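The kernel assembled above is Gray's rotational broadening profile, nonzero only for |Δv| < v sin i. The same profile can be built and applied on a uniform velocity grid in a few lines; the grid and the Gaussian test line below are assumptions:
import numpy as np

def rot_profile(v, vsini, epsilon=0.5):
    """Gray's rotational kernel on a velocity grid v (same units as vsini)."""
    dv = v / vsini
    prof = np.zeros_like(v)
    inside = np.abs(dv) < 1.0
    prof[inside] = (2 * (1 - epsilon) / np.pi * np.sqrt(1 - dv[inside] ** 2)
                    + epsilon / 2.0 * (1 - dv[inside] ** 2)) / (vsini * (1 - epsilon / 3.0))
    return prof

v = np.linspace(-500.0, 500.0, 2001)  # km/s grid
line = 1.0 - 0.5 * np.exp(-v ** 2 / (2 * 5.0 ** 2))  # narrow Gaussian test line
kernel = rot_profile(v, vsini=100.0)
broadened = np.convolve(line - 1.0, kernel / kernel.sum(), mode="same") + 1.0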
Example 14: main1
def main1():
    model_dir = "%s/School/Research/Models/Sorted/Stellar/Vband/" % (os.environ["HOME"])
    modelfile = "%slte86-4.00+0.0-alpha0.KURUCZ_MODELS.dat.sorted" % model_dir
    threshold = 0.90

    print("Reading stellar model")
    x, y = np.loadtxt(modelfile, usecols=(0, 1), unpack=True)
    x *= units.angstrom.to(units.nm)
    y = 10 ** y
    model = DataStructures.xypoint(x=x, y=y)
    model.cont = FittingUtilities.Continuum(model.x, model.y, fitorder=21, lowreject=1.5, highreject=20)

    plt.plot(model.x, model.y)
    plt.plot(model.x, model.cont)
    plt.show()

    print("Finding lines")
    linepoints = np.where(model.y / model.cont < threshold)[0]
    points = []
    lines = []
    strengths = []
    for line in linepoints:
        # print(len(points))
        if len(points) == 0 or int(line) - 1 == points[-1]:
            points.append(int(line))
        else:
            index = int(np.median(points) + 0.5)
            if len(points) > 1:
                minindex = model.y[points[0]:points[-1]].argmin() + points[0]
            else:
                minindex = points[0]
            lines.append(model.x[minindex])
            yval = model.y[minindex] / model.cont[minindex]
            strengths.append(yval)
            points = [int(line), ]

    """
    # Make sure there are no points too close to each other
    tol = 0.05
    lines = sorted(lines)
    for i in range(len(lines) - 2, 0, -1):
        if np.abs(lines[i] - lines[i-1]) < tol:
            del lines[i]
            del lines[i-1]
        elif np.abs(lines[i] - lines[i+1]) < tol:
            del lines[i+1]
            del lines[i]
        else:
            index = np.searchsorted(x, lines[i]) - 1
            yval = trans[index]
            plt.plot((lines[i], lines[i]), (yval-0.05, yval-0.1), 'r-')
    """

    plt.plot(model.x, model.y / model.cont, 'k-')
    for line in lines:
        idx = np.searchsorted(model.x, line)
        x = model.x[idx]
        y = model.y[idx] / model.cont[idx]
        plt.plot([x, x], [y - 0.05, y - 0.1], 'r-')
    plt.show()

    np.savetxt("Linelist3.dat", np.transpose((lines, strengths)), fmt="%.8f")
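The grouping loop above collects runs of consecutive pixels below the threshold and records the flux minimum of each run. The same bookkeeping is shorter with a split on gaps in the index array; the synthetic spectrum here is an assumption:
import numpy as np

x = np.linspace(500.0, 510.0, 5000)
flux = np.ones_like(x)
for center in (502.0, 505.0, 508.0):  # three synthetic absorption lines
    flux -= 0.4 * np.exp(-(x - center) ** 2 / (2 * 0.02 ** 2))

threshold = 0.90
below = np.where(flux < threshold)[0]
# Split the indices wherever they stop being consecutive: one group per line.
groups = np.split(below, np.where(np.diff(below) > 1)[0] + 1)
lines = [x[g[np.argmin(flux[g])]] for g in groups if g.size > 0]
strengths = [flux[g].min() for g in groups if g.size > 0]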
Example 15: HighPassFilter
def HighPassFilter(data, vel, width=5, linearize=False):
    """
    Function to apply a high-pass filter to data.
    Data must be in an xypoint container, and have linear wavelength spacing.
    vel is the width of the features you want to remove, in velocity space (in cm/s).
    width is how long it takes the filter to cut off, in units of wavenumber.
    """
    if linearize:
        original_data = data.copy()
        datafcn = spline(data.x, data.y, k=3)
        errorfcn = spline(data.x, data.err, k=3)
        contfcn = spline(data.x, data.cont, k=3)
        linear = DataStructures.xypoint(data.x.size)
        linear.x = np.linspace(data.x[0], data.x[-1], linear.size())
        linear.y = datafcn(linear.x)
        linear.err = errorfcn(linear.x)
        linear.cont = contfcn(linear.x)
        data = linear

    # Figure out the cutoff frequency from the velocity.
    featuresize = 2 * data.x.mean() * vel / constants.c.cgs.value  # vel MUST be given in units of cm
    dlam = data.x[1] - data.x[0]  # data.x MUST have constant x-spacing
    Npix = featuresize / dlam

    nsamples = data.size()
    sample_rate = 1.0 / dlam
    nyq_rate = sample_rate / 2.0  # The Nyquist rate of the signal.
    width /= nyq_rate
    cutoff_hz = min(1.0 / featuresize, nyq_rate - width * nyq_rate / 2.0)  # Cutoff frequency of the filter

    # The desired attenuation in the stop band, in dB.
    ripple_db = 60.0

    # Compute the order and Kaiser parameter for the FIR filter.
    N, beta = kaiserord(ripple_db, width)
    if N % 2 == 0:
        N += 1

    # Use firwin with a Kaiser window to create a high-pass FIR filter.
    taps = firwin(N, cutoff_hz / nyq_rate, window=('kaiser', beta), pass_zero=False)

    # Extend the data to prevent edge effects
    y = np.r_[data.y[::-1], data.y, data.y[::-1]]

    # Use lfilter to filter the data with the FIR filter.
    smoothed_y = lfilter(taps, 1.0, y)

    # Account for the phase delay of the filtered signal.
    delay = 0.5 * (N - 1) / sample_rate
    delay_idx = np.searchsorted(data.x, data.x[0] + delay)
    smoothed_y = smoothed_y[data.size() + delay_idx:-data.size() + delay_idx]

    if linearize:
        fcn = spline(data.x, smoothed_y)
        return fcn(original_data.x)
    else:
        return smoothed_y
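The same Kaiser-window FIR design (kaiserord, firwin, lfilter, delay correction) can be exercised end to end on synthetic data, without the spectrum-specific bookkeeping. The sample signal, cutoff, and transition width below are assumptions:
import numpy as np
from scipy.signal import kaiserord, firwin, lfilter

dx = 0.01
x = np.arange(0, 100, dx)
signal = np.sin(2 * np.pi * 0.05 * x) + 0.2 * np.sin(2 * np.pi * 2.0 * x)

nyq_rate = 0.5 / dx  # Nyquist rate for this sampling
N, beta = kaiserord(60.0, 0.1 / nyq_rate)  # 60 dB stop band, transition width 0.1
if N % 2 == 0:
    N += 1  # a high-pass FIR filter needs an odd number of taps
taps = firwin(N, 0.5 / nyq_rate, window=("kaiser", beta), pass_zero=False)
filtered = lfilter(taps, 1.0, signal)
# A linear-phase FIR filter delays its output by (N - 1) / 2 samples.
delay = (N - 1) // 2
filtered = np.roll(filtered, -delay)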