本文整理汇总了Python中pycbc.fft.ifft函数的典型用法代码示例。如果您正苦于以下问题:Python ifft函数的具体用法?Python ifft怎么用?Python ifft使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了ifft函数的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: bandlimited_interpolate
def bandlimited_interpolate(series, delta_f):
    """Return a new PSD that has been interpolated to the desired delta_f.

    Parameters
    ----------
    series : FrequencySeries
        Frequency series to be interpolated.
    delta_f : float
        The desired delta_f of the output.

    Returns
    -------
    interpolated series : FrequencySeries
        A new FrequencySeries that has been interpolated.
    """
    series = FrequencySeries(series, dtype=complex_same_precision_as(series),
                             delta_f=series.delta_f)

    N = (len(series) - 1) * 2
    delta_t = 1.0 / series.delta_f / N

    new_N = int(1.0 / (delta_t * delta_f))
    # Floor division: lengths and slice bounds must be integers (Python 3
    # `/` returns a float, which breaks zeros() and slicing).
    new_n = new_N // 2 + 1

    series_in_time = TimeSeries(zeros(N), dtype=real_same_precision_as(series),
                                delta_t=delta_t)
    ifft(series, series_in_time)

    # Zero-pad in the time domain, keeping the wrap-around (negative-time)
    # half of the impulse response at the end of the padded vector.
    padded_series_in_time = TimeSeries(zeros(new_N), dtype=series_in_time.dtype,
                                       delta_t=delta_t)
    padded_series_in_time[0:N//2] = series_in_time[0:N//2]
    padded_series_in_time[new_N-N//2:new_N] = series_in_time[N//2:N]

    interpolated_series = FrequencySeries(zeros(new_n), dtype=series.dtype,
                                          delta_f=delta_f)
    fft(padded_series_in_time, interpolated_series)
    return interpolated_series
示例2: qseries
def qseries(fseries, Q, f0, return_complex=False):
    """Calculate the energy 'TimeSeries' for the given fseries.

    Parameters
    ----------
    fseries: 'pycbc FrequencySeries'
        frequency-series data set
    Q:
        q value
    f0:
        central frequency
    return_complex: {False, bool}
        Return the raw complex series instead of the normalized power.

    Returns
    -------
    energy: '~pycbc.types.TimeSeries'
        A 'TimeSeries' of the normalized energy from the Q-transform of
        this tile against the data.
    """
    # normalize and generate bi-square window
    qprime = Q / 11**(1/2.)
    norm = numpy.sqrt(315. * qprime / (128. * f0))
    window_size = 2 * int(f0 / qprime * fseries.duration) + 1
    xfrequencies = numpy.linspace(-1., 1., window_size)

    start = int((f0 - (f0 / qprime)) * fseries.duration)
    end = int(start + window_size)
    # Floor division keeps the roll offset an integer; true division would
    # produce a float and numpy.roll would raise on Python 3.
    center = (start + end) // 2

    # Apply the bi-square taper to the tile's band of the input spectrum.
    windowed = fseries[start:end] * (1 - xfrequencies ** 2) ** 2 * norm

    tlen = (len(fseries)-1) * 2
    windowed.resize(tlen)
    # Shift so the tile's central frequency sits at DC before the IFFT.
    windowed = numpy.roll(windowed, -center)

    # calculate the time series for this q-value
    windowed = FrequencySeries(windowed, delta_f=fseries.delta_f,
                               epoch=fseries.start_time)
    ctseries = TimeSeries(zeros(tlen, dtype=numpy.complex128),
                          delta_t=fseries.delta_t)
    ifft(windowed, ctseries)

    if return_complex:
        return ctseries
    else:
        # Normalize the power by its median so stationary noise has unit level.
        energy = ctseries.squared_norm()
        medianenergy = numpy.median(energy.numpy())
        return energy / float(medianenergy)
示例3: lfilter
def lfilter(coefficients, timeseries):
    """Apply filter coefficients to a time series.

    Parameters
    ----------
    coefficients: numpy.ndarray
        Filter coefficients to apply
    timeseries: numpy.ndarray
        Time series to be filtered.

    Returns
    -------
    tseries: numpy.ndarray
        filtered array
    """
    from pycbc.fft import fft, ifft
    from pycbc.filter import correlate

    # If there aren't many points just use the default scipy method
    if len(timeseries) < 2**7:
        if hasattr(timeseries, 'numpy'):
            timeseries = timeseries.numpy()
        series = scipy.signal.lfilter(coefficients, 1.0, timeseries)
        return series
    else:
        # FFT-based filtering: correlating with the time-reversed,
        # circularly shifted coefficients is equivalent to convolution.
        cseries = (Array(coefficients[::-1] * 1)).astype(timeseries.dtype)
        cseries.resize(len(timeseries))
        cseries.roll(len(timeseries) - len(coefficients) + 1)
        timeseries = Array(timeseries, copy=False)

        # Floor division: the frequency-domain length must be an integer.
        flen = len(cseries) // 2 + 1
        ftype = complex_same_precision_as(timeseries)

        cfreq = zeros(flen, dtype=ftype)
        tfreq = zeros(flen, dtype=ftype)

        fft(Array(cseries), cfreq)
        fft(Array(timeseries), tfreq)

        cout = zeros(flen, ftype)
        # Bug fix: the original passed the series object itself as `dtype`;
        # zeros() expects the dtype attribute.
        out = zeros(len(timeseries), dtype=timeseries.dtype)

        correlate(cfreq, tfreq, cout)
        ifft(cout, out)

        # Undo the unnormalized IFFT scaling.
        return out.numpy() / len(out)
示例4: interpolate_complex_frequency
def interpolate_complex_frequency(series, delta_f, zeros_offset=0, side='right'):
    """Interpolate complex frequency series to desired delta_f.

    Return a new complex frequency series that has been interpolated to the
    desired delta_f.

    Parameters
    ----------
    series : FrequencySeries
        Frequency series to be interpolated.
    delta_f : float
        The desired delta_f of the output
    zeros_offset : optional, {0, int}
        Number of sample to delay the start of the zero padding
    side : optional, {'right', str}
        The side of the vector to zero pad

    Returns
    -------
    interpolated series : FrequencySeries
        A new FrequencySeries that has been interpolated.
    """
    new_n = int((len(series) - 1) * series.delta_f / delta_f + 1)
    old_N = int((len(series) - 1) * 2)
    new_N = int((new_n - 1) * 2)
    # (The original computed an unused `samples` array here; removed.)

    # Go to the time domain, shift the zero-padding point to the start,
    # extend, then place the padding on the requested side.
    time_series = TimeSeries(zeros(old_N), delta_t=1.0/(series.delta_f*old_N),
                             dtype=real_same_precision_as(series))
    ifft(series, time_series)

    time_series.roll(-zeros_offset)
    time_series.resize(new_N)

    if side == 'left':
        time_series.roll(zeros_offset + new_N - old_N)
    elif side == 'right':
        time_series.roll(zeros_offset)

    out_series = FrequencySeries(zeros(new_n), epoch=series.epoch,
                                 delta_f=delta_f, dtype=series.dtype)
    fft(time_series, out_series)
    return out_series
示例5: setUp
def setUp(self):
    """Build fixtures shared by the PSD tests: scheme/context handles,
    PSD parameters, and a reproducible 1/f noise realization."""
    self.scheme = _scheme
    self.context = _context
    self.psd_len = 1024
    self.psd_delta_f = 0.1
    self.psd_low_freq_cutoff = 10.
    # generate 1/f noise for testing PSD estimation
    noise_size = 524288
    sample_freq = 4096.
    delta_f = sample_freq / noise_size

    # Fixed seed so every test run sees the same noise realization.
    numpy.random.seed(132435)
    # Floor division: `size` must be an integer (noise_size/2 is a float
    # on Python 3 and numpy.random.normal rejects it).
    nfreq = noise_size // 2 + 1
    noise = numpy.random.normal(loc=0, scale=1, size=nfreq) + \
        1j * numpy.random.normal(loc=0, scale=1, size=nfreq)
    noise_model = 1. / numpy.linspace(1., 100., nfreq)
    noise *= noise_model / numpy.sqrt(delta_f) / 2
    # DC bin of a real signal's spectrum must be purely real.
    noise[0] = noise[0].real

    noise_fs = FrequencySeries(noise, delta_f=delta_f)
    self.noise = TimeSeries(numpy.zeros(noise_size), delta_t=1./sample_freq)
    ifft(noise_fs, self.noise)
示例6: to_timeseries
def to_timeseries(self, delta_t=None):
    """Return the inverse Fourier transform of this frequency series.

    Note that this assumes an even length time series!

    Parameters
    ----------
    delta_t : {None, float}, optional
        The time resolution of the returned series. By default the
        resolution is determined by length and delta_f of this frequency
        series.

    Returns
    -------
    TimeSeries:
        The inverse fourier transform of this frequency series.

    Raises
    ------
    ValueError
        If `delta_t` would undersample the data.
    """
    from pycbc.fft import ifft
    from pycbc.types import TimeSeries, real_same_precision_as

    nat_delta_t = 1.0 / ((len(self)-1)*2) / self.delta_f
    if not delta_t:
        delta_t = nat_delta_t

    # add 0.5 to round integer
    tlen = int(1.0 / self.delta_f / delta_t + 0.5)
    # Floor division: the frequency-domain length must be an integer.
    flen = tlen // 2 + 1

    if flen < len(self):
        raise ValueError("The value of delta_t (%s) would be "
                         "undersampled. Maximum delta_t "
                         "is %s." % (delta_t, nat_delta_t))

    # Zero-pad up to flen. (The original had an unreachable
    # `if not delta_t: tmp = self` branch here — delta_t is always set
    # above — so it has been removed.)
    tmp = FrequencySeries(zeros(flen, dtype=self.dtype),
                          delta_f=self.delta_f, epoch=self.epoch)
    tmp[:len(self)] = self[:]

    f = TimeSeries(zeros(tlen, dtype=real_same_precision_as(self)),
                   delta_t=delta_t)
    ifft(tmp, f)
    return f
示例7: inverse_spectrum_truncation
def inverse_spectrum_truncation(psd, max_filter_len, low_frequency_cutoff=None, trunc_method=None):
    """Modify a PSD such that the impulse response associated with its inverse
    square root is no longer than `max_filter_len` time samples. In practice
    this corresponds to a coarse graining or smoothing of the PSD.

    Parameters
    ----------
    psd : FrequencySeries
        PSD whose inverse spectrum is to be truncated.
    max_filter_len : int
        Maximum length of the time-domain filter in samples.
    low_frequency_cutoff : {None, int}
        Frequencies below `low_frequency_cutoff` are zeroed in the output.
    trunc_method : {None, 'hann'}
        Function used for truncating the time-domain filter.
        None produces a hard truncation at `max_filter_len`.

    Returns
    -------
    psd : FrequencySeries
        PSD whose inverse spectrum has been truncated.

    Raises
    ------
    ValueError
        For invalid types or values of `max_filter_len` and `low_frequency_cutoff`.

    Notes
    -----
    See arXiv:gr-qc/0509116 for details.
    """
    # sanity checks
    if type(max_filter_len) is not int or max_filter_len <= 0:
        raise ValueError('max_filter_len must be a positive integer')
    # Parenthesized: the original `A and B or C` compared None against a
    # float whenever no cutoff was supplied (TypeError on Python 3).
    if low_frequency_cutoff is not None and \
            (low_frequency_cutoff < 0 or
             low_frequency_cutoff > psd.sample_frequencies[-1]):
        raise ValueError('low_frequency_cutoff must be within the bandwidth of the PSD')

    N = (len(psd)-1)*2

    inv_asd = FrequencySeries((1. / psd)**0.5, delta_f=psd.delta_f,
                              dtype=complex_same_precision_as(psd))
    inv_asd[0] = 0
    # Nyquist bin; floor division keeps the index an integer.
    inv_asd[N//2] = 0
    # delta_t of an N-point series with resolution delta_f is 1/(N*delta_f);
    # the original passed N/delta_f, mislabeling the series (the FFT result
    # is unaffected since only delta_f enters the computation).
    q = TimeSeries(numpy.zeros(N), delta_t=1.0 / (N * psd.delta_f),
                   dtype=real_same_precision_as(psd))

    if low_frequency_cutoff:
        kmin = int(low_frequency_cutoff / psd.delta_f)
        inv_asd[0:kmin] = 0

    ifft(inv_asd, q)

    trunc_start = max_filter_len // 2
    trunc_end = N - max_filter_len // 2

    if trunc_method == 'hann':
        # Taper the kept ends of the impulse response instead of hard-cutting.
        trunc_window = Array(numpy.hanning(max_filter_len), dtype=q.dtype)
        q[0:trunc_start] *= trunc_window[max_filter_len//2:max_filter_len]
        q[trunc_end:N] *= trunc_window[0:max_filter_len//2]

    q[trunc_start:trunc_end] = 0
    psd_trunc = FrequencySeries(numpy.zeros(len(psd)), delta_f=psd.delta_f,
                                dtype=complex_same_precision_as(psd))
    fft(q, psd_trunc)
    psd_trunc *= psd_trunc.conj()
    psd_out = 1. / abs(psd_trunc)
    return psd_out
示例8: matched_filter_core
def matched_filter_core(template, data, psd=None, low_frequency_cutoff=None,
                        high_frequency_cutoff=None, h_norm=None, out=None, corr_out=None):
    """ Return the complex snr and normalization.
    Return the complex snr, along with its associated normalization of the template,
    matched filtered against the data.
    Parameters
    ----------
    template : TimeSeries or FrequencySeries
        The template waveform
    data : TimeSeries or FrequencySeries
        The strain data to be filtered.
    psd : {FrequencySeries}, optional
        The noise weighting of the filter.
    low_frequency_cutoff : {None, float}, optional
        The frequency to begin the filter calculation. If None, begin at the
        first frequency after DC.
    high_frequency_cutoff : {None, float}, optional
        The frequency to stop the filter calculation. If None, continue to the
        the nyquist frequency.
    h_norm : {None, float}, optional
        The template normalization. If none, this value is calculated internally.
    out : {None, Array}, optional
        An array to use as memory for snr storage. If None, memory is allocated
        internally.
    corr_out : {None, Array}, optional
        An array to use as memory for correlation storage. If None, memory is allocated
        internally. If provided, management of the vector is handled externally by the
        caller. No zero'ing is done internally.
    Returns
    -------
    snr : TimeSeries
        A time series containing the complex snr.
    corrrelation: FrequencySeries
        A frequency series containing the correlation vector.
    norm : float
        The normalization of the complex snr.
    """
    # Use the caller's correlation buffer if given, otherwise fall back to
    # the module-level scratch buffer _qtilde_t (cached between calls).
    if corr_out is not None:
        _qtilde = corr_out
    else:
        global _qtilde_t
        _qtilde = _qtilde_t

    htilde = make_frequency_series(template)
    stilde = make_frequency_series(data)

    if len(htilde) != len(stilde):
        raise ValueError("Length of template and data must match")

    # Time-domain length corresponding to the one-sided frequency series.
    N = (len(stilde)-1) * 2
    kmin, kmax = get_cutoff_indices(low_frequency_cutoff,
                                    high_frequency_cutoff, stilde.delta_f, N)

    # SNR output memory: allocate, or validate the caller-provided vector.
    if out is None:
        _q = zeros(N, dtype=complex_same_precision_as(data))
    elif (len(out) == N) and type(out) is Array and out.kind =='complex':
        _q = out
    else:
        raise TypeError('Invalid Output Vector: wrong length or dtype')

    # (Re)allocate the cached scratch buffer when it is missing, the wrong
    # size, or the wrong precision; otherwise just zero it for reuse.
    # A caller-supplied corr_out is used as-is (no zeroing, per docstring).
    if corr_out:
        pass
    elif (_qtilde is None) or (len(_qtilde) != N) or _qtilde.dtype != data.dtype:
        _qtilde_t = _qtilde = zeros(N, dtype=complex_same_precision_as(data))
    else:
        _qtilde.clear()

    # Frequency-domain correlation of template against data over the band.
    correlate(htilde[kmin:kmax], stilde[kmin:kmax], _qtilde[kmin:kmax])

    # Optional noise weighting; the PSD must match the data's resolution.
    if psd is not None:
        if isinstance(psd, FrequencySeries):
            if psd.delta_f == stilde.delta_f :
                _qtilde[kmin:kmax] /= psd[kmin:kmax]
            else:
                raise TypeError("PSD delta_f does not match data")
        else:
            raise TypeError("PSD must be a FrequencySeries")

    ifft(_qtilde, _q)

    if h_norm is None:
        h_norm = sigmasq(htilde, psd, low_frequency_cutoff, high_frequency_cutoff)

    # Matched-filter normalization: 4 * delta_f / sqrt(<h|h>).
    norm = (4.0 * stilde.delta_f) / sqrt( h_norm)
    delta_t = 1.0 / (N * stilde.delta_f)

    return (TimeSeries(_q, epoch=stilde._epoch, delta_t=delta_t, copy=False),
            FrequencySeries(_qtilde, epoch=stilde._epoch, delta_f=htilde.delta_f, copy=False),
            norm)
示例9: heirarchical_matched_filter_and_cluster
def heirarchical_matched_filter_and_cluster(self, segnum, template_norm, window):
    """ Return the complex snr and normalization.
    Calculated the matched filter, threshold, and cluster.

    (Note: "heirarchical" is a historical misspelling kept for
    backward compatibility with existing callers.)

    Parameters
    ----------
    segnum : int
        Index into the list of segments at MatchedFilterControl construction
    template_norm : float
        The htilde, template normalization factor.
    window : int
        Size of the window over which to cluster triggers, in samples

    Returns
    -------
    snr : TimeSeries
        A time series containing the complex snr at the reduced sample rate.
    norm : float
        The normalization of the complex snr.
    corrrelation: FrequencySeries
        A frequency series containing the correlation vector.
    idx : Array
        List of indices of the triggers.
    snrv : Array
        The snr values at the trigger locations.
    """
    from pycbc.fft.fftw_pruned import pruned_c2cifft, fft_transpose

    htilde = self.htilde
    stilde = self.segments[segnum]

    norm = (4.0 * stilde.delta_f) / sqrt(template_norm)

    # First pass: correlate and IFFT at the reduced sample rate.
    correlate(htilde[self.kmin_red:self.kmax_red],
              stilde[self.kmin_red:self.kmax_red],
              self.corr_mem[self.kmin_red:self.kmax_red])
    ifft(self.corr_mem, self.snr_mem)

    if not hasattr(stilde, 'red_analyze'):
        # Floor division: slice bounds must be integers (true division
        # yields floats on Python 3 and slice indexing would raise).
        stilde.red_analyze = \
            slice(stilde.analyze.start // self.downsample_factor,
                  stilde.analyze.stop // self.downsample_factor)

    idx_red, snrv_red = events.threshold(
        self.snr_mem[stilde.red_analyze],
        self.snr_threshold / norm * self.upsample_threshold)
    if len(idx_red) == 0:
        return [], None, [], [], []

    idx_red, _ = events.cluster_reduce(idx_red, snrv_red,
                                       window // self.downsample_factor)
    logging.info("%s points above threshold at reduced resolution"
                 % (str(len(idx_red)),))

    # The fancy upsampling is here
    if self.upsample_method == 'pruned_fft':
        # Map reduced-rate trigger indices back to the full sample rate.
        idx = (idx_red + stilde.analyze.start // self.downsample_factor) \
              * self.downsample_factor
        idx = smear(idx, self.downsample_factor)

        # cache transposed versions of htilde and stilde
        if not hasattr(self.corr_mem_full, 'transposed'):
            self.corr_mem_full.transposed = zeros(len(self.corr_mem_full),
                                                  dtype=self.dtype)

        if not hasattr(htilde, 'transposed'):
            htilde.transposed = zeros(len(self.corr_mem_full), dtype=self.dtype)
            htilde.transposed[self.kmin_full:self.kmax_full] = \
                htilde[self.kmin_full:self.kmax_full]
            htilde.transposed = fft_transpose(htilde.transposed)

        if not hasattr(stilde, 'transposed'):
            stilde.transposed = zeros(len(self.corr_mem_full), dtype=self.dtype)
            stilde.transposed[self.kmin_full:self.kmax_full] = \
                stilde[self.kmin_full:self.kmax_full]
            stilde.transposed = fft_transpose(stilde.transposed)

        # Second pass: full-rate SNR only at the candidate indices via a
        # pruned inverse FFT.
        correlate(htilde.transposed, stilde.transposed,
                  self.corr_mem_full.transposed)
        snrv = pruned_c2cifft(self.corr_mem_full.transposed, self.inter_vec,
                              idx, pretransposed=True)
        idx = idx - stilde.analyze.start
        idx2, snrv = events.threshold(Array(snrv, copy=False),
                                      self.snr_threshold / norm)

        if len(idx2) > 0:
            correlate(htilde[self.kmax_red:self.kmax_full],
                      stilde[self.kmax_red:self.kmax_full],
                      self.corr_mem_full[self.kmax_red:self.kmax_full])
            idx, snrv = events.cluster_reduce(idx[idx2], snrv, window)
        else:
            idx, snrv = [], []

        logging.info("%s points at full rate and clustering" % len(idx))
        return self.snr_mem, norm, self.corr_mem_full, idx, snrv
    else:
        raise ValueError("Invalid upsample method")
示例10: qtransform
def qtransform(fseries, Q, f0):
    """Calculate the energy 'TimeSeries' for the given fseries.

    Parameters
    ----------
    fseries: 'pycbc FrequencySeries'
        frequency-series data set
    Q:
        q value
    f0:
        central frequency

    Returns
    -------
    norm_energy: '~pycbc.types.aligned.ArrayWithAligned'
        A 'TimeSeries' of the normalized energy from the Q-transform of
        this tile against the data.
    cenergy: '~pycbc.types.aligned.ArrayWithAligned'
        A 'TimeSeries' of the complex energy from the Q-transform of
        this tile against the data.
    """
    # Tile parameters for this (Q, f0) pair.
    qprime = Q / 11**(1/2.)  # ... self.qprime
    dur = fseries.duration
    sampling = fseries.sample_rate

    # Size and placement of the bi-square window over the tile's band.
    window_size = 2 * int(f0 / qprime * dur) + 1
    start = int((f0 - (f0 / qprime)) * dur)
    end = int(start + window_size)

    # Taper and normalize the selected band of the input spectrum.
    amp_norm = np.sqrt(315. * qprime / (128. * f0))
    tapered = fseries[start:end].numpy() * bisquare(window_size) * amp_norm

    # Output sampling: keep the input rate (could be lowered to the
    # highest bandwidth of interest).
    output_sampling = sampling
    output_samples = int(dur * output_sampling)

    # Zero-pad, move negative frequencies to the end, then inverse FFT.
    buf = np.pad(tapered, padding(window_size, output_samples), mode='constant')
    shifted = npfft.ifftshift(buf)

    spectrum = FrequencySeries(shifted, delta_f=1./dur)
    cenergy = TimeSeries(zeros(output_samples, dtype=np.complex128),
                         delta_t=1./sampling)
    ifft(spectrum, cenergy)

    # Median-normalize the power so stationary noise sits at unit level.
    energy = cenergy.squared_norm()
    norm_energy = energy / float(np.median(energy.numpy()))
    return norm_energy, cenergy
示例11: get_fd_waveform
# Plot a time domain and fourier domain waveform together in the time domain.
# Note that without special cleanup the Fourier domain waveform will exhibit
# the Gibb's phenomenon. (http://en.wikipedia.org/wiki/Gibbs_phenomenon)
import pylab
from pycbc import types, fft, waveform

# Get a time domain waveform
hp, hc = waveform.get_td_waveform(approximant="EOBNRv2",
                                  mass1=6, mass2=6, delta_t=1.0/4096, f_lower=40)

# Get a frequency domain waveform
sptilde, sctilde = waveform.get_fd_waveform(approximant="TaylorF2",
                                            mass1=6, mass2=6, delta_f=1.0/4, f_lower=40)

# IFFT it to the time-domain
tlen = int(1.0 / hp.delta_t / sptilde.delta_f)
# Floor division: resize() needs an integer length (tlen/2 is a float on
# Python 3).
sptilde.resize(tlen // 2 + 1)
sp = types.TimeSeries(types.zeros(tlen), delta_t=hp.delta_t)
fft.ifft(sptilde, sp)

pylab.plot(sp.sample_times, sp, label="TaylorF2 (IFFT)")
pylab.plot(hp.sample_times, hp, label="EOBNRv2")
pylab.ylabel('Strain')
pylab.xlabel('Time (s)')
pylab.legend()
pylab.show()