This article collects typical usage examples of the scipy.signal.gaussian method in Python. If you are wondering how signal.gaussian works, how to call it, or what real code using it looks like, the curated examples below may help. You can also explore further usage examples for the containing module, scipy.signal.
The following presents 15 code examples of the signal.gaussian method, sorted by popularity by default.
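Before the examples, a quick note on the API itself: scipy.signal.gaussian(M, std, sym=True) simply returns an M-point Gaussian window as a 1D NumPy array. In recent SciPy releases the window functions live under scipy.signal.windows, and the old scipy.signal.gaussian alias is deprecated (and removed in the newest versions), so adjust the import to your SciPy version. A minimal sketch, not taken from the examples below:

import numpy as np
from scipy.signal.windows import gaussian  # older code: from scipy.signal import gaussian

win = gaussian(51, std=7.0)     # 51-point window, standard deviation of 7 samples
win /= win.sum()                # unit area, so smoothing by convolution keeps the signal's scale
print(win.shape, win.argmax())  # (51,) 25 -- symmetric, peak at the centre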
Example 1: preprocess
# Required import: from scipy import signal [as alias]
# Or: from scipy.signal import gaussian [as alias]
def preprocess(data, smoothing_sd=25, n_pcs=20):
    """
    Preprocess neural data for cca analysis with smoothing and pca

    :param data: array of shape (n_samples, n_features)
    :type data: array-like
    :param smoothing_sd: gaussian smoothing kernel standard deviation (ms)
    :type smoothing_sd: float
    :param n_pcs: number of pca dimensions to retain
    :type n_pcs: int
    :return: preprocessed neural data
    :rtype: array-like, shape (n_samples, pca_dims)
    """
    if smoothing_sd > 0:
        data = _smooth(data, sd=smoothing_sd)
    if n_pcs > 0:
        data = _pca(data, n_pcs=n_pcs)
    return data
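The helpers _smooth and _pca used by preprocess are not shown at this point (a Gaussian-window version of _smooth appears as Example 6 below). The following is a self-contained sketch of the same smooth-then-reduce idea, substituting a plain SVD-based PCA for the module's _pca helper (an assumption, not the original code):

import numpy as np
from scipy.signal import convolve
from scipy.signal.windows import gaussian

rng = np.random.default_rng(0)
data = rng.standard_normal((1000, 50))           # (n_samples, n_features)

# Gaussian smoothing, column by column, with a unit-area window
win = gaussian(251, std=25.0)
win /= win.sum()
smoothed = np.column_stack(
    [convolve(data[:, j], win, mode='same') for j in range(data.shape[1])])

# PCA via SVD: keep the first 20 components
centered = smoothed - smoothed.mean(axis=0)
_, _, vt = np.linalg.svd(centered, full_matrices=False)
reduced = centered @ vt[:20].T
print(reduced.shape)                             # (1000, 20)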
Example 2: test_smooth_1d
# Required import: from scipy import signal [as alias]
# Or: from scipy.signal import gaussian [as alias]
def test_smooth_1d():
    for edge in ['m', 'c']:
        for N in [20, 21]:
            # values in [9.0, 11.0]
            x = rand(N) + 10
            mn = 9.0
            mx = 11.0
            for M in range(18, 27):
                print("1d", edge, "N=%i, M=%i" % (N, M))
                xsm = smooth(x, gaussian(M, 2.0), edge=edge)
                assert len(xsm) == N
                # (N,1) case
                xsm2 = smooth(x[:, None], gaussian(M, 2.0)[:, None], edge=edge)
                assert np.allclose(xsm, xsm2[:, 0], atol=1e-14, rtol=1e-12)
                # Smoothed signal should not go to zero if edge effects are handled
                # properly. Also assert proper normalization (i.e. smoothed signal
                # is "in the middle" of the noisy original data).
                assert xsm.min() >= mn
                assert xsm.max() <= mx
                assert mn <= xsm[0] <= mx
                assert mn <= xsm[-1] <= mx
            # convolution with delta peak produces same data exactly
            assert np.allclose(smooth(x, np.array([0.0, 1, 0]), edge=edge), x,
                               atol=1e-14, rtol=1e-12)
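The final assertion relies on the fact that convolving with a unit delta kernel reproduces the input exactly. A tiny stand-alone illustration of that property using np.convolve instead of the package's smooth() helper (an assumption):

import numpy as np

x = np.random.rand(20) + 10
delta = np.array([0.0, 1.0, 0.0])   # unit-area "identity" kernel
assert np.allclose(np.convolve(x, delta, mode='same'), x, atol=1e-14, rtol=1e-12)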
Example 3: test_smooth_nd
# Required import: from scipy import signal [as alias]
# Or: from scipy.signal import gaussian [as alias]
def test_smooth_nd():
    for edge in ['m', 'c']:
        a = rand(20, 2, 3) + 10
        for M in [5, 20, 123]:
            print("nd", edge, "M=%i" % M)
            kern = gaussian(M, 2.0)
            asm = smooth(a, kern[:, None, None], axis=0, edge=edge)
            assert asm.shape == a.shape
            for jj in range(asm.shape[1]):
                for kk in range(asm.shape[2]):
                    assert np.allclose(asm[:, jj, kk], smooth(a[:, jj, kk], kern,
                                                              edge=edge))
                    mn = a[:, jj, kk].min()
                    mx = a[:, jj, kk].max()
                    smn = asm[:, jj, kk].min()
                    smx = asm[:, jj, kk].max()
                    assert smn >= mn, "min: data=%f, smooth=%f" % (mn, smn)
                    assert smx <= mx, "max: data=%f, smooth=%f" % (mx, smx)
Example 4: smooth_dir_map
# Required import: from scipy import signal [as alias]
# Or: from scipy.signal import gaussian [as alias]
def smooth_dir_map(dir_map, sigma=2.0, mask=None):
    cos2Theta = np.cos(dir_map * 2)
    sin2Theta = np.sin(dir_map * 2)
    if mask is not None:
        assert (dir_map.shape[0] == mask.shape[0])
        assert (dir_map.shape[1] == mask.shape[1])
        cos2Theta[mask == 0] = 0
        sin2Theta[mask == 0] = 0
    cos2Theta = gaussian(cos2Theta, sigma, multichannel=False, mode='reflect')
    sin2Theta = gaussian(sin2Theta, sigma, multichannel=False, mode='reflect')
    dir_map = np.arctan2(sin2Theta, cos2Theta) * 0.5
    return dir_map
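Note that the gaussian() called here takes an image plus sigma, multichannel and mode arguments, which matches skimage.filters.gaussian rather than scipy.signal.gaussian; the page appears to have matched it by name only. The angle-doubling trick (smoothing cos 2θ and sin 2θ, then halving the arctangent) is what makes Gaussian averaging safe for orientations that wrap at π. A sketch of the same trick using scipy.ndimage.gaussian_filter instead (an assumption, not the original code):

import numpy as np
from scipy.ndimage import gaussian_filter

dir_map = np.random.uniform(-np.pi / 2, np.pi / 2, size=(64, 64))   # orientations in radians
cos2 = gaussian_filter(np.cos(2 * dir_map), sigma=2.0, mode='reflect')
sin2 = gaussian_filter(np.sin(2 * dir_map), sigma=2.0, mode='reflect')
smoothed_dir = 0.5 * np.arctan2(sin2, cos2)      # back to the +/- pi/2 range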
Example 5: smooth1d
# Required import: from scipy import signal [as alias]
# Or: from scipy.signal import gaussian [as alias]
def smooth1d(array, window_size=None, kernel='gaussian'):
    """Apply a centered window smoothing to a 1D array.

    Parameters
    ----------
    array : ndarray
        the array to apply the smoothing to
    window_size : int
        the size of the smoothing window
    kernel : str
        the type of smoothing (`gaussian`, `mean`)

    Returns
    -------
    the smoothed array (same dim as input)
    """
    # some defaults
    if window_size is None:
        if len(array) >= 9:
            window_size = 9
        elif len(array) >= 7:
            window_size = 7
        elif len(array) >= 5:
            window_size = 5
        elif len(array) >= 3:
            window_size = 3

    if window_size % 2 == 0:
        raise ValueError('Window should be an odd number.')

    if isinstance(kernel, str):
        if kernel == 'gaussian':
            kernel = gaussian(window_size, 1)
        elif kernel == 'mean':
            kernel = np.ones(window_size)
        else:
            raise NotImplementedError('Kernel: ' + kernel)
    kernel = kernel / np.asarray(kernel).sum()
    return filters.convolve1d(array, kernel, mode='mirror')
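A brief usage sketch for smooth1d, assuming its module-level imports are present as the header comments suggest (numpy as np, gaussian from scipy.signal, and scipy.ndimage.filters as filters):

import numpy as np

noisy = np.sin(np.linspace(0, 4 * np.pi, 200)) + 0.3 * np.random.randn(200)
smoothed = smooth1d(noisy, window_size=9, kernel='gaussian')
assert smoothed.shape == noisy.shape             # same length as the input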
Example 6: _smooth
# Required import: from scipy import signal [as alias]
# Or: from scipy.signal import gaussian [as alias]
def _smooth(data, sd):
    from scipy.signal import gaussian
    from scipy.signal import convolve

    n_bins = data.shape[0]
    w = n_bins - 1 if n_bins % 2 == 0 else n_bins
    window = gaussian(w, std=sd)
    for j in range(data.shape[1]):
        data[:, j] = convolve(data[:, j], window, mode='same', method='auto')
    return data
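A brief usage sketch for _smooth, e.g. on a (n_bins, n_cells) spike-count matrix; the copy() guards against the in-place modification of the input:

import numpy as np

counts = np.random.poisson(2.0, size=(500, 10)).astype(float)   # (n_bins, n_cells)
smoothed = _smooth(counts.copy(), sd=25)                         # Gaussian window, std of 25 bins
print(smoothed.shape)                                            # (500, 10)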
Example 7: velocity_smoothed
# Required import: from scipy import signal [as alias]
# Or: from scipy.signal import gaussian [as alias]
def velocity_smoothed(pos, freq, smooth_size=0.03):
    """
    Compute wheel velocity from uniformly sampled wheel data

    Parameters
    ----------
    pos : array_like
        Array of wheel positions
    freq : float
        Sampling frequency of the data
    smooth_size : float
        Size of Gaussian smoothing window in seconds

    Returns
    -------
    vel : np.ndarray
        Array of velocity values
    acc : np.ndarray
        Array of acceleration values
    """
    # Define our smoothing window with an area of 1 so the units won't be changed
    stdSamps = np.round(smooth_size * freq)  # Standard deviation relative to sampling frequency
    N = int(stdSamps * 6)  # Number of points in the Gaussian
    gauss_std = (N - 1) / 6  # @fixme magic number everywhere!
    win = gaussian(N, gauss_std)
    win = win / win.sum()  # Normalize amplitude

    # Convolve and multiply by sampling frequency to restore original units
    vel = np.insert(convolve(np.diff(pos), win, mode='same'), 0, 0) * freq
    acc = np.insert(convolve(np.diff(vel), win, mode='same'), 0, 0) * freq
    return vel, acc
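A brief usage sketch for velocity_smoothed, assuming numpy plus gaussian and convolve from scipy.signal are imported as the header comments indicate; the sampling rate and position trace here are made up for illustration:

import numpy as np

freq = 1000.0                                    # positions sampled uniformly at 1 kHz
t = np.arange(0, 2, 1 / freq)
pos = np.sin(2 * np.pi * t)                      # toy wheel position trace
vel, acc = velocity_smoothed(pos, freq, smooth_size=0.03)
print(vel.shape == pos.shape, acc.shape == pos.shape)   # True True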
Example 8: __init__
# Required import: from scipy import signal [as alias]
# Or: from scipy.signal import gaussian [as alias]
def __init__(
        self,
        action_space=(0, 1, 2, 3),
        time_threshold=5,
        pips_threshold=10,
        pips_scale=1e-4,
        kernel_size=5,
        kernel_stddev=1
):
    """
    Args:
        action_space: actions to advise: 0 - hold, 1 - buy, 2 - sell, 3 - close
        time_threshold: how many points (in number of ENVIRONMENT timesteps) on each side to use
            for the comparison to consider comparator(n, n+x) to be True
        pips_threshold: int, minimal peaks difference in pips
            to consider comparator(n, n+x) to be True
        pips_scale: actual single pip value wrt signal value
        kernel_size: gaussian convolution kernel size (used to compute distribution over actions)
        kernel_stddev: gaussian kernel standard deviation
    """
    self.action_space = action_space
    self.time_threshold = time_threshold
    self.value_threshold = pips_threshold * pips_scale
    self.kernel_size = kernel_size
    self.kernel = signal.gaussian(kernel_size, std=kernel_stddev)
    self.data = None
Example 9: fit
# Required import: from scipy import signal [as alias]
# Or: from scipy.signal import gaussian [as alias]
def fit(self, episode_data, resampling_factor=1):
    """
    Estimates `advised` actions probabilities distribution based on data received.

    Args:
        episode_data: 1D np.array of unscaled price values in OHL[CV] format
        resampling_factor: factor by which to resample given data
            by taking min/max values inside every resampled bar

    Returns:
        np.array of size [resampled_data_size, actions_space_size] of probabilities of advised actions,
        where resampled_data_size = int(len(episode_data) / resampling_factor) + 1 or 0
        (depending on the remainder)
    """
    # Vector of advised actions:
    data = self.resample_data(episode_data, resampling_factor)
    signals = self.estimate_actions(data)
    signals = self.adjust_signals(signals)

    # One-hot actions encoding:
    actions_one_hot = np.zeros([signals.shape[0], len(self.action_space)])
    actions_one_hot[np.arange(signals.shape[0]), signals] = 1

    # Want a bit relaxed discrete distribution over actions instead of one hot (heuristic):
    actions_distr = np.zeros(actions_one_hot.shape)

    # For all actions except 'hold' (due to heuristic skewness):
    actions_distr[:, 0] = actions_one_hot[:, 0]

    # ...spread out actions probabilities by convolving with gaussian kernel:
    for channel in range(1, actions_one_hot.shape[-1]):
        actions_distr[:, channel] = np.convolve(actions_one_hot[:, channel], self.kernel, mode='same') + 0.1

    # Normalize:
    actions_distr /= actions_distr.sum(axis=-1)[..., None]
    return actions_distr
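The relaxation heuristic at the end of fit() can be reproduced in isolation: convolve each non-'hold' one-hot channel with a small Gaussian kernel, add a floor, and renormalize the rows. A self-contained sketch (using scipy.signal.windows.gaussian for the kernel, an assumption about the import location):

import numpy as np
from scipy.signal.windows import gaussian

signals = np.array([0, 0, 1, 0, 0, 2, 0, 0])     # advised action indices (hold/buy/sell)
one_hot = np.zeros((signals.size, 4))
one_hot[np.arange(signals.size), signals] = 1

kernel = gaussian(5, std=1)                      # kernel_size=5, kernel_stddev=1
distr = np.zeros_like(one_hot)
distr[:, 0] = one_hot[:, 0]                      # leave the 'hold' channel untouched
for ch in range(1, one_hot.shape[-1]):
    distr[:, ch] = np.convolve(one_hot[:, ch], kernel, mode='same') + 0.1
distr /= distr.sum(axis=-1)[..., None]           # each row now sums to 1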
Example 10: lorentz
# Required import: from scipy import signal [as alias]
# Or: from scipy.signal import gaussian [as alias]
def lorentz(M, std=1.0, sym=True):
    r"""Lorentz window (same as Cauchy function). Function skeleton stolen from
    scipy.signal.gaussian().

    The Lorentz function is

    .. math::

        L(x) = \frac{\Gamma}{(x-x_0)^2 + \Gamma^2}

    Here :math:`x_0 = 0` and `std` = :math:`\Gamma`.
    Some definitions use :math:`1/2\,\Gamma` instead of :math:`\Gamma`, but
    without 1/2 we get comparable peak width to Gaussians when using this
    window in convolutions, thus ``scipy.signal.gaussian(M, std=5)`` is similar
    to ``lorentz(M, std=5)``.

    Parameters
    ----------
    M : int
        number of points
    std : float
        spread parameter :math:`\Gamma`
    sym : bool

    Returns
    -------
    w : (M,)
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, dtype=float)
    odd = M % 2
    if not sym and not odd:
        M = M + 1
    n = np.arange(0, M) - (M - 1.0) / 2.0
    w = std / (n**2.0 + std**2.0)
    w /= w.max()
    if not sym and not odd:
        w = w[:-1]
    return w
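A quick comparison of the two window shapes, assuming the lorentz() function above is in scope together with numpy; it illustrates the docstring's claim that lorentz(M, std=5) has a peak width comparable to gaussian(M, std=5), but with much heavier tails:

import numpy as np
from scipy.signal.windows import gaussian

M = 101
w_lor = lorentz(M, std=5.0)
w_gau = gaussian(M, std=5.0)
print(w_lor[M // 2], w_gau[M // 2])   # 1.0 1.0 -- both peak-normalized at the centre
print(w_lor[0] > w_gau[0])            # True -- the Lorentzian decays far more slowly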
Example 11: _generate_noise_temporal_task
# Required import: from scipy import signal [as alias]
# Or: from scipy.signal import gaussian [as alias]
def _generate_noise_temporal_task(stimfunction_tr,
                                  motion_noise='gaussian',
                                  ):
    """Generate the signal dependent noise

    Create noise specific to the signal, for instance there is variability
    in how the signal manifests on each event

    Parameters
    ----------
    stimfunction_tr : 1 Dimensional array
        This is the timecourse of the stimuli in this experiment,
        each element represents a TR
    motion_noise : str
        What type of noise will you generate? Can be gaussian or rician

    Returns
    -------
    noise_task : one dimensional array, float
        Generates the temporal task noise timecourse
    """
    # Make the noise to be added
    stimfunction_tr = stimfunction_tr != 0
    if motion_noise == 'gaussian':
        noise = stimfunction_tr * np.random.normal(0, 1,
                                                   size=stimfunction_tr.shape)
    elif motion_noise == 'rician':
        noise = stimfunction_tr * stats.rice.rvs(0, 1,
                                                 size=stimfunction_tr.shape)

    noise_task = stimfunction_tr + noise

    # Normalize
    noise_task = stats.zscore(noise_task).flatten()
    return noise_task
Example 12: isogkern
# Required import: from scipy import signal [as alias]
# Or: from scipy.signal import gaussian [as alias]
def isogkern(kernlen, std):
    gkern1d = signal.gaussian(kernlen, std=std).reshape(kernlen, 1)
    gkern2d = np.outer(gkern1d, gkern1d)
    gkern2d = gkern2d / np.sum(gkern2d)
    return gkern2d
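A brief usage sketch for isogkern, assuming numpy as np and scipy.signal as signal are imported as indicated above:

import numpy as np

k = isogkern(13, 2.0)
print(k.shape)                   # (13, 13)
print(np.isclose(k.sum(), 1.0))  # True -- normalized to unit mass
print(np.allclose(k, k.T))       # True -- isotropic and symmetric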
Example 13: anisogkern
# Required import: from scipy import signal [as alias]
# Or: from scipy.signal import gaussian [as alias]
def anisogkern(kernlen, std1, std2, angle):
    gkern1d_1 = signal.gaussian(kernlen, std=std1).reshape(kernlen, 1)
    gkern1d_2 = signal.gaussian(kernlen, std=std2).reshape(kernlen, 1)
    gkern2d = np.outer(gkern1d_1, gkern1d_2)
    gkern2d = gkern2d / np.sum(gkern2d)
    return gkern2d
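Note that anisogkern never uses its angle argument, so the kernel it returns is always axis-aligned. One possible way to honour the angle, shown below as an assumption rather than the original author's code, is to rotate the separable kernel afterwards with scipy.ndimage.rotate:

import numpy as np
from scipy.signal import windows
from scipy.ndimage import rotate

def anisogkern_rotated(kernlen, std1, std2, angle_deg):
    # separable anisotropic Gaussian, then rotated by angle_deg degrees
    g1 = windows.gaussian(kernlen, std=std1)
    g2 = windows.gaussian(kernlen, std=std2)
    k = np.outer(g1, g2)
    k = rotate(k, angle_deg, reshape=False, mode='nearest')
    return k / k.sum()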
Example 14: DUF_downsample
# Required import: from scipy import signal [as alias]
# Or: from scipy.signal import gaussian [as alias]
def DUF_downsample(x, scale=4):
    """Downsampling with Gaussian kernel used in the DUF official code

    Args:
        x (Tensor, [B, T, C, H, W]): frames to be downsampled.
        scale (int): downsampling factor: 2 | 3 | 4.
    """
    assert scale in [2, 3, 4], 'Scale [{}] is not supported'.format(scale)

    def gkern(kernlen=13, nsig=1.6):
        import scipy.ndimage.filters as fi
        inp = np.zeros((kernlen, kernlen))
        # set element at the middle to one, a dirac delta
        inp[kernlen // 2, kernlen // 2] = 1
        # gaussian-smooth the dirac, resulting in a gaussian filter mask
        return fi.gaussian_filter(inp, nsig)

    B, T, C, H, W = x.size()
    x = x.view(-1, 1, H, W)
    pad_w, pad_h = 6 + scale * 2, 6 + scale * 2  # 6 is the pad of the gaussian filter
    r_h, r_w = 0, 0
    if scale == 3:
        r_h = 3 - (H % 3)
        r_w = 3 - (W % 3)
    x = F.pad(x, [pad_w, pad_w + r_w, pad_h, pad_h + r_h], 'reflect')

    gaussian_filter = torch.from_numpy(gkern(13, 0.4 * scale)).type_as(x).unsqueeze(0).unsqueeze(0)
    x = F.conv2d(x, gaussian_filter, stride=scale)
    x = x[:, :, 2:-2, 2:-2]
    x = x.view(B, T, C, x.size(2), x.size(3))
    return x
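A brief usage sketch for DUF_downsample, assuming torch, numpy as np and torch.nn.functional as F are imported as in the original repository:

import torch

frames = torch.rand(1, 5, 3, 64, 64)             # [B, T, C, H, W]
lr = DUF_downsample(frames, scale=4)
print(lr.shape)                                   # torch.Size([1, 5, 3, 16, 16])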
Example 15: compute_gaussian_krnl
# Required import: from scipy import signal [as alias]
# Or: from scipy.signal import gaussian [as alias]
def compute_gaussian_krnl(M):
    """Creates a gaussian kernel following Serra's paper."""
    g = signal.gaussian(M, M / 3., sym=True)
    G = np.dot(g.reshape(-1, 1), g.reshape(1, -1))
    G[M // 2:, :M // 2] = -G[M // 2:, :M // 2]
    G[:M // 2, M // 2:] = -G[:M // 2, M // 2:]
    return G
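A brief usage sketch for compute_gaussian_krnl, assuming numpy as np and scipy.signal as signal are imported as indicated above; the result is the signed "checkerboard" Gaussian kernel used for novelty detection on self-similarity matrices:

import numpy as np

G = compute_gaussian_krnl(32)
print(G.shape)                               # (32, 32)
print(np.sign(G[0, 0]), np.sign(G[-1, 0]))   # 1.0 -1.0 -- opposite-sign quadrants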