This article collects typical usage examples of the Python method scipy.ndimage.gaussian_filter1d. If you are wondering what ndimage.gaussian_filter1d does, how to call it, or what real-world uses look like, the curated code samples below may help. You can also explore further usage examples from its parent module, scipy.ndimage.
The following shows 15 code examples of ndimage.gaussian_filter1d, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
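Before the examples, here is a minimal, self-contained sketch of the call itself; the arrays below are made-up illustrative data, not taken from any of the projects that follow:

import numpy as np
from scipy.ndimage import gaussian_filter1d

# Smooth a noisy 1-D signal with a Gaussian kernel of sigma = 3 samples.
noisy = np.sin(np.linspace(0, 2 * np.pi, 200)) + np.random.normal(0, 0.3, 200)
smooth = gaussian_filter1d(noisy, sigma=3)

# For n-dimensional input, `axis` selects the dimension to filter along and
# `mode` controls boundary handling ('reflect' is the default).
stack = np.random.rand(4, 200)
stack_smooth = gaussian_filter1d(stack, sigma=3, axis=-1, mode='reflect')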
Example 1: openpose2motion
# Required import: from scipy import ndimage [as alias]
# Or: from scipy.ndimage import gaussian_filter1d [as alias]
def openpose2motion(json_dir, scale=1.0, smooth=True, max_frame=None):
    json_files = sorted(os.listdir(json_dir))
    # keep a frame count that is a multiple of 8 unless max_frame is given
    length = max_frame if max_frame is not None else len(json_files) // 8 * 8
    json_files = json_files[:length]
    json_files = [os.path.join(json_dir, x) for x in json_files]

    motion = []
    for path in json_files:
        with open(path) as f:
            jointDict = json.load(f)
            # first 15 keypoints, x/y only (confidence column dropped)
            joint = np.array(jointDict['people'][0]['pose_keypoints_2d']).reshape((-1, 3))[:15, :2]
            # fill missing (zero) joints from the previous frame
            if len(motion) > 0:
                joint[np.where(joint == 0)] = motion[-1][np.where(joint == 0)]
            motion.append(joint)

    # back-fill any remaining zeros from later frames
    for i in range(len(motion) - 1, 0, -1):
        motion[i - 1][np.where(motion[i - 1] == 0)] = motion[i][np.where(motion[i - 1] == 0)]

    motion = np.stack(motion, axis=2)
    if smooth:
        # smooth each coordinate along the time axis
        motion = gaussian_filter1d(motion, sigma=2, axis=-1)
    motion = motion * scale
    return motion
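A hypothetical call of the function above; the directory name is made up and stands for a folder of per-frame OpenPose keypoint JSON files:

motion = openpose2motion("data/openpose_json", scale=1.0, smooth=True)
print(motion.shape)  # (15, 2, n_frames): 15 joints, x/y coordinates, time on the last axis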
Example 2: find_jumps
# Required import: from scipy import ndimage [as alias]
# Or: from scipy.ndimage import gaussian_filter1d [as alias]
def find_jumps(self, ds, threshold=40000):
    self._prepare_find_jumps()
    ds = self._hf[ds]
    ds = gaussian_filter1d(ds, 2)
    offset = ds[0]
    jpnh = 0
    for i in range(ds.shape[0] - 3):
        #i +=3
        #df=(((ds[i+1]+ds[i+2]+ds[i+3])/3.)-ds[i])
        #df=(ds[i] - ((ds[i-1]+ds[i-2]+ds[i-3])/3.))
        df = ds[i + 1] - ds[i]
        if abs(df) > threshold:
            # a jump: record it and correct the running offset
            self.qps_jpn_nr.append(1.)
            offset = offset - df
            jpnh = df
            #print(df, offset)
            self.qps_jpn_hight.append(abs(float(jpnh)))
            self.qps_jpn_spec.append(float(ds[i] + offset))
            jpnh = df
        else:
            self.qps_jpn_nr.append(0.)
            #self.qps_jpn_hight.append(float(jpnh))
            self.qps_jpn_spec.append(float(ds[i] + offset))
Example 3: find_jumps2
# Required import: from scipy import ndimage [as alias]
# Or: from scipy.ndimage import gaussian_filter1d [as alias]
def find_jumps2(self, ds, threshold=30000):
    self._prepare_find_jumps()
    ds = self._hf[ds]
    offset = ds[0]
    # first we remove a bit of noise
    #flt = gaussian_filter1d(ds,10)
    flt = median_filter(ds, size=10)
    #flt = ds
    # the sobel filter finds the "jumps"
    sb = sobel(flt)
    for i in sb:
        self.qps_jpn_hight.append(float(i))
    for i in flt:
        self.qps_jpn_spec.append(float(i))
    """
    for i in xrange(flt.shape[0]-1):
        if(abs(sb[i])>threshold):
            offset -= sb[i]
            self.qps_jpn_spec.append(float(flt[i]-offset))
        else:
            self.qps_jpn_spec.append(float(flt[i]-offset))
    """
    #for i in sb
Example 4: set_prefilter
# Required import: from scipy import ndimage [as alias]
# Or: from scipy.ndimage import gaussian_filter1d [as alias]
def set_prefilter(self, gaussian=False, median=False, params=[]):
    self._do_prefilter_data = False
    if gaussian or median:
        self._do_prefilter_data = True
        #print gaussian, median
        if median:
            self._prefilter = median_filter
            #print("median_filter")
            if params:
                self._prefilter_params = params[0]
            else:
                self._prefilter_params = 6
        if gaussian:
            self._prefilter = gaussian_filter1d
            #print("gaussian_filter1d")
            if params:
                self._prefilter_params = params[0]
            else:
                self._prefilter_params = 6  # 0.4
Example 5: _compute_hist
# Required import: from scipy import ndimage [as alias]
# Or: from scipy.ndimage import gaussian_filter1d [as alias]
def _compute_hist(data, x_scale="linear", smooth_sigma=2, nbins=200):
    # Count each category in log or linear space
    min = np.nanmin(data)
    max = np.nanmax(data)
    if x_scale == "log":
        count_y, bins = np.histogram(a=data, bins=np.logspace(np.log10(min), np.log10(max) + 0.1, nbins))
    elif x_scale == "linear":
        count_y, bins = np.histogram(a=data, bins=np.linspace(min, max, nbins))

    # Use the upper edge of each bin as the x label
    count_x = bins[1:]

    # Smooth results with a gaussian filter
    if smooth_sigma:
        count_y = gaussian_filter1d(count_y, sigma=smooth_sigma)

    # Convert to python list
    count_x = [float(i) for i in count_x]
    count_y = [float(i) for i in count_y]
    return (count_x, count_y)
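A hypothetical usage sketch for the helper above; the simulated read lengths and parameter values are assumptions, not part of the original project:

import numpy as np

lengths = np.random.lognormal(mean=8.0, sigma=0.5, size=10_000)
count_x, count_y = _compute_hist(lengths, x_scale="log", smooth_sigma=2)
# count_x holds the upper bin edges and count_y the smoothed counts,
# ready to hand off to a plotting library.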
Example 6: load_zstack
# Required import: from scipy import ndimage [as alias]
# Or: from scipy.ndimage import gaussian_filter1d [as alias]
def load_zstack(self):
    name = QtGui.QFileDialog.getOpenFileName(
        self, "Open zstack", filter="*.tif"
    )
    self.fname = name[0]
    try:
        self.zstack = imread(self.fname)
        self.zLy, self.zLx = self.zstack.shape[1:]
        self.Zedit.setValidator(QtGui.QIntValidator(0, self.zstack.shape[0]))
        self.zrange = [np.percentile(self.zstack, 1), np.percentile(self.zstack, 99)]
        self.computeZ.setEnabled(True)
        self.zloaded = True
        self.zbox.setEnabled(True)
        self.zbox.setChecked(True)
        if 'zcorr' in self.ops[0]:
            if self.zstack.shape[0] == self.ops[0]['zcorr'].shape[0]:
                zcorr = self.ops[0]['zcorr']
                self.zmax = np.argmax(gaussian_filter1d(zcorr.T.copy(), 2, axis=1), axis=1)
                self.plot_zcorr()
    except Exception as e:
        print('ERROR: %s' % e)
Example 7: find_firsts
# Required import: from scipy import ndimage [as alias]
# Or: from scipy.ndimage import gaussian_filter1d [as alias]
def find_firsts(curve):
    curve = gaussian_filter1d(curve, STD)
    # fig, ax = plt.subplots(1)
    # ax.plot(curve)
    # ax.plot(curve)
    # ax.set_ylim([0, 8000])
    # plt.savefig('plots/junk_vis/test_smoothing.png', bbox_inches='tight', dpi=300)
    # plt.close()
    firsts = []
    for target in TARGETS:
        idxs = np.sort(np.asarray(curve > target).nonzero()[0])
        if idxs.size == 0:
            firsts.append(-1)
        else:
            firsts.append(idxs[0])
    return firsts
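The function above relies on two module-level constants, STD and TARGETS, that are not shown here. The following sketch assumes find_firsts is defined in the same script and uses made-up values purely for illustration:

import numpy as np

STD = 2                   # assumed smoothing sigma, not from the original project
TARGETS = [10, 50, 100]   # assumed thresholds, not from the original project

counts = np.cumsum(np.random.poisson(1.0, size=500))
print(find_firsts(counts))  # first index at which each target is exceeded, or -1 if never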
Example 8: amplitude_cutoff
# Required import: from scipy import ndimage [as alias]
# Or: from scipy.ndimage import gaussian_filter1d [as alias]
def amplitude_cutoff(amplitudes, num_histogram_bins=500, histogram_smoothing_value=3):
    """Calculate approximate fraction of spikes missing from a distribution of amplitudes.

    Assumes the amplitude histogram is symmetric (not valid in the presence of drift).
    Inspired by metric described in Hill et al. (2011) J Neurosci 31: 8699-8705.

    Input:
    ------
    amplitudes : numpy.ndarray
        Array of amplitudes (don't need to be in physical units)

    Output:
    -------
    fraction_missing : float
        Fraction of missing spikes (0-0.5)
        If more than 50% of spikes are missing, an accurate estimate isn't possible
    """
    h, b = np.histogram(amplitudes, num_histogram_bins, density=True)
    pdf = gaussian_filter1d(h, histogram_smoothing_value)
    support = b[:-1]

    # Mirror the density at the lower cut-off around the peak: everything beyond
    # the matching point on the right approximates the mass lost on the left.
    peak_index = np.argmax(pdf)
    G = np.argmin(np.abs(pdf[peak_index:] - pdf[0])) + peak_index

    bin_size = np.mean(np.diff(support))
    fraction_missing = np.sum(pdf[G:]) * bin_size
    fraction_missing = np.min([fraction_missing, 0.5])
    return fraction_missing
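A rough usage sketch for the metric above, assuming the function is in scope. The synthetic amplitudes and the cut two standard deviations below the mean are illustrative only:

import numpy as np

rng = np.random.default_rng(0)
amps = rng.normal(loc=100.0, scale=10.0, size=50_000)
amps = amps[amps > 80.0]          # drop roughly the lowest ~2% of spikes
print(amplitude_cutoff(amps))     # the estimate should come back in the vicinity of 0.02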
Example 9: test_orders_gauss
# Required import: from scipy import ndimage [as alias]
# Or: from scipy.ndimage import gaussian_filter1d [as alias]
def test_orders_gauss():
    # Check order inputs to Gaussians
    arr = np.zeros((1,))
    yield assert_equal, 0, sndi.gaussian_filter(arr, 1, order=0)
    yield assert_equal, 0, sndi.gaussian_filter(arr, 1, order=3)
    yield assert_raises, ValueError, sndi.gaussian_filter, arr, 1, -1
    yield assert_raises, ValueError, sndi.gaussian_filter, arr, 1, 4
    yield assert_equal, 0, sndi.gaussian_filter1d(arr, 1, axis=-1, order=0)
    yield assert_equal, 0, sndi.gaussian_filter1d(arr, 1, axis=-1, order=3)
    yield assert_raises, ValueError, sndi.gaussian_filter1d, arr, 1, -1, -1
    yield assert_raises, ValueError, sndi.gaussian_filter1d, arr, 1, -1, 4
Example 10: test_orders_gauss
# Required import: from scipy import ndimage [as alias]
# Or: from scipy.ndimage import gaussian_filter1d [as alias]
def test_orders_gauss():
    # Check order inputs to Gaussians
    arr = np.zeros((1,))
    assert_equal(0, sndi.gaussian_filter(arr, 1, order=0))
    assert_equal(0, sndi.gaussian_filter(arr, 1, order=3))
    assert_raises(ValueError, sndi.gaussian_filter, arr, 1, -1)
    assert_equal(0, sndi.gaussian_filter1d(arr, 1, axis=-1, order=0))
    assert_equal(0, sndi.gaussian_filter1d(arr, 1, axis=-1, order=3))
    assert_raises(ValueError, sndi.gaussian_filter1d, arr, 1, -1, -1)
Example 11: test_multiple_modes_sequentially
# Required import: from scipy import ndimage [as alias]
# Or: from scipy.ndimage import gaussian_filter1d [as alias]
def test_multiple_modes_sequentially():
    # Test that the filters with multiple mode capabilities for different
    # dimensions give the same result as applying the filters with
    # different modes sequentially
    arr = np.array([[1., 0., 0.],
                    [1., 1., 0.],
                    [0., 0., 0.]])

    modes = ['reflect', 'wrap']

    expected = sndi.gaussian_filter1d(arr, 1, axis=0, mode=modes[0])
    expected = sndi.gaussian_filter1d(expected, 1, axis=1, mode=modes[1])
    assert_equal(expected,
                 sndi.gaussian_filter(arr, 1, mode=modes))

    expected = sndi.uniform_filter1d(arr, 5, axis=0, mode=modes[0])
    expected = sndi.uniform_filter1d(expected, 5, axis=1, mode=modes[1])
    assert_equal(expected,
                 sndi.uniform_filter(arr, 5, mode=modes))

    expected = sndi.maximum_filter1d(arr, size=5, axis=0, mode=modes[0])
    expected = sndi.maximum_filter1d(expected, size=5, axis=1, mode=modes[1])
    assert_equal(expected,
                 sndi.maximum_filter(arr, size=5, mode=modes))

    expected = sndi.minimum_filter1d(arr, size=5, axis=0, mode=modes[0])
    expected = sndi.minimum_filter1d(expected, size=5, axis=1, mode=modes[1])
    assert_equal(expected,
                 sndi.minimum_filter(arr, size=5, mode=modes))
Example 12: test_gaussian_truncate
# Required import: from scipy import ndimage [as alias]
# Or: from scipy.ndimage import gaussian_filter1d [as alias]
def test_gaussian_truncate():
    # Test that Gaussian filters can be truncated at different widths.
    # These tests only check that the result has the expected number
    # of nonzero elements.
    arr = np.zeros((100, 100), float)
    arr[50, 50] = 1
    num_nonzeros_2 = (sndi.gaussian_filter(arr, 5, truncate=2) > 0).sum()
    assert_equal(num_nonzeros_2, 21**2)
    num_nonzeros_5 = (sndi.gaussian_filter(arr, 5, truncate=5) > 0).sum()
    assert_equal(num_nonzeros_5, 51**2)

    # Test truncate when sigma is a sequence.
    f = sndi.gaussian_filter(arr, [0.5, 2.5], truncate=3.5)
    fpos = f > 0
    n0 = fpos.any(axis=0).sum()
    # n0 should be 2*int(2.5*3.5 + 0.5) + 1
    assert_equal(n0, 19)
    n1 = fpos.any(axis=1).sum()
    # n1 should be 2*int(0.5*3.5 + 0.5) + 1
    assert_equal(n1, 5)

    # Test gaussian_filter1d.
    x = np.zeros(51)
    x[25] = 1
    f = sndi.gaussian_filter1d(x, sigma=2, truncate=3.5)
    n = (f > 0).sum()
    assert_equal(n, 15)

    # Test gaussian_laplace
    y = sndi.gaussian_laplace(x, sigma=2, truncate=3.5)
    nonzero_indices = np.where(y != 0)[0]
    n = nonzero_indices.ptp() + 1
    assert_equal(n, 15)

    # Test gaussian_gradient_magnitude
    y = sndi.gaussian_gradient_magnitude(x, sigma=2, truncate=3.5)
    nonzero_indices = np.where(y != 0)[0]
    n = nonzero_indices.ptp() + 1
    assert_equal(n, 15)
Example 13: split_traces
# Required import: from scipy import ndimage [as alias]
# Or: from scipy.ndimage import gaussian_filter1d [as alias]
def split_traces(self, ds, threshold=30000):
    self._prepare_find_jumps()
    ds = self._hf[ds]
    # first we remove a bit of noise, size is the number of averages
    #flt = gaussian_filter1d(ds,10)
    flt = median_filter(ds, size=3)
    #flt = ds
    # the sobel filter finds the "jumps"
    sb = sobel(flt)
    for i in sb:
        self.qps_jpn_hight.append(float(i))
    #for i in flt: self.qps_jpn_spec.append(float(i))
    offset = ds[0]
    tr_num = 0
    tr_name = "qps_tr_" + str(tr_num)
    tr_obj = self._hf.add_value_vector(tr_name,
                                       folder='analysis',
                                       x=self._x_co,
                                       unit='Hz')
    keepout = 4
    for i, tr in enumerate(flt):
        keepout += 1
        if abs(sb[i]) > threshold and keepout > 3:
            keepout = 0
            # new trace
            tr_num += 1
            tr_name = "qps_tr_" + str(tr_num)
            tr_obj = self._hf.add_value_vector(tr_name,
                                               folder='analysis',
                                               x=self._x_co,
                                               unit='Hz')
            print(tr, i)
            #tr_obj.append(float(tr))
        else:
            if keepout > 2:
                tr_obj.append(float(tr - offset))
Example 14: __init__
# Required import: from scipy import ndimage [as alias]
# Or: from scipy.ndimage import gaussian_filter1d [as alias]
def __init__(self, seq_type, root_dir, data_list, cache_path=None, step_size=10, window_size=1000,
             random_shift=0, transform=None, **kwargs):
    super(HeadingDataset, self).__init__()
    self.seq_type = seq_type
    self.feature_dim = seq_type.feature_dim
    self.target_dim = seq_type.target_dim
    self.aux_dim = seq_type.aux_dim
    self.window_size = window_size
    self.step_size = step_size
    self.random_shift = random_shift
    self.transform = transform

    self.data_path = [osp.join(root_dir, data) for data in data_list]
    self.index_map = []

    self.features, self.targets, self.velocities = load_cached_sequences(
        seq_type, root_dir, data_list, cache_path, **kwargs)

    # Optionally smooth the sequence
    feat_sigma = kwargs.get('feature_sigma', -1)
    targ_sigma = kwargs.get('target_sigma', -1)
    if feat_sigma > 0:
        self.features = [gaussian_filter1d(feat, sigma=feat_sigma, axis=0) for feat in self.features]
    if targ_sigma > 0:
        self.targets = [gaussian_filter1d(targ, sigma=targ_sigma, axis=0) for targ in self.targets]

    max_norm = kwargs.get('max_velocity_norm', 3.0)
    for i in range(len(data_list)):
        self.features[i] = self.features[i][:-1]
        self.targets[i] = self.targets[i][:-1]
        self.velocities[i] = self.velocities[i]
        velocity = np.linalg.norm(self.velocities[i], axis=1)  # Remove outlier ground truth data
        bad_data = velocity > max_norm
        for j in range(window_size + random_shift, self.targets[i].shape[0], step_size):
            if not bad_data[j - window_size - random_shift:j + random_shift].any():
                self.index_map.append([i, j])

    if kwargs.get('shuffle', True):
        random.shuffle(self.index_map)
Example 15: __init__
# Required import: from scipy import ndimage [as alias]
# Or: from scipy.ndimage import gaussian_filter1d [as alias]
def __init__(self, seq_type, root_dir, data_list, cache_path=None, step_size=10, window_size=200,
             random_shift=0, transform=None, **kwargs):
    super().__init__()
    self.feature_dim = seq_type.feature_dim
    self.target_dim = seq_type.target_dim
    self.aux_dim = seq_type.aux_dim
    self.window_size = window_size
    self.step_size = step_size
    self.random_shift = random_shift
    self.transform = transform

    self.data_path = [osp.join(root_dir, data) for data in data_list]
    self.index_map = []
    self.ts, self.orientations, self.gt_pos = [], [], []

    self.features, self.targets, aux = load_cached_sequences(
        seq_type, root_dir, data_list, cache_path, interval=1, **kwargs)

    # Optionally smooth the sequence
    feat_sigma = kwargs.get('feature_sigma', -1)
    targ_sigma = kwargs.get('target_sigma', -1)
    if feat_sigma > 0:
        self.features = [gaussian_filter1d(feat, sigma=feat_sigma, axis=0) for feat in self.features]
    if targ_sigma > 0:
        self.targets = [gaussian_filter1d(targ, sigma=targ_sigma, axis=0) for targ in self.targets]

    for i in range(len(data_list)):
        self.ts.append(aux[i][:, 0])
        self.orientations.append(aux[i][:, 1:5])
        self.gt_pos.append(aux[i][:, -3:])
        self.index_map += [[i, j] for j in range(window_size, self.targets[i].shape[0], step_size)]

    if kwargs.get('shuffle', True):
        random.shuffle(self.index_map)