This article collects typical usage examples of the numpy.delete method in Python. If you are wondering how numpy.delete works, how to call it, or what it looks like in real code, the curated examples here may help. You can also explore further usage examples from the numpy module.
The following section presents 15 code examples of numpy.delete, sorted by popularity by default.
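As a quick orientation before the examples, here is a minimal sketch (not taken from the examples below) of the basic signature numpy.delete(arr, obj, axis=None): it returns a new array with the requested indices removed along the given axis, or from the flattened array when no axis is given.

import numpy as np

a = np.arange(12).reshape(3, 4)
print(np.delete(a, 1, axis=0))       # drop the second row -> shape (2, 4)
print(np.delete(a, [0, 2], axis=1))  # drop the first and third columns -> shape (3, 2)
print(np.delete(a, 5))               # no axis: flatten first, then drop element 5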

Example 1: draw_heatmap

# Required import: import numpy [as alias]
# Or: from numpy import delete [as alias]
def draw_heatmap(img, heatmap, alpha=0.5):
    """Draw a heatmap overlay over an image."""
    assert len(heatmap.shape) == 2 or \
        (len(heatmap.shape) == 3 and heatmap.shape[2] == 1)
    assert img.dtype in [np.uint8, np.int32, np.int64]
    assert heatmap.dtype in [np.float32, np.float64]

    if img.shape[0:2] != heatmap.shape[0:2]:
        heatmap_rs = np.clip(heatmap * 255, 0, 255).astype(np.uint8)
        heatmap_rs = ia.imresize_single_image(
            heatmap_rs[..., np.newaxis],
            img.shape[0:2],
            interpolation="nearest"
        )
        heatmap = np.squeeze(heatmap_rs) / 255.0

    cmap = plt.get_cmap('jet')
    heatmap_cmapped = cmap(heatmap)
    heatmap_cmapped = np.delete(heatmap_cmapped, 3, 2)
    heatmap_cmapped = heatmap_cmapped * 255
    mix = (1 - alpha) * img + alpha * heatmap_cmapped
    mix = np.clip(mix, 0, 255).astype(np.uint8)
    return mix
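
In Example 1, np.delete(heatmap_cmapped, 3, 2) removes the alpha channel that matplotlib colormaps append, leaving an RGB image. Here is a minimal, self-contained sketch of that one step, using a made-up RGBA array rather than a real colormap output:

import numpy as np

rgba = np.random.rand(4, 4, 4)    # hypothetical H x W x RGBA image
rgb = np.delete(rgba, 3, axis=2)  # drop channel index 3 (alpha) along the last axis
assert rgb.shape == (4, 4, 3)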

Example 2: jacobian

# Required import: import numpy [as alias]
# Or: from numpy import delete [as alias]
def jacobian(self, params, into=None):
    params = flattest(params)
    n = len(params)
    ii = np.arange(n)
    (rs, cs, zs) = ([], [], [])
    for ((mn, mx), f) in self.pieces_with_default:
        if len(ii) == 0: break
        k = np.where((params >= mn) & (params <= mx))[0]
        if len(k) == 0: continue
        kk = ii[k]
        j = f.jacobian(params[k])
        if j.shape[0] == 1 and j.shape[1] > 1: j = repmat(j, j.shape[1], 1)
        (rj, cj, vj) = sps.find(j)
        rs.append(kk[rj])
        cs.append(kk[cj])
        zs.append(vj)
        ii = np.delete(ii, k)
        params = np.delete(params, k)
    (rs, cs, zs) = [np.concatenate(us) if len(us) > 0 else [] for us in (rs, cs, zs)]
    dz = sps.csr_matrix((zs, (rs, cs)), shape=(n, n))
    return safe_into(into, dz)

Example 3: sacrificeStarCbyT

# Required import: import numpy [as alias]
# Or: from numpy import delete [as alias]
def sacrificeStarCbyT(self, sInds, t_dets, fZ, fEZ, WA, overheadTime):
    """Sacrifice the worst-performing CbyT star.

    Args:
        sInds[nStars] - indices of stars in the list
        t_dets[nStars] - time to observe each star (in days)
        fZ[nStars] - zodiacal light for each target
        fEZ - 0
        WA - inner working angle of the instrument
        overheadTime - overhead time added to each observation
    Returns:
        sInds[nStars] - indices of stars in the list
        t_dets[nStars] - time to observe each star (in days)
        sacrificedStarTime - time to distribute in days
    """
    # Takes about 5 seconds to evaluate once for all stars.
    CbyT = self.Completeness.comp_per_intTime(t_dets*u.d, self.TargetList, sInds,
        self.valfZmin[sInds], fEZ, WA, self.mode, self.Cb[sInds], self.Csp[sInds]) / t_dets
    sacrificeIndex = np.argmin(CbyT)  # index of the star to sacrifice
    # Need the index of the sacrificed star by this point.
    sacrificedStarTime = t_dets[sacrificeIndex] + overheadTime  # time being sacrificed
    sInds = np.delete(sInds, sacrificeIndex)
    t_dets = np.delete(t_dets, sacrificeIndex)
    return sInds, t_dets, sacrificedStarTime
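
The pattern worth noting in Example 3 is that the same index is deleted from two parallel arrays so they stay aligned. A toy sketch of just that pattern, with made-up stand-ins for sInds and t_dets (the real ranking uses comp_per_intTime, not this criterion):

import numpy as np

sInds = np.array([10, 11, 12, 13])       # hypothetical star indices
t_dets = np.array([1.5, 0.2, 3.0, 0.8])  # hypothetical integration times (days)
worst = int(np.argmin(t_dets))           # toy criterion standing in for the CbyT ranking
sInds = np.delete(sInds, worst)          # np.delete returns a copy, so reassign both arrays
t_dets = np.delete(t_dets, worst)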

Example 4: create_vertex_groups

# Required import: import numpy [as alias]
# Or: from numpy import delete [as alias]
def create_vertex_groups(groups=['common', 'not_used'], weights=[0.0, 0.0], ob=None):
    '''Creates vertex groups and sets weights. "groups" is a list of strings
    for the names of the groups. "weights" is a list of weights corresponding
    to the strings. Each vertex is assigned a weight for each vertex group to
    avoid calling vertex weights that are not assigned. If the groups are
    already present, the previous weights will be preserved. To reset weights,
    delete the created groups.'''
    if ob is None:
        ob = bpy.context.object
    vg = ob.vertex_groups
    for g in range(0, len(groups)):
        if groups[g] not in vg.keys():  # Don't create groups if they are already there
            vg.new(groups[g])
            vg[groups[g]].add(range(0, len(ob.data.vertices)), weights[g], 'REPLACE')
        else:
            # This way we avoid resetting the weights of existing groups.
            vg[groups[g]].add(range(0, len(ob.data.vertices)), 0, 'ADD')

Example 5: create_sew_edges

# Required import: import numpy [as alias]
# Or: from numpy import delete [as alias]
def create_sew_edges():
    bpy.ops.mesh.bridge_edge_loops()
    bpy.ops.mesh.delete(type='ONLY_FACE')
    return

# highlight a sew edge
# compare vertex counts
# subdivide to match counts
# distribute and smooth back into mesh
# create sew lines

# sewing functions ---------------->>>

Example 6: test_pruneFeatureMap_ShouldPruneRightParams

# Required import: import numpy [as alias]
# Or: from numpy import delete [as alias]
def test_pruneFeatureMap_ShouldPruneRightParams(self):
    dropped_index = 0
    output = self.module(self.input)
    torch.autograd.backward(output, self.upstream_gradient)
    old_weight_size = self.module.weight.size()
    old_bias_size = self.module.bias.size()
    old_out_channels = self.module.out_channels
    old_weight_values = self.module.weight.data.cpu().numpy()
    # ensure that the chosen index is dropped
    self.module.prune_feature_map(dropped_index)
    # check bias size
    self.assertEqual(self.module.bias.size()[0], (old_bias_size[0] - 1))
    # check output channels
    self.assertEqual(self.module.out_channels, old_out_channels - 1)
    _, *other_old_weight_sizes = old_weight_size
    # check weight size
    self.assertEqual(self.module.weight.size(), (old_weight_size[0] - 1, *other_old_weight_sizes))
    # check weight value
    expected = np.delete(old_weight_values, dropped_index, 0)
    self.assertTrue(np.array_equal(self.module.weight.data.cpu().numpy(), expected))
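
Examples 6 to 8 all build their expected result by applying np.delete to the old weights along the channel axis. A minimal sketch of that core expectation, assuming a hypothetical convolution weight layout of (out_channels, in_channels, kH, kW):

import numpy as np

weights = np.random.rand(4, 3, 3, 3)    # hypothetical (out_ch, in_ch, kH, kW) filter bank
pruned = np.delete(weights, 0, axis=0)  # drop output channel 0, as the prune is expected to do
assert pruned.shape == (3, 3, 3, 3)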

Example 7: test_PLinearDropInputs_ShouldDropRightParams

# Required import: import numpy [as alias]
# Or: from numpy import delete [as alias]
def test_PLinearDropInputs_ShouldDropRightParams(self):
    dropped_index = 0
    # assume input is 2x2x2, 2 layers of 2x2
    input_shape = (2, 2, 2)
    module = pnn.PLinear(8, 10)
    old_num_features = module.in_features
    old_weight = module.weight.data.cpu().numpy()
    resized_old_weight = np.resize(old_weight, (module.out_features, *input_shape))
    module.drop_inputs(input_shape, dropped_index)
    new_shape = module.weight.size()
    # ensure that the chosen index is dropped
    expected_weight = np.resize(np.delete(resized_old_weight, dropped_index, 1), new_shape)
    output = module.weight.data.cpu().numpy()
    self.assertTrue(np.array_equal(output, expected_weight))
    # ensure num features is reduced
    self.assertEqual(module.in_features, old_num_features - 1)

Example 8: test_PBatchNorm2dDropInputChannel_ShouldDropRightParams

# Required import: import numpy [as alias]
# Or: from numpy import delete [as alias]
def test_PBatchNorm2dDropInputChannel_ShouldDropRightParams(self):
    dropped_index = 0
    module = pnn.PBatchNorm2d(2)
    old_num_features = module.num_features
    old_bias = module.bias.data.cpu().numpy()
    old_weight = module.weight.data.cpu().numpy()
    module.drop_input_channel(dropped_index)
    # ensure that the chosen index is dropped
    expected_weight = np.delete(old_weight, dropped_index, 0)
    self.assertTrue(np.array_equal(module.weight.data.cpu().numpy(), expected_weight))
    expected_bias = np.delete(old_bias, dropped_index, 0)
    self.assertTrue(np.array_equal(module.bias.data.cpu().numpy(), expected_bias))
    # ensure num features is reduced
    self.assertEqual(module.num_features, old_num_features - 1)

Example 9: _correct_misaligned

# Required import: import numpy [as alias]
# Or: from numpy import delete [as alias]
def _correct_misaligned(misaligned_idcs, peaks):
    corrected_peaks = peaks.copy()
    misaligned_idcs = np.array(misaligned_idcs)
    # Make sure to not generate negative indices, or indices that exceed
    # the total number of peaks. prev_peaks and next_peaks must have the
    # same number of elements.
    valid_idcs = np.logical_and(
        misaligned_idcs > 1, misaligned_idcs < len(corrected_peaks) - 1  # pylint: disable=E1111
    )
    misaligned_idcs = misaligned_idcs[valid_idcs]
    prev_peaks = corrected_peaks[[i - 1 for i in misaligned_idcs]]
    next_peaks = corrected_peaks[[i + 1 for i in misaligned_idcs]]
    half_ibi = (next_peaks - prev_peaks) / 2
    peaks_interp = prev_peaks + half_ibi
    # Shift the R-peaks from the old to the new position.
    corrected_peaks = np.delete(corrected_peaks, misaligned_idcs)
    corrected_peaks = np.concatenate((corrected_peaks, peaks_interp)).astype(int)
    corrected_peaks.sort(kind="mergesort")
    return corrected_peaks
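
Example 9 replaces each misaligned peak with the midpoint of its neighbors: delete the bad positions, append the interpolated ones, and re-sort. A toy walk-through of that idea with made-up sample positions:

import numpy as np

peaks = np.array([10, 20, 37, 40, 50])           # hypothetical peak positions; index 2 is misaligned
bad = np.array([2])
interp = (peaks[bad - 1] + peaks[bad + 1]) // 2   # midpoint of the neighbors -> 30
fixed = np.sort(np.concatenate((np.delete(peaks, bad), interp)))
# fixed -> array([10, 20, 30, 40, 50])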

Example 10: _rsp_findpeaks_biosppy

# Required import: import numpy [as alias]
# Or: from numpy import delete [as alias]
def _rsp_findpeaks_biosppy(rsp_cleaned, sampling_rate):
    extrema = _rsp_findpeaks_extrema(rsp_cleaned)
    extrema, amplitudes = _rsp_findpeaks_outliers(rsp_cleaned, extrema, amplitude_min=0)
    peaks, troughs = _rsp_findpeaks_sanitize(extrema, amplitudes)

    # Apply minimum period outlier criterion (exclude inter-breath intervals
    # that produce a breathing rate larger than 35 breaths per minute).
    outlier_idcs = np.where((np.diff(peaks) / sampling_rate) < 1.7)[0]
    peaks = np.delete(peaks, outlier_idcs)
    troughs = np.delete(troughs, outlier_idcs)

    info = {"RSP_Peaks": peaks, "RSP_Troughs": troughs}
    return info
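
The 1.7-second cutoff in Example 10 follows from the 35 breaths-per-minute limit: 60 / 35 ≈ 1.71 s, so any inter-peak interval shorter than that is flagged. A small sketch of that step with made-up peak positions:

import numpy as np

sampling_rate = 100                        # hypothetical Hz
peaks = np.array([0, 200, 230, 430, 640])  # hypothetical peak samples
ibi = np.diff(peaks) / sampling_rate       # intervals in seconds: [2.0, 0.3, 2.0, 2.1]
outlier_idcs = np.where(ibi < 60 / 35)[0]  # faster than 35 breaths/min
peaks = np.delete(peaks, outlier_idcs)     # -> array([  0, 230, 430, 640])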

Example 11: _rsp_findpeaks_outliers

# Required import: import numpy [as alias]
# Or: from numpy import delete [as alias]
def _rsp_findpeaks_outliers(rsp_cleaned, extrema, amplitude_min=0.3):
    # Only consider those extrema that have a minimum vertical distance to
    # their direct neighbor, i.e., define outliers in absolute amplitude
    # difference between neighboring extrema.
    vertical_diff = np.abs(np.diff(rsp_cleaned[extrema]))
    median_diff = np.median(vertical_diff)
    min_diff = np.where(vertical_diff > (median_diff * amplitude_min))[0]
    extrema = extrema[min_diff]

    # Make sure that the alternation of peaks and troughs is unbroken. If
    # alternation of sign in extdiffs is broken, remove the extrema that
    # cause the breaks.
    amplitudes = rsp_cleaned[extrema]
    extdiffs = np.sign(np.diff(amplitudes))
    extdiffs = np.add(extdiffs[0:-1], extdiffs[1:])
    removeext = np.where(extdiffs != 0)[0] + 1
    extrema = np.delete(extrema, removeext)
    amplitudes = np.delete(amplitudes, removeext)
    return extrema, amplitudes
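
The second half of Example 11 detects broken peak/trough alternation by summing adjacent signs of the amplitude differences: wherever the sum is non-zero, two rises (or two falls) occurred in a row. A toy sketch with made-up amplitudes:

import numpy as np

amplitudes = np.array([1.0, -0.5, 0.8, 0.9, -0.4])  # hypothetical extrema amplitudes
extdiffs = np.sign(np.diff(amplitudes))             # [-1., 1., 1., -1.]
breaks = np.where(extdiffs[:-1] + extdiffs[1:] != 0)[0] + 1
# breaks -> array([2]): the extremum at index 2 breaks the alternation and would be deleted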

Example 12: nms_crnr_dist

# Required import: import numpy [as alias]
# Or: from numpy import delete [as alias]
def nms_crnr_dist(boxes, conf, overlap_threshold):
    I = np.argsort(conf)
    pick = []
    while I.size != 0:
        last = I.size
        i = I[-1]
        pick.append(i)

        scores = []
        for ind in I[:-1]:
            scores.append(bbox_corner_dist_measure(boxes[i, :], boxes[ind, :]))

        I = np.delete(I, np.concatenate(([last - 1], np.where(np.array(scores) > overlap_threshold)[0])))

    return pick

Example 13: compute_max_min

# Required import: import numpy [as alias]
# Or: from numpy import delete [as alias]
def compute_max_min(self, ske_joints):
    max_vals, min_vals = list(), list()
    for ske_joint in ske_joints:
        zero_row = []
        if self.dataset == 'NTU':
            for i in range(len(ske_joint)):
                if (ske_joint[i, :] == np.zeros((1, 150))).all():
                    zero_row.append(i)
            ske_joint = np.delete(ske_joint, zero_row, axis=0)
            if (ske_joint[:, 0:75] == np.zeros((ske_joint.shape[0], 75))).all():
                ske_joint = np.delete(ske_joint, range(75), axis=1)
            elif (ske_joint[:, 75:150] == np.zeros((ske_joint.shape[0], 75))).all():
                ske_joint = np.delete(ske_joint, range(75, 150), axis=1)
        max_val = ske_joint.max()
        min_val = ske_joint.min()
        max_vals.append(float(max_val))
        min_vals.append(float(min_val))
    max_vals, min_vals = np.array(max_vals), np.array(min_vals)

    return max_vals.max(), min_vals.min()
Developer: microsoft, Project: View-Adaptive-Neural-Networks-for-Skeleton-based-Human-Action-Recognition, Lines: 23, Source file: data_cnn.py
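
In Example 13, each NTU skeleton frame stores two actors as 75 + 75 values per row; when one actor's half is all zeros, it is removed with a column-range delete. A minimal sketch of that trimming step, assuming the same 150-column layout:

import numpy as np

frames = np.zeros((3, 150))
frames[:, :75] = np.random.rand(3, 75)             # hypothetical single-actor recording
if (frames[:, 75:150] == 0).all():                 # second actor absent
    frames = np.delete(frames, range(75, 150), axis=1)
assert frames.shape == (3, 75)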

Example 14: test_line_area_nan_series

# Required import: import numpy [as alias]
# Or: from numpy import delete [as alias]
def test_line_area_nan_series(self):
    values = [1, 2, np.nan, 3]
    s = Series(values)
    ts = Series(values, index=tm.makeDateIndex(k=4))
    for d in [s, ts]:
        ax = _check_plot_works(d.plot)
        masked = ax.lines[0].get_ydata()
        # remove nan for comparison purpose
        exp = np.array([1, 2, 3], dtype=np.float64)
        tm.assert_numpy_array_equal(np.delete(masked.data, 2), exp)
        tm.assert_numpy_array_equal(
            masked.mask, np.array([False, False, True, False]))

        expected = np.array([1, 2, 0, 3], dtype=np.float64)
        ax = _check_plot_works(d.plot, stacked=True)
        tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)

        ax = _check_plot_works(d.plot.area)
        tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)

        ax = _check_plot_works(d.plot.area, stacked=False)
        tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)

Example 15: deleterowcol

# Required import: import numpy [as alias]
# Or: from numpy import delete [as alias]
def deleterowcol(A, delrow, delcol):
    """Assumes that the matrix is in symmetric CSC form!"""
    m = A.shape[0]
    keep = np.delete(np.arange(0, m), delrow)
    A = A[keep, :]
    keep = np.delete(np.arange(0, m), delcol)
    A = A[:, keep]
    return A
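
A quick usage sketch for Example 15's helper, assuming SciPy is available and using a small symmetric matrix as a stand-in for the real one:

import numpy as np
import scipy.sparse as sps

M = np.arange(16).reshape(4, 4)
A = sps.csc_matrix(M + M.T)                  # small symmetric CSC matrix
B = deleterowcol(A, delrow=[1], delcol=[1])  # drop row 1 and column 1
print(B.toarray().shape)                     # (3, 3)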