This article collects typical usage examples of the numpy.logical_and method in Python. If you are wondering what numpy.logical_and does in practice and how to use it, the curated code examples below may help. You can also explore further usage examples from the numpy module that this method belongs to.
Below are 15 code examples of numpy.logical_and, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
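Before the examples, a minimal sketch of what numpy.logical_and itself does may be useful: it computes the element-wise boolean AND of two arrays (or of two comparison results), which is the pattern every example below builds on. The arrays here are made up purely for illustration.

import numpy as np

a = np.array([True, True, False, False])
b = np.array([True, False, True, False])
print(np.logical_and(a, b))          # [ True False False False]

# The same call also combines comparison results; for boolean arrays it is
# equivalent to the & operator.
x = np.array([0, 1, 2, 3, 4])
print(np.logical_and(x > 1, x < 4))  # [False False  True  True False]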
Example 1: dataframe_select
# Required module: import numpy [as alias]
# Alternatively: from numpy import logical_and [as alias]
def dataframe_select(df, *cols, **filters):
    '''
    dataframe_select(df, k1=v1, k2=v2...) yields df filtered so that, for each keyword argument,
    the column named by the key keeps only the rows whose cells match the given value.
    dataframe_select(df, col1, col2...) selects the given columns.
    dataframe_select(df, col1, col2..., k1=v1, k2=v2...) does both.
    If a value is a tuple/list of 2 elements, it is treated as a range and cells must fall
    between the two values. If a value is a tuple/list of more than 2 elements, or a set of any
    length, it is treated as a list of values, any one of which may match the cell.
    '''
    ii = np.ones(len(df), dtype='bool')
    for (k,v) in six.iteritems(filters):
        vals = df[k].values
        if pimms.is_set(v): jj = np.isin(vals, list(v))
        elif pimms.is_vector(v) and len(v) == 2: jj = (v[0] <= vals) & (vals < v[1])
        elif pimms.is_vector(v): jj = np.isin(vals, list(v))
        else: jj = (vals == v)
        ii = np.logical_and(ii, jj)
    if len(ii) != np.sum(ii): df = df.loc[ii]
    if len(cols) > 0: df = df[list(cols)]
    return df
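As a rough usage sketch (not part of the original library), the same mask-building pattern can be reproduced directly with pandas and numpy. The DataFrame and column names below are invented for illustration, and the call mentioned in the comment assumes the dataframe_select signature above.

import numpy as np
import pandas as pd

# Hypothetical data; column names are made up for illustration.
df = pd.DataFrame({'age': [22, 35, 47, 51], 'group': ['a', 'b', 'a', 'c']})

# Roughly what dataframe_select(df, 'age', age=(30, 50), group={'a', 'b'}) would compute:
mask = np.ones(len(df), dtype='bool')
mask = np.logical_and(mask, (30 <= df['age'].values) & (df['age'].values < 50))  # 2-element tuple -> range filter
mask = np.logical_and(mask, np.isin(df['group'].values, ['a', 'b']))             # set -> membership filter
print(df.loc[mask, ['age']])  # rows 1 and 2, 'age' column only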
Example 2: _update_labels
# Required module: import numpy [as alias]
# Alternatively: from numpy import logical_and [as alias]
def _update_labels(self, label, crop_box, height, width):
"""Convert labels according to crop box"""
xmin = float(crop_box[0]) / width
ymin = float(crop_box[1]) / height
w = float(crop_box[2]) / width
h = float(crop_box[3]) / height
out = label.copy()
out[:, (1, 3)] -= xmin
out[:, (2, 4)] -= ymin
out[:, (1, 3)] /= w
out[:, (2, 4)] /= h
out[:, 1:5] = np.maximum(0, out[:, 1:5])
out[:, 1:5] = np.minimum(1, out[:, 1:5])
coverage = self._calculate_areas(out[:, 1:]) * w * h / self._calculate_areas(label[:, 1:])
valid = np.logical_and(out[:, 3] > out[:, 1], out[:, 4] > out[:, 2])
valid = np.logical_and(valid, coverage > self.min_eject_coverage)
valid = np.where(valid)[0]
if valid.size < 1:
return None
out = out[valid, :]
return out
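The validity check at the end relies on logical_and to keep only boxes that still have positive width and height after cropping. A small self-contained sketch of that mask, with a made-up label array in [class, xmin, ymin, xmax, ymax] format:

import numpy as np

out = np.array([[0, 0.1, 0.2, 0.4, 0.5],   # valid box
                [1, 0.3, 0.3, 0.3, 0.6],   # zero width  -> dropped
                [2, 0.5, 0.7, 0.8, 0.6]])  # ymax < ymin -> dropped

valid = np.logical_and(out[:, 3] > out[:, 1], out[:, 4] > out[:, 2])
print(np.where(valid)[0])  # [0]
print(out[valid, :])       # only the first box survives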
Example 3: _parse_label
# Required module: import numpy [as alias]
# Alternatively: from numpy import logical_and [as alias]
def _parse_label(self, label):
"""Helper function to parse object detection label.
Format for raw label:
n \t k \t ... \t [id \t xmin\t ymin \t xmax \t ymax \t ...] \t [repeat]
where n is the width of header, 2 or larger
k is the width of each object annotation, can be arbitrary, at least 5
"""
if isinstance(label, nd.NDArray):
label = label.asnumpy()
raw = label.ravel()
if raw.size < 7:
raise RuntimeError("Label shape is invalid: " + str(raw.shape))
header_width = int(raw[0])
obj_width = int(raw[1])
if (raw.size - header_width) % obj_width != 0:
msg = "Label shape %s inconsistent with annotation width %d." \
%(str(raw.shape), obj_width)
raise RuntimeError(msg)
out = np.reshape(raw[header_width:], (-1, obj_width))
# remove bad ground-truths
valid = np.where(np.logical_and(out[:, 3] > out[:, 1], out[:, 4] > out[:, 2]))[0]
if valid.size < 1:
raise RuntimeError('Encounter sample with no valid label.')
return out[valid, :]
Example 4: get_hardness_distribution
# Required module: import numpy [as alias]
# Alternatively: from numpy import logical_and [as alias]
def get_hardness_distribution(gtG, max_dist, min_dist, rng, trials, bins, nodes,
                              n_ori, step_size):
    heuristic_fn = lambda node_ids, node_id: \
        heuristic_fn_vec(nodes[node_ids, :], nodes[[node_id], :], n_ori, step_size)
    num_nodes = gtG.num_vertices()
    gt_dists = []; h_dists = []
    for i in range(trials):
        end_node_id = rng.choice(num_nodes)
        gt_dist = gt.topology.shortest_distance(gt.GraphView(gtG, reversed=True),
                                                source=gtG.vertex(end_node_id),
                                                target=None, max_dist=max_dist)
        gt_dist = np.array(gt_dist.get_array())
        ind = np.where(np.logical_and(gt_dist <= max_dist, gt_dist >= min_dist))[0]
        gt_dist = gt_dist[ind]
        h_dist = heuristic_fn(ind, end_node_id)[:, 0]
        gt_dists.append(gt_dist)
        h_dists.append(h_dist)
    gt_dists = np.concatenate(gt_dists)
    h_dists = np.concatenate(h_dists)
    hardness = 1. - h_dists * 1. / gt_dists
    hist, _ = np.histogram(hardness, bins)
    hist = hist.astype(np.float64)
    hist = hist / np.sum(hist)
    return hist
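The line combining the two distance bounds is a common logical_and idiom for selecting values inside a closed interval; a minimal, self-contained version with made-up distances:

import numpy as np

gt_dist = np.array([3, 12, 25, 49, 60, 87])
min_dist, max_dist = 10, 50

ind = np.where(np.logical_and(gt_dist <= max_dist, gt_dist >= min_dist))[0]
print(ind)           # [1 2 3]
print(gt_dist[ind])  # [12 25 49] -- every value lies in [10, 50]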
Example 5: test_subsample_all_examples
# Required module: import numpy [as alias]
# Alternatively: from numpy import logical_and [as alias]
def test_subsample_all_examples(self):
    numpy_labels = np.random.permutation(300)
    indicator = tf.constant(np.ones(300) == 1)
    numpy_labels = (numpy_labels - 200) > 0
    labels = tf.constant(numpy_labels)
    sampler = (balanced_positive_negative_sampler.
               BalancedPositiveNegativeSampler())
    is_sampled = sampler.subsample(indicator, 64, labels)
    with self.test_session() as sess:
        is_sampled = sess.run(is_sampled)
        self.assertTrue(sum(is_sampled) == 64)
        self.assertTrue(sum(np.logical_and(numpy_labels, is_sampled)) == 32)
        self.assertTrue(sum(np.logical_and(
            np.logical_not(numpy_labels), is_sampled)) == 32)
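The assertions count how many sampled entries are positives and how many are negatives by AND-ing the sampling mask with the labels (or their negation). A tiny standalone sketch of that counting, with made-up masks:

import numpy as np

labels     = np.array([True, False, True, False, True, False])
is_sampled = np.array([True, True, False, True, True, False])

positives_sampled = np.sum(np.logical_and(labels, is_sampled))                  # 2
negatives_sampled = np.sum(np.logical_and(np.logical_not(labels), is_sampled))  # 2
print(positives_sampled, negatives_sampled)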
Example 6: is_earthlike
# Required module: import numpy [as alias]
# Alternatively: from numpy import logical_and [as alias]
def is_earthlike(self, plan_inds, sInd):
"""
Is the planet earthlike?
"""
TL = self.TargetList
SU = self.SimulatedUniverse
PPop = self.PlanetPopulation
# extract planet and star properties
Rp_plan = SU.Rp[plan_inds].value
L_star = TL.L[sInd]
if PPop.scaleOrbits:
a_plan = (SU.a[plan_inds]/np.sqrt(L_star)).value
else:
a_plan = (SU.a[plan_inds]).value
# Definition: planet radius (in earth radii) and solar-equivalent luminosity must be
# between the given bounds.
Rp_plan_lo = 0.80/np.sqrt(a_plan)
# We use the numpy versions so that plan_ind can be a numpy vector.
return np.logical_and(
np.logical_and(Rp_plan >= Rp_plan_lo, Rp_plan <= 1.4),
np.logical_and(a_plan >= 0.95, a_plan <= 1.67))
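Because logical_and is a binary function, several conditions have to be nested as above; np.logical_and.reduce collapses the same chain into a single call. A sketch with made-up planet values, using the bounds from the method:

import numpy as np

Rp_plan = np.array([0.9, 1.2, 2.5])  # planet radii in Earth radii (made-up values)
a_plan  = np.array([1.0, 1.6, 1.0])  # scaled semi-major axes (made-up values)
Rp_plan_lo = 0.80 / np.sqrt(a_plan)

earthlike = np.logical_and.reduce([
    Rp_plan >= Rp_plan_lo, Rp_plan <= 1.4,
    a_plan >= 0.95, a_plan <= 1.67,
])
print(earthlike)  # [ True  True False]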
Example 7: test_subsample_selection
# Required module: import numpy [as alias]
# Alternatively: from numpy import logical_and [as alias]
def test_subsample_selection(self):
    # Test random sampling when only some examples can be sampled:
    # 100 samples, 20 positives, 10 positives cannot be sampled
    numpy_labels = np.arange(100)
    numpy_indicator = numpy_labels < 90
    indicator = tf.constant(numpy_indicator)
    numpy_labels = (numpy_labels - 80) >= 0
    labels = tf.constant(numpy_labels)
    sampler = (balanced_positive_negative_sampler.
               BalancedPositiveNegativeSampler())
    is_sampled = sampler.subsample(indicator, 64, labels)
    with self.test_session() as sess:
        is_sampled = sess.run(is_sampled)
        self.assertTrue(sum(is_sampled) == 64)
        self.assertTrue(sum(np.logical_and(numpy_labels, is_sampled)) == 10)
        self.assertTrue(sum(np.logical_and(
            np.logical_not(numpy_labels), is_sampled)) == 54)
        self.assertAllEqual(is_sampled, np.logical_and(is_sampled,
                                                       numpy_indicator))
Example 8: _do
# Required module: import numpy [as alias]
# Alternatively: from numpy import logical_and [as alias]
def _do(self, problem, X, **kwargs):
    n_parents, n_matings, n_var = X.shape
    _X = np.full((self.n_offsprings, n_matings, problem.n_var), False)
    for k in range(n_matings):
        p1, p2 = X[0, k], X[1, k]
        both_are_true = np.logical_and(p1, p2)
        _X[0, k, both_are_true] = True
        n_remaining = problem.n_max - np.sum(both_are_true)
        I = np.where(np.logical_xor(p1, p2))[0]
        S = I[np.random.permutation(len(I))][:n_remaining]
        _X[0, k, S] = True
    return _X
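A standalone sketch of the mask arithmetic in this crossover: genes present in both parents are copied with logical_and, while logical_xor marks the genes on which the parents disagree (a random subset of those is then switched on, up to n_max). The parent vectors below are invented:

import numpy as np

p1 = np.array([True, True, False, True, False])
p2 = np.array([True, False, True, True, False])

both_are_true = np.logical_and(p1, p2)  # genes shared by both parents
differing = np.logical_xor(p1, p2)      # genes on which the parents disagree
print(both_are_true)  # [ True False False  True False]
print(differing)      # [False  True  True False False]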
Example 9: _mandelbrot
# Required module: import numpy [as alias]
# Alternatively: from numpy import logical_and [as alias]
def _mandelbrot(size=1000, real_range=(-2, 2), imaginary_range=(-2, 2), iterations=25, threshold=4):
    img, c = _mandelbrot_initialize(size=size, real_range=real_range, imaginary_range=imaginary_range)
    optim = _mandelbrot_optimize(c)
    z = np.copy(c)
    for i in range(1, iterations + 1):  # pylint: disable=W0612
        # Continue only where smaller than threshold
        mask = (z * z.conjugate()).real < threshold
        mask = np.logical_and(mask, optim)
        if np.all(~mask):
            break
        # Increase
        img[mask] += 1
        # Iterate based on Mandelbrot equation
        z[mask] = z[mask] ** 2 + c[mask]
    # Fill the area excluded by the optimization (inside the cardioid / period-2 bulb)
    img[~optim] = np.max(img)
    return img
Example 10: _mandelbrot_optimize
# Required module: import numpy [as alias]
# Alternatively: from numpy import logical_and [as alias]
def _mandelbrot_optimize(c):
    # Optimization: most of the Mandelbrot-set points lie within the
    # cardioid or the period-2 bulb (the two most prominent shapes in
    # the Mandelbrot set). We can eliminate these from our search
    # straight away and save a lot of time.
    # See: http://en.wikipedia.org/wiki/Mandelbrot_set#Optimizations
    # First eliminate points within the cardioid
    p = (((c.real - 0.25) ** 2) + (c.imag ** 2)) ** 0.5
    mask1 = c.real > p - (2 * p ** 2) + 0.25
    # Next eliminate points within the period-2 bulb
    mask2 = ((c.real + 1) ** 2) + (c.imag ** 2) > 0.0625
    # Combine masks
    mask = np.logical_and(mask1, mask2)
    return mask
Example 11: _correct_missed
# Required module: import numpy [as alias]
# Alternatively: from numpy import logical_and [as alias]
def _correct_missed(missed_idcs, peaks):
    corrected_peaks = peaks.copy()
    missed_idcs = np.array(missed_idcs)
    # Calculate the position(s) of new beat(s). Make sure to not generate
    # negative indices. prev_peaks and next_peaks must have the same
    # number of elements.
    valid_idcs = np.logical_and(missed_idcs > 1, missed_idcs < len(corrected_peaks))  # pylint: disable=E1111
    missed_idcs = missed_idcs[valid_idcs]
    prev_peaks = corrected_peaks[[i - 1 for i in missed_idcs]]
    next_peaks = corrected_peaks[missed_idcs]
    added_peaks = prev_peaks + (next_peaks - prev_peaks) / 2
    # Add the new peaks before the missed indices (see numpy docs).
    corrected_peaks = np.insert(corrected_peaks, missed_idcs, added_peaks)
    return corrected_peaks
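A minimal, self-contained run of the bounds check and insertion used above, with made-up peak positions; indices 0 and 7 are rejected by the logical_and filter because they would produce negative or out-of-range look-ups:

import numpy as np

peaks = np.array([10, 30, 50, 90, 110])
missed_idcs = np.array([0, 3, 7])

valid_idcs = np.logical_and(missed_idcs > 1, missed_idcs < len(peaks))
missed_idcs = missed_idcs[valid_idcs]  # -> [3]

prev_peaks = peaks[missed_idcs - 1]
next_peaks = peaks[missed_idcs]
added_peaks = prev_peaks + (next_peaks - prev_peaks) / 2
print(np.insert(peaks, missed_idcs, added_peaks))  # [ 10  30  50  70  90 110]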
Example 12: _correct_misaligned
# Required module: import numpy [as alias]
# Alternatively: from numpy import logical_and [as alias]
def _correct_misaligned(misaligned_idcs, peaks):
    corrected_peaks = peaks.copy()
    misaligned_idcs = np.array(misaligned_idcs)
    # Make sure to not generate negative indices, or indices that exceed
    # the total number of peaks. prev_peaks and next_peaks must have the
    # same number of elements.
    valid_idcs = np.logical_and(
        misaligned_idcs > 1, misaligned_idcs < len(corrected_peaks) - 1  # pylint: disable=E1111
    )
    misaligned_idcs = misaligned_idcs[valid_idcs]
    prev_peaks = corrected_peaks[[i - 1 for i in misaligned_idcs]]
    next_peaks = corrected_peaks[[i + 1 for i in misaligned_idcs]]
    half_ibi = (next_peaks - prev_peaks) / 2
    peaks_interp = prev_peaks + half_ibi
    # Shift the R-peaks from the old to the new position.
    corrected_peaks = np.delete(corrected_peaks, misaligned_idcs)
    corrected_peaks = np.concatenate((corrected_peaks, peaks_interp)).astype(int)
    corrected_peaks.sort(kind="mergesort")
    return corrected_peaks
Example 13: pyeeg_ap_entropy
# Required module: import numpy [as alias]
# Alternatively: from numpy import logical_and [as alias]
def pyeeg_ap_entropy(X, M, R):
    N = len(X)
    Em = pyeeg_embed_seq(X, 1, M)
    A = np.tile(Em, (len(Em), 1, 1))
    B = np.transpose(A, [1, 0, 2])
    D = np.abs(A - B)  # D[i,j,k] = |Em[i][k] - Em[j][k]|
    InRange = np.max(D, axis=2) <= R
    # Probability that random M-sequences are in range
    Cm = InRange.mean(axis=0)
    # M+1-sequences in range if M-sequences are in range & last values are close
    Dp = np.abs(np.tile(X[M:], (N - M, 1)) - np.tile(X[M:], (N - M, 1)).T)
    Cmp = np.logical_and(Dp <= R, InRange[:-1, :-1]).mean(axis=0)
    Phi_m, Phi_mp = np.sum(np.log(Cm)), np.sum(np.log(Cmp))
    Ap_En = (Phi_m - Phi_mp) / (N - M)
    return Ap_En
Example 14: pyeeg_samp_entropy
# Required module: import numpy [as alias]
# Alternatively: from numpy import logical_and [as alias]
def pyeeg_samp_entropy(X, M, R):
    N = len(X)
    Em = pyeeg_embed_seq(X, 1, M)[:-1]
    A = np.tile(Em, (len(Em), 1, 1))
    B = np.transpose(A, [1, 0, 2])
    D = np.abs(A - B)  # D[i,j,k] = |Em[i][k] - Em[j][k]|
    InRange = np.max(D, axis=2) <= R
    np.fill_diagonal(InRange, 0)  # Don't count self-matches
    Cm = InRange.sum(axis=0)  # Probability that random M-sequences are in range
    Dp = np.abs(np.tile(X[M:], (N - M, 1)) - np.tile(X[M:], (N - M, 1)).T)
    Cmp = np.logical_and(Dp <= R, InRange).sum(axis=0)
    # Avoid taking log(0)
    Samp_En = np.log(np.sum(Cm + 1e-100) / np.sum(Cmp + 1e-100))
    return Samp_En
Example 15: create_pixel_plots
# Required module: import numpy [as alias]
# Alternatively: from numpy import logical_and [as alias]
def create_pixel_plots(candidate_path, reference_path, base_name,
                       last_band_alpha=False, limits=None, custom_alpha=None):
    c_ds, c_alpha, c_band_count = _open_image_and_get_info(
        candidate_path, last_band_alpha)
    r_ds, r_alpha, r_band_count = _open_image_and_get_info(
        reference_path, last_band_alpha)
    _assert_consistent(c_alpha, r_alpha, c_band_count, r_band_count)
    if custom_alpha is not None:
        combined_alpha = custom_alpha
    else:
        combined_alpha = numpy.logical_and(c_alpha, r_alpha)
    valid_pixels = numpy.nonzero(combined_alpha)
    for band_no in range(1, c_band_count + 1):
        c_band = gimage.read_single_band(c_ds, band_no)
        r_band = gimage.read_single_band(r_ds, band_no)
        file_name = '{}_{}.png'.format(base_name, band_no)
        display.plot_pixels(file_name, c_band[valid_pixels],
                            r_band[valid_pixels], limits)
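The combined_alpha computation is the key logical_and step here: a pixel is only compared when it is valid (unmasked) in both the candidate and the reference image. A small sketch with made-up 3x3 alpha masks:

import numpy as np

c_alpha = np.array([[1, 1, 0],
                    [1, 0, 1],
                    [1, 1, 1]], dtype=bool)
r_alpha = np.array([[1, 0, 0],
                    [1, 1, 1],
                    [0, 1, 1]], dtype=bool)

combined_alpha = np.logical_and(c_alpha, r_alpha)  # valid in both images
valid_pixels = np.nonzero(combined_alpha)
print(valid_pixels)  # (array([0, 1, 1, 2, 2]), array([0, 0, 2, 1, 2]))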