This page collects typical usage examples of the numpy.intersect1d method in Python. If you are wondering how numpy.intersect1d is used in practice, the curated code examples below may help. You can also explore other usage examples from the numpy module.
The following presents 15 code examples of numpy.intersect1d, drawn from open-source projects and sorted roughly by popularity.
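Before the examples, here is a minimal sketch of what numpy.intersect1d returns: the sorted, unique values present in both inputs. The arrays below are illustrative; return_indices requires NumPy >= 1.15.

import numpy as np

a = np.array([3, 1, 4, 1, 5])
b = np.array([5, 3, 3, 9])

print(np.intersect1d(a, b))  # [3 5] -- sorted, de-duplicated common values
common, ia, ib = np.intersect1d(a, b, return_indices=True)
# ia and ib hold the indices of the first occurrences of the common values in a and b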
Example 1: main_sequence_filter
# Required import: import numpy as np
# or: from numpy import intersect1d
def main_sequence_filter(self):
    """Removes stars from Target List which are not main sequence"""
    # indices from Target List to keep
    i1 = np.where((self.BV < 0.74) & (self.MV < 6*self.BV + 1.8))[0]
    i2 = np.where((self.BV >= 0.74) & (self.BV < 1.37) &
                  (self.MV < 4.3*self.BV + 3.05))[0]
    i3 = np.where((self.BV >= 1.37) & (self.MV < 18*self.BV - 15.7))[0]
    i4 = np.where((self.BV < 0.87) & (self.MV > -8*(self.BV - 1.35)**2 + 7.01))[0]
    i5 = np.where((self.BV >= 0.87) & (self.BV < 1.45) &
                  (self.MV < 5*self.BV + 0.81))[0]
    i6 = np.where((self.BV >= 1.45) & (self.MV > 18*self.BV - 18.04))[0]
    ia = np.append(np.append(i1, i2), i3)
    ib = np.append(np.append(i4, i5), i6)
    i = np.intersect1d(np.unique(ia), np.unique(ib))
    self.revise_lists(i)
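A small aside on Example 1: np.intersect1d already de-duplicates its inputs, so the np.unique calls are harmless but not required. A sketch with made-up index arrays:

import numpy as np

ia = np.array([2, 0, 2, 5])
ib = np.array([5, 5, 2, 7])
assert np.array_equal(np.intersect1d(ia, ib),
                      np.intersect1d(np.unique(ia), np.unique(ib)))  # both give [2, 5]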
Example 2: voc2012_split
# Required import: import numpy as np
# or: from numpy import intersect1d
def voc2012_split(dataset_dir='data/datasets/VOCdevkit/VOC2012/', split_ratios=[0.7, 0.2, 0.1]):
    images_dir = os.path.join(dataset_dir, 'JPEGImages/')
    labels_dir = os.path.join(dataset_dir, 'SegmentationClass/')
    image_filenames = [filename.split('.')[0] for filename in os.listdir(images_dir)
                       if os.path.isfile(os.path.join(images_dir, filename)) and filename.endswith('.jpg')]
    label_filenames = [filename.split('.')[0] for filename in os.listdir(labels_dir)
                       if os.path.isfile(os.path.join(labels_dir, filename)) and filename.endswith('.png')]
    # keep only the filename stems that have both a JPEG image and a PNG segmentation label
    dataset_filenames = np.intersect1d(image_filenames, label_filenames)

    train_dataset_filename = os.path.join(dataset_dir, 'train.txt')
    valid_dataset_filename = os.path.join(dataset_dir, 'val.txt')
    test_dataset_filename = os.path.join(dataset_dir, 'test.txt')

    try:
        train_val_test_split(
            dataset_filenames=dataset_filenames,
            split_ratios=split_ratios,
            train_dataset_filename=train_dataset_filename,
            valid_dataset_filename=valid_dataset_filename,
            test_dataset_filename=test_dataset_filename)
    except BaseException:
        raise Exception('Dataset split failed.')

    return train_dataset_filename, valid_dataset_filename, test_dataset_filename
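Example 2 relies on the fact that np.intersect1d also works on string arrays, which is convenient for matching filename stems. A small sketch with hypothetical stems:

import numpy as np

image_stems = ['2007_000032', '2007_000039', '2007_000063']
label_stems = ['2007_000039', '2007_000063', '2007_000121']
print(np.intersect1d(image_stems, label_stems))  # ['2007_000039' '2007_000063']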
Example 3: nonin_osc_strength
# Required import: import numpy as np
# or: from numpy import intersect1d
def nonin_osc_strength(self):
    """ Computes the non-interacting oscillator strengths and energies """
    from scipy.sparse import spmatrix
    # the original snippet uses these numpy names unqualified
    from numpy import array, zeros, where, einsum, unravel_index
    # NOTE: comega (presumably the list of frequencies) and eemax (an energy cutoff)
    # are not defined in this excerpt; they are assumed to come from the enclosing module.

    x, y, z = map(spmatrix.toarray, self.dipole_coo())
    i2d = array((x, y, z))
    n = self.mo_occ.shape[-1]

    p = zeros((len(comega)), dtype=np.complex128)  # result to accumulate

    for s in range(self.nspin):
        o, e, cc = self.mo_occ[0, s], self.mo_energy[0, s], self.mo_coeff[0, s, :, :, 0]
        oo1, ee1 = np.subtract.outer(o, o).reshape(n*n), np.subtract.outer(e, e).reshape(n*n)
        # pairs where the occupation difference is negative and the energy difference is below eemax
        idx = unravel_index(np.intersect1d(where(oo1 < 0.0), where(ee1 < eemax)), (n, n))
        ivrt, iocc = array(list(set(idx[0]))), array(list(set(idx[1])))
        voi2d = einsum('nia,ma->nmi', einsum('iab,nb->nia', i2d, cc[ivrt]), cc[iocc])
        t2osc = 2.0/3.0*einsum('voi,voi->vo', voi2d, voi2d)
        t2w = np.subtract.outer(e[ivrt], e[iocc])
        t2o = -np.subtract.outer(o[ivrt], o[iocc])
        for iw, w in enumerate(comega):
            p[iw] += 0.5*(t2osc*((t2o/(w - t2w)) - (t2o/(w + t2w)))).sum()
    return p
Example 4: _ecg_rsa_cycles
# Required import: import numpy as np
# or: from numpy import intersect1d
def _ecg_rsa_cycles(signals):
    """Extract respiratory cycles."""
    inspiration_onsets = np.intersect1d(
        np.where(signals["RSP_Phase"] == 1)[0],
        np.where(signals["RSP_Phase_Completion"] == 0)[0],
        assume_unique=True,
    )
    expiration_onsets = np.intersect1d(
        np.where(signals["RSP_Phase"] == 0)[0],
        np.where(signals["RSP_Phase_Completion"] == 0)[0],
        assume_unique=True,
    )
    cycles_length = np.diff(inspiration_onsets)
    return {
        "RSP_Inspiration_Onsets": inspiration_onsets,
        "RSP_Expiration_Onsets": expiration_onsets,
        "RSP_Cycles_Length": cycles_length,
    }
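Example 4 passes assume_unique=True, which skips the internal de-duplication step; it is safe here because np.where returns each index at most once. A rough sketch of the pattern with a hypothetical phase signal:

import numpy as np

phase = np.array([0, 0, 1, 1, 0, 0, 1])
completion = np.array([0.5, 1.0, 0.0, 0.5, 0.0, 0.5, 0.0])

onsets = np.intersect1d(np.where(phase == 1)[0],
                        np.where(completion == 0)[0],
                        assume_unique=True)
print(onsets)  # indices where phase 1 coincides with completion 0 -> [2 6]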
Example 5: get_partition_from_splits
# Required import: import numpy as np
# or: from numpy import intersect1d
def get_partition_from_splits(self, splits):
    num_splits = len(splits)
    splits_per_partition = np.int(np.ceil(np.log2(self.num_classes)))
    num_failed = 0
    while True:
        which_splits = np.random.choice(num_splits, splits_per_partition, replace=False)
        splits_for_this_partition = [splits[i] for i in which_splits]
        partition = defaultdict(list)
        num_big_enough_classes = 0
        for i_class, above_or_belows in enumerate(product([0, 1], repeat=splits_per_partition)):
            zones = [splits_for_this_partition[i][above_or_belows[i]] for i in range(splits_per_partition)]
            # multi-way intersection: indices that fall in every selected zone
            indices = reduce(np.intersect1d, zones)
            if len(indices) >= self.num_samples_per_class:
                num_big_enough_classes += 1
                partition[i_class].extend(indices.tolist())
        if num_big_enough_classes >= self.num_classes:
            break
        else:
            num_failed += 1
    return partition, num_failed
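The key line in Example 5 is reduce(np.intersect1d, zones), which intersects an arbitrary number of index arrays pairwise. A minimal illustration with made-up zones:

from functools import reduce
import numpy as np

zones = [np.array([1, 2, 3, 4]), np.array([2, 3, 4, 5]), np.array([0, 3, 4])]
print(reduce(np.intersect1d, zones))  # [3 4]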
Example 6: compute_miou
# Required import: import numpy as np
# or: from numpy import intersect1d
def compute_miou(coords, preds, targets, weights):
    coords, preds, targets, weights = filter_points(coords, preds, targets, weights)
    seen_classes = np.unique(targets)
    mask = np.zeros(CONF.NUM_CLASSES)
    mask[seen_classes] = 1

    pointmiou = np.zeros(CONF.NUM_CLASSES)
    voxmiou = np.zeros(CONF.NUM_CLASSES)

    uvidx, uvlabel, _ = point_cloud_label_to_surface_voxel_label_fast(
        coords,
        np.concatenate((np.expand_dims(targets, 1), np.expand_dims(preds, 1)), axis=1),
        res=0.02)

    for l in seen_classes:
        target_label = np.arange(targets.shape[0])[targets == l]
        pred_label = np.arange(preds.shape[0])[preds == l]
        num_intersection_label = np.intersect1d(pred_label, target_label).shape[0]
        num_union_label = np.union1d(pred_label, target_label).shape[0]
        pointmiou[l] = num_intersection_label / (num_union_label + 1e-8)

        target_label_vox = uvidx[(uvlabel[:, 0] == l)]
        pred_label_vox = uvidx[(uvlabel[:, 1] == l)]
        num_intersection_label_vox = np.intersect1d(pred_label_vox, target_label_vox).shape[0]
        num_union_label_vox = np.union1d(pred_label_vox, target_label_vox).shape[0]
        voxmiou[l] = num_intersection_label_vox / (num_union_label_vox + 1e-8)

    return pointmiou, voxmiou, mask
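Example 6 computes a per-class IoU from index sets using intersect1d and union1d. The core idea, sketched in isolation with toy predictions and targets:

import numpy as np

preds = np.array([1, 1, 2, 2, 2, 0])
targets = np.array([1, 2, 2, 2, 0, 0])
label = 2

pred_idx = np.where(preds == label)[0]      # [2 3 4]
target_idx = np.where(targets == label)[0]  # [1 2 3]
iou = np.intersect1d(pred_idx, target_idx).size / np.union1d(pred_idx, target_idx).size
print(iou)  # 2 common indices out of 4 -> 0.5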
Example 7: find_colors
# Required import: import numpy as np
# or: from numpy import intersect1d
def find_colors(ref_id, ref_flux, ref_ivar):
    # Find colors
    print("Finding colors")
    a = pyfits.open(DATA_DIR + "/lamost_catalog_colors.fits")
    data = a[1].data
    a.close()
    all_ids = data['LAMOST_ID_1']
    all_ids = np.array([val.strip() for val in all_ids])

    ref_id_col = np.intersect1d(all_ids, ref_id)
    inds = np.array([np.where(all_ids == val)[0][0] for val in ref_id_col])
    all_col, all_col_ivar = get_colors(
        DATA_DIR + "/lamost_catalog_colors.fits")
    col = all_col[:, inds]
    col_ivar = all_col_ivar[:, inds]
    bad_ivar = np.logical_or(np.isnan(col_ivar), col_ivar == np.inf)
    col_ivar[bad_ivar] = 0.0
    bad_flux = np.logical_or(np.isnan(col), col == np.inf)
    col[bad_flux] = 1.0
    col_ivar[bad_flux] = 0.0

    # add them to the wl, flux and ivar arrays
    inds = np.array([np.where(ref_id == val)[0][0] for val in ref_id_col])
    ref_flux_col = np.hstack((ref_flux[inds], col.T))
    ref_ivar_col = np.hstack((ref_ivar[inds], col_ivar.T))
    return ref_id_col, ref_flux_col, ref_ivar_col
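Examples 7 and 8 recover the positions of the matched IDs with a list comprehension over np.where. If the IDs are unique, the same indices can be obtained directly from intersect1d with return_indices=True (NumPy >= 1.15); a rough sketch with made-up IDs:

import numpy as np

all_ids = np.array(['star_a', 'star_b', 'star_c', 'star_d'])
ref_id = np.array(['star_d', 'star_b'])

ref_id_col, inds_all, inds_ref = np.intersect1d(all_ids, ref_id, return_indices=True)
# inds_all indexes into all_ids, inds_ref into ref_id, both aligned with ref_id_col;
# equivalent to the np.where loop when the IDs contain no duplicates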
Example 8: find_colors
# Required import: import numpy as np
# or: from numpy import intersect1d
def find_colors(ref_id, ref_flux, ref_ivar):
    # Find colors
    DATA_DIR = "/Users/annaho/Data/LAMOST/Mass_And_Age"
    print("Finding colors")
    a = pyfits.open(DATA_DIR + "/lamost_catalog_colors.fits")
    data = a[1].data
    a.close()
    all_ids = data['LAMOST_ID_1']
    all_ids = np.array([val.strip() for val in all_ids])

    ref_id_col = np.intersect1d(all_ids, ref_id)
    inds = np.array([np.where(all_ids == val)[0][0] for val in ref_id_col])
    all_id, all_col, all_col_ivar = get_colors(
        DATA_DIR + "/lamost_catalog_colors.fits")
    col = all_col[:, inds]
    col_ivar = all_col_ivar[:, inds]
    bad_ivar = np.logical_or(np.isnan(col_ivar), col_ivar == np.inf)
    col_ivar[bad_ivar] = 0.0
    bad_flux = np.logical_or(np.isnan(col), col == np.inf)
    col[bad_flux] = 1.0
    col_ivar[bad_flux] = 0.0

    # add them to the wl, flux and ivar arrays
    inds = np.array([np.where(ref_id == val)[0][0] for val in ref_id_col])
    ref_flux_col = np.hstack((ref_flux[inds], col.T))
    ref_ivar_col = np.hstack((ref_ivar[inds], col_ivar.T))
    return ref_id_col, ref_flux_col, ref_ivar_col
Example 9: filtertraindata
# Required import: import numpy as np
# or: from numpy import intersect1d
def filtertraindata(self):
    datapath = get_data_path('coco')
    train_list = tuple(open(datapath + 'annotations/train2014.txt', 'r'))
    val_list = tuple(open(datapath + 'annotations/val2014.txt', 'r'))
    total_list = ['/train2014/' + id_.rstrip() for id_ in train_list] + \
                 ['/val2014/' + id_.rstrip() for id_ in val_list]

    annotation_path = os.path.join(datapath, 'seg_mask')
    aug_list = []
    for filename in total_list:
        lbl_path = annotation_path + filename + '.png'
        lbl = Image.open(lbl_path).convert('P')
        lbl = np.array(lbl, dtype=np.int32)
        if np.sum(pascal_map[lbl] != 0) > 1000 and np.intersect1d(np.unique(lbl), pascal_classes).any():
            aug_list.append(filename)

    val_aug_list = random.sample(aug_list, 1500)
    train_aug_list = list(set(aug_list) - set(val_aug_list))

    with open(os.path.join(datapath, 'annotations', 'train_aug.txt'), 'w') as txtfile:
        [txtfile.write(file + '\n') for file in train_aug_list]
    with open(os.path.join(datapath, 'annotations', 'val.txt'), 'w') as txtfile:
        [txtfile.write(file + '\n') for file in val_aug_list]
Example 10: test_shuffle_kfold
# Required import: import numpy as np
# or: from numpy import intersect1d
def test_shuffle_kfold():
    # Check the indices are shuffled properly
    kf = KFold(3)
    kf2 = KFold(3, shuffle=True, random_state=0)
    kf3 = KFold(3, shuffle=True, random_state=1)

    X = np.ones(300)

    all_folds = np.zeros(300)
    for (tr1, te1), (tr2, te2), (tr3, te3) in zip(
            kf.split(X), kf2.split(X), kf3.split(X)):
        for tr_a, tr_b in combinations((tr1, tr2, tr3), 2):
            # Assert that there is no complete overlap
            assert_not_equal(len(np.intersect1d(tr_a, tr_b)), len(tr1))

        # Set all test indices in successive iterations of kf2 to 1
        all_folds[te2] = 1

    # Check that all indices are returned in the different test folds
    assert_equal(sum(all_folds), 300)
Example 11: test_stratified_shuffle_split_overlap_train_test_bug
# Required import: import numpy as np
# or: from numpy import intersect1d
def test_stratified_shuffle_split_overlap_train_test_bug():
    # See https://github.com/scikit-learn/scikit-learn/issues/6121 for
    # the original bug report
    y = [0, 1, 2, 3] * 3 + [4, 5] * 5
    X = np.ones_like(y)

    sss = StratifiedShuffleSplit(n_splits=1,
                                 test_size=0.5, random_state=0)

    train, test = next(sss.split(X=X, y=y))

    # no overlap
    assert_array_equal(np.intersect1d(train, test), [])

    # complete partition
    assert_array_equal(np.union1d(train, test), np.arange(len(y)))
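The assertion pattern used here (and in Example 12 below) relies on np.intersect1d returning an empty array for disjoint index sets; a trivial check:

import numpy as np

train = np.array([0, 2, 5])
test = np.array([1, 3, 4])
print(np.intersect1d(train, test))            # []
print(np.intersect1d(train, test).size == 0)  # True -> no overlap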
Example 12: test_stratified_shuffle_split_multilabel
# Required import: import numpy as np
# or: from numpy import intersect1d
def test_stratified_shuffle_split_multilabel():
    # fix for issue 9037
    for y in [np.array([[0, 1], [1, 0], [1, 0], [0, 1]]),
              np.array([[0, 1], [1, 1], [1, 1], [0, 1]])]:
        X = np.ones_like(y)
        sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0)
        train, test = next(sss.split(X=X, y=y))
        y_train = y[train]
        y_test = y[test]

        # no overlap
        assert_array_equal(np.intersect1d(train, test), [])

        # complete partition
        assert_array_equal(np.union1d(train, test), np.arange(len(y)))

        # correct stratification of entire rows
        # (by design, here y[:, 0] uniquely determines the entire row of y)
        expected_ratio = np.mean(y[:, 0])
        assert_equal(expected_ratio, np.mean(y_train[:, 0]))
        assert_equal(expected_ratio, np.mean(y_test[:, 0]))
Example 13: intersect_sim
# Required import: import numpy as np
# or: from numpy import intersect1d
def intersect_sim(array_1, array_2):
    """Calculate the similarity of two arrays
    by using intersection / union
    """
    sim = float(np.intersect1d(array_1, array_2).size) / \
        float(np.union1d(array_1, array_2).size)
    return sim
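Example 13 is the Jaccard index over array elements; a quick usage check with toy inputs:

import numpy as np

a = np.array([1, 2, 3, 4])
b = np.array([3, 4, 5])
# |{3, 4}| / |{1, 2, 3, 4, 5}| = 2 / 5
print(intersect_sim(a, b))  # 0.4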
Example 14: fit_pRF_radius
# Required import: import numpy as np
# or: from numpy import intersect1d
def fit_pRF_radius(ctx, retinotopy=Ellipsis, mask=None, weight=Ellipsis, slope_only=False):
    '''
    fit_pRF_radius(ctx) fits a line, m*eccen + b, to the pRF radius and yields the tuple (m, b).

    The following options may be given:
      * retinotopy (default: Ellipsis) specifies the prefix for the retinotopy (passed to
        retinotopy_data() to find the retinotopic dataset).
      * mask (default: None) specifies the mask over which to perform the calculation. This is
        passed to the to_mask() function. In the case that mask is a set or frozenset, then it is
        treated as a conjunction (intersection) of masks.
      * weight (default: Ellipsis) specifies that a weight should be used; if this is True or
        Ellipsis, will use the variance_explained if it is part of the retinotopy dataset; if this
        is False or None, uses no weight; otherwise, this must be a weight property or property name.
      * slope_only (default: False) may be set to True to instead fit radius = m*eccen and return
        only m.
    '''
    rdat = retinotopy_data(ctx, retinotopy)
    if 'radius' not in rdat: raise ValueError('No pRF radius found in dataset %s' % retinotopy)
    rad = rdat['radius']
    (ang, ecc) = as_retinotopy(rdat, 'visual')
    if isinstance(mask, (set, frozenset)):
        mask = reduce(np.intersect1d, [ctx.mask(m, indices=True) for m in mask])
    else: mask = ctx.mask(mask, indices=True)
    # get a weight if provided:
    if weight in [False, None]: wgt = np.ones(rad.shape)
    elif weight in [True, Ellipsis]:
        if 'variance_explained' in rdat: wgt = rdat['variance_explained']
        else: wgt = np.ones(rad.shape)
    else: wgt = ctx.property(weight)
    # get the relevant eccen and radius values
    (ecc, rad, wgt) = [x[mask] for x in (ecc, rad, wgt)]
    # fit a line...
    if slope_only:
        ecc = np.reshape(ecc * wgt, (len(ecc), 1))
        rad = np.reshape(rad * wgt, (len(rad), 1))
        return np.linalg.lstsq(ecc, rad)[0]
    else:
        return tuple(np.polyfit(ecc, rad, 1, w=wgt))
Example 15: choose_next_target
# Required import: import numpy as np
# or: from numpy import intersect1d
def choose_next_target(self, old_sInd, sInds, slewTimes, intTimes):
    """Choose next target at random

    Args:
        old_sInd (integer):
            Index of the previous target star
        sInds (integer array):
            Indices of available targets
        slewTimes (astropy quantity array):
            slew times to all stars (must be indexed by sInds)
        intTimes (astropy Quantity array):
            Integration times for detection in units of day

    Returns:
        sInd (integer):
            Index of next target star

    """
    TL = self.TargetList

    # cast sInds to array
    sInds = np.array(sInds, ndmin=1, copy=False)

    occ_sInds = np.where(np.in1d(TL.Name, self.occHIPs))[0]
    n_sInds = np.intersect1d(sInds, occ_sInds)

    # pick one
    if len(n_sInds) == 0:
        sInd = np.random.choice(sInds)
    else:
        sInd = np.random.choice(n_sInds)

    return sInd, slewTimes[sInd]
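Example 15 first maps occulter star names to indices with np.in1d and then uses np.intersect1d to restrict the available targets to that set. The filtering step in isolation, with made-up indices:

import numpy as np

sInds = np.array([3, 7, 11, 42])         # currently available targets
occ_sInds = np.array([7, 42, 99])        # indices of occulter targets
print(np.intersect1d(sInds, occ_sInds))  # [ 7 42] -- available occulter targets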