This article collects typical usage examples of the numpy.in1d function in Python. If you have been wondering what exactly in1d does, how to call it, and what real-world uses look like, the curated examples below should help.
The following shows 15 code examples of the in1d function, drawn from open-source projects and ordered by popularity.
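Before the project examples, here is a minimal, self-contained sketch of the basic behaviour: np.in1d(a, b) flattens a and returns a boolean mask telling, element by element, whether each value of a also occurs in b.

import numpy as np

arr = np.array([0, 1, 2, 5, 0])
targets = np.array([0, 2])
mask = np.in1d(arr, targets)  # array([ True, False,  True, False,  True])
print(arr[mask])              # [0 2 0] -- the elements of arr that occur in targets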
Example 1: get_destination_pathline_data
def get_destination_pathline_data(self, dest_cells):
"""Get pathline data for set of destination cells.
Parameters
----------
dest_cells : list or array of tuples
(k, i, j) of each destination cell (zero-based)
Returns
-------
pthldest : np.recarray
Slice of pathline data array (e.g. PathlineFile._data)
containing only pathlines with final k,i,j in dest_cells.
"""
ra = self._data.view(np.recarray)
# find the intersection of endpoints and dest_cells
# convert dest_cells to same dtype for comparison
raslice = ra[['k', 'i', 'j']]
dest_cells = np.array(dest_cells, dtype=raslice.dtype)
inds = np.in1d(raslice, dest_cells)
epdest = ra[inds].copy().view(np.recarray)
# use particle ids to get the rest of the paths
inds = np.in1d(ra.particleid, epdest.particleid)
    pthldest = ra[inds].copy()
    pthldest.sort(order=['particleid', 'time'])
    return pthldest
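The slicing trick above (ra[['k', 'i', 'j']]) works because np.in1d applied to a multi-field view of a structured array compares whole records rather than individual fields. A minimal sketch of that record-level matching (the array contents here are illustrative, not taken from flopy):

import numpy as np

cells = np.array([(0, 1, 1), (0, 2, 3), (1, 4, 4)],
                 dtype=[('k', int), ('i', int), ('j', int)])
dest = np.array([(0, 2, 3)], dtype=cells.dtype)
print(np.in1d(cells, dest))  # [False  True False] -- each (k, i, j) record compared as a unit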
Example 2: test_match_mask
def test_match_mask():
msk = np.array([ True, False, True, False, False], dtype=bool)
idx = np.array([0, 2])
arr = np.array([1,2,3,4,5])
values = np.array([1,3])
assert (num.match_mask(arr, values) == msk).all()
ret = num.match_mask(arr, values, fullout=True)
assert (ret[0] == msk).all()
assert (ret[1] == idx).all()
assert (arr[msk] == np.array([1, 3])).all()
assert (ret[0] == np.in1d(arr, values)).all()
# handle cases where len(values) > len(arr) and values not contained in arr
values = np.array([1,3,3,3,7,9,-3,-4,-5])
ret = num.match_mask(arr, values, fullout=True)
assert (ret[0] == msk).all()
assert (ret[1] == idx).all()
assert (ret[0] == np.in1d(arr, values)).all()
# float values: use eps
ret = num.match_mask(arr+0.1, values, fullout=True, eps=0.2)
assert (ret[0] == msk).all()
assert (ret[1] == idx).all()
msk = num.match_mask(np.array([1,2]), np.array([3,4]))
assert (msk == np.array([False]*2)).all()
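The eps argument exercised above exists because np.in1d tests exact equality, which is fragile for floats. A hedged sketch of what a tolerance-aware membership mask can look like in plain NumPy (this is not pwtools' actual implementation):

import numpy as np

def match_mask_eps(arr, values, eps=1e-8):
    # True where arr[i] lies within eps of any entry in values
    return (np.abs(arr[:, None] - values[None, :]) <= eps).any(axis=1)

arr = np.array([1, 2, 3, 4, 5]) + 0.1
print(match_mask_eps(arr, np.array([1.0, 3.0]), eps=0.2))
# [ True False  True False False]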
Example 3: generateBatch
def generateBatch(curinds, elements, atomArraysAll, nAtomsDict,
atomsIndsReverse, atomArraysAllDerivs):
"""This method generates batches from a large dataset using a set of
selected indices curinds."""
atomArraysFinal = {}
atomArraysDerivsFinal = {}
for element in elements:
validKeys = np.in1d(atomsIndsReverse[element], curinds)
if len(validKeys) > 0:
atomArraysFinal[element] = atomArraysAll[element][validKeys]
if len(atomArraysAllDerivs[element]) > 0:
atomArraysDerivsFinal[element] = atomArraysAllDerivs[
element][validKeys, :, :, :]
else:
atomArraysDerivsFinal[element] = []
else:
atomArraysFinal[element] = []
atomArraysDerivsFinal[element] = []
atomInds = {}
for element in elements:
validKeys = np.in1d(atomsIndsReverse[element], curinds)
if len(validKeys) > 0:
atomIndsTemp = np.sum(atomsIndsReverse[element][validKeys], 1)
atomInds[element] = atomIndsTemp * 0.
for i in range(len(curinds)):
atomInds[element][atomIndsTemp == curinds[i]] = i
else:
atomInds[element] = []
return atomArraysFinal, atomArraysDerivsFinal, atomInds
Example 4: Check_Result
def Check_Result(self, Str_DataName, Int_DataNum, List_PeakIdx):
Array_MyAnswer = np.array(List_PeakIdx)
Array_MyAnswer = np.unique(Array_MyAnswer)
Array_Anno = self.Load_Answer(Str_DataName, Int_DataNum)
Int_TP = 0
Int_FP = 0
Int_FN = 0
Int_BufferSize = 2
    for myanswer in Array_MyAnswer:
        Array_BufferMyAnswer = np.arange(myanswer - Int_BufferSize, myanswer + Int_BufferSize)
        Array_InorNOT = np.in1d(Array_BufferMyAnswer, Array_Anno)
        if Array_InorNOT.any():
            Int_TP += 1
        else:
            Int_FP += 1
    for trueanswer in Array_Anno:
        Array_BufferMyAnswer = np.arange(trueanswer - Int_BufferSize, trueanswer + Int_BufferSize)
        Array_InorNOT = np.in1d(Array_BufferMyAnswer, Array_MyAnswer)
        if not Array_InorNOT.any():
            Int_FN += 1
Flt_Se = float(Int_TP) / float(Int_TP + Int_FN)
Flt_PP = float(Int_TP) / float(Int_TP + Int_FP)
return Str_DataName, Int_DataNum, Flt_Se, Flt_PP
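The buffered comparison above is a useful np.in1d pattern for near-miss matching of integer indices: build a small window around each detected peak and ask whether any annotated index falls inside it. Condensed, with made-up sample values:

import numpy as np

annotations = np.array([100, 250, 400])
detected = 249
window = np.arange(detected - 2, detected + 2)  # upper bound exclusive, as in the code above
print(np.in1d(window, annotations).any())       # True -- 250 falls inside [247, 251)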
Example 5: _do_one_inner_iteration
def _do_one_inner_iteration(self, inv_val):
r"""
Determine which throats are invaded at a given applied capillary
pressure.
"""
# Generate a tlist containing boolean values for throat state
Tinvaded = self['throat.entry_pressure'] <= inv_val
# Find all pores that can be invaded at specified pressure
[pclusters, tclusters] = self._net.find_clusters2(mask=Tinvaded,
t_labels=True)
if self._AL:
# Identify clusters connected to invasion sites
inv_clusters = sp.unique(pclusters[self['pore.inlets']])
else:
# All clusters are invasion sites
inv_clusters = pclusters
inv_clusters = inv_clusters[inv_clusters >= 0]
# Find pores on the invading clusters
pmask = np.in1d(pclusters, inv_clusters)
# Store current applied pressure in newly invaded pores
pinds = (self['pore.inv_Pc'] == sp.inf) * (pmask)
self['pore.inv_Pc'][pinds] = inv_val
# Find throats on the invading clusters
tmask = np.in1d(tclusters, inv_clusters)
# Store current applied pressure in newly invaded throats
tinds = (self['throat.inv_Pc'] == sp.inf) * (tmask)
self['throat.inv_Pc'][tinds] = inv_val
# Store total network saturation
tsat = sp.sum(self._net['throat.volume'][self['throat.inv_Pc'] <= inv_val])
psat = sp.sum(self._net['pore.volume'][self['pore.inv_Pc'] <= inv_val])
total = sp.sum(self._net['throat.volume']) + sp.sum(self._net['pore.volume'])
self['pore.inv_sat'][pinds] = (tsat + psat)/total
self['throat.inv_sat'][tinds] = (tsat + psat)/total
Example 6: Pred_EOF_CCA
def Pred_EOF_CCA(self):
    '''
    Prediction module. Still needs refinement; several parts deserve a
    more thorough treatment.
    '''
I_Year = self.I_Year
I_YearP = self.I_YearP
print('I_Year=',I_Year)
print('I_YearP=',I_YearP)
#print(self.Field[:,0,0])
#print(self.FieldP[:,0,0])
#sys.exit(0)
Region = self.Region[:,np.in1d(I_Year,I_YearP)]
print('I_YearR=',I_Year[np.in1d(I_Year,I_YearP)])
    FieldP = self.FieldP[:,self.p_np3]  # i.e. the field data after filtering
FieldP = FieldP.T
FieldP2 = FieldP[:,np.in1d(I_YearP,I_Year)]
print(FieldP2.shape,np.atleast_2d(FieldP[:,-1]).T.shape)
print('FieldP.shape = ',FieldP.shape)
print('FieldP2.shape = ',FieldP2.shape)
print('Region.shape = ',Region.shape)
self.X_Pre = dclim.dpre_eof_cca(FieldP2,Region,np.atleast_2d(FieldP[:,-1]).T,4)
print(self.X_Pre.shape)
self.out = np.hstack((self.StaLatLon,self.X_Pre))
print('Pred Year is ',I_YearP[-1])
np.savetxt('out.txt',self.out,fmt='%5d %7.2f %7.2f %7.2f',delimiter=' ')
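The paired np.in1d calls in this example implement a common alignment idiom: keep only the columns whose year occurs in both year axes. A standalone sketch with made-up years (the two masks line up positionally only when both year arrays are sorted ascending):

import numpy as np

years_a = np.array([2000, 2001, 2002, 2003])
years_b = np.array([2001, 2003, 2004])
vals_a = np.array([10., 11., 12., 13.])
vals_b = np.array([21., 23., 24.])
common_a = vals_a[np.in1d(years_a, years_b)]  # [11., 13.] -- years 2001 and 2003
common_b = vals_b[np.in1d(years_b, years_a)]  # [21., 23.] -- same years, same order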
Example 7: AM_vector_strength
def AM_vector_strength(spikeTimestamps, eventOnsetTimes, behavData, timeRange):
currentFreq = behavData['currentFreq']
possibleFreq = np.unique(currentFreq)
vs_array=np.array([])
ral_array=np.array([])
pval_array = np.array([])
    timeRange = [0, 0.5]  # NOTE: hard-coded; this overrides the timeRange argument
spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial = spikesanalysis.eventlocked_spiketimes(
spikeTimestamps, eventOnsetTimes, timeRange)
for freq in possibleFreq:
select = np.flatnonzero(currentFreq==freq)
selectspikes = spikeTimesFromEventOnset[np.in1d(trialIndexForEachSpike, select)]
selectinds = trialIndexForEachSpike[np.in1d(trialIndexForEachSpike, select)]
squeezedinds=np.array([list(np.unique(selectinds)).index(x) for x in selectinds])
spikesAfterFirstCycle = selectspikes[selectspikes>(1.0/freq)]
indsAfterFirstCycle = selectinds[selectspikes>(1.0/freq)]
strength, phase = vectorstrength(spikesAfterFirstCycle, 1.0/freq)
vs_array=np.concatenate((vs_array, np.array([strength])))
#Compute the pval for the vector strength
radsPerSec=freq*2*np.pi
spikeRads = (spikesAfterFirstCycle*radsPerSec)%(2*np.pi)
ral_test = circstats.rayleigh_test(spikeRads)
pval = np.array([ral_test['pvalue']])
ral =np.array([2*len(spikesAfterFirstCycle)*(strength**2)])
pval_array = np.concatenate((pval_array, pval))
ral_array = np.concatenate((ral_array, ral))
return vs_array, pval_array, ral_array
Example 8: sim_top_doc
def sim_top_doc(self, topic_or_topics, weights=[], filter_words=[],
print_len=10, as_strings=True, label_fn=_def_label_fn_,
filter_nan=True):
"""
"""
d_arr = _sim_top_doc_(self.corpus, self.model.doc_top, topic_or_topics,
self.model.context_type, weights=weights,
norms=self._doc_norms, print_len=print_len,
as_strings=False, label_fn=label_fn,
filter_nan=filter_nan)
topics = _res_top_type_(topic_or_topics)
if len(filter_words) > 0:
white = set()
for w in filter_words:
l = self.word_topics(w, as_strings=False)
d = l['i'][np.in1d(l['value'], topics)]
white.update(d)
d_arr = d_arr[(np.in1d(d_arr['i'], white))]
if as_strings:
md = self.corpus.view_metadata(self.model.context_type)
docs = label_fn(md)
d_arr = _map_strarr_(d_arr, docs, k='i', new_k='doc')
return d_arr
Example 9: compute_mAP
def compute_mAP(index, good_index, junk_index):
ap = 0
cmc = torch.IntTensor(len(index)).zero_()
if good_index.size==0: # if empty
cmc[0] = -1
return ap,cmc
# remove junk_index
mask = np.in1d(index, junk_index, invert=True)
index = index[mask]
# find good_index index
ngood = len(good_index)
mask = np.in1d(index, good_index)
    rows_good = np.argwhere(mask).flatten()
cmc[rows_good[0]:] = 1
for i in range(ngood):
d_recall = 1.0/ngood
precision = (i+1)*1.0/(rows_good[i]+1)
if rows_good[i]!=0:
old_precision = i*1.0/rows_good[i]
else:
old_precision=1.0
ap = ap + d_recall*(old_precision + precision)/2
return ap, cmc
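Note the invert=True keyword on the first call: it returns the negated mask directly and is equivalent to, but faster than, ~np.in1d(index, junk_index), since the negation happens in the same pass. A tiny sketch:

import numpy as np

index = np.array([7, 3, 9, 1])
junk = np.array([3, 9])
keep = np.in1d(index, junk, invert=True)  # same result as ~np.in1d(index, junk)
print(index[keep])                        # [7 1]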
Example 10: find_matches
def find_matches(mock, obs, opts):
"""
Function to find matching galaxy members between mock haloes
and observed clusters.
"""
obs = obs[np.in1d(obs.mem_id, mock.m_mem_id, assume_unique = True)]
mock = mock[np.in1d(mock.m_mem_id, obs.mem_id, assume_unique = True)]
merged = np.lib.recfunctions.merge_arrays([obs, mock], flatten = True,
usemask = False)
clusters = []
count = 0
for id_val in np.unique(obs.id):
clusters.append(Clusterx(count))
for member in merged[obs.id == id_val]:
clusters[count].add_mem(member)
count += 1
for cluster in clusters:
cluster.props()
cluster.halo_count()
cluster.mass_hist(opts.mass_bin)
return clusters
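assume_unique=True, used in both calls above, lets np.in1d skip its internal deduplication step. It is safe only when both inputs genuinely contain no repeated IDs; with duplicates present the result is undefined. A small sketch:

import numpy as np

obs_ids = np.array([11, 14, 17, 20])  # unique member IDs
mock_ids = np.array([14, 20, 31])     # also unique
print(obs_ids[np.in1d(obs_ids, mock_ids, assume_unique=True)])  # [14 20]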
Example 11: map_to_external_reference
def map_to_external_reference(self, roi, refname='HXB2', in_patient=True):
'''
return a map of positions in the patient to a reference genomewide
Args:
roi -- region of interest given as a string or a tuple (start, end)
refname -- reference to compare to
in_patient -- specifies whether the (start, end) refers to reference or patient coordinates
returns:
a (len(roi), 3) array with reference coordinates in first column,
patient coordinates in second
roi coordinates in third column
'''
from .filenames import get_coordinate_map_filename
coo_fn = get_coordinate_map_filename(self.name, 'genomewide', refname=refname)
genomewide_map = np.loadtxt(coo_fn, dtype=int)
if roi in self.annotation:
roi_pos = np.array([x for x in self.annotation[roi]], dtype = int)
ind = np.in1d(genomewide_map[:,1], roi_pos)
roi_indices = np.in1d(roi_pos, genomewide_map[:,1]).nonzero()[0]
return np.vstack((genomewide_map[ind].T, [roi_indices])).T
elif roi == "genomewide":
return np.vstack((genomewide_map.T, [genomewide_map[:,1]])).T
else:
try:
start, stop = map(int, roi)
start_ind = np.searchsorted(genomewide_map[:,in_patient], start)
stop_ind = np.searchsorted(genomewide_map[:,in_patient], stop)
return np.vstack((genomewide_map[start_ind:stop_ind].T,
[genomewide_map[start_ind:stop_ind, in_patient] - start])).T
        except (ValueError, TypeError):
            raise ValueError("ROI not understood")
Example 12: _limit_features
def _limit_features(self, csr_matrix, low=2, high=None, limit=None):
"""
        Lower bound on features, so that at least `low` docs must contain the feature
"""
assert isinstance(csr_matrix, scipy.sparse.csr_matrix) # won't work with other sparse matrices
# (most can be converted with .tocsr() method)
indices_to_remove = np.where(np.asarray(csr_matrix.sum(axis=0) < low)[0])[0]
# csr_matrix.sum(axis=0) < low: returns Boolean matrix where total features nums < low
# np.asarray: converts np.matrix to np.array
# [0]: since the array of interest is the first (and only) item in an outer array
# np.where: to go from True/False to indices of Trues
data_filter = np.in1d(csr_matrix.indices, indices_to_remove)
# gets boolean array, where the columns of any non-zero values are to be removed
# (i.e. their index is in the indices_to_remove array)
# following three lines for info/debugging purposes
# to show how many unique features are being removed
num_total_features = len(np.unique(csr_matrix.indices))
num_features_to_remove = np.sum(np.in1d(indices_to_remove, np.unique(csr_matrix.indices)))
print "%d/%d features will be removed" % (num_features_to_remove, num_total_features)
csr_matrix.data[data_filter] = 0
# set the values to be removed to 0 to start with
csr_matrix.eliminate_zeros()
# then run the np optimised routine to delete those 0's (and free a little memory)
# NB zeros are superfluous since a sparse matrix
return csr_matrix
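The np.in1d call above marks every stored entry whose column index is in the removal list, so entire columns can be zeroed out without densifying the matrix. A self-contained sketch of the same idiom:

import numpy as np
import scipy.sparse

m = scipy.sparse.csr_matrix(np.array([[1, 0, 2],
                                      [0, 3, 4]]))
cols_to_drop = np.array([2])
mask = np.in1d(m.indices, cols_to_drop)  # True for stored entries that sit in column 2
m.data[mask] = 0
m.eliminate_zeros()                      # physically drop the zeroed entries
print(m.toarray())                       # [[1 0 0]
                                         #  [0 3 0]]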
Example 13: check_filter_labels
def check_filter_labels(inverse=False):
# create a feature set
fs, _ = make_classification_data(num_examples=1000,
num_features=4,
num_labels=5,
train_test_ratio=1.0)
    # keep just the instances with labels 0, 1 and 2
labels_to_filter = [0, 1, 2]
# do the actual filtering
fs.filter(labels=labels_to_filter, inverse=inverse)
# make sure that we removed the right things
if inverse:
ids_kept = fs.ids[np.where(np.logical_not(np.in1d(fs.labels,
labels_to_filter)))]
else:
ids_kept = fs.ids[np.where(np.in1d(fs.labels, labels_to_filter))]
assert_array_equal(fs.ids, np.array(ids_kept))
# make sure that number of ids, labels and features are the same
eq_(fs.ids.shape[0], fs.labels.shape[0])
eq_(fs.labels.shape[0], fs.features.shape[0])
Example 14: untie
def untie(a,b):
"""
Parameters
----------
a
b
Returns
-------
boolean
a
r
"""
la = len(a)
lb = len(b)
u = np.intersect1d(a,b)
lu = len(u)
#print lu
#print min(la,lb)/2
    if lu >= min(la,lb)/2:
        # segment of a not shared with b
        aa = a[~np.in1d(a,u)]
        # segment of b not shared with a
        bb = b[~np.in1d(b,u)]
r = np.hstack((aa,bb))
if la<lb:
return(True,a,r)
else:
return(True,b,r)
else:
return(False,-1,-1)
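The ~np.in1d idiom above is a set difference that preserves the original order and duplicates of a, which is exactly what distinguishes it from np.setdiff1d (sorted and deduplicated). A sketch of the distinction:

import numpy as np

a = np.array([5, 3, 5, 1])
u = np.array([3])
print(a[~np.in1d(a, u)])   # [5 5 1] -- order and duplicates kept
print(np.setdiff1d(a, u))  # [1 5]   -- sorted, deduplicated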
Example 15: filter_effects
def filter_effects(self):
"""
Merge effects and data, and flip effect alleles
"""
effect_positions=self.effects[["CHR", "POS"]]
data_positions=self.data.snp[["CHR", "POS"]]
effect_include=np.in1d(effect_positions, data_positions)
data_include=np.in1d(data_positions, effect_positions)
self.data.filter_snps(data_include)
self.effects=self.effects[effect_include]
    # Just give up and convert to float. I have no idea why int doesn't work here,
    # but it's something to do with the fact that you can't have None as a numpy int,
    # whereas float gets converted to nan.
tmp_data=nprec.append_fields(self.data.snp, "GENO", None, dtypes=[(float,self.data.geno.shape[1])],usemask=False)
tmp_data["GENO"]=self.data.geno
self.effects=nprec.join_by(["CHR", "POS"], self.effects, tmp_data, usemask=False, jointype="inner")
flipped=0
removed=0
for rec in self.effects:
if rec["EFFECT"]==rec["REF"] and rec["OTHER"]==rec["ALT"]:
pass
elif rec["OTHER"]==rec["REF"] and rec["EFFECT"]==rec["ALT"]:
flipped+=1
rec["OTHER"]=rec["ALT"]
rec["EFFECT"]=rec["REF"]
rec["BETA"]=-rec["BETA"]
else:
removed+=1
rec["EFFECT"]=rec["OTHER"]="N"
self.effects=self.effects[self.effects["EFFECT"]!="N"]
print( "Removed "+str(removed)+" non-matching alleles",file=sys.stderr)
print( "Flipped "+str(flipped)+" alleles",file=sys.stderr)