This article collects typical usage examples of the numpy.intersect1d function in Python. If you are unsure what intersect1d does, how to call it, or what it looks like in real code, the curated examples below should help.
The following sections show 15 code examples of the intersect1d function, sorted by popularity by default.
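Before the examples, a minimal sketch of the function itself: np.intersect1d returns the sorted, unique values that appear in both input arrays.

import numpy as np

a = np.array([3, 1, 4, 1, 5])
b = np.array([5, 9, 2, 6, 5, 3])
print(np.intersect1d(a, b))  # [3 5] -- sorted, duplicates removed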
Example 1: afterObjectCompute
def afterObjectCompute(self, feedforwardInput, lateralInputs=(),
                       feedforwardGrowthCandidates=None, learn=True):
    activeCells = self.objectLayer.getActiveCells()

    cells = dict((cell, []) for cell in activeCells.tolist())
    for cell in activeCells:
        connectedSynapses = np.where(
            self.objectLayer.proximalPermanences.getRow(cell)
            >= self.objectLayer.connectedPermanenceProximal)[0]
        activeSynapses = np.intersect1d(connectedSynapses, feedforwardInput)
        segmentData = [
            ["input", activeSynapses.tolist()]
        ]
        cells[cell].append(segmentData)

    self.csvOut.writerow(("layer", "object"))
    self.csvOut.writerow([json.dumps(cells.items())])

    decodings = [k
                 for k, sdr in self.objectRepresentations.iteritems()
                 if np.intersect1d(activeCells, sdr).size == sdr.size]
    self.csvOut.writerow([json.dumps(decodings)])
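The decoding step above uses a set-containment idiom that recurs below (see also Example 14): because intersect1d keeps only the values common to both arrays, an SDR is fully contained in the active cells exactly when the intersection is as large as the SDR itself. A standalone sketch with made-up arrays:

import numpy as np

activeCells = np.array([2, 5, 9, 14, 21])
sdr = np.array([5, 14, 21])  # hypothetical stored representation

# sdr is a subset of activeCells iff every element survives the intersection
print(np.intersect1d(activeCells, sdr).size == sdr.size)  # True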
Example 2: _lf_acc
def _lf_acc(self, subset, lf_idx):
    gt = self.gt._gt_vec
    pred = np.ravel(self.lf_matrix.tocsc()[:, lf_idx].todense())
    has_label = np.where(pred != 0)
    has_gt = np.where(gt != 0)
    # Get labels/gt for candidates in dev set, with label, with gt
    gd_idxs = np.intersect1d(has_label, subset)
    gd_idxs = np.intersect1d(has_gt, gd_idxs)
    gt = np.ravel(gt[gd_idxs])
    pred_sub = np.ravel(pred[gd_idxs])
    n_neg = np.sum(pred_sub == -1)
    n_pos = np.sum(pred_sub == 1)
    if np.sum(pred == -1) == 0:
        neg_acc = -1
    elif n_neg == 0:
        neg_acc = 0
    else:
        neg_acc = float(np.sum((pred_sub == -1) * (gt == -1))) / n_neg
    if np.sum(pred == 1) == 0:
        pos_acc = -1
    elif n_pos == 0:
        pos_acc = 0
    else:
        pos_acc = float(np.sum((pred_sub == 1) * (gt == 1))) / n_pos
    return (pos_acc, n_pos, neg_acc, n_neg)
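Note that _lf_acc passes has_label, the tuple returned by np.where, directly to np.intersect1d; this works because intersect1d flattens its inputs to 1-D before comparing. A small sketch of the same chained filtering, with made-up data:

import numpy as np

pred = np.array([0, 1, -1, 1, 0, -1])
gt = np.array([1, 1, 0, -1, 1, -1])
subset = np.array([0, 1, 2, 3])  # hypothetical dev-set indices

idxs = np.intersect1d(np.where(pred != 0), subset)  # labeled and in subset
idxs = np.intersect1d(np.where(gt != 0), idxs)      # ...and with ground truth
print(idxs)  # [1 3]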
Example 3: get_movers_from_overfilled_locations
def get_movers_from_overfilled_locations(self, agent_set, agents_index, config=None):
    """Returns an index (relative to agents_index) of agents that should be removed
    from their locations.
    """
    id_name = self.choice_set.get_id_name()[0]
    agents_locations = agent_set.get_attribute_by_index(id_name, agents_index)
    # check if there was an overfilling of locations
    movers = array([], dtype='int32')
    if self.compute_capacity_flag:
        overfilled_string = config.get("is_choice_overfilled_string", None)
        if overfilled_string:
            tmp_agent_set = copy.copy(agent_set)
            overfilled_locations = where(self.choice_set.compute_variables(overfilled_string, self.dataset_pool))[0]
            current_agents_in_overfilled_locations = intersect1d(agents_locations, overfilled_locations)
            while current_agents_in_overfilled_locations.size > 0:
                for location in current_agents_in_overfilled_locations:
                    agents_of_this_location = where(agents_locations == location)[0]
                    if agents_of_this_location.size > 1:
                        sampled_agents = probsample_noreplace(agents_of_this_location, 1)
                    else:
                        sampled_agents = agents_of_this_location
                    movers = concatenate((movers, sampled_agents))
                tmp_agent_set.set_values_of_one_attribute(id_name, -1, agents_index[movers])
                agents_locations = tmp_agent_set.get_attribute_by_index(id_name, agents_index)
                self.dataset_pool.replace_dataset(tmp_agent_set.get_dataset_name(), tmp_agent_set)
                overfilled_locations = where(self.choice_set.compute_variables(overfilled_string, self.dataset_pool))[0]
                current_agents_in_overfilled_locations = intersect1d(agents_locations, overfilled_locations)
            self.dataset_pool.replace_dataset(agent_set.get_dataset_name(), agent_set)
        else:
            new_locations_vacancy = self.get_locations_vacancy(agent_set)
            movers = self.choose_agents_to_move_from_overfilled_locations(new_locations_vacancy,
                                                                          agent_set, agents_index, agents_locations)
    return concatenate((movers, where(agents_locations <= 0)[0]))
Example 4: rh_by_runtime
def rh_by_runtime(RTFc, RTFh, RHi, **hourly):
    heads = ['Hours above 50% RH; days with no cooling or heating',
             'Number of days; no cooling or heating',
             'Mean RH, days with no cooling or heating',
             'Hours above 50% RH; days with cooling, no heating',
             'Number of days; cooling, no heating',
             'Mean RH, days with cooling, no heating',
             'Hours above 50% RH; days with heating, no cooling',
             'Number of days; heating, no cooling',
             'Mean RH, days with heating, no cooling',
             'Hours above 50% RH; days with heating and cooling',
             'Number of days; heating and cooling',
             'Mean RH, days with heating and cooling']
    vals = []
    conditions = [np.intersect1d(np.where(daily_total(RTFc) == 0)[0], np.where(daily_total(RTFh) == 0)[0]),
                  np.intersect1d(np.where(daily_total(RTFc) > 0)[0], np.where(daily_total(RTFh) == 0)[0]),
                  np.intersect1d(np.where(daily_total(RTFc) == 0)[0], np.where(daily_total(RTFh) > 0)[0]),
                  np.intersect1d(np.where(daily_total(RTFc) > 0)[0], np.where(daily_total(RTFh) > 0)[0])]
    for condition in conditions:
        vals.append(daily_total(np.where(RHi > 50, 1, 0))[condition].sum())
        vals.append(len(condition))
        vals.append(daily_mean(RHi)[condition].mean())
    if len(heads) != len(vals):
        print("only {0} values for {1}".format(len(vals), hourly['name']))
    return (heads, vals)
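Each condition above intersects two index arrays produced by np.where. When both conditions are evaluated over the same axis, a boolean AND gives the same result; the intersect1d form simply keeps the two conditions as separate, reusable index arrays. A sketch of the equivalence, with hypothetical daily runtime totals:

import numpy as np

rtf_c = np.array([0.0, 2.5, 0.0, 1.0])  # hypothetical daily cooling runtime
rtf_h = np.array([0.0, 0.0, 3.0, 1.5])  # hypothetical daily heating runtime

days_neither = np.intersect1d(np.where(rtf_c == 0)[0], np.where(rtf_h == 0)[0])
assert np.array_equal(days_neither, np.where((rtf_c == 0) & (rtf_h == 0))[0])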
Example 5: slice_xyz
def slice_xyz(self, xslice, yslice, zslice):
    """TODO: doesn't remove unused nodes/renumber elements"""
    x = self.xyz[:, 0]
    y = self.xyz[:, 1]
    z = self.xyz[:, 2]

    inodes = []
    if xslice is not None:
        xslice = float(xslice)
        inodes.append(where(x < xslice)[0])
    if yslice is not None:
        yslice = float(yslice)
        inodes.append(where(y < yslice)[0])
    if zslice is not None:
        zslice = float(zslice)
        inodes.append(where(z < zslice)[0])

    if len(inodes) == 1:
        nodes = inodes[0]
    elif len(inodes) == 2:
        nodes = intersect1d(inodes[0], inodes[1], assume_unique=True)
    elif len(inodes) == 3:
        nodes = intersect1d(
            intersect1d(inodes[0], inodes[1], assume_unique=True),
            inodes[2], assume_unique=True)
    inodes = arange(self.nodes.shape[0])
    # nodes = unique(hstack(inodes))
    self._slice_plane_inodes(nodes)
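The assume_unique=True flag used here skips intersect1d's internal deduplication pass. That is safe because np.where yields each index at most once; passing arrays that do contain duplicates with this flag gives undefined results. A minimal sketch:

import numpy as np

x = np.array([0.0, 1.2, 3.4, 5.6])
below = np.where(x < 4.0)[0]  # [0 1 2] -- unique and sorted
above = np.where(x > 0.5)[0]  # [1 2 3]

# Safe to skip the uniqueness check on np.where output
print(np.intersect1d(below, above, assume_unique=True))  # [1 2]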
Example 6: find_largest_coalition
def find_largest_coalition(previous, current):
    """
    Returns the largest coalition given a current set of
    coalitions and the previous largest coalition.

    :param previous: List containing previous largest coalition.
    :param current: Numpy array containing the set of current coalitions.
    :return: List containing current largest coalition.
    """
    # Record largest coalition.
    largest_coalition = []
    # Iterate through all coalitions to find the largest.
    for coalition in current:
        c = coalition[0]
        if len(c) > len(largest_coalition):
            largest_coalition = c
        # If two coalitions have the same length, choose
        # the one that resembles the previous one the most.
        elif len(c) == len(largest_coalition):
            score_new = np.intersect1d(c, previous, True).size
            score_old = np.intersect1d(largest_coalition, previous, True).size
            if score_new > score_old:
                largest_coalition = c
            # If both resemble the previous one equally, choose a random one.
            elif score_new == score_old:
                flip = np.random.randint(2)
                if flip == 0:
                    largest_coalition = c
    return largest_coalition
Example 7: intersect_coords
def intersect_coords(coords1, coords2):
    """For two sets of coordinates, find the coordinates that are common to
    both, where the dimensionality is the coords1.shape[0]"""
    # find the longer one
    if coords1.shape[-1] > coords2.shape[-1]:
        coords_long = coords1
        coords_short = coords2
    else:
        coords_long = coords2
        coords_short = coords1

    ans = np.array([[], [], []], dtype='int')  # Initialize as a 3-row variable
    # Loop over the longer of the coordinate sets
    for i in xrange(coords_long.shape[-1]):
        # For each coordinate:
        this_coords = coords_long[:, i]
        # Find the matches in the other set of coordinates:
        x = np.where(coords_short[0, :] == this_coords[0])[0]
        y = np.where(coords_short[1, :] == this_coords[1])[0]
        z = np.where(coords_short[2, :] == this_coords[2])[0]
        # Use intersect1d, such that there can be more than one match (and the
        # size of idx will reflect how many such matches exist):
        idx = np.intersect1d(np.intersect1d(x, y), z)
        # Append the places where there are matches in all three dimensions:
        if len(idx):
            ans = np.hstack([ans, coords_short[:, idx]])
    return ans
Example 8: _flags_fill
def _flags_fill(flags, N, M):
    '''Expand a cube of True around singleton True values in flags array'''
    idx = np.argwhere(flags == True)
    max_idx = N**3 - 1
    last = flags[max_idx]
    # find where there is room for expansion in each direction
    i = np.where(idx % N < N - 1)[0]
    j = np.where(idx % (N**2) < (N - 1) * N)[0]
    k = np.where(idx % (N**3) < (N - 1) * N**2)[0]
    # find room for expansion in multiple directions
    ij = np.intersect1d(i, j)
    ik = np.intersect1d(i, k)
    jk = np.intersect1d(j, k)
    ijk = np.intersect1d(ij, k)
    # this is hardcoded for M=2 right now
    flags[np.clip(idx[i] + 1, 0, max_idx)] = True             # i+1
    flags[np.clip(idx[j] + N, 0, max_idx)] = True             # j+1
    flags[np.clip(idx[k] + N**2, 0, max_idx)] = True          # k+1
    flags[np.clip(idx[ij] + 1 + N, 0, max_idx)] = True        # i+1, j+1
    flags[np.clip(idx[ik] + 1 + N**2, 0, max_idx)] = True     # i+1, k+1
    flags[np.clip(idx[jk] + N * (N + 1), 0, max_idx)] = True  # j+1, k+1
    flags[np.clip(idx[ijk] + 1 + N * (N + 1), 0, max_idx)] = True  # i+1, j+1, k+1
    # needed?
    flags[max_idx] = last
    return flags
Example 9: check
def check(lattice, chains):
    for chain in chains:
        chain = chain[chain[:, 0] >= 0]
        # for monomer in chain:
        #     for j in xrange(0, chain.shape[0]-1):
        #         index = np.intersect1d(np.where((chain[:,0] == monomer[0]))[0], np.where((chain[:,1] == monomer[1]))[0])
        # intersect = np.array([x for x in set(tuple(x) for x in chain) & set(tuple(x) for x in chain)])
        intersect = [tuple(x) for x in chain]
        dups = [item for item, count in collections.Counter(intersect).items() if count > 1]
        if len(dups) > 0:
            print "ERROR: Duplicate in chain"
            print dups
            index = np.intersect1d(np.where((chain[:, 0] == dups[0][0])), np.where((chain[:, 1] == dups[0][1])))
            print index
            # print "Monomer is: " + str(monomer) + " at index " + str(index)
    for chain1, chain2 in itertools.combinations(chains, 2):
        chain1 = chain1[chain1[:, 0] >= 0]
        chain2 = chain2[chain2[:, 0] >= 0]
        array = np.concatenate((chain1, chain2), 0)
        intersect2 = [tuple(x) for x in array]
        dups2 = [item for item, count in collections.Counter(intersect2).items() if count > 1]
        if len(dups2) > 0:
            print "overlap"
            print dups2
            # search the combined array (not the stale loop variable) for the duplicate
            index2 = np.intersect1d(np.where((array[:, 0] == dups2[0][0])), np.where((array[:, 1] == dups2[0][1])))
            print index2
    return 0
Example 10: Find_AI
def Find_AI(self, time, Gen, PL):
    # Need to supply Tau and PL for a fast speed.
    # This is done by assuming that at one PL value there should only be one
    # tau value, which is achieved by changing Ai.
    # We also restrict the data to a 'Good Region'.
    if self.Width == None:
        print 'Need to input Width in cm'
        return False
    self.Gen = Gen / self.Width
    self.PL = PL
    self.time = time
    Max_gen = np.amax(self.Gen)
    Max_Time = np.amax(self.time)
    self.Limited_Index = np.where((self.Gen >= Max_gen * self.LowerLimit))[0]
    Max_Index = np.where((self.Gen == Max_gen))[0]
    Analysis_Upper = np.where((self.time >= self.time[Max_Index]))[0]
    Analysis_Lower = np.where((self.time <= self.time[Max_Index]))[0]
    self.Analysis_Upper = np.intersect1d(Analysis_Upper, self.Limited_Index)
    self.Analysis_Lower = np.intersect1d(Analysis_Lower, self.Limited_Index)
    res = minimize(self.Minimisation_Function, self.Ai, tol=1e-0)
    self.Ai = abs(res.x)
    self.Tau = self.Generalised_Lifetime(self.time, self.Gen, self.PL, self.Ai)
Example 11: split_set_by_indices
def split_set_by_indices(dataset, train_fold, valid_fold, test_fold):
    n_trials = dataset.get_topological_view().shape[0]
    # Make sure there are no overlaps and we have all possible trials
    # assigned
    assert np.intersect1d(valid_fold, test_fold).size == 0
    assert np.intersect1d(train_fold, test_fold).size == 0
    assert np.intersect1d(train_fold, valid_fold).size == 0
    assert (set(np.concatenate((train_fold, valid_fold, test_fold))) ==
            set(range(n_trials)))

    train_set = DenseDesignMatrixWrapper(
        topo_view=dataset.get_topological_view()[train_fold],
        y=dataset.y[train_fold],
        axes=dataset.view_converter.axes)
    valid_set = DenseDesignMatrixWrapper(
        topo_view=dataset.get_topological_view()[valid_fold],
        y=dataset.y[valid_fold],
        axes=dataset.view_converter.axes)
    test_set = DenseDesignMatrixWrapper(
        topo_view=dataset.get_topological_view()[test_fold],
        y=dataset.y[test_fold],
        axes=dataset.view_converter.axes)
    # make ordered dict to make it easier to iterate, i.e. for logging
    datasets = OrderedDict([('train', train_set), ('valid', valid_set),
                            ('test', test_set)])
    return datasets
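The three asserts use an empty intersection as a disjointness test: two index sets overlap exactly when intersect1d returns at least one element. A sketch with hypothetical folds:

import numpy as np

train_fold = np.arange(0, 6)
valid_fold = np.arange(6, 8)
test_fold = np.arange(8, 10)

# Folds must be pairwise disjoint: every pairwise intersection is empty
assert np.intersect1d(train_fold, valid_fold).size == 0
assert np.intersect1d(train_fold, test_fold).size == 0
assert np.intersect1d(valid_fold, test_fold).size == 0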
Example 12: reverse_interpolate_two_array
def reverse_interpolate_two_array(value1, array1, value2, array2, delta1=0.1, delta2=0.1):
    """
    Tries to reverse interpolate two values from two arrays with the same dimensions, and finds a common index
    for value1 and value2 in their respective arrays. The deltas define the search radius for a close value match
    to the arrays.

    :return: index1, index2
    """
    tth_ind = np.argwhere(np.abs(array1 - value1) < delta1)
    azi_ind = np.argwhere(np.abs(array2 - value2) < delta2)

    tth_ind_ravel = np.ravel_multi_index((tth_ind[:, 0], tth_ind[:, 1]), dims=array1.shape)
    azi_ind_ravel = np.ravel_multi_index((azi_ind[:, 0], azi_ind[:, 1]), dims=array2.shape)

    common_ind_ravel = np.intersect1d(tth_ind_ravel, azi_ind_ravel)
    result_ind = np.unravel_index(common_ind_ravel, dims=array1.shape)

    while len(result_ind[0]) > 1:
        if np.max(np.diff(array1)) > 0:
            delta1 = np.max(np.diff(array1[result_ind]))
        if np.max(np.diff(array2)) > 0:
            delta2 = np.max(np.diff(array2[result_ind]))

        tth_ind = np.argwhere(np.abs(array1[result_ind] - value1) < delta1)
        azi_ind = np.argwhere(np.abs(array2[result_ind] - value2) < delta2)
        print(result_ind)
        common_ind = np.intersect1d(tth_ind, azi_ind)
        result_ind = (result_ind[0][common_ind], result_ind[1][common_ind])

    return result_ind[0], result_ind[1]
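intersect1d only compares 1-D values, so this example first encodes each (row, col) pair as a single flat offset with np.ravel_multi_index, intersects the offsets, and decodes the survivors with np.unravel_index. The same pattern in isolation, with made-up arrays:

import numpy as np

arr1 = np.array([[0.0, 1.0], [2.0, 3.0]])
arr2 = np.array([[9.0, 1.5], [2.2, 0.0]])

idx1 = np.argwhere(np.abs(arr1 - 1.0) < 0.75)  # (row, col) pairs
idx2 = np.argwhere(np.abs(arr2 - 1.5) < 0.75)

# Encode pairs as flat offsets so intersect1d can compare them
flat1 = np.ravel_multi_index((idx1[:, 0], idx1[:, 1]), arr1.shape)
flat2 = np.ravel_multi_index((idx2[:, 0], idx2[:, 1]), arr2.shape)
print(np.unravel_index(np.intersect1d(flat1, flat2), arr1.shape))
# (array([0]), array([1])) -- the shared index is row 0, column 1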
Example 13: filter_lines
def filter_lines(gr, max_gradient, min_flow_area):
    """Filter lines from the gridresultadmin and return 3 filtered sets:

    :param gr: GridH5ResultAdmin
    :param max_gradient: the maximum allowed waterlevel (s1) gradient
    :param min_flow_area: the minimum flow area, in square meters

    :returns: tuple of 3 filtered GridH5ResultAdmin.lines objects:
        - 2D-2D lines that have flow_area >= min_flow_area and
          gradient <= max_gradient
        - 1D-2D lines that have flow_area >= min_flow_area
        - 1D-2D lines that have flow_area >= min_flow_area and
          gradient <= max_gradient
    """
    lines_active = filter_min_flow_area(gr.lines, min_flow_area)
    lines_valid = filter_max_gradient(gr.lines, gr.nodes, max_gradient)
    lines2d2d_valid = gr.lines.subset('2D_ALL').filter(
        id__in=np.intersect1d(lines_valid, lines_active)
    )
    lines1d2d_active = gr.lines.subset('1D2D').filter(
        id__in=lines_active
    )
    lines1d2d_valid = gr.lines.subset('1D2D').filter(
        id__in=np.intersect1d(lines_valid, lines_active)
    )
    return lines2d2d_valid, lines1d2d_active, lines1d2d_valid
Example 14: beforeTimestep
def beforeTimestep(self, locationSDR, transitionSDR, featureSDR,
                   egocentricLocation, learn):
    self.csvOut.writerow(("t",))

    self.csvOut.writerow(("input", "newLocation"))
    self.csvOut.writerow([json.dumps(locationSDR.tolist())])
    self.csvOut.writerow([json.dumps(
        [decoding
         for decoding, sdr in self.exp.locations.iteritems()
         if np.intersect1d(locationSDR, sdr).size == sdr.size])])

    self.csvOut.writerow(("input", "deltaLocation"))
    self.csvOut.writerow([json.dumps(transitionSDR.tolist())])
    self.csvOut.writerow([json.dumps(
        [decoding
         for decoding, sdr in self.exp.transitions.iteritems()
         if np.intersect1d(transitionSDR, sdr).size == sdr.size])])

    self.csvOut.writerow(("input", "feature"))
    self.csvOut.writerow([json.dumps(featureSDR.tolist())])
    self.csvOut.writerow([json.dumps(
        [k
         for k, sdr in self.exp.features.iteritems()
         if np.intersect1d(featureSDR, sdr).size == sdr.size])])

    self.csvOut.writerow(("egocentricLocation",))
    self.csvOut.writerow([json.dumps(egocentricLocation)])
Example 15: linearCouplingCoeff2
def linearCouplingCoeff2(dataH, dataX, timeH, timeX, transFnXtoH, segStartTime,
                         segEndTime, timeShift, samplFreq, logFid, debugLevel):
    # LINEARCOUPLINGCOEFF - calculate the cross correlation coeff b/w the gravitational
    # wave channel H and the "projected" instrumental channel X. The noise in the
    # instrumental channel X is projected to the domain of the H using a linear coupling
    # function Txh
    rXH = np.asarray([])
    rMaxXH = np.asarray([])

    if (len(dataH) == 0) | (len(dataX) == 0):
        logFid.write('Error: One or more data vectors are empty..\n')
        logFid.write('Error: len(dataH) = %d len(dataX) = %d..\n' % (len(dataH), len(dataX[0])))
    elif len(dataH) != len(dataX[0]):
        logFid.write('Error: Different lengths. len(dataH) = %d len(dataX) = %d..\n' % (len(dataH), len(dataX[0])))
    else:
        dataH = dataH  # - np.mean(dataH)
        dataX = dataX[0]  # - np.mean(dataX[0])

        segIdxH = np.intersect1d(np.where(timeH >= segStartTime)[0], np.where(timeH < segEndTime)[0])
        dataH = dataH[segIdxH]
        segIdxX = np.intersect1d(np.where(timeX + timeShift >= segStartTime)[0], np.where(timeX + timeShift < segEndTime)[0])
        dataX = dataX[segIdxX]

        a = np.correlate(dataH, dataX) / (np.sqrt(np.correlate(dataH, dataH) * np.correlate(dataX, dataX)))
        rXH = np.append(rXH, a)
        rMaxXH = np.append(rMaxXH, a)

    return [rXH, rMaxXH]