This article collects typical usage examples of the numpy.union1d function in Python. If you have been wondering what union1d does, how to use it, or where it shows up in real code, the curated examples below should help.
Shown below are 15 code examples of the union1d function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
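As a quick refresher before the examples: np.union1d flattens its two array inputs and returns their sorted union with duplicates removed. A minimal sketch:

import numpy as np
from functools import reduce

a = np.array([3, 1, 4, 1])
b = np.array([1, 5, 9])
print(np.union1d(a, b))   # [1 3 4 5 9]
# Unions of more than two arrays can be folded together with reduce:
print(reduce(np.union1d, ([1, 3], [3, 5], [5, 7])))   # [1 3 5 7]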
Example 1: cr_reject2
def cr_reject2(fl, er, nsig=10.0, fwhm=2, grow=1, debug=True):
""" interpolate across features that have widths smaller than the
expected fwhm resolution.
Parameters
----------
fwhm: int
Resolution fwhm in pixels
fl : array of floats, shape (N,)
Flux
er : array of floats, shape (N,)
Error
Returns the interpolated flux and error arrays.
"""
fl, er = (np.array(a, dtype=float) for a in (fl, er))
# interpolate over bad pixels
fl1 = convolve_psf(fl, fwhm)
ibad = np.where(np.abs(fl1 - fl) > nsig*er)[0]
if debug: print(len(ibad))
extras1 = np.concatenate([ibad + 1 + i for i in range(grow)])
extras2 = np.concatenate([ibad - 1 - i for i in range(grow)])
ibad = np.union1d(ibad, np.union1d(extras1, extras2))
ibad = ibad[(ibad > -1) & (ibad < len(fl))]
igood = np.setdiff1d(np.arange(len(fl1)), ibad)
fl[ibad] = np.interp(ibad, igood, fl[igood])
er[ibad] = np.nan
return fl,er
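A hypothetical usage sketch: convolve_psf comes from the snippet's parent module, so the Gaussian smoothing below is a stand-in assumption for the sake of a self-contained run, not the original implementation.

import numpy as np
from scipy.ndimage import gaussian_filter1d

def convolve_psf(a, fwhm):
    # Stand-in (assumption): Gaussian smoothing with the given FWHM
    # (sigma = FWHM / 2.3548).
    return gaussian_filter1d(a, fwhm / 2.3548)

fl = np.ones(100)
fl[50] = 20.0                    # single-pixel cosmic-ray spike
er = np.full(100, 0.1)
fl_clean, er_clean = cr_reject2(fl, er)
print(np.isnan(er_clean).sum())  # rejected pixels get NaN errors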
Example 2: get_obstList
def get_obstList(self,X,Y,Z):
"""
Define areas external to pipe.
"""
    #Pipe in - find all points exterior of the small pipe
pipe_in = np.array(np.where((X - 1)**2 + (Y - 1)**2 > (self.diam_in/2)**2)).flatten()
pipe_in_stop = np.array(np.where(Z <= 3 + 0.5*(self.diam_out - self.diam_in))).flatten()
pipe_in = np.intersect1d(pipe_in[:],pipe_in_stop[:])
#Expansion - find all points exterior of expansion
r_cone = self.diam_in
h_cone = self.diam_in
expansion = np.array(np.where((X - 1)**2 + (Y - 1)**2 > (r_cone/h_cone)**2*(Z - 3)**2)).flatten()
expansion_start = np.array(np.where(Z >= 3 + 0.5*(self.diam_out - self.diam_in)))
#expansion_stop = np.array(np.where(Z <= 4)).flatten()
expansion = np.intersect1d(expansion[:],expansion_start[:])
#expansion = np.intersect1d(expansion[:],expansion_stop[:])
    #Pipe out - find all points exterior of the larger pipe
pipe_out = np.array(np.where((X - 1)**2 + (Y - 1)**2 > (self.diam_out/2)**2)).flatten()
pipe_out_start = np.array(np.where(Z >= 3 + 0.5*(self.diam_in - self.diam_out))).flatten()
pipe_out = np.intersect1d(pipe_out[:],pipe_out_start[:])
    #Put the pieces together
    pipe = np.union1d(expansion[:],pipe_in[:])
    pipe = np.union1d(pipe[:],pipe_out[:])
obst_list = pipe[:]
return list(obst_list[:])
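The masking pattern above (flattened np.where index sets combined with intersect1d and union1d) can be exercised on a toy grid; the radii and bounds below are made up for illustration:

import numpy as np

x = np.linspace(0, 2, 8)
z = np.linspace(0, 6, 16)
X, Y, Z = np.meshgrid(x, x, z, indexing='ij')
X, Y, Z = X.flatten(), Y.flatten(), Z.flatten()
outside = np.where((X - 1)**2 + (Y - 1)**2 > 0.25)[0]  # outside radius 0.5
near = np.where(Z <= 3)[0]
piece = np.intersect1d(outside, near)             # both conditions hold
both = np.union1d(piece, np.where(Z > 5)[0])      # either region
print(len(piece), len(both))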
Example 3: fit
def fit(self, X, y):
self._X_colcount = X.shape[1]
#self.learner.fit(self._transform(X), y)
self.get_learner(X, y)
classifier_features = self.getClassifierFeatures()
fe = SecondLayerFeatureEvaluator()
local_excluded_features = np.union1d(self.excluded_features,
classifier_features)
local_X = utilities.exclude_cols(X,
local_excluded_features)
scores = fe.evaluate(local_X, X[:,classifier_features],
n_jobs = self.n_jobs)
i = 0
for feature in classifier_features:
fc = sklearn.base.clone(
self.feature_confidence_estimator).set_params(
**self.feature_confidence_estimator.get_params())
fc.fit(X, feature,
scores[i],
local_excluded_features)
self.setFeatureConfidenceEstimator(feature, fc)
self._second_layer_features = np.union1d(
self._second_layer_features, fc.getFeatures())
i += 1
    return self
Example 4: set_cavity_walls
def set_cavity_walls(self,walls=['left','right','bottom','west','east']):
"""
    Set up to 5 of the walls ('left', 'right', 'bottom', 'west', 'east') as solid walls for the simulation.
"""
solid_list_a = np.empty(0).flatten()
solid_list_b = np.empty(0).flatten()
solid_list_c = np.empty(0).flatten()
solid_list_d = np.empty(0).flatten()
solid_list_e = np.empty(0).flatten()
for w in walls:
if w=='right':
solid_list_a = np.array(np.where((self.x==0.))).flatten()
elif w=='left':
solid_list_b = np.array(np.where((self.x > (self.Lx_p-self.dx/2.)))).flatten()
elif w=='west':
solid_list_d = np.array(np.where((self.z == 0.))).flatten()
elif w=='bottom':
solid_list_c = np.array(np.where((self.y == 0.))).flatten()
elif w=='east':
solid_list_e = np.array(np.where((self.z > (self.Lz_p - self.dx/2.)))).flatten()
    solid_list = np.union1d(solid_list_a, solid_list_b)
    solid_list = np.union1d(solid_list, solid_list_c)
    solid_list = np.union1d(solid_list, solid_list_e)
    self.solid_list = np.union1d(solid_list, solid_list_d)
self.lid_list = np.array(np.where((self.y > (self.Ly_p-self.dx/2.)))).flatten()
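The four chained union1d calls above can also be folded with functools.reduce; a sketch with made-up index arrays:

from functools import reduce
import numpy as np

wall_lists = [np.array([0, 1]), np.array([1, 2]), np.array([], dtype=int),
              np.array([5]), np.array([2, 5])]
solid = reduce(np.union1d, wall_lists)
print(solid)   # [0 1 2 5]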
Example 5: dDCR_moments
def dDCR_moments(SED1, SED2, bandpass):
zenith_angle = np.pi/4.0 * galsim.radians
R500 = galsim.dcr.get_refraction(500, zenith_angle)
# analytic first moment differences
R = lambda w:(galsim.dcr.get_refraction(w, zenith_angle) - R500) / galsim.arcsec
x1 = np.union1d(bandpass.wave_list, SED1.wave_list)
x1 = x1[(x1 >= bandpass.blue_limit) & (x1 <= bandpass.red_limit)]
x2 = np.union1d(bandpass.wave_list, SED2.wave_list)
x2 = x2[(x2 >= bandpass.blue_limit) & (x2 <= bandpass.red_limit)]
numR1 = np.trapz(R(x1) * bandpass(x1) * SED1(x1), x1)
numR2 = np.trapz(R(x2) * bandpass(x2) * SED2(x2), x2)
den1 = SED1.calculateFlux(bandpass)
den2 = SED2.calculateFlux(bandpass)
R1 = numR1/den1
R2 = numR2/den2
dR_analytic = R1 - R2
# analytic second moment differences
V1_kernel = lambda w:(R(w) - R1)**2
V2_kernel = lambda w:(R(w) - R2)**2
numV1 = np.trapz(V1_kernel(x1) * bandpass(x1) * SED1(x1), x1)
numV2 = np.trapz(V2_kernel(x2) * bandpass(x2) * SED2(x2), x2)
V1 = numV1/den1
V2 = numV2/den2
dV_analytic = V1 - V2
return dR_analytic, dV_analytic, len(x2)
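Here union1d merges the bandpass and SED wavelength grids so the trapezoidal integration samples every tabulated point. The same idea in isolation, with made-up wavelengths:

import numpy as np

wave_bp = np.linspace(500., 700., 5)           # bandpass samples (nm)
wave_sed = np.array([480., 550., 633., 710.])  # SED samples (nm)
x = np.union1d(wave_bp, wave_sed)
x = x[(x >= 500.) & (x <= 700.)]               # clip to the bandpass limits
print(x)   # sorted, unique, and covering both tabulations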
Example 6: averageHopDistance
def averageHopDistance(transmissions):
"""
    Take a list of numpy arrays, each of whose rows represents an
    information transmission.
    Outputs the total number of hops of information divided by the total
    number of original senders (those that did not receive from another
    person): a measure of the average spread of information from each source.
"""
numIterations = len(transmissions)
originalInfoSenders = numpy.array([])
infoReceivers = numpy.array([])
totalHops = 0
#Assume transmissions are unique
for i in range(0, numIterations):
currentAlters = transmissions[i][:, 1]
infoReceivers = numpy.union1d(infoReceivers, currentAlters)
totalHops += transmissions[i].shape[0]
currentEgos = transmissions[i][:, 0]
originalInfoSenders = numpy.union1d(originalInfoSenders, currentEgos)
originalInfoSenders = numpy.setdiff1d(originalInfoSenders, infoReceivers)
#Number of path ends is infoReceivers.shape[0]
if originalInfoSenders.shape[0] != 0:
return float(totalHops)/originalInfoSenders.shape[0]
else:
return 0
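A small run-through of the function above with two hypothetical transmission arrays (columns are sender, receiver):

import numpy

t1 = numpy.array([[1, 2], [2, 3]])   # 1 -> 2 -> 3
t2 = numpy.array([[5, 6]])           # 5 -> 6
print(averageHopDistance([t1, t2]))  # 3 hops / 2 original senders = 1.5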
Example 7: chi
def chi(self, customattribute):
"""
    Compute the chi-square statistic for the given attribute.
"""
attributeDict = dict()
classAttributeDict = dict()
for piece in self.chunks:
for (attribute, classAttribute), arrays in piece.groupby([customattribute, self.classAttribute]).studentID.unique().iteritems():
attributeDict.setdefault((attribute, classAttribute), np.array([]))
attributeDict[(attribute, classAttribute)] = np.union1d(attributeDict[(attribute, classAttribute)], arrays)
for classAttribute, arrays in piece.groupby(self.classAttribute).studentID.unique().iteritems():
classAttributeDict.setdefault(classAttribute, np.array([]))
classAttributeDict[classAttribute] = np.union1d(classAttributeDict[classAttribute], arrays)
    #Proportion of the population that falls in each target class (graduation destination).
classSeries = Series(classAttributeDict).apply(lambda x:len(x))
classSeries /= classSeries.sum()
    #Observed counts for each attribute value.
attributeObs = Series(attributeDict).apply(lambda x:len(x)).unstack(fill_value=0)
attributeExp = DataFrame(index=attributeObs.index, columns=attributeObs.columns)
    #Set the initial values.
for index in attributeExp.index:
attributeExp.ix[index] = attributeObs.ix[index].sum()
    #Derive the expected counts from the proportions of each target class.
attributeExp = attributeExp.mul(classSeries).fillna(0)
    #Compute the chi-square statistic from the observed and expected counts and return the p-value.
return chisquare(attributeObs.stack(), attributeExp.stack()), attributeObs
Example 8: reference_naive_aggregation
def reference_naive_aggregation(C):
S = np.array_split(C.indices, C.indptr[1:-1])
n = C.shape[0]
aggregates = np.empty(n, dtype=C.indices.dtype)
aggregates[:] = -1 # aggregates[j] denotes the aggregate j is in
R = np.zeros((0,)) # R stores already aggregated nodes
j = 0 # j is the aggregate counter
Cpts = []
# Only one aggregation pass
for i, row in enumerate(S):
        # if i isn't already aggregated, grab all of its neighbors
if aggregates[i] == -1:
unaggregated_neighbors = np.setdiff1d(row, R)
aggregates[unaggregated_neighbors] = j
aggregates[i] = j
j += 1
R = np.union1d(R, unaggregated_neighbors)
R = np.union1d(R, np.array([i]))
Cpts.append(i)
else:
pass
assert(np.unique(R).shape[0] == n)
Pj = aggregates
Pp = np.arange(n+1)
Px = np.ones(n)
return csr_matrix((Px, Pj, Pp)), np.array(Cpts)
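A quick smoke test on a small connectivity matrix (assuming numpy and scipy.sparse.csr_matrix are imported, as the function body implies):

import numpy as np
from scipy.sparse import csr_matrix

# Path graph of four nodes with self-loops.
C = csr_matrix(np.array([[1, 1, 0, 0],
                         [1, 1, 1, 0],
                         [0, 1, 1, 1],
                         [0, 0, 1, 1]]))
AggOp, Cpts = reference_naive_aggregation(C)
print(AggOp.toarray())   # one column per aggregate
print(Cpts)              # root node chosen for each aggregate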
Example 9: __call__
def __call__(self, s, *pargs, **kargs):
    if len(pargs) == 0 and len(kargs) == 0:
if s in self.a:
at = np.searchsorted(self.a, s)
return self.m[at]
else:
if s <= self.a[0]:
return self.m[0]
elif s >= self.a[-1]:
return self.m[-1]
else:
at = np.searchsorted(self.a, s)
d = (s - self.a[at-1]) / (self.a[at] - self.a[at-1])
r1 = self.m[at-1]
r2 = self.m[at]
# if isinstance(r1, mesh2d):
# # X= np.union1d(r1.X, r2.X)
# # Y = [res for res in r1(X)]
# # print Y
if isinstance(r1, mesh3d):
X = np.union1d(r1.X, r2.X)
                    Y = np.union1d(r1.Y, r2.Y)
res = np.zeros((X.size, Y.size))
for ix, vx in enumerate(X):
for iy, vy in enumerate(Y):
a = r1(vx, vy)
b = r2(vx, vy)
res[ix, iy] = a + d * (b - a)
return mesh3d(X=X, Y=Y, Z=res)
else:
print u"Pas dans la liste"
Example 10: update_dimensions
def update_dimensions(self, test_cases):
for tc in test_cases:
self.x_range = num.union1d(self.x_range,
tc.test_parameters.items()[0].values())
self.y_range = num.union1d(self.y_range,
tc.test_parameters.items()[1].values())
self.z_range = num.union1d(self.z_range,
tc.test_parameters.items()[2].values())
Example 11: change
def change(self):
    '''
    Suggest a potential change to the state and return the changed state.
    '''
ratio=self.params['transRotRatio']
position_idx=int(np.random.rand()*(self.params['m']))
noChangeFlag=1
if self.params['TtoDIsPhysical'] and np.random.rand()<=self.params['probFormChange'] and self.state[2][position_idx]==0:
changedForm=self.state[2].copy()
changedForm[position_idx]=1
changedState=(self.state[0],self.state[1],changedForm,self.state[3])
noChangeFlag=0
elif np.random.rand()<=ratio:
noChangeFlag=0
#translation.
changedPosition=self.state[0].copy()
if self.params['isAlt']:
#Alternative dynamics, where proteins only move if nearby position is open
u=np.random.uniform()
#Perform change of position
if u<= self.params['altProb'] and (self.state[0][position_idx]-1)%self.params['N'] not in self.state[0]:
changedPosition[position_idx]=changedPosition[position_idx]-1
elif u> self.params['altProb'] and (self.state[0][position_idx]+1)%self.params['N'] not in self.state[0]:
changedPosition[position_idx]=changedPosition[position_idx]+1
else:
#where jumping through proteins is allowed.
# to be more physical, we force proteins to get into empty slots
# not connected to any other proteins before they can get back
# into a slot next to other proteins.
# flag of whether chosen protein is currently connected to
# other proteins
if (self.state[0][position_idx]+1)%self.params['N'] in self.state[0] or (self.state[0][position_idx]-1)%self.params['N'] in self.state[0]:
# protein is currently connected to other proteins,
# so we force it to go into empty slots not connected to proteins
connectedEmptySlots=np.union1d(np.union1d(self.state[0],(self.state[0]+1)%self.params['N']),(self.state[0]-1)%self.params['N'])
                slotsToChoose=np.array([x for x in range(self.params['N']) if x not in connectedEmptySlots])
else:
                # if the protein is currently not connected to other proteins, any unoccupied slot can be taken
                slotsToChoose = np.array([x for x in range(self.params['N']) if x not in self.state[0]])
if len(slotsToChoose)!=0:
changedPosition[position_idx]=random.choice(slotsToChoose)
changedState=(changedPosition,self.state[1],self.state[2],self.state[3])
else:
#Rotation
changedType=self.state[1].copy()
changedType[position_idx]=changedType[position_idx][::-1]
changedM=self.state[3].copy()
changedM[self.state[1][position_idx][1],self.state[1][position_idx][0]]-=1
changedM[changedType[position_idx][1],changedType[position_idx][0]]+=1
#note the above changedM, idx 1 is for row, while idx 0 is for column in the Type array. This is due to np.ravel of np.indices
changedState=(self.state[0],changedType,self.state[2],changedM)
# changedState=self.checkATP(changedState)
return changedState
Example 12: CONSTRUCT_TREE
def CONSTRUCT_TREE(df_nodes, df_edges,source=-1,sources=[-1,-2]):
ids = np.union1d(df_edges.Pred_ID.unique(),df_edges.Prey_ID.unique())
df_tree = pd.DataFrame(columns=df_edges.columns)
for node_id in ids:
if node_id in sources: continue
        df = df_edges[(df_edges.Pred_ID==node_id) & (df_edges.Prey_ID.isin(sources))].sort_values('BiomassIngested')
        if len(df) > 0:
            row = df.iloc[-1]
        else:
            try:
                row = df_edges[(df_edges.Pred_ID==node_id)].sort_values('BiomassIngested').iloc[-1]
            except:
                print('Discarding node without prey...')
                print(node_id)
                print(df_edges[(df_edges.Pred_ID==node_id)].sort_values('BiomassIngested'))
                #continue
                raise Exception('nodes without prey')
df_tree = df_tree.append(row)
df_tree[['Pred_ID', 'Prey_ID']] = df_tree[['Pred_ID', 'Prey_ID']].astype(int)
df_tree[['Biomass_Assimilated','BiomassIngested']] = df_tree[['Biomass_Assimilated','BiomassIngested']].astype(float)
ids = np.union1d(df_tree.Pred_ID.unique(),df_tree.Prey_ID.unique())
df_nodes = df_nodes[df_nodes.ID.isin(ids)]
a_values = {ii:0 for ii in ids}
c_values = {ii:0 for ii in ids}
    print(len(a_values))
    print(len(df_nodes))
    print(a_values)
    print(df_tree.Prey_ID.unique())
    df_nodes['A_value'] = list(a_values.values())
    df_nodes['C_value'] = list(c_values.values())
#return df_nodes,df_tree
Tree.GET_A(df_tree,source,a_values,c_values)
df_nodes = df_nodes.set_index('ID',drop = False)
    for key,value in a_values.items():
        #print(key,value)
        df_nodes.loc[key,'A_value'] = value
    for key,value in c_values.items():
df_nodes.loc[key,'C_value'] = value
df_nodes[['A_value','C_value']] = df_nodes[['A_value','C_value']].astype(int)
return df_nodes,df_tree
Example 13: create_error_matrix
def create_error_matrix(obs_data, prd_data, compact=True, classes=None):
"""
Create an error (confusion) matrix from observed and predicted data.
The data is assumed to represent classes rather than continuous data.
Parameters
----------
obs_data : array-like
Observed classes
prd_data : array-like
Predicted classes
    compact : bool
        Flag for whether or not to return the error matrix in compact form.
        If True, only the classes that are represented in the data are
        included. If False, the matrix covers all classes given in the
        classes keyword. Defaults to True.
    classes : array-like
        Classes to include when compact is False. Defaults to None.
Returns
-------
err_mat : np.array
Error matrix of classes
class_xwalk : dict
Dictionary of class value to row or column number
"""
    if compact:
        # Find all classes present in either the observed or predicted data
        classes = np.union1d(np.unique(obs_data), np.unique(prd_data))
    else:
        if classes is None:
            # No classes given - default to those present as above
            classes = np.union1d(np.unique(obs_data), np.unique(prd_data))
        else:
            # Use the user-defined classes
            classes = np.array(classes)
    n = classes.size
    # One liner for calculating error matrix
    # http://stackoverflow.com/questions/10958702/
    #   python-one-liner-for-a-confusion-contingency-matrix-needed
    pairs = list(zip(obs_data, prd_data))
    err_mat = np.array([pairs.count(x) for x in
                        itertools.product(classes, repeat=2)]).reshape(n, n)
    # Create the dictionary of class value to row/column number
    class_xwalk = dict((c, i) for (c, i) in zip(classes, range(n)))
    return err_mat, class_xwalk
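A hypothetical call with small label lists (numpy and itertools imported as the snippet assumes):

obs = [1, 1, 2, 3, 3]
prd = [1, 2, 2, 3, 1]
mat, xwalk = create_error_matrix(obs, prd)
print(mat)     # 3x3 counts with observed classes along the rows
print(xwalk)   # maps each class value to its row/column index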
Example 14: num_bipartisan_donors
def num_bipartisan_donors(df):
"""Find the number of people that have donated to more than one parties.
Args:
df: A DataFrame generated from the campaign finance data csv file
with the column "party" added.
Returns:
An integer count of the number of people that have donated to more than
one parties.
Initial Approach:
Select only contb amts > 0 (i.e. it is a donation)
Select only people, parties.
Group by people.
aggregate number of parties
select > 1
Count
'''
bipartyAgg= df[df['contb_receipt_amt']>0][['contbr_nm','party']].drop_duplicates(['contbr_nm','party']).groupby('contbr_nm', as_index=False).agg(lambda x: x.count())
bipartySet= bipartyAgg[bipartyAgg['party']>1]
return bipartySet['contbr_nm'].count()
# TODO: Implement this function.
'''
"""
# Obtain the set of unique contributor, party pairs
pplSet= df[df['contb_receipt_amt']>0][['contbr_nm','party']].drop_duplicates(['contbr_nm','party'])
# Obtain all of the democrat info
democ_d = np.array(pplSet[pplSet['party']=="Democrat"]["contbr_nm"])
# Obtain the republican subset
repub_d = np.array(pplSet[pplSet['party']=="Republican"]["contbr_nm"])
    # Obtain the Libertarian subset
liber_d = np.array(pplSet[pplSet['party']=="Libertarian"]["contbr_nm"])
# Intersect democrat set with republican
dr = np.intersect1d(democ_d, repub_d)
# Intersect republican set with libertarian
rl = np.intersect1d(repub_d, liber_d)
# Intersect democrat set with libertarian
ld = np.intersect1d(liber_d, democ_d)
    # Return the number of people in the union of all three intersections
    retVal = np.union1d(dr, np.union1d(rl, ld))
    return retVal.size
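A made-up mini DataFrame to exercise the set-based approach above:

import pandas as pd

df = pd.DataFrame({
    'contbr_nm': ['A', 'A', 'B', 'C', 'C'],
    'party': ['Democrat', 'Republican', 'Democrat',
              'Republican', 'Libertarian'],
    'contb_receipt_amt': [10.0, 5.0, 25.0, 7.5, 3.0],
})
print(num_bipartisan_donors(df))   # A and C each gave to two parties -> 2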
Example 15: process_eyelid_traces
def process_eyelid_traces(traces,time_vect,idx_CS_US,idx_US,idx_CS,thresh_CR=.1,time_CR_on=-.1,time_US_on=.05):
"""
    Preprocess the traces output of get_behavior_traces.
    Parameters:
    ----------
    traces: ndarray (N trials X t time points)
        eyelid traces output of get_behavior_traces.
    time_vect: ndarray
        time vector corresponding to the columns of traces
    idx_CS_US, idx_US, idx_CS: ndarray
        indices of trials with CS+US, US only, and CS only
    thresh_CR: float
        fraction of eyelid closure considered a CR
    time_CR_on: float
        time of alleged beginning of CRs
    time_US_on: float
        time after which the US is considered to induce a UR
    Returns:
    -------
    eye_traces: ndarray
        normalized eyelid traces
    amplitudes_at_US: ndarray
        mean normalized eyelid amplitude in the CR window for each trial
    trigs: dict
        dictionary containing various subdivisions of the trials according to behavioral responses
        'idxCSUSCR': index of trials with CS+US with a CR
        'idxCSUSNOCR': index of trials with CS+US without a CR
        'idxCSCR': index of trials with CS only and a CR
        'idxCSNOCR': index of trials with CS only and no CR
        'idxNOCR': index of trials with no CR
        'idxCR': index of trials with a CR
        'idxUS': index of trials with US only
    """
#normalize by max amplitudes at US
eye_traces=old_div(traces,np.nanmax(np.nanmedian(traces[np.hstack([idx_CS_US,idx_US])][:,np.logical_and(time_vect>time_US_on,time_vect<time_US_on +.4 )],0)))
amplitudes_at_US=np.mean(eye_traces[:,np.logical_and( time_vect > time_CR_on , time_vect <= time_US_on )],1)
trigs=dict()
trigs['idxCSUSCR']=idx_CS_US[np.where(amplitudes_at_US[idx_CS_US]>thresh_CR)[-1]]
trigs['idxCSUSNOCR']=idx_CS_US[np.where(amplitudes_at_US[idx_CS_US]<thresh_CR)[-1]]
trigs['idxCSCR']=idx_CS[np.where(amplitudes_at_US[idx_CS]>thresh_CR)[-1]]
trigs['idxCSNOCR']=idx_CS[np.where(amplitudes_at_US[idx_CS]<thresh_CR)[-1]]
trigs['idxNOCR']=np.union1d(trigs['idxCSUSNOCR'],trigs['idxCSNOCR'])
trigs['idxCR']=np.union1d(trigs['idxCSUSCR'],trigs['idxCSCR'])
trigs['idxUS']=idx_US
return eye_traces,amplitudes_at_US, trigs