This article collects typical usage examples of the scipy.zeros_like function in Python. If you are wondering what zeros_like does and how to use it, the curated examples below may help.
15 code examples of zeros_like are shown, sorted by popularity by default.
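A note before the examples: scipy.zeros_like is a plain re-export of numpy.zeros_like (SciPy historically mirrored NumPy's top-level namespace; recent SciPy releases have removed these aliases, so prefer numpy.zeros_like in new code). The sp, SP, and _sp names in the excerpts below are import aliases for scipy created in the surrounding modules. A minimal illustration of what the function does:

    import numpy as np

    a = np.arange(6, dtype=np.float64).reshape(2, 3)
    z = np.zeros_like(a)   # zeros with the same shape and dtype as `a`
    assert z.shape == (2, 3) and z.dtype == np.float64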
Example 1: fitKronApprox
def fitKronApprox(a):
    # S, K (lists of candidate covariance matrices) and Gamma (the target
    # matrix) are module-level names in the original code.
    Sbg = SP.zeros_like(S[0])
    Kbg = SP.zeros_like(K[0])
    for i in range(len(S)):
        Sbg += a[i] * S[i]
    for i in range(len(K)):
        Kbg += a[i + len(S)] * K[i]
    Gamma1 = SP.kron(Sbg, Kbg)
    return ((Gamma - Gamma1)**2).sum()
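As a minimal sketch of how this objective might be driven (the toy S, K, Gamma and the use of scipy.optimize.minimize are illustrative assumptions, not part of the original source), one could fit the mixture weights like this:

    import numpy as SP   # stands in for the SP alias; modern SciPy dropped zeros_like/kron
    from scipy.optimize import minimize

    S = [SP.eye(2), SP.ones((2, 2))]               # toy row-covariance candidates
    K = [SP.eye(3), SP.diag([1.0, 2.0, 3.0])]      # toy column-covariance candidates
    Gamma = SP.kron(S[0] + 0.5 * S[1], K[0])       # target built from known weights

    def fitKronApprox(a):                          # same body as above
        Sbg = SP.zeros_like(S[0])
        Kbg = SP.zeros_like(K[0])
        for i in range(len(S)):
            Sbg += a[i] * S[i]
        for i in range(len(K)):
            Kbg += a[i + len(S)] * K[i]
        return ((Gamma - SP.kron(Sbg, Kbg))**2).sum()

    res = minimize(fitKronApprox, x0=SP.ones(len(S) + len(K)))
    print(res.fun)   # the target is exactly representable, so this should approach 0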
Example 2: estCumPos
def estCumPos(position, offset=0, chrom_len=None):
    '''
    compute the cumulative position of each variant given the position and the
    chromosome. Also returns the starting cumulative position of each chromosome.
    Args:
        position:   pandas DataFrame of basepair positions (key='pos') and
                    chromosome values (key='chrom');
                    the DataFrame will be updated with the field 'pos_cum'
        chrom_len:  vector of predefined chromosome lengths
        offset:     offset between chromosomes for the cumulative position
                    (default 0 bp)
    Returns:
        position, chrom_pos:
        position:   augmented position DataFrame in which cumulative positions
                    are defined
        chrom_pos:  numpy.array of starting cumulative positions for each
                    chromosome
    '''
    RV = position.copy()
    chromvals = sp.unique(position['chrom'])   # sp.unique is always sorted
    chrom_pos_cum = sp.zeros_like(chromvals)   # starting position of each chromosome
    if 'pos_cum' not in position:
        RV["pos_cum"] = sp.zeros_like(position['pos'])   # cumulative position of each variant
    pos_cum = RV['pos_cum'].values
    maxpos_cum = 0
    for i, mychrom in enumerate(chromvals):
        chrom_pos_cum[i] = maxpos_cum
        i_chr = position['chrom'] == mychrom
        if chrom_len is None:
            maxpos = position['pos'][i_chr].max() + offset
        else:
            maxpos = chrom_len[i] + offset
        pos_cum[i_chr.values] = maxpos_cum + position.loc[i_chr, 'pos']
        maxpos_cum += maxpos
    return RV, chrom_pos_cum
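A hypothetical call (not from the original source), with estCumPos as above and the sp alias bound to scipy or numpy. Note that writing through RV['pos_cum'].values relies on .values returning a writable view of the column, which holds for classic pandas but not under copy-on-write semantics:

    import pandas as pd

    snps = pd.DataFrame({'chrom': [1, 1, 2, 2, 2],
                         'pos':   [100, 500, 50, 300, 900]})
    RV, chrom_pos = estCumPos(snps, offset=1000)
    print(chrom_pos)             # [   0 1500]
    print(RV['pos_cum'].values)  # [ 100  500 1550 1800 2400]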
Example 3: makeinputh5
def makeinputh5(Iono, basedir):
    # Iono is an IonoContainer instance; the surrounding module provides the
    # sp (scipy) alias, pathlib.Path and the IonoContainer class.
    basedir = Path(basedir).expanduser()
    Param_List = Iono.Param_List
    dataloc = Iono.Cart_Coords
    times = Iono.Time_Vector
    velocity = Iono.Velocity
    zlist, idx = sp.unique(dataloc[:, 2], return_inverse=True)
    siz = list(Param_List.shape[1:])
    vsiz = list(velocity.shape[1:])
    datalocsave = sp.column_stack((sp.zeros_like(zlist), sp.zeros_like(zlist), zlist))
    outdata = sp.zeros([len(zlist)] + siz)
    outvel = sp.zeros([len(zlist)] + vsiz)
    # average the parameters and velocities over x and y at each altitude
    for izn, iz in enumerate(zlist):
        arr = sp.argwhere(idx == izn)
        outdata[izn] = sp.mean(Param_List[arr], axis=0)
        outvel[izn] = sp.mean(velocity[arr], axis=0)
    Ionoout = IonoContainer(datalocsave, outdata, times, Iono.Sensor_loc, ver=0,
                            paramnames=Iono.Param_Names, species=Iono.Species,
                            velocity=outvel)
    ofn = basedir / 'startdata.h5'
    print('writing {}'.format(ofn))
    Ionoout.saveh5(str(ofn))
Example 4: estCumPos
def estCumPos(pos, chrom, offset=20000000):
    '''
    compute the cumulative position of each variant given the position and the
    chromosome. Also returns the starting cumulative position of each chromosome.
    Args:
        pos:     scipy.array of basepair positions (on the chromosome)
        chrom:   scipy.array of chromosomes
        offset:  offset between chromosomes for the cumulative position
                 (default 20000000 bp)
    Returns:
        cum_pos:   scipy.array of cumulative positions
        chrom_pos: scipy.array of starting cumulative positions for each chromosome
    '''
    chromvals = SP.unique(chrom)          # SP.unique is always sorted
    chrom_pos = SP.zeros_like(chromvals)  # starting position of each chromosome
    cum_pos = SP.zeros_like(pos)          # cumulative position of each variant
    maxpos_cum = 0
    for i, mychrom in enumerate(chromvals):
        chrom_pos[i] = maxpos_cum
        i_chr = chrom == mychrom
        maxpos = pos[i_chr].max() + offset
        maxpos_cum += maxpos
        cum_pos[i_chr] = chrom_pos[i] + pos[i_chr]
    return cum_pos, chrom_pos
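The same idea with plain arrays; a hypothetical call, with SP bound to numpy:

    import numpy as SP

    pos   = SP.array([100, 500, 50, 300])
    chrom = SP.array([1, 1, 2, 2])
    cum_pos, chrom_pos = estCumPos(pos, chrom, offset=1000)
    print(cum_pos)    # [ 100  500 1550 1800]
    print(chrom_pos)  # [   0 1500]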
Example 5: makeinputh5
def makeinputh5(Iono, basedir):
    """This will make an h5 file for the IonoContainer that can be used as
    starting points for the fitter. The IonoContainer taken in will be averaged
    over the x and y dimensions of space to give an average value of the
    parameters for each altitude.
    Inputs
    Iono - An instance of the IonoContainer class that will be averaged over so
        it can be used for fitter starting points.
    basedir - The directory the file will be saved to (a pathlib.Path, so that
        the / operator below works).
    """
    # Get the parameters from the original data
    Param_List = Iono.Param_List
    dataloc = Iono.Cart_Coords
    times = Iono.Time_Vector
    velocity = Iono.Velocity
    zlist, idx = sp.unique(dataloc[:, 2], return_inverse=True)
    siz = list(Param_List.shape[1:])
    vsiz = list(velocity.shape[1:])
    datalocsave = sp.column_stack((sp.zeros_like(zlist), sp.zeros_like(zlist), zlist))
    outdata = sp.zeros([len(zlist)] + siz)
    outvel = sp.zeros([len(zlist)] + vsiz)
    # Do the averaging across space
    for izn, iz in enumerate(zlist):
        arr = sp.argwhere(idx == izn)
        outdata[izn] = sp.mean(Param_List[arr], axis=0)
        outvel[izn] = sp.mean(velocity[arr], axis=0)
    Ionoout = IonoContainer(datalocsave, outdata, times, Iono.Sensor_loc, ver=0,
                            paramnames=Iono.Param_Names, species=Iono.Species,
                            velocity=outvel)
    Ionoout.saveh5(basedir / 'startdata.h5')
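The core averaging idiom here, sp.unique(..., return_inverse=True) followed by a per-group mean, can be seen in isolation. A standalone sketch, independent of IonoContainer:

    import numpy as sp   # stands in for the sp alias

    z    = sp.array([100., 100., 200., 200., 200.])   # altitude of each sample
    vals = sp.array([1., 3., 2., 4., 6.])             # a parameter at each sample
    zlist, idx = sp.unique(z, return_inverse=True)
    means = sp.array([vals[idx == i].mean() for i in range(len(zlist))])
    print(zlist)   # [100. 200.]
    print(means)   # [2. 4.]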
Example 6: _get_indices
def _get_indices(self, element, labels, return_indices, mode):
    r'''
    This is the actual method for getting indices, but it should not be called
    directly.
    '''
    if mode == 'union':
        union = sp.zeros_like(self._get_info(element=element, label='all'), dtype=bool)
        for item in labels:   # iterate over the labels list and collect all indices
            union = union + self._get_info(element=element, label=item)
        ind = union
    elif mode == 'intersection':
        intersect = sp.ones_like(self._get_info(element=element, label='all'), dtype=bool)
        for item in labels:
            intersect = intersect * self._get_info(element=element, label=item)
        ind = intersect
    elif mode == 'not_intersection':
        not_intersect = sp.zeros_like(self._get_info(element=element, label='all'), dtype=int)
        for item in labels:
            info = self._get_info(element=element, label=item)
            not_intersect = not_intersect + sp.int8(info)
        ind = (not_intersect == 1)   # in exactly one of the given labels
    elif mode == 'none':
        none = sp.zeros_like(self._get_info(element=element, label='all'), dtype=int)
        for item in labels:
            info = self._get_info(element=element, label=item)
            none = none - sp.int8(info)
        ind = (none == 0)            # in none of the given labels
    if return_indices:
        ind = sp.where(ind)[0]
    return ind
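Outside of OpenPNM, the accumulation pattern behind the 'union' and 'not_intersection' modes looks like this; a standalone sketch with made-up label masks:

    import numpy as sp

    all_mask = sp.ones(6, dtype=bool)                  # plays the role of label 'all'
    top  = sp.array([1, 1, 0, 0, 0, 0], dtype=bool)
    left = sp.array([1, 0, 1, 0, 1, 0], dtype=bool)

    union = sp.zeros_like(all_mask, dtype=bool)
    for mask in (top, left):
        union = union + mask                           # boolean OR
    print(sp.where(union)[0])                          # [0 1 2 4]

    counts = sp.zeros_like(all_mask, dtype=int)
    for mask in (top, left):
        counts = counts + sp.int8(mask)
    print(sp.where(counts == 1)[0])                    # in exactly one label: [1 2 4]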
Example 7: rankStandardizeNormal
def rankStandardizeNormal(X):
    """
    Gaussianize X: [samples x phenotypes]
    - each phenotype is converted to ranks and transformed back to normal using
      the inverse CDF
    """
    RV = SP.zeros_like(X)
    for i in range(X.shape[1]):
        x = X[:, i]
        i_nan = SP.isnan(x)
        rank = st.rankdata(x[~i_nan])
        # divide by (N+1), which yields uniform values in (0, 1)
        rank /= ((~i_nan).sum() + 1)
        # apply the inverse Gaussian CDF
        RV[~i_nan, i] = SP.sqrt(2) * special.erfinv(2 * rank - 1)
        # NaNs are passed through unchanged
        RV[i_nan, i] = x[i_nan]
    return RV
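A hypothetical call, assuming SP is bound to scipy or numpy, st to scipy.stats and special to scipy.special, as in the original module:

    import numpy as SP
    import scipy.stats as st
    from scipy import special

    X = SP.array([[3.0, 10.0],
                  [1.0, SP.nan],
                  [2.0, 30.0]])
    print(rankStandardizeNormal(X))
    # column 0 becomes symmetric normal quantiles; the NaN in column 1 is preserved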
Example 8: plot_drainage_curve
def plot_drainage_curve(self, pore_volume='volume', throat_volume='volume',
                        pore_label='all', throat_label='all'):
    r"""
    Plot the drainage capillary pressure curve.
    """
    try:
        PcPoints = sp.unique(self['pore.inv_Pc'])
    except KeyError:
        raise Exception('Cannot plot drainage curve: the ordinary percolation '
                        'simulation has not been run')
    pores = self._net.pores(labels=pore_label)
    throats = self._net.throats(labels=throat_label)
    Snwp_t = sp.zeros_like(PcPoints)
    Snwp_p = sp.zeros_like(PcPoints)
    Pvol = self._net['pore.' + pore_volume]
    Tvol = self._net['throat.' + throat_volume]
    Pvol_tot = sum(Pvol)
    Tvol_tot = sum(Tvol)
    for i in range(0, sp.size(PcPoints)):
        Pc = PcPoints[i]
        Snwp_p[i] = sum(Pvol[self._p_inv[pores] <= Pc]) / Pvol_tot
        Snwp_t[i] = sum(Tvol[self._t_inv[throats] <= Pc]) / Tvol_tot
    if sp.mean(self._phase_inv["pore.contact_angle"]) < 90:
        Snwp_p = 1 - Snwp_p
        Snwp_t = 1 - Snwp_t
        PcPoints *= -1
    # TODO: Add a legend to distinguish the pore and throat curves
    plt.plot(PcPoints, Snwp_p, 'r.-')
    plt.plot(PcPoints, Snwp_t, 'b.-')
    #plt.xlim(xmin=0)
    plt.show()
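The saturation bookkeeping in the loop above reduces to: at each capillary pressure, sum the volume of the elements whose invasion pressure has been reached and normalize by the total volume. A standalone sketch with made-up invasion pressures, no OpenPNM required:

    import numpy as sp

    p_inv = sp.array([1.0, 2.0, 2.0, 3.0])   # invasion pressure of each pore
    Pvol  = sp.array([1.0, 1.0, 2.0, 1.0])   # pore volumes
    PcPoints = sp.unique(p_inv)
    Snwp_p = sp.zeros_like(PcPoints)
    for i, Pc in enumerate(PcPoints):
        Snwp_p[i] = Pvol[p_inv <= Pc].sum() / Pvol.sum()
    print(PcPoints)  # [1. 2. 3.]
    print(Snwp_p)    # [0.2 0.8 1. ]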
Example 9: porosity_profile
def porosity_profile(network, fig=None, axis=2):
    r'''
    Compute and plot the porosity profile along the specified dimension
    Parameters
    ----------
    network : OpenPNM Network object
    axis : integer, 0 for the x-axis, 1 for the y-axis, 2 for the z-axis
    Notes
    -----
    The area of the porous medium at any position is calculated from the
    maximum pore coordinates in each direction.
    '''
    if fig is None:
        fig = _plt.figure()
    # (21/88.0) approximates 3/(4*pi), converting pore volume to an equivalent
    # sphere radius; (22/7.0) below approximates pi.
    L_x = _sp.amax(network['pore.coords'][:, 0]) + _sp.mean(((21/88.0)*network['pore.volume'])**(1/3.0))
    L_y = _sp.amax(network['pore.coords'][:, 1]) + _sp.mean(((21/88.0)*network['pore.volume'])**(1/3.0))
    L_z = _sp.amax(network['pore.coords'][:, 2]) + _sp.mean(((21/88.0)*network['pore.volume'])**(1/3.0))
    if axis == 0:
        xlab = 'x-direction'
        area = L_y*L_z
    elif axis == 1:
        xlab = 'y-direction'
        area = L_x*L_z
    else:
        axis = 2
        xlab = 'z-direction'
        area = L_x*L_y
    n_max = _sp.amax(network['pore.coords'][:, axis]) + _sp.mean(((21/88.0)*network['pore.volume'])**(1/3.0))
    steps = _sp.linspace(0, n_max, 100, endpoint=True)
    vals = _sp.zeros_like(steps)
    p_area = _sp.zeros_like(steps)
    t_area = _sp.zeros_like(steps)
    rp = ((21/88.0)*network['pore.volume'])**(1/3.0)
    p_upper = network['pore.coords'][:, axis] + rp
    p_lower = network['pore.coords'][:, axis] - rp
    TC1 = network['throat.conns'][:, 0]
    TC2 = network['throat.conns'][:, 1]
    t_upper = network['pore.coords'][:, axis][TC1]
    t_lower = network['pore.coords'][:, axis][TC2]
    for i in range(0, len(steps)):
        # pores and throats whose extent straddles the current slice
        p_temp = (p_upper > steps[i])*(p_lower < steps[i])
        t_temp = (t_upper > steps[i])*(t_lower < steps[i])
        p_area[i] = sum((22/7.0)*(rp[p_temp]**2 - (network['pore.coords'][:, axis][p_temp]-steps[i])**2))
        t_area[i] = sum(network['throat.area'][t_temp])
        vals[i] = (p_area[i]+t_area[i])/area
    _plt.plot(steps/n_max, vals, 'bo-')
    _plt.xlabel(xlab)
    _plt.ylabel('Porosity')
    fig.show()
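The per-slice pore contribution above uses the fact that a sphere of radius r cut by a plane at distance d from its center exposes a disc of area pi*(r**2 - d**2). A quick numeric check of that identity and of the 22/7 approximation, independent of OpenPNM:

    import numpy as _sp

    r, d = 2.0, 1.0
    disc_area = _sp.pi * (r**2 - d**2)   # exact: 3*pi
    print(_sp.isclose(disc_area, (22/7.0) * (r**2 - d**2), rtol=1e-3))   # True: 22/7 ~ pi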
Example 10: cut_to_stump
def cut_to_stump(self):
    # Reset the tree to a single root node covering the whole subsample.
    self.max_depth = 0
    self.node_ind = 0
    self.nodes[self.node_ind] = 0
    self.start_index[self.node_ind] = 0
    self.end_index[self.node_ind] = self.subsample.size
    self.num_nodes = 1
    self.num_leafs = 0
    self.left_child = SP.zeros_like(self.left_child)
    self.right_child = SP.zeros_like(self.right_child)
Example 11: predict
def predict(data, coeffs):
    """
    Calculate an autoregressive linear prediction given the signal and the
    prediction coefficients.
    Parameters
    ----------
    data : numpy array
        The signal.
    coeffs : numpy array
        The prediction coefficients.
    Returns
    -------
    data : numpy array
        The predicted signal.
    Notes
    -----
    * The first coefficient, 1, is assumed to be left out.
    Prediction works as follows:
        P    a1   a2   a3   a4
        #    _    _    _    _
        #    #    _    _    _
        #    #    #    _    _
        # =  #  + #  + #  + _
        _    #    #    #    #
        _    _    #    #    #
        _    _    _    #    #
        _    _    _    _    #
    Where # is a number and _ is a "don't care".
    This means:
    1. Create an empty pred vector, padded by the number of coefficients
       at the end
    2. Pad the original values by the number of coefficients at both ends
    3. Crop the data in each step accordingly
    4. Crop the prediction
    """
    coeffs = -coeffs   # negate a copy rather than the caller's array
    pred = scipy.zeros_like(data)
    tmp = numpy.hstack((scipy.zeros_like(coeffs), data))
    for j in range(0, coeffs.size):
        offset = coeffs.size - j - 1
        pred = pred + coeffs[j] * tmp[offset:offset + len(pred)]
    return pred[:len(data)]
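A hypothetical first-order check (requires the scipy.zeros_like alias, or substituting numpy inside the function body). With a single coefficient of -1, the convention that the leading 1 is omitted and signs are flipped inside predict means the model is x[t] ~ x[t-1]:

    import numpy

    signal = numpy.array([1.0, 2.0, 3.0, 4.0, 5.0])
    coeffs = numpy.array([-1.0])
    print(predict(signal, coeffs))   # [0. 1. 2. 3. 4.]: each sample predicted by its predecessor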
Example 12: par_fixed_effect
def par_fixed_effect(tc, X, oob, depth):
    # tc is an IPython parallel client; fixed_effect is evaluated on every
    # engine and the per-engine results are summed here.
    import scipy as SP
    dview = tc[:]
    dview.block = True
    results = dview.apply(fixed_effect, *[X, oob, depth])
    fixed_sum = SP.zeros_like(results[0][0])
    count = SP.zeros_like(results[0][1])
    for res in results:
        fixed_sum += res[0]
        count += res[1]
    return fixed_sum, count
Example 13: par_get_variable_scores
def par_get_variable_scores(tc):
    # As above, tc is an IPython parallel client; get_variable_scores runs on
    # each engine and the results are accumulated.
    import scipy as SP
    dview = tc[:]
    dview.block = True
    results = dview.apply(get_variable_scores)
    var_used = SP.zeros_like(results[0][0])
    log_importance = SP.zeros_like(var_used)
    for result in results:
        var_used += result[0]
        log_importance += result[1]
    return var_used, log_importance
Example 14: ranktrafo
def ranktrafo(data):
    # data is a pandas Series; the sp alias needs scipy.stats and
    # scipy.special imported so that sp.stats / sp.special resolve.
    X = data.values[:, None]
    RV = sp.zeros_like(X)
    for i in range(X.shape[1]):
        x = X[:, i]
        rank = sp.stats.rankdata(x)
        rank /= (X.shape[0] + 1)   # map ranks to the open interval (0, 1)
        RV[:, i] = sp.sqrt(2) * sp.special.erfinv(2 * rank - 1)   # inverse normal CDF
    return RV.flatten()
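A hypothetical call, on an older SciPy where the top-level numpy aliases (zeros_like, sqrt) still exist; on modern installs substitute numpy for those sp calls:

    import scipy as sp
    import scipy.stats, scipy.special
    import pandas as pd

    scores = pd.Series([0.10, 0.50, 0.20, 0.90])
    print(ranktrafo(scores))   # monotone map onto normal quantiles, roughly [-0.84  0.25 -0.25  0.84]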
Example 15: exp_gauss_warp
def exp_gauss_warp(X, n, l0, *msb):
    r"""Length scale function which is an exponential of a sum of Gaussians.
    The centers and widths of the Gaussians are free parameters.
    The length scale function is given by
    .. math::
        l = l_0 \exp\left ( \sum_{i=1}^{N}\beta_i\exp\left ( -\frac{(x-\mu_i)^2}{2\sigma_i^2} \right ) \right )
    The number of parameters is equal to three times the number of Gaussians
    plus 1 (for :math:`l_0`). This function is inspired by what Gibbs used in
    his PhD thesis.
    Parameters
    ----------
    X : 1d or 2d array of float
        The points to evaluate the function at. If 2d, it should only have
        one column (but this is not checked to save time).
    n : int
        The derivative order to compute. Used for all `X`.
    l0 : float
        The covariance length scale at the edges of the domain.
    *msb : floats
        Means, standard deviations and weights for each Gaussian, in that order.
    """
    X = scipy.asarray(X, dtype=float)
    msb = scipy.asarray(msb, dtype=float)
    # msb holds the means, then the standard deviations, then the weights,
    # each len(msb) // 3 long (integer division is required on Python 3)
    mm = msb[:len(msb) // 3]
    ss = msb[len(msb) // 3:2 * len(msb) // 3]
    bb = msb[2 * len(msb) // 3:]
    # This is done with for-loops, because trying to get fancy with
    # broadcasting was being too memory-intensive for some reason.
    if n == 0:
        l = scipy.zeros_like(X)
        for m, s, b in zip(mm, ss, bb):
            l += b * scipy.exp(-(X - m)**2.0 / (2.0 * s**2.0))
        l = l0 * scipy.exp(l)
        return l
    elif n == 1:
        l1 = scipy.zeros_like(X)
        l2 = scipy.zeros_like(X)
        for m, s, b in zip(mm, ss, bb):
            term = b * scipy.exp(-(X - m)**2.0 / (2.0 * s**2.0))
            l1 += term
            l2 += term * (X - m) / s**2.0
        l = -l0 * scipy.exp(l1) * l2
        return l
    else:
        raise NotImplementedError("Only n <= 1 is supported!")
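A hypothetical smoke test with a single Gaussian bump (mu=0.5, sigma=0.1, beta=1.0), assuming an environment where the scipy top-level numpy aliases used above still exist (otherwise substitute numpy for scipy inside the function). The n=1 branch can be checked against a central finite difference:

    import numpy as np

    x = np.linspace(0.0, 1.0, 5)
    l = exp_gauss_warp(x, 0, 1.0, 0.5, 0.1, 1.0)
    print(l)   # peaks at x = 0.5, where l = l0 * e**1.0

    h = 1e-6
    fd = (exp_gauss_warp(x + h, 0, 1.0, 0.5, 0.1, 1.0)
          - exp_gauss_warp(x - h, 0, 1.0, 0.5, 0.1, 1.0)) / (2.0 * h)
    print(np.allclose(fd, exp_gauss_warp(x, 1, 1.0, 0.5, 0.1, 1.0), rtol=1e-4))   # True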