This article collects typical usage examples of the scipy.ones function in Python. If you have been wondering how exactly to use scipy.ones, or what it is good for, the hand-picked code examples below should help.
The following shows 15 code examples of the ones function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
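Before the examples, a note on the function itself: scipy.ones is simply a re-export of numpy.ones (SciPy historically mirrored many NumPy functions at its top level; these aliases are deprecated and removed in recent SciPy releases, so new code should call numpy.ones directly). A minimal sketch of the call every example below relies on:

import numpy as np  # modern replacement for the scipy.ones alias

a = np.ones(3)            # 1-D array of three 1.0 values
b = np.ones((2, 3))       # shape given as a tuple -> 2x3 array of ones
c = np.ones([4, 1])       # a list works as a shape too, as in the examples below
d = np.ones((2, 2), int)  # optional dtype argument
print(a, b.shape, c.shape, d.dtype)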
Example 1: test_pore2centroid

def test_pore2centroid(self):
    temp_coords = self.net['pore.coords']
    self.geo['pore.centroid'] = sp.ones([self.geo.num_pores(), 3])
    vo.pore2centroid(self.net)
    assert sp.sum(self.net['pore.coords'] -
                  sp.ones([self.geo.num_pores(), 3])) == 0.0
    self.net['pore.coords'] = temp_coords
Example 2: kalman_filter

def kalman_filter(b,
                  V,
                  Phi,
                  y,
                  X,
                  sigma,
                  Sigma,
                  switch=0,
                  D=None,
                  d=None,
                  G=None,
                  a=None,
                  c=None):
    r"""
    .. math::
       :nowrap:

       \begin{eqnarray*}
       \beta_{t|t-1} = \Phi \: \beta_{t-1|t-1}\\
       V_{t|t-1} = \Phi V_{t-1|t-1} \Phi^T + \Sigma \\
       e_t = y_t - X_t \beta_{t|t-1}\\
       K_t = V_{t|t-1} X_t^T (\sigma + X_t V_{t|t-1} X_t^T)^{-1}\\
       \beta_{t|t} = \beta_{t|t-1} + K_t e_t\\
       V_{t|t} = (I - K_t X_t) V_{t|t-1}\\
       \end{eqnarray*}
    """
    beta = scipy.empty(scipy.shape(X))
    n = len(b)
    if D is None:
        D = scipy.ones((1, n))
    if d is None:
        d = scipy.matrix(1.)
    if G is None:
        G = scipy.identity(n)
    if a is None:
        a = scipy.zeros((n, 1))
    if c is None:
        c = scipy.ones((n, 1))
    (b, V) = kalman_predict(b, V, Phi, Sigma)
    for i in xrange(len(X)):
        beta[i] = scipy.array(b).T
        (b, V, e, K) = kalman_upd(b,
                                  V,
                                  y[i],
                                  X[i],
                                  sigma,
                                  Sigma,
                                  switch,
                                  D,
                                  d,
                                  G,
                                  a,
                                  c)
        (b, V) = kalman_predict(b, V, Phi, Sigma)
    return beta
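The recursion in the docstring can be sketched standalone with plain NumPy. The following is an illustrative single predict/update step under the docstring's notation, not the project's implementation (kalman_predict and kalman_upd are the project's own helpers and are not reproduced here); b is [n x 1], Phi, V, Sigma are [n x n], X_t is a [1 x n] row, sigma is the scalar observation noise:

import numpy as np

def kalman_step(b, V, Phi, y_t, X_t, sigma, Sigma):
    # predict: beta_{t|t-1} = Phi b,  V_{t|t-1} = Phi V Phi^T + Sigma
    b_pred = Phi.dot(b)
    V_pred = Phi.dot(V).dot(Phi.T) + Sigma
    # innovation: e_t = y_t - X_t beta_{t|t-1}
    e = y_t - X_t.dot(b_pred)
    # gain: K_t = V_{t|t-1} X_t^T (sigma + X_t V_{t|t-1} X_t^T)^{-1}
    S = sigma + X_t.dot(V_pred).dot(X_t.T)
    K = V_pred.dot(X_t.T).dot(np.linalg.inv(S))
    # update: beta_{t|t} = beta_{t|t-1} + K_t e_t,  V_{t|t} = (I - K_t X_t) V_{t|t-1}
    b_new = b_pred + K.dot(e)
    V_new = (np.eye(len(b_pred)) - K.dot(X_t)).dot(V_pred)
    return b_new, V_new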
Example 3: calculateGradient

def calculateGradient(self):
    # normalize rewards
    # self.dataset.data['reward'] /= max(ravel(abs(self.dataset.data['reward'])))
    # initialize variables
    R = ones((self.dataset.getNumSequences(), 1), float)
    X = ones((self.dataset.getNumSequences(), self.loglh.getDimension('loglh') + 1), float)
    # collect sufficient statistics
    print self.dataset.getNumSequences()
    for n in range(self.dataset.getNumSequences()):
        _state, _action, reward = self.dataset.getSequence(n)
        seqidx = ravel(self.dataset['sequence_index'])
        if n == self.dataset.getNumSequences() - 1:
            # last sequence until end of dataset
            loglh = self.loglh['loglh'][seqidx[n]:, :]
        else:
            loglh = self.loglh['loglh'][seqidx[n]:seqidx[n + 1], :]
        X[n, :-1] = sum(loglh, 0)
        R[n, 0] = sum(reward, 0)
    # linear regression
    beta = dot(pinv(X), R)
    return beta[:-1]
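The closing regression is the standard episodic likelihood-ratio estimator: per-episode sums of log-likelihood gradients, plus a bias column of ones that absorbs the baseline, are regressed onto the episode returns. A minimal standalone sketch with hypothetical shapes:

import numpy as np

rng = np.random.default_rng(0)
num_episodes, num_params = 20, 3
X = np.ones((num_episodes, num_params + 1))
X[:, :-1] = rng.normal(size=(num_episodes, num_params))  # summed loglh per episode
R = rng.normal(size=(num_episodes, 1))                   # per-episode returns
beta = np.linalg.pinv(X).dot(R)                          # least-squares fit
gradient_estimate = beta[:-1]                            # drop the baseline coefficient
print(gradient_estimate.shape)                           # (3, 1)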
Example 4: createLargeSubMatrix

def createLargeSubMatrix():
    # Create a large matrix, but with the same number of ones as the small submatrix
    t1 = time.time()
    m = 40000
    n = 1000000
    M = sparse.lil_matrix((m, n))
    # Populate some of the matrix, using the small submatrix dimensions
    m = 500
    n = 20000
    M[0, :] = ones(n)
    M[:, 0] = 1
    M[(m / 2), :] = ones(n)
    M[:, (n / 2)] = 1
    M[(m - 1), :] = ones(n)
    M[:, (n - 1)] = 1
    t2 = time.time()
    print 'Time used: ', (t2 - t1)
    return M
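A side note on the format used above: lil_matrix is cheap to populate row by row, but arithmetic on it is slow, so a common pattern (not shown in the example) is to convert to CSR once population is done. A small sketch:

from scipy import sparse
import numpy as np

M = sparse.lil_matrix((1000, 2000))
M[0, :] = np.ones(2000)   # row assignment is efficient in LIL format
M[:, 0] = 1
M_csr = M.tocsr()         # convert before repeated products or row slicing
print(M_csr.nnz)          # 2999 (row 0 and column 0 share the (0, 0) entry)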
Example 5: phenSpecificEffects

def phenSpecificEffects(snps, pheno1, pheno2, K=None, covs=None, test='lrt'):
    """
    Univariate fixed effects interaction test for phenotype specific SNP effects

    Args:
        snps:   [N x S] SP.array of S SNPs for N individuals (test SNPs)
        pheno1: [N x 1] SP.array of 1 phenotype for N individuals
        pheno2: [N x 1] SP.array of 1 phenotype for N individuals
        K:      [N x N] SP.array of LMM-covariance/kinship coefficients (optional)
                If not provided, then linear regression analysis is performed
        covs:   [N x D] SP.array of D covariates for N individuals
        test:   'lrt' for likelihood ratio test (default) or 'f' for F-test

    Returns:
        limix LMM object
    """
    N = snps.shape[0]
    if K is None:
        K = SP.eye(N)
    assert pheno1.shape[1] == pheno2.shape[1], "Only consider equal number of phenotype dimensions"
    if covs is None:
        covs = SP.ones((N, 1))
    assert (pheno1.shape[1] == 1 and pheno2.shape[1] == 1 and pheno1.shape[0] == N and
            pheno2.shape[0] == N and K.shape[0] == N and K.shape[1] == N and
            covs.shape[0] == N), "shapes mismatch"
    Inter = SP.zeros((N * 2, 1))
    Inter[0:N, 0] = 1
    Inter0 = SP.ones((N * 2, 1))
    Yinter = SP.concatenate((pheno1, pheno2), 0)
    Xinter = SP.tile(snps, (2, 1))
    Covinter = SP.tile(covs, (2, 1))
    lm = simple_interaction(snps=Xinter, pheno=Yinter, covs=Covinter, Inter=Inter, Inter0=Inter0, test=test)
    return lm
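The stacked design built inside phenSpecificEffects can be hard to see from the code alone; here is the same construction on a toy array (hypothetical N and S, no limix call involved):

import numpy as np

N, S = 4, 2
snps = np.arange(N * S, dtype=float).reshape(N, S)
Inter = np.zeros((2 * N, 1))
Inter[0:N, 0] = 1                 # indicator: rows belonging to phenotype 1
Inter0 = np.ones((2 * N, 1))      # null model: one effect shared by both phenotypes
Xinter = np.tile(snps, (2, 1))    # each SNP column duplicated for both phenotypes
print(Xinter.shape, Inter.sum())  # (8, 2) 4.0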
Example 6: estimateBeta

def estimateBeta(X, Y, K, C=None, addBiasTerm=False, numintervals0=100, ldeltamin0=-5.0, ldeltamax0=5.0):
    """ compute all pvalues
    If numintervalsAlt==0, use the EMMA-X trick (keep delta fixed over alternative models)
    """
    n, s = X.shape
    n_pheno = Y.shape[1]
    S, U = LA.eigh(K)
    UY = SP.dot(U.T, Y)
    UX = SP.dot(U.T, X)
    if C is None:
        Ucovariate = SP.dot(U.T, SP.ones([n, 1]))
    else:
        if addBiasTerm:
            C_ = SP.concatenate((C, SP.ones([n, 1])), axis=1)
            Ucovariate = SP.dot(U.T, C_)
        else:
            Ucovariate = SP.dot(U.T, C)
    n_covar = Ucovariate.shape[1]
    beta = SP.empty((n_pheno, s, n_covar + 1))
    LL = SP.ones((n_pheno, s)) * (-SP.inf)
    ldelta = SP.empty((n_pheno, s))
    sigg2 = SP.empty((n_pheno, s))
    pval = SP.ones((n_pheno, s)) * (-SP.inf)
    for phen in SP.arange(n_pheno):
        UY_ = UY[:, phen]
        ldelta[phen] = optdelta(UY_, Ucovariate, S, ldeltanull=None, numintervals=numintervals0, ldeltamin=ldeltamin0, ldeltamax=ldeltamax0)
        for snp in SP.arange(s):
            UX_ = SP.hstack((UX[:, snp:snp + 1], Ucovariate))
            nLL_, beta_, sigg2_ = nLLeval(ldelta[phen, snp], UY_, UX_, S, MLparams=True)
            beta[phen, snp, :] = beta_
            sigg2[phen, snp] = sigg2_
            LL[phen, snp] = -nLL_
    return beta, ldelta
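The eigendecomposition at the top of estimateBeta is the usual mixed-model speed trick: rotating everything by U.T diagonalizes the covariance implied by the kinship K, so each likelihood evaluation becomes O(n) instead of O(n^3). A toy sketch of just the rotation:

import numpy as np

rng = np.random.default_rng(1)
n = 5
A = rng.normal(size=(n, n))
K = A.dot(A.T)                # toy positive semi-definite kinship
S, U = np.linalg.eigh(K)      # K = U diag(S) U^T
y = rng.normal(size=(n, 1))
Uy = U.T.dot(y)               # rotated phenotype, like UY in the example
# In the rotated basis cov(Uy) = sigma_g^2 * diag(S) + sigma_e^2 * I,
# which is diagonal, so the log-likelihood factorizes over coordinates.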
Example 7: _additionalInit

def _additionalInit(self):
    assert self.numberOfCenters == 1, 'Mixtures of Gaussians not supported yet.'
    xdim = self.numParameters
    self.alphas = ones(self.numberOfCenters) / float(self.numberOfCenters)
    self.mus = []
    self.sigmas = []
    if self.rangemins is None:
        self.rangemins = -ones(xdim)
    if self.rangemaxs is None:
        self.rangemaxs = ones(xdim)
    if self.initCovariances is None:
        if self.diagonalOnly:
            self.initCovariances = ones(xdim)
        else:
            self.initCovariances = eye(xdim)
    for _ in range(self.numberOfCenters):
        self.mus.append(rand(xdim) * (self.rangemaxs - self.rangemins) + self.rangemins)
        self.sigmas.append(dot(eye(xdim), self.initCovariances))
    self.samples = list(range(self.windowSize))
    self.fitnesses = zeros(self.windowSize)
    self.generation = 0
    self.allsamples = []
    self.muevals = []
    self.allmus = []
    self.allsigmas = []
    self.allalphas = []
    self.allUpdateSizes = []
    self.allfitnesses = []
    self.meanShifts = [zeros(self.numParameters) for _ in range(self.numberOfCenters)]
    self._oneEvaluation(self._initEvaluable)
Example 8: make_data_twoclass

def make_data_twoclass(N=50):
    # generates some toy data
    mu = sp.array([[0, 2], [0, -2]]).T
    C = sp.array([[5., 4.], [4., 5.]])
    X = sp.hstack((mvn(mu[:, 0], C, N / 2).T, mvn(mu[:, 1], C, N / 2).T))
    Y = sp.hstack((sp.ones((1, N / 2)), -sp.ones((1, N / 2))))
    return X, Y
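A possible usage sketch for the generator above (assuming mvn was imported as numpy.random.multivariate_normal and matplotlib is available):

import matplotlib.pyplot as plt

X, Y = make_data_twoclass(100)
plt.scatter(X[0, :], X[1, :], c=Y.ravel())  # color points by class label (+1 / -1)
plt.show()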
Example 9: plot_median_errors

def plot_median_errors(RefinementLevels):
    for i in RefinementLevels[0].cases:
        x = []
        y = []
        print "Analyzing median error on: ", i
        for r in RefinementLevels:
            x.append(r.LUT.D_dim * r.LUT.P_dim)
            r.get_REL_ERR_SU2(i)
            y.append(r.SU2[i].median_ERR * 100)
        x = sp.array(x)
        y = sp.array(y)
        y = y[sp.argsort(x)]
        x = x[sp.argsort(x)]
        LHM = sp.ones((len(x), 2))
        RHS = sp.ones((len(x), 1))
        LHM[:, 1] = sp.log10(x)
        RHS[:, 0] = sp.log10(y)
        sols = sp.linalg.lstsq(LHM, RHS)
        b = -sols[0][1]
        plt.loglog(x, y, label='%s, %s' % (i, r'$O(\frac{1}{N})^{%s}$' % str(sp.around(b, 2))),
                   basex=10, basey=10,
                   subsy=sp.linspace(10**(-5), 10**(-2), 20),
                   subsx=sp.linspace(10**(2), 10**(5), 50))
        # for r in RefinementLevels:
        #     x.append(r.LUT.D_dim*r.LUT.P_dim)
        #     r.get_REL_ERR_SciPy(i)
        #     y.append(r.SciPy[i].median_ERR*100)
        # plt.plot(x, y, label='SciPy: %s' % i)
    plt.grid(which='both')
    plt.xlabel('Grid Nodes (N)')
    plt.ylabel('Median relative error [%]')
    return
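The convergence order reported in the legend comes from a straight-line fit in log-log space (the lstsq call above); a minimal standalone version of that fit:

import numpy as np

x = np.array([1e2, 1e3, 1e4])    # grid sizes
y = 50.0 / x                     # toy error data decaying like O(1/N)
A = np.ones((len(x), 2))
A[:, 1] = np.log10(x)
coef, *_ = np.linalg.lstsq(A, np.log10(y), rcond=None)
order = -coef[1]                 # slope sign flipped, as in the example
print(round(float(order), 2))    # 1.0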
Example 10: addFixedEffect

def addFixedEffect(self, F=None, A=None, Ftest=None):
    """
    add fixed effect term to the model

    Args:
        F:     sample design matrix for the fixed effect [N,K]
        A:     trait design matrix for the fixed effect (e.g. sp.ones((1,P)) for a common effect; sp.eye(P) for any effect) [L,P]
        Ftest: sample design matrix for test samples [Ntest,K]
    """
    if A is None:
        A = sp.eye(self.P)
    if F is None:
        F = sp.ones((self.N, 1))
        if self.Ntest is not None:
            Ftest = sp.ones((self.Ntest, 1))
    assert A.shape[1] == self.P, 'VarianceDecomposition:: A has incompatible shape'
    assert F.shape[0] == self.N, 'VarianceDecomposition:: F has incompatible shape'
    if Ftest is not None:
        assert self.Ntest is not None, 'VarianceDecomposition:: specify Ntest for predictions (method VarianceDecomposition::setTestSampleSize)'
        assert Ftest.shape[0] == self.Ntest, 'VarianceDecomposition:: Ftest has incompatible shape'
        assert Ftest.shape[1] == F.shape[1], 'VarianceDecomposition:: Ftest has incompatible shape'
    # add fixed effect
    self.sample_designs.append(F)
    self.sample_test_designs.append(Ftest)
    self.trait_designs.append(A)
    self._desync()
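The two trait designs mentioned in the docstring, written out for P = 3 traits (a sketch, independent of the class):

import numpy as np

P = 3
A_common = np.ones((1, P))          # a single effect shared by all traits
A_any = np.eye(P)                   # an independent effect per trait
print(A_common.shape, A_any.shape)  # (1, 3) (3, 3)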
Example 11: __init__

def __init__(self, evaluator, evaluable, **parameters):
    BlackBoxOptimizer.__init__(self, evaluator, evaluable, **parameters)
    self.alphas = ones(self.numberOfCenters) / self.numberOfCenters
    self.mus = []
    self.sigmas = []
    self.tau = 1.
    if self.rangemins is None:
        self.rangemins = -ones(self.xdim)
    if self.rangemaxs is None:
        self.rangemaxs = ones(self.xdim)
    if self.initCovariances is None:
        self.initCovariances = eye(self.xdim)
    if self.elitist and self.numberOfCenters == 1 and not self.noisyEvaluator:
        # in the elitist case separate evaluations are not necessary.
        # CHECKME: maybe in the noisy case?
        self.evalMus = False
    assert not (self.useCauchy and self.numberOfCenters > 1)
    for dummy in range(self.numberOfCenters):
        self.mus.append(rand(self.xdim) * (self.rangemaxs - self.rangemins) + self.rangemins)
        self.sigmas.append(dot(eye(self.xdim), self.initCovariances))
    self.reset()
Example 12: gensquexpIPdraw

def gensquexpIPdraw(d, lb, ub, sl, su, sfn, sls, cfn):
    # axis = 0 value = sl
    # d dimensional objective +1 for s
    nt = 25
    [X, Y, S, D] = ESutils.gen_dataset(nt, d + 1,
                                       sp.hstack([sp.array([[sl]]), lb]).flatten(),
                                       sp.hstack([sp.array([[su]]), ub]).flatten(),
                                       GPdc.SQUEXP, sp.array([1.5] + [sls] + [0.30] * d))
    G = GPdc.GPcore(X, Y, S, D, GPdc.kernel(GPdc.SQUEXP, d + 1, sp.array([1.5] + [sls] + [0.30] * d)))

    def obj(x, s, d, override=False):
        x = x.flatten()
        if sfn(x) == 0. or override:
            noise = 0.
        else:
            noise = sp.random.normal(scale=sp.sqrt(sfn(x)))
        return [G.infer_m(x, [d])[0, 0] + noise, cfn(x)]

    def dirwrap(x, y):
        z = obj(sp.array([[sl] + [i for i in x]]), sl, [sp.NaN], override=True)
        return (z, 0)

    [xmin0, ymin0, ierror] = DIRECT.solve(dirwrap, lb, ub, user_data=[], algmethod=1, maxf=89000, logfilename='/dev/null')
    lb2 = xmin0 - sp.ones(d) * 1e-4
    ub2 = xmin0 + sp.ones(d) * 1e-4
    [xmin, ymin, ierror] = DIRECT.solve(dirwrap, lb2, ub2, user_data=[], algmethod=1, maxf=89000, logfilename='/dev/null')
    return [obj, xmin, ymin]
Example 13: lossTraces

def lossTraces(fwrap, aclass, dim, maxsteps, storesteps=None, x0=None,
               initNoise=0., minLoss=1e-10, algoparams={}):
    """ Compute a number of loss curves, for the provided settings,
    stored at specific storestep points. """
    if not storesteps:
        storesteps = range(maxsteps + 1)

    # initial points, potentially noisy
    if x0 is None:
        x0 = ones(dim) + randn(dim) * initNoise

    # tracking progress by callback
    paramtraces = {'index': -1}

    def storer(a):
        lastseen = paramtraces['index']
        for ts in [x for x in storesteps if x > lastseen and x <= a._num_updates]:
            paramtraces[ts] = a.bestParameters.copy()
        paramtraces['index'] = a._num_updates

    # initialization
    algo = aclass(fwrap, x0, callback=storer, **algoparams)
    print algo, fwrap, dim, maxsteps,

    # store initial step
    algo.callback(algo)
    algo.run(maxsteps)

    # process learning curve
    del paramtraces['index']
    paramtraces = array([x for _, x in sorted(paramtraces.items())])
    oloss = mean(fwrap.stochfun.expectedLoss(ones(100) * fwrap.stochfun.optimum))
    ls = abs(fwrap.stochfun.expectedLoss(ravel(paramtraces)) - oloss) + minLoss
    ls = reshape(ls, paramtraces.shape)
    print median(ls[-1])
    return ls
Example 14: do_compare_wedges

def do_compare_wedges(file1="stars-82.txt", file2="Stripe82_coadd.csv", stripe=82,
                      mag=0, size=1.0):
    """ Modify if size is not 1.0 """
    one_run = fi.read_data(file1)
    or_l = len(one_run[:, 0])
    or_hist = sv.plot_wedge_density(one_run, stripe, q=0.458, r0=19.4,
                                    name="_rho1", mag=mag, plot=0, size=size)
    coadd = fi.read_data(file2)
    ca_l = len(coadd[:, 0])
    ca_hist = sv.plot_wedge_density(coadd, stripe, q=0.458, r0=19.4,
                                    name="_rho2", mag=mag, plot=0, size=size)
    # Separate into heights
    or_h = or_hist[:, 1]
    ca_h = ca_hist[:, 1]
    # Divide the first data set by the second
    if len(or_h) < len(ca_h):
        l = len(or_h)
        extra_h = -0.1 * sc.ones((len(ca_h) - l))
    else:
        l = len(ca_h)
        extra_h = 0.1 * sc.ones((len(or_h) - l))
    diff_h = sc.zeros(l)
    for i in range(l):
        diff_h[i] = (or_h[i] / ca_h[i])
    out = sc.zeros((l, 3))
    for i in range(l):
        out[i, 0], out[i, 1] = ca_hist[i, 0], diff_h[i]
        out[i, 2] = 1.0  # ma.sqrt(or_hist[i,2]*or_hist[i,2] + ca_hist[i,2]*ca_hist[i,2])
    return out
Example 15: __init__

def __init__(self, evaluator, evaluable, **parameters):
    BlackBoxOptimizer.__init__(self, evaluator, evaluable, **parameters)
    self.numParams = self.xdim + self.xdim * (self.xdim + 1) // 2
    if self.momentum is not None:
        self.momentumVector = zeros(self.numParams)
    if self.learningRateSigma is None:
        self.learningRateSigma = self.learningRate
    if self.rangemins is None:
        self.rangemins = -ones(self.xdim)
    if self.rangemaxs is None:
        self.rangemaxs = ones(self.xdim)
    if self.initCovariances is None:
        if self.diagonalOnly:
            self.initCovariances = ones(self.xdim)
        else:
            self.initCovariances = eye(self.xdim)
    self.x = rand(self.xdim) * (self.rangemaxs - self.rangemins) + self.rangemins
    self.sigma = dot(eye(self.xdim), self.initCovariances)
    self.factorSigma = cholesky(self.sigma)
    self.reset()