本文整理汇总了Python中scipy.hstack函数的典型用法代码示例。如果您正苦于以下问题:Python hstack函数的具体用法?Python hstack怎么用?Python hstack使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了hstack函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: bounds
def bounds(Xs, Ys, ns=100):
    """Infer a posterior mean and one-std bounds over sets of x/y data with differing x.

    - *Xs*, *Ys*: sequences of 1-d arrays; concatenated into one data set
    - *ns*: number of support points at which to evaluate the posterior
    Returns [sup, m, std]: support points, posterior mean, posterior std
    (observation noise included).
    """
    X = sp.hstack(Xs)
    Y = sp.hstack(Ys)
    # renamed from 'np' (shadowed the conventional numpy alias) and computed
    # once instead of twice
    n_pts = X.size
    X.resize([n_pts, 1])
    Y.resize([n_pts, 1])
    S = sp.zeros(n_pts)
    D = [[sp.NaN]] * n_pts
    ki = GPdc.MAT52CS
    mprior = sp.array([1., 2., 1.])
    sprior = sp.array([2., 2., 2.])
    # MAP hyperparameter search disabled; fixed hyperparameters used instead.
    # MAPH = GPdc.searchMAPhyp(X, Y, S, D, mprior, sprior, ki, mx=500)
    MAPH = sp.array([0.5, 5., 0.3])
    g = GPdc.GPcore(X, Y, S, D, GPdc.kernel(ki, 1, MAPH))
    sup = sp.linspace(min(X), max(X), ns)
    [m, V] = g.infer_diag_post(sup, [[sp.NaN]] * ns)
    # MAPH[2] is the observation-noise hyperparameter, added to the
    # posterior variance before taking the square root
    std = sp.sqrt(V + MAPH[2])
    return [sup, m, std]
示例2: make_data_twoclass
def make_data_twoclass(N=50):
    """Generate a toy two-class Gaussian data set.

    - *N*: total number of samples (split evenly between the two classes)
    Returns (X, Y): X is (2, N) features, Y is (1, N) labels in {+1, -1}.
    """
    mu = sp.array([[0, 2], [0, -2]]).T
    C = sp.array([[5., 4.], [4., 5.]])
    # integer division: N/2 is a float under true division and is rejected
    # as a sample count / shape by mvn and ones
    half = N // 2
    X = sp.hstack((mvn(mu[:, 0], C, half).T, mvn(mu[:, 1], C, half).T))
    Y = sp.hstack((sp.ones((1, half)), -sp.ones((1, half))))
    return X, Y
示例3: getResultMatrix
def getResultMatrix(self, stst=False, lbls=False):
    """
    Return the scan results as a single array. Kept for backwards
    compatibility; will be superseded by a getOutput() method once this
    scanner uses the new data_scan object.

    - *stst* include steady-state data in the output array
    - *lbls* also return the list of column headers as a tuple (array, labels)

    With *stst* True the columns are
    [scan_parameters]+[state_species+state_flux]+[Useroutput],
    otherwise [scan_parameters]+[Useroutput].
    """
    if stst:
        blocks = [self.ScanSpace, self.SteadyStateResults]
        labels = self.GenOrder + list(self.mod.species) + list(self.mod.reactions)
        if self.HAS_USER_OUTPUT:
            blocks.append(self.UserOutputResults)
            labels = labels + self.UserOutputList
    else:
        blocks = [self.ScanSpace, self.UserOutputResults]
        labels = self.GenOrder + self.UserOutputList
    output_array = scipy.hstack(blocks)
    if lbls:
        return output_array, labels
    return output_array
示例4: __init__
def __init__(self, type='random', pars=parameters()):
    """Build the network adjacency matrix self.A for the requested topology.

    - *type*: 'random' (independent Bernoulli blocks for the E/I populations),
      'none' (no connections) or 'uni_torus' (uniform profile on a torus)
    - *pars*: parameter dictionary (see parameters())

    NOTE(review): the default ``pars=parameters()`` is evaluated once at
    definition time and shared across calls, and the parameter name ``type``
    shadows the builtin; both are kept for interface compatibility.
    """
    if type == 'random':
        # independent Bernoulli connectivity for each population pair
        ee = (rand(pars['Ne'], pars['Ne']) < pars['p_ee'])
        ei = (rand(pars['Ne'], pars['Ni']) < pars['p_ei'])
        ii = (rand(pars['Ni'], pars['Ni']) < pars['p_ii'])
        ie = (rand(pars['Ni'], pars['Ne']) < pars['p_ie'])
        self.A = vstack((hstack((ee, ei)), hstack((ie, ii))))
        n_total = pars['Ne'] + pars['Ni']
        self.A[range(n_total), range(n_total)] = 0  # remove self-loops
    elif type == 'none':
        self.A = zeros((pars['N'], pars['N']))  # no connectivity
    elif type == 'uni_torus':  # torus with uniform connectivity profile
        self.A = zeros((pars['N'], pars['N']))
        # matrix of pairwise toric distances (symmetric, zero diagonal)
        distMat = zeros((pars['N'], pars['N']))
        for n1 in range(pars['N']):
            coord1 = linear2grid(n1, pars['N_col'])
            for n2 in arange(n1 + 1, pars['N']):
                # shift so that neuron n1 sits at the origin
                coord2 = linear2grid(n2, pars['N_col']) - coord1
                distMat[n1, n2] = toric_length(coord2, pars['N_row'], pars['N_col'])
        distMat = distMat + distMat.transpose()
        # connect each neuron to at most 'ncon' random neighbours within 'sigma_con'
        for n1 in range(pars['N']):
            neighbor_ids = nonzero(distMat[:, n1] < pars['sigma_con'])[0]
            random.shuffle(neighbor_ids)
            idx = neighbor_ids[0:min([pars['ncon'], len(neighbor_ids)])]
            self.A[idx, n1] = 1
    else:
        # parenthesised print works on both Python 2 and 3; the original bare
        # print statement is a SyntaxError on Python 3 and inconsistent with
        # the print(...) calls used elsewhere in this file
        print("type " + type + " not yet implemented")
示例5: MNEfit
def MNEfit(stim,resp,order):
    """Fit a maximum-noise-entropy (MNE) model of the given order.

    stim  -- (Nsamples x Ndim) stimulus; the elementwise ``stim.T*resp`` usage
             below suggests numpy matrix types are expected -- TODO confirm
    resp  -- (Nsamples x 1) binary response vector
    order -- model order; order > 1 adds second-order (pairwise) constraints
    Returns the optimized parameter vector from conjugate-gradient descent.
    """
    # in order for dlogloss to work, we need to know -<g(yt(n),xt)>data
    # == calculate the constrained averages over the data set
    Nsamples = sp.size(stim,0)
    Ndim = sp.size(stim,1)
    psp = sp.mean(sp.mean(resp)) #spike probability (first constraint)
    avg = (1.0*stim.T*resp)/(Nsamples*1.0)  # first-order constraint <x*y>
    avgs = sp.vstack((psp,avg))
    if(order > 1):
        # second-order constraint <x_i*x_j*y>, flattened into a column vector
        avgsqrd = (stim.T*1.0)*(sp.array(sp.tile(resp,(1,Ndim)))*sp.array(stim))/(Nsamples*1.0)
        avgsqrd = sp.reshape(avgsqrd,(Ndim**2,1))
        avgs = sp.vstack((avgs,avgsqrd))
    #initialize params: bias from the spike probability (inverse sigmoid),
    #then small random first- (and optionally second-) order weights
    pstart = sp.log(1/avgs[0,0] - 1)
    pstart = sp.hstack((pstart,(.001*(2*sp.random.rand(Ndim)-1))))
    if(order > 1):
        # symmetrised random initialisation for the quadratic term
        temp = .0005*(2*sp.random.rand(Ndim,Ndim)-1)
        pstart = sp.hstack((pstart,sp.reshape(temp+temp.T,(1,Ndim**2))[0]))
    #redefine functions with fixed vals: close over the data so the optimizer
    #only sees the parameter vector
    def logLoss(p):
        return LLF.log_loss(p, stim, resp, order)
    def dlogLoss(p):
        return LLF.d_log_loss(p, stim, avgs, order)
    #run the function:
    #pfinal = opt.fmin_tnc(logLoss,pstart,fprime=dlogLoss)
    # conjugate-gradient:
    pfinal = opt.fmin_cg(logLoss,pstart,fprime=dlogLoss)
    #pfinal = opt.fmin(logLoss,pstart,fprime=dlogLoss)
    return pfinal
示例6: solver
def solver(M, _k, _sigma=0., _tol=1e-7):
    """Compute the _k eigenpairs of the sparse symmetric matrix M nearest _sigma.

    - *M*: symmetric (sparse) matrix
    - *_k*: number of eigenvalue/eigenvector pairs requested
    - *_sigma*: shift for shift-invert mode (scipy >= 0.10)
    - *_tol*: solver tolerance
    Returns (eigval, eigvec). On ARPACK non-convergence the partial result is
    zero-padded up to _k pairs.
    """
    # fixes a NameError: 'n' was previously undefined in the padding branch
    n = M.shape[0]
    # compare the version numerically; the original string comparison
    # ('10' / '8' / '9' against the minor component) matched no branch at all
    # for scipy >= 1.0, leaving eigval/eigvec unbound
    version = tuple(int(v) for v in scipy.__version__.split('.')[:2])
    try:
        if version >= (0, 10):
            # eigsh sparse eigensolver with sigma (shift-invert) support
            eigval, eigvec = SparseLinalg.eigsh(M, k=_k, sigma=_sigma, tol=_tol)
        else:
            # eigsh without sigma support (scipy < 0.10); ask for more than _k
            # eigenvectors, otherwise the solver is unstable
            eigval, eigvec = SparseLinalg.eigsh(M, k=_k*10, which='SM')
    except SparseLinalg.ArpackNoConvergence as excobj:
        # ArpackNoConvergence is exposed on the public scipy.sparse.linalg
        # namespace; the old SparseLinalg.arpack submodule path is deprecated
        print("ARPACK iteration did not converge")
        eigval, eigvec = excobj.eigenvalues, excobj.eigenvectors
        # zero-pad the converged partial result up to the requested _k pairs;
        # numpy.hstack replaces scipy.hstack (removed from modern scipy)
        eigval = numpy.hstack((eigval, numpy.zeros(_k - eigval.shape[0])))
        eigvec = numpy.hstack((eigvec, numpy.zeros((n, _k - eigvec.shape[1]))))
    return eigval, eigvec
示例7: coulomb_mat_eigvals
def coulomb_mat_eigvals(atoms, at_idx, r_cut, do_calc_connect=True, n_eigs=20):
    """Sorted Coulomb-matrix eigenvalues for atom *at_idx* and its neighbours.

    - *atoms*: quippy-style Atoms object (uses .neighbours, .z, .set_cutoff,
      .calc_connect) -- TODO confirm exact type
    - *at_idx*: index of the central atom
    - *r_cut*: NOTE(review): unused; the neighbour cutoff is hard-coded to 8.0
      below -- confirm whether it should be r_cut
    - *do_calc_connect*: recompute the neighbour list first
    - *n_eigs*: number of eigenvalues returned (zero-padded if fewer exist);
      None returns all
    Returns a 1-d array of eigenvalues in descending order.
    """
    if do_calc_connect:
        atoms.set_cutoff(8.0)
        atoms.calc_connect()
    # neighbour positions relative to the central atom; the central atom
    # itself is appended at the origin
    pos = sp.vstack((sp.asarray([sp.asarray(a.diff) for a in atoms.neighbours[at_idx]]), sp.zeros(3)))
    Z = sp.hstack((sp.asarray([atoms.z[a.j] for a in atoms.neighbours[at_idx]]), atoms.z[at_idx]))
    # off-diagonal entries Z_i*Z_j/|r_i - r_j|; adding eye() keeps the
    # diagonal division finite before it is overwritten below
    M = sp.outer(Z, Z) / (sp.spatial.distance_matrix(pos, pos) + np.eye(pos.shape[0]))
    sp.fill_diagonal(M, 0.5 * Z ** 2.4)  # conventional Coulomb-matrix diagonal
    eigs = (LA.eigh(M, eigvals_only=True))[::-1]  # descending order
    if n_eigs is None:  # fixed: identity comparison instead of '== None'
        return eigs  # all eigenvalues
    elif eigs.size >= n_eigs:
        return eigs[:n_eigs]  # only the first few
    else:
        return sp.hstack((eigs, sp.zeros(n_eigs - eigs.size)))  # zero-padded
示例8: cv
def cv(nn_name, d_num=10000, k_fold=7, score_metrics='accuracy', verbose=0):
    """Run k-fold cross-validation for one of the cascade CNNs.

    - *nn_name*: network identifier ('12-net', '24-net', '48-net' or a
      calibration variant containing 'calib')
    - *d_num*: maximum number of samples to use
    - *k_fold*: number of CV folds
    - *score_metrics*: 'accuracy' for accuracy_score, anything else for f1_score
    - *verbose*: passed through to the 12-net constructor
    Returns the mean score over all folds.
    """
    suff = str(nn_name[:2])
    # NOTE(review): find(...) > 0 assumes 'calib' never starts the name -- TODO confirm
    if nn_name.find('calib') > 0:
        X_data_name = 'train_data_icalib_' + suff + '.npy'
        y_data_name = 'labels_icalib_' + suff + '.npy'
    else:
        X_data_name = 'train_data_' + suff + '.npy'
        y_data_name = 'labels_' + suff + '.npy'
    X, y = sp.load(X_data_name), sp.load(y_data_name)
    d_num = min(len(X), d_num)
    X = X[:d_num]
    y = y[:d_num]
    # stepwise learning-rate schedules for each net size
    rates12 = sp.hstack((0.05 * sp.ones(25, dtype=sp.float32), 0.005 * sp.ones(15, dtype=sp.float32), 0.0005 * sp.ones(10, dtype=sp.float32)))
    rates24 = sp.hstack((0.01 * sp.ones(25, dtype=sp.float32), 0.0001 * sp.ones(15, dtype=sp.float32)))
    rates48 = sp.hstack([0.05 * sp.ones(15, dtype=sp.float32), 0.005 * sp.ones(10, dtype=sp.float32)])
    # larger nets also need the inputs of the smaller nets in the cascade
    if nn_name == '48-net':
        X12 = sp.load('train_data_12.npy')[:d_num]
        X24 = sp.load('train_data_24.npy')[:d_num]
    elif nn_name == '24-net':
        X12 = sp.load('train_data_12.npy')[:d_num]
    if score_metrics == 'accuracy':
        score_fn = accuracy_score
    else:
        score_fn = f1_score
    scores = []
    iteration = 0
    for t_indx, v_indx in util.kfold(X, y, k_fold=k_fold):
        nn = None
        X_train, X_test, y_train, y_test = X[t_indx], X[v_indx], y[t_indx], y[v_indx]
        if nn_name == '24-net':
            nn = Cnnl(nn_name=nn_name, l_rates=rates24,
                      subnet=Cnnl(nn_name='12-net', l_rates=rates12).load_model(
                          '12-net_lasagne_.pickle'))
            nn.fit(X=X_train, y=y_train, X12=X12[t_indx])
        elif nn_name == '48-net':
            nn = Cnnl(nn_name=nn_name, l_rates=rates48,
                      subnet=Cnnl(nn_name='24-net', l_rates=rates24,
                                  subnet=Cnnl(nn_name='12-net', l_rates=rates12).load_model(
                                      '12-net_lasagne_.pickle')).load_model('24-net_lasagne_.pickle'))
            nn.fit(X=X_train, y=y_train, X12=X12[t_indx], X24=X24[t_indx])
        else:
            nn = Cnnl(nn_name=nn_name, l_rates=rates12, verbose=verbose)
            nn.fit(X=X_train, y=y_train)
        if nn_name == '24-net':
            y_pred = nn.predict(X_test, X12=X12[v_indx])
        elif nn_name == '48-net':
            y_pred = nn.predict(X_test, X12=X12[v_indx], X24=X24[v_indx])
        else:
            y_pred = nn.predict(X_test)
        score = score_fn(y_test, y_pred)
        scores.append(score)
        iteration += 1
    score_mean = sp.array(scores).mean()
    # bug fix: report the mean CV score, not the score of the last fold only
    print(d_num, 'mean score', score_mean)
    return score_mean
示例9: backprop
def backprop(self, A_in, Z_out, prev_delta, prev_params):
    """Backpropagate the error signal through this layer.

    - *A_in*: layer input activations (one row per sample)
    - *Z_out*: layer pre-/post-activation output (one row per sample)
    - *prev_delta*, *prev_params*: delta and parameters of the layer above
    Returns (grad, delta): the sample-averaged parameter gradient and the
    delta to pass further down.
    """
    grad_fn = GRADFNS[self.modelfn]
    n_samples = np.shape(Z_out)[0]
    ones_col = np.ones((n_samples, 1))
    # activation derivative evaluated on the bias-augmented output
    act_grad = grad_fn(np.hstack([ones_col, Z_out]))
    delta = np.dot(prev_params.T, prev_delta) * act_grad.T
    # drop the bias row of delta before forming the gradient
    grad = np.dot(delta[1:, :], np.hstack([ones_col, A_in])) / n_samples
    return grad, delta
示例10: funky
def funky():
    """Build two triangulations of a small 2-d point set.

    Returns (t0, t1): t0 triangulates the five base points, t1 the same
    points with one extra test point appended.
    """
    base_x = sp.array([0.25, 0.3, 0.5, 0.6, 0.6])
    base_y = sp.array([0.2, 0.35, 0.0, 0.25, 0.65])
    extra_x, extra_y = 0.46, 0.23
    tri_base = Triangulation(base_x, base_y)
    tri_extended = Triangulation(sp.hstack((base_x, [extra_x])),
                                 sp.hstack((base_y, [extra_y])))
    return tri_base, tri_extended
示例11: pdist
def pdist(X, idx, q):
    """Compute rows *idx* of the pairwise distance matrix of X, streaming to queue *q*.

    Each computed row i is put on *q* as hstack((i, p[i])); only the upper
    triangle (j > i) of each row is filled. A sentinel row hstack((-1, zeros(N)))
    is sent when all rows are done.

    NOTE(review): this function shadows scipy.spatial.distance.pdist; the
    name is kept for interface compatibility.
    """
    N = len(X)
    p = scipy.zeros((N, N))
    for i in idx:
        # start at i + 1: d(i, i) is 0 and the original skipped i == j with a
        # per-iteration branch anyway
        for j in scipy.arange(i + 1, N):
            p[i, j] = dist(X[i], X[j])
        q.put(scipy.hstack((i, p[i])))
    q.put(scipy.hstack((-1, scipy.zeros(N))))  # sentinel: signals completion
示例12: make_data_xor
def make_data_xor(N=80, noise=.25):
    """Generate a toy XOR data set from four Gaussian blobs.

    - *N*: total number of samples (N//4 per blob)
    - *noise*: isotropic variance of each blob
    Returns (X, Y): X is (2, N) features, Y is a length-N label vector in
    {+1, -1}; both are shuffled with the same random permutation.
    """
    mu = sp.array([[-1, 1], [1, 1]]).T
    C = sp.eye(2) * noise
    # integer counts: N/4 and N/2. are floats under true division and are
    # rejected as sample counts / shapes by mvn and ones
    quarter = N // 4
    X = sp.hstack((mvn(mu[:, 0], C, quarter).T, mvn(-mu[:, 0], C, quarter).T,
                   mvn(mu[:, 1], C, quarter).T, mvn(-mu[:, 1], C, quarter).T))
    Y = sp.hstack((sp.ones((1, N // 2)), -sp.ones((1, N // 2))))
    randidx = sp.random.permutation(N)
    Y = Y[0, randidx]
    X = X[:, randidx]
    return X, Y
示例13: stripe2
def stripe2():
    """Generate two Gaussian 'stripes' with one-hot class labels.

    Returns (Y, X): Y is a (100, 2) one-hot label matrix (first 50 rows are
    class 0, last 50 class 1); X is a (100, 3) design matrix whose first
    column is the intercept.
    """
    on = sp.ones((50, 1))
    off = sp.zeros((50, 1))
    Y = sp.hstack([sp.vstack((on, off)), sp.vstack((off, on))])
    cov = [[1, .8], [.8, 1]]
    cloud_a = sp.random.multivariate_normal([-2, 2], cov, size=50)
    cloud_b = sp.random.multivariate_normal([2, -1], cov, size=50)
    X = sp.hstack((sp.ones((100, 1)), sp.vstack([cloud_a, cloud_b])))
    return Y, X
示例14: plot
def plot(i,zz):
    """Render figure *i*: the quad outline plus a filled contour of abs(zz).

    - *i*: matplotlib figure number
    - *zz*: 2-d field sampled on the module-level (x_samples, y_samples) grid
    Relies on module-level quad_x, quad_y, x_samples, y_samples -- presumably
    set up by the surrounding script; TODO confirm.
    """
    plt.figure(i, figsize=(10,10))
    # close the quad outline by re-appending its first vertex
    plt.plot(sp.hstack((quad_x,quad_x[0])),sp.hstack((quad_y,quad_y[0])), '-g')
    plt.plot(quad_x[0],quad_y[0], 'ro')  # mark the first vertex
    plt.axis('equal')
    plt.grid('on')
    plt.xlim((9,12))
    plt.ylim((9,12))
    #plt.contourf(x_samples,y_samples,z_samples,100, interpolation=None)
    plt.contourf(x_samples,y_samples,abs(zz),100, interpolation=None)
    plt.colorbar()
示例15: draw_support_inplane
def draw_support_inplane(g, lb, ub, n, method, axis, value, para=1.0):
    """Draw n support points restricted to the hyperplane {x[axis] == value}.

    The box bounds are reduced by dropping dimension *axis*, support is drawn
    in the reduced space via draw_support, and the fixed coordinate is then
    re-inserted as a constant column at position *axis*.

    - *g*: GP object (wrapped by gpfake) or an int code (decremented --
      TODO confirm the meaning of the integer convention)
    Returns an (n, d) array whose column *axis* equals *value*.
    """
    # parenthesised print works on both Python 2 and 3; the original bare
    # print statement is a SyntaxError on Python 3
    print("dsinplane axis:{} value:{}".format(axis, value))
    if type(g) is int:
        gf = g - 1
    else:
        gf = gpfake(g, axis, value)
    # drop dimension 'axis' from the box bounds
    lb_red = sp.hstack([lb[:axis], lb[axis + 1:]])
    ub_red = sp.hstack([ub[:axis], ub[axis + 1:]])
    X = draw_support(gf, lb_red, ub_red, n, method, para=para)
    # re-insert the fixed coordinate as a constant column at position 'axis'
    return sp.hstack([X[:, :axis], sp.ones([n, 1]) * value, X[:, axis:]])