本文整理汇总了Python中scipy.vstack函数的典型用法代码示例。如果您正苦于以下问题:Python vstack函数的具体用法?Python vstack怎么用?Python vstack使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了vstack函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: MNEfit
def MNEfit(stim,resp,order):
    """Fit model parameters of order 1 or 2 to (stim, resp) data by
    conjugate-gradient minimisation of a log-loss.

    NOTE(review): presumably a Maximum Noise Entropy (MNE) fit -- confirm
    against LLF.log_loss.  `stim` appears to be (Nsamples, Ndim) and `resp`
    a column of responses; `stim.T*resp` below uses `*` as matrix
    multiplication, so both are expected to be numpy matrices -- TODO
    confirm with callers.

    Returns the optimised parameter vector from opt.fmin_cg.
    """
    # in order for dlogloss to work, we need to know -<g(yt(n),xt)>data
    # == calculate the constrained averages over the data set
    Nsamples = sp.size(stim,0)
    Ndim = sp.size(stim,1)
    psp = sp.mean(sp.mean(resp)) #spike probability (first constraint)
    # First-order constraint: response-weighted stimulus average.
    avg = (1.0*stim.T*resp)/(Nsamples*1.0)
    avgs = sp.vstack((psp,avg))
    if(order > 1):
        # Second-order constraints: response-weighted second moments,
        # flattened to an (Ndim**2, 1) column and appended.
        avgsqrd = (stim.T*1.0)*(sp.array(sp.tile(resp,(1,Ndim)))*sp.array(stim))/(Nsamples*1.0)
        avgsqrd = sp.reshape(avgsqrd,(Ndim**2,1))
        avgs = sp.vstack((avgs,avgsqrd))
    #initialize params:
    # Bias from the inverse sigmoid of the spike probability; linear terms
    # start as small random values in (-.001, .001).
    pstart = sp.log(1/avgs[0,0] - 1)
    pstart = sp.hstack((pstart,(.001*(2*sp.random.rand(Ndim)-1))))
    if(order > 1):
        # Quadratic terms start as a small random *symmetric* matrix.
        temp = .0005*(2*sp.random.rand(Ndim,Ndim)-1)
        pstart = sp.hstack((pstart,sp.reshape(temp+temp.T,(1,Ndim**2))[0]))
    #redefine functions with fixed vals:
    def logLoss(p):
        # Objective: log-loss of parameters p on the data (LLF is external).
        return LLF.log_loss(p, stim, resp, order)
    def dlogLoss(p):
        # Gradient of the log-loss, using the precomputed averages.
        return LLF.d_log_loss(p, stim, avgs, order)
    #run the function:
    #pfinal = opt.fmin_tnc(logLoss,pstart,fprime=dlogLoss)
    # conjugate-gradient:
    pfinal = opt.fmin_cg(logLoss,pstart,fprime=dlogLoss)
    #pfinal = opt.fmin(logLoss,pstart,fprime=dlogLoss)
    return pfinal
示例2: gpmapasrecc
def gpmapasrecc(optstate, **para):
    """Recommend an incumbent x by minimising the posterior mean of a GP
    fitted with MAP hyperparameters to the observations in `optstate`.

    Returns ([x...], aux-dict).  When called too early (fewer than
    para["onlyafter"] observations) or off-schedule (not every
    para["everyn"]-th step) it returns NaNs with {"didnotrun": True}.
    """
    if para["onlyafter"] > len(optstate.y) or not len(optstate.y) % para["everyn"] == 0:
        return [sp.NaN for i in para["lb"]], {"didnotrun": True}
    logger.info("gpmapas reccomender")
    d = len(para["lb"])
    # Augment each x with its auxiliary variable xa -> inputs are (d+1)-dim.
    x = sp.hstack([sp.vstack(optstate.x), sp.vstack([e["xa"] for e in optstate.ev])])
    y = sp.vstack(optstate.y)
    s = sp.vstack([e["s"] for e in optstate.ev])  # per-observation noise
    dx = [e["d"] for e in optstate.ev]            # derivative specs
    MAP = GPdc.searchMAPhyp(x, y, s, dx, para["mprior"], para["sprior"], para["kindex"])
    logger.info("MAPHYP {}".format(MAP))
    G = GPdc.GPcore(x, y, s, dx, GPdc.kernel(para["kindex"], d + 1, MAP))
    def directwrap(xq, y):
        # DIRECT objective: GP posterior mean at xq with the auxiliary
        # coordinate fixed to 0.
        xq.resize([1, d])
        xe = sp.hstack([xq, sp.array([[0.0]])])
        # print xe
        a = G.infer_m(xe, [[sp.NaN]])
        return (a[0, 0], 0)
    [xmin, ymin, ierror] = DIRECT.solve(
        directwrap, para["lb"], para["ub"], user_data=[], algmethod=1, volper=para["volper"], logfilename="/dev/null"
    )
    logger.info("reccsearchresult: {}".format([xmin, ymin, ierror]))
    return [i for i in xmin], {"MAPHYP": MAP, "ymin": ymin}
示例3: _pair_overlap
def _pair_overlap(waves1, waves2, mean1, mean2, cov1, cov2):
    """ Calculate FP/FN estimates for two gaussian clusters.

    Builds a 2-component Gaussian mixture from the supplied means and
    covariances (weights proportional to cluster sizes) and evaluates the
    posterior probability of each cluster's waveforms belonging to the
    *other* component.

    waves1, waves2 : waveform arrays, one waveform per column
        (shape[1] is the cluster size; predict_proba is fed the transpose).
    Returns a 4-tuple of overlap fractions derived from the posteriors.

    NOTE(review): uses the long-deprecated ``sklearn.mixture.GMM`` API
    (removed in modern scikit-learn) -- confirm the pinned version.
    """
    from sklearn import mixture
    means = sp.vstack([[mean1], [mean2]])
    covars = sp.vstack([[cov1], [cov2]])
    # Mixture weights proportional to the number of waveforms per cluster.
    weights = sp.array([waves1.shape[1], waves2.shape[1]], dtype=float)
    weights /= weights.sum()
    # Create mixture of two Gaussians from the existing estimates
    # (init_params="" keeps the manually assigned parameters).
    mix = mixture.GMM(n_components=2, covariance_type="full", init_params="")
    mix.covars_ = covars
    mix.weights_ = weights
    mix.means_ = means
    posterior1 = mix.predict_proba(waves1.T)[:, 1]  # P(comp 2 | cluster-1 wave)
    posterior2 = mix.predict_proba(waves2.T)[:, 0]  # P(comp 1 | cluster-2 wave)
    # NOTE(review): the cross-normalisations (posterior2 by len(posterior1)
    # and vice versa) look intentional only if both clusters have equal
    # size -- confirm against the caller.
    return (
        posterior1.mean(),
        posterior2.sum() / len(posterior1),
        posterior2.mean(),
        posterior1.sum() / len(posterior2),
    )
示例4: PESvsaq
def PESvsaq(optstate,persist,**para):
    """Predictive-entropy-search acquisition with the observation noise s
    treated as an extra (last, log10-scaled) search coordinate.

    Returns (x, ev, persist, aux).  The first para['nrandinit'] calls are
    delegated to randomaq to seed the model with random evaluations.

    Fix: removed the unreachable bare ``return`` that followed the final
    return statement (dead code).
    NOTE(review): unlike EIMAPaq, persist['n'] is not incremented on the
    PES path -- confirm whether that is intentional.
    """
    para = copy.deepcopy(para)
    if persist is None:
        persist = {'n':0,'d':len(para['ub'])}
    n = persist['n']
    d = persist['d']
    if n<para['nrandinit']:
        # Still in the random-initialisation phase.
        persist['n']+=1
        return randomaq(optstate,persist,**para)
    logger.info('PESvsaq')
    # Stack the observation history into arrays for the PES machinery.
    x=sp.vstack(optstate.x)
    y=sp.vstack(optstate.y)
    s= sp.vstack([e['s'] for e in optstate.ev])  # noise variances
    dx=[e['d'] for e in optstate.ev]             # derivative specs
    pesobj = PES.PES(x,y,s,dx,para['lb'],para['ub'],para['kindex'],para['mprior'],para['sprior'],DH_SAMPLES=para['DH_SAMPLES'],DM_SAMPLES=para['DM_SAMPLES'], DM_SUPPORT=para['DM_SUPPORT'],DM_SLICELCBPARA=para['DM_SLICELCBPARA'],mode=para['SUPPORT_MODE'],noS=para['noS'])
    # Acquisition search over x plus log10(s) in [logsl, logsu].
    [xmin,ymin,ierror] = pesobj.search_acq(para['cfn'],para['logsl'],para['logsu'],volper=para['volper'])
    logger.debug([xmin,ymin,ierror])
    # Last coordinate of the search point is log10 of the requested noise.
    para['ev']['s']=10**xmin[-1]
    xout = [i for i in xmin[:-1]]
    return xout,para['ev'],persist,{'HYPdraws':[k.hyp for k in pesobj.G.kf],'mindraws':pesobj.Z,'DIRECTmessage':ierror,'PESmin':ymin}
示例5: EIMAPaq
def EIMAPaq(optstate,persist,ev=None, ub = None, lb=None, nrandinit=None, mprior=None,sprior=None,kindex = None,directmaxiter=None):
    """Expected-improvement acquisition using MAP hyperparameters.

    Fits a GP with MAP hyperparameters to the observations in `optstate`
    and minimises the negative log-EI over [lb, ub] with DIRECT.  The
    first `nrandinit` calls are delegated to randomaq for random seeding.

    Returns (x, ev, persist, aux).

    Fix: removed ``para = copy.deepcopy(para)`` -- this function has no
    ``para`` argument or local, so that line always raised NameError.
    """
    if persist is None:
        persist = {'n':0,'d':len(ub)}
    n = persist['n']
    d = persist['d']
    if n<nrandinit:
        # Still in the random-initialisation phase.
        persist['n']+=1
        return randomaq(optstate,persist,ev=ev,lb=lb,ub=ub)
    logger.info('EIMAPaq')
    # Stack the observation history for the GP.
    x=sp.vstack(optstate.x)
    y=sp.vstack(optstate.y)
    s= sp.vstack([e['s'] for e in optstate.ev])  # noise variances
    dx=[e['d'] for e in optstate.ev]             # derivative specs
    MAP = GPdc.searchMAPhyp(x,y,s,dx,mprior,sprior, kindex)
    logger.info('MAPHYP {}'.format(MAP))
    G = GPdc.GPcore(x,y,s,dx,GPdc.kernel(kindex,d,MAP))
    def directwrap(xq,y):
        # DIRECT minimises, so return the negative log expected improvement.
        xq.resize([1,d])
        a = G.infer_lEI(xq,[ev['d']])
        return (-a[0,0],0)
    [xmin,ymin,ierror] = DIRECT.solve(directwrap,lb,ub,user_data=[], algmethod=0, maxf = directmaxiter, logfilename='/dev/null')
    persist['n']+=1
    return [i for i in xmin],ev,persist,{'MAPHYP':MAP,'logEImin':ymin,'DIRECTmessage':ierror}
示例6: update
def update():
    """Advance the animation by one frame and refresh the plot items.

    Uses module-level state: frame index ``i``, time vector ``tvec``, the
    poi/hand position arrays, the scatter/line items and the
    ``show_left``/``show_right`` flags.  Wraps around at the end of tvec.
    """
    global i
    # Advance the frame counter, wrapping back to the first sample.
    if i == tvec.shape[0]-1:
        i = 0
    else:
        i = i + 1
    if show_left:
        # Current left-hand poi/hand positions as single-point scatter data.
        poi_left_scatter.setData(pos=sp.expand_dims(poi_left_pos[i],0))
        hand_left_scatter.setData(pos=sp.expand_dims(hand_left_pos[i],0))
        # Line segments: hand->poi (string) and hand->fixed offset (arm).
        string_left_line.setData(pos=sp.vstack((hand_left_pos[i],poi_left_pos[i])))
        # arm_left.setData(pos=sp.vstack((hand_left_pos[i],[0,-1*shoulder_width/2,0])))
        arm_left.setData(pos=sp.vstack((hand_left_pos[i],[0,0,offset])))
    else:
        # Left side disabled: hide every left-side item.
        poi_left_scatter.hide()
        poi_left_line.hide()
        hand_left_scatter.hide()
        hand_left_line.hide()
        string_left_line.hide()
        arm_left.hide()
    if show_right:
        # Mirror of the left-side update for the right side.
        poi_right_scatter.setData(pos=sp.expand_dims(poi_right_pos[i],0))
        hand_right_scatter.setData(pos=sp.expand_dims(hand_right_pos[i],0))
        string_right_line.setData(pos=sp.vstack((hand_right_pos[i],poi_right_pos[i])))
        # arm_right.setData(pos=sp.vstack((hand_right_pos[i],[0,shoulder_width/2,0])))
        arm_right.setData(pos=sp.vstack((hand_right_pos[i],[0,0,offset])))
    else:
        poi_right_scatter.hide()
        poi_right_line.hide()
        hand_right_scatter.hide()
        hand_right_line.hide()
        string_right_line.hide()
        arm_right.hide()
示例7: Ei
def Ei(self, Pp, i):
    """ Calculate E_i^P

    Parameters
    -------------
    Pp : ndarray, shape (n, k)
        Conditional choice probabilities for provinces
    i : int, 1 to k
        Province

    Returns
    -----------
    Ei : ndarray, shape (n, )
        Values of :math:`E_i^P(l, a)` in part (b)

    Notes
    ----------
    .. math::

        E_i^P(l, s) = \sum_{a=0}^1 P_i[a | l, s] E_i^P(a, l, s)
    """
    # Fix: pass explicit lists -- handing a bare generator to vstack is an
    # error in NumPy >= 1.16 (arrays must be provided as a sequence).
    E = sp.vstack([self.Ei_ai(Pp, i, a) for a in (0, 1)]).T
    W = sp.vstack([Pp[:, _pp(i, a)] for a in (0, 1)]).T
    # Probability-weighted sum over the two actions a = 0, 1.
    return (E * W).sum(1)
示例8: calc_probability_matrix
def calc_probability_matrix(trains_a, trains_b, metric, tau, z):
    """ Calculates the probability matrix that one spike train from stimulus X
    will be classified as spike train from stimulus Y.

    :param list trains_a: Spike trains of stimulus A.
    :param list trains_b: Spike trains of stimulus B.
    :param str metric: Metric to base the classification on. Has to be a key in
        :const:`metrics.metrics`.
    :param tau: Time scale parameter for the metric.
    :type tau: Quantity scalar.
    :param float z: Exponent parameter for the classifier.
    :returns: 2x2 confusion matrix normalized to sum to 1; entry [p, t] is
        the fraction of trains of true stimulus t classified as stimulus p
        (0 = A, 1 = B).
    """
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", "divide by zero")
        # Pairwise distances among all trains (A block first, then B),
        # raised to the classifier exponent z.
        dist_mat = calc_single_metric(trains_a + trains_b, metric, tau) ** z
    dist_mat[sp.diag_indices_from(dist_mat)] = 0.0
    assert len(trains_a) == len(trains_b)
    l = len(trains_a)
    # Nearest-mean classification with leave-one-out: the within-class sum
    # divides by (l - 1) because a train's zero self-distance is excluded.
    classification_of_a = sp.argmin(sp.vstack((
        sp.sum(dist_mat[:l, :l], axis=0) / (l - 1),
        sp.sum(dist_mat[l:, :l], axis=0) / l)) ** (1.0 / z), axis=0)
    classification_of_b = sp.argmin(sp.vstack((
        sp.sum(dist_mat[:l, l:], axis=0) / l,
        sp.sum(dist_mat[l:, l:], axis=0) / (l - 1))) ** (1.0 / z), axis=0)
    # Confusion counts: confusion[predicted, actual] with A=0, B=1.
    confusion = sp.empty((2, 2))
    confusion[0, 0] = sp.sum(classification_of_a == 0)
    confusion[1, 0] = sp.sum(classification_of_a == 1)
    confusion[0, 1] = sp.sum(classification_of_b == 0)
    confusion[1, 1] = sp.sum(classification_of_b == 1)
    # Normalize by the total number of trains (2 * l).
    return confusion / 2.0 / l
示例9: sqcover
def sqcover(A,n):
    """Cover a square of area A with a uniform n-by-n grid of points.

    Returns a tuple (pts, r, colors):
      pts    : (n*n, 2) array of grid-point coordinates, built row by row.
      r      : half the spacing between adjacent points (the "radius" of
               the patch owned by each point).
      colors : one colormap.Spectral value per point; the x-coordinate
               carries weight n, so colour varies mainly along x.
    """
    edge = sp.sqrt(A)              # side length of the square
    spacing = edge / n             # distance between adjacent points
    r = spacing / 2                # half-spacing
    end = edge - r                 # last grid coordinate
    base = sp.linspace(r, end, n)  # x-coordinates shared by every row

    # Bottom row of points (y == r) and the constant per-row vertical step.
    bottom = sp.transpose(sp.vstack((base, r * sp.ones(n))))
    step = sp.transpose(sp.vstack((sp.zeros(n), spacing * sp.ones(n))))

    rows = [bottom]
    shift = step
    for _ in range(n - 1):
        rows.append(bottom + shift)
        shift = shift + step
    pts = sp.vstack(rows)

    # Colour each point, normalising by the largest value cval can take.
    scale = (n + 1) * end
    colors = sp.array([colormap.Spectral((n * q[0] + q[1]) / scale) for q in pts])

    return (pts, r, colors)
示例10: my_bh_fdr
def my_bh_fdr(p_val_vec):
    """Benjamini-Hochberg FDR adjustment of a vector of p-values.

    Returns the adjusted values ("expected error rates") in the original
    order of ``p_val_vec``.

    Fix: uses numpy directly instead of the NumPy aliases that used to be
    re-exported from the scipy top level (``scipy.argsort``,
    ``scipy.vstack``, ... -- deprecated in SciPy 1.8, removed in 1.12).
    The algorithm itself is unchanged, including the original single-pass
    monotonicity step against the immediate predecessor.
    """
    import numpy as np
    p = np.asarray(p_val_vec, dtype=float)
    m = len(p)
    order = np.argsort(p)
    # Scale each sorted p-value by m/rank and clip at 1.
    scaled = np.minimum(float(m) / np.arange(1, m + 1) * p[order], 1.0)
    # Enforce non-decreasing values against the immediate predecessor
    # (single vectorised pass, as in the original implementation).
    adjusted = np.maximum(scaled, scaled[np.r_[0, np.arange(m - 1)]])
    # Undo the sort to restore the input order.
    return adjusted[np.argsort(order)]
示例11: infer_diag
def infer_diag(self,X_i,D_i):
    """Posterior mean and variance at the points X_i for every GP held by
    this object (self.size of them), computed via the C library libGP.

    D_i holds one derivative spec per query point; NaN in the first entry
    means a plain value query, otherwise the entries are packed into a
    single integer as sum(8**i) -- presumably base-8 encoded derivative
    axes, TODO confirm against libGP.

    Returns [m, V], each of shape (self.size, ns).
    """
    ns=X_i.shape[0]
    # Pack each derivative spec into one int (0 when NaN = no derivative).
    D = [0 if sp.isnan(x[0]) else int(sum([8**i for i in x])) for x in D_i]
    # Result buffer: two interleaved rows (mean, variance) per GP; the C
    # side writes into this memory through the raw pointer below.
    R=sp.vstack([sp.empty([2,ns])]*self.size)
    libGP.infer_diag(self.s,cint(self.size), ns,X_i.ctypes.data_as(ctpd),(cint*len(D))(*D),R.ctypes.data_as(ctpd))
    # De-interleave: even rows are means, odd rows are variances.
    m = sp.vstack([R[i*2,:] for i in xrange(self.size)])
    V = sp.vstack([R[i*2+1,:] for i in xrange(self.size)])
    return [m,V]
示例12: test_skip
def test_skip():
    """Test if only keeping every n'th sample works."""
    # Two identical rows of 0..24; with arguments (2, 5) the expected
    # output keeps runs of 5 columns and drops the 5 in between --
    # presumably skip(X, n, m) keeps every n-th block of m samples.
    X = scipy.vstack((scipy.arange(25), scipy.arange(25)))
    X_ = skip(X, 2, 5)
    print X_
    des = scipy.vstack((scipy.array([0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 20, 21, 22, 23 ,24]),
                        scipy.array([0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 20, 21, 22, 23 ,24])))
    assert (X_ == des).all(), 'wrong result'
示例13: extract_spikes
def extract_spikes(train, signals, length, align_time):
    """ Extract spikes with waveforms from analog signals using a spike train.

    Spikes that are too close to the beginning or end of the shortest signal
    to be fully extracted are ignored.

    :type train: :class:`neo.core.SpikeTrain`
    :param train: The spike times.
    :param sequence signals: A sequence of :class:`neo.core.AnalogSignal`
        objects from which the spikes are extracted. The waveforms of
        the returned spikes are extracted from these signals in the
        same order they are given.
    :type length: Quantity scalar
    :param length: The length of the waveform to extract as time scalar.
    :type align_time: Quantity scalar
    :param align_time: The alignment time of the spike times as time scalar.
        This is the time delta from the start of the extracted waveform
        to the exact time of the spike.
    :returns: A list of :class:`neo.core.Spike` objects, one for each
        time point in ``train``. All returned spikes include their
        ``waveform`` property.
    :rtype: list
    """
    if len(set(s.sampling_rate for s in signals)) > 1:
        raise ValueError(
            'All signals for spike extraction need the same sampling rate')
    wave_unit = signals[0].units
    srate = signals[0].sampling_rate
    end = min(s.shape[0] for s in signals)  # length of the shortest signal
    # Shift spike times so each marks the start of its waveform window.
    aligned_train = train - align_time
    cut_samples = int((length * srate).simplified)
    st = sp.asarray((aligned_train * srate).simplified)
    # Find extraction epochs
    # Keep only spikes whose full window fits inside every signal.
    st_ok = (st >= 0) * (st < end - cut_samples)
    epochs = sp.vstack((st[st_ok], st[st_ok] + cut_samples)).T
    # NOTE(review): st is float, so epochs holds float sample indices; the
    # slicing below relies on old NumPy accepting float slice bounds --
    # confirm (or cast to int) on modern NumPy.
    nspikes = epochs.shape[0]
    if not nspikes:
        return []
    # Create data
    # One row per signal/channel, rescaled to a common unit.
    data = sp.vstack([sp.asarray(s.rescale(wave_unit)) for s in signals])
    nc = len(signals)
    spikes = []
    for s in xrange(nspikes):
        # Assemble a (samples, channels) waveform for spike s.
        waveform = sp.zeros((cut_samples, nc))
        for c in xrange(nc):
            waveform[:, c] = \
                data[c, epochs[s, 0]:epochs[s, 1]]
        spikes.append(neo.Spike(train[st_ok][s], waveform=waveform * wave_unit))
    return spikes
示例14: stripe2
def stripe2():
    """Generate a toy two-class dataset from two correlated Gaussians.

    Returns (Y, X):
      Y : (100, 2) one-hot labels -- first 50 rows class 0, last 50 class 1.
      X : (100, 3) design matrix: an intercept column of ones followed by
          2-D points drawn from N([-2, 2], C) and N([2, -1], C) with
          C = [[1, .8], [.8, 1]], 50 points each.

    Fix: uses numpy directly instead of the NumPy aliases removed from the
    scipy top-level namespace in SciPy 1.12 (sp.vstack, sp.hstack,
    sp.random, ...).  scipy.random was itself an alias of numpy.random, so
    the draws are unchanged for a given seed.
    """
    import numpy as np
    y0 = np.vstack((np.ones((50, 1)), np.zeros((50, 1))))
    y1 = np.vstack((np.zeros((50, 1)), np.ones((50, 1))))
    Y = np.hstack([y0, y1])
    cov = [[1, .8], [.8, 1]]
    X1 = np.random.multivariate_normal([-2, 2], cov, size=50)
    X2 = np.random.multivariate_normal([2, -1], cov, size=50)
    X = np.hstack((np.ones((100, 1)), np.vstack([X1, X2])))
    return Y, X
示例15: load_single_player_data
def load_single_player_data(use_existing=False, num_train=0):
    """Load player/background image crops and split into body-part classes.

    Loads two .npy arrays (player and background images) from hard-coded
    paths, standardizes them, optionally shuffles the player images
    (persisting the permutation to ii.npy when use_existing is False,
    reusing a saved one otherwise), and cuts each image into
    head/body/legs strips plus a background strip.

    Returns (X_train, y_train, X_val, y_val, X_test, y_test): images as
    float32 in (N, channels, height, width) layout, labels as uint8 in
    0..3 for head/body/legs/background.

    NOTE(review): ``ii=range(num)`` followed by np.random.shuffle(ii) only
    works on Python 2 (where range returns a list) -- confirm the target
    interpreter.
    """
    aa=np.load('/Users/amit/Desktop/Dropbox/Markov/IMSPL.npy')
    bb=np.load('/Users/amit/Desktop/Dropbox/Markov/IMSBGD.npy')
    aa=standardize_data(aa)
    bb=standardize_data(bb)
    #ii=np.int32(np.floor(np.random.rand(100)*bb.shape[0]))
    # py.figure(1)
    # for j,i in enumerate(ii):
    #     py.subplot(10,10,j+1)
    #     py.imshow(bb[i,:,:,:])
    #     py.axis('off')
    #     py.axis('equal')
    # py.show()
    # Number of player images to use (all of them by default).
    if (num_train==0):
        num=aa.shape[0]
    else:
        num=np.minimum(aa.shape[0],num_train)
    if (not use_existing):
        # Fresh shuffle; save the permutation for reproducible reuse.
        ii=range(num)
        np.random.shuffle(ii)
        np.save('ii.npy',ii)
        aa=aa[ii,]
    else:
        # Reuse the previously saved permutation if present.
        if (os.path.isfile('ii.npy')):
            ii=np.load('ii.npy')
            aa=aa[ii,]
    # Split sizes: 1/2 train, 1/4 validation, 1/4 test.
    train_num=np.int32(num/2)
    val_num=np.int32(num/4)
    test_num=np.int32(num/4)
    # Overlapping horizontal strips: rows 0-24 (head), 20-44 (body),
    # 35-59 (legs); background uses the middle strip of the bgd images.
    head=aa[:,0:25,:,:]
    body=aa[:,20:45,:,:]
    legs=aa[:,35:60,:,:]
    bgd=bb[:,20:45,:,:]
    val_start=train_num
    val_end=val_num+val_start
    test_start=val_end
    test_end=test_num+test_start
    # Stack the four classes in a fixed order for each split.
    X_train=scipy.vstack((head[0:train_num,],body[0:train_num,],legs[0:train_num],bgd[0:train_num,]))
    X_val=scipy.vstack((head[val_start:val_end,],body[val_start:val_end,],
                        legs[val_start:val_end,],bgd[val_start:val_end,]))
    X_test=scipy.vstack((head[test_start:test_end,],
                         body[test_start:test_end,],
                         legs[test_start:test_end,],
                         bgd[test_start:test_end,]))
    # Reorder to (N, channels, height, width).
    X_train=X_train.transpose((0,3,1,2)) #/256.
    X_val=X_val.transpose((0,3,1,2)) #/256.
    X_test=X_test.transpose((0,3,1,2)) #/256.
    # Labels in block order: 0=head, 1=body, 2=legs, 3=background.
    y_train=np.repeat(range(4),train_num)
    y_val=np.repeat(range(4),val_num)
    y_test=np.repeat(range(4),test_num)
    return (np.float32(X_train),np.uint8(y_train),np.float32(X_val),np.uint8(y_val),np.float32(X_test),np.uint8(y_test))