This article collects typical usage examples of the Python function scipy.sign. If you have been wondering what sign does, how to call it, or what real-world usage looks like, the curated examples below should help.
The 15 code examples below are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
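For orientation before the examples: in the SciPy versions these snippets target, scipy.sign is simply a re-export of numpy.sign (recent SciPy releases deprecate and remove these aliases in favor of NumPy). A minimal sketch of its semantics:

import numpy as np  # scipy.sign is an alias of numpy.sign in older SciPy

x = np.array([-3.5, 0.0, 2.0])
print(np.sign(x))  # [-1.  0.  1.]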
Example 1: f
def f(self, xarr, t):
    # ODE right-hand side: xarr[0], xarr[1] are the velocities of the
    # coordinates xarr[2], xarr[3]; sign(xarr[3]-1.0) flips the forcing
    # and gravity terms depending on which side of y = 1 the particle is on
    x0dot = -self.coef*pl.exp(-self.k*(1.0+abs(xarr[3]-1.0)))*pl.sin(self.w*t-self.k*xarr[2])
    x1dot = pl.sign(xarr[3]-1.0)*self.coef*pl.exp(-self.k*(1.0+abs(xarr[3]-1.0)))*pl.cos(self.w*t-self.k*xarr[2]) \
            - pl.sign(xarr[3]-1.0)*9.8
    x2dot = xarr[0]
    x3dot = xarr[1]
    return [x0dot, x1dot, x2dot, x3dot]
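The snippet above is only the right-hand side; integrating it needs a host object providing coef, k, and w. A minimal sketch, assuming f above is a module-level function and using a hypothetical Swimmer class with made-up parameter values (neither appears in the original):

import pylab as pl
from scipy.integrate import odeint

class Swimmer(object):
    def __init__(self, coef=1.0, k=2.0, w=3.0):  # hypothetical values
        self.coef, self.k, self.w = coef, k, w

Swimmer.f = f  # attach the right-hand side defined above as a method

t = pl.linspace(0.0, 10.0, 500)
# initial state: zero velocities, positions (0, 2)
states = odeint(Swimmer().f, [0.0, 0.0, 0.0, 2.0], t)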
Example 2: plot_histogram
def plot_histogram(X, Y, w, b):
    ''' Plots a histogram of classifier outputs (w^T X) for each class with pl.hist
        The title of the histogram is the accuracy of the classification
        Accuracy = #correctly classified points / N
        Definition:  plot_histogram(X, Y, w, b)
        Input:       X  -  DxN array of N data points with D features
                     Y  -  1D array of length N of class labels
                     w  -  1D array of length D, weight vector
                     b  -  bias term for linear classification
    '''
    # ... your code here
    # Data: classifier outputs and the resulting accuracy
    output = w.dot(X) - b
    correct = (sp.sign(output) == Y).nonzero()[0]
    acc = float(correct.shape[0]) / float(output.shape[0]) * 100.
    # Split the outputs by class label, as the docstring asks (compare Example 14)
    non_target = output[Y == -1]
    target = output[Y == 1]
    # Plot:
    pl.hist(non_target, bins=10, histtype='bar', color='b', rwidth=0.4, label='non-target')
    pl.hist(target, bins=10, histtype='bar', color='g', rwidth=0.4, label='target')
    pl.xlabel('w^T X')
    pl.title('Acc %d%%' % acc)
    pl.legend()
Example 3: rpropUpdate
def rpropUpdate(self, w):
    """ Edit the update vector according to the rprop mechanism. """
    n = self.xdim
    self.wStored.append(w.copy())
    self.rpropPerformance.append(self.fx[0])
    self.oldParams.append(self.combineParams(self.alpha, self.x, self.factorSigma))
    if self.generation > 0:
        neww = zeros(len(w))
        self.delta.append(zeros(self.mu * (n * (n + 1) // 2 + n + 1)))
        for i in range(len(w) - 1):
            self.delta[self.generation][i] = self.delta[self.generation - 1][i]
            assert len(self.wStored[self.generation]) == len(self.wStored[self.generation - 1])
            if self.wStored[self.generation][i] * self.wStored[self.generation - 1][i] > 0.0:
                # same sign as the previous step: grow the step size
                self.delta[self.generation][i] = min(self.delta[self.generation - 1][i] * self.etaPlus, self.rpropMaxUpdate)
                if self.rpropUseGradient:
                    neww[i] = self.wStored[self.generation][i] * self.delta[self.generation][i]
                else:
                    neww[i] = sign(self.wStored[self.generation][i]) * self.delta[self.generation][i]
            elif self.wStored[self.generation][i] * self.wStored[self.generation - 1][i] < 0.0:
                # sign flip: shrink the step size
                self.delta[self.generation][i] = max(self.delta[self.generation - 1][i] * self.etaMin, self.rpropMinUpdate)
                if self.rpropPerformance[self.generation] < self.rpropPerformance[self.generation - 1]:
                    # undo the last update
                    neww[i] = self.oldParams[self.generation - 1][i] - self.oldParams[self.generation][i]
                    self.wStored[self.generation][i] = 0.0
            elif self.wStored[self.generation][i] * self.wStored[self.generation - 1][i] == 0.0:
                if self.rpropUseGradient:
                    neww[i] = self.wStored[self.generation][i] * self.delta[self.generation][i]
                else:
                    neww[i] = sign(self.wStored[self.generation][i]) * self.delta[self.generation][i]
        self.updateVariables(neww)
Example 4: train_perceptron
def train_perceptron(X, Y, iterations=200, eta=.1):
    ''' Trains a linear perceptron
        Definition:  w, b = train_perceptron(X, Y, iterations=200, eta=.1)
        Input:       X           -  DxN array of N data points with D features
                     Y           -  1D array of length N of class labels {-1, 1}
                     iterations  -  optional, number of iterations, default 200
                     eta         -  optional, learning rate, default 0.1
        Output:      w  -  1D array of length D, weight vector
                     b  -  bias term for linear classification
    '''
    # include the bias term by adding a row of ones to X
    X = sp.concatenate((sp.ones((1, X.shape[1])), X))
    # initialize weight vector
    weights = sp.ones((X.shape[0])) / X.shape[0]
    for it in sp.arange(iterations):
        # indices of misclassified data
        wrong = (sp.sign(weights.dot(X)) != Y).nonzero()[0]
        if wrong.shape[0] > 0:
            # pick a random misclassified data point
            m = wrong[sp.random.randint(0, wrong.shape[0])]
            # update weight vector with a decaying learning rate eta/(1.+it)
            weights = weights + (eta / (1. + it)) * X[:, m] * Y[m]
    b = -weights[0]
    w = weights[1:]
    return w, b
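A usage sketch on made-up data (two Gaussian blobs; the data, seed, and names below are hypothetical, and NumPy is used directly since recent SciPy no longer re-exports these helpers):

import numpy as np

np.random.seed(0)
X = np.hstack((np.random.randn(2, 50) - 2, np.random.randn(2, 50) + 2))  # D=2, N=100
Y = np.concatenate((-np.ones(50), np.ones(50)))
w, b = train_perceptron(X, Y)
print("training accuracy: %.2f" % np.mean(np.sign(w.dot(X) - b) == Y))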
Example 5: crossvalidate
def crossvalidate(X, Y, f=5, trainfun=train_ncc):
    '''
    Test generalization performance of a linear classifier by crossvalidation
    Definition:     crossvalidate(X, Y, f=5, trainfun=train_ncc)
    Input:      X         -  DxN array of N data points with D features
                Y         -  1D array of length N of class labels
                f         -  number of cross-validation folds
                trainfun  -  function for linear classification training
    Output:     acc_train - (f,) array of accuracies in each training fold
                acc_test  - (f,) array of accuracies in each test fold
    '''
    N = f * (X.shape[-1] // f)  # integer division so the reshape below works
    idx = sp.reshape(sp.arange(N), (f, N // f))
    acc_train = sp.zeros((f))
    acc_test = sp.zeros((f))
    for ifold in sp.arange(f):
        testidx = sp.zeros((f), dtype=bool)
        testidx[ifold] = 1
        test = idx[testidx, :].flatten()
        train = idx[~testidx, :].flatten()
        w, b = trainfun(X[:, train], Y[train])
        acc_train[ifold] = sp.sum(sp.sign(w.dot(X[:, train]) - b) == Y[train]) / sp.double(train.shape[0])
        acc_test[ifold] = sp.sum(sp.sign(w.dot(X[:, test]) - b) == Y[test]) / sp.double(test.shape[0])
    return acc_train, acc_test
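Reusing the hypothetical X and Y from the Example 4 sketch, a 5-fold run with the perceptron as the training function might look like:

acc_train, acc_test = crossvalidate(X, Y, f=5, trainfun=train_perceptron)
print("mean train acc: %.2f, mean test acc: %.2f" % (acc_train.mean(), acc_test.mean()))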
Example 6: predict
def predict(self, Xtest):
    print("starting predict with:", Xtest.shape[0], "samples:", datetime.datetime.now())
    num_batches = Xtest.shape[0] / float(self.n_pred_samples)
    test_ids = []
    for i in range(0, int(num_batches)):
        test_ids.append(range(i * self.n_pred_samples, (i + 1) * self.n_pred_samples))
    if (num_batches - int(num_batches)) > 0:
        # remainder batch for the samples that don't fill a full batch
        test_ids.append(range(int(num_batches) * self.n_pred_samples, Xtest.shape[0]))
    yhattotal = []
    for i in range(0, len(test_ids)):
        # average the raw SVM outputs over all expansion sets, then threshold
        yraw = Parallel(n_jobs=self.workers)(delayed(svm_predict_raw_batches)(self.X, Xtest[test_ids[i]],
                                             self.w, v, self.gamma) for v in self.X_exp_ids)
        yhat = sp.sign(sp.vstack(yraw).mean(axis=0))
        yhattotal.append(yhat)
    yhattotal = [item for sublist in yhattotal for item in sublist]
    print("stopping predict:", datetime.datetime.now())
    return sp.sign(yhattotal)
Example 7: zero_crossing_rate
def zero_crossing_rate(self, frames):
    nf = len(frames)    # number of frames
    zcr = np.zeros(nf)  # initialize
    for k in range(nf):
        x_sub = frames[k]
        x_sub1 = x_sub[:-1]
        x_sub2 = x_sub[1:]
        # each sign change between neighboring samples counts as one crossing
        zcr[k] = np.sum(np.abs(scipy.sign(x_sub1) - scipy.sign(x_sub2))) / 2 / len(x_sub1)
    return zcr
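The per-frame loop can also be vectorized. A sketch, assuming frames is a 2D array of shape (num_frames, frame_length); if the original receives a list of unequal-length 1D arrays, the loop version above remains the safer choice:

import numpy as np

def zero_crossing_rate_vec(frames):
    # sign changes between neighboring samples, all frames at once
    signs = np.sign(np.asarray(frames))
    return np.sum(np.abs(signs[:, :-1] - signs[:, 1:]), axis=1) / 2.0 / (signs.shape[1] - 1)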
Example 8: dsekl_test_predict
def dsekl_test_predict(dname='sonar', num_test=1000, maxN=1000):
    print("started loading:", datetime.datetime.now())
    Xtotal, Ytotal = load_realdata(dname)
    print("loading data done!", datetime.datetime.now())
    # decrease dataset size
    N = Xtotal.shape[0]
    if maxN > 0:
        N = sp.minimum(Xtotal.shape[0], maxN)
    Xtotal = Xtotal[:N + num_test]
    Ytotal = Ytotal[:N + num_test]
    # randomize datapoints
    print("randomization", datetime.datetime.now())
    sp.random.seed(0)
    idx = sp.random.permutation(Xtotal.shape[0])
    Xtotal = Xtotal[idx]
    Ytotal = Ytotal[idx]
    # divide test and train
    print("dividing in train and test", datetime.datetime.now())
    Xtest = Xtotal[N:N + num_test]
    Ytest = Ytotal[N:N + num_test]
    Xtrain = Xtotal[:N]
    Ytrain = Ytotal[:N]
    print("densifying", datetime.datetime.now())
    Xtrain = Xtrain.todense()
    Xtest = Xtest.todense()
    # unit variance and zero mean
    if not sp.sparse.issparse(Xtrain):
        scaler = StandardScaler()
        print("fitting scaler", datetime.datetime.now())
        scaler.fit(Xtrain)  # don't cheat: fit only on training data
        print("transforming data train", datetime.datetime.now())
        Xtrain = scaler.transform(Xtrain)
        print("transforming data test", datetime.datetime.now())
        Xtest = scaler.transform(Xtest)
    else:
        scaler = StandardScaler(with_mean=False)  # sparse data can't be centered
        scaler.fit(Xtrain)
        Xtrain = scaler.transform(Xtrain)
        Xtest = scaler.transform(Xtest)
    DS = pickle.load(open("DS", "rb"))
    res_hl = DS.predict_support_hardlimits(Xtest)
    res_hl = sp.mean(sp.sign(res_hl) != Ytest)
    print("res_hl", res_hl)
    res_perc = DS.predict_support_percentiles(Xtest)
    res_perc = sp.mean(sp.sign(res_perc) != Ytest)
    print("res_perc", res_perc)
Example 9: performAstroActions
def performAstroActions(self):
    for j, active in enumerate(self.astro_statuses):
        assert active in [-1, 0, 1]
        if active == 1:
            assert sign(self.remaining_active_durs[j]) == 1
            self.neur_in_ws[:, j] += self.neur_in_ws[:, j] * self.incr_percent
            self.remaining_active_durs[j] -= 1
        elif active == -1:
            assert sign(self.remaining_active_durs[j]) == -1
            self.neur_in_ws[:, j] += self.neur_in_ws[:, j] * -self.decr_percent
            self.remaining_active_durs[j] += 1
Example 10: svdInverse
def svdInverse(mat, maxEig=1e10, minEig=1e-10):
    """Pseudo-inverse via SVD, clamping the inverse singular values to [minEig, maxEig]."""
    u, w, vt = scipy.linalg.svd(mat)
    if any(w == 0.):
        raise ZeroDivisionError("Singular matrix.")
    wInv = w ** -1
    largeIndices = (abs(wInv) > maxEig).nonzero()[0]
    if len(largeIndices) > 0:
        print("svdInverse:", len(largeIndices), "large singular values out of", len(w))
    wInv[largeIndices] = maxEig * scipy.sign(wInv[largeIndices])
    smallIndices = (abs(wInv) < minEig).nonzero()[0]
    if len(smallIndices) > 0:
        print("svdInverse:", len(smallIndices), "small singular values out of", len(w))
    wInv[smallIndices] = minEig * scipy.sign(wInv[smallIndices])
    return scipy.dot(scipy.dot(vt.T, scipy.diag(wInv)), u.T)
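A quick sanity check (a sketch, using NumPy helpers since recent SciPy drops its top-level NumPy aliases): for a well-conditioned matrix the clamped SVD inverse should agree with the ordinary inverse.

import numpy as np

A = np.array([[2., 1.], [1., 3.]])
Ainv = svdInverse(A)
print(np.allclose(Ainv.dot(A), np.eye(2)))  # True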
Example 11: expectation_prop_inner
def expectation_prop_inner(m0, V0, Y, Z, F, z, needed):
    # expectation propagation on a multivariate Gaussian for soft inequality constraints
    # m0, V0 are the mean vector and covariance before EP
    # Y is the inequality value; Z is the sign, 1 for geq, -1 for leq; F is the softness variance
    # z is the number of EP rounds to run
    # returns mt, Vt: the values and variances for the observations created by EP
    m0 = sp.array(m0).flatten()
    V0 = sp.array(V0)
    n = V0.shape[0]
    print("expectation propagation running on " + str(n) + " dimensions for " + str(z) + " loops:")
    mt = sp.zeros(n)
    Vt = sp.eye(n) * float(1e10)
    m = sp.empty(n)
    V = sp.empty([n, n])
    conv = sp.empty(z)
    for i in range(z):
        # compute m, V given the EP observations
        m, V = gaussian_fusion(m0, mt, V0, Vt)
        mtprev = mt.copy()
        Vtprev = Vt.copy()
        for j in [k for k in range(n) if needed[k]]:
            # the cavity distribution at index j
            tmp = 1. / (Vt[j, j] - V[j, j])
            v_ = (V[j, j] * Vt[j, j]) * tmp
            m_ = tmp * (m[j] * Vt[j, j] - mt[j] * V[j, j])
            alpha = sp.sign(Z[j]) * (m_ - Y[j]) / (sp.sqrt(v_ + F[j]))
            pr = PhiR(alpha)
            if sp.isnan(pr):
                pr = -alpha
            beta = pr * (pr + alpha) / (v_ + F[j])
            kappa = sp.sign(Z[j]) * (pr + alpha) / (sp.sqrt(v_ + F[j]))
            mt[j] = m_ + 1. / kappa
            # mt[j] = min(abs(mt[j]), 1e5)*sp.sign(mt[j])
            Vt[j, j] = min(1e10, 1. / beta - v_)
        # TODO: make this a ratio instead of absolute
        delta = max(sp.amax(mtprev - mt), sp.amax(sp.diagonal(Vtprev) - sp.diagonal(Vt)))
        conv[i] = delta
    print("EP finished with final max deltas " + str(conv[-3:]))
    V = V0.dot(spl.solve(V0 + Vt, Vt))
    m = V.dot((spl.solve(V0, m0) + spl.solve(Vt, mt)).T)
    return mt, Vt
Example 12: performAstrocyteActions
def performAstrocyteActions(self):
    # number of input weights per astrocyte (integer division for Python 3)
    i = len(self.neuronal_input_connection.params) // self.dim
    for j, active in enumerate(self.astrocyte_statuses):
        J = j * i
        assert active in [-1, 0, 1]
        if active == 1:
            # NEED TO CHECK _setParameters and _params method
            assert sign(self.remaining_active_durations[j]) == 1
            self.neuronal_input_connection.params[J:J + i] += \
                self.neuronal_input_connection.params[J:J + i] * self.increment_percent
            self.remaining_active_durations[j] -= 1
        elif active == -1:
            assert sign(self.remaining_active_durations[j]) == -1
            self.neuronal_input_connection.params[J:J + i] += \
                self.neuronal_input_connection.params[J:J + i] * -self.decrement_percent
            self.remaining_active_durations[j] += 1
Example 13: calc_modal_vector
def calc_modal_vector(atoms1, atoms2):
    """
    Calculate the 'modal vector', i.e. the difference vector between the two configurations.
    The minimum image convention is applied!
    """
    from scipy.linalg import inv
    from scipy import array, dot
    from scipy import sign, floor
    cell1 = atoms1.get_cell()
    cell2 = atoms2.get_cell()
    # the cells need to be the same (otherwise the whole process won't make sense)
    if (cell1 != cell2).any():
        raise ValueError("Encountered different cells in atoms1 and atoms2. Those need to be the same.")
    cell = cell1
    icell = inv(cell)
    frac1 = atoms1.get_scaled_positions()
    frac2 = atoms2.get_scaled_positions()
    modal_vector_frac = frac1 - frac2
    for i in range(modal_vector_frac.shape[0]):
        for j in range(modal_vector_frac.shape[1]):
            if abs(modal_vector_frac[i, j]) > .5:
                value = modal_vector_frac[i, j]
                vsign = sign(modal_vector_frac[i, j])
                absvalue = abs(value)
                modal_vector_frac[i, j] = value - vsign * floor(absvalue + .5)
    return dot(modal_vector_frac, cell)
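The innermost line implements the minimum image wrap: any fractional difference outside (-0.5, 0.5] is shifted by the nearest whole lattice vector. A scalar sketch of the same formula (NumPy stands in for the old scipy re-exports):

import numpy as np

for d in (0.7, -0.6, 0.4):
    print(d, "->", d - np.sign(d) * np.floor(abs(d) + .5))
# 0.7 -> -0.3, -0.6 -> 0.4, 0.4 -> 0.4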
Example 14: plot_histogram
def plot_histogram(X, Y, w, b):
    ''' Plots a histogram of classifier outputs (w^T X) for each class with pl.hist
        The title of the histogram is the accuracy of the classification
        Accuracy = #correctly classified points / N
        Definition:  plot_histogram(X, Y, w, b)
        Input:       X  -  DxN array of N data points with D features
                     Y  -  1D array of length N of class labels
                     w  -  1D array of length D, weight vector
                     b  -  bias term for linear classification
    '''
    # indices of the correctly classified points
    correct = (sp.sign(w.dot(X) - b) == Y).nonzero()[0]
    # class labels 1
    target = w.dot(X[:, (Y == 1)])
    # class labels -1
    non_target = w.dot(X[:, (Y == -1)])
    pl.title("Acc %0.0f%%" % (float(correct.shape[0]) / X.shape[1] * 100,))
    pl.xlabel('w^T X')
    pl.hist(non_target)
    pl.hist(target)
    pl.legend(["non target", "target"], loc=0)
    pl.show()
Example 15: classify
def classify(self, xL, xR):
    a1L, a1R, a2L, a2LR, a2R, a3, z1Lb, z1LRb, z1Rb, z2b, xLb, xRb = self.forward_pass(xL, xR)
    if self.k == 2:
        classif = sp.sign(a3)
    else:
        classif = sp.argmax(a3, axis=0)
    return a3, classif