This article collects typical usage examples of the Python function numpy.core.numeric.ones. If you have been wondering what ones does, how to call it, and what it looks like in real code, the hand-picked examples below may help.
The following shows 15 code examples of the ones function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
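As a quick orientation, a minimal sketch of ones itself (numpy.core.numeric is the internal module where the public numpy.ones has historically lived):

import numpy as np

a = np.ones(3)                    # array([1., 1., 1.]), default dtype float64
b = np.ones((2, 4), dtype=int)    # 2x4 array of integer ones
print(a)
print(b)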
Example 1: __train__
def __train__(self, data, labels):
    l = labels.reshape((-1, 1))
    self.__trainingData__ = data
    self.__trainingLabels__ = l
    N = len(l)
    H = zeros((N, N))
    for i in range(N):
        for j in range(N):
            H[i, j] = self.__trainingLabels__[i] * self.__trainingLabels__[j] * self.__kernelFunc__(self.__trainingData__[i], self.__trainingData__[j])
    f = -1.0 * ones(labels.shape)
    lb = zeros(labels.shape)
    ub = self.C * ones(labels.shape)
    Aeq = labels
    beq = 0.0
    suppressOut = True
    if suppressOut:
        # silence the QP solver by redirecting stdout to /dev/null
        devnull = open('/dev/null', 'w')
        oldstdout_fno = os.dup(sys.stdout.fileno())
        os.dup2(devnull.fileno(), 1)
    p = QP(matrix(H), f.tolist(), lb=lb.tolist(), ub=ub.tolist(), Aeq=Aeq.tolist(), beq=beq)
    r = p.solve('cvxopt_qp')
    if suppressOut:
        os.dup2(oldstdout_fno, 1)
    lim = 1e-4
    r.xf[where(abs(r.xf) < lim)] = 0
    self.__lambdas__ = r.xf
    nonzeroindexes = where(r.xf > lim)[0]
    # l1 = nonzeroindexes[0]
    # self.w0 = 1.0/labels[l1]-dot(self.w,data[l1])
    self.numSupportVectors = len(nonzeroindexes)
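As a side note on where ones fits in this example: the SVM dual quadratic program needs a linear term of all -1.0 and box bounds 0 <= lambda_i <= C. A minimal numpy-only sketch of that setup with toy labels and a made-up C (the QP solve itself is omitted):

import numpy as np

labels = np.array([1.0, -1.0, 1.0, -1.0])   # toy labels, hypothetical
C = 10.0                                    # soft-margin parameter, hypothetical
f = -1.0 * np.ones(labels.shape)            # linear term of the dual objective
lb = np.zeros(labels.shape)                 # lower bounds: 0 <= lambda_i
ub = C * np.ones(labels.shape)              # upper bounds: lambda_i <= C
print(f, lb, ub, sep='\n')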
Example 2: select
def select(condlist, choicelist, default=0):
    """ Return an array composed of different elements of choicelist
    depending on the list of conditions.

    condlist is a list of condition arrays containing ones or zeros.

    choicelist is a list of choice arrays (of the "same" size as the
    arrays in condlist). The result array has the "same" size as the
    arrays in choicelist. If condlist is [c0, ..., cN-1] then choicelist
    must be of length N. The elements of the choicelist can then be
    represented as [v0, ..., vN-1]. The default choice if none of the
    conditions are met is given as the default argument.

    The conditions are tested in order and the first one satisfied is
    used to select the choice. In other words, the elements of the
    output array are found from the following tree (notice the order of
    the conditions matters):

    if c0: v0
    elif c1: v1
    elif c2: v2
    ...
    elif cN-1: vN-1
    else: default

    Note that one of the condition arrays must be large enough to handle
    the largest array in the choice list.
    """
    n = len(condlist)
    n2 = len(choicelist)
    if n2 != n:
        raise ValueError("list of cases must be same length as list of conditions")
    choicelist.insert(0, default)
    S = 0
    pfac = 1
    for k in range(1, n + 1):
        S += k * pfac * asarray(condlist[k - 1])
        if k < n:
            pfac *= (1 - asarray(condlist[k - 1]))
    # handle special case of a 1-element condition but
    # a multi-element choice
    if type(S) in ScalarType or max(asarray(S).shape) == 1:
        pfac = asarray(1)
        for k in range(n2 + 1):
            pfac = pfac + asarray(choicelist[k])
        if type(S) in ScalarType:
            S = S * ones(asarray(pfac).shape, type(S))
        else:
            S = S * ones(asarray(pfac).shape, S.dtype)
    return choose(S, tuple(choicelist))
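A short usage sketch (numpy's public numpy.select exposes the same interface as this implementation, so it can stand in here):

import numpy as np

x = np.arange(6)                      # [0 1 2 3 4 5]
condlist = [x < 2, x > 3]             # conditions, tested in order
choicelist = [x, x ** 2]              # values picked where the first true condition matches
print(np.select(condlist, choicelist, default=0))   # -> [ 0  1  0  0 16 25]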
Example 3: lpc
def lpc(self, x0=None, X=None, weights=None):
    '''Will return the scaled curve if self._lpcParameters['scaled'] = True; to return the curve
    on the same scale as the originally input data, call getCurve with unscale = True.

    Arguments
    ---------
    x0 : 2-dim numpy.array containing #rows equal to number of explicitly defined start points
         and #columns equal to dimension of the feature space points; seeds for the start points algorithm
    X : 2-dim numpy.array containing #rows equal to number of data points and #columns equal to dimension
        of the feature space points
    weights : see self._followxSingleDirection docs
    '''
    if X is None:
        if self.Xi is None:
            raise ValueError('Data points have not yet been set in this LPCImpl instance. '
                             'Either supply as X parameter to this function or call setDataPoints')
    else:
        self.setDataPoints(X)
    N = self.Xi.shape[0]
    if self._lpcParameters['binary'] or weights is None:
        self._weights = ones(N, dtype=float)
    else:
        self._weights = array(weights, dtype=float)
        if self._weights.shape != (N,):
            raise ValueError('Weights must be a one-dimensional vector of weights with size equal to the sample size')
    self._selectStartPoints(x0)
    # TODO add initialization relevant for other branches
    m = self.x0.shape[0]  # how many starting points were actually generated
    way = self._lpcParameters['way']
    self._curve = [self._followx(self.x0[j], way=way, weights=self._weights) for j in range(m)]
    return self._curve
Example 4: vander
def vander(x, N=None):
    """
    Generate a Van der Monde matrix.

    The columns of the output matrix are decreasing powers of the input
    vector. Specifically, the i-th output column is the input vector to
    the power of ``N - i - 1``. Such a matrix with a geometric progression
    in each row is named Van Der Monde, or Vandermonde matrix, from
    Alexandre-Theophile Vandermonde.

    Parameters
    ----------
    x : array_like
        1-D input array.
    N : int, optional
        Order of (number of columns in) the output. If `N` is not specified,
        a square array is returned (``N = len(x)``).

    Returns
    -------
    out : ndarray
        Van der Monde matrix of order `N`. The first column is ``x^(N-1)``,
        the second ``x^(N-2)`` and so forth.

    References
    ----------
    .. [1] Wikipedia, "Vandermonde matrix",
           http://en.wikipedia.org/wiki/Vandermonde_matrix

    Examples
    --------
    >>> x = np.array([1, 2, 3, 5])
    >>> N = 3
    >>> np.vander(x, N)
    array([[ 1,  1,  1],
           [ 4,  2,  1],
           [ 9,  3,  1],
           [25,  5,  1]])

    >>> np.column_stack([x**(N-1-i) for i in range(N)])
    array([[ 1,  1,  1],
           [ 4,  2,  1],
           [ 9,  3,  1],
           [25,  5,  1]])

    >>> x = np.array([1, 2, 3, 5])
    >>> np.vander(x)
    array([[  1,   1,   1,   1],
           [  8,   4,   2,   1],
           [ 27,   9,   3,   1],
           [125,  25,   5,   1]])

    """
    x = asarray(x)
    if N is None:
        N = len(x)
    X = ones((len(x), N), x.dtype)
    for i in range(N - 1):
        X[:, i] = x ** (N - i - 1)
    return X
Example 5: gmmEM
def gmmEM(data, K, it, show=False, usekmeans=True):
    # data += finfo(float128).eps*100
    centroid = kmeans2(data, K)[0] if usekmeans else ((max(data) - min(data)) * random_sample((K, data.shape[1])) + min(data))
    N = data.shape[0]
    gmm = GaussianMM(centroid)
    if show: gmm.draw(data)
    while it > 0:
        print(it, "iterations remaining")
        it = it - 1
        # e-step
        gausses = zeros((K, N), dtype=data.dtype)
        for k in range(0, K):
            gausses[k] = gmm.c[k] * mulnormpdf(data, gmm.mean[k], gmm.covm[k])
        sums = sum(gausses, axis=0)
        if count_nonzero(sums) != sums.size:
            raise ZeroDivisionError("Divide by Zero")
        gausses /= sums
        # m-step
        sg = sum(gausses, axis=1)
        if count_nonzero(sg) != sg.size:
            raise ZeroDivisionError("Divide by Zero")
        gmm.c = ones(sg.shape) / N * sg
        for k in range(0, K):
            gmm.mean[k] = sum(data * gausses[k].reshape((-1, 1)), axis=0) / sg[k]
            d = data - gmm.mean[k]
            d1 = d.transpose() * gausses[k]
            gmm.covm[k] = dot(d1, d) / sg[k]
        if show: gmm.draw(data)
    return gmm
Example 6: polyint
def polyint(p, m=1, k=None):
    """Return the mth analytical integral of the polynomial p.

    If k is None, then zero-valued constants of integration are used.
    Otherwise, k should be a list of length m (or a scalar if m=1) to
    represent the constants of integration to use for each integration
    (starting with k[0]).
    """
    m = int(m)
    if m < 0:
        raise ValueError("Order of integral must be positive (see polyder)")
    if k is None:
        k = NX.zeros(m, float)
    k = atleast_1d(k)
    if len(k) == 1 and m > 1:
        k = k[0] * NX.ones(m, float)
    if len(k) < m:
        raise ValueError("k must be a scalar or a rank-1 array of length 1 or >m.")
    if m == 0:
        return p
    else:
        truepoly = isinstance(p, poly1d)
        p = NX.asarray(p)
        y = NX.zeros(len(p) + 1, float)
        y[:-1] = p * 1.0 / NX.arange(len(p), 0, -1)
        y[-1] = k[0]
        val = polyint(y, m - 1, k=k[1:])
        if truepoly:
            val = poly1d(val)
        return val
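This routine survives as numpy.polyint, so the recursion above can be exercised directly; a quick sketch:

import numpy as np

p = np.poly1d([6, 2, 1])   # 6*x**2 + 2*x + 1
P = np.polyint(p)          # antiderivative with zero constant: 2*x**3 + x**2 + x
print(P)
print(np.polyder(P))       # differentiating recovers 6*x**2 + 2*x + 1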
Example 7: smooth
def smooth(x, window_len=11, window='hanning'):
    """Smooth the data using a window with requested size.

    This method is based on the convolution of a scaled window with the signal.
    The signal is prepared by introducing reflected copies of the signal
    (with the window size) in both ends so that transient parts are minimized
    in the beginning and end part of the output signal.

    input:
        x: the input signal
        window_len: the dimension of the smoothing window; should be an odd integer
        window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
            flat window will produce a moving average smoothing.
    output:
        the smoothed signal
    example:
        t = arange(-2, 2, 0.1)
        x = sin(t) + randn(len(t)) * 0.1
        y = smooth(x)
    see also:
        numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
        scipy.signal.lfilter

    TODO: the window parameter could be the window itself if an array instead of a string
    """
    from numpy.core.numeric import ones
    import numpy
    x = numpy.array(x)
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        return x
    if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError("Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
    # pad the signal with reflected copies of itself at both ends
    s = numpy.r_[2 * x[0] - x[window_len:1:-1], x, 2 * x[-1] - x[-1:-window_len:-1]]
    if window == 'flat':  # moving average
        w = ones(window_len, 'd')
    else:
        w = getattr(numpy, window)(window_len)
    y = numpy.convolve(w / w.sum(), s, mode='same')
    return y[window_len - 1:-window_len + 1]
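A small usage sketch, assuming the smooth function above is in scope (the data here is made up):

import numpy as np

t = np.arange(-2, 2, 0.1)
noisy = np.sin(t) + np.random.randn(len(t)) * 0.1
smoothed = smooth(noisy, window_len=11, window='hanning')
print(noisy.shape, smoothed.shape)   # both (40,) with the edge handling used above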
Example 8: train
def train(self, data, labels):
    l = labels.reshape((-1, 1))
    xy = data * l
    H = dot(xy, transpose(xy))
    f = -1.0 * ones(labels.shape)
    lb = zeros(labels.shape)
    ub = self.C * ones(labels.shape)
    Aeq = labels
    beq = 0.0
    p = QP(matrix(H), f.tolist(), lb=lb.tolist(), ub=ub.tolist(), Aeq=Aeq.tolist(), beq=beq)
    r = p.solve('cvxopt_qp')
    r.xf[where(r.xf < 1e-3)] = 0
    self.w = dot(r.xf * labels, data)
    nonzeroindexes = where(r.xf > 1e-4)[0]
    l1 = nonzeroindexes[0]
    self.w0 = 1.0 / labels[l1] - dot(self.w, data[l1])
    self.numSupportVectors = len(nonzeroindexes)
Example 9: tranNBO
def tranNBO(trainMatrix, trainCategory):
    numTrainDocs = len(trainMatrix)
    numWords = len(trainMatrix[0])
    pAbusive = sum(trainCategory) / float(numTrainDocs)  # prior probability of class 1
    p0Num = ones(numWords)
    p1Num = ones(numWords)  # initialize counts to 1 (Laplace smoothing) so no conditional probability is 0
    p0Denom = 2.0
    p1Denom = 2.0
    for i in range(numTrainDocs):
        if trainCategory[i] == 1:
            p1Num += trainMatrix[i]
            p1Denom += sum(trainMatrix[i])
        else:
            p0Num += trainMatrix[i]
            p0Denom += sum(trainMatrix[i])
    p1Vect = log(p1Num / p1Denom)  # log conditional probability of each word given class 1
    p0Vect = log(p0Num / p0Denom)  # log conditional probability of each word given class 0
    return p0Vect, p1Vect, pAbusive  # return the conditional probabilities and the prior of class 1
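A tiny usage sketch with a made-up document-term matrix (it assumes the tranNBO function above and the numpy names it relies on, such as ones and log, are already imported):

import numpy as np

# rows are documents as word-count vectors; labels: 1 = abusive, 0 = normal
trainMatrix = np.array([[1, 0, 2, 0],
                        [0, 1, 0, 1],
                        [2, 0, 1, 0]])
trainCategory = np.array([1, 0, 1])

p0Vect, p1Vect, pAbusive = tranNBO(trainMatrix, trainCategory)
print(pAbusive)   # 2/3: fraction of training documents labeled 1
print(p1Vect)     # log P(word | class 1) per word
print(p0Vect)     # log P(word | class 0) per word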
Example 10: hamming
def hamming(M):
    """hamming(M) returns the M-point Hamming window.
    """
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    n = arange(0, M)
    return 0.54 - 0.46 * cos(2.0 * pi * n / (M - 1))
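A quick sanity-check sketch for the Hamming window above: the same cosine formula is what numpy's built-in window returns:

import numpy as np

M = 5
n = np.arange(M)
manual = 0.54 - 0.46 * np.cos(2.0 * np.pi * n / (M - 1))
print(np.allclose(manual, np.hamming(M)))   # True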
Example 11: bartlett
def bartlett(M):
    """bartlett(M) returns the M-point Bartlett window.
    """
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    n = arange(0, M)
    return where(less_equal(n, (M - 1) / 2.0), 2.0 * n / (M - 1), 2.0 - 2.0 * n / (M - 1))
Example 12: blackman
def blackman(M):
    """blackman(M) returns the M-point Blackman window.
    """
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    n = arange(0, M)
    return 0.42 - 0.5 * cos(2.0 * pi * n / (M - 1)) + 0.08 * cos(4.0 * pi * n / (M - 1))
Example 13: readFile
def readFile(self, filePath, fileSize):
    '''
    file format
    ---------------------------
    index1 xx.xxx yy.yyy
    index2 xx.xxx yy.yyy
    index3 xx.xxx yy.yyy
    ---------------------------
    We drop the 'index' column and extract the two values from each line.
    '''
    try:
        __fpath = filePath   # path of the file
        __size = fileSize    # number of data items in the file
        # these values may be subject to change depending on the data.txt format
        __firstValStart = 2
        __firstValEnd = 12
        __secondValStart = 13
        __secondValEnd = 22
        from numpy import float64
        # 3 columns: bias value, first value, second value
        __array = ones((__size, 3), float64)
        f = open(__fpath, mode='r', buffering=1, encoding=None, errors=None, newline=None, closefd=True)
        print('reading data from file....')
        for i in range(0, __size):
            line = f.readline()
            __firstValue = line[__firstValStart:__firstValEnd]
            __secondValue = line[__secondValStart:__secondValEnd]
            __array[i, 1] = __firstValue
            __array[i, 2] = __secondValue
            # print(__array[i, 1], __array[i, 2])
        print('data reading complete....')
        return __array
    except IOError:
        pass
Example 14: logisticRegression
def logisticRegression(trainData, trainLabels, testData, testLabels):
    # adjust the data, adding the 'free parameter' to the train data
    trainDataWithFreeParam = hstack((trainData.copy(), ones(trainData.shape[0])[:, newaxis]))
    testDataWithFreeParam = hstack((testData.copy(), ones(testData.shape[0])[:, newaxis]))
    alpha = 10
    oldW = zeros(trainDataWithFreeParam.shape[1])
    newW = ones(trainDataWithFreeParam.shape[1])
    iteration = 0
    trainDataWithFreeParamTranspose = transpose(trainDataWithFreeParam)
    alphaI = alpha * identity(oldW.shape[0])
    while not array_equal(oldW, newW):
        if iteration == 100:
            break
        oldW = newW.copy()
        yVect = yVector(oldW, trainDataWithFreeParam)
        r = R(yVect)
        firstTerm = inv(alphaI + dot(dot(trainDataWithFreeParamTranspose, r), trainDataWithFreeParam))
        secondTerm = dot(trainDataWithFreeParamTranspose, (yVect - trainLabels)) + alpha * oldW
        newW = oldW - dot(firstTerm, secondTerm)
        iteration += 1
    # see how well we did
    numCorrect = 0
    for x, t in izip(testDataWithFreeParam, testLabels):
        if yScalar(newW, x) >= 0.5:
            if t == 1:
                numCorrect += 1
        else:
            if t == 0:
                numCorrect += 1
    return float(numCorrect) / float(len(testLabels))
Example 15: vander
def vander(x, N=None):
    """
    Generate the Vandermonde matrix of vector x.

    The i-th column of X is the (N-i-1)-th power of x. N is the
    maximum power to compute; if N is None it defaults to len(x).
    """
    x = asarray(x)
    if N is None:
        N = len(x)
    X = ones((len(x), N), x.dtype)
    for i in range(N - 1):
        X[:, i] = x ** (N - i - 1)
    return X