This page collects typical usage examples of the Python method sandbox.util.Parameter.Parameter.checkInt. If you are wondering how Parameter.checkInt is used in practice, the curated examples below should help. You can also explore further usage examples of the containing class, sandbox.util.Parameter.Parameter.
The following 15 code examples of Parameter.checkInt are shown, sorted by popularity by default.
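Every example below calls Parameter.checkInt to validate an integer argument against a lower and an upper bound. The actual implementation ships with the sandbox package and is not reproduced on this page; the following is only a rough, assumed sketch of its behaviour (the function body, error type and messages are illustrative, not the package's real code):

import numbers

def checkInt(i, minVal, maxVal):
    # Assumed behaviour: accept only integers within [minVal, maxVal], otherwise raise.
    # The real sandbox.util.Parameter.Parameter.checkInt is a static method and may
    # differ in details (error types, messages, accepted numeric types).
    if not isinstance(i, numbers.Integral):
        raise ValueError("Expecting an integer, got " + str(type(i)))
    if i < minVal or i > maxVal:
        raise ValueError("Value " + str(i) + " is outside [" + str(minVal) + ", " + str(maxVal) + "]")

Several examples pass float('inf') as a bound, so the bounds themselves need not be integers.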
Example 1: array1DToRow
# Required import: from sandbox.util.Parameter import Parameter [as alias]
# Or alternatively: from sandbox.util.Parameter.Parameter import checkInt [as alias]
def array1DToRow(X, precision=3):
    """
    Take a 1D numpy array and print it in LaTeX table row format, i.e. x1 & x2 & ... & xn.

    :param X: The array to print
    :type X: :class:`ndarray`
    :param precision: The precision of the printed floating point numbers.
    :type precision: :class:`int`
    """
    Parameter.checkInt(precision, 0, 10)
    if X.ndim != 1:
        raise ValueError("Array must be one dimensional")

    n = X.shape[0]
    outputStr = ""

    if X.dtype == float:
        fmtStr = "%." + str(precision) + "f & "
        endFmtStr = "%." + str(precision) + "f"
    else:
        fmtStr = "%d & "
        endFmtStr = "%d"

    for i in range(0, n):
        if i != n - 1:
            outputStr += fmtStr % X[i]
        else:
            outputStr += endFmtStr % X[i]

    return outputStr
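A quick usage sketch (assuming numpy is imported and array1DToRow is available as defined above):

import numpy

X = numpy.array([1.5, 2.25, 3.75])
print(array1DToRow(X, precision=2))   # 1.50 & 2.25 & 3.75

Xint = numpy.array([1, 2, 3])
print(array1DToRow(Xint))             # 1 & 2 & 3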
Example 2: shuffleSplit
# Required import: from sandbox.util.Parameter import Parameter [as alias]
# Or alternatively: from sandbox.util.Parameter.Parameter import checkInt [as alias]
def shuffleSplit(repetitions, numExamples, trainProportion=None):
    """
    Random permutation cross-validation iterator. The training set is sampled
    without replacement and of size (repetitions-1)/repetitions of the examples,
    and the test set represents the remaining examples. Each repetition is
    sampled independently.

    :param repetitions: The number of repetitions to perform.
    :type repetitions: :class:`int`
    :param numExamples: The number of examples.
    :type numExamples: :class:`int`
    :param trainProportion: The size of the training set relative to numExamples, between 0 and 1, or None to use (repetitions-1)/repetitions.
    :type trainProportion: :class:`float`
    """
    Parameter.checkInt(numExamples, 2, float('inf'))
    Parameter.checkInt(repetitions, 1, float('inf'))
    if trainProportion is not None:
        Parameter.checkFloat(trainProportion, 0.0, 1.0)

    if trainProportion is None:
        trainSize = int((repetitions-1)*numExamples/repetitions)
    else:
        trainSize = int(trainProportion*numExamples)

    idx = []

    for i in range(repetitions):
        inds = numpy.random.permutation(numExamples)
        trainInds = inds[0:trainSize]
        testInds = inds[trainSize:]
        idx.append((trainInds, testInds))

    return idx
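A usage sketch (the sizes are illustrative, and shuffleSplit with its numpy dependency is assumed to be importable from the sandbox package):

splits = shuffleSplit(repetitions=3, numExamples=10, trainProportion=0.8)
for trainInds, testInds in splits:
    print(trainInds.shape[0], testInds.shape[0])   # 8 2 for each repetition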
Example 3: evaluateCvOuter
# Required import: from sandbox.util.Parameter import Parameter [as alias]
# Or alternatively: from sandbox.util.Parameter.Parameter import checkInt [as alias]
def evaluateCvOuter(self, X, y, folds):
    """
    Compute the average AUC using stratified k-fold cross validation and the
    linear or RBF kernel.
    """
    Parameter.checkInt(folds, 2, float('inf'))
    idx = cross_val.StratifiedKFold(y, folds)
    metricMethods = [Evaluator.auc2, Evaluator.roc]

    if self.kernel == "linear":
        logging.debug("Running linear rank SVM ")
        trainMetrics, testMetrics = AbstractPredictor.evaluateLearn2(X, y, idx, self.modelSelectLinear, self.predict, metricMethods)
    elif self.kernel == "rbf":
        logging.debug("Running RBF rank SVM")
        trainMetrics, testMetrics = AbstractPredictor.evaluateLearn2(X, y, idx, self.modelSelectRBF, self.predict, metricMethods)

    bestTrainAUCs = trainMetrics[0]
    bestTrainROCs = trainMetrics[1]
    bestTestAUCs = testMetrics[0]
    bestTestROCs = testMetrics[1]

    bestParams = {}
    bestMetaDicts = {}
    allMetrics = [bestTrainAUCs, bestTrainROCs, bestTestAUCs, bestTestROCs]

    return (bestParams, allMetrics, bestMetaDicts)
Example 4: setNumTrees
# Required import: from sandbox.util.Parameter import Parameter [as alias]
# Or alternatively: from sandbox.util.Parameter.Parameter import checkInt [as alias]
def setNumTrees(self, numTrees):
    """
    :param numTrees: The number of trees to generate in the forest.
    :type numTrees: :class:`int`
    """
    Parameter.checkInt(numTrees, 1, float('inf'))
    self.numTrees = numTrees
Example 5: setBestResponse
# Required import: from sandbox.util.Parameter import Parameter [as alias]
# Or alternatively: from sandbox.util.Parameter.Parameter import checkInt [as alias]
def setBestResponse(self, bestResponse):
    """
    :param bestResponse: the label corresponding to "positive"
    :type bestResponse: :class:`int`
    """
    Parameter.checkInt(bestResponse, -float("inf"), float("inf"))
    self.bestResponse = bestResponse
Example 6: generateGraph
# Required import: from sandbox.util.Parameter import Parameter [as alias]
# Or alternatively: from sandbox.util.Parameter.Parameter import checkInt [as alias]
def generateGraph(self, alpha, p, dim):
    Parameter.checkFloat(alpha, 0.0, float('inf'))
    Parameter.checkFloat(p, 0.0, 1.0)
    Parameter.checkInt(dim, 0, float('inf'))

    numVertices = self.graph.getNumVertices()
    self.X = numpy.random.rand(numVertices, dim)

    D = KernelUtils.computeDistanceMatrix(numpy.dot(self.X, self.X.T))
    P = numpy.exp(-alpha * D)
    diagIndices = numpy.array(list(range(0, numVertices)))
    P[(diagIndices, diagIndices)] = numpy.zeros(numVertices)

    B = numpy.random.rand(numVertices, numVertices) <= P

    # Note that B is symmetric - could just go through e.g. the upper triangle
    for i in range(numpy.nonzero(B)[0].shape[0]):
        v1 = numpy.nonzero(B)[0][i]
        v2 = numpy.nonzero(B)[1][i]
        self.graph.addEdge(v1, v2)

    erdosRenyiGenerator = ErdosRenyiGenerator(p)
    self.graph = erdosRenyiGenerator.generate(self.graph, False)

    return self.graph
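generateGraph combines a geometric random graph with Erdos-Renyi noise: vertices receive random positions, pairwise distances become connection probabilities P = exp(-alpha * D), and edges are Bernoulli samples. Below is a self-contained sketch of just that sampling step, using plain numpy in place of the sandbox graph and KernelUtils helpers (all names and sizes are illustrative):

import numpy

numVertices, dim, alpha = 20, 2, 5.0
X = numpy.random.rand(numVertices, dim)

# Pairwise Euclidean distances, computed directly rather than via KernelUtils
D = numpy.sqrt(((X[:, None, :] - X[None, :, :])**2).sum(axis=2))

P = numpy.exp(-alpha * D)
numpy.fill_diagonal(P, 0)                    # no self-loops

B = numpy.random.rand(numVertices, numVertices) <= P
edges = numpy.transpose(numpy.nonzero(B))    # candidate (v1, v2) pairs to add
print(edges.shape[0], "sampled edges")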
Example 7: setMinSplit
# Required import: from sandbox.util.Parameter import Parameter [as alias]
# Or alternatively: from sandbox.util.Parameter.Parameter import checkInt [as alias]
def setMinSplit(self, minSplit):
    """
    :param minSplit: the minimum number of examples in a node for it to be split.
    :type minSplit: :class:`int`
    """
    Parameter.checkInt(minSplit, 2, float("inf"))
    self.minSplit = minSplit
Example 8: setMaxDepth
# Required import: from sandbox.util.Parameter import Parameter [as alias]
# Or alternatively: from sandbox.util.Parameter.Parameter import checkInt [as alias]
def setMaxDepth(self, maxDepth):
    """
    :param maxDepth: the maximum depth of the learnt tree.
    :type maxDepth: :class:`int`
    """
    Parameter.checkInt(maxDepth, 1, float("inf"))
    self.maxDepth = int(maxDepth)
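The setter examples (4, 5, 7 and 8) all follow the same validate-then-assign pattern. A minimal sketch of how a bad value surfaces, using a hypothetical stand-in class; the TreeParams name is illustrative, and the ValueError matches the assumed checkInt sketch near the top of this page, not a documented guarantee:

class TreeParams(object):
    # Stand-in for the learner classes that expose the setters above
    def setMaxDepth(self, maxDepth):
        Parameter.checkInt(maxDepth, 1, float("inf"))
        self.maxDepth = int(maxDepth)

params = TreeParams()
params.setMaxDepth(10)        # accepted
try:
    params.setMaxDepth(0)     # below the minimum of 1; checkInt is expected to raise
except ValueError as error:
    print("Rejected:", error)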
Example 9: predictEdges
# Required import: from sandbox.util.Parameter import Parameter [as alias]
# Or alternatively: from sandbox.util.Parameter.Parameter import checkInt [as alias]
def predictEdges(self, vertexIndices):
    """
    Make a prediction for a series of edges using the score
    \sum_{z \in n(x) \cap n(y)} 1/\log(|n(z)|)
    over the common neighbours z of each vertex pair. Returns matrices whose
    rows are ranked lists of vertices, each of length self.windowSize.
    """
    Parameter.checkInt(self.windowSize, 1, self.graph.getNumVertices())
    logging.info("Running predictEdges in " + str(self.__class__.__name__))

    P = numpy.zeros((vertexIndices.shape[0], self.windowSize))
    S = numpy.zeros((vertexIndices.shape[0], self.windowSize))
    W = self.graph.getWeightMatrix()

    for i in range(vertexIndices.shape[0]):
        Util.printIteration(i, self.printStep, vertexIndices.shape[0])
        scores = numpy.zeros(self.graph.getNumVertices())

        for j in range(0, self.graph.getNumVertices()):
            commonNeighbours = numpy.nonzero(W[vertexIndices[i], :] * W[j, :])[0]

            for k in commonNeighbours:
                q = numpy.log(numpy.nonzero(W[k, :])[0].shape[0])
                if q != 0:
                    scores[j] = scores[j] + 1/q

        P[i, :], S[i, :] = self.indicesFromScores(vertexIndices[i], scores)

    return P, S
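The inner loop computes what is commonly called the Adamic-Adar score: every common neighbour z of two vertices contributes 1/log(|n(z)|). A self-contained numpy sketch of that scoring, independent of the sandbox graph classes (the adjacency matrix and function name are illustrative):

import numpy

# Small undirected graph as a 0/1 adjacency matrix
W = numpy.array([[0, 1, 1, 0],
                 [1, 0, 1, 1],
                 [1, 1, 0, 1],
                 [0, 1, 1, 0]], dtype=float)

def adamicAdarScore(W, x, y):
    # Common neighbours of x and y each contribute 1/log(degree)
    common = numpy.nonzero(W[x, :] * W[y, :])[0]
    score = 0.0
    for z in common:
        q = numpy.log(numpy.count_nonzero(W[z, :]))
        if q != 0:
            score += 1/q
    return score

print(adamicAdarScore(W, 0, 3))   # vertices 1 and 2 are common neighbours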
Example 10: bootstrap2
# Required import: from sandbox.util.Parameter import Parameter [as alias]
# Or alternatively: from sandbox.util.Parameter.Parameter import checkInt [as alias]
def bootstrap2(repetitions, numExamples):
    """
    Perform the 0.632 bootstrap, in which we take a sample with replacement from
    the dataset of size numExamples. The examples not present in the training
    set are used to form the test set. We oversample the test set to include
    0.368 of the examples from the training set. Returns a list of tuples of the form
    (trainIndices, testIndices).

    :param repetitions: The number of repetitions of bootstrap to perform.
    :type repetitions: :class:`int`
    :param numExamples: The number of examples.
    :type numExamples: :class:`int`
    """
    Parameter.checkInt(numExamples, 2, float('inf'))
    Parameter.checkInt(repetitions, 1, float('inf'))

    inds = []
    for i in range(repetitions):
        trainInds = numpy.random.randint(numExamples, size=numExamples)
        testInds = numpy.setdiff1d(numpy.arange(numExamples), numpy.unique(trainInds))
        #testInds = numpy.r_[testInds, trainInds[0:(numExamples*0.368)]]
        inds.append((trainInds, testInds))

    return inds
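A usage sketch (assuming numpy is available and bootstrap2 is importable from the sandbox package):

import numpy

samples = bootstrap2(repetitions=2, numExamples=10)
for trainInds, testInds in samples:
    # trainInds is drawn with replacement, so it usually contains duplicates;
    # testInds holds the examples never sampled into the training set
    print(len(trainInds), len(numpy.unique(trainInds)), len(testInds))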
Example 11: randCrossValidation
# Required import: from sandbox.util.Parameter import Parameter [as alias]
# Or alternatively: from sandbox.util.Parameter.Parameter import checkInt [as alias]
def randCrossValidation(folds, numExamples, dtype=numpy.int32):
    """
    Returns a list of tuples (trainIndices, testIndices) using k-fold cross
    validation. In this case we randomise the indices and then split into
    folds.

    :param folds: The number of cross validation folds.
    :type folds: :class:`int`
    :param numExamples: The number of examples.
    :type numExamples: :class:`int`
    """
    Parameter.checkInt(folds, 1, numExamples)
    Parameter.checkInt(numExamples, 2, float('inf'))

    foldSize = float(numExamples)/folds
    indexList = []
    inds = numpy.array(numpy.random.permutation(numExamples), dtype)

    for i in range(0, folds):
        testIndices = inds[int(foldSize*i): int(foldSize*(i+1))]
        trainIndices = numpy.setdiff1d(numpy.arange(0, numExamples), testIndices)
        indexList.append((trainIndices, testIndices))

    return indexList
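A usage sketch (assuming randCrossValidation is importable from the sandbox package):

splits = randCrossValidation(3, 9)
for trainIndices, testIndices in splits:
    print(sorted(testIndices))   # each example appears in exactly one test fold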
Example 12: evaluateLearn
# Required import: from sandbox.util.Parameter import Parameter [as alias]
# Or alternatively: from sandbox.util.Parameter.Parameter import checkInt [as alias]
def evaluateLearn(X, y, idx, learnModel, predict, metricMethod, progress=True):
    """
    Evaluate this learning algorithm using the given list of training/test splits.
    The metricMethod is a method which takes (predictedY, realY) as input
    and returns a metric measuring the quality of the evaluation.

    :param X: A matrix with examples as rows
    :type X: :class:`ndarray`
    :param y: A vector of labels
    :type y: :class:`ndarray`
    :param idx: A list of training/test splits
    :type idx: :class:`list`
    :param learnModel: A function such that learnModel(X, y) finds a mapping from X to y
    :type learnModel: :class:`function`
    :param predict: A function such that predict(X) makes predictions for X
    :type predict: :class:`function`
    :param metricMethod: A function such that metricMethod(predY, testY) returns the quality of predicted labels predY
    :type metricMethod: :class:`function`

    Output: an array containing the metric evaluated on each split.
    """
    #Parameter.checkClass(idx, list)
    Parameter.checkClass(X, numpy.ndarray)
    Parameter.checkArray(X, softCheck=True)
    Parameter.checkInt(X.shape[0], 1, float('inf'))
    Parameter.checkClass(y, numpy.ndarray)
    Parameter.checkArray(y, softCheck=True)

    if y.ndim != 1:
        raise ValueError("Dimension of y must be 1")

    i = 0
    metrics = numpy.zeros(len(idx))
    logging.debug("EvaluateLearn: Using " + str(len(idx)) + " splits on " + str(X.shape[0]) + " examples")

    for idxtr, idxts in idx:
        if progress:
            Util.printConciseIteration(i, 1, len(idx))

        trainX, testX = X[idxtr, :], X[idxts, :]
        trainY, testY = y[idxtr], y[idxts]
        #logging.debug("Distribution of labels in evaluateLearn train: " + str(numpy.bincount(trainY)))
        #logging.debug("Distribution of labels in evaluateLearn test: " + str(numpy.bincount(testY)))

        learnModel(trainX, trainY)
        predY = predict(testX)
        gc.collect()

        metrics[i] = metricMethod(predY, testY)
        i += 1

    return metrics
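A usage sketch with a deliberately simple inline learner; everything except evaluateLearn and randCrossValidation (Example 11) is illustrative, and it assumes the sandbox Parameter checks accept these inputs:

import numpy

class NearestCentroid(object):
    # Minimal classifier: predict the class whose mean feature vector is closest
    def learn(self, X, y):
        self.classes = numpy.unique(y)
        self.centroids = numpy.array([X[y == c].mean(axis=0) for c in self.classes])

    def predict(self, X):
        dists = ((X[:, None, :] - self.centroids[None, :, :])**2).sum(axis=2)
        return self.classes[numpy.argmin(dists, axis=1)]

numpy.random.seed(21)
X = numpy.random.randn(40, 3)
y = (X[:, 0] > 0).astype(numpy.int64)

learner = NearestCentroid()
errorRate = lambda predY, testY: numpy.mean(predY != testY)

splits = randCrossValidation(5, X.shape[0])
metrics = evaluateLearn(X, y, splits, learner.learn, learner.predict, errorRate, progress=False)
print(metrics.mean())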
Example 13: eigenAdd
# Required import: from sandbox.util.Parameter import Parameter [as alias]
# Or alternatively: from sandbox.util.Parameter.Parameter import checkInt [as alias]
def eigenAdd(omega, Q, Y, k):
    """
    Perform an eigen update of the form A^*A + Y^*Y in which Y is a low-rank matrix
    and A^*A = Q Omega Q^*. We use the rank-k approximation Q_k Omega_k Q_k^* of A^*A
    and then approximate [A^*A + Y^*Y]_k.
    """
    #logging.debug("< eigenAdd >")
    Parameter.checkInt(k, 0, omega.shape[0])
    #if not numpy.isrealobj(omega) or not numpy.isrealobj(Q):
    #    raise ValueError("Eigenvalues and eigenvectors must be real")
    if omega.ndim != 1:
        raise ValueError("omega must be 1-d array")
    if omega.shape[0] != Q.shape[1]:
        raise ValueError("Must have same number of eigenvalues and eigenvectors")

    if __debug__:
        Parameter.checkOrthogonal(Q, tol=EigenUpdater.tol, softCheck=True, arrayInfo="input Q in eigenAdd()")

    # Taking the abs of the eigenvalues is correct
    inds = numpy.flipud(numpy.argsort(numpy.abs(omega)))

    omega, Q = Util.indEig(omega, Q, inds[numpy.abs(omega) > EigenUpdater.tol])
    Omega = numpy.diag(omega)

    YY = Y.conj().T.dot(Y)
    QQ = Q.dot(Q.conj().T)
    Ybar = Y - Y.dot(QQ)

    Pbar, sigmaBar, Qbar = numpy.linalg.svd(Ybar, full_matrices=False)
    inds = numpy.flipud(numpy.argsort(numpy.abs(sigmaBar)))
    inds = inds[numpy.abs(sigmaBar) > EigenUpdater.tol]
    Pbar, sigmaBar, Qbar = Util.indSvd(Pbar, sigmaBar, Qbar, inds)
    SigmaBar = numpy.diag(sigmaBar)
    Qbar = Ybar.T.dot(Pbar)
    Qbar = Qbar.dot(numpy.diag(numpy.diag(Qbar.T.dot(Qbar))**-0.5))

    r = sigmaBar.shape[0]

    YQ = Y.dot(Q)
    Zeros = numpy.zeros((r, omega.shape[0]))
    D = numpy.c_[Q, Qbar]

    YYQQ = YY.dot(QQ)
    Z = D.conj().T.dot(YYQQ + YYQQ.conj().T).dot(D)
    F = numpy.c_[numpy.r_[Omega - YQ.conj().T.dot(YQ), Zeros], numpy.r_[Zeros.T, SigmaBar.conj().dot(SigmaBar)]]
    F = F + Z

    pi, H = scipy.linalg.eigh(F)
    inds = numpy.flipud(numpy.argsort(numpy.abs(pi)))
    H = H[:, inds[0:k]]
    pi = pi[inds[0:k]]

    V = D.dot(H)
    #logging.debug("</ eigenAdd >")
    return pi, V
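A small verification sketch for this update; it assumes EigenUpdater.eigenAdd, numpy and scipy are importable alongside the sandbox package, and the test matrices are illustrative. With Y of rank 2 and k equal to the full dimension, the returned eigenvalues should closely match a direct eigendecomposition of A^*A + Y^*Y:

import numpy
import scipy.linalg

numpy.random.seed(21)
A = numpy.random.randn(10, 6)
Y = numpy.random.randn(2, 6)     # low-rank update

omega, Q = scipy.linalg.eigh(A.T.dot(A))
k = 6
pi, V = EigenUpdater.eigenAdd(omega, Q, Y, k)

# Compare against the direct eigendecomposition of A^*A + Y^*Y;
# the two printed eigenvalue lists should agree closely
piExact, VExact = scipy.linalg.eigh(A.T.dot(A) + Y.T.dot(Y))
print(numpy.sort(pi))
print(numpy.sort(piExact))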
Example 14: setPosteriorSampleSize
# Required import: from sandbox.util.Parameter import Parameter [as alias]
# Or alternatively: from sandbox.util.Parameter.Parameter import checkInt [as alias]
def setPosteriorSampleSize(self, posteriorSampleSize):
    """
    Set the sample size of the posterior distribution (population size).

    :param posteriorSampleSize: The size of the population
    :type posteriorSampleSize: :class:`int`
    """
    Parameter.checkInt(posteriorSampleSize, 0, float('inf'))
    self.N = posteriorSampleSize
Example 15: evaluateCvOuter
# Required import: from sandbox.util.Parameter import Parameter [as alias]
# Or alternatively: from sandbox.util.Parameter.Parameter import checkInt [as alias]
def evaluateCvOuter(self, X, Y, folds):
    """
    Run cross validation and output some ROC curves. In this case Y is a 1D array.

    :param X: A matrix with examples as rows
    :type X: :class:`ndarray`
    :param Y: A vector of labels
    :type Y: :class:`ndarray`
    :param folds: The number of cross validation folds
    :type folds: :class:`int`
    """
    Parameter.checkClass(X, numpy.ndarray)
    Parameter.checkClass(Y, numpy.ndarray)
    Parameter.checkInt(folds, 2, float('inf'))
    if Y.ndim != 1:
        raise ValueError("Expecting Y to be 1D")

    indexList = cross_val.StratifiedKFold(Y, folds)

    bestParams = []
    bestTrainAUCs = numpy.zeros(folds)
    bestTrainROCs = []
    bestTestAUCs = numpy.zeros(folds)
    bestTestROCs = []
    bestMetaDicts = []
    i = 0

    for trainInds, testInds in indexList:
        Util.printIteration(i, 1, folds, "Outer CV: ")
        trainX, trainY = X[trainInds, :], Y[trainInds]
        testX, testY = X[testInds, :], Y[testInds]

        self.learnModel(trainX, trainY)
        #self.learnModelCut(trainX, trainY)

        predTrainY = self.predict(trainX)
        predTestY = self.predict(testX)

        bestTrainAUCs[i] = Evaluator.auc(predTrainY, trainY)
        bestTestAUCs[i] = Evaluator.auc(predTestY, testY)

        # Store the parameters and ROC curves
        bestTrainROCs.append(Evaluator.roc(trainY, predTrainY))
        bestTestROCs.append(Evaluator.roc(testY, predTestY))

        metaDict = {}
        bestMetaDicts.append(metaDict)

        i += 1

    logging.debug("Mean test AUC = " + str(numpy.mean(bestTestAUCs)))
    logging.debug("Std test AUC = " + str(numpy.std(bestTestAUCs)))
    allMetrics = [bestTrainAUCs, bestTrainROCs, bestTestAUCs, bestTestROCs]

    return (bestParams, allMetrics, bestMetaDicts)