This article collects typical usage examples of the getProbs method of Python's LogisticRegression.LogisticRegression class. If you are wondering what LogisticRegression.getProbs does, how to call it, or what real usage looks like, the curated examples below should help. You can also read more about the enclosing class, LogisticRegression.LogisticRegression.
Two code examples of LogisticRegression.getProbs are shown below, sorted by popularity by default.
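Before the full examples, the basic call pattern is worth seeing in isolation. The sketch below is illustrative only: it assumes a Theano-tutorial-style LogisticRegression class whose getProbs() returns the symbolic class-probability output, that the constructor works without the Wold/bOld arguments seen in the examples below, and that the sizes 784/10 are placeholders.

# Minimal sketch (assumptions noted above; not taken from the examples below).
import numpy as np
import theano
import theano.tensor as T
from LogisticRegression import LogisticRegression

x = T.matrix('x')  # a batch of rasterized inputs, one row per example
classifier = LogisticRegression(input=x, n_in=784, n_out=10)  # placeholder sizes

# getProbs() returns a symbolic expression; compile it into a callable.
predict_probs = theano.function([x], classifier.getProbs())

batch = np.zeros((2, 784), dtype=theano.config.floatX)
print(predict_probs(batch).shape)  # expected: (2, 10), each row summing to 1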
Example 1: __init__
# Required import: from LogisticRegression import LogisticRegression [as alias]
# Or: from LogisticRegression.LogisticRegression import getProbs [as alias]
# The excerpt also assumes these are imported: gzip, pickle, numpy as np,
# theano, theano.tensor as T, LeNetConvPoolLayer, and HiddenLayer.
def __init__(self, stateIn, deepOut=False):
    global pickle
    print(" Loading previous state ...")
    if stateIn.endswith('gz'):
        f = gzip.open(stateIn, 'rb')
    else:
        f = open(stateIn, 'rb')  # binary mode: the file holds pickled data
    state_name = pickle.load(f)
    f.close()
    state = state_name[0]
    self.names = state_name[1]

    # Unpack the stored weights and biases: two conv-pool layers,
    # one hidden layer, and the logistic-regression output layer.
    convValues = state.convValues
    w0 = convValues[0][0]
    b0 = convValues[0][1]
    w1 = convValues[1][0]
    b1 = convValues[1][1]
    hiddenVals = state.hiddenValues
    wHidden = hiddenVals[0]
    bHidden = hiddenVals[1]
    logRegValues = state.logRegValues
    wLogReg = logRegValues[0]
    bLogReg = logRegValues[1]
    topo = state.topoplogy  # (sic) attribute name as stored in the state object
    nkerns = topo.nkerns
    n_out = len(self.names)
    assert n_out == np.shape(wLogReg)[1]

    print(" Some Values ...")
    print(" Number of Kernels : " + str(nkerns))
    print(" First Kernel w0[0][0] :\n" + str(w0[0][0]))
    print(" bHidden :\n" + str(bHidden))
    print(" bLogReg :\n" + str(bLogReg))

    print(" Building the theano model")
    batch_size = 1
    x = T.matrix('x')  # the data is presented as rasterized images
    layer0_input = x.reshape((batch_size, 1, topo.ishape[0], topo.ishape[1]))
    rng = np.random.RandomState(23455)  # unused here: the layers reuse the stored weights
    layer0 = LeNetConvPoolLayer(None, input=layer0_input,
                                image_shape=(batch_size, 1, topo.ishape[0], topo.ishape[1]),  # matches the reshape above
                                filter_shape=(nkerns[0], 1, topo.filter_1, topo.filter_1),
                                poolsize=(topo.pool_1, topo.pool_1),
                                wOld=w0, bOld=b0, deepOut=deepOut)
    layer1 = LeNetConvPoolLayer(None, input=layer0.output,
                                image_shape=(batch_size, nkerns[0], topo.in_2, topo.in_2),
                                filter_shape=(nkerns[1], nkerns[0], topo.filter_2, topo.filter_2),
                                poolsize=(topo.pool_2, topo.pool_2),
                                wOld=w1, bOld=b1, deepOut=deepOut)
    layer2_input = layer1.output.flatten(2)
    layer2 = HiddenLayer(None, input=layer2_input, n_in=nkerns[1] * topo.hidden_input,
                         n_out=topo.numLogisticInput, activation=T.tanh,
                         Wold=wHidden, bOld=bHidden)
    # classify the values of the fully-connected sigmoidal layer
    layer3 = LogisticRegression(input=layer2.output, n_in=topo.numLogisticInput,
                                n_out=n_out, Wold=wLogReg, bOld=bLogReg)
    # compile a function mapping a raw input image to class probabilities
    # index = T.lscalar()
    # test_model = theano.function([index], layer3.getProbs(),
    #     givens={x: test_set_x[index * batch_size: (index + 1) * batch_size]})
    self.predict_model = theano.function([x], layer3.getProbs())
    if deepOut:
        self.layer0_out = theano.function([x], layer0.output)
        self.layer0_conv = theano.function([x], layer0.conv_out)
        self.layer1_conv = theano.function([x], layer1.conv_out)
        self.layer1_out = theano.function([x], layer1.output)
        self.b0 = b0
        self.b1 = b1
        self.w0 = w0
        self.w1 = w1
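For context, an instance of the class above would presumably be used along the lines of the following sketch. The class name Predictor, the state-file name, and the 28x28 input size are hypothetical; only predict_model and names appear in the excerpt.

# Hypothetical usage; `Predictor`, the file name, and the input size are assumptions.
predictor = Predictor('trained_state.p.gz', deepOut=False)

img = np.random.rand(1, 28 * 28).astype(theano.config.floatX)  # one rasterized image
probs = predictor.predict_model(img)  # class probabilities via layer3.getProbs()
label = predictor.names[int(np.argmax(probs))]  # map the argmax back to a label name
print(label, float(probs.max()))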
Example 2: evaluate_lenet5
# Required import: from LogisticRegression import LogisticRegression [as alias]
# Or: from LogisticRegression.LogisticRegression import getProbs [as alias]
#......... part of the code is omitted here .........
# paper_14 again 0.02 dropout
# paper_15 again no dropout
layer2 = HiddenLayer(rng, input=layer2_input, n_in=topo.nkerns[1] * topo.hidden_input,
                     n_out=topo.numLogisticInput, activation=T.tanh,
                     Wold=wHidden, bOld=bHidden)
# classify the values of the fully-connected sigmoidal layer
layer3 = LogisticRegression(input=layer2.output, n_in=topo.numLogisticInput,
                            n_out=n_out, Wold=wLogReg, bOld=bLogReg)
# Some regularisation (not for the conv-Kernels)
L2_sqr = (layer2.W ** 2).sum() + (layer3.W ** 2).sum()
# the cost we minimize during training is the NLL of the model
cost = layer3.negative_log_likelihood(y) + 0.001 * L2_sqr
# create a function to compute the mistakes that are made by the model
test_model = theano.function([index], layer3.errors(y),
    givens={
        x: test_set_x[index * batch_size: (index + 1) * batch_size],
        y: test_set_y[index * batch_size: (index + 1) * batch_size]})
# Functions for statistics
test_logloss = theano.function([index], cost,
    givens={
        x: test_set_x[index * batch_size: (index + 1) * batch_size],
        y: test_set_y[index * batch_size: (index + 1) * batch_size]})
validate_logloss = theano.function([index], cost,
    givens={
        x: valid_set_x[index * batch_size: (index + 1) * batch_size],
        y: valid_set_y[index * batch_size: (index + 1) * batch_size]})
test_probs_fct = theano.function([index], layer3.getProbs(),
    givens={
        x: test_set_x[index * batch_size: (index + 1) * batch_size]})
validate_model = theano.function([index], layer3.errors(y),
    givens={
        x: valid_set_x[index * batch_size: (index + 1) * batch_size],
        y: valid_set_y[index * batch_size: (index + 1) * batch_size]})
# create a list of all model parameters to be fit by gradient descent
params = layer3.params + layer2.params + layer1.params + layer0.params
# create a list of gradients for all model parameters
grads = T.grad(cost, params)
# train_model is a function that updates the model parameters by
# SGD Since this model has many parameters, it would be tedious to
# manually create an update rule for each model parameter. We thus
# create the updates list by automatically looping over all
# (params[i],grads[i]) pairs.
updates = []
for param_i, grad_i in zip(params, grads):
    updates.append((param_i, param_i - learning_rate * grad_i))
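# Sketch (assumption): the compiled training function belongs to the omitted
# part of this example; in the Theano LeNet tutorial that this code follows,
# the updates list is typically wired up roughly as follows, not verbatim:
# train_model = theano.function([index], cost, updates=updates,
#     givens={
#         x: train_set_x[index * batch_size: (index + 1) * batch_size],
#         y: train_set_y[index * batch_size: (index + 1) * batch_size]})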
###############
# TRAIN MODEL #
###############
print('... training')
# early-stopping parameters
patience = 10000  # look at this many examples regardless
patience_increase = 2  # wait this much longer when a new best is found
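The excerpt stops here, but the patience parameters above drive a standard early-stopping loop in the Theano tutorials. The sketch below shows that pattern; n_epochs, n_train_batches, n_valid_batches, validation_frequency, train_model, and validate_model all come from the omitted code or are assumed here for illustration.

# Sketch of tutorial-style patience-driven early stopping (names partly assumed).
improvement_threshold = 0.995  # relative improvement that counts as a new best
best_validation_loss = np.inf
done_looping = False
epoch = 0
while (epoch < n_epochs) and (not done_looping):
    epoch += 1
    for minibatch_index in range(n_train_batches):
        iteration = (epoch - 1) * n_train_batches + minibatch_index
        train_model(minibatch_index)
        if (iteration + 1) % validation_frequency == 0:
            this_loss = np.mean([validate_model(i) for i in range(n_valid_batches)])
            if this_loss < best_validation_loss * improvement_threshold:
                # markedly better: extend patience and record the new best
                patience = max(patience, iteration * patience_increase)
                best_validation_loss = this_loss
        if patience <= iteration:
            done_looping = True  # patience exhausted
            break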