This article collects typical usage examples of the NeuralNet.update method from the Python neuralnet module. If you are wondering what exactly NeuralNet.update does, how it is called, or where to find working examples of it, the selected code samples below should help. You may also want to look further into the usage of the containing class, neuralnet.NeuralNet.
The following shows 3 code examples of NeuralNet.update, ordered by popularity.
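Before the full examples, a minimal sketch of the calling pattern may be useful. NeuralNet.update performs a forward pass: it takes a 2-D NumPy array with one row per input instance and returns the network's output for each row. The constructor signature and the activation-function import path below are assumptions taken from Examples 1 and 2, not part of the original snippets.

import numpy as np
from neuralnet import NeuralNet
from activation_functions import tanh_function, sigmoid_function  # assumed module path

# Positional constructor as used in Example 2:
# 2 inputs, 1 output, 2 nodes per hidden layer, 1 hidden layer,
# with one activation function per layer (hidden, output).
network = NeuralNet( 2, 1, 2, 1, [ tanh_function, sigmoid_function ] )

# update() expects a 2-D array, so a single input vector is wrapped
# in an outer list; the return value is the network's output row(s).
output = network.update( np.array([[0, 1]]) )
print( output )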
Example 1: train_xor_network
# Required import: from neuralnet import NeuralNet [as alias]
# Or: from neuralnet.NeuralNet import update [as alias]
# Required imports. The activation-function import path is an assumption based
# on the library's layout; Instance is the small features/targets container
# defined in Example 2 and must be defined or imported before running.
import numpy as np
from neuralnet import NeuralNet
from activation_functions import tanh_function, sigmoid_function

def train_xor_network():
    # two training sets
    training_one = [ Instance( [0,0], [0] ), Instance( [0,1], [1] ), Instance( [1,0], [1] ), Instance( [1,1], [0] ) ]
    training_two = [ Instance( [0,0], [0,0] ), Instance( [0,1], [1,1] ), Instance( [1,0], [1,1] ), Instance( [1,1], [0,0] ) ]

    settings = {
        # Required settings
        "n_inputs"              : 2,      # Number of network input signals
        "n_outputs"             : 1,      # Number of desired outputs from the network
        "n_hidden_layers"       : 1,      # Number of hidden layers in the network
        "n_hiddens"             : 2,      # Number of nodes in each hidden layer
        "activation_functions"  : [ tanh_function, sigmoid_function ],  # specify activation functions per layer eg: [ hidden_layer, output_layer ]

        # Optional settings
        "weights_low"           : -0.1,   # Lower bound on the initial weight range
        "weights_high"          : 0.1,    # Upper bound on the initial weight range
        "save_trained_network"  : False,  # Whether to write the trained weights to disk

        "input_layer_dropout"   : 0.0,    # Dropout fraction of the input layer
        "hidden_layer_dropout"  : 0.1,    # Dropout fraction in all hidden layers

        "batch_size"            : 0,      # 1 := online learning, 0 := entire training set as one batch, else := mini-batch size
    }

    # initialize the neural network
    global network
    network = NeuralNet( settings )

    # load a stored network configuration
    # network = NeuralNet.load_from_file( "xor_trained_configuration.pkl" )

    # start training on training set one
    network.backpropagation(
        training_one,            # specify the training set
        ERROR_LIMIT     = 1e-6,  # define an acceptable error limit
        learning_rate   = 0.03,  # learning rate
        momentum_factor = 0.95   # momentum
    )

    # Test the network by looping through the training set and printing the results.
    for instance in training_one:
        print( "Input: {features} -> Output: {output} \t| target: {target}".format(
            features = str( instance.features ),
            output   = str( network.update( np.array([instance.features]) ) ),
            target   = str( instance.targets )
        ) )

    # save the trained network
    network.save_to_file( "networks/XOR_Operator/XOR_Operator.obj" )
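As a follow-up, the configuration saved by save_to_file can be restored later and queried with update. This is only a sketch: it assumes load_from_file can read the file written by save_to_file, which the commented-out load_from_file line in the example suggests but does not show.

import numpy as np
from neuralnet import NeuralNet

# Restore the network written by train_xor_network() above (assumes
# load_from_file() is the counterpart of save_to_file()).
network = NeuralNet.load_from_file( "networks/XOR_Operator/XOR_Operator.obj" )

# Run a forward pass for each XOR pattern.
for features in [ [0,0], [0,1], [1,0], [1,1] ]:
    print( features, "->", network.update( np.array([features]) ) )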
Example 2: __init__
# Required import: from neuralnet import NeuralNet [as alias]
# Or: from neuralnet.NeuralNet import update [as alias]
# Required imports; the activation-function import path is an assumption
# based on the library's layout.
import numpy as np
from neuralnet import NeuralNet
from activation_functions import tanh_function, sigmoid_function

class Instance:
    # Simple container pairing a feature vector with its target vector.
    def __init__(self, features, target):
        self.features = np.array(features)
        self.targets  = np.array(target)
#end Instance

# training sets
training_one = [ Instance( [0,0], [0] ), Instance( [0,1], [1] ), Instance( [1,0], [1] ), Instance( [1,1], [0] ) ]
training_two = [ Instance( [0,0], [0,0] ), Instance( [0,1], [1,1] ), Instance( [1,0], [1,1] ), Instance( [1,1], [0,0] ) ]

n_inputs        = 2
n_outputs       = 1
n_hiddens       = 2   # nodes per hidden layer
n_hidden_layers = 1   # number of hidden layers

# specify activation functions per layer
activation_functions = [ tanh_function ]*n_hidden_layers + [ sigmoid_function ]

# initialize your neural network
network = NeuralNet( n_inputs, n_outputs, n_hiddens, n_hidden_layers, activation_functions )

# start training
network.backpropagation( training_one, ERROR_LIMIT = 1e-4 )

# print the network output next to the target for each training instance
for instance in training_one:
    print( instance.features, network.update( np.array([instance.features]) ), "\ttarget:", instance.targets )
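Note that every call above wraps a single feature vector as np.array([instance.features]), because update expects a 2-D array. Continuing from the snippet above, if the implementation also accepts several rows at once (an assumption, not shown in the example), the whole training set could be evaluated in one forward pass:

# Evaluate all four XOR patterns in a single call (assumes update()
# accepts a matrix with one row per instance).
features_matrix = np.array( [ instance.features for instance in training_one ] )
print( network.update( features_matrix ) )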
Example 3: NeuralNet
# Required import: from neuralnet import NeuralNet [as alias]
# Or: from neuralnet.NeuralNet import update [as alias]
# (This snippet is truncated at the top: the line below is the closing brace of
#  a settings dictionary like the one in Example 1; the imports, Instance class
#  and training_one are as in Examples 1 and 2.)
}

# initialize the neural network
network = NeuralNet( settings )

# load a stored network configuration
# network = NeuralNet.load_from_file( "trained_configuration.pkl" )

# start training on training set one
network.backpropagation(
    training_one,            # specify the training set
    ERROR_LIMIT     = 1e-6,  # define an acceptable error limit
    learning_rate   = 0.03,  # learning rate
    momentum_factor = 0.95   # momentum
)

# Test the network by looping through the training set and printing the results.
for instance in training_one:
    print( "Input: {features} -> Output: {output} \t| target: {target}".format(
        features = str( instance.features ),
        output   = str( network.update( np.array([instance.features]) ) ),
        target   = str( instance.targets )
    ) )

if settings.get( "save_trained_network", False ):
    # save the trained network
    network.save_to_file( "trained_configuration.pkl" )