当前位置: 首页>>代码示例>>Python>>正文


Python RecurrentNetwork.activate方法代码示例

本文整理汇总了Python中pybrain.structure.RecurrentNetwork.activate方法的典型用法代码示例。如果您正苦于以下问题:Python RecurrentNetwork.activate方法的具体用法?Python RecurrentNetwork.activate怎么用?Python RecurrentNetwork.activate使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在pybrain.structure.RecurrentNetwork的用法示例。


在下文中一共展示了RecurrentNetwork.activate方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: __init__

# 需要导入模块: from pybrain.structure import RecurrentNetwork [as 别名]
# 或者: from pybrain.structure.RecurrentNetwork import activate [as 别名]
class MoveBrain:
    def __init__(self):
        self.n = RecurrentNetwork()
        inLayer = LinearLayer(8)
        hiddenLayer = SigmoidLayer(4)
        self.numInputs = 8
        outLayer = LinearLayer(4)
        self.n.addInputModule(inLayer)
        self.n.addModule(hiddenLayer)
        self.n.addOutputModule(outLayer)

        in_to_hidden = FullConnection(inLayer, hiddenLayer)
        hidden_to_out = FullConnection(hiddenLayer, outLayer)

        self.n.addConnection(in_to_hidden)
        self.n.addConnection(hidden_to_out)

        self.n.sortModules()
        self.ds = SupervisedDataSet(8, 4) 
        self.trainer = BackpropTrainer(self.n, self.ds)

    def run(inputs):
        if inputs.size() == self.numInputs:
            self.n.activate(inputs)
        else:
            print "num of inputs do not match"

    def addRule(self,rule):
        self.ds.append(rule)

    def saveNetwork(self):
        fileObject = open('networks/avoidandfindv1', 'w')
        pickle.dump(self.n, fileObject)

        fileObject.close()
开发者ID:SlightlyCyborg,项目名称:pybot,代码行数:37,代码来源:brain.py

示例2: __init__

# 需要导入模块: from pybrain.structure import RecurrentNetwork [as 别名]
# 或者: from pybrain.structure.RecurrentNetwork import activate [as 别名]
class BrainController:

    indim = 2
    outdim = 2

    def __init__(self, trained_net = None):
        if trained_net == None:
            self.net = RecurrentNetwork()
            self.init_network(self.net)
        else:
            self.net = trained_net

    def init_network(self, net):
        net.addInputModule(LinearLayer(2, 'in'))
        net.addModule(SigmoidLayer(3, 'hidden'))
        net.addOutputModule(LinearLayer(2, 'out'))
        net.addModule(BiasUnit(name='bias'))
        net.addConnection(FullConnection(net['in'], net['hidden']))
        net.addConnection(FullConnection(net['hidden'], net['out']))
        net.sortModules()

    def train(self, data):
        ds = SupervisedDataSet(2, 2)
        for i in range(0, len(data)):
            input, target = data[i]
            ds.addSample(input, target)

        trainer = BackpropTrainer(self.net, ds, learningrate=0.01, momentum=0.99,
                verbose=True)

        max_error = 1e-5
        error = 1
        while abs(error) >= max_error:
            error = trainer.train()

        #self.validate_net()
        f = open('neuro.net', 'w')
        pickle.dump(self.net, f)
        f.close()

    def validate_net(self):
        print self.net.activate([0, 0])
        print self.net.activate([0, 1])
        print self.net.activate([0, 2])
        print self.net.activate([1, 0])
        print self.net.activate([1, 1])
        print self.net.activate([1, 2])
开发者ID:xjie0403,项目名称:communication-swarm-intelligence,代码行数:49,代码来源:brain_controller.py

示例3: BackpropTrainer

# 需要导入模块: from pybrain.structure import RecurrentNetwork [as 别名]
# 或者: from pybrain.structure.RecurrentNetwork import activate [as 别名]
#Train net
from pybrain.supervised.trainers import BackpropTrainer
trainer = BackpropTrainer(net, ds, momentum=0.1, verbose=True, weightdecay=0.01)

#for i in range(10):
#    if i%20==0:
#        print i
#    trainer.trainEpochs(1)
    
trnerr,testerr = trainer.trainUntilConvergence(dataset=ds,maxEpochs=10)
plt.plot(trnerr,'b',valerr,'r')

# <codecell>

print net.activate(Xtest.ix[1,:])
print ytest.ix[1,:]

# <codecell>

to_hidden=numpy.dot(in_to_hidden.params.reshape(hiddenLayer.dim,inLayer.dim),Xtest.ix[0,:].as_matrix())

# <codecell>

to_out=hiddenLayer.activate(to_hidden)

# <codecell>

in_to_hidden.params.reshape(hiddenLayer.dim,inLayer.dim)

# <codecell>
开发者ID:bilykigor,项目名称:qimb,代码行数:32,代码来源:QimbWorkbook.py

示例4: SigmoidLayer

# 需要导入模块: from pybrain.structure import RecurrentNetwork [as 别名]
# 或者: from pybrain.structure.RecurrentNetwork import activate [as 别名]
hiddenLayerB = SigmoidLayer(hiddenVector, name='hiddenLayerB')
outputLayer = LinearLayer(outputVector, name='outputLayer')

n.addInputModule(inLayer)
n.addModule(hiddenLayerA)
n.addModule(hiddenLayerB)
n.addOutputModule(outputLayer)

n.addConnection(FullConnection(n['inputLayer'], n['hiddenLayerA'], name='c1'))
n.addConnection(FullConnection(n['hiddenLayerA'], n['hiddenLayerB'], name='c2'))
n.addConnection(FullConnection(n['hiddenLayerB'], n['outputLayer'], name='c3'))

n.addRecurrentConnection(FullConnection(n['hiddenLayerA'], n['hiddenLayerB'], name='rec3'))

n.sortModules()
print 'Network One (Recurrent)' + str(n.activate([1,2,3]))
print 'Network One (Recurrent)' + str(n.activate([1,2,3]))

####
#FEED FORWARD NETWORK
####

n2 = FeedForwardNetwork()

inLayer2 = LinearLayer(inputVector, name='inputLayer')
hiddenLayerA2 = SigmoidLayer(hiddenVector, name='hiddenLayerA')
hiddenLayerB2 = SigmoidLayer(hiddenVector, name='hiddenLayerB')
outputLayer2 = LinearLayer(outputVector, name='outputLayer')

n2.addInputModule(inLayer)
n2.addModule(hiddenLayerA)
开发者ID:ElDonClaudio,项目名称:NNET,代码行数:33,代码来源:tst.py

示例5:

# 需要导入模块: from pybrain.structure import RecurrentNetwork [as 别名]
# 或者: from pybrain.structure.RecurrentNetwork import activate [as 别名]
n.addConnection(in_to_hidden)
n.addConnection(hidden_to_out)

n.sortModules()


r.addInputModule(LinearLayer(2, name='in'))
r.addModule(SigmoidLayer(3, name='hidden'))
r.addOutputModule(LinearLayer(1, name='out'))
r.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
r.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))

r.addRecurrentConnection(FullConnection(n['hidden'], n['hidden'], name='c3'))

r.sortModules()

#Show trainable weights
print "These are the trainable weights"
print in_to_hidden.params
print hidden_to_out.params

#Test Prints
print n.activate([1, 2])
print n

print ""

print r.activate((2, 2))
print r
开发者ID:Oregand,项目名称:4THYEARPROJECT,代码行数:31,代码来源:Network.py

示例6: str

# 需要导入模块: from pybrain.structure import RecurrentNetwork [as 别名]
# 或者: from pybrain.structure.RecurrentNetwork import activate [as 别名]
    net.addConnection(FullConnection(net[('hidden' + str(x))], net['hidden' + str(x + 1)], name=('c' + str(x + 1))))
# Connect the last hidden layer to the output and finalize the topology.
net.addConnection(FullConnection(net['hidden' + str(layerCount - 1)], net['out'], name='cOut'))
net.sortModules()
from pybrain.supervised import RPropMinusTrainer
trainer = RPropMinusTrainer(net, dataset=ds)

# Alternate between generating a sample song and training one more epoch.
epochcount = 0
while True:
    # Random seed frame: two (note, duration) pairs plus fixed flags.
    startingnote = random.choice(range(1, 17))
    startingnote2 = random.choice(range(1, 17))
    startingduration = random.choice(range(1,17))
    startingduration2 = random.choice(range(1, 17))
    song = [[startingnote, startingduration, 1, 1, 0, startingnote2, startingduration2, 1, 1, 0]]
    length = 50
    # Feed each generated frame back in to produce the next one.
    while len(song) < length:
        song.append(net.activate(song[-1]).tolist())
    # Split every 10-value frame into two 5-value voices, rounded to ints.
    newsong = []
    for x in song:
        newx = []
        newy = []
        for i in x:
            if len(newx) < 5:
                newx.append(int(i))
            else:
                newy.append(int(i))
        newsong.append(newx)
        newsong.append(newy)

    print newsong
    # NOTE(review): epochcount is never incremented, so this always prints
    # "after 0 epochs" even though trainEpochs runs each iteration — confirm.
    print "The above song is after " + str(epochcount) + " epochs."
    trainer.trainEpochs(epochs=1)
开发者ID:ml-lab,项目名称:Bach_AI,代码行数:33,代码来源:musicnetwork.py

示例7: range

# 需要导入模块: from pybrain.structure import RecurrentNetwork [as 别名]
# 或者: from pybrain.structure.RecurrentNetwork import activate [as 别名]
                    # Fill train (ds) and test (dst) datasets row by row.
                    # NOTE(review): Xtr/Xte appear to be 2-D numpy arrays and
                    # Ytr/Yte 1-D targets — confirm against the outer loops.
                    for i in range(Xtr.shape[0]):
                        ds.addSample(Xtr[i,:],Ytr[i])

                    for i in range(Xte.shape[0]):
                        dst.addSample(Xte[i,:],Yte[i])

                    #net = buildNetwork(ds.indim,ds.indim,ds.indim,ds.indim,ds.outdim,recurrent=False)
                    trainer = BackpropTrainer(net,learningrate=learnRate,momentum=moment,verbose=False)
                    #trainer.trainOnDataset(ds,30)
                    trainer.trainUntilConvergence(ds,10)

                    #trainer.testOnData(verbose=True)

                    # Mean squared error on the held-out test rows.
                    mse = 0.0
                    for i in range(Xte.shape[0]):
                        mse += pow(net.activate(Xte[i])[0]-Yte[i],2)
                    mse /= Xte.shape[0]
                    # ...and on the training rows, for overfitting comparison.
                    mseTrain = 0.0
                    for i in range(Xtr.shape[0]):
                        mseTrain += pow(net.activate(Xtr[i])[0]-Ytr[i],2)
                    mseTrain /= Xtr.shape[0]
                    # Log this hyperparameter combination's result to console and CSV.
                    print 'mse(test):{},mse(train):{},epoch:{},width:{},depth:{},momentum:{},learnrate:{}'.format(mse,mseTrain,epochs,hidw,depth,moment,learnRate)
                    testdat.write('{},{},{},{},{},{},{}\n'.format(mse,mseTrain,epochs,hidw,depth,learnRate,moment))

testdat.close()
#modelfile = open('model.dat','w')
#pickle.dump(net,modelfile)
#modelfile.close()

#fh = open('predictions.csv','w')    # open file for upload
#fh.write('ID,Prediction\n')         # output header line
开发者ID:austinsherron,项目名称:cs178-project,代码行数:33,代码来源:PyBrainTest2.py

示例8: RecurrentNetwork

# 需要导入模块: from pybrain.structure import RecurrentNetwork [as 别名]
# 或者: from pybrain.structure.RecurrentNetwork import activate [as 别名]
if __name__ == "__main__":

    from pybrain.structure import RecurrentNetwork
    from pybrain.structure import LinearLayer
    from pybrain.structure import SigmoidLayer
    from pybrain.structure import FullConnection

    net = RecurrentNetwork()

    net.addInputModule(LinearLayer(2, "in"))
    net.addModule(SigmoidLayer(3, "hidden"))
    net.addOutputModule(LinearLayer(1, "out"))

    net.addConnection(FullConnection(net["in"], net["hidden"], "c1"))
    net.addConnection(FullConnection(net["hidden"], net["out"], "c2"))
    net.addRecurrentConnection(FullConnection(net["hidden"], net["hidden"], "c3-recurrent"))

    net.sortModules()

    print net

    for i in xrange(5):
        print net.activate([2, 2])

    print "reset"
    net.reset()

    for i in xrange(5):
        print net.activate([2, 2])
开发者ID:trungnq97,项目名称:snippets,代码行数:31,代码来源:build_recurrent_net.py

示例9: RecurrentNetwork

# 需要导入模块: from pybrain.structure import RecurrentNetwork [as 别名]
# 或者: from pybrain.structure.RecurrentNetwork import activate [as 别名]
# Split the full dataset 70/30 into training and test portions.
train_set, test_set = DS.splitWithProportion(0.7)

# Recurrent net: linear in -> 10 tanh hidden -> linear out, with a
# recurrent out -> hidden feedback connection.
net = RecurrentNetwork()
hidden_neurodes = 10
in_layer = LinearLayer(len(train_set["input"][0]), name="in")
hidden_layer = TanhLayer(hidden_neurodes, name="hidden1")
out_layer = LinearLayer(len(train_set["target"][0]), name="out")
net.addInputModule(in_layer)
net.addModule(hidden_layer)
net.addOutputModule(out_layer)
net.addConnection(FullConnection(net["in"], net["hidden1"], name="c1"))
net.addConnection(FullConnection(net["hidden1"], net["out"], name="c2"))
net.addRecurrentConnection(FullConnection(net["out"], net["hidden1"], name="cout"))
net.sortModules()
net.randomize()

# Fit with the RProp- algorithm for 30 epochs.
trainer = RPropMinusTrainer(net, dataset=train_set, verbose=True)
trainer.trainOnDataset(train_set, 30)

# Predictions on the training portion, plotted against the targets (black).
predictions_train = np.array([net.activate(train_set["input"][idx])[0] for idx in xrange(len(train_set))])
plt.plot(train_set["target"], c="k")
plt.plot(predictions_train, c="r")
plt.show()

# Same comparison on the held-out test portion.
predictions_test = np.array([net.activate(test_set["input"][idx])[0] for idx in xrange(len(test_set))])
plt.plot(test_set["target"], c="k")
plt.plot(predictions_test, c="r")
plt.show()
开发者ID:patrikdal,项目名称:BitcoinTradingAlgorithmToolkit,代码行数:32,代码来源:example.py

示例10: SupervisedDataSet

# 需要导入模块: from pybrain.structure import RecurrentNetwork [as 别名]
# 或者: from pybrain.structure.RecurrentNetwork import activate [as 别名]
ds = SupervisedDataSet(2,1)


ds.addSample([1,1],[0])
ds.addSample([0,0],[0])
ds.addSample([0,1],[1])
ds.addSample([1,0],[1])

#Train the network
trainer = BackpropTrainer(network, ds, momentum=0.99)

print network

print "\nInitial weights: ", network.params

max_error = 1e-7
error, count = 1, 1000
#Train
while abs(error) >= max_error and count > 0:
    error = trainer.train()
    count = count - 1

print "Final weights: ", network.params
print "Error: ", error

#Test data
print '\n1 XOR 1:',network.activate([1,1])[0]
print '1 XOR 0:',network.activate([1,0])[0]


开发者ID:radut,项目名称:aja,代码行数:30,代码来源:xor.py

示例11: BackpropTrainer

# 需要导入模块: from pybrain.structure import RecurrentNetwork [as 别名]
# 或者: from pybrain.structure.RecurrentNetwork import activate [as 别名]
ds.endOfData()

# Create bp trainer
trainer = BackpropTrainer(net, ds)

# Trains the datasets
# Train until the error falls below delta_error (defined earlier in the
# file) or the 1000-epoch budget runs out.
print 'Training ...'
epoch = 1000
error = 1.0
while error > delta_error and epoch >= 0:
    error = trainer.train()
    epoch -= 1
    print 'Epoch = %d, Error = %f' % (epoch, error)

# To store the epoch error
err_array = []

# Evaluate every dataframe row: the first columns-2 fields are inputs,
# the next field is the expected value.
# NOTE(review): assumes `columns` matches df's column count — confirm.
for row in df.itertuples(index=False):
    result = net.activate(row[0:columns-2])
    expect = row[columns-2]
    error = abs(expect - result)
    err_array.append(error)
    print 'Result = %f, Expect = %f, Error = %f' % (result, expect, error)

# Plot the per-row absolute errors.
err_df = pd.DataFrame(err_array)
err_df.plot()
plt.show()

print 'Sum Error = %f' % err_df.sum(axis=0)
开发者ID:ammeyjohn,项目名称:rubbish,代码行数:31,代码来源:neural_network.py

示例12: print

# 需要导入模块: from pybrain.structure import RecurrentNetwork [as 别名]
# 或者: from pybrain.structure.RecurrentNetwork import activate [as 别名]
  # Stop the (truncated, enclosing) training loop once test error is low enough.
  if tstresult <= 0.5 :
       print('Bingo !!!!!!!!!!!!!!!!!!!!!!')
       break

  # export network
  NetworkWriter.writeToFile(net, 'signal_weight.xml')

# run test
actual_price = np.array([n[3] for n in testing_input])
predict_short = []
predict_long = []
result_long = []
result_short = []

# Activate the net on every test sample; z[0]/z[1] are the short/long signals.
for i, (x, y) in enumerate(zip(testing_input, testing_output)):
  z = net.activate(x)
  predict_short.append(z[0])
  predict_long.append(z[1])
  # NOTE(review): testing_output[0]/[1] index the whole output list, not the
  # current sample — probably meant y[0] and y[1]; confirm before relying on
  # result_long/result_short.
  result_long.append(abs(testing_output[0] - z[0]))
  result_short.append(abs(testing_output[1] - z[1]))

# Boolean index arrays for positive/negative predicted signals.
predict_short = np.asarray(predict_short)
predict_long = np.asarray(predict_long)
short_up_idxs = predict_short > 0
short_down_idxs = predict_short < 0
long_up_idxs = predict_long > 0
long_down_idxs = predict_long < 0

# print test
def normalize_result(data):
  max_v = np.max(data)
开发者ID:indiejoseph,项目名称:nn_trading,代码行数:33,代码来源:train_signal.py

示例13: neural_network

# 需要导入模块: from pybrain.structure import RecurrentNetwork [as 别名]
# 或者: from pybrain.structure.RecurrentNetwork import activate [as 别名]
class neural_network(object):
    """Persistable recurrent network for 2-value sequence prediction.

    Wraps a PyBrain RecurrentNetwork plus a `store` object used to save and
    load networks by name. Trains itself on construction unless `trained`
    is already truthy.
    """

    def __init__(self, name, dataset, trained, store):
        self.name = name
        self.store = store
        self.trained = trained
        self.dataset = dataset

        # 2 -> (3 hidden) -> 2 topology with a recurrent hidden self-loop.
        # NOTE(review): there is no in->hidden connection — 'in' feeds 'out'
        # directly (c1) and 'hidden' only receives its own recurrent input,
        # so the hidden layer never sees the input. Confirm this is intended.
        self.net = RecurrentNetwork()
        self.net.addInputModule(LinearLayer(2, name='in'))
        self.net.addModule(SigmoidLayer(3, name='hidden'))
        self.net.addOutputModule(LinearLayer(2, name='out'))
        self.net.addConnection(FullConnection(self.net['in'], self.net['out'], name='c1'))
        self.net.addConnection(FullConnection(self.net['hidden'], self.net['out'], name='c2'))
        self.net.addRecurrentConnection(FullConnection(self.net['hidden'], self.net['hidden'], name='c3'))
        self.net.sortModules()
        '''
        self.net = buildNetwork(2, 3, 2)
        '''
        if not self.trained:
            self.train()

        return

    def save(self):
        """Persist name/dataset/trained state through the store backend."""
        self.store.save_neural_network(self.name, self.dataset, self.trained)
        return

    @classmethod
    def get_saved(cls, name, store):
        """Load a saved network by name; returns None when not found."""
        result = store.get_neural_network(name)

        return cls(name, result[0], result[1], store) if result else None

    @classmethod
    def get_list(cls, store):
        """Return the list of saved network records from the store."""
        result = store.get_neural_network_list()
        print result
        return [x for x in result]

    @classmethod
    def new(cls, name, store, ds_file_uri):
        """Create a new store entry from a dataset file (via rttl loader)."""
        dataset = rttl.dataset_from_file(ds_file_uri)

        store.new_neural_network(name, dataset)
        return

    def evaluate(self, genome):
        """Fitness: inverse of the summed L1 error when predicting each
        element of `genome` from its predecessor.

        NOTE(review): raises ZeroDivisionError if the net predicts every
        step exactly (err == 0) — confirm callers tolerate this.
        """
        err = 0.0
        for i in range(len(genome) - 1):
            print '---------- input ------------'
            print genome[i]
            output = self.net.activate(genome[i])
            print '--------- output ------------'
            print output
            target = genome[i + 1]
            err += (math.fabs(output[0] - target[0]) + math.fabs(output[1] - target[1]))

        return 1/err

    def train(self):
        """Train the net per song: each element predicts its successor."""
        ds_store = []
        for song in self.dataset:
            # Inputs are all but the last element; targets are shifted by one.
            ds_in = song[:len(song) - 1]
            ds_out = song[1:]

            ds = SupervisedDataSet(2, 2)

            for i in range(len(song) -1):
                #if ds_in[i] not in ds_store:
                ds.addSample(ds_in[i], ds_out[i])
                ds_store.append(ds_in[i])

            if len(ds):
                trainer = BackpropTrainer(self.net, ds, verbose=True)
                trainer.trainUntilConvergence()
        self.save()
开发者ID:cagatay,项目名称:Evolution-9,代码行数:78,代码来源:NN.py

示例14: epochs

# 需要导入模块: from pybrain.structure import RecurrentNetwork [as 别名]
# 或者: from pybrain.structure.RecurrentNetwork import activate [as 别名]
class LanguageLearner:
    """Character-level language model.

    Reads a training text, maps each distinct character code to a symbol
    index, and trains a stacked-LSTM recurrent network to predict the next
    character. sample() generates text from the trained net.
    """

    __OUTPUT = "Sample at {0} epochs (prompt=\"{1}\", length={2}): {3}"

    def __init__(self, trainingText, hiddenLayers, hiddenNodes):
        """Load the text and build the char-code <-> symbol-index mapping.

        trainingText -- path of the text file to learn from
        hiddenLayers -- number of stacked LSTM layers (must be >= 1)
        hiddenNodes  -- LSTM cells per hidden layer
        """
        self.__initialized = False
        with open(trainingText) as f:
            self.raw = f.read()
        self.characters = list(self.raw)
        self.rawData = list(map(ord, self.characters))
        print("Creating alphabet mapping...")
        # mapping[i] is the character code of symbol i, in first-seen order.
        self.mapping = []
        for charCode in self.rawData:
            if charCode not in self.mapping:
                self.mapping.append(charCode)
        print("Mapping of " + str(len(self.mapping)) + " created.")
        print(str(self.mapping))
        print("Converting data to mapping...")
        self.data = []
        for charCode in self.rawData:
            self.data.append(self.mapping.index(charCode))
        print("Done.")
        # Each symbol is the input whose target is its successor.
        self.dataIn = self.data[:-1:]
        self.dataOut = self.data[1::]
        self.inputs = 1
        self.hiddenLayers = hiddenLayers
        self.hiddenNodes = hiddenNodes
        self.outputs = 1

    def initialize(self, verbose):
        """Build the LSTM network, training set and trainer.

        Must be called before train() or sample().
        """
        print("Initializing language learner...")
        self.verbose = verbose

        # Create network and modules
        self.net = RecurrentNetwork()
        inp = LinearLayer(self.inputs, name="in")
        hiddenModules = []
        for i in range(0, self.hiddenLayers):
            hiddenModules.append(LSTMLayer(self.hiddenNodes, name=("hidden-" + str(i + 1))))
        outp = LinearLayer(self.outputs, name="out")

        # Add modules to the network with recurrence
        self.net.addOutputModule(outp)
        self.net.addInputModule(inp)
        for module in hiddenModules:
            self.net.addModule(module)

        # Chain in -> hidden-1 -> ... -> hidden-n -> out, adding a recurrent
        # self-connection on every hidden layer.
        # NOTE(review): assumes hiddenLayers >= 1; the "hidden-1" lookup
        # fails otherwise — confirm callers never pass 0.
        self.net.addConnection(FullConnection(self.net["in"], self.net["hidden-1"]))
        for i in range(0, len(hiddenModules) - 1):
            self.net.addConnection(FullConnection(self.net["hidden-" + str(i + 1)], self.net["hidden-" + str(i + 2)]))
            self.net.addRecurrentConnection(FullConnection(self.net["hidden-" + str(i + 1)], self.net["hidden-" + str(i + 1)]))
        self.net.addRecurrentConnection(FullConnection(self.net["hidden-" + str(len(hiddenModules))],
            self.net["hidden-" + str(len(hiddenModules))]))
        self.net.addConnection(FullConnection(self.net["hidden-" + str(len(hiddenModules))], self.net["out"]))
        self.net.sortModules()

        # One single-step sequence per (input, target) pair.
        self.trainingSet = SequentialDataSet(self.inputs, self.outputs)
        for x, y in zip(self.dataIn, self.dataOut):
            self.trainingSet.newSequence()
            self.trainingSet.appendLinked([x], [y])

        self.net.randomize()

        print("Neural network initialzed with structure:")
        print(self.net)

        self.trainer = BackpropTrainer(self.net, self.trainingSet, verbose=verbose)
        self.__initialized = True
        print("Successfully initialized network.")

    def train(self, epochs, frequency, prompt, length):
        """Train for `epochs` epochs, printing a sample every `frequency`
        epochs (frequency <= 0 disables periodic samples).

        Raises Exception when initialize() has not been called.
        """
        if not self.__initialized:
            raise Exception("Attempted to train uninitialized LanguageLearner")
        print ("Beginning training for " + str(epochs) + " epochs...")
        if frequency >= 0:
            print(LanguageLearner.__OUTPUT.format(0, prompt, length, self.sample(prompt, length)))
        # BUG FIX: range(1, epochs) ran only epochs-1 training epochs.
        for i in range(1, epochs + 1):
            print("Error at " + str(i) + " epochs: " + str(self.trainer.train()))
            # BUG FIX: guard against frequency <= 0 (ZeroDivisionError on i % 0).
            if frequency > 0 and i % frequency == 0:
                print(LanguageLearner.__OUTPUT.format(i, prompt, length, self.sample(prompt, length)))
        print("Completed training.")

    def sample(self, prompt, length):
        """Generate `length` characters, seeded with `prompt` (a random
        alphabet character when prompt is None)."""
        self.net.reset()
        # BUG FIX: identity comparison with None.
        if prompt is None:
            prompt = chr(random.choice(self.mapping))
        output = prompt
        # NOTE(review): ord() accepts only a single character, so multi-char
        # prompts raise TypeError; also the raw char code is fed to a net
        # trained on mapped symbol indices — confirm intended.
        charCode = ord(prompt)
        for i in range(0, length):
            sampledResult = self.net.activate([charCode])
            charCode = int(round(sampledResult[0]))
            if charCode < 0 or charCode >= len(self.mapping):
                return output + "#TERMINATED_SAMPLE(reason: learner guessed invalid character)"
            output += chr(self.mapping[charCode])
        return output
开发者ID:sl,项目名称:babble,代码行数:100,代码来源:languagelearner.py

示例15: RecurrentNetwork

# 需要导入模块: from pybrain.structure import RecurrentNetwork [as 别名]
# 或者: from pybrain.structure.RecurrentNetwork import activate [as 别名]
from pybrain.structure import FullConnection

#get no of words from corpus and store in numInputNodes and numOutputNodes
numInputNodes = 10
numHiddenNodes = 5
numOutputNodes = 10

#Creating a recurrent network with 1 input node, 1 output node and 10 hidden nodes
network = RecurrentNetwork()
network.addInputModule(LinearLayer(numInputNodes, name='in'))
network.addModule(SigmoidLayer(numHiddenNodes, name='hidden'))
network.addOutputModule(LinearLayer(numOutputNodes, name='out'))
in_to_hidden = FullConnection(network['in'], network['hidden'], name='connection1')
hidden_to_out = FullConnection(network['hidden'], network['out'], name='connection2')
hidden_to_hidden = FullConnection(network['hidden'], network['hidden'], name='connection3')
network.addConnection(in_to_hidden)
network.addConnection(hidden_to_out)
network.addRecurrentConnection(hidden_to_hidden)
network.sortModules()
print network.activate([1,0,0,0,0,0,0,0,0,0])
'''
print in_to_hidden.params
print "\n\n"
print hidden_to_out.params
print "\n\n"
print hidden_to_hidden.params
print "\n\n"
'''

#Input value initially: 1 of n coding of word + previous state s(t-1)
开发者ID:sukki89,项目名称:Word-Prediction,代码行数:32,代码来源:network.py


注:本文中的pybrain.structure.RecurrentNetwork.activate方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。