

Python FeedForwardNetwork.activate Method Code Examples

This article collects typical usage examples of the Python method pybrain.structure.FeedForwardNetwork.activate. If you are unsure what FeedForwardNetwork.activate does or how to call it, the curated code examples below may help. You can also explore further usage examples of the containing class, pybrain.structure.FeedForwardNetwork.


The 15 code examples of FeedForwardNetwork.activate shown below are sorted by popularity.
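
For orientation before the examples: the common pattern in all of them is to assemble a FeedForwardNetwork from layers and FullConnection objects, call sortModules(), and only then call activate() with an input vector. A minimal sketch (the layer sizes here are arbitrary, chosen only for illustration):

from pybrain.structure import FeedForwardNetwork, LinearLayer, SigmoidLayer, FullConnection

net = FeedForwardNetwork()
in_layer = LinearLayer(2)        # two input units
hidden_layer = SigmoidLayer(3)   # three hidden units
out_layer = LinearLayer(1)       # one output unit

net.addInputModule(in_layer)
net.addModule(hidden_layer)
net.addOutputModule(out_layer)
net.addConnection(FullConnection(in_layer, hidden_layer))
net.addConnection(FullConnection(hidden_layer, out_layer))
net.sortModules()                # must be called before the network can be activated

print(net.activate([1.0, 2.0])) # single forward pass -> numpy array with one value per output unit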

Example 1: create_network

# Required import: from pybrain.structure import FeedForwardNetwork [as alias]
# Or: from pybrain.structure.FeedForwardNetwork import activate [as alias]
def create_network():
    # Create the network itself
    network = FeedForwardNetwork()
    # Create layers
    NUMBER_OF_INPUT_BYTES = 1600  # the input is a 40x40 picture, flattened to 1600 values
    NUMBER_OF_HIDDEN_LAYERS = 10  # despite the name, this is the number of neurons in the single hidden layer
    NUMBER_OF_OUTPUT_CLASSES = 8  # one output per class; there are 8 classes
    inLayer = LinearLayer( NUMBER_OF_INPUT_BYTES )
    hiddenLayer = SigmoidLayer( NUMBER_OF_HIDDEN_LAYERS )
    outLayer = LinearLayer( NUMBER_OF_OUTPUT_CLASSES )
    # Create connections between layers
    # We create FullConnection - each neuron of one layer is connected to each neuron of other layer
    in_to_hidden = FullConnection( inLayer, hiddenLayer )
    hidden_to_out = FullConnection( hiddenLayer, outLayer )
    # Add layers to our network
    network.addInputModule( inLayer )
    network.addModule( hiddenLayer )
    network.addOutputModule( outLayer )
    # Add connections to network
    network.addConnection( in_to_hidden )
    network.addConnection( hidden_to_out )
    # Sort modules to make multilayer perceptron usable
    network.sortModules()
    # prepare array to activate network
    d_letter_array = read_array( "d" )
    # activate network
    network.activate( d_letter_array )
    return network
Developer: wojciech161, Project: softcomputing-project, Lines: 30, Source file: perceptron.py
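
A hypothetical way to use create_network() from Example 1. Note that create_network() internally relies on the original project's read_array helper, which is not shown here, and the network is untrained, so the scores come from random initial weights; the random vector below is only a stand-in for a flattened 40x40 letter image.

import numpy as np

net = create_network()               # builds, sorts, and smoke-tests the network (assumes read_array is available)
letter_image = np.random.rand(1600)  # stand-in for a flattened 40x40 letter image
scores = net.activate(letter_image)  # one raw score per class
print(np.argmax(scores))             # index of the highest-scoring of the 8 classes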

Example 2: main

# Required import: from pybrain.structure import FeedForwardNetwork [as alias]
# Or: from pybrain.structure.FeedForwardNetwork import activate [as alias]
def main():
    n = FeedForwardNetwork()

    in_layer = LinearLayer(2)
    hidden_layer = SigmoidLayer(3)
    out_layer = LinearLayer(1)

    n.addInputModule(in_layer)
    n.addModule(hidden_layer)
    n.addOutputModule(out_layer)

    in_to_hidden = FullConnection(in_layer, hidden_layer)
    hidden_to_out = FullConnection(hidden_layer, out_layer)

    n.addConnection(in_to_hidden)
    n.addConnection(hidden_to_out)

    n.sortModules()

    print(">>> print n")
    print(n)

    print(">>> n.activate([1, 2])")
    print(n.activate([1, 2]))

    print(">>> in_to_hidden.params")
    print(in_to_hidden.params)

    print(">>> hidden_to_out.params")
    print(hidden_to_out.params)

    print(">>> n.params")
    print(n.params)
Developer: laysakura, Project: DeepLearning-shugyou, Lines: 35, Source file: print_FeedForwardNetwork.py

Example 3: NNet

# Required import: from pybrain.structure import FeedForwardNetwork [as alias]
# Or: from pybrain.structure.FeedForwardNetwork import activate [as alias]
class NNet(FunctionApproximator):
	def __init__(self, num_features, num_hidden_neurons):
		super(NNet,self).__init__(num_features)

		self.ds = SupervisedDataSet(num_features, 1)

		self.net = FeedForwardNetwork()
		self.net.addInputModule(LinearLayer(num_features, name='in'))
		self.net.addModule(LinearLayer(num_hidden_neurons, name='hidden'))
		self.net.addOutputModule(LinearLayer(1, name='out'))
		self.net.addConnection(FullConnection(self.net['in'], self.net['hidden'], name='c1'))
		self.net.addConnection(FullConnection(self.net['hidden'], self.net['out'], name='c2'))
		self.net.sortModules()

	def getY(self, inpt):
		#giving NAN
		return self.net.activate(inpt)

	def update(self, inpt, target):
		# NOTE: state, action, new_state, new_action and reward are not defined in this
		# snippet; they presumably come from the enclosing RL agent, and the passed-in
		# target is overwritten by the TD target computed here.
		q_old = self.qvalue(state, action)
		q_new = self.qvalue(new_state, new_action)
		target = q_old + self.alpha*(reward + (self.gamma*q_new)-q_old)
		

		self.ds.addSample(inpt, target)
		# print inpt.shape, target.shape
		# print inpt, target
		trainer = BackpropTrainer(self.net, self.ds)
		# try:
		# 	trainer.trainUntilConvergence()
		# except:
		trainer.train()
Developer: rahul003, Project: rl_page_replacement, Lines: 34, Source file: approximator.py

Example 4: NeuralNetwork

# Required import: from pybrain.structure import FeedForwardNetwork [as alias]
# Or: from pybrain.structure.FeedForwardNetwork import activate [as alias]
class NeuralNetwork(BaseEstimator, RegressorMixin):
    def __init__(
        self,
        inp_neu=4,
        hid_neu=3,
        out_neu=1,
        learn_rate=0.1,
        nomentum=0.5,
        weight_dec=0.0001,
        epochs=100,
        split_prop=0.25,
    ):
        self.inp_neu = inp_neu
        self.hid_neu = hid_neu
        self.out_neu = out_neu
        self.learn_rate = learn_rate
        self.nomentum = nomentum
        self.weight_dec = weight_dec
        self.epochs = epochs
        self.split_prop = split_prop

    def data(self, X, y=None):
        DS = SupervisedDataSet(self.inp_neu, self.out_neu)
        for i in range(0, len(X)):
            DS.addSample((X[i][0], X[i][1], X[i][2], X[i][3]), y[i])  # NOTE: hard-coded for exactly four input features
        return DS

    def fit(self, X, y):
        self.n = FeedForwardNetwork()

        self.n.addInputModule(SigmoidLayer(self.inp_neu, name="in"))
        self.n.addModule(SigmoidLayer(self.hid_neu, name="hidden"))
        self.n.addOutputModule(LinearLayer(self.out_neu, name="out"))
        self.n.addConnection(FullConnection(self.n["in"], self.n["hidden"], name="c1"))
        self.n.addConnection(FullConnection(self.n["hidden"], self.n["out"], name="c2"))

        self.n.sortModules()  # initialization

        self.tstdata, trndata = self.data(X, y).splitWithProportion(self.split_prop)

        trainer = BackpropTrainer(
            self.n, trndata, learningrate=self.learn_rate, momentum=self.nomentum, weightdecay=self.weight_dec
        )
        trainer.trainUntilConvergence(verbose=True, maxEpochs=self.epochs)

        return self

    def predict(self, X):
        self.yhat = []
        for i in X:
            self.yhat.append(float(self.n.activate(i)))
        self.yhat = np.array(self.yhat)
        return self.yhat

    def score(self, y):
        vect_se = (self.yhat - y) ** 2
        mse = float(np.sum(vect_se)) / float(len(vect_se))
        return mse
Developer: pcolo, Project: regret, Lines: 60, Source file: nn.py
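
A hedged usage sketch for the scikit-learn-style regressor above, assuming only the constructor defaults (4 inputs, 1 output); the data is random toy data invented for illustration.

import numpy as np

X = np.random.rand(30, 4)           # toy data: 30 samples, 4 features each
y = np.random.rand(30)              # toy continuous targets

model = NeuralNetwork(epochs=10)    # small epoch cap to keep the smoke test quick
model.fit(X, y)
y_hat = model.predict(X)
print(model.score(y))               # mean squared error against the targets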

Example 5: logicTest

# Required import: from pybrain.structure import FeedForwardNetwork [as alias]
# Or: from pybrain.structure.FeedForwardNetwork import activate [as alias]
def logicTest():
	inLayer = LinearLayer(2)
	hiddenLayer = SigmoidLayer(6)
	outLayer = LinearLayer(4) # OR, AND, NOT, XOR
	
	n=FeedForwardNetwork()
	n.addInputModule(inLayer)
	n.addModule(hiddenLayer)
	n.addOutputModule(outLayer)

	inToHidden = FullConnection(inLayer, hiddenLayer)
	hiddenToOut = FullConnection(hiddenLayer, outLayer)

	n.addConnection(inToHidden)
	n.addConnection(hiddenToOut)

	n.sortModules()
	
	print n.activate([0, 1])
Developer: phughk, Project: my-pybrain-scripts, Lines: 21, Source file: simple.py

Example 6: main

# Required import: from pybrain.structure import FeedForwardNetwork [as alias]
# Or: from pybrain.structure.FeedForwardNetwork import activate [as alias]
def main(T=10, load_brain=False, save_brain=False):
    singles = [room for room in rooms.allRooms if room.capacity == "Single"]
    preprocessed = preprocess_rooms(singles)
    all_vectors = [room_to_feature_vector(room, preprocessed) for room in singles]
    
    training_sequences = getLabeledRoomsFeaturesAndLabels(getRoomsMap(singles, all_vectors))
    
    input_units = len(all_vectors[0])

    if load_brain and "net" in brain_shelf:
        net = brain_shelf["net"]
        net.sorted = False
        net.sortModules()
    else:
        net = FeedForwardNetwork()
        layer_in = LinearLayer(input_units)
        layer_hidden = SigmoidLayer(1000)
        layer_hidden2 = SigmoidLayer(100)
        layer_out = LinearLayer(1)
        net.addInputModule(layer_in)
        net.addModule(layer_hidden)
        net.addModule(layer_hidden2)
        net.addOutputModule(layer_out)

        in_to_hidden = FullConnection(layer_in, layer_hidden)
        hidden_to_hidden = FullConnection(layer_hidden, layer_hidden2)
        hidden_to_out = FullConnection(layer_hidden2, layer_out)
        net.addConnection(in_to_hidden)
        net.addConnection(hidden_to_hidden)
        net.addConnection(hidden_to_out)

        net.sortModules()

        training_data = SupervisedDataSet(len(all_vectors[0]), 1)
        for training_seq in training_sequences: 
            training_data.appendLinked(training_seq[1], training_seq[2])
        trainer = BackpropTrainer(net, training_data)
        for i in xrange(T):
            error = trainer.train()
            print "Training iteration %d.  Error: %f" % (i + 1, error)

        if save_brain:
            brain_shelf["net"] = net
    
    labeled_rooms = []
    for i, vector in enumerate(all_vectors):
        labeled_rooms.append((singles[i], net.activate(vector)))
    
    available_rooms = available.get_available_rooms()

    labeled_rooms.sort(key=lambda x: -x[1])
    for room, label in labeled_rooms:
        if room.num in available_rooms:
            print "%16.12f: %s" % (label, room)
Developer: kkleidal, Project: SimmonsRoomingNeuralNet, Lines: 56, Source file: neural.py

Example 7: fit_predict

# Required import: from pybrain.structure import FeedForwardNetwork [as alias]
# Or: from pybrain.structure.FeedForwardNetwork import activate [as alias]
def fit_predict(xTrain,yTrain,xTest,epochs,neurons):

  # Check edge cases
  if (not len(xTrain) == len(yTrain) or len(xTrain) == 0 or 
    len(xTest) == 0 or epochs <= 0):
    return

  # Randomize the training data (probably not necessary but pybrain might
  # not shuffle the data itself, so perform as safety check)
  indices = np.arange(len(xTrain))
  np.random.shuffle(indices)

  trainSwapX = [xTrain[x] for x in indices]
  trainSwapY = [yTrain[x] for x in indices]

  supTrain = SupervisedDataSet(len(xTrain[0]),1)
  for x in range(len(trainSwapX)):
    supTrain.addSample(trainSwapX[x],trainSwapY[x])

  # Construct the feed-forward neural network

  n = FeedForwardNetwork()

  inLayer = LinearLayer(len(xTrain[0]))
  hiddenLayer1 = SigmoidLayer(neurons)
  outLayer = LinearLayer(1)

  n.addInputModule(inLayer)
  n.addModule(hiddenLayer1)
  n.addOutputModule(outLayer)

  in_to_hidden = FullConnection(inLayer, hiddenLayer1)
  hidden_to_out = FullConnection(hiddenLayer1, outLayer)
  
  n.addConnection(in_to_hidden)
  n.addConnection(hidden_to_out)

  n.sortModules() 

  # Train the neural network on the training partition, validating
  # the training progress on the validation partition

  trainer = BackpropTrainer(n,dataset=supTrain,momentum=0.1,learningrate=0.01
    ,verbose=False,weightdecay=0.01)
  
  trainer.trainUntilConvergence(dataset=supTrain,
    maxEpochs=epochs,validationProportion=0.30)

  outputs = []
  for x in xTest:
    outputs.append(n.activate(x))

  return outputs
Developer: lbenning, Project: Load-Forecasting, Lines: 55, Source file: neural.py
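
A hedged usage sketch for fit_predict above, on an invented toy regression task (learning y = x1 + x2); the epoch and neuron counts are arbitrary choices, not values from the original project.

import numpy as np

xTrain = np.random.rand(50, 2).tolist()   # toy inputs
yTrain = [x[0] + x[1] for x in xTrain]    # toy targets: y = x1 + x2
xTest = np.random.rand(5, 2).tolist()

predictions = fit_predict(xTrain, yTrain, xTest, epochs=20, neurons=5)
for x, p in zip(xTest, predictions):
    print(x, p)                           # each p is the network's estimate of x[0] + x[1]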

Example 8: neuralNet

# Required import: from pybrain.structure import FeedForwardNetwork [as alias]
# Or: from pybrain.structure.FeedForwardNetwork import activate [as alias]
def neuralNet(info, test_data):
    ann = FeedForwardNetwork()
    
    ''' 
        Initiate the input nodes, hidden layer nodes,
        and the output layer nodes.
    '''
    inputLayer = LinearLayer(5)
    hiddenLayer = SigmoidLayer(20) 
    outputLayer = LinearLayer(1)
    
    '''
        Add the nodes to the corresponding layer
    '''
    ann.addInputModule(inputLayer)
    ann.addModule(hiddenLayer)
    ann.addOutputModule(outputLayer)
    
    '''
        Connect the input layer to hidden layer,
        then connect hidden layer to output layer
    '''
    in_to_hidden = FullConnection(inputLayer, hiddenLayer)
    hidden_to_out = FullConnection(hiddenLayer, outputLayer)
    
    ann.addConnection(in_to_hidden)
    ann.addConnection(hidden_to_out)
    
    ann.sortModules ()
    
    data_set = SupervisedDataSet(5, 1)
    for data in info:
        data_set.addSample(data[:-1], data[-1])
    trainer = BackpropTrainer(ann, data_set, verbose=False)
    
    #test_data, train_data = data_set.splitWithProportion(0.2)
    train_data = data_set
    test_data = test_data
    '''
        Train the network until convergence, capped at maxEpochs
        (10 here, kept small for testing purposes)
    '''
    train = trainer.trainUntilConvergence(dataset=train_data, maxEpochs=10)
    NetworkWriter.writeToFile(ann, 'filename5.xml')
    
    for d in test_data:
        out = ann.activate(d)
        #print (train)
        print (out) 
        
Developer: TeamBall, Project: CapstoneProject, Lines: 54, Source file: neuralNetwork.py

Example 9: __init__

# Required import: from pybrain.structure import FeedForwardNetwork [as alias]
# Or: from pybrain.structure.FeedForwardNetwork import activate [as alias]
class UnmannedNet:
  def __init__(self,n_in,n_hidden,n_out):
      self.net = FeedForwardNetwork()
      inLayer = LinearLayer(n_in)
      hiddenLayer1 = SigmoidLayer(n_hidden) 
      hiddenLayer2 = SigmoidLayer(n_hidden) 
      outLayer = LinearLayer(n_out)
      self.net.addInputModule(inLayer)
      self.net.addModule(hiddenLayer1)
      self.net.addModule(hiddenLayer2)
      self.net.addOutputModule(outLayer)
      in_to_hidden = FullConnection(inLayer, hiddenLayer1)
      hidden_to_out = FullConnection(hiddenLayer2, outLayer)
      hidden_to_hidden = FullConnection(hiddenLayer1, hiddenLayer2)
      self.net.addConnection(in_to_hidden)
      self.net.addConnection(hidden_to_hidden)
      self.net.addConnection(hidden_to_out)
      self.net.sortModules()
      #self.net.params
      self.ds = SupervisedDataSet(n_in, n_out)

  def load_network(self,fName='./data/mynetwork.xml'):
      self.net = NetworkReader.readFrom(fName)

  def save_network(self,fName='./data/mynetwork.xml'):
      NetworkWriter.writeToFile(self.net, fName)

  def train(self,number):
      self.trainer = BackpropTrainer(self.net, self.ds)
      self.trainer.trainEpochs(number)
  
  def add_data(self,image,control):
      self.ds.addSample(image, control)

  def save_data(self,fName="./data/mydata"):
      SupervisedDataSet.saveToFile(self.ds, fName)

  def read_data(self,fName="./data/mydata"):
      self.ds = SupervisedDataSet.loadFromFile(fName)

  def prediction(self,image):
      return self.net.activate(image)

  def evaluate(self,valueFaultTolerant):
      target = self.ds.data.get('target')
      inputvalue = self.ds.data.get('input')
      numberOfSample = target.shape[0]
      numberOfCorrect = 0
      for i in range(0,numberOfSample):
         if (abs(target[i]-self.prediction(inputvalue[i]))<=valueFaultTolerant):
            numberOfCorrect+=1
      print "Correct rate is "+str(float(numberOfCorrect)/float(numberOfSample))
Developer: huangzhixin, Project: Mutifunction_Auto, Lines: 54, Source file: network.py
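
A sketch of driving the UnmannedNet class above. The input length of 100 and the random "images" and "controls" are purely illustrative stand-ins for the original project's camera frames and control signals.

import numpy as np

net = UnmannedNet(n_in=100, n_hidden=50, n_out=1)
for _ in range(20):
    image = np.random.rand(100)        # stand-in for a flattened camera frame
    control = np.random.rand(1)        # stand-in for the recorded control signal
    net.add_data(image, control)
net.train(5)                           # five epochs of backpropagation
print(net.prediction(np.random.rand(100)))
net.evaluate(0.1)                      # fraction of samples predicted within 0.1 of the target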

Example 10: test

# Required import: from pybrain.structure import FeedForwardNetwork [as alias]
# Or: from pybrain.structure.FeedForwardNetwork import activate [as alias]
def test():
    ds = SupervisedDataSet(2, 1)

    net = FeedForwardNetwork()
    inLayer = LinearLayer(2)
    hiddenLayer = SigmoidLayer(5)
    outLayer = SigmoidLayer(1)
    bias = BiasUnit('bias')

    net.addInputModule(inLayer)
    net.addModule(hiddenLayer)
    net.addOutputModule(outLayer)
    net.addModule(bias)

    in_to_hidd = FullConnection(inLayer, hiddenLayer)
    hi_to_out = FullConnection(hiddenLayer, outLayer)
    bias_to_hidd = FullConnection(bias, hiddenLayer)
    bias_to_out = FullConnection(bias, outLayer)

    net.addConnection(in_to_hidd)
    net.addConnection(hi_to_out)
    net.addConnection(bias_to_hidd)
    net.addConnection(bias_to_out)

    net.sortModules()
    pairs = [(0, 0, 0), (1, 1, 1), (0, 1, 0), (1, 0, 0)]
    for x in range(40):
        for i in range(4):
            p = pairs[i]
            ds.addSample((p[0], p[1]), p[2])

    trainer = BackpropTrainer(net, ds)

    for i in range(400):
        trainer.train()
        print "Epoch : %4d" % i

    print net.activate((0, 0))
    return net
Developer: Tskatom, Project: Finance, Lines: 41, Source file: neural_net.py
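
The dataset above encodes the logical AND truth table, so the returned network can be probed directly after training; a short hedged follow-up sketch:

net = test()                     # trains for 400 epochs on the AND-like dataset
print(net.activate((1, 1)))      # should approach 1
print(net.activate((1, 0)))      # should approach 0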

Example 11: train

# Required import: from pybrain.structure import FeedForwardNetwork [as alias]
# Or: from pybrain.structure.FeedForwardNetwork import activate [as alias]
 def train(self):
     # We will build up a network piecewise in order to create a new dataset
     # for each layer.
     dataset = self.dataset
     piecenet = FeedForwardNetwork()
     piecenet.addInputModule(copy.deepcopy(self.net.inmodules[0]))
     # Add a bias
     bias = BiasUnit()
     piecenet.addModule(bias)
     # Add the first visible layer
     firstRbm = self.iterRbms().next()
     visible = copy.deepcopy(firstRbm.visible)
     piecenet.addModule(visible)
     # For saving the rbms and their inverses
     self.invRbms = []
     self.rbms = []
     for rbm in self.iterRbms():
         self.net.sortModules()
         # Train the first layer with an rbm trainer for `epoch` epochs.
         trainer = self.trainerKlass(rbm, dataset, self.cfg)
         for _ in xrange(self.epochs):
             trainer.train()
         self.invRbms.append(trainer.invRbm)
         self.rbms.append(rbm)
         # Add the connections and the hidden layer of the rbm to the net.
         hidden = copy.deepcopy(rbm.hidden)
         biascon = FullConnection(bias, hidden)
         biascon.params[:] = rbm.biasWeights
         con = FullConnection(visible, hidden)
         con.params[:] = rbm.weights
         
         piecenet.addConnection(biascon)
         piecenet.addConnection(con)
         piecenet.addModule(hidden)
         # Overwrite old outputs
         piecenet.outmodules = [hidden]
         piecenet.outdim = rbm.hiddenDim
         piecenet.sortModules()
         
         dataset = UnsupervisedDataSet(rbm.hiddenDim)
         for sample, in self.dataset:
             new_sample = piecenet.activate(sample)
             dataset.addSample(new_sample)
         visible = hidden
Developer: HKou, Project: pybrain, Lines: 46, Source file: deepbelief.py

Example 12: transform

# Required import: from pybrain.structure import FeedForwardNetwork [as alias]
# Or: from pybrain.structure.FeedForwardNetwork import activate [as alias]
 def transform(self, X):
     #self.representationNet.sortModules()
     an = FeedForwardNetwork()
     an.addInputModule(self.inLayer)
     an.addOutputModule(self.hiddenLayer)
     an.addModule(self.b)
     an.addConnection(self.in_to_hidden)
     an.addConnection(self.b_to_hidden)
     an.sortModules()
     an.owner = self.supervisedNet
     #print self.representationNet.params
     #print 'kuku'
     #print self.supervisedNet.params
     transformed = []
     for x in X:
         #res = self.representationNet.activate(x)
         res = an.activate(x)
         transformed.append(res)
     return np.array(transformed)
Developer: ranBernstein, Project: Laban, Lines: 21, Source file: autoencoder.py

Example 13: NeuralNetworkPlayer

# Required import: from pybrain.structure import FeedForwardNetwork [as alias]
# Or: from pybrain.structure.FeedForwardNetwork import activate [as alias]
class NeuralNetworkPlayer(BasePlayer):
    """ NN-backed player"""
    def __init__(self):
        super(NeuralNetworkPlayer, self).__init__()

        # Create the network
        self.net = FeedForwardNetwork()

        # Internal Layers
        inLayer = LinearLayer(5)
        hiddenLayer1 = SigmoidLayer(6)
        hiddenLayer2 = SigmoidLayer(6)
        outLayer = LinearLayer(7)

        self.net.addInputModule(inLayer)
        self.net.addModule(hiddenLayer1)
        self.net.addModule(hiddenLayer2)
        self.net.addOutputModule(outLayer)

        self.net.addConnection(FullConnection(inLayer, hiddenLayer1))
        self.net.addConnection(FullConnection(hiddenLayer1, hiddenLayer2))
        self.net.addConnection(FullConnection(hiddenLayer2, outLayer))

        self.net.sortModules()

    def get_move(self, in_vals):
        results = self.net.activate(in_vals)
        return np.argmax(results)

    def set_params(self, params):
        self.net._params = params

    def get_params(self):
        return self.net._params

    def param_dim(self):
        return self.net.paramdim

    def reward(self, amount): pass
    def reset(self): pass
    def learn(self): pass
Developer: A-Malone, Project: coop-neural-nets, Lines: 43, Source file: neural_net_player.py
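
A short hypothetical use of the player above; the five input values are placeholders for whatever game-state features the original project feeds in.

player = NeuralNetworkPlayer()
print(player.param_dim())                         # total number of weights in the network
move = player.get_move([0.2, 0.5, 0.1, 0.9, 0.3])
print(move)                                       # index (0-6) of the strongest of the 7 outputs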

Example 14: insomnia

# Required import: from pybrain.structure import FeedForwardNetwork [as alias]
# Or: from pybrain.structure.FeedForwardNetwork import activate [as alias]
    def insomnia(self, falling_asleep, awakenings, cant_fall_back, low_sleep_hours):
        parameters = [falling_asleep, awakenings, cant_fall_back, low_sleep_hours]

        # Init network
        network = FeedForwardNetwork()
        # Init Layers
        inLayer = LinearLayer(4)
        outLayer = LinearLayer(1)
        # Init connection
        in_to_out = FullConnection(inLayer, outLayer)
        # Add modules
        network.addInputModule(inLayer)
        network.addOutputModule(outLayer)
        # Add connections
        network.addConnection(in_to_out)
        # Sort
        network.sortModules()
        # Set equal weights
        # TODO: Use learning to learn weights over time
        # in_to_out._setParameters([.1,.1,.1,.1])
        probability = network.activate(parameters)[0]

        return probability
Developer: JCDJulian, Project: forty-winks, Lines: 25, Source file: neural_network.py

Example 15: narcolepsy

# Required import: from pybrain.structure import FeedForwardNetwork [as alias]
# Or: from pybrain.structure.FeedForwardNetwork import activate [as alias]
    def narcolepsy(self, naps, awakenings, obesity):
        parameters = [naps, awakenings, obesity]

        # Init network
        network = FeedForwardNetwork()
        # Init Layers
        inLayer = LinearLayer(3)
        outLayer = LinearLayer(1)
        # Init connection
        in_to_out = FullConnection(inLayer, outLayer)
        # Add modules
        network.addInputModule(inLayer)
        network.addOutputModule(outLayer)
        # Add connections
        network.addConnection(in_to_out)
        # Sort
        network.sortModules()
        # Set equal weights
        # TODO: Use learning to learn weights over time
        # in_to_out._setParameters([.1,.1,.1])
        probability = network.activate(parameters)[0]

        return probability
Developer: JCDJulian, Project: forty-winks, Lines: 25, Source file: neural_network.py
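
Both of the last two examples activate a network whose weights are still at their random initial values, so the returned "probability" is essentially arbitrary. A hedged sketch of the weight-setting hinted at in the commented-out _setParameters lines, using the three-input layout of Example 15; the weights are assigned before sortModules() so they survive the parameter consolidation.

from pybrain.structure import FeedForwardNetwork, LinearLayer, FullConnection

network = FeedForwardNetwork()
inLayer = LinearLayer(3)
outLayer = LinearLayer(1)
in_to_out = FullConnection(inLayer, outLayer)
in_to_out._setParameters([0.1, 0.1, 0.1])   # equal fixed weights instead of random initialization
network.addInputModule(inLayer)
network.addOutputModule(outLayer)
network.addConnection(in_to_out)
network.sortModules()
print(network.activate([1, 1, 0]))          # -> [0.2], a plain weighted sum of the inputs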


Note: The pybrain.structure.FeedForwardNetwork.activate examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors, and copyright remains with the original authors; consult each project's license before redistributing or reusing the code. Do not reproduce this article without permission.