

Python networkreader.NetworkReader Class Code Examples

This article collects typical usage examples of the Python class pybrain.tools.customxml.networkreader.NetworkReader. If you have been wondering what the NetworkReader class is for, how it is used, or what real code that uses it looks like, the curated examples below should help.


A total of 15 code examples of the NetworkReader class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
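For orientation, here is a minimal, self-contained sketch of the round trip that most of the examples below rely on: a network is serialized to XML with NetworkWriter.writeToFile, and NetworkReader.readFrom later reconstructs it. This sketch is not taken from any of the projects below, and the file name net.xml is only a placeholder.

from pybrain.tools.shortcuts import buildNetwork
from pybrain.tools.customxml.networkwriter import NetworkWriter
from pybrain.tools.customxml.networkreader import NetworkReader

# Build a small feed-forward network (2 inputs, 3 hidden units, 1 output) and save it to XML.
net = buildNetwork(2, 3, 1, bias=True)
NetworkWriter.writeToFile(net, 'net.xml')

# Later, or in a separate process: rebuild the same network from the XML dump and use it.
restored = NetworkReader.readFrom('net.xml')
print(restored.activate([0.5, -0.2]))  # the restored network reproduces the saved weights

readFrom returns a ready-to-use network object, so the loaded network can be activated directly or handed to a trainer, as several of the examples below do with BackpropTrainer.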

Example 1: runThirdStageClassifier

    def runThirdStageClassifier(self):
        out = []
        true = []
        #SingleBatIDToAdd = [1, 2, 3, 5, 6] # for single
        Correct = 0
        print "Loading Network.."
        net = NetworkReader.readFrom("C:\Users\Anoch\PycharmProjects\BatClassification\ThirdStageClassifier.xml")
        print "Loading feature data with SSC = 1 (Single call type)"
        minFreq, maxFreq, Durantion, fl1, fl2, fl3, fl4, fl5, fl6, fl7, fl8, fl9, fl10, pixelAverage, target, path = self.getDistrubedTestDataRUNVERSIONTSC()
        SAMPLE_SIZE = len(minFreq)
        for i in range(0, SAMPLE_SIZE):
            ClassifierOutput= net.activate([minFreq[i], maxFreq[i], Durantion[i], fl1[i], fl2[i], fl3[i], fl4[i], fl5[i], fl6[i], fl7[i], fl8[i], fl9[i], fl10[i], pixelAverage[i]])

            ClassifierOutputID = np.argmax(ClassifierOutput)
            currentTarget = self.convertIDSingleTSC(target[i])
            out.append(ClassifierOutputID)
            true.append(currentTarget)

            #MAPPING FROM BATID TO TSC value:
            TSC_value = ClassifierOutputID
            # Metadata Setup, get path and write: TSC = value
            ds = self.HDFFile[path[i]]
            ds.attrs["TSC"] = TSC_value
        self.HDFFile.flush()
        self.ConfusionMatrix =  self.CorrectRatio(out, true)
        return self.ConfusionMatrix
Developer ID: AnochjhnIruthayam, Project: BatClassification, Lines of code: 26, Source file: ClassifierConnected.py

Example 2: __init__

    def __init__(self, loadWeightsFromFile, filename):
        #neural network as function approximator
        #Initialize neural network
        if loadWeightsFromFile:
            self.nn = NetworkReader.readFrom(filename)
        else:
            self.nn = buildNetwork(NODE_INPUT, NODE_HIDDEN, NODE_OUTPUT, bias=True)
Developer ID: DiNAi, Project: nn2014-RL-atari, Lines of code: 7, Source file: agent.py

Example 3: buildNet

	def buildNet(self):
		print "Building a network..."
		if os.path.isfile(self.path):
			self.trained = True
			return NetworkReader.readFrom(self.path)
		else:
			return buildNetwork(self.all_data.indim, self.d[self.path]['hidden_dim'], self.all_data.outdim, outclass=SoftmaxLayer)
Developer ID: davidlavy88, Project: FaceIdentifier, Lines of code: 7, Source file: identify.py

Example 4: runClassifier

 def runClassifier(self):
     out = []
     true = []
     #BatIDToAdd = [1, 2, 3, 5, 6, 10, 11, 12, 14, 8, 9] #1-14 are bats; 8 is noise; 9 is something else
     print "Loading Network.."
     net = NetworkReader.readFrom("SecondStageClassifier.xml")
     print "Loading feature data with FSC = 1 (Bat calls)"
     minFreq, maxFreq, Durantion, fl1, fl2, fl3, fl4, fl5, fl6, fl7, fl8, fl9, fl10, pixelAverage, target, path = self.getDistrubedTestDataRUNVERSION()
     SAMPLE_SIZE = len(minFreq)
     for i in range(0, SAMPLE_SIZE):
         ClassifierOutput = net.activate([minFreq[i], maxFreq[i], Durantion[i], fl1[i], fl2[i], fl3[i], fl4[i], fl5[i], fl6[i], fl7[i], fl8[i], fl9[i], fl10[i], pixelAverage[i]])
         ClassifierOutputID = np.argmax(ClassifierOutput)
         currentTarget = self.convertIDMultiSingle(target[i])
         out.append(ClassifierOutputID)
         true.append(currentTarget)
         #MAPPING FROM BATID TO TSC value:
         SSC_value = ClassifierOutputID
         # Metadata Setup, get path and write: TSC = value
         ds = self.HDFFile[path[i]]
         ds.attrs["SSC"] = SSC_value
     # Close HDF5 file to save to disk. This is also done to make sure the next stage classifier can open the file
     self.HDFFile.flush()
     self.HDFFile.close()
     self.ConfusionMatrix = self.CorrectRatio(out, true)
     return self.ConfusionMatrix
Developer ID: AnochjhnIruthayam, Project: BatClassification, Lines of code: 25, Source file: ClassifierSecondStage.py

Example 5: getPersistedData

 def getPersistedData(self, name):
     pathToData = self.relPathFromFilename(name)
     if os.path.isfile(pathToData):
         with open(pathToData, "rb") as f:
             data = pickle.load(f)
         if name == NEURAL_NET_DUMP_NAME:
             data.net = NetworkReader.readFrom(self.relPathFromFilename(name + DATA_DUMP_NN_EXT))
         return data
Developer ID: TanaySinghal, Project: SPCSAISelfDrivingCar, Lines of code: 8, Source file: learning.py

Example 6: testNets

def testNets():
    ds = SupervisedDataSet.loadFromFile('SynapsemonPie/boards')
    net20 = NetworkReader.readFrom('SynapsemonPie/synapsemon_primer20.xml') 
    net50 = NetworkReader.readFrom('SynapsemonPie/synapsemon_primer50.xml') 
    net80 = NetworkReader.readFrom('SynapsemonPie/synapsemon_primer80.xml') 
    net110 = NetworkReader.readFrom('SynapsemonPie/synapsemon_primer110.xml') 
    net140 = NetworkReader.readFrom('SynapsemonPie/synapsemon_primer140.xml') 
    trainer20 = BackpropTrainer(net20, ds)
    trainer50 = BackpropTrainer(net50, ds)
    trainer80 = BackpropTrainer(net80, ds)
    trainer110 = BackpropTrainer(net110, ds)
    trainer140 = BackpropTrainer(net140, ds)
    print trainer20.train()
    print trainer50.train()
    print trainer80.train()
    print trainer110.train()
    print trainer140.train()
Developer ID: johnny-zheng, Project: SynapsemonPy, Lines of code: 17, Source file: primer_evaluation.py

Example 7: main

def main():
    train_file = 'data/train.csv'
    # validation_file = 'data/validation.csv'
    output_model_file = 'model.xml'

    # hidden_size = 4
    epochs = 500

    # load data
    # def loadData():
    train = np.loadtxt(train_file, delimiter=' ')
    Input = train[0:,0:3]
    Output = train[0:,3:5]

    # validation = np.loadtxt(validation_file, delimiter=',')
    # train = np.vstack((train, validation))

    # x_train = train[:, 0:-1]
    # y_train = train[:, -1]
    # y_train = y_train.reshape(-1, 1)

    # input_size = x_train.shape[1]
    # target_size = y_train.shape[1]

    # prepare dataset
    # def prepare dataset(input_size, target_size):
    ds = SDS(Input,Output)
    # ds.addSample(input_size)
    # ds.setField('input', x_train)
    # ds.setField('target', y_train)

    # init and train
    # def initTrain(input_size, hidden_size, input, output):
    # net = buildNetwork(input_size, hidden_size, target_size, bias=True)
    net = buildNetwork(3,  # input layer
                                 4,  # hidden0
                                 2,  # output
                                 hiddenclass=SigmoidLayer,
                                 outclass=SigmoidLayer,
                                 bias=True
                                 )
    net = NetworkReader.readFrom('model.xml')
    for i,o in zip(Input,Output):
        ds.addSample(i,o)
        print i, o

    trainer = BackpropTrainer(net, ds)
        
    print "training for {} epochs...".format(epochs)

    for i in range(epochs):
        mse = trainer.train()
        rmse = sqrt(mse)
        print "training RMSE, epoch {}: {}".format(i + 1, rmse)
        if os.path.isfile("../stopfile.txt") == True:
            break
    
    NetworkWriter.writeToFile(net, output_model_file)
Developer ID: amaneureka, Project: iResQ, Lines of code: 58, Source file: train.py

Example 8: __init__

 def __init__(self):
     print "start a new instance"
     self.loaded=False
     self.has_data_source=False
     try:
         self.net=NetworkReader.readFrom('pickled_ANN')
         print "ANN has been found from an ash jar"
         self.loaded=True
     except IOError:
         print "ash jar is empty, use train() to start a new ANN"
Developer ID: lkong, Project: Pickle_ANN, Lines of code: 10, Source file: NetFlow_ANN.py

Example 9: nfq_action_value

def nfq_action_value(network_fname, state=[0, 0, 0, 0, 0]):
    # TODO generalize away from 9 action values. Ask the network how many
    # discrete action values there are.
    n_actions = 9
    network = NetworkReader.readFrom(network_fname)
    actionvalues = np.empty(n_actions)
    for i_action in range(n_actions):
        network_input = r_[state, one_to_n(i_action, n_actions)]
        actionvalues[i_action] = network.activate(network_input)
    return actionvalues
Developer ID: chrisdembia, Project: agent-bicycle, Lines of code: 10, Source file: analysis.py

Example 10: exoplanet_search

 def exoplanet_search(self,
                      find=default_find):
      """
      This method searches for exoplanets.
      The output will have the format:
          (exostar1_streak, exostar2_streak, ...)
      where an exostar is a star with an exoplanet, and a streak is
      a list of states in which the exostar was observed to have exoplanetary
      behaviour.
      At least 5 stars must be tracked.
      """
      stars, deleted = self.find_objects(find=find)
      print str(deleted / len(self.photos)) + "% of the data was ignored"
      """
      There must be an integer multiple of 5 stars
      in stars, and the stars must be grouped together in lumps
      of 5.
      """
      exostreaks = []
      net = NetworkReader.readFrom("../../Identifier/network.xml")
      for starnum in range(0, len(stars), 5):
          search_stars = stars[starnum: starnum + 5]
          start_time = search_stars[0].states[0].time
          stop_time = search_stars[0].states[-1].time
          for photonum in range(start_time, stop_time + 1, 10):
              print self.photos[photonum]
              photonum = min(photonum, stop_time - 10)
              intensities = []
              for slide in range(photonum, photonum + 10):
                  intensities.append([])
                  photo = self.photos[slide]
                  photo.load()
                  for star in search_stars:
                      state = star.track(slide)
                      brightness = photo.intensity(state.position, state.radius)
                      intensities[-1].append(brightness)
                  photo.close()
              inpt = []
              for starothernum in range(5):
                  lightcurve = []
                  for slides_from_zero in range(10):
                      lightcurve.append(intensities[slides_from_zero][starothernum])
                  array_version = array(lightcurve)
                  array_version /= average(array_version)
                  inpt += list(array_version)
              nnet_output = net.activate(tuple(inpt))
              for o in range(5):
                  if nnet_output[o] > 0.5:
                      exostreak = []
                      for slide in range(photonum, photonum + 10):
                          state = search_stars[o].track(slide)
                          exostreak.append(state)
                      exostreaks.append(exostreak)
      return exostreaks
Developer ID: Bushwallyta271828, Project: StarTracker, Lines of code: 54, Source file: extract.py

Example 11: load_network_from_file

    def load_network_from_file(self, filename):
        """Load Network from File

        Using a NetworkWriter written file, data from the saved network
        will be reconstituted into a new PathPlanningNetwork class.
        This is used to load saved networks.

        Arguments:
            filename: The filename of the saved xml file.
        """
        self._network = NetworkReader.readFrom(filename)

        return
Developer ID: evansneath, Project: surgicalsim, Lines of code: 13, Source file: network.py

Example 12: __init__

 def __init__(self, data, machineID, eta, lmda, netPath, input_size=30, epochs=20, train_str_index=1000, train_end_index=3000):
     '''
     Constructor
     '''
     self.data = data
     self.machineID = machineID
     self.eta = eta
     self.lmda = lmda
     self.INPUT_SIZE = input_size
     self.epochs = epochs
     self.str_train = train_str_index
     self.end_train = train_end_index
     self.net = NetworkReader.readFrom(netPath)
Developer ID: Manrich121, Project: ForecastingCloud, Lines of code: 13, Source file: Rnn_model.py

Example 13: trainNetwork

def trainNetwork():
	print "[Training] Network has Started..."
	inputSize = 0
	with open('file1.txt', 'r') as f:			#automatically closes file at the end of the block
  		#first_line = f.readline()
  		#inputSize = len(first_line)
		dataset = SupervisedDataSet(4, 1)	 #specify size of data and target
		f.seek(0) 							#Move back to beginnning of file
		#iterate through the file. 1 picture per line
		for line in f:
			mylist = json.loads(line)		#list object
			target = mylist[-1]				#retrieve and then delete the target classification
			del mylist[-2:]
			#print target
			dataset.addSample(tuple(mylist), (target,))
			#print json.loads(line)
	if os.path.isfile('annModel.xml'):
		skynet = NetworkReader.readFrom('annModel.xml')#for use if individual sample files used
	else:
		skynet = buildNetwork(dataset.indim, 8, dataset.outdim, bias=True, hiddenclass=TanhLayer) #input,hidden,output
	#SoftmaxLayer, SigmoidLayer, LinearLayer, GaussianLayer
	#Note hidden neuron number is arbitrary, can try 1 or 4 or 3 or 5 if this methods doesnt work out
	trainer = BackpropTrainer(skynet, dataset,learningrate = 0.3, weightdecay = 0.01,momentum = 0.9)
	#trainer.trainUntilConvergence()
	for i in xrange(1000):
		trainer.train()
    #trainer.trainEpochs(1000)
    #Save the now trained neural network
	NetworkWriter.writeToFile(skynet,'annModel.xml')
	print "[Network] has been Written"

################## SVM Method #######################
#Change append method in write method for target persistence
	dataX = []
	datay = []
	with open(writeFile, 'r') as f:
		for line in f:
			mylist = json.loads(line)
			target2 = mylist[-1]
			dataX.append(mylist[:-2])
			datay.append(target2)
	#datay = [target2] * len(dataX) #Targets, size is n_samples, for use with indiviual sample files with same target
	print [target2]
	print dataX
	print datay
	clf = svm.LinearSVC()
	clf.fit(dataX,datay)
    #Persist the trained model
	joblib.dump(clf,'svmModel.pkl')
Developer ID: phalax4, Project: illumination, Lines of code: 49, Source file: writeUnit.py

Example 14: __init__

 def __init__(self, data, machineID, netPath, eta, lmda, input_size=30, epochs=20, train_str_index=1000, train_end_index=3000):
     '''
     Constructor
     '''
     self.cpuData = data[0]
     self.memData = data[1]
     self.machineID = machineID
     self.eta = eta
     self.lmda = lmda
     self.INPUT_SIZE = input_size
     self.epochs = epochs
     self.str_train = train_str_index
     self.end_train = train_end_index
     self.net = NetworkReader.readFrom(netPath)
     
     self.memForecasts = np.genfromtxt("d:/data/memory_fnn/"+machineID.replace("cpu", "memory"),delimiter=',').ravel()
Developer ID: Manrich121, Project: ForecastingCloud, Lines of code: 16, Source file: Entwine_model.py

Example 15: LoadNetwork

    def LoadNetwork(self):
        """
        Loading network dump from file.
        """
        FCLogger.debug('Loading network from PyBrain xml-formatted file...')
        net = None

        if os.path.exists(self.networkFile):
            net = NetworkReader.readFrom(self.networkFile)

            FCLogger.info('Network loaded from dump-file: {}'.format(os.path.abspath(self.networkFile)))

        else:
            FCLogger.warning('{} - file with Neural Network configuration not exist!'.format(os.path.abspath(self.networkFile)))

        self.network = net
Developer ID: chrinide, Project: FuzzyClassificator, Lines of code: 16, Source file: PyBrainLearning.py


Note: The pybrain.tools.customxml.networkreader.NetworkReader class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their authors; copyright in the source code remains with the original authors, and distribution and use are governed by each project's license. Please do not reproduce this article without permission.