当前位置: 首页>>代码示例>>Python>>正文


Python SupervisedDataSet.setField方法代码示例

本文整理汇总了Python中pybrain.datasets.SupervisedDataSet.setField方法的典型用法代码示例。如果您正苦于以下问题:Python SupervisedDataSet.setField方法的具体用法?Python SupervisedDataSet.setField怎么用?Python SupervisedDataSet.setField使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在pybrain.datasets.SupervisedDataSet的用法示例。


在下文中一共展示了SupervisedDataSet.setField方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: train

# 需要导入模块: from pybrain.datasets import SupervisedDataSet [as 别名]
# 或者: from pybrain.datasets.SupervisedDataSet import setField [as 别名]
    def train(self, x, y):
        """Train the network on inputs ``x`` and labels ``y``.

        Trains for a fixed number of epochs (``self.epochs``) or, when
        ``self.epochs == 0``, until convergence.  Inputs are normalized
        with a z-transform before training.

        :param x: 2-D array of input samples, one row per sample
        :param y: target values aligned with the rows of ``x``
        """
        print("training...")

        # Normalize input with a z-transform (zero mean, unit variance).
        m = x.mean()
        s = x.std()
        x = self.z_transform(x, m, s)

        # setField assigns whole arrays at once, which is much faster than
        # adding samples one by one.
        ds = SupervisedDataSet(x.shape[1], 1)
        ds.setField('input', x)
        ds.setField('target', y)

        trainer = BackpropTrainer(self.n, ds, learningrate=self.learning_rate,
                                  momentum=self.momentum, verbose=True)

        if self.epochs == 0:
            trainer.trainUntilConvergence()
        else:
            for i in range(self.epochs):
                start_time = time.time()
                trainer.train()
                print("epoch: {}".format(i))
                print("time: {} seconds".format(time.time() - start_time))

        print("finished")
开发者ID:ttaschke,项目名称:digit-recognizer,代码行数:29,代码来源:neural_net.py

示例2: get_dataset_txt

# 需要导入模块: from pybrain.datasets import SupervisedDataSet [as 别名]
# 或者: from pybrain.datasets.SupervisedDataSet import setField [as 别名]
def get_dataset_txt(filename):
    """
        creates a dataset for the neural network to use

        input type: string representing a filename created from a numpy array
        return type: SupervisedDataSet with 'input' and 'target' fields set
    """
    array = np.loadtxt(filename)

    # The last FOUR columns are the target variables (one per class) and
    # all remaining columns are input variables.
    number_of_columns = array.shape[1]

    # NOTE(review): the declared input dimension is number_of_columns - 1,
    # but the 'input' field actually has number_of_columns - 4 columns --
    # confirm whether the constructor argument should be - 4 instead.
    dataset = SupervisedDataSet(number_of_columns - 1, 4)

    # setField assigns whole arrays at once instead of sample-by-sample.
    dataset.setField('input', array[:,:-4])
    dataset.setField('target', array[:,-4:])

    return dataset
开发者ID:ErinCoughlan,项目名称:CS152FinalProject,代码行数:34,代码来源:getDataSet.py

示例3: test_simple_predictor

# 需要导入模块: from pybrain.datasets import SupervisedDataSet [as 别名]
# 或者: from pybrain.datasets.SupervisedDataSet import setField [as 别名]
    def test_simple_predictor(self):
        """Load a pickled network, predict on the CSV test set, and write
        the predictions to a text file.

        The target column of the test file is replaced with all-zero dummy
        values because pybrain datasets require a 'target' field even when
        only activations are needed.
        """
        output_model_file = "Train/TestData/model.pkl"
        test_file = "Train/TestData/test.csv"
        prediction_file = "Train/TestData/prediction.txt"

        # Use a context manager so the model file handle is closed
        # (the original pickle.load(open(...)) leaked it).
        with open(output_model_file, 'rb') as model_file:
            net = pickle.load(model_file)

        test = np.loadtxt(test_file, delimiter=',')
        x_test = test[:, 0:-1]
        y_test = test[:, -1].reshape(-1, 1)

        # Dummy targets: activateOnDataset only reads the 'input' field.
        y_test_dummy = np.zeros(y_test.shape)

        input_size = x_test.shape[1]
        target_size = y_test.shape[1]

        # The loaded network must match the data dimensions.
        assert(net.indim == input_size)
        assert(net.outdim == target_size)

        ds = SupervisedDataSet(input_size, target_size)
        ds.setField('input', x_test)
        ds.setField('target', y_test_dummy)

        p = net.activateOnDataset(ds)
        np.savetxt(prediction_file, p, fmt='%.6f')
开发者ID:rvanderwall,项目名称:PlanktonNN,代码行数:32,代码来源:Neural.py

示例4: _prepare_dataset

# 需要导入模块: from pybrain.datasets import SupervisedDataSet [as 别名]
# 或者: from pybrain.datasets.SupervisedDataSet import setField [as 别名]
    def _prepare_dataset(self, X, y, model_type):
        """Build a pybrain ``SupervisedDataSet`` from the given samples.

        :param X: data of shape [n_samples, n_features]
        :param y: labels (classification) or target values (regression)
        :param str model_type: 'classification' or 'regression'
        :return: dataset with 'input' and 'target' fields assigned
        :raises ValueError: if ``model_type`` is neither of the two labels
        """
        X, y, sample_weight = check_inputs(X, y, sample_weight=None, allow_none_weights=True,
                                           allow_multiple_targets=model_type == 'regression')
        X = self._transform_data(X, y, fit=not self.is_fitted())

        if model_type == 'classification':
            if not self.is_fitted():
                self._set_classes(y)
            target = one_hot_transform(y, n_classes=len(self.classes_))
        elif model_type == 'regression':
            # 1-D targets become a column vector; 2-D y is multi-regression.
            target = y.reshape((len(y), 1)) if len(y.shape) == 1 else y
            if not self.is_fitted():
                self.n_targets = target.shape[1]
        else:
            raise ValueError('Wrong model type')

        # Assigning whole fields is far faster than addSample in a loop.
        ds = SupervisedDataSet(X.shape[1], target.shape[1])
        ds.setField('input', X)
        ds.setField('target', target)
        return ds
开发者ID:AlexanderTek,项目名称:rep,代码行数:28,代码来源:pybrain.py

示例5: _prepare_dataset

# 需要导入模块: from pybrain.datasets import SupervisedDataSet [as 别名]
# 或者: from pybrain.datasets.SupervisedDataSet import setField [as 别名]
    def _prepare_dataset(self, X, y, model_type):
        """
        Prepare data in pybrain format.

        :param pandas.DataFrame X: data of shape [n_samples, n_features]
        :param y: values for samples --- array-like of shape [n_samples]
        :param str model_type: classification or regression label
        :return: SupervisedDataSet ready for training
        """
        X, y, sample_weight = check_inputs(X, y, sample_weight=None, allow_none_weights=True,
                                           allow_multiple_targets=model_type == 'regression')
        X = self._transform_data(X, y, fit=not self._is_fitted())

        if model_type == 'regression':
            # Multi-target y (2-D) passes through; 1-D y becomes a column.
            target = y if len(y.shape) > 1 else y.reshape((len(y), 1))
            if not self._is_fitted():
                self.n_targets = target.shape[1]
        elif model_type == 'classification':
            if not self._is_fitted():
                self._set_classes(y)
            target = one_hot_transform(y, n_classes=len(self.classes_))
        else:
            raise ValueError('Wrong model type')

        dataset = SupervisedDataSet(X.shape[1], target.shape[1])
        dataset.setField('input', X)
        dataset.setField('target', target)
        return dataset
开发者ID:arogozhnikov,项目名称:rep,代码行数:36,代码来源:pybrain.py

示例6: main

# 需要导入模块: from pybrain.datasets import SupervisedDataSet [as 别名]
# 或者: from pybrain.datasets.SupervisedDataSet import setField [as 别名]
def main():
    """Train a feed-forward net on sentiment features and report how often
    the predicted sign matches the actual target sign."""
    # Read in pre-processed features.
    print('reading preprocessed data')
    bag = read_bag_of_word('features')
    # Read in the sentiment dictionary.
    print('reading dictionary')
    [word_vector, sentiments] = read_dictionary("positive.txt", "negative.txt")
    features, target, features_dict = create_feature_matrix(bag, sentiments)

    # Sort the date strings chronologically (parse -> sort -> format back).
    dates = dow_jones_labels.keys()
    dates = [datetime.datetime.strptime(ts, "%Y-%m-%d") for ts in dates]
    dates.sort()
    dates = [datetime.datetime.strftime(ts, "%Y-%m-%d") for ts in dates]

    ds = SupervisedDataSet(4, 1)
    ds.setField('input', features)
    target = np.array(target).reshape(-1, 1)
    ds.setField('target', target)

    net = buildNetwork(4, 40, 1, bias=True)
    trainer = BackpropTrainer(net, ds)
    trainer.trainUntilConvergence(verbose=True, validationProportion=0.15, maxEpochs=10000, continueEpochs=10)

    # Count predictions whose sign matches the actual movement.
    count = 0
    for i in range(len(target)):
        # Activate once per sample (was computed twice per iteration).
        prediction = net.activate(features[i])
        print("predict={0},actual={1}".format(prediction, target[i]))
        if prediction * target[i] > 0:
            count += 1
    print("accuracy={0}".format(float(count) / len(dow_jones_labels)))

示例7: _prepare_net_and_dataset

# 需要导入模块: from pybrain.datasets import SupervisedDataSet [as 别名]
# 或者: from pybrain.datasets.SupervisedDataSet import setField [as 别名]
    def _prepare_net_and_dataset(self, X, y, model_type):
        """Build ``self.net`` and the matching pybrain dataset for training.

        Mutates ``self.layers`` and ``self.hiddenclass`` by filling in
        defaults when they are None, assembles the buildNetwork options from
        ``self.params``, then constructs the dataset and the network.

        :param X: data of shape [n_samples, n_features]
        :param y: labels (classification) or target values (regression)
        :param str model_type: 'classification' or 'regression'
        :return: the prepared dataset (the network is stored on ``self.net``)
        :raises ValueError: on an unexpected option key or wrong model type
        """
        X, y, sample_weight = check_inputs(X, y, sample_weight=None, allow_none_weights=True)
        self._check_init_input(self.layers, self.hiddenclass)
        X = self._transform_data(X, y, fit=True)

        # Default architecture: one hidden layer of 10 units.
        if self.layers is None:
            self.layers = [10]

        # Default activation: a sigmoid layer for every hidden layer.
        if self.hiddenclass is None:
            self.hiddenclass = []
            for i in range(len(self.layers)):
                self.hiddenclass.append('SigmoidLayer')

        # Base options for buildNetwork; user-supplied params may override
        # these but any unknown key is rejected.
        net_options = {'bias': True,
                       'outputbias': True,
                       'peepholes': False,
                       'recurrent': False}
        for key in self.params:
            if key not in net_options.keys():
                raise ValueError('Unexpected parameter ' + key)
            net_options[key] = self.params[key]
        # buildNetwork only takes one hiddenclass; further hidden layers are
        # added manually after construction (see the loop at the bottom).
        net_options['hiddenclass'] = LAYER_CLASS[self.hiddenclass[0]]
        net_options['fast'] = False

        if model_type == 'classification':
            net_options['outclass'] = structure.SoftmaxLayer

            self._set_classes(y)
            layers_for_net = [X.shape[1], self.layers[0], len(self.classes_)]
            ds = SupervisedDataSet(X.shape[1], len(self.classes_))

            # One-hot encode the labels; OneHotEncoder needs a 2-D input.
            y = y.reshape((len(y), 1))
            label = numpy.array(OneHotEncoder(n_values=len(self.classes_)).fit_transform(y).todense())

            # NOTE(review): samples are added one at a time here, unlike the
            # regression branch which uses the faster setField path.
            for i in range(0, len(y)):
                ds.addSample(tuple(X[i, :]), tuple(label[i]))

        elif model_type == 'regression':
            net_options['outclass'] = structure.LinearLayer

            # 1-D targets become a single-output column.
            if len(y.shape) == 1:
                y = y.reshape((len(y), 1))
            layers_for_net = [X.shape[1], self.layers[0], y.shape[1]]

            ds = SupervisedDataSet(X.shape[1], y.shape[1])
            ds.setField('input', X)
            ds.setField('target', y)

        else:
            raise ValueError('Wrong model type')

        self.net = buildNetwork(*layers_for_net, **net_options)

        # Append any additional hidden layers beyond the first, then let
        # pybrain re-sort the module graph so the net is executable.
        for i in range(1, len(self.layers)):
            hid_layer = LAYER_CLASS[self.hiddenclass[i]](self.layers[i])
            self.net.addModule(hid_layer)
        self.net.sortModules()

        return ds
开发者ID:tyamana,项目名称:rep,代码行数:61,代码来源:pybrain.py

示例8: buildDataSet

# 需要导入模块: from pybrain.datasets import SupervisedDataSet [as 别名]
# 或者: from pybrain.datasets.SupervisedDataSet import setField [as 别名]
def buildDataSet(timeCat, length):
    """Build a pybrain dataset of scaled MACD / stochastic-oscillator /
    price history from Poloniex chart data.

    Inputs per sample: scaled MACD, scaled %K, and the three most recent
    scaled prices (from the ``ppr`` deque); target: the scaled price.

    :param timeCat: "days" or "hours" -- selects the timedelta unit
    :param length: offset applied to utcnow() for the start time
                   (presumably negative to look back in time -- TODO confirm)
    :return: SupervisedDataSet with 5 inputs and 1 output

    NOTE(review): relies on module globals (MACD_Histo, KValue, ppr,
    POLO_API_KEY, POLO_SECRET) and on calculateMACD/calculateStchOsc
    updating those globals as side effects.
    """
    ds = SupervisedDataSet(5,1) #initialize dataset (inputs, outputs)
    # Seed arrays with a single zero row so vstack works; the dummy row is
    # deleted again below.
    MACDvalueArray = np.array([0])
    KValueArray = np.array([0])
    priceArray = np.array([0])
    polo = poloniex(POLO_API_KEY, POLO_SECRET)
    # NOTE(review): no else branch -- an unrecognized timeCat leaves
    # startTime unbound and raises NameError on the next line.
    if(timeCat == "days"):
        startTime = datetime.datetime.utcnow() + datetime.timedelta(days=length)
    elif(timeCat == "hours"):
        startTime = datetime.datetime.utcnow() + datetime.timedelta(hours=length)
    unixTime = calendar.timegm(startTime.utctimetuple())
    endTime = calendar.timegm(datetime.datetime.utcnow().utctimetuple())
    chartData = polo.returnChartData(unixTime,endTime,300) #get all our data! start time, end time, period
    ia = np.array([0,0,0,0,0]) #heres our input array
    ta = np.array([0]) #and the output
    for i in chartData:
        # Calculate our indicators; these calls update the module-level
        # MACD_Histo and KValue globals read just below.
        calculateMACD(i['close']) 
        calculateStchOsc(i['close'],i['high'],i['low'])
        MACDvalueArray = np.vstack((MACDvalueArray,MACD_Histo))
        KValueArray = np.vstack((KValueArray,KValue))
        priceArray = np.vstack((priceArray,i['close']))
    # Delete the first row of each array because it is the all-zero seed.
    MACDvalueArray = np.delete(MACDvalueArray,0,0) 
    KValueArray = np.delete(KValueArray,0,0)
    priceArray = np.delete(priceArray,0,0)
    MACD_max = max(MACDvalueArray)
    MACD_min = min(MACDvalueArray)
    K_max = max(KValueArray)
    K_min = min(KValueArray)
    price_max = max(priceArray)
    price_min = min(priceArray)
    # Scaling functions: neural nets work better when all inputs share a
    # range, so each series is mapped linearly onto [0, 1].
    m = interp1d([MACD_min[0],MACD_max[0]],[0,1])
    k = interp1d([K_min[0],K_max[0]],[0,1])
    p = interp1d([price_min[0],price_max[0]],[0,1])
    #result = interp1d([0,1],[price_min[0],price_max[0]])
    for i in range(0,priceArray.size):
        scaledM = float(m(MACDvalueArray[i]))
        scaledK = float(k(KValueArray[i]))
        scaledP = float(p(priceArray[i]))
        # Build the input and output arrays; inputs include the last three
        # scaled prices taken from the ppr queue.
        ia = np.vstack((ia,[scaledM,scaledK,ppr[0],ppr[1],ppr[2]]))
        ta = np.vstack((ta,[scaledP]))
        # ppr is a queue keeping the last 3 values; appendleft for FIFO action.
        ppr.appendleft(scaledP)
    np.savetxt('test1.out',ia,delimiter=',')
    # Delete the first 15 rows because that is how long the MACD takes to
    # get initialized to proper values.
    for i in range(0,15):
        ia = np.delete(ia,0,0)
        ta = np.delete(ta,0,0)
    np.savetxt('test2.out',ia,delimiter=',') #this was just for testing, outputs all data to text file
    assert (ia.shape[0] == ta.shape[0]) #make sure input and output are same size
    ds.setField('input',ia)
    ds.setField('target',ta)
    print(str(len(ds))) #print out how many data points we have
    return ds
开发者ID:TB2706,项目名称:moneymoneymoney,代码行数:59,代码来源:NNtest2.py

示例9: castToRegression

# 需要导入模块: from pybrain.datasets import SupervisedDataSet [as 别名]
# 或者: from pybrain.datasets.SupervisedDataSet import setField [as 别名]
 def castToRegression(self, values):
     """Return a regression ``SupervisedDataSet`` built from this data set.

     Every field except 'target' is copied over unchanged; the class
     labels are used as indices into *values* to produce the new
     regression targets."""
     regression_ds = SupervisedDataSet(self.indim, 1)
     for field_name in self.getFieldNames():
         if field_name != 'target':
             regression_ds.setField(field_name, self[field_name])
     regression_ds.setField('target', values[self['class'].astype(int)])
     return regression_ds
开发者ID:fh-wedel,项目名称:pybrain,代码行数:12,代码来源:classification.py

示例10: initialize_dataset

# 需要导入模块: from pybrain.datasets import SupervisedDataSet [as 别名]
# 或者: from pybrain.datasets.SupervisedDataSet import setField [as 别名]
def initialize_dataset(regression_task, train_x, train_y):
    """Build a pybrain dataset for either regression or 2-class classification.

    :param regression_task: True -> SupervisedDataSet, False -> ClassificationDataSet
    :param train_x: 2-D array of input features
    :param train_y: 1-D array of targets (reshaped to a column internally)
    :return: (dataset, number_of_features)
    """
    n_features = train_x.shape[1]
    if regression_task:
        dataset = SupervisedDataSet(n_features, 1)
    else:
        dataset = ClassificationDataSet(n_features, nb_classes=2,
                                        class_labels=['no success', '1st down or TD'])

    dataset.setField('input', train_x)
    dataset.setField('target', train_y.reshape((len(train_y), 1)))
    return dataset, n_features
开发者ID:dannypage,项目名称:NFLPlayPrediction,代码行数:12,代码来源:neural_network_prediction.py

示例11: _activate_on_dataset

# 需要导入模块: from pybrain.datasets import SupervisedDataSet [as 别名]
# 或者: from pybrain.datasets.SupervisedDataSet import setField [as 别名]
    def _activate_on_dataset(self, X):
        """Run the fitted network over *X* and return its raw activations."""
        assert self.is_fitted(), "Net isn't fitted, please call 'fit' first"

        X = self._transform_data(X, fit=False)
        # pybrain datasets require a 'target' field even for pure
        # prediction, so supply all-zero dummy targets.
        dummy_targets = numpy.zeros((len(X), 1))

        dataset = SupervisedDataSet(X.shape[1], dummy_targets.shape[1])
        dataset.setField('input', X)
        dataset.setField('target', dummy_targets)

        return self.net.activateOnDataset(dataset)
开发者ID:AlexanderTek,项目名称:rep,代码行数:13,代码来源:pybrain.py

示例12: _set_dataset

# 需要导入模块: from pybrain.datasets import SupervisedDataSet [as 别名]
# 或者: from pybrain.datasets.SupervisedDataSet import setField [as 别名]
    def _set_dataset(self, trn_index, tst_index):
        """Build train/test pybrain datasets for the given index splits.

        Features are normalized with a normalizer fitted on the TRAINING
        split only (so the test data never leaks into the scaling), and
        targets are log-transformed.

        :param trn_index: index of the training rows
        :param tst_index: index of the test rows
        :return: (train_dataset, test_dataset)
        """
        trn_descs = self.tot_descs[trn_index]
        tst_descs = self.tot_descs[tst_index]
        trn_target = self.tot_target[trn_index]
        tst_target = self.tot_target[tst_index]

        # Fit the normalizer on the training split only.
        normalizer = self._getNormalizer(trn_descs)

        def build_split(descs, target):
            # Normalize features, log-transform targets, wrap in a dataset.
            normed = self._featureNorm(descs, normalizer)
            log_target = np.log(target)
            log_target = log_target.reshape((log_target.shape[0], 1))
            split_ds = SupervisedDataSet(self.indim, self.outdim)
            split_ds.setField('input', normed)
            split_ds.setField('target', log_target)
            return split_ds

        return build_split(trn_descs, trn_target), build_split(tst_descs, tst_target)
开发者ID:RunshengSong,项目名称:CLiCC_Packages,代码行数:34,代码来源:cross_validator.py

示例13: train

# 需要导入模块: from pybrain.datasets import SupervisedDataSet [as 别名]
# 或者: from pybrain.datasets.SupervisedDataSet import setField [as 别名]
	def train(self, x_train=None, y_train=None):
		"""Train the network on (x_train, y_train), or on shuffled stored
		data when no arguments are given.

		Bug fix: ``hs`` was previously only assigned when
		``self.hidden_size == 0``, so any non-zero hidden size raised a
		NameError at the buildNetwork call.

		:param x_train: 2-D array of inputs (defaults to stored data)
		:param y_train: 1-D/2-D array of targets aligned with x_train
		"""
		if x_train is None and y_train is None:
			x_train, y_train = shuffle(self.processed_data, self.processed_labels)

		ds = SupervisedDataSet(x_train.shape[1], 1)
		assert(x_train.shape[0] == y_train.shape[0])
		ds.setField('input', x_train)
		ds.setField('target', y_train)

		# Hidden-layer width: configured size, or input width when unset (0).
		hs = self.hidden_size if self.hidden_size != 0 else x_train.shape[1]
		self.nn = buildNetwork(x_train.shape[1], hs, 1, bias=True, hiddenclass=self.hiddenclass, outclass=self.outclass)
		trainer = BackpropTrainer(self.nn, ds, verbose=self.verbose)
		trainer.trainUntilConvergence(maxEpochs=self.maxEpochs)
开发者ID:al-chen,项目名称:marchmathness,代码行数:16,代码来源:marchine.py

示例14: main

# 需要导入模块: from pybrain.datasets import SupervisedDataSet [as 别名]
# 或者: from pybrain.datasets.SupervisedDataSet import setField [as 别名]
def main():
    """Train a feed-forward net in batches over a large parameter file and
    pickle the resulting model into the model folder."""
    args = parser.parse_args()

    hidden_size = 50
    epochs = 5

    dataset_len = _params_count(args.model_folder, 'params_train.txt')
    rows_per_step = 10000
    total_batches = dataset_len // rows_per_step

    params_len = _params_count(args.model_folder, 'dict.txt')
    output_layer_num = 601
    net = _init_net(params_len, output_layer_num, hidden_size)

    # NOTE(review): range(total_batches - 1) skips the final batch --
    # confirm whether it is deliberately held out or an off-by-one.
    for batch_num in range(total_batches - 1):
        trainParams = _build_params(os.path.join(args.model_folder, 'params_train.txt'), args.model_folder, batch_num, rows_per_step)

        print('params ready')

        # One-hot encode the 1-based labels into vectors of length
        # output_layer_num.
        y = []
        for y_val in trainParams['y']:
            y_vec = [0] * output_layer_num
            y_vec[y_val - 1] = 1
            y.append(y_vec)

        print(len(trainParams['x']))
        print(len(y))

        # TODO: fix the number of pictures
        ds = SupervisedDataSet(params_len, output_layer_num)
        ds.setField('input', trainParams['x'])
        ds.setField('target', y)

        trainer = BackpropTrainer(net, ds)

        print("training for {} epochs...".format(epochs))

        for i in range(epochs):
            mse = trainer.train()
            rmse = sqrt(mse)
            print("training RMSE, epoch {}: {}".format(i + 1, rmse))

    # Close the output file handle explicitly (the original
    # pickle.dump(net, open(...)) leaked it).
    with open(os.path.join(args.model_folder, 'model_nn.pkl'), 'wb') as model_file:
        pickle.dump(net, model_file)
开发者ID:pandakot,项目名称:machine_learning,代码行数:52,代码来源:train_nn.py

示例15: train_with_shuffle

# 需要导入模块: from pybrain.datasets import SupervisedDataSet [as 别名]
# 或者: from pybrain.datasets.SupervisedDataSet import setField [as 别名]
def train_with_shuffle(inp, targ, nn, epoch):
    """Train *nn* on (inp, targ): first until convergence, then *epoch*
    extra passes with the RMSE printed after each one.

    :param inp: input samples
    :param targ: target values aligned with inp
    :param nn: pybrain network to train (modified in place)
    :param epoch: number of additional training passes after convergence
    :return: the trained network
    """
    # NOTE(review): SupervisedDataSet is normally constructed with the
    # input/target DIMENSIONS, not the number of samples -- confirm that
    # len(inp) and len(targ) really are the intended dimensions here.
    ds = SupervisedDataSet(len(inp), len(targ))
    ds.setField('input', inp)
    ds.setField('target', targ)

    trainer = BackpropTrainer(nn, ds)
    trainer.trainUntilConvergence(verbose=True, validationProportion=0.15, maxEpochs=100, continueEpochs=10)

    for i in range(epoch):
        mse = trainer.train()
        rmse = np.sqrt(mse)
        print("training RSME, epoch {}: {}".format(i+1, rmse))
    return nn


注:本文中的pybrain.datasets.SupervisedDataSet.setField方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。