当前位置: 首页>>代码示例>>Python>>正文


Python SupervisedDataSet.setField方法代码示例

本文整理汇总了Python中pybrain.datasets.supervised.SupervisedDataSet.setField方法的典型用法代码示例。如果您正苦于以下问题:Python SupervisedDataSet.setField方法的具体用法?Python SupervisedDataSet.setField怎么用?Python SupervisedDataSet.setField使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在pybrain.datasets.supervised.SupervisedDataSet的用法示例。


在下文中一共展示了SupervisedDataSet.setField方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: validate

# 需要导入模块: from pybrain.datasets.supervised import SupervisedDataSet [as 别名]
# 或者: from pybrain.datasets.supervised.SupervisedDataSet import setField [as 别名]
def validate(X, y, net):
    # Test Set.
    x_test = X[split_at:, :]
    y_test = y.__getslice__(split_at, y.shape[0])
    y_test = y_test.reshape(-1, 1)

    # you'll need labels. In case you don't have them...
    y_test_dummy = np.zeros(y_test.shape)

    input_size = x_test.shape[1]
    target_size = y_test.shape[1]

    assert (net.indim == input_size)
    assert (net.outdim == target_size)

    # prepare dataset
    ds = SDS(input_size, target_size)
    ds.setField('input', x_test)
    ds.setField('target', y_test)

    # predict

    p = net.activateOnDataset(ds)

    mse = MSE(y_test, p)
    print "testing MSE:", mse
    np.savetxt(output_predictions_file, p, fmt='%.6f')
开发者ID:Gabeesh,项目名称:CS273a-Introduction-to-Machine-Learning,代码行数:29,代码来源:PyBrainImplementation.py

示例2: train

# 需要导入模块: from pybrain.datasets.supervised import SupervisedDataSet [as 别名]
# 或者: from pybrain.datasets.supervised.SupervisedDataSet import setField [as 别名]
def train():

	print "-------------------------------------------------"
	print "loading data..."
	print "file to be loaded: ", train_file

	# regresa un ndarray de numpy
	train = np.loadtxt( train_file, delimiter = ',' )

	print "data loaded to a ", type(train),   " of size: ", train.shape, " and type:", train.dtype
	print "Spliting inputs and output for training..."

	inputs_train = train[:,0:-1]
	output_train = train[:,-1]
	output_train = output_train.reshape( -1, 1 )


	print "inputs in a ", type(inputs_train),   " of size: ", inputs_train.shape, " and type:", inputs_train.dtype
	print "output in a ", type(output_train),   " of size: ", output_train.shape, " and type:", output_train.dtype
	print "-------------------------------------------------"



	print "Setting up supervised dataset por pyBrain training..."
	input_size = inputs_train.shape[1]
	target_size = output_train.shape[1]
	dataset = SDS( input_size, target_size )
	dataset.setField( 'input', inputs_train )
	dataset.setField( 'target', output_train )
	print "-------------------------------------------------"



	print "Setting up supervised dataset por pyBrain training..."
	hidden_size = 50
	epochs = 600
	crime_network = buildNetwork( input_size, hidden_size, target_size, bias = True, hiddenclass = SigmoidLayer, outclass = LinearLayer )
	trainer = BackpropTrainer( crime_network,dataset )
	print "-------------------------------------------------"


	rmse_vector = []
	print "training for {} epochs...".format( epochs )
	for i in range( epochs ):
		mse = trainer.train()
		rmse = sqrt( mse )
		print "training RMSE, epoch {}: {}".format( i + 1, rmse )
		rmse_vector.append(rmse)

	print "-------------------------------------------------"
	
	pickle.dump( crime_network, open( output_model_file, 'wb' ))

	print "Training done!"
	print "-------------------------------------------------"

	return rmse_vector
开发者ID:sam1017383,项目名称:Recommender-platform,代码行数:59,代码来源:draft1.py

示例3: nn

# 需要导入模块: from pybrain.datasets.supervised import SupervisedDataSet [as 别名]
# 或者: from pybrain.datasets.supervised.SupervisedDataSet import setField [as 别名]
def nn(train_source, test_source, validation=False, v_size=0.5):

	hidden_size = 100
	epochs = 600

	# load data
	train = read_csv(train_source)
	tmp = open(train_source)
	feature_count = None
	for line in tmp:
		feature_count = len(line.split(","))
		break

	trainX = np.asarray(train[range(1, feature_count)])
	trainY = np.asarray(train[[0]]).ravel()
	# print "All Data size: " + str(len(trainX))
	testX = None
	testY = None

	if validation:
		# --- CROSS VALIDATION ---
		trainX, testX, trainY, testY = cross_validation.train_test_split(
			trainX, trainY, test_size=v_size, random_state=0)
	else:
		# --- TEST DATA ---
		test = read_csv(test_source)
		testX = np.asarray(test[range(1, feature_count)])
		testY = np.asarray(test[[0]]).ravel()

	# print testX
	# print testY
	input_size = len(trainX[0])
	target_size = 1
	print input_size
	print target_size
	# prepare dataset

	ds = SDS( input_size, target_size )
	ds.setField( 'input', trainX )
	ds.setField( 'target', [[item] for item in trainY] )

	# init and train

	net = buildNetwork( input_size, hidden_size, target_size, bias = True )
	trainer = BackpropTrainer(net, ds)

	print "training for {} epochs...".format(epochs)

	for i in range( epochs ):
		mse = trainer.train()
		rmse = sqrt(mse)
		print "training RMSE, epoch {}: {}".format(i + 1, rmse)
开发者ID:junk2112,项目名称:detector,代码行数:54,代码来源:classificator.py

示例4: validate

# 需要导入模块: from pybrain.datasets.supervised import SupervisedDataSet [as 别名]
# 或者: from pybrain.datasets.supervised.SupervisedDataSet import setField [as 别名]
    def validate(self):
        """ The main method of this class. It runs the crossvalidation process
            and returns the validation result (e.g. performance).
        """
        dataset = self._dataset
        trainer = self._trainer
        n_folds = self._n_folds
        l = dataset.getLength()
        inp = dataset.getField("input")
        tar = dataset.getField("target")
        indim = dataset.indim
        outdim = dataset.outdim
        assert l > n_folds

        # Random permutation of all sample indices, split into n_folds chunks.
        perms = array_split(permutation(l), n_folds)

        perf = 0.
        for i in range(n_folds):
            # determine train indices: every fold except fold i
            train_perms_idxs = range(n_folds)
            train_perms_idxs.pop(i)
            temp_list = []
            for train_perms_idx in train_perms_idxs:
                temp_list.append(perms[ train_perms_idx ])
            train_idxs = concatenate(temp_list)

            # determine test indices
            test_idxs = perms[i]

            # train on a fresh deep copy so folds do not share trainer state
            train_ds = SupervisedDataSet(indim, outdim)
            train_ds.setField("input"  , inp[train_idxs])
            train_ds.setField("target" , tar[train_idxs])
            trainer = copy.deepcopy(self._trainer)
            trainer.setData(train_ds)
            if not self._max_epochs:
                # BUG FIX: `trainer.train` was referenced without being
                # called, so no training at all happened on this branch.
                trainer.train()
            else:
                trainer.trainEpochs(self._max_epochs)

            # test
            test_ds = SupervisedDataSet(indim, outdim)
            test_ds.setField("input"  , inp[test_idxs])
            test_ds.setField("target" , tar[test_idxs])
            # NOTE(review): performance is measured on the FULL dataset, not
            # on test_ds; a standard k-fold score would use test_ds here --
            # left unchanged because callers may rely on the current metric.
            perf += self._calculatePerformance(trainer.module, dataset)

        perf /= n_folds
        return perf
开发者ID:pachkun,项目名称:Machine_learning,代码行数:53,代码来源:validation.py

示例5: train

# 需要导入模块: from pybrain.datasets.supervised import SupervisedDataSet [as 别名]
# 或者: from pybrain.datasets.supervised.SupervisedDataSet import setField [as 别名]
def train(
    train,
    label,
    custom_net=None,
    training_mse_threshold=0.40,
    testing_mse_threshold=0.60,
    epoch_threshold=10,
    epochs=100,
    hidden_size=20,
):
    # Test Set.
    x_train = train[0:split_at, :]
    y_train_slice = label.__getslice__(0, split_at)
    y_train = y_train_slice.reshape(-1, 1)
    x_test = train[split_at:, :]
    y_test_slice = label.__getslice__(split_at, label.shape[0])
    y_test = y_test_slice.reshape(-1, 1)

    # Shape.
    input_size = x_train.shape[1]
    target_size = y_train.shape[1]

    # prepare dataset
    ds = SDS(input_size, target_size)
    ds.setField("input", x_train)
    ds.setField("target", y_train)

    # prepare dataset
    ds_test = SDS(input_size, target_size)
    ds_test.setField("input", x_test)
    ds_test.setField("target", y_test)

    min_mse = 1000000

    # init and train
    if custom_net == None:
        net = buildNetwork(input_size, hidden_size, target_size, bias=True)
    else:
        print "Picking up the custom network"
        net = custom_net

    trainer = RPropMinusTrainer(net, dataset=ds, verbose=False, weightdecay=0.01, batchlearning=True)
    print "training for {} epochs...".format(epochs)

    for i in range(epochs):
        mse = trainer.train()
        print "training mse, epoch {}: {}".format(i + 1, math.sqrt(mse))

        p = net.activateOnDataset(ds_test)
        mse = math.sqrt(MSE(y_test, p))
        print "-- testing mse, epoch {}: {}".format(i + 1, mse)
        pickle.dump(net, open("current_run", "wb"))

        if min_mse > mse:
            print "Current minimum found at ", i
            pickle.dump(net, open("current_min_epoch_" + model_file, "wb"))
            min_mse = mse

    pickle.dump(net, open(model_file, "wb"))
    return net
开发者ID:korkam,项目名称:beta_learning-matlab-through-case-studies,代码行数:62,代码来源:PyBrainWithCV.py

示例6: CV_NN

# 需要导入模块: from pybrain.datasets.supervised import SupervisedDataSet [as 别名]
# 或者: from pybrain.datasets.supervised.SupervisedDataSet import setField [as 别名]
def CV_NN(X_train, Y, N_CV=1, test_sze=0.3, n_middle = 14):
    hidden_size = n_middle
    sss = cross_validation.StratifiedShuffleSplit(
        Y, N_CV, test_size=test_sze, random_state=0)

    overall_accuracy = 0
    overall_error = 0
    confusion_matrix = np.zeros((7, 7), dtype=np.int)
    for train_block, test_block in sss:
        x_train=X_train.as_matrix()[train_block]
        input_size = x_train.shape[1]
        y_vals = Y[train_block]
        y_train=np.zeros((len(y_vals),7))
        for i,y in enumerate(y_vals):
            y_train[i][y-1]=1
        target_size = y_train.shape[1]
        # print x_train.shape, y_train.shape

        ds = SDS( input_size, target_size)
        ds.setField( 'input', x_train)
        ds.setField( 'target', y_train)
        
        net = buildNetwork( input_size, hidden_size, target_size, bias = True, hiddenclass=SigmoidLayer, outclass=SoftmaxLayer )
        trainer = BackpropTrainer( net, ds, learningrate=0.1, verbose=True)
        trainer.trainUntilConvergence( verbose = False, validationProportion = 0.2, maxEpochs = 64, continueEpochs = 4 )
        trainer = BackpropTrainer( net, ds, learningrate=0.05, verbose=True)
        trainer.trainUntilConvergence( verbose = False, validationProportion = 0.2, maxEpochs = 64, continueEpochs = 8 )
        trainer = BackpropTrainer( net, ds, learningrate=0.01, verbose=True)
        trainer.trainUntilConvergence( verbose = False, validationProportion = 0.2, maxEpochs = 512, continueEpochs = 16 )
        trainer = BackpropTrainer( net, ds, learningrate=0.005, verbose=True)
        trainer.trainUntilConvergence( verbose = False, validationProportion = 0.2, maxEpochs = 1024, continueEpochs = 64 )

        y_vals = Y[test_block]
        y_test=np.zeros((len(y_vals),7))
        for i,y in enumerate(y_vals):
            y_test[i][y-1]=1
        x_test = X_train.as_matrix()[test_block]

        ds = SDS( input_size, target_size)
        ds.setField( 'input', x_test )
        ds.setField( 'target', y_test )

        Y_predict = net.activateOnDataset( ds )
        y_predict=Y_predict.argmax(axis=1)
        y_test=y_vals-1
        accuracy = (y_test == y_predict).mean()
        for x, y in zip(y_test, y_predict):
            confusion_matrix[x - 1, y - 1] += 1
        overall_accuracy += accuracy
        overall_error += accuracy * accuracy
    confusion_matrix *= 1.0 / N_CV
    print confusion_matrix
    overall_accuracy *= 1.0 / N_CV
    overall_error = np.sqrt(
        (overall_error / N_CV - overall_accuracy ** 2) / N_CV)
    print overall_accuracy, overall_error
开发者ID:mcminis1,项目名称:forest-cover,代码行数:58,代码来源:neural_net.py

示例7: predict

# 需要导入模块: from pybrain.datasets.supervised import SupervisedDataSet [as 别名]
# 或者: from pybrain.datasets.supervised.SupervisedDataSet import setField [as 别名]
def predict(X, net):
    # Test Set.
    x_test = X[:, :]

    # you'll need labels. In case you don't have them...
    y_test_dummy = np.zeros((X.shape[0], 1))

    input_size = x_test.shape[1]
    target_size = y_test_dummy.shape[1]

    assert (net.indim == input_size)
    assert (net.outdim == target_size)

    # prepare dataset
    ds = SDS(input_size, target_size)
    ds.setField('input', x_test)
    ds.setField('target', y_test_dummy)

    p = net.activateOnDataset(ds)
    print p.shape
    np.savetxt("1_" + output_predictions_file, p, fmt='%.6f')
    s = pd.Series(p[:, 0])
    s.index += 1
    s.to_csv('neural_prediction_3.csv', header=['Prediction'], index=True, index_label='ID')
开发者ID:Gabeesh,项目名称:CS273a-Introduction-to-Machine-Learning,代码行数:26,代码来源:PyBrainImplementation.py

示例8: train_ann

# 需要导入模块: from pybrain.datasets.supervised import SupervisedDataSet [as 别名]
# 或者: from pybrain.datasets.supervised.SupervisedDataSet import setField [as 别名]
def train_ann(data_dicts, input_fields, hidden_size, epochs):
	"""Train a one-hidden-layer feed-forward network on appraisal data.

	data_dicts   : records converted via dicts_to_np_array; the first two
	               columns of the resulting array are the targets, the rest
	               are inputs -- assumed from the slicing below, TODO confirm
	               against dicts_to_np_array.
	input_fields : field names forwarded to dicts_to_np_array.
	hidden_size  : neurons in the single sigmoid hidden layer.
	epochs       : number of backprop epochs.

	Returns (report_dict, trained_network) where report_dict summarises
	timing, topology and the minimum training RMSE reached.
	"""

	#print "-------------------------------------------------"
	#print "loading data..."

	# returns a numpy ndarray
	train = dicts_to_np_array(data_dicts, input_fields)

	#print "data loaded to a ", type(train),   " of size: ", train.shape, " and type:", train.dtype
	#print "Spliting inputs and output for training..."

	# Targets are the first two columns, inputs are the remainder.
	inputs_train = train[:,2:]
	outputs_train = train[:,:2]
	outputs_train = outputs_train.reshape( -1, 2 )

	#print "inputs in a ", type(inputs_train),   " of size: ", inputs_train.shape, " and type:", inputs_train.dtype
	#print "output in a ", type(outputs_train),   " of size: ", outputs_train.shape, " and type:", outputs_train.dtype

	# Setting up supervised dataset for pyBrain training...
	input_size = inputs_train.shape[1]
	target_size = outputs_train.shape[1]
	dataset = SDS( input_size, target_size )
	dataset.setField( 'input', inputs_train )
	dataset.setField( 'target', outputs_train )


	# Setting up network for supervised learning in pyBrain:
	# linear input -> sigmoid hidden -> linear output.
	appraisal_network = FeedForwardNetwork()
	inLayer = LinearLayer(input_size)
	hiddenLayer1 = SigmoidLayer(hidden_size)
	outLayer = LinearLayer(target_size)
	appraisal_network.addInputModule(inLayer)
	appraisal_network.addModule(hiddenLayer1)
	appraisal_network.addOutputModule(outLayer)
	in_to_hidden1 = FullConnection(inLayer, hiddenLayer1)
	hidden1_to_out = FullConnection(hiddenLayer1, outLayer)
	appraisal_network.addConnection(in_to_hidden1)
	appraisal_network.addConnection(hidden1_to_out)
	appraisal_network.sortModules()


	trainer = BackpropTrainer( appraisal_network,dataset )

	start_time = time.time()
	rmse_vector = []
	rmse_min = sys.float_info.max
	# training for `epochs` iterations...
	for i in range( epochs ):
		mse = trainer.train()
		rmse = sqrt( mse )

		# record per-epoch training RMSE
		rmse_vector.append(rmse)

		# track the lowest RMSE seen over all epochs
		if rmse < rmse_min:
			rmse_min = rmse
			#print "training RMSE, epoch {}: {}".format( i + 1, rmse )

	elapsed_time = time.time() - start_time

	report_fields_training = {"time_elapsed":elapsed_time, 
						"epochs":epochs,
						"rmse_min":rmse_min,
						"hidden_layers":1,
						"hidden_neurons":hidden_size,
						"input_neurons":input_size,
						"output_neurons":target_size}

	return report_fields_training, appraisal_network
开发者ID:sam1017383,项目名称:Recommender-platform,代码行数:71,代码来源:ann_engine.py

示例9: read_X_Y

# 需要导入模块: from pybrain.datasets.supervised import SupervisedDataSet [as 别名]
# 或者: from pybrain.datasets.supervised.SupervisedDataSet import setField [as 别名]
# Script fragment: load data, build a supervised dataset, train a network
# until convergence, and pickle the model.  Relies on names defined earlier
# in the file (file_name, f_in_trn, f_in_tst, sol_dir, my_dim, read_X_Y,
# f_out_model) -- presumably set in the omitted header; confirm upstream.
print file_name + ': reading data'
(Xtrn, Xtst, Ytrn, f_out) = read_X_Y(f_in_trn, f_in_tst, sol_dir, my_dim)

# PARAMETERS
hidden_size = 100
epochs = 600
continue_epochs = 10
val_prop = 0.2

# Prepare dataset
print file_name + ': preparing ds'
Ytrn = Ytrn[:,1:]  # Remove ID col
input_size = Xtrn.shape[1]  # ncols
target_size = Ytrn.shape[1]  # ncols
ds = SupervisedDataSet(input_size, target_size)
ds.setField('input', Xtrn)
ds.setField('target', Ytrn)

# Train a network with early stopping on a validation split.
print file_name + ': training network'
net = buildNetwork(input_size, hidden_size, target_size, bias = True)
trainer = BackpropTrainer(net, ds)

trainer.trainUntilConvergence(verbose = True, validationProportion = val_prop,
                              maxEpochs = epochs, continueEpochs = continue_epochs)

# Save model
print file_name + ': saving model'
pickle.dump(net, open(f_out_model, 'wb'))

# Predict on test data, save to file (continues beyond this excerpt)
开发者ID:mdelhey,项目名称:kaggle-galaxy,代码行数:33,代码来源:brain.py

示例10: SDS

# 需要导入模块: from pybrain.datasets.supervised import SupervisedDataSet [as 别名]
# 或者: from pybrain.datasets.supervised.SupervisedDataSet import setField [as 别名]
model_file = 'model.pkl'
output_predictions_file = 'predictions.txt'

X2 = pd.read_csv('Test/Test_Combine.csv', usecols=[
                 'T', 'TM', 'Tm', 'SLP', 'H', 'VV', 'V', 'VM'])
Y2 = pd.read_csv('Test/Test_Combine.csv', usecols=['PM 2.5'])

X2 = X2.values
Y2 = Y2.values
net = pickle.load(open(model_file, 'rb'))

y_test_dummy = np.zeros(Y2.shape)

input_size = X2.shape[1]
target_size = X2.shape[1]

ds = SDS(input_size, target_size)
ds.setField('input', X2)
ds.setField('target', y_test_dummy)

p = net.activateOnDataset(ds)

mse = MSE(Y2, p)
rmse = sqrt(mse)

print "testing RMSE:", rmse
print "testing MSE: ", mse

main(Y2, p)
np.savetxt(output_predictions_file, p, fmt='%.6f')
开发者ID:alyakhtar,项目名称:AQI-Delhi,代码行数:32,代码来源:NNRegPred.py

示例11: train_ann_multihidden

# 需要导入模块: from pybrain.datasets.supervised import SupervisedDataSet [as 别名]
# 或者: from pybrain.datasets.supervised.SupervisedDataSet import setField [as 别名]
def train_ann_multihidden(data_dicts, input_fields, layers, hidden_size, epochs):

	print "-------------------------------------------------"
 	print "loading data..."
 	# regresa un ndarray de numpy
 	train = dicts_to_np_array(data_dicts, input_fields)

 	print "data loaded to a ", type(train),   " of size: ", train.shape, " and type:", train.dtype
 	print "Spliting inputs and output for training..."

 	inputs_train = train[:,2:]
 	outputs_train = train[:,:2]
 	outputs_train = outputs_train.reshape( -1, 2 )


 	print "inputs in a ", type(inputs_train),   " of size: ", inputs_train.shape, " and type:", inputs_train.dtype
 	print "output in a ", type(outputs_train),   " of size: ", outputs_train.shape, " and type:", outputs_train.dtype
 	print "-------------------------------------------------"

 	print "primeros vectores de inputs: ", inputs_train[0:2,:]

 	print "primeros vectores de outputs: ", outputs_train[0:2,:]


 	print "Setting up supervised dataset por pyBrain training..."
 	input_size = inputs_train.shape[1]
 	target_size = outputs_train.shape[1]
 	dataset = SDS( input_size, target_size )
 	dataset.setField( 'input', inputs_train )
 	dataset.setField( 'target', outputs_train )
 	print "-------------------------------------------------"

	
	print "Setting up network for supervised learning in pyBrain..."

 	appraisal_network = FeedForwardNetwork()
 	inLayer = LinearLayer(input_size)
 	hiddenLayer1 = SigmoidLayer(hidden_size)
 	hiddenLayer2 = SigmoidLayer(hidden_size//2)
 	outLayer = LinearLayer(target_size)
 	appraisal_network.addInputModule(inLayer)
 	appraisal_network.addModule(hiddenLayer1)
 	appraisal_network.addModule(hiddenLayer2)
 	appraisal_network.addOutputModule(outLayer)
 	in_to_hidden1 = FullConnection(inLayer, hiddenLayer1)
 	hidden1_to_hidden2 = FullConnection(hiddenLayer1, hiddenLayer2)
 	hidden2_to_out = FullConnection(hiddenLayer2, outLayer)
 	appraisal_network.addConnection(in_to_hidden1)
 	appraisal_network.addConnection(hidden1_to_hidden2)
 	appraisal_network.addConnection(hidden2_to_out)
 	appraisal_network.sortModules()


 	trainer = BackpropTrainer( appraisal_network,dataset )

 	print "-------------------------------------------------"

 	start_time = time.time()
 	rmse_vector = []
 	rmse_min = sys.float_info.max
 	#print "training for {} epochs...".format( epochs )
 	for i in range( epochs ):
 		mse = trainer.train()
 		rmse = sqrt( mse )
 		print "training RMSE, epoch {}: {}".format( i + 1, rmse )
 		rmse_vector.append(rmse)
 		if rmse < rmse_min:
 			rmse_min = rmse
 	#print "-------------------------------------------------"
 	elapsed_time = time.time() - start_time

# 	pickle.dump( crime_ann, open( output_model_file, 'wb' ))

 	#print "Training done!"
 	#print "-------------------------------------------------"

# 	return rmse_vector
	
	return {"time_elapsed":elapsed_time, 
			"epochs:":epochs,
			"rmse_vector":rmse_vector,
			"rmse_min":rmse_min,
			"hidden_layers":1,
			"hidden_neurons":hidden_size
			}, appraisal_network
开发者ID:sam1017383,项目名称:Recommender-platform,代码行数:87,代码来源:ann_engine.py

示例12: train_4_hidden

# 需要导入模块: from pybrain.datasets.supervised import SupervisedDataSet [as 别名]
# 或者: from pybrain.datasets.supervised.SupervisedDataSet import setField [as 别名]
def train_4_hidden():
	"""Train a four-hidden-layer (Tanh) feed-forward network on `train_file`.

	Relies on module-level globals: train_file, hidden_size, epochs,
	output_model_file, plus the pybrain/numpy/pickle imports.  The last CSV
	column is the regression target; all other columns are inputs.

	Pickles the trained network to `output_model_file` and returns the list
	of per-epoch training RMSEs.
	"""

	print "-------------------------------------------------"
	print "loading data..."
	print "file to be loaded: ", train_file

	# returns a numpy ndarray
	train = np.loadtxt( train_file, delimiter = ',' )

	print "data loaded to a ", type(train),   " of size: ", train.shape, " and type:", train.dtype
	print "Spliting inputs and output for training..."

	inputs_train = train[:,0:-1]
	output_train = train[:,-1]
	# PyBrain expects a 2-D target field, so make the labels a column vector.
	output_train = output_train.reshape( -1, 1 )


	print "inputs in a ", type(inputs_train),   " of size: ", inputs_train.shape, " and type:", inputs_train.dtype
	print "output in a ", type(output_train),   " of size: ", output_train.shape, " and type:", output_train.dtype
	print "-------------------------------------------------"



	print "Setting up supervised dataset por pyBrain training..."
	input_size = inputs_train.shape[1]
	target_size = output_train.shape[1]
	dataset = SDS( input_size, target_size )
	dataset.setField( 'input', inputs_train )
	dataset.setField( 'target', output_train )
	print "-------------------------------------------------"



	print "Setting up network for supervised learning in pyBrain..."

	#crime_network = buildNetwork( input_size, hidden_size, target_size, bias = True, hiddenclass = SigmoidLayer, outclass = LinearLayer )



	# Hand-built topology: linear in -> 4 x Tanh hidden layers -> linear out.
	crime_ann = FeedForwardNetwork()

	inLayer = LinearLayer(input_size)
	hiddenLayer1 = TanhLayer(hidden_size)
	hiddenLayer2 = TanhLayer(hidden_size)
	hiddenLayer3 = TanhLayer(hidden_size)
	hiddenLayer4 = TanhLayer(hidden_size)
	outLayer = LinearLayer(target_size)
	crime_ann.addInputModule(inLayer)
	crime_ann.addModule(hiddenLayer1)
	crime_ann.addModule(hiddenLayer2)
	crime_ann.addModule(hiddenLayer3)
	crime_ann.addModule(hiddenLayer4)
	crime_ann.addOutputModule(outLayer)
	in_to_hidden1 = FullConnection(inLayer, hiddenLayer1)
	hidden1_to_hidden2 = FullConnection(hiddenLayer1, hiddenLayer2)
	hidden2_to_hidden3 = FullConnection(hiddenLayer2, hiddenLayer3)
	hidden3_to_hidden4 = FullConnection(hiddenLayer3, hiddenLayer4)
	hidden4_to_out = FullConnection(hiddenLayer4, outLayer)
	crime_ann.addConnection(in_to_hidden1)
	crime_ann.addConnection(hidden1_to_hidden2)
	crime_ann.addConnection(hidden2_to_hidden3)
	crime_ann.addConnection(hidden3_to_hidden4)
	crime_ann.addConnection(hidden4_to_out)
	crime_ann.sortModules()


	trainer = BackpropTrainer( crime_ann,dataset )

	print "-------------------------------------------------"


	rmse_vector = []
	print "training for {} epochs...".format( epochs )
	for i in range( epochs ):
		mse = trainer.train()
		rmse = sqrt( mse )
		print "training RMSE, epoch {}: {}".format( i + 1, rmse )
		rmse_vector.append(rmse)

	print "-------------------------------------------------"

	pickle.dump( crime_ann, open( output_model_file, 'wb' ))

	print "Training done!"
	print "-------------------------------------------------"

	return rmse_vector
开发者ID:sam1017383,项目名称:Recommender-platform,代码行数:90,代码来源:crime_test.py

示例13: SDS

# 需要导入模块: from pybrain.datasets.supervised import SupervisedDataSet [as 别名]
# 或者: from pybrain.datasets.supervised.SupervisedDataSet import setField [as 别名]
# Script: train a Tanh-hidden regression network to predict PM 2.5 from
# weather features, then pickle the model.  `output_model_file`, SDS,
# buildNetwork, BackpropTrainer, TanhLayer, sqrt, pickle, pd and np come
# from earlier in the file.
X = pd.read_csv('Train/Train_Combine.csv', usecols=[
                'T', 'TM', 'Tm', 'SLP', 'H', 'VV', 'V', 'VM'])
Y = pd.read_csv('Train/Train_Combine.csv', usecols=['PM 2.5'])

# Work on plain numpy arrays rather than DataFrames.
X = X.values
Y = Y.values

hidden_size = 100
epochs = 600

input_size = X.shape[1]
target_size = Y.shape[1]

ds = SDS(input_size, target_size)
ds.setField('input', X)
ds.setField('target', Y)

net = buildNetwork(
    input_size, hidden_size, target_size, bias=True, hiddenclass=TanhLayer)
trainer = BackpropTrainer(net, ds)

print "training for {} epochs...".format(epochs)

for i in range(epochs):
    mse = trainer.train()
    rmse = sqrt(mse)
    print "training RMSE, epoch {}: {}".format(i + 1, rmse)

pickle.dump(net, open(output_model_file, 'wb'))
开发者ID:alyakhtar,项目名称:AQI-Delhi,代码行数:31,代码来源:NNRegTrain.py

示例14: open

# 需要导入模块: from pybrain.datasets.supervised import SupervisedDataSet [as 别名]
# 或者: from pybrain.datasets.supervised.SupervisedDataSet import setField [as 别名]
true_positive = 0
false_positive = 0
true_negative = 0
false_negative = 0

# load model
net = pickle.load( open(var.output_model_file, 'rb' ))
#load data
test = np.loadtxt( var.test_file, delimiter = ',' )
input_data = test[:,0:-1]
target_data = test[:,-1]
target_data = target_data.reshape( -1, 1 )
#print input_data,target_data
# prepare dataset
ds = SDS( var.no_of_clusters, var.output )
ds.setField( 'input', input_data )
ds.setField( 'target', target_data )
#activate network
predict_list = net.activateOnDataset(ds)	
for predict,ground_truth in zip(predict_list,target_data):
	if predict <= 0.0:
			if ground_truth <= 0 : true_negative += 1
			else: false_negative += 1
			print "Pedicted: NOT Car"
	else : 
		if ground_truth <= 0 : false_positive += 1
		else: true_positive += 1
		print "Predicted: Car"
#print true_positive,true_negative,false_positive,false_negative
precision =  true_positive / (true_positive + false_positive)
recall = true_positive / (true_positive + false_negative)
开发者ID:ojuneja,项目名称:image_classification,代码行数:33,代码来源:testdata.py

示例15: FitNeuralNetworkDeptAnimate

# 需要导入模块: from pybrain.datasets.supervised import SupervisedDataSet [as 别名]
# 或者: from pybrain.datasets.supervised.SupervisedDataSet import setField [as 别名]
def FitNeuralNetworkDeptAnimate(dept = 1, num = 1000):
	"""Train a deep network on one department's data and animate predictions.

	NOTE(review): this function is truncated by the source listing -- the
	tail of the training loop (plot refresh, any return value) is not
	visible here, so the documentation below covers only the visible part.

	dept : department index used to build the train/test file names.
	num  : number of training rows to use (num/4 test rows).

	Relies on module-level globals: input_file_path, train_file_name,
	test_file_name, hidden_size_ratio, num_hidden_layer, epochs,
	weightdecay, learningrate, momentum, SDS, and the pybrain/matplotlib
	imports.
	"""

	train_file = input_file_path + train_file_name[0] + str(dept) + train_file_name[1]
	test_file = input_file_path + test_file_name[0] + str(dept) + test_file_name[1]

	train = np.loadtxt( train_file, delimiter = ' ' )
	test = np.loadtxt( test_file, delimiter = ' ' )
	print len(train)
	x_train = train[0:num, 0 : -1]
	y_train = train[0:num, -1]

	# Min-max normalize the training targets to [0, 1]; predictions are
	# rescaled back with (y_max - y_min) before plotting.
	y_max = max(y_train)
	y_min = min(y_train)
	y_train = (y_train - y_min) / (y_max-y_min)
	y_train = y_train.reshape(-1,1)

	input_size = x_train.shape[1]
	target_size = y_train.shape[1]

	# A quarter as many test rows as training rows (Python 2 int division).
	x_test = test[0:num/4, 0 : -1]
	y_test = test[0:num/4, -1]
	y_test = y_test.reshape(-1,1)


	ds_test = SDS( input_size, target_size )
	ds_test.setField( 'input', x_test )
	ds_test.setField( 'target', y_test )

	ds = SDS( input_size, target_size )
	ds.setField( 'input', x_train )
	ds.setField( 'target', y_train )


	hidden_size = input_size*hidden_size_ratio


	n = RecurrentNetwork()


	# Build a chain of num_hidden_layer+1 sigmoid layers named hidden0..N,
	# with a bias unit feeding the first hidden layer.
	n.addInputModule(LinearLayer(input_size, name='in'))
	n.addModule(BiasUnit('bias'))
	for i in range(0, num_hidden_layer+1):
		hidden_name = 'hidden'+str(i)
		n.addModule(SigmoidLayer(hidden_size, name=hidden_name))
	n.addOutputModule(LinearLayer(target_size, name='out'))

	n.addConnection(FullConnection(n['in'], n['hidden0'], name='c1'))
	next_hidden = 'hidden0'

	# Fully connect consecutive hidden layers.
	for i in range(0,num_hidden_layer ):
		current_hidden = 'hidden'+str(i)
		next_hidden = 'hidden'+str(i+1)
		n.addConnection(FullConnection(n[current_hidden], n[next_hidden], name='c'+str(i+2)))

	n.addConnection(FullConnection(n[next_hidden], n['out'], name='c'+str(num_hidden_layer+2)))

	n.addConnection(FullConnection(n['bias'], n['hidden0'], name='c'+str(num_hidden_layer+7)))


	n.sortModules()
	print n


	trainer = BackpropTrainer(n,ds ,weightdecay=weightdecay, learningrate=learningrate, lrdecay=1.0, momentum = momentum)


	# Interactive plot: one line for train predictions, one for test.
	plt.ion()
	fig = plt.figure()
	ax = fig.add_subplot(111)

	plt.annotate("Dept1", (10,-15000))
	plt.annotate("Dept2", (180,-30000))
	plt.annotate("Dept3", (300,-15000))
	plt.annotate("Dept4", (450,-30000))
	plt.annotate("Dept5", (600,-15000))
	plt.annotate("Dept6", (700,-30000))
	plt.annotate("Dept7", (900,-15000))

	line1, = ax.plot([],[],'-b',label='train')
	line2, = ax.plot([],[],'-r',label='test')
	ax.legend()

	dummy = raw_input("Plot the graph?")

	for i in range(epochs):
		error = trainer.train()
		print "Epoch: %d, Error: %7.4f" % (i, error)


		# De-normalize predictions for display.
		p_train = n.activateOnDataset( ds )
		p_test = n.activateOnDataset( ds_test )
		plot_result = np.vstack((p_train*(y_max-y_min) + y_min, p_test*(y_max-y_min) + y_min ))


		p_test_print = p_test.reshape(-1,len(p_test))
		p_test_print = p_test_print*(y_max-y_min) + y_min

		line1.set_ydata(y_train*(y_max-y_min) + y_min)
		line1.set_xdata(range(len(y_train)))
		line2.set_ydata(plot_result)
# ... (remainder of this function omitted in the source excerpt) ...
开发者ID:pgnepal,项目名称:Walmart-Forecasting-NN,代码行数:103,代码来源:FitNNforWalmartCrossDeptAnimation.py


注:本文中的pybrain.datasets.supervised.SupervisedDataSet.setField方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。