

Python MinMaxScaler.inverse_transform Method Code Examples

This article collects typical usage examples of the Python method sklearn.preprocessing.MinMaxScaler.inverse_transform. If you are wondering what MinMaxScaler.inverse_transform does, how to call it, or where to find real-world examples of it, the curated code examples below may help. You can also explore further usage examples of the containing class, sklearn.preprocessing.MinMaxScaler.


The sections below show 15 code examples of MinMaxScaler.inverse_transform, ordered by popularity by default.
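
Before the collected examples, here is a minimal, self-contained sketch of the basic round-trip between fit_transform and inverse_transform; the array values below are made up purely for illustration.

import numpy as np
from sklearn.preprocessing import MinMaxScaler

# Toy data: two features with different ranges (values are illustrative only).
X = np.array([[1.0, 10.0],
              [2.0, 20.0],
              [3.0, 30.0]])

scaler = MinMaxScaler(feature_range=(0, 1))
X_scaled = scaler.fit_transform(X)               # each column mapped into [0, 1]
X_restored = scaler.inverse_transform(X_scaled)  # mapped back to the original units

print(X_scaled)
print(np.allclose(X, X_restored))                # True: the round-trip recovers X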

Example 1: test_min_max_scaler_iris

# Required import: from sklearn.preprocessing import MinMaxScaler [as alias]
# Or: from sklearn.preprocessing.MinMaxScaler import inverse_transform [as alias]
def test_min_max_scaler_iris():
    X = iris.data
    scaler = MinMaxScaler()
    # default params
    X_trans = scaler.fit_transform(X)
    assert_array_almost_equal(X_trans.min(axis=0), 0)
    assert_array_almost_equal(X_trans.max(axis=0), 1)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)

    # not default params: min=1, max=2
    scaler = MinMaxScaler(feature_range=(1, 2))
    X_trans = scaler.fit_transform(X)
    assert_array_almost_equal(X_trans.min(axis=0), 1)
    assert_array_almost_equal(X_trans.max(axis=0), 2)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)

    # min=-.5, max=.6
    scaler = MinMaxScaler(feature_range=(-.5, .6))
    X_trans = scaler.fit_transform(X)
    assert_array_almost_equal(X_trans.min(axis=0), -.5)
    assert_array_almost_equal(X_trans.max(axis=0), .6)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)

    # raises on invalid range
    scaler = MinMaxScaler(feature_range=(2, 1))
    assert_raises(ValueError, scaler.fit, X)
Developer: abouaziz, Project: scikit-learn, Lines: 32, Source: test_preprocessing.py

Example 2: predict

# Required import: from sklearn.preprocessing import MinMaxScaler [as alias]
# Or: from sklearn.preprocessing.MinMaxScaler import inverse_transform [as alias]
def predict():
    # normalization
    scaler = MinMaxScaler()
    records['power'] = scaler.fit_transform(records['power'])

    saver = tf.train.Saver()
    with tf.Session() as sess:
        # restore model
        saver.restore(sess, model_path)
        test_data = get_cycle_time_batch_data(records, batch_size, cycle_timesteps)
        test_y_list = [] 
        test_y_pre_list = []
        test_all_loss = []
        for batch in test_data:
            predict, loss = sess.run([y_pre, loss_func], feed_dict = {cycle_model.X_ : batch[0], y : batch[1]})
            test_y_list.extend(batch[1])
            test_y_pre_list.extend(predict)
            test_all_loss.append(loss)

        # display
        test_x = list(range(len(test_y_list)))

        # inverse normalization
        test_y_list = scaler.inverse_transform(test_y_list)
        test_y_pre_list = scaler.inverse_transform(test_y_pre_list)
        test_y_list = np.array(test_y_list)
        test_y_pre_list = np.array(test_y_pre_list)

        mse = np.mean( (test_y_list - test_y_pre_list) ** 2)
        print('---------------- Test Loss:', np.mean(test_all_loss), 'MSE:', mse)
        plt.plot(test_x, test_y_list, 'r', test_x, test_y_pre_list, 'b')
        plt.show()
Developer: Maple728, Project: ChargeTask, Lines: 34, Source: process+-+3.py

Example 3: predict_new

# Required import: from sklearn.preprocessing import MinMaxScaler [as alias]
# Or: from sklearn.preprocessing.MinMaxScaler import inverse_transform [as alias]
def predict_new(self, input):
    model = self.train_model()
    assert len(input) == 5 and type(input) == list
    scaler = MinMaxScaler(feature_range=(0, 1))
    scaler.fit(self.data)
    inp = scaler.transform([input])
    print(scaler.inverse_transform(model.predict(numpy.array(inp).reshape(1, 1, 5))))
Developer: at553, Project: golden_touch, Lines: 9, Source: predict.py

Example 4: sample_from_generator

# Required import: from sklearn.preprocessing import MinMaxScaler [as alias]
# Or: from sklearn.preprocessing.MinMaxScaler import inverse_transform [as alias]
def sample_from_generator(history, nb_samples, latent_dim=12, 
                          valid_split=0.3, random_split=True,
                          hidden_dims=None, **kwargs):
    scaler = MinMaxScaler()
    scaler.fit(history)
    scaled = scaler.transform(history)
    
    nb_train = history.shape[0]    
    if not valid_split:
        nb_valid = 0
    elif isinstance(valid_split, float):
        nb_valid = nb_train - int(np.floor(nb_train*valid_split))
    else:
        nb_valid = valid_split
        
    if nb_valid > 0:
        if random_split:
            ind = np.arange(nb_train)
            np.random.shuffle(ind)
            x_valid = scaled[ind[-nb_valid:], :]
            x_train = scaled[ind[:-nb_valid], :]
        else:
            x_valid = scaled[-nb_valid:, :]
            x_train = scaled[:-nb_valid, :]
    else:
        x_valid = None
        x_train = scaled
    
    _, generator = build_model(latent_dim, x_train, x_valid=x_valid, 
                               hidden_dims=hidden_dims, **kwargs)
    
    normal_sample = np.random.standard_normal((nb_samples, latent_dim))
    draws = generator.predict(normal_sample)
    return scaler.inverse_transform(draws)
Developer: Andres-Hernandez, Project: CalibrationNN, Lines: 36, Source: variational_autoencoder.py

Example 5: test_min_max_scaler_zero_variance_features

# Required import: from sklearn.preprocessing import MinMaxScaler [as alias]
# Or: from sklearn.preprocessing.MinMaxScaler import inverse_transform [as alias]
def test_min_max_scaler_zero_variance_features():
    """Check min max scaler on toy data with zero variance features"""
    X = [[0.,  1.,  0.5],
         [0.,  1., -0.1],
         [0.,  1.,  1.1]]

    X_new = [[+0.,  2.,  0.5],
             [-1.,  1.,  0.0],
             [+0.,  1.,  1.5]]

    # default params
    scaler = MinMaxScaler()
    X_trans = scaler.fit_transform(X)
    X_expected_0_1 = [[0.,  0.,  0.5],
                      [0.,  0.,  0.0],
                      [0.,  0.,  1.0]]
    assert_array_almost_equal(X_trans, X_expected_0_1)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)

    X_trans_new = scaler.transform(X_new)
    X_expected_0_1_new = [[+0.,  1.,  0.500],
                          [-1.,  0.,  0.083],
                          [+0.,  0.,  1.333]]
    assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)

    # not default params
    scaler = MinMaxScaler(feature_range=(1, 2))
    X_trans = scaler.fit_transform(X)
    X_expected_1_2 = [[1.,  1.,  1.5],
                      [1.,  1.,  1.0],
                      [1.,  1.,  2.0]]
    assert_array_almost_equal(X_trans, X_expected_1_2)
Developer: Big-Data, Project: scikit-learn, Lines: 35, Source: test_preprocessing.py

Example 6: one_input_LSTM

# Required import: from sklearn.preprocessing import MinMaxScaler [as alias]
# Or: from sklearn.preprocessing.MinMaxScaler import inverse_transform [as alias]
def one_input_LSTM(dataset):
	# normalize the dataset
	scaler = MinMaxScaler(feature_range=(0, 1))
	dataset = scaler.fit_transform(dataset)
	# split into train and test sets
	train_size = int(len(dataset) * 0.67)
	test_size = len(dataset) - train_size
	train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]
	# reshape into X=t and Y=t+1
	look_back = 1
	trainX, trainY = create_dataset(train, look_back)
	testX, testY = create_dataset(test, look_back)
	# reshape input to be [samples, time steps, features]
	trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
	testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
	# create and fit the LSTM network
	model = Sequential()
	model.add(LSTM(4, input_shape=(1, look_back)))
	model.add(Dense(1))
	model.compile(loss='mean_squared_error', optimizer='adam')
	model.fit(trainX, trainY, epochs=20, batch_size=1, verbose=2)
	# make predictions
	trainPredict = model.predict(trainX)
	testPredict = model.predict(testX)
	# invert predictions
	trainPredict = scaler.inverse_transform(trainPredict)
	trainY = scaler.inverse_transform([trainY])
	testPredict = scaler.inverse_transform(testPredict)
	testY = scaler.inverse_transform([testY])
	# calculate root mean squared error
	trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))
	print('Train Score: %.2f RMSE' % (trainScore))
	testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))
	print('Test Score: %.2f RMSE' % (testScore))
	# shift train predictions for plotting
	trainPredictPlot = np.empty_like(dataset)
	trainPredictPlot[:, :] = np.nan
	trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict
	# shift test predictions for plotting
	testPredictPlot = np.empty_like(dataset)
	testPredictPlot[:, :] = np.nan
	testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict
Developer: yennanliu, Project: analysis, Lines: 44, Source: run_zone_itime_LSTM_dev.py
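
In the example above, trainY and testY are wrapped in a list before calling inverse_transform because the scaler expects a 2-D array of shape (n_samples, n_features). A standalone illustration of that shape requirement, using made-up numbers rather than the project's data:

import numpy as np
from sklearn.preprocessing import MinMaxScaler

# Fit on a single-feature column (values are illustrative only).
scaler = MinMaxScaler(feature_range=(0, 1)).fit(np.array([[10.0], [20.0], [30.0]]))

scaled_1d = np.array([0.0, 0.5, 1.0])                          # 1-D vector of scaled values
restored = scaler.inverse_transform(scaled_1d.reshape(-1, 1))  # reshape to (n_samples, 1)
print(restored.ravel())                                        # [10. 20. 30.]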

Example 7: __init__

# Required import: from sklearn.preprocessing import MinMaxScaler [as alias]
# Or: from sklearn.preprocessing.MinMaxScaler import inverse_transform [as alias]
class SerialDataScaler:
    
    def __init__(self, data):
        data = numpy.reshape(data, (len(data), 1))
        data = data.astype("float32")
        self.scaler = MinMaxScaler(feature_range=(0, 1))
        self.scaler.fit(data)
    
    def transform(self, X):
        #return X
        return self.scaler.transform(numpy.reshape(X, (len(X), 1)))

    def inverse_transform(self, x):
        return self.scaler.inverse_transform(x)
Developer: ericsolo, Project: python, Lines: 16, Source: DataPrepare.py
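
A possible way to use this small wrapper class, shown only as a sketch (the series values and variable names are hypothetical, not taken from the original project):

# Assumes the SerialDataScaler class and its imports (numpy, MinMaxScaler) from above.
series = [112, 118, 132, 129, 121]        # hypothetical 1-D measurements

ds = SerialDataScaler(series)             # fits a MinMaxScaler on the column-shaped data
scaled = ds.transform(series)             # values mapped into [0, 1], shape (5, 1)
restored = ds.inverse_transform(scaled)   # back to the original scale
print(scaled.ravel())
print(restored.ravel())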

Example 8: get_net_prediction

# Required import: from sklearn.preprocessing import MinMaxScaler [as alias]
# Or: from sklearn.preprocessing.MinMaxScaler import inverse_transform [as alias]
def get_net_prediction( train_data, train_truth, test_data, test_truth
                      , hidden=(5,), weight_decay=0.0, dropout_prob=0.0
                      , learning_rate=None, epochs=25, verbose=False
                      , iter_id=None
                      ):

    container = NeuralNetContainer()
    container.learning_rate = learning_rate
    container.dropout_prob = dropout_prob
    container.weight_decay = weight_decay
    container.epochs = epochs
    container.hidden_layers = hidden
    container.verbose = verbose
    container.plot = get_should_plot()

    mms = MinMaxScaler(feature_range= (-1, 1)) # Scale output from -1 to 1.
    train_y = mms.fit_transform(train_truth[:,np.newaxis])

    n_features = train_data.shape[1]

    collect_time_stats = get_is_time_stats()
    if collect_time_stats: 
        start = time.time()

    # Find and return an effectively initialized network to start.
    container = _get_initial_net(container, n_features, train_data, train_y)

    # Train the network.
    if collect_time_stats:
        # Train a specific time, never terminating early.
        _train_net(container, train_data, train_y, override_epochs=TIMING_EPOCHS, is_check_train=False)
    else: 
        # Normal training, enable all heuristics.
        _train_net(container, train_data, train_y)

    if collect_time_stats: 
        end = time.time()
        print('Fitting took {} seconds'.format(end - start))
        print(json.dumps({'seconds': end - start, 'hidden': container.hidden_layers}))

    # Unsupervised (test) dataset.
    predicted = _predict(container, test_data)
    predicted = mms.inverse_transform(predicted)
    
    return predicted.ravel()
Developer: rileymcdowell, Project: genomic-neuralnet, Lines: 47, Source: generic_keras_net.py

Example 9: get_fast_nn_dom_prediction

# Required import: from sklearn.preprocessing import MinMaxScaler [as alias]
# Or: from sklearn.preprocessing.MinMaxScaler import inverse_transform [as alias]
def get_fast_nn_dom_prediction(train_data, train_truth, test_data, test_truth, hidden=(5,), weight_decay=0.0): 
    # Convert data to individual alleles to capture dominance.
    train_data, test_data = tuple(map(_convert_to_individual_alleles, [train_data, test_data]))

    scaler = MinMaxScaler(feature_range = (-1, 1))
    train_truth = scaler.fit_transform(train_truth)
    test_truth = scaler.transform(test_truth)

    net = _get_nn(train_data.shape[1], hidden)

    _train_nn(net, train_data, train_truth, weight_decay)

    out = []
    for i in range(test_data.shape[0]):
        sample = test_data[i,:]
        res = net.run(sample)
        out.append(res)

    predicted = scaler.inverse_transform(np.array(out))

    return predicted.ravel()
Developer: rileymcdowell, Project: genomic-neuralnet, Lines: 23, Source: neural_net_dominance_fast.py

Example 10: print

# Required import: from sklearn.preprocessing import MinMaxScaler [as alias]
# Or: from sklearn.preprocessing.MinMaxScaler import inverse_transform [as alias]
# verify
import matplotlib.pyplot as plt
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('loss func')
plt.ylabel('loss/val_loss')
plt.xlabel('epochs')
plt.legend(['train', 'test'], loc='upper right')
plt.show()

scores = model.evaluate(tX_test, ty_test)
print('\n%s: %.3f' % (model.metrics_names, scores))

# Step 4: test

y_pred = model.predict(tX_test)
y_pred = y_scaler.inverse_transform(y_pred)
print('y_pred: \n', y_pred[:5])

import pandas as pd
dfy = pd.DataFrame({'Truth class': y_test[...,0],
                  'Pred class': y_pred[...,0]})

long_dfy = pd.melt(dfy, value_vars=['Truth class', 'Pred class'])

from plotnine import *
(ggplot(long_dfy, aes(x='value', color='variable', fill='variable'))
  + geom_density(alpha=0.5)
  + theme_bw())
Developer: nsbinhan, Project: keras_for_beginner, Lines: 31, Source: regression_estimate_body_fat.py
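
The example above inverse-transforms the network output with a target scaler (y_scaler) that was fit on the labels only. A minimal sketch of that pattern, with made-up names and data rather than the project's variables:

import numpy as np
from sklearn.preprocessing import MinMaxScaler

# Illustrative data: features X and a target y in different units.
X = np.random.rand(100, 3)
y = 50.0 + 10.0 * np.random.rand(100, 1)

x_scaler = MinMaxScaler().fit(X)
y_scaler = MinMaxScaler().fit(y)           # separate scaler fit only on the target

X_scaled = x_scaler.transform(X)
y_scaled = y_scaler.transform(y)

# ... train a model on (X_scaled, y_scaled) and collect scaled predictions ...
y_pred_scaled = y_scaled                   # stand-in for the model's output
y_pred = y_scaler.inverse_transform(y_pred_scaled)   # predictions back in original units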

Example 11: Sequential

# Required import: from sklearn.preprocessing import MinMaxScaler [as alias]
# Or: from sklearn.preprocessing.MinMaxScaler import inverse_transform [as alias]
prediction. The default sigmoid activation function is used for the
LSTM blocks. The network is trained for 100 epochs and a batch size of
1 is used."""

# create and fit the LSTM network
model = Sequential()
model.add(LSTM(4, input_dim=look_back))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, nb_epoch=100, batch_size=1, verbose=2)

# make predictions
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
# invert predictions
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
# calculate root mean squared error
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))
print(trainY[0])
print(trainPredict[:,0])
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))
print(testY[0])
print(testPredict[:,0])
print('Test Score: %.2f RMSE' % (testScore))

# shift train predictions for plotting
trainPredictPlot = numpy.empty_like(dataset)
Developer: khushhallchandra, Project: Deep-Learning, Lines: 33, Source: run.py

Example 12: print

# Required import: from sklearn.preprocessing import MinMaxScaler [as alias]
# Or: from sklearn.preprocessing.MinMaxScaler import inverse_transform [as alias]
    # Training is now complete!

    # Get the final accuracy scores by running the "cost" operation on the training and test data sets
    final_training_cost = session.run(cost, feed_dict={X: X_scaled_training, Y: Y_scaled_training})
    final_testing_cost = session.run(cost, feed_dict={X: X_scaled_testing, Y: Y_scaled_testing})

    print("Final Training cost: {}".format(final_training_cost))
    print("Final Testing cost: {}".format(final_testing_cost))

    # Now that the neural network is trained, let's use it to make predictions for our test data.
    # Pass in the X testing data and run the "prediction" operation
    Y_predicted_scaled = session.run(prediction, feed_dict={X: X_scaled_testing})

    # Unscale the data back to its original units (dollars)
    Y_predicted = Y_scaler.inverse_transform(Y_predicted_scaled)

    real_earnings = test_data_df['total_earnings'].values[0]
    predicted_earnings = Y_predicted[0][0]

    print("The actual earnings of Game #1 were ${}".format(real_earnings))
    print("Our neural network predicted earnings of ${}".format(predicted_earnings))

    model_builder = tf.saved_model.builder.SavedModelBuilder("exported_model")

    inputs = {
        'input': tf.saved_model.utils.build_tensor_info(X)
        }
    outputs = {
        'earnings': tf.saved_model.utils.build_tensor_info(prediction)
        }
Developer: kireetigupta, Project: tensorflowtut, Lines: 32, Source: export_model_for_cloud+final.py

Example 13: Sequential

# Required import: from sklearn.preprocessing import MinMaxScaler [as alias]
# Or: from sklearn.preprocessing.MinMaxScaler import inverse_transform [as alias]
# reshape input to be [samples, time steps, features]
trainX = numpy.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = numpy.reshape(testX, (testX.shape[0], 1, testX.shape[1]))

# create and fit the LSTM network
model = Sequential()
model.add(LSTM(6, input_dim=look_back))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, nb_epoch=100, batch_size=1, verbose=2)


# Estimate model performance
trainScore = model.evaluate(trainX, trainY, verbose=0)
print('Train Score: ', scaler.inverse_transform(numpy.array([[trainScore]])))
testScore = model.evaluate(testX, testY, verbose=0)
print('Test Score: ', scaler.inverse_transform(numpy.array([[testScore]])))

# generate predictions for training
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)

#print(scaler.inverse_transform(testPredict))

# shift train predictions for plotting
trainPredictPlot = numpy.empty_like(dataset)
trainPredictPlot[:, :] = numpy.nan
trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict

# shift test predictions for plotting
Developer: at553, Project: golden_touch, Lines: 32, Source: test_model.py

Example 14: main

# Required import: from sklearn.preprocessing import MinMaxScaler [as alias]
# Or: from sklearn.preprocessing.MinMaxScaler import inverse_transform [as alias]
def main():
    # fix random seed for reproducibility
    numpy.random.seed(7)

    # Get CLI arguments
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--gpus',
        help='Number of GPUs to use.',
        type=int, default=1)
    args = parser.parse_args()
    gpus = args.gpus

    # load the dataset
    dataframe = DataFrame(
        [0.00000, 5.99000, 11.92016, 17.73121, 23.36510, 28.76553, 33.87855,
         38.65306, 43.04137, 46.99961, 50.48826, 53.47244, 55.92235, 57.81349,
         59.12698, 59.84970, 59.97442, 59.49989, 58.43086, 56.77801, 54.55785,
         51.79256, 48.50978, 44.74231, 40.52779, 35.90833, 30.93008, 25.64279,
         20.09929, 14.35496, 8.46720, 2.49484, -3.50245, -9.46474, -15.33247,
         -21.04699, -26.55123, -31.79017, -36.71147, -41.26597, -45.40815,
         -49.09663, -52.29455, -54.96996, -57.09612, -58.65181, -59.62146,
         -59.99540, -59.76988, -58.94716, -57.53546, -55.54888, -53.00728,
         -49.93605, -46.36587, -42.33242, -37.87600, -33.04113, -27.87613,
         -22.43260, -16.76493, -10.92975, -4.98536, 1.00883, 6.99295, 12.90720,
         18.69248, 24.29100, 29.64680, 34.70639, 39.41920, 43.73814, 47.62007,
         51.02620, 53.92249, 56.28000, 58.07518, 59.29009, 59.91260, 59.93648,
         59.36149, 58.19339, 56.44383, 54.13031, 51.27593, 47.90923, 44.06383,
         39.77815, 35.09503, 30.06125, 24.72711, 19.14590, 13.37339, 7.46727,
         1.48653, -4.50907, -10.45961, -16.30564, -21.98875, -27.45215,
         -32.64127, -37.50424, -41.99248, -46.06115, -49.66959, -52.78175,
         -55.36653, -57.39810, -58.85617, -59.72618, -59.99941, -59.67316,
         -58.75066, -57.24115, -55.15971, -52.52713, -49.36972, -45.71902,
         -41.61151, -37.08823, -32.19438, -26.97885, -21.49376, -15.79391,
         -9.93625, -3.97931, 2.01738, 7.99392, 13.89059, 19.64847, 25.21002,
         30.51969, 35.52441, 40.17419, 44.42255, 48.22707, 51.54971, 54.35728,
         56.62174, 58.32045, 59.43644, 59.95856, 59.88160, 59.20632, 57.93947,
         56.09370, 53.68747, 50.74481, 47.29512, 43.37288, 39.01727, 34.27181,
         29.18392, 23.80443, 18.18710, 12.38805, 6.46522, 0.47779, -5.51441,
         -11.45151])
    dataset = dataframe.values
    dataset = dataset.astype('float32')

    # normalize the dataset
    scaler = MinMaxScaler(feature_range=(0, 1))
    dataset = scaler.fit_transform(dataset)

    # split into train and test sets
    train_size = int(len(dataset) * 0.67)
    test_size = len(dataset) - train_size
    train, test = dataset[0:train_size, :], dataset[train_size:len(dataset), :]

    # reshape into X=t and Y=t+1
    look_back = 1
    trainX, trainY = create_dataset(train, look_back)
    testX, testY = create_dataset(test, look_back)

    # reshape input to be [samples, time steps, features]
    trainX = numpy.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
    testX = numpy.reshape(testX, (testX.shape[0], 1, testX.shape[1]))

    # Create layers for model
    x_tensor = Input(shape=(1, look_back))
    layer_1 = LSTM(4)(x_tensor)
    y_tensor = Dense(1)(layer_1)

    # Create and fit the LSTM network
    with tf.device('/cpu:0'):
        serial_model = Model(inputs=x_tensor, outputs=y_tensor)

    # Modify model for GPUs if necessary
    if gpus == 1:
        parallel_model = serial_model
    else:
        parallel_model = multi_gpu_model(
            serial_model,
            cpu_relocation=True,
            gpus=gpus)
    parallel_model.compile(
        loss='mean_squared_error', optimizer='adam')
    parallel_model.fit(
        trainX, trainY,
        epochs=100,
        batch_size=int(dataset.size * gpus / 20),
        verbose=2)

    # make predictions
    if gpus == 1:
        trainPredict = parallel_model.predict(trainX)
        testPredict = parallel_model.predict(testX)
    else:
        trainPredict = serial_model.predict(trainX)
        testPredict = serial_model.predict(testX)

    # invert predictions
    trainPredict = scaler.inverse_transform(trainPredict)
    trainY = scaler.inverse_transform([trainY])
    testPredict = scaler.inverse_transform(testPredict)
    testY = scaler.inverse_transform([testY])

#......... remaining code omitted .........
Developer: palisadoes, Project: AI, Lines: 103, Source: test-nosequential.py

Example 15: MinMaxScaler

# Required import: from sklearn.preprocessing import MinMaxScaler [as alias]
# Or: from sklearn.preprocessing.MinMaxScaler import inverse_transform [as alias]
dataset = df.values
dataset = dataset.astype('float32')

# normalize the dataset
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)

#prepare the X and Y label
X,y = create_dataset(dataset, 1)

print(X.shape, 'XXXXX', y.shape)

#Take 80% of data as the training sample and 20% as testing sample
trainX, testX, trainY, testY = train_test_split(X, y, test_size=0.20, shuffle=False)

print(scaler.inverse_transform(trainX[:10]))
print(len(testY), 'testY length')

# reshape input to be [samples, time steps, features]
trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
print(trainX[:3], trainX.shape, 'ttttttttttX')
testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))

# create and fit the LSTM network
model = Sequential()
model.add(LSTM(4, input_shape=(1, 1)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
history=model.fit(trainX, trainY, nb_epoch=5, batch_size=1, validation_data=(testX, testY), verbose=2)

plt.plot(history.history['loss'], label='train')
Developer: fzhurd, Project: fzwork, Lines: 33, Source: predict_bitcoins_price_v2d.py


Note: The sklearn.preprocessing.MinMaxScaler.inverse_transform examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. For distribution and use, please follow the license of the corresponding project; do not reproduce without permission.