This article collects typical usage examples of the Python method load_data.load_data. If you have been wondering how exactly load_data.load_data is used, or how to call it, the curated code examples here may help. You can also explore further usage of the load_data module it belongs to.
Below are 3 code examples of the load_data.load_data method, sorted by popularity by default.
Example 1: main
# Required import: import load_data [as alias]
# or: from load_data import load_data [as alias]
def main():
    data_path = "./Data/GasPrice.csv"
    P = 12    # sequence (window) length
    step = 1  # number of steps ahead to predict
    X_train, Y_train, X_test, Y_test, data_df_combined_clean = load_data(data_path, P=P, step=step)
    print(X_train.shape)
    print(Y_train.shape)
    model = Wavelet_LSTM(P, 32, 1)
    model = model.double()  # use float64 weights to match the input dtype
    train(model, X_train, Y_train, epochs=20)
    test(model, X_test, Y_test, data_df_combined_clean)
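For context, here is a minimal sketch of what a load_data with this signature might do: slide a window of length P over a univariate series and take the value step points past the window as the target. The column name "Price", the 80/20 split, and all parameter defaults are assumptions for illustration, not taken from the original project.
import numpy as np
import pandas as pd

def load_data(data_path, P=12, step=1, train_frac=0.8, value_col="Price"):
    """Build sliding-window (X, Y) pairs from a univariate time series.

    Each input is a window of P consecutive values; the target is the
    value `step` points after the window ends.
    """
    df = pd.read_csv(data_path)
    values = df[value_col].astype(float).to_numpy()

    X, Y = [], []
    for i in range(len(values) - P - step + 1):
        X.append(values[i:i + P])
        Y.append(values[i + P + step - 1])
    X, Y = np.asarray(X), np.asarray(Y)

    # Chronological split: the first train_frac of windows for training.
    split = int(len(X) * train_frac)
    return X[:split], Y[:split], X[split:], Y[split:], df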
Example 2: data
# Required import: import load_data [as alias]
# or: from load_data import load_data [as alias]
def data(name, mode='default', sep=',', delimiter=None, header='infer'):
    '''Function for loading one of the Autonomio datasets.

    OPTIONS: Either set mode to 'file' or pass a dataset name without
    the mode parameter.

    FILENAMES:

    'election_in_twitter'
        Dataset consisting of 10-minute samples of 80 million tweets.
    'tweet_sentiment'
        Dataset with tweet text classified for sentiment using NLTK Vader.
    'sites_category_and_vec'
        4,000 sites with word vectors and 5 categories.
    'programmatic_ad_fraud'
        Data from both buy and sell side and over 10 other sources.
    'parties_and_employment'
        9 years of monthly poll and unemployment numbers.
    'random_tweets'
        20,000 random tweets.
    '''
    out = load_data(name, mode, sep, delimiter, header)
    return out
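A quick usage sketch, assuming the bundled datasets resolve by the names listed in the docstring; the local file path 'my_data.csv' is hypothetical.
# Load a bundled dataset by name (default mode).
df = data('tweet_sentiment')

# Load a local file instead, using the 'file' mode described above.
df = data('my_data.csv', mode='file', sep=';')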
Example 3: build_model
# Required import: import load_data [as alias]
# or: from load_data import load_data [as alias]
import os
from glob import glob
from random import shuffle

import tensorflow as tf  # TensorFlow 1.x: uses queue runners and placeholders

def build_model(self, config, train):
    if train:
        tfrecord_list = glob(os.path.join(config.dataset, '**', '*.tfrecords'), recursive=True)
        assert tfrecord_list, 'no .tfrecords files found under config.dataset'
        shuffle(tfrecord_list)
        print('\n\n====================\ntfrecords list:')
        for f in tfrecord_list:
            print(f)
        print('====================\n\n')

        # Read training batches from the tfrecord filename queue on the CPU.
        with tf.device('/cpu:0'):
            filename_queue = tf.train.string_input_producer(tfrecord_list)
            self.in_LDRs, self.in_HDRs, self.ref_LDRs, self.ref_HDR, _, _ = load_data(filename_queue, config)

        self.G_HDR = self.generator(self.in_LDRs, self.in_HDRs, train=train)
        self.G_tonemapped = tonemap(self.G_HDR)
        self.G_sum = tf.summary.image("G", self.G_tonemapped)

        # L2 loss, computed after tonemapping.
        self.g_loss = tf.reduce_mean((self.G_tonemapped - tonemap(self.ref_HDR)) ** 2)
        self.g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)

        t_vars = tf.trainable_variables()
        self.g_vars = [var for var in t_vars if 'g_' in var.name]

        # A second queue of sample records, used to monitor training progress.
        with tf.device('/cpu:0'):
            sample_tfrecord_list = glob(os.path.join(
                './dataset/tf_records', '**', '*.tfrecords'), recursive=True)
            shuffle(sample_tfrecord_list)
            filename_queue_sample = tf.train.string_input_producer(sample_tfrecord_list)
            self.in_LDRs_sample, self.in_HDRs_sample, self.ref_LDRs_sample, self.ref_HDR_sample, _, _ = \
                load_data(filename_queue_sample, config)
        self.sampler_HDR = self.generator(self.in_LDRs_sample, self.in_HDRs_sample, train=False, reuse=True)
        self.sampler_tonemapped = tonemap(self.sampler_HDR)
    # testing
    else:
        self.in_LDRs_sample = tf.placeholder(
            tf.float32, [self.batch_size, config.test_h, config.test_w, self.c_dim * self.num_shots],
            name='input_LDR_sample')
        self.in_HDRs_sample = tf.placeholder(
            tf.float32, [self.batch_size, config.test_h, config.test_w, self.c_dim * self.num_shots],
            name='input_HDR_sample')
        self.sampler_HDR = self.generator(self.in_LDRs_sample, self.in_HDRs_sample, train=False, free_size=True)
        self.sampler_tonemapped = tonemap(self.sampler_HDR)
    self.saver = tf.train.Saver(max_to_keep=50)
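Because this graph reads input through TensorFlow 1.x queues (tf.train.string_input_producer), no data flows until queue runners are started. A minimal driving loop might look like the following; the session setup and the `model` variable are assumptions for illustration, not code from the original repository.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # string_input_producer enqueues filenames in background threads,
    # so queue runners must be started before fetching any batch.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        loss_val, _ = sess.run([model.g_loss, model.g_loss_sum])
        print('g_loss:', loss_val)
    finally:
        coord.request_stop()
        coord.join(threads)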