This page collects typical usage examples of the Python method load_data.load_data. If you have been wondering what load_data.load_data does, how to call it, or what real uses of it look like, the curated examples below may help. You can also explore further usage examples from the load_data module that the method belongs to.
Three code examples of the load_data.load_data method are shown below, sorted by popularity by default.
Example 1: main
# Required import: import load_data [as alias]
# or: from load_data import load_data [as alias]
def main():
    data_path = "./Data/GasPrice.csv"
    P = 12    # sequence length (number of past values fed to the model)
    step = 1  # number of steps ahead to predict

    X_train, Y_train, X_test, Y_test, data_df_combined_clean = load_data(
        data_path, P=P, step=step)
    print(X_train.shape)
    print(Y_train.shape)

    # Wavelet_LSTM, train and test are defined elsewhere in the same project.
    model = Wavelet_LSTM(P, 32, 1)
    model = model.double()  # use float64 weights to match the input dtype
    train(model, X_train, Y_train, epochs=20)
    test(model, X_test, Y_test, data_df_combined_clean)
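For orientation, here is a minimal sketch of what the load_data called in Example 1 might do. The project's real implementation is not shown on this page, so the column selection, the cleaning step, the single-value target, and the chronological 80/20 split below are illustrative assumptions only.

# Hypothetical sketch -- not the project's actual load_data.
import numpy as np
import pandas as pd

def load_data(data_path, P=12, step=1, train_ratio=0.8):
    # Assumed: the CSV's last column holds the price series.
    data_df_combined_clean = pd.read_csv(data_path).dropna()
    values = data_df_combined_clean.iloc[:, -1].values

    # Sliding window: P past values predict the value `step` steps ahead.
    X, Y = [], []
    for i in range(len(values) - P - step + 1):
        X.append(values[i:i + P])
        Y.append(values[i + P + step - 1])
    X, Y = np.array(X), np.array(Y)

    # Assumed chronological train/test split.
    split = int(len(X) * train_ratio)
    return X[:split], Y[:split], X[split:], Y[split:], data_df_combined_clean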
Example 2: data
# Required import: import load_data [as alias]
# or: from load_data import load_data [as alias]
def data(name, mode='default', sep=',', delimiter=None, header='infer'):
    '''Loads one of the bundled Autonomio datasets.

    OPTIONS: either pass a dataset name from the list below, or set
    mode='file' and pass a filename instead.

    DATASET NAMES:

    'election_in_twitter'
        Dataset consisting of 10-minute samples of 80 million tweets.
    'tweet_sentiment'
        Dataset with tweet text classified for sentiment using NLTK Vader.
    'sites_category_and_vec'
        4,000 sites with word vectors and 5 categories.
    'programmatic_ad_fraud'
        Data from both the buy and sell side and over 10 other sources.
    'parties_and_employment'
        9 years of monthly poll and unemployment numbers.
    'random_tweets'
        20,000 randomly sampled tweets.
    '''
    out = load_data(name, mode, sep, delimiter, header)

    return out
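Because the dataset names are fixed strings, calling this wrapper is straightforward. A small usage sketch, with the call pattern inferred from the signature and docstring above (the filename in the second call is purely illustrative):

# Load a bundled dataset by name.
df = data('tweet_sentiment')

# Or load a local file by switching mode to 'file'.
df = data('my_tweets.csv', mode='file', sep=',')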
Example 3: build_model
# Required import: import load_data [as alias]
# or: from load_data import load_data [as alias]
def build_model(self, config, train):
    if train:
        # Collect every .tfrecords file under the dataset directory.
        tfrecord_list = glob(os.path.join(config.dataset, '**', '*.tfrecords'), recursive=True)
        assert tfrecord_list, 'no .tfrecords files found under config.dataset'
        shuffle(tfrecord_list)
        print('\n\n====================\ntfrecords list:')
        for f in tfrecord_list:
            print(f)
        print('====================\n\n')

        # Keep the input queue on the CPU so the GPU is free for the model.
        with tf.device('/cpu:0'):
            filename_queue = tf.train.string_input_producer(tfrecord_list)
            self.in_LDRs, self.in_HDRs, self.ref_LDRs, self.ref_HDR, _, _ = load_data(filename_queue, config)

        self.G_HDR = self.generator(self.in_LDRs, self.in_HDRs, train=train)
        self.G_tonemapped = tonemap(self.G_HDR)
        self.G_sum = tf.summary.image("G", self.G_tonemapped)

        # L2 loss, computed after tonemapping.
        self.g_loss = tf.reduce_mean((self.G_tonemapped - tonemap(self.ref_HDR)) ** 2)
        self.g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)

        t_vars = tf.trainable_variables()
        self.g_vars = [var for var in t_vars if 'g_' in var.name]

        # A second queue feeds the sampler used for periodic previews.
        with tf.device('/cpu:0'):
            sample_tfrecord_list = glob(os.path.join(
                './dataset/tf_records', '**', '*.tfrecords'), recursive=True)
            shuffle(sample_tfrecord_list)
            filename_queue_sample = tf.train.string_input_producer(sample_tfrecord_list)
            self.in_LDRs_sample, self.in_HDRs_sample, self.ref_LDRs_sample, self.ref_HDR_sample, _, _ = \
                load_data(filename_queue_sample, config)
        self.sampler_HDR = self.generator(self.in_LDRs_sample, self.in_HDRs_sample, train=False, reuse=True)
        self.sampler_tonemapped = tonemap(self.sampler_HDR)
    else:
        # Testing path: feed inputs via placeholders instead of an input queue.
        self.in_LDRs_sample = tf.placeholder(
            tf.float32, [self.batch_size, config.test_h, config.test_w, self.c_dim * self.num_shots],
            name='input_LDR_sample')
        self.in_HDRs_sample = tf.placeholder(
            tf.float32, [self.batch_size, config.test_h, config.test_w, self.c_dim * self.num_shots],
            name='input_HDR_sample')
        self.sampler_HDR = self.generator(self.in_LDRs_sample, self.in_HDRs_sample, train=False, free_size=True)
        self.sampler_tonemapped = tonemap(self.sampler_HDR)

    self.saver = tf.train.Saver(max_to_keep=50)
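Note that Example 3 targets the TensorFlow 1.x graph-mode API: tf.train.string_input_producer and tf.placeholder are not available in TensorFlow 2.x's default eager mode. If you need to run such code under a modern installation, one common approach (an assumption about your environment, not part of the original example) is the v1 compatibility layer:

# Run TF1-style queue pipelines under a TensorFlow 2.x installation.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # restores graph mode, placeholders and input queues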