本文整理汇总了Python中env.Env.dataset方法的典型用法代码示例。如果您正苦于以下问题:Python Env.dataset方法的具体用法?Python Env.dataset怎么用?Python Env.dataset使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类env.Env的用法示例。
在下文中一共展示了Env.dataset方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: norm
# Required import: from env import Env  (example of env.Env.dataset usage)
# optimizer = tf.train.RMSPropOptimizer(lrate)
optimizer = tf.train.AdagradOptimizer(lrate)
# optimizer = tf.train.GradientDescentOptimizer(lrate)
apply_grads = optimizer.minimize(cost)
# Gradient-clipping variant kept for reference:
# tvars = tf.trainable_variables()
# grads_raw = tf.gradients(cost, tvars)
# grads, _ = tf.clip_by_global_norm(grads_raw, 5.0)
# apply_grads = optimizer.apply_gradients(zip(grads, tvars))
##################################
# DATA
# Pick the first .wav file found in the dataset directory (listdir order).
wav_names = [f for f in os.listdir(env.dataset()) if f.endswith(".wav")]
fname = env.dataset(wav_names[0])
df = env.run("test_data.pkl")
if not os.path.exists(df):
    song_data_raw, source_sr = lr.load(fname)
    print("Got sampling rate {}, resampling to {} ...".format(source_sr, target_sr))
    song_data = lr.resample(song_data_raw, source_sr, target_sr, scale=True)
    song_data = song_data[:30000]
    # np.save emits binary .npy data: open the cache file in "wb" (text "w"
    # can corrupt it) and close the handle deterministically.
    with open(df, "wb") as fh:
        np.save(fh, song_data)
else:
    with open(df, "rb") as fh:
        song_data = np.load(fh)
inputs_v, data_denom = norm(song_data)
示例2: NormalDistribution
# Required import: from env import Env  (example of env.Env.dataset usage)
# NormalDistribution((batch_size, config.z_dim), "normal0")
# )
distribution = NormalDistribution((batch_size, config.z_dim))
model = AvbModel(config, distribution)
z, z_prior, neg_log_x_z, kl, adv_prior, post_mu, net_loss, adv_loss, net_step, adv_step = AvbModel.build(model, input, mode, full_sample=False)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

# Read the IRIS csv: eight feature columns followed by the class label.
test_data = []
test_data_classes = []
for record in np.recfromcsv(env.dataset("IRIS.csv"), delimiter=","):
    values = tuple(record)
    test_data.append(np.asarray([values[:8]]))
    test_data_classes.append(values[8])
test_data = np.concatenate(test_data)

# Map each class label to an integer index into uniq_classes.
uniq_classes = list(set(test_data_classes))
index_of = {cl: i for i, cl in enumerate(uniq_classes)}
test_data_classes = np.asarray([index_of[cl] for cl in test_data_classes])

writer = tf.summary.FileWriter("{}/tf_old".format(os.environ["HOME"]), graph=tf.get_default_graph())
# test_data = np.asarray([
#     [1.0, 1.0, 1.0, 0.0],
#     [1.0, 1.0, 0.0, 1.0],
#     [1.0, 0.0, 1.0, 1.0],
示例3: Env
# Required import: from env import Env  (example of env.Env.dataset usage)
# Hyper-parameters for the sequence model.
net_size = 50
epochs = 10000
bptt_steps = seq_size = 50
le_size = 10
lrate = 0.0001
decay_rate = 1.0  # 0.999
forecast_step = 1

env = Env("piano")

# Collect sparse acoustic dump files from the dataset directory, in sorted order.
source_data_file_list = []
for name in sorted(os.listdir(env.dataset())):
    if not name.endswith("sparse_acoustic_data.dump"):
        continue
    print("Considering {} as input".format(name))
    source_data_file_list.append(env.dataset(name))
data_file_list = list(source_data_file_list)

# Accumulators filled while reading the corpus below.
max_t, input_size = 0, None
data_denoms = []
data_corpus = None
data_ends = []
# Deserialize each dump file in turn.
# NOTE(review): this loop is truncated in the excerpt and its body
# indentation was lost in extraction — the two lines below belong inside it.
for source_id, inp_file in enumerate(data_file_list):
print "Reading {}".format(inp_file)
d = SparseAcoustic.deserialize(inp_file)
示例4: moving_average
# Required import: from env import Env  (example of env.Env.dataset usage)
sess.run(tf.global_variables_initializer())

tmp_dir = env.run()
tmp_grad_dir = env.run("grads")
if not os.path.exists(tmp_grad_dir):
    os.makedirs(tmp_grad_dir)

def _clear_pngs(directory):
    # Remove stale .png outputs left over from a previous run.
    for name in os.listdir(directory):
        if name.endswith(".png"):
            os.remove(pj(directory, name))

_clear_pngs(tmp_dir)
_clear_pngs(tmp_grad_dir)

# input_v = moving_average(np.random.randn(seq_size), 50).reshape(1, seq_size, batch_size, 1)

# All .wav files in the dataset directory, in sorted order.
data_source = [env.dataset(f) for f in sorted(os.listdir(env.dataset())) if f.endswith(".wav")]

def read_song(source_id, target_sr):
    """Load one source .wav, resample it to target_sr and normalize it.

    Returns (normalized samples, original sampling rate, normalization denominator).
    """
    song_data_raw, source_sr = lr.load(data_source[source_id])
    song_data = lr.resample(song_data_raw, source_sr, target_sr, scale=True)
    # Keep a short window of 2*seq_size samples starting at offset 1500.
    song_data = song_data[1500:(1500 + 2 * seq_size)]  # song_data.shape[0]/10]
    song_data, data_denom = norm(song_data, return_denom=True)
    return song_data, source_sr, data_denom
# NOTE(review): this definition is truncated in the excerpt and its body
# indentation was lost in extraction — the state_v line belongs inside generate().
def generate(iterations=1):
state_v = np.zeros((batch_size, net_size))
示例5: Env
# 需要导入模块: from env import Env [as 别名]
# 或者: from env.Env import dataset [as 别名]
from matplotlib import pyplot as plt
import librosa as lr
from os.path import join as pj
import tensorflow as tf
from tensorflow.python.ops import variable_scope as vs
from conv_lib import SparseAcoustic, norm
from conv_model import ConvModel, restore_hidden
from env import Env
env = Env("piano")

# Every .wav in the dataset directory, sorted by name.
data_source = [env.dataset(name)
               for name in sorted(os.listdir(env.dataset()))
               if name.endswith(".wav")]

model_fname = env.run("model.ckpt")

# Model / training hyper-parameters.
batch_size = 30000
L = 150
filters_num = 100
target_sr = 3000
gamma = 1e-03
epochs = 2000
lrate = 1e-04
k = 8  # filter strides
avg_size = 5
sel = None