This article collects and summarizes typical usage examples of the Env.run method from Python's env module. If you have been wondering how exactly Python's Env.run works, how to call it, or what real uses of it look like, the curated method examples here should help. You can also explore further usage examples of the class env.Env that the method belongs to.
Below, 10 code examples of the Env.run method are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
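A note before the examples: the Env class itself is not shown on this page, but the snippets consistently use Env(name) to locate per-experiment directories, env.dataset(fname) for input files, env.run(fname) for paths inside the current run directory (checkpoints, cached arrays), and env.clear_pics(...) to wipe generated figures. A minimal sketch of that inferred interface, with a hypothetical directory layout and method bodies reconstructed only from how the examples call it:

import os

class Env(object):
    # Hypothetical reconstruction of the helper used throughout the examples.
    def __init__(self, name, clear_pics=False):
        base = os.path.join(os.environ["HOME"], name)
        self._dataset_dir = os.path.join(base, "dataset")
        self._run_dir = os.path.join(base, "run")

    def dataset(self, fname=None):
        # Without an argument: the dataset directory itself; otherwise a file in it.
        return self._dataset_dir if fname is None else os.path.join(self._dataset_dir, fname)

    def run(self, fname=None):
        # env.run("model.ckpt") -> a path inside the run directory.
        return self._run_dir if fname is None else os.path.join(self._run_dir, fname)

    def clear_pics(self, directory):
        # Stub: the real helper appears to delete generated pictures under directory.
        pass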
Example 1: Config
# Required module import: from env import Env [as alias]
# Or: from env.Env import run [as alias]
config = Config()
config.lrate = 1e-02
config.z_dim = 2
config.input_dim = 8
config.z_interm = 25
config.x_interm = 25
config.a_interm = 25
config.output_dim = config.input_dim
config.layers_num = 2
config.weight_factor = 0.1
env = Env("avb")
env.clear_pics(env.run())
mode = RunMode.VAE
input = tf.placeholder(tf.float32, shape=(batch_size, config.input_dim), name="x")
# distribution = HierarchicalDistribution(
#     NormalDistribution((batch_size, 5*config.z_dim), "normal0"),
#     NormalDistribution((batch_size, config.z_dim), "normal1")
# )
# distribution = HierarchicalDistribution(
#     BernoulliDistribution((batch_size, 10*config.z_dim), "b0"),
#     NormalDistribution((batch_size, config.z_dim), "normal0")
# )
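Example 1 stops before the training graph is assembled. For orientation, a minimal TF1-style sketch of how a placeholder like the one above gets fed during training; the variable and loss here are stand-ins, not the AVB objective:

import numpy as np
import tensorflow as tf

batch_size, input_dim = 64, 8
x = tf.placeholder(tf.float32, shape=(batch_size, input_dim), name="x")
w = tf.Variable(tf.ones([input_dim]))              # stand-in parameters
loss = tf.reduce_mean(tf.square(x * w))            # stand-in loss
step = tf.train.AdamOptimizer(1e-02).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = np.random.randn(batch_size, input_dim).astype(np.float32)
    _, loss_v = sess.run([step, loss], feed_dict={x: batch})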
Example 2: norm
# Required module import: from env import Env [as alias]
# Or: from env.Env import run [as alias]
optimizer = tf.train.AdagradOptimizer(lrate)
# optimizer = tf.train.GradientDescentOptimizer(lrate)
apply_grads = optimizer.minimize(cost)
# tvars = tf.trainable_variables()
# grads_raw = tf.gradients(cost, tvars)
# grads, _ = tf.clip_by_global_norm(grads_raw, 5.0)
# apply_grads = optimizer.apply_gradients(zip(grads, tvars))
##################################
# DATA
fname = env.dataset([f for f in os.listdir(env.dataset()) if f.endswith(".wav")][0])
df = env.run("test_data.pkl")
if not os.path.exists(df):
    song_data_raw, source_sr = lr.load(fname)
    print "Got sampling rate {}, resampling to {} ...".format(source_sr, target_sr)
    song_data = lr.resample(song_data_raw, source_sr, target_sr, scale=True)
    song_data = song_data[:30000]
    np.save(open(df, "wb"), song_data)  # np.save writes binary data
else:
    song_data = np.load(open(df, "rb"))
inputs_v, data_denom = norm(song_data)
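The load-or-resample block above is a general cache-to-disk pattern, and it is why the file handles must be opened in binary mode: np.save and np.load work with binary data. The same idea as a standalone helper (the name and cache path are illustrative):

import os
import numpy as np

def load_or_build(cache_path, build_fn):
    # Rebuild and cache on first use, read straight from disk afterwards.
    if not os.path.exists(cache_path):
        arr = build_fn()
        with open(cache_path, "wb") as f:
            np.save(f, arr)
        return arr
    with open(cache_path, "rb") as f:
        return np.load(f)

song_data = load_or_build("/tmp/test_data.pkl", lambda: np.zeros(30000))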
Example 3: len
# Required module import: from env import Env [as alias]
# Or: from env.Env import run [as alias]
loss = tf.reduce_mean(recc_loss + kl_loss)
optimizer = tf.train.AdamOptimizer(lrate)
# optimizer = tf.train.RMSPropOptimizer(lrate)
# optimizer = tf.train.GradientDescentOptimizer(lrate)
tvars = tf.trainable_variables()
grads_raw = tf.gradients(loss, tvars)
grads, _ = tf.clip_by_global_norm(grads_raw, 5.0)
apply_grads = optimizer.apply_gradients(zip(grads, tvars))
sess = tf.Session()
model_fname = env.run("model.ckpt")
saver = tf.train.Saver()
if len(glob("{}*".format(model_fname))) > 0:
    print "Restoring from {}".format(model_fname)
    saver.restore(sess, model_fname)
    epochs = 1000
else:
    sess.run(tf.global_variables_initializer())
tmp_dir = env.run()
tmp_grad_dir = env.run("grads")
if not os.path.exists(tmp_grad_dir):
    os.makedirs(tmp_grad_dir)
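Example 3 expands optimizer.minimize(loss) into explicit compute/clip/apply steps so the gradients can be rescaled before the update. The generic form of that pattern:

import tensorflow as tf

def clipped_minimize(optimizer, loss, clip_norm=5.0):
    # Equivalent to optimizer.minimize(loss), with global-norm clipping in between.
    tvars = tf.trainable_variables()
    grads_raw = tf.gradients(loss, tvars)
    grads, _ = tf.clip_by_global_norm(grads_raw, clip_norm)
    return optimizer.apply_gradients(list(zip(grads, tvars)))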
Example 4: int
# Required module import: from env import Env [as alias]
# Or: from env.Env import run [as alias]
# inputs_v[si, bi, int((float(si)/seq_size) * input_size)] = 1.0
targets_v = np.zeros((seq_size, batch_size, visible_size))
for bi in xrange(batch_size):
    for si in xrange(seq_size):
        if si % visible_size == 0:
            targets_v[si, bi, int((float(si)/seq_size) * visible_size)] = 1.0
# targets_v[seq_size/2, 0, 2] = 1.0
sess = tf.Session()
saver = tf.train.Saver()
model_fname = env.run("glm_model.ckpt")
if os.path.exists(model_fname):
    print "Restoring from {}".format(model_fname)
    saver.restore(sess, model_fname)
    epochs = 0
else:
    sess.run(tf.initialize_all_variables())
target_smooth = smooth_matrix(np.squeeze(targets_v))
reward_v, reward_mean_v = None, None
epochs = 200
for e in xrange(epochs):
    state_v = GLMStateTuple(
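The nested loop in example 4 places a single 1.0 every visible_size-th timestep, at a unit index that advances with the position in the sequence. A small worked case makes the indexing concrete:

import numpy as np

seq_size, batch_size, visible_size = 4, 1, 2
targets_v = np.zeros((seq_size, batch_size, visible_size))
for bi in range(batch_size):
    for si in range(seq_size):
        if si % visible_size == 0:
            targets_v[si, bi, int((float(si) / seq_size) * visible_size)] = 1.0
# si=0 marks unit 0 and si=2 marks unit 1; timesteps 1 and 3 stay all-zero.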
Example 5: Config
# Required module import: from env import Env [as alias]
# Or: from env.Env import run [as alias]
config = Config()
config.lrate = 1e-02
config.z_dim = 25
config.input_dim = 2
config.z_interm = 25
config.x_interm = 25
config.a_interm = 25
config.output_dim = config.input_dim
config.layers_num = 2
config.weight_factor = 1.0
env = Env("avb")
env.clear_pics(env.run())
mode = RunMode.VAE
input = tf.placeholder(tf.float32, shape=(batch_size, config.input_dim), name="Input")
distribution = HierarchicalDistribution(
    # BernoulliDistribution((batch_size, config.z_dim), "poisson0"),
    # NormalDistribution((batch_size, config.z_dim), "normal0"),
    NormalDistribution((batch_size, 2), "normal1")
)
model = AvbModel(config, distribution)
Example 6: xrange
# Required module import: from env import Env [as alias]
# Or: from env.Env import run [as alias]
    np.zeros((batch_size, net_size)),
    np.zeros((batch_size, net_size)),
    np.zeros((batch_size, net_size)),
    np.zeros((batch_size, net_size)),
    np.zeros((batch_size, net_size)),
    np.zeros((net_size, net_size)),
    np.zeros((net_size, net_size)),
) for _ in xrange(layers_num) )
sess = tf.Session()
saver = tf.train.Saver()
env = Env("simple_test", clear_pics=True)
model_fname = env.run("glm_model.ckpt")
if os.path.exists(model_fname):
    print "Restoring from {}".format(model_fname)
    saver.restore(sess, model_fname)
    epochs = 0
else:
    sess.run(tf.global_variables_initializer())
    epochs = 100
ww, wr = [], []
for e in xrange(epochs):
    out = sess.run(
        [
            spikes,
            finstate,
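Examples 3, 4, 6, 7 and 8 all repeat the same restore-or-initialize idiom around tf.train.Saver. One helper captures it; the glob-based check from example 3 is used here because TF writes a checkpoint as several files, so a bare os.path.exists(model_fname) test can miss it:

import os
from glob import glob
import tensorflow as tf

def restore_or_init(sess, saver, model_fname, train_epochs):
    # Resume from an existing checkpoint, otherwise initialize and train from scratch.
    if len(glob("{}*".format(model_fname))) > 0:
        saver.restore(sess, model_fname)
        return 0                    # already trained, nothing left to do
    sess.run(tf.global_variables_initializer())
    return train_epochs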
Example 7: read_song
# Required module import: from env import Env [as alias]
# Or: from env.Env import run [as alias]
data_source.append(env.dataset(f))
def read_song(source_id):
    song_data_raw, source_sr = lr.load(data_source[source_id])
    song_data = lr.resample(song_data_raw, source_sr, target_sr, scale=True)
    song_data = song_data[:song_data.shape[0]/10]  # keep the first tenth (Python 2 integer division)
    song_data, data_denom = norm(song_data)
    return song_data, source_sr, data_denom
data, source_sr, data_denom = read_song(0)
sess = tf.Session()
model_fname = env.run("model.ckpt")
saver = tf.train.Saver()
if os.path.exists(model_fname):
    print "Restoring from {}".format(model_fname)
    saver.restore(sess, model_fname)
    epochs = 0
else:
    sess.run(tf.initialize_all_variables())
for e in xrange(epochs):
    mc = []
    output_data = []
    hidden_data = []
    zero_hidden = set(xrange(filters_num))
    for id_start in xrange(0, data.shape[0], seq_size):
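The inner loop of example 7 walks the waveform in fixed windows of seq_size samples; the final window may be shorter, which NumPy slicing handles silently:

import numpy as np

data = np.arange(10)
seq_size = 4
for id_start in range(0, data.shape[0], seq_size):
    chunk = data[id_start:id_start + seq_size]
    # windows: [0..3], [4..7], [8..9] -- the last chunk is truncated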
Example 8: xrange
# Required module import: from env import Env [as alias]
# Or: from env.Env import run [as alias]
# optimizer = tf.train.RMSPropOptimizer(lr)
# optimizer = tf.train.AdadeltaOptimizer(lr)
train_step = optimizer.apply_gradients(zip(grads, tvars))
weights, recc_weights, bias = [], [], []
outputs_info, states_info, winput_info = [], [], []
grads_info = []
sess = tf.Session()
saver = tf.train.Saver()
writer = tf.train.SummaryWriter("{}/tf".format(os.environ["HOME"]), sess.graph)
model_fname = env.run("nn_model.ckpt")
if os.path.exists(model_fname):
    print "Restoring from {}".format(model_fname)
    saver.restore(sess, model_fname)
    epochs = 0
else:
    sess.run(tf.initialize_all_variables())
outputs_v = None
for e in xrange(epochs):
    state_v = np.zeros((batch_size, state_size))
    ep_lrate = lrate * (decay_rate ** e)
    sess.run(tf.assign(lr, ep_lrate))
    batch_ids = get_random_batch_ids(data_ends, seq_size, batch_size, forecast_step)
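Example 8 decays the learning rate by reassigning a non-trainable variable that the optimizer reads, rather than rebuilding the optimizer each epoch. The mechanism in isolation:

import tensorflow as tf

lrate, decay_rate = 1e-02, 0.99
lr = tf.Variable(lrate, trainable=False)    # the optimizer reads this variable
optimizer = tf.train.AdagradOptimizer(lr)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for e in range(3):
        sess.run(tf.assign(lr, lrate * (decay_rate ** e)))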
Example 9: norm
# Required module import: from env import Env [as alias]
# Or: from env.Env import run [as alias]
# optimizer = tf.train.RMSPropOptimizer(c.lrate)
# optimizer = tf.train.AdagradOptimizer(c.lrate)
# optimizer = tf.train.GradientDescentOptimizer(c.lrate)
tvars = tf.trainable_variables()
grads_raw = tf.gradients(cost, tvars)
grads, _ = tf.clip_by_global_norm(grads_raw, 5.0)
apply_grads = optimizer.apply_gradients(zip(grads, tvars))
# df = env.dataset("test_ts.csv")
# data = np.loadtxt(df)
fname = env.dataset([f for f in os.listdir(env.dataset()) if f.endswith(".wav")][0])
df = env.run("test_data.pkl")
if not os.path.exists(df):
    song_data_raw, source_sr = lr.load(fname)
    print "Got sampling rate {}, resampling to {} ...".format(source_sr, c.target_sr)
    song_data = lr.resample(song_data_raw, source_sr, c.target_sr, scale=True)
    song_data = song_data[:30000]
    np.save(open(df, "wb"), song_data)  # binary mode for np.save
else:
    song_data = np.load(open(df, "rb"))
data, data_denom = norm(song_data)
sess = tf.Session()
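The norm helper imported from conv_lib is not shown on this page. Judging only by how its two return values are used here (a normalized signal plus a denominator kept around to undo the scaling later), it is plausibly a peak normalization; a hypothetical stand-in:

import numpy as np

def norm(x):
    # Hypothetical: scale to unit peak and return the scale factor.
    denom = np.max(np.abs(x))
    return x / denom, denom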
Example 10: Env
# Required module import: from env import Env [as alias]
# Or: from env.Env import run [as alias]
import tensorflow as tf
from tensorflow.python.ops import variable_scope as vs
from conv_lib import SparseAcoustic, norm
from conv_model import ConvModel, restore_hidden
from env import Env
env = Env("piano")
data_source = []
for f in sorted(os.listdir(env.dataset())):
    if f.endswith(".wav"):
        data_source.append(env.dataset(f))
model_fname = env.run("model.ckpt")
batch_size = 30000
L = 150
filters_num = 100
target_sr = 3000
gamma = 1e-03
epochs = 2000
lrate = 1e-04
k = 8 # filter strides
avg_size = 5
sel = None
cm = ConvModel(batch_size, L, filters_num, k, avg_size, lrate, gamma)
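To put example 10's hyperparameters in physical units: at target_sr = 3000 Hz, each batch of 30000 samples covers 10 seconds of audio and each length-150 filter spans 50 ms:

batch_size, L, target_sr = 30000, 150, 3000
print(batch_size / float(target_sr))   # 10.0 seconds of audio per batch
print(L / float(target_sr))            # 0.05 s, i.e. 50 ms per filter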