This article collects typical usage examples of the Python function tensorflow.reset_default_graph. If you have been wondering what reset_default_graph does, when to call it, or how to use it in practice, the curated examples below should help.
The 15 code examples below demonstrate reset_default_graph and are sorted by popularity by default. You can upvote the examples you find useful; your votes help the system recommend better Python code examples.
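Before the individual examples, here is a minimal sketch of the typical pattern (an illustrative sketch assuming the TensorFlow 1.x graph-mode API; under TensorFlow 2.x the same calls live under tf.compat.v1). reset_default_graph clears the current default graph, so it is normally called right before building a fresh model, for example when constructing several models in one process or at the start of a test's setUp method.

import tensorflow as tf

# Build two independent models in one process; resetting the default graph
# between iterations prevents ops and variables from piling up.
for hidden_units in [16, 32]:
    tf.reset_default_graph()  # drop all ops/variables from the previous iteration
    x = tf.placeholder(tf.float32, [None, 4], name='x')
    h = tf.layers.dense(x, hidden_units, activation=tf.nn.relu)
    y = tf.layers.dense(h, 1)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(sess.run(y, feed_dict={x: [[1., 2., 3., 4.]]}))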
Example 1: __init__
def __init__(self, params=params, dyn='FCC'):
    tf.reset_default_graph()
    data = self.sample_mog(params['batch_size'])
    noise = ds.Normal(tf.zeros(params['z_dim']),
                      tf.ones(params['z_dim'])).sample(params['batch_size'])
    # Construct generator and discriminator nets
    with slim.arg_scope([slim.fully_connected], weights_initializer=tf.orthogonal_initializer(gain=1.4)):
        samples = self.generator(noise, output_dim=params['x_dim'])
        real_score = self.discriminator(data)
        fake_score = self.discriminator(samples, reuse=True)
    # Saddle objective
    loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=real_score, labels=tf.ones_like(real_score)) +
        tf.nn.sigmoid_cross_entropy_with_logits(logits=fake_score, labels=tf.zeros_like(fake_score)))
    gen_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "generator")
    disc_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "discriminator")
    gen_shapes = [tuple(v.get_shape().as_list()) for v in gen_vars]
    disc_shapes = [tuple(v.get_shape().as_list()) for v in disc_vars]
    # Generator gradient
    g_opt = tf.train.GradientDescentOptimizer(learning_rate=params['gen_learning_rate'])
    g_grads = g_opt.compute_gradients(-loss, var_list=gen_vars)
    # Discriminator gradient
    d_opt = tf.train.GradientDescentOptimizer(learning_rate=params['disc_learning_rate'])
    d_grads = d_opt.compute_gradients(loss, var_list=disc_vars)
    # Squared norm of gradient: d/dx 1/2 ||F||^2 = J^T F
    grads_norm_sep = [tf.reduce_sum(g[0]**2) for g in g_grads + d_grads]
    grads_norm = 0.5 * tf.reduce_sum(grads_norm_sep)
    # Gradient of squared norm
    JTF = tf.gradients(grads_norm, xs=gen_vars + disc_vars)
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    self.params = params
    self.data = data
    self.samples = samples
    self.gen_vars = gen_vars
    self.disc_vars = disc_vars
    self.gen_shapes = gen_shapes
    self.disc_shapes = disc_shapes
    self.Fg = g_grads
    self.Fd = d_grads
    self.JTF = JTF
    self.sess = sess
    self.findiff_step = params['findiff_step']
    self.gamma = params['gamma']
    self.dyn = dyn
    if dyn == 'FCC':
        self.F = self.FCC
    else:
        self.F = self._F
Example 2: generate_testdata
def generate_testdata(self, include_text=True, logdir=None):
    tf.reset_default_graph()
    sess = tf.Session()
    placeholder = tf.placeholder(tf.string)
    summary_tensor = tf.summary.text('message', placeholder)
    vector_summary = tf.summary.text('vector', placeholder)
    scalar_summary = tf.summary.scalar('twelve', tf.constant(12))
    run_names = ['fry', 'leela']
    for run_name in run_names:
        subdir = os.path.join(logdir or self.logdir, run_name)
        writer = tf.summary.FileWriter(subdir)
        writer.add_graph(sess.graph)
        step = 0
        for gem in GEMS:
            message = run_name + ' *loves* ' + gem
            feed_dict = {
                placeholder: message,
            }
            if include_text:
                summ = sess.run(summary_tensor, feed_dict=feed_dict)
                writer.add_summary(summ, global_step=step)
            step += 1
        vector_message = ['one', 'two', 'three', 'four']
        if include_text:
            summ = sess.run(vector_summary, feed_dict={placeholder: vector_message})
            writer.add_summary(summ)
        summ = sess.run(scalar_summary, feed_dict={placeholder: []})
        writer.add_summary(summ)
        writer.close()
Example 3: setUp
def setUp(self):
    tf.reset_default_graph()
    self.m = AddModel()
    self.m._compile()
    rng = np.random.RandomState(0)
    self.x = rng.randn(10, 20)
    self.y = rng.randn(10, 20)
Example 4: tf_baseline_conv2d
def tf_baseline_conv2d():
    import tensorflow as tf
    import cntk.contrib.crosstalk.crosstalk_tensorflow as crtf
    ci = crtf.instance
    tf.reset_default_graph()
    x = tf.placeholder(tf.float32, [batch_size, num_chars, char_emb_dim])
    filter_bank = tf.get_variable("char_filter_bank",
                                  shape=[filter_width, char_emb_dim, num_filters],
                                  dtype=tf.float32)
    bias = tf.get_variable("char_filter_biases", shape=[num_filters], dtype=tf.float32)
    char_conv = tf.expand_dims(tf.transpose(tf.nn.conv1d(x, filter_bank, stride=1, padding='VALID') + bias, perm=[0, 2, 1]), -1)
    ci.watch(cstk.Conv2DArgs(W=crtf.find_trainable('char_filter_bank'), b=crtf.find_trainable('char_filter_biases')), 'conv2d', var_type=cstk.Conv2DAttr,
             attr=cstk.Conv2DAttr(filter_shape=(filter_width, char_emb_dim,), num_filters=num_filters))
    ci.watch(char_conv, 'conv2d_out', var_type=crtf.VariableType)  # note the output is transposed to NCHW
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        data = {x: input_data}
        ci.set_workdir(workdir)
        ci.set_data(sess, data)
        ci.fetch('conv2d_out', save=True)
        ci.fetch('conv2d', save=True)
        ci.assign('conv2d', load=True)
        assert ci.compare('conv2d_out')
        ci.reset()
        sess.close()
Example 5: rename
def rename(checkpoint, replace_from, replace_to, add_prefix, dry_run, force_prefix=False):
    import tensorflow as tf
    tf.reset_default_graph()
    with tf.Session() as sess:
        for var_name, _ in tf.contrib.framework.list_variables(checkpoint):
            # Load the variable
            var = tf.contrib.framework.load_variable(checkpoint, var_name)
            # Set the new name
            new_name = var_name
            if None not in [replace_from, replace_to]:
                new_name = new_name.replace(replace_from, replace_to)
            if add_prefix:
                if force_prefix or not new_name.startswith(add_prefix):
                    # Force the prefix, or add it if it is not present yet
                    new_name = add_prefix + new_name
            if dry_run:
                print('%s would be renamed to %s.' % (var_name, new_name))
            else:
                if var_name == new_name:
                    print('No change for {}'.format(var_name))
                else:
                    print('Renaming %s to %s.' % (var_name, new_name))
                # Rename the variable
                tf.Variable(var, name=new_name)
        if not dry_run:
            # Save the variables
            saver = tf.train.Saver()
            sess.run(tf.global_variables_initializer())
            saver.save(sess, checkpoint)
    tf.reset_default_graph()
Example 6: eval_snapshot
def eval_snapshot(envname, checkptfile, last_snapshot_idx, n_trajs, mode):
    import tensorflow as tf
    if mode == 'rltools':
        import h5py
        with h5py.File(checkptfile, 'r') as f:
            args = json.loads(f.attrs['args'])
    elif mode == 'rllab':
        params_file = os.path.join(checkptfile, 'params.json')
        with open(params_file, 'r') as df:
            args = json.load(df)
    env = envname2env(envname, args)
    bestidx = 0
    bestret = -np.inf
    bestevr = {}
    for idx in range((last_snapshot_idx - 10), (last_snapshot_idx + 1)):
        tf.reset_default_graph()
        minion = Evaluator(env, args, args['max_traj_len'] if mode == 'rltools' else
                           args['max_path_length'], n_trajs, False, mode)
        if mode == 'rltools':
            evr = minion(checkptfile, file_key='snapshots/iter%07d' % idx)
        elif mode == 'rllab':
            evr = minion(os.path.join(checkptfile, 'itr_{}.pkl'.format(idx)))
        if np.mean(evr['ret']) > bestret:
            bestret = np.mean(evr['ret'])
            bestevr = evr
            bestidx = idx
    return bestevr, bestidx
Example 7: setUp
def setUp(self):
    tf.reset_default_graph()
    rng = np.random.RandomState(0)
    X = rng.rand(20, 1) * 10
    Y = np.sin(X) + 0.9 * np.cos(X * 1.6) + rng.randn(*X.shape) * 0.8
    self.Xtest = rng.rand(10, 1) * 10
    m1 = GPflow.gpr.GPR(X, Y, kern=GPflow.kernels.RBF(1),
                        mean_function=GPflow.mean_functions.Constant())
    m2 = GPflow.vgp.VGP(X, Y, GPflow.kernels.RBF(1), likelihood=GPflow.likelihoods.Gaussian(),
                        mean_function=GPflow.mean_functions.Constant())
    m3 = GPflow.svgp.SVGP(X, Y, GPflow.kernels.RBF(1),
                          likelihood=GPflow.likelihoods.Gaussian(),
                          Z=X.copy(), q_diag=False,
                          mean_function=GPflow.mean_functions.Constant())
    m3.Z.fixed = True
    m4 = GPflow.svgp.SVGP(X, Y, GPflow.kernels.RBF(1),
                          likelihood=GPflow.likelihoods.Gaussian(),
                          Z=X.copy(), q_diag=False, whiten=True,
                          mean_function=GPflow.mean_functions.Constant())
    m4.Z.fixed = True
    m5 = GPflow.sgpr.SGPR(X, Y, GPflow.kernels.RBF(1),
                          Z=X.copy(),
                          mean_function=GPflow.mean_functions.Constant())
    m5.Z.fixed = True
    m6 = GPflow.sgpr.GPRFITC(X, Y, GPflow.kernels.RBF(1), Z=X.copy(),
                             mean_function=GPflow.mean_functions.Constant())
    m6.Z.fixed = True
    self.models = [m1, m2, m3, m4, m5, m6]
    for m in self.models:
        m.optimize(display=False, max_iters=300)
        print('.')  # stop travis timing out
Example 8: fit_em
def fit_em(X, initial_mus, max_steps, tol, min_covar=MIN_COVAR_DEFAULT):
    tf.reset_default_graph()
    N, D = X.shape
    K, Dmu = initial_mus.shape
    assert D == Dmu
    mus0 = initial_mus
    sigmas0 = np.tile(np.var(X, axis=0), (K, 1))
    alphas0 = np.ones(K) / K
    X = tf.constant(X)
    mus, sigmas, alphas = (tf.Variable(x, dtype='float64') for x in [mus0, sigmas0, alphas0])
    all_ll, resp = estep(X, mus, sigmas, alphas)
    cmus, csigmas, calphas = mstep(X, resp, min_covar=min_covar)
    update_mus_step = tf.assign(mus, cmus)
    update_sigmas_step = tf.assign(sigmas, csigmas)
    update_alphas_step = tf.assign(alphas, calphas)
    init_op = tf.initialize_all_variables()
    ll = prev_ll = -np.inf
    with tf.Session() as sess:
        sess.run(init_op)
        for i in range(max_steps):
            ll = sess.run(tf.reduce_mean(all_ll))
            sess.run((update_mus_step, update_sigmas_step, update_alphas_step))
            # print('EM iteration', i, 'log likelihood', ll)
            if abs(ll - prev_ll) < tol:
                break
            prev_ll = ll
        m, s, a = sess.run((mus, sigmas, alphas))
    return ll, m, s, a
Example 9: build
def build(self, configuration):
    tf.reset_default_graph()
    # --- specify input data
    self.inputs = tf.placeholder(tf.float32, [None, 28, 28, 1], name='x')
    self.labels = tf.placeholder(tf.float32, [None, 10], name='labels')
    # tf.summary.image('input', inputs, 3)
    # TODO add name scopes and summaries
    # --- specify layers of network
    # TODO try other strides for the conv layers
    # TODO try to get rid of the pooling layers
    conv1 = tf.layers.conv2d(inputs=self.inputs, filters=configuration[0], kernel_size=[5, 5], padding="same",
                             activation=tf.nn.relu, name='conv1')
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2, name='pool1')
    conv2 = tf.layers.conv2d(inputs=pool1, filters=configuration[1], kernel_size=[5, 5], padding="same",
                             activation=tf.nn.relu, name='conv2')
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2, name='pool2')
    flattened = tf.reshape(pool2, [-1, 7 * 7 * configuration[1]])
    dense = tf.layers.dense(inputs=flattened, units=1024, activation=tf.nn.relu, name='fc')
    logits = tf.layers.dense(inputs=dense, units=10, name='output')
    # --- specify cost function and how training is performed
    with tf.name_scope("train"):
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=self.labels, logits=logits)
        self.train_step = tf.train.AdamOptimizer(0.015).minimize(cross_entropy)
    # --- specify function to calculate accuracy
    with tf.name_scope("accuracy"):
        correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(self.labels, 1))
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.summary.scalar("accuracy", self.accuracy)
    self.summary = tf.summary.merge_all()
Example 10: run
def run(data_seed, dropout, input_noise, augmentation,
        test_phase=False, n_labeled=250, n_extra_unlabeled=0, model_type='mean_teacher'):
    minibatch_size = 100
    hyperparams = model_hyperparameters(model_type, n_labeled, n_extra_unlabeled)
    tf.reset_default_graph()
    model = Model(RunContext(__file__, data_seed))
    svhn = SVHN(n_labeled=n_labeled,
                n_extra_unlabeled=n_extra_unlabeled,
                data_seed=data_seed,
                test_phase=test_phase)
    model['ema_consistency'] = hyperparams['ema_consistency']
    model['max_consistency_cost'] = hyperparams['max_consistency_cost']
    model['apply_consistency_to_labeled'] = hyperparams['apply_consistency_to_labeled']
    model['training_length'] = hyperparams['training_length']
    model['student_dropout_probability'] = dropout
    model['teacher_dropout_probability'] = dropout
    model['input_noise'] = input_noise
    model['translate'] = augmentation
    training_batches = minibatching.training_batches(svhn.training,
                                                     minibatch_size,
                                                     hyperparams['n_labeled_per_batch'])
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(svhn.evaluation,
                                                                    minibatch_size)
    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)
    model.train(training_batches, evaluation_batches_fn)
Example 11: run
def run(test_phase, data_seed, n_labeled, training_length, rampdown_length):
    minibatch_size = 100
    n_labeled_per_batch = 100
    tf.reset_default_graph()
    model = Model(RunContext(__file__, data_seed))
    cifar = SVHN(n_labeled=n_labeled,
                 data_seed=data_seed,
                 test_phase=test_phase)
    model['ema_consistency'] = True
    model['max_consistency_cost'] = 0.0
    model['apply_consistency_to_labeled'] = False
    model['rampdown_length'] = rampdown_length
    model['training_length'] = training_length
    # Turn off augmentation
    model['translate'] = False
    model['flip_horizontally'] = False
    training_batches = minibatching.training_batches(cifar.training,
                                                     minibatch_size,
                                                     n_labeled_per_batch)
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(cifar.evaluation,
                                                                    minibatch_size)
    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)
    model.train(training_batches, evaluation_batches_fn)
Example 12: testBasic
def testBasic(self):
    base_path = tf.test.test_src_dir_path(
        "contrib/session_bundle/example/half_plus_two/00000123")
    tf.reset_default_graph()
    sess, meta_graph_def = session_bundle.load_session_bundle_from_path(
        base_path, target="", config=tf.ConfigProto(device_count={"CPU": 2}))
    self.assertTrue(sess)
    asset_path = os.path.join(base_path, constants.ASSETS_DIRECTORY)
    with sess.as_default():
        path1, path2 = sess.run(["filename1:0", "filename2:0"])
        self.assertEqual(
            compat.as_bytes(os.path.join(asset_path, "hello1.txt")), path1)
        self.assertEqual(
            compat.as_bytes(os.path.join(asset_path, "hello2.txt")), path2)
    collection_def = meta_graph_def.collection_def
    signatures_any = collection_def[constants.SIGNATURES_KEY].any_list.value
    self.assertEqual(len(signatures_any), 1)
    signatures = manifest_pb2.Signatures()
    signatures_any[0].Unpack(signatures)
    self._checkRegressionSignature(signatures, sess)
    self._checkNamedSigantures(signatures, sess)
Example 13: saveAndRestoreModel
def saveAndRestoreModel(self, fw_lstm_layer, bw_lstm_layer, sess, saver,
                        is_dynamic_rnn):
    """Saves and restores the model to mimic the most common use case.

    Args:
      fw_lstm_layer: The forward lstm layer, either a single lstm cell or a
        multi lstm cell.
      bw_lstm_layer: The backward lstm layer, either a single lstm cell or a
        multi lstm cell.
      sess: Old session.
      saver: Saver created by tf.compat.v1.train.Saver().
      is_dynamic_rnn: Use dynamic_rnn or not.

    Returns:
      A tuple containing:
      - Input tensor of the restored model.
      - Prediction tensor of the restored model.
      - Output tensor, which is the softmax result of the prediction tensor.
      - New session of the restored model.
    """
    model_dir = tempfile.mkdtemp()
    saver.save(sess, model_dir)

    # Reset the graph.
    tf.reset_default_graph()
    x, prediction, output_class = self.buildModel(fw_lstm_layer, bw_lstm_layer,
                                                  is_dynamic_rnn)
    new_sess = tf.Session(config=CONFIG)
    saver = tf.train.Saver()
    saver.restore(new_sess, model_dir)
    return x, prediction, output_class, new_sess
Example 14: setUp
def setUp(self):
    tf.reset_default_graph()
    self.x = tf.placeholder(tf.float64)
    self.x_np = np.random.randn(10)
    self.session = tf.Session()
    self.transforms = [C() for C in GPflow.transforms.Transform.__subclasses__()]
    self.transforms.append(GPflow.transforms.Logistic(7.3, 19.4))
Example 15: train
def train(config, inputs, args):
    gan = setup_gan(config, inputs, args)
    sampler = lookup_sampler(args.sampler or TrainingVideoFrameSampler)(gan)
    samples = 0
    #metrics = [batch_accuracy(gan.inputs.x, gan.uniform_sample), batch_diversity(gan.uniform_sample)]
    #sum_metrics = [0 for metric in metrics]
    for i in range(args.steps):
        gan.step()
        if args.action == 'train' and i % args.save_every == 0 and i > 0:
            print("saving " + save_file)
            gan.save(save_file)
        if i % args.sample_every == 0:
            sample_file = "samples/%06d.png" % (samples)
            samples += 1
            sampler.sample(sample_file, args.save_samples)
        #if i > args.steps * 9.0/10:
        #    for k, metric in enumerate(gan.session.run(metrics)):
        #        print("Metric "+str(k)+" "+str(metric))
        #        sum_metrics[k] += metric
    tf.reset_default_graph()
    return []  #sum_metrics