This article collects typical usage examples of the Python function tensorflow.python.framework.ops.reset_default_graph. If you have been wondering what exactly reset_default_graph does and how to use it, the curated code examples here may help.
The following shows 15 code examples of the reset_default_graph function, ordered by popularity.
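Before the examples, here is a minimal sketch of the basic pattern (a hedged illustration assuming TensorFlow 1.x graph mode; on TensorFlow 2.x the call lives under tf.compat.v1 and is deprecated in favor of building separate tf.Graph objects):

import tensorflow as tf  # TF 1.x; on TF 2.x use tf.compat.v1 instead

# Build some nodes in the implicit default graph.
a = tf.constant(1.0, name='a')
assert a.graph is tf.get_default_graph()

# Discard every node in the default graph and start from an empty one.
# Caution: sessions and tensors created against the old graph become invalid.
tf.reset_default_graph()

b = tf.constant(2.0, name='a')  # the name 'a' is available again in the fresh graph
with tf.Session() as sess:
    print(sess.run(b))  # prints 2.0

tf.reset_default_graph is the public alias of ops.reset_default_graph used throughout the examples below.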
Example 1: clear_session
def clear_session():
    global _SESSION
    global _LEARNING_PHASE
    reset_default_graph()
    reset_uids()
    _SESSION = None
    _LEARNING_PHASE = tf.placeholder(dtype='uint8', name='keras_learning_phase')
Example 2: testSimpleCodeView
def testSimpleCodeView(self):
  ops.reset_default_graph()
  opts = model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS.copy()
  outfile = os.path.join(test.get_temp_dir(), 'dump')
  opts['output'] = 'file:outfile=' + outfile
  opts['account_type_regexes'] = ['.*']
  opts['show_name_regexes'] = ['.*model_analyzer_testlib.*']
  opts['account_displayed_op_only'] = False
  # TODO(xpan): Test 'micros'. Since the execution time changes each run,
  # it's a bit difficult to test it now.
  opts['select'] = [
      'bytes', 'params', 'float_ops', 'num_hidden_ops', 'device',
      'input_shapes'
  ]

  with session.Session() as sess:
    x = lib.BuildSmallModel()

    sess.run(variables.global_variables_initializer())
    run_meta = config_pb2.RunMetadata()
    _ = sess.run(x,
                 options=config_pb2.RunOptions(
                     trace_level=config_pb2.RunOptions.FULL_TRACE),
                 run_metadata=run_meta)

    model_analyzer.print_model_analysis(
        sess.graph, run_meta, tfprof_cmd='code', tfprof_options=opts)

    with gfile.Open(outfile, 'r') as f:
      # pylint: disable=line-too-long
      self.assertEqual(
          'node name | output bytes | # parameters | # float_ops | assigned devices | input',
          f.read()[0:80])
Example 3: _train
def _train(self, checkpoint_path, layout_optimizer=False, restore=False):
  ops.reset_default_graph()
  graph = ops.get_default_graph()
  with session.Session(
      config=get_config(layout_optimizer), graph=graph) as sess:
    batch = 2
    height = 6
    width = 7
    input_channels = 3
    shape = [batch, height, width, input_channels]
    image = array_ops.placeholder(dtype='float32', shape=shape)
    conv1 = conv_layers.conv2d(image, 32, [3, 3])
    conv2 = conv_layers.conv2d(conv1, 32, [3, 3])
    optimizer = gradient_descent.GradientDescentOptimizer(0.01)
    loss = math_ops.reduce_mean(conv2)
    train_op = optimizer.minimize(loss)
    saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2)

    if restore:
      saver.restore(sess, checkpoint_path)
    else:
      sess.run(variables.global_variables_initializer())

    np.random.seed(0)
    for _ in range(2):
      image_val = np.random.rand(*shape).astype(np.float32)
      sess.run([loss, train_op], feed_dict={image: image_val})

    if restore:
      all_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
      all_vars_values = [var.eval(session=sess) for var in all_vars]
      return all_vars_values
    else:
      saver.save(sess, checkpoint_path)
Example 4: testCodeViewLeafGraphNode
def testCodeViewLeafGraphNode(self):
  ops.reset_default_graph()
  opts = model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS.copy()
  opts['account_type_regexes'] = ['.*']
  opts['account_displayed_op_only'] = False
  opts['select'] = [
      'bytes', 'params', 'float_ops', 'device'
  ]
  opts['output'] = 'none'

  with session.Session() as sess:
    x = lib.BuildSmallModel()

    sess.run(variables.global_variables_initializer())
    run_meta = config_pb2.RunMetadata()
    _ = sess.run(x,
                 options=config_pb2.RunOptions(
                     trace_level=config_pb2.RunOptions.FULL_TRACE),
                 run_metadata=run_meta)

    tfprof_node = model_analyzer.print_model_analysis(
        sess.graph, run_meta, tfprof_cmd='code', tfprof_options=opts)

    leaf = tfprof_node
    while leaf.children:
      self.assertEqual(0, len(leaf.graph_nodes))
      leaf = leaf.children[0]
    self.assertEqual(1, len(leaf.graph_nodes))
Example 5: testAdvisor
def testAdvisor(self):
  ops.reset_default_graph()
  with session.Session() as sess:
    x = lib.BuildFullModel()

    sess.run(variables.global_variables_initializer())
    run_meta = config_pb2.RunMetadata()
    _ = sess.run(
        x,
        options=config_pb2.RunOptions(
            trace_level=config_pb2.RunOptions.FULL_TRACE),
        run_metadata=run_meta)

    advice_pb = model_analyzer.advise(sess.graph, run_meta)
    self.assertTrue('AcceleratorUtilizationChecker' in advice_pb.checkers)
    self.assertTrue('ExpensiveOperationChecker' in advice_pb.checkers)
    self.assertTrue('OperationChecker' in advice_pb.checkers)

    checker = advice_pb.checkers['AcceleratorUtilizationChecker']
    if test.is_gpu_available():
      self.assertGreater(len(checker.reports), 0)
    else:
      self.assertEqual(len(checker.reports), 0)
    checker = advice_pb.checkers['ExpensiveOperationChecker']
    self.assertGreater(len(checker.reports), 0)
Example 6: load_policy
def load_policy(cls, policy_dict_path, tf_generator, network_config=None):
    """
    For when we only need to load a policy for the forward pass. For instance, to run on the robot from
    a checkpointed policy.
    """
    from tensorflow.python.framework import ops
    ops.reset_default_graph()  # we need to destroy the default graph before re_init or checkpoint won't restore.
    pol_dict = pickle.load(open(policy_dict_path, "rb"))
    tf_map = tf_generator(dim_input=pol_dict['deg_obs'], dim_output=pol_dict['deg_action'],
                          batch_size=1, network_config=network_config)

    sess = tf.Session()
    init_op = tf.initialize_all_variables()
    sess.run(init_op)
    saver = tf.train.Saver()
    check_file = pol_dict['checkpoint_path_tf']
    saver.restore(sess, check_file)

    device_string = pol_dict['device_string']

    cls_init = cls(pol_dict['deg_action'], tf_map.get_input_tensor(), tf_map.get_output_op(), np.zeros((1,)),
                   sess, device_string)
    cls_init.chol_pol_covar = pol_dict['chol_pol_covar']
    cls_init.scale = pol_dict['scale']
    cls_init.bias = pol_dict['bias']
    cls_init.x_idx = pol_dict['x_idx']
    return cls_init
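The inline comment in Example 6 is the crux: tf.train.Saver.restore matches variables by name against the current graph, so stale nodes left over from an earlier build can shadow or duplicate the names the checkpoint expects. A minimal sketch of the reset-then-restore pattern (hypothetical variable name and checkpoint path, TF 1.x API):

import tensorflow as tf

# Whatever was built before is discarded, so the rebuilt graph's variable
# names line up exactly with the names stored in the checkpoint.
tf.reset_default_graph()

w = tf.get_variable('w', shape=[10, 10])  # must match the saved variable
saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, '/tmp/model.ckpt')  # hypothetical checkpoint path
    print(sess.run(w).shape)  # (10, 10)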
Example 7: testTimeline
def testTimeline(self):
  ops.reset_default_graph()
  outfile = os.path.join(test.get_temp_dir(), 'timeline')
  opts = (builder(builder.trainable_variables_parameter())
          .with_max_depth(100000)
          .with_step(0)
          .with_timeline_output(outfile)
          .with_accounted_types(['.*']).build())

  with session.Session() as sess:
    x = lib.BuildFullModel()

    sess.run(variables.global_variables_initializer())
    run_meta = config_pb2.RunMetadata()
    _ = sess.run(
        x,
        options=config_pb2.RunOptions(
            trace_level=config_pb2.RunOptions.FULL_TRACE),
        run_metadata=run_meta)

    _ = model_analyzer.profile(
        sess.graph, run_meta, cmd='graph', options=opts)

    with gfile.Open(outfile, 'r') as f:
      # Test that a json file is created.
      # TODO(xpan): tfprof Timeline isn't quite correct on Windows.
      # Investigate why.
      if os.name != 'nt':
        self.assertLess(1000, len(f.read()))
      else:
        self.assertLess(1, len(f.read()))
Example 8: testSelectEverything
def testSelectEverything(self):
  ops.reset_default_graph()
  opts = model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS
  outfile = os.path.join(test.get_temp_dir(), 'dump')
  opts['output'] = 'file:outfile=' + outfile
  opts['account_type_regexes'] = ['.*']
  opts['select'] = [
      'bytes', 'params', 'float_ops', 'occurrence',
      'device', 'op_types'
  ]

  with session.Session() as sess, ops.device('/cpu:0'):
    x = lib.BuildSmallModel()

    sess.run(variables.global_variables_initializer())
    run_meta = config_pb2.RunMetadata()
    _ = sess.run(x,
                 options=config_pb2.RunOptions(
                     trace_level=config_pb2.RunOptions.FULL_TRACE),
                 run_metadata=run_meta)

    model_analyzer.print_model_analysis(
        sess.graph, run_meta, tfprof_options=opts)

    with gfile.Open(outfile, 'r') as f:
      # pylint: disable=line-too-long
      self.assertEqual(
'node name | # parameters | # float_ops | output bytes | assigned devices | op types\n_TFProfRoot (--/451 params, --/10.44k flops, --/5.28KB, _kTFScopeParent)\n Conv2D (0/0 params, 5.83k/5.83k flops, 432B/432B, /job:localhost/replica:0/task:0/cpu:0, /job:localhost/replica:0/task:0/cpu:0|Conv2D)\n Conv2D_1 (0/0 params, 4.61k/4.61k flops, 384B/384B, /job:localhost/replica:0/task:0/cpu:0, /job:localhost/replica:0/task:0/cpu:0|Conv2D)\n DW (3x3x3x6, 162/162 params, 0/0 flops, 648B/1.30KB, /job:localhost/replica:0/task:0/cpu:0, /job:localhost/replica:0/task:0/cpu:0|VariableV2|_trainable_variables)\n DW/Assign (0/0 params, 0/0 flops, 0B/0B, Assign)\n DW/Initializer (0/0 params, 0/0 flops, 0B/0B, _kTFScopeParent)\n DW/Initializer/random_normal (0/0 params, 0/0 flops, 0B/0B, Add)\n DW/Initializer/random_normal/RandomStandardNormal (0/0 params, 0/0 flops, 0B/0B, RandomStandardNormal)\n DW/Initializer/random_normal/mean (0/0 params, 0/0 flops, 0B/0B, Const)\n DW/Initializer/random_normal/mul (0/0 params, 0/0 flops, 0B/0B, Mul)\n DW/Initializer/random_normal/shape (0/0 params, 0/0 flops, 0B/0B, Const)\n DW/Initializer/random_normal/stddev (0/0 params, 0/0 flops, 0B/0B, Const)\n DW/read (0/0 params, 0/0 flops, 648B/648B, /job:localhost/replica:0/task:0/cpu:0, /job:localhost/replica:0/task:0/cpu:0|Identity)\n DW2 (2x2x6x12, 288/288 params, 0/0 flops, 1.15KB/2.30KB, /job:localhost/replica:0/task:0/cpu:0, /job:localhost/replica:0/task:0/cpu:0|VariableV2|_trainable_variables)\n DW2/Assign (0/0 params, 0/0 flops, 0B/0B, Assign)\n DW2/Initializer (0/0 params, 0/0 flops, 0B/0B, _kTFScopeParent)\n DW2/Initializer/random_normal (0/0 params, 0/0 flops, 0B/0B, Add)\n DW2/Initializer/random_normal/RandomStandardNormal (0/0 params, 0/0 flops, 0B/0B, RandomStandardNormal)\n DW2/Initializer/random_normal/mean (0/0 params, 0/0 flops, 0B/0B, Const)\n DW2/Initializer/random_normal/mul (0/0 params, 0/0 flops, 0B/0B, Mul)\n DW2/Initializer/random_normal/shape (0/0 params, 0/0 flops, 0B/0B, Const)\n DW2/Initializer/random_normal/stddev (0/0 params, 0/0 flops, 0B/0B, Const)\n DW2/read (0/0 params, 0/0 flops, 1.15KB/1.15KB, /job:localhost/replica:0/task:0/cpu:0, /job:localhost/replica:0/task:0/cpu:0|Identity)\n ScalarW (1, 1/1 params, 0/0 flops, 0B/0B, VariableV2|_trainable_variables)\n ScalarW/Assign (0/0 params, 0/0 flops, 0B/0B, Assign)\n ScalarW/Initializer (0/0 params, 0/0 flops, 0B/0B, _kTFScopeParent)\n ScalarW/Initializer/random_normal (0/0 params, 0/0 flops, 0B/0B, Add)\n ScalarW/Initializer/random_normal/RandomStandardNormal (0/0 params, 0/0 flops, 0B/0B, RandomStandardNormal)\n ScalarW/Initializer/random_normal/mean (0/0 params, 0/0 flops, 0B/0B, Const)\n ScalarW/Initializer/random_normal/mul (0/0 params, 0/0 flops, 0B/0B, Mul)\n ScalarW/Initializer/random_normal/shape (0/0 params, 0/0 flops, 0B/0B, Const)\n ScalarW/Initializer/random_normal/stddev (0/0 params, 0/0 flops, 0B/0B, Const)\n ScalarW/read (0/0 params, 0/0 flops, 0B/0B, Identity)\n init (0/0 params, 0/0 flops, 0B/0B, NoOp)\n zeros (0/0 params, 0/0 flops, 864B/864B, /job:localhost/replica:0/task:0/cpu:0, /job:localhost/replica:0/task:0/cpu:0|Const)\n',
f.read())
Example 9: testSelectEverything
def testSelectEverything(self):
  ops.reset_default_graph()
  outfile = os.path.join(test.get_temp_dir(), 'dump')
  opts = (builder(builder.trainable_variables_parameter())
          .with_file_output(outfile)
          .with_accounted_types(['.*'])
          .select(['params', 'float_ops', 'occurrence', 'device', 'op_types',
                   'input_shapes']).build())

  rewriter_config = rewriter_config_pb2.RewriterConfig(
      disable_model_pruning=True)
  graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
  config = config_pb2.ConfigProto(graph_options=graph_options)
  with session.Session(config=config) as sess, ops.device('/device:CPU:0'):
    x = lib.BuildSmallModel()

    sess.run(variables.global_variables_initializer())
    run_meta = config_pb2.RunMetadata()
    _ = sess.run(x,
                 options=config_pb2.RunOptions(
                     trace_level=config_pb2.RunOptions.FULL_TRACE),
                 run_metadata=run_meta)

    model_analyzer.profile(
        sess.graph, run_meta, options=opts)
Example 10: testBasic
def testBasic(self):
  base_path = test.test_src_dir_path(SESSION_BUNDLE_PATH)
  ops.reset_default_graph()
  sess, meta_graph_def = session_bundle.load_session_bundle_from_path(
      base_path,
      target="",
      config=config_pb2.ConfigProto(device_count={"CPU": 2}))

  self.assertTrue(sess)
  asset_path = os.path.join(base_path, constants.ASSETS_DIRECTORY)
  with sess.as_default():
    path1, path2 = sess.run(["filename1:0", "filename2:0"])
    self.assertEqual(
        compat.as_bytes(os.path.join(asset_path, "hello1.txt")), path1)
    self.assertEqual(
        compat.as_bytes(os.path.join(asset_path, "hello2.txt")), path2)

    collection_def = meta_graph_def.collection_def

    signatures_any = collection_def[constants.SIGNATURES_KEY].any_list.value
    self.assertEquals(len(signatures_any), 1)

    signatures = manifest_pb2.Signatures()
    signatures_any[0].Unpack(signatures)
    self._checkRegressionSignature(signatures, sess)
    self._checkNamedSignatures(signatures, sess)
Example 11: setUp
def setUp(self):
  ops.reset_default_graph()
  dim = 1
  num = 3
  with ops.name_scope('some_scope'):
    # Basically from 0 to dim*num-1.
    flat_data = math_ops.linspace(0.0, dim * num - 1, dim * num)
    bias = variables.Variable(
        array_ops.reshape(flat_data, (num, dim)), name='bias')
  save = saver.Saver([bias])
  with self.test_session() as sess:
    variables.global_variables_initializer().run()
    self.bundle_file = os.path.join(test.get_temp_dir(), 'bias_checkpoint')
    save.save(sess, self.bundle_file)

  self.new_class_vocab_file = os.path.join(
      test.test_src_dir_path(_TESTDATA_PATH), 'keyword_new.txt')
  self.old_class_vocab_file = os.path.join(
      test.test_src_dir_path(_TESTDATA_PATH), 'keyword.txt')
  self.init_val = 42

  def _init_val_initializer(shape, dtype=None, partition_info=None):
    del dtype, partition_info  # Unused by this unit-testing initializer.
    return array_ops.tile(
        constant_op.constant([[self.init_val]], dtype=dtypes.float32), shape)

  self.initializer = _init_val_initializer
Example 12: testBasics
def testBasics(self):
  ops.reset_default_graph()
  outfile = os.path.join(test.get_temp_dir(), "dump")
  opts = builder(builder.time_and_memory()
                ).with_file_output(outfile).build()

  x = lib.BuildFullModel()

  profile_str = None
  profile_step100 = os.path.join(test.get_temp_dir(), "profile_100")
  with profile_context.ProfileContext(test.get_temp_dir()) as pctx:
    pctx.add_auto_profiling("op", options=opts, profile_steps=[15, 50, 100])
    with session.Session() as sess:
      self.evaluate(variables.global_variables_initializer())
      total_steps = 101
      for i in range(total_steps):
        self.evaluate(x)
        if i == 14 or i == 49:
          self.assertTrue(gfile.Exists(outfile))
          gfile.Remove(outfile)
        if i == 99:
          self.assertTrue(gfile.Exists(profile_step100))
          with gfile.Open(outfile, "r") as f:
            profile_str = f.read()
          gfile.Remove(outfile)

    self.assertEqual(set([15, 50, 100]), set(pctx.get_profiles("op").keys()))

  with lib.ProfilerFromFile(
      os.path.join(test.get_temp_dir(), "profile_100")) as profiler:
    profiler.profile_operations(options=opts)
    with gfile.Open(outfile, "r") as f:
      self.assertEqual(profile_str, f.read())
Example 13: test_graph_replace_gradients
def test_graph_replace_gradients(self):
  ops.reset_default_graph()
  w = variables.VariableV1(0.0, name="w")
  y = math_ops.multiply(math_ops.multiply(w, w, name="mul1"), w, name="mul2")
  g = gradients_impl.gradients(y, w, name="grad")[0]

  # Extract the operations.
  replacement_ts = {w.value(): g}
  original_mul1_grad = (ops.get_default_graph().
                        get_operation_by_name("grad/mul1_grad/Mul_1"))

  # Should not raise exception.
  res = ge.graph_replace(g, replacement_ts, dst_scope="res")

  # Extract the operations after graph_replace.
  result_mul1_grad = (ops.get_default_graph().
                      get_operation_by_name("res/grad/mul1_grad/Mul_1"))

  # Make sure _original_ops are as expected.
  self.assertEqual(original_mul1_grad._original_op.name, u"mul1")
  self.assertEqual(result_mul1_grad._original_op.name, u"res/mul1")
  self.assertNotEqual(res.name, g.name)
  with session.Session() as sess:
    sess.run(variables.global_variables_initializer())
    g_val, res_val = sess.run([g, res])
  self.assertNear(g_val, 0.0, ERROR_TOLERANCE)
  self.assertNear(res_val, 0.0, ERROR_TOLERANCE)
Example 14: generate_testdata
def generate_testdata(self):
  ops.reset_default_graph()
  sess = session.Session()
  placeholder = array_ops.placeholder(dtypes.string)
  summary_tensor = text_summary.text_summary('message', placeholder)
  vector_summary = text_summary.text_summary('vector', placeholder)

  run_names = ['fry', 'leela']
  for run_name in run_names:
    subdir = os.path.join(self.logdir, run_name)
    writer = summary.FileWriter(subdir)
    writer.add_graph(sess.graph)

    step = 0
    for gem in GEMS:
      message = run_name + ' *loves* ' + gem
      feed_dict = {placeholder: message}
      summ = sess.run(summary_tensor, feed_dict=feed_dict)
      writer.add_summary(summ, global_step=step)
      step += 1

    vector_message = ['one', 'two', 'three', 'four']
    summ = sess.run(vector_summary, feed_dict={placeholder: vector_message})
    writer.add_summary(summ)

    writer.close()
Example 15: testDropoutWrapperWithSeed
def testDropoutWrapperWithSeed(self):
  keep_some = 0.5
  random_seed.set_random_seed(2)
  ## Use parallel_iterations = 1 in both calls to
  ## _testDropoutWrapper to ensure the (per-time step) dropout is
  ## consistent across both calls.  Otherwise the seed may not end
  ## up being munged consistently across both graphs.
  res_standard_1 = self._testDropoutWrapper(
      input_keep_prob=keep_some,
      output_keep_prob=keep_some,
      state_keep_prob=keep_some,
      seed=10,
      parallel_iterations=1)
  # Clear away the graph and the test session (which keeps variables around)
  ops.reset_default_graph()
  self._ClearCachedSession()
  random_seed.set_random_seed(2)
  res_standard_2 = self._testDropoutWrapper(
      input_keep_prob=keep_some,
      output_keep_prob=keep_some,
      state_keep_prob=keep_some,
      seed=10,
      parallel_iterations=1)

  self.assertAllClose(res_standard_1[0], res_standard_2[0])
  self.assertAllClose(res_standard_1[1].c, res_standard_2[1].c)
  self.assertAllClose(res_standard_1[1].h, res_standard_2[1].h)
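Example 15 works because op-level random seeds in TF 1.x are derived from the graph-level seed together with the state of the graph at op-creation time; resetting the graph and replaying the same build with the same set_random_seed therefore reproduces the same sequence of op seeds. A minimal standalone sketch of that idea (a hedged illustration with assumed values, TF 1.x API):

import tensorflow as tf

def sample_once():
    tf.reset_default_graph()        # fresh graph, so op seeds derive identically
    tf.set_random_seed(2)           # same graph-level seed on every call
    x = tf.random_uniform([3], seed=10)  # same op-level seed on every call
    with tf.Session() as sess:
        return sess.run(x)

# Two identically seeded, identically built graphs yield identical draws.
assert (sample_once() == sample_once()).all()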