This article collects typical usage examples of the Python attribute tensorflow.__version__. If you are wondering what tensorflow.__version__ does, or how to use it in your own code, the hand-picked examples below may help. You can also browse further usage examples from the tensorflow module.
The following 15 code examples of tensorflow.__version__ are listed, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
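Before the examples, a minimal hand-written sketch of reading and comparing the attribute (the `packaging` dependency is an assumption on my part; Example 3 below relies on the same `version.parse` helper):

import tensorflow as tf
from packaging import version

print("TensorFlow:", tf.__version__)  # e.g. "1.15.0" or "2.4.1"
if version.parse(tf.__version__) >= version.parse("2.0.0"):
    print("Running TensorFlow 2.x")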
Example 1: setupTF

# Required module: import tensorflow [as alias]
# Or: from tensorflow import __version__ [as alias]
def setupTF(self):
    """Initialize TensorFlow."""
    print('Python: ' + sys.version)
    print('Tensorflow: ' + tf.__version__)
    sess = tf.Session()  # TensorFlow session
    saver = tf.train.Saver(max_to_keep=3)  # Saver saves model to file
    modelDir = '../model/'
    latestSnapshot = tf.train.latest_checkpoint(modelDir)  # Is there a saved model?
    # If the model must be restored (for inference), there must be a snapshot
    if self.mustRestore and not latestSnapshot:
        raise Exception('No saved model found in: ' + modelDir)
    # Load the saved model if available
    if latestSnapshot:
        print('Init with stored values from ' + latestSnapshot)
        saver.restore(sess, latestSnapshot)
    else:
        print('Init with new values')
        sess.run(tf.global_variables_initializer())
    return (sess, saver)

Author: sushant097, Project: Handwritten-Line-Text-Recognition-using-Deep-Learning-with-Tensorflow, Lines of code: 22, Source file: Model.py
Example 2: reset_seeds

# Required module: import tensorflow [as alias]
# Or: from tensorflow import __version__ [as alias]
def reset_seeds(reset_graph_with_backend=None, verbose=1):
    if reset_graph_with_backend is not None:
        K = reset_graph_with_backend
        K.clear_session()
        tf.compat.v1.reset_default_graph()
        if verbose:
            print("KERAS AND TENSORFLOW GRAPHS RESET")

    np.random.seed(1)
    random.seed(2)
    if tf.__version__[0] == '2':
        tf.random.set_seed(3)
    else:
        tf.set_random_seed(3)
    if verbose:
        print("RANDOM SEEDS RESET")
Example 3: keras_should_run_eagerly

# Required module: import tensorflow [as alias]
# Or: from tensorflow import __version__ [as alias]
def keras_should_run_eagerly(request):
    """Fixture to run in graph mode and two eager modes.

    The modes are:
    - Graph mode
    - TensorFlow eager and Keras eager
    - TensorFlow eager and Keras not eager

    The `tf.context` sets graph/eager mode for TensorFlow. The yield is True if Keras
    should run eagerly.
    """
    if request.param == "graph":
        if version.parse(tf.__version__) >= version.parse("2"):
            pytest.skip("Skipping graph mode for TensorFlow 2+.")
        with context.graph_mode():
            yield
    else:
        with context.eager_mode():
            yield request.param == "tf_keras_eager"
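A hedged sketch of how such a fixture might be consumed in a test (the fixture would also need to be registered, e.g. in conftest.py via @pytest.fixture(params=[...]); build_model, x_train, and y_train are placeholders, not names from the source project):

def test_fit(keras_should_run_eagerly):
    model = build_model()  # placeholder model factory
    model.compile("adam", "mse", run_eagerly=keras_should_run_eagerly)
    model.fit(x_train, y_train, epochs=1)  # placeholder data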
Example 4: main

# Required module: import tensorflow [as alias]
# Or: from tensorflow import __version__ [as alias]
def main(_):
    print(FLAGS)
    print(tf.__version__, "==tensorflow version==")

    init_checkpoint = os.path.join(FLAGS.buckets, FLAGS.init_checkpoint)
    checkpoint_dir = os.path.join(FLAGS.buckets, FLAGS.model_output)
    export_dir = os.path.join(FLAGS.buckets, FLAGS.export_dir)

    print(init_checkpoint, checkpoint_dir, export_dir)

    export.export_model(FLAGS,
                        init_checkpoint,
                        checkpoint_dir,
                        export_dir,
                        input_target=FLAGS.input_target)
Example 5: main

# Required module: import tensorflow [as alias]
# Or: from tensorflow import __version__ [as alias]
def main(_):
    print(FLAGS)
    print(tf.__version__, "==tensorflow version==")

    init_checkpoint = os.path.join(FLAGS.buckets, FLAGS.init_checkpoint)
    checkpoint_dir = os.path.join(FLAGS.buckets, FLAGS.model_output)
    export_dir = os.path.join(FLAGS.buckets, FLAGS.export_dir)

    print(init_checkpoint, checkpoint_dir, export_dir)

    export_model.export_model(FLAGS,
                              init_checkpoint,
                              checkpoint_dir,
                              export_dir,
                              input_target=FLAGS.input_target)
Example 6: main

# Required module: import tensorflow [as alias]
# Or: from tensorflow import __version__ [as alias]
def main(_):
    print(FLAGS)
    print(tf.__version__, "==tensorflow version==")

    init_checkpoint = os.path.join(FLAGS.buckets, FLAGS.init_checkpoint)
    checkpoint_dir = os.path.join(FLAGS.buckets, FLAGS.model_output)
    export_dir = os.path.join(FLAGS.buckets, FLAGS.export_dir, "sample_sequence")

    print(init_checkpoint, checkpoint_dir, export_dir)

    export.export_model(FLAGS,
                        init_checkpoint,
                        checkpoint_dir,
                        export_dir,
                        input_target=FLAGS.input_target,
                        predict_type='sample_sequence')
Example 7: setup

# Required module: import tensorflow [as alias]
# Or: from tensorflow import __version__ [as alias]
def setup(tf, order=None):
    """
    Sets up global variables (currently only the tensorflow version) to adapt to peculiarities of
    different tensorflow versions. This function should only be called before :py:class:`Model`
    creation, not for evaluation. Therefore, the tensorflow module *tf* must be passed:

    .. code-block:: python

        import tensorflow as tf
        import tfdeploy as td

        td.setup(tf)

        # ...

    Also, when *order* is not *None*, it is forwarded to :py:func:`optimize` for convenience.
    """
    global _tf_version_string, _tf_version
    _tf_version_string = tf.__version__
    _tf_version = _parse_tf_version(_tf_version_string)

    if order is not None:
        optimize(order)
Example 8: get_config

# Required module: import tensorflow [as alias]
# Or: from tensorflow import __version__ [as alias]
def get_config():
    """Get model config."""
    config = None
    if FLAGS.model == "small":
        config = SmallConfig()
    elif FLAGS.model == "medium":
        config = MediumConfig()
    elif FLAGS.model == "large":
        config = LargeConfig()
    elif FLAGS.model == "test":
        config = TestConfig()
    else:
        raise ValueError("Invalid model: %s" % FLAGS.model)
    if FLAGS.rnn_mode:
        config.rnn_mode = FLAGS.rnn_mode
    if FLAGS.num_gpus != 1 or tf.__version__ < "1.3.0":
        config.rnn_mode = BASIC
    return config
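One caveat: `tf.__version__ < "1.3.0"` compares version strings lexicographically, so "1.10.0" sorts before "1.3.0". A more robust variant of that check, following the LooseVersion pattern used in Examples 9-11 (a sketch assuming the FLAGS, config, and BASIC names from the example above, not the original project's code):

from distutils.version import LooseVersion
import tensorflow as tf

# Fall back to the basic RNN cell on multi-GPU runs or TensorFlow older than 1.3.0
if FLAGS.num_gpus != 1 or LooseVersion(tf.__version__) < LooseVersion("1.3.0"):
    config.rnn_mode = BASIC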
Example 9: __call__

# Required module: import tensorflow [as alias]
# Or: from tensorflow import __version__ [as alias]
def __call__(self):
    x_batch = self.model.make_input_placeholder()
    y_batch = self.model.make_label_placeholder()

    if LooseVersion(tf.__version__) < LooseVersion('1.0.0'):
        raise NotImplementedError()

    predictions = self.model.get_probs(x_batch)
    correct = tf.equal(tf.argmax(y_batch, axis=-1),
                       tf.argmax(predictions, axis=-1))

    return (x_batch, y_batch), (correct,)
Example 10: reduce_function

# Required module: import tensorflow [as alias]
# Or: from tensorflow import __version__ [as alias]
def reduce_function(op_func, input_tensor, axis=None, keepdims=None,
                    name=None, reduction_indices=None):
    """
    Handler function for TensorFlow's deprecation of keep_dims: tf 1.8
    and above expect keepdims, while tf 1.4 still requires keep_dims.
    :param op_func: the reduction function to wrap, e.g. tf.reduce_sum.
    :param input_tensor: The tensor to reduce. Should have numeric type.
    :param axis: The dimensions to reduce. If None (the default),
                 reduces all dimensions. Must be in the range
                 [-rank(input_tensor), rank(input_tensor)).
    :param keepdims: If true, retains reduced dimensions with length 1.
    :param name: A name for the operation (optional).
    :param reduction_indices: The old (deprecated) name for axis.
    :return: the same value op_func would return.
    """
    if LooseVersion(tf.__version__) < LooseVersion('1.8.0'):
        warning = "Running on tensorflow version " + \
                  LooseVersion(tf.__version__).vstring + \
                  ". Support for this version in CleverHans is deprecated " + \
                  "and may be removed on or after 2019-01-26"
        warnings.warn(warning)
        out = op_func(input_tensor, axis=axis,
                      keep_dims=keepdims, name=name,
                      reduction_indices=reduction_indices)
    else:
        out = op_func(input_tensor, axis=axis,
                      keepdims=keepdims, name=name,
                      reduction_indices=reduction_indices)
    return out
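A brief usage sketch on TensorFlow 1.x (the placeholder shape is an arbitrary assumption):

x = tf.placeholder(tf.float32, shape=(None, 10))
# Sum over the feature axis and keep the reduced dimension; the wrapper
# passes keepdims or keep_dims depending on the installed version.
total = reduce_function(tf.reduce_sum, x, axis=1, keepdims=True)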
Example 11: softmax_cross_entropy_with_logits

# Required module: import tensorflow [as alias]
# Or: from tensorflow import __version__ [as alias]
def softmax_cross_entropy_with_logits(sentinel=None,
                                      labels=None,
                                      logits=None,
                                      dim=-1):
    """
    Wrapper around tf.nn.softmax_cross_entropy_with_logits_v2 to handle
    the deprecation warning.
    """
    # Make sure that all arguments were passed as named arguments.
    if sentinel is not None:
        name = "softmax_cross_entropy_with_logits"
        raise ValueError("Only call `%s` with "
                         "named arguments (labels=..., logits=..., ...)"
                         % name)
    if labels is None or logits is None:
        raise ValueError("Both labels and logits must be provided.")

    try:
        labels = tf.stop_gradient(labels)
        loss = tf.nn.softmax_cross_entropy_with_logits_v2(
            labels=labels, logits=logits, dim=dim)
    except AttributeError:
        warning = "Running on tensorflow version " + \
                  LooseVersion(tf.__version__).vstring + \
                  ". Support for this version in CleverHans is deprecated " + \
                  "and may be removed on or after 2019-01-26"
        warnings.warn(warning)
        loss = tf.nn.softmax_cross_entropy_with_logits(
            labels=labels, logits=logits, dim=dim)

    return loss
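A brief usage sketch on TensorFlow 1.x (the placeholder shapes are arbitrary assumptions):

logits = tf.placeholder(tf.float32, shape=(None, 10))
labels = tf.placeholder(tf.float32, shape=(None, 10))
# Works whether or not the installed version provides the _v2 op
loss = tf.reduce_mean(softmax_cross_entropy_with_logits(labels=labels, logits=logits))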
Example 12: tf_later_than

# Required module: import tensorflow [as alias]
# Or: from tensorflow import __version__ [as alias]
def tf_later_than(v):
    return LooseVersion(tf.__version__) > LooseVersion(v)
Example 13: tf_equal_to

# Required module: import tensorflow [as alias]
# Or: from tensorflow import __version__ [as alias]
def tf_equal_to(v):
    return tf.__version__ == v
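These helpers can gate version-specific code paths, mirroring the branch in Example 2 (the threshold below is illustrative, not from the source project):

if tf_later_than('2'):
    tf.random.set_seed(0)   # TF 2.x API
else:
    tf.set_random_seed(0)   # TF 1.x API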
Example 14: main

# Required module: import tensorflow [as alias]
# Or: from tensorflow import __version__ [as alias]
def main(unused_argv):
    logging.set_verbosity(tf.logging.INFO)
    print("tensorflow version: %s" % tf.__version__)
    evaluate()
Example 15: http_get

# Required module: import tensorflow [as alias]
# Or: from tensorflow import __version__ [as alias]
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "transformers/{}; python/{}".format(__version__, sys.version.split()[0])
    if is_torch_available():
        ua += "; torch/{}".format(torch.__version__)
    if is_tf_available():
        ua += "; tensorflow/{}".format(tf.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
        disable=bool(logger.getEffectiveLevel() == logging.NOTSET),
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
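A brief usage sketch (the URL is a placeholder; the function itself is responsible for adding the tensorflow/{tf.__version__} fragment to the user-agent header):

import tempfile

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    http_get("https://example.com/model.bin", tmp)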