This article collects typical usage examples of the tensorflow.compat.v1.float64 method in Python. If you are wondering how exactly to use v1.float64, what it does, or what it looks like in real code, the hand-picked method examples below may help. You can also explore further usage examples from its parent module, tensorflow.compat.v1.
A total of 15 code examples of v1.float64 are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
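Before the examples, here is a minimal sketch (not taken from any of the projects below) showing the two import styles that the per-example comments refer to, together with a typical cast to float64:

# Minimal sketch: two equivalent ways to reach the float64 dtype via the compat API.
import numpy as np
import tensorflow.compat.v1 as tf           # then refer to tf.float64
from tensorflow.compat.v1 import float64    # or import the dtype directly

x = tf.constant(np.arange(4), dtype=tf.int32)
x64 = tf.cast(x, dtype=tf.float64)          # upcast to double precision
assert x64.dtype == float64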
Example 1: two_class_log_likelihood
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import float64 [as alias]
def two_class_log_likelihood(predictions, labels, weights_fn=None):
  """Log-likelihood for two class classification with 0/1 labels.

  Args:
    predictions: A float valued tensor of shape [`batch_size`]. Each
      component should be between 0 and 1.
    labels: An int valued tensor of shape [`batch_size`]. Each component
      should either be 0 or 1.
    weights_fn: unused.

  Returns:
    A pair, with the average log likelihood in the first component.
  """
  del weights_fn
  float_predictions = tf.cast(tf.squeeze(predictions), dtype=tf.float64)
  batch_probs = tf.stack([1. - float_predictions, float_predictions], axis=-1)
  int_labels = tf.cast(tf.squeeze(labels), dtype=tf.int32)
  onehot_targets = tf.cast(tf.one_hot(int_labels, 2), dtype=tf.float64)
  chosen_probs = tf.einsum(
      "ij,ij->i", batch_probs, onehot_targets, name="chosen_probs")
  avg_log_likelihood = tf.reduce_mean(tf.log(chosen_probs))
  return avg_log_likelihood, tf.constant(1.0)
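A hedged usage sketch for the metric above, assuming TF1 graph mode and that `tf` is `tensorflow.compat.v1` (the toy values are illustrative):

# Illustrative call: probabilities for class 1 and matching 0/1 labels.
predictions = tf.constant([0.9, 0.2, 0.7])
labels = tf.constant([1, 0, 1])
avg_ll, _ = two_class_log_likelihood(predictions, labels)
with tf.Session() as sess:
  print(sess.run(avg_ll))  # mean of log(0.9), log(0.8) and log(0.7)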
Example 2: testTwoClassLogLikelihoodVersusOldImplementation
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import float64 [as alias]
def testTwoClassLogLikelihoodVersusOldImplementation(self):
  def alt_two_class_log_likelihood_impl(predictions, labels):
    float_labels = tf.cast(labels, dtype=tf.float64)
    float_predictions = tf.cast(tf.squeeze(predictions), dtype=tf.float64)
    # likelihood should be just p for class 1, and 1 - p for class 0.
    # signs is 1 for class 1, and -1 for class 0
    signs = 2 * float_labels - tf.ones_like(float_labels)
    # constant_term is 1 for class 0, and 0 for class 1.
    constant_term = tf.ones_like(float_labels) - float_labels
    likelihoods = constant_term + signs * float_predictions
    log_likelihoods = tf.log(likelihoods)
    avg_log_likelihood = tf.reduce_mean(log_likelihoods)
    return avg_log_likelihood

  predictions = np.random.rand(1, 10, 1)
  targets = np.random.randint(2, size=10)
  with self.test_session() as session:
    new_log_likelihood, _ = metrics.two_class_log_likelihood(
        predictions, targets)
    alt_log_likelihood = alt_two_class_log_likelihood_impl(
        predictions, targets)
    new_impl, alt_impl = session.run([new_log_likelihood, alt_log_likelihood])
    self.assertAlmostEqual(new_impl, alt_impl)
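The identity the alternative implementation relies on is worth making explicit: for a label y in {0, 1} and prediction p, the chosen likelihood is (1 - y) + (2y - 1) * p, which equals p when y = 1 and 1 - p when y = 0. A quick NumPy check of that arithmetic:

import numpy as np

p = np.array([0.9, 0.2])
y = np.array([1.0, 0.0])
likelihood = (1.0 - y) + (2.0 * y - 1.0) * p  # -> [0.9, 0.8], i.e. p and 1 - p
print(np.mean(np.log(likelihood)))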
Example 3: _verify_infiniteness_ops
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import float64 [as alias]
def _verify_infiniteness_ops(tf_op, name):
    """test operator infinity ops"""

    # Only float types are allowed in Tensorflow for isfinite and isinf
    # float16 is failing on cuda
    tf_dtypes = ["float32", "float64"]
    for tf_dtype in tf_dtypes:
        shape = (8, 8)
        data = np.random.uniform(size=shape).astype(tf_dtype)
        data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.infty
        data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.nan

        tf.reset_default_graph()
        in_data = tf.placeholder(tf_dtype, shape, name="in_data")
        tf_op(in_data, name=name)
        compare_tf_with_tvm([data], ['in_data:0'], '{}:0'.format(name))
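In the TVM test file this helper is driven by small per-op wrappers; a hedged sketch of what those call sites presumably look like (the wrapper names are assumptions):

# Assumed call sites: exercise both float32 and float64 paths for each op.
def test_forward_isinf():
    _verify_infiniteness_ops(tf.is_inf, "isinf")


def test_forward_isfinite():
    _verify_infiniteness_ops(tf.is_finite, "isfinite")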
Example 4: simulate
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import float64 [as alias]
def simulate(self, action):
  """Step the batch of environments.

  The results of the step can be accessed from the variables defined below.

  Args:
    action: Tensor holding the batch of actions to apply.

  Returns:
    Operation.
  """
  with tf.name_scope("environment/simulate"):
    if action.dtype in (tf.float16, tf.float32, tf.float64):
      action = tf.check_numerics(action, "action")

    def step(action):
      step_response = self._batch_env.step(action)
      # Current env doesn't return `info`, but EnvProblem does.
      # TODO(afrozm): The proper way to do this is to make T2TGymEnv return
      # an empty info return value.
      if len(step_response) == 3:
        (observ, reward, done) = step_response
      else:
        (observ, reward, done, _) = step_response
      return (observ, reward.astype(np.float32), done)

    observ, reward, done = tf.py_func(
        step, [action],
        [self.observ_dtype, tf.float32, tf.bool], name="step")
    reward = tf.check_numerics(reward, "reward")
    reward.set_shape((len(self),))
    done.set_shape((len(self),))
    with tf.control_dependencies([self._observ.assign(observ)]):
      return tf.identity(reward), tf.identity(done)
Example 5: get_sari
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import float64 [as alias]
def get_sari(source_ids, prediction_ids, target_ids, max_gram_size=4):
  """Computes the SARI scores from the given source, prediction and targets.

  Args:
    source_ids: A 2D tf.Tensor of size (batch_size, sequence_length)
    prediction_ids: A 2D tf.Tensor of size (batch_size, sequence_length)
    target_ids: A 3D tf.Tensor of size (batch_size, number_of_targets,
      sequence_length)
    max_gram_size: int. largest n-gram size we care about (e.g. 3 for unigrams,
      bigrams, and trigrams)

  Returns:
    A 4-tuple of 1D float Tensors of size (batch_size) for the SARI score and
    the keep, addition and deletion scores.
  """

  def get_sari_numpy(source_ids, prediction_ids, target_ids):
    """Iterate over elements in the batch and call the SARI function."""
    sari_scores = []
    keep_scores = []
    add_scores = []
    deletion_scores = []
    # Iterate over elements in the batch.
    for source_ids_i, prediction_ids_i, target_ids_i in zip(
        source_ids, prediction_ids, target_ids):
      sari, keep, add, deletion = get_sari_score(
          source_ids_i, prediction_ids_i, target_ids_i, max_gram_size,
          BETA_FOR_SARI_DELETION_F_MEASURE)
      sari_scores.append(sari)
      keep_scores.append(keep)
      add_scores.append(add)
      deletion_scores.append(deletion)
    return (np.asarray(sari_scores), np.asarray(keep_scores),
            np.asarray(add_scores), np.asarray(deletion_scores))

  sari, keep, add, deletion = tf.py_func(
      get_sari_numpy,
      [source_ids, prediction_ids, target_ids],
      [tf.float64, tf.float64, tf.float64, tf.float64])
  return sari, keep, add, deletion
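A hedged usage sketch with toy token ids, assuming the helper's dependencies (get_sari_score and the BETA constant) are importable alongside it:

# Toy inputs: batch of 1 with a single reference target (ids are arbitrary).
source = tf.constant([[1, 2, 3, 4, 0]], dtype=tf.int64)
prediction = tf.constant([[1, 2, 5, 4, 0]], dtype=tf.int64)
targets = tf.constant([[[1, 2, 5, 4, 0]]], dtype=tf.int64)  # (batch, n_targets, length)
sari, keep, add, deletion = get_sari(source, prediction, targets)
with tf.Session() as sess:
  print(sess.run([sari, keep, add, deletion]))  # four float64 vectors of shape (1,)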
Example 6: get_dtype
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import float64 [as alias]
def get_dtype(hparams):
  if hparams.activation_dtype == "float32":
    return tf.float32
  elif hparams.activation_dtype == "float64":
    return tf.float64
  elif hparams.activation_dtype == "bfloat16":
    return tf.bfloat16
  else:
    return None
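A hedged usage sketch; the hyperparameter container below is a stand-in for whatever hparams object the surrounding model code uses, since only an `activation_dtype` attribute is needed:

# Illustrative only: any object with an `activation_dtype` string attribute works.
class FakeHParams(object):
  activation_dtype = "float64"

assert get_dtype(FakeHParams()) == tf.float64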
Example 7: test_quantile_function
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import float64 [as alias]
def test_quantile_function(self):
  # Test that quantile function inverts cumulative.
  scale = tf.placeholder(tf.float64, [None])
  layer = self.subclass(scale, [1], dtype=tf.float64)
  with self.cached_session() as sess:
    sess.run(tf.global_variables_initializer())
    quantiles = np.array([1e-5, 1e-2, .1, .5, .6, .8])
    locations = layer._standardized_quantile(quantiles)
    locations = tf.constant(locations, tf.float64)
    values, = sess.run([layer._standardized_cumulative(locations)])
    self.assertAllClose(quantiles, values, rtol=1e-12, atol=0)
Example 8: testFloat64
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import float64 [as alias]
def testFloat64(self):
  # Given action values that are float64:
  action_values = tf.convert_to_tensor([1., 2., 4., 3.], dtype=tf.float64)
  epsilon = 0.1
  expected = [0.025, 0.025, 0.925, 0.025]
  result = policy_ops.epsilon_greedy(action_values, epsilon).probs
  with self.test_session() as sess:
    self.assertAllClose(sess.run(result), expected)
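The expected values follow from the epsilon-greedy rule: every action receives epsilon / 4 = 0.025, and the argmax action additionally receives 1 - epsilon = 0.9, giving 0.925. A quick NumPy check of that arithmetic:

import numpy as np

action_values = np.array([1., 2., 4., 3.], dtype=np.float64)
epsilon = 0.1
probs = np.full_like(action_values, epsilon / len(action_values))
probs[np.argmax(action_values)] += 1.0 - epsilon
print(probs)  # [0.025 0.025 0.925 0.025]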
Example 9: test_forward_matmul
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import float64 [as alias]
def test_forward_matmul():
    """MatMul op test"""
    _test_matmul(1, 3, 6, 'int32')
    _test_matmul(5, 3, 1, 'float64')
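_test_matmul itself is defined elsewhere in the same TVM test file; below is a hedged sketch of the kind of graph such a helper builds, inferred only from the call sites above (the name and signature are assumptions):

# Hypothetical helper: multiply an (i, j) by a (j, k) matrix of the given dtype
# and compare the TensorFlow result against the TVM-compiled one.
def _test_matmul_sketch(i, j, k, dtype):
    tf.reset_default_graph()
    a = tf.placeholder(dtype, (i, j), name="A")
    b = tf.placeholder(dtype, (j, k), name="B")
    tf.matmul(a, b, name="matmul")
    a_np = np.random.uniform(size=(i, j)).astype(dtype)
    b_np = np.random.uniform(size=(j, k)).astype(dtype)
    compare_tf_with_tvm([a_np, b_np], ["A:0", "B:0"], "matmul:0")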
Example 10: test_forward_unstack
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import float64 [as alias]
def test_forward_unstack():
    '''test unstack layer'''
    _test_unstack((6,), 0, 'int32')
    _test_unstack((2, 6), 1, 'float64')
    # negative axis
    _test_unstack((1, 4), -1, 'int32')
    _test_unstack((3, 6, 4), -2, 'float32')


#######################################################################
# Tile
# ----
Example 11: test_forward_tile
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import float64 [as alias]
def test_forward_tile():
    '''test Tile'''
    _test_tile((2, ), (3, ), "int32")
    _test_tile((2, 2), (2, 3), "float32")
    _test_tile((2, 4, 6), (6, 7, 8), "float64")


#######################################################################
# ClipByValue
# -----------
Example 12: test_forward_reverse_v2
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import float64 [as alias]
def test_forward_reverse_v2():
    """test ReverseV2"""
    _test_forward_reverse_v2((2, 3), 0, "int32")
    _test_forward_reverse_v2((2, 3, 5), 2, "float32")
    _test_forward_reverse_v2((2, 3, 5, 7), 1, "float32")
    _test_forward_reverse_v2((2, 3, 5), -1, "float64")
    _test_forward_reverse_v2((2, 3, 5), -3, "float64")
Example 13: _make_example
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import float64 [as alias]
def _make_example(input_ids, problem, input_feature_name="inputs"):
  """Make a tf.train.Example for the problem.

  features[input_feature_name] = input_ids

  Also fills in any other required features with dummy values.

  Args:
    input_ids: list<int>.
    problem: Problem.
    input_feature_name: name of feature for input_ids.

  Returns:
    tf.train.Example
  """
  features = {
      input_feature_name:
          tf.train.Feature(int64_list=tf.train.Int64List(value=input_ids))
  }

  # Fill in dummy values for any other required features that presumably
  # will not actually be used for prediction.
  data_fields, _ = problem.example_reading_spec()
  for fname, ftype in data_fields.items():
    if fname == input_feature_name:
      continue
    if not isinstance(ftype, tf.FixedLenFeature):
      # Only FixedLenFeatures are required
      continue
    if ftype.default_value is not None:
      # If there's a default value, no need to fill it in
      continue
    num_elements = functools.reduce(lambda acc, el: acc * el, ftype.shape, 1)
    if ftype.dtype in [tf.int32, tf.int64]:
      value = tf.train.Feature(
          int64_list=tf.train.Int64List(value=[0] * num_elements))
    if ftype.dtype in [tf.float32, tf.float64]:
      value = tf.train.Feature(
          float_list=tf.train.FloatList(value=[0.] * num_elements))
    if ftype.dtype == tf.bytes:
      value = tf.train.Feature(
          bytes_list=tf.train.BytesList(value=[""] * num_elements))
    tf.logging.info("Adding dummy value for feature %s as it is required by "
                    "the Problem.", fname)
    features[fname] = value
  return tf.train.Example(features=tf.train.Features(feature=features))
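A hedged sketch of how the resulting Example might then be used; the `problem` instance below is an assumption (any registered tensor2tensor Problem with an example_reading_spec would do):

# Illustrative only: build and serialize a request for a hypothetical problem.
example = _make_example([17, 42, 1], problem, input_feature_name="inputs")
serialized = example.SerializeToString()  # ready to feed to a serving request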
Example 14: op
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import float64 [as alias]
def op(
    name,
    data,
    bucket_count=None,
    display_name=None,
    description=None,
    collections=None,
):
    """Create a legacy histogram summary op.

    Arguments:
      name: A unique name for the generated summary node.
      data: A `Tensor` of any shape. Must be castable to `float64`.
      bucket_count: Optional positive `int`. The output will have this
        many buckets, except in two edge cases. If there is no data, then
        there are no buckets. If there is data but all points have the
        same value, then there is one bucket whose left and right
        endpoints are the same.
      display_name: Optional name for this summary in TensorBoard, as a
        constant `str`. Defaults to `name`.
      description: Optional long-form description for this summary, as a
        constant `str`. Markdown is supported. Defaults to empty.
      collections: Optional list of graph collections keys. The new
        summary op is added to these collections. Defaults to
        `[GraphKeys.SUMMARIES]`.

    Returns:
      A TensorFlow summary op.
    """
    # TODO(nickfelt): remove on-demand imports once dep situation is fixed.
    import tensorflow.compat.v1 as tf

    if display_name is None:
        display_name = name
    summary_metadata = metadata.create_summary_metadata(
        display_name=display_name, description=description
    )
    with tf.name_scope(name):
        tensor = _buckets(data, bucket_count=bucket_count)
        return tf.summary.tensor_summary(
            name="histogram_summary",
            tensor=tensor,
            collections=collections,
            summary_metadata=summary_metadata,
        )
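A hedged usage sketch for the summary op above; the variable being summarized and the log directory are illustrative assumptions:

# Illustrative only: histogram a weight matrix into 30 buckets and write it out.
weights = tf.get_variable("weights", shape=[128, 64])
summ = op("weights_histogram", weights, bucket_count=30,
          description="Distribution of layer weights.")
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter("/tmp/histogram_demo")
    writer.add_summary(sess.run(summ), global_step=0)
    writer.close()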
Example 15: test_forward_space_to_batch_nd
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import float64 [as alias]
def test_forward_space_to_batch_nd():
    # test cases: https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/space-to-batch-n-d
    _test_space_to_batch_nd(
        input_shape=[1, 2, 2, 1],
        block_shape=[2, 2],
        paddings=[[0, 0], [0, 0]]
    )
    _test_space_to_batch_nd(
        input_shape=[1, 2, 2, 3],
        block_shape=[2, 2],
        paddings=[[0, 0], [0, 0]]
    )
    _test_space_to_batch_nd(
        input_shape=[1, 4, 4, 1],
        block_shape=[2, 2],
        paddings=[[0, 0], [0, 0]]
    )
    _test_space_to_batch_nd(
        input_shape=[2, 2, 4, 1],
        block_shape=[2, 2],
        paddings=[[0, 0], [2, 0]],
        dtype='int64'
    )
    # pylint: disable=line-too-long
    # https://github.com/tensorflow/tensorflow/blob/24f578/tensorflow/python/kernel_tests/spacetobatch_op_test.py
    _test_space_to_batch_nd(
        input_shape=[2, 3],
        block_shape=[2],
        paddings=[[1, 0]],
        dtype='float32'
    )
    _test_space_to_batch_nd(
        input_shape=[2, 3, 2],
        block_shape=[2],
        paddings=[[1, 0]],
        dtype='float64'
    )
    _test_space_to_batch_nd_infer_paddings(
        input_shape=[2, 3, 2],
        block_shape=[2]
    )


#######################################################################
# BatchToSpaceND
# --------------