This article collects typical usage examples of the tensorflow.compat.v1.add method in Python. If you are unsure what v1.add does, how to call it, or want to see it in context, the curated code samples below may help. You can also explore the other members of the tensorflow.compat.v1 module.
The following 15 code examples of v1.add are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
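Before the examples, here is a minimal self-contained sketch of tf.compat.v1.add itself in TF1-style graph mode (the tensor values and the "total" name are illustrative only):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()  # run with a graph and a Session, TF1-style

a = tf.constant([1.0, 2.0])
b = tf.constant([3.0, 4.0])
total = tf.add(a, b, name="total")  # element-wise addition

with tf.Session() as sess:
    print(sess.run(total))  # [4. 6.]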
Example 1: add_scope
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add [as alias]
import functools

def add_scope(scope=None, scope_fn=None):
  """Return a decorator which adds a TF name/variable scope to a function.

  Note that the function returned by the decorator accepts an additional
  'name' parameter, which can override the name scope given when the function
  is created.

  Args:
    scope (str): name of the scope. If None, the function name is used.
    scope_fn (fct): Either tf.name_scope or tf.variable_scope.

  Returns:
    fct: the add_scope decorator.
  """
  def decorator(f):

    @functools.wraps(f)
    def decorated(*args, **kwargs):
      name = kwargs.pop("name", None)  # Python 2 hack for keyword-only args
      with scope_fn(name or scope or f.__name__):
        return f(*args, **kwargs)

    return decorated

  return decorator
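A hedged usage sketch (the decorated function and scope name below are illustrative, not from the original source):

import tensorflow.compat.v1 as tf

@add_scope(scope="my_layer", scope_fn=tf.name_scope)
def double(x):
  return tf.add(x, x)

y = double(tf.constant(1.0))              # ops created under "my_layer/"
z = double(tf.constant(2.0), name="alt")  # 'name' overrides the scope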
Example 2: post_attention
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add [as alias]
def post_attention(self, token, x):
  """Called after self-attention. The memory can be updated here.

  Args:
    token: Data returned by pre_attention, which can be used to carry over
      state related to the current memory operation.
    x: a Tensor of data after self-attention and feed-forward.

  Returns:
    A (possibly modified) version of the input x.
  """
  with tf.variable_scope(self.name + "/post_attention", reuse=tf.AUTO_REUSE):
    depth = common_layers.shape_list(x)[-1]
    actual_batch_size = common_layers.shape_list(x)[0]
    memory_output = tf.gather(token["retrieved_mem"],
                              tf.range(actual_batch_size))
    output = tf.add(tf.layers.dense(x, depth, use_bias=False),
                    tf.layers.dense(memory_output, depth))
    with tf.control_dependencies([output]):
      with tf.control_dependencies([
          self.write(token["x"], token["access_logits"])]):
        return tf.identity(output)
Example 3: testGetRegularizerForConcatWithNone
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add [as alias]
def testGetRegularizerForConcatWithNone(self, test_concat, depth):
  image = tf.constant(0.0, shape=[1, 17, 19, 3])
  conv2 = layers.conv2d(image, 5, [1, 1], padding='SAME', scope='conv2')
  other_input = tf.add(
      tf.identity(tf.constant(3.0, shape=[1, 17, 19, depth])), 3.0)
  # other_input has None as regularizer.
  concat = tf.concat([other_input, conv2], 3)
  output = tf.add(concat, concat, name='output_out')
  op = concat.op if test_concat else output.op

  # Instantiate OpRegularizerManager.
  op_handler_dict = self._default_op_handler_dict
  op_handler_dict['Conv2D'] = StubConvSourceOpHandler(add_concat_model_stub)
  op_reg_manager = orm.OpRegularizerManager([output.op], op_handler_dict)

  expected_alive = add_concat_model_stub.expected_alive()
  alive = op_reg_manager.get_regularizer(op).alive_vector
  self.assertAllEqual([True] * depth, alive[:depth])
  self.assertAllEqual(expected_alive['conv2'], alive[depth:])
Example 4: testInit_AddConcat_AllOps
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add [as alias]
def testInit_AddConcat_AllOps(self):
  with arg_scope(self._batch_norm_scope()):
    inputs = tf.zeros([2, 4, 4, 3])
    c1 = layers.conv2d(inputs, num_outputs=10, kernel_size=3, scope='conv1')
    c2 = layers.conv2d(inputs, num_outputs=10, kernel_size=3, scope='conv2')
    add = c1 + c2
    c3 = layers.conv2d(add, num_outputs=10, kernel_size=3, scope='conv3')
    out = tf.identity(c3)
    concat = tf.concat([c1, c2], axis=3)
    c4 = layers.conv2d(concat, num_outputs=10, kernel_size=3, scope='conv4')

  manager = orm.OpRegularizerManager(
      [out.op], self._default_op_handler_dict, SumGroupingRegularizer)

  # Op c4 is not in the DFS path of out. Verify that OpRegularizerManager
  # does not process c4.
  self.assertNotIn(c4.op, manager.ops)
  self.assertNotIn(concat.op, manager.ops)
Example 5: testInit_BlacklistGroup
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add [as alias]
def testInit_BlacklistGroup(self):
  with arg_scope(self._batch_norm_scope()):
    inputs = tf.zeros([2, 4, 4, 3])
    c1 = layers.conv2d(inputs, num_outputs=10, kernel_size=3, scope='conv1')
    c2 = layers.conv2d(inputs, num_outputs=10, kernel_size=3, scope='conv2')
    add = c1 + c2
    c3 = layers.conv2d(add, num_outputs=10, kernel_size=3, scope='conv3')

  # Verify c2 has a regularizer.
  manager = orm.OpRegularizerManager(
      [c3.op], self._default_op_handler_dict, SumGroupingRegularizer)
  self.assertIsNotNone(manager.get_regularizer(c2.op))

  # Verify c2 has None regularizer after blacklisting c1, which is grouped.
  manager = orm.OpRegularizerManager(
      [c3.op], self._default_op_handler_dict, SumGroupingRegularizer,
      regularizer_blacklist=['conv1'])
  self.assertIsNone(manager.get_regularizer(c2.op))
Example 6: testCorrectSourceOpsWithSkipConnection
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add [as alias]
def testCorrectSourceOpsWithSkipConnection(self):
  inputs = tf.zeros([2, 4, 4, 3])
  x0 = layers.conv2d(
      inputs, num_outputs=8, kernel_size=3, activation_fn=None, scope='conv0')
  x1 = tf.nn.relu(layers.batch_norm(x0, scale=True, scope='bn0'))
  x1 = layers.conv2d(
      x1, num_outputs=8, kernel_size=3, activation_fn=None, scope='conv1')
  x2 = tf.add_n([x0, x1], name='add')
  final_op = tf.nn.relu(layers.batch_norm(x2, scale=True, scope='bn1'))

  op_handler_dict = self._default_op_handler_dict
  op_reg_manager = orm.OpRegularizerManager([final_op.op], op_handler_dict)

  # All ops are in the same group.
  group = list(op_reg_manager._op_group_dict.values())[0]
  source_op_names = [s.op.name for s in group.source_op_slices]
  self.assertSetEqual(set(['bn0/FusedBatchNormV3', 'bn1/FusedBatchNormV3']),
                      set(source_op_names))
Example 7: _variable_with_weight_decay
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add [as alias]
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable.
    shape: list of ints.
    stddev: standard deviation of a truncated Gaussian.
    wd: add L2Loss weight decay multiplied by this float. If None, weight
      decay is not added for this Variable.

  Returns:
    Variable Tensor.
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
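The helper _variable_on_cpu is referenced but not shown. A minimal sketch of the conventional implementation from the TensorFlow CIFAR-10 tutorial that this pattern comes from (an assumption, not part of this snippet):

def _variable_on_cpu(name, shape, initializer):
  # Pin the variable to host memory so multi-GPU towers can share one copy.
  with tf.device('/cpu:0'):
    var = tf.get_variable(name, shape, initializer=initializer,
                          dtype=tf.float32)
  return var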
Example 8: _get_cost_function
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add [as alias]
def _get_cost_function(self):
    """Compute the cost of the Mittens objective function.

    If self.mittens = 0, this is the same as the cost of GloVe.
    """
    self.weights = tf.placeholder(
        tf.float32, shape=[self.n_words, self.n_words])
    self.log_coincidence = tf.placeholder(
        tf.float32, shape=[self.n_words, self.n_words])
    self.diffs = tf.subtract(self.model, self.log_coincidence)
    cost = tf.reduce_sum(
        0.5 * tf.multiply(self.weights, tf.square(self.diffs)))
    if self.mittens > 0:
        self.mittens = tf.constant(self.mittens, tf.float32)
        cost += self.mittens * tf.reduce_sum(
            tf.multiply(
                self.has_embedding,
                self._tf_squared_euclidean(
                    tf.add(self.W, self.C),
                    self.original_embedding)))
    tf.summary.scalar("cost", cost)
    return cost
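The _tf_squared_euclidean helper is referenced but not shown. A plausible sketch, assuming it computes the row-wise squared Euclidean distance between two embedding matrices:

@staticmethod
def _tf_squared_euclidean(X, Y):
    # Per-row squared distance: sum_j (X_ij - Y_ij)^2, shape [n_words].
    return tf.reduce_sum(tf.pow(X - Y, 2), axis=1)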
Example 9: f1_metric
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add [as alias]
def f1_metric(precision, precision_op, recall, recall_op):
  """Computes F1 based on precision and recall.

  Args:
    precision: <float> [batch_size]
    precision_op: Update op for precision.
    recall: <float> [batch_size]
    recall_op: Update op for recall.

  Returns:
    Tensor and update op for F1.
  """
  f1_op = tf.group(precision_op, recall_op)
  numerator = 2 * tf.multiply(precision, recall)
  denominator = tf.add(precision, recall)
  f1 = tf.divide(numerator, denominator)

  # Guard against division by zero: F1 is 0 wherever precision + recall is 0.
  # <float> [batch_size]
  zero_vec = tf.zeros_like(f1)
  is_valid = tf.greater(denominator, zero_vec)
  f1 = tf.where(is_valid, x=f1, y=zero_vec)

  return f1, f1_op
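A hedged usage sketch wiring f1_metric to the TF1 streaming metrics (the labels/predictions placeholders are illustrative):

labels = tf.placeholder(tf.int64, shape=[None])
predictions = tf.placeholder(tf.int64, shape=[None])
precision, precision_op = tf.metrics.precision(labels, predictions)
recall, recall_op = tf.metrics.recall(labels, predictions)
f1, f1_op = f1_metric(precision, precision_op, recall, recall_op)
# sess.run(f1_op) updates the streaming counts; sess.run(f1) reads F1.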
Example 10: remap_labels
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add [as alias]
def remap_labels(labels,
                 original_labels=None,
                 new_label=None):
  """Remaps labels that have an id in original_labels to new_label.

  Args:
    labels: rank 1 int32 tensor of shape [num_instance] containing the object
      classes.
    original_labels: int list of original labels that should be mapped from.
    new_label: int label to map to.

  Returns:
    Remapped labels.
  """
  new_labels = labels
  for original_label in original_labels:
    change = tf.where(
        tf.equal(new_labels, original_label),
        tf.add(tf.zeros_like(new_labels), new_label - original_label),
        tf.zeros_like(new_labels))
    new_labels = tf.add(
        new_labels,
        change)
  new_labels = tf.reshape(new_labels, tf.shape(labels))
  return new_labels
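A short hedged example, collapsing classes 2 and 3 into class 1 (the values are illustrative):

labels = tf.constant([1, 2, 3, 4], dtype=tf.int32)
remapped = remap_labels(labels, original_labels=[2, 3], new_label=1)
with tf.Session() as sess:
    print(sess.run(remapped))  # [1 1 1 4]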
Example 11: _test_fill
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add [as alias]
def _test_fill(dims, value_data, value_dtype):
    """Use the fill op to create a tensor of value_data with constant dims."""
    value_data = np.array(value_data, dtype=value_dtype)
    # TF 1.13 TFLite convert method does not accept empty shapes.
    if package_version.parse(tf.VERSION) >= package_version.parse('1.14.0'):
        with tf.Graph().as_default():
            value = array_ops.placeholder(dtype=value_dtype, name="value", shape=[])
            out = tf.fill(dims, value)
            compare_tflite_with_tvm([value_data], ["value"], [value], [out])

    with tf.Graph().as_default():
        input1 = array_ops.placeholder(dtype=value_dtype, name="input1", shape=dims)
        # The fill op gets converted to a static tensor during conversion.
        out = tf.fill(dims, value_data)
        out1 = tf.add(out, input1)
        input1_data = np.random.uniform(0, 5, size=dims).astype(value_dtype)
        compare_tflite_with_tvm([input1_data], ["input1"], [input1], [out1])
Example 12: _test_quantize_dequantize
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add [as alias]
def _test_quantize_dequantize(data):
    """One iteration of quantize and dequantize."""
    # Keras model to force the TFLite converter to insert 2 TFLite quantize ops.
    # The first TFLite quantize op converts a float32 tensor to an int8 tensor - Qnn quantize.
    # The second TFLite quantize op converts an int8 tensor to an int8 tensor - Qnn requantize.
    data_in = tf.keras.layers.Input(shape=data.shape[1:])
    relu = tf.keras.layers.ReLU()(data_in)
    add = tf.keras.layers.Add()([data_in, relu])
    concat = tf.keras.layers.Concatenate(axis=0)([relu, add])
    keras_model = tf.keras.models.Model(inputs=data_in, outputs=concat)
    input_name = data_in.name.split(":")[0]

    # Quantizing the dynamic range of activations requires a representative dataset.
    def representative_data_gen():
        for i in range(1):
            yield [data]

    tflite_model_quant = _quantize_keras_model(keras_model, representative_data_gen)

    tflite_output = run_tflite_graph(tflite_model_quant, data)
    tvm_output = run_tvm_graph(tflite_model_quant, data, input_name)
    tvm.testing.assert_allclose(np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]),
                                rtol=1e-5, atol=1e-2)
Example 13: test_forward_multi_input
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add [as alias]
def test_forward_multi_input():
    with tf.Graph().as_default():
        in1 = tf.placeholder(tf.int32, shape=[3, 3], name='in1')
        in2 = tf.placeholder(tf.int32, shape=[3, 3], name='in2')
        in3 = tf.placeholder(tf.int32, shape=[3, 3], name='in3')
        in4 = tf.placeholder(tf.int32, shape=[3, 3], name='in4')

        out1 = tf.add(in1, in2, name='out1')
        out2 = tf.subtract(in3, in4, name='out2')
        out = tf.multiply(out1, out2, name='out')
        in_data = np.arange(9, dtype='int32').reshape([3, 3])

        compare_tf_with_tvm([in_data, in_data, in_data, in_data],
                            ['in1:0', 'in2:0', 'in3:0', 'in4:0'], 'out:0')
Example 14: test_placeholder
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add [as alias]
def test_placeholder():
    with tf.Graph().as_default():
        in_data1 = np.random.uniform(-5, 5, size=(3, 4, 5)).astype(np.float32)
        var1 = tf.Variable(in_data1, name='in1')
        var2 = array_ops.placeholder_with_default(var1, None, name='place1')

        in_data2 = np.random.uniform(-5, 5, size=(3, 4, 5)).astype(np.float32)
        place1 = array_ops.placeholder(
            shape=in_data1.shape, dtype=in_data1.dtype, name='in2')

        out1 = tf.math.add(var1, var2, name='out1')
        out2 = tf.math.add(out1, place1, name='out2')

        compare_tf_with_tvm([in_data1, in_data2], ['place1:0', 'in2:0'], 'out2:0',
                            init_global_variables=True)
Example 15: _test_spop_placeholder_without_shape_info
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add [as alias]
def _test_spop_placeholder_without_shape_info():
    with tf.Graph().as_default():

        @function.Defun(*[tf.int32] * 2)
        def Forward(x, y):
            print(x.name)
            print(y.name)
            b = tf.add(x, y)
            return b

        pl1 = tf.placeholder(tf.int32, name="pl1")
        pl2 = tf.placeholder(tf.int32, name="pl2")
        pl3 = tf.placeholder(tf.int32, name="pl3")

        data = np.array([[-1, 1], [2, -2]], dtype=np.int32)
        data2 = np.array([[-2, 3], [4, -6]], dtype=np.int32)
        data3 = np.array([[-2, 3], [4, -6]], dtype=np.int32)

        z1 = gen_functional_ops.StatefulPartitionedCall(args=[pl1, pl2],
                                                        Tout=[tf.int32], f=Forward)
        z2 = z1 + pl3
        compare_tf_with_tvm([data, data2, data3], ['pl1:0', 'pl2:0', 'pl3:0'],
                            ['StatefulPartitionedCall:0', z2.name], mode='vm',
                            init_global_variables=True)