This article collects typical usage examples of the Python method tensorflow.python.ops.nn.bias_add. If you have been wondering what nn.bias_add does, how to call it, or what real-world code using it looks like, the curated examples below should help. You can also explore the enclosing module, tensorflow.python.ops.nn, for related methods.
Fifteen code examples of nn.bias_add are shown below, sorted by popularity by default. Upvote the examples you find useful; your feedback helps the site recommend better Python samples.
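Before diving into the examples, here is a minimal sketch of what bias_add itself does, using the public tf.nn.bias_add alias (TF 2.x eager mode assumed):

import numpy as np
import tensorflow as tf

# A 4-D NHWC activation tensor and a per-channel bias vector.
x = tf.constant(np.ones((2, 3, 3, 4), dtype=np.float32))
b = tf.constant([0.1, 0.2, 0.3, 0.4])

# Adds b along the last (channel) dimension; equivalent to x + b here,
# but bias_add also accepts data_format='NCHW' for channels-first inputs.
y = tf.nn.bias_add(x, b)
print(y.shape)  # (2, 3, 3, 4)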
Example 1: call
# Required import: from tensorflow.python.ops import nn [as alias]
# Or alternatively: from tensorflow.python.ops.nn import bias_add [as alias]
def call(self, inputs):
  inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
  shape = inputs.get_shape().as_list()
  output_shape = shape[:-1] + [self.units]
  if len(output_shape) > 2:
    # Broadcasting is required for the inputs.
    outputs = standard_ops.tensordot(inputs, self.kernel, [[len(shape) - 1],
                                                           [0]])
    # Reshape the output back to the original ndim of the input.
    outputs.set_shape(output_shape)
  else:
    outputs = standard_ops.matmul(inputs, self.kernel)
  if self.use_bias:
    outputs = nn.bias_add(outputs, self.bias)
  if self.activation is not None:
    return self.activation(outputs)  # pylint: disable=not-callable
  return outputs
Example 2: call
# Required import: from tensorflow.python.ops import nn [as alias]
# Or alternatively: from tensorflow.python.ops.nn import bias_add [as alias]
def call(self, inputs):
  w = self.compute_spectral_norm()
  inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
  rank = common_shapes.rank(inputs)
  if rank > 2:
    # Broadcasting is required for the inputs.
    outputs = standard_ops.tensordot(inputs, w, [[rank - 1], [0]])
    # Reshape the output back to the original ndim of the input.
    if not context.executing_eagerly():
      shape = inputs.get_shape().as_list()
      output_shape = shape[:-1] + [self.units]
      outputs.set_shape(output_shape)
  else:
    outputs = gen_math_ops.mat_mul(inputs, w)
  if self.use_bias:
    outputs = nn.bias_add(outputs, self.bias)
  if self.activation is not None:
    return self.activation(outputs)  # pylint: disable=not-callable
  return outputs
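The compute_spectral_norm helper is not shown in this example. A common way to implement it is one step of power iteration; the following is a minimal sketch of that standard technique (the function name, single-iteration choice, and the persistent vector u are assumptions, not the original project's code):

import tensorflow as tf

def compute_spectral_norm(w, u, training=True):
  """One power-iteration step estimating sigma, the largest singular
  value of w; returns w / sigma. `u` is a persistent (1, cols) variable."""
  w_mat = tf.reshape(w, [-1, w.shape[-1]])  # flatten kernel to 2-D
  v = tf.math.l2_normalize(tf.matmul(u, tf.transpose(w_mat)))   # (1, rows)
  u_new = tf.math.l2_normalize(tf.matmul(v, w_mat))             # (1, cols)
  sigma = tf.squeeze(tf.matmul(tf.matmul(v, w_mat), tf.transpose(u_new)))
  if training:
    u.assign(u_new)  # carry the iteration state across calls
  return w / sigma

Here u would be a non-trainable variable created once, e.g. u = tf.Variable(tf.random.normal([1, units]), trainable=False).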
Example 3: call
# Required import: from tensorflow.python.ops import nn [as alias]
# Or alternatively: from tensorflow.python.ops.nn import bias_add [as alias]
def call(self, inputs):
  shape = inputs.get_shape().as_list()
  input_dim = shape[-1]
  output_shape = shape[:-1] + [self.units]
  if len(output_shape) > 2:
    # Reshape the input to 2D.
    output_shape_tensors = array_ops.unstack(array_ops.shape(inputs))
    output_shape_tensors[-1] = self.units
    output_shape_tensor = array_ops.stack(output_shape_tensors)
    inputs = array_ops.reshape(inputs, [-1, input_dim])
  outputs = standard_ops.matmul(inputs, self.kernel)
  if self.use_bias:
    outputs = nn.bias_add(outputs, self.bias)
  if len(output_shape) > 2:
    # Reshape the output back to the original ndim of the input.
    outputs = array_ops.reshape(outputs, output_shape_tensor)
    outputs.set_shape(output_shape)
  if self.activation is not None:
    return self.activation(outputs)  # pylint: disable=not-callable
  return outputs
Example 4: testDeterministicGradients
# Required import: from tensorflow.python.ops import nn [as alias]
# Or alternatively: from tensorflow.python.ops.nn import bias_add [as alias]
def testDeterministicGradients(self):
  with self.session(force_gpu=True):
    # There are problems with using force_gpu=True and cached_session with
    # both eager mode and graph mode in the same test. Using a non-cached
    # session and putting everything inside the same session context is
    # a compromise.
    for op_binding in (tf.nn.bias_add, nn.bias_add, nn_ops.bias_add):
      for data_layout in ('channels_first', 'channels_last'):
        # With the selected layer configuration, at least in TensorFlow
        # version 2.0, when data_layout='channels_last', bias_add operates
        # deterministically by default. I don't know if this is true for
        # all layer configurations. These cases are still being tested here,
        # for completeness.
        for data_rank in (1, 2, 3):
          for data_type in (dtypes.float16, dtypes.float32, dtypes.float64):
            self._testDeterministicGradientsCase(op_binding, data_layout,
                                                 data_rank, data_type)
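The _testDeterministicGradientsCase helper is defined elsewhere in the test file; the core idea is to run backprop through bias_add repeatedly and assert bitwise-identical gradients. A rough TF 2.x sketch of that kind of check (the function name and shapes here are assumptions):

import tensorflow as tf

def assert_bias_add_grad_deterministic(shape=(8, 32, 32, 16), repeats=5):
  x = tf.random.normal(shape, seed=0)
  bias = tf.random.normal(shape[-1:], seed=1)
  upstream = tf.random.normal(shape, seed=2)  # fixed upstream gradient
  grads = []
  for _ in range(repeats):
    with tf.GradientTape() as tape:
      tape.watch(bias)
      y = tf.nn.bias_add(x, bias)
    grads.append(tape.gradient(y, bias, output_gradients=upstream))
  for g in grads[1:]:
    # Bitwise equality: a nondeterministic GPU reduction in the bias
    # gradient would typically differ between runs.
    tf.debugging.assert_equal(g, grads[0])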
Example 5: call
# Required import: from tensorflow.python.ops import nn [as alias]
# Or alternatively: from tensorflow.python.ops.nn import bias_add [as alias]
def call(self, inputs, training=True):
  # BN if training
  if training:
    inputs = tf.keras.layers.BatchNormalization()(inputs)
  # dropout if training
  if training and self.dropout_rate > 0.0:
    inputs = dropout(inputs, self.dropout_rate, self.num_features_nonzero,
                     self.is_sparse_inputs)
  if not training:
    print("gcn not training now")
  # convolve
  hidden_vectors = list()
  for i in range(len(self.adjs)):
    pre_sup = tf.matmul(inputs, self.kernels[i],
                        a_is_sparse=self.is_sparse_inputs)
    hidden_vector = tf.sparse.sparse_dense_matmul(
        tf.cast(self.adjs[i], tf.float32), pre_sup)
    hidden_vectors.append(hidden_vector)
  outputs = tf.add_n(hidden_vectors)
  # bias
  if self.use_bias:
    outputs = nn.bias_add(outputs, self.bias)
  # activation
  if self.activation is not None:
    return self.activation(outputs)
  return outputs
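For reference, the loop above implements the standard multi-support GCN propagation rule, outputs = Σ_i Â_i (X W_i) + b, where Â_i is the i-th (sparse, typically normalized) adjacency matrix, X the input features, W_i the matching kernel, and b the bias contributed by nn.bias_add.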
Example 6: call
# Required import: from tensorflow.python.ops import nn [as alias]
# Or alternatively: from tensorflow.python.ops.nn import bias_add [as alias]
def call(self, inputs):
  inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
  shape = inputs.get_shape().as_list()
  if len(shape) > 2:
    # Broadcasting is required for the inputs.
    outputs = standard_ops.tensordot(inputs, self.kernel, [[len(shape) - 1],
                                                           [0]])
    # Reshape the output back to the original ndim of the input.
    if context.in_graph_mode():
      output_shape = shape[:-1] + [self.units]
      outputs.set_shape(output_shape)
  else:
    outputs = standard_ops.matmul(inputs, self.kernel)
  if self.use_bias:
    outputs = nn.bias_add(outputs, self.bias)
  if self.activation is not None:
    return self.activation(outputs)  # pylint: disable=not-callable
  return outputs
Contributor: PacktPublishing | Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda | Lines: 20 | Source file: core.py
Example 7: call
# Required import: from tensorflow.python.ops import nn [as alias]
# Or alternatively: from tensorflow.python.ops.nn import bias_add [as alias]
def call(self, inputs):
  # Apply the actual ops.
  if self.data_format == 'channels_last':
    strides = (1,) + self.strides + (1,)
  else:
    strides = (1, 1) + self.strides
  outputs = nn.separable_conv2d(
      inputs,
      self.depthwise_kernel,
      self.pointwise_kernel,
      strides=strides,
      padding=self.padding.upper(),
      rate=self.dilation_rate,
      data_format=utils.convert_data_format(self.data_format, ndim=4))
  if self.use_bias:
    outputs = nn.bias_add(
        outputs,
        self.bias,
        data_format=utils.convert_data_format(self.data_format, ndim=4))
  if self.activation is not None:
    return self.activation(outputs)
  return outputs
Contributor: PacktPublishing | Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda | Lines: 26 | Source file: convolutional.py
Example 8: call
# Required import: from tensorflow.python.ops import nn [as alias]
# Or alternatively: from tensorflow.python.ops.nn import bias_add [as alias]
def call(self, inputs):
  inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
  ndim = self._input_rank
  shape = self.gamma.get_shape().as_list()
  gamma = array_ops.reshape(self.gamma, (ndim - 2) * [1] + shape)
  # Compute normalization pool.
  if self.data_format == 'channels_first':
    norm_pool = nn.convolution(
        math_ops.square(inputs),
        gamma,
        'VALID',
        data_format='NC' + 'DHW'[-(ndim - 2):])
    if ndim == 3:
      norm_pool = array_ops.expand_dims(norm_pool, 2)
      norm_pool = nn.bias_add(norm_pool, self.beta, data_format='NCHW')
      norm_pool = array_ops.squeeze(norm_pool, [2])
    elif ndim == 5:
      shape = array_ops.shape(norm_pool)
      norm_pool = array_ops.reshape(norm_pool,
                                    array_ops.concat([shape[:3], [-1]], 0))
      norm_pool = nn.bias_add(norm_pool, self.beta, data_format='NCHW')
      norm_pool = array_ops.reshape(norm_pool, shape)
    else:  # ndim == 4
      norm_pool = nn.bias_add(norm_pool, self.beta, data_format='NCHW')
  else:  # channels_last
    norm_pool = nn.convolution(math_ops.square(inputs), gamma, 'VALID')
    norm_pool = nn.bias_add(norm_pool, self.beta, data_format='NHWC')
  norm_pool = math_ops.sqrt(norm_pool)
  if self.inverse:
    outputs = inputs * norm_pool
  else:
    outputs = inputs / norm_pool
  outputs.set_shape(inputs.get_shape())
  return outputs
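For reference, this layer computes generalized divisive normalization (GDN): each output is y_i = x_i / sqrt(β_i + Σ_j γ_ij x_j²), or the product x_i · sqrt(...) when inverse=True. The convolution with gamma computes the weighted sum of squares across channels, and bias_add contributes the β offset before the square root.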
Example 9: call
# Required import: from tensorflow.python.ops import nn [as alias]
# Or alternatively: from tensorflow.python.ops.nn import bias_add [as alias]
def call(self, inputs):
  outputs = nn.convolution(
      input=inputs,
      filter=self.kernel,
      dilation_rate=self.dilation_rate,
      strides=self.strides,
      padding=self.padding.upper(),
      data_format=utils.convert_data_format(self.data_format, self.rank + 2))
  if self.bias is not None:
    if self.data_format == 'channels_first':
      # bias_add only supports NHWC.
      # TODO(fchollet): remove this when `bias_add` is feature-complete.
      if self.rank == 1:
        bias = array_ops.reshape(self.bias, (1, self.filters, 1))
        outputs += bias
      if self.rank == 2:
        bias = array_ops.reshape(self.bias, (1, self.filters, 1, 1))
        outputs += bias
      if self.rank == 3:
        # As of Mar 2017, direct addition is significantly slower than
        # bias_add when computing gradients. To use bias_add, we collapse Z
        # and Y into a single dimension to obtain a 4D input tensor.
        outputs_shape = outputs.shape.as_list()
        outputs_4d = array_ops.reshape(outputs,
                                       [outputs_shape[0], outputs_shape[1],
                                        outputs_shape[2] * outputs_shape[3],
                                        outputs_shape[4]])
        outputs_4d = nn.bias_add(outputs_4d, self.bias, data_format='NCHW')
        outputs = array_ops.reshape(outputs_4d, outputs_shape)
    else:
      outputs = nn.bias_add(outputs, self.bias, data_format='NHWC')
  if self.activation is not None:
    return self.activation(outputs)
  return outputs
Example 10: bias_add
# Required import: from tensorflow.python.ops import nn [as alias]
# Or alternatively: from tensorflow.python.ops.nn import bias_add [as alias]
def bias_add(x, bias, data_format=None):
  """Adds a bias vector to a tensor.

  Arguments:
      x: Tensor or variable.
      bias: Bias tensor to add.
      data_format: Data format for 3D, 4D or 5D tensors:
          one of "channels_first", "channels_last".

  Returns:
      Output tensor.

  Raises:
      ValueError: In case of invalid `data_format` argument.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))
  if ndim(x) == 5:
    if data_format == 'channels_first':
      x += reshape(bias, (1, int_shape(bias)[0], 1, 1, 1))
    elif data_format == 'channels_last':
      x += reshape(bias, (1, 1, 1, 1, int_shape(bias)[0]))
  elif ndim(x) == 4:
    if data_format == 'channels_first':
      # No support yet for NCHW in bias_add.
      x += reshape(bias, (1, int_shape(bias)[0], 1, 1))
    elif data_format == 'channels_last':
      x = nn.bias_add(x, bias, data_format='NHWC')
  elif ndim(x) == 3:
    if data_format == 'channels_first':
      x += reshape(bias, (1, int_shape(bias)[0], 1))
    elif data_format == 'channels_last':
      x += reshape(bias, (1, 1, int_shape(bias)[0]))
  else:
    x = nn.bias_add(x, bias)
  return x
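The same wrapper is exposed through the public Keras backend as tf.keras.backend.bias_add; a short usage sketch (assuming TF 2.x):

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.constant(np.zeros((2, 5, 5, 3), dtype=np.float32))  # NHWC
b = tf.constant([1.0, 2.0, 3.0])

# Adds b along the channel axis; pass data_format='channels_first'
# for NCHW-shaped inputs instead.
y = K.bias_add(x, b, data_format='channels_last')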
Example 11: call
# Required import: from tensorflow.python.ops import nn [as alias]
# Or alternatively: from tensorflow.python.ops.nn import bias_add [as alias]
def call(self, inputs):
  outputs = nn.convolution(
      input=inputs,
      filter=self.kernel,
      dilation_rate=self.dilation_rate,
      strides=self.strides,
      padding=self.padding.upper(),
      data_format=utils.convert_data_format(self.data_format, self.rank + 2))
  if self.bias is not None:
    if self.rank != 2 and self.data_format == 'channels_first':
      # bias_add does not support channels_first for non-4D inputs.
      if self.rank == 1:
        bias = array_ops.reshape(self.bias, (1, self.filters, 1))
      if self.rank == 3:
        bias = array_ops.reshape(self.bias, (1, self.filters, 1, 1, 1))
      outputs += bias
    else:
      outputs = nn.bias_add(
          outputs,
          self.bias,
          data_format=utils.convert_data_format(self.data_format, 4))
      # Note that we passed rank=4 because bias_add will only accept
      # NHWC and NCHW even if the rank of the inputs is 3 or 5.
  if self.activation is not None:
    return self.activation(outputs)
  return outputs
Example 12: _patch_bias_add
# Required import: from tensorflow.python.ops import nn [as alias]
# Or alternatively: from tensorflow.python.ops.nn import bias_add [as alias]
def _patch_bias_add():
  tf.nn.bias_add = _new_bias_add_1_14  # access via public API
  nn.bias_add = _new_bias_add_1_14  # called from tf.keras.layers.convolutional.Conv
  nn_ops.bias_add = _new_bias_add_1_14  # called from tests
  # The original, pre-patched method can be viewed at
  # https://github.com/tensorflow/tensorflow/blob/v1.14.0/tensorflow/python/ops/nn_ops.py#L2628
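The replacement function _new_bias_add_1_14 itself is not shown here. The idea behind such patches is to replace the fused bias_add op with a plain broadcast add, whose gradient reduction is deterministic on GPU. A simplified sketch of that idea (the name below is made up; this is not the project's actual patched code):

import tensorflow as tf

def _deterministic_bias_add(value, bias, data_format=None, name=None):
  """Broadcast-add stand-in for bias_add; avoids the fused BiasAddGrad
  kernel, whose GPU reduction can be nondeterministic."""
  with tf.name_scope(name or "BiasAdd"):
    if data_format == 'NCHW':
      # Reshape bias to (1, C, 1, ...) so it broadcasts over N and
      # the spatial dimensions (assumes a statically known bias size).
      broadcast_shape = [1, bias.shape[0]] + [1] * (value.shape.rank - 2)
      return value + tf.reshape(bias, broadcast_shape)
    return value + bias  # NHWC / default: bias aligns with the last axis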
Example 13: _testBias
# Required import: from tensorflow.python.ops import nn [as alias]
# Or alternatively: from tensorflow.python.ops.nn import bias_add [as alias]
def _testBias(self, np_inputs, np_bias, use_gpu=False):
  np_val = self._npBias(np_inputs, np_bias)
  with self.cached_session(use_gpu=use_gpu):
    tf_val = self.evaluate(nn_ops.bias_add(np_inputs, np_bias))
  self.assertAllCloseAccordingToType(np_val, tf_val)
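_npBias is the NumPy reference the test compares against; since bias_add is a broadcast add over the trailing axis, the reference is presumably little more than the following sketch:

import numpy as np

def _npBias(self, inputs, bias):
  # NumPy broadcasting adds the 1-D `bias` along the trailing
  # (channel) axis of `inputs`.
  return inputs + bias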
Example 14: _testBiasNCHW
# Required import: from tensorflow.python.ops import nn [as alias]
# Or alternatively: from tensorflow.python.ops.nn import bias_add [as alias]
def _testBiasNCHW(self, np_inputs, np_bias, use_gpu):
  np_val = self._npBias(np_inputs, np_bias)
  np_inputs = self._NHWCToNCHW(np_inputs)
  with self.cached_session(use_gpu=use_gpu):
    tf_val = self.evaluate(nn_ops.bias_add(np_inputs, np_bias,
                                           data_format="NCHW"))
  tf_val = self._NCHWToNHWC(tf_val)
  self.assertAllCloseAccordingToType(self._AtLeast3d(np_val), tf_val)
Example 15: _eval_op
# Required import: from tensorflow.python.ops import nn [as alias]
# Or alternatively: from tensorflow.python.ops.nn import bias_add [as alias]
def _eval_op(self, features, labels, logits=None, logits_input=None,
             name="eval_op"):
  labels = _check_labels(labels, self._label_name)
  if self._enable_centered_bias:
    logits = nn.bias_add(logits, _centered_bias(
        self.logits_dimension,
        self._centered_bias_weight_collection))
  loss_unweighted = self._eval_loss_fn(logits, labels)
  loss, _ = _loss(loss_unweighted,
                  _weight_tensor(features, self._weight_column_name),
                  name=name)
  predictions = self._logits_to_prediction(logits)
  return predictions, loss
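_centered_bias is defined elsewhere in the estimator code; conceptually it returns a trainable per-logit bias that recenters the logits, which is why it is added with bias_add. A rough TF1-era sketch of what such a helper might look like (the exact variable setup is an assumption):

import tensorflow as tf

def _centered_bias(logits_dimension, weight_collection):
  # A trainable bias of shape [logits_dimension], registered in the
  # given collection so the head can train it separately; the real
  # helper in tf.contrib.learn also wires up its training op.
  return tf.compat.v1.get_variable(
      "centered_bias_weight",
      shape=[logits_dimension],
      initializer=tf.zeros_initializer(),
      collections=[weight_collection,
                   tf.compat.v1.GraphKeys.GLOBAL_VARIABLES])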