

Python nn_ops.bias_add Method Code Examples

This article collects typical usage examples of the Python method tensorflow.python.ops.nn_ops.bias_add. If you are wondering what nn_ops.bias_add does or how to use it, the curated code examples below should help. You can also explore further usage of the containing module, tensorflow.python.ops.nn_ops.


The following presents 15 code examples of nn_ops.bias_add, ordered by popularity.
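Before the examples, here is a minimal sketch (not taken from any example below) of what bias_add itself does: it broadcasts a 1-D bias over the last dimension of the input, or over the channel dimension when data_format='NCHW'.

import numpy as np
import tensorflow as tf
from tensorflow.python.ops import nn_ops

x = tf.constant(np.zeros((2, 3), dtype=np.float32))  # [batch, channels]
b = tf.constant([1.0, 2.0, 3.0])                     # [channels]
y = nn_ops.bias_add(x, b)                            # every row of y becomes [1., 2., 3.]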

Example 1: relu_layer

# Required module: from tensorflow.python.ops import nn_ops [as alias]
# Or alternatively: from tensorflow.python.ops.nn_ops import bias_add [as alias]
def relu_layer(x, weights, biases, name=None):
  """Computes Relu(x * weight + biases).

  Args:
    x: a 2D tensor.  Dimensions typically: batch, in_units
    weights: a 2D tensor.  Dimensions typically: in_units, out_units
    biases: a 1D tensor.  Dimensions: out_units
    name: A name for the operation (optional).  If not specified
      "nn_relu_layer" is used.

  Returns:
    A 2-D Tensor computing relu(matmul(x, weights) + biases).
    Dimensions typically: batch, out_units.
  """
  with ops.name_scope(name, "relu_layer", [x, weights, biases]) as name:
    x = ops.convert_to_tensor(x, name="x")
    weights = ops.convert_to_tensor(weights, name="weights")
    biases = ops.convert_to_tensor(biases, name="biases")
    xw_plus_b = nn_ops.bias_add(math_ops.matmul(x, weights), biases)
    return nn_ops.relu(xw_plus_b, name=name) 
Author: ryfeus, Project: lambda-packs, Lines: 22, Source: nn_impl.py
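As a hedged usage sketch (the shapes are illustrative, not from the source project), the function above is also exported as tf.nn.relu_layer in TensorFlow 1.x:

import tensorflow.compat.v1 as tf

x = tf.random.normal([8, 16])  # [batch, in_units]
w = tf.random.normal([16, 4])  # [in_units, out_units]
b = tf.zeros([4])              # [out_units]
y = tf.nn.relu_layer(x, w, b)  # [batch, out_units] = [8, 4]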

Example 2: call

# Required module: from tensorflow.python.ops import nn_ops [as alias]
# Or alternatively: from tensorflow.python.ops.nn_ops import bias_add [as alias]
def call(self, inputs, state):
    """Run one time step of the IndRNN.

    Calculates the output and new hidden state using the IndRNN equation

      `output = new_state = act(W * input + u (*) state + b)`

    where `*` is the matrix multiplication and `(*)` is the Hadamard product.

    Args:
      inputs: Tensor, 2-D tensor of shape `[batch, num_units]`.
      state: Tensor, 2-D tensor of shape `[batch, num_units]` containing the
        previous hidden state.

    Returns:
      A tuple containing the output and new hidden state. Both are the same
        2-D tensor of shape `[batch, num_units]`.
    """
    gate_inputs = math_ops.matmul(inputs, self._input_kernel)
    recurrent_update = math_ops.multiply(state, self._recurrent_kernel)
    gate_inputs = math_ops.add(gate_inputs, recurrent_update)
    gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)
    output = self._activation(gate_inputs)
    return output, output 
Author: batzner, Project: indrnn, Lines: 26, Source: ind_rnn_cell.py
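To make the equation concrete, here is a hedged, self-contained restatement of the IndRNN step with plain tensors standing in for the cell's internals (W for self._input_kernel, u for self._recurrent_kernel, b for self._bias; relu is one possible choice of self._activation):

import tensorflow as tf

batch, num_in, num_units = 4, 3, 5
x = tf.random.normal([batch, num_in])
state = tf.zeros([batch, num_units])
W = tf.random.normal([num_in, num_units])  # stands in for self._input_kernel
u = tf.random.normal([num_units])          # per-unit recurrent weight, self._recurrent_kernel
b = tf.zeros([num_units])                  # self._bias
new_state = tf.nn.relu(tf.nn.bias_add(tf.matmul(x, W) + state * u, b))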

Example 3: __call__

# Required module: from tensorflow.python.ops import nn_ops [as alias]
# Or alternatively: from tensorflow.python.ops.nn_ops import bias_add [as alias]
def __call__(self, args):
    if not self._is_sequence:
      args = [args]

    if len(args) == 1:
      res = math_ops.matmul(args[0], self._weights)
    else:
      # Explicitly creating a one for a minor performance improvement.
      one = constant_op.constant(1, dtype=dtypes.int32)
      res = math_ops.matmul(array_ops.concat(args, one), self._weights)
    if self._build_bias:
      res = nn_ops.bias_add(res, self._biases)
    return res


# TODO(xpan): Remove this function in a follow up. 
Author: shaohua0116, Project: Multiview2Novelview, Lines: 18, Source: core_rnn_cell.py
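Why concatenating the inputs and applying a single matmul works: it is equivalent to slicing the weight matrix per argument and summing the products. A quick sketch (names illustrative):

import tensorflow as tf

a = tf.random.normal([2, 3])
b = tf.random.normal([2, 4])
W = tf.random.normal([7, 5])  # 7 = 3 + 4
fused = tf.matmul(tf.concat([a, b], 1), W)
split = tf.matmul(a, W[:3]) + tf.matmul(b, W[3:])
# fused and split agree up to floating-point rounding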

Example 4: testDeterministicGradients

# Required module: from tensorflow.python.ops import nn_ops [as alias]
# Or alternatively: from tensorflow.python.ops.nn_ops import bias_add [as alias]
def testDeterministicGradients(self):
    with self.session(force_gpu=True):
      # There are problems with using force_gpu=True and cached_session with
      # both eager mode and graph mode in the same test. Using a non-cached
      # session and putting everything inside the same session context is
      # a compromise.
      for op_binding in (tf.nn.bias_add, nn.bias_add, nn_ops.bias_add):
        for data_layout in ('channels_first', 'channels_last'):
          # With the selected layer configuration, at least in TensorFlow
          # version 2.0, when data_layout='channels_last', bias_add operates
          # deterministically by default. I don't know if this is true for
          # all layer configurations. These cases are still being tested here,
          # for completeness.
          for data_rank in (1, 2, 3):
            for data_type in (dtypes.float16, dtypes.float32, dtypes.float64):
              self._testDeterministicGradientsCase(op_binding, data_layout,
                                                   data_rank, data_type) 
Author: NVIDIA, Project: framework-determinism, Lines: 19, Source: test_patch_bias_add.py
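A minimal, hedged way to probe for the nondeterminism this test targets (this is not the repository's _testDeterministicGradientsCase harness): run the same bias-gradient computation twice on a GPU and compare bitwise.

import tensorflow as tf

x = tf.random.normal([64, 32, 32, 8])
r = tf.random.normal([64, 32, 32, 8])  # fixed random upstream gradient
b = tf.Variable(tf.zeros([8]))

def bias_grad():
  with tf.GradientTape() as tape:
    loss = tf.reduce_sum(tf.nn.bias_add(x, b) * r)
  return tape.gradient(loss, b)

g1, g2 = bias_grad(), bias_grad()
deterministic = bool(tf.reduce_all(tf.equal(g1, g2)))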

Example 5: __call__

# Required module: from tensorflow.python.ops import nn_ops [as alias]
# Or alternatively: from tensorflow.python.ops.nn_ops import bias_add [as alias]
def __call__(self, args):

        if not self._is_sequence:
            args = [args]

        if len(args) == 1:

            res = math_ops.matmul(args[0], self._weights)

        else:

            res = math_ops.matmul(array_ops.concat(args, 1), self._weights)

        if self._build_bias:
            res = nn_ops.bias_add(res, self._biases)

        return res 
Author: shenweichen, Project: DeepCTR, Lines: 19, Source: utils.py

Example 6: _test_biasadd

# Required module: from tensorflow.python.ops import nn_ops [as alias]
# Or alternatively: from tensorflow.python.ops.nn_ops import bias_add [as alias]
def _test_biasadd(tensor_in_sizes, data_format):
    """ One iteration of biasadd with given shapes and attributes """

    total_size_1 = 1
    for s in tensor_in_sizes:
        total_size_1 *= s
    tensor_bias_sizes = [tensor_in_sizes[1]] if data_format == 'NCHW' else [tensor_in_sizes[3]]
    total_size_2 = tensor_bias_sizes[0]
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
    bias_array = [f * 1.0 for f in range(1, total_size_2 + 1)]

    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype='float32')
        in_bias = constant_op.constant(
            bias_array, shape=tensor_bias_sizes, dtype='float32')
        nn_ops.bias_add(in_data,
                        in_bias,
                        data_format=data_format)

        compare_tf_with_tvm(np.reshape(data_array, tensor_in_sizes).astype('float32'),
                            'Placeholder:0', 'BiasAdd:0') 
Author: apache, Project: incubator-tvm, Lines: 26, Source: test_forward.py

Example 7: test_forward_bias_add

# Required module: from tensorflow.python.ops import nn_ops [as alias]
# Or alternatively: from tensorflow.python.ops.nn_ops import bias_add [as alias]
def test_forward_bias_add():
    """test Op BiasAdd"""
    def check_bias_add(lh_shape, rh_shape, dtype):
        tf.reset_default_graph()
        lh_data = np.random.uniform(size=lh_shape).astype(dtype)
        rh_data = np.random.uniform(size=rh_shape).astype(dtype)
        with tf.Graph().as_default():
            lft_data = tf.placeholder(dtype, name="lft_data")
            rgt_data = tf.placeholder(dtype, name="rgt_data")
            tf.nn.bias_add(lft_data, rgt_data, name="BiasAdd")
            compare_tf_with_tvm([lh_data, rh_data], [
                                'lft_data:0', 'rgt_data:0'], 'BiasAdd:0')

    check_bias_add((10, 8, 16, 32), (32,), dtype="int32")
    check_bias_add((10, 20), (20,), dtype="float32")


#######################################################################
# Split
# ----- 
Author: apache, Project: incubator-tvm, Lines: 22, Source: test_forward.py

Example 8: _linear

# Required module: from tensorflow.python.ops import nn_ops [as alias]
# Or alternatively: from tensorflow.python.ops.nn_ops import bias_add [as alias]
def _linear(self, args):
    out_size = 4 * self._num_units
    proj_size = args.get_shape()[-1]
    weights = vs.get_variable("kernel", [proj_size, out_size])
    out = math_ops.matmul(args, weights)
    if not self._layer_norm:
      bias = vs.get_variable("bias", [out_size])
      out = nn_ops.bias_add(out, bias)
    return out 
Author: ryfeus, Project: lambda-packs, Lines: 11, Source: rnn_cell.py

Example 9: _linear

# Required module: from tensorflow.python.ops import nn_ops [as alias]
# Or alternatively: from tensorflow.python.ops.nn_ops import bias_add [as alias]
def _linear(self, args):
    out_size = 4 * self._num_units
    proj_size = args.get_shape()[-1]
    weights = vs.get_variable("weights", [proj_size, out_size])
    out = math_ops.matmul(args, weights)
    if not self._layer_norm:
      bias = vs.get_variable("biases", [out_size])
      out = nn_ops.bias_add(out, bias)
    return out 
Author: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 11, Source: rnn_cell.py

Example 10: _linear

# Required module: from tensorflow.python.ops import nn_ops [as alias]
# Or alternatively: from tensorflow.python.ops.nn_ops import bias_add [as alias]
def _linear(self, args):
    out_size = 4 * self._num_units
    proj_size = args.get_shape()[-1]
    dtype = args.dtype
    weights = vs.get_variable("kernel", [proj_size, out_size], dtype=dtype)
    out = math_ops.matmul(args, weights)
    if not self._layer_norm:
      bias = vs.get_variable("bias", [out_size], dtype=dtype)
      out = nn_ops.bias_add(out, bias)
    return out 
Author: shaohua0116, Project: Multiview2Novelview, Lines: 12, Source: rnn_cell.py
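In all three _linear variants, out_size = 4 * self._num_units because the fused projection is later split into the four LSTM gates. A hedged sketch of that downstream split (the surrounding cell code is not shown above):

import tensorflow as tf

num_units = 8
out = tf.zeros([2, 4 * num_units])  # stand-in for _linear's return value
i, j, f, o = tf.split(out, num_or_size_splits=4, axis=1)  # input, new-input, forget, output gates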

Example 11: _patch_bias_add

# Required module: from tensorflow.python.ops import nn_ops [as alias]
# Or alternatively: from tensorflow.python.ops.nn_ops import bias_add [as alias]
def _patch_bias_add():
  tf.nn.bias_add = _new_bias_add_1_14 # access via public API
  nn.bias_add = _new_bias_add_1_14 # called from tf.keras.layers.convolutional.Conv
  nn_ops.bias_add = _new_bias_add_1_14 # called from tests

# The original, pre-patched method can be viewed at
# https://github.com/tensorflow/tensorflow/blob/v1.14.0/tensorflow/python/ops/nn_ops.py#L2628 
Author: NVIDIA, Project: framework-determinism, Lines: 9, Source: patch.py
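_new_bias_add_1_14 itself is not shown in this snippet. Purely as an illustration of the idea (this is not the actual patch), a deterministic replacement can express bias_add as an explicit broadcast add, sidestepping the atomics-based bias-gradient kernel:

import tensorflow as tf

def deterministic_bias_add_sketch(value, bias, data_format=None, name=None):
  # Hypothetical sketch; not the real _new_bias_add_1_14.
  if data_format == 'NCHW':
    broadcast_shape = [1, -1] + [1] * (value.shape.ndims - 2)
    bias = tf.reshape(bias, broadcast_shape)
  return tf.math.add(value, bias, name=name)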

Example 12: _testBias

# Required module: from tensorflow.python.ops import nn_ops [as alias]
# Or alternatively: from tensorflow.python.ops.nn_ops import bias_add [as alias]
def _testBias(self, np_inputs, np_bias, use_gpu=False):
    np_val = self._npBias(np_inputs, np_bias)
    with self.cached_session(use_gpu=use_gpu):
      tf_val = self.evaluate(nn_ops.bias_add(np_inputs, np_bias))
    self.assertAllCloseAccordingToType(np_val, tf_val) 
Author: NVIDIA, Project: framework-determinism, Lines: 7, Source: test_patch_bias_add.py

Example 13: _testBiasNCHW

# Required module: from tensorflow.python.ops import nn_ops [as alias]
# Or alternatively: from tensorflow.python.ops.nn_ops import bias_add [as alias]
def _testBiasNCHW(self, np_inputs, np_bias, use_gpu):
    np_val = self._npBias(np_inputs, np_bias)
    np_inputs = self._NHWCToNCHW(np_inputs)
    with self.cached_session(use_gpu=use_gpu):
      tf_val = self.evaluate(nn_ops.bias_add(np_inputs, np_bias,
                                             data_format="NCHW"))
    tf_val = self._NCHWToNHWC(tf_val)
    self.assertAllCloseAccordingToType(self._AtLeast3d(np_val), tf_val) 
Author: NVIDIA, Project: framework-determinism, Lines: 10, Source: test_patch_bias_add.py
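The _NHWCToNCHW and _NCHWToNHWC helpers are not shown above; for 4-D tensors they presumably reduce to the following transposes (a hedged reconstruction):

import numpy as np

def nhwc_to_nchw(x):  # [N, H, W, C] -> [N, C, H, W]
  return np.transpose(x, (0, 3, 1, 2))

def nchw_to_nhwc(x):  # [N, C, H, W] -> [N, H, W, C]
  return np.transpose(x, (0, 2, 3, 1))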

Example 14: optimized_trilinear_for_attention

# Required module: from tensorflow.python.ops import nn_ops [as alias]
# Or alternatively: from tensorflow.python.ops.nn_ops import bias_add [as alias]
def optimized_trilinear_for_attention(args, c_maxlen, q_maxlen, input_keep_prob=1.0,
    scope='efficient_trilinear',
    bias_initializer=tf.zeros_initializer(),
    kernel_initializer=initializer()):
    assert len(args) == 2, "only intended for computing attention over two inputs"
    arg0_shape = args[0].get_shape().as_list()
    arg1_shape = args[1].get_shape().as_list()
    if len(arg0_shape) != 3 or len(arg1_shape) != 3:
        raise ValueError("`args` must be 3 dims (batch_size, len, dimension)")
    if arg0_shape[2] != arg1_shape[2]:
        raise ValueError("the last dimension of `args` must equal")
    arg_size = arg0_shape[2]
    dtype = args[0].dtype
    dropped_args = [tf.nn.dropout(arg, input_keep_prob) for arg in args]
    with tf.variable_scope(scope):
        weights4arg0 = tf.get_variable(
            "linear_kernel4arg0", [arg_size, 1],
            dtype=dtype,
            regularizer=regularizer,
            initializer=kernel_initializer)
        weights4arg1 = tf.get_variable(
            "linear_kernel4arg1", [arg_size, 1],
            dtype=dtype,
            regularizer=regularizer,
            initializer=kernel_initializer)
        weights4mlu = tf.get_variable(
            "linear_kernel4mul", [1, 1, arg_size],
            dtype=dtype,
            regularizer=regularizer,
            initializer=kernel_initializer)
        biases = tf.get_variable(
            "linear_bias", [1],
            dtype=dtype,
            regularizer=regularizer,
            initializer=bias_initializer)
        subres0 = tf.tile(dot(dropped_args[0], weights4arg0), [1, 1, q_maxlen])
        subres1 = tf.tile(tf.transpose(dot(dropped_args[1], weights4arg1), perm=(0, 2, 1)), [1, c_maxlen, 1])
        subres2 = batch_dot(dropped_args[0] * weights4mlu, tf.transpose(dropped_args[1], perm=(0, 2, 1)))
        res = subres0 + subres1 + subres2
        res = nn_ops.bias_add(res, biases)
        return res 
Author: NLPLearn, Project: QANet, Lines: 43, Source: layers.py
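The three subres terms decompose the trilinear score w . [c; q; c * q] into three cheap products, using the identity w2 . (c * q) = (c * w2) . q. A hedged, self-contained restatement with stock TensorFlow ops (dot and batch_dot above are the project's own helpers):

import tensorflow as tf

B, Lc, Lq, d = 2, 4, 5, 3
c = tf.random.normal([B, Lc, d])  # context,  [batch, c_maxlen, dim]
q = tf.random.normal([B, Lq, d])  # question, [batch, q_maxlen, dim]
w0 = tf.random.normal([d, 1])
w1 = tf.random.normal([d, 1])
w2 = tf.random.normal([1, 1, d])

sub0 = tf.tile(tf.einsum('bld,dk->blk', c, w0), [1, 1, Lq])
sub1 = tf.tile(tf.transpose(tf.einsum('bld,dk->blk', q, w1), [0, 2, 1]), [1, Lc, 1])
sub2 = tf.matmul(c * w2, q, transpose_b=True)
scores = sub0 + sub1 + sub2  # [B, Lc, Lq]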

Example 15: testGradients

# Required module: from tensorflow.python.ops import nn_ops [as alias]
# Or alternatively: from tensorflow.python.ops.nn_ops import bias_add [as alias]
def testGradients(self):
    with ops.Graph().as_default():
      inp = constant(1.0, shape=[32, 100], name="in")
      w = constant(1.0, shape=[100, 10], name="w")
      b = constant(1.0, shape=[10], name="b")
      xw = math_ops.matmul(inp, w, name="xw")
      h = bias_add(xw, b, name="h")
      w_grad = gradients.gradients(h, w)[0]
    self.assertEquals("MatMul", w_grad.op.type)
    self.assertEquals(w_grad.op._original_op, xw.op)
    self.assertTrue(w_grad.op.get_attr("transpose_a"))
    self.assertFalse(w_grad.op.get_attr("transpose_b")) 
Author: tobegit3hub, Project: deep_image_model, Lines: 14, Source: gradients_test.py
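Why the test expects transpose_a=True on the weight gradient: for h = x @ w + b, the gradient of h with respect to w is x^T @ grad, which TensorFlow emits as MatMul(x, grad, transpose_a=True), reusing x from the forward op. A quick eager-mode check of the same quantity (hedged; the test above runs in graph mode):

import tensorflow as tf

x = tf.ones([32, 100])
w = tf.Variable(tf.ones([100, 10]))
b = tf.ones([10])
with tf.GradientTape() as tape:
  h = tf.nn.bias_add(tf.matmul(x, w), b)
w_grad = tape.gradient(h, w)  # equals x^T @ ones([32, 10])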


Note: the tensorflow.python.ops.nn_ops.bias_add method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many programmers; copyright remains with the original authors, and distribution and use must follow the corresponding project's license. Please do not reproduce without permission.