

Python nn_ops.conv2d Method Code Examples

This article collects typical usage examples of the tensorflow.python.ops.nn_ops.conv2d method in Python. If you are wondering how exactly to use nn_ops.conv2d, or are looking for working examples of it, the curated code examples below may help. You can also explore other usage examples from its containing module, tensorflow.python.ops.nn_ops.


The following presents 14 code examples of the nn_ops.conv2d method, sorted by popularity by default.
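Before the examples, here is a minimal, self-contained sketch of calling nn_ops.conv2d directly; it is not taken from any of the projects below. It assumes a TensorFlow 1.x-style graph and session, an NHWC input of shape [batch, height, width, channels], and a filter of shape [filter_height, filter_width, in_channels, out_channels]; the tensor values are illustrative only.

import numpy as np
import tensorflow as tf
from tensorflow.python.ops import nn_ops

# One 4x4 single-channel image and one 2x2 filter producing a single output channel.
x = tf.constant(np.arange(16, dtype=np.float32).reshape(1, 4, 4, 1))
w = tf.constant(np.ones((2, 2, 1, 1), dtype=np.float32))

# Stride 1 in every dimension; "SAME" padding keeps the spatial size at 4x4.
y = nn_ops.conv2d(x, w, strides=[1, 1, 1, 1], padding="SAME")

with tf.Session() as sess:
    print(sess.run(y).shape)  # (1, 4, 4, 1)

The examples below exercise the same call in gradient registrations, attention cells, graph-optimization tests, and framework test suites.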

Example 1: _Conv2DBackpropInputGrad

# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import conv2d [as alias]
def _Conv2DBackpropInputGrad(op, grad):
  """The derivatives for deconvolution.

  Args:
    op: the Deconvolution op.
    grad: the tensor representing the gradient w.r.t. the output

  Returns:
    the gradients w.r.t. the input and the filter
  """
  return [None,
          nn_ops.conv2d_backprop_filter(grad, array_ops.shape(op.inputs[1]),
                                        op.inputs[2], op.get_attr("strides"),
                                        op.get_attr("padding"),
                                        op.get_attr("use_cudnn_on_gpu"),
                                        op.get_attr("data_format")),
          nn_ops.conv2d(grad, op.inputs[1], op.get_attr("strides"),
                        op.get_attr("padding"), op.get_attr("use_cudnn_on_gpu"),
                        op.get_attr("data_format"))] 
Developer: ryfeus, Project: lambda-packs, Lines: 21, Source: nn_grad.py

Example 2: _Conv2DBackpropFilterGrad

# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import conv2d [as alias]
def _Conv2DBackpropFilterGrad(op, grad):
  return [
      nn_ops.conv2d_backprop_input(
          array_ops.shape(op.inputs[0]), grad, op.inputs[2],
          op.get_attr("strides"),
          op.get_attr("padding"),
          op.get_attr("use_cudnn_on_gpu"),
          op.get_attr("data_format")),
      None,
      nn_ops.conv2d(
          op.inputs[0], grad,
          op.get_attr("strides"),
          op.get_attr("padding"),
          op.get_attr("use_cudnn_on_gpu"),
          op.get_attr("data_format"))
  ] 
Developer: ryfeus, Project: lambda-packs, Lines: 18, Source: nn_grad.py

Example 3: _attention

# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import conv2d [as alias]
def _attention(self, query, attn_states):
    conv2d = nn_ops.conv2d
    reduce_sum = math_ops.reduce_sum
    softmax = nn_ops.softmax
    tanh = math_ops.tanh

    with vs.variable_scope("attention"):
      k = vs.get_variable(
          "attn_w", [1, 1, self._attn_size, self._attn_vec_size])
      v = vs.get_variable("attn_v", [self._attn_vec_size])
      hidden = array_ops.reshape(attn_states,
                                 [-1, self._attn_length, 1, self._attn_size])
      hidden_features = conv2d(hidden, k, [1, 1, 1, 1], "SAME")
      y = _linear(query, self._attn_vec_size, True)
      y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size])
      s = reduce_sum(v * tanh(hidden_features + y), [2, 3])
      a = softmax(s)
      d = reduce_sum(
          array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2])
      new_attns = array_ops.reshape(d, [-1, self._attn_size])
      new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1])
      return new_attns, new_attn_states 
Developer: ryfeus, Project: lambda-packs, Lines: 24, Source: rnn_cell.py

Example 4: _attention

# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import conv2d [as alias]
def _attention(self, query, attn_states):
    conv2d = nn_ops.conv2d
    reduce_sum = math_ops.reduce_sum
    softmax = nn_ops.softmax
    tanh = math_ops.tanh

    with vs.variable_scope("attention"):
      k = vs.get_variable(
          "attn_w", [1, 1, self._attn_size, self._attn_vec_size])
      v = vs.get_variable("attn_v", [self._attn_vec_size])
      hidden = array_ops.reshape(attn_states,
                                 [-1, self._attn_length, 1, self._attn_size])
      hidden_features = conv2d(hidden, k, [1, 1, 1, 1], "SAME")
      if self._linear3 is None:
        self._linear3 = _Linear(query, self._attn_vec_size, True)
      y = self._linear3(query)
      y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size])
      s = reduce_sum(v * tanh(hidden_features + y), [2, 3])
      a = softmax(s)
      d = reduce_sum(
          array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2])
      new_attns = array_ops.reshape(d, [-1, self._attn_size])
      new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1])
      return new_attns, new_attn_states 
Developer: shaohua0116, Project: Multiview2Novelview, Lines: 26, Source: rnn_cell.py

Example 5: _attention

# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import conv2d [as alias]
def _attention(self, query, attn_states):
    conv2d = nn_ops.conv2d
    reduce_sum = math_ops.reduce_sum
    softmax = nn_ops.softmax
    tanh = math_ops.tanh

    with vs.variable_scope("Attention"):
      k = vs.get_variable("AttnW", [1, 1, self._attn_size, self._attn_vec_size])
      v = vs.get_variable("AttnV", [self._attn_vec_size])
      hidden = array_ops.reshape(attn_states,
                                 [-1, self._attn_length, 1, self._attn_size])
      hidden_features = conv2d(hidden, k, [1, 1, 1, 1], "SAME")
      y = _linear(query, self._attn_vec_size, True)
      y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size])
      s = reduce_sum(v * tanh(hidden_features + y), [2, 3])
      a = softmax(s)
      d = reduce_sum(
          array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2])
      new_attns = array_ops.reshape(d, [-1, self._attn_size])
      new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1])
      return new_attns, new_attn_states 
Developer: tobegit3hub, Project: deep_image_model, Lines: 23, Source: rnn_cell.py

Example 6: _strict_conv1d

# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import conv2d [as alias]
def _strict_conv1d(x, h):
  """Return x * h for rank 1 tensors x and h."""
  with ops.name_scope('strict_conv1d', values=[x, h]):
    x = array_ops.reshape(x, (1, -1, 1, 1))
    h = array_ops.reshape(h, (-1, 1, 1, 1))
    result = nn_ops.conv2d(x, h, [1, 1, 1, 1], 'SAME')
    return array_ops.reshape(result, [-1]) 
Developer: ryfeus, Project: lambda-packs, Lines: 9, Source: histogram_ops.py

Example 7: testFuseResizePadAndConv

# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import conv2d [as alias]
def testFuseResizePadAndConv(self):
    with self.test_session() as sess:
      inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
      input_op = constant_op.constant(
          np.array(inputs), shape=[1, 2, 3, 2], dtype=dtypes.float32)
      resize_op = image_ops.resize_bilinear(
          input_op, [12, 4], align_corners=False)
      pad_op = array_ops.pad(resize_op, [[0, 0], [1, 1], [2, 2], [0, 0]],
                             mode="REFLECT")
      weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
      weights_op = constant_op.constant(
          np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
      nn_ops.conv2d(
          pad_op, weights_op, [1, 1, 1, 1], padding="VALID", name="output")
      original_graph_def = sess.graph_def
      original_result = sess.run(["output:0"])
    optimized_graph_def = optimize_for_inference_lib.fuse_resize_and_conv(
        original_graph_def, ["output"])

    with self.test_session() as sess:
      _ = importer.import_graph_def(
          optimized_graph_def, input_map={}, name="optimized")
      optimized_result = sess.run(["optimized/output:0"])

    self.assertAllClose(original_result, optimized_result)

    for node in optimized_graph_def.node:
      self.assertNotEqual("Conv2D", node.op)
      self.assertNotEqual("MirrorPad", node.op)
      self.assertNotEqual("ResizeBilinear", node.op) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 32, Source: optimize_for_inference_test.py

Example 8: testFuseResizeAndConv

# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import conv2d [as alias]
def testFuseResizeAndConv(self):
    with self.test_session() as sess:
      inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
      input_op = constant_op.constant(
          np.array(inputs), shape=[1, 2, 3, 2], dtype=dtypes.float32)
      resize_op = image_ops.resize_bilinear(
          input_op, [12, 4], align_corners=False)
      weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
      weights_op = constant_op.constant(
          np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
      nn_ops.conv2d(
          resize_op, weights_op, [1, 1, 1, 1], padding="VALID", name="output")
      original_graph_def = sess.graph_def
      original_result = sess.run(["output:0"])
    optimized_graph_def = optimize_for_inference_lib.fuse_resize_and_conv(
        original_graph_def, ["output"])

    with self.test_session() as sess:
      _ = importer.import_graph_def(
          optimized_graph_def, input_map={}, name="optimized")
      optimized_result = sess.run(["optimized/output:0"])

    self.assertAllClose(original_result, optimized_result)

    for node in optimized_graph_def.node:
      self.assertNotEqual("Conv2D", node.op)
      self.assertNotEqual("ResizeBilinear", node.op) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 29, Source: optimize_for_inference_test.py

Example 9: _test_convolution

# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import conv2d [as alias]
def _test_convolution(tensor_in_sizes, filter_in_sizes,
                      dilations, strides, padding, data_format):
    """ One iteration of convolution with given shapes and attributes """

    total_size_1 = 1
    total_size_2 = 1
    for s in tensor_in_sizes:
        total_size_1 *= s
    for s in filter_in_sizes:
        total_size_2 *= s
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
    filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]

    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype='float32')
        in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype='float32')
        strides = [1] + strides + [1]
        dilations = [1] + dilations + [1]

        nn_ops.conv2d(in_data,
                      in_filter,
                      strides=strides,
                      padding=padding,
                      data_format=data_format)

        compare_tf_with_tvm(np.reshape(data_array, tensor_in_sizes).astype('float32'),
                            'Placeholder:0', 'Conv2D:0') 
Developer: mlperf, Project: training_results_v0.6, Lines: 31, Source: test_forward.py

Example 10: _conv2d

# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import conv2d [as alias]
def _conv2d(self, inputs):
        output_filters = 4 * self._filters
        input_shape = inputs.get_shape().as_list()
        kernel_shape = list(self._kernel_size) + [input_shape[-1], output_filters]
        kernel = vs.get_variable("kernel", kernel_shape, dtype=dtypes.float32,
                                 initializer=init_ops.truncated_normal_initializer(stddev=0.02))
        outputs = nn_ops.conv2d(inputs, kernel, [1] * 4, padding='SAME')
        if not self._normalizer_fn:
            bias = vs.get_variable('bias', [output_filters], dtype=dtypes.float32,
                                   initializer=init_ops.zeros_initializer())
            outputs = nn_ops.bias_add(outputs, bias)
        return outputs 
Developer: alexlee-gk, Project: video_prediction, Lines: 14, Source: rnn_ops.py

Example 11: call

# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import conv2d [as alias]
def call(self, inputs, training=None):
    if training is None:
      training = K.learning_phase()

    conv_out = super(_DepthwiseConvBatchNorm2D, self).call(inputs)

    self.batchnorm.call(conv_out)

    folded_conv_kernel_multiplier = self.batchnorm.gamma * math_ops.rsqrt(
        self.batchnorm.moving_variance + self.batchnorm.epsilon)

    folded_conv_bias = math_ops.subtract(
        self.batchnorm.beta,
        self.batchnorm.moving_mean * folded_conv_kernel_multiplier,
        name='folded_conv_bias')

    depthwise_weights_shape = [
        self.depthwise_kernel.get_shape().as_list()[2],
        self.depthwise_kernel.get_shape().as_list()[3]
    ]
    folded_conv_kernel_multiplier = array_ops.reshape(
        folded_conv_kernel_multiplier, depthwise_weights_shape)

    folded_conv_kernel = math_ops.mul(
        folded_conv_kernel_multiplier,
        self.depthwise_kernel,
        name='folded_conv_kernel')

    if self.is_quantized:
      folded_conv_kernel = self._apply_weight_quantizer(training,
                                                        folded_conv_kernel)

    # TODO(alanchiao): this is an internal API.
    # See if Keras would make this public, like
    # backend.conv2d is.
    #
    # From DepthwiseConv2D layer call() function.
    folded_conv_out = K.depthwise_conv2d(
        inputs,
        folded_conv_kernel,
        strides=self.strides,
        padding=self.padding,
        dilation_rate=self.dilation_rate,
        data_format=self.data_format,
    )

    outputs = K.bias_add(
        folded_conv_out, folded_conv_bias, data_format=self.data_format)

    if self.post_activation is not None:
      outputs = self.post_activation(outputs)
    if self.is_quantized:
      outputs = self._apply_activation_quantizer(training, outputs)
    return outputs 
Developer: tensorflow, Project: model-optimization, Lines: 56, Source: conv_batchnorm.py

Example 12: testFoldBatchNorms

# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import conv2d [as alias]
def testFoldBatchNorms(self):
    with self.test_session() as sess:
      inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
      input_op = constant_op.constant(
          np.array(inputs), shape=[1, 1, 6, 2], dtype=dtypes.float32)
      weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
      weights_op = constant_op.constant(
          np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
      conv_op = nn_ops.conv2d(
          input_op, weights_op, [1, 1, 1, 1], padding="SAME", name="conv_op")
      mean_op = constant_op.constant(
          np.array([10, 20]), shape=[2], dtype=dtypes.float32)
      variance_op = constant_op.constant(
          np.array([0.25, 0.5]), shape=[2], dtype=dtypes.float32)
      beta_op = constant_op.constant(
          np.array([0.1, 0.6]), shape=[2], dtype=dtypes.float32)
      gamma_op = constant_op.constant(
          np.array([1.0, 2.0]), shape=[2], dtype=dtypes.float32)
      ops.get_default_graph().graph_def_versions.producer = 8
      gen_nn_ops._batch_norm_with_global_normalization(
          conv_op,
          mean_op,
          variance_op,
          beta_op,
          gamma_op,
          0.00001,
          False,
          name="output")
      original_graph_def = sess.graph_def
      original_result = sess.run(["output:0"])
    optimized_graph_def = optimize_for_inference_lib.fold_batch_norms(
        original_graph_def)

    with self.test_session() as sess:
      _ = importer.import_graph_def(
          optimized_graph_def, input_map={}, name="optimized")
      optimized_result = sess.run(["optimized/output:0"])

    self.assertAllClose(original_result, optimized_result)

    for node in optimized_graph_def.node:
      self.assertNotEqual("BatchNormWithGlobalNormalization", node.op) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 44, Source: optimize_for_inference_test.py

Example 13: _conv_linear

# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import conv2d [as alias]
def _conv_linear(args, filter_size, num_features, bias, bias_start=0.0, scope=None):
  """convolution:
  Args:
    args: a 4D Tensor or a list of 4D, batch x n, Tensors.
    filter_size: int tuple of filter height and width.
    num_features: int, number of features.
    bias_start: starting value to initialize the bias; 0 by default.
    scope: VariableScope for the created subgraph; defaults to "Linear".
  Returns:
    A 4D Tensor with shape [batch h w num_features]
  Raises:
    ValueError: if some of the arguments has unspecified or wrong shape.
  """

  # Calculate the total size of arguments on dimension 1.
  total_arg_size_depth = 0
  shapes = [a.get_shape().as_list() for a in args]
  for shape in shapes:
    if len(shape) != 4:
      raise ValueError("Linear is expecting 4D arguments: %s" % str(shapes))
    if not shape[3]:
      raise ValueError("Linear expects shape[4] of arguments: %s" % str(shapes))
    else:
      total_arg_size_depth += shape[3]

  dtype = [a.dtype for a in args][0]

  # Now the computation.
  with tf.variable_scope(scope or "Conv"):
    matrix = tf.get_variable(
        "Matrix", [filter_size[0], filter_size[1], total_arg_size_depth, num_features], dtype=dtype)
    if len(args) == 1:
      res = tf.nn.conv2d(args[0], matrix, strides=[1, 1, 1, 1], padding='SAME')
    else:
      res = tf.nn.conv2d(tf.concat(axis=3, values=args), matrix, strides=[1, 1, 1, 1], padding='SAME')
    if not bias:
      return res
    bias_term = tf.get_variable(
        "Bias", [num_features],
        dtype=dtype,
        initializer=tf.constant_initializer(
            bias_start, dtype=dtype))
  return res + bias_term 
Developer: kuleshov, Project: audio-super-res, Lines: 45, Source: convrnn.py

Example 14: _test_convolution

# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import conv2d [as alias]
def _test_convolution(opname, tensor_in_sizes, filter_in_sizes,
                      dilations, strides, padding, data_format,
                      deconv_output_shape=[]):
    """ One iteration of convolution with given shapes and attributes """

    total_size_1 = np.prod(tensor_in_sizes)
    total_size_2 = np.prod(filter_in_sizes)
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
    filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]

    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype='float32')
        in_filter = constant_op.constant(
            filter_array, shape=filter_in_sizes, dtype='float32')
        if data_format == 'NHWC':
            strides = [1] + strides + [1]
            dilations = [1] + dilations + [1]
        else:
            strides = [1, 1] + strides
            dilations = [1, 1] + dilations

        if opname == 'conv':
            nn_ops.conv2d(in_data,
                          in_filter,
                          strides=strides,
                          dilations=dilations,
                          padding=padding,
                          data_format=data_format)

            compare_tf_with_tvm(np.reshape(data_array, tensor_in_sizes).astype('float32'),
                                'Placeholder:0', 'Conv2D:0')
        elif opname == 'conv_transpose':
            nn_ops.conv2d_transpose(in_data,
                                    in_filter,
                                    output_shape=deconv_output_shape,
                                    strides=strides,
                                    padding=padding,
                                    data_format=data_format)

            compare_tf_with_tvm(np.reshape(data_array, tensor_in_sizes).astype('float32'),
                                'Placeholder:0', 'conv2d_transpose:0')
        else:
            nn_ops.depthwise_conv2d_native(in_data,
                                           in_filter,
                                           strides=strides,
                                           dilations=dilations,
                                           padding=padding,
                                           data_format=data_format)

            compare_tf_with_tvm(np.reshape(data_array, tensor_in_sizes).astype('float32'),
                                'Placeholder:0', 'DepthwiseConv2dNative:0') 
Developer: apache, Project: incubator-tvm, Lines: 55, Source: test_forward.py


Note: The tensorflow.python.ops.nn_ops.conv2d method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright in the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code; do not repost without permission.