

Python math_ops.add Function Code Examples

This article collects typical usage examples of the tensorflow.python.ops.math_ops.add function in Python. If you are unsure how add is used in practice, or what real-world calls to it look like, the curated examples below should help.


Below are 15 code examples of the add function, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
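Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of how math_ops.add is typically called through TensorFlow's internal modules. It assumes TensorFlow 1.x-style graph execution; the placeholder "x", constant "c", and their values are illustrative only.

from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops

g = ops.Graph()
with g.as_default():
  # Element-wise addition of a fed placeholder and a constant;
  # math_ops.add behaves like the "+" operator on tensors.
  x = array_ops.placeholder(dtype=dtypes.float32, shape=[2], name="x")
  c = constant_op.constant([1.0, 2.0], name="c")
  y = math_ops.add(x, c, name="add")

with session.Session(graph=g) as sess:
  print(sess.run(y, feed_dict={x: [3.0, 4.0]}))  # prints [4. 6.]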

Example 1: GetParams

 def GetParams(self):
   """Create a graph containing multiple segment."""
   input_name = "input"
   input_dims = [2, 32, 32, 3]
   g = ops.Graph()
   with g.as_default():
     inp = array_ops.placeholder(
         dtype=dtypes.float32, shape=input_dims, name=input_name)
     with g.device("/GPU:0"):
       n = inp
       c = constant_op.constant(1.0, name="c")
       n = math_ops.add(n, c, name="add")
       n = math_ops.mul(n, n, name="mul")
       n = math_ops.add(n, n, name="add1")
       n = self.trt_incompatible_op(n, name="incompatible1")
       n = math_ops.add(n, c, name="add2")
       n = math_ops.mul(n, n, name="mul1")
       n = math_ops.add(n, n, name="add3")
     array_ops.squeeze(n, name=self.output_name)
   return trt_test.TfTrtIntegrationTestParams(
       gdef=g.as_graph_def(),
       input_names=[input_name],
       input_dims=[input_dims],
       expected_engines={
           "my_trt_op_0": ["add2", "add3", "mul1"],
           # Why segment ["add", "add1", "mul"] was assigned segment id 1
           # instead of 0: the parent node of this segment is actually const
           # node 'c', but it's removed later since it's const output of the
           # segment which is not allowed.
           "my_trt_op_1": ["add", "add1", "mul"]
       },
       expected_output_dims=tuple(input_dims),
       allclose_atol=1.e-06,
       allclose_rtol=1.e-06)
Developer ID: ZhangXinNan, Project: tensorflow, Lines: 34, Source: base_test.py

Example 2: test_multiple_outputs

  def test_multiple_outputs(self):
    #   -         +
    #  / \y0   y1/ \
    # x    split    z
    #       |
    #       y         (nodes are ops; edges are going up)
    g = ops.Graph()
    with g.as_default():
      x = array_ops.placeholder(dtypes.float32, shape=[1], name='x')
      y = array_ops.placeholder(dtypes.float32, shape=[2], name='y')
      y0, y1 = array_ops.split(y, num_or_size_splits=2, axis=0)
      z = array_ops.placeholder(dtypes.float32, shape=[1], name='z')
      math_ops.add(x, y0)
      math_ops.subtract(y1, z)

    y1_pattern = graph_matcher.OpTypePattern('*')
    minus_pattern = graph_matcher.OpTypePattern('Sub', inputs=[y1_pattern, '*'])
    matcher = graph_matcher.GraphMatcher(minus_pattern)

    match_results = list(matcher.match_graph(g))
    self.assertEqual(1, len(match_results))
    match_result = match_results[0]

    self.assertEqual(y0.op, y1.op)
    self.assertEqual(match_result.get_op(y1_pattern), y1.op)
    self.assertEqual(match_result.get_tensor(y1_pattern), y1)
Developer ID: AndrewTwinz, Project: tensorflow, Lines: 26, Source: graph_matcher_test.py

Example 3: testAggregate

  def testAggregate(self):
    a = array_ops.constant([3., 4.])
    b = array_ops.constant([5., 6.])
    hint = op_hint.OpHint("agg")
    a0, a1 = array_ops.unstack(a)
    b0, b1 = array_ops.unstack(b)

    a0 = hint.add_input(a0, tag="c", aggregate=op_hint.OpHint.AGGREGATE_STACK)
    b0 = hint.add_input(b0, tag="n", aggregate=op_hint.OpHint.AGGREGATE_STACK)
    a1 = hint.add_input(a1, tag="c", aggregate=op_hint.OpHint.AGGREGATE_STACK)
    b1 = hint.add_input(b1, tag="n", aggregate=op_hint.OpHint.AGGREGATE_STACK)

    c0 = math_ops.add(a0, b0, name="addleft")
    c1 = math_ops.add(a1, b1, name="addright")
    c0 = hint.add_output(
        c0, tag="out", aggregate=op_hint.OpHint.AGGREGATE_STACK)
    c1 = hint.add_output(
        c1, tag="out", aggregate=op_hint.OpHint.AGGREGATE_STACK)

    curr = array_ops.stack([c0, c1])
    output = array_ops.identity(curr, name="FINAL_OUTPUT")
    with self.cached_session() as sess:
      stubbed_graphdef = op_hint.convert_op_hints_to_stubs(
          graph_def=sess.graph_def)
      self.assertEqual(
          self._getGraphOpTypes(
              stubbed_graphdef,
              output_nodes=[op_hint._tensor_name_base(output.name)]),
          set(["agg", "Const", "Identity"]))
Developer ID: JonathanRaiman, Project: tensorflow, Lines: 29, Source: convert_test.py

Example 4: GetParams

 def GetParams(self):
   """Create a graph containing multiple segment."""
   input_name = "input"
   input_dims = [2, 32, 32, 3]
   output_name = "output"
   g = ops.Graph()
   with g.as_default():
     inp = array_ops.placeholder(
         dtype=dtypes.float32, shape=input_dims, name=input_name)
     with g.device("/GPU:0"):
       c1 = constant_op.constant(1.0, name="c1")
       c2 = constant_op.constant(1.0, name="c2")
       d1 = constant_op.constant(1.0, name="d1")
       d2 = self.trt_incompatible_op(inp, name="d2")
       with g.control_dependencies([d1, d2]):
         add = math_ops.add(inp, c1, name="add")
       with g.control_dependencies([d1, d2]):
         mul = math_ops.mul(add, add, name="mul")
       with g.control_dependencies([d1, d2]):
         add1 = math_ops.add(mul, mul, name="add1")
       edge = self.trt_incompatible_op(add1, name="incompatible")
       with g.control_dependencies([d1, d2, add, mul]):
         add2 = math_ops.add(edge, c2, name="add2")
       with g.control_dependencies([d1, d2, add1, mul]):
         mul1 = math_ops.mul(add2, add2, name="mul1")
       with g.control_dependencies([d1, d2, add, add1]):
         add3 = math_ops.add(mul1, mul1, name="add3")
     array_ops.squeeze(add3, name=output_name)
   return trt_test.TfTrtIntegrationTestParams(
       gdef=g.as_graph_def(),
       input_names=[input_name],
       input_dims=[input_dims],
       output_names=[output_name],
       expected_output_dims=[tuple(input_dims)])
Developer ID: aeverall, Project: tensorflow, Lines: 34, Source: base_test.py

Example 5: _test_add

def _test_add(data):
    """ One iteration of add """

    assert len(data) == 2
    need_transpose = False
    if len(data[0].shape) == 1 or len(data[0].shape) == 2:
        tvm_data = data
    elif len(data[0].shape) == 3:
        need_transpose = True
        tvm_data = [np.transpose(d, axes=(0, 2, 1)) for d in data]
    elif len(data[0].shape) == 4:
        need_transpose = True
        tvm_data = [np.transpose(d, axes=(0, 3, 1, 2)) for d in data]
    else:
        raise NotImplementedError("Unsupported input shape {} for add".
                                  format(str(data[0].shape)))

    # Test with two tensors
    with tf.Graph().as_default():
        in_data = [array_ops.placeholder(shape=data[0].shape, dtype=data[0].dtype, name='in_0'),
                   array_ops.placeholder(shape=data[1].shape, dtype=data[1].dtype, name='in_1')]
        out = math_ops.add(in_data[0], in_data[1])
        compare_tflite_with_tvm(data, tvm_data, ['in_0:0','in_1:0'],
                                in_data, [out], need_transpose)

    # Test with tensor and constant
    with tf.Graph().as_default():
        in_data = [array_ops.placeholder(shape=data[0].shape, dtype=data[0].dtype, name='in')]
        out = math_ops.add(in_data[0], ops.convert_to_tensor(data[1], dtype=data[1].dtype))
        compare_tflite_with_tvm([data[0]], [tvm_data[0]], ['in:0'],
                                in_data, [out], need_transpose)
Developer ID: bddppq, Project: tvm, Lines: 31, Source: test_forward.py

Example 6: testWhileWithScopedAllocator

  def testWhileWithScopedAllocator(self):
    group_size = 2
    group_key = 1
    instance_key0 = 1
    instance_key1 = 2

    config = config_pb2.ConfigProto(device_count={'CPU': group_size})
    rewrite_options = config.graph_options.rewrite_options
    rewrite_options.scoped_allocator_optimization = (
        rewriter_config_pb2.RewriterConfig.ON)
    del rewrite_options.scoped_allocator_opts.enable_op[:]
    rewrite_options.scoped_allocator_opts.enable_op.append('CollectiveReduce')

    with self.session(config=config) as sess:
      run_ops = []
      for i in range(group_size):
        with ops.device('CPU:%d' % i):
          constant = constant_op.constant(0.)
          cond = lambda i: math_ops.less(i, 10.)
          body = lambda i: math_ops.add(i, 1.)
          input0 = control_flow_ops.while_loop(cond, body, [constant])
          input1 = math_ops.add(constant, 5)
          colred0 = collective_ops.all_reduce(input0, group_size, group_key,
                                              instance_key0, 'Add', 'Id')
          colred1 = collective_ops.all_reduce(input1, group_size, group_key,
                                              instance_key1, 'Add', 'Id')
          run_ops.append(math_ops.add_n([colred0, colred1]))
      results = sess.run(run_ops)
      self.assertEqual(results, [30., 30.])
Developer ID: aritratony, Project: tensorflow, Lines: 29, Source: collective_ops_test.py

Example 7: _TestRandomGraphWithDevices

 def _TestRandomGraphWithDevices(self,
                                 sess,
                                 seed,
                                 op_placement,
                                 devices,
                                 debug_mode=False):
   data = []
   shape = (self._dim, self._dim)
   feed_dict = {}
   # Initialize the matrices
   for i in range(len(devices)):
     with ops.device(devices[i]):
       var = array_ops.placeholder(dtypes.float32, shape=shape)
       np.random.seed(seed + i)
       feed_dict[var] = np.random.uniform(
           low=0, high=0.1, size=shape).astype(np.float32)
       data.append(var)
   # Run the 'add' operations on those matrices
   for op in op_placement:
     with ops.device(devices[op[2]]):
       data[op[2]] = math_ops.add(data[op[0]], data[op[1]])
   with ops.device('/cpu:0'):
     s = data[0]
     for i in range(1, len(data)):
       s = math_ops.add(s, data[i])
   if debug_mode:
     logging.info(ops.get_default_graph().as_graph_def())
   result = sess.run(s, feed_dict=feed_dict)
   self._LogMatrix(result, self._dim)
   return result
Developer ID: AnishShah, Project: tensorflow, Lines: 30, Source: virtual_gpu_test.py

Example 8: fn

 def fn(x):
   with context.device('/gpu:0'):
     b = constant_op.constant(2.0)
     c = math_ops.add(x.gpu(), b)
     # TODO(apassos): remove cpu below by making TensorVSPace aware
     # of devices.
     return math_ops.add(c, constant_op.constant(3.0)).cpu()
Developer ID: Wajih-O, Project: tensorflow, Lines: 7, Source: backprop_test.py

Example 9: testIgnoredArguments

  def testIgnoredArguments(self):
    """Tests that JIT computations can ignore formal parameters."""

    with self.session(config=NoRewriteSessionConfig()) as sess:
      x = array_ops.placeholder(dtypes.int32)
      y = array_ops.placeholder(dtypes.int32)
      with jit_scope():
        z = math_ops.add(x, x)
        w = math_ops.add(y, y)
        # Pulls 'w' into the same compilation via control dependencies.
        with ops.control_dependencies([w]):
          n = control_flow_ops.no_op()
        with ops.control_dependencies([n]):
          t = math_ops.add(z, z)

      run_metadata = config_pb2.RunMetadata()
      out = test_utils.RunWithWarmup(
          sess,
          t, {
              x: np.int32(7),
              y: np.int32(404)
          },
          run_metadata=run_metadata,
          options=config_pb2.RunOptions(
              trace_level=config_pb2.RunOptions.FULL_TRACE))
      self.assert_(MetadataHasXlaRunOp(run_metadata))
      self.assertAllClose(28, out)
Developer ID: Ajaycs99, Project: tensorflow, Lines: 27, Source: jit_test.py

Example 10: testDebugMakeCallableFromOptionsWithCustomOptionsAndMetadataWorks

  def testDebugMakeCallableFromOptionsWithCustomOptionsAndMetadataWorks(self):
    variable_1 = variables.Variable(
        10.5, dtype=dtypes.float32, name="variable_1")
    a = math_ops.add(variable_1, variable_1, "callable_a")
    math_ops.add(a, a, "callable_b")
    self.sess.run(variable_1.initializer)

    wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
        [["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
    callable_options = config_pb2.CallableOptions()
    callable_options.fetch.append("callable_b")
    callable_options.run_options.trace_level = config_pb2.RunOptions.FULL_TRACE

    sess_callable = wrapped_sess._make_callable_from_options(callable_options)

    run_metadata = config_pb2.RunMetadata()
    # Call the callable with a custom run_metadata.
    callable_output = sess_callable(run_metadata=run_metadata)
    # Verify that step_stats is populated in the custom run_metadata.
    self.assertTrue(run_metadata.step_stats)
    self.assertAllClose(np.array(42.0, dtype=np.float32), callable_output[0])

    debug_dumps = wrapped_sess.observers["debug_dumps"]
    self.assertEqual(1, len(debug_dumps))
    debug_dump = debug_dumps[0]
    node_names = [datum.node_name for datum in debug_dump.dumped_tensor_data]
    self.assertItemsEqual(
        ["callable_a", "callable_b", "variable_1", "variable_1/read"],
        node_names)
Developer ID: AnishShah, Project: tensorflow, Lines: 29, Source: local_cli_wrapper_test.py

Example 11: _session_run_for_graph_structure_lookup

  def _session_run_for_graph_structure_lookup(self):
    with session.Session() as sess:
      u_name = "testDumpGraphStructureLookup/u"
      v_name = "testDumpGraphStructureLookup/v"
      w_name = "testDumpGraphStructureLookup/w"

      u_init = constant_op.constant([2.0, 4.0])
      u = variables.Variable(u_init, name=u_name)
      v = math_ops.add(u, u, name=v_name)
      w = math_ops.add(v, v, name=w_name)

      u.initializer.run()

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity"],
          debug_urls=self._debug_urls())

      run_metadata = config_pb2.RunMetadata()
      sess.run(w, options=run_options, run_metadata=run_metadata)

    self.assertEqual(self._expected_partition_graph_count,
                     len(run_metadata.partition_graphs))

    dump = debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs)

    return u_name, v_name, w_name, dump
Developer ID: moolighty, Project: tensorflow, Lines: 30, Source: session_debug_testlib.py

Example 12: testDebuggingMakeCallableFromOptionsWithTwoFeedsWorks

  def testDebuggingMakeCallableFromOptionsWithTwoFeedsWorks(self):
    ph1 = array_ops.placeholder(dtypes.float32, name="callable_ph1")
    ph2 = array_ops.placeholder(dtypes.float32, name="callable_ph2")
    a = math_ops.add(ph1, ph2, "callable_a")
    math_ops.add(a, a, "callable_b")

    wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
        [["run"]] * 3, self.sess, dump_root=self._tmp_dir)
    callable_options = config_pb2.CallableOptions()
    callable_options.feed.append("callable_ph1")
    callable_options.feed.append("callable_ph2")
    callable_options.fetch.append("callable_b")
    sess_callable = wrapped_sess._make_callable_from_options(callable_options)

    ph1_value = np.array(5.0, dtype=np.float32)
    ph2_value = np.array(16.0, dtype=np.float32)

    for _ in range(2):
      callable_output = sess_callable(ph1_value, ph2_value)
      self.assertAllClose(np.array(42.0, dtype=np.float32), callable_output[0])

    debug_dumps = wrapped_sess.observers["debug_dumps"]
    self.assertEqual(2, len(debug_dumps))
    for debug_dump in debug_dumps:
      node_names = [datum.node_name for datum in debug_dump.dumped_tensor_data]
      self.assertItemsEqual(["callable_a", "callable_b"], node_names)
Developer ID: AnishShah, Project: tensorflow, Lines: 26, Source: local_cli_wrapper_test.py

Example 13: fn

 def fn(x):
   with context.device('/gpu:0'):
     b = tensor.Tensor(2.0)
     c = math_ops.add(x.as_gpu_tensor(), b)
     # TODO(apassos): remove as_cpu_tensor below by making TensorVSPace aware
     # of devices.
     return math_ops.add(c, tensor.Tensor(3.0)).as_cpu_tensor()
Developer ID: chdinh, Project: tensorflow, Lines: 7, Source: backprop_test.py

Example 14: GetParams

 def GetParams(self):
   """Test for rank 2 input in TF-TRT."""
   input_names = ["input", "input2"]
   # Two paths: first with rank 2 input, second with rank 4 input.
   input_dims = [[12, 5], [12, 5, 2, 2]]
   output_name = "output"
   g = ops.Graph()
   with g.as_default():
     outputs = []
     for i in range(2):
       x = array_ops.placeholder(
           dtype=dtypes.float32, shape=input_dims[i], name=input_names[i])
       c = constant_op.constant(1.0, name="c%d_1" % i)
       q = math_ops.add(x, c, name="add%d_1" % i)
       q = math_ops.abs(q, name="abs%d_1" % i)
       c = constant_op.constant(2.2, name="c%d_2" % i)
       q = math_ops.add(q, c, name="add%d_2" % i)
       q = math_ops.abs(q, name="abs%d_2" % i)
       c = constant_op.constant(3.0, name="c%d_3" % i)
       q = math_ops.add(q, c, name="add%d_3" % i)
       if i == 0:
         for j in range(2):
           q = array_ops.expand_dims(q, -1, name="expand%d_%d" % (i, j))
       q = gen_math_ops.reciprocal(q, name="reciprocal%d" % i)
       outputs.append(q)
     # Combine both paths
     q = math_ops.add(outputs[0], outputs[1], name="add")
     array_ops.squeeze(q, name=output_name)
   return trt_test.TfTrtIntegrationTestParams(
       gdef=g.as_graph_def(),
       input_names=input_names,
       input_dims=input_dims,
       output_names=[output_name],
       expected_output_dims=[tuple(input_dims[1])])
Developer ID: Ajaycs99, Project: tensorflow, Lines: 34, Source: rank_two_test.py

Example 15: body

 def body(value, denom, i, ret_rate):
   i += 1
   ret_rate = r_(value, denom)
   with ops.control_dependencies([ret_rate]):
     value = math_ops.add(value, 2)
     denom = math_ops.add(denom, 1)
   return [value, denom, i, ret_rate]
Developer ID: Ajaycs99, Project: tensorflow, Lines: 7, Source: rate_test.py


Note: The tensorflow.python.ops.math_ops.add examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors; please consult the corresponding project's license before distributing or using it. Do not reproduce without permission.