

Python array_ops.constant Function Code Examples

This article collects typical usage examples of the tensorflow.python.ops.array_ops.constant function in Python. If you have been wondering what exactly the constant function does, how to call it, or what it looks like in real code, the curated examples here should help.


The sections below present 15 code examples of the constant function, sorted by popularity by default.
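Before the examples, here is a minimal standalone sketch of the typical call pattern. This is a hand-written illustration assuming a TensorFlow 1.x graph-mode environment; it is not taken from any of the projects below.

from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops

with ops.Graph().as_default():
  # Constants can be built from scalars, nested lists, or numpy arrays;
  # the dtype is inferred unless given explicitly.
  scalar = array_ops.constant(3.)
  matrix = array_ops.constant([[1., 2.], [3., 4.]])
  ints = array_ops.constant([0, 1, 2], dtype=dtypes.int64)
  with session.Session() as sess:
    print(sess.run([scalar, matrix, ints]))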

Example 1: testUpdateClipCoeff

  def testUpdateClipCoeff(self):
    with ops.Graph().as_default(), self.test_session() as sess:
      grads_and_vars = [(array_ops.constant([[1., 2.], [3., 4.]]), None),
                        (array_ops.constant([[2., 3.], [4., 5.]]), None)]
      pgrads_and_vars = [(array_ops.constant([[3., 4.], [5., 6.]]), None),
                         (array_ops.constant([[7., 8.], [9., 10.]]), None)]
      lrate = 0.1

      # Note: without rescaling, the squared Fisher norm of the update
      # is 1.74
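      # (Cross-check, not part of the original test: assuming
      # _squared_fisher_norm reduces here to the inner product of grads and
      # pgrads, that product is 3+8+15+24 + 14+24+36+50 = 174, and scaling by
      # lrate**2 = 0.01 gives the 1.74 quoted above.)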

      # If the update already satisfies the norm constraint, there should
      # be no rescaling.
      opt = optimizer.KfacOptimizer(
          lrate, 0.2, 0.3, dummy_layer_collection(), norm_constraint=10.)
      coeff = opt._update_clip_coeff(grads_and_vars, pgrads_and_vars)
      self.assertAlmostEqual(1., sess.run(coeff), places=5)

      # If the update violates the constraint, it should be rescaled to
      # be on the constraint boundary.
      opt = optimizer.KfacOptimizer(
          lrate, 0.2, 0.3, dummy_layer_collection(), norm_constraint=0.5)
      coeff = opt._update_clip_coeff(grads_and_vars, pgrads_and_vars)
      sq_norm_pgrad = opt._squared_fisher_norm(grads_and_vars, pgrads_and_vars)
      sq_norm_update = lrate**2 * coeff**2 * sq_norm_pgrad
      self.assertAlmostEqual(0.5, sess.run(sq_norm_update), places=5)
Author: BhaskarNallani | Project: tensorflow | Lines: 26 | Source: optimizer_test.py

Example 2: test_mixture_dev

  def test_mixture_dev(self):
    mixture_weights = np.array([
        [1.0/3, 1.0/3, 1.0/3],
        [0.750, 0.250, 0.000]
    ])
    component_means = np.array([
        [1.0, 1.0, 1.0],
        [-5, 0, 1.25]
    ])
    component_devs = np.array([
        [1.0, 1.0, 1.0],
        [0.01, 2.0, 0.1]
    ])

    # The first case should trivially have a standard deviation of 1.0 because
    # all components are identical and have that standard deviation.
    # The second case was computed by hand.
    expected_devs = np.array([
        1.0,
        2.3848637277
    ])

    weights_tf = array_ops.constant(mixture_weights)
    means_tf = array_ops.constant(component_means)
    sigmas_tf = array_ops.constant(component_devs)
    mix_dev = distribution_util.mixture_stddev(weights_tf,
                                               means_tf,
                                               sigmas_tf)

    with self.test_session() as sess:
      actual_devs = sess.run(mix_dev)

    self.assertAllClose(actual_devs, expected_devs)
Author: Crazyonxh | Project: tensorflow | Lines: 33 | Source: distribution_util_test.py

Example 3: testOptimizerInit

  def testOptimizerInit(self):
    with ops.Graph().as_default():
      layer_collection = lc.LayerCollection()

      inputs = array_ops.ones((2, 1)) * 2
      weights_val = np.ones((1, 1), dtype=np.float32) * 3.
      weights = variable_scope.get_variable(
          'w', initializer=array_ops.constant(weights_val))
      bias = variable_scope.get_variable(
          'b', initializer=init_ops.zeros_initializer(), shape=(1, 1))
      output = math_ops.matmul(inputs, weights) + bias

      layer_collection.register_fully_connected((weights, bias), inputs, output)

      logits = math_ops.tanh(output)
      targets = array_ops.constant([[0.], [1.]])
      output = math_ops.reduce_mean(
          nn.softmax_cross_entropy_with_logits(logits=logits, labels=targets))

      layer_collection.register_categorical_predictive_distribution(logits)

      optimizer.KfacOptimizer(
          0.1,
          0.2,
          0.3,
          layer_collection,
          momentum=0.5,
          momentum_type='regular')
Author: BhaskarNallani | Project: tensorflow | Lines: 28 | Source: optimizer_test.py

Example 4: testScopeStringFromParamsMultipleTypes

 def testScopeStringFromParamsMultipleTypes(self):
   with tf_ops.Graph().as_default():
     x = array_ops.constant(1,)
     y = array_ops.constant(2,)
     scope_string = ff.scope_string_from_params([[1, 2, 3], 'foo', True, 4,
                                                 (x, y)])
     self.assertEqual('1-2-3_foo_True_4_Const__Const_1', scope_string)
Author: AndrewTwinz | Project: tensorflow | Lines: 7 | Source: fisher_factors_test.py

Example 5: testFullFBInitTensorTuple

  def testFullFBInitTensorTuple(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(200)
      params = (array_ops.constant([1., 2.]), array_ops.constant(3.))
      block = fb.FullFB(lc.LayerCollection(), params, 32)

      self.assertAllEqual(params, block.tensors_to_compute_grads())
Author: DjangoPeng | Project: tensorflow | Lines: 7 | Source: fisher_blocks_test.py

Example 6: testAggregateGradients

  def testAggregateGradients(self):

    def fn(x):
      ind1 = tensor.Tensor(np.array([0, 1]))
      ind2 = tensor.Tensor(np.array([2, 3]))
      ind3 = tensor.Tensor(np.array([1, 3]))
      # A mixture of IndexedSlices and dense tensor to aggregate.
      g1 = embedding_ops.embedding_lookup(x, ind1)
      g2 = embedding_ops.embedding_lookup(x, ind2)
      g3 = embedding_ops.embedding_lookup(x, ind3)
      g4 = math_ops.reduce_sum(x * tensor.Tensor(2.0))
      return g1 * g2 * g3 * g4

    var_np = np.random.rand(4, 2).astype(np.float32)
    var = tensor.Tensor(var_np)
    grad = backprop.gradients_function(fn, [0])(var)[0]

    with context.graph_mode(), self.test_session():
      tf_var = array_ops.constant(var_np, dtypes.float32)
      tf_ind1 = array_ops.constant([0, 1])
      tf_ind2 = array_ops.constant([2, 3])
      tf_ind3 = array_ops.constant([1, 3])
      tf_g1 = embedding_ops.embedding_lookup(tf_var, tf_ind1)
      tf_g2 = embedding_ops.embedding_lookup(tf_var, tf_ind2)
      tf_g3 = embedding_ops.embedding_lookup(tf_var, tf_ind3)
      tf_g4 = math_ops.reduce_sum(tf_var * 2.0, reduction_indices=(0, 1))
      tf_y = tf_g1 * tf_g2 * tf_g3 * tf_g4
      tf_grad = gradients.gradients(tf_y, [tf_var])[0]

      tf_dense_grad = math_ops.unsorted_segment_sum(
          tf_grad.values, tf_grad.indices, tf_grad.dense_shape[0])

      self.assertAllClose(grad.numpy(), tf_dense_grad.eval())
Author: chdinh | Project: tensorflow | Lines: 33 | Source: backprop_test.py

Example 7: testMultiplyInverseAgainstExplicit

  def testMultiplyInverseAgainstExplicit(self):
    with ops.Graph().as_default(), self.test_session() as sess:
      random_seed.set_random_seed(200)
      params = (array_ops.constant([1., 2.]), array_ops.constant(3.))
      block = fb.FullFB(lc.LayerCollection(), params)
      block.register_additional_minibatch(32)
      grads = (array_ops.constant([2., 3.]), array_ops.constant(4.))
      damping = 0.5
      block.instantiate_factors((grads,), damping)
      block._factor.instantiate_cov_variables()
      block.register_inverse()
      block._factor.instantiate_inv_variables()

      # Make sure our inverse is something other than the identity.
      sess.run(state_ops.assign(block._factor._cov, _make_psd(3)))
      sess.run(block._factor.make_inverse_update_ops())

      v_flat = np.array([4., 5., 6.], dtype=np.float32)
      vector = utils.column_to_tensors(params, array_ops.constant(v_flat))
      output = block.multiply_inverse(vector)
      output_flat = sess.run(utils.tensors_to_column(output)).ravel()

      full = sess.run(block.full_fisher_block())
      explicit = np.dot(np.linalg.inv(full + damping * np.eye(3)), v_flat)

      self.assertAllClose(output_flat, explicit)
Author: DILASSS | Project: tensorflow | Lines: 26 | Source: fisher_blocks_test.py

Example 8: test_parameter_switching

 def test_parameter_switching(self):
   parameter = array_ops.constant(5)
   overridden_parameter = array_ops.constant(3)
   with self.cached_session():
     getter = model_utils.parameter_switch({overridden_parameter: 4})
     self.assertEqual(5, getter(parameter))
     self.assertEqual(4, getter(overridden_parameter))
Author: Ajaycs99 | Project: tensorflow | Lines: 7 | Source: model_utils_test.py

Example 9: testMakeSparseSplitAllEmptyDimensions

 def testMakeSparseSplitAllEmptyDimensions(self):
   """Tests split handler op when all dimensions have only bias bucket id."""
   with self.test_session() as sess:
     # The data looks like the following after dividing by number of steps (2).
     # Gradients    | Partition | Dimension | bucket ID       |
     # (0.9, 0.39)  | 0         |    0      |  -1             |
     # (4.0, 0.13)  | 1         |    0      |  -1             |
     partition_ids = array_ops.constant([0, 1], dtype=dtypes.int32)
     # We have only 1 dimension in our sparse feature column.
     bucket_ids = array_ops.constant([[-1, 0], [-1, 0]], dtype=dtypes.int64)
     gradients = array_ops.constant([1.8, 8.0])
     hessians = array_ops.constant([0.78, 0.26])
     bucket_boundaries = array_ops.constant([0.3, 0.52])
     partitions, gains, splits = (
         split_handler_ops.build_sparse_inequality_splits(
             num_minibatches=2,
             partition_ids=partition_ids,
             bucket_ids=bucket_ids,
             gradients=gradients,
             hessians=hessians,
             bucket_boundaries=bucket_boundaries,
             l1_regularization=0,
             l2_regularization=2,
             tree_complexity_regularization=0,
             min_node_weight=0,
             feature_column_group_id=0,
             bias_feature_id=-1,
             class_id=-1,
             multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS))
     partitions, gains, splits = (sess.run([partitions, gains, splits]))
   self.assertEqual(0, len(partitions))
   self.assertEqual(0, len(splits))
Author: AbhinavJain13 | Project: tensorflow | Lines: 32 | Source: split_handler_ops_test.py

Example 10: testMakeDenseSplitEmptyInputs

 def testMakeDenseSplitEmptyInputs(self):
   """Tests empty inputs op."""
   with self.test_session() as sess:
     partition_ids = array_ops.constant([], dtype=dtypes.int32)
     bucket_ids = array_ops.constant([[]], dtype=dtypes.int64)
     gradients = array_ops.constant([])
     hessians = array_ops.constant([])
     bucket_boundaries = [0.3, 0.52]
     partitions, gains, splits = (
         split_handler_ops.build_dense_inequality_splits(
             num_minibatches=0,
             partition_ids=partition_ids,
             bucket_ids=bucket_ids,
             gradients=gradients,
             hessians=hessians,
             bucket_boundaries=bucket_boundaries,
             l1_regularization=0.1,
             l2_regularization=1,
             tree_complexity_regularization=0,
             min_node_weight=0,
             class_id=-1,
             feature_column_group_id=0,
             multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS))
     partitions, gains, splits = sess.run([partitions, gains, splits])
   # .assertEmpty doesn't exist on ubuntu-contrib
   self.assertEqual(0, len(partitions))
   self.assertEqual(0, len(gains))
   self.assertEqual(0, len(splits))
Author: AbhinavJain13 | Project: tensorflow | Lines: 28 | Source: split_handler_ops_test.py

Example 11: testColumnToTensors

  def testColumnToTensors(self):
    with ops.Graph().as_default(), self.test_session() as sess:
      random_seed.set_random_seed(200)

      vector_template = array_ops.constant(np.array([[0., 1.], [2., 3.]]))
      colvec = array_ops.constant(np.arange(4.)[:, None])
      output = sess.run(utils.column_to_tensors(vector_template, colvec))
      self.assertAllClose(output, np.array([[0., 1.], [2., 3.]]))

      vector_template = self._fully_connected_layer_params()
      colvec = array_ops.constant(np.arange(6.)[:, None])
      output = sess.run(utils.column_to_tensors(vector_template, colvec))

      self.assertIsInstance(output, tuple)
      self.assertEqual(len(output), 2)
      a, b = output
      self.assertAllClose(a, np.array([[0., 1.], [2., 3.]]))
      self.assertAllClose(b, np.array([4., 5.]))

      vector_template = list(vector_template)
      vector_template.append(array_ops.constant([[6.], [7.], [8.], [9.]]))
      colvec = array_ops.constant(np.arange(10.)[:, None])
      output = sess.run(utils.column_to_tensors(vector_template, colvec))
      self.assertIsInstance(output, tuple)
      self.assertEqual(len(output), 3)
      a, b, c = output
      self.assertAllClose(a, np.array([[0., 1.], [2., 3.]]))
      self.assertAllClose(b, np.array([4., 5.]))
      self.assertAllClose(c, np.array([[6.], [7.], [8.], [9.]]))
Author: AbhinavJain13 | Project: tensorflow | Lines: 29 | Source: utils_test.py

Example 12: testTrackPersistentBytes

  def testTrackPersistentBytes(self):
    ops.reset_default_graph()
    a = array_ops.constant(np.ones((100, 100)))
    b = array_ops.constant(np.ones((100, 100)))
    c = a * b

    with session.Session() as sess:
      run_options = config_pb2.RunOptions(
          trace_level=config_pb2.RunOptions.FULL_TRACE)
      run_metadata = config_pb2.RunMetadata()
      sess.run(c, options=run_options, run_metadata=run_metadata)

      options = option_builder.ProfileOptionBuilder.time_and_memory()
      options['min_bytes'] = 0
      options['select'] = ('bytes', 'peak_bytes', 'output_bytes',
                           'residual_bytes')
      ret = model_analyzer.profile(
          sess.graph, run_meta=run_metadata, cmd='scope', options=options)

      run_metadata = config_pb2.RunMetadata()
      sess.run(c, options=run_options, run_metadata=run_metadata)
      ret2 = model_analyzer.profile(
          sess.graph, run_meta=run_metadata, cmd='scope', options=options)

      n = lib.SearchTFProfNode(ret, 'mul')
      n2 = lib.SearchTFProfNode(ret2, 'mul')
      self.assertGreater(n.peak_bytes, 0)
      self.assertGreater(n.output_bytes, 0)
      self.assertGreater(n.residual_bytes, 0)
      self.assertEqual(n.peak_bytes, n2.peak_bytes)
      self.assertEqual(n.output_bytes, n2.output_bytes)
      self.assertEqual(n.residual_bytes, n2.residual_bytes)
Author: andrewharp | Project: tensorflow | Lines: 32 | Source: model_analyzer_test.py

Example 13: testConsistent

 def testConsistent(self):
   nums, divs = self.intTestData()
   with self.test_session():
     tf_result = (
         math_ops.floor_div(nums, divs) * divs + math_ops.floormod(nums, divs)
     ).eval()
     tf_nums = array_ops.constant(nums)
     tf_divs = array_ops.constant(divs)
     tf2_result = (tf_nums // tf_divs * tf_divs + tf_nums % tf_divs).eval()
     np_result = (nums // divs) * divs + (nums % divs)
     # consistency with numpy
     self.assertAllEqual(tf_result, np_result)
     # consistency with two forms of divide
     self.assertAllEqual(tf_result, tf2_result)
     # consistency for truncation form
     tf3_result = (
         math_ops.truncatediv(nums, divs) * divs
         + math_ops.truncatemod(nums, divs)
     ).eval()
     expanded_nums = np.reshape(np.tile(nums, divs.shape[1]),
                                (nums.shape[0], divs.shape[1]))
     # Consistent with desire to get numerator
     self.assertAllEqual(tf3_result, expanded_nums)
     # Consistent with desire to get numerator
     self.assertAllEqual(tf_result, expanded_nums)
Author: Jackhuang945 | Project: tensorflow | Lines: 25 | Source: math_ops_test.py

Example 14: testSwishLiteHint

  def testSwishLiteHint(self):
    """Makes a custom op swish and makes sure it gets converted as a unit."""
    image = array_ops.constant([1., 2., 3., 4.])
    swish_scale = array_ops.constant(1.0)

    def _swish(input_tensor, scale):
      custom = op_hint.OpHint("cool_activation")
      input_tensor, scale = custom.add_inputs(input_tensor, scale)
      output = math_ops.sigmoid(input_tensor) * input_tensor * scale
      output, = custom.add_outputs(output)
      return output
    output = array_ops.identity(_swish(image, swish_scale), name="ModelOutput")

    with self.cached_session() as sess:
      # check if identities have been put into the graph (2 input, 1 output,
      # and 1 final output).
      self.assertEqual(self._countIdentities(sess.graph_def.node), 4)

      stubbed_graphdef = op_hint.convert_op_hints_to_stubs(
          graph_def=sess.graph_def)

      self.assertEqual(
          self._getGraphOpTypes(
              stubbed_graphdef,
              output_nodes=[op_hint._tensor_name_base(output.name)]),
          set(["cool_activation", "Const", "Identity"]))
Author: JonathanRaiman | Project: tensorflow | Lines: 26 | Source: convert_test.py

Example 15: testScaleAndBiasAndIdentity

  def testScaleAndBiasAndIdentity(self):
    """This tests a scaled add which has 3 inputs and 2 outputs."""
    a = array_ops.constant(1.)
    x = array_ops.constant([2., 3.])
    b = array_ops.constant([4., 5.])

    def _scaled_and_bias_and_identity(a, x, b):
      custom = op_hint.OpHint("scale_and_bias_and_identity")
      a, x, b = custom.add_inputs(a, x, b)
      return custom.add_outputs(a * x + b, x)
    output = array_ops.identity(_scaled_and_bias_and_identity(a, x, b),
                                name="ModelOutput")

    with self.cached_session() as sess:
      # make sure one identity for each input (3) and output (2) => 3 + 2 = 5
      # +1 for the final output
      self.assertEqual(self._countIdentities(sess.graph_def.node), 6)

      stubbed_graphdef = op_hint.convert_op_hints_to_stubs(
          graph_def=sess.graph_def)

      self.assertEqual(
          self._getGraphOpTypes(
              stubbed_graphdef,
              output_nodes=[op_hint._tensor_name_base(output.name)]),
          set(["scale_and_bias_and_identity", "Const", "Identity", "Pack"]))
Author: JonathanRaiman | Project: tensorflow | Lines: 26 | Source: convert_test.py


Note: The tensorflow.python.ops.array_ops.constant examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and distribution or use should follow each project's license. Please do not reproduce without permission.